| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
zvolsky/glassclear | languages/ru.py | 157 | 8945 | # coding: utf8
{
'!langcode!': 'ru',
'!langname!': 'Русский',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"Изменить" - необязательное выражение вида "field1=\'новое значение\'". Результаты операции JOIN нельзя изменить или удалить.',
'%d days ago': '%d %%{день} тому',
'%d hours ago': '%d %%{час} тому',
'%d minutes ago': '%d %%{минуту} тому',
'%d months ago': '%d %%{месяц} тому',
'%d seconds ago': '%d %%{секунду} тому',
'%d weeks ago': '%d %%{неделю} тому',
'%d years ago': '%d %%{год} тому',
'%s %%{row} deleted': '%%{!удалена[0]} %s %%{строка[0]}',
'%s %%{row} updated': '%%{!изменена[0]} %s %%{строка[0]}',
'%s selected': '%%{!выбрана[0]} %s %%{запись[0]}',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'1 day ago': '1 день тому',
'1 hour ago': '1 час тому',
'1 minute ago': '1 минуту тому',
'1 month ago': '1 месяц тому',
'1 second ago': '1 секунду тому',
'1 week ago': '1 неделю тому',
'1 year ago': '1 год тому',
'About': 'About',
'Access Control': 'Access Control',
'Administrative Interface': 'Administrative Interface',
'Administrative interface': 'административный интерфейс',
'Ajax Recipes': 'Ajax Recipes',
'appadmin is disabled because insecure channel': 'appadmin is disabled because insecure channel',
'Are you sure you want to delete this object?': 'Вы уверены, что хотите удалить этот объект?',
'Available Databases and Tables': 'Базы данных и таблицы',
'Buy this book': 'Buy this book',
'cache': 'cache',
'Cache': 'Cache',
'Cache Keys': 'Cache Keys',
'Cannot be empty': 'Пустое значение недопустимо',
'Change Password': 'Смените пароль',
'Check to delete': 'Удалить',
'Check to delete:': 'Удалить:',
'Clear CACHE?': 'Clear CACHE?',
'Clear DISK': 'Clear DISK',
'Clear RAM': 'Clear RAM',
'Client IP': 'Client IP',
'Community': 'Community',
'Components and Plugins': 'Components and Plugins',
'Controller': 'Controller',
'Copyright': 'Copyright',
'Current request': 'Текущий запрос',
'Current response': 'Текущий ответ',
'Current session': 'Текущая сессия',
'customize me!': 'настройте внешний вид!',
'data uploaded': 'данные загружены',
'Database': 'Database',
'Database %s select': 'выбор базы данных %s',
'db': 'БД',
'DB Model': 'DB Model',
'Delete:': 'Удалить:',
'Demo': 'Demo',
'Deployment Recipes': 'Deployment Recipes',
'Description': 'Описание',
'design': 'дизайн',
'DISK': 'DISK',
'Disk Cache Keys': 'Disk Cache Keys',
'Disk Cleared': 'Disk Cleared',
'Documentation': 'Documentation',
"Don't know what to do?": "Don't know what to do?",
'done!': 'готово!',
'Download': 'Download',
'E-mail': 'E-mail',
'Edit current record': 'Редактировать текущую запись',
'Edit Profile': 'Редактировать профиль',
'Email and SMS': 'Email and SMS',
'enter an integer between %(min)g and %(max)g': 'enter an integer between %(min)g and %(max)g',
'Errors': 'Errors',
'export as csv file': 'экспорт в csv-файл',
'FAQ': 'FAQ',
'First name': 'Имя',
'Forms and Validators': 'Forms and Validators',
'Free Applications': 'Free Applications',
'Group ID': 'Group ID',
'Groups': 'Groups',
'Hello World': 'Заработало!',
'Home': 'Home',
'How did you get here?': 'How did you get here?',
'import': 'import',
'Import/Export': 'Импорт/экспорт',
'insert new': 'добавить',
'insert new %s': 'добавить %s',
'Internal State': 'Внутреннее состояние',
'Introduction': 'Introduction',
'Invalid email': 'Неверный email',
'Invalid login': 'Неверный логин',
'Invalid password': 'Неверный пароль',
'Invalid Query': 'Неверный запрос',
'invalid request': 'неверный запрос',
'Key': 'Key',
'Last name': 'Фамилия',
'Layout': 'Layout',
'Layout Plugins': 'Layout Plugins',
'Layouts': 'Layouts',
'Live Chat': 'Live Chat',
'Logged in': 'Вход выполнен',
'Logged out': 'Выход выполнен',
'login': 'вход',
'Login': 'Вход',
'logout': 'выход',
'Logout': 'Выход',
'Lost Password': 'Забыли пароль?',
'Lost password?': 'Lost password?',
'Manage Cache': 'Manage Cache',
'Menu Model': 'Menu Model',
'My Sites': 'My Sites',
'Name': 'Name',
'New password': 'Новый пароль',
'New Record': 'Новая запись',
'new record inserted': 'новая запись добавлена',
'next 100 rows': 'следующие 100 строк',
'No databases in this application': 'В приложении нет баз данных',
'now': 'сейчас',
'Object or table name': 'Object or table name',
'Old password': 'Старый пароль',
'Online examples': 'примеры он-лайн',
'or import from csv file': 'или импорт из csv-файла',
'Origin': 'Происхождение',
'Other Plugins': 'Other Plugins',
'Other Recipes': 'Other Recipes',
'Overview': 'Overview',
'Password': 'Пароль',
'password': 'пароль',
"Password fields don't match": 'Пароли не совпадают',
'Plugins': 'Plugins',
'Powered by': 'Powered by',
'Preface': 'Preface',
'previous 100 rows': 'предыдущие 100 строк',
'profile': 'профиль',
'Python': 'Python',
'Query:': 'Запрос:',
'Quick Examples': 'Quick Examples',
'RAM': 'RAM',
'RAM Cache Keys': 'RAM Cache Keys',
'Ram Cleared': 'Ram Cleared',
'Recipes': 'Recipes',
'Record': 'Record',
'record does not exist': 'запись не найдена',
'Record ID': 'ID записи',
'Record id': 'id записи',
'Register': 'Зарегистрироваться',
'Registration identifier': 'Registration identifier',
'Registration key': 'Ключ регистрации',
'Remember me (for 30 days)': 'Запомнить меня (на 30 дней)',
'Reset Password key': 'Сбросить ключ пароля',
'Role': 'Роль',
'Rows in Table': 'Строк в таблице',
'Rows selected': 'Выделено строк',
'Semantic': 'Semantic',
'Services': 'Services',
'Size of cache:': 'Size of cache:',
'state': 'состояние',
'Statistics': 'Statistics',
'Stylesheet': 'Stylesheet',
'submit': 'submit',
'Submit': 'Отправить',
'Support': 'Support',
'Sure you want to delete this object?': 'Подтвердите удаление объекта',
'Table': 'таблица',
'Table name': 'Имя таблицы',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': '"Запрос" - это условие вида "db.table1.field1==\'значение\'". Выражение вида "db.table1.field1==db.table2.field2" формирует SQL JOIN.',
'The Core': 'The Core',
'The output of the file is a dictionary that was rendered by the view %s': 'The output of the file is a dictionary that was rendered by the view %s',
'The Views': 'The Views',
'This App': 'This App',
'Time in Cache (h:m:s)': 'Time in Cache (h:m:s)',
'Timestamp': 'Отметка времени',
'Twitter': 'Twitter',
'unable to parse csv file': 'нечитаемый csv-файл',
'Update:': 'Изменить:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Для построения сложных запросов используйте операторы "И": (...)&(...), "ИЛИ": (...)|(...), "НЕ": ~(...).',
'User %(id)s Logged-in': 'Пользователь %(id)s вошёл',
'User %(id)s Logged-out': 'Пользователь %(id)s вышел',
'User %(id)s Password changed': 'Пользователь %(id)s сменил пароль',
'User %(id)s Profile updated': 'Пользователь %(id)s обновил профиль',
'User %(id)s Registered': 'Пользователь %(id)s зарегистрировался',
'User ID': 'ID пользователя',
'Verify Password': 'Повторите пароль',
'Videos': 'Videos',
'View': 'View',
'Welcome': 'Welcome',
'Welcome to web2py': 'Добро пожаловать в web2py',
'Welcome to web2py!': 'Welcome to web2py!',
'Which called the function %s located in the file %s': 'Which called the function %s located in the file %s',
'You are successfully running web2py': 'You are successfully running web2py',
'You can modify this application and adapt it to your needs': 'You can modify this application and adapt it to your needs',
'You visited the url %s': 'You visited the url %s',
}
| agpl-3.0 |
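The row above is a web2py language file: a plain Python dict mapping source strings to Russian translations, where the `%%{word}` markers drive web2py's plural-form selection. A minimal sketch of reading the dictionary outside the framework (the path comes from the row; everything else here is illustrative):

```python
# Load a web2py language dictionary without the framework. Web2py's T()
# object normally does this, plus %%{...} plural expansion, which this
# sketch skips.
import ast

with open('languages/ru.py', encoding='utf-8') as f:
    source = f.read()

# The file is just a '# coding: utf8' line followed by a dict literal.
translations = ast.literal_eval(source.split('\n', 1)[1])
print(translations['Login'])        # -> 'Вход'
print(translations['First name'])   # -> 'Имя'
```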
RandallDW/Aruba_plugin | plugins/org.python.pydev/pysrc/_pydevd_bundle/pydevd_referrers.py | 1 | 8832 | from _pydevd_bundle.pydevd_constants import dict_contains
import sys
from _pydevd_bundle import pydevd_xml
from os.path import basename
import traceback
try:
from urllib import quote, quote_plus, unquote, unquote_plus
except:
from urllib.parse import quote, quote_plus, unquote, unquote_plus #@Reimport @UnresolvedImport
#===================================================================================================
# print_var_node
#===================================================================================================
def print_var_node(xml_node, stream):
name = xml_node.getAttribute('name')
value = xml_node.getAttribute('value')
val_type = xml_node.getAttribute('type')
found_as = xml_node.getAttribute('found_as')
stream.write('Name: ')
stream.write(unquote_plus(name))
stream.write(', Value: ')
stream.write(unquote_plus(value))
stream.write(', Type: ')
stream.write(unquote_plus(val_type))
if found_as:
stream.write(', Found as: %s' % (unquote_plus(found_as),))
stream.write('\n')
#===================================================================================================
# print_referrers
#===================================================================================================
def print_referrers(obj, stream=None):
if stream is None:
stream = sys.stdout
result = get_referrer_info(obj)
from xml.dom.minidom import parseString
dom = parseString(result)
xml = dom.getElementsByTagName('xml')[0]
for node in xml.childNodes:
if node.nodeType == node.TEXT_NODE:
continue
if node.localName == 'for':
stream.write('Searching references for: ')
for child in node.childNodes:
if child.nodeType == node.TEXT_NODE:
continue
print_var_node(child, stream)
elif node.localName == 'var':
stream.write('Referrer found: ')
print_var_node(node, stream)
else:
sys.stderr.write('Unhandled node: %s\n' % (node,))
return result
#===================================================================================================
# get_referrer_info
#===================================================================================================
def get_referrer_info(searched_obj):
DEBUG = 0
if DEBUG:
sys.stderr.write('Getting referrers info.\n')
try:
try:
if searched_obj is None:
ret = ['<xml>\n']
ret.append('<for>\n')
ret.append(pydevd_xml.var_to_xml(
searched_obj,
'Skipping getting referrers for None',
additional_in_xml=' id="%s"' % (id(searched_obj),)))
ret.append('</for>\n')
ret.append('</xml>')
ret = ''.join(ret)
return ret
obj_id = id(searched_obj)
try:
if DEBUG:
sys.stderr.write('Getting referrers...\n')
import gc
referrers = gc.get_referrers(searched_obj)
except:
traceback.print_exc()
ret = ['<xml>\n']
ret.append('<for>\n')
ret.append(pydevd_xml.var_to_xml(
searched_obj,
'Exception raised while trying to get_referrers.',
additional_in_xml=' id="%s"' % (id(searched_obj),)))
ret.append('</for>\n')
ret.append('</xml>')
ret = ''.join(ret)
return ret
if DEBUG:
sys.stderr.write('Found %s referrers.\n' % (len(referrers),))
curr_frame = sys._getframe()
frame_type = type(curr_frame)
#Ignore this frame and any caller frame of this frame
ignore_frames = {} #Should be a set, but it's not available on all python versions.
while curr_frame is not None:
if basename(curr_frame.f_code.co_filename).startswith('pydev'):
ignore_frames[curr_frame] = 1
curr_frame = curr_frame.f_back
ret = ['<xml>\n']
ret.append('<for>\n')
if DEBUG:
sys.stderr.write('Searching Referrers of obj with id="%s"\n' % (obj_id,))
ret.append(pydevd_xml.var_to_xml(
searched_obj,
'Referrers of obj with id="%s"' % (obj_id,)))
ret.append('</for>\n')
all_objects = None
for r in referrers:
try:
if dict_contains(ignore_frames, r):
continue #Skip the references we may add ourselves
except:
pass #Ok: unhashable type checked...
if r is referrers:
continue
r_type = type(r)
r_id = str(id(r))
representation = str(r_type)
found_as = ''
if r_type == frame_type:
if DEBUG:
sys.stderr.write('Found frame referrer: %r\n' % (r,))
for key, val in r.f_locals.items():
if val is searched_obj:
found_as = key
break
elif r_type == dict:
if DEBUG:
sys.stderr.write('Found dict referrer: %r\n' % (r,))
# Try to check if it's a value in the dict (and under which key it was found)
for key, val in r.items():
if val is searched_obj:
found_as = key
if DEBUG:
sys.stderr.write(' Found as %r in dict\n' % (found_as,))
break
#Ok, there's one annoying thing: many times we find it in a dict from an instance,
#but with this we don't directly have the class, only the dict, so, to workaround that
#we iterate over all reachable objects and check if one of those has the given dict.
if all_objects is None:
all_objects = gc.get_objects()
for x in all_objects:
try:
if getattr(x, '__dict__', None) is r:
r = x
r_type = type(x)
r_id = str(id(r))
representation = str(r_type)
break
except:
pass #Just ignore any error here (i.e.: ReferenceError, etc.)
elif r_type in (tuple, list):
if DEBUG:
sys.stderr.write('Found tuple referrer: %r\n' % (r,))
#Don't use enumerate() because not all Python versions have it.
i = 0
for x in r:
if x is searched_obj:
found_as = '%s[%s]' % (r_type.__name__, i)
if DEBUG:
sys.stderr.write(' Found as %s in tuple: \n' % (found_as,))
break
i += 1
if found_as:
if not isinstance(found_as, str):
found_as = str(found_as)
found_as = ' found_as="%s"' % (pydevd_xml.make_valid_xml_value(found_as),)
ret.append(pydevd_xml.var_to_xml(
r,
representation,
additional_in_xml=' id="%s"%s' % (r_id, found_as)))
finally:
if DEBUG:
sys.stderr.write('Done searching for references.\n')
#If we have any exceptions, don't keep dangling references from this frame to any of our objects.
all_objects = None
referrers = None
searched_obj = None
r = None
x = None
key = None
val = None
curr_frame = None
ignore_frames = None
except:
traceback.print_exc()
ret = ['<xml>\n']
ret.append('<for>\n')
ret.append(pydevd_xml.var_to_xml(
searched_obj,
'Error getting referrers for:',
additional_in_xml=' id="%s"' % (id(searched_obj),)))
ret.append('</for>\n')
ret.append('</xml>')
ret = ''.join(ret)
return ret
ret.append('</xml>')
ret = ''.join(ret)
return ret
| epl-1.0 |
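`get_referrer_info` above is the workhorse and `print_referrers` the human-friendly wrapper. A usage sketch, assuming a pydevd checkout (or PyDev's `pysrc` directory) is on `sys.path`; the `Holder` class and variable names are illustrative:

```python
from _pydevd_bundle.pydevd_referrers import print_referrers

class Holder(object):
    pass

target = object()
holder = Holder()
holder.attr = target   # reachable through the instance __dict__
seq = [target]         # reachable through a list slot

# Prints 'Searching references for: ...' followed by one
# 'Referrer found: ...' line per referrer, with found_as hints
# such as 'attr' or 'list[0]'.
print_referrers(target)
```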
guker/fbthrift | thrift/lib/py/protocol/TBinaryProtocol.py | 11 | 7690 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from .TProtocol import *
import sys  # writeString() below checks sys.version_info
from struct import pack, unpack
class TBinaryProtocol(TProtocolBase):
"""Binary implementation of the Thrift protocol driver."""
# NastyHaxx. Python 2.4+ on 32-bit machines forces hex constants to be
# positive, converting this into a long. If we hardcode the int value
# instead it'll stay in 32 bit-land.
# VERSION_MASK = 0xffff0000
VERSION_MASK = -65536
# VERSION_1 = 0x80010000
VERSION_1 = -2147418112
TYPE_MASK = 0x000000ff
def __init__(self, trans, strictRead=False, strictWrite=True):
TProtocolBase.__init__(self, trans)
self.strictRead = strictRead
self.strictWrite = strictWrite
def writeMessageBegin(self, name, type, seqid):
if self.strictWrite:
self.writeI32(TBinaryProtocol.VERSION_1 | type)
self.writeString(name)
self.writeI32(seqid)
else:
self.writeString(name)
self.writeByte(type)
self.writeI32(seqid)
def writeMessageEnd(self):
pass
def writeStructBegin(self, name):
pass
def writeStructEnd(self):
pass
def writeFieldBegin(self, name, type, id):
self.writeByte(type)
self.writeI16(id)
def writeFieldEnd(self):
pass
def writeFieldStop(self):
self.writeByte(TType.STOP)
def writeMapBegin(self, ktype, vtype, size):
self.writeByte(ktype)
self.writeByte(vtype)
self.writeI32(size)
def writeMapEnd(self):
pass
def writeListBegin(self, etype, size):
self.writeByte(etype)
self.writeI32(size)
def writeListEnd(self):
pass
def writeSetBegin(self, etype, size):
self.writeByte(etype)
self.writeI32(size)
def writeSetEnd(self):
pass
def writeBool(self, bool):
if bool:
self.writeByte(1)
else:
self.writeByte(0)
def writeByte(self, byte):
buff = pack(b"!b", byte)
self.trans.write(buff)
def writeI16(self, i16):
buff = pack(b"!h", i16)
self.trans.write(buff)
def writeI32(self, i32):
buff = pack(b"!i", i32)
self.trans.write(buff)
def writeI64(self, i64):
buff = pack(b"!q", i64)
self.trans.write(buff)
def writeDouble(self, dub):
buff = pack(b"!d", dub)
self.trans.write(buff)
def writeFloat(self, flt):
buff = pack(b"!f", flt)
self.trans.write(buff)
def writeString(self, str):
if sys.version_info[0] >= 3 and not isinstance(str, bytes):
str = str.encode('utf-8')
self.writeI32(len(str))
self.trans.write(str)
def readMessageBegin(self):
sz = self.readI32()
if sz < 0:
version = sz & TBinaryProtocol.VERSION_MASK
if version != TBinaryProtocol.VERSION_1:
raise TProtocolException(TProtocolException.BAD_VERSION,
'Bad version in readMessageBegin: %d' % (sz))
type = sz & TBinaryProtocol.TYPE_MASK
name = self.readString()
seqid = self.readI32()
else:
if self.strictRead:
raise TProtocolException(TProtocolException.BAD_VERSION,
'No protocol version header')
name = self.trans.readAll(sz)
type = self.readByte()
seqid = self.readI32()
return (name, type, seqid)
def readMessageEnd(self):
pass
def readStructBegin(self):
pass
def readStructEnd(self):
pass
def readFieldBegin(self):
type = self.readByte()
if type == TType.STOP:
return (None, type, 0)
id = self.readI16()
return (None, type, id)
def readFieldEnd(self):
pass
def readMapBegin(self):
ktype = self.readByte()
vtype = self.readByte()
size = self.readI32()
return (ktype, vtype, size)
def readMapEnd(self):
pass
def readListBegin(self):
etype = self.readByte()
size = self.readI32()
return (etype, size)
def readListEnd(self):
pass
def readSetBegin(self):
etype = self.readByte()
size = self.readI32()
return (etype, size)
def readSetEnd(self):
pass
def readBool(self):
byte = self.readByte()
if byte == 0:
return False
return True
def readByte(self):
buff = self.trans.readAll(1)
val, = unpack(b'!b', buff)
return val
def readI16(self):
buff = self.trans.readAll(2)
val, = unpack(b'!h', buff)
return val
def readI32(self):
buff = self.trans.readAll(4)
val, = unpack(b'!i', buff)
return val
def readI64(self):
buff = self.trans.readAll(8)
val, = unpack(b'!q', buff)
return val
def readDouble(self):
buff = self.trans.readAll(8)
val, = unpack(b'!d', buff)
return val
def readFloat(self):
buff = self.trans.readAll(4)
val, = unpack(b'!f', buff)
return val
def readString(self):
len = self.readI32()
str = self.trans.readAll(len)
return str
class TBinaryProtocolFactory:
def __init__(self, strictRead=False, strictWrite=True):
self.strictRead = strictRead
self.strictWrite = strictWrite
def getProtocol(self, trans):
prot = TBinaryProtocol(trans, self.strictRead, self.strictWrite)
return prot
class TBinaryProtocolAccelerated(TBinaryProtocol):
"""C-Accelerated version of TBinaryProtocol.
This class does not override any of TBinaryProtocol's methods,
but the generated code recognizes it directly and will call into
our C module to do the encoding, bypassing this object entirely.
We inherit from TBinaryProtocol so that the normal TBinaryProtocol
encoding can happen if the fastbinary module doesn't work for some
reason. (TODO(dreiss): Make this happen sanely in more cases.)
In order to take advantage of the C module, just use
TBinaryProtocolAccelerated instead of TBinaryProtocol.
NOTE: This code was contributed by an external developer.
The internal Thrift team has reviewed and tested it,
but we cannot guarantee that it is production-ready.
Please feel free to report bugs and/or success stories
to the public mailing list.
"""
pass
class TBinaryProtocolAcceleratedFactory(TBinaryProtocolFactory):
def getProtocol(self, trans):
return TBinaryProtocolAccelerated(trans, self.strictRead,
self.strictWrite)
| apache-2.0 |
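A round-trip sketch for the strict header path above (`VERSION_1 | type`, then name, then seqid). Module paths follow the standard Apache Thrift Python layout and `1` stands for `TMessageType.CALL`; adjust the imports for an fbthrift checkout:

```python
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol

wbuf = TTransport.TMemoryBuffer()
proto = TBinaryProtocol.TBinaryProtocol(wbuf)   # strictWrite=True by default
proto.writeMessageBegin('ping', 1, 0)           # name, type, seqid
proto.writeMessageEnd()

rbuf = TTransport.TMemoryBuffer(wbuf.getvalue())
rproto = TBinaryProtocol.TBinaryProtocol(rbuf)
# -> ('ping', 1, 0); the name may come back as bytes on Python 3
print(rproto.readMessageBegin())
```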
normtown/SickRage | lib/unidecode/x1d4.py | 248 | 3839 | data = (
'A', # 0x00
'B', # 0x01
'C', # 0x02
'D', # 0x03
'E', # 0x04
'F', # 0x05
'G', # 0x06
'H', # 0x07
'I', # 0x08
'J', # 0x09
'K', # 0x0a
'L', # 0x0b
'M', # 0x0c
'N', # 0x0d
'O', # 0x0e
'P', # 0x0f
'Q', # 0x10
'R', # 0x11
'S', # 0x12
'T', # 0x13
'U', # 0x14
'V', # 0x15
'W', # 0x16
'X', # 0x17
'Y', # 0x18
'Z', # 0x19
'a', # 0x1a
'b', # 0x1b
'c', # 0x1c
'd', # 0x1d
'e', # 0x1e
'f', # 0x1f
'g', # 0x20
'h', # 0x21
'i', # 0x22
'j', # 0x23
'k', # 0x24
'l', # 0x25
'm', # 0x26
'n', # 0x27
'o', # 0x28
'p', # 0x29
'q', # 0x2a
'r', # 0x2b
's', # 0x2c
't', # 0x2d
'u', # 0x2e
'v', # 0x2f
'w', # 0x30
'x', # 0x31
'y', # 0x32
'z', # 0x33
'A', # 0x34
'B', # 0x35
'C', # 0x36
'D', # 0x37
'E', # 0x38
'F', # 0x39
'G', # 0x3a
'H', # 0x3b
'I', # 0x3c
'J', # 0x3d
'K', # 0x3e
'L', # 0x3f
'M', # 0x40
'N', # 0x41
'O', # 0x42
'P', # 0x43
'Q', # 0x44
'R', # 0x45
'S', # 0x46
'T', # 0x47
'U', # 0x48
'V', # 0x49
'W', # 0x4a
'X', # 0x4b
'Y', # 0x4c
'Z', # 0x4d
'a', # 0x4e
'b', # 0x4f
'c', # 0x50
'd', # 0x51
'e', # 0x52
'f', # 0x53
'g', # 0x54
'', # 0x55
'i', # 0x56
'j', # 0x57
'k', # 0x58
'l', # 0x59
'm', # 0x5a
'n', # 0x5b
'o', # 0x5c
'p', # 0x5d
'q', # 0x5e
'r', # 0x5f
's', # 0x60
't', # 0x61
'u', # 0x62
'v', # 0x63
'w', # 0x64
'x', # 0x65
'y', # 0x66
'z', # 0x67
'A', # 0x68
'B', # 0x69
'C', # 0x6a
'D', # 0x6b
'E', # 0x6c
'F', # 0x6d
'G', # 0x6e
'H', # 0x6f
'I', # 0x70
'J', # 0x71
'K', # 0x72
'L', # 0x73
'M', # 0x74
'N', # 0x75
'O', # 0x76
'P', # 0x77
'Q', # 0x78
'R', # 0x79
'S', # 0x7a
'T', # 0x7b
'U', # 0x7c
'V', # 0x7d
'W', # 0x7e
'X', # 0x7f
'Y', # 0x80
'Z', # 0x81
'a', # 0x82
'b', # 0x83
'c', # 0x84
'd', # 0x85
'e', # 0x86
'f', # 0x87
'g', # 0x88
'h', # 0x89
'i', # 0x8a
'j', # 0x8b
'k', # 0x8c
'l', # 0x8d
'm', # 0x8e
'n', # 0x8f
'o', # 0x90
'p', # 0x91
'q', # 0x92
'r', # 0x93
's', # 0x94
't', # 0x95
'u', # 0x96
'v', # 0x97
'w', # 0x98
'x', # 0x99
'y', # 0x9a
'z', # 0x9b
'A', # 0x9c
'', # 0x9d
'C', # 0x9e
'D', # 0x9f
'', # 0xa0
'', # 0xa1
'G', # 0xa2
'', # 0xa3
'', # 0xa4
'J', # 0xa5
'K', # 0xa6
'', # 0xa7
'', # 0xa8
'N', # 0xa9
'O', # 0xaa
'P', # 0xab
'Q', # 0xac
'', # 0xad
'S', # 0xae
'T', # 0xaf
'U', # 0xb0
'V', # 0xb1
'W', # 0xb2
'X', # 0xb3
'Y', # 0xb4
'Z', # 0xb5
'a', # 0xb6
'b', # 0xb7
'c', # 0xb8
'd', # 0xb9
'', # 0xba
'f', # 0xbb
'', # 0xbc
'h', # 0xbd
'i', # 0xbe
'j', # 0xbf
'k', # 0xc0
'l', # 0xc1
'm', # 0xc2
'n', # 0xc3
'', # 0xc4
'p', # 0xc5
'q', # 0xc6
'r', # 0xc7
's', # 0xc8
't', # 0xc9
'u', # 0xca
'v', # 0xcb
'w', # 0xcc
'x', # 0xcd
'y', # 0xce
'z', # 0xcf
'A', # 0xd0
'B', # 0xd1
'C', # 0xd2
'D', # 0xd3
'E', # 0xd4
'F', # 0xd5
'G', # 0xd6
'H', # 0xd7
'I', # 0xd8
'J', # 0xd9
'K', # 0xda
'L', # 0xdb
'M', # 0xdc
'N', # 0xdd
'O', # 0xde
'P', # 0xdf
'Q', # 0xe0
'R', # 0xe1
'S', # 0xe2
'T', # 0xe3
'U', # 0xe4
'V', # 0xe5
'W', # 0xe6
'X', # 0xe7
'Y', # 0xe8
'Z', # 0xe9
'a', # 0xea
'b', # 0xeb
'c', # 0xec
'd', # 0xed
'e', # 0xee
'f', # 0xef
'g', # 0xf0
'h', # 0xf1
'i', # 0xf2
'j', # 0xf3
'k', # 0xf4
'l', # 0xf5
'm', # 0xf6
'n', # 0xf7
'o', # 0xf8
'p', # 0xf9
'q', # 0xfa
'r', # 0xfb
's', # 0xfc
't', # 0xfd
'u', # 0xfe
'v', # 0xff
)
| gpl-3.0 |
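The table above is the transliteration page for U+1D4xx (Mathematical Alphanumeric Symbols: styled Latin letters in bold, italic, bold-italic and script forms), indexed by the low byte of the codepoint. For example:

```python
# U+1D4D0 is MATHEMATICAL BOLD SCRIPT CAPITAL A (offset 0xd0 -> 'A' above)
# and U+1D4EA the corresponding small a (offset 0xea -> 'a').
from unidecode import unidecode

print(unidecode(u'\U0001D4D0\U0001D4EA'))  # -> 'Aa'
```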
robclark/chromium | ppapi/generators/idl_propertynode.py | 13 | 5464 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Hierarchical property system for IDL AST """
import re
import sys
from idl_log import ErrOut, InfoOut, WarnOut
from idl_option import GetOption, Option, ParseOptions
#
# IDLPropertyNode
#
# A property node is a hierarchically aware system for mapping
# keys to values, such that a local dictionary is searched first,
# followed by parent dictionaries in order.
#
class IDLPropertyNode(object):
def __init__(self):
self.parents = []
self.property_map = {}
def Error(self, msg):
name = self.GetProperty('NAME', 'Unknown')
parents = [parent.GetProperty('NAME', '???') for parent in self.parents]
ErrOut.Log('%s [%s] : %s' % (name, ' '.join(parents), msg))
def AddParent(self, parent):
assert parent
self.parents.append(parent)
def SetProperty(self, name, val):
self.property_map[name] = val
def _GetProperty_(self, name):
# Check locally for the property, and return it if found.
prop = self.property_map.get(name, None)
if prop is not None: return prop
# If not, search parents in order
for parent in self.parents:
prop = parent.GetProperty(name)
if prop is not None: return prop
# Otherwise, it can not be found.
return None
def GetProperty(self, name, default=None):
prop = self._GetProperty_(name)
if prop is None:
return default
else:
return prop
def GetPropertyLocal(self, name, default=None):
# Search for the property, but only locally, returning the
# default if not found.
prop = self.property_map.get(name, default)
return prop
# Regular expression to parse property keys in a string such that a string
# "My string $NAME$" will find the key "NAME".
regex_var = re.compile('(?P<src>[^\\$]+)|(?P<key>\\$\\w+\\$)')
def GetPropertyList(self):
return self.property_map.keys()
# Recursively expands text keys in the form of $KEY$ with the value
# of the property of the same name. Since this is done recursively
# one property can be defined in terms of another.
def Replace(self, text):
itr = IDLPropertyNode.regex_var.finditer(text)
out = ''
for m in itr:
(start, stop) = m.span()
if m.lastgroup == 'src':
out += text[start:stop]
if m.lastgroup == 'key':
key = text[start+1:stop-1]
val = self.GetProperty(key, None)
if not val:
self.Error('No property "%s"' % key)
out += self.Replace(str(val))
return out
#
# Testing functions
#
# Build a property node, setting the properties including a name, and
# associate the children with this new node.
#
def BuildNode(name, props, children=[], parents=[]):
node = IDLPropertyNode()
node.SetProperty('NAME', name)
for prop in props:
toks = prop.split('=')
node.SetProperty(toks[0], toks[1])
for child in children:
child.AddParent(node)
for parent in parents:
node.AddParent(parent)
return node
def ExpectProp(node, name, val):
found = node.GetProperty(name)
if found != val:
ErrOut.Log('Got property %s expecting %s' % (found, val))
return 1
return 0
#
# Verify property inheritance
#
def PropertyTest():
errors = 0
left = BuildNode('Left', ['Left=Left'])
right = BuildNode('Right', ['Right=Right'])
top = BuildNode('Top', ['Left=Top', 'Right=Top'], [left, right])
errors += ExpectProp(top, 'Left', 'Top')
errors += ExpectProp(top, 'Right', 'Top')
errors += ExpectProp(left, 'Left', 'Left')
errors += ExpectProp(left, 'Right', 'Top')
errors += ExpectProp(right, 'Left', 'Top')
errors += ExpectProp(right, 'Right', 'Right')
if not errors: InfoOut.Log('Passed PropertyTest')
return errors
def ExpectText(node, text, val):
found = node.Replace(text)
if found != val:
ErrOut.Log('Got replacement %s expecting %s' % (found, val))
return 1
return 0
#
# Verify text replacement
#
def ReplaceTest():
errors = 0
left = BuildNode('Left', ['Left=Left'])
right = BuildNode('Right', ['Right=Right'])
top = BuildNode('Top', ['Left=Top', 'Right=Top'], [left, right])
errors += ExpectText(top, '$Left$', 'Top')
errors += ExpectText(top, '$Right$', 'Top')
errors += ExpectText(left, '$Left$', 'Left')
errors += ExpectText(left, '$Right$', 'Top')
errors += ExpectText(right, '$Left$', 'Top')
errors += ExpectText(right, '$Right$', 'Right')
if not errors: InfoOut.Log('Passed ReplaceTest')
return errors
def MultiParentTest():
errors = 0
parent1 = BuildNode('parent1', ['PARENT1=parent1', 'TOPMOST=$TOP$'])
parent2 = BuildNode('parent2', ['PARENT1=parent2', 'PARENT2=parent2'])
child = BuildNode('child', ['CHILD=child'], parents=[parent1, parent2])
BuildNode('top', ['TOP=top'], children=[parent1])
errors += ExpectText(child, '$CHILD$', 'child')
errors += ExpectText(child, '$PARENT1$', 'parent1')
errors += ExpectText(child, '$PARENT2$', 'parent2')
# Verify recursive resolution
errors += ExpectText(child, '$TOPMOST$', 'top')
if not errors: InfoOut.Log('Passed MultiParentTest')
return errors
def Main():
errors = 0
errors += PropertyTest()
errors += ReplaceTest()
errors += MultiParentTest()
if errors:
ErrOut.Log('IDLNode failed with %d errors.' % errors)
return -1
return 0
if __name__ == '__main__':
sys.exit(Main())
| bsd-3-clause |
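Beyond the built-in tests, the recursive `Replace()` lets one property reference another. A standalone sketch using the module's own `BuildNode` helper (assumes the `generators` directory is on `sys.path`):

```python
from idl_propertynode import BuildNode

parent = BuildNode('parent', ['GREETING=hello $WHO$', 'WHO=world'])
child = BuildNode('child', [], parents=[parent])

# GREETING resolves through the parent node, then $WHO$ expands recursively.
print(child.Replace('$GREETING$'))   # -> 'hello world'
```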
vivekramamoorthy/tcpdump | doc/conf.py | 3 | 10475 | # -*- coding: utf-8 -*-
#
# Tcpdump library documentation build configuration file.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
from os.path import join, dirname, abspath
from topology_lib_tcpdump import __version__
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.inheritance_diagram',
'sphinx.ext.intersphinx',
'sphinxcontrib.plantuml',
'sphinx.ext.graphviz',
'autoapi.sphinx'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Tcpdump library'
copyright = '2016, Hewlett Packard Enterprise Development LP'
author = 'Hewlett Packard Enterprise Development LP'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'monokai'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%Y-%m-%d'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'topology_lib_tcpdumpdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
'index', 'topology_lib_tcpdump.tex',
'Tcpdump library Documentation',
'Hewlett Packard Enterprise Development LP', 'manual'
),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
'index',
'topology_lib_tcpdump',
'Tcpdump library Documentation',
[author], 1
),
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
'topology_lib_tcpdump',
'Tcpdump library Documentation',
author,
'topology_lib_tcpdump',
(
'Tcpdump library'
),
'Miscellaneous'
),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Add style overrides
def setup(app):
app.add_stylesheet('styles/custom.css')
# autoapi configuration
autoapi_modules = {
'topology_lib_tcpdump': None
}
# Configure PlantUML
plantuml = 'java -jar ' + join(dirname(abspath(__name__)), 'plantuml.8030.jar')
plantuml_output_format = 'svg'
# Configure Graphviz
graphviz_output_format = 'svg'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/3.4', None)
}
# Setup theme if not building in readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) is not None
if not on_rtd:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] | apache-2.0 |
adamcaudill/yawast | yawast/external/spinner.py | 1 | 1596 | # From: https://stackoverflow.com/a/39504463
# License: Creative Commons Attribution-Share Alike
# Copyright: Victor Moyseenko
import sys
import threading
import time
class Spinner:
running = False
busy = False
delay = 0.1
@staticmethod
def spinning_cursor():
while 1:
for cursor in "|/-\\":
yield cursor
def __init__(self, delay=None):
self.spinner_generator = self.spinning_cursor()
if delay and float(delay):
self.delay = delay
def spinner_task(self):
while self.busy:
try:
if sys.stdout.isatty():
sys.stdout.write(next(self.spinner_generator))
sys.stdout.flush()
time.sleep(self.delay)
sys.stdout.write("\b")
sys.stdout.flush()
except Exception:
# we don't care what happens here
pass
self.running = False
def start(self):
self.running = True
self.busy = True
threading.Thread(target=self.spinner_task).start()
def stop(self, exception=None):
self.busy = False
time.sleep(self.delay)
while self.running:
pass
sys.stdout.write(" ")
sys.stdout.flush()
sys.stdout.write("\b")
sys.stdout.flush()
if exception is not None:
return False
def __enter__(self):
self.start()
return self
def __exit__(self, exception, value, tb):
return self.stop(exception)
| mit |
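`Spinner` doubles as a context manager via `__enter__`/`__exit__`, so a caller can wrap any slow operation; a minimal usage sketch:

```python
import time
from yawast.external.spinner import Spinner

with Spinner():
    time.sleep(2)   # the cursor animates on a TTY while the work runs

spinner = Spinner(delay=0.2)   # manual start/stop also works
spinner.start()
time.sleep(1)
spinner.stop()
```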
BackupTheBerlios/pyhttpd-svn | core/baseHTTPRequestHandler.py | 1 | 11117 | # -*- coding: utf-8 -*-
##################################################################
# pyHTTPd
# $Id$
# (c) 2006 by Tim Taubert
##################################################################
import os, sys, socket, time, mimetools
from mimetypes import MimeTypes
from baseConfig import pConfig
import baseRoutines
DEFAULT_ERROR_MESSAGE = "<head><title>Error response</title></head><body><h1>Error response</h1><p>Error code %(code)d.</p><p>Message: %(message)s.</p><p>Error code explanation: %(code)s = %(explain)s.</body>"
def _quote_html(html):
return html.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
class pHTTPRequestHandler:
rbufsize = 0
wbufsize = 0
sys_version = "Python/2.4"
server_version = "BaseHTTP/"
protocol_version = "HTTP/1.0"
# message-like class used to parse headers
MessageClass = mimetools.Message
# needed for timestamp formatting
weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
monthname = [None,
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
# standard conform http response codes
responses = {
100: ('Continue', 'Request received, please continue'),
101: ('Switching Protocols', 'Switching to new protocol; obey Upgrade header'),
200: ('OK', 'Request fulfilled, document follows'),
201: ('Created', 'Document created, URL follows'),
202: ('Accepted', 'Request accepted, processing continues off-line'),
203: ('Non-Authoritative Information', 'Request fulfilled from cache'),
204: ('No response', 'Request fulfilled, nothing follows'),
205: ('Reset Content', 'Clear input form for further input.'),
206: ('Partial Content', 'Partial content follows.'),
300: ('Multiple Choices', 'Object has several resources -- see URI list'),
301: ('Moved Permanently', 'Object moved permanently -- see URI list'),
302: ('Found', 'Object moved temporarily -- see URI list'),
303: ('See Other', 'Object moved -- see Method and URL list'),
304: ('Not modified', 'Document has not changed since given time'),
305: ('Use Proxy', 'You must use proxy specified in Location to access this resource.'),
307: ('Temporary Redirect', 'Object moved temporarily -- see URI list'),
400: ('Bad request', 'Bad request syntax or unsupported method'),
401: ('Unauthorized', 'No permission -- see authorization schemes'),
402: ('Payment required', 'No payment -- see charging schemes'),
403: ('Forbidden', 'Request forbidden -- authorization will not help'),
404: ('Not Found', 'Nothing matches the given URI'),
405: ('Method Not Allowed', 'Specified method is invalid for this server.'),
406: ('Not Acceptable', 'URI not available in preferred format.'),
407: ('Proxy Authentication Required', 'You must authenticate with this proxy before proceeding.'),
408: ('Request Time-out', 'Request timed out; try again later.'),
409: ('Conflict', 'Request conflict.'),
410: ('Gone', 'URI no longer exists and has been permanently removed.'),
411: ('Length Required', 'Client must specify Content-Length.'),
412: ('Precondition Failed', 'Precondition in headers is false.'),
413: ('Request Entity Too Large', 'Entity is too large.'),
414: ('Request-URI Too Long', 'URI is too long.'),
415: ('Unsupported Media Type', 'Entity body in unsupported format.'),
416: ('Requested Range Not Satisfiable', 'Cannot satisfy request range.'),
417: ('Expectation Failed', 'Expect condition could not be satisfied.'),
500: ('Internal error', 'Server got itself in trouble'),
501: ('Not Implemented', 'Server does not support this operation'),
502: ('Bad Gateway', 'Invalid responses from another server/proxy.'),
503: ('Service temporarily overloaded', 'The server cannot process the request due to a high load'),
504: ('Gateway timeout', 'The gateway server did not receive a timely response'),
505: ('HTTP Version not supported', 'Cannot fulfill request.'),
}
###################################################################################
def __init__(self, request, client_address, server):
self.request = request
self.client_address = client_address
self.server = server
try:
self.setup()
self.handle()
self.finish()
finally:
sys.exc_traceback = None # Help garbage collection
def setup(self):
self.connection = self.request
self.rfile = self.connection.makefile('rb', self.rbufsize)
self.wfile = self.connection.makefile('wb', self.wbufsize)
def finish(self):
if not self.wfile.closed:
self.wfile.flush()
self.wfile.close()
self.rfile.close()
def do_HEAD(self):
print "HEAD cmd used"
def do_PUT(self):
print "PUT cmd used"
def do_GET(self):
self.handleCommand()
def do_POST(self):
self.handleCommand()
def handleCommand(self):
self.handleFileFlag = True
baseRoutines.parsePaths(self)
# trigger the "before" hook
self.modules.triggerBefore(self, self.command)
if not os.path.isfile(self.path):
if os.path.isfile(pConfig.getValue("base.docroot")+self.path):
self.path = pConfig.getValue("base.docroot")+self.path
elif os.path.isfile(pConfig.getValue("base.docroot")+"/"+self.path):
self.path = pConfig.getValue("base.docroot")+"/"+self.path
else:
self.send_response(404)
self.end_headers()
self.handleFileFlag = False
if self.handleFileFlag:
try:
self.handleFile(self.path)
except:
pass
# trigger the "after" hook
self.modules.triggerAfter(self, self.command)
def handleFile(self, filename):
fd = open(filename)
content = fd.read()
fd.close()
self.send_response(200)
mime = MimeTypes()
mimetype = mime.guess_type(filename)
self.send_header("Content-Type", mimetype[0])
if mimetype[1]:
self.send_header("Content-Encoding", mimetype[1])
self.send_header("Content-Length", str(len(content)))
self.end_headers()
self.wfile.write(content)
self.wfile.flush()
def parse_request(self):
self.command = None # set in case of error on the first line
self.request_version = version = "HTTP/0.9" # Default
self.close_connection = 1
requestline = self.raw_requestline
if requestline[-2:] == '\r\n':
requestline = requestline[:-2]
elif requestline[-1:] == '\n':
requestline = requestline[:-1]
self.requestline = requestline
words = requestline.split()
if len(words) == 3:
[command, path, version] = words
if version[:5] != 'HTTP/':
self.send_error(400, "Bad request version (%r)" % version)
return False
try:
base_version_number = version.split('/', 1)[1]
version_number = base_version_number.split(".")
if len(version_number) != 2:
raise ValueError
version_number = int(version_number[0]), int(version_number[1])
except (ValueError, IndexError):
self.send_error(400, "Bad request version (%r)" % version)
return False
if version_number >= (1, 1) and self.protocol_version >= "HTTP/1.1":
self.close_connection = 0
if version_number >= (2, 0):
self.send_error(505,
"Invalid HTTP Version (%s)" % base_version_number)
return False
elif len(words) == 2:
[command, path] = words
self.close_connection = 1
if command != 'GET':
self.send_error(400,
"Bad HTTP/0.9 request type (%r)" % command)
return False
elif not words:
return False
else:
self.send_error(400, "Bad request syntax (%r)" % requestline)
return False
self.command, self.path, self.request_version = command, path, version
# Examine the headers and look for a Connection directive
self.headers = self.MessageClass(self.rfile, 0)
conntype = self.headers.get('Connection', "")
if conntype.lower() == 'close':
self.close_connection = 1
elif (conntype.lower() == 'keep-alive' and
self.protocol_version >= "HTTP/1.1"):
self.close_connection = 0
return True
def handle_one_request(self):
self.raw_requestline = self.rfile.readline()
if not self.raw_requestline:
self.close_connection = 1
return
if not self.parse_request(): # An error code has been sent, just exit
return
mname = 'do_' + self.command
if hasattr(self, mname):
getattr(self, mname)()
else:
self.send_error(501, "Unsupported method (%r)" % self.command)
def handle(self):
self.close_connection = 1
self.handle_one_request()
while not self.close_connection:
self.handle_one_request()
def send_error(self, code, message=None):
try:
short, long = self.responses[code]
except KeyError:
short, long = '???', '???'
if message is None:
message = short
explain = long
self.log_error("code %d, message %s", code, message)
# using _quote_html to prevent Cross Site Scripting attacks (see bug #1100201)
content = (self.error_message_format %
{'code': code, 'message': _quote_html(message), 'explain': explain})
self.send_response(code, message)
self.send_header("Content-Type", "text/html")
self.send_header('Connection', 'close')
self.end_headers()
if self.command != 'HEAD' and code >= 200 and code not in (204, 304):
self.wfile.write(content)
error_message_format = DEFAULT_ERROR_MESSAGE
def send_response(self, code, message=None):
self.log_request(code)
if message is None:
if code in self.responses:
message = self.responses[code][0]
else:
message = ''
if self.request_version != 'HTTP/0.9':
self.wfile.write("%s %d %s\r\n" %
(self.protocol_version, code, message))
# print (self.protocol_version, code, message)
self.send_header('Server', self.version_string())
self.send_header('Date', self.date_time_string())
def send_header(self, keyword, value):
"""Send a MIME header."""
if self.request_version != 'HTTP/0.9':
self.wfile.write("%s: %s\r\n" % (keyword, value))
if keyword.lower() == 'connection':
if value.lower() == 'close':
self.close_connection = 1
elif value.lower() == 'keep-alive':
self.close_connection = 0
def end_headers(self):
if self.request_version != 'HTTP/0.9':
self.wfile.write("\r\n")
def log_request(self, code='-', size='-'):
self.log_message('"%s" %s %s', self.requestline, str(code), str(size))
def log_error(self, *args):
self.log_message(*args)
def log_message(self, format, *args):
sys.stderr.write("%s - - [%s] %s\n" % (self.address_string(), self.log_date_time_string(), format % args))
def version_string(self):
return self.server_version + ' ' + self.sys_version
# returns the current date and time formatted for a message header
def date_time_string(self):
now = time.time()
year, month, day, hh, mm, ss, wd, y, z = time.gmtime(now)
s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
self.weekdayname[wd],
day, self.monthname[month], year,
hh, mm, ss)
return s
# returns the current time formatted for logging
def log_date_time_string(self):
now = time.time()
year, month, day, hh, mm, ss, x, y, z = time.localtime(now)
s = "%02d/%3s/%04d %02d:%02d:%02d" % (
day, self.monthname[month], year,
hh, mm, ss)
return s
def address_string(self):
host, port = self.client_address[:2]
return socket.getfqdn(host)
| gpl-2.0 |
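`pHTTPRequestHandler.__init__` follows the `SocketServer.BaseRequestHandler` calling convention (`request, client_address, server`) and runs `setup`/`handle`/`finish` itself, so wiring it up looks roughly like the Python 2 sketch below; note the handler also expects `pConfig` and its `self.modules` hook to be configured by the surrounding server:

```python
import SocketServer
from core.baseHTTPRequestHandler import pHTTPRequestHandler

server = SocketServer.TCPServer(('127.0.0.1', 8080), pHTTPRequestHandler)
server.serve_forever()
```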
szaydel/psutil | psutil/_pslinux.py | 1 | 40630 | #!/usr/bin/env python
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Linux platform implementation."""
from __future__ import division
import os
import errno
import socket
import struct
import sys
import base64
import re
import warnings
import _psutil_posix
import _psutil_linux
from psutil import _psposix
from psutil._error import AccessDenied, NoSuchProcess, TimeoutExpired
from psutil._common import *
from psutil._compat import PY3, xrange, long, namedtuple, wraps
from _psutil_linux import RLIM_INFINITY
from _psutil_linux import (RLIMIT_AS, RLIMIT_CORE, RLIMIT_CPU, RLIMIT_DATA,
RLIMIT_FSIZE, RLIMIT_LOCKS, RLIMIT_MEMLOCK,
RLIMIT_MSGQUEUE, RLIMIT_NICE, RLIMIT_NOFILE,
RLIMIT_NPROC, RLIMIT_RSS, RLIMIT_RTPRIO,
RLIMIT_RTTIME, RLIMIT_SIGPENDING, RLIMIT_STACK)
__extra__all__ = [
# io prio constants
"IOPRIO_CLASS_NONE", "IOPRIO_CLASS_RT", "IOPRIO_CLASS_BE",
"IOPRIO_CLASS_IDLE",
# connection status constants
"CONN_ESTABLISHED", "CONN_SYN_SENT", "CONN_SYN_RECV", "CONN_FIN_WAIT1",
"CONN_FIN_WAIT2", "CONN_TIME_WAIT", "CONN_CLOSE", "CONN_CLOSE_WAIT",
"CONN_LAST_ACK", "CONN_LISTEN", "CONN_CLOSING",
# process resources constants
"RLIM_INFINITY",
"RLIMIT_AS", "RLIMIT_CORE", "RLIMIT_CPU", "RLIMIT_DATA", "RLIMIT_FSIZE",
"RLIMIT_LOCKS", "RLIMIT_MEMLOCK", "RLIMIT_MSGQUEUE", "RLIMIT_NICE",
"RLIMIT_NOFILE", "RLIMIT_NPROC", "RLIMIT_RSS", "RLIMIT_RTPRIO",
"RLIMIT_RTTIME", "RLIMIT_SIGPENDING", "RLIMIT_STACK",
# other
"phymem_buffers", "cached_phymem"]
def get_system_boot_time():
"""Return the system boot time expressed in seconds since the epoch."""
f = open('/proc/stat', 'r')
try:
for line in f:
if line.startswith('btime'):
return float(line.strip().split()[1])
raise RuntimeError("line 'btime' not found")
finally:
f.close()
def _get_num_cpus():
"""Return the number of CPUs on the system"""
try:
return os.sysconf("SC_NPROCESSORS_ONLN")
except ValueError:
# as a second fallback we try to parse /proc/cpuinfo
num = 0
f = open('/proc/cpuinfo', 'r')
try:
lines = f.readlines()
finally:
f.close()
for line in lines:
if line.lower().startswith('processor'):
num += 1
# unknown format (e.g. armel/sparc architectures), see:
# http://code.google.com/p/psutil/issues/detail?id=200
# try to parse /proc/stat as a last resort
if num == 0:
f = open('/proc/stat', 'r')
try:
lines = f.readlines()
finally:
f.close()
search = re.compile('cpu\d')
for line in lines:
line = line.split(' ')[0]
if search.match(line):
num += 1
if num == 0:
raise RuntimeError("couldn't determine platform's NUM_CPUS")
return num
# Number of clock ticks per second
_CLOCK_TICKS = os.sysconf("SC_CLK_TCK")
_PAGESIZE = os.sysconf("SC_PAGE_SIZE")
# Since these constants get determined at import time we do not want to
# crash immediately; instead we'll set them to None and most likely
# we'll crash later as they're used for determining process CPU stats
# and creation_time
try:
BOOT_TIME = get_system_boot_time()
except Exception:
BOOT_TIME = None
warnings.warn("couldn't determine platform's BOOT_TIME", RuntimeWarning)
try:
NUM_CPUS = _get_num_cpus()
except Exception:
NUM_CPUS = None
warnings.warn("couldn't determine platform's NUM_CPUS", RuntimeWarning)
try:
TOTAL_PHYMEM = _psutil_linux.get_sysinfo()[0]
except Exception:
TOTAL_PHYMEM = None
warnings.warn("couldn't determine platform's TOTAL_PHYMEM", RuntimeWarning)
# ioprio_* constants http://linux.die.net/man/2/ioprio_get
IOPRIO_CLASS_NONE = 0
IOPRIO_CLASS_RT = 1
IOPRIO_CLASS_BE = 2
IOPRIO_CLASS_IDLE = 3
# http://students.mimuw.edu.pl/lxr/source/include/net/tcp_states.h
_TCP_STATES_TABLE = {"01" : CONN_ESTABLISHED,
"02" : CONN_SYN_SENT,
"03" : CONN_SYN_RECV,
"04" : CONN_FIN_WAIT1,
"05" : CONN_FIN_WAIT2,
"06" : CONN_TIME_WAIT,
"07" : CONN_CLOSE,
"08" : CONN_CLOSE_WAIT,
"09" : CONN_LAST_ACK,
"0A" : CONN_LISTEN,
"0B" : CONN_CLOSING
}
# --- system memory functions
nt_virtmem_info = namedtuple('vmem', ' '.join([
# all platforms
'total', 'available', 'percent', 'used', 'free',
# linux specific
'active',
'inactive',
'buffers',
'cached']))
def virtual_memory():
total, free, buffers, shared, _, _ = _psutil_linux.get_sysinfo()
cached = active = inactive = None
f = open('/proc/meminfo', 'r')
try:
for line in f:
if line.startswith('Cached:'):
cached = int(line.split()[1]) * 1024
elif line.startswith('Active:'):
active = int(line.split()[1]) * 1024
elif line.startswith('Inactive:'):
inactive = int(line.split()[1]) * 1024
if cached is not None \
and active is not None \
and inactive is not None:
break
else:
# we might get here when dealing with exotic Linux flavors, see:
# http://code.google.com/p/psutil/issues/detail?id=313
msg = "'cached', 'active' and 'inactive' memory stats couldn't " \
"be determined and were set to 0"
warnings.warn(msg, RuntimeWarning)
cached = active = inactive = 0
finally:
f.close()
avail = free + buffers + cached
used = total - free
percent = usage_percent((total - avail), total, _round=1)
return nt_virtmem_info(total, avail, percent, used, free,
active, inactive, buffers, cached)
def swap_memory():
_, _, _, _, total, free = _psutil_linux.get_sysinfo()
used = total - free
percent = usage_percent(used, total, _round=1)
# get pgin/pgouts
f = open("/proc/vmstat", "r")
sin = sout = None
try:
for line in f:
# values are expressed in 4 kilo bytes, we want bytes instead
if line.startswith('pswpin'):
sin = int(line.split(' ')[1]) * 4 * 1024
elif line.startswith('pswpout'):
sout = int(line.split(' ')[1]) * 4 * 1024
if sin is not None and sout is not None:
break
else:
# we might get here when dealing with exotic Linux flavors, see:
# http://code.google.com/p/psutil/issues/detail?id=313
msg = "'sin' and 'sout' swap memory stats couldn't " \
"be determined and were set to 0"
warnings.warn(msg, RuntimeWarning)
sin = sout = 0
finally:
f.close()
return nt_swapmeminfo(total, used, free, percent, sin, sout)
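# For reference, swap_memory() above scans /proc/vmstat lines of the form
# (counts are 4 KiB pages, hence the * 4 * 1024 conversion to bytes;
# sample values illustrative):
#   pswpin 12536
#   pswpout 39207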
# --- XXX deprecated memory functions
@deprecated('psutil.virtual_memory().cached')
def cached_phymem():
return virtual_memory().cached
@deprecated('psutil.virtual_memory().buffers')
def phymem_buffers():
return virtual_memory().buffers
# --- system CPU functions
@memoize
def _get_cputimes_ntuple():
""" Return a (nt, rindex) tuple depending on the CPU times available
on this Linux kernel version which may be:
user, nice, system, idle, iowait, irq, softirq [steal, [guest, [guest_nice]]]
"""
f = open('/proc/stat', 'r')
try:
values = f.readline().split()[1:]
finally:
f.close()
fields = ['user', 'nice', 'system', 'idle', 'iowait', 'irq', 'softirq']
rindex = 8
vlen = len(values)
if vlen >= 8:
# Linux >= 2.6.11
fields.append('steal')
rindex += 1
if vlen >= 9:
# Linux >= 2.6.24
fields.append('guest')
rindex += 1
if vlen >= 10:
# Linux >= 3.2.0
fields.append('guest_nice')
rindex += 1
return (namedtuple('cputimes', ' '.join(fields)), rindex)
def get_system_cpu_times():
"""Return a named tuple representing the following system-wide
CPU times:
user, nice, system, idle, iowait, irq, softirq [steal, [guest, [guest_nice]]]
Last 3 fields may not be available on all Linux kernel versions.
"""
f = open('/proc/stat', 'r')
try:
values = f.readline().split()
finally:
f.close()
nt, rindex = _get_cputimes_ntuple()
fields = values[1:rindex]
fields = [float(x) / _CLOCK_TICKS for x in fields]
return nt(*fields)
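# Illustrative usage sketch (values are hypothetical): on a kernel that
# exposes the 'steal' field, get_system_cpu_times() returns something like
#
#     cputimes(user=4034.21, nice=12.0, system=901.5, idle=72011.8,
#              iowait=80.1, irq=0.0, softirq=3.2, steal=0.0)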
def get_system_per_cpu_times():
"""Return a list of namedtuple representing the CPU times
for every CPU available on the system.
"""
nt, rindex = _get_cputimes_ntuple()
cpus = []
f = open('/proc/stat', 'r')
try:
# get rid of the first line which refers to system wide CPU stats
f.readline()
for line in f:
if line.startswith('cpu'):
fields = line.split()[1:rindex]
fields = [float(x) / _CLOCK_TICKS for x in fields]
entry = nt(*fields)
cpus.append(entry)
return cpus
finally:
f.close()
# --- system disk functions
def disk_partitions(all=False):
"""Return mounted disk partitions as a list of nameduples"""
phydevs = []
f = open("/proc/filesystems", "r")
try:
for line in f:
if not line.startswith("nodev"):
phydevs.append(line.strip())
finally:
f.close()
retlist = []
partitions = _psutil_linux.get_disk_partitions()
for partition in partitions:
device, mountpoint, fstype, opts = partition
if device == 'none':
device = ''
if not all:
if device == '' or fstype not in phydevs:
continue
ntuple = nt_partition(device, mountpoint, fstype, opts)
retlist.append(ntuple)
return retlist
get_disk_usage = _psposix.get_disk_usage
# --- other system functions
def get_system_users():
"""Return currently connected users as a list of namedtuples."""
retlist = []
rawlist = _psutil_linux.get_system_users()
for item in rawlist:
user, tty, hostname, tstamp, user_process = item
# note: the underlying C function includes entries about
# system boot, run level and others. We might want
# to use them in the future.
if not user_process:
continue
if hostname == ':0.0':
hostname = 'localhost'
nt = nt_user(user, tty or None, hostname, tstamp)
retlist.append(nt)
return retlist
# --- process functions
def get_pid_list():
"""Returns a list of PIDs currently running on the system."""
pids = [int(x) for x in os.listdir('/proc') if x.isdigit()]
return pids
def pid_exists(pid):
"""Check For the existence of a unix pid."""
return _psposix.pid_exists(pid)
def net_io_counters():
"""Return network I/O statistics for every network interface
installed on the system as a dict of raw tuples.
"""
f = open("/proc/net/dev", "r")
try:
lines = f.readlines()
finally:
f.close()
retdict = {}
for line in lines[2:]:
colon = line.find(':')
assert colon > 0, line
name = line[:colon].strip()
fields = line[colon+1:].strip().split()
bytes_recv = int(fields[0])
packets_recv = int(fields[1])
errin = int(fields[2])
        dropin = int(fields[3])
bytes_sent = int(fields[8])
packets_sent = int(fields[9])
errout = int(fields[10])
dropout = int(fields[11])
retdict[name] = (bytes_sent, bytes_recv, packets_sent, packets_recv,
errin, errout, dropin, dropout)
return retdict
def disk_io_counters():
"""Return disk I/O statistics for every disk installed on the
system as a dict of raw tuples.
"""
    # man iostat states that sectors are equivalent to blocks and
    # have a size of 512 bytes since 2.4 kernels. This value is
# needed to calculate the amount of disk I/O in bytes.
SECTOR_SIZE = 512
# determine partitions we want to look for
partitions = []
f = open("/proc/partitions", "r")
try:
lines = f.readlines()[2:]
finally:
f.close()
for line in reversed(lines):
_, _, _, name = line.split()
if name[-1].isdigit():
# we're dealing with a partition (e.g. 'sda1'); 'sda' will
# also be around but we want to omit it
partitions.append(name)
else:
if not partitions or not partitions[-1].startswith(name):
# we're dealing with a disk entity for which no
# partitions have been defined (e.g. 'sda' but
# 'sda1' was not around), see:
# http://code.google.com/p/psutil/issues/detail?id=338
partitions.append(name)
#
retdict = {}
f = open("/proc/diskstats", "r")
try:
lines = f.readlines()
finally:
f.close()
for line in lines:
# http://www.mjmwired.net/kernel/Documentation/iostats.txt
_, _, name, reads, _, rbytes, rtime, writes, _, wbytes, wtime = \
line.split()[:11]
if name in partitions:
rbytes = int(rbytes) * SECTOR_SIZE
wbytes = int(wbytes) * SECTOR_SIZE
reads = int(reads)
writes = int(writes)
rtime = int(rtime)
wtime = int(wtime)
retdict[name] = (reads, writes, rbytes, wbytes, rtime, wtime)
return retdict
# taken from /fs/proc/array.c
_status_map = {"R" : STATUS_RUNNING,
"S" : STATUS_SLEEPING,
"D" : STATUS_DISK_SLEEP,
"T" : STATUS_STOPPED,
"t" : STATUS_TRACING_STOP,
"Z" : STATUS_ZOMBIE,
"X" : STATUS_DEAD,
"x" : STATUS_DEAD,
"K" : STATUS_WAKE_KILL,
"W" : STATUS_WAKING}
# --- decorators
def wrap_exceptions(fun):
"""Decorator which translates bare OSError and IOError exceptions
into NoSuchProcess and AccessDenied.
"""
@wraps(fun)
def wrapper(self, *args, **kwargs):
try:
return fun(self, *args, **kwargs)
except EnvironmentError:
# ENOENT (no such file or directory) gets raised on open().
# ESRCH (no such process) can get raised on read() if
# process is gone in meantime.
err = sys.exc_info()[1]
if err.errno in (errno.ENOENT, errno.ESRCH):
raise NoSuchProcess(self.pid, self._process_name)
if err.errno in (errno.EPERM, errno.EACCES):
raise AccessDenied(self.pid, self._process_name)
raise
return wrapper
class Process(object):
"""Linux process implementation."""
__slots__ = ["pid", "_process_name"]
def __init__(self, pid):
self.pid = pid
self._process_name = None
@wrap_exceptions
def get_process_name(self):
f = open("/proc/%s/stat" % self.pid)
try:
name = f.read().split(' ')[1].replace('(', '').replace(')', '')
finally:
f.close()
# XXX - gets changed later and probably needs refactoring
return name
def get_process_exe(self):
try:
exe = os.readlink("/proc/%s/exe" % self.pid)
except (OSError, IOError):
err = sys.exc_info()[1]
if err.errno == errno.ENOENT:
# no such file error; might be raised also if the
# path actually exists for system processes with
# low pids (about 0-20)
if os.path.lexists("/proc/%s/exe" % self.pid):
return ""
else:
# ok, it is a process which has gone away
raise NoSuchProcess(self.pid, self._process_name)
if err.errno in (errno.EPERM, errno.EACCES):
raise AccessDenied(self.pid, self._process_name)
raise
# readlink() might return paths containing null bytes causing
# problems when used with other fs-related functions (os.*,
# open(), ...)
exe = exe.replace('\x00', '')
# Certain names have ' (deleted)' appended. Usually this is
# bogus as the file actually exists. Either way that's not
# important as we don't want to discriminate executables which
# have been deleted.
if exe.endswith(" (deleted)") and not os.path.exists(exe):
exe = exe[:-10]
return exe
@wrap_exceptions
def get_process_cmdline(self):
f = open("/proc/%s/cmdline" % self.pid)
try:
# return the args as a list
return [x for x in f.read().split('\x00') if x]
finally:
f.close()
@wrap_exceptions
def get_process_terminal(self):
tmap = _psposix._get_terminal_map()
f = open("/proc/%s/stat" % self.pid)
try:
tty_nr = int(f.read().split(' ')[6])
finally:
f.close()
try:
return tmap[tty_nr]
except KeyError:
return None
@wrap_exceptions
def get_process_io_counters(self):
f = open("/proc/%s/io" % self.pid)
try:
for line in f:
if line.startswith("rchar"):
read_count = int(line.split()[1])
elif line.startswith("wchar"):
write_count = int(line.split()[1])
elif line.startswith("read_bytes"):
read_bytes = int(line.split()[1])
elif line.startswith("write_bytes"):
write_bytes = int(line.split()[1])
return nt_io(read_count, write_count, read_bytes, write_bytes)
finally:
f.close()
if not os.path.exists('/proc/%s/io' % os.getpid()):
def get_process_io_counters(self):
raise NotImplementedError("couldn't find /proc/%s/io (kernel " \
"too old?)" % self.pid)
@wrap_exceptions
def get_cpu_times(self):
f = open("/proc/%s/stat" % self.pid)
try:
st = f.read().strip()
finally:
f.close()
# ignore the first two values ("pid (exe)")
st = st[st.find(')') + 2:]
values = st.split(' ')
utime = float(values[11]) / _CLOCK_TICKS
stime = float(values[12]) / _CLOCK_TICKS
return nt_cputimes(utime, stime)
@wrap_exceptions
def process_wait(self, timeout=None):
try:
return _psposix.wait_pid(self.pid, timeout)
except TimeoutExpired:
raise TimeoutExpired(self.pid, self._process_name)
@wrap_exceptions
def get_process_create_time(self):
f = open("/proc/%s/stat" % self.pid)
try:
st = f.read().strip()
finally:
f.close()
# ignore the first two values ("pid (exe)")
st = st[st.rfind(')') + 2:]
values = st.split(' ')
        # According to proc(5), starttime is field 22 (values[19] once
        # the leading "pid (comm)" fields are stripped) and the unit is
        # jiffies (clock ticks).
        # We first divide it by clock ticks and then add the boot time,
        # returning seconds since the epoch, in UTC.
starttime = (float(values[19]) / _CLOCK_TICKS) + BOOT_TIME
return starttime
@wrap_exceptions
def get_memory_info(self):
f = open("/proc/%s/statm" % self.pid)
try:
vms, rss = f.readline().split()[:2]
return nt_meminfo(int(rss) * _PAGESIZE,
int(vms) * _PAGESIZE)
finally:
f.close()
_nt_ext_mem = namedtuple('meminfo', 'rss vms shared text lib data dirty')
@wrap_exceptions
def get_ext_memory_info(self):
# ============================================================
# | FIELD | DESCRIPTION | AKA | TOP |
# ============================================================
# | rss | resident set size | | RES |
# | vms | total program size | size | VIRT |
# | shared | shared pages (from shared mappings) | | SHR |
# | text | text ('code') | trs | CODE |
# | lib | library (unused in Linux 2.6) | lrs | |
# | data | data + stack | drs | DATA |
# | dirty | dirty pages (unused in Linux 2.6) | dt | |
# ============================================================
f = open("/proc/%s/statm" % self.pid)
try:
vms, rss, shared, text, lib, data, dirty = \
[int(x) * _PAGESIZE for x in f.readline().split()[:7]]
finally:
f.close()
return self._nt_ext_mem(rss, vms, shared, text, lib, data, dirty)
_mmap_base_fields = ['path', 'rss', 'size', 'pss', 'shared_clean',
'shared_dirty', 'private_clean', 'private_dirty',
'referenced', 'anonymous', 'swap',]
nt_mmap_grouped = namedtuple('mmap', ' '.join(_mmap_base_fields))
nt_mmap_ext = namedtuple('mmap', 'addr perms ' + ' '.join(_mmap_base_fields))
def get_memory_maps(self):
"""Return process's mapped memory regions as a list of nameduples.
Fields are explained in 'man proc'; here is an updated (Apr 2012)
version: http://goo.gl/fmebo
"""
f = None
try:
f = open("/proc/%s/smaps" % self.pid)
first_line = f.readline()
current_block = [first_line]
def get_blocks():
data = {}
for line in f:
fields = line.split(None, 5)
if not fields[0].endswith(':'):
# new block section
yield (current_block.pop(), data)
current_block.append(line)
else:
try:
data[fields[0]] = int(fields[1]) * 1024
except ValueError:
if fields[0].startswith('VmFlags:'):
# see issue #369
continue
else:
raise ValueError("don't know how to interpret" \
" line %r" % line)
yield (current_block.pop(), data)
if first_line: # smaps file can be empty
for header, data in get_blocks():
hfields = header.split(None, 5)
try:
addr, perms, offset, dev, inode, path = hfields
except ValueError:
addr, perms, offset, dev, inode, path = hfields + ['']
if not path:
path = '[anon]'
else:
path = path.strip()
yield (addr, perms, path,
data['Rss:'],
data.get('Size:', 0),
data.get('Pss:', 0),
data.get('Shared_Clean:', 0),
data.get('Shared_Dirty:', 0),
data.get('Private_Clean:', 0),
data.get('Private_Dirty:', 0),
data.get('Referenced:', 0),
data.get('Anonymous:', 0),
data.get('Swap:', 0))
f.close()
except EnvironmentError:
# XXX - Can't use wrap_exceptions decorator as we're
# returning a generator; this probably needs some
# refactoring in order to avoid this code duplication.
if f is not None:
f.close()
err = sys.exc_info()[1]
if err.errno in (errno.ENOENT, errno.ESRCH):
raise NoSuchProcess(self.pid, self._process_name)
if err.errno in (errno.EPERM, errno.EACCES):
raise AccessDenied(self.pid, self._process_name)
raise
except:
if f is not None:
f.close()
raise
f.close()
if not os.path.exists('/proc/%s/smaps' % os.getpid()):
        def get_memory_maps(self):
msg = "couldn't find /proc/%s/smaps; kernel < 2.6.14 or CONFIG_MMU " \
"kernel configuration option is not enabled" % self.pid
raise NotImplementedError(msg)
@wrap_exceptions
def get_process_cwd(self):
# readlink() might return paths containing null bytes causing
# problems when used with other fs-related functions (os.*,
# open(), ...)
path = os.readlink("/proc/%s/cwd" % self.pid)
return path.replace('\x00', '')
@wrap_exceptions
def get_num_ctx_switches(self):
vol = unvol = None
f = open("/proc/%s/status" % self.pid)
try:
for line in f:
if line.startswith("voluntary_ctxt_switches"):
vol = int(line.split()[1])
elif line.startswith("nonvoluntary_ctxt_switches"):
unvol = int(line.split()[1])
if vol is not None and unvol is not None:
return nt_ctxsw(vol, unvol)
raise NotImplementedError("the 'voluntary_ctxt_switches' and " \
"'nonvoluntary_ctxt_switches' fields were not found in " \
"/proc/%s/status; the kernel is probably older than 2.6.23" \
% self.pid)
finally:
f.close()
@wrap_exceptions
def get_process_num_threads(self):
f = open("/proc/%s/status" % self.pid)
try:
for line in f:
if line.startswith("Threads:"):
return int(line.split()[1])
raise NotImplementedError("line not found")
finally:
f.close()
@wrap_exceptions
def get_process_threads(self):
thread_ids = os.listdir("/proc/%s/task" % self.pid)
thread_ids.sort()
retlist = []
hit_enoent = False
for thread_id in thread_ids:
try:
f = open("/proc/%s/task/%s/stat" % (self.pid, thread_id))
except EnvironmentError:
err = sys.exc_info()[1]
if err.errno == errno.ENOENT:
# no such file or directory; it means thread
# disappeared on us
hit_enoent = True
continue
raise
try:
st = f.read().strip()
finally:
f.close()
# ignore the first two values ("pid (exe)")
st = st[st.find(')') + 2:]
values = st.split(' ')
utime = float(values[11]) / _CLOCK_TICKS
stime = float(values[12]) / _CLOCK_TICKS
ntuple = nt_thread(int(thread_id), utime, stime)
retlist.append(ntuple)
if hit_enoent:
# raise NSP if the process disappeared on us
os.stat('/proc/%s' % self.pid)
return retlist
@wrap_exceptions
def get_process_nice(self):
#f = open('/proc/%s/stat' % self.pid, 'r')
#try:
# data = f.read()
# return int(data.split()[18])
#finally:
# f.close()
# Use C implementation
return _psutil_posix.getpriority(self.pid)
@wrap_exceptions
def set_process_nice(self, value):
return _psutil_posix.setpriority(self.pid, value)
@wrap_exceptions
def get_process_cpu_affinity(self):
from_bitmask = lambda x: [i for i in xrange(64) if (1 << i) & x]
bitmask = _psutil_linux.get_process_cpu_affinity(self.pid)
return from_bitmask(bitmask)
@wrap_exceptions
def set_process_cpu_affinity(self, value):
def to_bitmask(l):
if not l:
raise ValueError("invalid argument %r" % l)
out = 0
for b in l:
if not isinstance(b, (int, long)) or b < 0:
raise ValueError("invalid argument %r" % b)
out |= 2**b
return out
bitmask = to_bitmask(value)
try:
_psutil_linux.set_process_cpu_affinity(self.pid, bitmask)
except OSError:
err = sys.exc_info()[1]
if err.errno == errno.EINVAL:
allcpus = list(range(len(get_system_per_cpu_times())))
for cpu in value:
if cpu not in allcpus:
raise ValueError("invalid CPU %i" % cpu)
raise
# only starting from kernel 2.6.13
if hasattr(_psutil_linux, "ioprio_get"):
@wrap_exceptions
def get_process_ionice(self):
ioclass, value = _psutil_linux.ioprio_get(self.pid)
return nt_ionice(ioclass, value)
@wrap_exceptions
def set_process_ionice(self, ioclass, value):
if ioclass in (IOPRIO_CLASS_NONE, None):
if value:
raise ValueError("can't specify value with IOPRIO_CLASS_NONE")
ioclass = IOPRIO_CLASS_NONE
value = 0
if ioclass in (IOPRIO_CLASS_RT, IOPRIO_CLASS_BE):
if value is None:
value = 4
elif ioclass == IOPRIO_CLASS_IDLE:
if value:
raise ValueError("can't specify value with IOPRIO_CLASS_IDLE")
value = 0
else:
value = 0
if not 0 <= value <= 8:
raise ValueError("value argument range expected is between 0 and 8")
return _psutil_linux.ioprio_set(self.pid, ioclass, value)
@wrap_exceptions
def process_rlimit(self, resource, limits=None):
if limits is None:
# get
return _psutil_linux.prlimit(self.pid, resource)
else:
# set
if len(limits) != 2:
raise ValueError("second argument must be a (soft, hard) tuple")
soft, hard = limits
_psutil_linux.prlimit(self.pid, resource, soft, hard)
@wrap_exceptions
def get_process_status(self):
f = open("/proc/%s/status" % self.pid)
try:
for line in f:
if line.startswith("State:"):
letter = line.split()[1]
if letter in _status_map:
return _status_map[letter]
return constant(-1, '?')
finally:
f.close()
@wrap_exceptions
def get_open_files(self):
retlist = []
files = os.listdir("/proc/%s/fd" % self.pid)
hit_enoent = False
for fd in files:
file = "/proc/%s/fd/%s" % (self.pid, fd)
if os.path.islink(file):
try:
file = os.readlink(file)
except OSError:
# ENOENT == file which is gone in the meantime
err = sys.exc_info()[1]
if err.errno == errno.ENOENT:
hit_enoent = True
continue
raise
else:
                # If file is not an absolute path there's no way
                # to tell whether it's a regular file or not,
                # so we skip it. A regular file path is always
                # supposed to be absolute, though.
if file.startswith('/') and isfile_strict(file):
ntuple = nt_openfile(file, int(fd))
retlist.append(ntuple)
if hit_enoent:
# raise NSP if the process disappeared on us
os.stat('/proc/%s' % self.pid)
return retlist
@wrap_exceptions
def get_connections(self, kind='inet'):
"""Return connections opened by process as a list of namedtuples.
The kind parameter filters for connections that fit the following
criteria:
        Kind Value      Connections using
        inet            IPv4 and IPv6
        inet4           IPv4
        inet6           IPv6
        tcp             TCP
        tcp4            TCP over IPv4
        tcp6            TCP over IPv6
        udp             UDP
        udp4            UDP over IPv4
        udp6            UDP over IPv6
        all             the sum of all the possible families and protocols
"""
# Note: in case of UNIX sockets we're only able to determine the
# local bound path while the remote endpoint is not retrievable:
# http://goo.gl/R3GHM
inodes = {}
        # os.listdir() is going to raise a lot of access denied
        # exceptions for an unprivileged user; that's fine:
        # lsof does the same, so it's unlikely that we can do better.
for fd in os.listdir("/proc/%s/fd" % self.pid):
try:
inode = os.readlink("/proc/%s/fd/%s" % (self.pid, fd))
except OSError:
continue
if inode.startswith('socket:['):
# the process is using a socket
inode = inode[8:][:-1]
inodes[inode] = fd
if not inodes:
# no connections for this process
return []
def process(file, family, type_):
retlist = []
try:
f = open(file, 'r')
except IOError:
# IPv6 not supported on this platform
err = sys.exc_info()[1]
if err.errno == errno.ENOENT and file.endswith('6'):
return []
else:
raise
try:
f.readline() # skip the first line
for line in f:
# IPv4 / IPv6
if family in (socket.AF_INET, socket.AF_INET6):
_, laddr, raddr, status, _, _, _, _, _, inode = \
line.split()[:10]
if inode in inodes:
laddr = self._decode_address(laddr, family)
raddr = self._decode_address(raddr, family)
if type_ == socket.SOCK_STREAM:
status = _TCP_STATES_TABLE[status]
else:
status = CONN_NONE
fd = int(inodes[inode])
conn = nt_connection(fd, family, type_, laddr,
raddr, status)
retlist.append(conn)
elif family == socket.AF_UNIX:
tokens = line.split()
_, _, _, _, type_, _, inode = tokens[0:7]
if inode in inodes:
if len(tokens) == 8:
path = tokens[-1]
else:
path = ""
fd = int(inodes[inode])
type_ = int(type_)
conn = nt_connection(fd, family, type_, path,
None, CONN_NONE)
retlist.append(conn)
else:
raise ValueError(family)
return retlist
finally:
f.close()
tcp4 = ("tcp" , socket.AF_INET , socket.SOCK_STREAM)
tcp6 = ("tcp6", socket.AF_INET6, socket.SOCK_STREAM)
udp4 = ("udp" , socket.AF_INET , socket.SOCK_DGRAM)
udp6 = ("udp6", socket.AF_INET6, socket.SOCK_DGRAM)
unix = ("unix", socket.AF_UNIX, None)
tmap = {
"all" : (tcp4, tcp6, udp4, udp6, unix),
"tcp" : (tcp4, tcp6),
"tcp4" : (tcp4,),
"tcp6" : (tcp6,),
"udp" : (udp4, udp6),
"udp4" : (udp4,),
"udp6" : (udp6,),
"unix" : (unix,),
"inet" : (tcp4, tcp6, udp4, udp6),
"inet4": (tcp4, udp4),
"inet6": (tcp6, udp6),
}
if kind not in tmap:
raise ValueError("invalid %r kind argument; choose between %s"
% (kind, ', '.join([repr(x) for x in tmap])))
ret = []
for f, family, type_ in tmap[kind]:
ret += process("/proc/net/%s" % f, family, type_)
# raise NSP if the process disappeared on us
os.stat('/proc/%s' % self.pid)
return ret
@wrap_exceptions
def get_num_fds(self):
return len(os.listdir("/proc/%s/fd" % self.pid))
@wrap_exceptions
def get_process_ppid(self):
f = open("/proc/%s/status" % self.pid)
try:
for line in f:
if line.startswith("PPid:"):
# PPid: nnnn
return int(line.split()[1])
raise NotImplementedError("line not found")
finally:
f.close()
@wrap_exceptions
def get_process_uids(self):
f = open("/proc/%s/status" % self.pid)
try:
for line in f:
if line.startswith('Uid:'):
_, real, effective, saved, fs = line.split()
return nt_uids(int(real), int(effective), int(saved))
raise NotImplementedError("line not found")
finally:
f.close()
@wrap_exceptions
def get_process_gids(self):
f = open("/proc/%s/status" % self.pid)
try:
for line in f:
if line.startswith('Gid:'):
_, real, effective, saved, fs = line.split()
return nt_gids(int(real), int(effective), int(saved))
raise NotImplementedError("line not found")
finally:
f.close()
@staticmethod
def _decode_address(addr, family):
"""Accept an "ip:port" address as displayed in /proc/net/*
and convert it into a human readable form, like:
"0500000A:0016" -> ("10.0.0.5", 22)
"0000000000000000FFFF00000100007F:9E49" -> ("::ffff:127.0.0.1", 40521)
        The IP address portion is a four-byte hexadecimal number in host
        byte order; on little-endian machines the least significant byte
        is listed first, so we need to reverse the order of the bytes to
        convert it to an IP address.
The port is represented as a two-byte hexadecimal number.
Reference:
http://linuxdevcenter.com/pub/a/linux/2000/11/16/LinuxAdmin.html
"""
ip, port = addr.split(':')
port = int(port, 16)
if PY3:
ip = ip.encode('ascii')
# this usually refers to a local socket in listen mode with
# no end-points connected
if not port:
return ()
if family == socket.AF_INET:
# see: http://code.google.com/p/psutil/issues/detail?id=201
if sys.byteorder == 'little':
ip = socket.inet_ntop(family, base64.b16decode(ip)[::-1])
else:
ip = socket.inet_ntop(family, base64.b16decode(ip))
else: # IPv6
# old version - let's keep it, just in case...
#ip = ip.decode('hex')
#return socket.inet_ntop(socket.AF_INET6,
# ''.join(ip[i:i+4][::-1] for i in xrange(0, 16, 4)))
ip = base64.b16decode(ip)
# see: http://code.google.com/p/psutil/issues/detail?id=201
if sys.byteorder == 'little':
ip = socket.inet_ntop(socket.AF_INET6,
struct.pack('>4I', *struct.unpack('<4I', ip)))
else:
ip = socket.inet_ntop(socket.AF_INET6,
struct.pack('<4I', *struct.unpack('<4I', ip)))
return (ip, port)
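    # Worked trace of the little-endian IPv4 branch above, matching the
    # docstring example: base64.b16decode('0500000A') yields the bytes
    # 05 00 00 0A; reversing them gives 0A 00 00 05, which inet_ntop()
    # renders as "10.0.0.5", and int('0016', 16) gives port 22.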
| bsd-3-clause |
Pear0/binutils-gdb | gdb/python/lib/gdb/xmethod.py | 9 | 11077 | # Python side of the support for xmethods.
# Copyright (C) 2013-2017 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Utilities for defining xmethods"""
import gdb
import re
import sys
if sys.version_info[0] > 2:
# Python 3 removed basestring and long
basestring = str
long = int
class XMethod(object):
"""Base class (or a template) for an xmethod description.
Currently, the description requires only the 'name' and 'enabled'
attributes. Description objects are managed by 'XMethodMatcher'
objects (see below). Note that this is only a template for the
interface of the XMethodMatcher.methods objects. One could use
this class or choose to use an object which supports this exact same
    interface. Also, an XMethodMatcher can choose not to use its 'methods'
attribute. In such cases this class (or an equivalent) is not used.
Attributes:
name: The name of the xmethod.
enabled: A boolean indicating if the xmethod is enabled.
"""
def __init__(self, name):
self.name = name
self.enabled = True
class XMethodMatcher(object):
"""Abstract base class for matching an xmethod.
When looking for xmethods, GDB invokes the `match' method of a
registered xmethod matcher to match the object type and method name.
The `match' method in concrete classes derived from this class should
return an `XMethodWorker' object, or a list of `XMethodWorker'
objects if there is a match (see below for 'XMethodWorker' class).
Attributes:
name: The name of the matcher.
enabled: A boolean indicating if the matcher is enabled.
methods: A sequence of objects of type 'XMethod', or objects
which have at least the attributes of an 'XMethod' object.
This list is used by the 'enable'/'disable'/'info' commands to
enable/disable/list the xmethods registered with GDB. See
the 'match' method below to know how this sequence is used.
This attribute is None if the matcher chooses not have any
xmethods managed by it.
"""
def __init__(self, name):
"""
Args:
name: An identifying name for the xmethod or the group of
xmethods returned by the `match' method.
"""
self.name = name
self.enabled = True
self.methods = None
def match(self, class_type, method_name):
"""Match class type and method name.
In derived classes, it should return an XMethodWorker object, or a
sequence of 'XMethodWorker' objects. Only those xmethod workers
whose corresponding 'XMethod' descriptor object is enabled should be
returned.
Args:
class_type: The class type (gdb.Type object) to match.
method_name: The name (string) of the method to match.
"""
raise NotImplementedError("XMethodMatcher match")
class XMethodWorker(object):
"""Base class for all xmethod workers defined in Python.
An xmethod worker is an object which matches the method arguments, and
invokes the method when GDB wants it to. Internally, GDB first invokes the
    'get_arg_types' method to perform overload resolution. If GDB chooses to
    invoke this Python xmethod, then it invokes it via the overridden
'__call__' method. The 'get_result_type' method is used to implement
'ptype' on the xmethod.
Derived classes should override the 'get_arg_types', 'get_result_type'
and '__call__' methods.
"""
def get_arg_types(self):
"""Return arguments types of an xmethod.
A sequence of gdb.Type objects corresponding to the arguments of the
xmethod are returned. If the xmethod takes no arguments, then 'None'
or an empty sequence is returned. If the xmethod takes only a single
argument, then a gdb.Type object or a sequence with a single gdb.Type
element is returned.
"""
raise NotImplementedError("XMethodWorker get_arg_types")
def get_result_type(self, *args):
"""Return the type of the result of the xmethod.
Args:
args: Arguments to the method. Each element of the tuple is a
gdb.Value object. The first element is the 'this' pointer
value. These are the same arguments passed to '__call__'.
Returns:
A gdb.Type object representing the type of the result of the
xmethod.
"""
raise NotImplementedError("XMethodWorker get_result_type")
def __call__(self, *args):
"""Invoke the xmethod.
Args:
args: Arguments to the method. Each element of the tuple is a
gdb.Value object. The first element is the 'this' pointer
value.
Returns:
A gdb.Value corresponding to the value returned by the xmethod.
Returns 'None' if the method does not return anything.
"""
raise NotImplementedError("XMethodWorker __call__")
class SimpleXMethodMatcher(XMethodMatcher):
"""A utility class to implement simple xmethod mathers and workers.
See the __init__ method below for information on how instances of this
class can be used.
For simple classes and methods, one can choose to use this class. For
complex xmethods, which need to replace/implement template methods on
possibly template classes, one should implement their own xmethod
matchers and workers. See py-xmethods.py in testsuite/gdb.python
directory of the GDB source tree for examples.
"""
class SimpleXMethodWorker(XMethodWorker):
def __init__(self, method_function, arg_types):
self._arg_types = arg_types
self._method_function = method_function
def get_arg_types(self):
return self._arg_types
def __call__(self, *args):
return self._method_function(*args)
def __init__(self, name, class_matcher, method_matcher, method_function,
*arg_types):
"""
Args:
name: Name of the xmethod matcher.
class_matcher: A regular expression used to match the name of the
class whose method this xmethod is implementing/replacing.
method_matcher: A regular expression used to match the name of the
method this xmethod is implementing/replacing.
method_function: A Python callable which would be called via the
'invoke' method of the worker returned by the objects of this
class. This callable should accept the object (*this) as the
first argument followed by the rest of the arguments to the
method. All arguments to this function should be gdb.Value
objects.
arg_types: The gdb.Type objects corresponding to the arguments that
this xmethod takes. It can be None, or an empty sequence,
or a single gdb.Type object, or a sequence of gdb.Type objects.
"""
XMethodMatcher.__init__(self, name)
assert callable(method_function), (
"The 'method_function' argument to 'SimpleXMethodMatcher' "
"__init__ method should be a callable.")
self._method_function = method_function
self._class_matcher = class_matcher
self._method_matcher = method_matcher
self._arg_types = arg_types
def match(self, class_type, method_name):
cm = re.match(self._class_matcher, str(class_type.unqualified().tag))
mm = re.match(self._method_matcher, method_name)
if cm and mm:
return SimpleXMethodMatcher.SimpleXMethodWorker(
self._method_function, self._arg_types)
# A helper function for register_xmethod_matcher which returns an error
# object if MATCHER does not have the requisite attributes in the proper
# format.
def _validate_xmethod_matcher(matcher):
if not hasattr(matcher, "match"):
return TypeError("Xmethod matcher is missing method: match")
if not hasattr(matcher, "name"):
return TypeError("Xmethod matcher is missing attribute: name")
if not hasattr(matcher, "enabled"):
return TypeError("Xmethod matcher is missing attribute: enabled")
if not isinstance(matcher.name, basestring):
return TypeError("Attribute 'name' of xmethod matcher is not a "
"string")
if matcher.name.find(";") >= 0:
return ValueError("Xmethod matcher name cannot contain ';' in it")
# A helper function for register_xmethod_matcher which looks up an
# xmethod matcher with NAME in LOCUS. Returns the index of the xmethod
# matcher in 'xmethods' sequence attribute of the LOCUS. If NAME is not
# found in LOCUS, then -1 is returned.
def _lookup_xmethod_matcher(locus, name):
for i in range(0, len(locus.xmethods)):
if locus.xmethods[i].name == name:
return i
return -1
def register_xmethod_matcher(locus, matcher, replace=False):
"""Registers a xmethod matcher MATCHER with a LOCUS.
Arguments:
locus: The locus in which the xmethods should be registered.
It can be 'None' to indicate that the xmethods should be
registered globally. Or, it could be a gdb.Objfile or a
gdb.Progspace object in which the xmethods should be
registered.
matcher: The xmethod matcher to register with the LOCUS. It
should be an instance of 'XMethodMatcher' class.
replace: If True, replace any existing xmethod matcher with the
same name in the locus. Otherwise, if a matcher with the same name
exists in the locus, raise an exception.
"""
err = _validate_xmethod_matcher(matcher)
if err:
raise err
if not locus:
locus = gdb
if locus == gdb:
locus_name = "global"
else:
locus_name = locus.filename
index = _lookup_xmethod_matcher(locus, matcher.name)
if index >= 0:
if replace:
del locus.xmethods[index]
else:
raise RuntimeError("Xmethod matcher already registered with "
"%s: %s" % (locus_name, matcher.name))
if gdb.parameter("verbose"):
gdb.write("Registering xmethod matcher '%s' with %s' ...\n")
locus.xmethods.insert(0, matcher)
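# Hypothetical usage sketch: registering a SimpleXMethodMatcher globally.
# 'MyClass', 'my_method' and the data member are illustrative names only.
#
#     import gdb.xmethod
#     matcher = gdb.xmethod.SimpleXMethodMatcher(
#         'my_matcher', '^MyClass$', 'my_method',
#         lambda this: this['data_member'])
#     gdb.xmethod.register_xmethod_matcher(None, matcher)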
| gpl-2.0 |
NMGRL/pychron | pychron/entry/tasks/importer_panes.py | 3 | 4782 | # ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
# ============= local library imports ==========================
# from traitsui.tabular_adapter import TabularAdapter
#
#
# class ImportNameAdapter(TabularAdapter):
# columns = [('Name', 'name')]
#
#
# class ImportedNameAdapter(TabularAdapter):
# columns = [('Name', 'name'), ('Skipped', 'skipped')]
# skipped_text = Property
#
# def _get_skipped_text(self):
# return 'Yes' if self.item.skipped else ''
#
# def get_bg_color(self, obj, trait, row, column=0):
# color = 'white'
# if self.item.skipped:
# color = 'red'
# return color
# class ImporterPane(TraitsDockPane):
# name = 'Importer'
# id = 'pychron.labnumber.extractor'
#
# def traits_view(self):
# v = View(
# VGroup(
# HGroup(
# HGroup(Item('include_analyses', label='Analyses'),
# Item('include_blanks', label='Blanks'),
# Item('include_airs', label='Airs'),
# Item('include_cocktails', label='Cocktails'),
# label='Include',
# show_border=True,
# ),
# VGroup(
# HGroup(spring,
# UItem('import_button'),
# #Item('dry_run')
# ),
# label='Import',
# show_border=True
# )
# ),
# VGroup(
# HGroup(spring, Item('data_source')),
# # VFold(
# VGroup(
# VGroup(
# Item('object.extractor.dbconn_spec', style='custom', show_label=False),
# HGroup(spring, Item('object.extractor.connect_button', show_label=False)),
# label='Source'
# ),
# VGroup(
#
# HGroup(Item('import_kind', show_label=False),
# UItem('open_button', visible_when='import_kind=="rid_list"'),
# ),
# UItem('text_selected'),
# HGroup(
# Item('names', show_label=False, editor=TabularEditor(adapter=ImportNameAdapter(),
# editable=False,
# selected='selected',
# multi_select=True,
# scroll_to_row='scroll_to_row'
# )),
# # CustomLabel('custom_label1',
# # color='blue',
# # size=10),
# Item('imported_names', show_label=False,
# editor=TabularEditor(adapter=ImportedNameAdapter(),
# editable=False,
# ))
# ),
# # HGroup(spring, Item('import_button', show_label=False)),
# label='Results'
# )
# )
# )
# )
# )
# return v
# ============= EOF =============================================
| apache-2.0 |
sarutobi/flowofkindness | rynda/core/mixins.py | 3 | 2565 | # -*- coding: utf-8 -*-
from django.conf import settings
from django.views.generic.base import TemplateResponseMixin
from django.views.generic.edit import FormMixin, ProcessFormView
class PaginatorMixin(object):
""" Paginator line mixin. Best use with list-based mixins """
def paginator(self, num_pages, page=1, adj_pages=2, outside_range=3):
page = int(page)
num_pages = int(num_pages)
if page > num_pages:
page = num_pages
if page < 1:
page = 1
has_prev = has_next = False
if num_pages > 1:
if page > 1:
has_prev = True
if page < num_pages:
has_next = True
        # Minimal number of pages the pager needs before splitting into windows
pager_size = 2 * (outside_range + adj_pages) + 1
        # If pager_size is at least the total number of pages, the pager
        # will show all pages in range
if pager_size >= num_pages:
return {
'first': [], 'window': [n for n in range(1, num_pages + 1)],
'last': [], 'has_prev': has_prev, 'has_next': has_next}
# Checking page windows
# Current page in first (low) window
if (outside_range + adj_pages + 1) >= page:
first = []
window = [n for n in range(
1, outside_range + 2 + 2 * adj_pages)
if n > 0 and n < num_pages]
last = [n for n in range(num_pages - outside_range, num_pages+1)]
        # Current page in last (high) window
elif (num_pages - outside_range - adj_pages - 1) < page:
first = [n for n in range(1, outside_range + 1)]
window = [n for n in range(
num_pages - outside_range - 2 * adj_pages + 1, num_pages + 1)]
last = []
        # Current page in middle window
else:
first = [n for n in range(1, outside_range + 1)]
last = [n for n in range(
num_pages - outside_range + 1, num_pages+1)]
window = [n for n in range(
page - adj_pages, page + adj_pages + 1)
if n < num_pages]
return {
'first': first, 'window': window, 'last': last,
'has_prev': has_prev, 'has_next': has_next}
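    # Worked example (hypothetical values): with num_pages=20, page=10,
    # adj_pages=2 and outside_range=3, pager_size is 11 (< 20) and the
    # page falls in the middle window, so the method returns
    #     {'first': [1, 2, 3], 'window': [8, 9, 10, 11, 12],
    #      'last': [18, 19, 20], 'has_prev': True, 'has_next': True}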
class QueryStringMixin(object):
""" This mixin adds a query string to context. """
def get_context_data(self, **kwargs):
context = super(QueryStringMixin, self).get_context_data(**kwargs)
context['query_string'] = u'?%s' % self.request.META['QUERY_STRING']
return context
| mit |
Rdbaker/Mealbound | ceraon/utils.py | 1 | 4801 | # -*- coding: utf-8 -*-
"""Helper utilities and decorators."""
from datetime import timedelta as td
from datetime import tzinfo
from threading import Thread
import requests
from flask import Blueprint, current_app, flash, request
def get_fb_access_token():
"""Get an access token from facebook for graph API calls."""
base_url = 'https://graph.facebook.com/oauth/access_token?' \
'grant_type=client_credentials'
res = requests.get(
base_url + '&client_id={}'.format(current_app.config['FB_APP_ID']) +
'&client_secret={}'.format(current_app.config['FB_APP_SECRET']))
return res.json().get('access_token')
def friendly_arg_get(key, default=None, type_cast=None):
"""Same as request.args.get but returns default on ValueError."""
try:
return request.args.get(key, default=default, type=type_cast)
    except Exception:
return default
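# Illustrative call ('page' is a hypothetical query parameter):
#
#     page = friendly_arg_get('page', default=1, type_cast=int)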
class FlaskThread(Thread):
"""A utility class for threading in a flask app."""
def __init__(self, *args, **kwargs):
"""Create a new thread with a flask context."""
super().__init__(*args, **kwargs)
self.app = current_app._get_current_object()
def run(self):
"""Run the thread."""
# Make this an effective no-op if we're testing.
if not self.app.config['TESTING']:
with self.app.app_context():
super().run()
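# Hypothetical usage sketch ('send_email' and its argument are
# illustrative): spawn background work that keeps the app context alive.
#
#     t = FlaskThread(target=send_email, args=(recipient,))
#     t.start()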
def flash_errors(form, category='warning'):
"""Flash all errors for a form."""
for field, errors in form.errors.items():
for error in errors:
flash('{0} - {1}'.format(getattr(form, field).label.text, error),
category)
class RESTBlueprint(Blueprint):
"""A base class for a RESTful API's view blueprint.
This comes with helper methods that set up routes based on method/actions.
It infers the route_prefix based on the version and blueprint name in the
format: `/api/<version string>/<blueprint name string>`
then creates routes from that.
Example usage:
mod = RESTBlueprint('users', __name__, 'v2')
# route is: GET /api/v2/users/<uid>
@mod.find()
def find_user(uid):
return User.get(uid)
# route is: PATCH /api/v2/users/<uid>
@mod.update()
def update_user(uid):
return User.update(name='new name')
# route is: POST /api/v2/users
@mod.create()
def create_user():
return User.create(name='my new user')
The `find`, `update`, `replace`, and `destroy` methods will add a
parameter called `uid` to your route. Make sure to correctly resolve that
to your entity's ID.
"""
def __init__(self, blueprint_name, name, version):
return super(RESTBlueprint, self).__init__(
'api.{}.{}'.format(version, blueprint_name),
name, url_prefix='/api/{}/{}'.format(version, blueprint_name))
def flexible_route(self, *args, **kwargs):
kwargs.update({'strict_slashes': False})
return self.route(*args, **kwargs)
def create(self, *args, **kwargs):
kwargs.update({'methods': ['POST']})
return self.flexible_route('/', *args, **kwargs)
def list(self, *args, **kwargs):
kwargs.update({'methods': ['GET']})
return self.flexible_route('/', *args, **kwargs)
def find(self, converter='string', *args, **kwargs):
kwargs.update({'methods': ['GET']})
return self.flexible_route('/<{}:uid>'.format(converter), *args,
**kwargs)
def update(self, converter='string', *args, **kwargs):
kwargs.update({'methods': ['PATCH']})
return self.flexible_route('/<{}:uid>'.format(converter), *args,
**kwargs)
def replace(self, converter='string', *args, **kwargs):
kwargs.update({'methods': ['PUT']})
return self.flexible_route('/<{}:uid>'.format(converter), *args,
**kwargs)
def destroy(self, converter='string', *args, **kwargs):
kwargs.update({'methods': ['DELETE']})
return self.flexible_route('/<{}:uid>'.format(converter), *args,
**kwargs)
class UTC(tzinfo):
"""tzinfo for a UTC timezone."""
def dst(self, dt_obj):
"""Return the DST offset in minutes from UTC."""
return 0
def fromutc(self, dt_obj):
"""Return a datetime object in local time from a UTC datetime."""
return dt_obj
def tzname(self, dt_obj):
"""Return the name of the timezone from a datetime obj."""
return 'UTC/GMT'
def utcoffset(self, dt_obj):
"""Return a timedelta showing offset from UTC.
Negative values indicating West of UTC
"""
return td()
| bsd-3-clause |
vFense/vFenseAgent-nix | agent/deps/mac/Python-2.7.5/lib/python2.7/test/profilee.py | 398 | 3041 | """
Input for test_profile.py and test_cprofile.py.
IMPORTANT: This stuff is touchy. If you modify anything above the
test class you'll have to regenerate the stats by running the two
test files.
*ALL* NUMBERS in the expected output are relevant. If you change
the formatting of pstats, please don't just regenerate the expected
output without checking very carefully that not a single number has
changed.
"""
import sys
# In order to have reproducible time, we simulate a timer in the global
# variable 'TICKS', which represents simulated time in milliseconds.
# (We can't use a helper function increment the timer since it would be
# included in the profile and would appear to consume all the time.)
TICKS = 42000
def timer():
return TICKS
def testfunc():
# 1 call
# 1000 ticks total: 270 ticks local, 730 ticks in subfunctions
global TICKS
TICKS += 99
helper() # 300
helper() # 300
TICKS += 171
factorial(14) # 130
def factorial(n):
# 23 calls total
# 170 ticks total, 150 ticks local
# 3 primitive calls, 130, 20 and 20 ticks total
# including 116, 17, 17 ticks local
global TICKS
if n > 0:
TICKS += n
return mul(n, factorial(n-1))
else:
TICKS += 11
return 1
def mul(a, b):
# 20 calls
# 1 tick, local
global TICKS
TICKS += 1
return a * b
def helper():
# 2 calls
    # 300 ticks total: 20 ticks local, 280 ticks in subfunctions
global TICKS
TICKS += 1
helper1() # 30
TICKS += 2
helper1() # 30
TICKS += 6
helper2() # 50
TICKS += 3
helper2() # 50
TICKS += 2
helper2() # 50
TICKS += 5
helper2_indirect() # 70
TICKS += 1
def helper1():
# 4 calls
# 30 ticks total: 29 ticks local, 1 tick in subfunctions
global TICKS
TICKS += 10
hasattr(C(), "foo") # 1
TICKS += 19
lst = []
lst.append(42) # 0
sys.exc_info() # 0
def helper2_indirect():
helper2() # 50
factorial(3) # 20
def helper2():
# 8 calls
    # 50 ticks total: 39 ticks local, 11 ticks in subfunctions
global TICKS
TICKS += 11
hasattr(C(), "bar") # 1
TICKS += 13
subhelper() # 10
TICKS += 15
def subhelper():
# 8 calls
# 10 ticks total: 8 ticks local, 2 ticks in subfunctions
global TICKS
TICKS += 2
for i in range(2): # 0
try:
C().foo # 1 x 2
except AttributeError:
TICKS += 3 # 3 x 2
class C:
def __getattr__(self, name):
# 28 calls
# 1 tick, local
global TICKS
TICKS += 1
raise AttributeError
| lgpl-3.0 |
dvliman/jaikuengine | .google_appengine/lib/django-1.4/tests/modeltests/aggregation/models.py | 139 | 1181 | # coding: utf-8
from django.db import models
class Author(models.Model):
name = models.CharField(max_length=100)
age = models.IntegerField()
friends = models.ManyToManyField('self', blank=True)
def __unicode__(self):
return self.name
class Publisher(models.Model):
name = models.CharField(max_length=255)
num_awards = models.IntegerField()
def __unicode__(self):
return self.name
class Book(models.Model):
isbn = models.CharField(max_length=9)
name = models.CharField(max_length=255)
pages = models.IntegerField()
rating = models.FloatField()
price = models.DecimalField(decimal_places=2, max_digits=6)
authors = models.ManyToManyField(Author)
contact = models.ForeignKey(Author, related_name='book_contact_set')
publisher = models.ForeignKey(Publisher)
pubdate = models.DateField()
def __unicode__(self):
return self.name
class Store(models.Model):
name = models.CharField(max_length=255)
books = models.ManyToManyField(Book)
original_opening = models.DateTimeField()
friday_night_closing = models.TimeField()
def __unicode__(self):
return self.name
| apache-2.0 |
ActiveState/code | recipes/Python/577336_Fast_reentrant_optimistic_lock_implemented/recipe-577336.py | 1 | 4351 | from cpython cimport pythread
from cpython.exc cimport PyErr_NoMemory
cdef class FastRLock:
"""Fast, re-entrant locking.
Under uncongested conditions, the lock is never acquired but only
counted. Only when a second thread comes in and notices that the
lock is needed, it acquires the lock and notifies the first thread
to release it when it's done. This is all made possible by the
wonderful GIL.
"""
cdef pythread.PyThread_type_lock _real_lock
cdef long _owner # ID of thread owning the lock
cdef int _count # re-entry count
cdef int _pending_requests # number of pending requests for real lock
cdef bint _is_locked # whether the real lock is acquired
def __cinit__(self):
self._owner = -1
self._count = 0
self._is_locked = False
self._pending_requests = 0
self._real_lock = pythread.PyThread_allocate_lock()
if self._real_lock is NULL:
PyErr_NoMemory()
def __dealloc__(self):
if self._real_lock is not NULL:
pythread.PyThread_free_lock(self._real_lock)
self._real_lock = NULL
def acquire(self, bint blocking=True):
return lock_lock(self, pythread.PyThread_get_thread_ident(), blocking)
def release(self):
if self._owner != pythread.PyThread_get_thread_ident():
raise RuntimeError("cannot release un-acquired lock")
unlock_lock(self)
# compatibility with threading.RLock
def __enter__(self):
# self.acquire()
return lock_lock(self, pythread.PyThread_get_thread_ident(), True)
def __exit__(self, t, v, tb):
# self.release()
if self._owner != pythread.PyThread_get_thread_ident():
raise RuntimeError("cannot release un-acquired lock")
unlock_lock(self)
def _is_owned(self):
return self._owner == pythread.PyThread_get_thread_ident()
cdef inline bint lock_lock(FastRLock lock, long current_thread, bint blocking) nogil:
# Note that this function *must* hold the GIL when being called.
# We just use 'nogil' in the signature to make sure that no Python
# code execution slips in that might free the GIL
if lock._count:
# locked! - by myself?
if current_thread == lock._owner:
lock._count += 1
return 1
elif not lock._pending_requests:
# not locked, not requested - go!
lock._owner = current_thread
lock._count = 1
return 1
# need to get the real lock
return _acquire_lock(
lock, current_thread,
pythread.WAIT_LOCK if blocking else pythread.NOWAIT_LOCK)
cdef bint _acquire_lock(FastRLock lock, long current_thread, int wait) nogil:
# Note that this function *must* hold the GIL when being called.
# We just use 'nogil' in the signature to make sure that no Python
# code execution slips in that might free the GIL
if not lock._is_locked and not lock._pending_requests:
# someone owns it but didn't acquire the real lock - do that
# now and tell the owner to release it when done. Note that we
# do not release the GIL here as we must absolutely be the one
# who acquires the lock now.
if not pythread.PyThread_acquire_lock(lock._real_lock, wait):
return 0
#assert not lock._is_locked
lock._is_locked = True
lock._pending_requests += 1
with nogil:
# wait for the lock owning thread to release it
locked = pythread.PyThread_acquire_lock(lock._real_lock, wait)
lock._pending_requests -= 1
#assert not lock._is_locked
#assert lock._count == 0
if not locked:
return 0
lock._is_locked = True
lock._owner = current_thread
lock._count = 1
return 1
cdef inline void unlock_lock(FastRLock lock) nogil:
# Note that this function *must* hold the GIL when being called.
# We just use 'nogil' in the signature to make sure that no Python
# code execution slips in that might free the GIL
#assert lock._owner == pythread.PyThread_get_thread_ident()
#assert lock._count > 0
lock._count -= 1
if lock._count == 0:
lock._owner = -1
if lock._is_locked:
pythread.PyThread_release_lock(lock._real_lock)
lock._is_locked = False
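# Hypothetical usage sketch (assumes this file is compiled as a Cython
# module named 'fastrlock'); the class is a drop-in replacement for
# threading.RLock:
#
#     from fastrlock import FastRLock
#     lock = FastRLock()
#     with lock:          # re-entrant: nested acquisition also works
#         do_work()       # 'do_work' is an illustrative name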
| mit |
duyetdev/openerp-6.1.1 | openerp/addons/marketing/__openerp__.py | 9 | 1744 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "Marketing",
"version" : "1.1",
"depends" : ["base", "base_setup"],
"author" : "OpenERP SA",
"category": 'Hidden/Dependency',
'complexity': "expert",
"description": """
Menu for Marketing.
===================
Contains the installer for marketing-related modules.
""",
'website': 'http://www.openerp.com',
'init_xml': [],
'update_xml': [
'security/marketing_security.xml',
'security/ir.model.access.csv',
'marketing_view.xml'
],
'demo_xml': ['marketing_demo.xml'],
'installable': True,
'auto_install': False,
'certificate' : '00598574977629228189',
'images': ['images/config_marketing.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ntuecon/server | pyenv/Lib/site-packages/pbr/tests/test_setup.py | 10 | 19413 | # Copyright (c) 2011 OpenStack Foundation
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import os
import sys
import tempfile
import testscenarios
try:
import cStringIO as io
BytesIO = io.StringIO
except ImportError:
import io
BytesIO = io.BytesIO
import fixtures
from pbr import git
from pbr import options
from pbr import packaging
from pbr.tests import base
class SkipFileWrites(base.BaseTestCase):
scenarios = [
('changelog_option_true',
dict(option_key='skip_changelog', option_value='True',
env_key='SKIP_WRITE_GIT_CHANGELOG', env_value=None,
pkg_func=git.write_git_changelog, filename='ChangeLog')),
('changelog_option_false',
dict(option_key='skip_changelog', option_value='False',
env_key='SKIP_WRITE_GIT_CHANGELOG', env_value=None,
pkg_func=git.write_git_changelog, filename='ChangeLog')),
('changelog_env_true',
dict(option_key='skip_changelog', option_value='False',
env_key='SKIP_WRITE_GIT_CHANGELOG', env_value='True',
pkg_func=git.write_git_changelog, filename='ChangeLog')),
('changelog_both_true',
dict(option_key='skip_changelog', option_value='True',
env_key='SKIP_WRITE_GIT_CHANGELOG', env_value='True',
pkg_func=git.write_git_changelog, filename='ChangeLog')),
('authors_option_true',
dict(option_key='skip_authors', option_value='True',
env_key='SKIP_GENERATE_AUTHORS', env_value=None,
pkg_func=git.generate_authors, filename='AUTHORS')),
('authors_option_false',
dict(option_key='skip_authors', option_value='False',
env_key='SKIP_GENERATE_AUTHORS', env_value=None,
pkg_func=git.generate_authors, filename='AUTHORS')),
('authors_env_true',
dict(option_key='skip_authors', option_value='False',
env_key='SKIP_GENERATE_AUTHORS', env_value='True',
pkg_func=git.generate_authors, filename='AUTHORS')),
('authors_both_true',
dict(option_key='skip_authors', option_value='True',
env_key='SKIP_GENERATE_AUTHORS', env_value='True',
pkg_func=git.generate_authors, filename='AUTHORS')),
]
def setUp(self):
super(SkipFileWrites, self).setUp()
self.temp_path = self.useFixture(fixtures.TempDir()).path
self.root_dir = os.path.abspath(os.path.curdir)
self.git_dir = os.path.join(self.root_dir, ".git")
if not os.path.exists(self.git_dir):
self.skipTest("%s is missing; skipping git-related checks"
% self.git_dir)
return
self.filename = os.path.join(self.temp_path, self.filename)
self.option_dict = dict()
if self.option_key is not None:
self.option_dict[self.option_key] = ('setup.cfg',
self.option_value)
self.useFixture(
fixtures.EnvironmentVariable(self.env_key, self.env_value))
def test_skip(self):
self.pkg_func(git_dir=self.git_dir,
dest_dir=self.temp_path,
option_dict=self.option_dict)
self.assertEqual(
not os.path.exists(self.filename),
(self.option_value.lower() in options.TRUE_VALUES
or self.env_value is not None))
_changelog_content = """7780758\x00Break parser\x00 (tag: refs/tags/1_foo.1)
04316fe\x00Make python\x00 (refs/heads/review/monty_taylor/27519)
378261a\x00Add an integration test script.\x00
3c373ac\x00Merge "Lib\x00 (HEAD, tag: refs/tags/2013.2.rc2, tag: refs/tags/2013.2, refs/heads/mile-proposed)
182feb3\x00Fix pip invocation for old versions of pip.\x00 (tag: refs/tags/0.5.17)
fa4f46e\x00Remove explicit depend on distribute.\x00 (tag: refs/tags/0.5.16)
d1c53dd\x00Use pip instead of easy_install for installation.\x00
a793ea1\x00Merge "Skip git-checkout related tests when .git is missing"\x00
6c27ce7\x00Skip git-checkout related tests when .git is missing\x00
451e513\x00Bug fix: create_stack() fails when waiting\x00
4c8cfe4\x00Improve test coverage: network delete API\x00 (tag: refs/tags/(evil))
d7e6167\x00Bug fix: Fix pass thru filtering in list_networks\x00 (tag: refs/tags/ev()il)
c47ec15\x00Consider 'in-use' a non-pending volume for caching\x00 (tag: refs/tags/ev)il)
8696fbd\x00Improve test coverage: private extension API\x00 (tag: refs/tags/ev(il)
f0440f8\x00Improve test coverage: hypervisor list\x00 (tag: refs/tags/e(vi)l)
04984a5\x00Refactor hooks file.\x00 (HEAD, tag: 0.6.7,b, tag: refs/tags/(12), refs/heads/master)
a65e8ee\x00Remove jinja pin.\x00 (tag: refs/tags/0.5.14, tag: refs/tags/0.5.13)
""" # noqa
def _make_old_git_changelog_format(line):
"""Convert post-1.8.1 git log format to pre-1.8.1 git log format"""
if not line.strip():
return line
sha, msg, refname = line.split('\x00')
refname = refname.replace('tag: ', '')
return '\x00'.join((sha, msg, refname))
_old_git_changelog_content = '\n'.join(
_make_old_git_changelog_format(line)
for line in _changelog_content.split('\n'))
class GitLogsTest(base.BaseTestCase):
scenarios = [
('pre1.8.3', {'changelog': _old_git_changelog_content}),
('post1.8.3', {'changelog': _changelog_content}),
]
def setUp(self):
super(GitLogsTest, self).setUp()
self.temp_path = self.useFixture(fixtures.TempDir()).path
self.root_dir = os.path.abspath(os.path.curdir)
self.git_dir = os.path.join(self.root_dir, ".git")
self.useFixture(
fixtures.EnvironmentVariable('SKIP_GENERATE_AUTHORS'))
self.useFixture(
fixtures.EnvironmentVariable('SKIP_WRITE_GIT_CHANGELOG'))
def test_write_git_changelog(self):
self.useFixture(fixtures.FakePopen(lambda _: {
"stdout": BytesIO(self.changelog.encode('utf-8'))
}))
git.write_git_changelog(git_dir=self.git_dir,
dest_dir=self.temp_path)
with open(os.path.join(self.temp_path, "ChangeLog"), "r") as ch_fh:
changelog_contents = ch_fh.read()
self.assertIn("2013.2", changelog_contents)
self.assertIn("0.5.17", changelog_contents)
self.assertIn("------", changelog_contents)
self.assertIn("Refactor hooks file", changelog_contents)
self.assertIn(
"Bug fix: create_stack() fails when waiting",
changelog_contents)
self.assertNotIn("Refactor hooks file.", changelog_contents)
self.assertNotIn("182feb3", changelog_contents)
self.assertNotIn("review/monty_taylor/27519", changelog_contents)
self.assertNotIn("0.5.13", changelog_contents)
self.assertNotIn("0.6.7", changelog_contents)
self.assertNotIn("12", changelog_contents)
self.assertNotIn("(evil)", changelog_contents)
self.assertNotIn("ev()il", changelog_contents)
self.assertNotIn("ev(il", changelog_contents)
self.assertNotIn("ev)il", changelog_contents)
self.assertNotIn("e(vi)l", changelog_contents)
self.assertNotIn('Merge "', changelog_contents)
self.assertNotIn('1_foo.1', changelog_contents)
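        # Taken together, the assertions above encode the changelog rules:
        # version tags become dash-underlined section headers, commit
        # subjects appear with the trailing period stripped, and SHAs,
        # branch refs, merge commits and malformed tag names are filtered out.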
def test_generate_authors(self):
author_old = u"Foo Foo <email@foo.com>"
author_new = u"Bar Bar <email@bar.com>"
co_author = u"Foo Bar <foo@bar.com>"
co_author_by = u"Co-authored-by: " + co_author
git_log_cmd = (
"git --git-dir=%s log --format=%%aN <%%aE>"
% self.git_dir)
git_co_log_cmd = ("git --git-dir=%s log" % self.git_dir)
git_top_level = "git rev-parse --show-toplevel"
cmd_map = {
git_log_cmd: author_new,
git_co_log_cmd: co_author_by,
git_top_level: self.root_dir,
}
exist_files = [self.git_dir,
os.path.join(self.temp_path, "AUTHORS.in")]
self.useFixture(fixtures.MonkeyPatch(
"os.path.exists",
lambda path: os.path.abspath(path) in exist_files))
def _fake_run_shell_command(cmd, **kwargs):
return cmd_map[" ".join(cmd)]
self.useFixture(fixtures.MonkeyPatch(
"pbr.git._run_shell_command",
_fake_run_shell_command))
with open(os.path.join(self.temp_path, "AUTHORS.in"), "w") as auth_fh:
auth_fh.write("%s\n" % author_old)
git.generate_authors(git_dir=self.git_dir,
dest_dir=self.temp_path)
with open(os.path.join(self.temp_path, "AUTHORS"), "r") as auth_fh:
authors = auth_fh.read()
self.assertTrue(author_old in authors)
self.assertTrue(author_new in authors)
self.assertTrue(co_author in authors)
class BaseSphinxTest(base.BaseTestCase):
def setUp(self):
super(BaseSphinxTest, self).setUp()
self.useFixture(fixtures.MonkeyPatch(
"sphinx.application.Sphinx.__init__", lambda *a, **kw: None))
self.useFixture(fixtures.MonkeyPatch(
"sphinx.application.Sphinx.build", lambda *a, **kw: None))
self.useFixture(fixtures.MonkeyPatch(
"sphinx.config.Config.man_pages", ['foo']))
self.useFixture(fixtures.MonkeyPatch(
"sphinx.config.Config.init_values", lambda *a: None))
self.useFixture(fixtures.MonkeyPatch(
"sphinx.config.Config.__init__", lambda *a: None))
from distutils import dist
self.distr = dist.Distribution()
self.distr.packages = ("fake_package",)
self.distr.command_options["build_sphinx"] = {
"source_dir": ["a", "."]}
pkg_fixture = fixtures.PythonPackage(
"fake_package", [("fake_module.py", b""),
("another_fake_module_for_testing.py", b""),
("fake_private_module.py", b"")])
self.useFixture(pkg_fixture)
self.useFixture(base.DiveDir(pkg_fixture.base))
self.distr.command_options["pbr"] = {}
if hasattr(self, "excludes"):
self.distr.command_options["pbr"]["autodoc_exclude_modules"] = (
'setup.cfg',
"fake_package.fake_private_module\n"
"fake_package.another_fake_*\n"
"fake_package.unknown_module")
if hasattr(self, 'has_opt') and self.has_opt:
options = self.distr.command_options["pbr"]
options["autodoc_index_modules"] = ('setup.cfg', self.autodoc)
class BuildSphinxTest(BaseSphinxTest):
scenarios = [
('true_autodoc_caps',
dict(has_opt=True, autodoc='True', has_autodoc=True)),
('true_autodoc_caps_with_excludes',
dict(has_opt=True, autodoc='True', has_autodoc=True,
excludes="fake_package.fake_private_module\n"
"fake_package.another_fake_*\n"
"fake_package.unknown_module")),
('true_autodoc_lower',
dict(has_opt=True, autodoc='true', has_autodoc=True)),
('false_autodoc',
dict(has_opt=True, autodoc='False', has_autodoc=False)),
('no_autodoc',
dict(has_opt=False, autodoc='False', has_autodoc=False)),
]
def test_build_doc(self):
build_doc = packaging.LocalBuildDoc(self.distr)
build_doc.run()
self.assertTrue(
os.path.exists("api/autoindex.rst") == self.has_autodoc)
self.assertTrue(
os.path.exists(
"api/fake_package.fake_module.rst") == self.has_autodoc)
if not self.has_autodoc or hasattr(self, "excludes"):
assertion = self.assertFalse
else:
assertion = self.assertTrue
assertion(
os.path.exists(
"api/fake_package.fake_private_module.rst"))
assertion(
os.path.exists(
"api/fake_package.another_fake_module_for_testing.rst"))
def test_builders_config(self):
build_doc = packaging.LocalBuildDoc(self.distr)
build_doc.finalize_options()
self.assertEqual(2, len(build_doc.builders))
self.assertIn('html', build_doc.builders)
self.assertIn('man', build_doc.builders)
build_doc = packaging.LocalBuildDoc(self.distr)
build_doc.builders = ''
build_doc.finalize_options()
self.assertEqual('', build_doc.builders)
build_doc = packaging.LocalBuildDoc(self.distr)
build_doc.builders = 'man'
build_doc.finalize_options()
self.assertEqual(1, len(build_doc.builders))
self.assertIn('man', build_doc.builders)
build_doc = packaging.LocalBuildDoc(self.distr)
build_doc.builders = 'html,man,doctest'
build_doc.finalize_options()
self.assertIn('html', build_doc.builders)
self.assertIn('man', build_doc.builders)
self.assertIn('doctest', build_doc.builders)
def test_cmd_builder_override(self):
if self.has_opt:
self.distr.command_options["pbr"] = {
"autodoc_index_modules": ('setup.cfg', self.autodoc)
}
self.distr.command_options["build_sphinx"]["builder"] = (
"command line", "non-existing-builder")
build_doc = packaging.LocalBuildDoc(self.distr)
self.assertNotIn('non-existing-builder', build_doc.builders)
self.assertIn('html', build_doc.builders)
# process command line options which should override config
build_doc.finalize_options()
self.assertIn('non-existing-builder', build_doc.builders)
self.assertNotIn('html', build_doc.builders)
def test_cmd_builder_override_multiple_builders(self):
if self.has_opt:
self.distr.command_options["pbr"] = {
"autodoc_index_modules": ('setup.cfg', self.autodoc)
}
self.distr.command_options["build_sphinx"]["builder"] = (
"command line", "builder1,builder2")
build_doc = packaging.LocalBuildDoc(self.distr)
build_doc.finalize_options()
self.assertEqual(["builder1", "builder2"], build_doc.builders)
class ParseRequirementsTestScenarios(base.BaseTestCase):
versioned_scenarios = [
('non-versioned', {'versioned': False, 'expected': ['bar']}),
('versioned', {'versioned': True, 'expected': ['bar>=1.2.3']})
]
scenarios = [
('normal', {'url': "foo\nbar", 'expected': ['foo', 'bar']}),
('normal_with_comments', {
'url': "# this is a comment\nfoo\n# and another one\nbar",
'expected': ['foo', 'bar']}),
('removes_index_lines', {'url': '-f foobar', 'expected': []}),
]
scenarios = scenarios + testscenarios.multiply_scenarios([
('ssh_egg_url', {'url': 'git+ssh://foo.com/zipball#egg=bar'}),
('git_https_egg_url', {'url': 'git+https://foo.com/zipball#egg=bar'}),
('http_egg_url', {'url': 'https://foo.com/zipball#egg=bar'}),
], versioned_scenarios)
scenarios = scenarios + testscenarios.multiply_scenarios(
[
('git_egg_url',
{'url': 'git://foo.com/zipball#egg=bar', 'name': 'bar'})
], [
('non-editable', {'editable': False}),
('editable', {'editable': True}),
],
versioned_scenarios)
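    # testscenarios.multiply_scenarios builds a cross product of the given
    # scenario lists: e.g. 'git_egg_url' combined with 'editable' and
    # 'versioned' yields one scenario named
    # 'git_egg_url,editable,versioned' whose attribute dicts are merged.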
def test_parse_requirements(self):
tmp_file = tempfile.NamedTemporaryFile()
req_string = self.url
if hasattr(self, 'editable') and self.editable:
req_string = ("-e %s" % req_string)
if hasattr(self, 'versioned') and self.versioned:
req_string = ("%s-1.2.3" % req_string)
with open(tmp_file.name, 'w') as fh:
fh.write(req_string)
self.assertEqual(self.expected,
packaging.parse_requirements([tmp_file.name]))
class ParseRequirementsTest(base.BaseTestCase):
def setUp(self):
super(ParseRequirementsTest, self).setUp()
(fd, self.tmp_file) = tempfile.mkstemp(prefix='openstack',
suffix='.setup')
def test_parse_requirements_override_with_env(self):
with open(self.tmp_file, 'w') as fh:
fh.write("foo\nbar")
self.useFixture(
fixtures.EnvironmentVariable('PBR_REQUIREMENTS_FILES',
self.tmp_file))
self.assertEqual(['foo', 'bar'],
packaging.parse_requirements())
def test_parse_requirements_override_with_env_multiple_files(self):
with open(self.tmp_file, 'w') as fh:
fh.write("foo\nbar")
self.useFixture(
fixtures.EnvironmentVariable('PBR_REQUIREMENTS_FILES',
"no-such-file," + self.tmp_file))
self.assertEqual(['foo', 'bar'],
packaging.parse_requirements())
def test_get_requirement_from_file_empty(self):
actual = packaging.get_reqs_from_files([])
self.assertEqual([], actual)
def test_parse_requirements_python_version(self):
with open("requirements-py%d.txt" % sys.version_info[0],
"w") as fh:
fh.write("# this is a comment\nfoobar\n# and another one\nfoobaz")
self.assertEqual(['foobar', 'foobaz'],
packaging.parse_requirements())
def test_parse_requirements_right_python_version(self):
with open("requirements-py1.txt", "w") as fh:
fh.write("thisisatrap")
with open("requirements-py%d.txt" % sys.version_info[0],
"w") as fh:
fh.write("# this is a comment\nfoobar\n# and another one\nfoobaz")
self.assertEqual(['foobar', 'foobaz'],
packaging.parse_requirements())
class ParseDependencyLinksTest(base.BaseTestCase):
def setUp(self):
super(ParseDependencyLinksTest, self).setUp()
(fd, self.tmp_file) = tempfile.mkstemp(prefix="openstack",
suffix=".setup")
def test_parse_dependency_normal(self):
with open(self.tmp_file, "w") as fh:
fh.write("http://test.com\n")
self.assertEqual(
["http://test.com"],
packaging.parse_dependency_links([self.tmp_file]))
def test_parse_dependency_with_git_egg_url(self):
with open(self.tmp_file, "w") as fh:
fh.write("-e git://foo.com/zipball#egg=bar")
self.assertEqual(
["git://foo.com/zipball#egg=bar"],
packaging.parse_dependency_links([self.tmp_file]))
| bsd-3-clause |
Melecio/face-detection | neural_network.py | 1 | 4860 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import argparse
from os import listdir
from os.path import isfile, join
from random import shuffle
from pybrain.datasets import ClassificationDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.structure.modules import SoftmaxLayer, TanhLayer
from pybrain.tools.xml.networkwriter import NetworkWriter
from pybrain.tools.xml.networkreader import NetworkReader
from PIL import Image, ImageOps, ImageDraw, ImageFilter
import itertools
# Module image in 'image.py'
from image import img_features_vectors, img_features
def open_img(path):
return Image.open(path).convert('L')
def process(img):
return ImageOps.equalize(img)
"""Given list of images, create the training data set"""
def train_data_set(files):
# Because PyBrain may take the first 25% for testing
shuffle(files)
data_set = ClassificationDataSet(400, 1, nb_classes=2)
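    # 400 inputs presumably correspond to the flattened feature window
    # produced by img_features (see image.py); one target column and two
    # classes: face vs. non-face.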
number = 0
for path, target in files:
if number % 100 == 0:
print number,
sys.stdout.flush()
number += 1
img = open_img(path)
vector = img_features(img)
img.close()
data_set.addSample(vector, target)
return data_set
"""Given list of images, test the network with the backpropagation algorithm"""
def test_network(net, images):
for img in images:
new_img = img.convert('RGB')
draw = ImageDraw.Draw(new_img)
for vector, box, window in img_features_vectors(img):
nof, yesf = net.activate(vector)
if yesf > nof:
print "found a face"
window.show()
draw.rectangle(box, outline=0xff0000)
new_img.show()
"""Opens the images of the data set"""
def open_imgs(files):
for path in files:
        yield process(open_img(path))
"""Given a directory, opens it and gets the files"""
def get_files(directory):
files = listdir(directory)
paths = map(lambda f: join(directory,f), files)
return [ p for p in paths if isfile(p) ]
"""Parsing of the command-line input"""
def read():
parser = argparse.ArgumentParser(description='Face detection using Neural Networks')
parser.add_argument('-t', '--train-faces', help='Receives a directory with files to train with', nargs='+')
parser.add_argument('-f', '--train-non-faces', help='Receives a directory with files to train with', nargs='+')
parser.add_argument('-p', '--test', help='Receives a list of images (testing set)', nargs='+')
parser.add_argument('-r', '--read', help='Read the file with the already trained network object', nargs=1)
parser.add_argument('-w', '--write', help='Write the network to the specified file (format is .xml)', nargs=1)
args = parser.parse_args()
# Read the Neural Network Object
if args.read:
net = NetworkReader.readFrom(args.read[0])
else:
net = buildNetwork(400, 5, 2, bias=True, outclass=SoftmaxLayer)
# net = buildNetwork(400, 80, 16, 1, bias=True, hiddenclass=TanhLayer)
# If there are some files to train with
if (args.train_faces or args.train_non_faces):
if args.train_faces:
faces = get_files(args.train_faces[0])
else:
faces = []
if args.train_non_faces:
non_faces = get_files(args.train_non_faces[0])
else:
non_faces = []
# Expected targets
faces = map(lambda path: (path, [1]), faces)
non_faces = map(lambda path: (path, [0]), non_faces)
training_files = faces + non_faces
else:
training_files = None
# If there are some files to test with
if args.test:
testing_imgs = open_imgs(args.test)
else:
testing_imgs = None
# If there is a writing file
if args.write:
write_file = args.write[0]
else:
write_file = None
return net, training_files, testing_imgs, write_file
"""Main function"""
def main():
net, training_files, testing_imgs, write_file = read()
if training_files:
print "creating training data set"
training_set = train_data_set(training_files)
        training_set._convertToOneOfMany() # convert integer class labels to one-hot targets, as required by the 2-unit softmax output layer
print "training"
# print net
# print training_set, len(training_set)
# print training_set.calculateStatistics()
training_set.saveToFile('train.set')
trainer = BackpropTrainer(net, training_set, learningrate=0.05, verbose=True)
trainer.trainUntilConvergence(maxEpochs=100)
if testing_imgs:
print "testing"
test_network(net, testing_imgs)
if write_file:
NetworkWriter.writeToFile(net, write_file)
if __name__ == "__main__":
main()
| bsd-3-clause |
jcoady9/python-for-android | python3-alpha/python3-src/Lib/test/test_copyreg.py | 173 | 4217 | import copyreg
import unittest
from test import support
from test.pickletester import ExtensionSaver
class C:
pass
class WithoutSlots(object):
pass
class WithWeakref(object):
__slots__ = ('__weakref__',)
class WithPrivate(object):
__slots__ = ('__spam',)
class WithSingleString(object):
__slots__ = 'spam'
class WithInherited(WithSingleString):
__slots__ = ('eggs',)
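# Note: the private slot '__spam' in WithPrivate is subject to Python's
# class-private name mangling and is stored as '_WithPrivate__spam',
# which is exactly what test_slotnames below expects.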
class CopyRegTestCase(unittest.TestCase):
def test_class(self):
self.assertRaises(TypeError, copyreg.pickle,
C, None, None)
def test_noncallable_reduce(self):
self.assertRaises(TypeError, copyreg.pickle,
type(1), "not a callable")
def test_noncallable_constructor(self):
self.assertRaises(TypeError, copyreg.pickle,
type(1), int, "not a callable")
def test_bool(self):
import copy
self.assertEqual(True, copy.copy(True))
def test_extension_registry(self):
mod, func, code = 'junk1 ', ' junk2', 0xabcd
e = ExtensionSaver(code)
try:
# Shouldn't be in registry now.
self.assertRaises(ValueError, copyreg.remove_extension,
mod, func, code)
copyreg.add_extension(mod, func, code)
# Should be in the registry.
self.assertTrue(copyreg._extension_registry[mod, func] == code)
self.assertTrue(copyreg._inverted_registry[code] == (mod, func))
# Shouldn't be in the cache.
self.assertNotIn(code, copyreg._extension_cache)
# Redundant registration should be OK.
copyreg.add_extension(mod, func, code) # shouldn't blow up
# Conflicting code.
self.assertRaises(ValueError, copyreg.add_extension,
mod, func, code + 1)
self.assertRaises(ValueError, copyreg.remove_extension,
mod, func, code + 1)
# Conflicting module name.
self.assertRaises(ValueError, copyreg.add_extension,
mod[1:], func, code )
self.assertRaises(ValueError, copyreg.remove_extension,
mod[1:], func, code )
# Conflicting function name.
self.assertRaises(ValueError, copyreg.add_extension,
mod, func[1:], code)
self.assertRaises(ValueError, copyreg.remove_extension,
mod, func[1:], code)
# Can't remove one that isn't registered at all.
if code + 1 not in copyreg._inverted_registry:
self.assertRaises(ValueError, copyreg.remove_extension,
mod[1:], func[1:], code + 1)
finally:
e.restore()
# Shouldn't be there anymore.
self.assertNotIn((mod, func), copyreg._extension_registry)
# The code *may* be in copyreg._extension_registry, though, if
# we happened to pick on a registered code. So don't check for
# that.
# Check valid codes at the limits.
for code in 1, 0x7fffffff:
e = ExtensionSaver(code)
try:
copyreg.add_extension(mod, func, code)
copyreg.remove_extension(mod, func, code)
finally:
e.restore()
# Ensure invalid codes blow up.
for code in -1, 0, 0x80000000:
self.assertRaises(ValueError, copyreg.add_extension,
mod, func, code)
def test_slotnames(self):
self.assertEqual(copyreg._slotnames(WithoutSlots), [])
self.assertEqual(copyreg._slotnames(WithWeakref), [])
expected = ['_WithPrivate__spam']
self.assertEqual(copyreg._slotnames(WithPrivate), expected)
self.assertEqual(copyreg._slotnames(WithSingleString), ['spam'])
expected = ['eggs', 'spam']
expected.sort()
result = copyreg._slotnames(WithInherited)
result.sort()
self.assertEqual(result, expected)
def test_main():
support.run_unittest(CopyRegTestCase)
if __name__ == "__main__":
test_main()
| apache-2.0 |
manjunaths/tensorflow | tensorflow/contrib/linalg/python/kernel_tests/linear_operator_udvh_update_test.py | 5 | 9004 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import linalg as linalg_lib
from tensorflow.contrib.linalg.python.ops import linear_operator_test_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
linalg = linalg_lib
random_seed.set_random_seed(23)
class BaseLinearOperatorUDVHUpdatetest(object):
"""Base test for this type of operator."""
# Subclasses should set these attributes to either True or False.
# If True, A = L + UDV^H
# If False, A = L + UV^H or A = L + UU^H, depending on _use_v.
_use_diag_perturbation = None
# If True, diag is > 0, which means D is symmetric positive definite.
_is_diag_positive = None
# If True, A = L + UDV^H
# If False, A = L + UDU^H or A = L + UU^H, depending on _use_diag_perturbation
_use_v = None
@property
def _dtypes_to_test(self):
# TODO(langmore) Test complex types once cholesky works with them.
# See comment in LinearOperatorUDVHUpdate.__init__.
return [dtypes.float32, dtypes.float64]
@property
def _shapes_to_test(self):
# Add the (2, 10, 10) shape at the end to get something slightly larger than
# the other tests. Doing this because this operator makes use of inversion
# and determinant lemmas that are known to have stability issues.
return [(0, 0), (1, 1), (1, 3, 3), (3, 4, 4), (2, 1, 4, 4), (2, 10, 10)]
def _operator_and_mat_and_feed_dict(self, shape, dtype, use_placeholder):
# Recall A = L + UDV^H
shape = list(shape)
diag_shape = shape[:-1]
k = shape[-2] // 2 + 1
u_perturbation_shape = shape[:-1] + [k]
diag_perturbation_shape = shape[:-2] + [k]
# base_operator L will be a symmetric positive definite diagonal linear
# operator, with condition number as high as 1e4.
base_diag = linear_operator_test_util.random_uniform(
diag_shape, minval=1e-4, maxval=1., dtype=dtype)
base_diag_ph = array_ops.placeholder(dtype=dtype)
# U
u = linear_operator_test_util.random_normal_correlated_columns(
u_perturbation_shape, dtype=dtype)
u_ph = array_ops.placeholder(dtype=dtype)
# V
v = linear_operator_test_util.random_normal_correlated_columns(
u_perturbation_shape, dtype=dtype)
v_ph = array_ops.placeholder(dtype=dtype)
# D
if self._is_diag_positive:
diag_perturbation = linear_operator_test_util.random_uniform(
diag_perturbation_shape, minval=1e-4, maxval=1., dtype=dtype)
else:
diag_perturbation = linear_operator_test_util.random_normal(
diag_perturbation_shape, stddev=1e-4, dtype=dtype)
diag_perturbation_ph = array_ops.placeholder(dtype=dtype)
if use_placeholder:
# Evaluate here because (i) you cannot feed a tensor, and (ii)
# values are random and we want the same value used for both mat and
# feed_dict.
base_diag = base_diag.eval()
u = u.eval()
v = v.eval()
diag_perturbation = diag_perturbation.eval()
# In all cases, set base_operator to be positive definite.
base_operator = linalg.LinearOperatorDiag(
base_diag_ph, is_positive_definite=True)
operator = linalg.LinearOperatorUDVHUpdate(
base_operator,
u=u_ph,
v=v_ph if self._use_v else None,
diag=diag_perturbation_ph if self._use_diag_perturbation else None,
is_diag_positive=self._is_diag_positive)
feed_dict = {
base_diag_ph: base_diag,
u_ph: u,
v_ph: v,
diag_perturbation_ph: diag_perturbation}
else:
base_operator = linalg.LinearOperatorDiag(
base_diag, is_positive_definite=True)
operator = linalg.LinearOperatorUDVHUpdate(
base_operator,
u,
v=v if self._use_v else None,
diag=diag_perturbation if self._use_diag_perturbation else None,
is_diag_positive=self._is_diag_positive)
feed_dict = None
# The matrix representing L
base_diag_mat = array_ops.matrix_diag(base_diag)
# The matrix representing D
diag_perturbation_mat = array_ops.matrix_diag(diag_perturbation)
# Set up mat as some variant of A = L + UDV^H
if self._use_v and self._use_diag_perturbation:
# In this case, we have L + UDV^H and it isn't symmetric.
expect_use_cholesky = False
mat = base_diag_mat + math_ops.matmul(
u, math_ops.matmul(diag_perturbation_mat, v, adjoint_b=True))
elif self._use_v:
# In this case, we have L + UDV^H and it isn't symmetric.
expect_use_cholesky = False
mat = base_diag_mat + math_ops.matmul(u, v, adjoint_b=True)
elif self._use_diag_perturbation:
# In this case, we have L + UDU^H, which is PD if D > 0, since L > 0.
expect_use_cholesky = self._is_diag_positive
mat = base_diag_mat + math_ops.matmul(
u, math_ops.matmul(diag_perturbation_mat, u, adjoint_b=True))
else:
# In this case, we have L + UU^H, which is PD since L > 0.
expect_use_cholesky = True
mat = base_diag_mat + math_ops.matmul(u, u, adjoint_b=True)
if expect_use_cholesky:
self.assertTrue(operator._use_cholesky)
else:
self.assertFalse(operator._use_cholesky)
return operator, mat, feed_dict
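  # In summary: mat always realizes a variant of A = L + U D V^H with L
  # symmetric positive definite; a Cholesky-based path is expected only
  # when A is guaranteed positive definite (L + U U^H, or L + U D U^H
  # with D > 0).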
class LinearOperatorUDVHUpdatetestWithDiagUseCholesky(
BaseLinearOperatorUDVHUpdatetest,
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""A = L + UDU^H, D > 0, L > 0 ==> A > 0 and we can use a Cholesky."""
_use_diag_perturbation = True
_is_diag_positive = True
_use_v = False
def setUp(self):
# Decrease tolerance since we are testing with condition numbers as high as
# 1e4.
self._atol[dtypes.float32] = 1e-5
self._rtol[dtypes.float32] = 1e-5
self._atol[dtypes.float64] = 1e-10
self._rtol[dtypes.float64] = 1e-10
class LinearOperatorUDVHUpdatetestWithDiagCannotUseCholesky(
BaseLinearOperatorUDVHUpdatetest,
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""A = L + UDU^H, D !> 0, L > 0 ==> A !> 0 and we cannot use a Cholesky."""
_use_diag_perturbation = True
_is_diag_positive = False
_use_v = False
def setUp(self):
# Decrease tolerance since we are testing with condition numbers as high as
# 1e4. This class does not use Cholesky, and thus needs even looser
# tolerance.
self._atol[dtypes.float32] = 1e-4
self._rtol[dtypes.float32] = 1e-4
self._atol[dtypes.float64] = 1e-9
self._rtol[dtypes.float64] = 1e-9
class LinearOperatorUDVHUpdatetestNoDiagUseCholesky(
BaseLinearOperatorUDVHUpdatetest,
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""A = L + UU^H, L > 0 ==> A > 0 and we can use a Cholesky."""
_use_diag_perturbation = False
_is_diag_positive = None
_use_v = False
def setUp(self):
# Decrease tolerance since we are testing with condition numbers as high as
# 1e4.
self._atol[dtypes.float32] = 1e-5
self._rtol[dtypes.float32] = 1e-5
self._atol[dtypes.float64] = 1e-10
self._rtol[dtypes.float64] = 1e-10
class LinearOperatorUDVHUpdatetestNoDiagCannotUseCholesky(
BaseLinearOperatorUDVHUpdatetest,
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""A = L + UV^H, L > 0 ==> A is not symmetric and we cannot use a Cholesky."""
_use_diag_perturbation = False
_is_diag_positive = None
_use_v = True
def setUp(self):
# Decrease tolerance since we are testing with condition numbers as high as
# 1e4. This class does not use Cholesky, and thus needs even looser
# tolerance.
self._atol[dtypes.float32] = 1e-4
self._rtol[dtypes.float32] = 1e-4
self._atol[dtypes.float64] = 1e-9
self._rtol[dtypes.float64] = 1e-9
class LinearOperatorUDVHUpdatetestWithDiagNotSquare(
BaseLinearOperatorUDVHUpdatetest,
linear_operator_test_util.NonSquareLinearOperatorDerivedClassTest):
"""A = L + UDU^H, D > 0, L > 0 ==> A > 0 and we can use a Cholesky."""
_use_diag_perturbation = True
_is_diag_positive = True
_use_v = True
if __name__ == "__main__":
test.main()
| apache-2.0 |
rodrigofaccioli/drugdesign | virtualscreening/vina/spark/hydrogen_bond_crud.py | 1 | 4480 | from pyspark.sql import SQLContext, Row
from vina_utils import get_ligand_from_receptor_ligand_model
"""
Creates data frame of residue list
sqlCtx - spark SQL context
residue_listRDD - RDD used to create the data frame; produced by the load_file_select_hydrogen_bond function
"""
def create_df_residue_list(sqlCtx, residue_listRDD):
df_residue_list = sqlCtx.createDataFrame(residue_listRDD)
df_residue_list.registerTempTable("residue_list")
return df_residue_list
"""
Creates data frame of all residues for hydrogen bond
sqlCtx - spark SQL context
all_residue_split - RDD used to create the data frame; produced by the load_file_all_residue_hbonds function
"""
def create_df_all_residue(sqlCtx, all_residue_split):
df_all_residue = sqlCtx.createDataFrame(all_residue_split)
df_all_residue.registerTempTable("all_residue")
return df_all_residue
"""
Creates data frame of all residues filtered by residue list
sqlCtx - spark SQL context
Important: Before running this function must execute the functions
create_df_all_residue and create_df_residue_list
"""
def create_df_all_residue_filtered_by_res_list(sqlCtx):
#Getting all information based on list of residues
sql = """
SELECT all_residue.*
FROM all_residue
JOIN residue_list ON residue_list.residue = all_residue.receptor_residue
"""
df_result = sqlCtx.sql(sql)
df_result.registerTempTable("residues_filtered_by_list")
return df_result
"""
Group by poses all residues filtered by residue list
sqlCtx - spark SQL context
Important: the function create_df_all_residue_filtered_by_res_list
must be executed before running this function
"""
def get_group_by_poses_all_residue_filtered_by_res_list(sqlCtx):
sql = """
SELECT pose, count(*) as num_res
FROM residues_filtered_by_list
GROUP BY pose
ORDER BY num_res DESC
"""
df_result = sqlCtx.sql(sql)
return df_result
"""
Creates a dataframe of hydrogen bonds normalized by the number of donors and acceptors
sqlCtx - spark SQL context
df_only_poses - data frame created by get_group_by_poses_all_residue_filtered_by_res_list function
Important:
database is created by load_database function from database_io file.
This load_database function creates RDD only.
Therefore, the lines below must be executed before calling this function
#Loading database
rdd_database = load_database(sc, ligand_database)
#Creating Dataframe
database_table = sqlCtx.createDataFrame(rdd_database)
database_table.registerTempTable("database")
"""
def create_df_normalized_by_donors_acceptors(sqlCtx, df_only_poses):
normalizedRDD = df_only_poses.map(lambda p: Row(num_res=int(p.num_res), ligand=get_ligand_from_receptor_ligand_model(p.pose), pose=str(p.pose) ) ).collect()
#Creating Dataframe
normalized_residues_filtered_by_list_table = sqlCtx.createDataFrame(normalizedRDD)
normalized_residues_filtered_by_list_table.registerTempTable("normalized_residues_filtered_by_list")
# Normalized Hydrogen Bond by donors and acceptors
sql = """
SELECT pose, (b.num_res / a.hb_donors_acceptors) as normalized_hb
FROM database a
JOIN normalized_residues_filtered_by_list b ON b.ligand = a.ligand
ORDER BY normalized_hb DESC
"""
df_result = sqlCtx.sql(sql)
return df_result
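# Illustration with hypothetical numbers: a pose with num_res = 4
# residues in hydrogen bonds, on a ligand whose database entry has
# hb_donors_acceptors = 8, gets normalized_hb = 4 / 8 = 0.5.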
"""
Creates a dataframe of hydrogen bonds normalized by the number of heavy atoms
sqlCtx - spark SQL context
Important:
database is created by load_database function from database_io file.
This load_database function creates RDD only.
Therefore, the lines below must be executed before calling this function
#Loading database
rdd_database = load_database(sc, ligand_database)
#Creating Dataframe
database_table = sqlCtx.createDataFrame(rdd_database)
database_table.registerTempTable("database")
"""
def create_df_normalized_by_heavy_atoms(sqlCtx):
# Normalized Hydrogen Bond by heavy atoms
sql = """
SELECT pose, (b.num_res / a.heavyAtom) as normalized_hb
FROM database a
JOIN normalized_residues_filtered_by_list b ON b.ligand = a.ligand
ORDER BY normalized_hb DESC
"""
df_result = sqlCtx.sql(sql)
return df_result
"""
Creates dataframe of hydrogen bond
sqlCtx - spark SQL context
rdd_hydrogen_bond - RDD for creating dataframe. It had been created by load_file_summary_hbonds function
"""
def create_df_hydrogen_bond(sqlCtx, rdd_hydrogen_bond):
hydrogen_bond_table = sqlCtx.createDataFrame(rdd_hydrogen_bond)
hydrogen_bond_table.registerTempTable("hydrogenbond")
return hydrogen_bond_table
| apache-2.0 |
marcore/edx-platform | common/djangoapps/third_party_auth/strategy.py | 45 | 2184 | """
A custom Strategy for python-social-auth that allows us to fetch configuration from
ConfigurationModels rather than django.settings
"""
from .models import OAuth2ProviderConfig
from .pipeline import AUTH_ENTRY_CUSTOM
from social.backends.oauth import OAuthAuth
from social.strategies.django_strategy import DjangoStrategy
class ConfigurationModelStrategy(DjangoStrategy):
"""
A DjangoStrategy customized to load settings from ConfigurationModels
for upstream python-social-auth backends that we cannot otherwise modify.
"""
def setting(self, name, default=None, backend=None):
"""
Load the setting from a ConfigurationModel if possible, or fall back to the normal
Django settings lookup.
OAuthAuth subclasses will call this method for every setting they want to look up.
SAMLAuthBackend subclasses will call this method only after first checking if the
setting 'name' is configured via SAMLProviderConfig.
LTIAuthBackend subclasses will call this method only after first checking if the
setting 'name' is configured via LTIProviderConfig.
"""
if isinstance(backend, OAuthAuth):
provider_config = OAuth2ProviderConfig.current(backend.name)
if not provider_config.enabled:
raise Exception("Can't fetch setting of a disabled backend/provider.")
try:
return provider_config.get_setting(name)
except KeyError:
pass
# special case handling of login error URL if we're using a custom auth entry point:
if name == 'LOGIN_ERROR_URL':
auth_entry = self.request.session.get('auth_entry')
if auth_entry and auth_entry in AUTH_ENTRY_CUSTOM:
error_url = AUTH_ENTRY_CUSTOM[auth_entry].get('error_url')
if error_url:
return error_url
# At this point, we know 'name' is not set in a [OAuth2|LTI|SAML]ProviderConfig row.
# It's probably a global Django setting like 'FIELDS_STORED_IN_SESSION':
return super(ConfigurationModelStrategy, self).setting(name, default, backend)
| agpl-3.0 |
maxamillion/anaconda | pyanaconda/bootloader.py | 1 | 89455 | # bootloader.py
# Anaconda's bootloader configuration module.
#
# Copyright (C) 2011 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): David Lehman <dlehman@redhat.com>
# Matthew Miller <mattdm@redhat.com> (extlinux portion)
#
import collections
import os
import re
import struct
import blivet
from parted import PARTITION_BIOS_GRUB
from glob import glob
from itertools import chain
from pyanaconda import iutil
from blivet.devicelibs import raid
from pyanaconda.isys import sync
from pyanaconda.product import productName
from pyanaconda.flags import flags, can_touch_runtime_system
from blivet.errors import StorageError
from blivet.fcoe import fcoe
import pyanaconda.network
from pyanaconda.errors import errorHandler, ERROR_RAISE, ZIPLError
from pyanaconda.packaging.rpmostreepayload import RPMOSTreePayload
from pyanaconda.nm import nm_device_hwaddress
from blivet import platform
from blivet.size import Size
from pyanaconda.i18n import _, N_
from pyanaconda.orderedset import OrderedSet
import logging
log = logging.getLogger("anaconda")
def get_boot_block(device, seek_blocks=0):
status = device.status
if not status:
try:
device.setup()
except StorageError:
return ""
block_size = device.partedDevice.sectorSize
fd = iutil.eintr_retry_call(os.open, device.path, os.O_RDONLY)
if seek_blocks:
os.lseek(fd, seek_blocks * block_size, 0)
block = iutil.eintr_retry_call(os.read, fd, 512)
iutil.eintr_retry_call(os.close, fd)
if not status:
try:
device.teardown(recursive=True)
except StorageError:
pass
return block
def is_windows_boot_block(block):
try:
windows = (len(block) >= 512 and
struct.unpack("H", block[0x1fe: 0x200]) == (0xaa55,))
except struct.error:
windows = False
return windows
def has_windows_boot_block(device):
return is_windows_boot_block(get_boot_block(device))
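# Background for the check above: a DOS/MBR boot sector carries the
# little-endian signature word 0xaa55 at offset 0x1fe, i.e. its last two
# bytes are b'\x55\xaa'; buffers shorter than 512 bytes can never match.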
class serial_opts(object):
def __init__(self):
self.speed = None
self.parity = None
self.word = None
self.stop = None
self.flow = None
def parse_serial_opt(arg):
"""Parse and split serial console options.
Documentation/kernel-parameters.txt says:
ttyS<n>[,options]
Use the specified serial port. The options are of
the form "bbbbpnf", where "bbbb" is the baud rate,
"p" is parity ("n", "o", or "e"), "n" is number of
bits, and "f" is flow control ("r" for RTS or
omit it). Default is "9600n8".
but note that everything after the baud rate is optional, so these are
all valid: 9600, 19200n, 38400n8, 9600e7r.
Also note that the kernel assumes 1 stop bit; this can't be changed.
"""
opts = serial_opts()
m = re.match(r'\d+', arg)
if m is None:
return opts
opts.speed = m.group()
idx = len(opts.speed)
try:
opts.parity = arg[idx+0]
opts.word = arg[idx+1]
opts.flow = arg[idx+2]
except IndexError:
pass
return opts
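# A quick illustration of the parser above (hypothetical values):
# parse_serial_opt("38400n8r") yields speed="38400", parity="n",
# word="8", flow="r", while a bare "9600" leaves the other fields None.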
def _is_on_iscsi(device):
"""Tells whether a given device is on an iSCSI disk or not."""
return all(isinstance(disk, blivet.devices.iScsiDiskDevice)
for disk in device.disks)
class BootLoaderError(Exception):
pass
class Arguments(OrderedSet):
def _merge_ip(self):
"""
        Find ip= arguments targeting the same interface and merge them.
"""
# partition the input
def partition_p(arg):
# we are only interested in ip= parameters that use some kind of
# automatic network setup:
return arg.startswith("ip=") and arg.count(":") == 1
ip_params = filter(partition_p, self)
rest = OrderedSet(filter(lambda p: not partition_p(p), self))
# split at the colon:
ip_params = map(lambda p: p.split(":"), ip_params)
# create mapping from nics to their configurations
config = collections.defaultdict(list)
for (nic, cfg) in ip_params:
config[nic].append(cfg)
# generate the new parameters:
ip_params = set()
for nic in config:
ip_params.add("%s:%s" % (nic, ",".join(sorted(config[nic]))))
# update the set
self.clear()
self.update(rest)
self.update(ip_params)
return self
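    # For example, "ip=eth0:dhcp" and "ip=eth0:auto6" are merged into the
    # single argument "ip=eth0:auto6,dhcp" (per-interface configurations
    # are sorted and comma-joined).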
def __str__(self):
self._merge_ip()
return " ".join(list(self))
def add(self, key):
self.discard(key)
super(Arguments, self).add(key)
def update(self, other):
for key in other:
self.discard(key)
self.add(key)
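    # A minimal sketch of the re-ordering semantics (assuming OrderedSet
    # accepts an iterable): a = Arguments(["quiet", "rhgb"]); a.add("quiet")
    # moves "quiet" to the end, so str(a) == "rhgb quiet".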
class BootLoaderImage(object):
""" Base class for bootloader images. Suitable for non-linux OS images. """
def __init__(self, device=None, label=None, short=None):
self.label = label
self.short_label = short
self.device = device
class LinuxBootLoaderImage(BootLoaderImage):
def __init__(self, device=None, label=None, short=None, version=None):
super(LinuxBootLoaderImage, self).__init__(device=device, label=label)
self.label = label # label string
self.short_label = short # shorter label string
self.device = device # StorageDevice instance
self.version = version # kernel version string
self._kernel = None # filename string
self._initrd = None # filename string
@property
def kernel(self):
filename = self._kernel
if self.version and not filename:
filename = "vmlinuz-%s" % self.version
return filename
@property
def initrd(self):
filename = self._initrd
if self.version and not filename:
filename = "initramfs-%s.img" % self.version
return filename
class TbootLinuxBootLoaderImage(LinuxBootLoaderImage):
_multiboot = "tboot.gz" # filename string
_mbargs = ["logging=vga,serial,memory"]
_args = ["intel_iommu=on"]
def __init__(self, device=None, label=None, short=None, version=None):
super(TbootLinuxBootLoaderImage, self).__init__(
device=device, label=label,
short=short, version=version)
@property
def multiboot(self):
return self._multiboot
@property
def mbargs(self):
return self._mbargs
@property
def args(self):
return self._args
class BootLoader(object):
name = "Generic Bootloader"
packages = []
config_file = None
config_file_mode = 0o600
can_dual_boot = False
can_update = False
image_label_attr = "label"
encryption_support = False
stage2_is_valid_stage1 = False
# requirements for stage2 devices
stage2_device = None
stage2_device_types = []
stage2_raid_levels = []
stage2_raid_metadata = []
stage2_raid_member_types = []
stage2_mountpoints = ["/boot", "/"]
stage2_bootable = False
stage2_must_be_primary = True
stage2_description = N_("/boot file system")
stage2_max_end = Size("2 TiB")
@property
def stage2_format_types(self):
return ["ext4", "ext3", "ext2"]
# this is so stupid...
global_preserve_args = ["speakup_synth", "apic", "noapic", "apm", "ide",
"noht", "acpi", "video", "pci", "nodmraid",
"nompath", "nomodeset", "noiswmd", "fips",
"selinux", "biosdevname", "ipv6.disable",
"net.ifnames"]
preserve_args = []
_trusted_boot = False
def __init__(self):
self.boot_args = Arguments()
self.dracut_args = Arguments()
self.disks = []
self._disk_order = []
# timeout in seconds
self._timeout = None
self.password = None
# console/serial stuff
self.console = ""
self.console_options = ""
self._set_console()
# list of BootLoaderImage instances representing bootable OSs
self.linux_images = []
self.chain_images = []
# default image
self._default_image = None
self._update_only = False
self.skip_bootloader = False
self.errors = []
self.warnings = []
self.reset()
def reset(self):
""" Reset stage1 and stage2 values """
# the device the bootloader will be installed on
self.stage1_device = None
# the "boot disk", meaning the disk stage1 _will_ go on
self.stage1_disk = None
self.stage2_device = None
self.stage2_is_preferred_stage1 = False
self.errors = []
self.problems = []
self.warnings = []
#
# disk list access
#
@property
def disk_order(self):
"""Potentially partial order for disks."""
return self._disk_order
@disk_order.setter
def disk_order(self, order):
log.debug("new disk order: %s", order)
self._disk_order = order
if self.disks:
self._sort_disks()
def _sort_disks(self):
"""Sort the internal disk list. """
for name in reversed(self.disk_order):
try:
idx = [d.name for d in self.disks].index(name)
except ValueError:
log.error("bios order specified unknown disk %s", name)
continue
self.disks.insert(0, self.disks.pop(idx))
def set_disk_list(self, disks):
self.disks = disks[:]
self._sort_disks()
#
# image list access
#
@property
def default(self):
"""The default image."""
if not self._default_image and self.linux_images:
self._default_image = self.linux_images[0]
return self._default_image
@default.setter
def default(self, image):
if image not in self.images:
raise ValueError("new default image not in image list")
log.debug("new default image: %s", image)
self._default_image = image
@property
def images(self):
""" List of OS images that will be included in the configuration. """
all_images = self.linux_images
all_images.extend(i for i in self.chain_images if i.label)
return all_images
def clear_images(self):
"""Empty out the image list."""
self.linux_images = []
self.chain_images = []
def add_image(self, image):
"""Add a BootLoaderImage instance to the image list."""
if isinstance(image, LinuxBootLoaderImage):
self.linux_images.append(image)
else:
self.chain_images.append(image)
def image_label(self, image):
"""Return the appropriate image label for this bootloader."""
return getattr(image, self.image_label_attr)
#
# platform-specific data access
#
@property
def disklabel_types(self):
return platform.platform._disklabel_types
@property
def device_descriptions(self):
return platform.platform.bootStage1ConstraintDict["descriptions"]
#
# constraint checking for target devices
#
def _is_valid_md(self, device, raid_levels=None,
metadata=None, member_types=None, desc=""):
ret = True
if device.type != "mdarray":
return ret
if raid_levels and device.level not in raid_levels:
levels_str = ",".join("%s" % l for l in raid_levels)
self.errors.append(_("RAID sets that contain '%(desc)s' must have one "
"of the following raid levels: %(raid_level)s.")
% {"desc" : desc, "raid_level" : levels_str})
ret = False
# new arrays will be created with an appropriate metadata format
if device.exists and \
metadata and device.metadataVersion not in metadata:
self.errors.append(_("RAID sets that contain '%(desc)s' must have one "
"of the following metadata versions: %(metadata_versions)s.")
% {"desc": desc, "metadata_versions": ",".join(metadata)})
ret = False
if member_types:
for member in device.devices:
if not self._device_type_match(member, member_types):
self.errors.append(_("RAID sets that contain '%(desc)s' must "
"have one of the following device "
"types: %(types)s.")
% {"desc" : desc, "types" : ",".join(member_types)})
ret = False
log.debug("_is_valid_md(%s) returning %s", device.name, ret)
return ret
def _is_valid_disklabel(self, device, disklabel_types=None):
ret = True
if self.disklabel_types:
for disk in device.disks:
label_type = getattr(disk.format, "labelType", None)
if not label_type or label_type not in self.disklabel_types:
types_str = ",".join(disklabel_types)
self.errors.append(_("%(name)s must have one of the following "
"disklabel types: %(types)s.")
% {"name" : device.name, "types" : types_str})
ret = False
log.debug("_is_valid_disklabel(%s) returning %s", device.name, ret)
return ret
def _is_valid_format(self, device, format_types=None, mountpoints=None,
desc=""):
ret = True
if format_types and device.format.type not in format_types:
self.errors.append(_("%(desc)s cannot be of type %(type)s.")
% {"desc" : desc, "type" : device.format.type})
ret = False
if mountpoints and hasattr(device.format, "mountpoint") \
and device.format.mountpoint not in mountpoints:
self.errors.append(_("%(desc)s must be mounted on one of %(mountpoints)s.")
% {"desc" : desc, "mountpoints" : ", ".join(mountpoints)})
ret = False
log.debug("_is_valid_format(%s) returning %s", device.name, ret)
return ret
def _is_valid_size(self, device, desc=""):
ret = True
msg = None
errors = []
if device.format.minSize and device.format.maxSize:
msg = (_("%(desc)s must be between %(min)d and %(max)d MB in size")
% {"desc" : desc, "min" : device.format.minSize,
"max" : device.format.maxSize})
if device.format.minSize and device.size < device.format.minSize:
if msg is None:
errors.append(_("%(desc)s must not be smaller than %(min)dMB.")
% {"desc" : desc, "min" : device.format.minSize})
else:
errors.append(msg)
ret = False
if device.format.maxSize and device.size > device.format.maxSize:
if msg is None:
errors.append(_("%(desc)s must not be larger than %(max)dMB.")
% {"desc" : desc, "max" : device.format.maxSize})
elif msg not in errors:
# don't add the same error string twice
errors.append(msg)
ret = False
log.debug("_is_valid_size(%s) returning %s", device.name, ret)
return ret
def _is_valid_location(self, device, max_end=None, desc=""):
ret = True
if max_end and device.type == "partition" and device.partedPartition:
end_sector = device.partedPartition.geometry.end
sector_size = device.partedPartition.disk.device.sectorSize
end = Size(sector_size * end_sector)
if end > max_end:
self.errors.append(_("%(desc)s must be within the first %(max_end)s of "
"the disk.") % {"desc": desc, "max_end": max_end})
ret = False
log.debug("_is_valid_location(%s) returning %s", device.name, ret)
return ret
def _is_valid_partition(self, device, primary=None, desc=""):
ret = True
if device.type == "partition" and primary and not device.isPrimary:
self.errors.append(_("%s must be on a primary partition.") % desc)
ret = False
log.debug("_is_valid_partition(%s) returning %s", device.name, ret)
return ret
#
# target/stage1 device access
#
def _device_type_index(self, device, types):
""" Return the index of the matching type in types to device's type.
Return None if no match is found. """
index = None
try:
index = types.index(device.type)
except ValueError:
if "disk" in types and device.isDisk:
index = types.index("disk")
return index
def _device_type_match(self, device, types):
""" Return True if device is of one of the types in the list types. """
return self._device_type_index(device, types) is not None
def device_description(self, device):
device_types = list(self.device_descriptions.keys())
idx = self._device_type_index(device, device_types)
if idx is None:
raise ValueError("No description available for %s" % device.type)
# this looks unnecessarily complicated, but it handles the various
# device types that we treat as disks
return self.device_descriptions[device_types[idx]]
def set_preferred_stage1_type(self, preferred):
""" Set a preferred type of stage1 device. """
if not self.stage2_is_valid_stage1:
# "partition" means first sector of stage2 and is only meaningful
# for bootloaders that can use stage2 as stage1
return
if preferred == "mbr":
# "mbr" is already the default
return
# partition means "use the stage2 device for a stage1 device"
self.stage2_is_preferred_stage1 = True
def is_valid_stage1_device(self, device, early=False):
""" Return True if the device is a valid stage1 target device.
Also collect lists of errors and warnings.
The criteria for being a valid stage1 target device vary from
platform to platform. On some platforms a disk with an msdos
disklabel is a valid stage1 target, while some platforms require
a special device. Some examples of these special devices are EFI
system partitions on EFI machines, PReP boot partitions on
iSeries, and Apple bootstrap partitions on Mac.
The 'early' keyword argument is a boolean flag indicating whether
or not this check is being performed at a point where the mountpoint
cannot be expected to be set for things like EFI system partitions.
"""
self.errors = []
self.warnings = []
valid = True
constraint = platform.platform.bootStage1ConstraintDict
if device is None:
return False
if not self._device_type_match(device, constraint["device_types"]):
log.debug("stage1 device cannot be of type %s", device.type)
return False
if blivet.arch.isS390() and _is_on_iscsi(device):
log.debug("stage1 device cannot be on an iSCSI disk on s390(x)")
return False
description = self.device_description(device)
if self.stage2_is_valid_stage1 and device == self.stage2_device:
# special case
valid = (self.stage2_is_preferred_stage1 and
self.is_valid_stage2_device(device))
# we'll be checking stage2 separately so don't duplicate messages
self.problems = []
self.warnings = []
return valid
if device.protected:
valid = False
if not self._is_valid_disklabel(device,
disklabel_types=self.disklabel_types):
valid = False
if not self._is_valid_size(device, desc=description):
valid = False
if not self._is_valid_location(device,
max_end=constraint["max_end"],
desc=description):
valid = False
if not self._is_valid_md(device,
raid_levels=constraint["raid_levels"],
metadata=constraint["raid_metadata"],
member_types=constraint["raid_member_types"],
desc=description):
valid = False
if not self.stage2_bootable and not getattr(device, "bootable", True):
log.warning("%s not bootable", device.name)
# XXX does this need to be here?
if getattr(device.format, "label", None) in ("ANACONDA", "LIVE"):
log.info("ignoring anaconda boot disk")
valid = False
if early:
mountpoints = []
else:
mountpoints = constraint["mountpoints"]
if not self._is_valid_format(device,
format_types=constraint["format_types"],
mountpoints=mountpoints,
desc=description):
valid = False
if not self.encryption_support and device.encrypted:
self.errors.append(_("%s cannot be on an encrypted block "
"device.") % description)
valid = False
log.debug("is_valid_stage1_device(%s) returning %s", device.name, valid)
return valid
def set_stage1_device(self, devices):
self.stage1_device = None
if not self.stage1_disk:
self.reset()
raise BootLoaderError("need stage1 disk to set stage1 device")
if self.stage2_is_preferred_stage1:
self.stage1_device = self.stage2_device
return
for device in devices:
if self.stage1_disk not in device.disks:
continue
if self.is_valid_stage1_device(device):
if flags.imageInstall and device.isDisk:
# GRUB2 will install to /dev/loop0 but not to
# /dev/mapper/<image_name>
self.stage1_device = device.parents[0]
else:
self.stage1_device = device
break
if not self.stage1_device:
self.reset()
raise BootLoaderError("failed to find a suitable stage1 device")
#
# boot/stage2 device access
#
def is_valid_stage2_device(self, device, linux=True, non_linux=False):
""" Return True if the device is suitable as a stage2 target device.
Also collect lists of errors and warnings.
"""
self.errors = []
self.warnings = []
valid = True
if device is None:
return False
if device.protected:
valid = False
if blivet.arch.isS390() and _is_on_iscsi(device):
self.errors.append(_("%s cannot be on an iSCSI disk on s390(x)") % self.stage2_description)
valid = False
if not self._device_type_match(device, self.stage2_device_types):
self.errors.append(_("%(desc)s cannot be of type %(type)s")
% {"desc" : _(self.stage2_description), "type" : device.type})
valid = False
if not self._is_valid_disklabel(device,
disklabel_types=self.disklabel_types):
valid = False
if not self._is_valid_size(device, desc=_(self.stage2_description)):
valid = False
if self.stage2_max_end and not self._is_valid_location(device,
max_end=self.stage2_max_end,
desc=_(self.stage2_description)):
valid = False
if not self._is_valid_partition(device,
primary=self.stage2_must_be_primary):
valid = False
if not self._is_valid_md(device,
raid_levels=self.stage2_raid_levels,
metadata=self.stage2_raid_metadata,
member_types=self.stage2_raid_member_types,
desc=_(self.stage2_description)):
valid = False
if linux and \
not self._is_valid_format(device,
format_types=self.stage2_format_types,
mountpoints=self.stage2_mountpoints,
desc=_(self.stage2_description)):
valid = False
non_linux_format_types = platform.platform._non_linux_format_types
if non_linux and \
not self._is_valid_format(device,
format_types=non_linux_format_types):
valid = False
if not self.encryption_support and device.encrypted:
self.errors.append(_("%s cannot be on an encrypted block "
"device.") % _(self.stage2_description))
valid = False
log.debug("is_valid_stage2_device(%s) returning %s", device.name, valid)
return valid
#
# miscellaneous
#
def has_windows(self, devices):
return False
@property
def timeout(self):
"""Bootloader timeout in seconds."""
if self._timeout is not None:
t = self._timeout
else:
t = 5
return t
def check(self):
""" Run additional bootloader checks """
return True
@timeout.setter
def timeout(self, seconds):
self._timeout = seconds
@property
def update_only(self):
return self._update_only
@update_only.setter
def update_only(self, value):
if value and not self.can_update:
raise ValueError("this boot loader does not support updates")
elif self.can_update:
self._update_only = value
def set_boot_args(self, *args, **kwargs):
""" Set up the boot command line.
Keyword Arguments:
storage - a blivet.Storage instance
All other arguments are expected to have a dracutSetupArgs()
method.
"""
storage = kwargs.pop("storage", None)
#
# FIPS
#
if flags.cmdline.get("fips") == "1":
self.boot_args.add("boot=%s" % self.stage2_device.fstabSpec)
#
# dracut
#
# storage
from blivet.devices import NetworkStorageDevice
dracut_devices = [storage.rootDevice]
if self.stage2_device != storage.rootDevice:
dracut_devices.append(self.stage2_device)
dracut_devices.extend(storage.fsset.swapDevices)
# Does /usr have its own device? If so, we need to tell dracut
usr_device = storage.mountpoints.get("/usr")
if usr_device:
dracut_devices.extend([usr_device])
netdevs = storage.devicetree.getDevicesByInstance(NetworkStorageDevice)
rootdev = storage.rootDevice
if any(rootdev.dependsOn(netdev) for netdev in netdevs):
dracut_devices = set(dracut_devices)
for dev in storage.mountpoints.values():
if any(dev.dependsOn(netdev) for netdev in netdevs):
dracut_devices.add(dev)
done = []
for device in dracut_devices:
for dep in storage.devices:
if dep in done:
continue
if device != dep and not device.dependsOn(dep):
continue
setup_args = dep.dracutSetupArgs()
if not setup_args:
continue
self.boot_args.update(setup_args)
self.dracut_args.update(setup_args)
done.append(dep)
# network storage
# XXX this is nothing to be proud of
if isinstance(dep, NetworkStorageDevice):
setup_args = pyanaconda.network.dracutSetupArgs(dep)
self.boot_args.update(setup_args)
self.dracut_args.update(setup_args)
# passed-in objects
for cfg_obj in chain(args, kwargs.values()):
if hasattr(cfg_obj, "dracutSetupArgs"):
setup_args = cfg_obj.dracutSetupArgs()
self.boot_args.update(setup_args)
self.dracut_args.update(setup_args)
else:
setup_string = cfg_obj.dracutSetupString()
self.boot_args.add(setup_string)
self.dracut_args.add(setup_string)
# This is needed for FCoE, bug #743784. The case:
# We discover LUN on an iface which is part of multipath setup.
# If the iface is disconnected after discovery anaconda doesn't
# write dracut ifname argument for the disconnected iface path
# (in Network.dracutSetupArgs).
# Dracut needs the explicit ifname= because biosdevname
# fails to rename the iface (because of BFS booting from it).
for nic, _dcb, _auto_vlan in fcoe().nics:
try:
hwaddr = nm_device_hwaddress(nic)
except ValueError:
continue
self.boot_args.add("ifname=%s:%s" % (nic, hwaddr.lower()))
# Add iscsi_firmware to trigger dracut running iscsistart
# See rhbz#1099603 and rhbz#1185792
if len(glob("/sys/firmware/iscsi_boot*")) > 0:
self.boot_args.add("iscsi_firmware")
#
# preservation of some of our boot args
# FIXME: this is stupid.
#
for opt in self.global_preserve_args + self.preserve_args:
if opt not in flags.cmdline:
continue
arg = flags.cmdline.get(opt)
new_arg = opt
if arg:
new_arg += "=%s" % arg
self.boot_args.add(new_arg)
#
# configuration
#
@property
def boot_prefix(self):
""" Prefix, if any, to paths in /boot. """
if self.stage2_device.format.mountpoint == "/":
prefix = "/boot"
else:
prefix = ""
return prefix
def _set_console(self):
""" Set console options based on boot arguments. """
console = flags.cmdline.get("console", "")
console = os.path.basename(console)
self.console, _x, self.console_options = console.partition(",")
def write_config_console(self, config):
"""Write console-related configuration lines."""
pass
def write_config_password(self, config):
"""Write password-related configuration lines."""
pass
def write_config_header(self, config):
"""Write global configuration lines."""
self.write_config_console(config)
self.write_config_password(config)
def write_config_images(self, config):
"""Write image configuration entries."""
raise NotImplementedError()
def write_config_post(self):
try:
iutil.eintr_retry_call(os.chmod, iutil.getSysroot() + self.config_file, self.config_file_mode)
except OSError as e:
log.error("failed to set config file permissions: %s", e)
def write_config(self):
""" Write the bootloader configuration. """
if not self.config_file:
raise BootLoaderError("no config file defined for this boot loader")
config_path = os.path.normpath(iutil.getSysroot() + self.config_file)
if os.access(config_path, os.R_OK):
os.rename(config_path, config_path + ".anacbak")
config = open(config_path, "w")
self.write_config_header(config)
self.write_config_images(config)
config.close()
self.write_config_post()
@property
def trusted_boot(self):
return self._trusted_boot
@trusted_boot.setter
def trusted_boot(self, trusted_boot):
self._trusted_boot = trusted_boot
#
# installation
#
def write(self):
""" Write the bootloader configuration and install the bootloader. """
if self.skip_bootloader:
return
if self.update_only:
self.update()
return
self.write_config()
sync()
self.stage2_device.format.sync(root=iutil.getTargetPhysicalRoot())
self.install()
def install(self, args=None):
raise NotImplementedError()
def update(self):
""" Update an existing bootloader configuration. """
pass
class GRUB(BootLoader):
name = "GRUB"
_config_dir = "grub"
_config_file = "grub.conf"
_device_map_file = "device.map"
can_dual_boot = True
can_update = True
stage2_is_valid_stage1 = True
stage2_bootable = True
stage2_must_be_primary = False
# list of strings representing options for boot device types
stage2_device_types = ["partition", "mdarray"]
stage2_raid_levels = [raid.RAID1]
stage2_raid_member_types = ["partition"]
stage2_raid_metadata = ["0", "0.90", "1.0"]
packages = ["grub"]
_serial_consoles = ["ttyS"]
def __init__(self):
super(GRUB, self).__init__()
self.encrypted_password = ""
#
# grub-related conveniences
#
def grub_device_name(self, device):
""" Return a grub-friendly representation of device. """
disk = getattr(device, "disk", device)
name = "(hd%d" % self.disks.index(disk)
if hasattr(device, "disk"):
name += ",%d" % (device.partedPartition.number - 1,)
name += ")"
return name
@property
def grub_config_dir(self):
""" Config dir, adjusted for grub's view of the world. """
return self.boot_prefix + self._config_dir
#
# configuration
#
@property
def config_dir(self):
""" Full path to configuration directory. """
return "/boot/" + self._config_dir
@property
def config_file(self):
""" Full path to configuration file. """
return "%s/%s" % (self.config_dir, self._config_file)
@property
def device_map_file(self):
""" Full path to device.map file. """
return "%s/%s" % (self.config_dir, self._device_map_file)
@property
def grub_conf_device_line(self):
return ""
@property
def splash_dir(self):
""" relative path to splash image directory."""
return GRUB._config_dir
@property
def has_serial_console(self):
""" true if the console is a serial console. """
return any(self.console.startswith(sconsole) for sconsole in self._serial_consoles)
@property
def serial_command(self):
command = ""
if self.console and self.has_serial_console:
unit = self.console[-1]
command = ["serial"]
s = parse_serial_opt(self.console_options)
if unit and unit != '0':
command.append("--unit=%s" % unit)
if s.speed and s.speed != '9600':
command.append("--speed=%s" % s.speed)
if s.parity:
if s.parity == 'o':
command.append("--parity=odd")
elif s.parity == 'e':
command.append("--parity=even")
if s.word and s.word != '8':
command.append("--word=%s" % s.word)
if s.stop and s.stop != '1':
command.append("--stop=%s" % s.stop)
command = " ".join(command)
return command
def write_config_console(self, config):
""" Write console-related configuration. """
if not self.console:
return
if self.has_serial_console:
config.write("%s\n" % self.serial_command)
config.write("terminal --timeout=%s serial console\n"
% self.timeout)
console_arg = "console=%s" % self.console
if self.console_options:
console_arg += ",%s" % self.console_options
self.boot_args.add(console_arg)
def _encrypt_password(self):
""" Make sure self.encrypted_password is set up correctly. """
if self.encrypted_password:
return
if not self.password:
raise BootLoaderError("cannot encrypt empty password")
# Used for ascii_letters and digits constants
import string # pylint: disable=deprecated-module
import crypt
import random
salt = "$6$"
salt_len = 16
salt_chars = string.ascii_letters + string.digits + './'
rand_gen = random.SystemRandom()
salt += "".join(rand_gen.choice(salt_chars) for i in range(salt_len))
self.encrypted_password = crypt.crypt(self.password, salt)
def write_config_password(self, config):
""" Write password-related configuration. """
if not self.password and not self.encrypted_password:
return
self._encrypt_password()
password_line = "--encrypted " + self.encrypted_password
config.write("password %s\n" % password_line)
def write_config_header(self, config):
"""Write global configuration information. """
if self.boot_prefix:
have_boot = "do not "
else:
have_boot = ""
s = """# grub.conf generated by anaconda
# Note that you do not have to rerun grub after making changes to this file.
# NOTICE: You %(do)shave a /boot partition. This means that all kernel and
# initrd paths are relative to %(boot)s, eg.
# root %(grub_target)s
# kernel %(prefix)s/vmlinuz-version ro root=%(root_device)s
# initrd %(prefix)s/initrd-[generic-]version.img
""" % {"do": have_boot, "boot": self.stage2_device.format.mountpoint,
"root_device": self.stage2_device.path,
"grub_target": self.grub_device_name(self.stage1_device),
"prefix": self.boot_prefix}
config.write(s)
config.write("boot=%s\n" % self.stage1_device.path)
config.write(self.grub_conf_device_line)
# find the index of the default image
try:
default_index = self.images.index(self.default)
except ValueError:
e = "Failed to find default image (%s)" % self.default.label
raise BootLoaderError(e)
config.write("default=%d\n" % default_index)
config.write("timeout=%d\n" % self.timeout)
self.write_config_console(config)
if iutil.isConsoleOnVirtualTerminal(self.console):
splash = "splash.xpm.gz"
splash_path = os.path.normpath("%s/boot/%s/%s" % (iutil.getSysroot(),
self.splash_dir,
splash))
if os.access(splash_path, os.R_OK):
grub_root_grub_name = self.grub_device_name(self.stage2_device)
config.write("splashimage=%s/%s/%s\n" % (grub_root_grub_name,
self.splash_dir,
splash))
config.write("hiddenmenu\n")
self.write_config_password(config)
def write_config_images(self, config):
""" Write image entries into configuration file. """
for image in self.images:
args = Arguments()
if isinstance(image, LinuxBootLoaderImage):
grub_root = self.grub_device_name(self.stage2_device)
args.update(["ro", "root=%s" % image.device.fstabSpec])
args.update(self.boot_args)
if isinstance(image, TbootLinuxBootLoaderImage):
args.update(image.args)
snippet = ("\tkernel %(prefix)s/%(multiboot)s %(mbargs)s\n"
"\tmodule %(prefix)s/%(kernel)s %(args)s\n"
"\tmodule %(prefix)s/%(initrd)s\n"
% {"prefix": self.boot_prefix,
"multiboot": image.multiboot,
"mbargs": image.mbargs,
"kernel": image.kernel, "args": args,
"initrd": image.initrd})
else:
snippet = ("\tkernel %(prefix)s/%(kernel)s %(args)s\n"
"\tinitrd %(prefix)s/%(initrd)s\n"
% {"prefix": self.boot_prefix,
"kernel": image.kernel, "args": args,
"initrd": image.initrd})
stanza = ("title %(label)s (%(version)s)\n"
"\troot %(grub_root)s\n"
"%(snippet)s"
% {"label": image.label, "version": image.version,
"grub_root": grub_root, "snippet": snippet})
else:
stanza = ("title %(label)s\n"
"\trootnoverify %(grub_root)s\n"
"\tchainloader +1\n"
% {"label": image.label,
"grub_root": self.grub_device_name(image.device)})
log.info("bootloader.py: used boot args: %s ", args)
config.write(stanza)
def write_device_map(self):
""" Write out a device map containing all supported devices. """
map_path = os.path.normpath(iutil.getSysroot() + self.device_map_file)
if os.access(map_path, os.R_OK):
os.rename(map_path, map_path + ".anacbak")
dev_map = open(map_path, "w")
dev_map.write("# this device map was generated by anaconda\n")
for disk in self.disks:
dev_map.write("%s %s\n" % (self.grub_device_name(disk),
disk.path))
dev_map.close()
def write_config_post(self):
""" Perform additional configuration after writing config file(s). """
super(GRUB, self).write_config_post()
# make symlink for menu.lst (grub's default config file name)
menu_lst = "%s%s/menu.lst" % (iutil.getSysroot(), self.config_dir)
if os.access(menu_lst, os.R_OK):
try:
os.rename(menu_lst, menu_lst + '.anacbak')
except OSError as e:
log.error("failed to back up %s: %s", menu_lst, e)
try:
os.symlink(self._config_file, menu_lst)
except OSError as e:
log.error("failed to create grub menu.lst symlink: %s", e)
# make symlink to grub.conf in /etc since that's where configs belong
etc_grub = "%s/etc/%s" % (iutil.getSysroot(), self._config_file)
if os.access(etc_grub, os.R_OK):
try:
os.unlink(etc_grub)
except OSError as e:
log.error("failed to remove %s: %s", etc_grub, e)
try:
os.symlink("..%s" % self.config_file, etc_grub)
except OSError as e:
log.error("failed to create /etc/grub.conf symlink: %s", e)
def write_config(self):
""" Write bootloader configuration to disk. """
# write device.map
self.write_device_map()
# this writes the actual configuration file
super(GRUB, self).write_config()
#
# installation
#
@property
def install_targets(self):
""" List of (stage1, stage2) tuples representing install targets. """
targets = []
# make sure we have stage1 and stage2 installed with redundancy
# so that boot can succeed even in the event of failure or removal
# of some of the disks containing the member partitions of the
# /boot array. If the stage1 is not a disk, it probably needs to
# be a partition on a particular disk (biosboot, prepboot), so only
# add the redundant targets if installing stage1 to a disk that is
# a member of the stage2 array.
# Look for both mdraid and btrfs raid
if self.stage2_device.type == "mdarray" and \
self.stage2_device.level == raid.RAID1:
stage2_raid = True
# Set parents to the list of partitions in the RAID
stage2_parents = self.stage2_device.parents
elif self.stage2_device.type == "btrfs subvolume" and \
self.stage2_device.parents[0].dataLevel == raid.RAID1:
stage2_raid = True
# Set parents to the list of partitions in the parent volume
stage2_parents = self.stage2_device.parents[0].parents
else:
stage2_raid = False
if stage2_raid and \
self.stage1_device.isDisk and \
self.stage2_device.dependsOn(self.stage1_device):
for stage2dev in stage2_parents:
# if target disk contains any of /boot array's member
# partitions, set up stage1 on each member's disk
stage1dev = stage2dev.disk
targets.append((stage1dev, self.stage2_device))
else:
targets.append((self.stage1_device, self.stage2_device))
return targets
def install(self, args=None):
rc = iutil.execInSysroot("grub-install", ["--just-copy"])
if rc:
raise BootLoaderError("boot loader install failed")
for (stage1dev, stage2dev) in self.install_targets:
cmd = ("root %(stage2dev)s\n"
"install --stage2=%(config_dir)s/stage2"
" /%(grub_config_dir)s/stage1 d %(stage1dev)s"
" /%(grub_config_dir)s/stage2 p"
" %(stage2dev)s/%(grub_config_dir)s/%(config_basename)s\n"
% {"grub_config_dir": self.grub_config_dir,
"config_dir": self.config_dir,
"config_basename": self._config_file,
"stage1dev": self.grub_device_name(stage1dev),
"stage2dev": self.grub_device_name(stage2dev)})
(pread, pwrite) = os.pipe()
iutil.eintr_retry_call(os.write, pwrite, cmd.encode("utf-8"))
iutil.eintr_retry_call(os.close, pwrite)
args = ["--batch", "--no-floppy",
"--device-map=%s" % self.device_map_file]
rc = iutil.execInSysroot("grub", args, stdin=pread)
iutil.eintr_retry_call(os.close, pread)
if rc:
raise BootLoaderError("boot loader install failed")
def update(self):
self.install()
#
# miscellaneous
#
def has_windows(self, devices):
""" Potential boot devices containing non-linux operating systems. """
# make sure we don't clobber error/warning lists
errors = self.errors[:]
warnings = self.warnings[:]
ret = [d for d in devices if self.is_valid_stage2_device(d, linux=False, non_linux=True)]
self.errors = errors
self.warnings = warnings
return bool(ret)
# Add a warning about certain RAID situations to is_valid_stage2_device
def is_valid_stage2_device(self, device, linux=True, non_linux=False):
valid = super(GRUB, self).is_valid_stage2_device(device, linux, non_linux)
# If the stage2 device is on a raid1, check that the stage1 device is also redundant,
# either by also being part of an array or by being a disk (which is expanded
# to every disk in the array by install_targets).
if self.stage1_device and self.stage2_device and \
self.stage2_device.type == "mdarray" and \
self.stage2_device.level == raid.RAID1 and \
self.stage1_device.type != "mdarray":
if not self.stage1_device.isDisk:
msg = _("boot loader stage2 device %(stage2dev)s is on a multi-disk array, but boot loader stage1 device %(stage1dev)s is not. " \
"A drive failure in %(stage2dev)s could render the system unbootable.") % \
{"stage1dev" : self.stage1_device.name,
"stage2dev" : self.stage2_device.name}
self.warnings.append(msg)
elif not self.stage2_device.dependsOn(self.stage1_device):
msg = _("boot loader stage2 device %(stage2dev)s is on a multi-disk array, but boot loader stage1 device %(stage1dev)s is not part of this array. " \
"The stage1 boot loader will only be installed to a single drive.") % \
{"stage1dev" : self.stage1_device.name,
"stage2dev" : self.stage2_device.name}
self.warnings.append(msg)
return valid
class GRUB2(GRUB):
""" GRUBv2
- configuration
- password (insecure), password_pbkdf2
- http://www.gnu.org/software/grub/manual/grub.html#Invoking-grub_002dmkpasswd_002dpbkdf2
- --users per-entry specifies which users can access, otherwise
entry is unrestricted
- /etc/grub/custom.cfg
- how does grub resolve names of md arrays?
- disable automatic use of grub-mkconfig?
- on upgrades?
- BIOS boot partition (GPT)
- parted /dev/sda set <partition_number> bios_grub on
- can't contain a file system
- 31KiB min, 1MiB recommended
"""
name = "GRUB2"
packages = ["grub2"]
_config_file = "grub.cfg"
_config_dir = "grub2"
defaults_file = "/etc/default/grub"
terminal_type = "console"
stage2_max_end = None
# requirements for boot devices
stage2_device_types = ["partition", "mdarray", "lvmlv"]
stage2_raid_levels = [raid.RAID0, raid.RAID1, raid.RAID4,
raid.RAID5, raid.RAID6, raid.RAID10]
stage2_raid_metadata = ["0", "0.90", "1.0", "1.2"]
@property
def stage2_format_types(self):
if productName.startswith("Red Hat "):
return ["xfs", "ext4", "ext3", "ext2", "btrfs"]
else:
return ["ext4", "ext3", "ext2", "btrfs", "xfs"]
def __init__(self):
super(GRUB2, self).__init__()
# XXX we probably need special handling for raid stage1 w/ gpt disklabel
# since it's unlikely there'll be a bios boot partition on each disk
#
# grub-related conveniences
#
def grub_device_name(self, device):
""" Return a grub-friendly representation of device.
Disks and partitions use the (hdX,Y) notation, while lvm and
md devices just use their names.
"""
disk = None
name = "(%s)" % device.name
if device.isDisk:
disk = device
elif hasattr(device, "disk"):
disk = device.disk
if disk is not None:
name = "(hd%d" % self.disks.index(disk)
if hasattr(device, "disk"):
lt = device.disk.format.labelType
name += ",%s%d" % (lt, device.partedPartition.number)
name += ")"
return name
def write_config_console(self, config):
if not self.console:
return
console_arg = "console=%s" % self.console
if self.console_options:
console_arg += ",%s" % self.console_options
self.boot_args.add(console_arg)
def write_device_map(self):
""" Write out a device map containing all supported devices. """
map_path = os.path.normpath(iutil.getSysroot() + self.device_map_file)
if os.access(map_path, os.R_OK):
os.rename(map_path, map_path + ".anacbak")
devices = self.disks
if self.stage1_device not in devices:
devices.append(self.stage1_device)
for disk in self.stage2_device.disks:
if disk not in devices:
devices.append(disk)
devices = [d for d in devices if d.isDisk]
if len(devices) == 0:
return
dev_map = open(map_path, "w")
dev_map.write("# this device map was generated by anaconda\n")
for drive in devices:
dev_map.write("%s %s\n" % (self.grub_device_name(drive),
drive.path))
dev_map.close()
def write_defaults(self):
defaults_file = "%s%s" % (iutil.getSysroot(), self.defaults_file)
defaults = open(defaults_file, "w+")
defaults.write("GRUB_TIMEOUT=%d\n" % self.timeout)
defaults.write("GRUB_DISTRIBUTOR=\"$(sed 's, release .*$,,g' /etc/system-release)\"\n")
defaults.write("GRUB_DEFAULT=saved\n")
defaults.write("GRUB_DISABLE_SUBMENU=true\n")
if self.console and self.has_serial_console:
defaults.write("GRUB_TERMINAL=\"serial console\"\n")
defaults.write("GRUB_SERIAL_COMMAND=\"%s\"\n" % self.serial_command)
else:
defaults.write("GRUB_TERMINAL_OUTPUT=\"%s\"\n" % self.terminal_type)
# this is going to cause problems for systems containing multiple
# linux installations or even multiple boot entries with different
# boot arguments
log.info("bootloader.py: used boot args: %s ", self.boot_args)
defaults.write("GRUB_CMDLINE_LINUX=\"%s\"\n" % self.boot_args)
defaults.write("GRUB_DISABLE_RECOVERY=\"true\"\n")
#defaults.write("GRUB_THEME=\"/boot/grub2/themes/system/theme.txt\"\n")
defaults.close()
def _encrypt_password(self):
""" Make sure self.encrypted_password is set up properly. """
if self.encrypted_password:
return
if not self.password:
raise RuntimeError("cannot encrypt empty password")
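# grub2-mkpasswd-pbkdf2 prompts for the password twice (entry and
# confirmation), so the same password is written to the pipe twice.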
(pread, pwrite) = os.pipe()
passwords = "%s\n%s\n" % (self.password, self.password)
iutil.eintr_retry_call(os.write, pwrite, passwords.encode("utf-8"))
iutil.eintr_retry_call(os.close, pwrite)
buf = iutil.execWithCapture("grub2-mkpasswd-pbkdf2", [],
stdin=pread,
root=iutil.getSysroot())
iutil.eintr_retry_call(os.close, pread)
self.encrypted_password = buf.split()[-1].strip()
if not self.encrypted_password.startswith("grub.pbkdf2."):
raise BootLoaderError("failed to encrypt boot loader password")
def write_password_config(self):
if not self.password and not self.encrypted_password:
return
users_file = iutil.getSysroot() + "/etc/grub.d/01_users"
header = open(users_file, "w")
header.write("#!/bin/sh -e\n\n")
header.write("cat << \"EOF\"\n")
# XXX FIXME: document somewhere that the username is "root"
header.write("set superusers=\"root\"\n")
header.write("export superusers\n")
self._encrypt_password()
password_line = "password_pbkdf2 root " + self.encrypted_password
header.write("%s\n" % password_line)
header.write("EOF\n")
header.close()
iutil.eintr_retry_call(os.chmod, users_file, 0o700)
def write_config(self):
self.write_config_console(None)
# See if we have a password and if so update the boot args before we
# write out the defaults file.
if self.password or self.encrypted_password:
self.boot_args.add("rd.shell=0")
self.write_defaults()
# if we fail to setup password auth we should complete the
# installation so the system is at least bootable
try:
self.write_password_config()
except (BootLoaderError, OSError, RuntimeError) as e:
log.error("boot loader password setup failed: %s", e)
# make sure the default entry is the OS we are installing
if self.default is not None:
entry_title = "0"
rc = iutil.execInSysroot("grub2-set-default", [entry_title])
if rc:
log.error("failed to set default menu entry to %s", productName)
# now tell grub2 to generate the main configuration file
rc = iutil.execInSysroot("grub2-mkconfig",
["-o", self.config_file])
if rc:
raise BootLoaderError("failed to write boot loader configuration")
#
# installation
#
def install(self, args=None):
if args is None:
args = []
# XXX will installing to multiple drives work as expected with GRUBv2?
for (stage1dev, stage2dev) in self.install_targets:
grub_args = args + ["--no-floppy", stage1dev.path]
if stage1dev == stage2dev:
# This is hopefully a temporary hack. GRUB2 currently refuses
# to install to a partition's boot block without --force.
grub_args.insert(0, '--force')
else:
if flags.nombr:
grub_args.insert(0, '--grub-setup=/bin/true')
log.info("bootloader.py: mbr update by grub2 disabled")
else:
log.info("bootloader.py: mbr will be updated for grub2")
rc = iutil.execWithRedirect("grub2-install", grub_args,
root=iutil.getSysroot(),
env_prune=['MALLOC_PERTURB_'])
if rc:
raise BootLoaderError("boot loader install failed")
def write(self):
""" Write the bootloader configuration and install the bootloader. """
if self.skip_bootloader:
return
if self.update_only:
self.update()
return
try:
self.write_device_map()
self.stage2_device.format.sync(root=iutil.getTargetPhysicalRoot())
sync()
self.install()
sync()
self.stage2_device.format.sync(root=iutil.getTargetPhysicalRoot())
finally:
self.write_config()
sync()
self.stage2_device.format.sync(root=iutil.getTargetPhysicalRoot())
def check(self):
""" When installing to the mbr of a disk grub2 needs enough space
before the first partition in order to embed its core.img
Until we have a way to ask grub2 what the size is we check to make
sure it starts >= 512K, otherwise return an error.
"""
ret = True
base_gap_bytes = 32256 # 31.5KiB
advanced_gap_bytes = 524288 # 512KiB
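# core.img is embedded in the gap between the MBR and the first
# partition. 31.5KiB matches the classic 63-sector DOS gap; modern
# partitioning tools usually leave 1MiB, and anything >= 512KiB is
# accepted here.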
self.errors = []
self.warnings = []
if self.stage1_device == self.stage2_device:
return ret
# These are small enough to fit
if self.stage2_device.type == "partition":
min_start = base_gap_bytes
else:
min_start = advanced_gap_bytes
if not self.stage1_disk:
return False
# If the first partition starts too low and there is no biosboot partition show an error.
error_msg = None
biosboot = False
parts = self.stage1_disk.format.partedDisk.partitions
for p in parts:
if p.getFlag(PARTITION_BIOS_GRUB):
biosboot = True
break
start = p.geometry.start * p.disk.device.sectorSize
if start < min_start:
error_msg = _("%(deviceName)s may not have enough space for grub2 to embed "
"core.img when using the %(fsType)s file system on %(deviceType)s") \
% {"deviceName": self.stage1_device.name, "fsType": self.stage2_device.format.type,
"deviceType": self.stage2_device.type}
if error_msg and not biosboot:
log.error(error_msg)
self.errors.append(error_msg)
ret = False
return ret
class EFIGRUB(GRUB2):
packages = ["grub2-efi", "efibootmgr", "shim"]
can_dual_boot = False
stage2_is_valid_stage1 = False
stage2_bootable = False
_efi_binary = "\\shim.efi"
@property
def _config_dir(self):
return "efi/EFI/%s" % (self.efi_dir,)
def __init__(self):
super(EFIGRUB, self).__init__()
self.efi_dir = 'BOOT'
def efibootmgr(self, *args, **kwargs):
if flags.imageInstall or flags.dirInstall:
log.info("Skipping efibootmgr for image/directory install.")
return ""
if "noefi" in flags.cmdline:
log.info("Skipping efibootmgr for noefi")
return ""
if kwargs.pop("capture", False):
exec_func = iutil.execWithCapture
else:
exec_func = iutil.execWithRedirect
if "root" not in kwargs:
kwargs["root"] = iutil.getSysroot()
return exec_func("efibootmgr", list(args), **kwargs)
#
# installation
#
def remove_efi_boot_target(self):
buf = self.efibootmgr(capture=True)
for line in buf.splitlines():
try:
(slot, _product) = line.split(None, 1)
except ValueError:
continue
if _product == productName.split("-")[0]:
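# efibootmgr lines look like "Boot0002* Fedora"; characters 4-7 of
# the first field are the hex slot id.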
slot_id = slot[4:8]
# slot_id is hex, so we can't use .isdigit(); validate it with this regex:
if not re.match("^[0-9a-fA-F]+$", slot_id):
log.warning("failed to parse efi boot slot (%s)", slot)
continue
rc = self.efibootmgr("-b", slot_id, "-B")
if rc:
raise BootLoaderError("failed to remove old efi boot entry. This is most likely a kernel or firmware bug.")
@property
def efi_dir_as_efifs_dir(self):
ret = self._config_dir.replace('efi/', '')
return "\\" + ret.replace('/', '\\')
def _add_single_efi_boot_target(self, partition):
boot_disk = partition.disk
boot_part_num = str(partition.partedPartition.number)
rc = self.efibootmgr("-c", "-w", "-L", productName.split("-")[0],
"-d", boot_disk.path, "-p", boot_part_num,
"-l",
self.efi_dir_as_efifs_dir + self._efi_binary,
root=iutil.getSysroot())
if rc:
raise BootLoaderError("failed to set new efi boot target. This is most likely a kernel or firmware bug.")
def add_efi_boot_target(self):
if self.stage1_device.type == "partition":
self._add_single_efi_boot_target(self.stage1_device)
elif self.stage1_device.type == "mdarray":
for parent in self.stage1_device.parents:
self._add_single_efi_boot_target(parent)
def install(self, args=None):
if not flags.leavebootorder:
self.remove_efi_boot_target()
self.add_efi_boot_target()
def update(self):
self.install()
#
# installation
#
def write(self):
""" Write the bootloader configuration and install the bootloader. """
if self.skip_bootloader:
return
if self.update_only:
self.update()
return
try:
sync()
self.stage2_device.format.sync(root=iutil.getTargetPhysicalRoot())
self.install()
finally:
self.write_config()
def check(self):
return True
class Aarch64EFIGRUB(EFIGRUB):
_serial_consoles = ["ttyAMA", "ttyS"]
class MacEFIGRUB(EFIGRUB):
def mactel_config(self):
if os.path.exists(iutil.getSysroot() + "/usr/libexec/mactel-boot-setup"):
rc = iutil.execInSysroot("/usr/libexec/mactel-boot-setup", [])
if rc:
log.error("failed to configure Mac boot loader")
def install(self, args=None):
super(MacEFIGRUB, self).install()
self.mactel_config()
def is_valid_stage1_device(self, device, early=False):
valid = super(MacEFIGRUB, self).is_valid_stage1_device(device, early)
# Make sure we don't pick the OSX root partition
if valid and getattr(device.format, "name", "") != "Linux HFS+ ESP":
valid = False
if hasattr(device.format, "name"):
log.debug("device.format.name is '%s'", device.format.name)
log.debug("MacEFIGRUB.is_valid_stage1_device(%s) returning %s", device.name, valid)
return valid
# Inherit abstract methods from BootLoader
# pylint: disable=abstract-method
class YabootBase(BootLoader):
def write_config_password(self, config):
if self.password:
config.write("password=%s\n" % self.password)
config.write("restricted\n")
def write_config_images(self, config):
for image in self.images:
if not isinstance(image, LinuxBootLoaderImage):
# mac os images are handled specially in the header on mac
continue
args = Arguments()
if self.password:
args.add("rd.shell=0")
if image.initrd:
initrd_line = "\tinitrd=%s/%s\n" % (self.boot_prefix,
image.initrd)
else:
initrd_line = ""
root_device_spec = image.device.fstabSpec
if root_device_spec.startswith("/"):
root_line = "\troot=%s\n" % root_device_spec
else:
args.add("root=%s" % root_device_spec)
root_line = ""
args.update(self.boot_args)
log.info("bootloader.py: used boot args: %s ", args)
stanza = ("image=%(boot_prefix)s%(kernel)s\n"
"\tlabel=%(label)s\n"
"\tread-only\n"
"%(initrd_line)s"
"%(root_line)s"
"\tappend=\"%(args)s\"\n\n"
% {"kernel": image.kernel, "initrd_line": initrd_line,
"label": self.image_label(image),
"root_line": root_line, "args": args,
"boot_prefix": self.boot_prefix})
config.write(stanza)
class Yaboot(YabootBase):
name = "Yaboot"
_config_file = "yaboot.conf"
prog = "ybin"
image_label_attr = "short_label"
packages = ["yaboot"]
# stage2 device requirements
stage2_device_types = ["partition", "mdarray"]
stage2_device_raid_levels = [raid.RAID1]
#
# configuration
#
@property
def config_dir(self):
conf_dir = "/etc"
if self.stage2_device.format.mountpoint == "/boot":
conf_dir = "/boot/etc"
return conf_dir
@property
def config_file(self):
return "%s/%s" % (self.config_dir, self._config_file)
def write_config_header(self, config):
if self.stage2_device.type == "mdarray":
boot_part_num = self.stage2_device.parents[0].partedPartition.number
else:
boot_part_num = self.stage2_device.partedPartition.number
# yaboot.conf timeout is in tenths of a second. Brilliant.
header = ("# yaboot.conf generated by anaconda\n\n"
"boot=%(stage1dev)s\n"
"init-message=\"Welcome to %(product)s!\\nHit <TAB> for "
"boot options\"\n\n"
"partition=%(part_num)d\n"
"timeout=%(timeout)d\n"
"install=/usr/lib/yaboot/yaboot\n"
"delay=5\n"
"enablecdboot\n"
"enableofboot\n"
"enablenetboot\n"
% {"stage1dev": self.stage1_device.path,
"product": productName, "part_num": boot_part_num,
"timeout": self.timeout * 10})
config.write(header)
self.write_config_variant_header(config)
self.write_config_password(config)
config.write("\n")
def write_config_variant_header(self, config):
config.write("nonvram\n")
config.write("mntpoint=/boot/yaboot\n")
config.write("usemount\n")
def write_config_post(self):
super(Yaboot, self).write_config_post()
# make symlink in /etc to yaboot.conf if config is in /boot/etc
etc_yaboot_conf = iutil.getSysroot() + "/etc/yaboot.conf"
if not os.access(etc_yaboot_conf, os.R_OK):
try:
os.symlink("../boot/etc/yaboot.conf", etc_yaboot_conf)
except OSError as e:
log.error("failed to create /etc/yaboot.conf symlink: %s", e)
def write_config(self):
if not os.path.isdir(iutil.getSysroot() + self.config_dir):
os.mkdir(iutil.getSysroot() + self.config_dir)
# this writes the config
super(Yaboot, self).write_config()
#
# installation
#
def install(self, args=None):
args = ["-f", "-C", self.config_file]
rc = iutil.execInSysroot(self.prog, args)
if rc:
raise BootLoaderError("boot loader installation failed")
class IPSeriesYaboot(Yaboot):
prog = "mkofboot"
#
# configuration
#
def write_config_variant_header(self, config):
config.write("nonvram\n") # only on pSeries?
config.write("fstype=raw\n")
#
# installation
#
def install(self, args=None):
self.updatePowerPCBootList()
super(IPSeriesYaboot, self).install()
def updatePowerPCBootList(self):
if not can_touch_runtime_system("updatePowerPCBootList", touch_live=True):
return
log.debug("updatePowerPCBootList: self.stage1_device.path = %s", self.stage1_device.path)
buf = iutil.execWithCapture("nvram",
["--print-config=boot-device"])
if len(buf) == 0:
log.error("FAIL: nvram --print-config=boot-device")
return
boot_list = buf.strip().split()
log.debug("updatePowerPCBootList: boot_list = %s", boot_list)
buf = iutil.execWithCapture("ofpathname",
[self.stage1_device.path])
if len(buf) > 0:
boot_disk = buf.strip()
log.debug("updatePowerPCBootList: boot_disk = %s", boot_disk)
else:
log.error("FAIL: ofpathname %s", self.stage1_device.path)
return
# Place the disk containing the PReP partition first.
# Remove all other occurrences of it.
boot_list = [boot_disk] + [x for x in boot_list if x != boot_disk]
log.debug("updatePowerPCBootList: updated boot_list = %s", boot_list)
update_value = "boot-device=%s" % " ".join(boot_list)
rc = iutil.execWithRedirect("nvram", ["--update-config", update_value])
if rc:
log.error("FAIL: nvram --update-config %s", update_value)
else:
log.info("Updated PPC boot list with the command: nvram --update-config %s", update_value)
class IPSeriesGRUB2(GRUB2):
# GRUB2 sets /boot bootable and not the PReP partition. This causes the Open Firmware BIOS not
# to present the disk as a bootable target. If stage2_bootable is False, then the PReP partition
# will be marked bootable. Confusing.
stage2_bootable = False
terminal_type = "ofconsole"
#
# installation
#
def install(self, args=None):
if flags.leavebootorder:
log.info("leavebootorder passed as an option. Will not update the NVRAM boot list.")
else:
self.updateNVRAMBootList()
super(IPSeriesGRUB2, self).install(args=["--no-nvram"])
# This will update the PowerPC (ppc) bios boot device order list
def updateNVRAMBootList(self):
if not can_touch_runtime_system("updateNVRAMBootList", touch_live=True):
return
log.debug("updateNVRAMBootList: self.stage1_device.path = %s", self.stage1_device.path)
buf = iutil.execWithCapture("nvram",
["--print-config=boot-device"])
if len(buf) == 0:
log.error("Failed to determine nvram boot device")
return
boot_list = buf.strip().replace("\"", "").split()
log.debug("updateNVRAMBootList: boot_list = %s", boot_list)
buf = iutil.execWithCapture("ofpathname",
[self.stage1_device.path])
if len(buf) > 0:
boot_disk = buf.strip()
else:
log.error("Failed to translate boot path into device name")
return
# Place the disk containing the PReP partition first.
# Remove all other occurrences of it.
boot_list = [boot_disk] + [x for x in boot_list if x != boot_disk]
update_value = "boot-device=%s" % " ".join(boot_list)
rc = iutil.execWithRedirect("nvram", ["--update-config", update_value])
if rc:
log.error("Failed to update new boot device order")
#
# In addition to the normal grub configuration variable, add one more to set the size of the
# console's window to a standard 80x24
#
def write_defaults(self):
super(IPSeriesGRUB2, self).write_defaults()
defaults_file = "%s%s" % (iutil.getSysroot(), self.defaults_file)
defaults = open(defaults_file, "a+")
# The terminfo's X and Y size, and output location could change in the future
defaults.write("GRUB_TERMINFO=\"terminfo -g 80x24 console\"\n")
defaults.close()
class MacYaboot(Yaboot):
prog = "mkofboot"
can_dual_boot = True
#
# configuration
#
def write_config_variant_header(self, config):
try:
mac_os = [i for i in self.chain_images if i.label][0]
except IndexError:
pass
else:
config.write("macosx=%s\n" % mac_os.device.path)
config.write("magicboot=/usr/lib/yaboot/ofboot\n")
class ZIPL(BootLoader):
name = "ZIPL"
config_file = "/etc/zipl.conf"
packages = ["s390utils-base"]
# stage2 device requirements
stage2_device_types = ["partition"]
@property
def stage2_format_types(self):
if productName.startswith("Red Hat "):
return ["xfs", "ext4", "ext3", "ext2"]
else:
return ["ext4", "ext3", "ext2", "xfs"]
image_label_attr = "short_label"
preserve_args = ["cio_ignore", "rd.znet", "rd_ZNET"]
def __init__(self):
super(ZIPL, self).__init__()
self.stage1_name = None
#
# configuration
#
@property
def boot_dir(self):
return "/boot"
def write_config_images(self, config):
for image in self.images:
if "kdump" in (image.initrd or image.kernel):
# no need to create bootloader entries for kdump
continue
args = Arguments()
if image.initrd:
initrd_line = "\tramdisk=%s/%s\n" % (self.boot_dir,
image.initrd)
else:
initrd_line = ""
args.add("root=%s" % image.device.fstabSpec)
args.update(self.boot_args)
if image.device.type == "btrfs subvolume":
args.update(["rootflags=subvol=%s" % image.device.name])
log.info("bootloader.py: used boot args: %s ", args)
stanza = ("[%(label)s]\n"
"\timage=%(boot_dir)s/%(kernel)s\n"
"%(initrd_line)s"
"\tparameters=\"%(args)s\"\n"
% {"label": self.image_label(image),
"kernel": image.kernel, "initrd_line": initrd_line,
"args": args,
"boot_dir": self.boot_dir})
config.write(stanza)
def write_config_header(self, config):
header = ("[defaultboot]\n"
"defaultauto\n"
"prompt=1\n"
"timeout=%(timeout)d\n"
"default=%(default)s\n"
"target=/boot\n"
% {"timeout": self.timeout,
"default": self.image_label(self.default)})
config.write(header)
#
# installation
#
def install(self, args=None):
buf = iutil.execWithCapture("zipl", [], root=iutil.getSysroot())
for line in buf.splitlines():
if line.startswith("Preparing boot device: "):
# Output here may look like:
# Preparing boot device: dasdb (0200).
# Preparing boot device: dasdl.
# We want to extract the device name and pass that.
name = re.sub(r".+?: ", "", line)
self.stage1_name = re.sub(r"(\s\(.+\))?\.$", "", name)
# a limitation of s390x is that the kernel parameter list must not
# exceed 896 bytes; there is nothing we can do about this, so just
# catch the error and show it to the user instead of crashing
elif line.startswith("Error: The length of the parameters "):
errorHandler.cb(ZIPLError(line))
if not self.stage1_name:
raise BootLoaderError("could not find IPL device")
# do the reipl
iutil.reIPL(self.stage1_name)
class EXTLINUX(BootLoader):
name = "EXTLINUX"
_config_file = "extlinux.conf"
_config_dir = "/boot/extlinux"
# stage1 device requirements
stage1_device_types = ["disk"]
# stage2 device requirements
stage2_format_types = ["ext4", "ext3", "ext2"]
stage2_device_types = ["partition"]
stage2_bootable = True
packages = ["syslinux-extlinux"]
@property
def config_file(self):
return "%s/%s" % (self._config_dir, self._config_file)
@property
def boot_prefix(self):
""" Prefix, if any, to paths in /boot. """
if self.stage2_device.format.mountpoint == "/":
prefix = "/boot"
else:
prefix = ""
return prefix
def write_config_console(self, config):
if not self.console:
return
console_arg = "console=%s" % self.console
if self.console_options:
console_arg += ",%s" % self.console_options
self.boot_args.add(console_arg)
def write_config_images(self, config):
self.write_config_console(config)
for image in self.images:
args = Arguments()
args.update(["root=%s" % image.device.fstabSpec, "ro"])
if image.device.type == "btrfs subvolume":
args.update(["rootflags=subvol=%s" % image.device.name])
args.update(self.boot_args)
log.info("bootloader.py: used boot args: %s ", args)
stanza = ("label %(label)s (%(version)s)\n"
"\tkernel %(boot_prefix)s/%(kernel)s\n"
"\tinitrd %(boot_prefix)s/%(initrd)s\n"
"\tappend %(args)s\n\n"
% {"label": self.image_label(image),
"version": image.version,
"kernel": image.kernel,
"initrd": image.initrd,
"args": args,
"boot_prefix": self.boot_prefix})
config.write(stanza)
def write_config_header(self, config):
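# In syslinux menu syntax "#" in the autoboot message is replaced by the
# remaining seconds and "{,s}" pluralizes the word; the timeout directive
# itself is in tenths of a second, hence the * 10 below.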
header = ("# extlinux.conf generated by anaconda\n\n"
"ui menu.c32\n\n"
"menu autoboot Welcome to %(productName)s. Automatic boot in # second{,s}. Press a key for options.\n"
"menu title %(productName)s Boot Options.\n"
"menu hidden\n\n"
"timeout %(timeout)d\n"
"#totaltimeout 9000\n\n"
% {"productName": productName, "timeout": self.timeout *10})
config.write(header)
if self.default is not None:
config.write("default %(default)s\n\n" % {"default" : self.image_label(self.default)})
self.write_config_password(config)
def write_config_password(self, config):
if self.password:
config.write("menu master passwd %s\n" % self.password)
config.write("menu notabmsg Press [Tab] and enter the password to edit options")
def write_config_post(self):
etc_extlinux = os.path.normpath(iutil.getSysroot() + "/etc/" + self._config_file)
if not os.access(etc_extlinux, os.R_OK):
try:
os.symlink("../boot/%s" % self._config_file, etc_extlinux)
except OSError as e:
log.warning("failed to create /etc/extlinux.conf symlink: %s", e)
def write_config(self):
super(EXTLINUX, self).write_config()
#
# installation
#
def install(self, args=None):
args = ["--install", self._config_dir]
rc = iutil.execInSysroot("extlinux", args)
if rc:
raise BootLoaderError("boot loader install failed")
# every platform that wants a bootloader needs to be in this dict
bootloader_by_platform = {platform.X86: GRUB2,
platform.EFI: EFIGRUB,
platform.MacEFI: MacEFIGRUB,
platform.PPC: GRUB2,
platform.IPSeriesPPC: IPSeriesGRUB2,
platform.NewWorldPPC: MacYaboot,
platform.S390: ZIPL,
platform.Aarch64EFI: Aarch64EFIGRUB,
platform.ARM: EXTLINUX,
platform.omapARM: EXTLINUX}
def get_bootloader():
platform_name = platform.platform.__class__.__name__
if flags.extlinux:
cls = EXTLINUX
else:
cls = bootloader_by_platform.get(platform.platform.__class__, BootLoader)
log.info("bootloader %s on %s platform", cls.__name__, platform_name)
return cls()
# anaconda-specific functions
def writeSysconfigKernel(storage, version, instClass):
# get the name of the default kernel package based on the version
kernel_basename = "vmlinuz-" + version
kernel_file = "/boot/%s" % kernel_basename
if not os.path.isfile(iutil.getSysroot() + kernel_file):
kernel_file = "/boot/efi/EFI/%s/%s" % (instClass.efi_dir, kernel_basename)
if not os.path.isfile(iutil.getSysroot() + kernel_file):
log.error("failed to recreate path to default kernel image")
return
try:
import rpm
except ImportError:
log.error("failed to import rpm python module")
return
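# Ask the rpm database which package owns the kernel image path; the
# "basenames" index maps installed file paths to package headers.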
ts = rpm.TransactionSet(iutil.getSysroot())
mi = ts.dbMatch('basenames', kernel_file)
try:
h = next(mi)
except StopIteration:
log.error("failed to get package name for default kernel")
return
kernel = h.name
f = open(iutil.getSysroot() + "/etc/sysconfig/kernel", "w+")
f.write("# UPDATEDEFAULT specifies if new-kernel-pkg should make\n"
"# new kernels the default\n")
# only update the default if we're setting the default to linux (#156678)
if storage.bootloader.default.device == storage.rootDevice:
f.write("UPDATEDEFAULT=yes\n")
else:
f.write("UPDATEDEFAULT=no\n")
f.write("\n")
f.write("# DEFAULTKERNEL specifies the default kernel package type\n")
f.write("DEFAULTKERNEL=%s\n" % kernel)
if storage.bootloader.trusted_boot:
f.write("# HYPERVISOR specifies the default multiboot kernel\n")
f.write("HYPERVISOR=/boot/tboot.gz\n")
f.write("HYPERVISOR_ARGS=logging=vga,serial,memory\n")
f.close()
def writeBootLoaderFinal(storage, payload, instClass, ksdata):
""" Do the final write of the bootloader. """
# set up dracut/fips boot args
# XXX FIXME: do this from elsewhere?
storage.bootloader.set_boot_args(storage=storage,
payload=payload)
try:
storage.bootloader.write()
except BootLoaderError as e:
log.error("bootloader.write failed: %s", e)
if errorHandler.cb(e) == ERROR_RAISE:
raise
def writeBootLoader(storage, payload, instClass, ksdata):
""" Write bootloader configuration to disk.
When we get here, the bootloader will already have a default linux
image. We only have to add images for the non-default kernels and
adjust the default to reflect whatever the default variant is.
"""
if not storage.bootloader.skip_bootloader:
stage1_device = storage.bootloader.stage1_device
log.info("boot loader stage1 target device is %s", stage1_device.name)
stage2_device = storage.bootloader.stage2_device
log.info("boot loader stage2 target device is %s", stage2_device.name)
# Bridge storage EFI configuration to bootloader
if hasattr(storage.bootloader, 'efi_dir'):
storage.bootloader.efi_dir = instClass.efi_dir
if isinstance(payload, RPMOSTreePayload):
if storage.bootloader.skip_bootloader:
log.info("skipping boot loader install per user request")
return
writeBootLoaderFinal(storage, payload, instClass, ksdata)
return
# get a list of installed kernel packages
# add whatever rescue kernels we can find to the end
kernel_versions = list(payload.kernelVersionList)
rescue_versions = glob(iutil.getSysroot() + "/boot/vmlinuz-*-rescue-*")
rescue_versions += glob(iutil.getSysroot() + "/boot/efi/EFI/%s/vmlinuz-*-rescue-*" % instClass.efi_dir)
kernel_versions += (f.split("/")[-1][8:] for f in rescue_versions)
if not kernel_versions:
log.warning("no kernel was installed -- boot loader config unchanged")
return
# all the linux images' labels are based on the default image's
base_label = productName
base_short_label = "linux"
# The first one is the default kernel. Update the bootloader's default
# entry to reflect the details of the default kernel.
version = kernel_versions.pop(0)
default_image = LinuxBootLoaderImage(device=storage.rootDevice,
version=version,
label=base_label,
short=base_short_label)
storage.bootloader.add_image(default_image)
storage.bootloader.default = default_image
# write out /etc/sysconfig/kernel
writeSysconfigKernel(storage, version, instClass)
if storage.bootloader.skip_bootloader:
log.info("skipping boot loader install per user request")
return
# now add an image for each of the other kernels
for version in kernel_versions:
label = "%s-%s" % (base_label, version)
short = "%s-%s" % (base_short_label, version)
if storage.bootloader.trusted_boot:
image = TbootLinuxBootLoaderImage(
device=storage.rootDevice,
version=version,
label=label, short=short)
else:
image = LinuxBootLoaderImage(device=storage.rootDevice,
version=version,
label=label, short=short)
storage.bootloader.add_image(image)
writeBootLoaderFinal(storage, payload, instClass, ksdata)
| gpl-2.0 |
benediktkr/lokun-record | record/sec.py | 1 | 2077 | from random import randint
def compare1toN(str1, strl):
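# The list comprehension evaluates every comparison before any() looks at
# the results, so timing does not reveal which candidate (if any) matched.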
return any([compare(str1, a) for a in strl])
def compare(str1, str2):
return compare_const2(str1, str2)
def compare_const2(str1, str2):
if len(str1) != len(str2):
return False
result = 0
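# OR the XOR of every character pair into an accumulator instead of
# returning at the first mismatch, so the loop takes the same time no
# matter where the strings differ.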
for x, y in zip(str1, str2):
result |= ord(x) ^ ord(y)
return result == 0
def compare_const(str1, str2):
"""Constant-time string comparasion, to avoid timing attacks.
Leaks the lenght, but that's ok since we are always comparing
hashes, and the only information the adversary has to gain by
the length of a hash as a better guess at what hashing algorithm
is being used. At which point, i'd like to point out Shannons
Maxim."""
length = min(len(str1), len(str2))
ret = True
for i in xrange(length):
if str1[i] != str2[i]:
ret = False
if len(str1) != len(str2):
ret = False
return ret
def compare_noleak(str1, str2):
"""A non-random version that doesn't leak the length, made for Baldur :)
str1 should be the user-supplied string, and str2 the string you comare
against.
NOTE: Pads with 0x00, only inteded to compare strings, not byte-lists."""
l1 = len(str1)
l2 = len(str2)
if l1 > l2:
# If the user string is longer than the source string, pad.
delta = l1 - l2
str2 += "\x00"*delta
# Differing lengths can never compare equal; starting from plain True
# would wrongly accept str1 being a prefix of str2.
ret = l1 == l2
for i in xrange(l1):
if str1[i] != str2[i]:
ret = False
return ret
def compare_rnd(str1, str2):
"""Constant-time string comparasion, to avoid timing attacks.
Start in a random char of the string.
Doesn't leak the length, since the starting point (and thus the
breaking point) as randomly chosen."""
length = min(len(str1), len(str2))
start = randint(0, length-1)
for i in xrange(length):
j = (start+i) % length
if str1[j] != str2[j]:
return False
if len(str1) != len(str2):
return False
return True
| agpl-3.0 |
dcrosta/mongo-disco | app/job.py | 1 | 2372 | #!/usr/bin/env python
# encoding: utf-8
'''
File: DiscoJob.py
Author: NYU ITP team
Description: Disco Job Wrapper
'''
from disco.core import Job, result_iterator
from disco.worker.classic.worker import Params
from disco.worker.classic.modutil import locate_modules,find_modules
from mongodb_io import mongodb_output_stream,mongodb_input_stream
from splitter import calculate_splits as do_split
class DiscoJob():
def __init__(self,config,map,reduce):
import config_util
self.config = config_util.config
#if the user doesn't specify output, print to stdout
if not config.get('output_uri') and not config.get('print_to_stdout'):
config['print_to_stdout'] = True
for item in config:
self.config[item] = config[item]
self.map = map
self.reduce = reduce
self.job = Job()
self.params = Params()
for key in self.config:
self.params.__dict__[key] = self.config[key]
def run(self):
if self.config['print_to_stdout']:
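# stdout mode: results are pulled back with result_iterator below, so no
# MongoDB output stream is attached to the job.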
self.job.run(input = do_split(self.config),
map = self.map,
reduce = self.reduce,
params = self.params,
map_input_stream = mongodb_input_stream,
required_modules= ['mongodb_io',
'mongodb_input',
'config_util',
'mongo_util',
'mongodb_output'])
for key, value in result_iterator(self.job.wait(show=True)):
print key, value
else:
self.job.run(input = do_split(self.config),
map = self.map,
reduce = self.reduce,
params = self.params,
map_input_stream = mongodb_input_stream,
reduce_output_stream = mongodb_output_stream,
required_modules= ['mongodb_io',
'mongodb_input',
'config_util',
'mongo_util',
'mongodb_output'])
if self.config.get("job_wait",False):
self.job.wait(show=True)
| apache-2.0 |
classicboyir/BuildingMachineLearningSystemsWithPython | ch09/01_fft_based_classifier.py | 24 | 3740 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import numpy as np
from collections import defaultdict
from sklearn.metrics import precision_recall_curve, roc_curve
from sklearn.metrics import auc
from sklearn.cross_validation import ShuffleSplit
from sklearn.metrics import confusion_matrix
from utils import plot_pr, plot_roc, plot_confusion_matrix, GENRE_LIST
from fft import read_fft
genre_list = GENRE_LIST
def train_model(clf_factory, X, Y, name, plot=False):
labels = np.unique(Y)
cv = ShuffleSplit(
n=len(X), n_iter=1, test_size=0.3, indices=True, random_state=0)
train_errors = []
test_errors = []
scores = []
pr_scores = defaultdict(list)
precisions, recalls, thresholds = defaultdict(
list), defaultdict(list), defaultdict(list)
roc_scores = defaultdict(list)
tprs = defaultdict(list)
fprs = defaultdict(list)
clfs = [] # just to later get the median
cms = []
for train, test in cv:
X_train, y_train = X[train], Y[train]
X_test, y_test = X[test], Y[test]
clf = clf_factory()
clf.fit(X_train, y_train)
clfs.append(clf)
train_score = clf.score(X_train, y_train)
test_score = clf.score(X_test, y_test)
scores.append(test_score)
train_errors.append(1 - train_score)
test_errors.append(1 - test_score)
y_pred = clf.predict(X_test)
cm = confusion_matrix(y_test, y_pred)
cms.append(cm)
for label in labels:
y_label_test = np.asarray(y_test == label, dtype=int)
proba = clf.predict_proba(X_test)
proba_label = proba[:, label]
precision, recall, pr_thresholds = precision_recall_curve(
y_label_test, proba_label)
pr_scores[label].append(auc(recall, precision))
precisions[label].append(precision)
recalls[label].append(recall)
thresholds[label].append(pr_thresholds)
fpr, tpr, roc_thresholds = roc_curve(y_label_test, proba_label)
roc_scores[label].append(auc(fpr, tpr))
tprs[label].append(tpr)
fprs[label].append(fpr)
if plot:
for label in labels:
print("Plotting %s" % genre_list[label])
scores_to_sort = roc_scores[label]
median = np.argsort(scores_to_sort)[len(scores_to_sort) // 2]
desc = "%s %s" % (name, genre_list[label])
plot_pr(pr_scores[label][median], desc, precisions[label][median],
recalls[label][median], label='%s vs rest' % genre_list[label])
plot_roc(roc_scores[label][median], desc, tprs[label][median],
fprs[label][median], label='%s vs rest' % genre_list[label])
all_pr_scores = np.asarray(list(pr_scores.values())).flatten()
summary = (np.mean(scores), np.std(scores),
np.mean(all_pr_scores), np.std(all_pr_scores))
print("%.3f\t%.3f\t%.3f\t%.3f\t" % summary)
return np.mean(train_errors), np.mean(test_errors), np.asarray(cms)
def create_model():
from sklearn.linear_model.logistic import LogisticRegression
clf = LogisticRegression()
return clf
if __name__ == "__main__":
X, y = read_fft(genre_list)
train_avg, test_avg, cms = train_model(
create_model, X, y, "Log Reg FFT", plot=True)
cm_avg = np.mean(cms, axis=0)
cm_norm = cm_avg / np.sum(cm_avg, axis=0)
plot_confusion_matrix(cm_norm, genre_list, "fft",
"Confusion matrix of an FFT based classifier")
| mit |
davidalbertonogueira/TurboParser | semeval2015_data/scripts/augment_with_companion_data.py | 6 | 2153 | import sys
import pdb
keep_document_names = True
has_sense = True # True for SemEval 2015, False for SemEval 2014.
trim_first_line = True # True for SemEval 2015, False for SemEval 2014.
filepath = sys.argv[1]
if len(sys.argv) > 2:
filepath_companion = sys.argv[2]
else:
filepath_companion = ''
f = open(filepath)
if filepath_companion == '':
f_companion = None
else:
f_companion = open(filepath_companion)
if trim_first_line:
line = f.readline()
line = line.rstrip('\n')
print line
for line in f:
line = line.rstrip('\n')
if f_companion != None:
line_companion = f_companion.readline()
line_companion = line_companion.rstrip('\n')
if line.startswith('#') and line.split('\t')[0] != '#':
if keep_document_names: print line
elif line == '':
print line
else:
fields = line.split("\t")
word_index = fields[0]
word = fields[1]
lemma = fields[2]
pos = fields[3]
if len(fields) == 4:
top = '-'
pred = '-'
if has_sense:
sense = '-'
else:
top = fields[4]
pred = fields[5]
if has_sense:
sense = fields[6]
if has_sense:
args = fields[7:]
else:
args = fields[6:]
if f_companion != None:
fields_companion = line_companion.split("\t")
predicted_pos = fields_companion[0]
head = fields_companion[1]
deprel = fields_companion[2]
else:
predicted_pos = '_'
head = '_'
deprel = '_'
fields_output = [word_index,
word,
lemma,
pos, #predicted_pos
head,
deprel,
top,
pred]
if has_sense:
fields_output.append(sense);
fields_output.extend(args);
line_output = '\t'.join(fields_output)
print line_output
f.close()
if f_companion != None:
f_companion.close()
| lgpl-3.0 |
ropable/resource_tracking | tracking/migrations/0004_auto_20200102_0914.py | 1 | 1126 | # Generated by Django 2.1.11 on 2020-01-02 01:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tracking', '0003_auto_20190308_1114'),
]
operations = [
migrations.AlterField(
model_name='device',
name='symbol',
field=models.CharField(choices=[('2 wheel drive', '2-Wheel Drive'), ('4 wheel drive passenger', '4-Wheel Drive Passenger'), ('4 wheel drive ute', '4-Wheel Drive (Ute)'), ('light unit', 'Light Unit'), ('heavy duty', 'Heavy Duty'), ('gang truck', 'Gang Truck'), ('snorkel', 'Snorkel'), ('dozer', 'Dozer'), ('grader', 'Grader'), ('loader', 'Loader'), ('tender', 'Tender'), ('float', 'Float'), ('fixed wing aircraft', 'Waterbomber'), ('rotary aircraft', 'Rotary'), ('spotter aircraft', 'Spotter'), ('helitac', 'Helitac'), ('rescue helicopter', 'Rescue Helicopter'), ('aviation fuel truck', 'Aviation Fuel Truck'), (None, ''), ('comms bus', 'Communications Bus'), ('boat', 'Boat'), ('person', 'Person'), ('other', 'Other'), ('unknown', 'Unknown')], default='other', max_length=32),
),
]
| bsd-3-clause |
40223245/123 | static/Brython3.1.1-20150328-091302/Lib/unittest/mock.py | 739 | 71473 | # mock.py
# Test tools for mocking and patching.
# Maintained by Michael Foord
# Backport for other versions of Python available from
# http://pypi.python.org/pypi/mock
__all__ = (
'Mock',
'MagicMock',
'patch',
'sentinel',
'DEFAULT',
'ANY',
'call',
'create_autospec',
'FILTER_DIR',
'NonCallableMock',
'NonCallableMagicMock',
'mock_open',
'PropertyMock',
)
__version__ = '1.0'
import inspect
import pprint
import sys
from functools import wraps
BaseExceptions = (BaseException,)
if 'java' in sys.platform:
# jython
import java
BaseExceptions = (BaseException, java.lang.Throwable)
FILTER_DIR = True
# Workaround for issue #12370
# Without this, the __class__ properties wouldn't be set correctly
_safe_super = super
def _is_instance_mock(obj):
# can't use isinstance on Mock objects because they override __class__
# The base class for all mocks is NonCallableMock
return issubclass(type(obj), NonCallableMock)
def _is_exception(obj):
return (
isinstance(obj, BaseExceptions) or
isinstance(obj, type) and issubclass(obj, BaseExceptions)
)
class _slotted(object):
__slots__ = ['a']
DescriptorTypes = (
type(_slotted.a),
property,
)
def _getsignature(func, skipfirst, instance=False):
if isinstance(func, type) and not instance:
try:
func = func.__init__
except AttributeError:
return
skipfirst = True
elif not isinstance(func, FunctionTypes):
# for classes where instance is True we end up here too
try:
func = func.__call__
except AttributeError:
return
try:
argspec = inspect.getfullargspec(func)
except TypeError:
# C function / method, possibly inherited object().__init__
return
regargs, varargs, varkw, defaults, kwonly, kwonlydef, ann = argspec
# instance methods and classmethods need to lose the self argument
if getattr(func, '__self__', None) is not None:
regargs = regargs[1:]
if skipfirst:
# this condition and the above one are never both True - why?
regargs = regargs[1:]
signature = inspect.formatargspec(
regargs, varargs, varkw, defaults,
kwonly, kwonlydef, ann, formatvalue=lambda value: "")
return signature[1:-1], func
def _check_signature(func, mock, skipfirst, instance=False):
if not _callable(func):
return
result = _getsignature(func, skipfirst, instance)
if result is None:
return
signature, func = result
# can't use self because "self" is common as an argument name
# unfortunately, and not only in the first position
src = "lambda _mock_self, %s: None" % signature
checksig = eval(src, {})
_copy_func_details(func, checksig)
type(mock)._mock_check_sig = checksig
def _copy_func_details(func, funcopy):
funcopy.__name__ = func.__name__
funcopy.__doc__ = func.__doc__
# we explicitly don't copy func.__dict__ into this copy as it would
# expose original attributes that should be mocked
funcopy.__module__ = func.__module__
funcopy.__defaults__ = func.__defaults__
funcopy.__kwdefaults__ = func.__kwdefaults__
def _callable(obj):
if isinstance(obj, type):
return True
if getattr(obj, '__call__', None) is not None:
return True
return False
def _is_list(obj):
# checks for list or tuples
# XXXX badly named!
return type(obj) in (list, tuple)
def _instance_callable(obj):
"""Given an object, return True if the object is callable.
For classes, return True if instances would be callable."""
if not isinstance(obj, type):
# already an instance
return getattr(obj, '__call__', None) is not None
# *could* be broken by a class overriding __mro__ or __dict__ via
# a metaclass
for base in (obj,) + obj.__mro__:
if base.__dict__.get('__call__') is not None:
return True
return False
def _set_signature(mock, original, instance=False):
# creates a function with signature (*args, **kwargs) that delegates to a
# mock. It still does signature checking by calling a lambda with the same
# signature as the original.
if not _callable(original):
return
skipfirst = isinstance(original, type)
result = _getsignature(original, skipfirst, instance)
if result is None:
# was a C function (e.g. object().__init__ ) that can't be mocked
return
signature, func = result
src = "lambda %s: None" % signature
checksig = eval(src, {})
_copy_func_details(func, checksig)
name = original.__name__
if not name.isidentifier():
name = 'funcopy'
context = {'_checksig_': checksig, 'mock': mock}
src = """def %s(*args, **kwargs):
_checksig_(*args, **kwargs)
return mock(*args, **kwargs)""" % name
exec(src, context)
funcopy = context[name]
_setup_func(funcopy, mock)
return funcopy
def _setup_func(funcopy, mock):
funcopy.mock = mock
# can't use isinstance with mocks
if not _is_instance_mock(mock):
return
def assert_called_with(*args, **kwargs):
return mock.assert_called_with(*args, **kwargs)
def assert_called_once_with(*args, **kwargs):
return mock.assert_called_once_with(*args, **kwargs)
def assert_has_calls(*args, **kwargs):
return mock.assert_has_calls(*args, **kwargs)
def assert_any_call(*args, **kwargs):
return mock.assert_any_call(*args, **kwargs)
def reset_mock():
funcopy.method_calls = _CallList()
funcopy.mock_calls = _CallList()
mock.reset_mock()
ret = funcopy.return_value
if _is_instance_mock(ret) and ret is not mock:
ret.reset_mock()
funcopy.called = False
funcopy.call_count = 0
funcopy.call_args = None
funcopy.call_args_list = _CallList()
funcopy.method_calls = _CallList()
funcopy.mock_calls = _CallList()
funcopy.return_value = mock.return_value
funcopy.side_effect = mock.side_effect
funcopy._mock_children = mock._mock_children
funcopy.assert_called_with = assert_called_with
funcopy.assert_called_once_with = assert_called_once_with
funcopy.assert_has_calls = assert_has_calls
funcopy.assert_any_call = assert_any_call
funcopy.reset_mock = reset_mock
mock._mock_delegate = funcopy
def _is_magic(name):
return '__%s__' % name[2:-2] == name
class _SentinelObject(object):
"A unique, named, sentinel object."
def __init__(self, name):
self.name = name
def __repr__(self):
return 'sentinel.%s' % self.name
class _Sentinel(object):
"""Access attributes to return a named object, usable as a sentinel."""
def __init__(self):
self._sentinels = {}
def __getattr__(self, name):
if name == '__bases__':
# Without this help(unittest.mock) raises an exception
raise AttributeError
return self._sentinels.setdefault(name, _SentinelObject(name))
sentinel = _Sentinel()
DEFAULT = sentinel.DEFAULT
_missing = sentinel.MISSING
_deleted = sentinel.DELETED
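# Illustrative usage (editor's sketch, not part of the original module):
# sentinel attributes are unique named objects, useful as recognisable
# return values or argument markers in tests.
# >>> m = Mock(return_value=sentinel.SOME_OBJECT)
# >>> m() is sentinel.SOME_OBJECT
# True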
def _copy(value):
if type(value) in (dict, list, tuple, set):
return type(value)(value)
return value
_allowed_names = set(
[
'return_value', '_mock_return_value', 'side_effect',
'_mock_side_effect', '_mock_parent', '_mock_new_parent',
'_mock_name', '_mock_new_name'
]
)
def _delegating_property(name):
_allowed_names.add(name)
_the_name = '_mock_' + name
def _get(self, name=name, _the_name=_the_name):
sig = self._mock_delegate
if sig is None:
return getattr(self, _the_name)
return getattr(sig, name)
def _set(self, value, name=name, _the_name=_the_name):
sig = self._mock_delegate
if sig is None:
self.__dict__[_the_name] = value
else:
setattr(sig, name, value)
return property(_get, _set)
class _CallList(list):
def __contains__(self, value):
if not isinstance(value, list):
return list.__contains__(self, value)
len_value = len(value)
len_self = len(self)
if len_value > len_self:
return False
for i in range(0, len_self - len_value + 1):
sub_list = self[i:i+len_value]
if sub_list == value:
return True
return False
def __repr__(self):
return pprint.pformat(list(self))
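# Illustrative behaviour (editor's sketch): _CallList specialises `in` so
# that a *list* of calls matches any contiguous subsequence of the recorded
# calls, which is what assert_has_calls relies on for ordered matching.
# >>> calls = _CallList([call(1), call(2), call(3)])
# >>> [call(2), call(3)] in calls
# True
# >>> [call(3), call(2)] in calls
# False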
def _check_and_set_parent(parent, value, name, new_name):
if not _is_instance_mock(value):
return False
if ((value._mock_name or value._mock_new_name) or
(value._mock_parent is not None) or
(value._mock_new_parent is not None)):
return False
_parent = parent
while _parent is not None:
# setting a mock (value) as a child or return value of itself
# should not modify the mock
if _parent is value:
return False
_parent = _parent._mock_new_parent
if new_name:
value._mock_new_parent = parent
value._mock_new_name = new_name
if name:
value._mock_parent = parent
value._mock_name = name
return True
class Base(object):
_mock_return_value = DEFAULT
_mock_side_effect = None
def __init__(self, *args, **kwargs):
pass
class NonCallableMock(Base):
"""A non-callable version of `Mock`"""
def __new__(cls, *args, **kw):
# every instance has its own class
# so we can create magic methods on the
# class without stomping on other mocks
new = type(cls.__name__, (cls,), {'__doc__': cls.__doc__})
instance = object.__new__(new)
return instance
def __init__(
self, spec=None, wraps=None, name=None, spec_set=None,
parent=None, _spec_state=None, _new_name='', _new_parent=None,
**kwargs
):
if _new_parent is None:
_new_parent = parent
__dict__ = self.__dict__
__dict__['_mock_parent'] = parent
__dict__['_mock_name'] = name
__dict__['_mock_new_name'] = _new_name
__dict__['_mock_new_parent'] = _new_parent
if spec_set is not None:
spec = spec_set
spec_set = True
self._mock_add_spec(spec, spec_set)
__dict__['_mock_children'] = {}
__dict__['_mock_wraps'] = wraps
__dict__['_mock_delegate'] = None
__dict__['_mock_called'] = False
__dict__['_mock_call_args'] = None
__dict__['_mock_call_count'] = 0
__dict__['_mock_call_args_list'] = _CallList()
__dict__['_mock_mock_calls'] = _CallList()
__dict__['method_calls'] = _CallList()
if kwargs:
self.configure_mock(**kwargs)
_safe_super(NonCallableMock, self).__init__(
spec, wraps, name, spec_set, parent,
_spec_state
)
def attach_mock(self, mock, attribute):
"""
Attach a mock as an attribute of this one, replacing its name and
parent. Calls to the attached mock will be recorded in the
`method_calls` and `mock_calls` attributes of this one."""
mock._mock_parent = None
mock._mock_new_parent = None
mock._mock_name = ''
mock._mock_new_name = None
setattr(self, attribute, mock)
def mock_add_spec(self, spec, spec_set=False):
"""Add a spec to a mock. `spec` can either be an object or a
list of strings. Only attributes on the `spec` can be fetched as
attributes from the mock.
If `spec_set` is True then only attributes on the spec can be set."""
self._mock_add_spec(spec, spec_set)
def _mock_add_spec(self, spec, spec_set):
_spec_class = None
if spec is not None and not _is_list(spec):
if isinstance(spec, type):
_spec_class = spec
else:
_spec_class = _get_class(spec)
spec = dir(spec)
__dict__ = self.__dict__
__dict__['_spec_class'] = _spec_class
__dict__['_spec_set'] = spec_set
__dict__['_mock_methods'] = spec
def __get_return_value(self):
ret = self._mock_return_value
if self._mock_delegate is not None:
ret = self._mock_delegate.return_value
if ret is DEFAULT:
ret = self._get_child_mock(
_new_parent=self, _new_name='()'
)
self.return_value = ret
return ret
def __set_return_value(self, value):
if self._mock_delegate is not None:
self._mock_delegate.return_value = value
else:
self._mock_return_value = value
_check_and_set_parent(self, value, None, '()')
__return_value_doc = "The value to be returned when the mock is called."
return_value = property(__get_return_value, __set_return_value,
__return_value_doc)
@property
def __class__(self):
if self._spec_class is None:
return type(self)
return self._spec_class
called = _delegating_property('called')
call_count = _delegating_property('call_count')
call_args = _delegating_property('call_args')
call_args_list = _delegating_property('call_args_list')
mock_calls = _delegating_property('mock_calls')
def __get_side_effect(self):
delegated = self._mock_delegate
if delegated is None:
return self._mock_side_effect
return delegated.side_effect
def __set_side_effect(self, value):
value = _try_iter(value)
delegated = self._mock_delegate
if delegated is None:
self._mock_side_effect = value
else:
delegated.side_effect = value
side_effect = property(__get_side_effect, __set_side_effect)
def reset_mock(self):
"Restore the mock object to its initial state."
self.called = False
self.call_args = None
self.call_count = 0
self.mock_calls = _CallList()
self.call_args_list = _CallList()
self.method_calls = _CallList()
for child in self._mock_children.values():
if isinstance(child, _SpecState):
continue
child.reset_mock()
ret = self._mock_return_value
if _is_instance_mock(ret) and ret is not self:
ret.reset_mock()
def configure_mock(self, **kwargs):
"""Set attributes on the mock through keyword arguments.
Attributes plus return values and side effects can be set on child
mocks using standard dot notation and unpacking a dictionary in the
method call:
>>> attrs = {'method.return_value': 3, 'other.side_effect': KeyError}
>>> mock.configure_mock(**attrs)"""
for arg, val in sorted(kwargs.items(),
# we sort on the number of dots so that
# attributes are set before we set attributes on
# attributes
key=lambda entry: entry[0].count('.')):
args = arg.split('.')
final = args.pop()
obj = self
for entry in args:
obj = getattr(obj, entry)
setattr(obj, final, val)
def __getattr__(self, name):
if name == '_mock_methods':
raise AttributeError(name)
elif self._mock_methods is not None:
if name not in self._mock_methods or name in _all_magics:
raise AttributeError("Mock object has no attribute %r" % name)
elif _is_magic(name):
raise AttributeError(name)
result = self._mock_children.get(name)
if result is _deleted:
raise AttributeError(name)
elif result is None:
wraps = None
if self._mock_wraps is not None:
# XXXX should we get the attribute without triggering code
# execution?
wraps = getattr(self._mock_wraps, name)
result = self._get_child_mock(
parent=self, name=name, wraps=wraps, _new_name=name,
_new_parent=self
)
self._mock_children[name] = result
elif isinstance(result, _SpecState):
result = create_autospec(
result.spec, result.spec_set, result.instance,
result.parent, result.name
)
self._mock_children[name] = result
return result
def __repr__(self):
_name_list = [self._mock_new_name]
_parent = self._mock_new_parent
last = self
dot = '.'
if _name_list == ['()']:
dot = ''
seen = set()
while _parent is not None:
last = _parent
_name_list.append(_parent._mock_new_name + dot)
dot = '.'
if _parent._mock_new_name == '()':
dot = ''
_parent = _parent._mock_new_parent
# use ids here so as not to call __hash__ on the mocks
if id(_parent) in seen:
break
seen.add(id(_parent))
_name_list = list(reversed(_name_list))
_first = last._mock_name or 'mock'
if len(_name_list) > 1:
if _name_list[1] not in ('()', '().'):
_first += '.'
_name_list[0] = _first
name = ''.join(_name_list)
name_string = ''
if name not in ('mock', 'mock.'):
name_string = ' name=%r' % name
spec_string = ''
if self._spec_class is not None:
spec_string = ' spec=%r'
if self._spec_set:
spec_string = ' spec_set=%r'
spec_string = spec_string % self._spec_class.__name__
return "<%s%s%s id='%s'>" % (
type(self).__name__,
name_string,
spec_string,
id(self)
)
def __dir__(self):
"""Filter the output of `dir(mock)` to only useful members."""
if not FILTER_DIR:
return object.__dir__(self)
extras = self._mock_methods or []
from_type = dir(type(self))
from_dict = list(self.__dict__)
from_type = [e for e in from_type if not e.startswith('_')]
from_dict = [e for e in from_dict if not e.startswith('_') or
_is_magic(e)]
return sorted(set(extras + from_type + from_dict +
list(self._mock_children)))
def __setattr__(self, name, value):
if name in _allowed_names:
# property setters go through here
return object.__setattr__(self, name, value)
elif (self._spec_set and self._mock_methods is not None and
name not in self._mock_methods and
name not in self.__dict__):
raise AttributeError("Mock object has no attribute '%s'" % name)
elif name in _unsupported_magics:
msg = 'Attempting to set unsupported magic method %r.' % name
raise AttributeError(msg)
elif name in _all_magics:
if self._mock_methods is not None and name not in self._mock_methods:
raise AttributeError("Mock object has no attribute '%s'" % name)
if not _is_instance_mock(value):
setattr(type(self), name, _get_method(name, value))
original = value
value = lambda *args, **kw: original(self, *args, **kw)
else:
# only set _new_name and not name so that mock_calls is tracked
# but not method calls
_check_and_set_parent(self, value, None, name)
setattr(type(self), name, value)
self._mock_children[name] = value
elif name == '__class__':
self._spec_class = value
return
else:
if _check_and_set_parent(self, value, name, name):
self._mock_children[name] = value
return object.__setattr__(self, name, value)
def __delattr__(self, name):
if name in _all_magics and name in type(self).__dict__:
delattr(type(self), name)
if name not in self.__dict__:
# for magic methods that are still MagicProxy objects and
# not set on the instance itself
return
if name in self.__dict__:
object.__delattr__(self, name)
obj = self._mock_children.get(name, _missing)
if obj is _deleted:
raise AttributeError(name)
if obj is not _missing:
del self._mock_children[name]
self._mock_children[name] = _deleted
def _format_mock_call_signature(self, args, kwargs):
name = self._mock_name or 'mock'
return _format_call_signature(name, args, kwargs)
def _format_mock_failure_message(self, args, kwargs):
message = 'Expected call: %s\nActual call: %s'
expected_string = self._format_mock_call_signature(args, kwargs)
call_args = self.call_args
if len(call_args) == 3:
call_args = call_args[1:]
actual_string = self._format_mock_call_signature(*call_args)
return message % (expected_string, actual_string)
def assert_called_with(_mock_self, *args, **kwargs):
"""assert that the mock was called with the specified arguments.
Raises an AssertionError if the args and keyword args passed in are
different to the last call to the mock."""
self = _mock_self
if self.call_args is None:
expected = self._format_mock_call_signature(args, kwargs)
raise AssertionError('Expected call: %s\nNot called' % (expected,))
if self.call_args != (args, kwargs):
msg = self._format_mock_failure_message(args, kwargs)
raise AssertionError(msg)
def assert_called_once_with(_mock_self, *args, **kwargs):
"""assert that the mock was called exactly once and with the specified
arguments."""
self = _mock_self
if not self.call_count == 1:
msg = ("Expected '%s' to be called once. Called %s times." %
(self._mock_name or 'mock', self.call_count))
raise AssertionError(msg)
return self.assert_called_with(*args, **kwargs)
def assert_has_calls(self, calls, any_order=False):
"""assert the mock has been called with the specified calls.
The `mock_calls` list is checked for the calls.
If `any_order` is False (the default) then the calls must be
sequential. There can be extra calls before or after the
specified calls.
If `any_order` is True then the calls can be in any order, but
they must all appear in `mock_calls`."""
if not any_order:
if calls not in self.mock_calls:
raise AssertionError(
'Calls not found.\nExpected: %r\n'
'Actual: %r' % (calls, self.mock_calls)
)
return
all_calls = list(self.mock_calls)
not_found = []
for kall in calls:
try:
all_calls.remove(kall)
except ValueError:
not_found.append(kall)
if not_found:
raise AssertionError(
'%r not all found in call list' % (tuple(not_found),)
)
def assert_any_call(self, *args, **kwargs):
"""assert the mock has been called with the specified arguments.
The assert passes if the mock has *ever* been called, unlike
`assert_called_with` and `assert_called_once_with` that only pass if
the call is the most recent one."""
kall = call(*args, **kwargs)
if kall not in self.call_args_list:
expected_string = self._format_mock_call_signature(args, kwargs)
raise AssertionError(
'%s call not found' % expected_string
)
def _get_child_mock(self, **kw):
"""Create the child mocks for attributes and return value.
By default child mocks will be the same type as the parent.
Subclasses of Mock may want to override this to customize the way
child mocks are made.
For non-callable mocks the callable variant will be used (rather than
any custom subclass)."""
_type = type(self)
if not issubclass(_type, CallableMixin):
if issubclass(_type, NonCallableMagicMock):
klass = MagicMock
elif issubclass(_type, NonCallableMock):
klass = Mock
else:
klass = _type.__mro__[1]
return klass(**kw)
def _try_iter(obj):
if obj is None:
return obj
if _is_exception(obj):
return obj
if _callable(obj):
return obj
try:
return iter(obj)
except TypeError:
# XXXX backwards compatibility
# but this will blow up on first call - so maybe we should fail early?
return obj
class CallableMixin(Base):
def __init__(self, spec=None, side_effect=None, return_value=DEFAULT,
wraps=None, name=None, spec_set=None, parent=None,
_spec_state=None, _new_name='', _new_parent=None, **kwargs):
self.__dict__['_mock_return_value'] = return_value
_safe_super(CallableMixin, self).__init__(
spec, wraps, name, spec_set, parent,
_spec_state, _new_name, _new_parent, **kwargs
)
self.side_effect = side_effect
def _mock_check_sig(self, *args, **kwargs):
# stub method that can be replaced with one with a specific signature
pass
def __call__(_mock_self, *args, **kwargs):
# can't use self in case a function / method we are mocking uses self
# in the signature
_mock_self._mock_check_sig(*args, **kwargs)
return _mock_self._mock_call(*args, **kwargs)
def _mock_call(_mock_self, *args, **kwargs):
self = _mock_self
self.called = True
self.call_count += 1
self.call_args = _Call((args, kwargs), two=True)
self.call_args_list.append(_Call((args, kwargs), two=True))
_new_name = self._mock_new_name
_new_parent = self._mock_new_parent
self.mock_calls.append(_Call(('', args, kwargs)))
seen = set()
skip_next_dot = _new_name == '()'
do_method_calls = self._mock_parent is not None
name = self._mock_name
while _new_parent is not None:
this_mock_call = _Call((_new_name, args, kwargs))
if _new_parent._mock_new_name:
dot = '.'
if skip_next_dot:
dot = ''
skip_next_dot = False
if _new_parent._mock_new_name == '()':
skip_next_dot = True
_new_name = _new_parent._mock_new_name + dot + _new_name
if do_method_calls:
if _new_name == name:
this_method_call = this_mock_call
else:
this_method_call = _Call((name, args, kwargs))
_new_parent.method_calls.append(this_method_call)
do_method_calls = _new_parent._mock_parent is not None
if do_method_calls:
name = _new_parent._mock_name + '.' + name
_new_parent.mock_calls.append(this_mock_call)
_new_parent = _new_parent._mock_new_parent
# use ids here so as not to call __hash__ on the mocks
_new_parent_id = id(_new_parent)
if _new_parent_id in seen:
break
seen.add(_new_parent_id)
ret_val = DEFAULT
effect = self.side_effect
if effect is not None:
if _is_exception(effect):
raise effect
if not _callable(effect):
result = next(effect)
if _is_exception(result):
raise result
if result is DEFAULT:
result = self.return_value
return result
ret_val = effect(*args, **kwargs)
if ret_val is DEFAULT:
ret_val = self.return_value
if (self._mock_wraps is not None and
self._mock_return_value is DEFAULT):
return self._mock_wraps(*args, **kwargs)
if ret_val is DEFAULT:
ret_val = self.return_value
return ret_val
class Mock(CallableMixin, NonCallableMock):
"""
Create a new `Mock` object. `Mock` takes several optional arguments
that specify the behaviour of the Mock object:
* `spec`: This can be either a list of strings or an existing object (a
class or instance) that acts as the specification for the mock object. If
you pass in an object then a list of strings is formed by calling dir on
the object (excluding unsupported magic attributes and methods). Accessing
any attribute not in this list will raise an `AttributeError`.
If `spec` is an object (rather than a list of strings) then
`mock.__class__` returns the class of the spec object. This allows mocks
to pass `isinstance` tests.
* `spec_set`: A stricter variant of `spec`. If used, attempting to *set*
or get an attribute on the mock that isn't on the object passed as
`spec_set` will raise an `AttributeError`.
* `side_effect`: A function to be called whenever the Mock is called. See
the `side_effect` attribute. Useful for raising exceptions or
dynamically changing return values. The function is called with the same
arguments as the mock, and unless it returns `DEFAULT`, the return
value of this function is used as the return value.
If `side_effect` is an iterable then each call to the mock will return
the next value from the iterable. If any of the members of the iterable
are exceptions they will be raised instead of returned.
* `return_value`: The value returned when the mock is called. By default
this is a new Mock (created on first access). See the
`return_value` attribute.
* `wraps`: Item for the mock object to wrap. If `wraps` is not None then
calling the Mock will pass the call through to the wrapped object
(returning the real result). Attribute access on the mock will return a
Mock object that wraps the corresponding attribute of the wrapped object
(so attempting to access an attribute that doesn't exist will raise an
`AttributeError`).
If the mock has an explicit `return_value` set then calls are not passed
to the wrapped object and the `return_value` is returned instead.
* `name`: If the mock has a name then it will be used in the repr of the
mock. This can be useful for debugging. The name is propagated to child
mocks.
`Mock` also accepts arbitrary keyword arguments at construction time. These
will be used to set attributes on the mock after it is created.
"""
def _dot_lookup(thing, comp, import_path):
try:
return getattr(thing, comp)
except AttributeError:
__import__(import_path)
return getattr(thing, comp)
def _importer(target):
components = target.split('.')
import_path = components.pop(0)
thing = __import__(import_path)
for comp in components:
import_path += ".%s" % comp
thing = _dot_lookup(thing, comp, import_path)
return thing
def _is_started(patcher):
# XXXX horrible
return hasattr(patcher, 'is_local')
class _patch(object):
attribute_name = None
_active_patches = set()
def __init__(
self, getter, attribute, new, spec, create,
spec_set, autospec, new_callable, kwargs
):
if new_callable is not None:
if new is not DEFAULT:
raise ValueError(
"Cannot use 'new' and 'new_callable' together"
)
if autospec is not None:
raise ValueError(
"Cannot use 'autospec' and 'new_callable' together"
)
self.getter = getter
self.attribute = attribute
self.new = new
self.new_callable = new_callable
self.spec = spec
self.create = create
self.has_local = False
self.spec_set = spec_set
self.autospec = autospec
self.kwargs = kwargs
self.additional_patchers = []
def copy(self):
patcher = _patch(
self.getter, self.attribute, self.new, self.spec,
self.create, self.spec_set,
self.autospec, self.new_callable, self.kwargs
)
patcher.attribute_name = self.attribute_name
patcher.additional_patchers = [
p.copy() for p in self.additional_patchers
]
return patcher
def __call__(self, func):
if isinstance(func, type):
return self.decorate_class(func)
return self.decorate_callable(func)
def decorate_class(self, klass):
for attr in dir(klass):
if not attr.startswith(patch.TEST_PREFIX):
continue
attr_value = getattr(klass, attr)
if not hasattr(attr_value, "__call__"):
continue
patcher = self.copy()
setattr(klass, attr, patcher(attr_value))
return klass
def decorate_callable(self, func):
if hasattr(func, 'patchings'):
func.patchings.append(self)
return func
@wraps(func)
def patched(*args, **keywargs):
extra_args = []
entered_patchers = []
exc_info = tuple()
try:
for patching in patched.patchings:
arg = patching.__enter__()
entered_patchers.append(patching)
if patching.attribute_name is not None:
keywargs.update(arg)
elif patching.new is DEFAULT:
extra_args.append(arg)
args += tuple(extra_args)
return func(*args, **keywargs)
except:
if (patching not in entered_patchers and
_is_started(patching)):
# the patcher may have been started, but an exception
# raised whilst entering one of its additional_patchers
entered_patchers.append(patching)
# Pass the exception to __exit__
exc_info = sys.exc_info()
# re-raise the exception
raise
finally:
for patching in reversed(entered_patchers):
patching.__exit__(*exc_info)
patched.patchings = [self]
return patched
def get_original(self):
target = self.getter()
name = self.attribute
original = DEFAULT
local = False
try:
original = target.__dict__[name]
except (AttributeError, KeyError):
original = getattr(target, name, DEFAULT)
else:
local = True
if not self.create and original is DEFAULT:
raise AttributeError(
"%s does not have the attribute %r" % (target, name)
)
return original, local
def __enter__(self):
"""Perform the patch."""
new, spec, spec_set = self.new, self.spec, self.spec_set
autospec, kwargs = self.autospec, self.kwargs
new_callable = self.new_callable
self.target = self.getter()
# normalise False to None
if spec is False:
spec = None
if spec_set is False:
spec_set = None
if autospec is False:
autospec = None
if spec is not None and autospec is not None:
raise TypeError("Can't specify spec and autospec")
if ((spec is not None or autospec is not None) and
spec_set not in (True, None)):
raise TypeError("Can't provide explicit spec_set *and* spec or autospec")
original, local = self.get_original()
if new is DEFAULT and autospec is None:
inherit = False
if spec is True:
# set spec to the object we are replacing
spec = original
if spec_set is True:
spec_set = original
spec = None
elif spec is not None:
if spec_set is True:
spec_set = spec
spec = None
elif spec_set is True:
spec_set = original
if spec is not None or spec_set is not None:
if original is DEFAULT:
raise TypeError("Can't use 'spec' with create=True")
if isinstance(original, type):
# If we're patching out a class and there is a spec
inherit = True
Klass = MagicMock
_kwargs = {}
if new_callable is not None:
Klass = new_callable
elif spec is not None or spec_set is not None:
this_spec = spec
if spec_set is not None:
this_spec = spec_set
if _is_list(this_spec):
not_callable = '__call__' not in this_spec
else:
not_callable = not callable(this_spec)
if not_callable:
Klass = NonCallableMagicMock
if spec is not None:
_kwargs['spec'] = spec
if spec_set is not None:
_kwargs['spec_set'] = spec_set
# add a name to mocks
if (isinstance(Klass, type) and
issubclass(Klass, NonCallableMock) and self.attribute):
_kwargs['name'] = self.attribute
_kwargs.update(kwargs)
new = Klass(**_kwargs)
if inherit and _is_instance_mock(new):
# we can only tell if the instance should be callable if the
# spec is not a list
this_spec = spec
if spec_set is not None:
this_spec = spec_set
if (not _is_list(this_spec) and not
_instance_callable(this_spec)):
Klass = NonCallableMagicMock
_kwargs.pop('name')
new.return_value = Klass(_new_parent=new, _new_name='()',
**_kwargs)
elif autospec is not None:
# spec is ignored, new *must* be default, spec_set is treated
# as a boolean. Should we check spec is not None and that spec_set
# is a bool?
if new is not DEFAULT:
raise TypeError(
"autospec creates the mock for you. Can't specify "
"autospec and new."
)
if original is DEFAULT:
raise TypeError("Can't use 'autospec' with create=True")
spec_set = bool(spec_set)
if autospec is True:
autospec = original
new = create_autospec(autospec, spec_set=spec_set,
_name=self.attribute, **kwargs)
elif kwargs:
# can't set keyword args when we aren't creating the mock
# XXXX If new is a Mock we could call new.configure_mock(**kwargs)
raise TypeError("Can't pass kwargs to a mock we aren't creating")
new_attr = new
self.temp_original = original
self.is_local = local
setattr(self.target, self.attribute, new_attr)
if self.attribute_name is not None:
extra_args = {}
if self.new is DEFAULT:
extra_args[self.attribute_name] = new
for patching in self.additional_patchers:
arg = patching.__enter__()
if patching.new is DEFAULT:
extra_args.update(arg)
return extra_args
return new
def __exit__(self, *exc_info):
"""Undo the patch."""
if not _is_started(self):
raise RuntimeError('stop called on unstarted patcher')
if self.is_local and self.temp_original is not DEFAULT:
setattr(self.target, self.attribute, self.temp_original)
else:
delattr(self.target, self.attribute)
if not self.create and not hasattr(self.target, self.attribute):
# needed for proxy objects like django settings
setattr(self.target, self.attribute, self.temp_original)
del self.temp_original
del self.is_local
del self.target
for patcher in reversed(self.additional_patchers):
if _is_started(patcher):
patcher.__exit__(*exc_info)
def start(self):
"""Activate a patch, returning any created mock."""
result = self.__enter__()
self._active_patches.add(self)
return result
def stop(self):
"""Stop an active patch."""
self._active_patches.discard(self)
return self.__exit__()
def _get_target(target):
try:
target, attribute = target.rsplit('.', 1)
except (TypeError, ValueError):
raise TypeError("Need a valid target to patch. You supplied: %r" %
(target,))
getter = lambda: _importer(target)
return getter, attribute
def _patch_object(
target, attribute, new=DEFAULT, spec=None,
create=False, spec_set=None, autospec=None,
new_callable=None, **kwargs
):
"""
patch the named member (`attribute`) on an object (`target`) with a mock
object.
`patch.object` can be used as a decorator, class decorator or a context
manager. Arguments `new`, `spec`, `create`, `spec_set`,
`autospec` and `new_callable` have the same meaning as for `patch`. Like
`patch`, `patch.object` takes arbitrary keyword arguments for configuring
the mock object it creates.
When used as a class decorator `patch.object` honours `patch.TEST_PREFIX`
for choosing which methods to wrap.
"""
getter = lambda: target
return _patch(
getter, attribute, new, spec, create,
spec_set, autospec, new_callable, kwargs
)
def _patch_multiple(target, spec=None, create=False, spec_set=None,
autospec=None, new_callable=None, **kwargs):
"""Perform multiple patches in a single call. It takes the object to be
patched (either as an object or a string to fetch the object by importing)
and keyword arguments for the patches::
with patch.multiple(settings, FIRST_PATCH='one', SECOND_PATCH='two'):
...
Use `DEFAULT` as the value if you want `patch.multiple` to create
mocks for you. In this case the created mocks are passed into a decorated
function by keyword, and a dictionary is returned when `patch.multiple` is
used as a context manager.
`patch.multiple` can be used as a decorator, class decorator or a context
manager. The arguments `spec`, `spec_set`, `create`,
`autospec` and `new_callable` have the same meaning as for `patch`. These
arguments will be applied to *all* patches done by `patch.multiple`.
When used as a class decorator `patch.multiple` honours `patch.TEST_PREFIX`
for choosing which methods to wrap.
"""
if type(target) is str:
getter = lambda: _importer(target)
else:
getter = lambda: target
if not kwargs:
raise ValueError(
'Must supply at least one keyword argument with patch.multiple'
)
# need to wrap in a list for python 3, where items is a view
items = list(kwargs.items())
attribute, new = items[0]
patcher = _patch(
getter, attribute, new, spec, create, spec_set,
autospec, new_callable, {}
)
patcher.attribute_name = attribute
for attribute, new in items[1:]:
this_patcher = _patch(
getter, attribute, new, spec, create, spec_set,
autospec, new_callable, {}
)
this_patcher.attribute_name = attribute
patcher.additional_patchers.append(this_patcher)
return patcher
def patch(
target, new=DEFAULT, spec=None, create=False,
spec_set=None, autospec=None, new_callable=None, **kwargs
):
"""
`patch` acts as a function decorator, class decorator or a context
manager. Inside the body of the function or with statement, the `target`
is patched with a `new` object. When the function/with statement exits
the patch is undone.
If `new` is omitted, then the target is replaced with a
`MagicMock`. If `patch` is used as a decorator and `new` is
omitted, the created mock is passed in as an extra argument to the
decorated function. If `patch` is used as a context manager the created
mock is returned by the context manager.
`target` should be a string in the form `'package.module.ClassName'`. The
`target` is imported and the specified object replaced with the `new`
object, so the `target` must be importable from the environment you are
calling `patch` from. The target is imported when the decorated function
is executed, not at decoration time.
The `spec` and `spec_set` keyword arguments are passed to the `MagicMock`
if patch is creating one for you.
In addition you can pass `spec=True` or `spec_set=True`, which causes
patch to pass in the object being mocked as the spec/spec_set object.
`new_callable` allows you to specify a different class, or callable object,
that will be called to create the `new` object. By default `MagicMock` is
used.
A more powerful form of `spec` is `autospec`. If you set `autospec=True`
then the mock will be created with a spec from the object being replaced.
All attributes of the mock will also have the spec of the corresponding
attribute of the object being replaced. Methods and functions being
mocked will have their arguments checked and will raise a `TypeError` if
they are called with the wrong signature. For mocks replacing a class,
their return value (the 'instance') will have the same spec as the class.
Instead of `autospec=True` you can pass `autospec=some_object` to use an
arbitrary object as the spec instead of the one being replaced.
By default `patch` will fail to replace attributes that don't exist. If
you pass in `create=True`, and the attribute doesn't exist, patch will
create the attribute for you when the patched function is called, and
delete it again afterwards. This is useful for writing tests against
attributes that your production code creates at runtime. It is off by
default because it can be dangerous. With it switched on you can write
passing tests against APIs that don't actually exist!
Patch can be used as a `TestCase` class decorator. It works by
decorating each test method in the class. This reduces the boilerplate
code when your test methods share a common set of patches. `patch` finds
tests by looking for method names that start with `patch.TEST_PREFIX`.
By default this is `test`, which matches the way `unittest` finds tests.
You can specify an alternative prefix by setting `patch.TEST_PREFIX`.
Patch can be used as a context manager, with the with statement. Here the
patching applies to the indented block after the with statement. If you
use "as" then the patched object will be bound to the name after the
"as"; very useful if `patch` is creating a mock object for you.
`patch` takes arbitrary keyword arguments. These will be passed to
the `Mock` (or `new_callable`) on construction.
`patch.dict(...)`, `patch.multiple(...)` and `patch.object(...)` are
available for alternate use-cases.
"""
getter, attribute = _get_target(target)
return _patch(
getter, attribute, new, spec, create,
spec_set, autospec, new_callable, kwargs
)
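# Illustrative usage (editor's sketch; 'mypkg.mymod.Fetcher' is a made-up
# target). Used as a context manager, patch returns the created MagicMock;
# used as a decorator, the mock is passed to the test as an extra argument.
# >>> with patch('mypkg.mymod.Fetcher') as MockFetcher:
# ...     MockFetcher.return_value.get.return_value = b'data'
# ...     assert mypkg.mymod.Fetcher().get() == b'data'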
class _patch_dict(object):
"""
Patch a dictionary, or dictionary like object, and restore the dictionary
to its original state after the test.
`in_dict` can be a dictionary or a mapping like container. If it is a
mapping then it must at least support getting, setting and deleting items
plus iterating over keys.
`in_dict` can also be a string specifying the name of the dictionary, which
will then be fetched by importing it.
`values` can be a dictionary of values to set in the dictionary. `values`
can also be an iterable of `(key, value)` pairs.
If `clear` is True then the dictionary will be cleared before the new
values are set.
`patch.dict` can also be called with arbitrary keyword arguments to set
values in the dictionary::
with patch.dict('sys.modules', mymodule=Mock(), other_module=Mock()):
...
`patch.dict` can be used as a context manager, decorator or class
decorator. When used as a class decorator `patch.dict` honours
`patch.TEST_PREFIX` for choosing which methods to wrap.
"""
def __init__(self, in_dict, values=(), clear=False, **kwargs):
if isinstance(in_dict, str):
in_dict = _importer(in_dict)
self.in_dict = in_dict
# support any argument supported by dict(...) constructor
self.values = dict(values)
self.values.update(kwargs)
self.clear = clear
self._original = None
def __call__(self, f):
if isinstance(f, type):
return self.decorate_class(f)
@wraps(f)
def _inner(*args, **kw):
self._patch_dict()
try:
return f(*args, **kw)
finally:
self._unpatch_dict()
return _inner
def decorate_class(self, klass):
for attr in dir(klass):
attr_value = getattr(klass, attr)
if (attr.startswith(patch.TEST_PREFIX) and
hasattr(attr_value, "__call__")):
decorator = _patch_dict(self.in_dict, self.values, self.clear)
decorated = decorator(attr_value)
setattr(klass, attr, decorated)
return klass
def __enter__(self):
"""Patch the dict."""
self._patch_dict()
def _patch_dict(self):
values = self.values
in_dict = self.in_dict
clear = self.clear
try:
original = in_dict.copy()
except AttributeError:
# dict like object with no copy method
# must support iteration over keys
original = {}
for key in in_dict:
original[key] = in_dict[key]
self._original = original
if clear:
_clear_dict(in_dict)
try:
in_dict.update(values)
except AttributeError:
# dict like object with no update method
for key in values:
in_dict[key] = values[key]
def _unpatch_dict(self):
in_dict = self.in_dict
original = self._original
_clear_dict(in_dict)
try:
in_dict.update(original)
except AttributeError:
for key in original:
in_dict[key] = original[key]
def __exit__(self, *args):
"""Unpatch the dict."""
self._unpatch_dict()
return False
start = __enter__
stop = __exit__
def _clear_dict(in_dict):
try:
in_dict.clear()
except AttributeError:
keys = list(in_dict)
for key in keys:
del in_dict[key]
def _patch_stopall():
"""Stop all active patches."""
for patch in list(_patch._active_patches):
patch.stop()
patch.object = _patch_object
patch.dict = _patch_dict
patch.multiple = _patch_multiple
patch.stopall = _patch_stopall
patch.TEST_PREFIX = 'test'
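# Illustrative usage of the variants wired up above (editor's sketch;
# SomeClass is a hypothetical target, and os must be imported for the
# second example):
# >>> with patch.object(SomeClass, 'method', return_value=None) as m:
# ...     _ = SomeClass.method(3)
# ...     m.assert_called_once_with(3)
# >>> with patch.dict('os.environ', {'KEY': 'value'}):
# ...     assert os.environ['KEY'] == 'value'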
magic_methods = (
"lt le gt ge eq ne "
"getitem setitem delitem "
"len contains iter "
"hash str sizeof "
"enter exit "
"divmod neg pos abs invert "
"complex int float index "
"trunc floor ceil "
"bool next "
)
numerics = "add sub mul div floordiv mod lshift rshift and xor or pow "
inplace = ' '.join('i%s' % n for n in numerics.split())
right = ' '.join('r%s' % n for n in numerics.split())
# not including __prepare__, __instancecheck__, __subclasscheck__
# (as they are metaclass methods)
# __del__ is not supported at all as it causes problems if it exists
_non_defaults = set('__%s__' % method for method in [
'get', 'set', 'delete', 'reversed', 'missing', 'reduce', 'reduce_ex',
'getinitargs', 'getnewargs', 'getstate', 'setstate', 'getformat',
'setformat', 'repr', 'dir', 'subclasses', 'format',
])
def _get_method(name, func):
"Turns a callable object (like a mock) into a real function"
def method(self, *args, **kw):
return func(self, *args, **kw)
method.__name__ = name
return method
_magics = set(
'__%s__' % method for method in
' '.join([magic_methods, numerics, inplace, right]).split()
)
_all_magics = _magics | _non_defaults
_unsupported_magics = set([
'__getattr__', '__setattr__',
'__init__', '__new__', '__prepare__',
'__instancecheck__', '__subclasscheck__',
'__del__'
])
_calculate_return_value = {
'__hash__': lambda self: object.__hash__(self),
'__str__': lambda self: object.__str__(self),
'__sizeof__': lambda self: object.__sizeof__(self),
}
_return_values = {
'__lt__': NotImplemented,
'__gt__': NotImplemented,
'__le__': NotImplemented,
'__ge__': NotImplemented,
'__int__': 1,
'__contains__': False,
'__len__': 0,
'__exit__': False,
'__complex__': 1j,
'__float__': 1.0,
'__bool__': True,
'__index__': 1,
}
def _get_eq(self):
def __eq__(other):
ret_val = self.__eq__._mock_return_value
if ret_val is not DEFAULT:
return ret_val
return self is other
return __eq__
def _get_ne(self):
def __ne__(other):
ret_val = self.__ne__._mock_return_value
if ret_val is not DEFAULT:
return ret_val
return self is not other
return __ne__
def _get_iter(self):
def __iter__():
ret_val = self.__iter__._mock_return_value
if ret_val is DEFAULT:
return iter([])
# if ret_val was already an iterator, then calling iter on it should
# return the iterator unchanged
return iter(ret_val)
return __iter__
_side_effect_methods = {
'__eq__': _get_eq,
'__ne__': _get_ne,
'__iter__': _get_iter,
}
def _set_return_value(mock, method, name):
fixed = _return_values.get(name, DEFAULT)
if fixed is not DEFAULT:
method.return_value = fixed
return
return_calculator = _calculate_return_value.get(name)
if return_calculator is not None:
try:
return_value = return_calculator(mock)
except AttributeError:
# XXXX why do we return AttributeError here?
# set it as a side_effect instead?
return_value = AttributeError(name)
method.return_value = return_value
return
side_effector = _side_effect_methods.get(name)
if side_effector is not None:
method.side_effect = side_effector(mock)
class MagicMixin(object):
def __init__(self, *args, **kw):
_safe_super(MagicMixin, self).__init__(*args, **kw)
self._mock_set_magics()
def _mock_set_magics(self):
these_magics = _magics
if self._mock_methods is not None:
these_magics = _magics.intersection(self._mock_methods)
remove_magics = _magics - these_magics
for entry in remove_magics:
if entry in type(self).__dict__:
# remove unneeded magic methods
delattr(self, entry)
# don't overwrite existing attributes if called a second time
these_magics = these_magics - set(type(self).__dict__)
_type = type(self)
for entry in these_magics:
setattr(_type, entry, MagicProxy(entry, self))
class NonCallableMagicMock(MagicMixin, NonCallableMock):
"""A version of `MagicMock` that isn't callable."""
def mock_add_spec(self, spec, spec_set=False):
"""Add a spec to a mock. `spec` can either be an object or a
list of strings. Only attributes on the `spec` can be fetched as
attributes from the mock.
If `spec_set` is True then only attributes on the spec can be set."""
self._mock_add_spec(spec, spec_set)
self._mock_set_magics()
class MagicMock(MagicMixin, Mock):
"""
MagicMock is a subclass of Mock with default implementations
of most of the magic methods. You can use MagicMock without having to
configure the magic methods yourself.
If you use the `spec` or `spec_set` arguments then *only* magic
methods that exist in the spec will be created.
Attributes and the return value of a `MagicMock` will also be `MagicMocks`.
"""
def mock_add_spec(self, spec, spec_set=False):
"""Add a spec to a mock. `spec` can either be an object or a
list of strings. Only attributes on the `spec` can be fetched as
attributes from the mock.
If `spec_set` is True then only attributes on the spec can be set."""
self._mock_add_spec(spec, spec_set)
self._mock_set_magics()
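# Illustrative usage (editor's sketch): magic methods come preconfigured
# with sensible defaults and can be overridden individually.
# >>> m = MagicMock()
# >>> len(m), int(m), bool(m)
# (0, 1, True)
# >>> m.__len__.return_value = 7
# >>> len(m)
# 7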
class MagicProxy(object):
def __init__(self, name, parent):
self.name = name
self.parent = parent
def __call__(self, *args, **kwargs):
m = self.create_mock()
return m(*args, **kwargs)
def create_mock(self):
entry = self.name
parent = self.parent
m = parent._get_child_mock(name=entry, _new_name=entry,
_new_parent=parent)
setattr(parent, entry, m)
_set_return_value(parent, m, entry)
return m
def __get__(self, obj, _type=None):
return self.create_mock()
class _ANY(object):
"A helper object that compares equal to everything."
def __eq__(self, other):
return True
def __ne__(self, other):
return False
def __repr__(self):
return '<ANY>'
ANY = _ANY()
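# Illustrative usage (editor's sketch): ANY compares equal to everything,
# so it can stand in for arguments you don't care about in call assertions.
# >>> m = MagicMock()
# >>> _ = m('fixed', token=object())
# >>> m.assert_called_once_with('fixed', token=ANY)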
def _format_call_signature(name, args, kwargs):
message = '%s(%%s)' % name
formatted_args = ''
args_string = ', '.join([repr(arg) for arg in args])
kwargs_string = ', '.join([
'%s=%r' % (key, value) for key, value in kwargs.items()
])
if args_string:
formatted_args = args_string
if kwargs_string:
if formatted_args:
formatted_args += ', '
formatted_args += kwargs_string
return message % formatted_args
class _Call(tuple):
"""
A tuple for holding the results of a call to a mock, either in the form
`(args, kwargs)` or `(name, args, kwargs)`.
If args or kwargs are empty then a call tuple will compare equal to
a tuple without those values. This makes comparisons less verbose::
_Call(('name', (), {})) == ('name',)
_Call(('name', (1,), {})) == ('name', (1,))
_Call(((), {'a': 'b'})) == ({'a': 'b'},)
The `_Call` object provides a useful shortcut for comparing with call::
_Call(((1, 2), {'a': 3})) == call(1, 2, a=3)
_Call(('foo', (1, 2), {'a': 3})) == call.foo(1, 2, a=3)
If the _Call has no name then it will match any name.
"""
def __new__(cls, value=(), name=None, parent=None, two=False,
from_kall=True):
name = ''
args = ()
kwargs = {}
_len = len(value)
if _len == 3:
name, args, kwargs = value
elif _len == 2:
first, second = value
if isinstance(first, str):
name = first
if isinstance(second, tuple):
args = second
else:
kwargs = second
else:
args, kwargs = first, second
elif _len == 1:
value, = value
if isinstance(value, str):
name = value
elif isinstance(value, tuple):
args = value
else:
kwargs = value
if two:
return tuple.__new__(cls, (args, kwargs))
return tuple.__new__(cls, (name, args, kwargs))
def __init__(self, value=(), name=None, parent=None, two=False,
from_kall=True):
self.name = name
self.parent = parent
self.from_kall = from_kall
def __eq__(self, other):
if other is ANY:
return True
try:
len_other = len(other)
except TypeError:
return False
self_name = ''
if len(self) == 2:
self_args, self_kwargs = self
else:
self_name, self_args, self_kwargs = self
other_name = ''
if len_other == 0:
other_args, other_kwargs = (), {}
elif len_other == 3:
other_name, other_args, other_kwargs = other
elif len_other == 1:
value, = other
if isinstance(value, tuple):
other_args = value
other_kwargs = {}
elif isinstance(value, str):
other_name = value
other_args, other_kwargs = (), {}
else:
other_args = ()
other_kwargs = value
else:
# len 2
# could be (name, args) or (name, kwargs) or (args, kwargs)
first, second = other
if isinstance(first, str):
other_name = first
if isinstance(second, tuple):
other_args, other_kwargs = second, {}
else:
other_args, other_kwargs = (), second
else:
other_args, other_kwargs = first, second
if self_name and other_name != self_name:
return False
# this order is important for ANY to work!
return (other_args, other_kwargs) == (self_args, self_kwargs)
def __ne__(self, other):
return not self.__eq__(other)
def __call__(self, *args, **kwargs):
if self.name is None:
return _Call(('', args, kwargs), name='()')
name = self.name + '()'
return _Call((self.name, args, kwargs), name=name, parent=self)
def __getattr__(self, attr):
if self.name is None:
return _Call(name=attr, from_kall=False)
name = '%s.%s' % (self.name, attr)
return _Call(name=name, parent=self, from_kall=False)
def __repr__(self):
if not self.from_kall:
name = self.name or 'call'
if name.startswith('()'):
name = 'call%s' % name
return name
if len(self) == 2:
name = 'call'
args, kwargs = self
else:
name, args, kwargs = self
if not name:
name = 'call'
elif not name.startswith('()'):
name = 'call.%s' % name
else:
name = 'call%s' % name
return _format_call_signature(name, args, kwargs)
def call_list(self):
"""For a call object that represents multiple calls, `call_list`
returns a list of all the intermediate calls as well as the
final call."""
vals = []
thing = self
while thing is not None:
if thing.from_kall:
vals.append(thing)
thing = thing.parent
return _CallList(reversed(vals))
call = _Call(from_kall=False)
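# Illustrative usage (editor's sketch): `call` records attribute access and
# calls lazily; call_list() expands a chained call into every intermediate
# call, ready for comparison against mock_calls.
# >>> m = MagicMock()
# >>> _ = m.factory(1).build(x=2)
# >>> m.mock_calls == call.factory(1).build(x=2).call_list()
# True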
def create_autospec(spec, spec_set=False, instance=False, _parent=None,
_name=None, **kwargs):
"""Create a mock object using another object as a spec. Attributes on the
mock will use the corresponding attribute on the `spec` object as their
spec.
Functions or methods being mocked will have their arguments checked
to ensure that they are called with the correct signature.
If `spec_set` is True then attempting to set attributes that don't exist
on the spec object will raise an `AttributeError`.
If a class is used as a spec then the return value of the mock (the
instance of the class) will have the same spec. You can use a class as the
spec for an instance object by passing `instance=True`. The returned mock
will only be callable if instances of the mock are callable.
`create_autospec` also takes arbitrary keyword arguments that are passed to
the constructor of the created mock."""
if _is_list(spec):
# can't pass a list instance to the mock constructor as it will be
# interpreted as a list of strings
spec = type(spec)
is_type = isinstance(spec, type)
_kwargs = {'spec': spec}
if spec_set:
_kwargs = {'spec_set': spec}
elif spec is None:
# None we mock with a normal mock without a spec
_kwargs = {}
_kwargs.update(kwargs)
Klass = MagicMock
if type(spec) in DescriptorTypes:
# descriptors don't have a spec
# because we don't know what type they return
_kwargs = {}
elif not _callable(spec):
Klass = NonCallableMagicMock
elif is_type and instance and not _instance_callable(spec):
Klass = NonCallableMagicMock
_new_name = _name
if _parent is None:
# for a top level object no _new_name should be set
_new_name = ''
mock = Klass(parent=_parent, _new_parent=_parent, _new_name=_new_name,
name=_name, **_kwargs)
if isinstance(spec, FunctionTypes):
# should only happen at the top level because we don't
# recurse for functions
mock = _set_signature(mock, spec)
else:
_check_signature(spec, mock, is_type, instance)
if _parent is not None and not instance:
_parent._mock_children[_name] = mock
if is_type and not instance and 'return_value' not in kwargs:
mock.return_value = create_autospec(spec, spec_set, instance=True,
_name='()', _parent=mock)
for entry in dir(spec):
if _is_magic(entry):
# MagicMock already does the useful magic methods for us
continue
# XXXX do we need a better way of getting attributes without
# triggering code execution (?) Probably not - we need the actual
# object to mock it so we would rather trigger a property than mock
# the property descriptor. Likewise we want to mock out dynamically
# provided attributes.
# XXXX what about attributes that raise exceptions other than
# AttributeError on being fetched?
# we could be resilient against it, or catch and propagate the
# exception when the attribute is fetched from the mock
try:
original = getattr(spec, entry)
except AttributeError:
continue
kwargs = {'spec': original}
if spec_set:
kwargs = {'spec_set': original}
if not isinstance(original, FunctionTypes):
new = _SpecState(original, spec_set, mock, entry, instance)
mock._mock_children[entry] = new
else:
parent = mock
if isinstance(spec, FunctionTypes):
parent = mock.mock
new = MagicMock(parent=parent, name=entry, _new_name=entry,
_new_parent=parent, **kwargs)
mock._mock_children[entry] = new
skipfirst = _must_skip(spec, entry, is_type)
_check_signature(original, new, skipfirst=skipfirst)
# so functions created with _set_signature become instance attributes,
# *plus* their underlying mock exists in _mock_children of the parent
# mock. Adding to _mock_children may be unnecessary where we are also
# setting as an instance attribute?
if isinstance(new, FunctionTypes):
setattr(mock, entry, new)
return mock
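# Illustrative usage (editor's sketch): the autospecced mock enforces the
# original's signature, so wrongly-shaped calls fail loudly.
# >>> def add(a, b): return a + b
# >>> mock_add = create_autospec(add, return_value=3)
# >>> mock_add(1, 2)
# 3
# >>> mock_add(1)  # doctest: +SKIP  -- raises TypeError (missing argument)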
def _must_skip(spec, entry, is_type):
if not isinstance(spec, type):
if entry in getattr(spec, '__dict__', {}):
# instance attribute - shouldn't skip
return False
spec = spec.__class__
for klass in spec.__mro__:
result = klass.__dict__.get(entry, DEFAULT)
if result is DEFAULT:
continue
if isinstance(result, (staticmethod, classmethod)):
return False
return is_type
# shouldn't get here unless function is a dynamically provided attribute
# XXXX untested behaviour
return is_type
def _get_class(obj):
try:
return obj.__class__
except AttributeError:
# it is possible for objects to have no __class__
return type(obj)
class _SpecState(object):
def __init__(self, spec, spec_set=False, parent=None,
name=None, ids=None, instance=False):
self.spec = spec
self.ids = ids
self.spec_set = spec_set
self.parent = parent
self.instance = instance
self.name = name
FunctionTypes = (
# python function
type(create_autospec),
# instance method
type(ANY.__eq__),
)
file_spec = None
def mock_open(mock=None, read_data=''):
"""
A helper function to create a mock to replace the use of `open`. It works
for `open` called directly or used as a context manager.
The `mock` argument is the mock object to configure. If `None` (the
default) then a `MagicMock` will be created for you, with the API limited
to methods or attributes available on standard file handles.
`read_data` is a string for the `read` method of the file handle to return.
This is an empty string by default.
"""
global file_spec
if file_spec is None:
import _io
file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO))))
if mock is None:
mock = MagicMock(name='open', spec=open)
handle = MagicMock(spec=file_spec)
handle.write.return_value = None
handle.__enter__.return_value = handle
handle.read.return_value = read_data
mock.return_value = handle
return mock
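# Illustrative usage (editor's sketch; in real tests the result is usually
# installed with patch('somemodule.open', m, create=True)):
# >>> m = mock_open(read_data='file contents')
# >>> with m('any/path') as f:
# ...     f.read()
# 'file contents'
# >>> m.assert_called_with('any/path')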
class PropertyMock(Mock):
"""
A mock intended to be used as a property, or other descriptor, on a class.
`PropertyMock` provides `__get__` and `__set__` methods so you can specify
a return value when it is fetched.
Fetching a `PropertyMock` instance from an object calls the mock, with
no args. Setting it calls the mock with the value being set.
"""
def _get_child_mock(self, **kwargs):
return MagicMock(**kwargs)
def __get__(self, obj, obj_type):
return self()
def __set__(self, obj, val):
self(val)
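# Illustrative usage (editor's sketch): descriptors are looked up on the
# type, so a PropertyMock must be attached to the class, not the instance.
# >>> class Thing(object):
# ...     @property
# ...     def name(self): return 'real'
# >>> with patch.object(Thing, 'name', new_callable=PropertyMock) as p:
# ...     p.return_value = 'mocked'
# ...     assert Thing().name == 'mocked'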
| gpl-3.0 |
tdsimao/tt | django/contrib/gis/gdal/driver.py | 411 | 2411 | # prerequisites imports
from ctypes import c_void_p
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import OGRException
from django.contrib.gis.gdal.prototypes import ds as capi
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr/ogr__api_8h.html
#
# The OGR_Dr_* routines are relevant here.
class Driver(GDALBase):
"Wraps an OGR Data Source Driver."
# Case-insensitive aliases for OGR Drivers.
_alias = {'esri' : 'ESRI Shapefile',
'shp' : 'ESRI Shapefile',
'shape' : 'ESRI Shapefile',
'tiger' : 'TIGER',
'tiger/line' : 'TIGER',
}
def __init__(self, dr_input):
"Initializes an OGR driver on either a string or integer input."
if isinstance(dr_input, basestring):
# If a string name of the driver was passed in
self._register()
# Checking the alias dictionary (case-insensitive) to see if an alias
# exists for the given driver.
if dr_input.lower() in self._alias:
name = self._alias[dr_input.lower()]
else:
name = dr_input
# Attempting to get the OGR driver by the string name.
dr = capi.get_driver_by_name(name)
elif isinstance(dr_input, int):
self._register()
dr = capi.get_driver(dr_input)
elif isinstance(dr_input, c_void_p):
dr = dr_input
else:
raise OGRException('Unrecognized input type for OGR Driver: %s' % str(type(dr_input)))
# Making sure we get a valid pointer to the OGR Driver
if not dr:
raise OGRException('Could not initialize OGR Driver on input: %s' % str(dr_input))
self.ptr = dr
def __str__(self):
"Returns the string name of the OGR Driver."
return capi.get_driver_name(self.ptr)
def _register(self):
"Attempts to register all the data source drivers."
# Only register all if the driver count is 0 (or else all drivers
# will be registered over and over again)
if not self.driver_count: capi.register_all()
# Driver properties
@property
def driver_count(self):
"Returns the number of OGR data source drivers registered."
return capi.get_driver_count()
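# Illustrative usage (editor's sketch, not part of the original module;
# assumes the GDAL/OGR C library is installed and loadable):
# >>> from django.contrib.gis.gdal import Driver
# >>> dr = Driver('shp')   # case-insensitive alias for 'ESRI Shapefile'
# >>> str(dr)
# 'ESRI Shapefile'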
| gpl-2.0 |
40423136/2017springcd_hw | plugin/liquid_tags/spotify.py | 313 | 1304 | """
Spotify Tag
---------
This implements a Liquid-style spotify tag for Pelican,
based on the jekyll / octopress youtube tag [1]_
Syntax
------
{% spotify id %}
Example
-------
{% spotify 1HNZcRFlIKwHAJD3LxvX4d %}
Output
------
<iframe
src='https://embed.spotify.com/?uri=spotify:track:1HNZcRFlIKwHAJD3LxvX4d'
width='300' height='380' frameborder='0' allowtransparency='true'>
</iframe>
"""
import re
from .mdx_liquid_tags import LiquidTags
SYNTAX = "{% spotify id %}"
SPOTIFY = re.compile(r'(\w+)(\s+(\d+)\s(\d+))?')
@LiquidTags.register('spotify')
def spotify(preprocessor, tag, markup):
spotify_id = None
match = SPOTIFY.search(markup)
if match:
groups = match.groups()
spotify_id = groups[0]
if spotify_id:
spotify_out = """
<iframe src='https://embed.spotify.com/?uri=spotify:track:{}'
width='300'
height='380'
frameborder='0'
allowtransparency='true'></iframe>""".format(spotify_id).strip()
else:
raise ValueError("Error processing input, "
"expected syntax: {0}".format(SYNTAX))
return spotify_out
# ---------------------------------------------------
# This import allows image tag to be a Pelican plugin
from liquid_tags import register # noqa
| agpl-3.0 |
tequa/ammisoft | ammimain/WinPython-64bit-2.7.13.1Zero/python-2.7.13.amd64/Lib/distutils/command/bdist.py | 228 | 5596 | """distutils.command.bdist
Implements the Distutils 'bdist' command (create a built [binary]
distribution)."""
__revision__ = "$Id$"
import os
from distutils.util import get_platform
from distutils.core import Command
from distutils.errors import DistutilsPlatformError, DistutilsOptionError
def show_formats():
"""Print list of available formats (arguments to "--format" option).
"""
from distutils.fancy_getopt import FancyGetopt
formats = []
for format in bdist.format_commands:
formats.append(("formats=" + format, None,
bdist.format_command[format][1]))
pretty_printer = FancyGetopt(formats)
pretty_printer.print_help("List of available distribution formats:")
class bdist(Command):
description = "create a built (binary) distribution"
user_options = [('bdist-base=', 'b',
"temporary directory for creating built distributions"),
('plat-name=', 'p',
"platform name to embed in generated filenames "
"(default: %s)" % get_platform()),
('formats=', None,
"formats for distribution (comma-separated list)"),
('dist-dir=', 'd',
"directory to put final built distributions in "
"[default: dist]"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
('owner=', 'u',
"Owner name used when creating a tar file"
" [default: current user]"),
('group=', 'g',
"Group name used when creating a tar file"
" [default: current group]"),
]
boolean_options = ['skip-build']
help_options = [
('help-formats', None,
"lists available distribution formats", show_formats),
]
# The following commands do not take a format option from bdist
no_format_option = ('bdist_rpm',)
# This won't do in reality: will need to distinguish RPM-ish Linux,
# Debian-ish Linux, Solaris, FreeBSD, ..., Windows, Mac OS.
default_format = {'posix': 'gztar',
'nt': 'zip',
'os2': 'zip'}
# Establish the preferred order (for the --help-formats option).
format_commands = ['rpm', 'gztar', 'bztar', 'ztar', 'tar',
'wininst', 'zip', 'msi']
# And the real information.
format_command = {'rpm': ('bdist_rpm', "RPM distribution"),
'gztar': ('bdist_dumb', "gzip'ed tar file"),
'bztar': ('bdist_dumb', "bzip2'ed tar file"),
'ztar': ('bdist_dumb', "compressed tar file"),
'tar': ('bdist_dumb', "tar file"),
'wininst': ('bdist_wininst',
"Windows executable installer"),
'zip': ('bdist_dumb', "ZIP file"),
'msi': ('bdist_msi', "Microsoft Installer")
}
def initialize_options(self):
self.bdist_base = None
self.plat_name = None
self.formats = None
self.dist_dir = None
self.skip_build = 0
self.group = None
self.owner = None
def finalize_options(self):
# have to finalize 'plat_name' before 'bdist_base'
if self.plat_name is None:
if self.skip_build:
self.plat_name = get_platform()
else:
self.plat_name = self.get_finalized_command('build').plat_name
# 'bdist_base' -- parent of per-built-distribution-format
# temporary directories (eg. we'll probably have
# "build/bdist.<plat>/dumb", "build/bdist.<plat>/rpm", etc.)
if self.bdist_base is None:
build_base = self.get_finalized_command('build').build_base
self.bdist_base = os.path.join(build_base,
'bdist.' + self.plat_name)
self.ensure_string_list('formats')
if self.formats is None:
try:
self.formats = [self.default_format[os.name]]
except KeyError:
raise DistutilsPlatformError, \
"don't know how to create built distributions " + \
"on platform %s" % os.name
if self.dist_dir is None:
self.dist_dir = "dist"
def run(self):
# Figure out which sub-commands we need to run.
commands = []
for format in self.formats:
try:
commands.append(self.format_command[format][0])
except KeyError:
raise DistutilsOptionError, "invalid format '%s'" % format
# Reinitialize and run each command.
for i in range(len(self.formats)):
cmd_name = commands[i]
sub_cmd = self.reinitialize_command(cmd_name)
if cmd_name not in self.no_format_option:
sub_cmd.format = self.formats[i]
# passing the owner and group names for tar archiving
if cmd_name == 'bdist_dumb':
sub_cmd.owner = self.owner
sub_cmd.group = self.group
# If we're going to need to run this command again, tell it to
# keep its temporary files around so subsequent runs go faster.
if cmd_name in commands[i+1:]:
sub_cmd.keep_temp = 1
self.run_command(cmd_name)
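# A minimal sketch (illustration only, not part of distutils) of how the
# tables above drive run(): each requested format maps through
# format_command to a sub-command name, with default_format supplying
# the per-platform fallback when --formats is not given.
#
#   def _resolve_commands(formats=None):
#       formats = formats or [bdist.default_format.get(os.name, 'gztar')]
#       return [bdist.format_command[fmt][0] for fmt in formats]
#
#   _resolve_commands(['gztar', 'msi'])  # -> ['bdist_dumb', 'bdist_msi']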
| bsd-3-clause |
elsigh/browserscope | third_party/uaparser/user_agent_parser.py | 3 | 12414 | #!/usr/bin/python2.5
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the 'License')
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared models."""
import os
import re
import yaml
class UserAgentParser(object):
def __init__(self, pattern, family_replacement=None, v1_replacement=None):
"""Initialize UserAgentParser.
Args:
pattern: a regular expression string
family_replacement: a string to override the matched family (optional)
v1_replacement: a string to override the matched v1 (optional)
"""
self.pattern = pattern
self.user_agent_re = re.compile(self.pattern)
self.family_replacement = family_replacement
self.v1_replacement = v1_replacement
def MatchSpans(self, user_agent_string):
match_spans = []
match = self.user_agent_re.search(user_agent_string)
if match:
match_spans = [match.span(group_index)
for group_index in range(1, match.lastindex + 1)]
return match_spans
def Parse(self, user_agent_string):
family, v1, v2, v3 = None, None, None, None
match = self.user_agent_re.search(user_agent_string)
if match:
if self.family_replacement:
if re.search(r'\$1', self.family_replacement):
family = re.sub(r'\$1', match.group(1), self.family_replacement)
else:
family = self.family_replacement
else:
family = match.group(1)
if self.v1_replacement:
v1 = self.v1_replacement
elif match.lastindex >= 2:
v1 = match.group(2)
if match.lastindex >= 3:
v2 = match.group(3)
if match.lastindex >= 4:
v3 = match.group(4)
return family, v1, v2, v3
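# Illustration (hypothetical pattern, not one of the YAML-driven rules
# loaded at the bottom of this module): family_replacement may use $1
# to splice in the first capture group.
#
#   p = UserAgentParser(r'(Firefox)/(\d+)\.(\d+)', family_replacement='$1 Browser')
#   p.Parse('Mozilla/5.0 Firefox/3.6')  # -> ('Firefox Browser', '3', '6', None)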
class OSParser(object):
def __init__(self, pattern, os_replacement=None):
"""Initialize OSParser.
Args:
pattern: a regular expression string
os_replacement: a string to override the matched os (optional)
"""
self.pattern = pattern
self.user_agent_re = re.compile(self.pattern)
self.os_replacement = os_replacement
def MatchSpans(self, user_agent_string):
match_spans = []
match = self.user_agent_re.search(user_agent_string)
if match:
match_spans = [match.span(group_index)
for group_index in range(1, match.lastindex + 1)]
return match_spans
def Parse(self, user_agent_string):
os, os_v1, os_v2, os_v3, os_v4 = None, None, None, None, None
match = self.user_agent_re.search(user_agent_string)
if match:
if self.os_replacement:
os = self.os_replacement
else:
os = match.group(1)
if match.lastindex >= 2:
os_v1 = match.group(2)
if match.lastindex >= 3:
os_v2 = match.group(3)
if match.lastindex >= 4:
os_v3 = match.group(4)
if match.lastindex >= 5:
os_v4 = match.group(5)
return os, os_v1, os_v2, os_v3, os_v4
class DeviceParser(object):
def __init__(self, pattern, device_replacement=None):
"""Initialize UserAgentParser.
Args:
pattern: a regular expression string
device_replacement: a string to override the matched device (optional)
"""
self.pattern = pattern
self.user_agent_re = re.compile(self.pattern)
self.device_replacement = device_replacement
def MatchSpans(self, user_agent_string):
match_spans = []
match = self.user_agent_re.search(user_agent_string)
if match:
match_spans = [match.span(group_index)
for group_index in range(1, match.lastindex + 1)]
return match_spans
def Parse(self, user_agent_string):
device = None
match = self.user_agent_re.search(user_agent_string)
if match:
if self.device_replacement:
if re.search(r'\$1', self.device_replacement):
device = re.sub(r'\$1', match.group(1), self.device_replacement)
else:
device = self.device_replacement
else:
device = match.group(1)
return device
def ParseAll(user_agent_string, **jsParseBits):
""" Parse all the things
Args:
user_agent_string: the full user agent string
jsParseBits: javascript override bits
Returns:
flat dictionary containing all parsed bits
"""
jsParseBits = jsParseBits or {}
resultUA = ParseUserAgent(user_agent_string, **jsParseBits)
resultOS = ParseOS(user_agent_string, **jsParseBits)
resultAll = {}
for result in (resultUA, resultOS):
resultAll.update(result)
return resultAll
def ParseUserAgent(user_agent_string, **jsParseBits):
""" Parses the user-agent string for user agent (browser) info.
Args:
user_agent_string: The full user-agent string.
jsParseBits: javascript override bits
Returns:
flat dictionary containing parsed bits
"""
# Initialize up front so missing js_user_agent_v* keys (or an empty
# parser list) cannot leave these names unbound below.
family = v1 = v2 = v3 = None
if 'js_user_agent_family' in jsParseBits and jsParseBits['js_user_agent_family'] != '':
family = jsParseBits['js_user_agent_family']
if 'js_user_agent_v1' in jsParseBits:
v1 = jsParseBits['js_user_agent_v1'] or None
if 'js_user_agent_v2' in jsParseBits:
v2 = jsParseBits['js_user_agent_v2'] or None
if 'js_user_agent_v3' in jsParseBits:
v3 = jsParseBits['js_user_agent_v3'] or None
else:
for uaParser in USER_AGENT_PARSERS:
family, v1, v2, v3 = uaParser.Parse(user_agent_string)
if family:
break
# Override for Chrome Frame IFF Chrome is enabled.
if 'js_user_agent_string' in jsParseBits:
js_user_agent_string = jsParseBits['js_user_agent_string']
if (js_user_agent_string and js_user_agent_string.find('Chrome/') > -1 and
user_agent_string.find('chromeframe') > -1):
jsOverride = {}
jsOverride = ParseUserAgent(js_user_agent_string)
family = 'Chrome Frame (%s %s)' % (family, v1)
v1 = jsOverride['v1']
v2 = jsOverride['v2']
v3 = jsOverride['v3']
family = family or 'Other'
return {'family': family, 'v1': v1, 'v2': v2, 'v3': v3}
def ParseOS(user_agent_string, **jsParseBits):
""" Parses the user-agent string for operating system info
Args:
user_agent_string: The full user-agent string.
jsParseBits: javascript override bits
Returns:
flat dictionary containing parsed bits
"""
for osParser in OS_PARSERS:
os, os_v1, os_v2, os_v3, os_v4 = osParser.Parse(user_agent_string)
if os:
break
os = os or 'Other'
return { 'os': os, 'os_v1': os_v1, 'os_v2': os_v2, 'os_v3': os_v3, 'os_v4': os_v4 }
def ParseDevice(user_agent_string, **jsParseBits):
""" incomplete! """
for deviceParser in DEVICE_PARSERS:
device = deviceParser.Parse(user_agent_string)
if device:
break
device = device or 'Other'
return {'device': device}
def PrettyUserAgent(family, v1=None, v2=None, v3=None):
"""Pretty user agent string."""
if v3:
if v3[0].isdigit():
return '%s %s.%s.%s' % (family, v1, v2, v3)
else:
return '%s %s.%s%s' % (family, v1, v2, v3)
elif v2:
return '%s %s.%s' % (family, v1, v2)
elif v1:
return '%s %s' % (family, v1)
return family
def PrettyOS(os, os_v1=None, os_v2=None, os_v3=None, os_v4=None):
"""Pretty os string."""
if os_v4:
return '%s %s.%s.%s.%s' % (os, os_v1, os_v2, os_v3, os_v4)
if os_v3:
if os_v3[0].isdigit():
return '%s %s.%s.%s' % (os, os_v1, os_v2, os_v3)
else:
return '%s %s.%s%s' % (os, os_v1, os_v2, os_v3)
elif os_v2:
return '%s %s.%s' % (os, os_v1, os_v2)
elif os_v1:
return '%s %s' % (os, os_v1)
return os
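# Worked examples (illustrative): PrettyUserAgent('Firefox', '3', '6')
# returns 'Firefox 3.6'; PrettyOS('Windows', '7') returns 'Windows 7'.
# A non-digit v3 is appended without a dot separator, e.g.
# PrettyUserAgent('Safari', '4', '0', 'b1') -> 'Safari 4.0b1'.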
def Parse(user_agent_string, js_user_agent_string=None,
js_user_agent_family=None,
js_user_agent_v1=None,
js_user_agent_v2=None,
js_user_agent_v3=None):
""" backwards compatible. use one of the other Parse methods instead! """
# Override via JS properties.
if js_user_agent_family is not None and js_user_agent_family != '':
family = js_user_agent_family
v1 = None
v2 = None
v3 = None
if js_user_agent_v1 is not None:
v1 = js_user_agent_v1
if js_user_agent_v2 is not None:
v2 = js_user_agent_v2
if js_user_agent_v3 is not None:
v3 = js_user_agent_v3
else:
for parser in USER_AGENT_PARSERS:
family, v1, v2, v3 = parser.Parse(user_agent_string)
if family:
break
# Override for Chrome Frame IFF Chrome is enabled.
if (js_user_agent_string and js_user_agent_string.find('Chrome/') > -1 and
user_agent_string.find('chromeframe') > -1):
family = 'Chrome Frame (%s %s)' % (family, v1)
cf_family, v1, v2, v3 = Parse(js_user_agent_string)
return family or 'Other', v1, v2, v3
def Pretty(family, v1=None, v2=None, v3=None):
""" backwards compatible. use PrettyUserAgent instead! """
if v3:
if v3[0].isdigit():
return '%s %s.%s.%s' % (family, v1, v2, v3)
else:
return '%s %s.%s%s' % (family, v1, v2, v3)
elif v2:
return '%s %s.%s' % (family, v1, v2)
elif v1:
return '%s %s' % (family, v1)
return family
def GetFilters(user_agent_string, js_user_agent_string=None,
js_user_agent_family=None,
js_user_agent_v1=None,
js_user_agent_v2=None,
js_user_agent_v3=None):
"""Return the optional arguments that should be saved and used to query.
js_user_agent_string is always returned if it is present. We really only need
it for Chrome Frame. However, I added it in the general case to find other
cases where it differs. When the recording of js_user_agent_string was
added, we created new records for all new user agents.
Since we only added js_document_mode for the IE 9 preview case, it did not
cause new user agent records the way js_user_agent_string did.
js_document_mode has since been removed in favor of individual property
overrides.
Args:
user_agent_string: The full user-agent string.
js_user_agent_string: JavaScript ua string from client-side
js_user_agent_family: This is an override for the family name to deal
with the fact that IE platform preview (for instance) cannot be
distinguished by user_agent_string, but only in javascript.
js_user_agent_v1: v1 override - see above.
js_user_agent_v2: v2 override - see above.
js_user_agent_v3: v3 override - see above.
Returns:
{js_user_agent_string: '[...]', js_family_name: '[...]', etc...}
"""
filters = {}
filterdict = {
'js_user_agent_string': js_user_agent_string,
'js_user_agent_family': js_user_agent_family,
'js_user_agent_v1': js_user_agent_v1,
'js_user_agent_v2': js_user_agent_v2,
'js_user_agent_v3': js_user_agent_v3
}
for key, value in filterdict.items():
if value is not None and value != '':
filters[key] = value
return filters
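# End-to-end sketch (illustrative; the parser lists are only populated
# by the YAML loading below, so this works once the module is fully
# imported):
#
#   bits = ParseAll(ua_string)
#   # -> {'family': ..., 'v1': ..., 'v2': ..., 'v3': ...,
#   #     'os': ..., 'os_v1': ..., 'os_v2': ..., 'os_v3': ..., 'os_v4': ...}
#   GetFilters(ua_string, js_user_agent_string=js_ua)
#   # -> only the JS override kwargs that are non-empty, keyed by name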
# Build the list of user agent parsers from YAML
rootDir = os.path.abspath(os.path.dirname(__file__))
yamlFile = open(os.path.join(rootDir, 'resources', 'user_agent_parser.yaml'))
# Use a distinct name so the parsed data does not shadow the yaml module.
yamlContents = yaml.load(yamlFile)
yamlFile.close()
USER_AGENT_PARSERS = []
for parser in yamlContents['user_agent_parsers']:
regex = parser['regex']
family_replacement = None
if 'family_replacement' in parser:
family_replacement = parser['family_replacement']
v1_replacement = None
if 'v1_replacement' in parser:
v1_replacement = parser['v1_replacement']
USER_AGENT_PARSERS.append(UserAgentParser(regex, family_replacement, v1_replacement))
OS_PARSERS = []
for parser in yamlContents['os_parsers']:
regex = parser['regex']
os_replacement = None
if 'os_replacement' in parser:
os_replacement = parser['os_replacement']
OS_PARSERS.append(OSParser(regex, os_replacement))
DEVICE_PARSERS = []
for parser in yamlContents['device_parsers']:
regex = parser['regex']
device_replacement = None
if 'device_replacement' in parser:
device_replacement = parser['device_replacement']
DEVICE_PARSERS.append(DeviceParser(regex, device_replacement)) | apache-2.0 |
NathanW2/QGIS | tests/src/python/test_qgsfieldformatters.py | 1 | 13493 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for field formatters.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Matthias Kuhn'
__date__ = '05/12/2016'
__copyright__ = 'Copyright 2016, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis # NOQA
from qgis.core import (QgsFeature, QgsProject, QgsRelation, QgsVectorLayer,
QgsValueMapFieldFormatter, QgsValueRelationFieldFormatter,
QgsRelationReferenceFieldFormatter, QgsRangeFieldFormatter, QgsSettings)
from qgis.testing import start_app, unittest
start_app()
class TestQgsValueMapFieldFormatter(unittest.TestCase):
VALUEMAP_NULL_TEXT = "{2839923C-8B7D-419E-B84B-CA2FE9B80EC7}"
def test_representValue(self):
QgsSettings().setValue("qgis/nullValue", "NULL")
layer = QgsVectorLayer("none?field=number1:integer&field=number2:double&field=text1:string&field=number3:integer&field=number4:double&field=text2:string",
"layer", "memory")
self.assertTrue(layer.isValid())
QgsProject.instance().addMapLayer(layer)
f = QgsFeature()
f.setAttributes([2, 2.5, 'NULL', None, None, None])
layer.dataProvider().addFeatures([f])
fieldFormatter = QgsValueMapFieldFormatter()
# Tests with different value types occurring in the value map
config = {'map': {'two': '2', 'twoandhalf': '2.5', 'NULL text': 'NULL',
'nothing': self.VALUEMAP_NULL_TEXT}}
self.assertEqual(fieldFormatter.representValue(layer, 0, config, None, 2), 'two')
self.assertEqual(fieldFormatter.representValue(layer, 1, config, None, 2.5), 'twoandhalf')
self.assertEqual(fieldFormatter.representValue(layer, 2, config, None, 'NULL'), 'NULL text')
# Tests with null values of different types, if value map contains null
self.assertEqual(fieldFormatter.representValue(layer, 3, config, None, None), 'nothing')
self.assertEqual(fieldFormatter.representValue(layer, 4, config, None, None), 'nothing')
self.assertEqual(fieldFormatter.representValue(layer, 5, config, None, None), 'nothing')
# Tests with fallback display for different value types
config = {}
self.assertEqual(fieldFormatter.representValue(layer, 0, config, None, 2), '(2)')
self.assertEqual(fieldFormatter.representValue(layer, 1, config, None, 2.5), '(2.50000)')
self.assertEqual(fieldFormatter.representValue(layer, 2, config, None, 'NULL'), '(NULL)')
# Tests with fallback display for null in different types of fields
self.assertEqual(fieldFormatter.representValue(layer, 3, config, None, None), '(NULL)')
self.assertEqual(fieldFormatter.representValue(layer, 4, config, None, None), '(NULL)')
self.assertEqual(fieldFormatter.representValue(layer, 5, config, None, None), '(NULL)')
QgsProject.instance().removeAllMapLayers()
class TestQgsValueRelationFieldFormatter(unittest.TestCase):
def test_representValue(self):
first_layer = QgsVectorLayer("none?field=foreign_key:integer",
"first_layer", "memory")
self.assertTrue(first_layer.isValid())
second_layer = QgsVectorLayer("none?field=pkid:integer&field=decoded:string",
"second_layer", "memory")
self.assertTrue(second_layer.isValid())
QgsProject.instance().addMapLayer(second_layer)
f = QgsFeature()
f.setAttributes([123])
first_layer.dataProvider().addFeatures([f])
f = QgsFeature()
f.setAttributes([123, 'decoded_val'])
second_layer.dataProvider().addFeatures([f])
fieldFormatter = QgsValueRelationFieldFormatter()
# Everything valid
config = {'Layer': second_layer.id(), 'Key': 'pkid', 'Value': 'decoded'}
self.assertEqual(fieldFormatter.representValue(first_layer, 0, config, None, '123'), 'decoded_val')
# Cannot find a match in the foreign layer
config = {'Layer': second_layer.id(), 'Key': 'pkid', 'Value': 'decoded'}
self.assertEqual(fieldFormatter.representValue(first_layer, 0, config, None, '456'), '(456)')
# Missing Layer
config = {'Key': 'pkid', 'Value': 'decoded'}
self.assertEqual(fieldFormatter.representValue(first_layer, 0, config, None, '456'), '(456)')
# Invalid Layer
config = {'Layer': 'invalid', 'Key': 'pkid', 'Value': 'decoded'}
self.assertEqual(fieldFormatter.representValue(first_layer, 0, config, None, '456'), '(456)')
# Invalid Key
config = {'Layer': second_layer.id(), 'Key': 'invalid', 'Value': 'decoded'}
self.assertEqual(fieldFormatter.representValue(first_layer, 0, config, None, '456'), '(456)')
# Invalid Value
config = {'Layer': second_layer.id(), 'Key': 'pkid', 'Value': 'invalid'}
self.assertEqual(fieldFormatter.representValue(first_layer, 0, config, None, '456'), '(456)')
QgsProject.instance().removeMapLayer(second_layer.id())
def test_valueToStringList(self):
def _test(a, b):
self.assertEqual(QgsValueRelationFieldFormatter.valueToStringList(a), b)
_test([1, 2, 3], ["1", "2", "3"])
_test("{1,2,3}", ["1", "2", "3"])
_test(['1', '2', '3'], ["1", "2", "3"])
_test('not an array', ['not an array'])
class TestQgsRelationReferenceFieldFormatter(unittest.TestCase):
def test_representValue(self):
first_layer = QgsVectorLayer("none?field=foreign_key:integer",
"first_layer", "memory")
self.assertTrue(first_layer.isValid())
second_layer = QgsVectorLayer("none?field=pkid:integer&field=decoded:string",
"second_layer", "memory")
self.assertTrue(second_layer.isValid())
QgsProject.instance().addMapLayers([first_layer, second_layer])
f = QgsFeature()
f.setAttributes([123])
first_layer.dataProvider().addFeatures([f])
f = QgsFeature()
f.setAttributes([123, 'decoded_val'])
second_layer.dataProvider().addFeatures([f])
relMgr = QgsProject.instance().relationManager()
fieldFormatter = QgsRelationReferenceFieldFormatter()
rel = QgsRelation()
rel.setId('rel1')
rel.setName('Relation Number One')
rel.setReferencingLayer(first_layer.id())
rel.setReferencedLayer(second_layer.id())
rel.addFieldPair('foreign_key', 'pkid')
self.assertTrue(rel.isValid())
relMgr.addRelation(rel)
# Everything valid
config = {'Relation': rel.id()}
second_layer.setDisplayExpression('decoded')
self.assertEqual(fieldFormatter.representValue(first_layer, 0, config, None, '123'), 'decoded_val')
# Cannot find a match in the foreign layer
config = {'Relation': rel.id()}
second_layer.setDisplayExpression('decoded')
self.assertEqual(fieldFormatter.representValue(first_layer, 0, config, None, '456'), '456')
# Invalid relation id
config = {'Relation': 'invalid'}
second_layer.setDisplayExpression('decoded')
self.assertEqual(fieldFormatter.representValue(first_layer, 0, config, None, '123'), '123')
# No display expression
config = {'Relation': rel.id()}
second_layer.setDisplayExpression(None)
self.assertEqual(fieldFormatter.representValue(first_layer, 0, config, None, '123'), '123')
# Invalid display expression
config = {'Relation': rel.id()}
second_layer.setDisplayExpression('invalid +')
self.assertEqual(fieldFormatter.representValue(first_layer, 0, config, None, '123'), '123')
# Missing relation
config = {}
second_layer.setDisplayExpression('decoded')
self.assertEqual(fieldFormatter.representValue(first_layer, 0, config, None, '123'), '123')
# Inconsistent layer provided to representValue()
config = {'Relation': rel.id()}
second_layer.setDisplayExpression('decoded')
self.assertEqual(fieldFormatter.representValue(second_layer, 0, config, None, '123'), '123')
# Inconsistent idx provided to representValue()
config = {'Relation': rel.id()}
second_layer.setDisplayExpression('decoded')
self.assertEqual(fieldFormatter.representValue(first_layer, 1, config, None, '123'), '123')
# Invalid relation
rel = QgsRelation()
rel.setId('rel2')
rel.setName('Relation Number Two')
rel.setReferencingLayer(first_layer.id())
rel.addFieldPair('foreign_key', 'pkid')
self.assertFalse(rel.isValid())
relMgr.addRelation(rel)
config = {'Relation': rel.id()}
second_layer.setDisplayExpression('decoded')
self.assertEqual(fieldFormatter.representValue(first_layer, 0, config, None, '123'), '123')
QgsProject.instance().removeAllMapLayers()
class TestQgsRangeFieldFormatter(unittest.TestCase):
def test_representValue(self):
layer = QgsVectorLayer("point?field=int:integer&field=double:double",
"layer", "memory")
self.assertTrue(layer.isValid())
QgsProject.instance().addMapLayers([layer])
fieldFormatter = QgsRangeFieldFormatter()
# Precision is ignored for integers
self.assertEqual(fieldFormatter.representValue(layer, 0, {'Precision': 1}, None, '123'), '123')
self.assertEqual(fieldFormatter.representValue(layer, 0, {'Precision': 1}, None, '123000'), '123000')
self.assertEqual(fieldFormatter.representValue(layer, 0, {'Precision': 1}, None, None), 'NULL')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 1}, None, None), 'NULL')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 1}, None, '123'), '123.0')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, None), 'NULL')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '123000'), '123000.00')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '0'), '0.00')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '123'), '123.00')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '0.123'), '0.12')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '0.127'), '0.13')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 3}, None, '0'), '0.000')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 3}, None, '0.127'), '0.127')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 3}, None, '1.27e-1'), '0.127')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '-123'), '-123.00')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '-0.123'), '-0.12')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '-0.127'), '-0.13')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 3}, None, '-0.127'), '-0.127')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 3}, None, '-1.27e-1'), '-0.127')
QgsSettings().setValue("locale/overrideFlag", True)
QgsSettings().setValue("locale/userLocale", 'it')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, None), 'NULL')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '123000'), '123000,00')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '0'), '0,00')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '123'), '123,00')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '0.123'), '0,12')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '0.127'), '0,13')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 3}, None, '0'), '0,000')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 3}, None, '0.127'), '0,127')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 3}, None, '1.27e-1'), '0,127')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '-123'), '-123,00')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '-0.123'), '-0,12')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '-0.127'), '-0,13')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 3}, None, '-0.127'), '-0,127')
self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 3}, None, '-1.27e-1'), '-0,127')
QgsProject.instance().removeAllMapLayers()
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
flacjacket/sympy | sympy/core/tests/test_expr.py | 1 | 48018 | from __future__ import division
from sympy import (Add, Basic, S, Symbol, Wild, Float, Integer, Rational, I,
sin, cos, tan, exp, log, nan, oo, sqrt, symbols, Integral, sympify,
WildFunction, Poly, Function, Derivative, Number, pi, NumberSymbol, zoo,
Piecewise, Mul, Pow, nsimplify, ratsimp, trigsimp, radsimp, powsimp,
simplify, together, collect, factorial, apart, combsimp, factor, refine,
cancel, Tuple, default_sort_key, DiracDelta, gamma, Dummy, Sum, E,
exp_polar, Lambda)
from sympy.core.function import AppliedUndef
from sympy.abc import a, b, c, d, e, n, t, u, x, y, z
from sympy.physics.secondquant import FockState
from sympy.physics.units import meter
from sympy.utilities.pytest import raises, XFAIL
class DummyNumber(object):
"""
Minimal implementation of a number that works with SymPy.
If one has a Number class (e.g. Sage Integer, or some other custom class)
that one wants to work well with SymPy, one has to implement at least the
methods of this class DummyNumber (in practice, of its subclasses I5 and F1_1).
Basically, one just needs to implement either __int__() or __float__() and
then one needs to make sure that the class works with Python integers and
with itself.
"""
def __radd__(self, a):
if isinstance(a, (int, float)):
return a + self.number
return NotImplemented
def __truediv__(a, b):
return a.__div__(b)
def __rtruediv__(a, b):
return a.__rdiv__(b)
def __add__(self, a):
if isinstance(a, (int, float, DummyNumber)):
return self.number + a
return NotImplemented
def __rsub__(self, a):
if isinstance(a, (int, float)):
return a - self.number
return NotImplemented
def __sub__(self, a):
if isinstance(a, (int, float, DummyNumber)):
return self.number - a
return NotImplemented
def __rmul__(self, a):
if isinstance(a, (int, float)):
return a * self.number
return NotImplemented
def __mul__(self, a):
if isinstance(a, (int, float, DummyNumber)):
return self.number * a
return NotImplemented
def __rdiv__(self, a):
if isinstance(a, (int, float)):
return a / self.number
return NotImplemented
def __div__(self, a):
if isinstance(a, (int, float, DummyNumber)):
return self.number / a
return NotImplemented
def __rpow__(self, a):
if isinstance(a, (int, float)):
return a ** self.number
return NotImplemented
def __pow__(self, a):
if isinstance(a, (int, float, DummyNumber)):
return self.number ** a
return NotImplemented
def __pos__(self):
return self.number
def __neg__(self):
return - self.number
class I5(DummyNumber):
number = 5
def __int__(self):
return self.number
class F1_1(DummyNumber):
number = 1.1
def __float__(self):
return self.number
i5 = I5()
f1_1 = F1_1()
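# Quick illustration (mirrors what dotest()/test_basic below exercise):
# mixed arithmetic such as x + i5 or f1_1 * x works because SymPy can
# sympify the custom number (see test_SAGE2 below for the __int__ case),
# or Python falls back to the reflected hooks (__radd__ etc.) above.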
# basic sympy objects
basic_objs = [
Rational(2),
Float("1.3"),
x,
y,
pow(x,y)*y,
]
# all supported objects
all_objs = basic_objs + [
5,
5.5,
i5,
f1_1
]
def dotest(s):
for x in all_objs:
for y in all_objs:
s(x,y)
return True
def test_basic():
def j(a,b):
x = a
x = +a
x = -a
x = a+b
x = a-b
x = a*b
x = a/b
x = a**b
assert dotest(j)
def test_ibasic():
def s(a,b):
x = a
x += b
x = a
x -= b
x = a
x *= b
x = a
x /= b
assert dotest(s)
def test_relational():
assert (pi < 3) == False
assert (pi <= 3) == False
assert (pi > 3) == True
assert (pi >= 3) == True
assert (-pi < 3) == True
assert (-pi <= 3) == True
assert (-pi > 3) == False
assert (-pi >= 3) == False
assert (x - 2 < x - 3) == False
def test_relational_noncommutative():
from sympy import Lt, Gt, Le, Ge
A, B = symbols('A,B', commutative=False)
assert (A < B) == Lt(A, B)
assert (A <= B) == Le(A, B)
assert (A > B) == Gt(A, B)
assert (A >= B) == Ge(A, B)
def test_basic_nostr():
for obj in basic_objs:
raises(TypeError, lambda: obj + '1')
raises(TypeError, lambda: obj - '1')
if obj == 2:
if hasattr(int, '__index__'): # Python 2.5+ (PEP 357)
assert obj * '1' == '11'
else:
raises(TypeError, lambda: obj * '1')
raises(TypeError, lambda: obj / '1')
raises(TypeError, lambda: obj ** '1')
def test_leadterm():
assert (3+2*x**(log(3)/log(2)-1)).leadterm(x) == (3,0)
assert (1/x**2+1+x+x**2).leadterm(x)[1] == -2
assert (1/x+1+x+x**2).leadterm(x)[1] == -1
assert (x**2+1/x).leadterm(x)[1] == -1
assert (1+x**2).leadterm(x)[1] == 0
assert (x+1).leadterm(x)[1] == 0
assert (x+x**2).leadterm(x)[1] == 1
assert (x**2).leadterm(x)[1] == 2
def test_as_leading_term():
assert (3+2*x**(log(3)/log(2)-1)).as_leading_term(x) == 3
assert (1/x**2+1+x+x**2).as_leading_term(x) == 1/x**2
assert (1/x+1+x+x**2).as_leading_term(x) == 1/x
assert (x**2+1/x).as_leading_term(x) == 1/x
assert (1+x**2).as_leading_term(x) == 1
assert (x+1).as_leading_term(x) == 1
assert (x+x**2).as_leading_term(x) == x
assert (x**2).as_leading_term(x) == x**2
assert (x + oo).as_leading_term(x) == oo
def test_leadterm2():
assert (x*cos(1)*cos(1 + sin(1)) + sin(1 + sin(1))).leadterm(x) == \
(sin(1 + sin(1)), 0)
def test_leadterm3():
assert (y+z+x).leadterm(x) == (y+z, 0)
def test_as_leading_term2():
assert (x*cos(1)*cos(1 + sin(1)) + sin(1 + sin(1))).as_leading_term(x) == \
sin(1 + sin(1))
def test_as_leading_term3():
assert (2+pi+x).as_leading_term(x) == 2 + pi
assert (2*x+pi*x+x**2).as_leading_term(x) == (2+pi)*x
def test_as_leading_term_stub():
class foo(Function):
pass
assert foo(1/x).as_leading_term(x) == foo(1/x)
assert foo(1).as_leading_term(x) == foo(1)
raises(NotImplementedError, lambda: foo(x).as_leading_term(x))
def test_atoms():
assert sorted(list(x.atoms())) == [x]
assert sorted(list((1+x).atoms())) == sorted([1, x])
assert sorted(list((1+2*cos(x)).atoms(Symbol))) == [x]
assert sorted(list((1+2*cos(x)).atoms(Symbol,Number))) == sorted([1, 2, x])
assert sorted(list((2*(x**(y**x))).atoms())) == sorted([2, x, y])
assert sorted(list(Rational(1,2).atoms())) == [S.Half]
assert sorted(list(Rational(1,2).atoms(Symbol))) == []
assert sorted(list(sin(oo).atoms(oo))) == [oo]
assert sorted(list(Poly(0, x).atoms())) == [S.Zero]
assert sorted(list(Poly(1, x).atoms())) == [S.One]
assert sorted(list(Poly(x, x).atoms())) == [x]
assert sorted(list(Poly(x, x, y).atoms())) == [x]
assert sorted(list(Poly(x + y, x, y).atoms())) == sorted([x, y])
assert sorted(list(Poly(x + y, x, y, z).atoms())) == sorted([x, y])
assert sorted(list(Poly(x + y*t, x, y, z).atoms())) == sorted([t, x, y])
assert list((I*pi).atoms(NumberSymbol)) == [pi]
assert sorted((I*pi).atoms(NumberSymbol, I)) == \
sorted((I*pi).atoms(I,NumberSymbol)) == [pi, I]
assert exp(exp(x)).atoms(exp) == set([exp(exp(x)), exp(x)])
assert (1 + x*(2 + y)+exp(3 + z)).atoms(Add) == set(
[1 + x*(2 + y)+exp(3 + z),
2 + y,
3 + z])
# issue 3033
f = Function('f')
e = (f(x) + sin(x) + 2)
assert e.atoms(AppliedUndef) == \
set([f(x)])
assert e.atoms(AppliedUndef, Function) == \
set([f(x), sin(x)])
assert e.atoms(Function) == \
set([f(x), sin(x)])
assert e.atoms(AppliedUndef, Number) == \
set([f(x), S(2)])
assert e.atoms(Function, Number) == \
set([S(2), sin(x), f(x)])
def test_is_polynomial():
k = Symbol('k', nonnegative=True, integer=True)
assert Rational(2).is_polynomial(x, y, z) == True
assert (S.Pi).is_polynomial(x, y, z) == True
assert x.is_polynomial(x) == True
assert x.is_polynomial(y) == True
assert (x**2).is_polynomial(x) == True
assert (x**2).is_polynomial(y) == True
assert (x**(-2)).is_polynomial(x) == False
assert (x**(-2)).is_polynomial(y) == True
assert (2**x).is_polynomial(x) == False
assert (2**x).is_polynomial(y) == True
assert (x**k).is_polynomial(x) == False
assert (x**k).is_polynomial(k) == False
assert (x**x).is_polynomial(x) == False
assert (k**k).is_polynomial(k) == False
assert (k**x).is_polynomial(k) == False
assert (x**(-k)).is_polynomial(x) == False
assert ((2*x)**k).is_polynomial(x) == False
assert (x**2 + 3*x - 8).is_polynomial(x) == True
assert (x**2 + 3*x - 8).is_polynomial(y) == True
assert (x**2 + 3*x - 8).is_polynomial() == True
assert sqrt(x).is_polynomial(x) == False
assert (sqrt(x)**3).is_polynomial(x) == False
assert (x**2 + 3*x*sqrt(y) - 8).is_polynomial(x) == True
assert (x**2 + 3*x*sqrt(y) - 8).is_polynomial(y) == False
assert ((x**2)*(y**2) + x*(y**2) + y*x + exp(2)).is_polynomial() == True
assert ((x**2)*(y**2) + x*(y**2) + y*x + exp(x)).is_polynomial() == False
assert ((x**2)*(y**2) + x*(y**2) + y*x + exp(2)).is_polynomial(x, y) == True
assert ((x**2)*(y**2) + x*(y**2) + y*x + exp(x)).is_polynomial(x, y) == False
def test_is_rational_function():
assert Integer(1).is_rational_function() == True
assert Integer(1).is_rational_function(x) == True
assert Rational(17,54).is_rational_function() == True
assert Rational(17,54).is_rational_function(x) == True
assert (12/x).is_rational_function() == True
assert (12/x).is_rational_function(x) == True
assert (x/y).is_rational_function() == True
assert (x/y).is_rational_function(x) == True
assert (x/y).is_rational_function(x, y) == True
assert (x**2+1/x/y).is_rational_function() == True
assert (x**2+1/x/y).is_rational_function(x) == True
assert (x**2+1/x/y).is_rational_function(x, y) == True
assert (sin(y)/x).is_rational_function() == False
assert (sin(y)/x).is_rational_function(y) == False
assert (sin(y)/x).is_rational_function(x) == True
assert (sin(y)/x).is_rational_function(x, y) == False
def test_SAGE1():
#see http://code.google.com/p/sympy/issues/detail?id=247
class MyInt:
def _sympy_(self):
return Integer(5)
m = MyInt()
e = Rational(2)*m
assert e == 10
raises(TypeError, lambda: Rational(2)*MyInt)
def test_SAGE2():
class MyInt(object):
def __int__(self):
return 5
assert sympify(MyInt()) == 5
e = Rational(2)*MyInt()
assert e == 10
raises(TypeError, lambda: Rational(2)*MyInt)
def test_SAGE3():
class MySymbol:
def __rmul__(self, other):
return ('mys', other, self)
o = MySymbol()
e = x*o
assert e == ('mys', x, o)
def test_len():
e = x*y
assert len(e.args) == 2
e = x+y+z
assert len(e.args) == 3
def test_doit():
a = Integral(x**2, x)
assert isinstance(a.doit(), Integral) == False
assert isinstance(a.doit(integrals=True), Integral) == False
assert isinstance(a.doit(integrals=False), Integral) == True
assert (2*Integral(x, x)).doit() == x**2
def test_attribute_error():
raises(AttributeError, lambda: x.cos())
raises(AttributeError, lambda: x.sin())
raises(AttributeError, lambda: x.exp())
def test_args():
assert (x*y).args in ((x, y), (y, x))
assert (x+y).args in ((x, y), (y, x))
assert (x*y+1).args in ((x*y, 1), (1, x*y))
assert sin(x*y).args == (x*y,)
assert sin(x*y).args[0] == x*y
assert (x**y).args == (x,y)
assert (x**y).args[0] == x
assert (x**y).args[1] == y
def test_iter_basic_args():
assert list(sin(x*y).iter_basic_args()) == [x*y]
assert list((x**y).iter_basic_args()) == [x, y]
def test_noncommutative_expand_issue658():
A, B, C = symbols('A,B,C', commutative=False)
assert A*B - B*A != 0
assert (A*(A+B)*B).expand() == A**2*B + A*B**2
assert (A*(A+B+C)*B).expand() == A**2*B + A*B**2 + A*C*B
def test_as_numer_denom():
a, b, c = symbols('a, b, c')
assert nan.as_numer_denom() == (nan, 1)
assert oo.as_numer_denom() == (oo, 1)
assert (-oo).as_numer_denom() == (-oo, 1)
assert zoo.as_numer_denom() == (zoo, 1)
assert (-zoo).as_numer_denom() == (zoo, 1)
assert x.as_numer_denom() == (x, 1)
assert (1/x).as_numer_denom() == (1, x)
assert (x/y).as_numer_denom() == (x, y)
assert (x/2).as_numer_denom() == (x, 2)
assert (x*y/z).as_numer_denom() == (x*y, z)
assert (x/(y*z)).as_numer_denom() == (x, y*z)
assert Rational(1, 2).as_numer_denom() == (1, 2)
assert (1/y**2).as_numer_denom() == (1, y**2)
assert (x/y**2).as_numer_denom() == (x, y**2)
assert ((x**2+1)/y).as_numer_denom() == (x**2+1, y)
assert (x*(y+1)/y**7).as_numer_denom() == (x*(y+1), y**7)
assert (x**-2).as_numer_denom() == (1, x**2)
assert (a/x + b/2/x + c/3/x).as_numer_denom() == \
(6*a + 3*b + 2*c, 6*x)
assert (a/x + b/2/x + c/3/y).as_numer_denom() == \
(2*c*x + y*(6*a + 3*b), 6*x*y)
assert (a/x + b/2/x + c/.5/x).as_numer_denom() == \
(2*a + b + 4.0*c, 2*x)
# this should take no more than a few seconds
assert int(log(Add(*[Dummy()/i/x for i in xrange(1, 705)]
).as_numer_denom()[1]/x).n(4)) == 705
for i in [S.Infinity, S.NegativeInfinity, S.ComplexInfinity]:
assert (i + x/3).as_numer_denom() == \
(x + i, 3)
assert (S.Infinity + x/3 + y/4).as_numer_denom() == \
(4*x + 3*y + S.Infinity, 12)
assert (oo*x + zoo*y).as_numer_denom() == \
(zoo*y + oo*x, 1)
A, B, C = symbols('A,B,C', commutative=False)
assert (A*B*C**-1).as_numer_denom() == (A*B*C**-1, 1)
assert (A*B*C**-1/x).as_numer_denom() == (A*B*C**-1, x)
assert (C**-1*A*B).as_numer_denom() == (C**-1*A*B, 1)
assert (C**-1*A*B/x).as_numer_denom() == (C**-1*A*B, x)
assert ((A*B*C)**-1).as_numer_denom() == ((A*B*C)**-1, 1)
assert ((A*B*C)**-1/x).as_numer_denom() == ((A*B*C)**-1, x)
def test_as_independent():
assert (2*x*sin(x)+y+x).as_independent(x) == (y, x + 2*x*sin(x))
assert (2*x*sin(x)+y+x).as_independent(y) == (x + 2*x*sin(x), y)
assert (2*x*sin(x)+y+x).as_independent(x, y) == (0, y + x + 2*x*sin(x))
assert (x*sin(x)*cos(y)).as_independent(x) == (cos(y), x*sin(x))
assert (x*sin(x)*cos(y)).as_independent(y) == (x*sin(x), cos(y))
assert (x*sin(x)*cos(y)).as_independent(x, y) == (1, x*sin(x)*cos(y))
assert (sin(x)).as_independent(x) == (1, sin(x))
assert (sin(x)).as_independent(y) == (sin(x), 1)
assert (2*sin(x)).as_independent(x) == (2, sin(x))
assert (2*sin(x)).as_independent(y) == (2*sin(x), 1)
# issue 1804 = 1766b
n1, n2, n3 = symbols('n1 n2 n3', commutative=False)
assert (n1 + n1*n2).as_independent(n2) == (n1, n1*n2)
assert (n2*n1 + n1*n2).as_independent(n2) == (0, n1*n2 + n2*n1)
assert (n1*n2*n1).as_independent(n2) == (n1, n2*n1)
assert (n1*n2*n1).as_independent(n1) == (1, n1*n2*n1)
assert (3*x).as_independent(x, as_Add=True) == (0, 3*x)
assert (3*x).as_independent(x, as_Add=False) == (3, x)
assert (3+x).as_independent(x, as_Add=True) == (3, x)
assert (3+x).as_independent(x, as_Add=False) == (1, 3 + x)
# issue 2380
assert (3*x).as_independent(Symbol) == (3, x)
# issue 2549
assert (n1*x*y).as_independent(x) == (n1*y, x)
assert ((x + n1)*(x - y)).as_independent(x) == (1, (x + n1)*(x - y))
assert ((x + n1)*(x - y)).as_independent(y) == (x + n1, x - y)
assert (DiracDelta(x - n1)*DiracDelta(x - y)).as_independent(x) == (1, DiracDelta(x - n1)*DiracDelta(x - y))
assert (x*y*n1*n2*n3).as_independent(n2) == (x*y*n1, n2*n3)
assert (x*y*n1*n2*n3).as_independent(n1) == (x*y, n1*n2*n3)
assert (x*y*n1*n2*n3).as_independent(n3) == (x*y*n1*n2, n3)
assert (DiracDelta(x - n1)*DiracDelta(y - n1)*DiracDelta(x - n2)).as_independent(y) == \
(DiracDelta(x - n1), DiracDelta(y - n1)*DiracDelta(x - n2))
# issue 2685
assert (x + Integral(x, (x, 1, 2))).as_independent(x, strict=True) == \
(Integral(x, (x, 1, 2)), x)
def test_call():
# See the long history of this in issues 1927 and 2006.
# No effect as there are no callables
assert sin(x)(1) == sin(x)
assert (1+sin(x))(1) == 1+sin(x)
# Effect in the presence of callables
l = Lambda(x, 2*x)
assert (l+x)(y) == 2*y+x
assert (x**l)(2) == x**4
# TODO UndefinedFunction does not subclass Expr
#f = Function('f')
#assert (2*f)(x) == 2*f(x)
def test_replace():
f = log(sin(x)) + tan(sin(x**2))
assert f.replace(sin, cos) == log(cos(x)) + tan(cos(x**2))
assert f.replace(sin, lambda a: sin(2*a)) == log(sin(2*x)) + tan(sin(2*x**2))
a = Wild('a')
assert f.replace(sin(a), cos(a)) == log(cos(x)) + tan(cos(x**2))
assert f.replace(sin(a), lambda a: sin(2*a)) == log(sin(2*x)) + tan(sin(2*x**2))
g = 2*sin(x**3)
assert g.replace(lambda expr: expr.is_Number, lambda expr: expr**2) == 4*sin(x**9)
assert cos(x).replace(cos, sin, map=True) == (sin(x), {cos(x): sin(x)})
assert sin(x).replace(cos, sin) == sin(x)
assert (y*sin(x)).replace(sin, lambda expr: sin(expr)/y) == sin(x)
def test_find():
expr = (x + y + 2 + sin(3*x))
assert expr.find(lambda u: u.is_Integer) == set([S(2), S(3)])
assert expr.find(lambda u: u.is_Symbol) == set([x, y])
assert expr.find(lambda u: u.is_Integer, group=True) == {S(2): 1, S(3): 1}
assert expr.find(lambda u: u.is_Symbol, group=True) == {x: 2, y: 1}
assert expr.find(Integer) == set([S(2), S(3)])
assert expr.find(Symbol) == set([x, y])
assert expr.find(Integer, group=True) == {S(2): 1, S(3): 1}
assert expr.find(Symbol, group=True) == {x: 2, y: 1}
a = Wild('a')
expr = sin(sin(x)) + sin(x) + cos(x) + x
assert expr.find(lambda u: type(u) is sin) == set([sin(x), sin(sin(x))])
assert expr.find(lambda u: type(u) is sin, group=True) == {sin(x): 2, sin(sin(x)): 1}
assert expr.find(sin(a)) == set([sin(x), sin(sin(x))])
assert expr.find(sin(a), group=True) == {sin(x): 2, sin(sin(x)): 1}
assert expr.find(sin) == set([sin(x), sin(sin(x))])
assert expr.find(sin, group=True) == {sin(x): 2, sin(sin(x)): 1}
def test_count():
expr = (x + y + 2 + sin(3*x))
assert expr.count(lambda u: u.is_Integer) == 2
assert expr.count(lambda u: u.is_Symbol) == 3
assert expr.count(Integer) == 2
assert expr.count(Symbol) == 3
assert expr.count(2) == 1
a = Wild('a')
assert expr.count(sin) == 1
assert expr.count(sin(a)) == 1
assert expr.count(lambda u: type(u) is sin) == 1
def test_has_basics():
f = Function('f')
g = Function('g')
p = Wild('p')
assert sin(x).has(x)
assert sin(x).has(sin)
assert not sin(x).has(y)
assert not sin(x).has(cos)
assert f(x).has(x)
assert f(x).has(f)
assert not f(x).has(y)
assert not f(x).has(g)
assert f(x).diff(x).has(x)
assert f(x).diff(x).has(f)
assert f(x).diff(x).has(Derivative)
assert not f(x).diff(x).has(y)
assert not f(x).diff(x).has(g)
assert not f(x).diff(x).has(sin)
assert (x**2).has(Symbol)
assert not (x**2).has(Wild)
assert (2*p).has(Wild)
assert not x.has()
def test_has_multiple():
f = x**2*y + sin(2**t + log(z))
assert f.has(x)
assert f.has(y)
assert f.has(z)
assert f.has(t)
assert not f.has(u)
assert f.has(x, y, z, t)
assert f.has(x, y, z, t, u)
i = Integer(4400)
assert not i.has(x)
assert (i*x**i).has(x)
assert not (i*y**i).has(x)
assert (i*y**i).has(x, y)
assert not (i*y**i).has(x, z)
def test_has_piecewise():
f = (x*y + 3/y)**(3 + 2)
g = Function('g')
h = Function('h')
p = Piecewise((g(x), x < -1), (1, x <= 1), (f, True))
assert p.has(x)
assert p.has(y)
assert not p.has(z)
assert p.has(1)
assert p.has(3)
assert not p.has(4)
assert p.has(f)
assert p.has(g)
assert not p.has(h)
def test_has_iterative():
A, B, C = symbols('A,B,C', commutative=False)
f = x*gamma(x)*sin(x)*exp(x*y)*A*B*C*cos(x*A*B)
assert f.has(x)
assert f.has(x*y)
assert f.has(x*sin(x))
assert not f.has(x*sin(y))
assert f.has(x*A)
assert f.has(x*A*B)
assert not f.has(x*A*C)
assert f.has(x*A*B*C)
assert not f.has(x*A*C*B)
assert f.has(x*sin(x)*A*B*C)
assert not f.has(x*sin(x)*A*C*B)
assert not f.has(x*sin(y)*A*B*C)
assert f.has(x*gamma(x))
assert not f.has(x + sin(x))
assert (x & y & z).has(x & z)
def test_has_integrals():
f = Integral(x**2 + sin(x*y*z), (x, 0, x + y + z))
assert f.has(x + y)
assert f.has(x + z)
assert f.has(y + z)
assert f.has(x*y)
assert f.has(x*z)
assert f.has(y*z)
assert not f.has(2*x + y)
assert not f.has(2*x*y)
def test_has_tuple():
f = Function('f')
g = Function('g')
h = Function('h')
assert Tuple(x, y).has(x)
assert not Tuple(x, y).has(z)
assert Tuple(f(x), g(x)).has(x)
assert not Tuple(f(x), g(x)).has(y)
assert Tuple(f(x), g(x)).has(f)
assert Tuple(f(x), g(x)).has(f(x))
assert not Tuple(f, g).has(x)
assert Tuple(f, g).has(f)
assert not Tuple(f, g).has(h)
assert Tuple(True).has(True) is True # .has(1) will also be True
def test_has_units():
from sympy.physics.units import m, s
assert (x*m/s).has(x)
assert (x*m/s).has(y, z) is False
def test_has_polys():
poly = Poly(x**2 + x*y*sin(z), x, y, t)
assert poly.has(x)
assert poly.has(x, y, z)
assert poly.has(x, y, z, t)
def test_has_physics():
assert FockState((x, y)).has(x)
def test_as_poly_as_expr():
f = x**2 + 2*x*y
assert f.as_poly().as_expr() == f
assert f.as_poly(x, y).as_expr() == f
assert (f + sin(x)).as_poly(x, y) is None
p = Poly(f, x, y)
assert p.as_poly() == p
def test_nonzero():
assert bool(S.Zero) == False
assert bool(S.One) == True
assert bool(x) == True
assert bool(x+y) == True
assert bool(x-x) == False
assert bool(x*y) == True
assert bool(x*1) == True
assert bool(x*0) == False
def test_is_number():
assert Float(3.14).is_number == True
assert Integer(737).is_number == True
assert Rational(3, 2).is_number == True
assert Rational(8).is_number == True
assert x.is_number == False
assert (2*x).is_number == False
assert (x + y).is_number == False
assert log(2).is_number == True
assert log(x).is_number == False
assert (2 + log(2)).is_number == True
assert (8+log(2)).is_number == True
assert (2 + log(x)).is_number == False
assert (8+log(2)+x).is_number == False
assert (1+x**2/x-x).is_number == True
assert Tuple(Integer(1)).is_number == False
assert Add(2, x).is_number == False
assert Mul(3, 4).is_number == True
assert Pow(log(2), 2).is_number == True
assert oo.is_number == True
g = WildFunction('g')
assert g.is_number == False
assert (2*g).is_number == False
assert (x**2).subs(x, 3).is_number == True
# test extensibility of .is_number
# on subinstances of Basic
class A(Basic):
pass
a = A()
assert a.is_number == False
def test_as_coeff_add():
assert S(2).as_coeff_add() == (2, ())
assert S(3.0).as_coeff_add() == (0, (S(3.0),))
assert S(-3.0).as_coeff_add() == (0, (S(-3.0),))
assert x .as_coeff_add() == ( 0, (x,))
assert (-1+x).as_coeff_add() == (-1, (x,))
assert ( 2+x).as_coeff_add() == ( 2, (x,))
assert ( 1+x).as_coeff_add() == ( 1, (x,))
assert (x + y).as_coeff_add(y) == (x, (y,))
assert (3*x).as_coeff_add(y) == (3*x, ())
# don't do expansion
e = (x + y)**2
assert e.as_coeff_add(y) == (0, (e,))
def test_as_coeff_mul():
assert S(2).as_coeff_mul() == (2, ())
assert S(3.0).as_coeff_mul() == (1, (S(3.0),))
assert S(-3.0).as_coeff_mul() == (-1, (S(3.0),))
assert x .as_coeff_mul() == ( 1, (x,))
assert (-x).as_coeff_mul() == (-1, (x,))
assert (2*x).as_coeff_mul() == (2, (x,))
assert (x*y).as_coeff_mul(y) == (x, (y,))
assert (3 + x).as_coeff_mul(y) == (3 + x, ())
# don't do expansion
e = exp(x + y)
assert e.as_coeff_mul(y) == (1, (e,))
e = 2**(x + y)
assert e.as_coeff_mul(y) == (1, (e,))
def test_as_coeff_exponent():
assert (3*x**4).as_coeff_exponent(x) == (3, 4)
assert (2*x**3).as_coeff_exponent(x) == (2, 3)
assert (4*x**2).as_coeff_exponent(x) == (4, 2)
assert (6*x**1).as_coeff_exponent(x) == (6, 1)
assert (3*x**0).as_coeff_exponent(x) == (3, 0)
assert (2*x**0).as_coeff_exponent(x) == (2, 0)
assert (1*x**0).as_coeff_exponent(x) == (1, 0)
assert (0*x**0).as_coeff_exponent(x) == (0, 0)
assert (-1*x**0).as_coeff_exponent(x) == (-1, 0)
assert (-2*x**0).as_coeff_exponent(x) == (-2, 0)
assert (2*x**3+pi*x**3).as_coeff_exponent(x) == (2+pi, 3)
assert (x*log(2)/(2*x + pi*x)).as_coeff_exponent(x) == \
(log(2)/(2+pi), 0)
# 1685
D = Derivative
f = Function('f')
fx = D(f(x), x)
assert fx.as_coeff_exponent(f(x)) == (fx ,0)
def test_extractions():
assert ((x*y)**3).extract_multiplicatively(x**2 * y) == x*y**2
assert ((x*y)**3).extract_multiplicatively(x**4 * y) == None
assert (2*x).extract_multiplicatively(2) == x
assert (2*x).extract_multiplicatively(3) == None
assert (2*x).extract_multiplicatively(-1) == None
assert (Rational(1, 2)*x).extract_multiplicatively(3) == x/6
assert (sqrt(x)).extract_multiplicatively(x) == None
assert (sqrt(x)).extract_multiplicatively(1/x) == None
assert ((x*y)**3).extract_additively(1) == None
assert (x + 1).extract_additively(x) == 1
assert (x + 1).extract_additively(2*x) == None
assert (x + 1).extract_additively(-x) == None
assert (-x + 1).extract_additively(2*x) == None
assert (2*x + 3).extract_additively(x) == x + 3
assert (2*x + 3).extract_additively(2) == 2*x + 1
assert (2*x + 3).extract_additively(3) == 2*x
assert (2*x + 3).extract_additively(-2) == None
assert (2*x + 3).extract_additively(3*x) == None
assert (2*x + 3).extract_additively(2*x) == 3
assert x.extract_additively(0) == x
assert S(2).extract_additively(x) is None
assert S(2.).extract_additively(2) == S.Zero
assert S(2*x + 3).extract_additively(x + 1) == x + 2
assert S(2*x + 3).extract_additively(y + 1) is None
assert S(2*x - 3).extract_additively(x + 1) is None
assert S(2*x - 3).extract_additively(y + z) is None
assert ((a + 1)*x*4 + y).extract_additively(x).expand() == \
4*a*x + 3*x + y
assert ((a + 1)*x*4 + 3*y).extract_additively(x + 2*y).expand() == \
4*a*x + 3*x + y
assert (y*(x + 1)).extract_additively(x + 1) is None
assert ((y + 1)*(x + 1) + 3).extract_additively(x + 1) == \
y*(x + 1) + 3
assert ((x + y)*(x + 1) + x + y + 3).extract_additively(x + y) == \
x*(x + y) + 3
assert (x + y + 2*((x + y)*(x + 1)) + 3).extract_additively((x + y)*(x + 1)) == \
x + y + (x + 1)*(x + y) + 3
assert ((y + 1)*(x + 2*y + 1) + 3).extract_additively(y + 1) == \
(x + 2*y)*(y + 1) + 3
n = Symbol("n", integer=True)
assert (Integer(-3)).could_extract_minus_sign() == True
assert (-n*x+x).could_extract_minus_sign() != (n*x-x).could_extract_minus_sign()
assert (x-y).could_extract_minus_sign() != (-x+y).could_extract_minus_sign()
assert (1-x-y).could_extract_minus_sign() == True
assert (1-x+y).could_extract_minus_sign() == False
assert ((-x-x*y)/y).could_extract_minus_sign() == True
assert (-(x+x*y)/y).could_extract_minus_sign() == True
assert ((x+x*y)/(-y)).could_extract_minus_sign() == True
assert ((x+x*y)/y).could_extract_minus_sign() == False
assert (x*(-x-x**3)).could_extract_minus_sign() == True # used to cause infinite recursion
assert ((-x-y)/(x+y)).could_extract_minus_sign() == True # is_Mul odd case
# The results of each of these will vary on different machines; e.g.
# the first one might be False and the other then True, or vice versa,
# so both are included.
assert ((-x-y)/(x-y)).could_extract_minus_sign() == False or\
((-x-y)/(y-x)).could_extract_minus_sign() == False # is_Mul even case
assert ( x - y).could_extract_minus_sign() == False
assert (-x + y).could_extract_minus_sign() == True
def test_coeff():
assert (x+1).coeff(x+1) == 1
assert (3*x).coeff(0) == 0
assert (z*(1+x)*x**2).coeff(1+x) == z*x**2
assert (1+2*x*x**(1+x)).coeff(x*x**(1+x)) == 2
assert (1+2*x**(y+z)).coeff(x**(y+z)) == 2
assert (3+2*x+4*x**2).coeff(1) == 0
assert (3+2*x+4*x**2).coeff(-1) == 0
assert (3+2*x+4*x**2).coeff(x) == 2
assert (3+2*x+4*x**2).coeff(x**2) == 4
assert (3+2*x+4*x**2).coeff(x**3) == 0
assert (-x/8 + x*y).coeff(x) == -S(1)/8 + y
assert (-x/8 + x*y).coeff(-x) == S(1)/8
assert (4*x).coeff(2*x) == 0
assert (2*x).coeff(2*x) == 1
assert (-oo*x).coeff(x*oo) == -1
n1, n2 = symbols('n1 n2', commutative=False)
assert (n1*n2).coeff(n1) == 1
assert (n1*n2).coeff(n2) == n1
assert (n1*n2 + x*n1).coeff(n1) == 1 # 1*n1*(n2+x)
assert (n2*n1 + x*n1).coeff(n1) == n2 + x
assert (n2*n1 + x*n1**2).coeff(n1) == n2
assert (n1**x).coeff(n1) == 0
assert (n1*n2 + n2*n1).coeff(n1) == 0
assert (2*(n1+n2)*n2).coeff(n1+n2, right=1) == n2
assert (2*(n1+n2)*n2).coeff(n1+n2, right=0) == 2
f = Function('f')
assert (2*f(x) + 3*f(x).diff(x)).coeff(f(x)) == 2
expr = z*(x+y)**2
expr2 = z*(x+y)**2 + z*(2*x + 2*y)**2
assert expr.coeff(z) == (x+y)**2
assert expr.coeff(x+y) == 0
assert expr2.coeff(z) == (x+y)**2 + (2*x + 2*y)**2
assert (x + y + 3*z).coeff(1) == x + y
assert (-x + 2*y).coeff(-1) == x
assert (x - 2*y).coeff(-1) == 2*y
assert (3 + 2*x + 4*x**2).coeff(1) == 0
assert (-x - 2*y).coeff(2) == -y
assert (x + sqrt(2)*x).coeff(sqrt(2)) == x
assert (3 + 2*x + 4*x**2).coeff(x) == 2
assert (3 + 2*x + 4*x**2).coeff(x**2) == 4
assert (3 + 2*x + 4*x**2).coeff(x**3) == 0
assert (z*(x + y)**2).coeff((x + y)**2) == z
assert (z*(x + y)**2).coeff(x + y) == 0
assert (2 + 2*x + (x + 1)*y).coeff(x + 1) == y
assert (x + 2*y + 3).coeff(1) == x
assert (x + 2*y + 3).coeff(x, 0) == 2*y + 3
assert (x**2 + 2*y + 3*x).coeff(x**2, 0) == 2*y + 3*x
assert x.coeff(0, 0) == 0
assert x.coeff(x, 0) == 0
n, m, o, l = symbols('n m o l', commutative=False)
assert n.coeff(n) == 1
assert y.coeff(n) == 0
assert (3*n).coeff(n) == 3
assert (2 + n).coeff(x*m) == 0
assert (2*x*n*m).coeff(x) == 2*n*m
assert (2 + n).coeff(x*m*n + y) == 0
assert (2*x*n*m).coeff(3*n) == 0
assert (n*m + m*n*m).coeff(n) == 1 + m
assert (n*m + m*n*m).coeff(n, right=True) == m # = (1 + m)*n*m
assert (n*m + m*n).coeff(n) == 0
assert (n*m + o*m*n).coeff(m*n) == o
assert (n*m + o*m*n).coeff(m*n, right=1) == 1
assert (n*m + n*m*n).coeff(n*m, right=1) == 1 + n # = n*m*(n + 1)
def test_coeff2():
r, kappa = symbols('r, kappa')
psi = Function("psi")
g = 1/r**2 * (2*r*psi(r).diff(r, 1) + r**2 * psi(r).diff(r, 2))
g = g.expand()
assert g.coeff((psi(r).diff(r))) == 2/r
def test_coeff2_0():
r, kappa = symbols('r, kappa')
psi = Function("psi")
g = 1/r**2 * (2*r*psi(r).diff(r, 1) + r**2 * psi(r).diff(r, 2))
g = g.expand()
assert g.coeff(psi(r).diff(r, 2)) == 1
def test_coeff_expand():
expr = z*(x+y)**2
expr2 = z*(x+y)**2 + z*(2*x + 2*y)**2
assert expr.coeff(z) == (x+y)**2
assert expr2.coeff(z) == (x+y)**2 + (2*x + 2*y)**2
def test_integrate():
assert x.integrate(x) == x**2/2
assert x.integrate((x, 0, 1)) == S(1)/2
def test_as_base_exp():
assert x.as_base_exp() == (x, S.One)
assert (x*y*z).as_base_exp() == (x*y*z, S.One)
assert (x+y+z).as_base_exp() == (x+y+z, S.One)
assert ((x+y)**z).as_base_exp() == (x+y, z)
def test_issue1864():
assert hasattr(Mul(x, y), "is_commutative")
assert hasattr(Mul(x, y, evaluate=False), "is_commutative")
assert hasattr(Pow(x, y), "is_commutative")
assert hasattr(Pow(x, y, evaluate=False), "is_commutative")
expr = Mul(Pow(2, 2, evaluate=False), 3, evaluate=False) + 1
assert hasattr(expr, "is_commutative")
def test_action_verbs():
assert nsimplify((1/(exp(3*pi*x/5)+1))) == (1/(exp(3*pi*x/5)+1)).nsimplify()
assert ratsimp(1/x + 1/y) == (1/x + 1/y).ratsimp()
assert trigsimp(log(x), deep=True) == (log(x)).trigsimp(deep = True)
assert radsimp(1/(2+sqrt(2))) == (1/(2+sqrt(2))).radsimp()
assert powsimp(x**y*x**z*y**z, combine='all') == (x**y*x**z*y**z).powsimp(combine='all')
assert simplify(x**y*x**z*y**z) == (x**y*x**z*y**z).simplify()
assert together(1/x + 1/y) == (1/x + 1/y).together()
# Not tested because it's deprecated
#assert separate((x*(y*z)**3)**2) == ((x*(y*z)**3)**2).separate()
assert collect(a*x**2 + b*x**2 + a*x - b*x + c, x) == (a*x**2 + b*x**2 + a*x - b*x + c).collect(x)
assert apart(y/(y+2)/(y+1), y) == (y/(y+2)/(y+1)).apart(y)
assert combsimp(y/(x+2)/(x+1)) == (y/(x+2)/(x+1)).combsimp()
assert factor(x**2+5*x+6) == (x**2+5*x+6).factor()
assert refine(sqrt(x**2)) == sqrt(x**2).refine()
assert cancel((x**2+5*x+6)/(x+2)) == ((x**2+5*x+6)/(x+2)).cancel()
def test_as_powers_dict():
assert x.as_powers_dict() == {x: 1}
assert (x**y*z).as_powers_dict() == {x: y, z: 1}
assert Mul(2, 2, **dict(evaluate=False)).as_powers_dict() == {S(2): S(2)}
def test_as_coefficients_dict():
check = [S(1), x, y, x*y, 1]
assert [Add(3*x, 2*x, y, 3).as_coefficients_dict()[i] for i in check] == \
[3, 5, 1, 0, 0]
assert [(3*x*y).as_coefficients_dict()[i] for i in check] == \
[0, 0, 0, 3, 0]
assert (3.0*x*y).as_coefficients_dict()[3.0*x*y] == 1
def test_args_cnc():
A = symbols('A', commutative=False)
assert (x+A).args_cnc() == \
[[], [x + A]]
assert (x+a).args_cnc() == \
[[a + x], []]
assert (x*a).args_cnc() == \
[[a, x], []]
assert (x*y*A*(A+1)).args_cnc(cset=True) == \
[set([x, y]), [A, 1 + A]]
assert Mul(x, x, evaluate=False).args_cnc(cset=True, warn=False) == \
[set([x]), []]
assert Mul(x, x**2, evaluate=False).args_cnc(cset=True, warn=False) == \
[set([x, x**2]), []]
raises(ValueError, lambda: Mul(x, x, evaluate=False).args_cnc(cset=True))
assert Mul(x, y, x, evaluate=False).args_cnc() == \
[[x, y, x], []]
def test_new_rawargs():
n = Symbol('n', commutative=False)
a = x + n
assert a.is_commutative is False
assert a._new_rawargs(x).is_commutative
assert a._new_rawargs(x, y).is_commutative
assert a._new_rawargs(x, n).is_commutative is False
assert a._new_rawargs(x, y, n).is_commutative is False
m = x*n
assert m.is_commutative is False
assert m._new_rawargs(x).is_commutative
assert m._new_rawargs(n).is_commutative is False
assert m._new_rawargs(x, y).is_commutative
assert m._new_rawargs(x, n).is_commutative is False
assert m._new_rawargs(x, y, n).is_commutative is False
assert m._new_rawargs(x, n, reeval=False).is_commutative is False
assert m._new_rawargs(S.One) is S.One
def test_2127():
assert Add(evaluate=False) == 0
assert Mul(evaluate=False) == 1
assert Mul(x+y, evaluate=False).is_Add
def test_free_symbols():
# free_symbols should return the free symbols of an object
assert S(1).free_symbols == set()
assert (x).free_symbols == set([x])
assert Integral(x, (x, 1, y)).free_symbols == set([y])
assert (-Integral(x, (x, 1, y))).free_symbols == set([y])
assert meter.free_symbols == set()
assert (meter**x).free_symbols == set([x])
def test_issue2201():
x = Symbol('x', commutative=False)
assert x*sqrt(2)/sqrt(6) == x*sqrt(3)/3
def test_issue_2061():
assert sqrt(-1.0*x) == 1.0*sqrt(-x)
assert sqrt(1.0*x) == 1.0*sqrt(x)
def test_as_coeff_Mul():
assert Integer(3).as_coeff_Mul() == (Integer(3), Integer(1))
assert Rational(3, 4).as_coeff_Mul() == (Rational(3, 4), Integer(1))
assert Float(5.0).as_coeff_Mul() == (Float(5.0), Integer(1))
assert (Integer(3)*x).as_coeff_Mul() == (Integer(3), x)
assert (Rational(3, 4)*x).as_coeff_Mul() == (Rational(3, 4), x)
assert (Float(5.0)*x).as_coeff_Mul() == (Float(5.0), x)
assert (Integer(3)*x*y).as_coeff_Mul() == (Integer(3), x*y)
assert (Rational(3, 4)*x*y).as_coeff_Mul() == (Rational(3, 4), x*y)
assert (Float(5.0)*x*y).as_coeff_Mul() == (Float(5.0), x*y)
assert (x).as_coeff_Mul() == (S.One, x)
assert (x*y).as_coeff_Mul() == (S.One, x*y)
def test_as_coeff_Add():
assert Integer(3).as_coeff_Add() == (Integer(3), Integer(0))
assert Rational(3, 4).as_coeff_Add() == (Rational(3, 4), Integer(0))
assert Float(5.0).as_coeff_Add() == (Float(5.0), Integer(0))
assert (Integer(3) + x).as_coeff_Add() == (Integer(3), x)
assert (Rational(3, 4) + x).as_coeff_Add() == (Rational(3, 4), x)
assert (Float(5.0) + x).as_coeff_Add() == (Float(5.0), x)
assert (Integer(3) + x + y).as_coeff_Add() == (Integer(3), x + y)
assert (Rational(3, 4) + x + y).as_coeff_Add() == (Rational(3, 4), x + y)
assert (Float(5.0) + x + y).as_coeff_Add() == (Float(5.0), x + y)
assert (x).as_coeff_Add() == (S.Zero, x)
assert (x*y).as_coeff_Add() == (S.Zero, x*y)
def test_expr_sorting():
f, g = symbols('f,g', cls=Function)
exprs = [1/x**2, 1/x, sqrt(sqrt(x)), sqrt(x), x, sqrt(x)**3, x**2]
assert sorted(exprs, key=default_sort_key) == exprs
exprs = [x, 2*x, 2*x**2, 2*x**3, x**n, 2*x**n, sin(x), sin(x)**n, sin(x**2), cos(x), cos(x**2), tan(x)]
assert sorted(exprs, key=default_sort_key) == exprs
exprs = [x + 1, x**2 + x + 1, x**3 + x**2 + x + 1]
assert sorted(exprs, key=default_sort_key) == exprs
exprs = [S(4), x - 3*I/2, x + 3*I/2, x - 4*I + 1, x + 4*I + 1]
assert sorted(exprs, key=default_sort_key) == exprs
exprs = [f(1), f(2), f(3), f(1, 2, 3), g(1), g(2), g(3), g(1, 2, 3)]
assert sorted(exprs, key=default_sort_key) == exprs
exprs = [f(x), g(x), exp(x), sin(x), cos(x), factorial(x)]
assert sorted(exprs, key=default_sort_key) == exprs
exprs = [Tuple(x, y), Tuple(x, z), Tuple(x, y, z)]
assert sorted(exprs, key=default_sort_key) == exprs
exprs = [[3], [1, 2]]
assert sorted(exprs, key=default_sort_key) == exprs
exprs = [[1, 2], [2, 3]]
assert sorted(exprs, key=default_sort_key) == exprs
exprs = [[1, 2], [1, 2, 3]]
assert sorted(exprs, key=default_sort_key) == exprs
exprs = [{x: -y}, {x: y}]
assert sorted(exprs, key=default_sort_key) == exprs
exprs = [set([1]), set([1, 2])]
assert sorted(exprs, key=default_sort_key) == exprs
def test_as_ordered_factors():
f, g = symbols('f,g', cls=Function)
assert x.as_ordered_factors() == [x]
assert (2*x*x**n*sin(x)*cos(x)).as_ordered_factors() == [Integer(2), x, x**n, sin(x), cos(x)]
args = [f(1), f(2), f(3), f(1, 2, 3), g(1), g(2), g(3), g(1, 2, 3)]
expr = Mul(*args)
assert expr.as_ordered_factors() == args
A, B = symbols('A,B', commutative=False)
assert (A*B).as_ordered_factors() == [A, B]
assert (B*A).as_ordered_factors() == [B, A]
def test_as_ordered_terms():
f, g = symbols('f,g', cls=Function)
assert x.as_ordered_terms() == [x]
assert (sin(x)**2*cos(x) + sin(x)*cos(x)**2 + 1).as_ordered_terms() == [sin(x)**2*cos(x), sin(x)*cos(x)**2, 1]
args = [f(1), f(2), f(3), f(1, 2, 3), g(1), g(2), g(3), g(1, 2, 3)]
expr = Add(*args)
assert expr.as_ordered_terms() == args
assert (1 + 4*sqrt(3)*pi*x).as_ordered_terms() == [4*pi*x*sqrt(3), 1]
assert ( 2 + 3*I).as_ordered_terms() == [ 2, 3*I]
assert (-2 + 3*I).as_ordered_terms() == [-2, 3*I]
assert ( 2 - 3*I).as_ordered_terms() == [ 2, -3*I]
assert (-2 - 3*I).as_ordered_terms() == [-2, -3*I]
assert ( 4 + 3*I).as_ordered_terms() == [ 4, 3*I]
assert (-4 + 3*I).as_ordered_terms() == [-4, 3*I]
assert ( 4 - 3*I).as_ordered_terms() == [ 4, -3*I]
assert (-4 - 3*I).as_ordered_terms() == [-4, -3*I]
f = x**2*y**2 + x*y**4 + y + 2
assert f.as_ordered_terms(order="lex") == [x**2*y**2, x*y**4, y, 2]
assert f.as_ordered_terms(order="grlex") == [x*y**4, x**2*y**2, y, 2]
assert f.as_ordered_terms(order="rev-lex") == [2, y, x*y**4, x**2*y**2]
assert f.as_ordered_terms(order="rev-grlex") == [2, y, x**2*y**2, x*y**4]
def test_sort_key_atomic_expr():
from sympy.physics.units import m, s
assert sorted([-m, s], key=lambda arg: arg.sort_key()) == [-m, s]
def test_issue_1100():
# first subs and limit gives NaN
a = x/y
assert a._eval_interval(x, 0, oo)._eval_interval(y, oo, 0) is S.NaN
# second subs and limit gives NaN
assert a._eval_interval(x, 0, oo)._eval_interval(y, 0, oo) is S.NaN
# difference gives S.NaN
a = x - y
assert a._eval_interval(x, 1, oo)._eval_interval(y, oo, 1) is S.NaN
raises(ValueError, lambda: x._eval_interval(x, None, None))
def test_primitive():
assert (3*(x + 1)**2).primitive() == (3, (x + 1)**2)
assert (6*x + 2).primitive() == (2, 3*x + 1)
assert (x/2 + 3).primitive() == (S(1)/2, x + 6)
eq = (6*x + 2)*(x/2 + 3)
assert eq.primitive()[0] == 1
eq = (2 + 2*x)**2
assert eq.primitive()[0] == 1
assert (4.0*x).primitive() == (1, 4.0*x)
assert (4.0*x + y/2).primitive() == (S.Half, 8.0*x + y)
assert (-2*x).primitive() == (2, -x)
assert Add(5*z/7, 0.5*x, 3*y/2, evaluate=False).primitive() == \
(S(1)/14, 7.0*x + 21*y + 10*z)
for i in [S.Infinity, S.NegativeInfinity, S.ComplexInfinity]:
assert (i + x/3).primitive() == \
(S(1)/3, i + x)
assert (S.Infinity + 2*x/3 + 4*y/7).primitive() == \
(S(1)/21, 14*x + 12*y + oo)
assert S.Zero.primitive() == (S.One, S.Zero)
def test_issue_2744():
a = 1 + x
assert (2*a).extract_multiplicatively(a) == 2
assert (4*a).extract_multiplicatively(2*a) == 2
assert ((3*a)*(2*a)).extract_multiplicatively(a) == 6*a
def test_is_constant():
from sympy.solvers.solvers import checksol
assert Sum(x, (x, 1, 10)).is_constant() == True
assert Sum(x, (x, 1, n)).is_constant() == False
assert Sum(x, (x, 1, n)).is_constant(y) == True
assert Sum(x, (x, 1, n)).is_constant(n) == False
assert Sum(x, (x, 1, n)).is_constant(x) == True
eq = a*cos(x)**2 + a*sin(x)**2 - a
assert eq.is_constant() == True
assert eq.subs({x:pi, a:2}) == eq.subs({x:pi, a:3}) == 0
assert x.is_constant() is False
assert x.is_constant(y) is True
assert checksol(x, x, Sum(x, (x, 1, n))) == False
f = Function('f')
assert checksol(x, x, f(x)) == False
p = symbols('p', positive=True)
assert Pow(x, S(0), evaluate=False).is_constant() == True # == 1
assert Pow(S(0), x, evaluate=False).is_constant() == False # == 0 or 1
assert Pow(S(0), p, evaluate=False).is_constant() == True # == 1
assert (2**x).is_constant() == False
assert Pow(S(2), S(3), evaluate=False).is_constant() == True
z1, z2 = symbols('z1 z2', zero=True)
assert (z1 + 2*z2).is_constant() is True
assert meter.is_constant() is True
assert (3*meter).is_constant() is True
assert (x*meter).is_constant() is False
def test_equals():
assert (-3 - sqrt(5) + (-sqrt(10)/2 - sqrt(2)/2)**2).equals(0)
assert (x**2 - 1).equals((x + 1)*(x - 1))
assert (cos(x)**2 + sin(x)**2).equals(1)
assert (a*cos(x)**2 + a*sin(x)**2).equals(a)
r = sqrt(2)
assert (-1/(r + r*x) + 1/r/(1 + x)).equals(0)
assert factorial(x + 1).equals((x + 1)*factorial(x))
assert sqrt(3).equals(2*sqrt(3)) is False
assert (sqrt(5)*sqrt(3)).equals(sqrt(3)) is False
assert (sqrt(5) + sqrt(3)).equals(0) is False
assert (sqrt(5) + pi).equals(0) is False
assert meter.equals(0) is False
assert (3*meter**2).equals(0) is False
# from integrate(x*sqrt(1+2*x), x);
# diff is zero only when assumptions allow
i = 2*sqrt(2)*x**(S(5)/2)*(1 + 1/(2*x))**(S(5)/2)/5 + \
2*sqrt(2)*x**(S(3)/2)*(1 + 1/(2*x))**(S(5)/2)/(-6 - 3/x)
ans = sqrt(2*x + 1)*(6*x**2 + x - 1)/15
diff = i - ans
assert diff.equals(0) is False
assert diff.subs(x, -S.Half/2) == 7*sqrt(2)/120
# there are regions for x for which the expression is True, for
# example, when x < -1/2 or x > 0 the expression is zero
p = Symbol('p', positive=True)
assert diff.subs(x, p).equals(0) is True
assert diff.subs(x, -1).equals(0) is True
def test_random():
from sympy import posify
assert posify(x)[0]._random() is not None
def test_round():
from sympy.abc import x
assert Float('0.1249999').round(2) == 0.12
d20 = 12345678901234567890
ans = S(d20).round(2)
assert ans.is_Float and ans == d20
ans = S(d20).round(-2)
assert ans.is_Float and ans == 12345678901234567900
assert S('1/7').round(4) == 0.1429
assert S('.[12345]').round(4) == 0.1235
assert S('.1349').round(2) == 0.13
n = S(12345)
ans = n.round()
assert ans.is_Float
assert ans == n
ans = n.round(1)
assert ans.is_Float
assert ans == n
ans = n.round(4)
assert ans.is_Float
assert ans == n
assert n.round(-1) == 12350
r = n.round(-4)
assert r == 10000
# in fact, it should equal many values since __eq__
# compares at equal precision
assert all(r == i for i in range(9984, 10049))
assert n.round(-5) == 0
assert (pi + sqrt(2)).round(2) == 4.56
assert (10*(pi + sqrt(2))).round(-1) == 50
raises(TypeError, lambda: round(x + 2, 2))
assert S(2.3).round(1) == 2.3
e = S(12.345).round(2)
assert e == round(12.345, 2)
assert type(e) is Float
assert (Float(.3, 3) + 2*pi).round() == 7
assert (Float(.3, 3) + 2*pi*100).round() == 629
assert (Float(.03, 3) + 2*pi/100).round(5) == 0.09283
assert (Float(.03, 3) + 2*pi/100).round(4) == 0.0928
assert (pi + 2*E*I).round() == 3 + 5*I
assert S.Zero.round() == 0
a = (Add(1, Float('1.'+'9'*27, ''), evaluate=0))
assert a.round(10) == Float('3.0000000000','')
assert a.round(25) == Float('3.0000000000000000000000000','')
assert a.round(26) == Float('3.00000000000000000000000000','')
assert a.round(27) == Float('2.999999999999999999999999999','')
assert a.round(30) == Float('2.999999999999999999999999999','')
raises(TypeError, lambda: x.round())
# exact magnitude of 10
assert str(S(1).round()) == '1.'
assert str(S(100).round()) == '100.'
# applied to real and imaginary portions
assert (2*pi + E*I).round() == 6 + 3*I
assert (2*pi + I/10).round() == 6
assert (pi/10 + 2*I).round() == 2*I
# the lhs re and im parts are Float with dps of 2
# and those on the right have dps of 15 so they won't compare
# equal unless we use string or compare components (which will
# then coerce the floats to the same precision) or re-create
# the floats
assert str((pi/10 + E*I).round(2)) == '0.31 + 2.72*I'
assert (pi/10 + E*I).round(2).as_real_imag() == (0.31, 2.72)
assert (pi/10 + E*I).round(2) == Float(0.31, 2) + I*Float(2.72, 3)
# issue 3815
assert (I**(I+3)).round(3) == Float('-0.208','')*I
def test_extract_branch_factor():
assert exp_polar(2.0*I*pi).extract_branch_factor() == (1, 1)
| bsd-3-clause |
praekelt/vumi-go | go/apps/tests/view_helpers.py | 1 | 2691 | from django.core.urlresolvers import reverse
from zope.interface import implements
from vumi.tests.helpers import generate_proxies, IHelper
from go.base import utils as base_utils
from go.base.tests.helpers import DjangoVumiApiHelper
from go.vumitools.tests.helpers import GoMessageHelper
from .helpers import ApplicationHelper
class AppViewsHelper(object):
implements(IHelper)
def __init__(self, conversation_type):
self.conversation_type = conversation_type
self.vumi_helper = DjangoVumiApiHelper()
self._app_helper = ApplicationHelper(
conversation_type, self.vumi_helper)
# Proxy methods from our helpers.
generate_proxies(self, self._app_helper)
generate_proxies(self, self.vumi_helper)
def setup(self):
# Create the things we need to create
self.vumi_helper.setup()
self.vumi_helper.make_django_user()
def cleanup(self):
return self.vumi_helper.cleanup()
def get_new_view_url(self):
return reverse('conversations:new_conversation')
def get_conversation_helper(self, conversation):
return ConversationViewHelper(self, conversation.key)
def create_conversation_helper(self, *args, **kw):
conversation = self.create_conversation(*args, **kw)
return self.get_conversation_helper(conversation)
def get_api_commands_sent(self):
return base_utils.connection.get_commands()
class ConversationViewHelper(object):
def __init__(self, app_views_helper, conversation_key):
self.conversation_key = conversation_key
self.conversation_type = app_views_helper.conversation_type
self.app_helper = app_views_helper
def get_view_url(self, view):
view_def = base_utils.get_conversation_view_definition(
self.conversation_type)
return view_def.get_view_url(
view, conversation_key=self.conversation_key)
def get_action_view_url(self, action_name):
return reverse('conversations:conversation_action', kwargs={
'conversation_key': self.conversation_key,
'action_name': action_name,
})
def get_conversation(self):
return self.app_helper.get_conversation(self.conversation_key)
def add_stored_inbound(self, count, **kw):
msg_helper = GoMessageHelper(vumi_helper=self.app_helper)
conv = self.get_conversation()
return msg_helper.add_inbound_to_conv(conv, count, **kw)
def add_stored_replies(self, msgs):
msg_helper = GoMessageHelper(vumi_helper=self.app_helper)
conv = self.get_conversation()
return msg_helper.add_replies_to_conv(conv, msgs)
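# Illustrative usage in a test (assumption: the conversation type, the
# `started` kwarg and the 'show' view name are hypothetical examples):
#
#     helper = AppViewsHelper(u'bulk_message')
#     helper.setup()
#     conv_helper = helper.create_conversation_helper(started=True)
#     url = conv_helper.get_view_url('show')
#     ...
#     helper.cleanup()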
| bsd-3-clause |
ProfessionalIT/maxigenios-website | sdk/google_appengine/lib/grizzled/grizzled/test/net/ftp/TestFTPListParse.py | 19 | 5678 | #!/usr/bin/python2.4
# $Id: 30ba7ab5303adb95edaa7bc695c6afaa26fda210 $
"""
Unit tests for the FTP directory-listing parser in grizzled.net.ftp.parse.
"""
# ---------------------------------------------------------------------------
# Imports
# ---------------------------------------------------------------------------
import time
import google3
from grizzled.net.ftp.parse import *
# ---------------------------------------------------------------------------
# Constants
# ---------------------------------------------------------------------------
current_year = time.localtime().tm_year
TEST_DATA = [
# EPLF (Easily Parsed LIST Format)
{'line': '+i9872342.32142,m1229473595,/,\tpub',
'type': 'EPLF',
'size': 0,
'time': (2008, 12, 16, 19, 26, 35, 0, 0, -1),
'name': 'pub',
'try_cwd': True},
{'line': '+i9872342.32142,m1229473595,r,s10376,\tREADME.txt',
'type': 'EPLF',
'size': 10376,
'time': (2008, 12, 16, 19, 26, 35, 0, 0, -1),
'name': 'README.txt',
'try_cwd': False},
# Unix
{'line': '-rw-r--r-- 1 root other 531 Jan 29 03:26 README',
'type': 'Unix',
'size': 531,
'time': (current_year, 1, 29, 03, 26, 0, 0, 0, -1),
'name': 'README',
'try_cwd': False},
{'line': 'dr-xr-xr-x 2 root other 512 Apr 8 2003 etc',
'type': 'Unix',
'size': 512,
'time': (2003, 4, 8, 0, 0, 0, 0, 0, -1),
'name': 'etc',
'try_cwd': True},
{'line': '-rw-r--r-- 1 1356107 15000 4356349 Nov 23 11:34 09 Ribbons Undone.wma',
'type': 'Unix',
'size': 4356349,
'time': (current_year, 11, 23, 11, 34, 0, 0, 0, -1),
'name': '09 Ribbons Undone.wma',
'try_cwd': False},
# Microsoft Windows
{'line': '---------- 1 owner group 1803128 Jul 10 10:18 ls-lR.Z',
'type': 'Windows',
'size': 1803128,
'time': (current_year, 7, 10, 10, 18, 0, 0, 0, -1),
'name': 'ls-lR.Z',
'try_cwd': False},
{'line': 'd--------- 1 owner group 0 May 9 19:45 foo bar',
'type': 'Windows',
'size': 0,
'time': (current_year, 5, 9, 19, 45, 0, 0, 0, -1),
'name': 'foo bar',
'try_cwd': True},
# NetWare
{'line': 'd [R----F--] supervisor 512 Jan 16 18:53 login',
'type': 'NetWare',
'size': 512,
'time': (current_year, 1, 16, 18, 53, 0, 0, 0, -1),
'name': 'login',
'try_cwd': True},
# NetPresenz
{'line': 'drwxrwxr-x folder 2 May 10 1996 bar.sit',
'type': 'NetPresenz/Mac',
'size': 2,
'time': (1996, 5, 10, 0, 0, 0, 0, 0, -1),
'name': 'bar.sit',
'try_cwd': True},
# MultiNet/VMS (no size with these)
{'line': 'CORE.DIR;1 1 8-NOV-1999 07:02 [SYSTEM] (RWED,RWED,RE,RE)',
'type': 'MultiNet/VMS',
'size': 0,
'time': (1999, 11, 8, 7, 2, 0, 0, 0, -1),
'name': 'CORE',
'try_cwd': True},
{'line': '00README.TXT;1 2 30-DEC-1976 17:44 [SYSTEM] (RWED,RWED,RE,RE)',
'type': 'MultiNet/VMS',
'size': 0,
'time': (1976, 12, 30, 17, 44, 0, 0, 0, -1),
'name': '00README.TXT',
'try_cwd': False},
{'line': 'CII-MANUAL.TEX;1 213/216 29-JAN-1996 03:33:12 [ANONYMOU,ANONYMOUS] (RWED,RWED,,)',
'type': 'MultiNet/VMS',
'size': 0,
# Doesn't parse the seconds
'time': (1996, 1, 29, 03, 33, 0, 0, 0, -1),
'name': 'CII-MANUAL.TEX',
'try_cwd': False},
# MS-DOS
{'line': '04-27-00 09:09PM <DIR> licensed',
'type': 'MS-DOS',
'size': 0,
'time': (2000, 4, 27, 21, 9, 0, 0, 0, -1),
'name': 'licensed',
'try_cwd': True},
{'line': '11-18-03 10:16AM <DIR> pub',
'type': 'MS-DOS',
'size': 0,
'time': (2003, 11, 18, 10, 16, 0, 0, 0, -1),
'name': 'pub',
'try_cwd': True},
{'line': '04-14-99 03:47PM 589 readme.htm',
'type': 'MS-DOS',
'size': 589,
'time': (1999, 04, 14, 15, 47, 0, 0, 0, -1),
'name': 'readme.htm',
'try_cwd': False},
]
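# A minimal direct-usage sketch (assumption: mirrors parse_one_line() below,
# using a Unix-style line from TEST_DATA above):
#
#     parser = FTPListDataParser()
#     result = parser.parse_line('dr-xr-xr-x 2 root other 512 Apr 8 2003 etc')
#     print result.name, result.size, result.try_cwd, result.mtime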
# ---------------------------------------------------------------------------
# Classes
# ---------------------------------------------------------------------------
class TestFTPListParse(object):
def setUp(self):
pass
def assertEquals(self, test_value, expected_value, prefix=None):
error_message = '%s: ' % prefix if prefix else ''
error_message += 'Expected %s, got %s' % (expected_value, test_value)
assert test_value == expected_value, error_message
def test_parsing(self):
parser = FTPListDataParser()
i = 0
for t in TEST_DATA:
yield self.parse_one_line, parser, t, i
i += 1
def parse_one_line(self, parser, test_data, identifier):
line = test_data['line']
prefix = 'Test %d (%s)' % (identifier, test_data['type'])
name = test_data['name']
print '%s: "%s"' % (prefix, name)
result = parser.parse_line(line)
self.assertEquals(result.raw_line, line, prefix)
self.assertEquals(result.size, test_data['size'], prefix)
self.assertEquals(result.name, name, prefix)
self.assertEquals(result.try_cwd, test_data['try_cwd'], prefix)
expected_time = time.mktime(test_data['time'])
self.assertEquals(time.localtime(result.mtime),
time.localtime(expected_time),
prefix)
self.assertEquals(result.mtime, expected_time, prefix)
| mit |
popazerty/enigma2-obh | lib/python/Components/ServiceEventTracker.py | 124 | 4175 | InfoBarCount = 0
class InfoBarBase:
onInfoBarOpened = [ ]
onInfoBarClosed = [ ]
@staticmethod
def connectInfoBarOpened(fnc):
if not fnc in InfoBarBase.onInfoBarOpened:
InfoBarBase.onInfoBarOpened.append(fnc)
@staticmethod
def disconnectInfoBarOpened(fnc):
if fnc in InfoBarBase.onInfoBarOpened:
InfoBarBase.onInfoBarOpened.remove(fnc)
@staticmethod
def infoBarOpened(infobar):
for x in InfoBarBase.onInfoBarOpened:
x(infobar)
@staticmethod
def connectInfoBarClosed(fnc):
if not fnc in InfoBarBase.onInfoBarClosed:
InfoBarBase.onInfoBarClosed.append(fnc)
@staticmethod
def disconnectInfoBarClosed(fnc):
if fnc in InfoBarBase.onInfoBarClosed:
InfoBarBase.onInfoBarClosed.remove(fnc)
@staticmethod
def infoBarClosed(infobar):
for x in InfoBarBase.onInfoBarClosed:
x(infobar)
def __init__(self, steal_current_service = False):
if steal_current_service:
ServiceEventTracker.setActiveInfoBar(self, None, None)
else:
nav = self.session.nav
ServiceEventTracker.setActiveInfoBar(self, not steal_current_service and nav.getCurrentService(), nav.getCurrentlyPlayingServiceOrGroup())
self.onClose.append(self.__close)
InfoBarBase.infoBarOpened(self)
global InfoBarCount
InfoBarCount += 1
def __close(self):
ServiceEventTracker.popActiveInfoBar()
InfoBarBase.infoBarClosed(self)
global InfoBarCount
InfoBarCount -= 1
class ServiceEventTracker:
"""Tracks service events into a screen"""
InfoBarStack = [ ]
InfoBarStackSize = 0
oldServiceStr = None
EventMap = { }
navcore = None
@staticmethod
def event(evt):
set = ServiceEventTracker
func_list = set.EventMap.setdefault(evt, [])
if func_list:
nav = set.navcore
cur_ref = nav.getCurrentlyPlayingServiceOrGroup()
old_service_running = set.oldRef and cur_ref and cur_ref == set.oldRef and set.oldServiceStr == nav.getCurrentService().getPtrString()
if not old_service_running and set.oldServiceStr:
set.oldServiceStr = None
set.oldRef = None
ssize = set.InfoBarStackSize
stack = set.InfoBarStack
for func in func_list:
if (func[0] or # let pass all events to screens not derived from InfoBarBase
(not old_service_running and stack[ssize-1] == func[1]) or # let pass events from currently running service just to current active screen (derived from InfoBarBase)
(old_service_running and ssize > 1 and stack[ssize-2] == func[1])): # let pass events from old running service just to previous active screen (derived from InfoBarBase)
func[2]()
@staticmethod
def setActiveInfoBar(infobar, old_service, old_ref):
set = ServiceEventTracker
set.oldRef = old_ref
set.oldServiceStr = old_service and old_service.getPtrString()
assert infobar not in set.InfoBarStack, "FATAL: Infobar '" + str(infobar) + "' is already active!"
set.InfoBarStack.append(infobar)
set.InfoBarStackSize += 1
# print "ServiceEventTracker set active '" + str(infobar) + "'"
@staticmethod
def popActiveInfoBar():
set = ServiceEventTracker
stack = set.InfoBarStack
if set.InfoBarStackSize:
nav = set.navcore
set.InfoBarStackSize -= 1
del stack[set.InfoBarStackSize]
old_service = nav.getCurrentService()
set.oldServiceStr = old_service and old_service.getPtrString()
set.oldRef = nav.getCurrentlyPlayingServiceOrGroup()
# if set.InfoBarStackSize:
# print "ServiceEventTracker reset active '" + str(stack[set.InfoBarStackSize-1]) + "'"
def __init__(self, screen, eventmap):
self.__screen = screen
self.__eventmap = eventmap
self.__passall = not isinstance(screen, InfoBarBase) # let pass all events to screens not derived from InfoBarBase
EventMap = ServiceEventTracker.EventMap
if not len(EventMap):
screen.session.nav.event.append(ServiceEventTracker.event)
ServiceEventTracker.navcore = screen.session.nav
EventMap = EventMap.setdefault
for x in eventmap.iteritems():
EventMap(x[0], []).append((self.__passall, screen, x[1]))
screen.onClose.append(self.__del_event)
def __del_event(self):
EventMap = ServiceEventTracker.EventMap.setdefault
for x in self.__eventmap.iteritems():
EventMap(x[0], []).remove((self.__passall, self.__screen, x[1]))
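# Illustrative usage from a screen (assumption: iPlayableService comes from
# enigma and MyInfoBar is hypothetical; the eventmap keys are examples):
#
#     class MyInfoBar(InfoBarBase, Screen):
#         def __init__(self, session):
#             ...
#             self.__event_tracker = ServiceEventTracker(screen=self, eventmap={
#                 iPlayableService.evStart: self.__serviceStarted,
#                 iPlayableService.evEnd: self.__serviceStopped,
#             })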
| gpl-2.0 |
michaelaye/vispy | vispy/visuals/text/_sdf.py | 13 | 10545 | # -*- coding: utf-8 -*-
"""
Jump flooding algorithm for the Euclidean distance transform (EDT), in GLSL:
Author: Stefan Gustavson (stefan.gustavson@gmail.com)
2010-08-24. This code is in the public domain.
Adapted to `vispy` by Eric Larson <larson.eric.d@gmail.com>.
"""
import numpy as np
from os import path as op
from ...gloo import (Program, FrameBuffer, VertexBuffer, Texture2D,
set_viewport, set_state)
this_dir = op.dirname(__file__)
vert_seed = """
attribute vec2 a_position;
attribute vec2 a_texcoord;
varying vec2 v_uv;
void main( void )
{
v_uv = a_texcoord.xy;
gl_Position = vec4(a_position.xy, 0., 1.);
}
"""
vert = """
uniform float u_texw;
uniform float u_texh;
uniform float u_step;
attribute vec2 a_position;
attribute vec2 a_texcoord;
varying float v_stepu;
varying float v_stepv;
varying vec2 v_uv;
void main( void )
{
v_uv = a_texcoord.xy;
v_stepu = u_step / u_texw; // Saves a division in the fragment shader
v_stepv = u_step / u_texh;
gl_Position = vec4(a_position.xy, 0., 1.);
}
"""
frag_seed = """
uniform sampler2D u_texture;
varying vec2 v_uv;
void main( void )
{
float pixel = texture2D(u_texture, v_uv).r;
vec4 myzero = vec4(128. / 255., 128. / 255., 0., 0.); // Zero
vec4 myinfinity = vec4(0., 0., 0., 0.); // Infinity
// Pixels >= 0.5 are objects, others are background
gl_FragColor = pixel >= 0.5 ? myzero : myinfinity;
}
"""
frag_flood = """
uniform sampler2D u_texture;
varying float v_stepu;
varying float v_stepv;
varying vec2 v_uv;
vec2 remap(vec4 floatdata) {
vec2 scaleddata = vec2(floatdata.x * 65280. + floatdata.z * 255.,
floatdata.y * 65280. + floatdata.w * 255.);
return scaleddata / 32768. - 1.0;
}
vec4 remap_inv(vec2 floatvec) {
vec2 data = (floatvec + 1.0) * 32768.;
float x = floor(data.x / 256.);
float y = floor(data.y / 256.);
return vec4(x, y, data.x - x * 256., data.y - y * 256.) / 255.;
}
void main( void )
{
// Search for better distance vectors among 8 candidates
vec2 stepvec; // Relative offset to candidate being tested
vec2 newvec; // Absolute position of that candidate
vec3 newseed; // Closest point from that candidate (.xy) and its dist (.z)
vec3 bestseed; // Closest seed so far
bestseed.xy = remap(texture2D(u_texture, v_uv).rgba);
bestseed.z = length(bestseed.xy);
stepvec = vec2(-v_stepu, -v_stepv);
newvec = v_uv + stepvec;
if (all(bvec4(lessThan(newvec, vec2(1.0)), greaterThan(newvec, vec2(0.0))))){
newseed.xy = remap(texture2D(u_texture, newvec).rgba);
if(newseed.x > -0.99999) { // if the new seed is not "indeterminate dist"
newseed.xy = newseed.xy + stepvec;
newseed.z = length(newseed.xy);
if(newseed.z < bestseed.z) {
bestseed = newseed;
}
}
}
stepvec = vec2(-v_stepu, 0.0);
newvec = v_uv + stepvec;
if (all(bvec4(lessThan(newvec, vec2(1.0)), greaterThan(newvec, vec2(0.0))))){
newseed.xy = remap(texture2D(u_texture, newvec).rgba);
if(newseed.x > -0.99999) { // if the new seed is not "indeterminate dist"
newseed.xy = newseed.xy + stepvec;
newseed.z = length(newseed.xy);
if(newseed.z < bestseed.z) {
bestseed = newseed;
}
}
}
stepvec = vec2(-v_stepu, v_stepv);
newvec = v_uv + stepvec;
if (all(bvec4(lessThan(newvec, vec2(1.0)), greaterThan(newvec, vec2(0.0))))){
newseed.xy = remap(texture2D(u_texture, newvec).rgba);
if(newseed.x > -0.99999) { // if the new seed is not "indeterminate dist"
newseed.xy = newseed.xy + stepvec;
newseed.z = length(newseed.xy);
if(newseed.z < bestseed.z) {
bestseed = newseed;
}
}
}
stepvec = vec2(0.0, -v_stepv);
newvec = v_uv + stepvec;
if (all(bvec4(lessThan(newvec, vec2(1.0)), greaterThan(newvec, vec2(0.0))))){
newseed.xy = remap(texture2D(u_texture, newvec).rgba);
if(newseed.x > -0.99999) { // if the new seed is not "indeterminate dist"
newseed.xy = newseed.xy + stepvec;
newseed.z = length(newseed.xy);
if(newseed.z < bestseed.z) {
bestseed = newseed;
}
}
}
stepvec = vec2(0.0, v_stepv);
newvec = v_uv + stepvec;
if (all(bvec4(lessThan(newvec, vec2(1.0)), greaterThan(newvec, vec2(0.0))))){
newseed.xy = remap(texture2D(u_texture, newvec).rgba);
if(newseed.x > -0.99999) { // if the new seed is not "indeterminate dist"
newseed.xy = newseed.xy + stepvec;
newseed.z = length(newseed.xy);
if(newseed.z < bestseed.z) {
bestseed = newseed;
}
}
}
stepvec = vec2(v_stepu, -v_stepv);
newvec = v_uv + stepvec;
if (all(bvec4(lessThan(newvec, vec2(1.0)), greaterThan(newvec, vec2(0.0))))){
newseed.xy = remap(texture2D(u_texture, newvec).rgba);
if(newseed.x > -0.99999) { // if the new seed is not "indeterminate dist"
newseed.xy = newseed.xy + stepvec;
newseed.z = length(newseed.xy);
if(newseed.z < bestseed.z) {
bestseed = newseed;
}
}
}
stepvec = vec2(v_stepu, 0.0);
newvec = v_uv + stepvec;
if (all(bvec4(lessThan(newvec, vec2(1.0)), greaterThan(newvec, vec2(0.0))))){
newseed.xy = remap(texture2D(u_texture, newvec).rgba);
if(newseed.x > -0.99999) { // if the new seed is not "indeterminate dist"
newseed.xy = newseed.xy + stepvec;
newseed.z = length(newseed.xy);
if(newseed.z < bestseed.z) {
bestseed = newseed;
}
}
}
stepvec = vec2(v_stepu, v_stepv);
newvec = v_uv + stepvec;
if (all(bvec4(lessThan(newvec, vec2(1.0)), greaterThan(newvec, vec2(0.0))))){
newseed.xy = remap(texture2D(u_texture, newvec).rgba);
if(newseed.x > -0.99999) { // if the new seed is not "indeterminate dist"
newseed.xy = newseed.xy + stepvec;
newseed.z = length(newseed.xy);
if(newseed.z < bestseed.z) {
bestseed = newseed;
}
}
}
gl_FragColor = remap_inv(bestseed.xy);
}
"""
frag_insert = """
uniform sampler2D u_texture;
uniform sampler2D u_pos_texture;
uniform sampler2D u_neg_texture;
varying float v_stepu;
varying float v_stepv;
varying vec2 v_uv;
vec2 remap(vec4 floatdata) {
vec2 scaled_data = vec2(floatdata.x * 65280. + floatdata.z * 255.,
floatdata.y * 65280. + floatdata.w * 255.);
return scaled_data / 32768. - 1.0;
}
void main( void )
{
float pixel = texture2D(u_texture, v_uv).r;
// convert distance from normalized units -> pixels
vec2 rescale = vec2(v_stepu, v_stepv);
float shrink = 8.;
rescale = rescale * 256. / shrink;
// Without the division, 1 RGB increment = 1 px distance
vec2 pos_distvec = remap(texture2D(u_pos_texture, v_uv).rgba) / rescale;
vec2 neg_distvec = remap(texture2D(u_neg_texture, v_uv).rgba) / rescale;
if (pixel <= 0.5)
gl_FragColor = vec4(0.5 - length(pos_distvec));
else
gl_FragColor = vec4(0.5 - (shrink - 1.) / 256. + length(neg_distvec));
}
"""
class SDFRenderer(object):
def __init__(self):
self.program_seed = Program(vert_seed, frag_seed)
self.program_flood = Program(vert, frag_flood)
self.program_insert = Program(vert, frag_insert)
self.programs = [self.program_seed, self.program_flood,
self.program_insert]
# Initialize variables
self.fbo_to = [FrameBuffer(), FrameBuffer(), FrameBuffer()]
vtype = np.dtype([('a_position', np.float32, 2),
('a_texcoord', np.float32, 2)])
vertices = np.zeros(4, dtype=vtype)
vertices['a_position'] = [[-1., -1.], [-1., 1.], [1., -1.], [1., 1.]]
vertices['a_texcoord'] = [[0., 0.], [0., 1.], [1., 0.], [1., 1.]]
vertices = VertexBuffer(vertices)
self.program_insert['u_step'] = 1.
for program in self.programs:
program.bind(vertices)
def render_to_texture(self, data, texture, offset, size):
"""Render a SDF to a texture at a given offset and size
Parameters
----------
data : array
Must be 2D with type np.ubyte.
texture : instance of Texture2D
The texture to render to.
offset : tuple of int
Offset (x, y) to render to inside the texture.
size : tuple of int
Size (w, h) to render inside the texture.
"""
assert isinstance(texture, Texture2D)
set_state(blend=False, depth_test=False)
# calculate the negative half (within object)
orig_tex = Texture2D(255 - data, format='luminance',
wrapping='clamp_to_edge', interpolation='nearest')
edf_neg_tex = self._render_edf(orig_tex)
# calculate positive half (outside object)
orig_tex[:, :, 0] = data
edf_pos_tex = self._render_edf(orig_tex)
# render final product to output texture
self.program_insert['u_texture'] = orig_tex
self.program_insert['u_pos_texture'] = edf_pos_tex
self.program_insert['u_neg_texture'] = edf_neg_tex
self.fbo_to[-1].color_buffer = texture
with self.fbo_to[-1]:
set_viewport(tuple(offset) + tuple(size))
self.program_insert.draw('triangle_strip')
def _render_edf(self, orig_tex):
"""Render an EDF to a texture"""
# Set up the necessary textures
sdf_size = orig_tex.shape[:2]
comp_texs = []
for _ in range(2):
tex = Texture2D(sdf_size + (4,), format='rgba',
interpolation='nearest', wrapping='clamp_to_edge')
comp_texs.append(tex)
self.fbo_to[0].color_buffer = comp_texs[0]
self.fbo_to[1].color_buffer = comp_texs[1]
for program in self.programs[1:]: # program_seed does not need this
program['u_texh'], program['u_texw'] = sdf_size
# Do the rendering
last_rend = 0
with self.fbo_to[last_rend]:
set_viewport(0, 0, sdf_size[1], sdf_size[0])
self.program_seed['u_texture'] = orig_tex
self.program_seed.draw('triangle_strip')
stepsize = (np.array(sdf_size) // 2).max()
while stepsize > 0:
self.program_flood['u_step'] = stepsize
self.program_flood['u_texture'] = comp_texs[last_rend]
last_rend = 1 if last_rend == 0 else 0
with self.fbo_to[last_rend]:
set_viewport(0, 0, sdf_size[1], sdf_size[0])
self.program_flood.draw('triangle_strip')
stepsize //= 2
return comp_texs[last_rend]
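# Illustrative usage (assumption: `data` is a 2D np.ubyte glyph bitmap and
# `tex` is a sufficiently large gloo Texture2D atlas; both are hypothetical):
#
#     renderer = SDFRenderer()
#     renderer.render_to_texture(data, tex, offset=(0, 0),
#                                size=(data.shape[1], data.shape[0]))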
| bsd-3-clause |
jjscarafia/odoo | addons/website_membership/models/product.py | 338 | 1264 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
class product_template(osv.Model):
_inherit = 'product.template'
_columns = {
'website_published': fields.boolean('Available in the website', copy=False),
}
_defaults = {
'website_published': False,
}
| agpl-3.0 |
beaker-project/beaker | Client/src/bkr/client/command.py | 1 | 13879 | # -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import optparse
import os
import sys
from optparse import Option
import six
from bkr.common.hub import HubProxy
from bkr.common.pyconfig import PyConfigParser
def username_prompt(prompt=None, default_value=None):
"""
Ask for a username.
"""
if default_value is not None:
return default_value
prompt = prompt or "Enter your username: "
sys.stderr.write(prompt)
return sys.stdin.readline()
def password_prompt(prompt=None, default_value=None):
"""
Ask for a password.
"""
import getpass
if default_value is not None:
return default_value
prompt = prompt or "Enter your password: "
try:
# try to use stderr stream
result = getpass.getpass(prompt, stream=sys.stderr)
except TypeError:
# fall back to stdout
result = getpass.getpass(prompt)
return result
def yes_no_prompt(prompt, default_value=None):
"""
Ask a yes/no (y/n) question and return True or False.
"""
if default_value is not None:
    default_value = default_value.upper()
    if default_value not in ("Y", "N"):
        raise ValueError("Invalid default value: %s" % default_value)
prompt = "%s [%s/%s]: " % (prompt, ("y", "Y")[default_value == "Y"], ("n", "N")[default_value == "N"])
sys.stderr.write(prompt)
while True:
user_input = sys.stdin.readline().strip().upper()
if user_input == "" and default_value is not None:
user_input = default_value
if user_input == "Y":
return True
if user_input == "N":
return False
def are_you_sure_prompt(prompt=None):
"""
Ask the user to confirm; return True only if they type exactly 'YES'.
"""
prompt = prompt or "Are you sure? Enter 'YES' to continue: "
sys.stderr.write(prompt)
user_input = sys.stdin.readline().strip()
if user_input == "YES":
return True
return False
class Plugin(object):
"""A plugin base class."""
author = None
version = None
enabled = False
def __getattr__(self, name):
"""
Get missing attribute from a container.
This is quite hackish, but it allows settings and methods
to be defined per container.
"""
return getattr(self.container, name)
class Command(Plugin):
"""
An abstract class representing a command for CommandOptionParser.
"""
enabled = False
admin = False
username_prompt = staticmethod(username_prompt)
password_prompt = staticmethod(password_prompt)
yes_no_prompt = staticmethod(yes_no_prompt)
are_you_sure_prompt = staticmethod(are_you_sure_prompt)
def __init__(self, parser):
Plugin.__init__(self)
self.parser = parser
def options(self):
"""
Add options to self.parser.
"""
pass
def run(self, *args, **kwargs):
"""
Run a command. Arguments contain parsed options.
"""
raise NotImplementedError()
class PluginContainer(object):
"""
A plugin container.
Usage: Inherit PluginContainer and register plugins to the new class.
"""
def __getitem__(self, name):
return self._get_plugin(name)
def __iter__(self):
return six.iterkeys(self.plugins)
@classmethod
def normalize_name(cls, name):
return name
@classmethod
def _get_plugins(cls):
"""
Return dictionary of registered plugins.
"""
result = {}
parent_plugins = cls._get_parent_plugins(cls.normalize_name) # pylint: disable=no-member
class_plugins = getattr(cls, "_class_plugins", {})
d = parent_plugins.copy()
d.update(class_plugins)
for name, plugin_class in d.items():
result[name] = plugin_class
return result
@classmethod
def _get_parent_plugins(cls, normalize_function):
result = {}
for parent in cls.__bases__:
if parent is PluginContainer:
# don't use PluginContainer itself - plugins have to be registered to subclasses
continue
if not issubclass(parent, PluginContainer):
# skip parents which are not PluginContainer subclasses
continue
# read inherited plugins first (conflicts are resolved recursively)
plugins = parent._get_parent_plugins(normalize_function) # pylint: disable=no-member
# read class plugins, override inherited on name conflicts
if hasattr(parent, "_class_plugins"):
for plugin_class in parent._class_plugins.values(): # pylint: disable=no-member
normalized_name = normalize_function(plugin_class.__name__)
plugins[normalized_name] = plugin_class
for name, value in six.iteritems(plugins):
if result.get(name, value) != value:
raise RuntimeError(
"Cannot register plugin '%s'. "
"Another plugin with the same normalized name (%s) "
"is already in the container." % (str(value), normalized_name))
result.update(plugins)
return result
@property
def plugins(self):
if not hasattr(self, "_plugins"):
self._plugins = self.__class__._get_plugins()
return self._plugins
def _get_plugin(self, name):
"""
Return a plugin or raise KeyError.
"""
normalized_name = self.normalize_name(name)
if normalized_name not in self.plugins:
raise KeyError("Plugin not found: %s" % normalized_name)
plugin = self.plugins[normalized_name]
plugin.container = self
plugin.normalized_name = normalized_name
return plugin
@classmethod
def register_plugin(cls, plugin, name=None):
"""
Register a new plugin. Return normalized plugin name.
"""
if cls is PluginContainer:
raise TypeError("Can't register plugin to the PluginContainer base class.")
if "_class_plugins" not in cls.__dict__:
cls._class_plugins = {}
if not getattr(plugin, "enabled", False):
return
if not name:
name = cls.normalize_name(plugin.__name__)
cls._class_plugins[name] = plugin
return name
@classmethod
def register_module(cls, module, prefix=None, skip_broken=False):
"""
Register all plugins in a module's sub-modules.
@param module: a python module that contains plugin sub-modules
@type module: module
@param prefix: if specified, only modules with this prefix will be processed
@type prefix: str
@param skip_broken: skip broken sub-modules and print a warning
@type skip_broken: bool
"""
path = os.path.dirname(module.__file__)
module_list = []
for fn in os.listdir(path):
if not fn.endswith(".py"):
continue
if fn.startswith("_"):
continue
if prefix and not fn.startswith(prefix):
continue
if not os.path.isfile(os.path.join(path, fn)):
continue
module_list.append(fn[:-3])
if skip_broken:
for mod in module_list[:]:
try:
__import__(module.__name__, {}, {}, [mod])
except:
import sys
sys.stderr.write("WARNING: Skipping broken plugin module: %s.%s"
% (module.__name__, mod))
module_list.remove(mod)
else:
__import__(module.__name__, {}, {}, module_list)
for mn in module_list:
mod = getattr(module, mn)
for pn in dir(mod):
plugin = getattr(mod, pn)
if type(plugin) is type and issubclass(plugin, Plugin) and plugin is not Plugin:
cls.register_plugin(plugin)
class BeakerClientConfigurationError(ValueError):
"""
Raised to indicate that the Beaker client is not configured properly.
"""
pass
class CommandContainer(PluginContainer):
"""
Container for Command classes.
"""
@classmethod
def normalize_name(cls, name):
"""
Replace some characters in command names.
"""
return name.lower().replace('_', '-').replace(' ', '-')
class ClientCommandContainer(CommandContainer):
def __init__(self, conf, **kwargs):
self.conf = PyConfigParser()
self.conf.load_from_conf(conf)
self.conf.load_from_dict(kwargs)
def set_hub(self, username=None, password=None, auto_login=True, proxy_user=None):
if username:
if password is None:
password = password_prompt(default_value=password)
self.conf["AUTH_METHOD"] = "password"
self.conf["USERNAME"] = username
self.conf["PASSWORD"] = password
if proxy_user:
self.conf["PROXY_USER"] = proxy_user
cacert = self.conf.get('CA_CERT')
if cacert and not os.path.exists(cacert):
raise BeakerClientConfigurationError(
'CA_CERT configuration points to non-existing file: %s' % cacert)
self.hub = HubProxy(conf=self.conf, auto_login=auto_login)
class CommandOptionParser(optparse.OptionParser):
"""Enhanced OptionParser with plugin support."""
def __init__(self,
usage=None,
option_list=None,
option_class=Option,
version=None,
conflict_handler="error",
description=None,
formatter=None,
add_help_option=True,
prog=None,
command_container=None,
default_command="help",
add_username_password_options=False):
usage = usage or "%prog <command> [args] [--help]"
self.container = command_container
self.default_command = default_command
self.command = None
formatter = formatter or optparse.IndentedHelpFormatter(max_help_position=33)
optparse.OptionParser.__init__(self, usage, option_list, option_class, version,
conflict_handler, description, formatter, add_help_option,
prog)
if add_username_password_options:
option_list = [
optparse.Option("--username", help="specify user"),
optparse.Option("--password", help="specify password"),
]
self._populate_option_list(option_list, add_help=False)
def print_help(self, file=None, admin=False):
if file is None:
file = sys.stdout
file.write(self.format_help())
if self.command in (None, "help", "help-admin"):
file.write("\n")
file.write(self.format_help_commands(admin=admin))
def format_help_commands(self, admin=False):
commands = []
admin_commands = []
for name, plugin in sorted(six.iteritems(self.container.plugins)):
if getattr(plugin, 'hidden', False):
continue
is_admin = getattr(plugin, "admin", False)
text = " %-30s %s" % (name, plugin.__doc__.strip() if plugin.__doc__ else "")
if is_admin:
if admin:
admin_commands.append(text)
else:
commands.append(text)
if commands:
commands.insert(0, "commands:")
commands.append("")
if admin_commands:
admin_commands.insert(0, "admin commands:")
admin_commands.append("")
return "\n".join(commands + admin_commands)
def parse_args(self, args=None, values=None):
"""
Return (command_instance, opts, args)
"""
args = self._get_args(args)
if len(args) > 0 and not args[0].startswith("-"):
command = args[0]
args = args[1:]
else:
command = self.default_command
# keep args as is
if command not in self.container.plugins:
self.error("unknown command: %s" % command)
CommandClass = self.container[command]
cmd = CommandClass(self)
if self.command != cmd.normalized_name:
self.command = cmd.normalized_name
cmd.options()
cmd_opts, cmd_args = optparse.OptionParser.parse_args(self, args, values)
return cmd, cmd_opts, cmd_args
def run(self, args=None, values=None):
"""
Parse arguments and run a command
"""
cmd, cmd_opts, cmd_args = self.parse_args(args, values)
cmd_kwargs = cmd_opts.__dict__
cmd.run(*cmd_args, **cmd_kwargs)
class Help(Command):
"""
Show this help message and exit
"""
enabled = True
def options(self):
pass
def run(self, *args, **kwargs):
self.parser.print_help(admin=False)
class Help_Admin(Command):
"""
Show help message about administrative commands and exit
"""
enabled = True
def options(self):
# override default --help option
opt = self.parser.get_option("--help")
opt.action = "store_true"
opt.dest = "help"
def run(self, *args, **kwargs):
self.parser.print_help(admin=True)
CommandContainer.register_plugin(Help)
CommandContainer.register_plugin(Help_Admin)
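# Illustrative sketch of defining a new command (assumption: `Echo` is
# hypothetical and not part of Beaker; registration mirrors Help above):
#
#     class Echo(Command):
#         """Print the arguments given on the command line"""
#         enabled = True
#
#         def options(self):
#             self.parser.usage = "%%prog %s [args]" % self.normalized_name
#
#         def run(self, *args, **kwargs):
#             print(" ".join(args))
#
#     CommandContainer.register_plugin(Echo)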
| gpl-2.0 |
yyu168/linux | tools/testing/selftests/tc-testing/tdc.py | 40 | 12519 | #!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0
"""
tdc.py - Linux tc (Traffic Control) unit test driver
Copyright (C) 2017 Lucas Bates <lucasb@mojatatu.com>
"""
import re
import os
import sys
import argparse
import json
import subprocess
from collections import OrderedDict
from string import Template
from tdc_config import *
from tdc_helper import *
USE_NS = True
def replace_keywords(cmd):
"""
For a given executable command, substitute any known
variables contained within NAMES with the correct values
"""
tcmd = Template(cmd)
subcmd = tcmd.safe_substitute(NAMES)
return subcmd
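# For example (assumption: NAMES comes from tdc_config.py and maps, say,
# 'TC' -> '/sbin/tc' and 'DEV1' -> 'v0p1'):
#
#     replace_keywords('$TC qdisc add dev $DEV1 ingress')
#     # -> '/sbin/tc qdisc add dev v0p1 ingress'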
def exec_cmd(command, nsonly=True):
"""
Perform any required modifications on an executable command, then run
it in a subprocess and return the results.
"""
if (USE_NS and nsonly):
command = 'ip netns exec $NS ' + command
if '$' in command:
command = replace_keywords(command)
proc = subprocess.Popen(command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(rawout, serr) = proc.communicate()
if proc.returncode != 0 and len(serr) > 0:
foutput = serr.decode("utf-8")
else:
foutput = rawout.decode("utf-8")
proc.stdout.close()
proc.stderr.close()
return proc, foutput
def prepare_env(cmdlist):
"""
Execute the setup/teardown commands for a test case. Optionally
terminate test execution if the command fails.
"""
for cmdinfo in cmdlist:
if (type(cmdinfo) == list):
exit_codes = cmdinfo[1:]
cmd = cmdinfo[0]
else:
exit_codes = [0]
cmd = cmdinfo
if (len(cmd) == 0):
continue
(proc, foutput) = exec_cmd(cmd)
if proc.returncode not in exit_codes:
print()
print("Could not execute:")
print(cmd)
print("\nError message:")
print(foutput)
print("\nAborting test run.")
ns_destroy()
exit(1)
def test_runner(filtered_tests, args):
"""
Driver function for the unit tests.
Prints information about the tests being run, executes the setup and
teardown commands and the command under test itself. Also determines
success/failure based on the information in the test case and generates
TAP output accordingly.
"""
testlist = filtered_tests
tcount = len(testlist)
index = 1
tap = str(index) + ".." + str(tcount) + "\n"
for tidx in testlist:
result = True
tresult = ""
if "flower" in tidx["category"] and args.device == None:
continue
print("Test " + tidx["id"] + ": " + tidx["name"])
prepare_env(tidx["setup"])
(p, procout) = exec_cmd(tidx["cmdUnderTest"])
exit_code = p.returncode
if (exit_code != int(tidx["expExitCode"])):
result = False
print("exit:", exit_code, int(tidx["expExitCode"]))
print(procout)
else:
match_pattern = re.compile(str(tidx["matchPattern"]), re.DOTALL)
(p, procout) = exec_cmd(tidx["verifyCmd"])
match_index = re.findall(match_pattern, procout)
if len(match_index) != int(tidx["matchCount"]):
result = False
if result == True:
tresult += "ok "
else:
tresult += "not ok "
tap += tresult + str(index) + " " + tidx["id"] + " " + tidx["name"] + "\n"
if result == False:
tap += procout
prepare_env(tidx["teardown"])
index += 1
return tap
def ns_create():
"""
Create the network namespace in which the tests will be run and set up
the required network devices for it.
"""
if (USE_NS):
cmd = 'ip netns add $NS'
exec_cmd(cmd, False)
cmd = 'ip link add $DEV0 type veth peer name $DEV1'
exec_cmd(cmd, False)
cmd = 'ip link set $DEV1 netns $NS'
exec_cmd(cmd, False)
cmd = 'ip link set $DEV0 up'
exec_cmd(cmd, False)
cmd = 'ip -n $NS link set $DEV1 up'
exec_cmd(cmd, False)
cmd = 'ip link set $DEV2 netns $NS'
exec_cmd(cmd, False)
cmd = 'ip -n $NS link set $DEV2 up'
exec_cmd(cmd, False)
def ns_destroy():
"""
Destroy the network namespace for testing (and any associated network
devices as well)
"""
if (USE_NS):
cmd = 'ip netns delete $NS'
exec_cmd(cmd, False)
def has_blank_ids(idlist):
"""
Search the list for empty ID fields and return true/false accordingly.
"""
return not(all(k for k in idlist))
def load_from_file(filename):
"""
Open the JSON file containing the test cases and return them
as list of ordered dictionary objects.
"""
try:
with open(filename) as test_data:
testlist = json.load(test_data, object_pairs_hook=OrderedDict)
except json.JSONDecodeError as jde:
print('IGNORING test case file {}\n\tBECAUSE: {}'.format(filename, jde))
testlist = list()
else:
idlist = get_id_list(testlist)
if (has_blank_ids(idlist)):
for k in testlist:
k['filename'] = filename
return testlist
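# Illustrative shape of a single test case entry (assumption: a made-up
# example; the field names are exactly the ones consumed by test_runner()
# and the ID/category helpers above):
#
# [
#     {
#         "id": "e9a3",
#         "name": "Add a basic matchall filter",
#         "category": ["filter"],
#         "setup": ["$TC qdisc add dev $DEV1 ingress"],
#         "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: matchall action ok",
#         "expExitCode": "0",
#         "verifyCmd": "$TC filter show dev $DEV1 parent ffff:",
#         "matchPattern": "matchall",
#         "matchCount": "1",
#         "teardown": ["$TC qdisc del dev $DEV1 ingress"]
#     }
# ]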
def args_parse():
"""
Create the argument parser.
"""
parser = argparse.ArgumentParser(description='Linux TC unit tests')
return parser
def set_args(parser):
"""
Set the command line arguments for tdc.
"""
parser.add_argument('-p', '--path', type=str,
help='The full path to the tc executable to use')
parser.add_argument('-c', '--category', type=str, nargs='?', const='+c',
help='Run tests only from the specified category, or if no category is specified, list known categories.')
parser.add_argument('-f', '--file', type=str,
help='Run tests from the specified file')
parser.add_argument('-l', '--list', type=str, nargs='?', const="++", metavar='CATEGORY',
help='List all test cases, or those only within the specified category')
parser.add_argument('-s', '--show', type=str, nargs=1, metavar='ID', dest='showID',
help='Display the test case with specified id')
parser.add_argument('-e', '--execute', type=str, nargs=1, metavar='ID',
help='Execute the single test case with specified ID')
parser.add_argument('-i', '--id', action='store_true', dest='gen_id',
help='Generate ID numbers for new test cases')
parser.add_argument('-d', '--device',
help='Execute the test case in flower category')
return parser
def check_default_settings(args):
"""
Process any arguments overriding the default settings, and ensure the
settings are correct.
"""
# Allow for overriding specific settings
global NAMES
if args.path != None:
NAMES['TC'] = args.path
if args.device != None:
NAMES['DEV2'] = args.device
if not os.path.isfile(NAMES['TC']):
print("The specified tc path " + NAMES['TC'] + " does not exist.")
exit(1)
def get_id_list(alltests):
"""
Generate a list of all IDs in the test cases.
"""
return [x["id"] for x in alltests]
def check_case_id(alltests):
"""
Check for duplicate test case IDs.
"""
idl = get_id_list(alltests)
return [x for x in idl if idl.count(x) > 1]
def does_id_exist(alltests, newid):
"""
Check if a given ID already exists in the list of test cases.
"""
idl = get_id_list(alltests)
return (any(newid == x for x in idl))
def generate_case_ids(alltests):
"""
If a test case has a blank ID field, generate a random hex ID for it
and then write the test cases back to disk.
"""
import random
for c in alltests:
if (c["id"] == ""):
while True:
newid = str('%04x' % random.randrange(16**4))
if (does_id_exist(alltests, newid)):
continue
else:
c['id'] = newid
break
ufilename = []
for c in alltests:
if ('filename' in c):
ufilename.append(c['filename'])
ufilename = get_unique_item(ufilename)
for f in ufilename:
testlist = []
for t in alltests:
if 'filename' in t:
if t['filename'] == f:
del t['filename']
testlist.append(t)
outfile = open(f, "w")
json.dump(testlist, outfile, indent=4)
outfile.close()
def get_test_cases(args):
"""
If a test case file is specified, retrieve tests from that file.
Otherwise, glob for all json files in subdirectories and load from
each one.
"""
import fnmatch
if args.file != None:
if not os.path.isfile(args.file):
print("The specified test case file " + args.file + " does not exist.")
exit(1)
flist = [args.file]
else:
flist = []
for root, dirnames, filenames in os.walk('tc-tests'):
for filename in fnmatch.filter(filenames, '*.json'):
flist.append(os.path.join(root, filename))
alltests = list()
for casefile in flist:
alltests = alltests + (load_from_file(casefile))
return alltests
def set_operation_mode(args):
"""
Load the test case data and process remaining arguments to determine
what the script should do for this run, and call the appropriate
function.
"""
alltests = get_test_cases(args)
if args.gen_id:
idlist = get_id_list(alltests)
if (has_blank_ids(idlist)):
alltests = generate_case_ids(alltests)
else:
print("No empty ID fields found in test files.")
exit(0)
duplicate_ids = check_case_id(alltests)
if (len(duplicate_ids) > 0):
print("The following test case IDs are not unique:")
print(str(set(duplicate_ids)))
print("Please correct them before continuing.")
exit(1)
ucat = get_test_categories(alltests)
if args.showID:
show_test_case_by_id(alltests, args.showID[0])
exit(0)
if args.execute:
target_id = args.execute[0]
else:
target_id = ""
if args.category:
if (args.category == '+c'):
print("Available categories:")
print_sll(ucat)
exit(0)
else:
target_category = args.category
else:
target_category = ""
testcases = get_categorized_testlist(alltests, ucat)
if args.list:
if (args.list == "++"):
list_test_cases(alltests)
exit(0)
elif(len(args.list) > 0):
if (args.list not in ucat):
print("Unknown category " + args.list)
print("Available categories:")
print_sll(ucat)
exit(1)
list_test_cases(testcases[args.list])
exit(0)
if (os.geteuid() != 0):
print("This script must be run with root privileges.\n")
exit(1)
ns_create()
if (len(target_category) == 0):
if (len(target_id) > 0):
alltests = list(filter(lambda x: target_id in x['id'], alltests))
if (len(alltests) == 0):
print("Cannot find a test case with ID matching " + target_id)
exit(1)
catresults = test_runner(alltests, args)
print("All test results: " + "\n\n" + catresults)
elif (len(target_category) > 0):
if (target_category == "flower") and args.device == None:
print("Please specify a NIC device (-d) to run category flower")
exit(1)
if (target_category not in ucat):
print("Specified category is not present in this file.")
exit(1)
else:
catresults = test_runner(testcases[target_category], args)
print("Category " + target_category + "\n\n" + catresults)
ns_destroy()
def main():
"""
Start of execution; set up argument parser and get the arguments,
and start operations.
"""
parser = args_parse()
parser = set_args(parser)
(args, remaining) = parser.parse_known_args()
check_default_settings(args)
set_operation_mode(args)
exit(0)
if __name__ == "__main__":
main()
| gpl-2.0 |
reneenoble/datacats | datacats/environment.py | 2 | 38413 | # Copyright 2014-2015 Boxkite Inc.
# This file is part of the DataCats package and is released under
# the terms of the GNU Affero General Public License version 3.0.
# See LICENSE.txt or http://www.fsf.org/licensing/licenses/agpl-3.0.html
from os.path import isdir, exists, join
from os import makedirs, remove, environ
import sys
import subprocess
import shutil
import json
import time
import socket
from sha import sha
from struct import unpack
from ConfigParser import (SafeConfigParser, Error as ConfigParserError)
from datacats import task, scripts
from datacats.docker import (web_command, run_container, remove_container,
inspect_container, is_boot2docker,
docker_host, container_logs, APIError)
from datacats.template import ckan_extension_template
from datacats.network import wait_for_service_available, ServiceTimeout
from datacats.password import generate_password
from datacats.error import DatacatsError, WebCommandError, PortAllocatedError
WEB_START_TIMEOUT_SECONDS = 30
DB_INIT_RETRY_SECONDS = 30
DB_INIT_RETRY_DELAY = 2
DOCKER_EXE = 'docker'
class Environment(object):
"""
DataCats environment settings object
Create with Environment.new(path) or Environment.load(path)
"""
def __init__(self, name, target, datadir, site_name, ckan_version=None,
port=None, deploy_target=None, site_url=None, always_prod=False,
extension_dir='ckan', address=None, remote_server_key=None,
extra_containers=None):
self.name = name
self.target = target
self.datadir = datadir
self.extension_dir = extension_dir
self.ckan_version = ckan_version
# This is the site that all commands will operate on.
self.site_name = site_name
self.port = int(port if port else self._choose_port())
self.address = address if not is_boot2docker() else None
self.deploy_target = deploy_target
self.remote_server_key = remote_server_key
self.site_url = site_url
self.always_prod = always_prod
self.sites = None
if extra_containers:
self.extra_containers = extra_containers
else:
self.extra_containers = []
def _set_site_name(self, site_name):
self._site_name = site_name
self.sitedir = join(self.datadir, 'sites', site_name)
def _get_site_name(self):
return self._site_name
site_name = property(fget=_get_site_name, fset=_set_site_name)
def _load_sites(self):
"""
Gets the names of all of the sites from the datadir and stores them
in self.sites. Also returns this list.
"""
if not self.sites:
self.sites = task.list_sites(self.datadir)
return self.sites
def save_site(self, create=True):
"""
Save the environment settings that must be written even when only
creating a new sub-site environment.
"""
self._load_sites()
if create:
self.sites.append(self.site_name)
task.save_new_site(self.site_name, self.sitedir, self.target, self.port,
self.address, self.site_url, self.passwords)
def save(self):
"""
Save environment settings into environment directory, overwriting
any existing configuration and discarding site config
"""
task.save_new_environment(self.name, self.datadir, self.target,
self.ckan_version, self.deploy_target, self.always_prod)
@classmethod
def new(cls, path, ckan_version, site_name, **kwargs):
"""
Return a Environment object with settings for a new project.
No directories or containers are created by this call.
:param path: location for the new project directory, may be relative
:param ckan_version: release of CKAN to install
:param site_name: name of the site whose database and solr core will
eventually be installed.
For additional keyword arguments see the __init__ method.
Raises DatacatsError if directories or a project with the same
name already exist.
"""
if ckan_version == 'master':
ckan_version = 'latest'
name, datadir, srcdir = task.new_environment_check(path, site_name, ckan_version)
environment = cls(name, srcdir, datadir, site_name, ckan_version, **kwargs)
environment._generate_passwords()
return environment
@classmethod
def load(cls, environment_name=None, site_name='primary', data_only=False, allow_old=False):
"""
Return an Environment object based on an existing environment+site.
:param environment_name: existing environment name, path or None to
look in current or parent directories for project
:param data_only: set to True to only load from data dir, not
the project dir; Used for purging environment data.
:param allow_old: load a very minimal subset of what we usually
load. This will only work for purging environment data on an old site.
Raises DatacatsError if environment can't be found or if there is an
error parsing the environment information.
"""
srcdir, extension_dir, datadir = task.find_environment_dirs(
environment_name, data_only)
if datadir and data_only:
return cls(environment_name, None, datadir, site_name)
(datadir, name, ckan_version, always_prod, deploy_target,
remote_server_key, extra_containers) = task.load_environment(srcdir, datadir, allow_old)
if not allow_old:
(port, address, site_url, passwords) = task.load_site(srcdir, datadir, site_name)
else:
(port, address, site_url, passwords) = (None, None, None, None)
environment = cls(name, srcdir, datadir, site_name, ckan_version=ckan_version,
port=port, deploy_target=deploy_target, site_url=site_url,
always_prod=always_prod, address=address,
extension_dir=extension_dir,
remote_server_key=remote_server_key,
extra_containers=extra_containers)
if passwords:
environment.passwords = passwords
else:
environment._generate_passwords()
if not allow_old:
environment._load_sites()
return environment
def data_exists(self):
"""
Return True if the datadir for this environment exists
"""
return isdir(self.datadir)
def require_valid_site(self):
if self.site_name not in self.sites:
raise DatacatsError('Invalid site name: {}. Valid names are: {}'
.format(self.site_name,
', '.join(self.sites)))
def data_complete(self):
"""
Return True if all the expected datadir files are present
"""
return task.data_complete(self.datadir, self.sitedir,
self._get_container_name)
def require_data(self):
"""
raise a DatacatsError if the datadir or volumes are missing or damaged
"""
files = task.source_missing(self.target)
if files:
raise DatacatsError('Missing files in source directory:\n' +
'\n'.join(files))
if not self.data_exists():
raise DatacatsError('Environment datadir missing. '
'Try "datacats init".')
if not self.data_complete():
raise DatacatsError('Environment datadir damaged or volumes '
'missing. '
'To reset and discard all data use '
'"datacats reset"')
def create_directories(self, create_project_dir=True):
"""
Call once for new projects to create the initial project directories.
"""
return task.create_directories(self.datadir, self.sitedir,
self.target if create_project_dir else None)
def create_bash_profile(self):
"""
Create a default .bash_profile for the shell user that
activates the ckan virtualenv
"""
with open(self.target + '/.bash_profile', 'w') as prof:
prof.write('source /usr/lib/ckan/bin/activate\n')
def _preload_image(self):
# pylint: disable=no-self-use
"""
Return the preloaded ckan src and venv image name
"""
# get the preload name from self.ckan_version
return 'datacats/ckan:{}'.format(self.ckan_version)
def create_virtualenv(self):
"""
Populate venv from preloaded image
"""
return task.create_virtualenv(self.target, self.datadir,
self._preload_image(), self._get_container_name)
def clean_virtualenv(self):
"""
Empty our virtualenv so that new (or older) dependencies may be
installed
"""
self.user_run_script(
script=scripts.get_script_path('clean_virtualenv.sh'),
args=[],
rw_venv=True,
)
def install_extra(self):
self.user_run_script(
script=scripts.get_script_path('install_extra_packages.sh'),
args=[],
rw_venv=True
)
def create_source(self, datapusher=True):
"""
Populate ckan directory from preloaded image and copy
who.ini and schema.xml into the conf directory
"""
task.create_source(self.target, self._preload_image(), datapusher)
def start_supporting_containers(self, log_syslog=False):
"""
Start all supporting containers (containers required for CKAN to
operate) if they aren't already running.
:param log_syslog: A flag to redirect all container logs to host's syslog
"""
log_syslog = True if self.always_prod else log_syslog
# in production we always use log_syslog driver (to aggregate all the logs)
task.start_supporting_containers(
self.sitedir,
self.target,
self.passwords,
self._get_container_name,
self.extra_containers,
log_syslog=log_syslog
)
def stop_supporting_containers(self):
"""
Stop and remove supporting containers (containers that are used by CKAN but don't host
CKAN or CKAN plugins). This method should *only* be called after CKAN has been stopped
or behaviour is undefined.
"""
task.stop_supporting_containers(self._get_container_name, self.extra_containers)
def fix_storage_permissions(self):
"""
Set the owner of all apache storage files to www-data container user
"""
web_command(
command='/bin/chown -R www-data: /var/www/storage',
rw={self.sitedir + '/files': '/var/www/storage'})
def create_ckan_ini(self):
"""
Use make-config to generate an initial development.ini file
"""
self.run_command(
command='/scripts/run_as_user.sh /usr/lib/ckan/bin/paster make-config'
' ckan /project/development.ini',
rw_project=True,
ro={scripts.get_script_path('run_as_user.sh'): '/scripts/run_as_user.sh'},
)
def update_ckan_ini(self, skin=True):
"""
Use config-tool to update development.ini with our environment settings
:param skin: use environment template skin plugin True/False
"""
command = [
'/usr/lib/ckan/bin/paster', '--plugin=ckan', 'config-tool',
'/project/development.ini', '-e',
'sqlalchemy.url = postgresql://<hidden>',
'ckan.datastore.read_url = postgresql://<hidden>',
'ckan.datastore.write_url = postgresql://<hidden>',
'ckan.datapusher.url = http://datapusher:8800',
'solr_url = http://solr:8080/solr',
'ckan.storage_path = /var/www/storage',
'ckan.plugins = datastore resource_proxy text_view ' +
('datapusher ' if exists(self.target + '/datapusher') else '')
+ 'recline_grid_view recline_graph_view'
+ (' {0}_theme'.format(self.name) if skin else ''),
'ckan.site_title = ' + self.name,
'ckan.site_logo =',
'ckan.auth.create_user_via_web = false',
]
self.run_command(command=command, rw_project=True)
def create_install_template_skin(self):
"""
Create an example ckan extension for this environment and install it
"""
ckan_extension_template(self.name, self.target)
self.install_package_develop('ckanext-' + self.name + 'theme')
def ckan_db_init(self, retry_seconds=DB_INIT_RETRY_SECONDS):
"""
Run db init to create all ckan tables
:param retry_seconds: how long to retry waiting for db to start
"""
# XXX workaround for not knowing how long we need to wait
# for postgres to be ready. fix this by changing the postgres
# entrypoint, or possibly running once with command=/bin/true
started = time.time()
while True:
try:
self.run_command(
'/usr/lib/ckan/bin/paster --plugin=ckan db init '
'-c /project/development.ini',
db_links=True,
clean_up=True,
)
break
except WebCommandError:
if time.time() > started + retry_seconds:
raise
time.sleep(DB_INIT_RETRY_DELAY)
def install_postgis_sql(self):
web_command(
'/scripts/install_postgis.sh',
image='datacats/postgres',
ro={scripts.get_script_path('install_postgis.sh'): '/scripts/install_postgis.sh'},
links={self._get_container_name('postgres'): 'db'},
)
def _generate_passwords(self):
"""
Generate new DB passwords and store them in self.passwords
"""
self.passwords = {
'POSTGRES_PASSWORD': generate_password(),
'CKAN_PASSWORD': generate_password(),
'DATASTORE_RO_PASSWORD': generate_password(),
'DATASTORE_RW_PASSWORD': generate_password(),
'BEAKER_SESSION_SECRET': generate_password(),
}
def needs_datapusher(self):
cp = SafeConfigParser()
try:
cp.read(self.target + '/development.ini')
return ('datapusher' in cp.get('app:main', 'ckan.plugins')
and isdir(self.target + '/datapusher'))
except ConfigParserError as e:
raise DatacatsError('Failed to read and parse development.ini: ' + str(e))
def start_ckan(self, production=False, log_syslog=False, paster_reload=True):
"""
Start the apache server or paster serve
:param log_syslog: A flag to redirect all container logs to host's syslog
:param production: True for apache, False for paster serve + debug on
:param paster_reload: Instruct paster to watch for file changes
"""
self.stop_ckan()
address = self.address or '127.0.0.1'
port = self.port
# in prod we always use log_syslog driver
log_syslog = True if self.always_prod else log_syslog
production = production or self.always_prod
# We only override the site URL with the docker URL on three conditions
override_site_url = (self.address is None
and not is_boot2docker()
and not self.site_url)
command = ['/scripts/web.sh', str(production), str(override_site_url), str(paster_reload)]
# XXX nasty hack, remove this once we have a lessc command
# for users (not just for building our preload image)
if not production:
css = self.target + '/ckan/ckan/public/base/css'
if not exists(css + '/main.debug.css'):
from shutil import copyfile
copyfile(css + '/main.css', css + '/main.debug.css')
ro = {
self.target: '/project',
scripts.get_script_path('datapusher.sh'): '/scripts/datapusher.sh'
}
if not is_boot2docker():
ro[self.datadir + '/venv'] = '/usr/lib/ckan'
datapusher = self.needs_datapusher()
if datapusher:
run_container(
self._get_container_name('datapusher'),
'datacats/web',
'/scripts/datapusher.sh',
ro=ro,
volumes_from=(self._get_container_name('venv') if is_boot2docker() else None),
log_syslog=log_syslog)
while True:
self._create_run_ini(port, production)
try:
self._run_web_container(port, command, address, log_syslog=log_syslog,
datapusher=datapusher)
if not is_boot2docker():
self.address = address
except PortAllocatedError:
port = self._next_port(port)
continue
break
def _create_run_ini(self, port, production, output='development.ini',
source='development.ini', override_site_url=True):
"""
Create run/development.ini in datadir with debug and site_url overridden
and with correct db passwords inserted
"""
cp = SafeConfigParser()
try:
cp.read([self.target + '/' + source])
except ConfigParserError:
raise DatacatsError('Error reading development.ini')
cp.set('DEFAULT', 'debug', 'false' if production else 'true')
if self.site_url:
site_url = self.site_url
else:
if is_boot2docker():
web_address = socket.gethostbyname(docker_host())
else:
web_address = self.address
site_url = 'http://{}:{}'.format(web_address, port)
if override_site_url:
cp.set('app:main', 'ckan.site_url', site_url)
cp.set('app:main', 'sqlalchemy.url',
'postgresql://ckan:{0}@db:5432/ckan'
.format(self.passwords['CKAN_PASSWORD']))
cp.set('app:main', 'ckan.datastore.read_url',
'postgresql://ckan_datastore_readonly:{0}@db:5432/ckan_datastore'
.format(self.passwords['DATASTORE_RO_PASSWORD']))
cp.set('app:main', 'ckan.datastore.write_url',
'postgresql://ckan_datastore_readwrite:{0}@db:5432/ckan_datastore'
.format(self.passwords['DATASTORE_RW_PASSWORD']))
cp.set('app:main', 'solr_url', 'http://solr:8080/solr')
cp.set('app:main', 'beaker.session.secret', self.passwords['BEAKER_SESSION_SECRET'])
if not isdir(self.sitedir + '/run'):
makedirs(self.sitedir + '/run') # upgrade old datadir
with open(self.sitedir + '/run/' + output, 'w') as runini:
cp.write(runini)
def _run_web_container(self, port, command, address, log_syslog=False,
datapusher=True):
"""
Start web container on port with command
"""
if is_boot2docker():
ro = {}
volumes_from = self._get_container_name('venv')
else:
ro = {self.datadir + '/venv': '/usr/lib/ckan'}
volumes_from = None
links = {
self._get_container_name('solr'): 'solr',
self._get_container_name('postgres'): 'db'
}
links.update({self._get_container_name(container): container
for container in self.extra_containers})
if datapusher:
if 'datapusher' not in self.containers_running():
raise DatacatsError(container_logs(self._get_container_name('datapusher'), "all",
False, False))
links[self._get_container_name('datapusher')] = 'datapusher'
try:
run_container(
name=self._get_container_name('web'),
image='datacats/web',
rw={self.sitedir + '/files': '/var/www/storage',
self.sitedir + '/run/development.ini':
'/project/development.ini'},
ro=dict({
self.target: '/project/',
scripts.get_script_path('web.sh'): '/scripts/web.sh',
scripts.get_script_path('adjust_devini.py'): '/scripts/adjust_devini.py'},
**ro),
links=links,
volumes_from=volumes_from,
command=command,
port_bindings={
5000: port if is_boot2docker() else (address, port)},
log_syslog=log_syslog
)
except APIError as e:
if '409' in str(e):
raise DatacatsError('Web container already running. '
'Please stop_web before running.')
else:
raise
def wait_for_web_available(self):
"""
Wait for the web server to become available or raise DatacatsError
if it fails to start.
"""
try:
if not wait_for_service_available(
self._get_container_name('web'),
self.web_address(),
WEB_START_TIMEOUT_SECONDS):
raise DatacatsError('Error while starting web container:\n' +
container_logs(self._get_container_name('web'), "all",
False, None))
except ServiceTimeout:
raise DatacatsError('Timeout while starting web container. Logs:' +
container_logs(self._get_container_name('web'), "all", False, None))
def _choose_port(self):
"""
Return a port number from 5000-5999 based on the environment name
to be used as a default when the user hasn't selected one.
"""
# instead of random let's base it on the name chosen (and the site name)
return 5000 + unpack('Q',
sha((self.name + self.site_name)
.decode('ascii')).digest()[:8])[0] % 1000
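# Worked example (illustrative): name='datacats', site_name='primary'
# hashes sha('datacatsprimary') to a fixed 8-byte value, so the same
# environment+site pair always lands on the same port in [5000, 5999].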
def _next_port(self, port):
"""
Return another port from the 5000-5999 range
"""
port = 5000 + (port + 1) % 1000
if port == self.port:
raise DatacatsError('Too many instances running')
return port
def stop_ckan(self):
"""
Stop and remove the web container
"""
remove_container(self._get_container_name('web'), force=True)
remove_container(self._get_container_name('datapusher'), force=True)
def _current_web_port(self):
"""
return just the port number for the web container, or None if
not running
"""
info = inspect_container(self._get_container_name('web'))
if info is None:
return None
try:
if not info['State']['Running']:
return None
return info['NetworkSettings']['Ports']['5000/tcp'][0]['HostPort']
except TypeError:
return None
def fully_running(self):
"""
Return True iff the environment is fully up and functioning, False otherwise.
"""
running = self.containers_running()
return ('postgres' in running and
'solr' in running and
'web' in running)
def add_extra_container(self, container, error_on_exists=False):
"""
Add a container as an 'extra'. These are running containers which are not necessary for
running default CKAN but are useful for certain extensions.
:param container: The container name to add
:param error_on_exists: Raise a DatacatsError if the extra container already exists.
"""
if container in self.extra_containers:
if error_on_exists:
raise DatacatsError('{} is already added as an extra container.'.format(container))
else:
return
self.extra_containers.append(container)
cp = SafeConfigParser()
cp.read(self.target + '/.datacats-environment')
cp.set('datacats', 'extra_containers', ' '.join(self.extra_containers))
with open(self.target + '/.datacats-environment', 'w') as f:
cp.write(f)
def containers_running(self):
"""
Return a list of containers tracked by this environment that are running
"""
return task.containers_running(self._get_container_name)
def web_address(self):
"""
Return the url of the web server or None if not running
"""
port = self._current_web_port()
address = self.address or '127.0.0.1'
if port is None:
return None
return 'http://{0}:{1}/'.format(
address if address and not is_boot2docker() else docker_host(),
port)
def create_admin_set_password(self, password):
"""
create 'admin' account with given password
"""
with open(self.sitedir + '/run/admin.json', 'w') as out:
json.dump({
'name': 'admin',
'email': 'none',
'password': password,
'sysadmin': True},
out)
self.user_run_script(
script=scripts.get_script_path('update_add_admin.sh'),
args=[],
db_links=True,
ro={
self.sitedir + '/run/admin.json': '/input/admin.json'
},
)
remove(self.sitedir + '/run/admin.json')
def interactive_shell(self, command=None, paster=False, detach=False):
"""
launch interactive shell session with all writable volumes
:param command: list of strings to execute instead of bash
"""
if not exists(self.target + '/.bash_profile'):
# this file is required for activating the virtualenv
self.create_bash_profile()
if not command:
command = []
use_tty = sys.stdin.isatty() and sys.stdout.isatty()
background = environ.get('CIRCLECI', False) or detach
if is_boot2docker():
venv_volumes = ['--volumes-from', self._get_container_name('venv')]
else:
venv_volumes = ['-v', self.datadir + '/venv:/usr/lib/ckan:rw']
self._create_run_ini(self.port, production=False, output='run.ini')
self._create_run_ini(self.port, production=True, output='test.ini',
source='ckan/test-core.ini', override_site_url=False)
script = scripts.get_script_path('shell.sh')
if paster:
script = scripts.get_script_path('paster.sh')
if command and command != ['help'] and command != ['--help']:
command += ['--config=/project/development.ini']
command = [self.extension_dir] + command
proxy_settings = self._proxy_settings()
if proxy_settings:
venv_volumes += ['-v',
self.sitedir + '/run/proxy-environment:/etc/environment:ro']
links = {self._get_container_name('solr'): 'solr',
self._get_container_name('postgres'): 'db'}
links.update({self._get_container_name(container): container for container
in self.extra_containers})
link_params = []
for link in links:
link_params.append('--link')
link_params.append(link + ':' + links[link])
# FIXME: consider switching this to dockerpty
# using subprocess for docker client's interactive session
return subprocess.call([
DOCKER_EXE, 'run',
] + (['--rm'] if not background else []) + [
'-t' if use_tty else '',
'-d' if detach else '-i',
] + venv_volumes + [
'-v', self.target + ':/project:rw',
'-v', self.sitedir + '/files:/var/www/storage:rw',
'-v', script + ':/scripts/shell.sh:ro',
'-v', scripts.get_script_path('paster_cd.sh') + ':/scripts/paster_cd.sh:ro',
'-v', self.sitedir + '/run/run.ini:/project/development.ini:ro',
'-v', self.sitedir +
'/run/test.ini:/project/ckan/test-core.ini:ro'] +
link_params
+ (['--link', self._get_container_name('datapusher') + ':datapusher']
if self.needs_datapusher() else []) +
['--hostname', self.name,
'datacats/web', '/scripts/shell.sh'] + command)
def install_package_requirements(self, psrc, stream_output=None):
"""
Install from requirements.txt file found in psrc
:param psrc: name of directory in environment directory
"""
package = self.target + '/' + psrc
assert isdir(package), package
reqname = '/requirements.txt'
if not exists(package + reqname):
reqname = '/pip-requirements.txt'
if not exists(package + reqname):
return
return self.user_run_script(
script=scripts.get_script_path('install_reqs.sh'),
args=['/project/' + psrc + reqname],
rw_venv=True,
rw_project=True,
stream_output=stream_output
)
def install_package_develop(self, psrc, stream_output=None):
"""
Install a src package in place (setup.py develop)
:param psrc: name of directory under project directory
"""
package = self.target + '/' + psrc
assert isdir(package), package
if not exists(package + '/setup.py'):
return
return self.user_run_script(
script=scripts.get_script_path('install_package.sh'),
args=['/project/' + psrc],
rw_venv=True,
rw_project=True,
stream_output=stream_output
)
def user_run_script(self, script, args, db_links=False, rw_venv=False,
rw_project=False, rw=None, ro=None, stream_output=None):
return self.run_command(
command=['/scripts/run_as_user.sh', '/scripts/run.sh'] + args,
db_links=db_links,
rw_venv=rw_venv,
rw_project=rw_project,
rw=rw,
ro=dict(ro or {}, **{
scripts.get_script_path('run_as_user.sh'): '/scripts/run_as_user.sh',
script: '/scripts/run.sh',
}),
stream_output=stream_output
)
def run_command(self, command, db_links=False, rw_venv=False,
rw_project=False, rw=None, ro=None, clean_up=False,
stream_output=None):
rw = {} if rw is None else dict(rw)
ro = {} if ro is None else dict(ro)
ro.update(self._proxy_settings())
if is_boot2docker():
volumes_from = self._get_container_name('venv')
else:
volumes_from = None
venvmount = rw if rw_venv else ro
venvmount[self.datadir + '/venv'] = '/usr/lib/ckan'
projectmount = rw if rw_project else ro
projectmount[self.target] = '/project'
if db_links:
self._create_run_ini(self.port, production=False, output='run.ini')
links = {
self._get_container_name('solr'): 'solr',
self._get_container_name('postgres'): 'db',
}
ro[self.sitedir + '/run/run.ini'] = '/project/development.ini'
else:
links = None
return web_command(command=command, ro=ro, rw=rw, links=links,
volumes_from=volumes_from, clean_up=clean_up,
commit=True, stream_output=stream_output)
def purge_data(self, which_sites=None, never_delete=False):
"""
Remove uploaded files, postgres db, solr index, venv
"""
# Default to the set of all sites
if not exists(self.datadir + '/.version'):
format_version = 1
else:
with open(self.datadir + '/.version') as f:
format_version = int(f.read().strip())
if format_version == 1:
print 'WARNING: Defaulting to old purge for version 1.'
datadirs = ['files', 'solr']
if is_boot2docker():
remove_container('datacats_pgdata_{}'.format(self.name))
remove_container('datacats_venv_{}'.format(self.name))
else:
datadirs += ['postgres', 'venv']
web_command(
command=['/scripts/purge.sh']
+ ['/project/data/' + d for d in datadirs],
ro={scripts.get_script_path('purge.sh'): '/scripts/purge.sh'},
rw={self.datadir: '/project/data'},
)
shutil.rmtree(self.datadir)
elif format_version == 2:
if not which_sites:
which_sites = self.sites
datadirs = []
boot2docker = is_boot2docker()
if which_sites:
if self.target:
cp = SafeConfigParser()
cp.read([self.target + '/.datacats-environment'])
for site in which_sites:
if boot2docker:
remove_container(self._get_container_name('pgdata'))
else:
datadirs += [site + '/postgres']
# Always rm the site dir & solr & files
datadirs += [site, site + '/files', site + '/solr']
if self.target:
cp.remove_section('site_' + site)
self.sites.remove(site)
if self.target:
with open(self.target + '/.datacats-environment', 'w') as conf:
cp.write(conf)
datadirs = ['sites/' + datadir for datadir in datadirs]
if not self.sites and not never_delete:
datadirs.append('venv')
web_command(
command=['/scripts/purge.sh']
+ ['/project/data/' + d for d in datadirs],
ro={scripts.get_script_path('purge.sh'): '/scripts/purge.sh'},
rw={self.datadir: '/project/data'},
)
if not self.sites and not never_delete:
shutil.rmtree(self.datadir)
else:
raise DatacatsError('Unknown format version {}'.format(format_version))
def logs(self, container, tail='all', follow=False, timestamps=False):
"""
:param container: 'web', 'solr' or 'postgres'
:param tail: number of lines to show
:param follow: True to return generator instead of list
:param timestamps: True to include timestamps
"""
return container_logs(
self._get_container_name(container),
tail,
follow,
timestamps)
def compile_less(self):
c = run_container(
name=self._get_container_name('lessc'), image='datacats/lessc',
rw={self.target: '/project/target'},
ro={scripts.get_script_path('compile_less.sh'): '/project/compile_less.sh'})
for log in container_logs(c['Id'], "all", True, False):
yield log
remove_container(c)
def _proxy_settings(self):
"""
Create/replace ~/.datacats/run/proxy-environment and return
the ro mount entry for containers
"""
if not ('https_proxy' in environ or 'HTTPS_PROXY' in environ
or 'http_proxy' in environ or 'HTTP_PROXY' in environ):
return {}
https_proxy = environ.get('https_proxy')
if https_proxy is None:
https_proxy = environ.get('HTTPS_PROXY')
http_proxy = environ.get('http_proxy')
if http_proxy is None:
http_proxy = environ.get('HTTP_PROXY')
no_proxy = environ.get('no_proxy')
if no_proxy is None:
no_proxy = environ.get('NO_PROXY', '')
no_proxy = no_proxy + ',solr,db'
out = [
'PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:'
'/bin:/usr/games:/usr/local/games"\n']
if https_proxy is not None:
out.append('https_proxy=' + posix_quote(https_proxy) + '\n')
out.append('HTTPS_PROXY=' + posix_quote(https_proxy) + '\n')
if http_proxy is not None:
out.append('http_proxy=' + posix_quote(http_proxy) + '\n')
out.append('HTTP_PROXY=' + posix_quote(http_proxy) + '\n')
if no_proxy is not None:
out.append('no_proxy=' + posix_quote(no_proxy) + '\n')
out.append('NO_PROXY=' + posix_quote(no_proxy) + '\n')
with open(self.sitedir + '/run/proxy-environment', 'w') as f:
f.write("".join(out))
return {self.sitedir + '/run/proxy-environment': '/etc/environment'}
def _get_container_name(self, container_type):
"""
Gets the full name of a container of the type specified.
Currently the supported types are:
- 'venv'
- 'postgres'
- 'solr'
- 'web'
- 'pgdata'
- 'lessc'
- 'datapusher'
- 'redis'
The name will be formatted appropriately with any prefixes and postfixes
needed.
:param container_type: The type of container name to generate (see above).
"""
if container_type in ['venv']:
return 'datacats_{}_{}'.format(container_type, self.name)
else:
return 'datacats_{}_{}_{}'.format(container_type, self.name, self.site_name)
def posix_quote(s):
return "\\'".join("'" + p + "'" for p in s.split("'"))
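# Quick check of posix_quote (an illustrative sketch; runnable standalone
# under the Python 2 interpreter this module targets):
if __name__ == '__main__':
    # an embedded single quote is closed, escaped and reopened:
    # pa's -> 'pa'\''s'
    print posix_quote("http://user:pa's@proxy:3128")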
| agpl-3.0 |
andmos/ansible | lib/ansible/module_utils/cloudstack.py | 7 | 24184 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, René Moser <mail@renemoser.net>
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import os
import sys
import time
import traceback
from ansible.module_utils._text import to_text, to_native
from ansible.module_utils.basic import missing_required_lib
CS_IMP_ERR = None
try:
from cs import CloudStack, CloudStackException, read_config
HAS_LIB_CS = True
except ImportError:
CS_IMP_ERR = traceback.format_exc()
HAS_LIB_CS = False
CS_HYPERVISORS = [
'KVM', 'kvm',
'VMware', 'vmware',
'BareMetal', 'baremetal',
'XenServer', 'xenserver',
'LXC', 'lxc',
'HyperV', 'hyperv',
'UCS', 'ucs',
'OVM', 'ovm',
'Simulator', 'simulator',
]
if sys.version_info > (3,):
long = int
def cs_argument_spec():
return dict(
api_key=dict(default=os.environ.get('CLOUDSTACK_KEY')),
api_secret=dict(default=os.environ.get('CLOUDSTACK_SECRET'), no_log=True),
api_url=dict(default=os.environ.get('CLOUDSTACK_ENDPOINT')),
api_http_method=dict(choices=['get', 'post'], default=os.environ.get('CLOUDSTACK_METHOD')),
api_timeout=dict(type='int', default=os.environ.get('CLOUDSTACK_TIMEOUT')),
api_region=dict(default=os.environ.get('CLOUDSTACK_REGION') or 'cloudstack'),
)
def cs_required_together():
return [['api_key', 'api_secret']]
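# Typical wiring in a concrete cloudstack module (a hedged sketch; the extra
# `name` argument and check-mode support are hypothetical, shown for shape):
#
#     from ansible.module_utils.basic import AnsibleModule
#     argument_spec = cs_argument_spec()
#     argument_spec.update(dict(name=dict(required=True)))
#     module = AnsibleModule(
#         argument_spec=argument_spec,
#         required_together=cs_required_together(),
#         supports_check_mode=True,
#     )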
class AnsibleCloudStack:
def __init__(self, module):
if not HAS_LIB_CS:
module.fail_json(msg=missing_required_lib('cs'), exception=CS_IMP_ERR)
self.result = {
'changed': False,
'diff': {
'before': dict(),
'after': dict()
}
}
# Common returns, will be merged with self.returns
# search_for_key: replace_with_key
self.common_returns = {
'id': 'id',
'name': 'name',
'created': 'created',
'zonename': 'zone',
'state': 'state',
'project': 'project',
'account': 'account',
'domain': 'domain',
'displaytext': 'display_text',
'displayname': 'display_name',
'description': 'description',
}
# Init returns dict for use in subclasses
self.returns = {}
# these values will be casted to int
self.returns_to_int = {}
# these keys will be compared case sensitive in self.has_changed()
self.case_sensitive_keys = [
'id',
'displaytext',
'displayname',
'description',
]
self.module = module
self._cs = None
# Helper for VPCs
self._vpc_networks_ids = None
self.domain = None
self.account = None
self.project = None
self.ip_address = None
self.network = None
self.vpc = None
self.zone = None
self.vm = None
self.vm_default_nic = None
self.os_type = None
self.hypervisor = None
self.capabilities = None
self.network_acl = None
@property
def cs(self):
if self._cs is None:
api_config = self.get_api_config()
self._cs = CloudStack(**api_config)
return self._cs
def get_api_config(self):
api_region = self.module.params.get('api_region') or os.environ.get('CLOUDSTACK_REGION')
try:
config = read_config(api_region)
except KeyError:
config = {}
api_config = {
'endpoint': self.module.params.get('api_url') or config.get('endpoint'),
'key': self.module.params.get('api_key') or config.get('key'),
'secret': self.module.params.get('api_secret') or config.get('secret'),
'timeout': self.module.params.get('api_timeout') or config.get('timeout') or 10,
'method': self.module.params.get('api_http_method') or config.get('method') or 'get',
}
self.result.update({
'api_region': api_region,
'api_url': api_config['endpoint'],
'api_key': api_config['key'],
'api_timeout': int(api_config['timeout']),
'api_http_method': api_config['method'],
})
if not all([api_config['endpoint'], api_config['key'], api_config['secret']]):
self.fail_json(msg="Missing api credentials: can not authenticate")
return api_config
def fail_json(self, **kwargs):
self.result.update(kwargs)
self.module.fail_json(**self.result)
def get_or_fallback(self, key=None, fallback_key=None):
value = self.module.params.get(key)
if not value:
value = self.module.params.get(fallback_key)
return value
def has_changed(self, want_dict, current_dict, only_keys=None, skip_diff_for_keys=None):
result = False
for key, value in want_dict.items():
# Optionally limit by a list of keys
if only_keys and key not in only_keys:
continue
# Skip None values
if value is None:
continue
if key in current_dict:
if isinstance(value, (int, float, long, complex)):
# ensure we compare the same type
if isinstance(value, int):
current_dict[key] = int(current_dict[key])
elif isinstance(value, float):
current_dict[key] = float(current_dict[key])
elif isinstance(value, long):
current_dict[key] = long(current_dict[key])
elif isinstance(value, complex):
current_dict[key] = complex(current_dict[key])
if value != current_dict[key]:
if skip_diff_for_keys and key not in skip_diff_for_keys:
self.result['diff']['before'][key] = current_dict[key]
self.result['diff']['after'][key] = value
result = True
else:
before_value = to_text(current_dict[key])
after_value = to_text(value)
if self.case_sensitive_keys and key in self.case_sensitive_keys:
if before_value != after_value:
if skip_diff_for_keys and key not in skip_diff_for_keys:
self.result['diff']['before'][key] = before_value
self.result['diff']['after'][key] = after_value
result = True
# Test for diff in case insensitive way
elif before_value.lower() != after_value.lower():
if skip_diff_for_keys and key not in skip_diff_for_keys:
self.result['diff']['before'][key] = before_value
self.result['diff']['after'][key] = after_value
result = True
else:
if skip_diff_for_keys and key not in skip_diff_for_keys:
self.result['diff']['before'][key] = None
self.result['diff']['after'][key] = to_text(value)
result = True
return result
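# Note on has_changed() above (illustrative): comparing
# {'displaytext': 'Web'} against {'displaytext': 'web'} reports a change
# because 'displaytext' is in case_sensitive_keys; string keys outside
# that list are compared case-insensitively.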
def _get_by_key(self, key=None, my_dict=None):
if my_dict is None:
my_dict = {}
if key:
if key in my_dict:
return my_dict[key]
self.fail_json(msg="Something went wrong: %s not found" % key)
return my_dict
def query_api(self, command, **args):
try:
res = getattr(self.cs, command)(**args)
if 'errortext' in res:
self.fail_json(msg="Failed: '%s'" % res['errortext'])
except CloudStackException as e:
self.fail_json(msg='CloudStackException: %s' % to_native(e))
except Exception as e:
self.fail_json(msg=to_native(e))
return res
def get_network_acl(self, key=None):
if self.network_acl is None:
args = {
'name': self.module.params.get('network_acl'),
'vpcid': self.get_vpc(key='id'),
}
network_acls = self.query_api('listNetworkACLLists', **args)
if network_acls:
self.network_acl = network_acls['networkacllist'][0]
self.result['network_acl'] = self.network_acl['name']
if self.network_acl:
return self._get_by_key(key, self.network_acl)
else:
self.fail_json(msg="Network ACL %s not found" % self.module.params.get('network_acl'))
def get_vpc(self, key=None):
"""Return a VPC dictionary or the value of the given key."""
if self.vpc:
return self._get_by_key(key, self.vpc)
vpc = self.module.params.get('vpc')
if not vpc:
vpc = os.environ.get('CLOUDSTACK_VPC')
if not vpc:
return None
args = {
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
'zoneid': self.get_zone(key='id'),
}
vpcs = self.query_api('listVPCs', **args)
if not vpcs:
self.fail_json(msg="No VPCs available.")
for v in vpcs['vpc']:
if vpc in [v['name'], v['displaytext'], v['id']]:
# Fail if the identifier matches more than one VPC
if self.vpc:
self.fail_json(msg="More than one VPC found with the provided identifier '%s'" % vpc)
else:
self.vpc = v
self.result['vpc'] = v['name']
if self.vpc:
return self._get_by_key(key, self.vpc)
self.fail_json(msg="VPC '%s' not found" % vpc)
def is_vpc_network(self, network_id):
"""Returns True if network is in VPC."""
# This is an efficient way to query a lot of networks at a time
if self._vpc_networks_ids is None:
args = {
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
'zoneid': self.get_zone(key='id'),
}
vpcs = self.query_api('listVPCs', **args)
self._vpc_networks_ids = []
if vpcs:
for vpc in vpcs['vpc']:
for n in vpc.get('network', []):
self._vpc_networks_ids.append(n['id'])
return network_id in self._vpc_networks_ids
def get_network(self, key=None):
"""Return a network dictionary or the value of the given key."""
if self.network:
return self._get_by_key(key, self.network)
network = self.module.params.get('network')
if not network:
vpc_name = self.get_vpc(key='name')
if vpc_name:
self.fail_json(msg="Could not find network for VPC '%s' due to a missing argument: network" % vpc_name)
return None
args = {
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
'zoneid': self.get_zone(key='id'),
'vpcid': self.get_vpc(key='id')
}
networks = self.query_api('listNetworks', **args)
if not networks:
self.fail_json(msg="No networks available.")
for n in networks['network']:
# ignore any VPC network if vpc param is not given
if 'vpcid' in n and not self.get_vpc(key='id'):
continue
if network in [n['displaytext'], n['name'], n['id']]:
self.result['network'] = n['name']
self.network = n
return self._get_by_key(key, self.network)
self.fail_json(msg="Network '%s' not found" % network)
def get_project(self, key=None):
if self.project:
return self._get_by_key(key, self.project)
project = self.module.params.get('project')
if not project:
project = os.environ.get('CLOUDSTACK_PROJECT')
if not project:
return None
args = {
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id')
}
projects = self.query_api('listProjects', **args)
if projects:
for p in projects['project']:
if project.lower() in [p['name'].lower(), p['id']]:
self.result['project'] = p['name']
self.project = p
return self._get_by_key(key, self.project)
self.fail_json(msg="project '%s' not found" % project)
def get_ip_address(self, key=None):
if self.ip_address:
return self._get_by_key(key, self.ip_address)
ip_address = self.module.params.get('ip_address')
if not ip_address:
self.fail_json(msg="IP address param 'ip_address' is required")
args = {
'ipaddress': ip_address,
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
'vpcid': self.get_vpc(key='id'),
}
ip_addresses = self.query_api('listPublicIpAddresses', **args)
if not ip_addresses:
self.fail_json(msg="IP address '%s' not found" % args['ipaddress'])
self.ip_address = ip_addresses['publicipaddress'][0]
return self._get_by_key(key, self.ip_address)
def get_vm_guest_ip(self):
vm_guest_ip = self.module.params.get('vm_guest_ip')
default_nic = self.get_vm_default_nic()
if not vm_guest_ip:
return default_nic['ipaddress']
for secondary_ip in default_nic['secondaryip']:
if vm_guest_ip == secondary_ip['ipaddress']:
return vm_guest_ip
self.fail_json(msg="Secondary IP '%s' not assigned to VM" % vm_guest_ip)
def get_vm_default_nic(self):
if self.vm_default_nic:
return self.vm_default_nic
nics = self.query_api('listNics', virtualmachineid=self.get_vm(key='id'))
if nics:
for n in nics['nic']:
if n['isdefault']:
self.vm_default_nic = n
return self.vm_default_nic
self.fail_json(msg="No default IP address of VM '%s' found" % self.module.params.get('vm'))
def get_vm(self, key=None, filter_zone=True):
if self.vm:
return self._get_by_key(key, self.vm)
vm = self.module.params.get('vm')
if not vm:
self.fail_json(msg="Virtual machine param 'vm' is required")
args = {
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
'zoneid': self.get_zone(key='id') if filter_zone else None,
'fetch_list': True,
}
vms = self.query_api('listVirtualMachines', **args)
if vms:
for v in vms:
if vm.lower() in [v['name'].lower(), v['displayname'].lower(), v['id']]:
self.vm = v
return self._get_by_key(key, self.vm)
self.fail_json(msg="Virtual machine '%s' not found" % vm)
def get_disk_offering(self, key=None):
disk_offering = self.module.params.get('disk_offering')
if not disk_offering:
return None
# Do not add domain filter for disk offering listing.
disk_offerings = self.query_api('listDiskOfferings')
if disk_offerings:
for d in disk_offerings['diskoffering']:
if disk_offering in [d['displaytext'], d['name'], d['id']]:
return self._get_by_key(key, d)
self.fail_json(msg="Disk offering '%s' not found" % disk_offering)
def get_zone(self, key=None):
if self.zone:
return self._get_by_key(key, self.zone)
zone = self.module.params.get('zone')
if not zone:
zone = os.environ.get('CLOUDSTACK_ZONE')
zones = self.query_api('listZones')
if not zones:
self.fail_json(msg="No zones available. Please create a zone first")
# use the first zone if no zone param given
if not zone:
self.zone = zones['zone'][0]
self.result['zone'] = self.zone['name']
return self._get_by_key(key, self.zone)
if zones:
for z in zones['zone']:
if zone.lower() in [z['name'].lower(), z['id']]:
self.result['zone'] = z['name']
self.zone = z
return self._get_by_key(key, self.zone)
self.fail_json(msg="zone '%s' not found" % zone)
def get_os_type(self, key=None):
if self.os_type:
return self._get_by_key(key, self.zone)
os_type = self.module.params.get('os_type')
if not os_type:
return None
os_types = self.query_api('listOsTypes')
if os_types:
for o in os_types['ostype']:
if os_type in [o['description'], o['id']]:
self.os_type = o
return self._get_by_key(key, self.os_type)
self.fail_json(msg="OS type '%s' not found" % os_type)
def get_hypervisor(self):
if self.hypervisor:
return self.hypervisor
hypervisor = self.module.params.get('hypervisor')
hypervisors = self.query_api('listHypervisors')
# use the first hypervisor if no hypervisor param given
if not hypervisor:
self.hypervisor = hypervisors['hypervisor'][0]['name']
return self.hypervisor
for h in hypervisors['hypervisor']:
if hypervisor.lower() == h['name'].lower():
self.hypervisor = h['name']
return self.hypervisor
self.fail_json(msg="Hypervisor '%s' not found" % hypervisor)
def get_account(self, key=None):
if self.account:
return self._get_by_key(key, self.account)
account = self.module.params.get('account')
if not account:
account = os.environ.get('CLOUDSTACK_ACCOUNT')
if not account:
return None
domain = self.module.params.get('domain')
if not domain:
self.fail_json(msg="Account must be specified with Domain")
args = {
'name': account,
'domainid': self.get_domain(key='id'),
'listall': True
}
accounts = self.query_api('listAccounts', **args)
if accounts:
self.account = accounts['account'][0]
self.result['account'] = self.account['name']
return self._get_by_key(key, self.account)
self.fail_json(msg="Account '%s' not found" % account)
def get_domain(self, key=None):
if self.domain:
return self._get_by_key(key, self.domain)
domain = self.module.params.get('domain')
if not domain:
domain = os.environ.get('CLOUDSTACK_DOMAIN')
if not domain:
return None
args = {
'listall': True,
}
domains = self.query_api('listDomains', **args)
if domains:
for d in domains['domain']:
if d['path'].lower() in [domain.lower(), "root/" + domain.lower(), "root" + domain.lower()]:
self.domain = d
self.result['domain'] = d['path']
return self._get_by_key(key, self.domain)
self.fail_json(msg="Domain '%s' not found" % domain)
def query_tags(self, resource, resource_type):
args = {
'resourceid': resource['id'],
'resourcetype': resource_type,
}
tags = self.query_api('listTags', **args)
return self.get_tags(resource=tags, key='tag')
def get_tags(self, resource=None, key='tags'):
existing_tags = []
for tag in resource.get(key) or []:
existing_tags.append({'key': tag['key'], 'value': tag['value']})
return existing_tags
def _process_tags(self, resource, resource_type, tags, operation="create"):
if tags:
self.result['changed'] = True
if not self.module.check_mode:
args = {
'resourceids': resource['id'],
'resourcetype': resource_type,
'tags': tags,
}
if operation == "create":
response = self.query_api('createTags', **args)
else:
response = self.query_api('deleteTags', **args)
self.poll_job(response)
def _tags_that_should_exist_or_be_updated(self, resource, tags):
existing_tags = self.get_tags(resource)
return [tag for tag in tags if tag not in existing_tags]
def _tags_that_should_not_exist(self, resource, tags):
existing_tags = self.get_tags(resource)
return [tag for tag in existing_tags if tag not in tags]
def ensure_tags(self, resource, resource_type=None):
if not resource_type or not resource:
self.fail_json(msg="Error: Missing resource or resource_type for tags.")
if 'tags' in resource:
tags = self.module.params.get('tags')
if tags is not None:
self._process_tags(resource, resource_type, self._tags_that_should_not_exist(resource, tags), operation="delete")
self._process_tags(resource, resource_type, self._tags_that_should_exist_or_be_updated(resource, tags))
resource['tags'] = self.query_tags(resource=resource, resource_type=resource_type)
return resource
def get_capabilities(self, key=None):
if self.capabilities:
return self._get_by_key(key, self.capabilities)
capabilities = self.query_api('listCapabilities')
self.capabilities = capabilities['capability']
return self._get_by_key(key, self.capabilities)
def poll_job(self, job=None, key=None):
if 'jobid' in job:
while True:
res = self.query_api('queryAsyncJobResult', jobid=job['jobid'])
if res['jobstatus'] != 0 and 'jobresult' in res:
if 'errortext' in res['jobresult']:
self.fail_json(msg="Failed: '%s'" % res['jobresult']['errortext'])
if key and key in res['jobresult']:
job = res['jobresult'][key]
break
time.sleep(2)
return job
def get_result(self, resource):
if resource:
returns = self.common_returns.copy()
returns.update(self.returns)
for search_key, return_key in returns.items():
if search_key in resource:
self.result[return_key] = resource[search_key]
# The API does not always return an int when it should.
for search_key, return_key in self.returns_to_int.items():
if search_key in resource:
self.result[return_key] = int(resource[search_key])
if 'tags' in resource:
self.result['tags'] = resource['tags']
return self.result
def get_result_and_facts(self, facts_name, resource):
result = self.get_result(resource)
ansible_facts = {
facts_name: result.copy()
}
for k in ['diff', 'changed']:
if k in ansible_facts[facts_name]:
del ansible_facts[facts_name][k]
result.update(ansible_facts=ansible_facts)
return result
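# Illustrative subclass (a hedged sketch, not part of upstream Ansible):
# concrete modules extend AnsibleCloudStack, declare the extra API fields
# they expose via self.returns, and reuse query_api()/get_result().
class _ExampleZoneInfo(AnsibleCloudStack):
    def __init__(self, module):
        super(_ExampleZoneInfo, self).__init__(module)
        # API key -> result key; merged with common_returns in get_result()
        self.returns = {
            'dns1': 'dns1',
            'networktype': 'network_type',
        }

    def run(self):
        # get_zone() resolves the `zone` module param (or picks the first
        # zone); get_result() copies the mapped keys into self.result
        return self.get_result(self.get_zone())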
| gpl-3.0 |
KMK-ONLINE/ansible | lib/ansible/module_utils/lxd.py | 6 | 6082 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Hiroaki Nakamura <hnakamur@gmail.com>
#
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
try:
import json
except ImportError:
import simplejson as json
# httplib/http.client connection using unix domain socket
import socket
import ssl
try:
from httplib import HTTPConnection, HTTPSConnection
except ImportError:
# Python 3
from http.client import HTTPConnection, HTTPSConnection
class UnixHTTPConnection(HTTPConnection):
def __init__(self, path):
HTTPConnection.__init__(self, 'localhost')
self.path = path
def connect(self):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(self.path)
self.sock = sock
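# UnixHTTPConnection can be exercised on its own (a hedged sketch; the
# socket path is the common LXD default and may differ per installation):
#
#     conn = UnixHTTPConnection('/var/lib/lxd/unix.socket')
#     conn.request('GET', '/1.0')
#     print(conn.getresponse().read())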
from ansible.module_utils.urls import generic_urlparse
try:
from urlparse import urlparse
except ImportError:
# Python 3
from urllib.parse import urlparse
class LXDClientException(Exception):
def __init__(self, msg, **kwargs):
self.msg = msg
self.kwargs = kwargs
class LXDClient(object):
def __init__(self, url, key_file=None, cert_file=None, debug=False):
"""LXD Client.
:param url: The URL of the LXD server. (e.g. unix:/var/lib/lxd/unix.socket or https://127.0.0.1)
:type url: ``str``
:param key_file: The path of the client certificate key file.
:type key_file: ``str``
:param cert_file: The path of the client certificate file.
:type cert_file: ``str``
:param debug: The debug flag. The request and response are stored in logs when debug is true.
:type debug: ``bool``
"""
self.url = url
self.debug = debug
self.logs = []
if url.startswith('https:'):
self.cert_file = cert_file
self.key_file = key_file
parts = generic_urlparse(urlparse(self.url))
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
ctx.load_cert_chain(cert_file, keyfile=key_file)
self.connection = HTTPSConnection(parts.get('netloc'), context=ctx)
elif url.startswith('unix:'):
unix_socket_path = url[len('unix:'):]
self.connection = UnixHTTPConnection(unix_socket_path)
else:
raise LXDClientException('URL scheme must be unix: or https:')
def do(self, method, url, body_json=None, ok_error_codes=None, timeout=None):
resp_json = self._send_request(method, url, body_json=body_json, ok_error_codes=ok_error_codes, timeout=timeout)
if resp_json['type'] == 'async':
url = '{0}/wait'.format(resp_json['operation'])
resp_json = self._send_request('GET', url)
if resp_json['metadata']['status'] != 'Success':
self._raise_err_from_json(resp_json)
return resp_json
def authenticate(self, trust_password):
body_json = {'type': 'client', 'password': trust_password}
return self._send_request('POST', '/1.0/certificates', body_json=body_json)
def _send_request(self, method, url, body_json=None, ok_error_codes=None, timeout=None):
try:
body = json.dumps(body_json)
self.connection.request(method, url, body=body)
resp = self.connection.getresponse()
resp_json = json.loads(resp.read())
self.logs.append({
'type': 'sent request',
'request': {'method': method, 'url': url, 'json': body_json, 'timeout': timeout},
'response': {'json': resp_json}
})
resp_type = resp_json.get('type', None)
if resp_type == 'error':
if ok_error_codes is not None and resp_json['error_code'] in ok_error_codes:
return resp_json
self._raise_err_from_json(resp_json)
return resp_json
except socket.error as e:
raise LXDClientException('cannot connect to the LXD server', err=e)
def _raise_err_from_json(self, resp_json):
err_params = {}
if self.debug:
err_params['logs'] = self.logs
raise LXDClientException(self._get_err_from_resp_json(resp_json), **err_params)
@staticmethod
def _get_err_from_resp_json(resp_json):
err = None
metadata = resp_json.get('metadata', None)
if metadata is not None:
err = metadata.get('err', None)
if err is None:
err = resp_json.get('error', None)
return err
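# Minimal end-to-end sketch (hedged: requires a local LXD daemon and assumes
# the stock socket path).  do() transparently waits on async operations:
if __name__ == '__main__':
    client = LXDClient('unix:/var/lib/lxd/unix.socket', debug=True)
    # list containers; a trust password would first go through authenticate()
    resp = client.do('GET', '/1.0/containers')
    print(resp['metadata'])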
| gpl-3.0 |
dmordom/nipype | nipype/interfaces/slicer/registration/tests/test_auto_FiducialRegistration.py | 5 | 1366 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.slicer.registration.specialized import FiducialRegistration
def test_FiducialRegistration_inputs():
input_map = dict(args=dict(argstr='%s',
),
environ=dict(nohash=True,
usedefault=True,
),
fixedLandmarks=dict(argstr='--fixedLandmarks %s...',
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
movingLandmarks=dict(argstr='--movingLandmarks %s...',
),
outputMessage=dict(argstr='--outputMessage %s',
),
rms=dict(argstr='--rms %f',
),
saveTransform=dict(argstr='--saveTransform %s',
hash_files=False,
),
terminal_output=dict(mandatory=True,
nohash=True,
),
transformType=dict(argstr='--transformType %s',
),
)
inputs = FiducialRegistration.input_spec()
for key, metadata in input_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_FiducialRegistration_outputs():
output_map = dict(saveTransform=dict(),
)
outputs = FiducialRegistration.output_spec()
for key, metadata in output_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(outputs.traits()[key], metakey), value
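# These yield-style tests target nose, which turns every yielded
# (assert_equal, actual, expected) triple into its own test case, e.g.
# (hypothetical invocation):
#   nosetests test_auto_FiducialRegistration.py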
| bsd-3-clause |
mbauskar/alec_frappe5_erpnext | erpnext/buying/doctype/supplier_quotation/supplier_quotation.py | 59 | 2532 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.mapper import get_mapped_doc
from erpnext.controllers.buying_controller import BuyingController
form_grid_templates = {
"items": "templates/form_grid/item_grid.html"
}
class SupplierQuotation(BuyingController):
def validate(self):
super(SupplierQuotation, self).validate()
if not self.status:
self.status = "Draft"
from erpnext.controllers.status_updater import validate_status
validate_status(self.status, ["Draft", "Submitted", "Stopped",
"Cancelled"])
self.validate_common()
self.validate_with_previous_doc()
self.validate_uom_is_integer("uom", "qty")
def on_submit(self):
frappe.db.set(self, "status", "Submitted")
def on_cancel(self):
frappe.db.set(self, "status", "Cancelled")
def on_trash(self):
pass
def validate_with_previous_doc(self):
super(SupplierQuotation, self).validate_with_previous_doc({
"Material Request": {
"ref_dn_field": "prevdoc_docname",
"compare_fields": [["company", "="]],
},
"Material Request Item": {
"ref_dn_field": "prevdoc_detail_docname",
"compare_fields": [["item_code", "="], ["uom", "="]],
"is_child_table": True
}
})
def validate_common(self):
pc = frappe.get_doc('Purchase Common')
pc.validate_for_items(self)
@frappe.whitelist()
def make_purchase_order(source_name, target_doc=None):
def set_missing_values(source, target):
target.ignore_pricing_rule = 1
target.run_method("set_missing_values")
target.run_method("get_schedule_dates")
target.run_method("calculate_taxes_and_totals")
def update_item(obj, target, source_parent):
target.conversion_factor = 1
doclist = get_mapped_doc("Supplier Quotation", source_name, {
"Supplier Quotation": {
"doctype": "Purchase Order",
"validation": {
"docstatus": ["=", 1],
}
},
"Supplier Quotation Item": {
"doctype": "Purchase Order Item",
"field_map": [
["name", "supplier_quotation_item"],
["parent", "supplier_quotation"],
["uom", "stock_uom"],
["uom", "uom"],
["prevdoc_detail_docname", "prevdoc_detail_docname"],
["prevdoc_doctype", "prevdoc_doctype"],
["prevdoc_docname", "prevdoc_docname"]
],
"postprocess": update_item
},
"Purchase Taxes and Charges": {
"doctype": "Purchase Taxes and Charges",
"add_if_empty": True
},
}, target_doc, set_missing_values)
return doclist
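# Illustrative use of the whitelisted mapper above (a hedged sketch; the
# quotation name is invented).  Server side:
#
#     po = make_purchase_order("SQTN-00001")
#     po.insert()
#
# or from the client via frappe.call() with the dotted path to this function.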
| agpl-3.0 |
bruderstein/PythonScript | PythonLib/full/distutils/core.py | 55 | 8876 | """distutils.core
The only module that needs to be imported to use the Distutils; provides
the 'setup' function (which is to be called from the setup script). Also
indirectly provides the Distribution and Command classes, although they are
really defined in distutils.dist and distutils.cmd.
"""
import os
import sys
from distutils.debug import DEBUG
from distutils.errors import *
# Mainly import these so setup scripts can "from distutils.core import" them.
from distutils.dist import Distribution
from distutils.cmd import Command
from distutils.config import PyPIRCCommand
from distutils.extension import Extension
# This is a barebones help message displayed when the user
# runs the setup script with no arguments at all. More useful help
# is generated with various --help options: global help, list commands,
# and per-command help.
USAGE = """\
usage: %(script)s [global_opts] cmd1 [cmd1_opts] [cmd2 [cmd2_opts] ...]
or: %(script)s --help [cmd1 cmd2 ...]
or: %(script)s --help-commands
or: %(script)s cmd --help
"""
def gen_usage (script_name):
script = os.path.basename(script_name)
return USAGE % vars()
# Some mild magic to control the behaviour of 'setup()' from 'run_setup()'.
_setup_stop_after = None
_setup_distribution = None
# Legal keyword arguments for the setup() function
setup_keywords = ('distclass', 'script_name', 'script_args', 'options',
'name', 'version', 'author', 'author_email',
'maintainer', 'maintainer_email', 'url', 'license',
'description', 'long_description', 'keywords',
'platforms', 'classifiers', 'download_url',
'requires', 'provides', 'obsoletes',
)
# Legal keyword arguments for the Extension constructor
extension_keywords = ('name', 'sources', 'include_dirs',
'define_macros', 'undef_macros',
'library_dirs', 'libraries', 'runtime_library_dirs',
'extra_objects', 'extra_compile_args', 'extra_link_args',
'swig_opts', 'export_symbols', 'depends', 'language')
def setup (**attrs):
"""The gateway to the Distutils: do everything your setup script needs
to do, in a highly flexible and user-driven way. Briefly: create a
Distribution instance; find and parse config files; parse the command
line; run each Distutils command found there, customized by the options
supplied to 'setup()' (as keyword arguments), in config files, and on
the command line.
The Distribution instance might be an instance of a class supplied via
the 'distclass' keyword argument to 'setup'; if no such class is
supplied, then the Distribution class (in dist.py) is instantiated.
All other arguments to 'setup' (except for 'cmdclass') are used to set
attributes of the Distribution instance.
The 'cmdclass' argument, if supplied, is a dictionary mapping command
names to command classes. Each command encountered on the command line
will be turned into a command class, which is in turn instantiated; any
class found in 'cmdclass' is used in place of the default, which is
(for command 'foo_bar') class 'foo_bar' in module
'distutils.command.foo_bar'. The command class must provide a
'user_options' attribute which is a list of option specifiers for
'distutils.fancy_getopt'. Any command-line options between the current
and the next command are used to set attributes of the current command
object.
When the entire command-line has been successfully parsed, calls the
'run()' method on each command object in turn. This method will be
driven entirely by the Distribution object (which each command object
has a reference to, thanks to its constructor), and the
command-specific options that became attributes of each command
object.
"""
global _setup_stop_after, _setup_distribution
# Determine the distribution class -- either caller-supplied or
# our Distribution (see below).
klass = attrs.get('distclass')
if klass:
del attrs['distclass']
else:
klass = Distribution
if 'script_name' not in attrs:
attrs['script_name'] = os.path.basename(sys.argv[0])
if 'script_args' not in attrs:
attrs['script_args'] = sys.argv[1:]
# Create the Distribution instance, using the remaining arguments
# (ie. everything except distclass) to initialize it
try:
_setup_distribution = dist = klass(attrs)
except DistutilsSetupError as msg:
if 'name' not in attrs:
raise SystemExit("error in setup command: %s" % msg)
else:
raise SystemExit("error in %s setup command: %s" % \
(attrs['name'], msg))
if _setup_stop_after == "init":
return dist
# Find and parse the config file(s): they will override options from
# the setup script, but be overridden by the command line.
dist.parse_config_files()
if DEBUG:
print("options (after parsing config files):")
dist.dump_option_dicts()
if _setup_stop_after == "config":
return dist
# Parse the command line and override config files; any
# command-line errors are the end user's fault, so turn them into
# SystemExit to suppress tracebacks.
try:
ok = dist.parse_command_line()
except DistutilsArgError as msg:
raise SystemExit(gen_usage(dist.script_name) + "\nerror: %s" % msg)
if DEBUG:
print("options (after parsing command line):")
dist.dump_option_dicts()
if _setup_stop_after == "commandline":
return dist
# And finally, run all the commands found on the command line.
if ok:
try:
dist.run_commands()
except KeyboardInterrupt:
raise SystemExit("interrupted")
except OSError as exc:
if DEBUG:
sys.stderr.write("error: %s\n" % (exc,))
raise
else:
raise SystemExit("error: %s" % (exc,))
except (DistutilsError,
CCompilerError) as msg:
if DEBUG:
raise
else:
raise SystemExit("error: " + str(msg))
return dist
# setup ()
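# Hedged usage sketch (editorial addition): a minimal setup script built on
# the 'setup()' function above. The project name, version and module list
# are illustrative assumptions, not taken from any real package.
#
#     from distutils.core import setup
#
#     setup(name='demo',
#           version='0.1',
#           py_modules=['demo'],
#           author='Jane Doe')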
def run_setup (script_name, script_args=None, stop_after="run"):
"""Run a setup script in a somewhat controlled environment, and
return the Distribution instance that drives things. This is useful
if you need to find out the distribution meta-data (passed as
keyword args from 'script' to 'setup()'), or the contents of the
config files or command-line.
'script_name' is a file that will be read and run with 'exec()';
'sys.argv[0]' will be replaced with 'script_name' for the duration of the
call. 'script_args' is a list of strings; if supplied,
'sys.argv[1:]' will be replaced by 'script_args' for the duration of
the call.
'stop_after' tells 'setup()' when to stop processing; possible
values:
init
stop after the Distribution instance has been created and
populated with the keyword arguments to 'setup()'
config
stop after config files have been parsed (and their data
stored in the Distribution instance)
commandline
stop after the command-line ('sys.argv[1:]' or 'script_args')
have been parsed (and the data stored in the Distribution)
run [default]
stop after all commands have been run (the same as if 'setup()'
had been called in the usual way)
Returns the Distribution instance, which provides all information
used to drive the Distutils.
"""
if stop_after not in ('init', 'config', 'commandline', 'run'):
raise ValueError("invalid value for 'stop_after': %r" % (stop_after,))
global _setup_stop_after, _setup_distribution
_setup_stop_after = stop_after
save_argv = sys.argv.copy()
g = {'__file__': script_name}
try:
try:
sys.argv[0] = script_name
if script_args is not None:
sys.argv[1:] = script_args
with open(script_name, 'rb') as f:
exec(f.read(), g)
finally:
sys.argv = save_argv
_setup_stop_after = None
except SystemExit:
# Hmm, should we do something if exiting with a non-zero code
# (ie. error)?
pass
if _setup_distribution is None:
raise RuntimeError(("'distutils.core.setup()' was never called -- "
"perhaps '%s' is not a Distutils setup script?") % \
script_name)
# I wonder if the setup script's namespace -- g -- would be of
# any interest to callers?
#print "_setup_distribution:", _setup_distribution
return _setup_distribution
# run_setup ()
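# Hedged usage sketch (editorial addition): introspecting a setup script
# without running its commands, using the 'stop_after' values documented
# above. The 'setup.py' path is an assumption for illustration.
def _example_introspect_setup_script():
    dist = run_setup('setup.py', stop_after='config')
    return dist.get_name()  # metadata is available once config parsing stops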
| gpl-2.0 |
cpacia/OpenBazaar-Server | keys/keychain.py | 6 | 2191 | __author__ = 'chris'
import bitcointools
import nacl.signing
import nacl.encoding
import threading
from keys.guid import GUID
class KeyChain(object):
def __init__(self, database, callback=None, heartbeat_server=None):
self.db = database
guid_keys = self.db.keys.get_key("guid")
if guid_keys is None:
if heartbeat_server:
heartbeat_server.set_status("generating GUID")
threading.Thread(target=self.create_keychain, args=[callback]).start()
else:
g = GUID.from_privkey(guid_keys[0])
self.guid = g.guid
self.signing_key = g.signing_key
self.verify_key = g.verify_key
# pylint: disable=W0633
self.bitcoin_master_privkey, self.bitcoin_master_pubkey = self.db.keys.get_key("bitcoin")
self.encryption_key = self.signing_key.to_curve25519_private_key()
self.encryption_pubkey = self.verify_key.to_curve25519_public_key()
if callable(callback):
callback(self)
def create_keychain(self, callback=None):
"""
The guid generation can take a while. While it's doing that we will
open a port to allow a UI to connect and listen for generation to
complete.
"""
print "Generating GUID, this may take a few minutes..."
g = GUID()
self.guid = g.guid
self.signing_key = g.signing_key
self.verify_key = g.verify_key
self.db.keys.set_key("guid", self.signing_key.encode(encoder=nacl.encoding.HexEncoder),
self.verify_key.encode(encoder=nacl.encoding.HexEncoder))
self.bitcoin_master_privkey = bitcointools.bip32_master_key(bitcointools.sha256(self.signing_key.encode()))
self.bitcoin_master_pubkey = bitcointools.bip32_privtopub(self.bitcoin_master_privkey)
self.db.keys.set_key("bitcoin", self.bitcoin_master_privkey, self.bitcoin_master_pubkey)
self.encryption_key = self.signing_key.to_curve25519_private_key()
self.encryption_pubkey = self.verify_key.to_curve25519_public_key()
if callable(callback):
callback(self, True)
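# Hedged usage sketch (editorial addition): the keychain is normally built
# once at startup around the application's database wrapper; `db` below is a
# stand-in for that object, not a real import.
#
#     def on_ready(keychain, created=False):
#         print keychain.guid
#
#     KeyChain(db, callback=on_ready)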
| mit |
Azulinho/ansible | lib/ansible/module_utils/ovirt.py | 7 | 25775 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import collections
import inspect
import os
import time
from abc import ABCMeta, abstractmethod
from datetime import datetime
from distutils.version import LooseVersion
try:
from enum import Enum # enum is a ovirtsdk4 requirement
import ovirtsdk4 as sdk
import ovirtsdk4.version as sdk_version
HAS_SDK = LooseVersion(sdk_version.VERSION) >= LooseVersion('4.0.0')
except ImportError:
HAS_SDK = False
BYTES_MAP = {
'kib': 2**10,
'mib': 2**20,
'gib': 2**30,
'tib': 2**40,
'pib': 2**50,
}
def check_sdk(module):
if not HAS_SDK:
module.fail_json(
msg='ovirtsdk4 version 4.0.0 or higher is required for this module'
)
def get_dict_of_struct(struct, connection=None, fetch_nested=False, attributes=None):
"""
Convert SDK Struct type into dictionary.
"""
res = {}
def remove_underscore(val):
    if val.startswith('_'):
        val = remove_underscore(val[1:])  # recurse on the stripped value; the original discarded the recursive result
    return val
def convert_value(value):
nested = False
if isinstance(value, sdk.Struct):
return get_dict_of_struct(value)
elif isinstance(value, Enum) or isinstance(value, datetime):
return str(value)
elif isinstance(value, list) or isinstance(value, sdk.List):
if isinstance(value, sdk.List) and fetch_nested and value.href:
try:
value = connection.follow_link(value)
nested = True
except sdk.Error:
value = []
ret = []
for i in value:
if isinstance(i, sdk.Struct):
if not nested:
ret.append(get_dict_of_struct(i))
else:
nested_obj = dict(
(attr, convert_value(getattr(i, attr)))
for attr in attributes if getattr(i, attr, None)
)
nested_obj['id'] = getattr(i, 'id', None)  # no trailing comma here; a stray comma would wrap the id in a one-element tuple
ret.append(nested_obj)
elif isinstance(i, Enum):
ret.append(str(i))
else:
ret.append(i)
return ret
else:
return value
if struct is not None:
for key, value in struct.__dict__.items():
if value is None:
continue
key = remove_underscore(key)
res[key] = convert_value(value)
return res
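# Hedged illustration (editorial addition): given an SDK struct such as a Vm,
# the conversion above yields plain Python data, roughly:
#
#     get_dict_of_struct(vm)
#     # -> {'id': '123...', 'name': 'myvm', 'status': 'up', ...}
#
# Enum and datetime attributes become strings and nested structs become
# dicts; the keys and values shown are made up for illustration.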
def engine_version(connection):
"""
Return string representation of oVirt engine version.
"""
engine_api = connection.system_service().get()
engine_version = engine_api.product_info.version
return '%s.%s' % (engine_version.major, engine_version.minor)
def create_connection(auth):
"""
Create a connection to Python SDK, from task `auth` parameter.
If the user doesn't have an SSO token, the `auth` dictionary must contain:
url, username, password
If the user has an SSO token, the `auth` dictionary must contain:
url, token
The `ca_file` parameter is mandatory for a secure connection; for an
insecure connection it's mandatory to send insecure=True instead.
:param auth: dictionary which contains needed values for connection creation
:return: Python SDK connection
"""
return sdk.Connection(
url=auth.get('url'),
username=auth.get('username'),
password=auth.get('password'),
ca_file=auth.get('ca_file', None),
insecure=auth.get('insecure', False),
token=auth.get('token', None),
kerberos=auth.get('kerberos', None),
headers=auth.get('headers', None),
)
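# Hedged usage sketch (editorial addition): building a connection from a
# task-style auth dict. The URL and credentials below are placeholders only.
def _example_create_connection():
    auth = {
        'url': 'https://engine.example.com/ovirt-engine/api',
        'username': 'admin@internal',
        'password': 'secret',
        'insecure': True,  # or supply 'ca_file' for a verified connection
    }
    return create_connection(auth)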
def convert_to_bytes(param):
"""
This method converts units to bytes, following the IEC standard.
:param param: value to be converted
"""
if param is None:
return None
# Get rid of whitespaces:
param = ''.join(param.split())
# Convert to bytes:
if param[-3].lower() in ['k', 'm', 'g', 't', 'p']:
return int(param[:-3]) * BYTES_MAP.get(param[-3:].lower(), 1)
elif param.isdigit():
return int(param) * 2**10
else:
raise ValueError(
"Unsupported value(IEC supported): '{value}'".format(value=param)
)
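# Hedged self-check (editorial addition) exercising the IEC suffix handling
# documented above; it is never called at import time.
def _example_convert_to_bytes():
    assert convert_to_bytes('2GiB') == 2 * 2**30
    assert convert_to_bytes('10 MiB') == 10 * 2**20  # whitespace is stripped first
    assert convert_to_bytes('512') == 512 * 2**10    # bare digits are treated as KiB
    assert convert_to_bytes(None) is None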
def follow_link(connection, link):
"""
This method returns the entity of the element which link points to.
:param connection: connection to the Python SDK
:param link: link of the entity
:return: entity which link points to
"""
if link:
return connection.follow_link(link)
else:
return None
def get_link_name(connection, link):
"""
This method returns the name of the element which link points to.
:param connection: connection to the Python SDK
:param link: link of the entity
:return: name of the entity, which link points to
"""
if link:
return connection.follow_link(link).name
else:
return None
def equal(param1, param2, ignore_case=False):
"""
Compare two parameters and return if they are equal.
This function doesn't run the comparison if the first parameter is None.
With this approach we skip the comparison when the user doesn't
specify the parameter in their task.
:param param1: user inputted parameter
:param param2: value of entity parameter
:return: True if parameters are equal or first parameter is None, otherwise False
"""
if param1 is not None:
if ignore_case:
return param1.lower() == param2.lower()
return param1 == param2
return True
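# Hedged self-check (editorial addition): the None short-circuit and the
# case-insensitive comparison described in the docstring above.
def _example_equal():
    assert equal(None, 'anything')  # unspecified task parameter -> treated as equal
    assert equal('Web', 'web', ignore_case=True)
    assert not equal('a', 'b')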
def search_by_attributes(service, **kwargs):
"""
Search for the entity by attributes. Nested entities don't support search
via REST, so when searching for a nested entity we return all entities
and filter them by the specified attributes.
"""
# Check if 'list' method support search(look for search parameter):
if 'search' in inspect.getargspec(service.list)[0]:
res = service.list(
search=' and '.join('{}={}'.format(k, v) for k, v in kwargs.items())
)
else:
res = [
e for e in service.list() if len([
k for k, v in kwargs.items() if getattr(e, k, None) == v
]) == len(kwargs)
]
res = res or [None]
return res[0]
def search_by_name(service, name, **kwargs):
"""
Search for the entity by its name. Nested entities don't support search
via REST, so when searching for a nested entity we return all entities
and filter them by name.
:param service: service of the entity
:param name: name of the entity
:return: Entity object returned by Python SDK
"""
# Check if 'list' method support search(look for search parameter):
if 'search' in inspect.getargspec(service.list)[0]:
res = service.list(
search="name={name}".format(name=name)
)
else:
res = [e for e in service.list() if e.name == name]
if kwargs:
res = [
e for e in service.list() if len([
k for k, v in kwargs.items() if getattr(e, k, None) == v
]) == len(kwargs)
]
res = res or [None]
return res[0]
def get_entity(service):
"""
Ignore SDK Error in case of getting an entity from service.
"""
entity = None
try:
entity = service.get()
except sdk.Error:
# We can get a 404 here; ignore it, for example when the
# entity is being removed.
pass
return entity
def get_id_by_name(service, name, raise_error=True, ignore_case=False):
"""
Search for an entity ID by its name.
"""
entity = search_by_name(service, name)
if entity is not None:
return entity.id
if raise_error:
raise Exception("Entity '%s' was not found." % name)
def wait(
service,
condition,
fail_condition=lambda e: False,
timeout=180,
wait=True,
poll_interval=3,
):
"""
Wait until the entity fulfills the expected condition.
:param service: service of the entity
:param condition: condition to be fulfilled
:param fail_condition: if this condition is true, raise Exception
:param timeout: max time to wait in seconds
:param wait: if True wait for condition, if False don't wait
:param poll_interval: Number of seconds we should wait until next condition check
"""
# Wait until the desired state of the entity:
if wait:
start = time.time()
while time.time() < start + timeout:
# Exit if the condition of entity is valid:
entity = get_entity(service)
if condition(entity):
return
elif fail_condition(entity):
raise Exception("Error while waiting on result state of the entity.")
# Sleep for `poll_interval` seconds if none of the conditions apply:
time.sleep(float(poll_interval))
raise Exception("Timeout exceed while waiting on result state of the entity.")
def __get_auth_dict():
OVIRT_URL = os.environ.get('OVIRT_URL')
OVIRT_USERNAME = os.environ.get('OVIRT_USERNAME')
OVIRT_PASSWORD = os.environ.get('OVIRT_PASSWORD')
OVIRT_TOKEN = os.environ.get('OVIRT_TOKEN')
OVIRT_CAFILE = os.environ.get('OVIRT_CAFILE')
OVIRT_INSECURE = OVIRT_CAFILE is None
env_vars = None
if OVIRT_URL and ((OVIRT_USERNAME and OVIRT_PASSWORD) or OVIRT_TOKEN):
env_vars = {
'url': OVIRT_URL,
'username': OVIRT_USERNAME,
'password': OVIRT_PASSWORD,
'insecure': OVIRT_INSECURE,
'token': OVIRT_TOKEN,
'ca_file': OVIRT_CAFILE,
}
if env_vars is not None:
auth = dict(default=env_vars, type='dict')
else:
auth = dict(required=True, type='dict')
return auth
def ovirt_facts_full_argument_spec(**kwargs):
"""
Extend parameters of facts module with parameters which are common to all
oVirt facts modules.
:param kwargs: kwargs to be extended
:return: extended dictionary with common parameters
"""
spec = dict(
auth=__get_auth_dict(),
fetch_nested=dict(default=False, type='bool'),
nested_attributes=dict(type='list', default=list()),
)
spec.update(kwargs)
return spec
def ovirt_full_argument_spec(**kwargs):
"""
Extend parameters of module with parameters which are common to all oVirt modules.
:param kwargs: kwargs to be extended
:return: extended dictionary with common parameters
"""
spec = dict(
auth=__get_auth_dict(),
timeout=dict(default=180, type='int'),
wait=dict(default=True, type='bool'),
poll_interval=dict(default=3, type='int'),
fetch_nested=dict(default=False, type='bool'),
nested_attributes=dict(type='list', default=list()),
)
spec.update(kwargs)
return spec
def check_params(module):
"""
Most modules must have either `name` or `id` specified.
"""
if module.params.get('name') is None and module.params.get('id') is None:
module.fail_json(msg='"name" or "id" is required')
def engine_supported(connection, version):
return LooseVersion(engine_version(connection)) >= LooseVersion(version)
def check_support(version, connection, module, params):
"""
Check if the parameters used by the user are supported by the oVirt
Python SDK and the oVirt engine.
"""
api_version = LooseVersion(engine_version(connection))
version = LooseVersion(version)
for param in params:
if module.params.get(param) is not None:
return LooseVersion(sdk_version.VERSION) >= version and api_version >= version
return True
class BaseModule(object):
"""
This is the base class for oVirt modules. oVirt modules should inherit this
class and override methods to customize the specific needs of the module.
The only abstract method of this class is `build_entity`, which must
be implemented in the child class.
"""
__metaclass__ = ABCMeta
def __init__(self, connection, module, service, changed=False):
self._connection = connection
self._module = module
self._service = service
self._changed = changed
self._diff = {'after': dict(), 'before': dict()}
@property
def changed(self):
return self._changed
@changed.setter
def changed(self, changed):
if not self._changed:
self._changed = changed
@abstractmethod
def build_entity(self):
"""
This method should return oVirt Python SDK type, which we want to
create or update, initialized by values passed by Ansible module.
For example if we want to create VM, we will return following:
types.Vm(name=self._module.params['vm_name'])
:return: Specific instance of sdk.Struct.
"""
pass
def param(self, name, default=None):
"""
Return a module parameter specified by its name.
"""
return self._module.params.get(name, default)
def update_check(self, entity):
"""
This method checks whether the entity values are the same as the values
passed to the Ansible module. By default we don't compare any values.
:param entity: Entity we want to compare with Ansible module values.
:return: True if values are same, so we don't need to update the entity.
"""
return True
def pre_create(self, entity):
"""
This method is called right before entity is created.
:param entity: Entity to be created or updated.
"""
pass
def post_create(self, entity):
"""
This method is called right after entity is created.
:param entity: Entity which was created.
"""
pass
def post_update(self, entity):
"""
This method is called right after entity is updated.
:param entity: Entity which was updated.
"""
pass
def diff_update(self, after, update):
for k, v in update.items():
if isinstance(v, collections.Mapping):
after[k] = self.diff_update(after.get(k, dict()), v)
else:
after[k] = update[k]
return after
def create(
self,
entity=None,
result_state=None,
fail_condition=lambda e: False,
search_params=None,
update_params=None,
**kwargs
):
"""
Method which is called when state of the entity is 'present'. If the user
doesn't provide the `entity` parameter, the entity is searched for using
the `search_params` parameter. If the entity is found it's updated; whether
it should be updated is checked by the `update_check` method.
The corresponding updated entity is built by the `build_entity` method.
Function executed after entity is created can optionally be specified
in `post_create` parameter. Function executed after entity is updated
can optionally be specified in `post_update` parameter.
:param entity: Entity we want to update, if exists.
:param result_state: State which should entity has in order to finish task.
:param fail_condition: Function which checks incorrect state of entity, if it returns `True` Exception is raised.
:param search_params: Dictionary of parameters to be used for search.
:param update_params: The params which should be passed to update method.
:param kwargs: Additional parameters passed when creating entity.
:return: Dictionary with values returned by Ansible module.
"""
if entity is None:
entity = self.search_entity(search_params)
self.pre_create(entity)
if entity:
# Entity exists, so update it:
entity_service = self._service.service(entity.id)
if not self.update_check(entity):
new_entity = self.build_entity()
if not self._module.check_mode:
update_params = update_params or {}
updated_entity = entity_service.update(
new_entity,
**update_params
)
self.post_update(entity)
# Update diffs only if user specified --diff parameter,
# so we don't needlessly overload the API:
if self._module._diff:
before = get_dict_of_struct(
entity,
self._connection,
fetch_nested=True,
attributes=['name'],
)
after = before.copy()
self.diff_update(after, get_dict_of_struct(new_entity))
self._diff['before'] = before
self._diff['after'] = after
self.changed = True
else:
# Entity doesn't exist, so create it:
if not self._module.check_mode:
entity = self._service.add(
self.build_entity(),
**kwargs
)
self.post_create(entity)
self.changed = True
# Wait for the entity to be created and to be in the defined state:
entity_service = self._service.service(entity.id)
def state_condition(entity):
return entity
if result_state:
def state_condition(entity):
return entity and entity.status == result_state
wait(
service=entity_service,
condition=state_condition,
fail_condition=fail_condition,
wait=self._module.params['wait'],
timeout=self._module.params['timeout'],
poll_interval=self._module.params['poll_interval'],
)
return {
'changed': self.changed,
'id': entity.id,
type(entity).__name__.lower(): get_dict_of_struct(
struct=entity,
connection=self._connection,
fetch_nested=self._module.params.get('fetch_nested'),
attributes=self._module.params.get('nested_attributes'),
),
'diff': self._diff,
}
def pre_remove(self, entity):
"""
This method is called right before entity is removed.
:param entity: Entity which we want to remove.
"""
pass
def entity_name(self, entity):
return "{e_type} '{e_name}'".format(
e_type=type(entity).__name__.lower(),
e_name=getattr(entity, 'name', None),
)
def remove(self, entity=None, search_params=None, **kwargs):
"""
Method which is called when state of the entity is 'absent'. If the user
doesn't provide the `entity` parameter, the entity is searched for using
the `search_params` parameter. If the entity is found, it's removed.
Function executed before remove is executed can optionally be specified
in `pre_remove` parameter.
:param entity: Entity we want to remove.
:param search_params: Dictionary of parameters to be used for search.
:param kwargs: Additional parameters passed when removing entity.
:return: Dictionary with values returned by Ansible module.
"""
if entity is None:
entity = self.search_entity(search_params)
if entity is None:
return {
'changed': self.changed,
'msg': "Entity wasn't found."
}
self.pre_remove(entity)
entity_service = self._service.service(entity.id)
if not self._module.check_mode:
entity_service.remove(**kwargs)
wait(
service=entity_service,
condition=lambda entity: not entity,
wait=self._module.params['wait'],
timeout=self._module.params['timeout'],
poll_interval=self._module.params['poll_interval'],
)
self.changed = True
return {
'changed': self.changed,
'id': entity.id,
type(entity).__name__.lower(): get_dict_of_struct(
struct=entity,
connection=self._connection,
fetch_nested=self._module.params.get('fetch_nested'),
attributes=self._module.params.get('nested_attributes'),
),
}
def action(
self,
action,
entity=None,
action_condition=lambda e: e,
wait_condition=lambda e: e,
fail_condition=lambda e: False,
pre_action=lambda e: e,
post_action=lambda e: None,
search_params=None,
**kwargs
):
"""
This method is executed when we want to change the state of some oVirt
entity. The action to be executed on oVirt service is specified by
`action` parameter. Whether the action should be executed can be
specified by passing `action_condition` parameter. State which the
entity should be in after execution of the action can be specified
by `wait_condition` parameter.
Function executed before an action on entity can optionally be specified
in `pre_action` parameter. Function executed after an action on entity can
optionally be specified in `post_action` parameter.
:param action: Action which should be executed by service on entity.
:param entity: Entity we want to run action on.
:param action_condition: Function which is executed when checking if action should be executed.
:param fail_condition: Function which checks incorrect state of entity, if it returns `True` Exception is raised.
:param wait_condition: Function which is executed when waiting on result state.
:param pre_action: Function which is executed before running the action.
:param post_action: Function which is executed after running the action.
:param search_params: Dictionary of parameters to be used for search.
:param kwargs: Additional parameters passed to action.
:return: Dictionary with values returned by Ansible module.
"""
if entity is None:
entity = self.search_entity(search_params)
entity = pre_action(entity)
if entity is None:
self._module.fail_json(
msg="Entity not found, can't run action '{}'.".format(
action
)
)
entity_service = self._service.service(entity.id)
entity = entity_service.get()
if action_condition(entity):
if not self._module.check_mode:
getattr(entity_service, action)(**kwargs)
self.changed = True
post_action(entity)
wait(
service=self._service.service(entity.id),
condition=wait_condition,
fail_condition=fail_condition,
wait=self._module.params['wait'],
timeout=self._module.params['timeout'],
poll_interval=self._module.params['poll_interval'],
)
return {
'changed': self.changed,
'id': entity.id,
type(entity).__name__.lower(): get_dict_of_struct(
struct=entity,
connection=self._connection,
fetch_nested=self._module.params.get('fetch_nested'),
attributes=self._module.params.get('nested_attributes'),
),
'diff': self._diff,
}
def wait_for_import(self, condition=lambda e: True):
if self._module.params['wait']:
start = time.time()
timeout = self._module.params['timeout']
poll_interval = self._module.params['poll_interval']
while time.time() < start + timeout:
entity = self.search_entity()
if entity and condition(entity):
return entity
time.sleep(poll_interval)
def search_entity(self, search_params=None):
"""
Always try to search by `ID` first; if no ID is specified,
check whether the user constructed a special search in `search_params`;
if not, search by `name`.
"""
entity = None
if 'id' in self._module.params and self._module.params['id'] is not None:
entity = get_entity(self._service.service(self._module.params['id']))
elif search_params is not None:
entity = search_by_attributes(self._service, **search_params)
elif self._module.params.get('name') is not None:
entity = search_by_attributes(self._service, name=self._module.params['name'])
return entity
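# Hedged sketch (editorial addition): the smallest useful BaseModule
# subclass. The Tag type and the parameter names are illustrative; a real
# module wires these from its AnsibleModule argument spec and SDK service.
class _ExampleTagModule(BaseModule):
    def build_entity(self):
        import ovirtsdk4.types as otypes
        return otypes.Tag(
            name=self.param('name'),
            description=self.param('description'),
        )
    def update_check(self, entity):
        # Report "no update needed" only when the description already matches:
        return equal(self.param('description'), entity.description)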
| gpl-3.0 |
mozilla/verbatim | vendor/lib/python/raven/contrib/pylons/__init__.py | 5 | 1073 | """
raven.contrib.pylons
~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from raven.middleware import Sentry as Middleware
from raven.base import Client
def list_from_setting(config, setting):
value = config.get(setting)
if not value:
return None
return value.split()
class Sentry(Middleware):
def __init__(self, app, config, client_cls=Client):
client = client_cls(
dsn=config.get('sentry.dsn'),
servers=list_from_setting(config, 'sentry.servers'),
name=config.get('sentry.name'),
public_key=config.get('sentry.public_key'),
secret_key=config.get('sentry.secret_key'),
project=config.get('sentry.project'),
site=config.get('sentry.site'),
include_paths=list_from_setting(config, 'sentry.include_paths'),
exclude_paths=list_from_setting(config, 'sentry.exclude_paths'),
)
super(Sentry, self).__init__(app, client)
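# Hedged usage sketch (editorial addition): in a Pylons project's
# config/middleware.py the WSGI app is typically wrapped like this, with the
# `sentry.*` keys read above coming from the paste ini file:
#
#     app = Sentry(app, config)
#
# development.ini (illustrative values):
#     sentry.dsn = https://public:secret@sentry.example.com/1
#     sentry.include_paths = myapp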
| gpl-2.0 |
ctruchi/deluge-webui2 | deluge/ui/gtkui/files_tab.py | 6 | 35308 | #
# files_tab.py
#
# Copyright (C) 2008 Andrew Resch <andrewresch@gmail.com>
#
# Deluge is free software.
#
# You may redistribute it and/or modify it under the terms of the
# GNU General Public License, as published by the Free Software
# Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# deluge is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with deluge. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
#
#
import gtk
import gtk.gdk
import gobject
import os.path
import cPickle
import logging
from deluge.ui.gtkui.torrentdetails import Tab
from deluge.ui.client import client
import deluge.configmanager
import deluge.component as component
import deluge.common
import common
log = logging.getLogger(__name__)
def _(message): return message
TRANSLATE = {
"Do Not Download": _("Do Not Download"),
"Normal Priority": _("Normal Priority"),
"High Priority": _("High Priority"),
"Highest Priority": _("Highest Priority"),
}
del _
def _t(text):
if text in TRANSLATE:
text = TRANSLATE[text]
return _(text)
def cell_priority(column, cell, model, row, data):
if model.get_value(row, 5) == -1:
# This is a folder, so let's just set it blank for now
cell.set_property("text", "")
return
priority = model.get_value(row, data)
cell.set_property("text", _t(deluge.common.FILE_PRIORITY[priority]))
def cell_priority_icon(column, cell, model, row, data):
if model.get_value(row, 5) == -1:
# This is a folder, so let's just set it blank for now
cell.set_property("stock-id", None)
return
priority = model.get_value(row, data)
if deluge.common.FILE_PRIORITY[priority] == "Do Not Download":
cell.set_property("stock-id", gtk.STOCK_NO)
elif deluge.common.FILE_PRIORITY[priority] == "Normal Priority":
cell.set_property("stock-id", gtk.STOCK_YES)
elif deluge.common.FILE_PRIORITY[priority] == "High Priority":
cell.set_property("stock-id", gtk.STOCK_GO_UP)
elif deluge.common.FILE_PRIORITY[priority] == "Highest Priority":
cell.set_property("stock-id", gtk.STOCK_GOTO_TOP)
def cell_filename(column, cell, model, row, data):
"""Only show the tail portion of the file path"""
filepath = model.get_value(row, data)
cell.set_property("text", os.path.split(filepath)[1])
def cell_progress(column, cell, model, row, data):
text = model.get_value(row, data[0])
value = model.get_value(row, data[1])
cell.set_property("visible", True)
cell.set_property("text", text)
cell.set_property("value", value)
class FilesTab(Tab):
def __init__(self):
Tab.__init__(self)
builder = component.get("MainWindow").get_builder()
self._name = "Files"
self._child_widget = builder.get_object("files_tab")
self._tab_label = builder.get_object("files_tab_label")
self.listview = builder.get_object("files_listview")
# filename, size, progress string, progress value, priority, file index, icon id
self.treestore = gtk.TreeStore(str, gobject.TYPE_UINT64, str, float, int, int, str)
# We need to store the row that's being edited to prevent updating it
# until editing has finished
self._editing_index = None
# Filename column
self.filename_column_name = _("Filename")
column = gtk.TreeViewColumn(self.filename_column_name)
render = gtk.CellRendererPixbuf()
column.pack_start(render, False)
column.add_attribute(render, "stock-id", 6)
render = gtk.CellRendererText()
render.set_property("editable", True)
render.connect("edited", self._on_filename_edited)
render.connect("editing-started", self._on_filename_editing_start)
render.connect("editing-canceled", self._on_filename_editing_canceled)
column.pack_start(render, True)
column.add_attribute(render, "text", 0)
column.set_sort_column_id(0)
column.set_clickable(True)
column.set_resizable(True)
column.set_expand(False)
column.set_min_width(200)
column.set_reorderable(True)
self.listview.append_column(column)
# Size column
column = gtk.TreeViewColumn(_("Size"))
render = gtk.CellRendererText()
column.pack_start(render, False)
column.set_cell_data_func(render, deluge.ui.gtkui.listview.cell_data_size, 1)
column.set_sort_column_id(1)
column.set_clickable(True)
column.set_resizable(True)
column.set_expand(False)
column.set_min_width(50)
column.set_reorderable(True)
self.listview.append_column(column)
# Progress column
column = gtk.TreeViewColumn(_("Progress"))
render = gtk.CellRendererProgress()
column.pack_start(render)
column.set_cell_data_func(render, cell_progress, (2, 3))
column.set_sort_column_id(3)
column.set_clickable(True)
column.set_resizable(True)
column.set_expand(False)
column.set_min_width(100)
column.set_reorderable(True)
self.listview.append_column(column)
# Priority column
column = gtk.TreeViewColumn(_("Priority"))
render = gtk.CellRendererPixbuf()
column.pack_start(render, False)
column.set_cell_data_func(render, cell_priority_icon, 4)
render = gtk.CellRendererText()
column.pack_start(render, False)
column.set_cell_data_func(render, cell_priority, 4)
column.set_sort_column_id(4)
column.set_clickable(True)
column.set_resizable(True)
column.set_expand(False)
column.set_min_width(100)
# Bugfix: Last column needs max_width set to stop scrollbar appearing
column.set_max_width(200)
column.set_reorderable(True)
self.listview.append_column(column)
self.listview.set_model(self.treestore)
self.listview.get_selection().set_mode(gtk.SELECTION_MULTIPLE)
self.file_menu = builder.get_object("menu_file_tab")
self.file_menu_priority_items = [
builder.get_object("menuitem_donotdownload"),
builder.get_object("menuitem_normal"),
builder.get_object("menuitem_high"),
builder.get_object("menuitem_highest"),
builder.get_object("menuitem_priority_sep")
]
self.localhost_widgets = [
builder.get_object("menuitem_open_file"),
builder.get_object("menuitem3")
]
self.listview.connect("row-activated", self._on_row_activated)
self.listview.connect("key-press-event", self._on_key_press_event)
self.listview.connect("button-press-event", self._on_button_press_event)
self.listview.enable_model_drag_source(
gtk.gdk.BUTTON1_MASK,
[('text/plain', 0, 0)],
gtk.gdk.ACTION_DEFAULT | gtk.gdk.ACTION_MOVE)
self.listview.enable_model_drag_dest([('text/plain', 0, 0)], gtk.gdk.ACTION_DEFAULT)
self.listview.connect("drag_data_get", self._on_drag_data_get_data)
self.listview.connect("drag_data_received", self._on_drag_data_received_data)
component.get("MainWindow").connect_signals({
"on_menuitem_open_file_activate": self._on_menuitem_open_file_activate,
"on_menuitem_donotdownload_activate": self._on_menuitem_donotdownload_activate,
"on_menuitem_normal_activate": self._on_menuitem_normal_activate,
"on_menuitem_high_activate": self._on_menuitem_high_activate,
"on_menuitem_highest_activate": self._on_menuitem_highest_activate,
"on_menuitem_expand_all_activate": self._on_menuitem_expand_all_activate
})
# Connect to various events from the daemon
client.register_event_handler("TorrentFileRenamedEvent", self._on_torrentfilerenamed_event)
client.register_event_handler("TorrentFolderRenamedEvent", self._on_torrentfolderrenamed_event)
client.register_event_handler("TorrentRemovedEvent", self._on_torrentremoved_event)
# Attempt to load state
self.load_state()
# torrent_id: (filepath, size)
self.files_list = {}
self.torrent_id = None
def start(self):
attr = "hide" if not client.is_localhost() else "show"
for widget in self.localhost_widgets:
getattr(widget, attr)()
def save_state(self):
filename = "files_tab.state"
# Get the current sort order of the view
column_id, sort_order = self.treestore.get_sort_column_id()
# Setup state dict
state = {
"columns": {},
"sort_id": int(column_id) if column_id >= 0 else None,
"sort_order": int(sort_order) if sort_order >= 0 else None
}
for index, column in enumerate(self.listview.get_columns()):
state["columns"][column.get_title()] = {
"position": index,
"width": column.get_width()
}
# Get the config location for saving the state file
config_location = deluge.configmanager.get_config_dir()
try:
log.debug("Saving FilesTab state file: %s", filename)
state_file = open(os.path.join(config_location, filename), "wb")
cPickle.dump(state, state_file)
state_file.close()
except IOError, e:
log.warning("Unable to save state file: %s", e)
def load_state(self):
filename = "files_tab.state"
# Get the config location for loading the state file
config_location = deluge.configmanager.get_config_dir()
state = None
try:
log.debug("Loading FilesTab state file: %s", filename)
state_file = open(os.path.join(config_location, filename), "rb")
state = cPickle.load(state_file)
state_file.close()
except (EOFError, IOError, AttributeError, cPickle.UnpicklingError), e:
log.warning("Unable to load state file: %s", e)
if state is None:
return
if state["sort_id"] is not None and state["sort_order"] is not None:
self.treestore.set_sort_column_id(state["sort_id"], state["sort_order"])
for (index, column) in enumerate(self.listview.get_columns()):
cname = column.get_title()
if state["columns"].has_key(cname):
cstate = state["columns"][cname]
column.set_sizing(gtk.TREE_VIEW_COLUMN_FIXED)
column.set_fixed_width(cstate["width"] if cstate["width"] > 0 else 10)
if state["sort_id"] == index and state["sort_order"] is not None:
column.set_sort_indicator(True)
column.set_sort_order(state["sort_order"])
if cstate["position"] != index:
# Column is in wrong position
if cstate["position"] == 0:
self.listview.move_column_after(column, None)
elif self.listview.get_columns()[cstate["position"] - 1].get_title() != cname:
self.listview.move_column_after(column, self.listview.get_columns()[cstate["position"] - 1])
def update(self):
# Get the first selected torrent
torrent_id = component.get("TorrentView").get_selected_torrents()
# Only use the first torrent in the list or return if None selected
if len(torrent_id) != 0:
torrent_id = torrent_id[0]
else:
# No torrent is selected in the torrentview
self.clear()
return
status_keys = ["file_progress", "file_priorities"]
if torrent_id != self.torrent_id:
# We only want to do this if the torrent_id has changed
self.treestore.clear()
self.torrent_id = torrent_id
status_keys += ["compact", "is_seed"]
if self.torrent_id in self.files_list:
# We already have the files list stored, so just update the view
self.update_files()
if self.torrent_id not in self.files_list or not self.files_list[self.torrent_id]:
# We need to get the files list
log.debug("Getting file list from core..")
status_keys += ["files"]
component.get("SessionProxy").get_torrent_status(self.torrent_id, status_keys).addCallback(self._on_get_torrent_status, self.torrent_id)
def clear(self):
self.treestore.clear()
self.torrent_id = None
def _on_row_activated(self, tree, path, view_column):
if client.is_localhost():  # call the method; the bare bound method is always truthy
component.get("SessionProxy").get_torrent_status(self.torrent_id, ["save_path", "files"]).addCallback(self._on_open_file)
def get_file_path(self, row, path=""):
if not row:
return path
path = self.treestore.get_value(row, 0) + path
return self.get_file_path(self.treestore.iter_parent(row), path)
def _on_open_file(self, status):
paths = self.listview.get_selection().get_selected_rows()[1]
selected = []
for path in paths:
selected.append(self.treestore.get_iter(path))
for select in selected:
path = self.get_file_path(select).split("/")
filepath = os.path.join(status["save_path"], *path)
log.debug("Open file '%s'", filepath)
deluge.common.open_file(filepath)
## The following 3 methods create the folder/file view in the treeview
def prepare_file_store(self, files):
split_files = { }
i = 0
for file in files:
self.prepare_file(file, file["path"], i, split_files)
i += 1
self.add_files(None, split_files)
def prepare_file(self, file, file_name, file_num, files_storage):
first_slash_index = file_name.find("/")
if first_slash_index == -1:
files_storage[file_name] = (file_num, file)
else:
file_name_chunk = file_name[:first_slash_index+1]
if file_name_chunk not in files_storage:
files_storage[file_name_chunk] = { }
self.prepare_file(file, file_name[first_slash_index+1:],
file_num, files_storage[file_name_chunk])
def add_files(self, parent_iter, split_files):
ret = 0
for key,value in split_files.iteritems():
if key.endswith("/"):
chunk_iter = self.treestore.append(parent_iter,
[key, 0, "", 0, 0, -1, gtk.STOCK_DIRECTORY])
chunk_size = self.add_files(chunk_iter, value)
self.treestore.set(chunk_iter, 1, chunk_size)
ret += chunk_size
else:
self.treestore.append(parent_iter, [key,
value[1]["size"], "", 0, 0, value[0], gtk.STOCK_FILE])
ret += value[1]["size"]
return ret
###
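# Hedged illustration (editorial addition): prepare_file() turns a flat file
# list such as [{"path": "a/b.txt", "size": 10}] into nested storage roughly
# like {"a/": {"b.txt": (0, {...})}}, which add_files() then mirrors into
# the gtk.TreeStore (folders get file index -1 and an aggregated size).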
def update_files(self):
self.treestore.clear()
self.prepare_file_store(self.files_list[self.torrent_id])
self.listview.expand_row("0", False)
def get_selected_files(self):
"""Returns a list of file indexes that are selected"""
def get_iter_children(itr, selected):
i = self.treestore.iter_children(itr)
while i:
selected.append(self.treestore[i][5])
if self.treestore.iter_has_child(i):
get_iter_children(i, selected)
i = self.treestore.iter_next(i)
selected = []
paths = self.listview.get_selection().get_selected_rows()[1]
for path in paths:
i = self.treestore.get_iter(path)
selected.append(self.treestore[i][5])
if self.treestore.iter_has_child(i):
get_iter_children(i, selected)
return selected
def get_files_from_tree(self, rows, files_list, indent):
if not rows:
return None
for row in rows:
if row[5] > -1:
files_list.append((row[5], row))
self.get_files_from_tree(row.iterchildren(), files_list, indent+1)
return None
def update_folder_percentages(self):
"""
Go through the tree and update the folder completion percentages.
"""
root = self.treestore.get_iter_root()
if root is None or self.treestore[root][5] != -1:
return
def get_completed_bytes(row):
bytes = 0
parent = self.treestore.iter_parent(row)
while row:
if self.treestore.iter_children(row):
bytes += get_completed_bytes(self.treestore.iter_children(row))
else:
bytes += self.treestore[row][1] * (float(self.treestore[row][3]) / 100.0)
row = self.treestore.iter_next(row)
try:
value = (float(bytes) / float(self.treestore[parent][1])) * 100
except ZeroDivisionError:
# Catch the unusual error found when moving folders around
value = 0
self.treestore[parent][3] = value
self.treestore[parent][2] = "%.2f%%" % value
return bytes
get_completed_bytes(self.treestore.iter_children(root))
def _on_get_torrent_status(self, status, torrent_id):
# Check stored torrent id matches the callback id
if self.torrent_id != torrent_id:
return
# Store this torrent's compact setting
if "compact" in status:
self.__compact = status["compact"]
if "is_seed" in status:
self.__is_seed = status["is_seed"]
if "files" in status:
self.files_list[self.torrent_id] = status["files"]
self.update_files()
# (index, iter)
files_list = []
self.get_files_from_tree(self.treestore, files_list, 0)
files_list.sort()
for index, row in files_list:
# Do not update a row that is being edited
if self._editing_index == row[5]:
continue
try:
progress_string = "%.2f%%" % (status["file_progress"][index] * 100)
except IndexError:
continue
if row[2] != progress_string:
row[2] = progress_string
progress_value = status["file_progress"][index] * 100
if row[3] != progress_value:
row[3] = progress_value
file_priority = status["file_priorities"][index]
if row[4] != file_priority:
row[4] = file_priority
if self._editing_index != -1:
# Only update if no folder is being edited
self.update_folder_percentages()
def _on_button_press_event(self, widget, event):
"""This is a callback for showing the right-click context menu."""
log.debug("on_button_press_event")
# We only care about right-clicks
if event.button == 3:
x, y = event.get_coords()
cursor_path = self.listview.get_path_at_pos(int(x), int(y))
if not cursor_path:
return
paths = self.listview.get_selection().get_selected_rows()[1]
if cursor_path[0] not in paths:
row = self.treestore.get_iter(cursor_path[0])
self.listview.get_selection().unselect_all()
self.listview.get_selection().select_iter(row)
for widget in self.file_menu_priority_items:
widget.set_sensitive(not (self.__compact or self.__is_seed))
self.file_menu.popup(None, None, None, event.button, event.time)
return True
def _on_key_press_event(self, widget, event):
keyname = gtk.gdk.keyval_name(event.keyval)
if keyname is not None:
func = getattr(self, 'keypress_' + keyname, None)
selected_rows = self.listview.get_selection().get_selected_rows()[1]
if func and selected_rows:
return func(event)
def keypress_Menu(self, event):
self.file_menu.popup(None, None, None, 3, event.time)
return True
def keypress_F2(self, event):
path, col = self.listview.get_cursor()
for column in self.listview.get_columns():
if column.get_title() == self.filename_column_name:
self.listview.set_cursor(path, column, True)
return True
def _on_menuitem_open_file_activate(self, menuitem):
self._on_row_activated(None, None, None)
def _set_file_priorities_on_user_change(self, selected, priority):
"""Sets the file priorities in the core. It will change the selected
with the 'priority'"""
file_priorities = []
def set_file_priority(model, path, iter, data):
index = model.get_value(iter, 5)
if index in selected and index != -1:
file_priorities.append((index, priority))
elif index != -1:
file_priorities.append((index, model.get_value(iter, 4)))
self.treestore.foreach(set_file_priority, None)
file_priorities.sort()
priorities = [p[1] for p in file_priorities]
log.debug("priorities: %s", priorities)
client.core.set_torrent_file_priorities(self.torrent_id, priorities)
def _on_menuitem_donotdownload_activate(self, menuitem):
self._set_file_priorities_on_user_change(
self.get_selected_files(),
deluge.common.FILE_PRIORITY["Do Not Download"])
def _on_menuitem_normal_activate(self, menuitem):
self._set_file_priorities_on_user_change(
self.get_selected_files(),
deluge.common.FILE_PRIORITY["Normal Priority"])
def _on_menuitem_high_activate(self, menuitem):
self._set_file_priorities_on_user_change(
self.get_selected_files(),
deluge.common.FILE_PRIORITY["High Priority"])
def _on_menuitem_highest_activate(self, menuitem):
self._set_file_priorities_on_user_change(
self.get_selected_files(),
deluge.common.FILE_PRIORITY["Highest Priority"])
def _on_menuitem_expand_all_activate(self, menuitem):
self.listview.expand_all()
def _on_filename_edited(self, renderer, path, new_text):
index = self.treestore[path][5]
log.debug("new_text: %s", new_text)
# Don't do anything if the text hasn't changed
if new_text == self.treestore[path][0]:
self._editing_index = None
return
if index > -1:
# We are renaming a file
itr = self.treestore.get_iter(path)
# Recurse through the treestore to get the actual path of the file
def get_filepath(i):
ip = self.treestore.iter_parent(i)
fp = ""
while ip:
fp = self.treestore[ip][0] + fp
ip = self.treestore.iter_parent(ip)
return fp
# Only recurse if file is in a folder..
if self.treestore.iter_parent(itr):
filepath = get_filepath(itr) + new_text
else:
filepath = new_text
log.debug("filepath: %s", filepath)
client.core.rename_files(self.torrent_id, [(index, filepath)])
else:
# We are renaming a folder
folder = self.treestore[path][0]
parent_path = ""
itr = self.treestore.iter_parent(self.treestore.get_iter(path))
while itr:
parent_path = self.treestore[itr][0] + parent_path
itr = self.treestore.iter_parent(itr)
client.core.rename_folder(self.torrent_id, parent_path + folder, parent_path + new_text)
self._editing_index = None
def _on_filename_editing_start(self, renderer, editable, path):
self._editing_index = self.treestore[path][5]
def _on_filename_editing_canceled(self, renderer):
self._editing_index = None
def _on_torrentfilerenamed_event(self, torrent_id, index, name):
log.debug("index: %s name: %s", index, name)
if torrent_id not in self.files_list:
return
old_name = self.files_list[torrent_id][index]["path"]
self.files_list[torrent_id][index]["path"] = name
# We need to update the filename displayed if we're currently viewing
# this torrents files.
if torrent_id == self.torrent_id:
old_name_len = len(old_name.split("/"))
name_len = len(name.split("/"))
if old_name_len != name_len:
# The parent path list changes depending on which way the file
# is moving in the tree
if old_name_len < name_len:
parent_path = [o for o in old_name.split("/")[:-1]]
else:
parent_path = [o for o in name.split("/")[:-1]]
# Find the iter to the parent folder we need to add a new folder
# to.
def find_parent(model, path, itr, user_data):
if model[itr][0] == parent_path[0] + "/":
if len(parent_path) == 1:
# This is the parent iter
to_create = name.split("/")[len(old_name.split("/")[:-1]):-1]
parent_iter = itr
for tc in to_create:
# We need to check if these folders need to be created
child_iter = self.treestore.iter_children(parent_iter)
create = True
while child_iter:
if self.treestore[child_iter][0] == tc + "/":
create = False
parent_iter = child_iter
break
child_iter = self.treestore.iter_next(child_iter)
if create:
parent_iter = self.treestore.append(parent_iter,
[tc + "/", 0, "", 0, 0, -1, gtk.STOCK_DIRECTORY])
# Find the iter for the file that needs to be moved
def get_file_iter(model, path, itr, user_data):
if model[itr][5] == index:
model[itr][0] = name.split("/")[-1]
t = self.treestore.append(
parent_iter,
self.treestore.get(itr,
*xrange(self.treestore.get_n_columns())))
itr_parent = self.treestore.iter_parent(itr)
self.treestore.remove(itr)
self.remove_childless_folders(itr_parent)
return True
self.treestore.foreach(get_file_iter, None)
return True
else:
log.debug("parent_path: %s remove: %s", parent_path, model[itr][0])
parent_path.remove(model[itr][0][:-1])
if parent_path:
self.treestore.foreach(find_parent, None)
else:
new_folders = name.split("/")[:-1]
parent_iter = None
for f in new_folders:
parent_iter = self.treestore.append(parent_iter,
[f + "/", 0, "", 0, 0, -1, gtk.STOCK_DIRECTORY])
child = self.get_iter_at_path(old_name)
self.treestore.append(
parent_iter,
self.treestore.get(child, *xrange(self.treestore.get_n_columns())))
self.treestore.remove(child)
else:
# This is just changing a filename without any folder changes
def set_file_name(model, path, itr, user_data):
if model[itr][5] == index:
model[itr][0] = os.path.split(name)[-1]
return True
self.treestore.foreach(set_file_name, None)
def get_iter_at_path(self, filepath):
"""
Returns the gtkTreeIter for filepath
"""
log.debug("get_iter_at_path: %s", filepath)
is_dir = False
if filepath[-1] == "/":
is_dir = True
filepath = filepath.split("/")
if "" in filepath:
filepath.remove("")
path_iter = None
itr = self.treestore.iter_children(None)
level = 0
while itr:
ipath = self.treestore[itr][0]
if (level + 1) != len(filepath) and ipath == filepath[level] + "/":
# We're not at the last index, but we do have a match
itr = self.treestore.iter_children(itr)
level += 1
continue
elif (level + 1) == len(filepath) and ipath == (filepath[level] + "/" if is_dir else filepath[level]):  # parenthesized so the conditional picks the comparand, not the whole test
# This is the iter we've been searching for
path_iter = itr
break
else:
itr = self.treestore.iter_next(itr)
continue
return path_iter
def remove_childless_folders(self, itr):
"""
Goes up the tree removing childless itrs starting at itr
"""
while not self.treestore.iter_children(itr):
parent = self.treestore.iter_parent(itr)
self.treestore.remove(itr)
itr = parent
def _on_torrentfolderrenamed_event(self, torrent_id, old_folder, new_folder):
log.debug("on_torrent_folder_renamed_signal")
log.debug("old_folder: %s new_folder: %s", old_folder, new_folder)
if torrent_id not in self.files_list:
return
if old_folder[-1] != "/":
old_folder += "/"
if new_folder[-1] != "/":
new_folder += "/"
for fd in self.files_list[torrent_id]:
if fd["path"].startswith(old_folder):
fd["path"] = fd["path"].replace(old_folder, new_folder, 1)
if torrent_id == self.torrent_id:
old_split = old_folder.split("/")
try:
old_split.remove("")
except ValueError:
pass
new_split = new_folder.split("/")
try:
new_split.remove("")
except ValueError:
pass
old_folder_iter = self.get_iter_at_path(old_folder)
old_folder_iter_parent = self.treestore.iter_parent(old_folder_iter)
new_folder_iter = self.get_iter_at_path(new_folder)
if len(new_split) == len(old_split):
# These are at the same tree depth, so it's a simple rename
self.treestore[old_folder_iter][0] = new_split[-1] + "/"
return
if new_folder_iter:
# This means that a folder by this name already exists
common.reparent_iter(self.treestore, self.treestore.iter_children(old_folder_iter), new_folder_iter)
else:
parent = old_folder_iter_parent
for ns in new_split[:-1]:
parent = self.treestore.append(parent, [ns + "/", 0, "", 0, 0, -1, gtk.STOCK_DIRECTORY])
self.treestore[old_folder_iter][0] = new_split[-1] + "/"
common.reparent_iter(self.treestore, old_folder_iter, parent)
# We need to check if the old_folder_iter_parent no longer has children
# and if so, we delete it
self.remove_childless_folders(old_folder_iter_parent)
def _on_torrentremoved_event(self, torrent_id):
if torrent_id in self.files_list:
del self.files_list[torrent_id]
def _on_drag_data_get_data(self, treeview, context, selection, target_id, etime):
paths = self.listview.get_selection().get_selected_rows()[1]
selection.set_text(cPickle.dumps(paths))
def _on_drag_data_received_data(self, treeview, context, x, y, selection, info, etime):
try:
selected = cPickle.loads(selection.data)
except cPickle.UnpicklingError:
log.debug("Invalid selection data: %s", selection.data)
return
log.debug("selection.data: %s", selected)
drop_info = treeview.get_dest_row_at_pos(x, y)
model = treeview.get_model()
if drop_info:
itr = model.get_iter(drop_info[0])
parent_iter = model.iter_parent(itr)
parent_path = ""
if model[itr][5] == -1:
parent_path += model[itr][0]
while parent_iter:
parent_path = model[parent_iter][0] + parent_path
parent_iter = model.iter_parent(parent_iter)
if model[selected[0]][5] == -1:
log.debug("parent_path: %s", parent_path)
log.debug("rename_to: %s", parent_path + model[selected[0]][0])
# Get the full path of the folder we want to rename
pp = ""
itr = self.treestore.iter_parent(self.treestore.get_iter(selected[0]))
while itr:
pp = self.treestore[itr][0] + pp
itr = self.treestore.iter_parent(itr)
client.core.rename_folder(self.torrent_id, pp + model[selected[0]][0], parent_path + model[selected[0]][0])
else:
#[(index, filepath), ...]
to_rename = []
for s in selected:
to_rename.append((model[s][5], parent_path + model[s][0]))
log.debug("to_rename: %s", to_rename)
client.core.rename_files(self.torrent_id, to_rename)
| gpl-3.0 |
palominodb/tableizer | tableizer/ttt_gui/rrd.py | 1 | 5968 | # rrd.py
# Copyright (C) 2009-2013 PalominoDB, Inc.
#
# You may contact the maintainers at eng@palominodb.com.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os
from django.conf import settings
import rrdtool
from utilities.utils import flatten, titleize, str_to_datetime, datetime_to_int
class Rrdtool(object):
def server_graph(self, servers, since, type_='full'):
msgs = []
ok = True
for srv in flatten([servers]):
path = settings.FORMATTER_OPTIONS.get('rrd', {}).get('path', '')
rrd_path = os.path.join(path, srv.name, 'server_%s.rrd' % (srv.name))
opts = self.__common_opts('server_%s' % (srv.name), since, type_, 'Server Aggregate - %s' % (srv.name))
opts.append(map(lambda ds: self.__common_ds_opts(ds, rrd_path), [
['data_length', ['AREA%s:STACK', '#00ff40']],
['index_length', ['AREA%s', '#0040ff']],
#['data_free', ['LINE2%s', '#0f00f0']],
]))
opts = flatten(opts)
opts = map(lambda x: str(x), opts)
try:
rrdtool.graph(opts)
except Exception as e:
msgs.append(e)
ok = False
return [ok, msgs]
def database_graph(self, databases, since, type_='full'):
msgs = []
ok = True
for db in flatten([databases]):
path = settings.FORMATTER_OPTIONS.get('rrd', {}).get('path', '')
rrd_path = os.path.join(path, db.server.name, 'database_%s.rrd' % (db.name))
opts = self.__common_opts('database_%s_%s' % (db.server.name, db.name), since,
type_, 'Database Aggregate - %s.%s' % (db.server.name, db.name))
opts.append(map(lambda ds: self.__common_ds_opts(ds, rrd_path), [
['data_length', ['AREA%s:STACK', '#00ff40']],
['index_length', ['AREA%s', '#0040ff']],
#['data_free', ['LINE2%s', '#0f00f0']],
]))
opts = flatten(opts)
opts = map(lambda x: str(x), opts)
try:
rrdtool.graph(opts)
except Exception as e:
msgs.append(e)
ok = False
return [ok, msgs]
def table_graph(self, tables, since, type_='full'):
msgs = []
ok = True
for tbl in flatten([tables]):
path = settings.FORMATTER_OPTIONS.get('rrd', {}).get('path', '')
rrd_path = os.path.join(path, tbl.schema.server.name, tbl.schema.name, '%s.rrd' % (tbl.name))
opts = self.__common_opts('table_%s_%s_%s' % (tbl.schema.server.name, tbl.schema.name, tbl.name),
since, type_, 'Table - %s.%s.%s' % (tbl.schema.server.name, tbl.schema.name, tbl.name))
opts.append(map(lambda ds: self.__common_ds_opts(ds, rrd_path), [
['data_length', ['AREA%s:STACK', '#00ff40']],
['index_length', ['AREA%s', '#0040ff']],
#['data_free', ['LINE2%s', '#0f00f0']],
]))
opts = flatten(opts)
opts = map(lambda x: str(x), opts)
try:
rrdtool.graph(opts)
except Exception as e:
msgs.append(e)
ok = False
return [ok, msgs]
def __common_opts(self, path_frag, since, type_, title):
filename = '%s.%s.%s.png' % (path_frag, since, type_)
since = str_to_datetime(since)
since = datetime_to_int(since)
if not os.path.isdir(os.path.join(settings.MEDIA_ROOT, 'graphs')):
os.makedirs(os.path.join(settings.MEDIA_ROOT, 'graphs'))
path = os.path.join(settings.MEDIA_ROOT, 'graphs', filename)
o = [path, '-s', str(since), '--width', '640' if type_ == 'full' else '128',
'-e', 'now', '--title', '%s' % (str(title))]
if type_ == 'thumb':
o.append('-j')
o.append('--height')
o.append('16')
return o
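# Hedged example (hypothetical arguments): __common_opts('server_db1',
# '2013-01-01', 'thumb', 'db1') would return roughly
# ['<MEDIA_ROOT>/graphs/server_db1.2013-01-01.thumb.png', '-s', '<epoch>',
# '--width', '128', '-e', 'now', '--title', 'db1', '-j', '--height', '16'].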
def __common_ds_opts(self, ds, rrd_path):
dsname = ds[0]
gitems = ds[1:]
ret = []
ret.append('DEF:avg_{0}={1}:{0}:AVERAGE'.format(dsname, rrd_path))
ret.append('DEF:min_{0}={1}:{0}:MIN'.format(dsname, rrd_path))
ret.append('DEF:max_{0}={1}:{0}:MAX'.format(dsname, rrd_path))
ret.append('VDEF:v_last_{0}=avg_{0},LAST'.format(dsname))
ret.append('VDEF:v_avg_{0}=avg_{0},AVERAGE'.format(dsname))
ret.append('VDEF:v_min_{0}=avg_{0},MINIMUM'.format(dsname))
ret.append('VDEF:v_max_{0}=avg_{0},MAXIMUM'.format(dsname))
for gi in gitems:
ret.append(gi[0] % ':avg_{0}{1}:"{2}"'.format(dsname, gi[1], titleize(dsname)))
ret.append('GPRINT:v_last_{0}:"Current\\: %0.2lf%s"'.format(dsname))
ret.append('GPRINT:v_avg_{0}:"Avg\\: %0.2lf%s"'.format(dsname))
ret.append('GPRINT:v_min_{0}:"Min\\: %0.2lf%s"'.format(dsname))
ret.append('GPRINT:v_max_{0}:"Max\\: %0.2lf%s"'.format(dsname))
ret.append('COMMENT:"\\s"')
ret.append('COMMENT:"\\s"')
return ret
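# Hedged expansion sketch for ds == ['data_length', ['AREA%s:STACK',
# '#00ff40']] with rrd_path == 'srv.rrd' (titleize output assumed):
# 'DEF:avg_data_length=srv.rrd:data_length:AVERAGE'
# 'AREA:avg_data_length#00ff40:"Data Length":STACK'
# plus the GPRINT legend entries appended above.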
| gpl-2.0 |
nkhuyu/SFrame | oss_src/unity/python/sframe/cython/python_printer_callback.py | 9 | 1141 | '''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
'''
import sys
try:
import IPython
from IPython.core.interactiveshell import InteractiveShell
have_ipython = True
except ImportError:
have_ipython = False
def print_callback(val):
"""
Internal function.
This function is called via a call back returning from IPC to Cython
to Python. It tries to perform incremental printing to IPython Notebook and
when all else fails, just prints locally.
"""
success = False
try:
# for reasons I cannot fathom, regular printing, even directly
# to io.stdout does not work.
# I have to intrude rather deep into IPython to make it behave
if have_ipython:
if InteractiveShell.initialized():
IPython.display.publish_display_data('graphlab.callback', {'text/plain':val,'text/html':'<pre>' + val + '</pre>'})
success = True
except Exception:
pass
if not success:
print val
sys.stdout.flush()
| bsd-3-clause |
harlowja/networkx | networkx/algorithms/link_prediction.py | 40 | 16527 | """
Link prediction algorithms.
"""
from __future__ import division
import math
import networkx as nx
from networkx.utils.decorators import *
__all__ = ['resource_allocation_index',
'jaccard_coefficient',
'adamic_adar_index',
'preferential_attachment',
'cn_soundarajan_hopcroft',
'ra_index_soundarajan_hopcroft',
'within_inter_cluster']
@not_implemented_for('directed')
@not_implemented_for('multigraph')
def resource_allocation_index(G, ebunch=None):
r"""Compute the resource allocation index of all node pairs in ebunch.
Resource allocation index of `u` and `v` is defined as
.. math::
\sum_{w \in \Gamma(u) \cap \Gamma(v)} \frac{1}{|\Gamma(w)|}
where :math:`\Gamma(u)` denotes the set of neighbors of `u`.
Parameters
----------
G : graph
A NetworkX undirected graph.
ebunch : iterable of node pairs, optional (default = None)
Resource allocation index will be computed for each pair of
nodes given in the iterable. The pairs must be given as
2-tuples (u, v) where u and v are nodes in the graph. If ebunch
is None then all non-existent edges in the graph will be used.
Default value: None.
Returns
-------
piter : iterator
An iterator of 3-tuples in the form (u, v, p) where (u, v) is a
pair of nodes and p is their resource allocation index.
Examples
--------
>>> import networkx as nx
>>> G = nx.complete_graph(5)
>>> preds = nx.resource_allocation_index(G, [(0, 1), (2, 3)])
>>> for u, v, p in preds:
... '(%d, %d) -> %.8f' % (u, v, p)
...
'(0, 1) -> 0.75000000'
'(2, 3) -> 0.75000000'
References
----------
.. [1] T. Zhou, L. Lu, Y.-C. Zhang.
Predicting missing links via local information.
Eur. Phys. J. B 71 (2009) 623.
http://arxiv.org/pdf/0901.0553.pdf
"""
if ebunch is None:
ebunch = nx.non_edges(G)
def predict(u, v):
return sum(1 / G.degree(w) for w in nx.common_neighbors(G, u, v))
return ((u, v, predict(u, v)) for u, v in ebunch)
@not_implemented_for('directed')
@not_implemented_for('multigraph')
def jaccard_coefficient(G, ebunch=None):
r"""Compute the Jaccard coefficient of all node pairs in ebunch.
Jaccard coefficient of nodes `u` and `v` is defined as
.. math::
\frac{|\Gamma(u) \cap \Gamma(v)|}{|\Gamma(u) \cup \Gamma(v)|}
where :math:`\Gamma(u)` denotes the set of neighbors of `u`.
Parameters
----------
G : graph
A NetworkX undirected graph.
ebunch : iterable of node pairs, optional (default = None)
Jaccard coefficient will be computed for each pair of nodes
given in the iterable. The pairs must be given as 2-tuples
(u, v) where u and v are nodes in the graph. If ebunch is None
then all non-existent edges in the graph will be used.
Default value: None.
Returns
-------
piter : iterator
An iterator of 3-tuples in the form (u, v, p) where (u, v) is a
pair of nodes and p is their Jaccard coefficient.
Examples
--------
>>> import networkx as nx
>>> G = nx.complete_graph(5)
>>> preds = nx.jaccard_coefficient(G, [(0, 1), (2, 3)])
>>> for u, v, p in preds:
... '(%d, %d) -> %.8f' % (u, v, p)
...
'(0, 1) -> 0.60000000'
'(2, 3) -> 0.60000000'
References
----------
.. [1] D. Liben-Nowell, J. Kleinberg.
The Link Prediction Problem for Social Networks (2004).
http://www.cs.cornell.edu/home/kleinber/link-pred.pdf
"""
if ebunch is None:
ebunch = nx.non_edges(G)
def predict(u, v):
cnbors = list(nx.common_neighbors(G, u, v))
union_size = len(set(G[u]) | set(G[v]))
if union_size == 0:
return 0
else:
return len(cnbors) / union_size
return ((u, v, predict(u, v)) for u, v in ebunch)
@not_implemented_for('directed')
@not_implemented_for('multigraph')
def adamic_adar_index(G, ebunch=None):
r"""Compute the Adamic-Adar index of all node pairs in ebunch.
Adamic-Adar index of `u` and `v` is defined as
.. math::
\sum_{w \in \Gamma(u) \cap \Gamma(v)} \frac{1}{\log |\Gamma(w)|}
where :math:`\Gamma(u)` denotes the set of neighbors of `u`.
Parameters
----------
G : graph
NetworkX undirected graph.
ebunch : iterable of node pairs, optional (default = None)
Adamic-Adar index will be computed for each pair of nodes given
in the iterable. The pairs must be given as 2-tuples (u, v)
where u and v are nodes in the graph. If ebunch is None then all
non-existent edges in the graph will be used.
Default value: None.
Returns
-------
piter : iterator
An iterator of 3-tuples in the form (u, v, p) where (u, v) is a
pair of nodes and p is their Adamic-Adar index.
Examples
--------
>>> import networkx as nx
>>> G = nx.complete_graph(5)
>>> preds = nx.adamic_adar_index(G, [(0, 1), (2, 3)])
>>> for u, v, p in preds:
... '(%d, %d) -> %.8f' % (u, v, p)
...
'(0, 1) -> 2.16404256'
'(2, 3) -> 2.16404256'
References
----------
.. [1] D. Liben-Nowell, J. Kleinberg.
The Link Prediction Problem for Social Networks (2004).
http://www.cs.cornell.edu/home/kleinber/link-pred.pdf
"""
if ebunch is None:
ebunch = nx.non_edges(G)
def predict(u, v):
return sum(1 / math.log(G.degree(w))
for w in nx.common_neighbors(G, u, v))
return ((u, v, predict(u, v)) for u, v in ebunch)
@not_implemented_for('directed')
@not_implemented_for('multigraph')
def preferential_attachment(G, ebunch=None):
r"""Compute the preferential attachment score of all node pairs in ebunch.
Preferential attachment score of `u` and `v` is defined as
.. math::
|\Gamma(u)| |\Gamma(v)|
where :math:`\Gamma(u)` denotes the set of neighbors of `u`.
Parameters
----------
G : graph
NetworkX undirected graph.
ebunch : iterable of node pairs, optional (default = None)
Preferential attachment score will be computed for each pair of
nodes given in the iterable. The pairs must be given as
2-tuples (u, v) where u and v are nodes in the graph. If ebunch
is None then all non-existent edges in the graph will be used.
Default value: None.
Returns
-------
piter : iterator
An iterator of 3-tuples in the form (u, v, p) where (u, v) is a
pair of nodes and p is their preferential attachment score.
Examples
--------
>>> import networkx as nx
>>> G = nx.complete_graph(5)
>>> preds = nx.preferential_attachment(G, [(0, 1), (2, 3)])
>>> for u, v, p in preds:
... '(%d, %d) -> %d' % (u, v, p)
...
'(0, 1) -> 16'
'(2, 3) -> 16'
References
----------
.. [1] D. Liben-Nowell, J. Kleinberg.
The Link Prediction Problem for Social Networks (2004).
http://www.cs.cornell.edu/home/kleinber/link-pred.pdf
"""
if ebunch is None:
ebunch = nx.non_edges(G)
return ((u, v, G.degree(u) * G.degree(v)) for u, v in ebunch)
@not_implemented_for('directed')
@not_implemented_for('multigraph')
def cn_soundarajan_hopcroft(G, ebunch=None, community='community'):
r"""Count the number of common neighbors of all node pairs in ebunch
using community information.
For two nodes `u` and `v`, this function computes the number of
common neighbors, plus a bonus of one for each common neighbor that
belongs to the same community as `u` and `v`. Mathematically,
.. math::
|\Gamma(u) \cap \Gamma(v)| + \sum_{w \in \Gamma(u) \cap \Gamma(v)} f(w)
where `f(w)` equals 1 if `w` belongs to the same community as `u`
and `v` or 0 otherwise and :math:`\Gamma(u)` denotes the set of
neighbors of `u`.
Parameters
----------
G : graph
A NetworkX undirected graph.
ebunch : iterable of node pairs, optional (default = None)
The score will be computed for each pair of nodes given in the
iterable. The pairs must be given as 2-tuples (u, v) where u
and v are nodes in the graph. If ebunch is None then all
non-existent edges in the graph will be used.
Default value: None.
community : string, optional (default = 'community')
Name of the node attribute containing the community information.
G.node[u][community] identifies which community u belongs to. Each
node belongs to at most one community. Default value: 'community'.
Returns
-------
piter : iterator
An iterator of 3-tuples in the form (u, v, p) where (u, v) is a
pair of nodes and p is their score.
Examples
--------
>>> import networkx as nx
>>> G = nx.path_graph(3)
>>> G.node[0]['community'] = 0
>>> G.node[1]['community'] = 0
>>> G.node[2]['community'] = 0
>>> preds = nx.cn_soundarajan_hopcroft(G, [(0, 2)])
>>> for u, v, p in preds:
... '(%d, %d) -> %d' % (u, v, p)
...
'(0, 2) -> 2'
References
----------
.. [1] Sucheta Soundarajan and John Hopcroft.
Using community information to improve the precision of link
prediction methods.
In Proceedings of the 21st international conference companion on
World Wide Web (WWW '12 Companion). ACM, New York, NY, USA, 607-608.
http://doi.acm.org/10.1145/2187980.2188150
"""
if ebunch is None:
ebunch = nx.non_edges(G)
def predict(u, v):
Cu = _community(G, u, community)
Cv = _community(G, v, community)
cnbors = list(nx.common_neighbors(G, u, v))
if Cu == Cv:
return len(cnbors) + sum(_community(G, w, community) == Cu
for w in cnbors)
else:
return len(cnbors)
return ((u, v, predict(u, v)) for u, v in ebunch)
@not_implemented_for('directed')
@not_implemented_for('multigraph')
def ra_index_soundarajan_hopcroft(G, ebunch=None, community='community'):
r"""Compute the resource allocation index of all node pairs in
ebunch using community information.
For two nodes `u` and `v`, this function computes the resource
allocation index considering only common neighbors belonging to the
same community as `u` and `v`. Mathematically,
.. math::
\sum_{w \in \Gamma(u) \cap \Gamma(v)} \frac{f(w)}{|\Gamma(w)|}
where `f(w)` equals 1 if `w` belongs to the same community as `u`
and `v` or 0 otherwise and :math:`\Gamma(u)` denotes the set of
neighbors of `u`.
Parameters
----------
G : graph
A NetworkX undirected graph.
ebunch : iterable of node pairs, optional (default = None)
The score will be computed for each pair of nodes given in the
iterable. The pairs must be given as 2-tuples (u, v) where u
and v are nodes in the graph. If ebunch is None then all
non-existent edges in the graph will be used.
Default value: None.
community : string, optional (default = 'community')
Name of the node attribute containing the community information.
G.node[u][community] identifies which community u belongs to. Each
node belongs to at most one community. Default value: 'community'.
Returns
-------
piter : iterator
An iterator of 3-tuples in the form (u, v, p) where (u, v) is a
pair of nodes and p is their score.
Examples
--------
>>> import networkx as nx
>>> G = nx.Graph()
>>> G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3)])
>>> G.node[0]['community'] = 0
>>> G.node[1]['community'] = 0
>>> G.node[2]['community'] = 1
>>> G.node[3]['community'] = 0
>>> preds = nx.ra_index_soundarajan_hopcroft(G, [(0, 3)])
>>> for u, v, p in preds:
... '(%d, %d) -> %.8f' % (u, v, p)
...
'(0, 3) -> 0.50000000'
References
----------
.. [1] Sucheta Soundarajan and John Hopcroft.
Using community information to improve the precision of link
prediction methods.
In Proceedings of the 21st international conference companion on
World Wide Web (WWW '12 Companion). ACM, New York, NY, USA, 607-608.
http://doi.acm.org/10.1145/2187980.2188150
"""
if ebunch is None:
ebunch = nx.non_edges(G)
def predict(u, v):
Cu = _community(G, u, community)
Cv = _community(G, v, community)
if Cu == Cv:
cnbors = nx.common_neighbors(G, u, v)
return sum(1 / G.degree(w) for w in cnbors
if _community(G, w, community) == Cu)
else:
return 0
return ((u, v, predict(u, v)) for u, v in ebunch)
@not_implemented_for('directed')
@not_implemented_for('multigraph')
def within_inter_cluster(G, ebunch=None, delta=0.001, community='community'):
"""Compute the ratio of within- and inter-cluster common neighbors
of all node pairs in ebunch.
For two nodes `u` and `v`, if a common neighbor `w` belongs to the
same community as them, `w` is considered a within-cluster common
neighbor of `u` and `v`. Otherwise, it is considered an
inter-cluster common neighbor of `u` and `v`. The ratio between the
sizes of the sets of within- and inter-cluster common neighbors is
defined as the WIC measure. [1]_
Parameters
----------
G : graph
A NetworkX undirected graph.
ebunch : iterable of node pairs, optional (default = None)
The WIC measure will be computed for each pair of nodes given in
the iterable. The pairs must be given as 2-tuples (u, v) where
u and v are nodes in the graph. If ebunch is None then all
non-existent edges in the graph will be used.
Default value: None.
delta : float, optional (default = 0.001)
Value to prevent division by zero in case there is no
inter-cluster common neighbor between two nodes. See [1]_ for
details. Default value: 0.001.
community : string, optional (default = 'community')
Name of the node attribute containing the community information.
G.node[u][community] identifies which community u belongs to. Each
node belongs to at most one community. Default value: 'community'.
Returns
-------
piter : iterator
An iterator of 3-tuples in the form (u, v, p) where (u, v) is a
pair of nodes and p is their WIC measure.
Examples
--------
>>> import networkx as nx
>>> G = nx.Graph()
>>> G.add_edges_from([(0, 1), (0, 2), (0, 3), (1, 4), (2, 4), (3, 4)])
>>> G.node[0]['community'] = 0
>>> G.node[1]['community'] = 1
>>> G.node[2]['community'] = 0
>>> G.node[3]['community'] = 0
>>> G.node[4]['community'] = 0
>>> preds = nx.within_inter_cluster(G, [(0, 4)])
>>> for u, v, p in preds:
... '(%d, %d) -> %.8f' % (u, v, p)
...
'(0, 4) -> 1.99800200'
>>> preds = nx.within_inter_cluster(G, [(0, 4)], delta=0.5)
>>> for u, v, p in preds:
... '(%d, %d) -> %.8f' % (u, v, p)
...
'(0, 4) -> 1.33333333'
References
----------
.. [1] Jorge Carlos Valverde-Rebaza and Alneu de Andrade Lopes.
Link prediction in complex networks based on cluster information.
In Proceedings of the 21st Brazilian conference on Advances in
Artificial Intelligence (SBIA'12)
http://dx.doi.org/10.1007/978-3-642-34459-6_10
"""
if delta <= 0:
raise nx.NetworkXAlgorithmError('Delta must be greater than zero')
if ebunch is None:
ebunch = nx.non_edges(G)
def predict(u, v):
Cu = _community(G, u, community)
Cv = _community(G, v, community)
if Cu == Cv:
cnbors = set(nx.common_neighbors(G, u, v))
within = set(w for w in cnbors
if _community(G, w, community) == Cu)
inter = cnbors - within
return len(within) / (len(inter) + delta)
else:
return 0
return ((u, v, predict(u, v)) for u, v in ebunch)
def _community(G, u, community):
"""Get the community of the given node."""
node_u = G.node[u]
try:
return node_u[community]
except KeyError:
raise nx.NetworkXAlgorithmError('No community information')
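# Hedged usage sketch of the helper above (values hypothetical):
# >>> G = nx.path_graph(2)
# >>> G.node[0]['community'] = 0
# >>> _community(G, 0, 'community')
# 0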
| bsd-3-clause |
cdrooom/odoo | addons/crm/wizard/crm_merge_opportunities.py | 8 | 4442 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class crm_merge_opportunity(osv.osv_memory):
"""
Merge opportunities together.
The wizard speaks of opportunities because merging them is the common
case; leads are more ephemeral objects. But since opportunities are
leads, it is also possible to merge leads together (resulting in a new
lead), or leads and opportunities together (resulting in a new
opportunity).
"""
_name = 'crm.merge.opportunity'
_description = 'Merge opportunities'
_columns = {
'opportunity_ids': fields.many2many('crm.lead', rel='merge_opportunity_rel', id1='merge_id', id2='opportunity_id', string='Leads/Opportunities'),
'user_id': fields.many2one('res.users', 'Salesperson', select=True),
'team_id': fields.many2one('crm.team', 'Sales Team', oldname='section_id', select=True),
}
def action_merge(self, cr, uid, ids, context=None):
context = dict(context or {})
lead_obj = self.pool.get('crm.lead')
wizard = self.browse(cr, uid, ids[0], context=context)
opportunity2merge_ids = wizard.opportunity_ids
# TODO: why is this passed through the context?
context['lead_ids'] = [opportunity2merge_ids[0].id]
merge_id = lead_obj.merge_opportunity(cr, uid, [x.id for x in opportunity2merge_ids], wizard.user_id.id, wizard.team_id.id, context=context)
# The newly created lead might be a lead or an opp: redirect toward the right view
merge_result = lead_obj.browse(cr, uid, merge_id, context=context)
if merge_result.type == 'opportunity':
return lead_obj.redirect_opportunity_view(cr, uid, merge_id, context=context)
else:
return lead_obj.redirect_lead_view(cr, uid, merge_id, context=context)
def default_get(self, cr, uid, fields, context=None):
"""
Use active_ids from the context to fetch the leads/opps to merge.
In order to get merged, these leads/opps can't be in 'Dead' or 'Closed'
"""
if context is None:
context = {}
record_ids = context.get('active_ids', False)
res = super(crm_merge_opportunity, self).default_get(cr, uid, fields, context=context)
if record_ids:
opp_ids = []
opps = self.pool.get('crm.lead').browse(cr, uid, record_ids, context=context)
for opp in opps:
if opp.probability < 100:
opp_ids.append(opp.id)
if 'opportunity_ids' in fields:
res.update({'opportunity_ids': opp_ids})
return res
def on_change_user(self, cr, uid, ids, user_id, team_id, context=None):
""" When changing the user, also set a team_id or restrict team id
to the ones user_id is member of. """
if user_id:
if team_id:
user_in_team = self.pool.get('crm.team').search(cr, uid, [('id', '=', team_id), '|', ('user_id', '=', user_id), ('member_ids', '=', user_id)], context=context, count=True)
else:
user_in_team = False
if not user_in_team:
team_id = False
team_ids = self.pool.get('crm.team').search(cr, uid, ['|', ('user_id', '=', user_id), ('member_ids', '=', user_id)], context=context)
if team_ids:
team_id = team_ids[0]
return {'value': {'team_id': team_id}}
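# Hedged illustration (ids hypothetical): if user 7 is only a member of
# team 3, on_change_user(cr, uid, ids, 7, 5) finds no match for team 5,
# falls back to the user's teams, and returns {'value': {'team_id': 3}}.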
| agpl-3.0 |
grembo/buildbot | master/buildbot/test/unit/test_changes_mail.py | 10 | 4302 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
import os
from twisted.trial import unittest
from buildbot.changes import mail
from buildbot.test.util import changesource
from buildbot.test.util import dirs
class TestMaildirSource(changesource.ChangeSourceMixin, dirs.DirsMixin,
unittest.TestCase):
def setUp(self):
self.maildir = os.path.abspath("maildir")
d = self.setUpChangeSource()
d.addCallback(lambda _: self.setUpDirs(self.maildir))
return d
def populateMaildir(self):
"create a fake maildir with a fake new message ('newmsg') in it"
newdir = os.path.join(self.maildir, "new")
os.makedirs(newdir)
curdir = os.path.join(self.maildir, "cur")
os.makedirs(curdir)
fake_message = "Subject: test\n\nthis is a test"
mailfile = os.path.join(newdir, "newmsg")
with open(mailfile, "w") as f:
f.write(fake_message)
def assertMailProcessed(self):
self.assertFalse(
os.path.exists(os.path.join(self.maildir, "new", "newmsg")))
self.assertTrue(
os.path.exists(os.path.join(self.maildir, "cur", "newmsg")))
def tearDown(self):
d = self.tearDownDirs()
d.addCallback(lambda _: self.tearDownChangeSource())
return d
# tests
def test_describe(self):
mds = mail.MaildirSource(self.maildir)
self.assertSubstring(self.maildir, mds.describe())
def test_messageReceived_svn(self):
self.populateMaildir()
mds = mail.MaildirSource(self.maildir)
self.attachChangeSource(mds)
# monkey-patch in a parse method
def parse(message, prefix):
assert 'this is a test' in message.get_payload()
return (u'svn', dict(author=u'jimmy'))
mds.parse = parse
d = mds.messageReceived('newmsg')
def check(_):
self.assertMailProcessed()
self.assertEqual(self.master.data.updates.changesAdded, [{
'author': 'jimmy',
'branch': None,
'category': None,
'codebase': None,
'comments': None,
'files': None,
'project': '',
'properties': {},
'repository': '',
'revision': None,
'revlink': '',
'src': 'svn',
'when_timestamp': None,
}])
d.addCallback(check)
return d
def test_messageReceived_bzr(self):
self.populateMaildir()
mds = mail.MaildirSource(self.maildir)
self.attachChangeSource(mds)
# monkey-patch in a parse method
def parse(message, prefix):
assert 'this is a test' in message.get_payload()
return (u'bzr', dict(author=u'jimmy'))
mds.parse = parse
d = mds.messageReceived('newmsg')
def check(_):
self.assertMailProcessed()
self.assertEqual(self.master.data.updates.changesAdded, [{
'author': 'jimmy',
'branch': None,
'category': None,
'codebase': None,
'comments': None,
'files': None,
'project': '',
'properties': {},
'repository': '',
'revision': None,
'revlink': '',
'src': 'bzr',
'when_timestamp': None,
}])
d.addCallback(check)
return d
| gpl-2.0 |
pleaseproject/python-for-android | python3-alpha/python3-src/Lib/distutils/command/build_ext.py | 46 | 32370 | """distutils.command.build_ext
Implements the Distutils 'build_ext' command, for building extension
modules (currently limited to C extensions, should accommodate C++
extensions ASAP)."""
import sys, os, re
from distutils.core import Command
from distutils.errors import *
from distutils.sysconfig import customize_compiler, get_python_version
from distutils.dep_util import newer_group
from distutils.extension import Extension
from distutils.util import get_platform
from distutils import log
# this keeps compatibility from 2.3 to 2.5
if sys.version < "2.6":
USER_BASE = None
HAS_USER_SITE = False
else:
from site import USER_BASE
HAS_USER_SITE = True
if os.name == 'nt':
from distutils.msvccompiler import get_build_version
MSVC_VERSION = int(get_build_version())
# An extension name is just a dot-separated list of Python NAMEs (ie.
# the same as a fully-qualified module name).
extension_name_re = re.compile \
(r'^[a-zA-Z_][a-zA-Z_0-9]*(\.[a-zA-Z_][a-zA-Z_0-9]*)*$')
def show_compilers ():
from distutils.ccompiler import show_compilers
show_compilers()
class build_ext(Command):
description = "build C/C++ extensions (compile/link to build directory)"
# XXX thoughts on how to deal with complex command-line options like
# these, i.e. how to make it so fancy_getopt can suck them off the
# command line and make it look like setup.py defined the appropriate
# lists of tuples of what-have-you.
# - each command needs a callback to process its command-line options
# - Command.__init__() needs access to its share of the whole
# command line (must ultimately come from
# Distribution.parse_command_line())
# - it then calls the current command class' option-parsing
# callback to deal with weird options like -D, which have to
# parse the option text and churn out some custom data
# structure
# - that data structure (in this case, a list of 2-tuples)
# will then be present in the command object by the time
# we get to finalize_options() (i.e. the constructor
# takes care of both command-line and client options
# in between initialize_options() and finalize_options())
sep_by = " (separated by '%s')" % os.pathsep
user_options = [
('build-lib=', 'b',
"directory for compiled extension modules"),
('build-temp=', 't',
"directory for temporary files (build by-products)"),
('plat-name=', 'p',
"platform name to cross-compile for, if supported "
"(default: %s)" % get_platform()),
('inplace', 'i',
"ignore build-lib and put compiled extensions into the source " +
"directory alongside your pure Python modules"),
('include-dirs=', 'I',
"list of directories to search for header files" + sep_by),
('define=', 'D',
"C preprocessor macros to define"),
('undef=', 'U',
"C preprocessor macros to undefine"),
('libraries=', 'l',
"external C libraries to link with"),
('library-dirs=', 'L',
"directories to search for external C libraries" + sep_by),
('rpath=', 'R',
"directories to search for shared C libraries at runtime"),
('link-objects=', 'O',
"extra explicit link objects to include in the link"),
('debug', 'g',
"compile/link with debugging information"),
('force', 'f',
"forcibly build everything (ignore file timestamps)"),
('compiler=', 'c',
"specify the compiler type"),
('swig-cpp', None,
"make SWIG create C++ files (default is C)"),
('swig-opts=', None,
"list of SWIG command line options"),
('swig=', None,
"path to the SWIG executable"),
]
boolean_options = ['inplace', 'debug', 'force', 'swig-cpp']
if HAS_USER_SITE:
user_options.append(('user', None,
"add user include, library and rpath"))
boolean_options.append('user')
help_options = [
('help-compiler', None,
"list available compilers", show_compilers),
]
def initialize_options(self):
self.extensions = None
self.build_lib = None
self.plat_name = None
self.build_temp = None
self.inplace = 0
self.package = None
self.include_dirs = None
self.define = None
self.undef = None
self.libraries = None
self.library_dirs = None
self.rpath = None
self.link_objects = None
self.debug = None
self.force = None
self.compiler = None
self.swig = None
self.swig_cpp = None
self.swig_opts = None
self.user = None
def finalize_options(self):
from distutils import sysconfig
self.set_undefined_options('build',
('build_lib', 'build_lib'),
('build_temp', 'build_temp'),
('compiler', 'compiler'),
('debug', 'debug'),
('force', 'force'),
('plat_name', 'plat_name'),
)
if self.package is None:
self.package = self.distribution.ext_package
self.extensions = self.distribution.ext_modules
# Make sure Python's include directories (for Python.h, pyconfig.h,
# etc.) are in the include search path.
py_include = sysconfig.get_python_inc()
plat_py_include = sysconfig.get_python_inc(plat_specific=1)
if self.include_dirs is None:
self.include_dirs = self.distribution.include_dirs or []
if isinstance(self.include_dirs, str):
self.include_dirs = self.include_dirs.split(os.pathsep)
# Put the Python "system" include dir at the end, so that
# any local include dirs take precedence.
self.include_dirs.append(py_include)
if plat_py_include != py_include:
self.include_dirs.append(plat_py_include)
if isinstance(self.libraries, str):
self.libraries = [self.libraries]
# Life is easier if we're not forever checking for None, so
# simplify these options to empty lists if unset
if self.libraries is None:
self.libraries = []
if self.library_dirs is None:
self.library_dirs = []
elif isinstance(self.library_dirs, str):
self.library_dirs = self.library_dirs.split(os.pathsep)
if self.rpath is None:
self.rpath = []
elif isinstance(self.rpath, str):
self.rpath = self.rpath.split(os.pathsep)
# for extensions under windows use different directories
# for Release and Debug builds.
# also Python's library directory must be appended to library_dirs
if os.name == 'nt':
# the 'libs' directory is for binary installs - we assume that
# must be the *native* platform. But we don't really support
# cross-compiling via a binary install anyway, so we let it go.
self.library_dirs.append(os.path.join(sys.exec_prefix, 'libs'))
if self.debug:
self.build_temp = os.path.join(self.build_temp, "Debug")
else:
self.build_temp = os.path.join(self.build_temp, "Release")
# Append the source distribution include and library directories,
# this allows distutils on windows to work in the source tree
self.include_dirs.append(os.path.join(sys.exec_prefix, 'PC'))
if MSVC_VERSION == 9:
# Use the .lib files for the correct architecture
if self.plat_name == 'win32':
suffix = ''
else:
# win-amd64 or win-ia64
suffix = self.plat_name[4:]
new_lib = os.path.join(sys.exec_prefix, 'PCbuild')
if suffix:
new_lib = os.path.join(new_lib, suffix)
self.library_dirs.append(new_lib)
elif MSVC_VERSION == 8:
self.library_dirs.append(os.path.join(sys.exec_prefix,
'PC', 'VS8.0'))
elif MSVC_VERSION == 7:
self.library_dirs.append(os.path.join(sys.exec_prefix,
'PC', 'VS7.1'))
else:
self.library_dirs.append(os.path.join(sys.exec_prefix,
'PC', 'VC6'))
# OS/2 (EMX) doesn't support Debug vs Release builds, but has the
# import libraries in its "Config" subdirectory
if os.name == 'os2':
self.library_dirs.append(os.path.join(sys.exec_prefix, 'Config'))
# for extensions under Cygwin and AtheOS Python's library directory must be
# appended to library_dirs
if sys.platform[:6] == 'cygwin' or sys.platform[:6] == 'atheos':
if sys.executable.startswith(os.path.join(sys.exec_prefix, "bin")):
# building third party extensions
self.library_dirs.append(os.path.join(sys.prefix, "lib",
"python" + get_python_version(),
"config"))
else:
# building python standard extensions
self.library_dirs.append('.')
# for extensions under Linux or Solaris with a shared Python library,
# Python's library directory must be appended to library_dirs
sysconfig.get_config_var('Py_ENABLE_SHARED')
if ((sys.platform.startswith('linux') or sys.platform.startswith('gnu')
or sys.platform.startswith('sunos'))
and sysconfig.get_config_var('Py_ENABLE_SHARED')):
if sys.executable.startswith(os.path.join(sys.exec_prefix, "bin")):
# building third party extensions
self.library_dirs.append(sysconfig.get_config_var('LIBDIR'))
else:
# building python standard extensions
self.library_dirs.append('.')
# The argument parsing will result in self.define being a string, but
# it has to be a list of 2-tuples. All the preprocessor symbols
# specified by the 'define' option will be set to '1'. Multiple
# symbols can be separated with commas.
if self.define:
defines = self.define.split(',')
self.define = [(symbol, '1') for symbol in defines]
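# e.g. a command line of --define=FOO,BAR yields
# self.define == [('FOO', '1'), ('BAR', '1')]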
# The option for macros to undefine is also a string from the
# option parsing, but has to be a list. Multiple symbols can also
# be separated with commas here.
if self.undef:
self.undef = self.undef.split(',')
if self.swig_opts is None:
self.swig_opts = []
else:
self.swig_opts = self.swig_opts.split(' ')
# Finally add the user include and library directories if requested
if self.user:
user_include = os.path.join(USER_BASE, "include")
user_lib = os.path.join(USER_BASE, "lib")
if os.path.isdir(user_include):
self.include_dirs.append(user_include)
if os.path.isdir(user_lib):
self.library_dirs.append(user_lib)
self.rpath.append(user_lib)
def run(self):
from distutils.ccompiler import new_compiler
# 'self.extensions', as supplied by setup.py, is a list of
# Extension instances. See the documentation for Extension (in
# distutils.extension) for details.
#
# For backwards compatibility with Distutils 0.8.2 and earlier, we
# also allow the 'extensions' list to be a list of tuples:
# (ext_name, build_info)
# where build_info is a dictionary containing everything that
# Extension instances do except the name, with a few things being
# differently named. We convert these 2-tuples to Extension
# instances as needed.
if not self.extensions:
return
# If we were asked to build any C/C++ libraries, make sure that the
# directory where we put them is in the library search path for
# linking extensions.
if self.distribution.has_c_libraries():
build_clib = self.get_finalized_command('build_clib')
self.libraries.extend(build_clib.get_library_names() or [])
self.library_dirs.append(build_clib.build_clib)
# Setup the CCompiler object that we'll use to do all the
# compiling and linking
self.compiler = new_compiler(compiler=self.compiler,
verbose=self.verbose,
dry_run=self.dry_run,
force=self.force)
customize_compiler(self.compiler)
# If we are cross-compiling, init the compiler now (if we are not
# cross-compiling, init would not hurt, but people may rely on
# late initialization of compiler even if they shouldn't...)
if os.name == 'nt' and self.plat_name != get_platform():
self.compiler.initialize(self.plat_name)
# And make sure that any compile/link-related options (which might
# come from the command-line or from the setup script) are set in
# that CCompiler object -- that way, they automatically apply to
# all compiling and linking done here.
if self.include_dirs is not None:
self.compiler.set_include_dirs(self.include_dirs)
if self.define is not None:
# 'define' option is a list of (name,value) tuples
for (name, value) in self.define:
self.compiler.define_macro(name, value)
if self.undef is not None:
for macro in self.undef:
self.compiler.undefine_macro(macro)
if self.libraries is not None:
self.compiler.set_libraries(self.libraries)
if self.library_dirs is not None:
self.compiler.set_library_dirs(self.library_dirs)
if self.rpath is not None:
self.compiler.set_runtime_library_dirs(self.rpath)
if self.link_objects is not None:
self.compiler.set_link_objects(self.link_objects)
# Now actually compile and link everything.
self.build_extensions()
def check_extensions_list(self, extensions):
"""Ensure that the list of extensions (presumably provided as a
command option 'extensions') is valid, i.e. it is a list of
Extension objects. We also support the old-style list of 2-tuples,
where the tuples are (ext_name, build_info), which are converted to
Extension instances here.
Raise DistutilsSetupError if the structure is invalid anywhere;
just returns otherwise.
"""
if not isinstance(extensions, list):
raise DistutilsSetupError(
"'ext_modules' option must be a list of Extension instances")
for i, ext in enumerate(extensions):
if isinstance(ext, Extension):
continue # OK! (assume type-checking done
# by Extension constructor)
if not isinstance(ext, tuple) or len(ext) != 2:
raise DistutilsSetupError(
"each element of 'ext_modules' option must be an "
"Extension instance or 2-tuple")
ext_name, build_info = ext
log.warn(("old-style (ext_name, build_info) tuple found in "
"ext_modules for extension '%s'"
"-- please convert to Extension instance" % ext_name))
if not (isinstance(ext_name, str) and
extension_name_re.match(ext_name)):
raise DistutilsSetupError(
"first element of each tuple in 'ext_modules' "
"must be the extension name (a string)")
if not isinstance(build_info, dict):
raise DistutilsSetupError(
"second element of each tuple in 'ext_modules' "
"must be a dictionary (build info)")
# OK, the (ext_name, build_info) dict is type-safe: convert it
# to an Extension instance.
ext = Extension(ext_name, build_info['sources'])
# Easy stuff: one-to-one mapping from dict elements to
# instance attributes.
for key in ('include_dirs', 'library_dirs', 'libraries',
'extra_objects', 'extra_compile_args',
'extra_link_args'):
val = build_info.get(key)
if val is not None:
setattr(ext, key, val)
# Medium-easy stuff: same syntax/semantics, different names.
ext.runtime_library_dirs = build_info.get('rpath')
if 'def_file' in build_info:
log.warn("'def_file' element of build info dict "
"no longer supported")
# Non-trivial stuff: 'macros' split into 'define_macros'
# and 'undef_macros'.
macros = build_info.get('macros')
if macros:
ext.define_macros = []
ext.undef_macros = []
for macro in macros:
if not (isinstance(macro, tuple) and len(macro) in (1, 2)):
raise DistutilsSetupError(
"'macros' element of build info dict "
"must be 1- or 2-tuple")
if len(macro) == 1:
ext.undef_macros.append(macro[0])
elif len(macro) == 2:
ext.define_macros.append(macro)
extensions[i] = ext
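# Hedged example of the legacy conversion above: the tuple
# ('pkg.mod', {'sources': ['mod.c'], 'libraries': ['m']}) is replaced
# in place by Extension('pkg.mod', ['mod.c'], libraries=['m']).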
def get_source_files(self):
self.check_extensions_list(self.extensions)
filenames = []
# Wouldn't it be neat if we knew the names of header files too...
for ext in self.extensions:
filenames.extend(ext.sources)
return filenames
def get_outputs(self):
# Sanity check the 'extensions' list -- can't assume this is being
# done in the same run as a 'build_extensions()' call (in fact, we
# can probably assume that it *isn't*!).
self.check_extensions_list(self.extensions)
# And build the list of output (built) filenames. Note that this
# ignores the 'inplace' flag, and assumes everything goes in the
# "build" tree.
outputs = []
for ext in self.extensions:
outputs.append(self.get_ext_fullpath(ext.name))
return outputs
def build_extensions(self):
# First, sanity-check the 'extensions' list
self.check_extensions_list(self.extensions)
for ext in self.extensions:
try:
self.build_extension(ext)
except (CCompilerError, DistutilsError, CompileError) as e:
if not ext.optional:
raise
self.warn('building extension "%s" failed: %s' %
(ext.name, e))
def build_extension(self, ext):
sources = ext.sources
if sources is None or not isinstance(sources, (list, tuple)):
raise DistutilsSetupError(
"in 'ext_modules' option (extension '%s'), "
"'sources' must be present and must be "
"a list of source filenames" % ext.name)
sources = list(sources)
ext_path = self.get_ext_fullpath(ext.name)
depends = sources + ext.depends
if not (self.force or newer_group(depends, ext_path, 'newer')):
log.debug("skipping '%s' extension (up-to-date)", ext.name)
return
else:
log.info("building '%s' extension", ext.name)
# First, scan the sources for SWIG definition files (.i), run
# SWIG on 'em to create .c files, and modify the sources list
# accordingly.
sources = self.swig_sources(sources, ext)
# Next, compile the source code to object files.
# XXX not honouring 'define_macros' or 'undef_macros' -- the
# CCompiler API needs to change to accommodate this, and I
# want to do one thing at a time!
# Two possible sources for extra compiler arguments:
# - 'extra_compile_args' in Extension object
# - CFLAGS environment variable (not particularly
# elegant, but people seem to expect it and I
# guess it's useful)
# The environment variable should take precedence, and
# any sensible compiler will give precedence to later
# command line args. Hence we combine them in order:
extra_args = ext.extra_compile_args or []
macros = ext.define_macros[:]
for undef in ext.undef_macros:
macros.append((undef,))
objects = self.compiler.compile(sources,
output_dir=self.build_temp,
macros=macros,
include_dirs=ext.include_dirs,
debug=self.debug,
extra_postargs=extra_args,
depends=ext.depends)
# XXX -- this is a Vile HACK!
#
# The setup.py script for Python on Unix needs to be able to
# get this list so it can perform all the clean up needed to
# avoid keeping object files around when cleaning out a failed
# build of an extension module. Since Distutils does not
# track dependencies, we have to get rid of intermediates to
# ensure all the intermediates will be properly re-built.
#
self._built_objects = objects[:]
# Now link the object files together into a "shared object" --
# of course, first we have to figure out all the other things
# that go into the mix.
if ext.extra_objects:
objects.extend(ext.extra_objects)
extra_args = ext.extra_link_args or []
# Detect target language, if not provided
language = ext.language or self.compiler.detect_language(sources)
self.compiler.link_shared_object(
objects, ext_path,
libraries=self.get_libraries(ext),
library_dirs=ext.library_dirs,
runtime_library_dirs=ext.runtime_library_dirs,
extra_postargs=extra_args,
export_symbols=self.get_export_symbols(ext),
debug=self.debug,
build_temp=self.build_temp,
target_lang=language)
def swig_sources(self, sources, extension):
"""Walk the list of source files in 'sources', looking for SWIG
interface (.i) files. Run SWIG on all that are found, and
return a modified 'sources' list with SWIG source files replaced
by the generated C (or C++) files.
"""
new_sources = []
swig_sources = []
swig_targets = {}
# XXX this drops generated C/C++ files into the source tree, which
# is fine for developers who want to distribute the generated
# source -- but there should be an option to put SWIG output in
# the temp dir.
if self.swig_cpp:
log.warn("--swig-cpp is deprecated - use --swig-opts=-c++")
if self.swig_cpp or ('-c++' in self.swig_opts) or \
('-c++' in extension.swig_opts):
target_ext = '.cpp'
else:
target_ext = '.c'
for source in sources:
(base, ext) = os.path.splitext(source)
if ext == ".i": # SWIG interface file
new_sources.append(base + '_wrap' + target_ext)
swig_sources.append(source)
swig_targets[source] = new_sources[-1]
else:
new_sources.append(source)
if not swig_sources:
return new_sources
swig = self.swig or self.find_swig()
swig_cmd = [swig, "-python"]
swig_cmd.extend(self.swig_opts)
if self.swig_cpp:
swig_cmd.append("-c++")
# Do not override commandline arguments
if not self.swig_opts:
for o in extension.swig_opts:
swig_cmd.append(o)
for source in swig_sources:
target = swig_targets[source]
log.info("swigging %s to %s", source, target)
self.spawn(swig_cmd + ["-o", target, source])
return new_sources
def find_swig(self):
"""Return the name of the SWIG executable. On Unix, this is
just "swig" -- it should be in the PATH. Tries a bit harder on
Windows.
"""
if os.name == "posix":
return "swig"
elif os.name == "nt":
# Look for SWIG in its standard installation directory on
# Windows (or so I presume!). If we find it there, great;
# if not, act like Unix and assume it's in the PATH.
for vers in ("1.3", "1.2", "1.1"):
fn = os.path.join("c:\\swig%s" % vers, "swig.exe")
if os.path.isfile(fn):
return fn
else:
return "swig.exe"
elif os.name == "os2":
# assume swig available in the PATH.
return "swig.exe"
else:
raise DistutilsPlatformError(
"I don't know how to find (much less run) SWIG "
"on platform '%s'" % os.name)
# -- Name generators -----------------------------------------------
# (extension names, filenames, whatever)
def get_ext_fullpath(self, ext_name):
"""Returns the path of the filename for a given extension.
The file is located in `build_lib` or directly in the package
(inplace option).
"""
fullname = self.get_ext_fullname(ext_name)
modpath = fullname.split('.')
filename = self.get_ext_filename(modpath[-1])
if not self.inplace:
# no further work needed
# returning :
# build_dir/package/path/filename
filename = os.path.join(*modpath[:-1]+[filename])
return os.path.join(self.build_lib, filename)
# the inplace option requires to find the package directory
# using the build_py command for that
package = '.'.join(modpath[0:-1])
build_py = self.get_finalized_command('build_py')
package_dir = os.path.abspath(build_py.get_package_dir(package))
# returning
# package_dir/filename
return os.path.join(package_dir, filename)
def get_ext_fullname(self, ext_name):
"""Returns the fullname of a given extension name.
Adds the `package.` prefix"""
if self.package is None:
return ext_name
else:
return self.package + '.' + ext_name
def get_ext_filename(self, ext_name):
r"""Convert the name of an extension (eg. "foo.bar") into the name
of the file from which it will be loaded (eg. "foo/bar.so", or
"foo\bar.pyd").
"""
from distutils.sysconfig import get_config_var
ext_path = ext_name.split('.')
# OS/2 has an 8 character module (extension) limit :-(
if os.name == "os2":
ext_path[len(ext_path) - 1] = ext_path[len(ext_path) - 1][:8]
# extensions in debug_mode are named 'module_d.pyd' under windows
so_ext = get_config_var('SO')
if os.name == 'nt' and self.debug:
return os.path.join(*ext_path) + '_d' + so_ext
return os.path.join(*ext_path) + so_ext
def get_export_symbols(self, ext):
"""Return the list of symbols that a shared extension has to
export. This either uses 'ext.export_symbols' or, if it's not
provided, "PyInit_" + module_name. Only relevant on Windows, where
the .pyd file (DLL) must export the module "PyInit_" function.
"""
initfunc_name = "PyInit_" + ext.name.split('.')[-1]
if initfunc_name not in ext.export_symbols:
ext.export_symbols.append(initfunc_name)
return ext.export_symbols
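# e.g. an extension named "foo.bar" exports the symbol "PyInit_bar".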
def get_libraries(self, ext):
"""Return the list of libraries to link against when building a
shared extension. On most platforms, this is just 'ext.libraries';
on Windows and OS/2, we add the Python library (eg. python20.dll).
"""
# The python library is always needed on Windows. For MSVC, this
# is redundant, since the library is mentioned in a pragma in
# pyconfig.h that MSVC groks. The other Windows compilers all seem
# to need it mentioned explicitly, though, so that's what we do.
# Append '_d' to the python import library on debug builds.
if sys.platform == "win32":
from distutils.msvccompiler import MSVCCompiler
if not isinstance(self.compiler, MSVCCompiler):
template = "python%d%d"
if self.debug:
template = template + '_d'
pythonlib = (template %
(sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
# don't extend ext.libraries, it may be shared with other
# extensions, it is a reference to the original list
return ext.libraries + [pythonlib]
else:
return ext.libraries
elif sys.platform == "os2emx":
# EMX/GCC requires the python library explicitly, and I
# believe VACPP does as well (though not confirmed) - AIM Apr01
template = "python%d%d"
# debug versions of the main DLL aren't supported, at least
# not at this time - AIM Apr01
#if self.debug:
# template = template + '_d'
pythonlib = (template %
(sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
# don't extend ext.libraries, it may be shared with other
# extensions, it is a reference to the original list
return ext.libraries + [pythonlib]
elif sys.platform[:6] == "cygwin":
template = "python%d.%d"
pythonlib = (template %
(sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
# don't extend ext.libraries, it may be shared with other
# extensions, it is a reference to the original list
return ext.libraries + [pythonlib]
elif sys.platform[:6] == "atheos":
from distutils import sysconfig
template = "python%d.%d"
pythonlib = (template %
(sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
# Get SHLIBS from Makefile
extra = []
for lib in sysconfig.get_config_var('SHLIBS').split():
if lib.startswith('-l'):
extra.append(lib[2:])
else:
extra.append(lib)
# don't extend ext.libraries, it may be shared with other
# extensions, it is a reference to the original list
return ext.libraries + [pythonlib, "m"] + extra
elif sys.platform == 'darwin':
# Don't use the default code below
return ext.libraries
elif sys.platform[:3] == 'aix':
# Don't use the default code below
return ext.libraries
else:
from distutils import sysconfig
if sysconfig.get_config_var('Py_ENABLE_SHARED'):
pythonlib = 'python{}.{}{}'.format(
sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff,
sys.abiflags)
return ext.libraries + [pythonlib]
else:
return ext.libraries
| apache-2.0 |
Korkki/django | django/contrib/messages/storage/cookie.py | 471 | 6545 | import json
from django.conf import settings
from django.contrib.messages.storage.base import BaseStorage, Message
from django.http import SimpleCookie
from django.utils import six
from django.utils.crypto import constant_time_compare, salted_hmac
from django.utils.safestring import SafeData, mark_safe
class MessageEncoder(json.JSONEncoder):
"""
Compactly serializes instances of the ``Message`` class as JSON.
"""
message_key = '__json_message'
def default(self, obj):
if isinstance(obj, Message):
# Using 0/1 here instead of False/True to produce more compact json
is_safedata = 1 if isinstance(obj.message, SafeData) else 0
message = [self.message_key, is_safedata, obj.level, obj.message]
if obj.extra_tags:
message.append(obj.extra_tags)
return message
return super(MessageEncoder, self).default(obj)
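# Hedged example (hypothetical message): Message(25, 'Done!') encodes to
# ["__json_message", 0, 25, "Done!"]; a SafeData message flips the 0 to 1,
# and extra_tags, when present, are appended as a fifth element.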
class MessageDecoder(json.JSONDecoder):
"""
Decodes JSON that includes serialized ``Message`` instances.
"""
def process_messages(self, obj):
if isinstance(obj, list) and obj:
if obj[0] == MessageEncoder.message_key:
if len(obj) == 3:
# Compatibility with previously-encoded messages
return Message(*obj[1:])
if obj[1]:
obj[3] = mark_safe(obj[3])
return Message(*obj[2:])
return [self.process_messages(item) for item in obj]
if isinstance(obj, dict):
return {key: self.process_messages(value)
for key, value in six.iteritems(obj)}
return obj
def decode(self, s, **kwargs):
decoded = super(MessageDecoder, self).decode(s, **kwargs)
return self.process_messages(decoded)
class CookieStorage(BaseStorage):
"""
Stores messages in a cookie.
"""
cookie_name = 'messages'
# uwsgi's default configuration enforces a maximum size of 4kb for all the
# HTTP headers. In order to leave some room for other cookies and headers,
# restrict the session cookie to 1/2 of 4kb. See #18781.
max_cookie_size = 2048
not_finished = '__messagesnotfinished__'
def _get(self, *args, **kwargs):
"""
Retrieves a list of messages from the messages cookie. If the
not_finished sentinel value is found at the end of the message list,
remove it and return a result indicating that not all messages were
retrieved by this storage.
"""
data = self.request.COOKIES.get(self.cookie_name)
messages = self._decode(data)
all_retrieved = not (messages and messages[-1] == self.not_finished)
if messages and not all_retrieved:
# remove the sentinel value
messages.pop()
return messages, all_retrieved
def _update_cookie(self, encoded_data, response):
"""
Either sets the cookie with the encoded data if there is any data to
store, or deletes the cookie.
"""
if encoded_data:
response.set_cookie(self.cookie_name, encoded_data,
domain=settings.SESSION_COOKIE_DOMAIN,
secure=settings.SESSION_COOKIE_SECURE or None,
httponly=settings.SESSION_COOKIE_HTTPONLY or None)
else:
response.delete_cookie(self.cookie_name,
domain=settings.SESSION_COOKIE_DOMAIN)
def _store(self, messages, response, remove_oldest=True, *args, **kwargs):
"""
Stores the messages to a cookie, returning a list of any messages which
could not be stored.
If the encoded data is larger than ``max_cookie_size``, removes
messages until the data fits (these are the messages which are
returned), and add the not_finished sentinel value to indicate as much.
"""
unstored_messages = []
encoded_data = self._encode(messages)
if self.max_cookie_size:
# data is going to be stored eventually by SimpleCookie, which
# adds its own overhead, which we must account for.
cookie = SimpleCookie() # create outside the loop
def stored_length(val):
return len(cookie.value_encode(val)[1])
while encoded_data and stored_length(encoded_data) > self.max_cookie_size:
if remove_oldest:
unstored_messages.append(messages.pop(0))
else:
unstored_messages.insert(0, messages.pop())
encoded_data = self._encode(messages + [self.not_finished],
encode_empty=unstored_messages)
self._update_cookie(encoded_data, response)
return unstored_messages
def _hash(self, value):
"""
Creates an HMAC/SHA1 hash based on the value and the project setting's
SECRET_KEY, modified to make it unique for the present purpose.
"""
key_salt = 'django.contrib.messages'
return salted_hmac(key_salt, value).hexdigest()
def _encode(self, messages, encode_empty=False):
"""
Returns an encoded version of the messages list which can be stored as
plain text.
Since the data will be retrieved from the client-side, the encoded data
also contains a hash to ensure that the data was not tampered with.
"""
if messages or encode_empty:
encoder = MessageEncoder(separators=(',', ':'))
value = encoder.encode(messages)
return '%s$%s' % (self._hash(value), value)
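# The encoded form is "<hmac-sha1 hexdigest>$<json>"; _decode() below
# splits on the first '$' and recomputes the hash to verify integrity.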
def _decode(self, data):
"""
Safely decodes an encoded text stream back into a list of messages.
If the encoded text stream contained an invalid hash or was in an
invalid format, ``None`` is returned.
"""
if not data:
return None
bits = data.split('$', 1)
if len(bits) == 2:
hash, value = bits
if constant_time_compare(hash, self._hash(value)):
try:
# If we get here (and the JSON decode works), everything is
# good. In any other case, drop back and return None.
return json.loads(value, cls=MessageDecoder)
except ValueError:
pass
# Mark the data as used (so it gets removed) since something was wrong
# with the data.
self.used = True
return None
| bsd-3-clause |
florian-wagner/gimli | python/pygimli/gui/vtk/wxVTKRenderWindowInteractor.py | 1 | 24830 | # -*- coding: utf-8 -*-
"""
A VTK RenderWindowInteractor widget for wxPython.
Find wxPython info at http://wxPython.org
Created by Prabhu Ramachandran, April 2002
Based on wxVTKRenderWindow.py
Fixes and updates by Charl P. Botha 2003-2008
Updated to new wx namespace and some cleaning up by Andrea Gavana,
December 2006
"""
"""
Please see the example at the end of this file.
----------------------------------------
Creation:
wxVTKRenderWindowInteractor(parent, ID, stereo=0, [wx keywords]):
You should create a wx.PySimpleApp() or some other wx App object before
creating the window.
Behaviour:
Uses __getattr__ to make the wxVTKRenderWindowInteractor behave just
like a vtkGenericRenderWindowInteractor.
----------------------------------------
"""
# import usual libraries
import math
import sys
import os
baseClass = object
_useCapture = None
try:
import wx
# a few configuration items, see what works best on your system
# Use GLCanvas as base class instead of wx.Window.
# This is sometimes necessary under wxGTK or the image is blank.
# (in wxWindows 2.3.1 and earlier, the GLCanvas had scroll bars)
if wx.Platform == "__WXGTK__":
import wx.glcanvas
baseClass = wx.glcanvas.GLCanvas
# Keep capturing mouse after mouse is dragged out of window
# (in wxGTK 2.3.2 there is a bug that keeps this from working,
# but it is only relevant in wxGTK if there are multiple windows)
_useCapture = (wx.Platform == "__WXMSW__")
except ImportError as e:
import traceback
#traceback.print_exc(file=sys.stdout)
sys.stderr.write("No proper wx installed'.\n")
try:
import vtk
except Exception as e:
sys.stderr.write("No proper vtk installed'.\n")
# end of configuration items
class EventTimer(wx.Timer):
"""Simple wx.Timer class."""
def __init__(self, iren):
"""
Default class constructor.
@param iren: current render window
"""
wx.Timer.__init__(self)
self.iren = iren
def Notify(self):
"""The timer has expired."""
self.iren.TimerEvent()
class wxVTKRenderWindowInteractor(baseClass):
"""
A wxRenderWindow for wxPython.
Use GetRenderWindow() to get the vtkRenderWindow.
Create with the keyword stereo=1 in order to
generate a stereo-capable window.
"""
# class variable that can also be used to request instances that use
# stereo; this is overridden by the stereo=1/0 parameter. If you set
# it to True, the NEXT instantiated object will attempt to allocate a
# stereo visual. E.g.:
# wxVTKRenderWindowInteractor.USE_STEREO = True
# myRWI = wxVTKRenderWindowInteractor(parent, -1)
USE_STEREO = False
def __init__(self, parent, ID, *args, **kw):
"""
Default class constructor.
@param parent: parent window
@param ID: window id
@param **kw: wxPython keywords (position, size, style) plus the
'stereo' keyword
"""
# private attributes
self.__RenderWhenDisabled = 0
# First do special handling of some keywords:
# stereo, position, size, style
stereo = 0
if 'stereo' in kw:
if kw['stereo']:
stereo = 1
del kw['stereo']
elif self.USE_STEREO:
stereo = 1
position, size = wx.DefaultPosition, wx.DefaultSize
if 'position' in kw:
position = kw['position']
del kw['position']
if 'size' in kw:
size = kw['size']
del kw['size']
# wx.WANTS_CHARS says to give us e.g. TAB
# wx.NO_FULL_REPAINT_ON_RESIZE cuts down resize flicker under GTK
style = wx.WANTS_CHARS | wx.NO_FULL_REPAINT_ON_RESIZE
if 'style' in kw:
style = style | kw['style']
del kw['style']
# the enclosing frame must be shown under GTK or the windows
# don't connect together properly
if wx.Platform != '__WXMSW__':
l = []
p = parent
while p: # make a list of all parents
l.append(p)
p = p.GetParent()
l.reverse() # sort list into descending order
for p in l:
p.Show(1)
if baseClass.__name__ == 'GLCanvas':
# code added by cpbotha to enable stereo and double
# buffering correctly where the user requests this; remember
# that the glXContext in this case is NOT allocated by VTK,
# but by WX, hence all of this.
# Initialize GLCanvas with correct attriblist
attribList = [wx.glcanvas.WX_GL_RGBA,
wx.glcanvas.WX_GL_MIN_RED, 1,
wx.glcanvas.WX_GL_MIN_GREEN, 1,
wx.glcanvas.WX_GL_MIN_BLUE, 1,
wx.glcanvas.WX_GL_DEPTH_SIZE, 16,
wx.glcanvas.WX_GL_DOUBLEBUFFER]
if stereo:
attribList.append(wx.glcanvas.WX_GL_STEREO)
try:
baseClass.__init__(self, parent, id = ID, pos = position, size = size, style = style,
attribList=attribList)
except wx.PyAssertionError:
# visual couldn't be allocated, so we go back to default
baseClass.__init__(self, parent, ID, position, size, style)
if stereo:
# and make sure everyone knows that the stereo
# visual wasn't set.
stereo = 0
else:
baseClass.__init__(self, parent, ID, position, size, style)
# create the RenderWindow and initialize it
self._Iren = vtk.vtkGenericRenderWindowInteractor()
self._Iren.SetRenderWindow( vtk.vtkRenderWindow() )
self._Iren.AddObserver('CreateTimerEvent', self.CreateTimer)
self._Iren.AddObserver('DestroyTimerEvent', self.DestroyTimer)
self._Iren.GetRenderWindow().AddObserver('CursorChangedEvent',
self.CursorChangedEvent)
try:
self._Iren.GetRenderWindow().SetSize(size.width, size.height)
except AttributeError:
self._Iren.GetRenderWindow().SetSize(size[0], size[1])
if stereo:
self._Iren.GetRenderWindow().StereoCapableWindowOn()
self._Iren.GetRenderWindow().SetStereoTypeToCrystalEyes()
self.__handle = None
self.BindEvents()
# with this, we can make sure that the reparenting logic in
# Render() isn't called before the first OnPaint() has
# successfully been run (and set up the VTK/WX display links)
self.__has_painted = False
# set when we have captured the mouse.
self._own_mouse = False
# used to store WHICH mouse button led to mouse capture
self._mouse_capture_button = 0
# A mapping for cursor changes.
self._cursor_map = {0: wx.CURSOR_ARROW, # VTK_CURSOR_DEFAULT
1: wx.CURSOR_ARROW, # VTK_CURSOR_ARROW
2: wx.CURSOR_SIZENESW, # VTK_CURSOR_SIZENE
3: wx.CURSOR_SIZENWSE, # VTK_CURSOR_SIZENWSE
4: wx.CURSOR_SIZENESW, # VTK_CURSOR_SIZESW
5: wx.CURSOR_SIZENWSE, # VTK_CURSOR_SIZESE
6: wx.CURSOR_SIZENS, # VTK_CURSOR_SIZENS
7: wx.CURSOR_SIZEWE, # VTK_CURSOR_SIZEWE
8: wx.CURSOR_SIZING, # VTK_CURSOR_SIZEALL
9: wx.CURSOR_HAND, # VTK_CURSOR_HAND
10: wx.CURSOR_CROSS, # VTK_CURSOR_CROSSHAIR
}
def BindEvents(self):
"""Binds all the necessary events for navigation, sizing, drawing."""
# refresh window by doing a Render
self.Bind(wx.EVT_PAINT, self.OnPaint)
# turn off background erase to reduce flicker
self.Bind(wx.EVT_ERASE_BACKGROUND, lambda e: None)
# Bind the events to the event converters
self.Bind(wx.EVT_RIGHT_DOWN, self.OnButtonDown)
self.Bind(wx.EVT_LEFT_DOWN, self.OnButtonDown)
self.Bind(wx.EVT_MIDDLE_DOWN, self.OnButtonDown)
self.Bind(wx.EVT_RIGHT_UP, self.OnButtonUp)
self.Bind(wx.EVT_LEFT_UP, self.OnButtonUp)
self.Bind(wx.EVT_MIDDLE_UP, self.OnButtonUp)
self.Bind(wx.EVT_MOUSEWHEEL, self.OnMouseWheel)
self.Bind(wx.EVT_MOTION, self.OnMotion)
self.Bind(wx.EVT_ENTER_WINDOW, self.OnEnter)
self.Bind(wx.EVT_LEAVE_WINDOW, self.OnLeave)
# If we use EVT_KEY_DOWN instead of EVT_CHAR, capital versions
# of all characters are always returned. EVT_CHAR also performs
# other necessary keyboard-dependent translations.
self.Bind(wx.EVT_CHAR, self.OnKeyDown)
self.Bind(wx.EVT_KEY_UP, self.OnKeyUp)
self.Bind(wx.EVT_SIZE, self.OnSize)
# the wx 2.8.7.1 documentation states that you HAVE to handle
# this event if you make use of CaptureMouse, which we do.
if _useCapture and hasattr(wx, 'EVT_MOUSE_CAPTURE_LOST'):
self.Bind(wx.EVT_MOUSE_CAPTURE_LOST,
self.OnMouseCaptureLost)
def __getattr__(self, attr):
"""Makes the object behave like a vtkGenericRenderWindowInteractor."""
if attr == '__vtk__':
return lambda t=self._Iren: t
elif hasattr(self._Iren, attr):
return getattr(self._Iren, attr)
else:
raise AttributeError(self.__class__.__name__ + \
" has no attribute named " + attr)
def CreateTimer(self, obj, evt):
"""Creates a timer."""
self._timer = EventTimer(self)
self._timer.Start(10, True)
def DestroyTimer(self, obj, evt):
"""The timer is a one shot timer so will expire automatically."""
return 1
def _CursorChangedEvent(self, obj, evt):
"""Change the wx cursor if the renderwindow's cursor was changed."""
cur = self._cursor_map[obj.GetCurrentCursor()]
c = wx.StockCursor(cur)
self.SetCursor(c)
def CursorChangedEvent(self, obj, evt):
"""Called when the CursorChangedEvent fires on the render window."""
# This indirection is needed since when the event fires, the
# current cursor is not yet set so we defer this by which time
# the current cursor should have been set.
wx.CallAfter(self._CursorChangedEvent, obj, evt)
def HideCursor(self):
"""Hides the cursor."""
c = wx.StockCursor(wx.CURSOR_BLANK)
self.SetCursor(c)
def ShowCursor(self):
"""Shows the cursor."""
rw = self._Iren.GetRenderWindow()
cur = self._cursor_map[rw.GetCurrentCursor()]
c = wx.StockCursor(cur)
self.SetCursor(c)
def GetDisplayId(self):
"""
Function to get X11 Display ID from WX and return it in a format that
can be used by VTK Python.
We query the X11 Display with a new call that was added in wxPython
2.6.0.1. The call returns a SWIG object which we can query for the
address and subsequently turn into an old-style SWIG-mangled string
representation to pass to VTK.
"""
d = None
try:
d = wx.GetXDisplay()
except NameError:
# wx.GetXDisplay was added by Robin Dunn in wxPython 2.6.0.1
# if it's not available, we can't pass it. In general,
# things will still work; on some setups, it'll break.
pass
else:
# wx returns None on platforms where wx.GetXDisplay is not relevant
if d:
d = hex(d)
# On wxPython-2.6.3.2 and above there is no leading '0x'.
if not d.startswith('0x'):
d = '0x' + d
# we now have 0xdeadbeef
# VTK wants it as: _deadbeef_void_p (pre-SWIG-1.3 style)
d = '_%s_%s' % (d[2:], 'void_p')
return d
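# --- Added module-level sketch of the address-mangling step above: a raw X11
# display pointer such as 0x1234 becomes the pre-SWIG-1.3 style string
# '_1234_void_p' that VTK's SetDisplayId() expects.
def _demoMangle(addr):
    d = hex(addr)
    if not d.startswith('0x'):   # some wxPython builds omit the prefix
        d = '0x' + d
    return '_%s_%s' % (d[2:], 'void_p')
# _demoMangle(0x1234) -> '_1234_void_p'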
def OnMouseCaptureLost(self, event):
"""
This is signalled when we lose mouse capture due to an external event,
such as when a dialog box is shown.
See the wx documentation.
"""
# the documentation seems to imply that by this time we've
# already lost capture. I have to assume that we don't need
# to call ReleaseMouse ourselves.
if _useCapture and self._own_mouse:
self._own_mouse = False
def OnPaint(self,event):
"""Handles the wx.EVT_PAINT event for wxVTKRenderWindowInteractor."""
# wx should continue event processing after this handler.
# We call this BEFORE Render(), so that if Render() raises
# an exception, wx doesn't re-call OnPaint repeatedly.
event.Skip()
dc = wx.PaintDC(self)
# make sure the RenderWindow is sized correctly
self._Iren.GetRenderWindow().SetSize(self.GetSizeTuple())
# Tell the RenderWindow to render inside the wx.Window.
if not self.__handle:
# on relevant platforms, set the X11 Display ID
d = self.GetDisplayId()
if d:
self._Iren.GetRenderWindow().SetDisplayId(d)
# store the handle
self.__handle = self.GetHandle()
# and give it to VTK
self._Iren.GetRenderWindow().SetWindowInfo(str(self.__handle))
# now that we've painted once, the Render() reparenting logic
# is safe
self.__has_painted = True
self.Render()
def OnSize(self,event):
"""Handles the wx.EVT_SIZE event for wxVTKRenderWindowInteractor."""
# event processing should continue (we call this before the
# Render(), in case it raises an exception)
event.Skip()
try:
width, height = event.GetSize()
except:
width = event.GetSize().width
height = event.GetSize().height
self._Iren.SetSize(width, height)
self._Iren.ConfigureEvent()
# this will check for __handle
self.Render()
def OnMotion(self,event):
"""Handles the wx.EVT_MOTION event for wxVTKRenderWindowInteractor."""
# event processing should continue
# we call this early in case any of the VTK code raises an
# exception.
event.Skip()
self._Iren.SetEventInformationFlipY(event.GetX(), event.GetY(),
event.ControlDown(),
event.ShiftDown(),
chr(0), 0, None)
self._Iren.MouseMoveEvent()
def OnEnter(self,event):
"""Handles the wx.EVT_ENTER_WINDOW event for
wxVTKRenderWindowInteractor."""
# event processing should continue
event.Skip()
self._Iren.SetEventInformationFlipY(event.GetX(), event.GetY(),
event.ControlDown(),
event.ShiftDown(),
chr(0), 0, None)
self._Iren.EnterEvent()
def OnLeave(self,event):
"""Handles the wx.EVT_LEAVE_WINDOW event for
wxVTKRenderWindowInteractor."""
# event processing should continue
event.Skip()
self._Iren.SetEventInformationFlipY(event.GetX(), event.GetY(),
event.ControlDown(),
event.ShiftDown(),
chr(0), 0, None)
self._Iren.LeaveEvent()
def OnButtonDown(self,event):
"""Handles the wx.EVT_LEFT/RIGHT/MIDDLE_DOWN events for
wxVTKRenderWindowInteractor."""
# allow wx event processing to continue
# on wxPython 2.6.0.1, omitting this will cause problems with
# the initial focus, resulting in the wxVTKRWI ignoring keypresses
# until we focus elsewhere and then refocus the wxVTKRWI frame
# we do it this early in case any of the following VTK code
# raises an exception.
event.Skip()
ctrl, shift = event.ControlDown(), event.ShiftDown()
self._Iren.SetEventInformationFlipY(event.GetX(), event.GetY(),
ctrl, shift, chr(0), 0, None)
button = 0
if event.RightDown():
self._Iren.RightButtonPressEvent()
button = 'Right'
elif event.LeftDown():
self._Iren.LeftButtonPressEvent()
button = 'Left'
elif event.MiddleDown():
self._Iren.MiddleButtonPressEvent()
button = 'Middle'
# save the button and capture mouse until the button is released
# we only capture the mouse if it hasn't already been captured
if _useCapture and not self._own_mouse:
self._own_mouse = True
self._mouse_capture_button = button
self.CaptureMouse()
def OnButtonUp(self,event):
"""Handles the wx.EVT_LEFT/RIGHT/MIDDLE_UP events for
wxVTKRenderWindowInteractor."""
# event processing should continue
event.Skip()
button = 0
if event.RightUp():
button = 'Right'
elif event.LeftUp():
button = 'Left'
elif event.MiddleUp():
button = 'Middle'
# if the same button is released that captured the mouse, and
# we have the mouse, release it.
# (we need to get rid of this as soon as possible; if we don't
# and one of the event handlers raises an exception, mouse
# is never released.)
if _useCapture and self._own_mouse and \
button==self._mouse_capture_button:
self.ReleaseMouse()
self._own_mouse = False
ctrl, shift = event.ControlDown(), event.ShiftDown()
self._Iren.SetEventInformationFlipY(event.GetX(), event.GetY(),
ctrl, shift, chr(0), 0, None)
if button == 'Right':
self._Iren.RightButtonReleaseEvent()
elif button == 'Left':
self._Iren.LeftButtonReleaseEvent()
elif button == 'Middle':
self._Iren.MiddleButtonReleaseEvent()
def OnMouseWheel(self,event):
"""Handles the wx.EVT_MOUSEWHEEL event for
wxVTKRenderWindowInteractor."""
# event processing should continue
event.Skip()
ctrl, shift = event.ControlDown(), event.ShiftDown()
self._Iren.SetEventInformationFlipY(event.GetX(), event.GetY(),
ctrl, shift, chr(0), 0, None)
if event.GetWheelRotation() > 0:
self._Iren.MouseWheelForwardEvent()
else:
self._Iren.MouseWheelBackwardEvent()
def OnKeyDown(self,event):
"""Handles the wx.EVT_KEY_DOWN event for
wxVTKRenderWindowInteractor."""
# event processing should continue
event.Skip()
ctrl, shift = event.ControlDown(), event.ShiftDown()
keycode, keysym = event.GetKeyCode(), None
key = chr(0)
if keycode < 256:
key = chr(keycode)
# wxPython 2.6.0.1 does not return a valid event.Get{X,Y}()
# for this event, so we use the cached position.
(x,y)= self._Iren.GetEventPosition()
self._Iren.SetEventInformation(x, y,
ctrl, shift, key, 0,
keysym)
self._Iren.KeyPressEvent()
self._Iren.CharEvent()
def OnKeyUp(self,event):
"""Handles the wx.EVT_KEY_UP event for wxVTKRenderWindowInteractor."""
# event processing should continue
event.Skip()
ctrl, shift = event.ControlDown(), event.ShiftDown()
keycode, keysym = event.GetKeyCode(), None
key = chr(0)
if keycode < 256:
key = chr(keycode)
self._Iren.SetEventInformationFlipY(event.GetX(), event.GetY(),
ctrl, shift, key, 0,
keysym)
self._Iren.KeyReleaseEvent()
def GetRenderWindow(self):
"""Returns the render window (vtkRenderWindow)."""
return self._Iren.GetRenderWindow()
def Render(self):
"""Actually renders the VTK scene on screen."""
RenderAllowed = 1
if not self.__RenderWhenDisabled:
# the user doesn't want us to render when the toplevel frame
# is disabled - first find the top level parent
topParent = wx.GetTopLevelParent(self)
if topParent:
# if it exists, check whether it's enabled
# if it's not enabled, RenderAllowed will be false
RenderAllowed = topParent.IsEnabled()
if RenderAllowed:
if self.__handle and self.__handle == self.GetHandle():
self._Iren.GetRenderWindow().Render()
elif self.GetHandle() and self.__has_painted:
# this means the user has reparented us; let's adapt to the
# new situation by doing the WindowRemap dance
self._Iren.GetRenderWindow().SetNextWindowInfo(
str(self.GetHandle()))
# make sure the DisplayId is also set correctly
d = self.GetDisplayId()
if d:
self._Iren.GetRenderWindow().SetDisplayId(d)
# do the actual remap with the new parent information
self._Iren.GetRenderWindow().WindowRemap()
# store the new situation
self.__handle = self.GetHandle()
self._Iren.GetRenderWindow().Render()
def SetRenderWhenDisabled(self, newValue):
"""
Change value of __RenderWhenDisabled ivar.
If __RenderWhenDisabled is false (the default), this widget will not
call Render() on the RenderWindow if the top level frame (i.e. the
containing frame) has been disabled.
This prevents recursive rendering during wx.SafeYield() calls.
wx.SafeYield() can be called during the ProgressMethod() callback of
a VTK object to have progress bars and other GUI elements updated -
it does this by disabling all windows (disallowing user-input to
prevent re-entrancy of code) and then handling all outstanding
GUI events.
However, this often triggers an OnPaint() for wxVTKRWIs, resulting in
a Render(), and thus in Update() being called whilst the previous
Update() is still in progress.
"""
self.__RenderWhenDisabled = bool(newValue)
#--------------------------------------------------------------------
def wxVTKRenderWindowInteractorConeExample():
"""Like it says, just a simple example."""
# every wx app needs an app
app = wx.PySimpleApp()
# create the top-level frame, sizer and wxVTKRWI
frame = wx.Frame(None, -1, "wxVTKRenderWindowInteractor", size=(400,400))
widget = wxVTKRenderWindowInteractor(frame, -1)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(widget, 1, wx.EXPAND)
frame.SetSizer(sizer)
frame.Layout()
# It would be more correct (API-wise) to call widget.Initialize() and
# widget.Start() here, but Initialize() calls RenderWindow.Render().
# That Render() call will get through before we can set up the
# RenderWindow() to render via the wxWidgets-created context; this
# causes flashing on some platforms and downright breaks things on
# other platforms. Instead, we call widget.Enable(). This means
# that the RWI::Initialized ivar is not set, but in THIS SPECIFIC CASE,
# that doesn't matter.
widget.Enable(1)
widget.AddObserver("ExitEvent", lambda o,e,f=frame: f.Close())
ren = vtk.vtkRenderer()
widget.GetRenderWindow().AddRenderer(ren)
cone = vtk.vtkConeSource()
cone.SetResolution(8)
coneMapper = vtk.vtkPolyDataMapper()
coneMapper.SetInput(cone.GetOutput())
coneActor = vtk.vtkActor()
coneActor.SetMapper(coneMapper)
ren.AddActor(coneActor)
# show the window
frame.Show()
app.MainLoop()
if __name__ == "__main__":
wxVTKRenderWindowInteractorConeExample()
| gpl-3.0 |
felipsmartins/namebench | nb_third_party/dns/dnssec.py | 215 | 2144 | # Copyright (C) 2003-2007, 2009 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Common DNSSEC-related functions and constants."""
RSAMD5 = 1
DH = 2
DSA = 3
ECC = 4
RSASHA1 = 5
DSANSEC3SHA1 = 6
RSASHA1NSEC3SHA1 = 7
RSASHA256 = 8
RSASHA512 = 10
INDIRECT = 252
PRIVATEDNS = 253
PRIVATEOID = 254
_algorithm_by_text = {
'RSAMD5' : RSAMD5,
'DH' : DH,
'DSA' : DSA,
'ECC' : ECC,
'RSASHA1' : RSASHA1,
'DSANSEC3SHA1' : DSANSEC3SHA1,
'RSASHA1NSEC3SHA1' : RSASHA1NSEC3SHA1,
'RSASHA256' : RSASHA256,
'RSASHA512' : RSASHA512,
'INDIRECT' : INDIRECT,
'PRIVATEDNS' : PRIVATEDNS,
'PRIVATEOID' : PRIVATEOID,
}
# We construct the inverse mapping programmatically to ensure that we
# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
# would cause the mapping not to be a true inverse.
_algorithm_by_value = dict([(y, x) for x, y in _algorithm_by_text.iteritems()])
class UnknownAlgorithm(Exception):
"""Raised if an algorithm is unknown."""
pass
def algorithm_from_text(text):
"""Convert text into a DNSSEC algorithm value
@rtype: int"""
value = _algorithm_by_text.get(text.upper())
if value is None:
value = int(text)
return value
def algorithm_to_text(value):
"""Convert a DNSSEC algorithm value to text
@rtype: string"""
text = _algorithm_by_value.get(value)
if text is None:
text = str(value)
return text
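# --- Added usage sketch: both helpers above fall back gracefully, so unknown
# algorithm numbers round-trip unchanged through text and back.
if __name__ == '__main__':
    assert algorithm_from_text('RSASHA1') == RSASHA1      # known mnemonic -> 5
    assert algorithm_from_text('13') == 13                # unknown -> int(text)
    assert algorithm_to_text(RSASHA256) == 'RSASHA256'
    assert algorithm_to_text(99) == '99'                  # unknown -> str(value)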
| apache-2.0 |
klipstein/dojango | dojango/data/modelstore/utils.py | 13 | 3281 | from django.utils.datastructures import SortedDict
from django.db.models import get_model
from fields import StoreField
from exceptions import StoreException
def get_object_from_identifier(identifier, valid=None):
""" Helper function to resolve an item identifier
into a model instance.
Raises StoreException if the identifier is invalid
or the requested Model could not be found.
Raises <Model>.DoesNotExist if the object lookup fails
Arguments (optional):
valid
One or more Django model classes to compare the
returned model instance to.
"""
try:
model_str, pk = identifier.split('__')
except ValueError:
raise StoreException('Invalid identifier string')
Model = get_model(*model_str.split('.'))
if Model is None:
raise StoreException('Model from identifier string "%s" not found' % model_str)
if valid is not None:
if not isinstance(valid, (list, tuple) ):
valid = (valid,)
if Model not in valid:
raise StoreException('Model type mismatch')
# This will raise Model.DoesNotExist if lookup fails
return Model._default_manager.get(pk=pk)
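# --- Added sketch (hypothetical data) of the identifier format expected
# above: '<app_label>.<model_name>__<pk>', e.g. 'auth.User__1'.
def _demo_split(identifier):
    model_str, pk = identifier.split('__')        # 'auth.User__1'
    app_label, model_name = model_str.split('.')  # -> ('auth', 'User')
    return app_label, model_name, pk              # pk is still a string here
# _demo_split('auth.User__1') -> ('auth', 'User', '1'); a string without
# '__' raises ValueError, which the helper reports as StoreException.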
def get_fields_and_servicemethods(bases, attrs, include_bases=True):
""" This function was pilfered (and slightly modified) from django/forms/forms.py
See the original function for doc and comments.
"""
fields = [ (field_name, attrs.pop(field_name)) for \
field_name, obj in attrs.items() if isinstance(obj, StoreField)]
# Get the method name directly from the __servicemethod__ dict
# as set by the decorator
methods = [ (method.__servicemethod__['name'], method) for \
method in attrs.values() if hasattr(method, '__servicemethod__') ]
if include_bases:
for base in bases[::-1]:
# Grab the fields and servicemethods from the base classes
try:
fields = base.fields.items() + fields
except AttributeError:
pass
try:
methods = base.servicemethods.items() + methods
except AttributeError:
pass
return SortedDict(fields), SortedDict(methods)
def resolve_dotted_attribute(obj, attr, allow_dotted_names=True):
""" resolve_dotted_attribute(a, 'b.c.d') => a.b.c.d
Resolves a dotted attribute name to an object. Raises an
AttributeError if any attribute in the chain starts with a '_'
(unless it's the special '__unicode__' method).
Modification Note:
If the optional allow_dotted_names argument is False, dots are not
supported and this function operates similarly to getattr(obj, attr).
NOTE:
This method was (mostly) copied straight over from SimpleXMLRPCServer.py in the
standard library
"""
if allow_dotted_names:
attrs = attr.split('.')
else:
attrs = [attr]
for i in attrs:
if i.startswith('_') and i != '__unicode__': # Allow the __unicode__ method to be called
raise AttributeError(
'attempt to access private attribute "%s"' % i
)
else:
obj = getattr(obj,i)
return obj
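# --- Added usage sketch for resolve_dotted_attribute:
class _Demo(object):
    class inner(object):
        value = 42
# resolve_dotted_attribute(_Demo, 'inner.value') -> 42
# resolve_dotted_attribute(_Demo, '_hidden') raises AttributeError (private)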
| bsd-3-clause |
ddd332/presto | presto-docs/target/sphinx/reportlab/pdfbase/pdfdoc.py | 9 | 83521 | #Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/pdfbase/pdfdoc.py
__version__=''' $Id: pdfdoc.py 3795 2010-09-30 15:52:16Z rgbecker $ '''
__doc__="""
The module pdfdoc.py handles the 'outer structure' of PDF documents, ensuring that
all objects are properly cross-referenced and indexed to the nearest byte. The
'inner structure' - the page descriptions - are presumed to be generated before
each page is saved.
pdfgen.py calls this and provides a 'canvas' object to handle page marking operators.
piddlePDF calls pdfgen and offers a high-level interface.
The classes within this generally mirror structures in the PDF file
and are not part of any public interface. Instead, canvas and font
classes are made available elsewhere for users to manipulate.
"""
import string, types, binascii, codecs
from reportlab.pdfbase import pdfutils
from reportlab.pdfbase.pdfutils import LINEEND # this constant needed in both
from reportlab import rl_config
from reportlab.lib.utils import import_zlib, open_for_read, fp_str, _digester
from reportlab.pdfbase import pdfmetrics
try:
from hashlib import md5
except ImportError:
from md5 import md5
from sys import platform
try:
from sys import version_info
except: # pre-2.0
# may be inaccurate but will at least
# work in anything which seeks to format
# version_info into a string
version_info = (1,5,2,'unknown',0)
if platform[:4] == 'java' and version_info[:2] == (2, 1):
# workaround for list()-bug in Jython 2.1 (should be fixed in 2.2)
def list(sequence):
def f(x):
return x
return map(f, sequence)
def utf8str(x):
if isinstance(x,unicode):
return x.encode('utf8')
else:
return str(x)
class PDFError(Exception):
pass
# set this flag to get more vertical whitespace (and larger files)
LongFormat = 1
##if LongFormat: (doesn't work)
## pass
##else:
## LINEEND = "\n" # no wasteful carriage returns!
# __InternalName__ is a special attribute that can only be set by the Document arbitrator
__InternalName__ = "__InternalName__"
# __RefOnly__ marks reference only elements that must be formatted on top level
__RefOnly__ = "__RefOnly__"
# __Comment__ provides a (one line) comment to inline with an object ref, if present
# if it is more than one line then percentize it...
__Comment__ = "__Comment__"
# If DoComments is set then add helpful (space wasting) comment lines to PDF files
DoComments = 1
if not LongFormat:
DoComments = 0
# name for standard font dictionary
BasicFonts = "BasicFonts"
# name for the pages object
Pages = "Pages"
### generic utilities
# for % substitutions
LINEENDDICT = {"LINEEND": LINEEND, "PERCENT": "%"}
PDF_VERSION_DEFAULT = (1, 3)
PDF_SUPPORT_VERSION = dict( #map keyword to min version that supports it
transparency = (1, 4),
)
from types import InstanceType
def format(element, document, toplevel=0, InstanceType=InstanceType):
"""Indirection step for formatting.
Ensures that document parameters alter behaviour
of formatting for all elements.
"""
if hasattr(element,'__PDFObject__'):
if not toplevel and hasattr(element, __RefOnly__):
# the object cannot be a component at non top level.
# make a reference to it and return it's format
return document.Reference(element).format(document)
else:
f = element.format(document)
if not rl_config.invariant and DoComments and hasattr(element, __Comment__):
f = "%s%s%s%s" % ("% ", element.__Comment__, LINEEND, f)
return f
elif type(element) in (float, int):
#use a controlled number formatting routine
#instead of str, so Jython/Python etc do not differ
return fp_str(element)
else:
return str(element)
def xObjectName(externalname):
return "FormXob.%s" % externalname
# backwards compatibility
formName = xObjectName
# no encryption
class NoEncryption:
def encode(self, t):
"encode a string, stream, text"
return t
def prepare(self, document):
# get ready to do encryption
pass
def register(self, objnum, version):
# enter a new direct object
pass
def info(self):
# the representation of self in file if any (should be None or PDFDict)
return None
class DummyDoc:
"used to bypass encryption when required"
__PDFObject__ = True
encrypt = NoEncryption()
### the global document structure manager
class PDFDocument:
__PDFObject__ = True
_ID = None
objectcounter = 0
inObject = None
# set this to define filters
defaultStreamFilters = None
encrypt = NoEncryption() # default no encryption
pageCounter = 1
def __init__(self,
dummyoutline=0,
compression=rl_config.pageCompression,
invariant=rl_config.invariant,
filename=None,
pdfVersion=PDF_VERSION_DEFAULT,
):
# allow None value to be passed in to mean 'give system defaults'
if invariant is None:
self.invariant = rl_config.invariant
else:
self.invariant = invariant
self.setCompression(compression)
self._pdfVersion = pdfVersion
# signature for creating PDF ID
sig = self.signature = md5()
sig.update("a reportlab document")
if not self.invariant:
cat = _getTimeStamp()
else:
cat = 946684800.0
sig.update(repr(cat)) # initialize with timestamp digest
# mapping of internal identifier ("Page001") to PDF objectnumber and generation number (34, 0)
self.idToObjectNumberAndVersion = {}
# mapping of internal identifier ("Page001") to PDF object (PDFPage instance)
self.idToObject = {}
# internal id to file location
self.idToOffset = {}
# number to id
self.numberToId = {}
cat = self.Catalog = self._catalog = PDFCatalog()
pages = self.Pages = PDFPages()
cat.Pages = pages
if dummyoutline:
outlines = PDFOutlines0()
else:
outlines = PDFOutlines()
self.Outlines = self.outline = outlines
cat.Outlines = outlines
self.info = PDFInfo()
self.info.invariant = self.invariant
#self.Reference(self.Catalog)
#self.Reference(self.Info)
self.fontMapping = {}
#make an empty font dictionary
DD = PDFDictionary({})
DD.__Comment__ = "The standard fonts dictionary"
self.Reference(DD, BasicFonts)
self.delayedFonts = []
def setCompression(self, onoff):
# XXX: maybe this should also set self.defaultStreamFilters?
self.compression = onoff
def ensureMinPdfVersion(self, *keys):
"Ensure that the pdf version is greater than or equal to that specified by the keys"
for k in keys:
self._pdfVersion = max(self._pdfVersion, PDF_SUPPORT_VERSION[k])
def updateSignature(self, thing):
"add information to the signature"
if self._ID: return # but not if its used already!
self.signature.update(utf8str(thing))
def ID(self):
"A unique fingerprint for the file (unless in invariant mode)"
if self._ID:
return self._ID
digest = self.signature.digest()
doc = DummyDoc()
ID = PDFString(digest,enc='raw')
IDs = ID.format(doc)
self._ID = "%s %% ReportLab generated PDF document -- digest (http://www.reportlab.com) %s [%s %s] %s" % (
LINEEND, LINEEND, IDs, IDs, LINEEND)
return self._ID
def SaveToFile(self, filename, canvas):
if hasattr(getattr(filename, "write",None),'__call__'):
myfile = 0
f = filename
filename = utf8str(getattr(filename,'name',''))
else :
myfile = 1
filename = utf8str(filename)
f = open(filename, "wb")
f.write(self.GetPDFData(canvas))
if myfile:
f.close()
import os
if os.name=='mac':
from reportlab.lib.utils import markfilename
markfilename(filename) # do platform specific file junk
if getattr(canvas,'_verbosity',None): print 'saved', filename
def GetPDFData(self, canvas):
# realize delayed fonts
for fnt in self.delayedFonts:
fnt.addObjects(self)
# add info stuff to signature
self.info.invariant = self.invariant
self.info.digest(self.signature)
### later: maybe add more info to sig?
# prepare outline
self.Reference(self.Catalog)
self.Reference(self.info)
outline = self.outline
outline.prepare(self, canvas)
return self.format()
def inPage(self):
"""specify the current object as a page (enables reference binding and other page features)"""
if self.inObject is not None:
if self.inObject=="page": return
raise ValueError, "can't go in page already in object %s" % self.inObject
self.inObject = "page"
def inForm(self):
"""specify that we are in a form xobject (disable page features, etc)"""
# don't need this check anymore since going in a form pushes old context at canvas level.
#if self.inObject not in ["form", None]:
# raise ValueError, "can't go in form already in object %s" % self.inObject
self.inObject = "form"
# don't need to do anything else, I think...
def getInternalFontName(self, psfontname):
fm = self.fontMapping
if psfontname in fm:
return fm[psfontname]
else:
try:
# does pdfmetrics know about it? if so, add
fontObj = pdfmetrics.getFont(psfontname)
if fontObj._dynamicFont:
raise PDFError("getInternalFontName(%s) called for a dynamic font" % repr(psfontname))
fontObj.addObjects(self)
#self.addFont(fontObj)
return fm[psfontname]
except KeyError:
raise PDFError("Font %s not known!" % repr(psfontname))
def thisPageName(self):
return "Page"+repr(self.pageCounter)
def thisPageRef(self):
return PDFObjectReference(self.thisPageName())
def addPage(self, page):
name = self.thisPageName()
self.Reference(page, name)
self.Pages.addPage(page)
self.pageCounter += 1
self.inObject = None
def addForm(self, name, form):
"""add a Form XObject."""
# XXX should check that name is a legal PDF name
if self.inObject != "form":
self.inForm()
self.Reference(form, xObjectName(name))
self.inObject = None
def annotationName(self, externalname):
return "Annot.%s"%externalname
def addAnnotation(self, name, annotation):
self.Reference(annotation, self.annotationName(name))
def refAnnotation(self, name):
internalname = self.annotationName(name)
return PDFObjectReference(internalname)
def addColor(self,cmyk):
sname = cmyk.spotName
name = PDFName(sname)[1:]
if name not in self.idToObject:
sep = PDFSeparationCMYKColor(cmyk).value() #PDFArray([/Separation /name /DeviceCMYK tint_tf])
self.Reference(sep,name)
return name,sname
def setTitle(self, title):
"embeds in PDF file"
if title is None:
self.info.title = '(anonymous)'
else:
self.info.title = title
def setAuthor(self, author):
"embedded in PDF file"
#allow resetting to clear it
if author is None:
self.info.author = '(anonymous)'
else:
self.info.author = author
def setSubject(self, subject):
"embeds in PDF file"
#allow resetting to clear it
if subject is None:
self.info.subject = '(unspecified)'
else:
self.info.subject = subject
def setCreator(self, creator):
"embeds in PDF file"
#allow resetting to clear it
if creator is None:
self.info.creator = '(unspecified)'
else:
self.info.creator = creator
def setKeywords(self, keywords):
"embeds a string containing keywords in PDF file"
#allow resetting to clear it but ensure it's a string
if keywords is None:
self.info.keywords = ''
else:
self.info.keywords = keywords
def setDateFormatter(self, dateFormatter):
self.info._dateFormatter = dateFormatter
def getAvailableFonts(self):
fontnames = self.fontMapping.keys()
# the standard 14 are also always available! (even if not initialized yet)
import _fontdata
for name in _fontdata.standardFonts:
if name not in fontnames:
fontnames.append(name)
fontnames.sort()
return fontnames
def format(self):
# register the Catalog/INfo and then format the objects one by one until exhausted
# (possible infinite loop if there is a bug that continually makes new objects/refs...)
# Prepare encryption
self.encrypt.prepare(self)
cat = self.Catalog
info = self.info
self.Reference(self.Catalog)
self.Reference(self.info)
# register the encryption dictionary if present
encryptref = None
encryptinfo = self.encrypt.info()
if encryptinfo:
encryptref = self.Reference(encryptinfo)
# make std fonts (this could be made optional
counter = 0 # start at first object (object 1 after preincrement)
ids = [] # the collection of object ids in object number order
numbertoid = self.numberToId
idToNV = self.idToObjectNumberAndVersion
idToOb = self.idToObject
idToOf = self.idToOffset
### note that new entries may be "appended" DURING FORMATTING
done = None
File = PDFFile(self._pdfVersion) # output collector
while done is None:
counter += 1 # do next object...
if counter in numbertoid:
id = numbertoid[counter]
#printidToOb
obj = idToOb[id]
IO = PDFIndirectObject(id, obj)
# register object number and version
#encrypt.register(id,
IOf = IO.format(self)
# add a comment to the PDF output
if not rl_config.invariant and DoComments:
try:
classname = obj.__class__.__name__
except:
classname = repr(obj)
File.add("%% %s: class %s %s" % (repr(id), classname[:50], LINEEND))
offset = File.add(IOf)
idToOf[id] = offset
ids.append(id)
else:
done = 1
# sanity checks (must happen AFTER formatting)
lno = len(numbertoid)
if counter-1!=lno:
raise ValueError, "counter %s doesn't match number to id dictionary %s" %(counter, lno)
# now add the xref
xref = PDFCrossReferenceTable()
xref.addsection(0, ids)
xreff = xref.format(self)
xrefoffset = File.add(xreff)
# now add the trailer
trailer = PDFTrailer(
startxref = xrefoffset,
Size = lno+1,
Root = self.Reference(cat),
Info = self.Reference(info),
Encrypt = encryptref,
ID = self.ID(),
)
trailerf = trailer.format(self)
File.add(trailerf)
# return string format for pdf file
return File.format(self)
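# --- Added note on the byte layout the loop above produces (the standard
# PDF file structure):
#   %PDF-1.x header
#   1 0 obj ... endobj            one block per object, offsets recorded
#   ...                           in idToOffset as File.add() returns them
#   xref                          cross-reference table built from those offsets
#   trailer << /Size /Root /Info ... >>
#   startxref <byte offset of the xref section>
#   %%EOF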
def hasForm(self, name):
"""test for existence of named form"""
internalname = xObjectName(name)
return internalname in self.idToObject
def getFormBBox(self, name, boxType="MediaBox"):
"""get the declared bounding box of the form as a list.
If you specify a different PDF box definition (e.g. the
ArtBox) and it has one, that's what you'll get."""
internalname = xObjectName(name)
if internalname in self.idToObject:
theform = self.idToObject[internalname]
if hasattr(theform,'_extra_pageCatcher_info'):
return theform._extra_pageCatcher_info[boxType]
if isinstance(theform, PDFFormXObject):
# internally defined form
return theform.BBoxList()
elif isinstance(theform, PDFStream):
# externally defined form
return list(theform.dictionary.dict[boxType].sequence)
else:
raise ValueError, "I don't understand the form instance %s" % repr(name)
def getXObjectName(self, name):
"""Lets canvas find out what form is called internally.
Never mind whether it is defined yet or not."""
return xObjectName(name)
def xobjDict(self, formnames):
"""construct an xobject dict (for inclusion in a resource dict, usually)
from a list of form names (images not yet supported)"""
D = {}
for name in formnames:
internalname = xObjectName(name)
reference = PDFObjectReference(internalname)
D[internalname] = reference
#print "xobjDict D", D
return PDFDictionary(D)
def Reference(self, object, name=None, InstanceType=InstanceType):
### note references may "grow" during the final formatting pass: don't use d.keys()!
# don't make references to other references, or non instances, unless they are named!
#print"object type is ", type(object)
iob = hasattr(object,'__PDFObject__')
idToObject = self.idToObject
if name is None and (not iob or object.__class__ is PDFObjectReference):
return object
if hasattr(object, __InternalName__):
# already registered
intname = object.__InternalName__
if name is not None and name!=intname:
raise ValueError, "attempt to reregister object %s with new name %s" % (
repr(intname), repr(name))
if intname not in idToObject:
raise ValueError, "object named but not registered"
return PDFObjectReference(intname)
# otherwise register the new object
objectcounter = self.objectcounter = self.objectcounter+1
if name is None:
name = "R"+repr(objectcounter)
if name in idToObject:
other = idToObject[name]
if other!=object:
raise ValueError, "redefining named object: "+repr(name)
return PDFObjectReference(name)
if iob:
object.__InternalName__ = name
#print "name", name, "counter", objectcounter
self.idToObjectNumberAndVersion[name] = (objectcounter, 0)
self.numberToId[objectcounter] = name
idToObject[name] = object
return PDFObjectReference(name)
### chapter 4 Objects
PDFtrue = "true"
PDFfalse = "false"
PDFnull = "null"
class PDFText:
__PDFObject__ = True
def __init__(self, t):
self.t = t
def format(self, document):
result = binascii.hexlify(document.encrypt.encode(self.t))
return "<%s>" % result
def __str__(self):
dummydoc = DummyDoc()
return self.format(dummydoc)
def PDFnumber(n):
return n
import re
_re_cleanparens=re.compile('[^()]')
del re
def _isbalanced(s):
'''test whether a string is balanced in parens'''
s = _re_cleanparens.sub('',s)
n = 0
for c in s:
if c=='(': n+=1
else:
n -= 1
if n<0: return 0
return not n and 1 or 0
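# --- Added check of the balance rule implemented above: strip everything but
# parens, then scan with a counter that must never go negative and end at zero.
def _demo_isbalanced_cases():
    assert _isbalanced('a(b)c') == 1
    assert _isbalanced(')(') == 0   # counter dips below zero
    assert _isbalanced('((') == 0   # counter never returns to zero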
def _checkPdfdoc(utext):
'''return true if no Pdfdoc encoding errors'''
try:
utext.encode('pdfdoc')
return 1
except UnicodeEncodeError, e:
return 0
class PDFString:
__PDFObject__ = True
def __init__(self, s, escape=1, enc='auto'):
'''s can be unicode/utf8 or a PDFString
if escape is true then the output will be passed through escape
if enc is raw then the string will be left alone
if enc is auto we'll try and automatically adapt to utf_16_be if the
effective string is not entirely in pdfdoc
'''
if isinstance(s,PDFString):
self.s = s.s
self.escape = s.escape
self.enc = s.enc
else:
self.s = s
self.escape = escape
self.enc = enc
def format(self, document):
s = self.s
enc = getattr(self,'enc','auto')
if type(s) is str:
if enc == 'auto':  # equality, not identity: string interning is not guaranteed
try:
u = s.decode(s.startswith(codecs.BOM_UTF16_BE) and 'utf16' or 'utf8')
if _checkPdfdoc(u):
s = u.encode('pdfdoc')
else:
s = codecs.BOM_UTF16_BE+u.encode('utf_16_be')
except:
try:
s.decode('pdfdoc')
except:
import sys
print >>sys.stderr, 'Error in',repr(s)
raise
elif type(s) is unicode:
if enc == 'auto':
if _checkPdfdoc(s):
s = s.encode('pdfdoc')
else:
s = codecs.BOM_UTF16_BE+s.encode('utf_16_be')
else:
s = codecs.BOM_UTF16_BE+s.encode('utf_16_be')
else:
raise ValueError('PDFString argument must be str/unicode not %s' % type(s))
escape = getattr(self,'escape',1)
if not isinstance(document.encrypt,NoEncryption):
s = document.encrypt.encode(s)
escape = 1
if escape:
try:
es = "(%s)" % pdfutils._escape(s)
except:
raise ValueError("cannot escape %s %s" % (s, repr(s)))
if escape&2:
es = es.replace('\\012','\n')
if escape&4 and _isbalanced(s):
es = es.replace('\\(','(').replace('\\)',')')
return es
else:
return '(%s)' % s
def __str__(self):
return "(%s)" % pdfutils._escape(self.s)
def PDFName(data,lo=chr(0x21),hi=chr(0x7e)):
# might need to change this to class for encryption
# NOTE: RESULT MUST ALWAYS SUPPORT MEANINGFUL COMPARISONS (EQUALITY) AND HASH
# first convert the name
L = list(data)
for i,c in enumerate(L):
if c<lo or c>hi or c in "%()<>{}[]#":
L[i] = "#"+hex(ord(c))[2:] # forget the 0x thing...
return "/"+(''.join(L))
class PDFDictionary:
__PDFObject__ = True
multiline = LongFormat
def __init__(self, dict=None):
"""dict should be namestring to value eg "a": 122 NOT pdfname to value NOT "/a":122"""
if dict is None:
self.dict = {}
else:
self.dict = dict.copy()
def __setitem__(self, name, value):
self.dict[name] = value
def __getitem__(self, a):
return self.dict[a]
def __contains__(self,a):
return a in self.dict
def Reference(self, name, document):
self.dict[name] = document.Reference(self.dict[name])
def format(self, document,IND=LINEEND+' '):
dict = self.dict
try:
keys = dict.keys()
except:
print repr(dict)
raise
keys.sort()
L = [(format(PDFName(k),document)+" "+format(dict[k],document)) for k in keys]
if self.multiline:
L = IND.join(L)
else:
# break up every 6 elements anyway
t=L.insert
for i in xrange(6, len(L), 6):
t(i,LINEEND)
L = " ".join(L)
return "<< %s >>" % L
def copy(self):
return PDFDictionary(self.dict)
class checkPDFNames:
def __init__(self,*names):
self.names = map(PDFName,names)
def __call__(self,value):
if not value.startswith('/'):
value=PDFName(value)
if value in self.names:
return value
def checkPDFBoolean(value):
if value in ('true','false'): return value
class CheckedPDFDictionary(PDFDictionary):
validate = {}
def __init__(self,dict=None,validate=None):
PDFDictionary.__init__(self,dict)
if validate: self.validate = validate
def __setitem__(self,name,value):
if name not in self.validate:
raise ValueError('invalid key, %r' % name)
cvalue = self.validate[name](value)
if cvalue is None:
raise ValueError('Bad value %r for key %r' % (value,name))
PDFDictionary.__setitem__(self,name,cvalue)
class ViewerPreferencesPDFDictionary(CheckedPDFDictionary):
validate=dict(
HideToolbar=checkPDFBoolean,
HideMenubar=checkPDFBoolean,
HideWindowUI=checkPDFBoolean,
FitWindow=checkPDFBoolean,
CenterWindow=checkPDFBoolean,
DisplayDocTitle=checkPDFBoolean, #contributed by mark Erbaugh
NonFullScreenPageMode=checkPDFNames(*'UseNone UseOutlines UseThumbs UseOC'.split()),
Direction=checkPDFNames(*'L2R R2L'.split()),
ViewArea=checkPDFNames(*'MediaBox CropBox BleedBox TrimBox ArtBox'.split()),
ViewClip=checkPDFNames(*'MediaBox CropBox BleedBox TrimBox ArtBox'.split()),
PrintArea=checkPDFNames(*'MediaBox CropBox BleedBox TrimBox ArtBox'.split()),
PrintClip=checkPDFNames(*'MediaBox CropBox BleedBox TrimBox ArtBox'.split()),
PrintScaling=checkPDFNames(*'None AppDefault'.split()),
)
# stream filters are objects to support round trip and
# possibly in the future also support parameters
class PDFStreamFilterZCompress:
pdfname = "FlateDecode"
def encode(self, text):
from reportlab.lib.utils import import_zlib
zlib = import_zlib()
if not zlib: raise ImportError, "cannot z-compress zlib unavailable"
return zlib.compress(text)
def decode(self, encoded):
from reportlab.lib.utils import import_zlib
zlib = import_zlib()
if not zlib: raise ImportError, "cannot z-decompress zlib unavailable"
return zlib.decompress(encoded)
# need only one of these, unless we implement parameters later
PDFZCompress = PDFStreamFilterZCompress()
class PDFStreamFilterBase85Encode:
pdfname = "ASCII85Decode"
def encode(self, text):
from pdfutils import _AsciiBase85Encode, _wrap
text = _AsciiBase85Encode(text)
if rl_config.wrapA85:
text = _wrap(text)
return text
def decode(self, text):
from pdfutils import _AsciiBase85Decode
return _AsciiBase85Decode(text)
# need only one of these too
PDFBase85Encode = PDFStreamFilterBase85Encode()
STREAMFMT = ("%(dictionary)s%(LINEEND)s" # dictionary
"stream" # stream keyword
"%(LINEEND)s" # a line end (could be just a \n)
"%(content)s" # the content, with no lineend
"endstream%(LINEEND)s" # the endstream keyword
)
class PDFStream:
'''set dictionary elements explicitly stream.dictionary[name]=value'''
__PDFObject__ = True
### compression stuff not implemented yet
__RefOnly__ = 1 # must be at top level
def __init__(self, dictionary=None, content=None, filters=None):
if dictionary is None:
dictionary = PDFDictionary()
self.dictionary = dictionary
self.content = content
self.filters = filters
def format(self, document):
dictionary = self.dictionary
# copy it for modification
dictionary = PDFDictionary(dictionary.dict.copy())
content = self.content
filters = self.filters
if self.content is None:
raise ValueError, "stream content not set"
if filters is None:
filters = document.defaultStreamFilters
# only apply filters if they haven't been applied elsewhere
if filters is not None and "Filter" not in dictionary.dict:
# apply filters in reverse order listed
rf = list(filters)
rf.reverse()
fnames = []
for f in rf:
#print "*****************content:"; print repr(content[:200])
#print "*****************filter", f.pdfname
content = f.encode(content)
fnames.insert(0, PDFName(f.pdfname))
#print "*****************finally:"; print content[:200]
#print "****** FILTERS", fnames
#stop
dictionary["Filter"] = PDFArray(fnames)
# "stream encoding is done after all filters have been applied"
content = document.encrypt.encode(content)
fc = format(content, document)
#print "type(content)", type(content), len(content), type(self.dictionary)
lc = len(content)
#if fc!=content: burp
# set dictionary length parameter
dictionary["Length"] = lc
fd = format(dictionary, document)
sdict = LINEENDDICT.copy()
sdict["dictionary"] = fd
sdict["content"] = fc
return STREAMFMT % sdict
def teststream(content=None):
#content = "" # test
if content is None:
content = teststreamcontent
content = string.strip(content)
content = string.replace(content, "\n", LINEEND) + LINEEND
S = PDFStream(content = content,
filters=rl_config.useA85 and [PDFBase85Encode,PDFZCompress] or [PDFZCompress])
# nothing else needed...
S.__Comment__ = "test stream"
return S
teststreamcontent = """
1 0 0 1 0 0 cm BT /F9 12 Tf 14.4 TL ET
1.00 0.00 1.00 rg
n 72.00 72.00 432.00 648.00 re B*
"""
class PDFArray:
__PDFObject__ = True
multiline = LongFormat
_ZLIST = list(9*' ')+[LINEEND]
def __init__(self, sequence):
self.sequence = list(sequence)
def References(self, document):
"""make all objects in sequence references"""
self.sequence = map(document.Reference, self.sequence)
def format(self, document, IND=LINEEND+' '):
L = [format(e, document) for e in self.sequence]
if self.multiline:
L = IND.join(L)
else:
n=len(L)
if n>10:
# break up every 10 elements anyway
m,r = divmod(n,10)
L = ''.join([l+z for l,z in zip(L,m*self._ZLIST+list(r*' '))])
L = L.strip()
else:
L = ' '.join(L)
return "[ %s ]" % L
class PDFArrayCompact(PDFArray):
multiline=False
INDIRECTOBFMT = "%(n)s %(v)s obj%(LINEEND)s%(content)s%(CLINEEND)sendobj%(LINEEND)s"
class PDFIndirectObject:
__PDFObject__ = True
__RefOnly__ = 1
def __init__(self, name, content):
self.name = name
self.content = content
def format(self, document):
name = self.name
n, v = document.idToObjectNumberAndVersion[name]
# set encryption parameters
document.encrypt.register(n, v)
fcontent = format(self.content, document, toplevel=1) # yes this is at top level
D = LINEENDDICT.copy()
D["n"] = n
D["v"] = v
D["content"] = fcontent
D['CLINEEND'] = (LINEEND,'')[fcontent.endswith(LINEEND)]
return INDIRECTOBFMT % D
class PDFObjectReference:
__PDFObject__ = True
def __init__(self, name):
self.name = name
def format(self, document):
try:
return "%s %s R" % document.idToObjectNumberAndVersion[self.name]
except:
raise KeyError, "forward reference to %s not resolved upon final formatting" % repr(self.name)
### chapter 5
# Following Ken Lunde's advice and the PDF spec, this includes
# some high-order bytes. I chose the characters for Tokyo
# in Shift-JIS encoding, as these cannot be mistaken for
# any other encoding, and we'll be able to tell if something
# has run our PDF files through a dodgy Unicode conversion.
PDFHeader = (
"%%PDF-%s.%s"+LINEEND+
"%%\223\214\213\236 ReportLab Generated PDF document http://www.reportlab.com"+LINEEND)
class PDFFile:
__PDFObject__ = True
### just accumulates strings: keeps track of current offset
def __init__(self,pdfVersion=PDF_VERSION_DEFAULT):
self.strings = []
self.write = self.strings.append
self.offset = 0
self.add(PDFHeader % pdfVersion)
def closeOrReset(self):
pass
def add(self, s):
"""should be constructed as late as possible, return position where placed"""
result = self.offset
self.offset = result+len(s)
self.write(s)
return result
def format(self, document):
strings = map(str, self.strings) # final conversion, in case of lazy objects
return string.join(strings, "")
XREFFMT = '%0.10d %0.5d n'
class PDFCrossReferenceSubsection:
__PDFObject__ = True
def __init__(self, firstentrynumber, idsequence):
self.firstentrynumber = firstentrynumber
self.idsequence = idsequence
def format(self, document):
"""id sequence should represent contiguous object nums else error. free numbers not supported (yet)"""
firstentrynumber = self.firstentrynumber
idsequence = self.idsequence
entries = list(idsequence)
nentries = len(idsequence)
# special case: object number 0 is always free
taken = {}
if firstentrynumber==0:
taken[0] = "standard free entry"
nentries = nentries+1
entries.insert(0, "0000000000 65535 f")
idToNV = document.idToObjectNumberAndVersion
idToOffset = document.idToOffset
lastentrynumber = firstentrynumber+nentries-1
for id in idsequence:
(num, version) = idToNV[id]
if num in taken:
raise ValueError, "object number collision %s %s %s" % (num, repr(id), repr(taken[id]))
if num>lastentrynumber or num<firstentrynumber:
raise ValueError, "object number %s not in range %s..%s" % (num, firstentrynumber, lastentrynumber)
# compute position in list
rnum = num-firstentrynumber
taken[num] = id
offset = idToOffset[id]
entries[rnum] = XREFFMT % (offset, version)
# now add the initial line
firstline = "%s %s" % (firstentrynumber, nentries)
entries.insert(0, firstline)
# make sure it ends with a LINEEND
entries.append("")
if LINEEND=="\n" or LINEEND=="\r":
reflineend = " "+LINEEND # as per spec
elif LINEEND=="\r\n":
reflineend = LINEEND
else:
raise ValueError, "bad end of line! %s" % repr(LINEEND)
return string.join(entries, LINEEND)
class PDFCrossReferenceTable:
__PDFObject__ = True
def __init__(self):
self.sections = []
def addsection(self, firstentry, ids):
section = PDFCrossReferenceSubsection(firstentry, ids)
self.sections.append(section)
def format(self, document):
sections = self.sections
if not sections:
raise ValueError, "no crossref sections"
L = ["xref"+LINEEND]
for s in self.sections:
fs = format(s, document)
L.append(fs)
return string.join(L, "")
TRAILERFMT = ("trailer%(LINEEND)s"
"%(dict)s%(LINEEND)s"
"startxref%(LINEEND)s"
"%(startxref)s%(LINEEND)s"
"%(PERCENT)s%(PERCENT)sEOF%(LINEEND)s")
class PDFTrailer:
__PDFObject__ = True
def __init__(self, startxref, Size=None, Prev=None, Root=None, Info=None, ID=None, Encrypt=None):
self.startxref = startxref
if Size is None or Root is None:
raise ValueError, "Size and Root keys required"
dict = self.dict = PDFDictionary()
for (n,v) in [("Size", Size), ("Prev", Prev), ("Root", Root),
("Info", Info), ("ID", ID), ("Encrypt", Encrypt)]:
if v is not None:
dict[n] = v
def format(self, document):
fdict = format(self.dict, document)
D = LINEENDDICT.copy()
D["dict"] = fdict
D["startxref"] = self.startxref
return TRAILERFMT % D
#### XXXX skipping incremental update,
#### encryption
#### chapter 6, doc structure
class PDFCatalog:
__PDFObject__ = True
__Comment__ = "Document Root"
__RefOnly__ = 1
# to override, set as attributes
__Defaults__ = {"Type": PDFName("Catalog"),
"PageMode": PDFName("UseNone"),
}
__NoDefault__ = string.split("""
Dests Outlines Pages Threads AcroForm Names OpenActions PageMode URI
ViewerPreferences PageLabels PageLayout JavaScript StructTreeRoot SpiderInfo"""
)
__Refs__ = __NoDefault__ # make these all into references, if present
def format(self, document):
self.check_format(document)
defaults = self.__Defaults__
Refs = self.__Refs__
D = {}
for k in defaults.keys():
default = defaults[k]
v = None
if hasattr(self, k) and getattr(self,k) is not None:
v = getattr(self, k)
elif default is not None:
v = default
if v is not None:
D[k] = v
for k in self.__NoDefault__:
if hasattr(self, k):
v = getattr(self,k)
if v is not None:
D[k] = v
# force objects to be references where required
for k in Refs:
if k in D:
#print"k is", k, "value", D[k]
D[k] = document.Reference(D[k])
dict = PDFDictionary(D)
return format(dict, document)
def showOutline(self):
self.setPageMode("UseOutlines")
def showFullScreen(self):
self.setPageMode("FullScreen")
def setPageLayout(self,layout):
if layout:
self.PageLayout = PDFName(layout)
def setPageMode(self,mode):
if mode:
self.PageMode = PDFName(mode)
def check_format(self, document):
"""for use in subclasses"""
pass
class PDFPages(PDFCatalog):
"""PAGES TREE WITH ONE INTERNAL NODE, FOR "BALANCING" CHANGE IMPLEMENTATION"""
__Comment__ = "page tree"
__RefOnly__ = 1
# note: could implement page attribute inheritance...
__Defaults__ = {"Type": PDFName("Pages"),
}
__NoDefault__ = string.split("Kids Count Parent")
__Refs__ = ["Parent"]
def __init__(self):
self.pages = []
def __getitem__(self, item):
return self.pages[item]
def addPage(self, page):
self.pages.append(page)
def check_format(self, document):
# convert all pages to page references
pages = self.pages
kids = PDFArray(pages)
# make sure all pages are references
kids.References(document)
self.Kids = kids
self.Count = len(pages)
class PDFPage(PDFCatalog):
__Comment__ = "Page dictionary"
# all PDF attributes can be set explicitly
# if this flag is set, the "usual" behavior will be suppressed
Override_default_compilation = 0
__RefOnly__ = 1
__Defaults__ = {"Type": PDFName("Page"),
# "Parent": PDFObjectReference(Pages), # no! use document.Pages
}
__NoDefault__ = string.split(""" Parent
MediaBox Resources Contents CropBox Rotate Thumb Annots B Dur Hid Trans AA
PieceInfo LastModified SeparationInfo ArtBox TrimBox BleedBox ID PZ
Trans
""")
__Refs__ = string.split("""
Contents Parent ID
""")
pagewidth = 595
pageheight = 842
stream = None
hasImages = 0
compression = 0
XObjects = None
_colorsUsed = {}
Trans = None
# transitionstring?
# xobjects?
# annotations
def __init__(self):
# set all nodefaults to None
for name in self.__NoDefault__:
setattr(self, name, None)
def setCompression(self, onoff):
self.compression = onoff
def setStream(self, code):
if self.Override_default_compilation:
raise ValueError, "overridden! must set stream explicitly"
from types import ListType
if type(code) is ListType:
code = string.join(code, LINEEND)+LINEEND
self.stream = code
def setPageTransition(self, tranDict):
self.Trans = PDFDictionary(tranDict)
def check_format(self, document):
# set up parameters unless usual behaviour is suppressed
if self.Override_default_compilation:
return
self.MediaBox = self.MediaBox or PDFArray(self.Rotate in (90,270) and [0,0,self.pageheight,self.pagewidth] or [0, 0, self.pagewidth, self.pageheight])
if not self.Annots:
self.Annots = None
else:
#print self.Annots
#raise ValueError, "annotations not reimplemented yet"
if not hasattr(self.Annots,'__PDFObject__'):
self.Annots = PDFArray(self.Annots)
if not self.Contents:
stream = self.stream
if not stream:
self.Contents = teststream()
else:
S = PDFStream()
if self.compression:
S.filters = rl_config.useA85 and [PDFBase85Encode, PDFZCompress] or [PDFZCompress]
S.content = stream
S.__Comment__ = "page stream"
self.Contents = S
if not self.Resources:
resources = PDFResourceDictionary()
# fonts!
resources.basicFonts()
if self.hasImages:
resources.allProcs()
else:
resources.basicProcs()
if self.XObjects:
#print "XObjects", self.XObjects.dict
resources.XObject = self.XObjects
if self.ExtGState:
resources.ExtGState = self.ExtGState
resources.setColorSpace(self._colorsUsed)
self.Resources = resources
if not self.Parent:
pages = document.Pages
self.Parent = document.Reference(pages)
#this code contributed by Christian Jacobs <cljacobsen@gmail.com>
class PDFPageLabels(PDFCatalog):
    __Comment__ = None
__RefOnly__ = 0
__Defaults__ = {}
__NoDefault__ = ["Nums"]
__Refs__ = []
def __init__(self):
self.labels = []
def addPageLabel(self, page, label):
""" Adds a new PDFPageLabel to this catalog.
The 'page' argument, an integer, is the page number in the PDF document
with which the 'label' should be associated. Page numbering in the PDF
starts at zero! Thus, to change the label on the first page, '0' should be
provided as an argument, and to change the 6th page, '5' should be provided
as the argument.
The 'label' argument should be a PDFPageLabel instance, which describes the
format of the labels starting on page 'page' in the PDF and continuing
until the next encounter of a PDFPageLabel.
The order in which labels are added is not important.
"""
self.labels.append((page, label))
def format(self, document):
self.labels.sort()
labels = []
for page, label in self.labels:
labels.append(page)
labels.append(label)
self.Nums = PDFArray(labels) #PDFArray makes a copy with list()
return PDFCatalog.format(self, document)
class PDFPageLabel(PDFCatalog):
__Comment__ = None
__RefOnly__ = 0
__Defaults__ = {}
__NoDefault__ = "Type S P St".split()
    __convertible__ = string.split('ARABIC ROMAN_UPPER ROMAN_LOWER LETTERS_UPPER LETTERS_LOWER') # exact names, membership-tested below
ARABIC = 'D'
ROMAN_UPPER = 'R'
ROMAN_LOWER = 'r'
LETTERS_UPPER = 'A'
LETTERS_LOWER = 'a'
def __init__(self, style=None, start=None, prefix=None):
"""
A PDFPageLabel changes the style of page numbering as displayed in a PDF
viewer. PDF page labels have nothing to do with 'physical' page numbers
printed on a canvas, but instead influence the 'logical' page numbers
displayed by PDF viewers. However, when using roman numerals (i, ii,
        iii...) or page prefixes for appendices (A.1, A.2...) on the physical
pages PDF page labels are necessary to change the logical page numbers
displayed by the PDF viewer to match up with the physical numbers. A
PDFPageLabel changes the properties of numbering at the page on which it
appears (see the class 'PDFPageLabels' for specifying where a PDFPageLabel
is associated) and all subsequent pages, until a new PDFPageLabel is
encountered.
The arguments to this initialiser determine the properties of all
        subsequent page labels. 'style' determines the numbering style (arabic,
        roman, letters); 'start' specifies the starting number; and 'prefix' any
prefix to be applied to the page numbers. All these arguments can be left
out or set to None.
* style:
- None: No numbering, can be used to display the prefix only.
- PDFPageLabel.ARABIC: Use arabic numbers: 1, 2, 3, 4...
- PDFPageLabel.ROMAN_UPPER: Use upper case roman numerals: I, II, III...
- PDFPageLabel.ROMAN_LOWER: Use lower case roman numerals: i, ii, iii...
- PDFPageLabel.LETTERS_UPPER: Use upper case letters: A, B, C, D...
- PDFPageLabel.LETTERS_LOWER: Use lower case letters: a, b, c, d...
* start:
            - An integer specifying the starting number for this PDFPageLabel. This
              can be used when the numbering style changes, to reset the page number
              back to one, e.g. from roman to arabic, or from arabic to appendices.
              Can be any positive integer or None; per the PDF specification the
              numeric portion of the label (the /St entry) defaults to 1 when omitted.
* prefix:
- A string which is prefixed to the page numbers. Can be used to display
              appendices in the format: A.1, A.2, ..., B.1, B.2, ... where a
PDFPageLabel is used to set the properties for the first page of each
appendix to restart the page numbering at one and set the prefix to the
appropriate letter for current appendix. The prefix can also be used to
display text only, if the 'style' is set to None. This can be used to
display strings such as 'Front', 'Back', or 'Cover' for the covers on
books.
"""
if style:
if style.upper() in self.__convertible__: style = getattr(self,style.upper())
self.S = PDFName(style)
if start: self.St = PDFnumber(start)
if prefix: self.P = PDFString(prefix)
#ends code contributed by Christian Jacobs <cljacobsen@gmail.com>
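# Example (hedged sketch, not part of the original module): give the first two
# pages lowercase roman labels and restart arabic numbering at page three.
#
#   labels = PDFPageLabels()
#   labels.addPageLabel(0, PDFPageLabel(PDFPageLabel.ROMAN_LOWER))      # i, ii
#   labels.addPageLabel(2, PDFPageLabel(PDFPageLabel.ARABIC, start=1))  # 1, 2, ...
#   # document.Catalog.PageLabels = labels  # hypothetical wiring; the Catalog
#   # lists PageLabels in __Refs__, so it becomes an indirect reference.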
def testpage(document):
P = PDFPage()
P.Contents = teststream()
pages = document.Pages
P.Parent = document.Reference(pages)
P.MediaBox = PDFArray([0, 0, 595, 841])
resources = PDFResourceDictionary()
resources.allProcs() # enable all procsets
resources.basicFonts()
P.Resources = resources
pages.addPage(P)
#### DUMMY OUTLINES IMPLEMENTATION FOR testing
DUMMYOUTLINE = """
<<
/Count
0
/Type
/Outlines
>>"""
class PDFOutlines0:
__PDFObject__ = True
__Comment__ = "TEST OUTLINE!"
text = string.replace(DUMMYOUTLINE, "\n", LINEEND)
__RefOnly__ = 1
def format(self, document):
return self.text
class OutlineEntryObject:
"an entry in an outline"
__PDFObject__ = True
Title = Dest = Parent = Prev = Next = First = Last = Count = None
def format(self, document):
D = {}
D["Title"] = PDFString(self.Title)
D["Parent"] = self.Parent
D["Dest"] = self.Dest
for n in ("Prev", "Next", "First", "Last", "Count"):
v = getattr(self, n)
if v is not None:
D[n] = v
PD = PDFDictionary(D)
return PD.format(document)
class PDFOutlines:
"""
takes a recursive list of outline destinations like::
out = PDFOutline1()
out.setNames(canvas, # requires canvas for name resolution
"chapter1dest",
("chapter2dest",
["chapter2section1dest",
"chapter2section2dest",
"chapter2conclusiondest"]
), # end of chapter2 description
"chapter3dest",
("chapter4dest", ["c4s1", "c4s2"])
)
Higher layers may build this structure incrementally. KISS at base level.
"""
__PDFObject__ = True
# first attempt, many possible features missing.
#no init for now
mydestinations = ready = None
counter = 0
currentlevel = -1 # ie, no levels yet
def __init__(self):
self.destinationnamestotitles = {}
self.destinationstotitles = {}
self.levelstack = []
self.buildtree = []
self.closedict = {} # dictionary of "closed" destinations in the outline
def addOutlineEntry(self, destinationname, level=0, title=None, closed=None):
"""destinationname of None means "close the tree" """
from types import IntType, TupleType
if destinationname is None and level!=0:
raise ValueError, "close tree must have level of 0"
if type(level) is not IntType: raise ValueError, "level must be integer, got %s" % type(level)
if level<0: raise ValueError, "negative levels not allowed"
if title is None: title = destinationname
currentlevel = self.currentlevel
stack = self.levelstack
tree = self.buildtree
# adjust currentlevel and stack to match level
if level>currentlevel:
if level>currentlevel+1:
raise ValueError, "can't jump from outline level %s to level %s, need intermediates (destinationname=%r, title=%r)" %(currentlevel, level, destinationname, title)
level = currentlevel = currentlevel+1
stack.append([])
while level<currentlevel:
# pop off levels to match
current = stack[-1]
del stack[-1]
previous = stack[-1]
lastinprevious = previous[-1]
if type(lastinprevious) is TupleType:
(name, sectionlist) = lastinprevious
raise ValueError, "cannot reset existing sections: " + repr(lastinprevious)
else:
name = lastinprevious
sectionlist = current
previous[-1] = (name, sectionlist)
#sectionlist.append(current)
currentlevel = currentlevel-1
if destinationname is None: return
stack[-1].append(destinationname)
self.destinationnamestotitles[destinationname] = title
if closed: self.closedict[destinationname] = 1
self.currentlevel = level
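    # Example (sketch; destination names are hypothetical): the tree can be
    # built one entry at a time, e.g.
    #   outline.addOutlineEntry("chap1", 0, "Chapter 1")
    #   outline.addOutlineEntry("sec11", 1, "Section 1.1")
    #   outline.addOutlineEntry("chap2", 0, "Chapter 2", closed=1)
    # which is equivalent to setNames(canvas, ("chap1", ["sec11"]), "chap2").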
def setDestinations(self, destinationtree):
self.mydestinations = destinationtree
def format(self, document):
D = {}
D["Type"] = PDFName("Outlines")
c = self.count
D["Count"] = c
if c!=0:
D["First"] = self.first
D["Last"] = self.last
PD = PDFDictionary(D)
return PD.format(document)
def setNames(self, canvas, *nametree):
desttree = self.translateNames(canvas, nametree)
self.setDestinations(desttree)
def setNameList(self, canvas, nametree):
"Explicit list so I don't need to do in the caller"
desttree = self.translateNames(canvas, nametree)
self.setDestinations(desttree)
def translateNames(self, canvas, object):
"recursively translate tree of names into tree of destinations"
from types import StringType, ListType, TupleType
Ot = type(object)
destinationnamestotitles = self.destinationnamestotitles
destinationstotitles = self.destinationstotitles
closedict = self.closedict
if Ot is StringType:
destination = canvas._bookmarkReference(object)
title = object
if object in destinationnamestotitles:
title = destinationnamestotitles[object]
else:
destinationnamestotitles[title] = title
destinationstotitles[destination] = title
if object in closedict:
closedict[destination] = 1 # mark destination closed
            return {object: destination} # name-->ref (reuses the reference computed above)
if Ot is ListType or Ot is TupleType:
L = []
for o in object:
L.append(self.translateNames(canvas, o))
if Ot is TupleType:
return tuple(L)
return L
# bug contributed by Benjamin Dumke <reportlab@benjamin-dumke.de>
raise TypeError("in outline, destination name must be string: got a %s"%Ot)
def prepare(self, document, canvas):
"""prepare all data structures required for save operation (create related objects)"""
if self.mydestinations is None:
if self.levelstack:
self.addOutlineEntry(None) # close the tree
destnames = self.levelstack[0]
#from pprint import pprint; pprint(destnames); stop
self.mydestinations = self.translateNames(canvas, destnames)
else:
self.first = self.last = None
self.count = 0
self.ready = 1
return
#self.first = document.objectReference("Outline.First")
#self.last = document.objectReference("Outline.Last")
# XXXX this needs to be generalized for closed entries!
self.count = count(self.mydestinations, self.closedict)
(self.first, self.last) = self.maketree(document, self.mydestinations, toplevel=1)
self.ready = 1
def maketree(self, document, destinationtree, Parent=None, toplevel=0):
from types import ListType, TupleType, DictType
tdestinationtree = type(destinationtree)
if toplevel:
levelname = "Outline"
Parent = document.Reference(document.Outlines)
else:
self.count = self.count+1
levelname = "Outline.%s" % self.count
if Parent is None:
raise ValueError, "non-top level outline elt parent must be specified"
if tdestinationtree is not ListType and tdestinationtree is not TupleType:
raise ValueError, "destinationtree must be list or tuple, got %s"
nelts = len(destinationtree)
lastindex = nelts-1
lastelt = firstref = lastref = None
destinationnamestotitles = self.destinationnamestotitles
closedict = self.closedict
for index in range(nelts):
eltobj = OutlineEntryObject()
eltobj.Parent = Parent
eltname = "%s.%s" % (levelname, index)
eltref = document.Reference(eltobj, eltname)
#document.add(eltname, eltobj)
if lastelt is not None:
lastelt.Next = eltref
eltobj.Prev = lastref
if firstref is None:
firstref = eltref
            lastref = eltref
            lastelt = eltobj # advance eltobj
elt = destinationtree[index]
te = type(elt)
if te is DictType:
# simple leaf {name: dest}
leafdict = elt
elif te is TupleType:
# leaf with subsections: ({name: ref}, subsections) XXXX should clean up (see count(...))
try:
(leafdict, subsections) = elt
except:
raise ValueError, "destination tree elt tuple should have two elts, got %s" % len(elt)
eltobj.Count = count(subsections, closedict)
(eltobj.First, eltobj.Last) = self.maketree(document, subsections, eltref)
else:
raise ValueError, "destination tree elt should be dict or tuple, got %s" % te
try:
[(Title, Dest)] = leafdict.items()
except:
raise ValueError, "bad outline leaf dictionary, should have one entry "+utf8str(elt)
eltobj.Title = destinationnamestotitles[Title]
eltobj.Dest = Dest
if te is TupleType and Dest in closedict:
# closed subsection, count should be negative
eltobj.Count = -eltobj.Count
return (firstref, lastref)
def count(tree, closedict=None):
"""utility for outline: recursively count leaves in a tuple/list tree"""
from operator import add
from types import TupleType, ListType
tt = type(tree)
if tt is TupleType:
# leaf with subsections XXXX should clean up this structural usage
(leafdict, subsections) = tree
[(Title, Dest)] = leafdict.items()
if closedict and Dest in closedict:
return 1 # closed tree element
if tt is TupleType or tt is ListType:
#return reduce(add, map(count, tree))
counts = []
for e in tree:
counts.append(count(e, closedict))
return sum(counts) #used to be: return reduce(add, counts)
return 1
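# Worked example (sketch): count() tallies leaves, treating a closed branch
# as a single entry.
#
#   tree = [{"a": "refA"}, ({"b": "refB"}, [{"c": "refC"}])]
#   count(tree)               # -> 3  (a, b, c)
#   count(tree, {"refB": 1})  # -> 2  (a, plus the closed "b" branch as one)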
class PDFInfo:
"""PDF documents can have basic information embedded, viewable from
File | Document Info in Acrobat Reader. If this is wrong, you get
Postscript errors while printing, even though it does not print."""
__PDFObject__ = True
producer = "ReportLab PDF Library - www.reportlab.com"
creator = "ReportLab PDF Library - www.reportlab.com"
title = "untitled"
author = "anonymous"
subject = "unspecified"
keywords = ""
_dateFormatter = None
def __init__(self):
self.invariant = rl_config.invariant
def digest(self, md5object):
# add self information to signature
for x in (self.title, self.author, self.subject, self.keywords):
md5object.update(utf8str(x))
def format(self, document):
D = {}
D["Title"] = PDFString(self.title)
D["Author"] = PDFString(self.author)
D["CreationDate"] = PDFDate(invariant=self.invariant,dateFormatter=self._dateFormatter)
D["Producer"] = PDFString(self.producer)
D["Creator"] = PDFString(self.creator)
D["Subject"] = PDFString(self.subject)
D["Keywords"] = PDFString(self.keywords)
PD = PDFDictionary(D)
return PD.format(document)
def copy(self):
"shallow copy - useful in pagecatchering"
        thing = self.__class__()
for (k, v) in self.__dict__.items():
setattr(thing, k, v)
return thing
# skipping thumbnails, etc
class Annotation:
"""superclass for all annotations."""
__PDFObject__ = True
defaults = [("Type", PDFName("Annot"),)]
required = ("Type", "Rect", "Contents", "Subtype")
permitted = required+(
"Border", "C", "T", "M", "F", "H", "BS", "AA", "AS", "Popup", "P", "AP")
def cvtdict(self, d, escape=1):
"""transform dict args from python form to pdf string rep as needed"""
Rect = d["Rect"]
if type(Rect) is not types.StringType:
d["Rect"] = PDFArray(Rect)
d["Contents"] = PDFString(d["Contents"],escape)
return d
def AnnotationDict(self, **kw):
if 'escape' in kw:
escape = kw['escape']
del kw['escape']
else:
escape = 1
d = {}
for (name,val) in self.defaults:
d[name] = val
d.update(kw)
for name in self.required:
if name not in d:
raise ValueError, "keyword argument %s missing" % name
d = self.cvtdict(d,escape=escape)
permitted = self.permitted
for name in d.keys():
if name not in permitted:
raise ValueError, "bad annotation dictionary name %s" % name
return PDFDictionary(d)
def Dict(self):
raise ValueError, "DictString undefined for virtual superclass Annotation, must overload"
# but usually
#return self.AnnotationDict(self, Rect=(a,b,c,d)) or whatever
def format(self, document):
D = self.Dict()
return D.format(document)
class TextAnnotation(Annotation):
permitted = Annotation.permitted + (
"Open", "Name")
def __init__(self, Rect, Contents, **kw):
self.Rect = Rect
self.Contents = Contents
self.otherkw = kw
def Dict(self):
d = {}
d.update(self.otherkw)
d["Rect"] = self.Rect
d["Contents"] = self.Contents
d["Subtype"] = "/Text"
return self.AnnotationDict(**d)
class FreeTextAnnotation(Annotation):
permitted = Annotation.permitted + ("DA",)
def __init__(self, Rect, Contents, DA, **kw):
self.Rect = Rect
self.Contents = Contents
self.DA = DA
self.otherkw = kw
def Dict(self):
d = {}
d.update(self.otherkw)
d["Rect"] = self.Rect
d["Contents"] = self.Contents
d["DA"] = self.DA
d["Subtype"] = "/FreeText"
return self.AnnotationDict(**d)
class LinkAnnotation(Annotation):
permitted = Annotation.permitted + (
"Dest", "A", "PA")
def __init__(self, Rect, Contents, Destination, Border="[0 0 1]", **kw):
self.Border = Border
self.Rect = Rect
self.Contents = Contents
self.Destination = Destination
self.otherkw = kw
def dummyDictString(self): # old, testing
return """
<< /Type /Annot /Subtype /Link /Rect [71 717 190 734] /Border [16 16 1]
/Dest [23 0 R /Fit] >>
"""
def Dict(self):
d = {}
d.update(self.otherkw)
d["Border"] = self.Border
d["Rect"] = self.Rect
d["Contents"] = self.Contents
d["Subtype"] = "/Link"
d["Dest"] = self.Destination
return self.AnnotationDict(**d)
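# Example (hedged sketch; names and coordinates are hypothetical): a link
# over a rectangle that jumps to a page shown "fit to window":
#
#   pageref = document.Reference(page)                  # hypothetical page
#   ann = LinkAnnotation(Rect=(71, 717, 190, 734),
#                        Contents="chapter 1",
#                        Destination=PDFArray([pageref, PDFName("Fit")]))
#   # compare LinkAnnotation.dummyDictString above for the raw PDF form;
#   # the page's Annots array would then include document.Reference(ann).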
# skipping names tree
# skipping actions
# skipping names trees
# skipping to chapter 7
class PDFRectangle:
__PDFObject__ = True
def __init__(self, llx, lly, urx, ury):
        self.llx, self.lly, self.urx, self.ury = llx, lly, urx, ury
def format(self, document):
        A = PDFArray([self.llx, self.lly, self.urx, self.ury])
return format(A, document)
_NOWT=None
def _getTimeStamp():
global _NOWT
if not _NOWT:
import time
_NOWT = time.time()
return _NOWT
class PDFDate:
__PDFObject__ = True
    # GMT offset now supported properly
def __init__(self, invariant=rl_config.invariant, dateFormatter=None):
if invariant:
now = (2000,01,01,00,00,00,0)
self.dhh = 0
self.dmm = 0
else:
import time
now = tuple(time.localtime(_getTimeStamp())[:6])
from time import timezone
self.dhh = int(timezone / (3600.0))
            self.dmm = (timezone % 3600) / 60 # minutes of the offset, not a seconds remainder
self.date = now[:6]
self.dateFormatter = dateFormatter
def format(self, doc):
dfmt = self.dateFormatter or (
lambda yyyy,mm,dd,hh,m,s:
"D:%04d%02d%02d%02d%02d%02d%+03d'%02d'"
% (yyyy,mm,dd,hh,m,s,self.dhh,self.dmm))
return format(PDFString(dfmt(*self.date)), doc)
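# Sketch of the output (derived from the default formatter above): with
# invariant=1 the date is pinned to 2000-01-01 00:00:00 with a zero offset,
# so format() yields the PDFString (D:20000101000000+00'00').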
class Destination:
"""
    not a pdfobject! This is a placeholder that delegates
to a pdf object only after it has been defined by the methods
below.
EG a Destination can refer to Appendix A before it has been
defined, but only if Appendix A is explicitly noted as a destination
and resolved before the document is generated...
For example the following sequence causes resolution before doc generation.
        d = Destination(name)
d.fit() # or other format defining method call
d.setPage(p)
(at present setPageRef is called on generation of the page).
"""
__PDFObject__ = True
representation = format = page = None
def __init__(self,name):
self.name = name
self.fmt = self.page = None
def format(self, document):
f = self.fmt
if f is None: raise ValueError, "format not resolved %s" % self.name
p = self.page
if p is None: raise ValueError, "Page reference unbound %s" % self.name
f.page = p
return f.format(document)
def xyz(self, left, top, zoom): # see pdfspec mar 11 99 pp184+
self.fmt = PDFDestinationXYZ(None, left, top, zoom)
def fit(self):
self.fmt = PDFDestinationFit(None)
def fitb(self):
self.fmt = PDFDestinationFitB(None)
def fith(self, top):
self.fmt = PDFDestinationFitH(None,top)
def fitv(self, left):
self.fmt = PDFDestinationFitV(None, left)
def fitbh(self, top):
self.fmt = PDFDestinationFitBH(None, top)
def fitbv(self, left):
self.fmt = PDFDestinationFitBV(None, left)
def fitr(self, left, bottom, right, top):
self.fmt = PDFDestinationFitR(None, left, bottom, right, top)
def setPage(self, page):
self.page = page
#self.fmt.page = page # may not yet be defined!
class PDFDestinationXYZ:
__PDFObject__ = True
typename = "XYZ"
def __init__(self, page, left, top, zoom):
self.page = page
self.top = top
self.zoom = zoom
self.left = left
def format(self, document):
pageref = document.Reference(self.page)
A = PDFArray( [ pageref, PDFName(self.typename), self.left, self.top, self.zoom ] )
return format(A, document)
class PDFDestinationFit:
__PDFObject__ = True
typename = "Fit"
def __init__(self, page):
self.page = page
def format(self, document):
pageref = document.Reference(self.page)
A = PDFArray( [ pageref, PDFName(self.typename) ] )
return format(A, document)
class PDFDestinationFitB(PDFDestinationFit):
typename = "FitB"
class PDFDestinationFitH:
__PDFObject__ = True
typename = "FitH"
def __init__(self, page, top):
self.page = page; self.top=top
def format(self, document):
pageref = document.Reference(self.page)
A = PDFArray( [ pageref, PDFName(self.typename), self.top ] )
return format(A, document)
class PDFDestinationFitBH(PDFDestinationFitH):
typename = "FitBH"
class PDFDestinationFitV:
__PDFObject__ = True
typename = "FitV"
def __init__(self, page, left):
self.page = page; self.left=left
def format(self, document):
pageref = document.Reference(self.page)
A = PDFArray( [ pageref, PDFName(self.typename), self.left ] )
return format(A, document)
class PDFDestinationFitBV(PDFDestinationFitV):
typename = "FitBV"
class PDFDestinationFitR:
__PDFObject__ = True
typename = "FitR"
def __init__(self, page, left, bottom, right, top):
self.page = page; self.left=left; self.bottom=bottom; self.right=right; self.top=top
def format(self, document):
pageref = document.Reference(self.page)
A = PDFArray( [ pageref, PDFName(self.typename), self.left, self.bottom, self.right, self.top] )
return format(A, document)
# named destinations need nothing
# skipping filespecs
class PDFResourceDictionary:
"""each element *could* be reset to a reference if desired"""
__PDFObject__ = True
def __init__(self):
self.ColorSpace = {}
self.XObject = {}
self.ExtGState = {}
self.Font = {}
self.Pattern = {}
self.ProcSet = []
self.Properties = {}
self.Shading = {}
# ?by default define the basicprocs
self.basicProcs()
stdprocs = map(PDFName, string.split("PDF Text ImageB ImageC ImageI"))
dict_attributes = ("ColorSpace", "XObject", "ExtGState", "Font", "Pattern", "Properties", "Shading")
def allProcs(self):
# define all standard procsets
self.ProcSet = self.stdprocs
def basicProcs(self):
self.ProcSet = self.stdprocs[:2] # just PDF and Text
def basicFonts(self):
self.Font = PDFObjectReference(BasicFonts)
def setColorSpace(self,colorsUsed):
for c,s in colorsUsed.iteritems():
self.ColorSpace[s] = PDFObjectReference(c)
def format(self, document):
D = {}
from types import ListType, DictType
for dname in self.dict_attributes:
v = getattr(self, dname)
if type(v) is DictType:
if v:
dv = PDFDictionary(v)
D[dname] = dv
else:
D[dname] = v
v = self.ProcSet
dname = "ProcSet"
if type(v) is ListType:
if v:
dv = PDFArray(v)
D[dname] = dv
else:
D[dname] = v
DD = PDFDictionary(D)
return format(DD, document)
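# Example (sketch): a minimal resource dictionary for a text-and-image page;
# this mirrors the conventions used by PDFPage.check_format above.
#
#   rd = PDFResourceDictionary()   # starts with the basic /PDF /Text procsets
#   rd.allProcs()                  # widen to all five standard procsets
#   rd.basicFonts()                # /Font -> reference to the shared BasicFonts
#   # rd.format(document) -> something like
#   #   "<< /Font NN 0 R /ProcSet [ /PDF /Text /ImageB /ImageC /ImageI ] >>"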
##############################################################################
#
# Font objects - the PDFDocument.addFont() method knows which of these
# to construct when given a user-facing Font object
#
##############################################################################
class PDFType1Font:
"""no init: set attributes explicitly"""
__PDFObject__ = True
__RefOnly__ = 1
# note! /Name appears to be an undocumented attribute....
name_attributes = string.split("Type Subtype BaseFont Name")
Type = "Font"
Subtype = "Type1"
# these attributes are assumed to already be of the right type
local_attributes = string.split("FirstChar LastChar Widths Encoding ToUnicode FontDescriptor")
def format(self, document):
D = {}
for name in self.name_attributes:
if hasattr(self, name):
value = getattr(self, name)
D[name] = PDFName(value)
for name in self.local_attributes:
if hasattr(self, name):
value = getattr(self, name)
D[name] = value
#print D
PD = PDFDictionary(D)
return PD.format(document)
## These attribute listings will be useful in future, even if we
## put them elsewhere
class PDFTrueTypeFont(PDFType1Font):
Subtype = "TrueType"
#local_attributes = string.split("FirstChar LastChar Widths Encoding ToUnicode FontDescriptor") #same
##class PDFMMType1Font(PDFType1Font):
## Subtype = "MMType1"
##
##class PDFType3Font(PDFType1Font):
## Subtype = "Type3"
## local_attributes = string.split(
## "FirstChar LastChar Widths CharProcs FontBBox FontMatrix Resources Encoding")
##
##class PDFType0Font(PDFType1Font):
## Subtype = "Type0"
## local_attributes = string.split(
## "DescendantFonts Encoding")
##
##class PDFCIDFontType0(PDFType1Font):
## Subtype = "CIDFontType0"
## local_attributes = string.split(
## "CIDSystemInfo FontDescriptor DW W DW2 W2 Registry Ordering Supplement")
##
##class PDFCIDFontType0(PDFType1Font):
## Subtype = "CIDFontType2"
## local_attributes = string.split(
## "BaseFont CIDToGIDMap CIDSystemInfo FontDescriptor DW W DW2 W2")
##
##class PDFEncoding(PDFType1Font):
## Type = "Encoding"
## name_attributes = string.split("Type BaseEncoding")
## # these attributes are assumed to already be of the right type
## local_attributes = ["Differences"]
##
# UGLY ALERT - this needs turning into something O-O, it was hacked
# across from the pdfmetrics.Encoding class to avoid circularity
# skipping CMaps
class PDFFormXObject:
# like page requires .info set by some higher level (doc)
# XXXX any resource used in a form must be propagated up to the page that (recursively) uses
# the form!! (not implemented yet).
__PDFObject__ = True
XObjects = Annots = BBox = Matrix = Contents = stream = Resources = None
hasImages = 1 # probably should change
compression = 0
def __init__(self, lowerx, lowery, upperx, uppery):
#not done
self.lowerx = lowerx; self.lowery=lowery; self.upperx=upperx; self.uppery=uppery
def setStreamList(self, data):
if type(data) is types.ListType:
data = string.join(data, LINEEND)
self.stream = data
def BBoxList(self):
"get the declared bounding box for the form as a list"
if self.BBox:
return list(self.BBox.sequence)
else:
return [self.lowerx, self.lowery, self.upperx, self.uppery]
def format(self, document):
self.BBox = self.BBox or PDFArray([self.lowerx, self.lowery, self.upperx, self.uppery])
self.Matrix = self.Matrix or PDFArray([1, 0, 0, 1, 0, 0])
if not self.Annots:
self.Annots = None
else:
#these must be transferred to the page when the form is used
raise ValueError, "annotations not reimplemented yet"
if not self.Contents:
stream = self.stream
if not stream:
self.Contents = teststream()
else:
S = PDFStream()
S.content = stream
# need to add filter stuff (?)
S.__Comment__ = "xobject form stream"
self.Contents = S
if not self.Resources:
resources = PDFResourceDictionary()
# fonts!
resources.basicFonts()
if self.hasImages:
resources.allProcs()
else:
resources.basicProcs()
if self.XObjects:
#print "XObjects", self.XObjects.dict
resources.XObject = self.XObjects
self.Resources=resources
if self.compression:
self.Contents.filters = rl_config.useA85 and [PDFBase85Encode, PDFZCompress] or [PDFZCompress]
sdict = self.Contents.dictionary
sdict["Type"] = PDFName("XObject")
sdict["Subtype"] = PDFName("Form")
sdict["FormType"] = 1
sdict["BBox"] = self.BBox
sdict["Matrix"] = self.Matrix
sdict["Resources"] = self.Resources
return self.Contents.format(document)
class PDFPostScriptXObject:
"For embedding PD (e.g. tray commands) in PDF"
__PDFObject__ = True
def __init__(self, content=None):
self.content = content
def format(self, document):
S = PDFStream()
S.content = self.content
S.__Comment__ = "xobject postscript stream"
sdict = S.dictionary
sdict["Type"] = PDFName("XObject")
sdict["Subtype"] = PDFName("PS")
return S.format(document)
_mode2CS={'RGB':'DeviceRGB', 'L':'DeviceGray', 'CMYK':'DeviceCMYK'}
class PDFImageXObject:
# first attempts at a hard-coded one
# in the file, Image XObjects are stream objects. We already
# have a PDFStream object with 3 attributes: dictionary, content
# and filters. So the job of this thing is to construct the
# right PDFStream instance and ask it to format itself.
__PDFObject__ = True
def __init__(self, name, source=None, mask=None):
self.name = name
self.width = 24
self.height = 23
self.bitsPerComponent = 1
self.colorSpace = 'DeviceGray'
self._filters = rl_config.useA85 and ('ASCII85Decode',) or ()
self.streamContent = """
003B00 002700 002480 0E4940 114920 14B220 3CB650
75FE88 17FF8C 175F14 1C07E2 3803C4 703182 F8EDFC
B2BBC2 BB6F84 31BFC2 18EA3C 0E3E00 07FC00 03F800
1E1800 1FF800>
"""
self.mask = mask
if source is None:
pass # use the canned one.
elif hasattr(source,'jpeg_fh'):
self.loadImageFromSRC(source) #it is already a PIL Image
else:
# it is a filename
import os
ext = string.lower(os.path.splitext(source)[1])
src = open_for_read(source)
if not(ext in ('.jpg', '.jpeg') and self.loadImageFromJPEG(src)):
if rl_config.useA85:
self.loadImageFromA85(src)
else:
self.loadImageFromRaw(src)
def loadImageFromA85(self,source):
IMG=[]
imagedata = map(string.strip,pdfutils.makeA85Image(source,IMG=IMG))
words = string.split(imagedata[1])
self.width, self.height = map(string.atoi,(words[1],words[3]))
self.colorSpace = {'/RGB':'DeviceRGB', '/G':'DeviceGray', '/CMYK':'DeviceCMYK'}[words[7]]
self.bitsPerComponent = 8
self._filters = 'ASCII85Decode','FlateDecode' #'A85','Fl'
if IMG: self._checkTransparency(IMG[0])
elif self.mask=='auto': self.mask = None
self.streamContent = string.join(imagedata[3:-1],'')
def loadImageFromJPEG(self,imageFile):
try:
try:
info = pdfutils.readJPEGInfo(imageFile)
finally:
imageFile.seek(0) #reset file pointer
except:
return False
self.width, self.height = info[0], info[1]
self.bitsPerComponent = 8
if info[2] == 1:
self.colorSpace = 'DeviceGray'
elif info[2] == 3:
self.colorSpace = 'DeviceRGB'
else: #maybe should generate an error, is this right for CMYK?
self.colorSpace = 'DeviceCMYK'
self._dotrans = 1
self.streamContent = imageFile.read()
if rl_config.useA85:
self.streamContent = pdfutils._AsciiBase85Encode(self.streamContent)
self._filters = 'ASCII85Decode','DCTDecode' #'A85','DCT'
else:
self._filters = 'DCTDecode', #'DCT'
self.mask = None
return True
def loadImageFromRaw(self,source):
IMG=[]
imagedata = pdfutils.makeRawImage(source,IMG=IMG)
words = string.split(imagedata[1])
self.width, self.height = map(string.atoi,(words[1],words[3]))
self.colorSpace = {'/RGB':'DeviceRGB', '/G':'DeviceGray', '/CMYK':'DeviceCMYK'}[words[7]]
self.bitsPerComponent = 8
self._filters = 'FlateDecode', #'Fl'
if IMG: self._checkTransparency(IMG[0])
elif self.mask=='auto': self.mask = None
self.streamContent = string.join(imagedata[3:-1],'')
def _checkTransparency(self,im):
if self.mask=='auto':
if im._dataA:
self.mask = None
self._smask = PDFImageXObject(_digester(im._dataA.getRGBData()),im._dataA,mask=None)
self._smask._decode = [0,1]
else:
tc = im.getTransparent()
if tc:
self.mask = (tc[0], tc[0], tc[1], tc[1], tc[2], tc[2])
else:
self.mask = None
elif hasattr(self.mask,'rgb'):
_ = self.mask.rgb()
self.mask = _[0],_[0],_[1],_[1],_[2],_[2]
def loadImageFromSRC(self, im):
"Extracts the stream, width and height"
fp = im.jpeg_fh()
if fp:
self.loadImageFromJPEG(fp)
else:
zlib = import_zlib()
if not zlib: return
self.width, self.height = im.getSize()
raw = im.getRGBData()
#assert len(raw) == self.width*self.height, "Wrong amount of data for image expected %sx%s=%s got %s" % (self.width,self.height,self.width*self.height,len(raw))
self.streamContent = zlib.compress(raw)
if rl_config.useA85:
self.streamContent = pdfutils._AsciiBase85Encode(self.streamContent)
self._filters = 'ASCII85Decode','FlateDecode' #'A85','Fl'
else:
self._filters = 'FlateDecode', #'Fl'
self.colorSpace= _mode2CS[im.mode]
self.bitsPerComponent = 8
self._checkTransparency(im)
def format(self, document):
S = PDFStream(content = self.streamContent)
dict = S.dictionary
dict["Type"] = PDFName("XObject")
dict["Subtype"] = PDFName("Image")
dict["Width"] = self.width
dict["Height"] = self.height
dict["BitsPerComponent"] = self.bitsPerComponent
dict["ColorSpace"] = PDFName(self.colorSpace)
if self.colorSpace=='DeviceCMYK' and getattr(self,'_dotrans',0):
dict["Decode"] = PDFArray([1,0,1,0,1,0,1,0])
elif getattr(self,'_decode',None):
dict["Decode"] = PDFArray(self._decode)
dict["Filter"] = PDFArray(map(PDFName,self._filters))
dict["Length"] = len(self.streamContent)
if self.mask: dict["Mask"] = PDFArray(self.mask)
if getattr(self,'smask',None): dict["SMask"] = self.smask
return S.format(document)
class PDFSeparationCMYKColor:
def __init__(self, cmyk):
from reportlab.lib.colors import CMYKColor
if not isinstance(cmyk,CMYKColor):
raise ValueError('%s needs a CMYKColor argument' % self.__class__.__name__)
elif not cmyk.spotName:
raise ValueError('%s needs a CMYKColor argument with a spotName' % self.__class__.__name__)
self.cmyk = cmyk
def _makeFuncPS(self):
'''create the postscript code for the tint transfer function
effectively this is tint*c, tint*y, ... tint*k'''
R = [].append
for i,v in enumerate(self.cmyk.cmyk()):
v=float(v)
if i==3:
if v==0.0:
R('pop')
R('0.0')
else:
R(str(v))
R('mul')
else:
if v==0:
R('0.0')
else:
R('dup')
R(str(v))
R('mul')
R('exch')
return '{%s}' % (' '.join(R.__self__))
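    # Worked example (hypothetical values): for cmyk == (0.2, 0, 0.4, 0.1)
    # the generated function is
    #   {dup 0.2 mul exch 0.0 exch dup 0.4 mul exch 0.1 mul}
    # i.e. it maps tint -> (0.2*tint, 0.0, 0.4*tint, 0.1*tint) on the stack.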
def value(self):
return PDFArrayCompact((
PDFName('Separation'),
PDFName(self.cmyk.spotName),
PDFName('DeviceCMYK'),
PDFStream(
dictionary=PDFDictionary(dict(
FunctionType=4,
Domain=PDFArrayCompact((0,1)),
Range=PDFArrayCompact((0,1,0,1,0,1,0,1))
)),
content=self._makeFuncPS(),
filters=None,#[PDFBase85Encode, PDFZCompress],
)
))
if __name__=="__main__":
print "There is no script interpretation for pdfdoc."
| apache-2.0 |
zxwing/ansible | lib/ansible/runner/lookup_plugins/pipe.py | 162 | 1951 | # (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import subprocess
from ansible import utils, errors
class LookupModule(object):
def __init__(self, basedir=None, **kwargs):
self.basedir = basedir
def run(self, terms, inject=None, **kwargs):
terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
if isinstance(terms, basestring):
terms = [ terms ]
ret = []
for term in terms:
'''
http://docs.python.org/2/library/subprocess.html#popen-constructor
The shell argument (which defaults to False) specifies whether to use the
shell as the program to execute. If shell is True, it is recommended to pass
args as a string rather than as a sequence
https://github.com/ansible/ansible/issues/6550
'''
term = str(term)
p = subprocess.Popen(term, cwd=self.basedir, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode == 0:
ret.append(stdout.decode("utf-8").rstrip())
else:
raise errors.AnsibleError("lookup_plugin.pipe(%s) returned %d" % (term, p.returncode))
return ret
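# Usage sketch (illustrative, not part of this file): in a playbook the
# plugin is reached through the lookup() templating function, e.g.
#   {{ lookup('pipe', 'date +%Y-%m-%d') }}
# which runs the command through the shell and returns its stripped stdout.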
| gpl-3.0 |
bfontaine/web-pp | pp/unidecode/x09f.py | 252 | 4509 | data = (
'Cu ', # 0x00
'Qu ', # 0x01
'Chao ', # 0x02
'Wa ', # 0x03
'Zhu ', # 0x04
'Zhi ', # 0x05
'Mang ', # 0x06
'Ao ', # 0x07
'Bie ', # 0x08
'Tuo ', # 0x09
'Bi ', # 0x0a
'Yuan ', # 0x0b
'Chao ', # 0x0c
'Tuo ', # 0x0d
'Ding ', # 0x0e
'Mi ', # 0x0f
'Nai ', # 0x10
'Ding ', # 0x11
'Zi ', # 0x12
'Gu ', # 0x13
'Gu ', # 0x14
'Dong ', # 0x15
'Fen ', # 0x16
'Tao ', # 0x17
'Yuan ', # 0x18
'Pi ', # 0x19
'Chang ', # 0x1a
'Gao ', # 0x1b
'Qi ', # 0x1c
'Yuan ', # 0x1d
'Tang ', # 0x1e
'Teng ', # 0x1f
'Shu ', # 0x20
'Shu ', # 0x21
'Fen ', # 0x22
'Fei ', # 0x23
'Wen ', # 0x24
'Ba ', # 0x25
'Diao ', # 0x26
'Tuo ', # 0x27
'Tong ', # 0x28
'Qu ', # 0x29
'Sheng ', # 0x2a
'Shi ', # 0x2b
'You ', # 0x2c
'Shi ', # 0x2d
'Ting ', # 0x2e
'Wu ', # 0x2f
'Nian ', # 0x30
'Jing ', # 0x31
'Hun ', # 0x32
'Ju ', # 0x33
'Yan ', # 0x34
'Tu ', # 0x35
'Ti ', # 0x36
'Xi ', # 0x37
'Xian ', # 0x38
'Yan ', # 0x39
'Lei ', # 0x3a
'Bi ', # 0x3b
'Yao ', # 0x3c
'Qiu ', # 0x3d
'Han ', # 0x3e
'Wu ', # 0x3f
'Wu ', # 0x40
'Hou ', # 0x41
'Xi ', # 0x42
'Ge ', # 0x43
'Zha ', # 0x44
'Xiu ', # 0x45
'Weng ', # 0x46
'Zha ', # 0x47
'Nong ', # 0x48
'Nang ', # 0x49
'Qi ', # 0x4a
'Zhai ', # 0x4b
'Ji ', # 0x4c
'Zi ', # 0x4d
'Ji ', # 0x4e
'Ji ', # 0x4f
'Qi ', # 0x50
'Ji ', # 0x51
'Chi ', # 0x52
'Chen ', # 0x53
'Chen ', # 0x54
'He ', # 0x55
'Ya ', # 0x56
'Ken ', # 0x57
'Xie ', # 0x58
'Pao ', # 0x59
'Cuo ', # 0x5a
'Shi ', # 0x5b
'Zi ', # 0x5c
'Chi ', # 0x5d
'Nian ', # 0x5e
'Ju ', # 0x5f
'Tiao ', # 0x60
'Ling ', # 0x61
'Ling ', # 0x62
'Chu ', # 0x63
'Quan ', # 0x64
'Xie ', # 0x65
'Ken ', # 0x66
'Nie ', # 0x67
'Jiu ', # 0x68
'Yao ', # 0x69
'Chuo ', # 0x6a
'Kun ', # 0x6b
'Yu ', # 0x6c
'Chu ', # 0x6d
'Yi ', # 0x6e
'Ni ', # 0x6f
'Cuo ', # 0x70
'Zou ', # 0x71
'Qu ', # 0x72
'Nen ', # 0x73
'Xian ', # 0x74
'Ou ', # 0x75
'E ', # 0x76
'Wo ', # 0x77
'Yi ', # 0x78
'Chuo ', # 0x79
'Zou ', # 0x7a
'Dian ', # 0x7b
'Chu ', # 0x7c
'Jin ', # 0x7d
'Ya ', # 0x7e
'Chi ', # 0x7f
'Chen ', # 0x80
'He ', # 0x81
'Ken ', # 0x82
'Ju ', # 0x83
'Ling ', # 0x84
'Pao ', # 0x85
'Tiao ', # 0x86
'Zi ', # 0x87
'Ken ', # 0x88
'Yu ', # 0x89
'Chuo ', # 0x8a
'Qu ', # 0x8b
'Wo ', # 0x8c
'Long ', # 0x8d
'Pang ', # 0x8e
'Gong ', # 0x8f
'Pang ', # 0x90
'Yan ', # 0x91
'Long ', # 0x92
'Long ', # 0x93
'Gong ', # 0x94
'Kan ', # 0x95
'Ta ', # 0x96
'Ling ', # 0x97
'Ta ', # 0x98
'Long ', # 0x99
'Gong ', # 0x9a
'Kan ', # 0x9b
'Gui ', # 0x9c
'Qiu ', # 0x9d
'Bie ', # 0x9e
'Gui ', # 0x9f
'Yue ', # 0xa0
'Chui ', # 0xa1
'He ', # 0xa2
'Jue ', # 0xa3
'Xie ', # 0xa4
'Yu ', # 0xa5
'[?]', # 0xa6
'[?]', # 0xa7
'[?]', # 0xa8
'[?]', # 0xa9
'[?]', # 0xaa
'[?]', # 0xab
'[?]', # 0xac
'[?]', # 0xad
'[?]', # 0xae
'[?]', # 0xaf
'[?]', # 0xb0
'[?]', # 0xb1
'[?]', # 0xb2
'[?]', # 0xb3
'[?]', # 0xb4
'[?]', # 0xb5
'[?]', # 0xb6
'[?]', # 0xb7
'[?]', # 0xb8
'[?]', # 0xb9
'[?]', # 0xba
'[?]', # 0xbb
'[?]', # 0xbc
'[?]', # 0xbd
'[?]', # 0xbe
'[?]', # 0xbf
'[?]', # 0xc0
'[?]', # 0xc1
'[?]', # 0xc2
'[?]', # 0xc3
'[?]', # 0xc4
'[?]', # 0xc5
'[?]', # 0xc6
'[?]', # 0xc7
'[?]', # 0xc8
'[?]', # 0xc9
'[?]', # 0xca
'[?]', # 0xcb
'[?]', # 0xcc
'[?]', # 0xcd
'[?]', # 0xce
'[?]', # 0xcf
'[?]', # 0xd0
'[?]', # 0xd1
'[?]', # 0xd2
'[?]', # 0xd3
'[?]', # 0xd4
'[?]', # 0xd5
'[?]', # 0xd6
'[?]', # 0xd7
'[?]', # 0xd8
'[?]', # 0xd9
'[?]', # 0xda
'[?]', # 0xdb
'[?]', # 0xdc
'[?]', # 0xdd
'[?]', # 0xde
'[?]', # 0xdf
'[?]', # 0xe0
'[?]', # 0xe1
'[?]', # 0xe2
'[?]', # 0xe3
'[?]', # 0xe4
'[?]', # 0xe5
'[?]', # 0xe6
'[?]', # 0xe7
'[?]', # 0xe8
'[?]', # 0xe9
'[?]', # 0xea
'[?]', # 0xeb
'[?]', # 0xec
'[?]', # 0xed
'[?]', # 0xee
'[?]', # 0xef
'[?]', # 0xf0
'[?]', # 0xf1
'[?]', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
| mit |
chiefspace/udemy-rest-api | udemy_rest_api_section5/env/lib/python3.4/site-packages/pip/vcs/git.py | 473 | 7898 | import tempfile
import re
import os.path
from pip.util import call_subprocess
from pip.util import display_path, rmtree
from pip.vcs import vcs, VersionControl
from pip.log import logger
from pip.backwardcompat import url2pathname, urlparse
urlsplit = urlparse.urlsplit
urlunsplit = urlparse.urlunsplit
class Git(VersionControl):
name = 'git'
dirname = '.git'
repo_name = 'clone'
schemes = ('git', 'git+http', 'git+https', 'git+ssh', 'git+git', 'git+file')
bundle_file = 'git-clone.txt'
    guide = ('# This was a Git repo; to make it a repo again run:\n'
        'git init\ngit remote add origin %(url)s -f\ngit checkout -q %(rev)s\n') # '-q' keeps the line parseable by parse_vcs_bundle_file
def __init__(self, url=None, *args, **kwargs):
# Works around an apparent Git bug
# (see http://article.gmane.org/gmane.comp.version-control.git/146500)
if url:
scheme, netloc, path, query, fragment = urlsplit(url)
if scheme.endswith('file'):
initial_slashes = path[:-len(path.lstrip('/'))]
newpath = initial_slashes + url2pathname(path).replace('\\', '/').lstrip('/')
url = urlunsplit((scheme, netloc, newpath, query, fragment))
after_plus = scheme.find('+') + 1
url = scheme[:after_plus] + urlunsplit((scheme[after_plus:], netloc, newpath, query, fragment))
super(Git, self).__init__(url, *args, **kwargs)
def parse_vcs_bundle_file(self, content):
url = rev = None
for line in content.splitlines():
if not line.strip() or line.strip().startswith('#'):
continue
url_match = re.search(r'git\s*remote\s*add\s*origin(.*)\s*-f', line)
if url_match:
url = url_match.group(1).strip()
rev_match = re.search(r'^git\s*checkout\s*-q\s*(.*)\s*', line)
if rev_match:
rev = rev_match.group(1).strip()
if url and rev:
return url, rev
return None, None
def export(self, location):
"""Export the Git repository at the url to the destination location"""
temp_dir = tempfile.mkdtemp('-export', 'pip-')
self.unpack(temp_dir)
try:
if not location.endswith('/'):
location = location + '/'
call_subprocess(
[self.cmd, 'checkout-index', '-a', '-f', '--prefix', location],
filter_stdout=self._filter, show_stdout=False, cwd=temp_dir)
finally:
rmtree(temp_dir)
def check_rev_options(self, rev, dest, rev_options):
"""Check the revision options before checkout to compensate that tags
and branches may need origin/ as a prefix.
Returns the SHA1 of the branch or tag if found.
"""
revisions = self.get_refs(dest)
origin_rev = 'origin/%s' % rev
if origin_rev in revisions:
# remote branch
return [revisions[origin_rev]]
elif rev in revisions:
# a local tag or branch name
return [revisions[rev]]
else:
logger.warn("Could not find a tag or branch '%s', assuming commit." % rev)
return rev_options
def switch(self, dest, url, rev_options):
call_subprocess(
[self.cmd, 'config', 'remote.origin.url', url], cwd=dest)
call_subprocess(
[self.cmd, 'checkout', '-q'] + rev_options, cwd=dest)
self.update_submodules(dest)
def update(self, dest, rev_options):
# First fetch changes from the default remote
call_subprocess([self.cmd, 'fetch', '-q'], cwd=dest)
        # Then reset to the wanted revision (maybe even origin/master)
if rev_options:
rev_options = self.check_rev_options(rev_options[0], dest, rev_options)
call_subprocess([self.cmd, 'reset', '--hard', '-q'] + rev_options, cwd=dest)
#: update submodules
self.update_submodules(dest)
def obtain(self, dest):
url, rev = self.get_url_rev()
if rev:
rev_options = [rev]
rev_display = ' (to %s)' % rev
else:
rev_options = ['origin/master']
rev_display = ''
if self.check_destination(dest, url, rev_options, rev_display):
logger.notify('Cloning %s%s to %s' % (url, rev_display, display_path(dest)))
call_subprocess([self.cmd, 'clone', '-q', url, dest])
#: repo may contain submodules
self.update_submodules(dest)
if rev:
rev_options = self.check_rev_options(rev, dest, rev_options)
# Only do a checkout if rev_options differs from HEAD
if not self.get_revision(dest).startswith(rev_options[0]):
call_subprocess([self.cmd, 'checkout', '-q'] + rev_options, cwd=dest)
def get_url(self, location):
url = call_subprocess(
[self.cmd, 'config', 'remote.origin.url'],
show_stdout=False, cwd=location)
return url.strip()
def get_revision(self, location):
current_rev = call_subprocess(
[self.cmd, 'rev-parse', 'HEAD'], show_stdout=False, cwd=location)
return current_rev.strip()
def get_refs(self, location):
"""Return map of named refs (branches or tags) to commit hashes."""
output = call_subprocess([self.cmd, 'show-ref'],
show_stdout=False, cwd=location)
rv = {}
for line in output.strip().splitlines():
commit, ref = line.split(' ', 1)
ref = ref.strip()
ref_name = None
if ref.startswith('refs/remotes/'):
ref_name = ref[len('refs/remotes/'):]
elif ref.startswith('refs/heads/'):
ref_name = ref[len('refs/heads/'):]
elif ref.startswith('refs/tags/'):
ref_name = ref[len('refs/tags/'):]
if ref_name is not None:
rv[ref_name] = commit.strip()
return rv
def get_src_requirement(self, dist, location, find_tags):
repo = self.get_url(location)
if not repo.lower().startswith('git:'):
repo = 'git+' + repo
egg_project_name = dist.egg_name().split('-', 1)[0]
if not repo:
return None
current_rev = self.get_revision(location)
refs = self.get_refs(location)
# refs maps names to commit hashes; we need the inverse
# if multiple names map to a single commit, this arbitrarily picks one
names_by_commit = dict((commit, ref) for ref, commit in refs.items())
if current_rev in names_by_commit:
# It's a tag
full_egg_name = '%s-%s' % (egg_project_name, names_by_commit[current_rev])
else:
full_egg_name = '%s-dev' % egg_project_name
return '%s@%s#egg=%s' % (repo, current_rev, full_egg_name)
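    # Example (hypothetical values): a checkout of
    # https://github.com/example/project at a commit tagged v1.0 yields
    #   git+https://github.com/example/project@<sha>#egg=project-v1.0
    # while an untagged commit falls back to the '<name>-dev' egg suffix.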
def get_url_rev(self):
"""
Prefixes stub URLs like 'user@hostname:user/repo.git' with 'ssh://'.
        That's required because although they use SSH they sometimes don't
        work with an ssh:// scheme (e.g. GitHub). But we need a scheme for
parsing. Hence we remove it again afterwards and return it as a stub.
"""
if not '://' in self.url:
assert not 'file:' in self.url
self.url = self.url.replace('git+', 'git+ssh://')
url, rev = super(Git, self).get_url_rev()
url = url.replace('ssh://', '')
else:
url, rev = super(Git, self).get_url_rev()
return url, rev
def update_submodules(self, location):
if not os.path.exists(os.path.join(location, '.gitmodules')):
return
call_subprocess([self.cmd, 'submodule', 'update', '--init', '--recursive', '-q'],
cwd=location)
vcs.register(Git)
| gpl-2.0 |
spencerlyon2/pygments | pygments/lexers/_clbuiltins.py | 2 | 14050 | # -*- coding: utf-8 -*-
"""
pygments.lexers._clbuiltins
~~~~~~~~~~~~~~~~~~~~~~~~~~~
ANSI Common Lisp builtins.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
BUILTIN_FUNCTIONS = set(( # 638 functions
'<', '<=', '=', '>', '>=', '-', '/', '/=', '*', '+', '1-', '1+',
'abort', 'abs', 'acons', 'acos', 'acosh', 'add-method', 'adjoin',
'adjustable-array-p', 'adjust-array', 'allocate-instance',
'alpha-char-p', 'alphanumericp', 'append', 'apply', 'apropos',
'apropos-list', 'aref', 'arithmetic-error-operands',
'arithmetic-error-operation', 'array-dimension', 'array-dimensions',
'array-displacement', 'array-element-type', 'array-has-fill-pointer-p',
'array-in-bounds-p', 'arrayp', 'array-rank', 'array-row-major-index',
'array-total-size', 'ash', 'asin', 'asinh', 'assoc', 'assoc-if',
'assoc-if-not', 'atan', 'atanh', 'atom', 'bit', 'bit-and', 'bit-andc1',
'bit-andc2', 'bit-eqv', 'bit-ior', 'bit-nand', 'bit-nor', 'bit-not',
'bit-orc1', 'bit-orc2', 'bit-vector-p', 'bit-xor', 'boole',
'both-case-p', 'boundp', 'break', 'broadcast-stream-streams',
'butlast', 'byte', 'byte-position', 'byte-size', 'caaaar', 'caaadr',
'caaar', 'caadar', 'caaddr', 'caadr', 'caar', 'cadaar', 'cadadr',
'cadar', 'caddar', 'cadddr', 'caddr', 'cadr', 'call-next-method', 'car',
'cdaaar', 'cdaadr', 'cdaar', 'cdadar', 'cdaddr', 'cdadr', 'cdar',
'cddaar', 'cddadr', 'cddar', 'cdddar', 'cddddr', 'cdddr', 'cddr', 'cdr',
'ceiling', 'cell-error-name', 'cerror', 'change-class', 'char', 'char<',
'char<=', 'char=', 'char>', 'char>=', 'char/=', 'character',
'characterp', 'char-code', 'char-downcase', 'char-equal',
'char-greaterp', 'char-int', 'char-lessp', 'char-name',
'char-not-equal', 'char-not-greaterp', 'char-not-lessp', 'char-upcase',
'cis', 'class-name', 'class-of', 'clear-input', 'clear-output',
'close', 'clrhash', 'code-char', 'coerce', 'compile',
'compiled-function-p', 'compile-file', 'compile-file-pathname',
'compiler-macro-function', 'complement', 'complex', 'complexp',
'compute-applicable-methods', 'compute-restarts', 'concatenate',
'concatenated-stream-streams', 'conjugate', 'cons', 'consp',
'constantly', 'constantp', 'continue', 'copy-alist', 'copy-list',
'copy-pprint-dispatch', 'copy-readtable', 'copy-seq', 'copy-structure',
'copy-symbol', 'copy-tree', 'cos', 'cosh', 'count', 'count-if',
'count-if-not', 'decode-float', 'decode-universal-time', 'delete',
'delete-duplicates', 'delete-file', 'delete-if', 'delete-if-not',
'delete-package', 'denominator', 'deposit-field', 'describe',
'describe-object', 'digit-char', 'digit-char-p', 'directory',
'directory-namestring', 'disassemble', 'documentation', 'dpb',
'dribble', 'echo-stream-input-stream', 'echo-stream-output-stream',
'ed', 'eighth', 'elt', 'encode-universal-time', 'endp',
'enough-namestring', 'ensure-directories-exist',
'ensure-generic-function', 'eq', 'eql', 'equal', 'equalp', 'error',
'eval', 'evenp', 'every', 'exp', 'export', 'expt', 'fboundp',
'fceiling', 'fdefinition', 'ffloor', 'fifth', 'file-author',
'file-error-pathname', 'file-length', 'file-namestring',
'file-position', 'file-string-length', 'file-write-date',
'fill', 'fill-pointer', 'find', 'find-all-symbols', 'find-class',
'find-if', 'find-if-not', 'find-method', 'find-package', 'find-restart',
'find-symbol', 'finish-output', 'first', 'float', 'float-digits',
'floatp', 'float-precision', 'float-radix', 'float-sign', 'floor',
'fmakunbound', 'force-output', 'format', 'fourth', 'fresh-line',
'fround', 'ftruncate', 'funcall', 'function-keywords',
'function-lambda-expression', 'functionp', 'gcd', 'gensym', 'gentemp',
'get', 'get-decoded-time', 'get-dispatch-macro-character', 'getf',
'gethash', 'get-internal-real-time', 'get-internal-run-time',
'get-macro-character', 'get-output-stream-string', 'get-properties',
'get-setf-expansion', 'get-universal-time', 'graphic-char-p',
'hash-table-count', 'hash-table-p', 'hash-table-rehash-size',
'hash-table-rehash-threshold', 'hash-table-size', 'hash-table-test',
'host-namestring', 'identity', 'imagpart', 'import',
'initialize-instance', 'input-stream-p', 'inspect',
'integer-decode-float', 'integer-length', 'integerp',
'interactive-stream-p', 'intern', 'intersection',
'invalid-method-error', 'invoke-debugger', 'invoke-restart',
'invoke-restart-interactively', 'isqrt', 'keywordp', 'last', 'lcm',
'ldb', 'ldb-test', 'ldiff', 'length', 'lisp-implementation-type',
'lisp-implementation-version', 'list', 'list*', 'list-all-packages',
'listen', 'list-length', 'listp', 'load',
'load-logical-pathname-translations', 'log', 'logand', 'logandc1',
'logandc2', 'logbitp', 'logcount', 'logeqv', 'logical-pathname',
'logical-pathname-translations', 'logior', 'lognand', 'lognor',
'lognot', 'logorc1', 'logorc2', 'logtest', 'logxor', 'long-site-name',
'lower-case-p', 'machine-instance', 'machine-type', 'machine-version',
'macroexpand', 'macroexpand-1', 'macro-function', 'make-array',
'make-broadcast-stream', 'make-concatenated-stream', 'make-condition',
'make-dispatch-macro-character', 'make-echo-stream', 'make-hash-table',
'make-instance', 'make-instances-obsolete', 'make-list',
'make-load-form', 'make-load-form-saving-slots', 'make-package',
'make-pathname', 'make-random-state', 'make-sequence', 'make-string',
'make-string-input-stream', 'make-string-output-stream', 'make-symbol',
'make-synonym-stream', 'make-two-way-stream', 'makunbound', 'map',
'mapc', 'mapcan', 'mapcar', 'mapcon', 'maphash', 'map-into', 'mapl',
'maplist', 'mask-field', 'max', 'member', 'member-if', 'member-if-not',
'merge', 'merge-pathnames', 'method-combination-error',
'method-qualifiers', 'min', 'minusp', 'mismatch', 'mod',
'muffle-warning', 'name-char', 'namestring', 'nbutlast', 'nconc',
'next-method-p', 'nintersection', 'ninth', 'no-applicable-method',
'no-next-method', 'not', 'notany', 'notevery', 'nreconc', 'nreverse',
'nset-difference', 'nset-exclusive-or', 'nstring-capitalize',
'nstring-downcase', 'nstring-upcase', 'nsublis', 'nsubst', 'nsubst-if',
'nsubst-if-not', 'nsubstitute', 'nsubstitute-if', 'nsubstitute-if-not',
'nth', 'nthcdr', 'null', 'numberp', 'numerator', 'nunion', 'oddp',
'open', 'open-stream-p', 'output-stream-p', 'package-error-package',
'package-name', 'package-nicknames', 'packagep',
'package-shadowing-symbols', 'package-used-by-list', 'package-use-list',
'pairlis', 'parse-integer', 'parse-namestring', 'pathname',
'pathname-device', 'pathname-directory', 'pathname-host',
'pathname-match-p', 'pathname-name', 'pathnamep', 'pathname-type',
'pathname-version', 'peek-char', 'phase', 'plusp', 'position',
'position-if', 'position-if-not', 'pprint', 'pprint-dispatch',
'pprint-fill', 'pprint-indent', 'pprint-linear', 'pprint-newline',
'pprint-tab', 'pprint-tabular', 'prin1', 'prin1-to-string', 'princ',
'princ-to-string', 'print', 'print-object', 'probe-file', 'proclaim',
'provide', 'random', 'random-state-p', 'rassoc', 'rassoc-if',
'rassoc-if-not', 'rational', 'rationalize', 'rationalp', 'read',
'read-byte', 'read-char', 'read-char-no-hang', 'read-delimited-list',
'read-from-string', 'read-line', 'read-preserving-whitespace',
'read-sequence', 'readtable-case', 'readtablep', 'realp', 'realpart',
'reduce', 'reinitialize-instance', 'rem', 'remhash', 'remove',
'remove-duplicates', 'remove-if', 'remove-if-not', 'remove-method',
'remprop', 'rename-file', 'rename-package', 'replace', 'require',
'rest', 'restart-name', 'revappend', 'reverse', 'room', 'round',
'row-major-aref', 'rplaca', 'rplacd', 'sbit', 'scale-float', 'schar',
'search', 'second', 'set', 'set-difference',
'set-dispatch-macro-character', 'set-exclusive-or',
'set-macro-character', 'set-pprint-dispatch', 'set-syntax-from-char',
'seventh', 'shadow', 'shadowing-import', 'shared-initialize',
'short-site-name', 'signal', 'signum', 'simple-bit-vector-p',
'simple-condition-format-arguments', 'simple-condition-format-control',
'simple-string-p', 'simple-vector-p', 'sin', 'sinh', 'sixth', 'sleep',
'slot-boundp', 'slot-exists-p', 'slot-makunbound', 'slot-missing',
'slot-unbound', 'slot-value', 'software-type', 'software-version',
'some', 'sort', 'special-operator-p', 'sqrt', 'stable-sort',
'standard-char-p', 'store-value', 'stream-element-type',
'stream-error-stream', 'stream-external-format', 'streamp', 'string',
'string<', 'string<=', 'string=', 'string>', 'string>=', 'string/=',
'string-capitalize', 'string-downcase', 'string-equal',
'string-greaterp', 'string-left-trim', 'string-lessp',
'string-not-equal', 'string-not-greaterp', 'string-not-lessp',
'stringp', 'string-right-trim', 'string-trim', 'string-upcase',
'sublis', 'subseq', 'subsetp', 'subst', 'subst-if', 'subst-if-not',
'substitute', 'substitute-if', 'substitute-if-not', 'subtypep','svref',
'sxhash', 'symbol-function', 'symbol-name', 'symbolp', 'symbol-package',
'symbol-plist', 'symbol-value', 'synonym-stream-symbol', 'syntax:',
'tailp', 'tan', 'tanh', 'tenth', 'terpri', 'third',
'translate-logical-pathname', 'translate-pathname', 'tree-equal',
'truename', 'truncate', 'two-way-stream-input-stream',
'two-way-stream-output-stream', 'type-error-datum',
'type-error-expected-type', 'type-of', 'typep', 'unbound-slot-instance',
'unexport', 'unintern', 'union', 'unread-char', 'unuse-package',
'update-instance-for-different-class',
'update-instance-for-redefined-class', 'upgraded-array-element-type',
'upgraded-complex-part-type', 'upper-case-p', 'use-package',
'user-homedir-pathname', 'use-value', 'values', 'values-list', 'vector',
'vectorp', 'vector-pop', 'vector-push', 'vector-push-extend', 'warn',
'wild-pathname-p', 'write', 'write-byte', 'write-char', 'write-line',
'write-sequence', 'write-string', 'write-to-string', 'yes-or-no-p',
'y-or-n-p', 'zerop',
))
SPECIAL_FORMS = set((
'block', 'catch', 'declare', 'eval-when', 'flet', 'function', 'go', 'if',
'labels', 'lambda', 'let', 'let*', 'load-time-value', 'locally', 'macrolet',
'multiple-value-call', 'multiple-value-prog1', 'progn', 'progv', 'quote',
'return-from', 'setq', 'symbol-macrolet', 'tagbody', 'the', 'throw',
'unwind-protect',
))
MACROS = set((
'and', 'assert', 'call-method', 'case', 'ccase', 'check-type', 'cond',
'ctypecase', 'decf', 'declaim', 'defclass', 'defconstant', 'defgeneric',
'define-compiler-macro', 'define-condition', 'define-method-combination',
'define-modify-macro', 'define-setf-expander', 'define-symbol-macro',
'defmacro', 'defmethod', 'defpackage', 'defparameter', 'defsetf',
'defstruct', 'deftype', 'defun', 'defvar', 'destructuring-bind', 'do',
'do*', 'do-all-symbols', 'do-external-symbols', 'dolist', 'do-symbols',
'dotimes', 'ecase', 'etypecase', 'formatter', 'handler-bind',
'handler-case', 'ignore-errors', 'incf', 'in-package', 'lambda', 'loop',
'loop-finish', 'make-method', 'multiple-value-bind', 'multiple-value-list',
'multiple-value-setq', 'nth-value', 'or', 'pop',
'pprint-exit-if-list-exhausted', 'pprint-logical-block', 'pprint-pop',
'print-unreadable-object', 'prog', 'prog*', 'prog1', 'prog2', 'psetf',
'psetq', 'push', 'pushnew', 'remf', 'restart-bind', 'restart-case',
'return', 'rotatef', 'setf', 'shiftf', 'step', 'time', 'trace', 'typecase',
'unless', 'untrace', 'when', 'with-accessors', 'with-compilation-unit',
'with-condition-restarts', 'with-hash-table-iterator',
'with-input-from-string', 'with-open-file', 'with-open-stream',
'with-output-to-string', 'with-package-iterator', 'with-simple-restart',
'with-slots', 'with-standard-io-syntax',
))
LAMBDA_LIST_KEYWORDS = set((
'&allow-other-keys', '&aux', '&body', '&environment', '&key', '&optional',
'&rest', '&whole',
))
DECLARATIONS = set((
'dynamic-extent', 'ignore', 'optimize', 'ftype', 'inline', 'special',
'ignorable', 'notinline', 'type',
))
BUILTIN_TYPES = set((
'atom', 'boolean', 'base-char', 'base-string', 'bignum', 'bit',
'compiled-function', 'extended-char', 'fixnum', 'keyword', 'nil',
'signed-byte', 'short-float', 'single-float', 'double-float', 'long-float',
'simple-array', 'simple-base-string', 'simple-bit-vector', 'simple-string',
'simple-vector', 'standard-char', 'unsigned-byte',
# Condition Types
'arithmetic-error', 'cell-error', 'condition', 'control-error',
'division-by-zero', 'end-of-file', 'error', 'file-error',
'floating-point-inexact', 'floating-point-overflow',
'floating-point-underflow', 'floating-point-invalid-operation',
'parse-error', 'package-error', 'print-not-readable', 'program-error',
'reader-error', 'serious-condition', 'simple-condition', 'simple-error',
'simple-type-error', 'simple-warning', 'stream-error', 'storage-condition',
'style-warning', 'type-error', 'unbound-variable', 'unbound-slot',
'undefined-function', 'warning',
))
BUILTIN_CLASSES = set((
'array', 'broadcast-stream', 'bit-vector', 'built-in-class', 'character',
'class', 'complex', 'concatenated-stream', 'cons', 'echo-stream',
'file-stream', 'float', 'function', 'generic-function', 'hash-table',
'integer', 'list', 'logical-pathname', 'method-combination', 'method',
'null', 'number', 'package', 'pathname', 'ratio', 'rational', 'readtable',
'real', 'random-state', 'restart', 'sequence', 'standard-class',
'standard-generic-function', 'standard-method', 'standard-object',
'string-stream', 'stream', 'string', 'structure-class', 'structure-object',
'symbol', 'synonym-stream', 't', 'two-way-stream', 'vector',
))
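# A hedged sketch (not part of the original data tables) of how a lexer might
# consult these sets; the token names and the `classify` helper are
# illustrative only. Guarded so importing this module stays side-effect free.
if __name__ == '__main__':
    def classify(symbol):
        s = symbol.lower()
        if s in SPECIAL_FORMS:
            return 'Keyword'
        if s in MACROS:
            return 'Name.Builtin.Macro'
        if s in BUILTIN_TYPES or s in BUILTIN_CLASSES:
            return 'Keyword.Type'
        return 'Name.Variable'
    assert classify('let') == 'Keyword'
    assert classify('defun') == 'Name.Builtin.Macro'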
| bsd-2-clause |
OS2World/APP-INTERNET-torpak_2 | Lib/plat-os2emx/IN.py | 77 | 1875 | # Generated by h2py from f:/emx/include/netinet/in.h
# Included from sys/param.h
PAGE_SIZE = 0x1000
HZ = 100
MAXNAMLEN = 260
MAXPATHLEN = 260
def htonl(X): return _swapl(X)
def ntohl(X): return _swapl(X)
def htons(X): return _swaps(X)
def ntohs(X): return _swaps(X)
IPPROTO_IP = 0
IPPROTO_ICMP = 1
IPPROTO_IGMP = 2
IPPROTO_GGP = 3
IPPROTO_TCP = 6
IPPROTO_EGP = 8
IPPROTO_PUP = 12
IPPROTO_UDP = 17
IPPROTO_IDP = 22
IPPROTO_TP = 29
IPPROTO_EON = 80
IPPROTO_RAW = 255
IPPROTO_MAX = 256
IPPORT_RESERVED = 1024
IPPORT_USERRESERVED = 5000
def IN_CLASSA(i): return (((long)(i) & 0x80000000) == 0)
IN_CLASSA_NET = 0xff000000
IN_CLASSA_NSHIFT = 24
IN_CLASSA_HOST = 0x00ffffff
IN_CLASSA_MAX = 128
def IN_CLASSB(i): return (((long)(i) & 0xc0000000) == 0x80000000)
IN_CLASSB_NET = 0xffff0000
IN_CLASSB_NSHIFT = 16
IN_CLASSB_HOST = 0x0000ffff
IN_CLASSB_MAX = 65536
def IN_CLASSC(i): return (((long)(i) & 0xe0000000) == 0xc0000000)
IN_CLASSC_NET = 0xffffff00
IN_CLASSC_NSHIFT = 8
IN_CLASSC_HOST = 0x000000ff
def IN_CLASSD(i): return (((long)(i) & 0xf0000000) == 0xe0000000)
IN_CLASSD_NET = 0xf0000000
IN_CLASSD_NSHIFT = 28
IN_CLASSD_HOST = 0x0fffffff
def IN_MULTICAST(i): return IN_CLASSD(i)
def IN_EXPERIMENTAL(i): return (((long)(i) & 0xe0000000) == 0xe0000000)
def IN_BADCLASS(i): return (((long)(i) & 0xf0000000) == 0xf0000000)
INADDR_ANY = 0x00000000
INADDR_LOOPBACK = 0x7f000001
INADDR_BROADCAST = 0xffffffff
INADDR_NONE = 0xffffffff
INADDR_UNSPEC_GROUP = 0xe0000000
INADDR_ALLHOSTS_GROUP = 0xe0000001
INADDR_MAX_LOCAL_GROUP = 0xe00000ff
IN_LOOPBACKNET = 127
IP_OPTIONS = 1
IP_MULTICAST_IF = 2
IP_MULTICAST_TTL = 3
IP_MULTICAST_LOOP = 4
IP_ADD_MEMBERSHIP = 5
IP_DROP_MEMBERSHIP = 6
IP_HDRINCL = 2
IP_TOS = 3
IP_TTL = 4
IP_RECVOPTS = 5
IP_RECVRETOPTS = 6
IP_RECVDSTADDR = 7
IP_RETOPTS = 8
IP_DEFAULT_MULTICAST_TTL = 1
IP_DEFAULT_MULTICAST_LOOP = 1
IP_MAX_MEMBERSHIPS = 20
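# A hedged sketch of the class-test helpers above (Python 2 only, since the
# generated macros call `long`); `_addr_to_int` is a hypothetical helper, not
# part of this h2py output. Guarded so importing stays side-effect free.
if __name__ == '__main__':
    def _addr_to_int(a):
        # Pack a dotted-quad string into a 32-bit integer.
        p = [int(x) for x in a.split('.')]
        return (p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3]
    assert IN_CLASSA(_addr_to_int('10.0.0.1'))
    assert IN_CLASSC(_addr_to_int('192.168.1.1'))
    assert IN_MULTICAST(_addr_to_int('224.0.0.1'))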
| mit |
ArneBab/pypyjs | website/demo/home/rfk/repos/pypy/lib-python/2.7/test/test_frozen.py | 133 | 1236 | # Test the frozen module defined in frozen.c.
from test.test_support import captured_stdout, run_unittest
import unittest
import sys
class FrozenTests(unittest.TestCase):
def test_frozen(self):
with captured_stdout() as stdout:
try:
import __hello__
except ImportError, x:
self.fail("import __hello__ failed:" + str(x))
try:
import __phello__
except ImportError, x:
self.fail("import __phello__ failed:" + str(x))
try:
import __phello__.spam
except ImportError, x:
self.fail("import __phello__.spam failed:" + str(x))
try:
import __phello__.foo
except ImportError:
pass
else:
self.fail("import __phello__.foo should have failed")
self.assertEqual(stdout.getvalue(),
'Hello world...\nHello world...\nHello world...\n')
del sys.modules['__hello__']
del sys.modules['__phello__']
del sys.modules['__phello__.spam']
def test_main():
run_unittest(FrozenTests)
if __name__ == '__main__':
test_main()
| mit |
philanthropy-u/edx-platform | cms/envs/test_static_optimized.py | 3 | 2039 | """
Settings used when generating static assets for use in tests.
For example, Bok Choy uses two different settings files:
1. test_static_optimized is used when invoking collectstatic
2. bok_choy is used when running CMS and LMS
Note: it isn't possible to have a single settings file, because Django doesn't
support both generating static assets to a directory and also serving static files
from the same directory.
"""
# Start with the common settings
from .common import * # pylint: disable=wildcard-import, unused-wildcard-import
from openedx.core.lib.derived import derive_settings
# Use an in-memory database since this settings file is only used for updating assets
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'ATOMIC_REQUESTS': True,
},
}
######################### PIPELINE ####################################
# Use RequireJS optimized storage
STATICFILES_STORAGE = 'openedx.core.lib.django_require.staticstorage.OptimizedCachedRequireJsStorage'
# Revert to the default set of finders as we don't want to dynamically pick up files from the pipeline
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'openedx.core.lib.xblock_pipeline.finder.XBlockPipelineFinder',
]
# Redirect to the test_root folder within the repo
TEST_ROOT = REPO_ROOT / "test_root"
LOG_DIR = (TEST_ROOT / "log").abspath()
# Store the static files under test root so that they don't overwrite existing static assets
STATIC_ROOT = (TEST_ROOT / "staticfiles" / "cms").abspath()
WEBPACK_LOADER['DEFAULT']['STATS_FILE'] = STATIC_ROOT / "webpack-stats.json"
# Disable uglify when tests are running (used by build.js).
# 1. Uglify is by far the slowest part of the build process
# 2. Having full source code makes debugging tests easier for developers
os.environ['REQUIRE_BUILD_PROFILE_OPTIMIZE'] = 'none'
########################## Derive Any Derived Settings #######################
derive_settings(__name__)
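# Hedged usage note: per the module docstring, these settings are meant to be
# selected when collecting static assets; the exact invocation below is
# illustrative, not verified:
#
#   ./manage.py cms --settings=test_static_optimized collectstatic --noinput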
| agpl-3.0 |
yongshengwang/hue | build/env/lib/python2.7/site-packages/Paste-2.0.1-py2.7.egg/paste/session.py | 50 | 11554 | # (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
"""
Creates a session object in your WSGI environment.
Use like:
.. code-block:: Python
environ['paste.session.factory']()
This will return a dictionary. The contents of this dictionary will
be saved to disk when the request is completed. The session will be
created when you first fetch the session dictionary, and a cookie will
be sent in that case. There's currently no way to use sessions without
cookies, and there's no way to delete a session except to clear its
data.
@@: This doesn't do any locking, and may cause problems when a single
session is accessed concurrently. Also, it loads and saves the
session for each request, with no caching. Also, sessions aren't
expired.
"""
try:
# Python 3
from http.cookies import SimpleCookie
except ImportError:
# Python 2
from Cookie import SimpleCookie
import time
import random
import os
import datetime
import six
import threading
import tempfile
try:
import cPickle
except ImportError:
import pickle as cPickle
try:
from hashlib import md5
except ImportError:
from md5 import md5
from paste import wsgilib
from paste import request
class SessionMiddleware(object):
def __init__(self, application, global_conf=None, **factory_kw):
self.application = application
self.factory_kw = factory_kw
def __call__(self, environ, start_response):
session_factory = SessionFactory(environ, **self.factory_kw)
environ['paste.session.factory'] = session_factory
remember_headers = []
def session_start_response(status, headers, exc_info=None):
if not session_factory.created:
remember_headers[:] = [status, headers]
return start_response(status, headers)
headers.append(session_factory.set_cookie_header())
return start_response(status, headers, exc_info)
app_iter = self.application(environ, session_start_response)
def start():
if session_factory.created and remember_headers:
# Tricky bastard used the session after start_response
status, headers = remember_headers
headers.append(session_factory.set_cookie_header())
exc = ValueError(
"You cannot get the session after content from the "
"app_iter has been returned")
start_response(status, headers, (exc.__class__, exc, None))
def close():
if session_factory.used:
session_factory.close()
return wsgilib.add_start_close(app_iter, start, close)
class SessionFactory(object):
def __init__(self, environ, cookie_name='_SID_',
session_class=None,
session_expiration=60*12, # in minutes
**session_class_kw):
self.created = False
self.used = False
self.environ = environ
self.cookie_name = cookie_name
self.session = None
self.session_class = session_class or FileSession
self.session_class_kw = session_class_kw
self.expiration = session_expiration
def __call__(self):
self.used = True
if self.session is not None:
return self.session.data()
cookies = request.get_cookies(self.environ)
session = None
if self.cookie_name in cookies:
self.sid = cookies[self.cookie_name].value
try:
session = self.session_class(self.sid, create=False,
**self.session_class_kw)
except KeyError:
# Invalid SID
pass
if session is None:
self.created = True
self.sid = self.make_sid()
session = self.session_class(self.sid, create=True,
**self.session_class_kw)
session.clean_up()
self.session = session
return session.data()
def has_session(self):
if self.session is not None:
return True
cookies = request.get_cookies(self.environ)
        if self.cookie_name in cookies:
return True
return False
def make_sid(self):
# @@: need better algorithm
return (''.join(['%02d' % x for x in time.localtime(time.time())[:6]])
+ '-' + self.unique_id())
def unique_id(self, for_object=None):
"""
        Generates an opaque identifier string that is practically
guaranteed to be unique. If an object is passed, then its
id() is incorporated into the generation. Relies on md5 and
returns a 32 character long string.
"""
r = [time.time(), random.random()]
if hasattr(os, 'times'):
r.append(os.times())
if for_object is not None:
r.append(id(for_object))
content = str(r)
if six.PY3:
content = content.encode('utf8')
md5_hash = md5(content)
try:
return md5_hash.hexdigest()
except AttributeError:
# Older versions of Python didn't have hexdigest, so we'll
# do it manually
hexdigest = []
for char in md5_hash.digest():
hexdigest.append('%02x' % ord(char))
return ''.join(hexdigest)
def set_cookie_header(self):
c = SimpleCookie()
c[self.cookie_name] = self.sid
c[self.cookie_name]['path'] = '/'
gmt_expiration_time = time.gmtime(time.time() + (self.expiration * 60))
c[self.cookie_name]['expires'] = time.strftime("%a, %d-%b-%Y %H:%M:%S GMT", gmt_expiration_time)
name, value = str(c).split(': ', 1)
return (name, value)
def close(self):
if self.session is not None:
self.session.close()
last_cleanup = None
cleaning_up = False
cleanup_cycle = datetime.timedelta(seconds=15*60) #15 min
class FileSession(object):
def __init__(self, sid, create=False, session_file_path=tempfile.gettempdir(),
chmod=None,
expiration=2880, # in minutes: 48 hours
):
if chmod and isinstance(chmod, (six.binary_type, six.text_type)):
chmod = int(chmod, 8)
self.chmod = chmod
if not sid:
# Invalid...
raise KeyError
self.session_file_path = session_file_path
self.sid = sid
if not create:
if not os.path.exists(self.filename()):
raise KeyError
self._data = None
self.expiration = expiration
def filename(self):
return os.path.join(self.session_file_path, self.sid)
def data(self):
if self._data is not None:
return self._data
if os.path.exists(self.filename()):
f = open(self.filename(), 'rb')
self._data = cPickle.load(f)
f.close()
else:
self._data = {}
return self._data
def close(self):
if self._data is not None:
filename = self.filename()
exists = os.path.exists(filename)
if not self._data:
if exists:
os.unlink(filename)
else:
f = open(filename, 'wb')
cPickle.dump(self._data, f)
f.close()
if not exists and self.chmod:
os.chmod(filename, self.chmod)
def _clean_up(self):
global cleaning_up
try:
exp_time = datetime.timedelta(seconds=self.expiration*60)
now = datetime.datetime.now()
#Open every session and check that it isn't too old
for root, dirs, files in os.walk(self.session_file_path):
for f in files:
self._clean_up_file(f, exp_time=exp_time, now=now)
finally:
cleaning_up = False
def _clean_up_file(self, f, exp_time, now):
t = f.split("-")
if len(t) != 2:
return
t = t[0]
try:
sess_time = datetime.datetime(
int(t[0:4]),
int(t[4:6]),
int(t[6:8]),
int(t[8:10]),
int(t[10:12]),
int(t[12:14]))
except ValueError:
# Probably not a session file at all
return
if sess_time + exp_time < now:
os.remove(os.path.join(self.session_file_path, f))
def clean_up(self):
global last_cleanup, cleanup_cycle, cleaning_up
now = datetime.datetime.now()
if cleaning_up:
return
if not last_cleanup or last_cleanup + cleanup_cycle < now:
if not cleaning_up:
cleaning_up = True
try:
last_cleanup = now
t = threading.Thread(target=self._clean_up)
t.start()
except:
# Normally _clean_up should set cleaning_up
# to false, but if something goes wrong starting
# it...
cleaning_up = False
raise
class _NoDefault(object):
def __repr__(self):
return '<dynamic default>'
NoDefault = _NoDefault()
def make_session_middleware(
app, global_conf,
session_expiration=NoDefault,
expiration=NoDefault,
cookie_name=NoDefault,
session_file_path=NoDefault,
chmod=NoDefault):
"""
Adds a middleware that handles sessions for your applications.
    The session is a persistent dictionary. To get this dictionary
in your application, use ``environ['paste.session.factory']()``
which returns this persistent dictionary.
Configuration:
session_expiration:
The time each session lives, in minutes. This controls
the cookie expiration. Default 12 hours.
expiration:
The time each session lives on disk. Old sessions are
culled from disk based on this. Default 48 hours.
cookie_name:
The cookie name used to track the session. Use different
names to avoid session clashes.
session_file_path:
Sessions are put in this location, default /tmp.
chmod:
The octal chmod you want to apply to new sessions (e.g., 660
to make the sessions group readable/writable)
Each of these also takes from the global configuration. cookie_name
and chmod take from session_cookie_name and session_chmod
"""
if session_expiration is NoDefault:
session_expiration = global_conf.get('session_expiration', 60*12)
session_expiration = int(session_expiration)
if expiration is NoDefault:
expiration = global_conf.get('expiration', 60*48)
expiration = int(expiration)
if cookie_name is NoDefault:
cookie_name = global_conf.get('session_cookie_name', '_SID_')
if session_file_path is NoDefault:
session_file_path = global_conf.get('session_file_path', '/tmp')
if chmod is NoDefault:
chmod = global_conf.get('session_chmod', None)
return SessionMiddleware(
app, session_expiration=session_expiration,
expiration=expiration, cookie_name=cookie_name,
session_file_path=session_file_path, chmod=chmod)
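# A hedged sketch of the Paste Deploy INI wiring this factory is written for
# (the section name and factory reference are illustrative, not verified):
#
#   [filter:session]
#   paste.filter_factory = paste.session:make_session_middleware
#   session_expiration = 720
#   expiration = 2880
#   cookie_name = _SID_
#   session_file_path = /tmp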
| apache-2.0 |
roadmapper/ansible | lib/ansible/modules/cloud/openstack/os_keystone_endpoint.py | 19 | 6363 | #!/usr/bin/python
# Copyright: (c) 2017, VEXXHOST, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_keystone_endpoint
short_description: Manage OpenStack Identity service endpoints
extends_documentation_fragment: openstack
author:
- Mohammed Naser (@mnaser)
- Alberto Murillo (@albertomurillo)
version_added: "2.5"
description:
- Create, update, or delete OpenStack Identity service endpoints. If a
service with the same combination of I(service), I(interface) and I(region)
      exists, the I(url) and I(state) (C(present) or C(absent)) will be updated.
options:
service:
description:
- Name or id of the service.
required: true
endpoint_interface:
description:
- Interface of the service.
choices: [admin, public, internal]
required: true
url:
description:
- URL of the service.
required: true
region:
description:
- Region that the service belongs to. Note that I(region_name) is used for authentication.
enabled:
description:
- Is the service enabled.
default: True
type: bool
state:
description:
- Should the resource be C(present) or C(absent).
choices: [present, absent]
default: present
requirements:
- openstacksdk >= 0.13.0
'''
EXAMPLES = '''
- name: Create a service for glance
os_keystone_endpoint:
cloud: mycloud
service: glance
endpoint_interface: public
url: http://controller:9292
region: RegionOne
state: present
- name: Delete a service for nova
os_keystone_endpoint:
cloud: mycloud
service: nova
endpoint_interface: public
region: RegionOne
state: absent
'''
RETURN = '''
endpoint:
description: Dictionary describing the endpoint.
returned: On success when I(state) is C(present)
type: complex
contains:
id:
description: Endpoint ID.
type: str
sample: 3292f020780b4d5baf27ff7e1d224c44
region:
description: Region Name.
type: str
sample: RegionOne
service_id:
description: Service ID.
type: str
sample: b91f1318f735494a825a55388ee118f3
interface:
description: Endpoint Interface.
type: str
sample: public
url:
description: Service URL.
type: str
sample: http://controller:9292
enabled:
description: Service status.
type: bool
sample: True
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module
def _needs_update(module, endpoint):
if endpoint.enabled != module.params['enabled']:
return True
if endpoint.url != module.params['url']:
return True
return False
def _system_state_change(module, endpoint):
state = module.params['state']
if state == 'absent' and endpoint:
return True
if state == 'present':
if endpoint is None:
return True
return _needs_update(module, endpoint)
return False
def main():
argument_spec = openstack_full_argument_spec(
service=dict(type='str', required=True),
endpoint_interface=dict(type='str', required=True, choices=['admin', 'public', 'internal']),
url=dict(type='str', required=True),
region=dict(type='str'),
enabled=dict(type='bool', default=True),
state=dict(type='str', default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
service_name_or_id = module.params['service']
interface = module.params['endpoint_interface']
url = module.params['url']
region = module.params['region']
enabled = module.params['enabled']
state = module.params['state']
sdk, cloud = openstack_cloud_from_module(module)
try:
service = cloud.get_service(service_name_or_id)
if service is None:
module.fail_json(msg='Service %s does not exist' % service_name_or_id)
filters = dict(service_id=service.id, interface=interface)
if region is not None:
filters['region'] = region
endpoints = cloud.search_endpoints(filters=filters)
if len(endpoints) > 1:
module.fail_json(msg='Service %s, interface %s and region %s are '
'not unique' %
(service_name_or_id, interface, region))
elif len(endpoints) == 1:
endpoint = endpoints[0]
else:
endpoint = None
if module.check_mode:
module.exit_json(changed=_system_state_change(module, endpoint))
if state == 'present':
if endpoint is None:
result = cloud.create_endpoint(service_name_or_id=service,
url=url, interface=interface,
region=region, enabled=enabled)
endpoint = result[0]
changed = True
else:
if _needs_update(module, endpoint):
endpoint = cloud.update_endpoint(
endpoint.id, url=url, enabled=enabled)
changed = True
else:
changed = False
module.exit_json(changed=changed, endpoint=endpoint)
elif state == 'absent':
if endpoint is None:
changed = False
else:
cloud.delete_endpoint(endpoint.id)
changed = True
module.exit_json(changed=changed)
except sdk.exceptions.OpenStackCloudException as e:
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
| gpl-3.0 |
gvb/odoo | addons/project_issue/__init__.py | 433 | 1131 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import project_issue
import report
import res_config
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
DCGenomics/NCBI_August_Hackathon_Push_Button_Genomics_Solution | vcf_filter_v0.1.py | 3 | 4688 | ######## Purpose of this script is to filter the vcf file using SnpSift (part of SnpEff) according to user input #########
### Helpful documentation ###
## http://snpeff.sourceforge.net/SnpSift.html
#import modules
import os, sys
import subprocess
#Collect input as variables
i=0
file_prefix=sys.argv[1]
filter_arguments=[]
for arg in sys.argv:
if i>1:
filter_arguments.append(sys.argv[i])
i+=1
print filter_arguments
# include the basic commands that will be present in every snpSift filter command
echo_cmd="echo \""
close_cmd=" \""
javacmd="java -jar "
snpsiftcmd=" /home/ubuntu/vlaufer/snpeff/snpEff/NCBI_August_Hackathon_Push_Button_Genomics_Solution/SnpSift.jar "
filter_cmd=" filter "
opening_cmd=javacmd + snpsiftcmd + filter_cmd
carat_cmd=" > "
op="( "
cp=" )"
op=""
cp=""
quotation="\""
and_operator= " & "
or_operator= " | "
# input and output file names and paths
file_path=" /home/ubuntu/segun/snakemake.testrun/results/"
input_vcf= file_prefix + ".annotated.vcf "
output_vcf=file_prefix + ".annotated.filtered.vcf "
variant_types=["SNV","Insertion","Deletion","Indel"]
effect_types=["missense", "nonsense","synonymous","frameshift"]
impact_types=["LOW","MODERATE","SEVERE"]
#class snpsift_input_maker():
def variant_type_selector(arg):
input_parameters=[]
i=0
input_file_path=file_path + input_vcf
for filtering_param in filter_arguments:
i+=1
if filtering_param in effect_types:
# input_param="ANN[0].EFFECT has \'" + filtering_param + "_variant\'"
input_param="\"ANN[0].EFFECT has \'" + filtering_param + "_variant\'\""
input_parameters.append(input_param)
elif filtering_param in impact_types:
input_param="\"ANN[0].IMPACT = \'" + filtering_param + "\'\""
input_parameters.append(input_param)
elif filtering_param in variant_types:
input_param="\"VC = \'" + filtering_param + "\'\""
input_parameters.append(input_param)
        if i<len(filter_arguments):
temp_output_file_path=file_path + "temp." + str(i) + output_vcf
os.system(echo_cmd + opening_cmd + input_param + input_file_path + carat_cmd + temp_output_file_path + close_cmd) # echo the cmd to see output
os.system(opening_cmd + input_param + input_file_path + carat_cmd + temp_output_file_path)
        elif i==len(filter_arguments):
try:
os.system(echo_cmd + opening_cmd + input_param + input_file_path + carat_cmd + temp_output_file_path + close_cmd) # echo the cmd to see output
os.system(opening_cmd + input_param + input_file_path + carat_cmd + temp_output_file_path)
except:
output_file_path=file_path + output_vcf
os.system(echo_cmd + opening_cmd + input_param + input_file_path + carat_cmd + output_file_path + close_cmd) # echo the cmd to see output
os.system(opening_cmd + input_param + input_file_path + carat_cmd + output_file_path)
input_file_path=temp_output_file_path
variant_type_selector(filter_arguments)
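# Hedged illustration of one command variant_type_selector() assembles for a
# single "SEVERE" argument (the <prefix> placeholder is illustrative; paths
# come from the constants near the top of this script):
#
#   java -jar /home/ubuntu/vlaufer/snpeff/snpEff/NCBI_August_Hackathon_Push_Button_Genomics_Solution/SnpSift.jar filter \
#       "ANN[0].IMPACT = 'SEVERE'" \
#       /home/ubuntu/segun/snakemake.testrun/results/<prefix>.annotated.vcf \
#       > /home/ubuntu/segun/snakemake.testrun/results/<prefix>.annotated.filtered.vcf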
# final_input=""
# for input in input_parameters:
# final_input=final_input + op + input + cp + and_operator
# final_input=final_input[:-2]
# final_input=final_input
# print final_input
# subprocess.call([opening_cmd, final_input, input_file_path, carat_cmd, output_file_path, close_cmd], shell="TRUE")
## Works
#######sts=subprocess.call("ls" + " -a", shell="TRUE" )
# print([opening_cmd, input_param, input_file_path, carat_cmd, output_file_path, close_cmd])
# cmd=subprocess.call([opening_cmd, input_param, input_file_path, carat_cmd, output_file_path, close_cmd], shell="False")
## Currently does not work with final_input
# os.system(echo_cmd + opening_cmd + final_input + input_file_path + carat_cmd + output_file_path + close_cmd) # echo the cmd to see output
# os.system(opening_cmd + final_input + input_file_path + carat_cmd + output_file_path)
#] if isinstance( int(filter_param), int ):
## very helpful syntax examples here
# http://snpeff.sourceforge.net/SnpSift.html#Extract
# ( EFF[*].EFFECT = 'NON_SYNONYMOUS_CODING' )
# ( CHROM = '22' ) & ( POS > 12345600 ) & ( POS < 43210000 )
########### For adding parsing based on ranges later
#### FUNCTIONAL STATMENT --> input_param=" \" CHROM = \'" + filtering_param[0] + "\' \" "
# elif filtering_param[0:5]=="CHROM":
# filtering_param=filtering_param[5:]
# filtering_param=filtering_param.split("-")
# if len(filtering_param)==1:
# input_param=" CHROM = \'" + filtering_param[0] + "\' "
# if len(filtering_param)==2:
# input_param=" CHROM = \'" + filtering_param[0] + "\' " + cp
# input_param=input_param + and_operator + op + " POS > " + filtering_param[1]
# print input_param
#
# input_parameters.append(input_param)
# elif
| cc0-1.0 |
saydulk/horizon | openstack_dashboard/test/integration_tests/config.py | 26 | 3298 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_config import cfg
DashboardGroup = [
cfg.StrOpt('dashboard_url',
default='http://localhost/',
help="Where the dashboard can be found"),
cfg.StrOpt('login_url',
default='http://localhost/auth/login/',
help="Login page for the dashboard"),
cfg.StrOpt('help_url',
default='http://docs.openstack.org/',
help="Dashboard help page url"),
]
IdentityGroup = [
cfg.StrOpt('username',
default='demo',
help="Username to use for non-admin API requests."),
cfg.StrOpt('password',
default='secretadmin',
help="API key to use when authenticating.",
secret=True),
cfg.StrOpt('admin_username',
default='admin',
help="Administrative Username to use for admin API "
"requests."),
cfg.StrOpt('admin_password',
default='secretadmin',
help="API key to use when authenticating as admin.",
secret=True),
]
ImageGroup = [
cfg.StrOpt('http_image',
default='http://download.cirros-cloud.net/0.3.1/'
'cirros-0.3.1-x86_64-uec.tar.gz',
help='http accessible image'),
]
AvailableServiceGroup = [
cfg.BoolOpt('sahara',
default=True,
                help='Whether Sahara is expected to be available')
]
SeleniumGroup = [
cfg.IntOpt('implicit_wait',
default=10,
help="Implicit wait timeout in seconds"),
cfg.IntOpt('explicit_wait',
default=300,
help="Explicit wait timeout in seconds"),
cfg.IntOpt('page_timeout',
default=30,
help="Page load timeout in seconds"),
]
ScenarioGroup = [
cfg.StrOpt('ssh_user',
default='cirros',
help='ssh username for image file'),
]
def _get_config_files():
conf_dir = os.path.join(
os.path.abspath(os.path.dirname(os.path.dirname(__file__))),
'integration_tests')
conf_file = os.environ.get('HORIZON_INTEGRATION_TESTS_CONFIG_FILE',
"%s/horizon.conf" % conf_dir)
return [conf_file]
def get_config():
cfg.CONF([], project='horizon', default_config_files=_get_config_files())
cfg.CONF.register_opts(DashboardGroup, group="dashboard")
cfg.CONF.register_opts(IdentityGroup, group="identity")
cfg.CONF.register_opts(AvailableServiceGroup, group="service_available")
cfg.CONF.register_opts(SeleniumGroup, group="selenium")
cfg.CONF.register_opts(ImageGroup, group="image")
cfg.CONF.register_opts(ScenarioGroup, group="scenario")
return cfg.CONF
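# Hedged usage sketch (requires the horizon.conf resolved in
# _get_config_files(); attribute names follow the groups registered above):
#
#   conf = get_config()
#   print(conf.dashboard.dashboard_url)  # e.g. http://localhost/
#   print(conf.identity.username)        # e.g. demo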
| apache-2.0 |
fangxingli/hue | desktop/core/ext-py/tablib-0.10.0/tablib/packages/odf/presentation.py | 96 | 2714 | # -*- coding: utf-8 -*-
# Copyright (C) 2006-2007 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
from namespaces import PRESENTATIONNS
from element import Element
# ODF 1.0 section 9.6 and 9.7
# Autogenerated
def AnimationGroup(**args):
return Element(qname = (PRESENTATIONNS,'animation-group'), **args)
def Animations(**args):
return Element(qname = (PRESENTATIONNS,'animations'), **args)
def DateTime(**args):
return Element(qname = (PRESENTATIONNS,'date-time'), **args)
def DateTimeDecl(**args):
return Element(qname = (PRESENTATIONNS,'date-time-decl'), **args)
def Dim(**args):
return Element(qname = (PRESENTATIONNS,'dim'), **args)
def EventListener(**args):
return Element(qname = (PRESENTATIONNS,'event-listener'), **args)
def Footer(**args):
return Element(qname = (PRESENTATIONNS,'footer'), **args)
def FooterDecl(**args):
return Element(qname = (PRESENTATIONNS,'footer-decl'), **args)
def Header(**args):
return Element(qname = (PRESENTATIONNS,'header'), **args)
def HeaderDecl(**args):
return Element(qname = (PRESENTATIONNS,'header-decl'), **args)
def HideShape(**args):
return Element(qname = (PRESENTATIONNS,'hide-shape'), **args)
def HideText(**args):
return Element(qname = (PRESENTATIONNS,'hide-text'), **args)
def Notes(**args):
return Element(qname = (PRESENTATIONNS,'notes'), **args)
def Placeholder(**args):
return Element(qname = (PRESENTATIONNS,'placeholder'), **args)
def Play(**args):
return Element(qname = (PRESENTATIONNS,'play'), **args)
def Settings(**args):
return Element(qname = (PRESENTATIONNS,'settings'), **args)
def Show(**args):
return Element(qname = (PRESENTATIONNS,'show'), **args)
def ShowShape(**args):
return Element(qname = (PRESENTATIONNS,'show-shape'), **args)
def ShowText(**args):
return Element(qname = (PRESENTATIONNS,'show-text'), **args)
def Sound(**args):
return Element(qname = (PRESENTATIONNS,'sound'), **args)
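# Hedged usage sketch in the odfpy style this package mirrors (the
# OpenDocumentPresentation import path and its `presentation` attribute are
# assumptions, not verified against this vendored copy):
#
#   from tablib.packages.odf.opendocument import OpenDocumentPresentation
#   doc = OpenDocumentPresentation()
#   doc.presentation.addElement(Settings())
#   doc.save('deck.odp')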
| apache-2.0 |
Lab603/PicEncyclopedias | jni-build/jni-build/jni/include/tensorflow/contrib/learn/__init__.py | 8 | 1912 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# TODO(ptucker,ipolosukhin): Improve descriptions.
"""High level API for learning with TensorFlow.
## Estimators
Train and evaluate TensorFlow models.
@@BaseEstimator
@@Estimator
@@ModeKeys
@@TensorFlowClassifier
@@DNNClassifier
@@DNNRegressor
@@TensorFlowDNNClassifier
@@TensorFlowDNNRegressor
@@TensorFlowEstimator
@@LinearClassifier
@@LinearRegressor
@@TensorFlowLinearClassifier
@@TensorFlowLinearRegressor
@@TensorFlowRNNClassifier
@@TensorFlowRNNRegressor
@@TensorFlowRegressor
## Graph actions
Perform various training, evaluation, and inference actions on a graph.
@@NanLossDuringTrainingError
@@RunConfig
@@evaluate
@@infer
@@run_feeds
@@run_n
@@train
## Input processing
Queue and read batched input data.
@@extract_dask_data
@@extract_dask_labels
@@extract_pandas_data
@@extract_pandas_labels
@@extract_pandas_matrix
@@read_batch_examples
@@read_batch_features
@@read_batch_record_features
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn import *
from tensorflow.python.util.all_util import make_all
__all__ = make_all(__name__)
__all__.append('datasets')
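# Hedged workflow sketch for the estimators listed in the docstring above
# (exact signatures varied across contrib.learn releases; the data arrays are
# illustrative):
#
#   est = TensorFlowDNNClassifier(hidden_units=[10, 20, 10], n_classes=3)
#   est.fit(x_train, y_train)
#   y_pred = est.predict(x_test)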
| mit |
asnorkin/sentiment_analysis | site/lib/python2.7/site-packages/sklearn/neighbors/tests/test_dist_metrics.py | 36 | 6957 | import itertools
import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
import scipy
from scipy.spatial.distance import cdist
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.neighbors import BallTree
from sklearn.utils.testing import SkipTest, assert_raises_regex
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
def cmp_version(version1, version2):
version1 = tuple(map(int, version1.split('.')[:2]))
version2 = tuple(map(int, version2.split('.')[:2]))
if version1 < version2:
return -1
elif version1 > version2:
return 1
else:
return 0
class TestMetrics:
def __init__(self, n1=20, n2=25, d=4, zero_frac=0.5,
rseed=0, dtype=np.float64):
np.random.seed(rseed)
self.X1 = np.random.random((n1, d)).astype(dtype)
self.X2 = np.random.random((n2, d)).astype(dtype)
# make boolean arrays: ones and zeros
self.X1_bool = self.X1.round(0)
self.X2_bool = self.X2.round(0)
V = np.random.random((d, d))
VI = np.dot(V, V.T)
self.metrics = {'euclidean': {},
'cityblock': {},
'minkowski': dict(p=(1, 1.5, 2, 3)),
'chebyshev': {},
'seuclidean': dict(V=(np.random.random(d),)),
'wminkowski': dict(p=(1, 1.5, 3),
w=(np.random.random(d),)),
'mahalanobis': dict(VI=(VI,)),
'hamming': {},
'canberra': {},
'braycurtis': {}}
self.bool_metrics = ['matching', 'jaccard', 'dice',
'kulsinski', 'rogerstanimoto', 'russellrao',
'sokalmichener', 'sokalsneath']
def test_cdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X2, metric, **kwargs)
yield self.check_cdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X2_bool, metric)
yield self.check_cdist_bool, metric, D_true
def check_cdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1, self.X2)
assert_array_almost_equal(D12, D_true)
def check_cdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool, self.X2_bool)
assert_array_almost_equal(D12, D_true)
def test_pdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X1, metric, **kwargs)
yield self.check_pdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X1_bool, metric)
yield self.check_pdist_bool, metric, D_true
def check_pdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1)
assert_array_almost_equal(D12, D_true)
def check_pdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool)
assert_array_almost_equal(D12, D_true)
def test_pickle(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
yield self.check_pickle, metric, kwargs
for metric in self.bool_metrics:
yield self.check_pickle_bool, metric
def check_pickle_bool(self, metric):
dm = DistanceMetric.get_metric(metric)
D1 = dm.pairwise(self.X1_bool)
dm2 = pickle.loads(pickle.dumps(dm))
D2 = dm2.pairwise(self.X1_bool)
assert_array_almost_equal(D1, D2)
def check_pickle(self, metric, kwargs):
dm = DistanceMetric.get_metric(metric, **kwargs)
D1 = dm.pairwise(self.X1)
dm2 = pickle.loads(pickle.dumps(dm))
D2 = dm2.pairwise(self.X1)
assert_array_almost_equal(D1, D2)
def test_haversine_metric():
def haversine_slow(x1, x2):
return 2 * np.arcsin(np.sqrt(np.sin(0.5 * (x1[0] - x2[0])) ** 2
+ np.cos(x1[0]) * np.cos(x2[0]) *
np.sin(0.5 * (x1[1] - x2[1])) ** 2))
X = np.random.random((10, 2))
haversine = DistanceMetric.get_metric("haversine")
D1 = haversine.pairwise(X)
D2 = np.zeros_like(D1)
for i, x1 in enumerate(X):
for j, x2 in enumerate(X):
D2[i, j] = haversine_slow(x1, x2)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(haversine.dist_to_rdist(D1),
np.sin(0.5 * D2) ** 2)
def test_pyfunc_metric():
X = np.random.random((10, 3))
euclidean = DistanceMetric.get_metric("euclidean")
pyfunc = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2)
    # Check that DistanceMetric objects initialized from both a callable
    # metric and a predefined metric are picklable
euclidean_pkl = pickle.loads(pickle.dumps(euclidean))
pyfunc_pkl = pickle.loads(pickle.dumps(pyfunc))
D1 = euclidean.pairwise(X)
D2 = pyfunc.pairwise(X)
D1_pkl = euclidean_pkl.pairwise(X)
D2_pkl = pyfunc_pkl.pairwise(X)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(D1_pkl, D2_pkl)
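# A hedged standalone sketch of the public API exercised by the tests above;
# runnable on its own and guarded so test collection is unaffected.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X = rng.random_sample((5, 3))
    dm = DistanceMetric.get_metric('minkowski', p=2)
    # pairwise() returns a symmetric (5, 5) distance matrix.
    print(dm.pairwise(X).shape)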
def test_bad_pyfunc_metric():
def wrong_distance(x, y):
return "1"
X = np.ones((5, 2))
assert_raises_regex(TypeError,
"Custom distance function must accept two vectors",
BallTree, X, metric=wrong_distance)
def test_input_data_size():
# Regression test for #6288
    # Previously, a metric requiring a particular input dimension would fail
def custom_metric(x, y):
assert x.shape[0] == 3
return np.sum((x - y) ** 2)
rng = np.random.RandomState(0)
X = rng.rand(10, 3)
pyfunc = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2)
eucl = DistanceMetric.get_metric("euclidean")
assert_array_almost_equal(pyfunc.pairwise(X), eucl.pairwise(X))
| mit |
lanbing510/GTDWeb | django/conf/locale/mk/formats.py | 112 | 1742 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'd F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j.m.Y'
SHORT_DATETIME_FORMAT = 'j.m.Y H:i'
FIRST_DAY_OF_WEEK = 1
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
'%d. %m. %Y', '%d. %m. %y', # '25. 10. 2006', '25. 10. 06'
)
DATETIME_INPUT_FORMATS = (
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
'%d.%m.%y %H:%M:%S.%f', # '25.10.06 14:30:59.000200'
'%d.%m.%y %H:%M', # '25.10.06 14:30'
'%d.%m.%y', # '25.10.06'
'%d. %m. %Y %H:%M:%S', # '25. 10. 2006 14:30:59'
'%d. %m. %Y %H:%M:%S.%f', # '25. 10. 2006 14:30:59.000200'
'%d. %m. %Y %H:%M', # '25. 10. 2006 14:30'
'%d. %m. %Y', # '25. 10. 2006'
'%d. %m. %y %H:%M:%S', # '25. 10. 06 14:30:59'
'%d. %m. %y %H:%M:%S.%f', # '25. 10. 06 14:30:59.000200'
'%d. %m. %y %H:%M', # '25. 10. 06 14:30'
'%d. %m. %y', # '25. 10. 06'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
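# Hedged illustration of how Django consumes the *_INPUT_FORMATS above: each
# pattern is tried with strptime until one parses. Guarded so importing this
# locale module stays side-effect free.
if __name__ == '__main__':
    import datetime
    for fmt in DATE_INPUT_FORMATS:
        try:
            print(datetime.datetime.strptime('25.10.2006', fmt).date())
            break
        except ValueError:
            continue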
| gpl-2.0 |
jhonatajh/mtasa-blue | vendor/google-breakpad/src/tools/gyp/pylib/gyp/generator/make.py | 124 | 89654 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Notes:
#
# This is all roughly based on the Makefile system used by the Linux
# kernel, but is a non-recursive make -- we put the entire dependency
# graph in front of make and let it figure it out.
#
# The code below generates a separate .mk file for each target, but
# all are sourced by the top-level Makefile. This means that all
# variables in .mk-files clobber one another. Be careful to use :=
# where appropriate for immediate evaluation, and similarly to watch
# that you're not relying on a variable value to last between different
# .mk files.
#
# TODOs:
#
# Global settings and utility functions are currently stuffed in the
# toplevel Makefile. It may make sense to generate some .mk files on
# the side to keep the the files readable.
import os
import re
import sys
import subprocess
import gyp
import gyp.common
import gyp.xcode_emulation
from gyp.common import GetEnvironFallback
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'SHARED_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'INTERMEDIATE_DIR': '$(obj).$(TOOLSET)/$(TARGET)/geni',
'SHARED_INTERMEDIATE_DIR': '$(obj)/gen',
'PRODUCT_DIR': '$(builddir)',
'RULE_INPUT_ROOT': '%(INPUT_ROOT)s', # This gets expanded by Python.
'RULE_INPUT_DIRNAME': '%(INPUT_DIRNAME)s', # This gets expanded by Python.
'RULE_INPUT_PATH': '$(abspath $<)',
'RULE_INPUT_EXT': '$(suffix $<)',
'RULE_INPUT_NAME': '$(notdir $<)',
'CONFIGURATION_NAME': '$(BUILDTYPE)',
}
# Make supports multiple toolsets
generator_supports_multiple_toolsets = True
# Request sorted dependencies in the order from dependents to dependencies.
generator_wants_sorted_dependencies = False
# Placates pylint.
generator_additional_non_configuration_keys = []
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
generator_filelist_paths = None
def CalculateVariables(default_variables, params):
"""Calculate additional variables for use in the build (called by gyp)."""
flavor = gyp.common.GetFlavor(params)
if flavor == 'mac':
default_variables.setdefault('OS', 'mac')
default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib')
default_variables.setdefault('SHARED_LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
default_variables.setdefault('LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
# Copy additional generator configuration data from Xcode, which is shared
# by the Mac Make generator.
import gyp.generator.xcode as xcode_generator
global generator_additional_non_configuration_keys
generator_additional_non_configuration_keys = getattr(xcode_generator,
'generator_additional_non_configuration_keys', [])
global generator_additional_path_sections
generator_additional_path_sections = getattr(xcode_generator,
'generator_additional_path_sections', [])
global generator_extra_sources_for_rules
generator_extra_sources_for_rules = getattr(xcode_generator,
'generator_extra_sources_for_rules', [])
COMPILABLE_EXTENSIONS.update({'.m': 'objc', '.mm' : 'objcxx'})
else:
operating_system = flavor
if flavor == 'android':
operating_system = 'linux' # Keep this legacy behavior for now.
default_variables.setdefault('OS', operating_system)
default_variables.setdefault('SHARED_LIB_SUFFIX', '.so')
default_variables.setdefault('SHARED_LIB_DIR','$(builddir)/lib.$(TOOLSET)')
default_variables.setdefault('LIB_DIR', '$(obj).$(TOOLSET)')
def CalculateGeneratorInputInfo(params):
"""Calculate the generator specific info that gets fed to input (called by
gyp)."""
generator_flags = params.get('generator_flags', {})
android_ndk_version = generator_flags.get('android_ndk_version', None)
# Android NDK requires a strict link order.
if android_ndk_version:
global generator_wants_sorted_dependencies
generator_wants_sorted_dependencies = True
output_dir = params['options'].generator_output or \
params['options'].toplevel_dir
builddir_name = generator_flags.get('output_dir', 'out')
qualified_out_dir = os.path.normpath(os.path.join(
output_dir, builddir_name, 'gypfiles'))
global generator_filelist_paths
generator_filelist_paths = {
'toplevel': params['options'].toplevel_dir,
'qualified_out_dir': qualified_out_dir,
}
# The .d checking code below uses these functions:
# wildcard, sort, foreach, shell, wordlist
# wildcard can handle spaces, the rest can't.
# Since I could find no way to make foreach work with spaces in filenames
# correctly, the .d files have spaces replaced with another character. The .d
# file for
# Chromium\ Framework.framework/foo
# is for example
# out/Release/.deps/out/Release/Chromium?Framework.framework/foo
# This is the replacement character.
SPACE_REPLACEMENT = '?'
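# Hedged illustration of the substitution described above (mirrors what the
# make-level replace_spaces/unreplace_spaces functions do):
#
#   'out/Release/Chromium Framework.framework/foo'.replace(' ', SPACE_REPLACEMENT)
#   # -> 'out/Release/Chromium?Framework.framework/foo'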
LINK_COMMANDS_LINUX = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crsT $@ $(filter %.o,$^)
# Due to circular dependencies between libraries :(, we wrap the
# special "figure out circular dependencies" flags around the entire
# input list during linking.
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) -Wl,--end-group $(LIBS)
# We support two kinds of shared objects (.so):
# 1) shared_library, which is just bundling together many dependent libraries
# into a link line.
# 2) loadable_module, which is generating a module intended for dlopen().
#
# They differ only slightly:
# In the former case, we want to package all dependent code into the .so.
# In the latter case, we want to package just the API exposed by the
# outermost module.
# This means shared_library uses --whole-archive, while loadable_module doesn't.
# (Note that --whole-archive is incompatible with the --start-group used in
# normal linking.)
# Other shared-object link notes:
# - Set SONAME to the library filename so our binaries don't reference
# the local, absolute paths used on the link command-line.
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)
"""
LINK_COMMANDS_MAC = """\
quiet_cmd_alink = LIBTOOL-STATIC $@
cmd_alink = rm -f $@ && ./gyp-mac-tool filter-libtool libtool $(GYP_LIBTOOLFLAGS) -static -o $@ $(filter %.o,$^)
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS)
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -bundle $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
LINK_COMMANDS_ANDROID = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crsT $@ $(filter %.o,$^)
# Due to circular dependencies between libraries :(, we wrap the
# special "figure out circular dependencies" flags around the entire
# input list during linking.
quiet_cmd_link = LINK($(TOOLSET)) $@
quiet_cmd_link_host = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) -Wl,--end-group $(LIBS)
cmd_link_host = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
# Other shared-object link notes:
# - Set SONAME to the library filename so our binaries don't reference
# the local, absolute paths used on the link command-line.
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)
quiet_cmd_solink_module_host = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module_host = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
LINK_COMMANDS_AIX = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
# Header of toplevel Makefile.
# This should go into the build tree, but it's easier to keep it here for now.
SHARED_HEADER = ("""\
# We borrow heavily from the kernel build setup, though we are simpler since
# we don't have Kconfig tweaking settings on us.
# The implicit make rules have it looking for RCS files, among other things.
# We instead explicitly write all the rules we care about.
# It's even quicker (saves ~200ms) to pass -r on the command line.
MAKEFLAGS=-r
# The source directory tree.
srcdir := %(srcdir)s
abs_srcdir := $(abspath $(srcdir))
# The name of the builddir.
builddir_name ?= %(builddir)s
# The V=1 flag on command line makes us verbosely print command lines.
ifdef V
quiet=
else
quiet=quiet_
endif
# Specify BUILDTYPE=Release on the command line for a release build.
BUILDTYPE ?= %(default_configuration)s
# Directory all our build output goes into.
# Note that this must be two directories beneath src/ for unit tests to pass,
# as they reach into the src/ directory for data with relative paths.
builddir ?= $(builddir_name)/$(BUILDTYPE)
abs_builddir := $(abspath $(builddir))
depsdir := $(builddir)/.deps
# Object output directory.
obj := $(builddir)/obj
abs_obj := $(abspath $(obj))
# We build up a list of every single one of the targets so we can slurp in the
# generated dependency rule Makefiles in one pass.
all_deps :=
%(make_global_settings)s
CC.target ?= %(CC.target)s
CFLAGS.target ?= $(CFLAGS)
CXX.target ?= %(CXX.target)s
CXXFLAGS.target ?= $(CXXFLAGS)
LINK.target ?= %(LINK.target)s
LDFLAGS.target ?= $(LDFLAGS)
AR.target ?= $(AR)
# C++ apps need to be linked with g++.
#
# Note: flock is used to serialize linking. Linking is a memory-intensive
# process so running parallel links can often lead to thrashing. To disable
# the serialization, override LINK via an environment variable as follows:
#
# export LINK=g++
#
# This will allow make to invoke N linker processes as specified in -jN.
LINK ?= %(flock)s $(builddir)/linker.lock $(CXX.target)
# TODO(evan): move all cross-compilation logic to gyp-time so we don't need
# to replicate this environment fallback in make as well.
CC.host ?= %(CC.host)s
CFLAGS.host ?=
CXX.host ?= %(CXX.host)s
CXXFLAGS.host ?=
LINK.host ?= %(LINK.host)s
LDFLAGS.host ?=
AR.host ?= %(AR.host)s
# Define a dir function that can handle spaces.
# http://www.gnu.org/software/make/manual/make.html#Syntax-of-Functions
# "leading spaces cannot appear in the text of the first argument as written.
# These characters can be put into the argument value by variable substitution."
empty :=
space := $(empty) $(empty)
# http://stackoverflow.com/questions/1189781/using-make-dir-or-notdir-on-a-path-with-spaces
replace_spaces = $(subst $(space),""" + SPACE_REPLACEMENT + """,$1)
unreplace_spaces = $(subst """ + SPACE_REPLACEMENT + """,$(space),$1)
dirx = $(call unreplace_spaces,$(dir $(call replace_spaces,$1)))
# Flags to make gcc output dependency info. Note that you need to be
# careful here to use the flags that ccache and distcc can understand.
# We write to a dep file on the side first and then rename at the end
# so we can't end up with a broken dep file.
depfile = $(depsdir)/$(call replace_spaces,$@).d
DEPFLAGS = -MMD -MF $(depfile).raw
# We have to fixup the deps output in a few ways.
# (1) the file output should mention the proper .o file.
# ccache or distcc lose the path to the target, so we convert a rule of
# the form:
# foobar.o: DEP1 DEP2
# into
# path/to/foobar.o: DEP1 DEP2
# (2) we want missing files not to cause us to fail to build.
# We want to rewrite
# foobar.o: DEP1 DEP2 \\
# DEP3
# to
# DEP1:
# DEP2:
# DEP3:
# so if the files are missing, they're just considered phony rules.
# We have to do some pretty insane escaping to get those backslashes
# and dollar signs past make, the shell, and sed at the same time.
# Doesn't work with spaces, but that's fine: .d files have spaces in
# their names replaced with other characters."""
r"""
define fixup_dep
# The depfile may not exist if the input file didn't have any #includes.
touch $(depfile).raw
# Fixup path as in (1).
sed -e "s|^$(notdir $@)|$@|" $(depfile).raw >> $(depfile)
# Add extra rules as in (2).
# We remove slashes and replace spaces with new lines;
# remove blank lines;
# delete the first line and append a colon to the remaining lines.
sed -e 's|\\||' -e 'y| |\n|' $(depfile).raw |\
grep -v '^$$' |\
sed -e 1d -e 's|$$|:|' \
>> $(depfile)
rm $(depfile).raw
endef
"""
"""
# Command definitions:
# - cmd_foo is the actual command to run;
# - quiet_cmd_foo is the brief-output summary of the command.
quiet_cmd_cc = CC($(TOOLSET)) $@
cmd_cc = $(CC.$(TOOLSET)) $(GYP_CFLAGS) $(DEPFLAGS) $(CFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_cxx = CXX($(TOOLSET)) $@
cmd_cxx = $(CXX.$(TOOLSET)) $(GYP_CXXFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
%(extra_commands)s
quiet_cmd_touch = TOUCH $@
cmd_touch = touch $@
quiet_cmd_copy = COPY $@
# send stderr to /dev/null to ignore messages when linking directories.
cmd_copy = ln -f "$<" "$@" 2>/dev/null || (rm -rf "$@" && cp -af "$<" "$@")
%(link_commands)s
"""
r"""
# Define an escape_quotes function to escape single quotes.
# This allows us to handle quotes properly as long as we always use
# use single quotes and escape_quotes.
escape_quotes = $(subst ','\'',$(1))
# This comment is here just to include a ' to unconfuse syntax highlighting.
# Define an escape_vars function to escape '$' variable syntax.
# This allows us to read/write command lines with shell variables (e.g.
# $LD_LIBRARY_PATH), without triggering make substitution.
escape_vars = $(subst $$,$$$$,$(1))
# Helper that expands to a shell command to echo a string exactly as it is in
# make. This uses printf instead of echo because printf's behaviour with respect
# to escape sequences is more portable than echo's across different shells
# (e.g., dash, bash).
exact_echo = printf '%%s\n' '$(call escape_quotes,$(1))'
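# e.g. (illustrative): $(call exact_echo,cc -DNAME='a b' foo.c) prints
# the command byte-for-byte; escape_quotes rewrites the embedded single
# quotes so the printf argument stays correctly quoted.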
"""
"""
# Helper to compare the command we're about to run against the command
# we logged the last time we ran the command. Produces an empty
# string (false) when the commands match.
# Tricky point: Make has no string-equality test function.
# The kernel uses the following, but it seems like it would have false
# positives, where one string reordered its arguments.
# arg_check = $(strip $(filter-out $(cmd_$(1)), $(cmd_$@)) \\
# $(filter-out $(cmd_$@), $(cmd_$(1))))
# We instead substitute each for the empty string into the other, and
# say they're equal if both substitutions produce the empty string.
# .d files contain """ + SPACE_REPLACEMENT + \
""" instead of spaces, take that into account.
command_changed = $(or $(subst $(cmd_$(1)),,$(cmd_$(call replace_spaces,$@))),\\
$(subst $(cmd_$(call replace_spaces,$@)),,$(cmd_$(1))))
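# e.g. (illustrative): if the logged command was "gcc -O2 -c foo.c" and
# the current command is "gcc -O0 -c foo.c", neither substitution
# produces an empty string, so command_changed is true and we re-run.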
# Helper that is non-empty when a prerequisite changes.
# Normally make does this implicitly, but we force rules to always run
# so we can check their command lines.
# $? -- new prerequisites
# $| -- order-only dependencies
prereq_changed = $(filter-out FORCE_DO_CMD,$(filter-out $|,$?))
# Helper that executes all postbuilds until one fails.
define do_postbuilds
@E=0;\\
for p in $(POSTBUILDS); do\\
eval $$p;\\
E=$$?;\\
if [ $$E -ne 0 ]; then\\
break;\\
fi;\\
done;\\
if [ $$E -ne 0 ]; then\\
rm -rf "$@";\\
exit $$E;\\
fi
endef
# do_cmd: run a command via the above cmd_foo names, if necessary.
# Should always run for a given target to handle command-line changes.
# Second argument, if non-zero, makes it do asm/C/C++ dependency munging.
# Third argument, if non-zero, makes it do POSTBUILDS processing.
# Note: We intentionally do NOT call dirx for depfile, since it contains """ + \
SPACE_REPLACEMENT + """ for
# spaces already and dirx strips the """ + SPACE_REPLACEMENT + \
""" characters.
define do_cmd
$(if $(or $(command_changed),$(prereq_changed)),
@$(call exact_echo, $($(quiet)cmd_$(1)))
@mkdir -p "$(call dirx,$@)" "$(dir $(depfile))"
$(if $(findstring flock,$(word %(flock_index)d,$(cmd_$1))),
@$(cmd_$(1))
@echo " $(quiet_cmd_$(1)): Finished",
@$(cmd_$(1))
)
@$(call exact_echo,$(call escape_vars,cmd_$(call replace_spaces,$@) := $(cmd_$(1)))) > $(depfile)
@$(if $(2),$(fixup_dep))
$(if $(and $(3), $(POSTBUILDS)),
$(call do_postbuilds)
)
)
endef
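# e.g. (illustrative): a compile rule body of
#   @$(call do_cmd,cc,1)
# echoes quiet_cmd_cc, runs cmd_cc, records the exact command line in
# the depfile, and fixes up gcc's dependency output via fixup_dep.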
# Declare the "%(default_target)s" target first so it is the default,
# even though we don't have the deps yet.
.PHONY: %(default_target)s
%(default_target)s:
# make looks for ways to re-generate included makefiles, but in our case, we
# don't have a direct way. Explicitly telling make that it has nothing to do
# for them makes it go faster.
%%.d: ;
# Use FORCE_DO_CMD to force a target to run. Should be coupled with
# do_cmd.
.PHONY: FORCE_DO_CMD
FORCE_DO_CMD:
""")
SHARED_HEADER_MAC_COMMANDS = """
quiet_cmd_objc = CXX($(TOOLSET)) $@
cmd_objc = $(CC.$(TOOLSET)) $(GYP_OBJCFLAGS) $(DEPFLAGS) -c -o $@ $<
quiet_cmd_objcxx = CXX($(TOOLSET)) $@
cmd_objcxx = $(CXX.$(TOOLSET)) $(GYP_OBJCXXFLAGS) $(DEPFLAGS) -c -o $@ $<
# Commands for precompiled header files.
quiet_cmd_pch_c = CXX($(TOOLSET)) $@
cmd_pch_c = $(CC.$(TOOLSET)) $(GYP_PCH_CFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_pch_cc = CXX($(TOOLSET)) $@
cmd_pch_cc = $(CC.$(TOOLSET)) $(GYP_PCH_CXXFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_pch_m = CXX($(TOOLSET)) $@
cmd_pch_m = $(CC.$(TOOLSET)) $(GYP_PCH_OBJCFLAGS) $(DEPFLAGS) -c -o $@ $<
quiet_cmd_pch_mm = CXX($(TOOLSET)) $@
cmd_pch_mm = $(CC.$(TOOLSET)) $(GYP_PCH_OBJCXXFLAGS) $(DEPFLAGS) -c -o $@ $<
# gyp-mac-tool is written next to the root Makefile by gyp.
# Use $(4) for the command, since $(2) and $(3) are used as flag by do_cmd
# already.
quiet_cmd_mac_tool = MACTOOL $(4) $<
cmd_mac_tool = ./gyp-mac-tool $(4) $< "$@"
quiet_cmd_mac_package_framework = PACKAGE FRAMEWORK $@
cmd_mac_package_framework = ./gyp-mac-tool package-framework "$@" $(4)
quiet_cmd_infoplist = INFOPLIST $@
cmd_infoplist = $(CC.$(TOOLSET)) -E -P -Wno-trigraphs -x c $(INFOPLIST_DEFINES) "$<" -o "$@"
"""
def WriteRootHeaderSuffixRules(writer):
extensions = sorted(COMPILABLE_EXTENSIONS.keys(), key=str.lower)
writer.write('# Suffix rules, putting all outputs into $(obj).\n')
for ext in extensions:
writer.write('$(obj).$(TOOLSET)/%%.o: $(srcdir)/%%%s FORCE_DO_CMD\n' % ext)
writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])
writer.write('\n# Try building from generated source, too.\n')
for ext in extensions:
writer.write(
'$(obj).$(TOOLSET)/%%.o: $(obj).$(TOOLSET)/%%%s FORCE_DO_CMD\n' % ext)
writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])
writer.write('\n')
for ext in extensions:
writer.write('$(obj).$(TOOLSET)/%%.o: $(obj)/%%%s FORCE_DO_CMD\n' % ext)
writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])
writer.write('\n')
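# For example (illustrative), for ext '.cc' the loop above emits:
#   $(obj).$(TOOLSET)/%.o: $(srcdir)/%.cc FORCE_DO_CMD
#           @$(call do_cmd,cxx,1)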
SHARED_HEADER_SUFFIX_RULES_COMMENT1 = ("""\
# Suffix rules, putting all outputs into $(obj).
""")
SHARED_HEADER_SUFFIX_RULES_COMMENT2 = ("""\
# Try building from generated source, too.
""")
SHARED_FOOTER = """\
# "all" is a concatenation of the "all" targets from all the included
# sub-makefiles. This is just here to clarify.
all:
# Add in dependency-tracking rules. $(all_deps) is the list of every single
# target in our tree. Only consider the ones with .d (dependency) info:
d_files := $(wildcard $(foreach f,$(all_deps),$(depsdir)/$(f).d))
ifneq ($(d_files),)
include $(d_files)
endif
"""
header = """\
# This file is generated by gyp; do not edit.
"""
# Maps every compilable file extension to the do_cmd that compiles it.
COMPILABLE_EXTENSIONS = {
'.c': 'cc',
'.cc': 'cxx',
'.cpp': 'cxx',
'.cxx': 'cxx',
'.s': 'cc',
'.S': 'cc',
}
def Compilable(filename):
"""Return true if the file is compilable (should be in OBJS)."""
for res in (filename.endswith(e) for e in COMPILABLE_EXTENSIONS):
if res:
return True
return False
def Linkable(filename):
"""Return true if the file is linkable (should be on the link line)."""
return filename.endswith('.o')
def Target(filename):
"""Translate a compilable filename to its .o target."""
return os.path.splitext(filename)[0] + '.o'
def EscapeShellArgument(s):
"""Quotes an argument so that it will be interpreted literally by a POSIX
shell. Taken from
http://stackoverflow.com/questions/35817/whats-the-best-way-to-escape-ossystem-calls-in-python
"""
return "'" + s.replace("'", "'\\''") + "'"
def EscapeMakeVariableExpansion(s):
"""Make has its own variable expansion syntax using $. We must escape it for
string to be interpreted literally."""
return s.replace('$', '$$')
def EscapeCppDefine(s):
"""Escapes a CPP define so that it will reach the compiler unaltered."""
s = EscapeShellArgument(s)
s = EscapeMakeVariableExpansion(s)
# '#' characters must be escaped even embedded in a string, else Make will
# treat it as the start of a comment.
return s.replace('#', r'\#')
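# For example (illustrative): EscapeCppDefine('FOO="a b"') returns the
# shell-quoted 'FOO="a b"' with any '$' doubled for make and '#'
# rewritten as '\#', so the define reaches the compiler unaltered.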
def QuoteIfNecessary(string):
"""TODO: Should this ideally be replaced with one or more of the above
functions?"""
if '"' in string:
string = '"' + string.replace('"', '\\"') + '"'
return string
def StringToMakefileVariable(string):
"""Convert a string to a value that is acceptable as a make variable name."""
return re.sub('[^a-zA-Z0-9_]', '_', string)
srcdir_prefix = ''
def Sourceify(path):
"""Convert a path to its source directory form."""
if '$(' in path:
return path
if os.path.isabs(path):
return path
return srcdir_prefix + path
def QuoteSpaces(s, quote=r'\ '):
return s.replace(' ', quote)
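# For example (illustrative): QuoteSpaces('foo bar.o') returns
# 'foo\ bar.o'; callers pass quote=SPACE_REPLACEMENT when the result
# must survive make functions that split on whitespace (see all_deps).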
# Map from qualified target to path to output.
target_outputs = {}
# Map from qualified target to any linkable output. A subset
# of target_outputs. E.g. when mybinary depends on liba, we want to
# include liba in the linker line; when otherbinary depends on
# mybinary, we just want to build mybinary first.
target_link_deps = {}
class MakefileWriter:
"""MakefileWriter packages up the writing of one target-specific foobar.mk.
Its only real entry point is Write(), and is mostly used for namespacing.
"""
def __init__(self, generator_flags, flavor):
self.generator_flags = generator_flags
self.flavor = flavor
self.suffix_rules_srcdir = {}
self.suffix_rules_objdir1 = {}
self.suffix_rules_objdir2 = {}
# Generate suffix rules for all compilable extensions.
for ext in COMPILABLE_EXTENSIONS.keys():
# Suffix rules for source folder.
self.suffix_rules_srcdir.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(srcdir)/%%%s FORCE_DO_CMD
@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
# Suffix rules for generated source files.
self.suffix_rules_objdir1.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(obj).$(TOOLSET)/%%%s FORCE_DO_CMD
@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
self.suffix_rules_objdir2.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(obj)/%%%s FORCE_DO_CMD
@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
def Write(self, qualified_target, base_path, output_filename, spec, configs,
part_of_all):
"""The main entry point: writes a .mk file for a single target.
Arguments:
qualified_target: target we're generating
base_path: path relative to source root we're building in, used to resolve
target-relative paths
output_filename: output .mk file name to write
spec, configs: gyp info
part_of_all: flag indicating this target is part of 'all'
"""
gyp.common.EnsureDirExists(output_filename)
self.fp = open(output_filename, 'w')
self.fp.write(header)
self.qualified_target = qualified_target
self.path = base_path
self.target = spec['target_name']
self.type = spec['type']
self.toolset = spec['toolset']
self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec)
if self.flavor == 'mac':
self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec)
else:
self.xcode_settings = None
deps, link_deps = self.ComputeDeps(spec)
# Some of the generation below can add extra output, sources, or
# link dependencies. All of the out params of the functions that
# follow use names like extra_foo.
extra_outputs = []
extra_sources = []
extra_link_deps = []
extra_mac_bundle_resources = []
mac_bundle_deps = []
if self.is_mac_bundle:
self.output = self.ComputeMacBundleOutput(spec)
self.output_binary = self.ComputeMacBundleBinaryOutput(spec)
else:
self.output = self.output_binary = self.ComputeOutput(spec)
self.is_standalone_static_library = bool(
spec.get('standalone_static_library', 0))
self._INSTALLABLE_TARGETS = ('executable', 'loadable_module',
'shared_library')
if (self.is_standalone_static_library or
self.type in self._INSTALLABLE_TARGETS):
self.alias = os.path.basename(self.output)
install_path = self._InstallableTargetInstallPath()
else:
self.alias = self.output
install_path = self.output
self.WriteLn("TOOLSET := " + self.toolset)
self.WriteLn("TARGET := " + self.target)
# Actions must come first, since they can generate more OBJs for use below.
if 'actions' in spec:
self.WriteActions(spec['actions'], extra_sources, extra_outputs,
extra_mac_bundle_resources, part_of_all)
# Rules must be early like actions.
if 'rules' in spec:
self.WriteRules(spec['rules'], extra_sources, extra_outputs,
extra_mac_bundle_resources, part_of_all)
if 'copies' in spec:
self.WriteCopies(spec['copies'], extra_outputs, part_of_all)
# Bundle resources.
if self.is_mac_bundle:
all_mac_bundle_resources = (
spec.get('mac_bundle_resources', []) + extra_mac_bundle_resources)
self.WriteMacBundleResources(all_mac_bundle_resources, mac_bundle_deps)
self.WriteMacInfoPlist(mac_bundle_deps)
# Sources.
all_sources = spec.get('sources', []) + extra_sources
if all_sources:
self.WriteSources(
configs, deps, all_sources, extra_outputs,
extra_link_deps, part_of_all,
gyp.xcode_emulation.MacPrefixHeader(
self.xcode_settings, lambda p: Sourceify(self.Absolutify(p)),
self.Pchify))
sources = filter(Compilable, all_sources)
if sources:
self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT1)
extensions = set([os.path.splitext(s)[1] for s in sources])
for ext in extensions:
if ext in self.suffix_rules_srcdir:
self.WriteLn(self.suffix_rules_srcdir[ext])
self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT2)
for ext in extensions:
if ext in self.suffix_rules_objdir1:
self.WriteLn(self.suffix_rules_objdir1[ext])
for ext in extensions:
if ext in self.suffix_rules_objdir2:
self.WriteLn(self.suffix_rules_objdir2[ext])
self.WriteLn('# End of this set of suffix rules')
# Add dependency from bundle to bundle binary.
if self.is_mac_bundle:
mac_bundle_deps.append(self.output_binary)
self.WriteTarget(spec, configs, deps, extra_link_deps + link_deps,
mac_bundle_deps, extra_outputs, part_of_all)
# Update global list of target outputs, used in dependency tracking.
target_outputs[qualified_target] = install_path
# Update global list of link dependencies.
if self.type in ('static_library', 'shared_library'):
target_link_deps[qualified_target] = self.output_binary
# Currently any versions have the same effect, but in future the behavior
# could be different.
if self.generator_flags.get('android_ndk_version', None):
self.WriteAndroidNdkModuleRule(self.target, all_sources, link_deps)
self.fp.close()
def WriteSubMake(self, output_filename, makefile_path, targets, build_dir):
"""Write a "sub-project" Makefile.
    This is a small wrapper Makefile that calls the top-level Makefile to build
the targets from a single gyp file (i.e. a sub-project).
Arguments:
output_filename: sub-project Makefile name to write
makefile_path: path to the top-level Makefile
targets: list of "all" targets for this sub-project
build_dir: build output directory, relative to the sub-project
"""
gyp.common.EnsureDirExists(output_filename)
self.fp = open(output_filename, 'w')
self.fp.write(header)
# For consistency with other builders, put sub-project build output in the
# sub-project dir (see test/subdirectory/gyptest-subdir-all.py).
self.WriteLn('export builddir_name ?= %s' %
os.path.join(os.path.dirname(output_filename), build_dir))
self.WriteLn('.PHONY: all')
self.WriteLn('all:')
if makefile_path:
makefile_path = ' -C ' + makefile_path
self.WriteLn('\t$(MAKE)%s %s' % (makefile_path, ' '.join(targets)))
self.fp.close()
def WriteActions(self, actions, extra_sources, extra_outputs,
extra_mac_bundle_resources, part_of_all):
"""Write Makefile code for any 'actions' from the gyp input.
extra_sources: a list that will be filled in with newly generated source
files, if any
extra_outputs: a list that will be filled in with any outputs of these
actions (used to make other pieces dependent on these
actions)
part_of_all: flag indicating this target is part of 'all'
"""
env = self.GetSortedXcodeEnv()
for action in actions:
name = StringToMakefileVariable('%s_%s' % (self.qualified_target,
action['action_name']))
self.WriteLn('### Rules for action "%s":' % action['action_name'])
inputs = action['inputs']
outputs = action['outputs']
# Build up a list of outputs.
# Collect the output dirs we'll need.
dirs = set()
for out in outputs:
dir = os.path.split(out)[0]
if dir:
dirs.add(dir)
if int(action.get('process_outputs_as_sources', False)):
extra_sources += outputs
if int(action.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += outputs
# Write the actual command.
action_commands = action['action']
if self.flavor == 'mac':
action_commands = [gyp.xcode_emulation.ExpandEnvVars(command, env)
for command in action_commands]
command = gyp.common.EncodePOSIXShellList(action_commands)
if 'message' in action:
self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, action['message']))
else:
self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, name))
if len(dirs) > 0:
command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command
cd_action = 'cd %s; ' % Sourceify(self.path or '.')
# command and cd_action get written to a toplevel variable called
# cmd_foo. Toplevel variables can't handle things that change per
# makefile like $(TARGET), so hardcode the target.
command = command.replace('$(TARGET)', self.target)
cd_action = cd_action.replace('$(TARGET)', self.target)
# Set LD_LIBRARY_PATH in case the action runs an executable from this
# build which links to shared libs from this build.
# actions run on the host, so they should in theory only use host
# libraries, but until everything is made cross-compile safe, also use
# target libraries.
# TODO(piman): when everything is cross-compile safe, remove lib.target
self.WriteLn('cmd_%s = LD_LIBRARY_PATH=$(builddir)/lib.host:'
'$(builddir)/lib.target:$$LD_LIBRARY_PATH; '
'export LD_LIBRARY_PATH; '
'%s%s'
% (name, cd_action, command))
self.WriteLn()
outputs = map(self.Absolutify, outputs)
# The makefile rules are all relative to the top dir, but the gyp actions
# are defined relative to their containing dir. This replaces the obj
# variable for the action rule with an absolute version so that the output
# goes in the right place.
# Only write the 'obj' and 'builddir' rules for the "primary" output (:1);
# it's superfluous for the "extra outputs", and this avoids accidentally
# writing duplicate dummy rules for those outputs.
# Same for environment.
self.WriteLn("%s: obj := $(abs_obj)" % QuoteSpaces(outputs[0]))
self.WriteLn("%s: builddir := $(abs_builddir)" % QuoteSpaces(outputs[0]))
self.WriteSortedXcodeEnv(outputs[0], self.GetSortedXcodeEnv())
for input in inputs:
assert ' ' not in input, (
"Spaces in action input filenames not supported (%s)" % input)
for output in outputs:
assert ' ' not in output, (
"Spaces in action output filenames not supported (%s)" % output)
# See the comment in WriteCopies about expanding env vars.
outputs = [gyp.xcode_emulation.ExpandEnvVars(o, env) for o in outputs]
inputs = [gyp.xcode_emulation.ExpandEnvVars(i, env) for i in inputs]
self.WriteDoCmd(outputs, map(Sourceify, map(self.Absolutify, inputs)),
part_of_all=part_of_all, command=name)
# Stuff the outputs in a variable so we can refer to them later.
outputs_variable = 'action_%s_outputs' % name
self.WriteLn('%s := %s' % (outputs_variable, ' '.join(outputs)))
extra_outputs.append('$(%s)' % outputs_variable)
self.WriteLn()
self.WriteLn()
def WriteRules(self, rules, extra_sources, extra_outputs,
extra_mac_bundle_resources, part_of_all):
"""Write Makefile code for any 'rules' from the gyp input.
extra_sources: a list that will be filled in with newly generated source
files, if any
extra_outputs: a list that will be filled in with any outputs of these
rules (used to make other pieces dependent on these rules)
part_of_all: flag indicating this target is part of 'all'
"""
env = self.GetSortedXcodeEnv()
for rule in rules:
name = StringToMakefileVariable('%s_%s' % (self.qualified_target,
rule['rule_name']))
count = 0
self.WriteLn('### Generated for rule %s:' % name)
all_outputs = []
for rule_source in rule.get('rule_sources', []):
dirs = set()
(rule_source_dirname, rule_source_basename) = os.path.split(rule_source)
(rule_source_root, rule_source_ext) = \
os.path.splitext(rule_source_basename)
outputs = [self.ExpandInputRoot(out, rule_source_root,
rule_source_dirname)
for out in rule['outputs']]
for out in outputs:
dir = os.path.dirname(out)
if dir:
dirs.add(dir)
if int(rule.get('process_outputs_as_sources', False)):
extra_sources += outputs
if int(rule.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += outputs
inputs = map(Sourceify, map(self.Absolutify, [rule_source] +
rule.get('inputs', [])))
actions = ['$(call do_cmd,%s_%d)' % (name, count)]
if name == 'resources_grit':
# HACK: This is ugly. Grit intentionally doesn't touch the
# timestamp of its output file when the file doesn't change,
# which is fine in hash-based dependency systems like scons
# and forge, but not kosher in the make world. After some
# discussion, hacking around it here seems like the least
# amount of pain.
actions += ['@touch --no-create $@']
# See the comment in WriteCopies about expanding env vars.
outputs = [gyp.xcode_emulation.ExpandEnvVars(o, env) for o in outputs]
inputs = [gyp.xcode_emulation.ExpandEnvVars(i, env) for i in inputs]
outputs = map(self.Absolutify, outputs)
all_outputs += outputs
# Only write the 'obj' and 'builddir' rules for the "primary" output
# (:1); it's superfluous for the "extra outputs", and this avoids
# accidentally writing duplicate dummy rules for those outputs.
self.WriteLn('%s: obj := $(abs_obj)' % outputs[0])
self.WriteLn('%s: builddir := $(abs_builddir)' % outputs[0])
self.WriteMakeRule(outputs, inputs + ['FORCE_DO_CMD'], actions)
# Spaces in rule filenames are not supported, but rule variables have
# spaces in them (e.g. RULE_INPUT_PATH expands to '$(abspath $<)').
# The spaces within the variables are valid, so remove the variables
# before checking.
variables_with_spaces = re.compile(r'\$\([^ ]* \$<\)')
for output in outputs:
output = re.sub(variables_with_spaces, '', output)
assert ' ' not in output, (
"Spaces in rule filenames not yet supported (%s)" % output)
self.WriteLn('all_deps += %s' % ' '.join(outputs))
action = [self.ExpandInputRoot(ac, rule_source_root,
rule_source_dirname)
for ac in rule['action']]
mkdirs = ''
if len(dirs) > 0:
mkdirs = 'mkdir -p %s; ' % ' '.join(dirs)
cd_action = 'cd %s; ' % Sourceify(self.path or '.')
# action, cd_action, and mkdirs get written to a toplevel variable
# called cmd_foo. Toplevel variables can't handle things that change
# per makefile like $(TARGET), so hardcode the target.
if self.flavor == 'mac':
action = [gyp.xcode_emulation.ExpandEnvVars(command, env)
for command in action]
action = gyp.common.EncodePOSIXShellList(action)
action = action.replace('$(TARGET)', self.target)
cd_action = cd_action.replace('$(TARGET)', self.target)
mkdirs = mkdirs.replace('$(TARGET)', self.target)
# Set LD_LIBRARY_PATH in case the rule runs an executable from this
# build which links to shared libs from this build.
# rules run on the host, so they should in theory only use host
# libraries, but until everything is made cross-compile safe, also use
# target libraries.
# TODO(piman): when everything is cross-compile safe, remove lib.target
self.WriteLn(
"cmd_%(name)s_%(count)d = LD_LIBRARY_PATH="
"$(builddir)/lib.host:$(builddir)/lib.target:$$LD_LIBRARY_PATH; "
"export LD_LIBRARY_PATH; "
"%(cd_action)s%(mkdirs)s%(action)s" % {
'action': action,
'cd_action': cd_action,
'count': count,
'mkdirs': mkdirs,
'name': name,
})
self.WriteLn(
'quiet_cmd_%(name)s_%(count)d = RULE %(name)s_%(count)d $@' % {
'count': count,
'name': name,
})
self.WriteLn()
count += 1
outputs_variable = 'rule_%s_outputs' % name
self.WriteList(all_outputs, outputs_variable)
extra_outputs.append('$(%s)' % outputs_variable)
self.WriteLn('### Finished generating for rule: %s' % name)
self.WriteLn()
self.WriteLn('### Finished generating for all rules')
self.WriteLn('')
def WriteCopies(self, copies, extra_outputs, part_of_all):
"""Write Makefile code for any 'copies' from the gyp input.
extra_outputs: a list that will be filled in with any outputs of this action
(used to make other pieces dependent on this action)
part_of_all: flag indicating this target is part of 'all'
"""
self.WriteLn('### Generated for copy rule.')
variable = StringToMakefileVariable(self.qualified_target + '_copies')
outputs = []
for copy in copies:
for path in copy['files']:
# Absolutify() may call normpath, and will strip trailing slashes.
path = Sourceify(self.Absolutify(path))
filename = os.path.split(path)[1]
output = Sourceify(self.Absolutify(os.path.join(copy['destination'],
filename)))
# If the output path has variables in it, which happens in practice for
# 'copies', writing the environment as target-local doesn't work,
# because the variables are already needed for the target name.
# Copying the environment variables into global make variables doesn't
# work either, because then the .d files will potentially contain spaces
# after variable expansion, and .d file handling cannot handle spaces.
# As a workaround, manually expand variables at gyp time. Since 'copies'
# can't run scripts, there's no need to write the env then.
# WriteDoCmd() will escape spaces for .d files.
env = self.GetSortedXcodeEnv()
output = gyp.xcode_emulation.ExpandEnvVars(output, env)
path = gyp.xcode_emulation.ExpandEnvVars(path, env)
self.WriteDoCmd([output], [path], 'copy', part_of_all)
outputs.append(output)
self.WriteLn('%s = %s' % (variable, ' '.join(map(QuoteSpaces, outputs))))
extra_outputs.append('$(%s)' % variable)
self.WriteLn()
def WriteMacBundleResources(self, resources, bundle_deps):
"""Writes Makefile code for 'mac_bundle_resources'."""
self.WriteLn('### Generated for mac_bundle_resources')
for output, res in gyp.xcode_emulation.GetMacBundleResources(
generator_default_variables['PRODUCT_DIR'], self.xcode_settings,
map(Sourceify, map(self.Absolutify, resources))):
self.WriteDoCmd([output], [res], 'mac_tool,,,copy-bundle-resource',
part_of_all=True)
bundle_deps.append(output)
def WriteMacInfoPlist(self, bundle_deps):
"""Write Makefile code for bundle Info.plist files."""
info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist(
generator_default_variables['PRODUCT_DIR'], self.xcode_settings,
lambda p: Sourceify(self.Absolutify(p)))
if not info_plist:
return
if defines:
# Create an intermediate file to store preprocessed results.
intermediate_plist = ('$(obj).$(TOOLSET)/$(TARGET)/' +
os.path.basename(info_plist))
self.WriteList(defines, intermediate_plist + ': INFOPLIST_DEFINES', '-D',
quoter=EscapeCppDefine)
self.WriteMakeRule([intermediate_plist], [info_plist],
['$(call do_cmd,infoplist)',
# "Convert" the plist so that any weird whitespace changes from the
# preprocessor do not affect the XML parser in mac_tool.
'@plutil -convert xml1 $@ $@'])
info_plist = intermediate_plist
    # plists can contain envvars; substitute them into the file.
self.WriteSortedXcodeEnv(
out, self.GetSortedXcodeEnv(additional_settings=extra_env))
self.WriteDoCmd([out], [info_plist], 'mac_tool,,,copy-info-plist',
part_of_all=True)
bundle_deps.append(out)
def WriteSources(self, configs, deps, sources,
extra_outputs, extra_link_deps,
part_of_all, precompiled_header):
"""Write Makefile code for any 'sources' from the gyp input.
These are source files necessary to build the current target.
configs, deps, sources: input from gyp.
extra_outputs: a list of extra outputs this action should be dependent on;
used to serialize action/rules before compilation
extra_link_deps: a list that will be filled in with any outputs of
compilation (to be used in link lines)
part_of_all: flag indicating this target is part of 'all'
"""
# Write configuration-specific variables for CFLAGS, etc.
for configname in sorted(configs.keys()):
config = configs[configname]
self.WriteList(config.get('defines'), 'DEFS_%s' % configname, prefix='-D',
quoter=EscapeCppDefine)
if self.flavor == 'mac':
cflags = self.xcode_settings.GetCflags(configname)
cflags_c = self.xcode_settings.GetCflagsC(configname)
cflags_cc = self.xcode_settings.GetCflagsCC(configname)
cflags_objc = self.xcode_settings.GetCflagsObjC(configname)
cflags_objcc = self.xcode_settings.GetCflagsObjCC(configname)
else:
cflags = config.get('cflags')
cflags_c = config.get('cflags_c')
cflags_cc = config.get('cflags_cc')
self.WriteLn("# Flags passed to all source files.");
self.WriteList(cflags, 'CFLAGS_%s' % configname)
self.WriteLn("# Flags passed to only C files.");
self.WriteList(cflags_c, 'CFLAGS_C_%s' % configname)
self.WriteLn("# Flags passed to only C++ files.");
self.WriteList(cflags_cc, 'CFLAGS_CC_%s' % configname)
if self.flavor == 'mac':
self.WriteLn("# Flags passed to only ObjC files.");
self.WriteList(cflags_objc, 'CFLAGS_OBJC_%s' % configname)
self.WriteLn("# Flags passed to only ObjC++ files.");
self.WriteList(cflags_objcc, 'CFLAGS_OBJCC_%s' % configname)
includes = config.get('include_dirs')
if includes:
includes = map(Sourceify, map(self.Absolutify, includes))
self.WriteList(includes, 'INCS_%s' % configname, prefix='-I')
compilable = filter(Compilable, sources)
objs = map(self.Objectify, map(self.Absolutify, map(Target, compilable)))
self.WriteList(objs, 'OBJS')
for obj in objs:
assert ' ' not in obj, (
"Spaces in object filenames not supported (%s)" % obj)
self.WriteLn('# Add to the list of files we specially track '
'dependencies for.')
self.WriteLn('all_deps += $(OBJS)')
self.WriteLn()
# Make sure our dependencies are built first.
if deps:
self.WriteMakeRule(['$(OBJS)'], deps,
comment = 'Make sure our dependencies are built '
'before any of us.',
order_only = True)
# Make sure the actions and rules run first.
# If they generate any extra headers etc., the per-.o file dep tracking
# will catch the proper rebuilds, so order only is still ok here.
if extra_outputs:
self.WriteMakeRule(['$(OBJS)'], extra_outputs,
comment = 'Make sure our actions/rules run '
'before any of us.',
order_only = True)
    pchdeps = precompiled_header.GetObjDependencies(compilable, objs)
if pchdeps:
self.WriteLn('# Dependencies from obj files to their precompiled headers')
for source, obj, gch in pchdeps:
self.WriteLn('%s: %s' % (obj, gch))
self.WriteLn('# End precompiled header dependencies')
if objs:
extra_link_deps.append('$(OBJS)')
self.WriteLn("""\
# CFLAGS et al overrides must be target-local.
# See "Target-specific Variable Values" in the GNU Make manual.""")
self.WriteLn("$(OBJS): TOOLSET := $(TOOLSET)")
self.WriteLn("$(OBJS): GYP_CFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('c') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_C_$(BUILDTYPE))")
self.WriteLn("$(OBJS): GYP_CXXFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('cc') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_CC_$(BUILDTYPE))")
if self.flavor == 'mac':
self.WriteLn("$(OBJS): GYP_OBJCFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('m') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_C_$(BUILDTYPE)) "
"$(CFLAGS_OBJC_$(BUILDTYPE))")
self.WriteLn("$(OBJS): GYP_OBJCXXFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('mm') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_CC_$(BUILDTYPE)) "
"$(CFLAGS_OBJCC_$(BUILDTYPE))")
self.WritePchTargets(precompiled_header.GetPchBuildCommands())
# If there are any object files in our input file list, link them into our
# output.
extra_link_deps += filter(Linkable, sources)
self.WriteLn()
def WritePchTargets(self, pch_commands):
"""Writes make rules to compile prefix headers."""
if not pch_commands:
return
for gch, lang_flag, lang, input in pch_commands:
extra_flags = {
'c': '$(CFLAGS_C_$(BUILDTYPE))',
'cc': '$(CFLAGS_CC_$(BUILDTYPE))',
'm': '$(CFLAGS_C_$(BUILDTYPE)) $(CFLAGS_OBJC_$(BUILDTYPE))',
'mm': '$(CFLAGS_CC_$(BUILDTYPE)) $(CFLAGS_OBJCC_$(BUILDTYPE))',
}[lang]
var_name = {
'c': 'GYP_PCH_CFLAGS',
'cc': 'GYP_PCH_CXXFLAGS',
'm': 'GYP_PCH_OBJCFLAGS',
'mm': 'GYP_PCH_OBJCXXFLAGS',
}[lang]
self.WriteLn("%s: %s := %s " % (gch, var_name, lang_flag) +
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"$(CFLAGS_$(BUILDTYPE)) " +
extra_flags)
self.WriteLn('%s: %s FORCE_DO_CMD' % (gch, input))
self.WriteLn('\t@$(call do_cmd,pch_%s,1)' % lang)
self.WriteLn('')
assert ' ' not in gch, (
"Spaces in gch filenames not supported (%s)" % gch)
self.WriteLn('all_deps += %s' % gch)
self.WriteLn('')
def ComputeOutputBasename(self, spec):
"""Return the 'output basename' of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'libfoobar.so'
"""
assert not self.is_mac_bundle
if self.flavor == 'mac' and self.type in (
'static_library', 'executable', 'shared_library', 'loadable_module'):
return self.xcode_settings.GetExecutablePath()
target = spec['target_name']
target_prefix = ''
target_ext = ''
if self.type == 'static_library':
if target[:3] == 'lib':
target = target[3:]
target_prefix = 'lib'
target_ext = '.a'
elif self.type in ('loadable_module', 'shared_library'):
if target[:3] == 'lib':
target = target[3:]
target_prefix = 'lib'
target_ext = '.so'
elif self.type == 'none':
target = '%s.stamp' % target
elif self.type != 'executable':
print ("ERROR: What output file should be generated?",
"type", self.type, "target", target)
target_prefix = spec.get('product_prefix', target_prefix)
target = spec.get('product_name', target)
product_ext = spec.get('product_extension')
if product_ext:
target_ext = '.' + product_ext
return target_prefix + target + target_ext
def _InstallImmediately(self):
return self.toolset == 'target' and self.flavor == 'mac' and self.type in (
'static_library', 'executable', 'shared_library', 'loadable_module')
def ComputeOutput(self, spec):
"""Return the 'output' (full output path) of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'$(obj)/baz/libfoobar.so'
"""
assert not self.is_mac_bundle
path = os.path.join('$(obj).' + self.toolset, self.path)
if self.type == 'executable' or self._InstallImmediately():
path = '$(builddir)'
path = spec.get('product_dir', path)
return os.path.join(path, self.ComputeOutputBasename(spec))
def ComputeMacBundleOutput(self, spec):
"""Return the 'output' (full output path) to a bundle output directory."""
assert self.is_mac_bundle
path = generator_default_variables['PRODUCT_DIR']
return os.path.join(path, self.xcode_settings.GetWrapperName())
def ComputeMacBundleBinaryOutput(self, spec):
"""Return the 'output' (full output path) to the binary in a bundle."""
path = generator_default_variables['PRODUCT_DIR']
return os.path.join(path, self.xcode_settings.GetExecutablePath())
def ComputeDeps(self, spec):
"""Compute the dependencies of a gyp spec.
Returns a tuple (deps, link_deps), where each is a list of
filenames that will need to be put in front of make for either
building (deps) or linking (link_deps).
"""
deps = []
link_deps = []
if 'dependencies' in spec:
deps.extend([target_outputs[dep] for dep in spec['dependencies']
if target_outputs[dep]])
for dep in spec['dependencies']:
if dep in target_link_deps:
link_deps.append(target_link_deps[dep])
deps.extend(link_deps)
# TODO: It seems we need to transitively link in libraries (e.g. -lfoo)?
# This hack makes it work:
# link_deps.extend(spec.get('libraries', []))
return (gyp.common.uniquer(deps), gyp.common.uniquer(link_deps))
def WriteDependencyOnExtraOutputs(self, target, extra_outputs):
self.WriteMakeRule([self.output_binary], extra_outputs,
comment = 'Build our special outputs first.',
order_only = True)
def WriteTarget(self, spec, configs, deps, link_deps, bundle_deps,
extra_outputs, part_of_all):
"""Write Makefile code to produce the final target of the gyp spec.
spec, configs: input from gyp.
deps, link_deps: dependency lists; see ComputeDeps()
extra_outputs: any extra outputs that our target should depend on
part_of_all: flag indicating this target is part of 'all'
"""
self.WriteLn('### Rules for final target.')
if extra_outputs:
self.WriteDependencyOnExtraOutputs(self.output_binary, extra_outputs)
self.WriteMakeRule(extra_outputs, deps,
comment=('Preserve order dependency of '
'special output on deps.'),
order_only = True)
target_postbuilds = {}
if self.type != 'none':
for configname in sorted(configs.keys()):
config = configs[configname]
if self.flavor == 'mac':
ldflags = self.xcode_settings.GetLdflags(configname,
generator_default_variables['PRODUCT_DIR'],
lambda p: Sourceify(self.Absolutify(p)))
# TARGET_POSTBUILDS_$(BUILDTYPE) is added to postbuilds later on.
gyp_to_build = gyp.common.InvertRelativePath(self.path)
target_postbuild = self.xcode_settings.AddImplicitPostbuilds(
configname,
QuoteSpaces(os.path.normpath(os.path.join(gyp_to_build,
self.output))),
QuoteSpaces(os.path.normpath(os.path.join(gyp_to_build,
self.output_binary))))
if target_postbuild:
target_postbuilds[configname] = target_postbuild
else:
ldflags = config.get('ldflags', [])
# Compute an rpath for this output if needed.
if any(dep.endswith('.so') or '.so.' in dep for dep in deps):
# We want to get the literal string "$ORIGIN" into the link command,
# so we need lots of escaping.
ldflags.append(r'-Wl,-rpath=\$$ORIGIN/lib.%s/' % self.toolset)
ldflags.append(r'-Wl,-rpath-link=\$(builddir)/lib.%s/' %
self.toolset)
library_dirs = config.get('library_dirs', [])
ldflags += [('-L%s' % library_dir) for library_dir in library_dirs]
self.WriteList(ldflags, 'LDFLAGS_%s' % configname)
if self.flavor == 'mac':
self.WriteList(self.xcode_settings.GetLibtoolflags(configname),
'LIBTOOLFLAGS_%s' % configname)
libraries = spec.get('libraries')
if libraries:
# Remove duplicate entries
libraries = gyp.common.uniquer(libraries)
if self.flavor == 'mac':
libraries = self.xcode_settings.AdjustLibraries(libraries)
self.WriteList(libraries, 'LIBS')
self.WriteLn('%s: GYP_LDFLAGS := $(LDFLAGS_$(BUILDTYPE))' %
QuoteSpaces(self.output_binary))
self.WriteLn('%s: LIBS := $(LIBS)' % QuoteSpaces(self.output_binary))
if self.flavor == 'mac':
self.WriteLn('%s: GYP_LIBTOOLFLAGS := $(LIBTOOLFLAGS_$(BUILDTYPE))' %
QuoteSpaces(self.output_binary))
# Postbuild actions. Like actions, but implicitly depend on the target's
# output.
postbuilds = []
if self.flavor == 'mac':
if target_postbuilds:
postbuilds.append('$(TARGET_POSTBUILDS_$(BUILDTYPE))')
postbuilds.extend(
gyp.xcode_emulation.GetSpecPostbuildCommands(spec))
if postbuilds:
# Envvars may be referenced by TARGET_POSTBUILDS_$(BUILDTYPE),
# so we must output its definition first, since we declare variables
# using ":=".
self.WriteSortedXcodeEnv(self.output, self.GetSortedXcodePostbuildEnv())
for configname in target_postbuilds:
self.WriteLn('%s: TARGET_POSTBUILDS_%s := %s' %
(QuoteSpaces(self.output),
configname,
gyp.common.EncodePOSIXShellList(target_postbuilds[configname])))
# Postbuilds expect to be run in the gyp file's directory, so insert an
# implicit postbuild to cd to there.
postbuilds.insert(0, gyp.common.EncodePOSIXShellList(['cd', self.path]))
for i in xrange(len(postbuilds)):
if not postbuilds[i].startswith('$'):
postbuilds[i] = EscapeShellArgument(postbuilds[i])
self.WriteLn('%s: builddir := $(abs_builddir)' % QuoteSpaces(self.output))
self.WriteLn('%s: POSTBUILDS := %s' % (
QuoteSpaces(self.output), ' '.join(postbuilds)))
# A bundle directory depends on its dependencies such as bundle resources
# and bundle binary. When all dependencies have been built, the bundle
# needs to be packaged.
if self.is_mac_bundle:
# If the framework doesn't contain a binary, then nothing depends
# on the actions -- make the framework depend on them directly too.
self.WriteDependencyOnExtraOutputs(self.output, extra_outputs)
# Bundle dependencies. Note that the code below adds actions to this
# target, so if you move these two lines, move the lines below as well.
self.WriteList(map(QuoteSpaces, bundle_deps), 'BUNDLE_DEPS')
self.WriteLn('%s: $(BUNDLE_DEPS)' % QuoteSpaces(self.output))
# After the framework is built, package it. Needs to happen before
# postbuilds, since postbuilds depend on this.
if self.type in ('shared_library', 'loadable_module'):
self.WriteLn('\t@$(call do_cmd,mac_package_framework,,,%s)' %
self.xcode_settings.GetFrameworkVersion())
# Bundle postbuilds can depend on the whole bundle, so run them after
# the bundle is packaged, not already after the bundle binary is done.
if postbuilds:
self.WriteLn('\t@$(call do_postbuilds)')
postbuilds = [] # Don't write postbuilds for target's output.
# Needed by test/mac/gyptest-rebuild.py.
self.WriteLn('\t@true # No-op, used by tests')
      # Since this target depends on binary and resources which are in
      # nested subfolders, the framework directory will usually be older
      # than its dependencies. To prevent this rule from executing
      # on every build (expensive, especially with postbuilds), explicitly
# update the time on the framework directory.
self.WriteLn('\t@touch -c %s' % QuoteSpaces(self.output))
if postbuilds:
assert not self.is_mac_bundle, ('Postbuilds for bundles should be done '
'on the bundle, not the binary (target \'%s\')' % self.target)
assert 'product_dir' not in spec, ('Postbuilds do not work with '
'custom product_dir')
if self.type == 'executable':
self.WriteLn('%s: LD_INPUTS := %s' % (
QuoteSpaces(self.output_binary),
' '.join(map(QuoteSpaces, link_deps))))
if self.toolset == 'host' and self.flavor == 'android':
self.WriteDoCmd([self.output_binary], link_deps, 'link_host',
part_of_all, postbuilds=postbuilds)
else:
self.WriteDoCmd([self.output_binary], link_deps, 'link', part_of_all,
postbuilds=postbuilds)
elif self.type == 'static_library':
for link_dep in link_deps:
assert ' ' not in link_dep, (
"Spaces in alink input filenames not supported (%s)" % link_dep)
if (self.flavor not in ('mac', 'openbsd', 'win') and not
self.is_standalone_static_library):
self.WriteDoCmd([self.output_binary], link_deps, 'alink_thin',
part_of_all, postbuilds=postbuilds)
else:
self.WriteDoCmd([self.output_binary], link_deps, 'alink', part_of_all,
postbuilds=postbuilds)
elif self.type == 'shared_library':
self.WriteLn('%s: LD_INPUTS := %s' % (
QuoteSpaces(self.output_binary),
' '.join(map(QuoteSpaces, link_deps))))
self.WriteDoCmd([self.output_binary], link_deps, 'solink', part_of_all,
postbuilds=postbuilds)
elif self.type == 'loadable_module':
for link_dep in link_deps:
assert ' ' not in link_dep, (
"Spaces in module input filenames not supported (%s)" % link_dep)
if self.toolset == 'host' and self.flavor == 'android':
self.WriteDoCmd([self.output_binary], link_deps, 'solink_module_host',
part_of_all, postbuilds=postbuilds)
else:
self.WriteDoCmd(
[self.output_binary], link_deps, 'solink_module', part_of_all,
postbuilds=postbuilds)
elif self.type == 'none':
# Write a stamp line.
self.WriteDoCmd([self.output_binary], deps, 'touch', part_of_all,
postbuilds=postbuilds)
else:
print "WARNING: no output for", self.type, target
# Add an alias for each target (if there are any outputs).
# Installable target aliases are created below.
if ((self.output and self.output != self.target) and
(self.type not in self._INSTALLABLE_TARGETS)):
self.WriteMakeRule([self.target], [self.output],
comment='Add target alias', phony = True)
if part_of_all:
self.WriteMakeRule(['all'], [self.target],
comment = 'Add target alias to "all" target.',
phony = True)
# Add special-case rules for our installable targets.
# 1) They need to install to the build dir or "product" dir.
# 2) They get shortcuts for building (e.g. "make chrome").
# 3) They are part of "make all".
if (self.type in self._INSTALLABLE_TARGETS or
self.is_standalone_static_library):
if self.type == 'shared_library':
file_desc = 'shared library'
elif self.type == 'static_library':
file_desc = 'static library'
else:
file_desc = 'executable'
install_path = self._InstallableTargetInstallPath()
installable_deps = [self.output]
      if (self.flavor == 'mac' and 'product_dir' not in spec and
self.toolset == 'target'):
# On mac, products are created in install_path immediately.
assert install_path == self.output, '%s != %s' % (
install_path, self.output)
# Point the target alias to the final binary output.
self.WriteMakeRule([self.target], [install_path],
comment='Add target alias', phony = True)
if install_path != self.output:
assert not self.is_mac_bundle # See comment a few lines above.
self.WriteDoCmd([install_path], [self.output], 'copy',
comment = 'Copy this to the %s output path.' %
file_desc, part_of_all=part_of_all)
installable_deps.append(install_path)
if self.output != self.alias and self.alias != self.target:
self.WriteMakeRule([self.alias], installable_deps,
comment = 'Short alias for building this %s.' %
file_desc, phony = True)
if part_of_all:
self.WriteMakeRule(['all'], [install_path],
comment = 'Add %s to "all" target.' % file_desc,
phony = True)
def WriteList(self, value_list, variable=None, prefix='',
quoter=QuoteIfNecessary):
"""Write a variable definition that is a list of values.
E.g. WriteList(['a','b'], 'foo', prefix='blah') writes out
foo = blaha blahb
but in a pretty-printed style.
"""
values = ''
if value_list:
value_list = [quoter(prefix + l) for l in value_list]
values = ' \\\n\t' + ' \\\n\t'.join(value_list)
self.fp.write('%s :=%s\n\n' % (variable, values))
def WriteDoCmd(self, outputs, inputs, command, part_of_all, comment=None,
postbuilds=False):
"""Write a Makefile rule that uses do_cmd.
This makes the outputs dependent on the command line that was run,
    as well as supporting the V= make command-line flag.
"""
suffix = ''
if postbuilds:
assert ',' not in command
suffix = ',,1' # Tell do_cmd to honor $POSTBUILDS
self.WriteMakeRule(outputs, inputs,
actions = ['$(call do_cmd,%s%s)' % (command, suffix)],
comment = comment,
force = True)
# Add our outputs to the list of targets we read depfiles from.
# all_deps is only used for deps file reading, and for deps files we replace
# spaces with ? because escaping doesn't work with make's $(sort) and
# other functions.
outputs = [QuoteSpaces(o, SPACE_REPLACEMENT) for o in outputs]
self.WriteLn('all_deps += %s' % ' '.join(outputs))
def WriteMakeRule(self, outputs, inputs, actions=None, comment=None,
order_only=False, force=False, phony=False):
"""Write a Makefile rule, with some extra tricks.
outputs: a list of outputs for the rule (note: this is not directly
supported by make; see comments below)
inputs: a list of inputs for the rule
actions: a list of shell commands to run for the rule
comment: a comment to put in the Makefile above the rule (also useful
for making this Python script's code self-documenting)
order_only: if true, makes the dependency order-only
force: if true, include FORCE_DO_CMD as an order-only dep
phony: if true, the rule does not actually generate the named output, the
output is just a name to run the rule
"""
outputs = map(QuoteSpaces, outputs)
inputs = map(QuoteSpaces, inputs)
if comment:
self.WriteLn('# ' + comment)
if phony:
self.WriteLn('.PHONY: ' + ' '.join(outputs))
# TODO(evanm): just make order_only a list of deps instead of these hacks.
if order_only:
order_insert = '| '
pick_output = ' '.join(outputs)
else:
order_insert = ''
pick_output = outputs[0]
if force:
force_append = ' FORCE_DO_CMD'
else:
force_append = ''
if actions:
self.WriteLn("%s: TOOLSET := $(TOOLSET)" % outputs[0])
self.WriteLn('%s: %s%s%s' % (pick_output, order_insert, ' '.join(inputs),
force_append))
if actions:
for action in actions:
self.WriteLn('\t%s' % action)
if not order_only and len(outputs) > 1:
# If we have more than one output, a rule like
# foo bar: baz
      # means that for *each* output we must run the action, potentially
# in parallel. That is not what we're trying to write -- what
# we want is that we run the action once and it generates all
# the files.
# http://www.gnu.org/software/hello/manual/automake/Multiple-Outputs.html
# discusses this problem and has this solution:
# 1) Write the naive rule that would produce parallel runs of
# the action.
      # 2) Make the outputs serialized on each other, so we won't start
# a parallel run until the first run finishes, at which point
# we'll have generated all the outputs and we're done.
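      # For example (illustrative), with outputs [foo, bar] and input
      # baz, the "foo: baz" rule written above plus the lines below
      # give:
      #   bar: foo
      #   bar: ;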
self.WriteLn('%s: %s' % (' '.join(outputs[1:]), outputs[0]))
# Add a dummy command to the "extra outputs" rule, otherwise make seems to
# think these outputs haven't (couldn't have?) changed, and thus doesn't
# flag them as changed (i.e. include in '$?') when evaluating dependent
# rules, which in turn causes do_cmd() to skip running dependent commands.
self.WriteLn('%s: ;' % (' '.join(outputs[1:])))
self.WriteLn()
def WriteAndroidNdkModuleRule(self, module_name, all_sources, link_deps):
"""Write a set of LOCAL_XXX definitions for Android NDK.
These variable definitions will be used by Android NDK but do nothing for
non-Android applications.
Arguments:
module_name: Android NDK module name, which must be unique among all
module names.
all_sources: A list of source files (will be filtered by Compilable).
link_deps: A list of link dependencies, which must be sorted in
the order from dependencies to dependents.
"""
if self.type not in ('executable', 'shared_library', 'static_library'):
return
self.WriteLn('# Variable definitions for Android applications')
self.WriteLn('include $(CLEAR_VARS)')
self.WriteLn('LOCAL_MODULE := ' + module_name)
self.WriteLn('LOCAL_CFLAGS := $(CFLAGS_$(BUILDTYPE)) '
'$(DEFS_$(BUILDTYPE)) '
                 # LOCAL_CFLAGS is applied to both C and C++. There is
# no way to specify $(CFLAGS_C_$(BUILDTYPE)) only for C
# sources.
'$(CFLAGS_C_$(BUILDTYPE)) '
# $(INCS_$(BUILDTYPE)) includes the prefix '-I' while
# LOCAL_C_INCLUDES does not expect it. So put it in
# LOCAL_CFLAGS.
'$(INCS_$(BUILDTYPE))')
# LOCAL_CXXFLAGS is obsolete and LOCAL_CPPFLAGS is preferred.
self.WriteLn('LOCAL_CPPFLAGS := $(CFLAGS_CC_$(BUILDTYPE))')
self.WriteLn('LOCAL_C_INCLUDES :=')
self.WriteLn('LOCAL_LDLIBS := $(LDFLAGS_$(BUILDTYPE)) $(LIBS)')
# Detect the C++ extension.
cpp_ext = {'.cc': 0, '.cpp': 0, '.cxx': 0}
default_cpp_ext = '.cpp'
for filename in all_sources:
ext = os.path.splitext(filename)[1]
if ext in cpp_ext:
cpp_ext[ext] += 1
if cpp_ext[ext] > cpp_ext[default_cpp_ext]:
default_cpp_ext = ext
self.WriteLn('LOCAL_CPP_EXTENSION := ' + default_cpp_ext)
self.WriteList(map(self.Absolutify, filter(Compilable, all_sources)),
'LOCAL_SRC_FILES')
# Filter out those which do not match prefix and suffix and produce
# the resulting list without prefix and suffix.
def DepsToModules(deps, prefix, suffix):
modules = []
for filepath in deps:
filename = os.path.basename(filepath)
if filename.startswith(prefix) and filename.endswith(suffix):
modules.append(filename[len(prefix):-len(suffix)])
return modules
# Retrieve the default value of 'SHARED_LIB_SUFFIX'
params = {'flavor': 'linux'}
default_variables = {}
CalculateVariables(default_variables, params)
self.WriteList(
DepsToModules(link_deps,
generator_default_variables['SHARED_LIB_PREFIX'],
default_variables['SHARED_LIB_SUFFIX']),
'LOCAL_SHARED_LIBRARIES')
self.WriteList(
DepsToModules(link_deps,
generator_default_variables['STATIC_LIB_PREFIX'],
generator_default_variables['STATIC_LIB_SUFFIX']),
'LOCAL_STATIC_LIBRARIES')
if self.type == 'executable':
self.WriteLn('include $(BUILD_EXECUTABLE)')
elif self.type == 'shared_library':
self.WriteLn('include $(BUILD_SHARED_LIBRARY)')
elif self.type == 'static_library':
self.WriteLn('include $(BUILD_STATIC_LIBRARY)')
self.WriteLn()
def WriteLn(self, text=''):
self.fp.write(text + '\n')
def GetSortedXcodeEnv(self, additional_settings=None):
return gyp.xcode_emulation.GetSortedXcodeEnv(
self.xcode_settings, "$(abs_builddir)",
os.path.join("$(abs_srcdir)", self.path), "$(BUILDTYPE)",
additional_settings)
def GetSortedXcodePostbuildEnv(self):
# CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack.
# TODO(thakis): It would be nice to have some general mechanism instead.
strip_save_file = self.xcode_settings.GetPerTargetSetting(
'CHROMIUM_STRIP_SAVE_FILE', '')
# Even if strip_save_file is empty, explicitly write it. Else a postbuild
# might pick up an export from an earlier target.
return self.GetSortedXcodeEnv(
additional_settings={'CHROMIUM_STRIP_SAVE_FILE': strip_save_file})
def WriteSortedXcodeEnv(self, target, env):
for k, v in env:
# For
# foo := a\ b
# the escaped space does the right thing. For
# export foo := a\ b
# it does not -- the backslash is written to the env as literal character.
# So don't escape spaces in |env[k]|.
self.WriteLn('%s: export %s := %s' % (QuoteSpaces(target), k, v))
def Objectify(self, path):
"""Convert a path to its output directory form."""
if '$(' in path:
path = path.replace('$(obj)/', '$(obj).%s/$(TARGET)/' % self.toolset)
    if '$(obj)' not in path:
path = '$(obj).%s/$(TARGET)/%s' % (self.toolset, path)
return path
def Pchify(self, path, lang):
"""Convert a prefix header path to its output directory form."""
path = self.Absolutify(path)
if '$(' in path:
path = path.replace('$(obj)/', '$(obj).%s/$(TARGET)/pch-%s' %
(self.toolset, lang))
return path
return '$(obj).%s/$(TARGET)/pch-%s/%s' % (self.toolset, lang, path)
def Absolutify(self, path):
"""Convert a subdirectory-relative path into a base-relative path.
Skips over paths that contain variables."""
if '$(' in path:
# Don't call normpath in this case, as it might collapse the
# path too aggressively if it features '..'. However it's still
# important to strip trailing slashes.
return path.rstrip('/')
return os.path.normpath(os.path.join(self.path, path))
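  # For example (illustrative): with self.path == 'chrome/browser',
  # Absolutify('foo.cc') returns 'chrome/browser/foo.cc', while
  # Absolutify('$(obj)/gen/') passes through as '$(obj)/gen'.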
def ExpandInputRoot(self, template, expansion, dirname):
if '%(INPUT_ROOT)s' not in template and '%(INPUT_DIRNAME)s' not in template:
return template
path = template % {
'INPUT_ROOT': expansion,
'INPUT_DIRNAME': dirname,
}
return path
def _InstallableTargetInstallPath(self):
"""Returns the location of the final output for an installable target."""
# Xcode puts shared_library results into PRODUCT_DIR, and some gyp files
# rely on this. Emulate this behavior for mac.
if (self.type == 'shared_library' and
(self.flavor != 'mac' or self.toolset != 'target')):
# Install all shared libs into a common directory (per toolset) for
# convenient access with LD_LIBRARY_PATH.
return '$(builddir)/lib.%s/%s' % (self.toolset, self.alias)
return '$(builddir)/' + self.alias
def WriteAutoRegenerationRule(params, root_makefile, makefile_name,
build_files):
"""Write the target to regenerate the Makefile."""
options = params['options']
build_files_args = [gyp.common.RelativePath(filename, options.toplevel_dir)
for filename in params['build_files_arg']]
gyp_binary = gyp.common.FixIfRelativePath(params['gyp_binary'],
options.toplevel_dir)
if not gyp_binary.startswith(os.sep):
gyp_binary = os.path.join('.', gyp_binary)
root_makefile.write(
"quiet_cmd_regen_makefile = ACTION Regenerating $@\n"
"cmd_regen_makefile = cd $(srcdir); %(cmd)s\n"
"%(makefile_name)s: %(deps)s\n"
"\t$(call do_cmd,regen_makefile)\n\n" % {
'makefile_name': makefile_name,
'deps': ' '.join(map(Sourceify, build_files)),
'cmd': gyp.common.EncodePOSIXShellList(
[gyp_binary, '-fmake'] +
gyp.RegenerateFlags(options) +
build_files_args)})
def PerformBuild(data, configurations, params):
options = params['options']
for config in configurations:
arguments = ['make']
if options.toplevel_dir and options.toplevel_dir != '.':
arguments += '-C', options.toplevel_dir
arguments.append('BUILDTYPE=' + config)
print 'Building [%s]: %s' % (config, arguments)
subprocess.check_call(arguments)
def GenerateOutput(target_list, target_dicts, data, params):
options = params['options']
flavor = gyp.common.GetFlavor(params)
generator_flags = params.get('generator_flags', {})
builddir_name = generator_flags.get('output_dir', 'out')
android_ndk_version = generator_flags.get('android_ndk_version', None)
default_target = generator_flags.get('default_target', 'all')
def CalculateMakefilePath(build_file, base_name):
"""Determine where to write a Makefile for a given gyp file."""
# Paths in gyp files are relative to the .gyp file, but we want
# paths relative to the source root for the master makefile. Grab
# the path of the .gyp file as the base to relativize against.
# E.g. "foo/bar" when we're constructing targets for "foo/bar/baz.gyp".
base_path = gyp.common.RelativePath(os.path.dirname(build_file),
options.depth)
# We write the file in the base_path directory.
output_file = os.path.join(options.depth, base_path, base_name)
if options.generator_output:
output_file = os.path.join(
options.depth, options.generator_output, base_path, base_name)
base_path = gyp.common.RelativePath(os.path.dirname(build_file),
options.toplevel_dir)
return base_path, output_file
# TODO: search for the first non-'Default' target. This can go
# away when we add verification that all targets have the
# necessary configurations.
default_configuration = None
toolsets = set([target_dicts[target]['toolset'] for target in target_list])
for target in target_list:
spec = target_dicts[target]
if spec['default_configuration'] != 'Default':
default_configuration = spec['default_configuration']
break
if not default_configuration:
default_configuration = 'Default'
srcdir = '.'
makefile_name = 'Makefile' + options.suffix
makefile_path = os.path.join(options.toplevel_dir, makefile_name)
if options.generator_output:
global srcdir_prefix
makefile_path = os.path.join(
options.toplevel_dir, options.generator_output, makefile_name)
srcdir = gyp.common.RelativePath(srcdir, options.generator_output)
srcdir_prefix = '$(srcdir)/'
  flock_command = 'flock'
header_params = {
'default_target': default_target,
'builddir': builddir_name,
'default_configuration': default_configuration,
'flock': flock_command,
'flock_index': 1,
'link_commands': LINK_COMMANDS_LINUX,
'extra_commands': '',
'srcdir': srcdir,
}
if flavor == 'mac':
flock_command = './gyp-mac-tool flock'
header_params.update({
'flock': flock_command,
'flock_index': 2,
'link_commands': LINK_COMMANDS_MAC,
'extra_commands': SHARED_HEADER_MAC_COMMANDS,
})
elif flavor == 'android':
header_params.update({
'link_commands': LINK_COMMANDS_ANDROID,
})
elif flavor == 'solaris':
header_params.update({
'flock': './gyp-flock-tool flock',
'flock_index': 2,
})
elif flavor == 'freebsd':
# Note: OpenBSD has sysutils/flock. lockf seems to be FreeBSD specific.
header_params.update({
'flock': 'lockf',
})
elif flavor == 'aix':
header_params.update({
'link_commands': LINK_COMMANDS_AIX,
'flock': './gyp-flock-tool flock',
'flock_index': 2,
})
header_params.update({
'CC.target': GetEnvironFallback(('CC_target', 'CC'), '$(CC)'),
'AR.target': GetEnvironFallback(('AR_target', 'AR'), '$(AR)'),
'CXX.target': GetEnvironFallback(('CXX_target', 'CXX'), '$(CXX)'),
'LINK.target': GetEnvironFallback(('LINK_target', 'LINK'), '$(LINK)'),
'CC.host': GetEnvironFallback(('CC_host',), 'gcc'),
'AR.host': GetEnvironFallback(('AR_host',), 'ar'),
'CXX.host': GetEnvironFallback(('CXX_host',), 'g++'),
'LINK.host': GetEnvironFallback(('LINK_host',), '$(CXX.host)'),
})
build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
make_global_settings_array = data[build_file].get('make_global_settings', [])
wrappers = {}
wrappers['LINK'] = '%s $(builddir)/linker.lock' % flock_command
for key, value in make_global_settings_array:
if key.endswith('_wrapper'):
wrappers[key[:-len('_wrapper')]] = '$(abspath %s)' % value
make_global_settings = ''
for key, value in make_global_settings_array:
if re.match('.*_wrapper', key):
continue
if value[0] != '$':
value = '$(abspath %s)' % value
wrapper = wrappers.get(key)
if wrapper:
value = '%s %s' % (wrapper, value)
del wrappers[key]
if key in ('CC', 'CC.host', 'CXX', 'CXX.host'):
make_global_settings += (
'ifneq (,$(filter $(origin %s), undefined default))\n' % key)
# Let gyp-time envvars win over global settings.
env_key = key.replace('.', '_') # CC.host -> CC_host
if env_key in os.environ:
value = os.environ[env_key]
make_global_settings += ' %s = %s\n' % (key, value)
make_global_settings += 'endif\n'
else:
make_global_settings += '%s ?= %s\n' % (key, value)
# TODO(ukai): define cmd when only wrapper is specified in
# make_global_settings.
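# Worked example (editor's note): with make_global_settings_array set to
# [('CC', '/usr/bin/clang')] and no CC in the gyp-time environment, the loop
# above emits roughly:
#   ifneq (,$(filter $(origin CC), undefined default))
#     CC = $(abspath /usr/bin/clang)
#   endif
# so a CC given on the make command line still overrides the .gyp setting.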
header_params['make_global_settings'] = make_global_settings
gyp.common.EnsureDirExists(makefile_path)
root_makefile = open(makefile_path, 'w')
root_makefile.write(SHARED_HEADER % header_params)
# Currently any versions have the same effect, but in future the behavior
# could be different.
if android_ndk_version:
root_makefile.write(
'# Define LOCAL_PATH for build of Android applications.\n'
'LOCAL_PATH := $(call my-dir)\n'
'\n')
for toolset in toolsets:
root_makefile.write('TOOLSET := %s\n' % toolset)
WriteRootHeaderSuffixRules(root_makefile)
# Put build-time support tools next to the root Makefile.
dest_path = os.path.dirname(makefile_path)
gyp.common.CopyTool(flavor, dest_path)
# Find the list of targets that derive from the gyp file(s) being built.
needed_targets = set()
for build_file in params['build_files']:
for target in gyp.common.AllTargets(target_list, target_dicts, build_file):
needed_targets.add(target)
build_files = set()
include_list = set()
for qualified_target in target_list:
build_file, target, toolset = gyp.common.ParseQualifiedTarget(
qualified_target)
this_make_global_settings = data[build_file].get('make_global_settings', [])
assert make_global_settings_array == this_make_global_settings, (
"make_global_settings needs to be the same for all targets. %s vs. %s" %
(this_make_global_settings, make_global_settings))
build_files.add(gyp.common.RelativePath(build_file, options.toplevel_dir))
included_files = data[build_file]['included_files']
for included_file in included_files:
# The included_files entries are relative to the dir of the build file
# that included them, so we have to undo that and then make them relative
# to the root dir.
relative_include_file = gyp.common.RelativePath(
gyp.common.UnrelativePath(included_file, build_file),
options.toplevel_dir)
abs_include_file = os.path.abspath(relative_include_file)
# If the include file is from the ~/.gyp dir, we should use absolute path
# so that relocating the src dir doesn't break the path.
if (params['home_dot_gyp'] and
abs_include_file.startswith(params['home_dot_gyp'])):
build_files.add(abs_include_file)
else:
build_files.add(relative_include_file)
base_path, output_file = CalculateMakefilePath(build_file,
target + '.' + toolset + options.suffix + '.mk')
spec = target_dicts[qualified_target]
configs = spec['configurations']
if flavor == 'mac':
gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[build_file], spec)
writer = MakefileWriter(generator_flags, flavor)
writer.Write(qualified_target, base_path, output_file, spec, configs,
part_of_all=qualified_target in needed_targets)
# Our root_makefile lives at the source root. Compute the relative path
# from there to the output_file for including.
mkfile_rel_path = gyp.common.RelativePath(output_file,
os.path.dirname(makefile_path))
include_list.add(mkfile_rel_path)
# Write out per-gyp (sub-project) Makefiles.
depth_rel_path = gyp.common.RelativePath(options.depth, os.getcwd())
for build_file in build_files:
# The paths in build_files were relativized above, so undo that before
# testing against the non-relativized items in target_list and before
# calculating the Makefile path.
build_file = os.path.join(depth_rel_path, build_file)
gyp_targets = [target_dicts[target]['target_name'] for target in target_list
if target.startswith(build_file) and
target in needed_targets]
# Only generate Makefiles for gyp files with targets.
if not gyp_targets:
continue
base_path, output_file = CalculateMakefilePath(build_file,
os.path.splitext(os.path.basename(build_file))[0] + '.Makefile')
makefile_rel_path = gyp.common.RelativePath(os.path.dirname(makefile_path),
os.path.dirname(output_file))
writer.WriteSubMake(output_file, makefile_rel_path, gyp_targets,
builddir_name)
# Write out the sorted list of includes.
root_makefile.write('\n')
for include_file in sorted(include_list):
# We wrap each .mk include in an if statement so users can tell make to
# not load a file by setting NO_LOAD. The below make code says, only
# load the .mk file if the .mk filename doesn't start with a token in
# NO_LOAD.
root_makefile.write(
"ifeq ($(strip $(foreach prefix,$(NO_LOAD),\\\n"
" $(findstring $(join ^,$(prefix)),\\\n"
" $(join ^," + include_file + ")))),)\n")
root_makefile.write(" include " + include_file + "\n")
root_makefile.write("endif\n")
root_makefile.write('\n')
if (not generator_flags.get('standalone')
and generator_flags.get('auto_regeneration', True)):
WriteAutoRegenerationRule(params, root_makefile, makefile_name, build_files)
root_makefile.write(SHARED_FOOTER)
root_makefile.close()
| gpl-3.0 |
endlessm/chromium-browser | tools/style_variable_generator/css_generator.py | 1 | 2204 | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from base_generator import Color, Modes, BaseGenerator
class CSSStyleGenerator(BaseGenerator):
'''Generator for CSS Variables'''
def Render(self):
self.Validate()
return self.ApplyTemplate(self, 'css_generator.tmpl',
self.GetParameters())
def GetParameters(self):
return {
'light_variables': self._mode_variables[Modes.LIGHT],
'dark_variables': self._mode_variables[Modes.DARK],
}
def GetFilters(self):
return {
'to_var_name': self._ToVarName,
'css_color': self._CssColor,
'css_color_rgb': self._CssColorRGB,
}
def GetGlobals(self):
return {
'css_color_from_rgb_var': self._CssColorFromRGBVar,
}
def _ToVarName(self, var_name):
return '--%s' % var_name.replace('_', '-')
def _CssColor(self, c):
'''Returns the CSS color representation of |c|'''
assert (isinstance(c, Color))
if c.var:
return 'var(%s)' % self._ToVarName(c.var)
if c.rgb_var:
if c.a != 1:
return 'rgba(var(%s), %g)' % (self._ToVarName(c.rgb_var), c.a)
else:
return 'rgb(var(%s))' % self._ToVarName(c.rgb_var)
if c.a != 1:
return 'rgba(%d, %d, %d, %g)' % (c.r, c.g, c.b, c.a)
else:
return 'rgb(%d, %d, %d)' % (c.r, c.g, c.b)
def _CssColorRGB(self, c):
'''Returns the CSS rgb representation of |c|'''
if c.var:
return 'var(%s-rgb)' % self._ToVarName(c.var)
if c.rgb_var:
return 'var(%s)' % self._ToVarName(c.rgb_var)
return '%d, %d, %d' % (c.r, c.g, c.b)
def _CssColorFromRGBVar(self, name, alpha):
'''Returns the CSS color representation given a color name and alpha'''
if alpha != 1:
return 'rgba(var(%s-rgb), %g)' % (self._ToVarName(name), alpha)
else:
return 'rgb(var(%s-rgb))' % self._ToVarName(name)
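# --- Illustrative sketch (editor's addition, not part of the original
# file). The filters above turn style-variable names and Color values into
# CSS text. _ToVarName and _CssColorFromRGBVar can be exercised without a
# configured generator (bypassing __init__ is purely a demonstration trick):
#
#   gen = CSSStyleGenerator.__new__(CSSStyleGenerator)
#   gen._ToVarName('toolbar_bg')               # -> '--toolbar-bg'
#   gen._CssColorFromRGBVar('toolbar_bg', 0.5)
#                               # -> 'rgba(var(--toolbar-bg-rgb), 0.5)'
#
# _CssColor and _CssColorRGB additionally need real base_generator.Color
# instances (that class's constructor is not shown in this file): an opaque
# color renders as 'rgb(r, g, b)', one with alpha as 'rgba(r, g, b, a)'.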
| bsd-3-clause |
kongseokhwan/kulcloud-iitp-neutron | neutron/plugins/brocade/vlanbm.py | 14 | 1916 | # Copyright 2013 Brocade Communications System, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A Vlan Bitmap class to handle allocation/de-allocation of vlan ids."""
from six import moves
from neutron.common import constants
from neutron.plugins.brocade.db import models as brocade_db
MIN_VLAN = constants.MIN_VLAN_TAG + 1
MAX_VLAN = constants.MAX_VLAN_TAG
class VlanBitmap(object):
"""Setup a vlan bitmap for allocation/de-allocation."""
# Keep track of the vlans that have been allocated/de-allocated
# uses a bitmap to do this
def __init__(self, ctxt):
"""Initialize the vlan as a set."""
self.vlans = set(int(net['vlan'])
for net in brocade_db.get_networks(ctxt)
if net['vlan']
)
def get_next_vlan(self, vlan_id=None):
"""Try to get a specific vlan if requested or get the next vlan."""
min_vlan_search = vlan_id or MIN_VLAN
max_vlan_search = (vlan_id + 1) if vlan_id else MAX_VLAN
for vlan in moves.xrange(min_vlan_search, max_vlan_search):
if vlan not in self.vlans:
self.vlans.add(vlan)
return vlan
def release_vlan(self, vlan_id):
"""Return the vlan to the pool."""
if vlan_id in self.vlans:
self.vlans.remove(vlan_id)
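# Editor's usage sketch (ctxt is a hypothetical request context; a reachable
# brocade_db is required for real use):
#
#   bitmap = VlanBitmap(ctxt)
#   vlan = bitmap.get_next_vlan()    # first free id in [MIN_VLAN, MAX_VLAN)
#   bitmap.get_next_vlan(vlan)       # None: that specific id is now taken
#   bitmap.release_vlan(vlan)        # hand the id back to the pool
#
# Note that get_next_vlan() falls through to an implicit None when every id
# in the searched range is already allocated.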
| apache-2.0 |
mmerce/python | bigml/tests/create_forecast_steps.py | 1 | 1792 | # -*- coding: utf-8 -*-
#
# Copyright 2017-2020 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import time
from nose.tools import assert_almost_equals, eq_
from datetime import datetime
from .world import world
from bigml.api import HTTP_CREATED
from bigml.api import FINISHED, FAULTY
from bigml.api import get_status
from .read_forecast_steps import i_get_the_forecast
def i_create_a_forecast(step, data=None):
if data is None:
data = "{}"
time_series = world.time_series['resource']
data = json.loads(data)
resource = world.api.create_forecast(time_series, data)
world.status = resource['code']
eq_(world.status, HTTP_CREATED)
world.location = resource['location']
world.forecast = resource['object']
world.forecasts.append(resource['resource'])
def the_forecast_is(step, predictions):
predictions = json.loads(predictions)
attrs = ["point_forecast", "model"]
for field_id in predictions:
forecast = world.forecast['forecast']['result'][field_id]
prediction = predictions[field_id]
eq_(len(forecast), len(prediction), "forecast: %s" % forecast)
for index in range(len(forecast)):
for attr in attrs:
eq_(forecast[index][attr], prediction[index][attr])
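# Editor's note: `predictions` is a JSON object keyed by field id whose
# values are lists of forecast entries compared on the attrs above; the
# shape below is assumed for illustration:
#
#   {"000005": [{"point_forecast": [68.53, 68.53], "model": "A,N,N"}]}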
| apache-2.0 |
gpoesia/servo | tests/wpt/css-tests/tools/html5lib/html5lib/serializer/htmlserializer.py | 423 | 12897 | from __future__ import absolute_import, division, unicode_literals
from six import text_type
import gettext
_ = gettext.gettext
try:
from functools import reduce
except ImportError:
pass
from ..constants import voidElements, booleanAttributes, spaceCharacters
from ..constants import rcdataElements, entities, xmlEntities
from .. import utils
from xml.sax.saxutils import escape
spaceCharacters = "".join(spaceCharacters)
try:
from codecs import register_error, xmlcharrefreplace_errors
except ImportError:
unicode_encode_errors = "strict"
else:
unicode_encode_errors = "htmlentityreplace"
encode_entity_map = {}
is_ucs4 = len("\U0010FFFF") == 1
for k, v in list(entities.items()):
# skip multi-character entities
if ((is_ucs4 and len(v) > 1) or
(not is_ucs4 and len(v) > 2)):
continue
if v != "&":
if len(v) == 2:
v = utils.surrogatePairToCodepoint(v)
else:
v = ord(v)
if v not in encode_entity_map or k.islower():
# prefer &lt; over &LT; and similarly for &amp;, &gt;, etc.
encode_entity_map[v] = k
def htmlentityreplace_errors(exc):
if isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)):
res = []
codepoints = []
skip = False
for i, c in enumerate(exc.object[exc.start:exc.end]):
if skip:
skip = False
continue
index = i + exc.start
if utils.isSurrogatePair(exc.object[index:min([exc.end, index + 2])]):
codepoint = utils.surrogatePairToCodepoint(exc.object[index:index + 2])
skip = True
else:
codepoint = ord(c)
codepoints.append(codepoint)
for cp in codepoints:
e = encode_entity_map.get(cp)
if e:
res.append("&")
res.append(e)
if not e.endswith(";"):
res.append(";")
else:
res.append("&#x%s;" % (hex(cp)[2:]))
return ("".join(res), exc.end)
else:
return xmlcharrefreplace_errors(exc)
register_error(unicode_encode_errors, htmlentityreplace_errors)
del register_error
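# Editor's sketch: once registered, the handler plugs into the normal codec
# machinery, so (when the codecs import above succeeded):
#
#   u'caf\u00e9'.encode('ascii', unicode_encode_errors)  # -> b'caf&eacute;'
#
# because U+00E9 maps to the single-character entity "eacute;"; code points
# with no named entity fall back to numeric references like "&#x2603;".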
class HTMLSerializer(object):
# attribute quoting options
quote_attr_values = False
quote_char = '"'
use_best_quote_char = True
# tag syntax options
omit_optional_tags = True
minimize_boolean_attributes = True
use_trailing_solidus = False
space_before_trailing_solidus = True
# escaping options
escape_lt_in_attrs = False
escape_rcdata = False
resolve_entities = True
# miscellaneous options
alphabetical_attributes = False
inject_meta_charset = True
strip_whitespace = False
sanitize = False
options = ("quote_attr_values", "quote_char", "use_best_quote_char",
"omit_optional_tags", "minimize_boolean_attributes",
"use_trailing_solidus", "space_before_trailing_solidus",
"escape_lt_in_attrs", "escape_rcdata", "resolve_entities",
"alphabetical_attributes", "inject_meta_charset",
"strip_whitespace", "sanitize")
def __init__(self, **kwargs):
"""Initialize HTMLSerializer.
Keyword options (default given first unless specified) include:
inject_meta_charset=True|False
Whether it insert a meta element to define the character set of the
document.
quote_attr_values=True|False
Whether to quote attribute values that don't require quoting
per HTML5 parsing rules.
quote_char=u'"'|u"'"
Use given quote character for attribute quoting. Default is to
use double quote unless attribute value contains a double quote,
in which case single quotes are used instead.
escape_lt_in_attrs=False|True
Whether to escape < in attribute values.
escape_rcdata=False|True
Whether to escape characters that need to be escaped within normal
elements within rcdata elements such as style.
resolve_entities=True|False
Whether to resolve named character entities that appear in the
source tree. The XML predefined entities &lt; &gt; &amp; &quot; &apos;
are unaffected by this setting.
strip_whitespace=False|True
Whether to remove semantically meaningless whitespace. (This
compresses all whitespace to a single space except within pre.)
minimize_boolean_attributes=True|False
Shortens boolean attributes to give just the attribute value,
for example <input disabled="disabled"> becomes <input disabled>.
use_trailing_solidus=False|True
Includes a close-tag slash at the end of the start tag of void
elements (empty elements whose end tag is forbidden). E.g. <hr/>.
space_before_trailing_solidus=True|False
Places a space immediately before the closing slash in a tag
using a trailing solidus. E.g. <hr />. Requires use_trailing_solidus.
sanitize=False|True
Strip all unsafe or unknown constructs from output.
See `html5lib user documentation`_
omit_optional_tags=True|False
Omit start/end tags that are optional.
alphabetical_attributes=False|True
Reorder attributes to be in alphabetical order.
.. _html5lib user documentation: http://code.google.com/p/html5lib/wiki/UserDocumentation
"""
if 'quote_char' in kwargs:
self.use_best_quote_char = False
for attr in self.options:
setattr(self, attr, kwargs.get(attr, getattr(self, attr)))
self.errors = []
self.strict = False
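# Editor's usage sketch (assuming the standard html5lib entry points):
#
#   import html5lib
#   from html5lib import treewalkers
#   dom = html5lib.parse('<p class=foo>Hi!')
#   walker = treewalkers.getTreeWalker('etree')
#   HTMLSerializer(omit_optional_tags=False).render(walker(dom))
#   # -> roughly u'<html><head></head><body><p class=foo>Hi!</p></body></html>'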
def encode(self, string):
assert(isinstance(string, text_type))
if self.encoding:
return string.encode(self.encoding, unicode_encode_errors)
else:
return string
def encodeStrict(self, string):
assert(isinstance(string, text_type))
if self.encoding:
return string.encode(self.encoding, "strict")
else:
return string
def serialize(self, treewalker, encoding=None):
self.encoding = encoding
in_cdata = False
self.errors = []
if encoding and self.inject_meta_charset:
from ..filters.inject_meta_charset import Filter
treewalker = Filter(treewalker, encoding)
# WhitespaceFilter should be used before OptionalTagFilter
# for maximum efficiently of this latter filter
if self.strip_whitespace:
from ..filters.whitespace import Filter
treewalker = Filter(treewalker)
if self.sanitize:
from ..filters.sanitizer import Filter
treewalker = Filter(treewalker)
if self.omit_optional_tags:
from ..filters.optionaltags import Filter
treewalker = Filter(treewalker)
# Alphabetical attributes must be last, as other filters
# could add attributes and alter the order
if self.alphabetical_attributes:
from ..filters.alphabeticalattributes import Filter
treewalker = Filter(treewalker)
for token in treewalker:
type = token["type"]
if type == "Doctype":
doctype = "<!DOCTYPE %s" % token["name"]
if token["publicId"]:
doctype += ' PUBLIC "%s"' % token["publicId"]
elif token["systemId"]:
doctype += " SYSTEM"
if token["systemId"]:
if token["systemId"].find('"') >= 0:
if token["systemId"].find("'") >= 0:
self.serializeError(_("System identifer contains both single and double quote characters"))
quote_char = "'"
else:
quote_char = '"'
doctype += " %s%s%s" % (quote_char, token["systemId"], quote_char)
doctype += ">"
yield self.encodeStrict(doctype)
elif type in ("Characters", "SpaceCharacters"):
if type == "SpaceCharacters" or in_cdata:
if in_cdata and token["data"].find("</") >= 0:
self.serializeError(_("Unexpected </ in CDATA"))
yield self.encode(token["data"])
else:
yield self.encode(escape(token["data"]))
elif type in ("StartTag", "EmptyTag"):
name = token["name"]
yield self.encodeStrict("<%s" % name)
if name in rcdataElements and not self.escape_rcdata:
in_cdata = True
elif in_cdata:
self.serializeError(_("Unexpected child element of a CDATA element"))
for (attr_namespace, attr_name), attr_value in token["data"].items():
# TODO: Add namespace support here
k = attr_name
v = attr_value
yield self.encodeStrict(' ')
yield self.encodeStrict(k)
if not self.minimize_boolean_attributes or \
(k not in booleanAttributes.get(name, tuple())
and k not in booleanAttributes.get("", tuple())):
yield self.encodeStrict("=")
if self.quote_attr_values or not v:
quote_attr = True
else:
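# The reduce below asks: does v contain any character that forces
# quoting, i.e. whitespace or one of > " ' = ?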
quote_attr = reduce(lambda x, y: x or (y in v),
spaceCharacters + ">\"'=", False)
v = v.replace("&", "&")
if self.escape_lt_in_attrs:
v = v.replace("<", "<")
if quote_attr:
quote_char = self.quote_char
if self.use_best_quote_char:
if "'" in v and '"' not in v:
quote_char = '"'
elif '"' in v and "'" not in v:
quote_char = "'"
if quote_char == "'":
v = v.replace("'", "'")
else:
v = v.replace('"', """)
yield self.encodeStrict(quote_char)
yield self.encode(v)
yield self.encodeStrict(quote_char)
else:
yield self.encode(v)
if name in voidElements and self.use_trailing_solidus:
if self.space_before_trailing_solidus:
yield self.encodeStrict(" /")
else:
yield self.encodeStrict("/")
yield self.encode(">")
elif type == "EndTag":
name = token["name"]
if name in rcdataElements:
in_cdata = False
elif in_cdata:
self.serializeError(_("Unexpected child element of a CDATA element"))
yield self.encodeStrict("</%s>" % name)
elif type == "Comment":
data = token["data"]
if data.find("--") >= 0:
self.serializeError(_("Comment contains --"))
yield self.encodeStrict("<!--%s-->" % token["data"])
elif type == "Entity":
name = token["name"]
key = name + ";"
if key not in entities:
self.serializeError(_("Entity %s not recognized" % name))
if self.resolve_entities and key not in xmlEntities:
data = entities[key]
else:
data = "&%s;" % name
yield self.encodeStrict(data)
else:
self.serializeError(token["data"])
def render(self, treewalker, encoding=None):
if encoding:
return b"".join(list(self.serialize(treewalker, encoding)))
else:
return "".join(list(self.serialize(treewalker)))
def serializeError(self, data="XXX ERROR MESSAGE NEEDED"):
# XXX The idea is to make data mandatory.
self.errors.append(data)
if self.strict:
raise SerializeError
def SerializeError(Exception):
"""Error in serialized tree"""
pass
| mpl-2.0 |
xbianonpi/xbian-package-development | content/usr/local/lib/python2.7/dist-packages/pip/index.py | 18 | 27606 | """Routines related to PyPI, indexes"""
import sys
import os
import re
import gzip
import mimetypes
try:
import threading
except ImportError:
import dummy_threading as threading
import posixpath
import pkg_resources
import random
import socket
import string
import zlib
from pip.log import logger
from pip.util import Inf
from pip.util import normalize_name, splitext
from pip.exceptions import DistributionNotFound, BestVersionAlreadyInstalled
from pip.backwardcompat import (WindowsError, BytesIO,
Queue, httplib, urlparse,
URLError, HTTPError, u,
product, url2pathname)
from pip.backwardcompat import Empty as QueueEmpty
from pip.download import urlopen, path_to_url2, url_to_path, geturl, Urllib2HeadRequest
__all__ = ['PackageFinder']
DEFAULT_MIRROR_HOSTNAME = "last.pypi.python.org"
class PackageFinder(object):
"""This finds packages.
This is meant to match easy_install's technique for looking for
packages, by reading pages and looking for appropriate links
"""
def __init__(self, find_links, index_urls,
use_mirrors=False, mirrors=None, main_mirror_url=None):
self.find_links = find_links
self.index_urls = index_urls
self.dependency_links = []
self.cache = PageCache()
# These are boring links that have already been logged somehow:
self.logged_links = set()
if use_mirrors:
self.mirror_urls = self._get_mirror_urls(mirrors, main_mirror_url)
logger.info('Using PyPI mirrors: %s' % ', '.join(self.mirror_urls))
else:
self.mirror_urls = []
def add_dependency_links(self, links):
## FIXME: this shouldn't be global list this, it should only
## apply to requirements of the package that specifies the
## dependency_links value
## FIXME: also, we should track comes_from (i.e., use Link)
self.dependency_links.extend(links)
@staticmethod
def _sort_locations(locations):
"""
Sort locations into "files" (archives) and "urls", and return
a pair of lists (files,urls)
"""
files = []
urls = []
# puts the url for the given file path into the appropriate
# list
def sort_path(path):
url = path_to_url2(path)
if mimetypes.guess_type(url, strict=False)[0] == 'text/html':
urls.append(url)
else:
files.append(url)
for url in locations:
if url.startswith('file:'):
path = url_to_path(url)
if os.path.isdir(path):
path = os.path.realpath(path)
for item in os.listdir(path):
sort_path(os.path.join(path, item))
elif os.path.isfile(path):
sort_path(path)
else:
urls.append(url)
return files, urls
def find_requirement(self, req, upgrade):
url_name = req.url_name
# Only check main index if index URL is given:
main_index_url = None
if self.index_urls:
# Check that we have the url_name correctly spelled:
main_index_url = Link(posixpath.join(self.index_urls[0], url_name))
# This will also cache the page, so it's okay that we get it again later:
page = self._get_page(main_index_url, req)
if page is None:
url_name = self._find_url_name(Link(self.index_urls[0]), url_name, req) or req.url_name
# Combine index URLs with mirror URLs here to allow
# adding more index URLs from requirements files
all_index_urls = self.index_urls + self.mirror_urls
def mkurl_pypi_url(url):
loc = posixpath.join(url, url_name)
# For maximum compatibility with easy_install, ensure the path
# ends in a trailing slash. Although this isn't in the spec
# (and PyPI can handle it without the slash) some other index
# implementations might break if they relied on easy_install's behavior.
if not loc.endswith('/'):
loc = loc + '/'
return loc
if url_name is not None:
locations = [
mkurl_pypi_url(url)
for url in all_index_urls] + self.find_links
else:
locations = list(self.find_links)
locations.extend(self.dependency_links)
for version in req.absolute_versions:
if url_name is not None and main_index_url is not None:
locations = [
posixpath.join(main_index_url.url, version)] + locations
file_locations, url_locations = self._sort_locations(locations)
locations = [Link(url) for url in url_locations]
logger.debug('URLs to search for versions for %s:' % req)
for location in locations:
logger.debug('* %s' % location)
found_versions = []
found_versions.extend(
self._package_versions(
[Link(url, '-f') for url in self.find_links], req.name.lower()))
page_versions = []
for page in self._get_pages(locations, req):
logger.debug('Analyzing links from page %s' % page.url)
logger.indent += 2
try:
page_versions.extend(self._package_versions(page.links, req.name.lower()))
finally:
logger.indent -= 2
dependency_versions = list(self._package_versions(
[Link(url) for url in self.dependency_links], req.name.lower()))
if dependency_versions:
logger.info('dependency_links found: %s' % ', '.join([link.url for parsed, link, version in dependency_versions]))
file_versions = list(self._package_versions(
[Link(url) for url in file_locations], req.name.lower()))
if not found_versions and not page_versions and not dependency_versions and not file_versions:
logger.fatal('Could not find any downloads that satisfy the requirement %s' % req)
raise DistributionNotFound('No distributions at all found for %s' % req)
if req.satisfied_by is not None:
found_versions.append((req.satisfied_by.parsed_version, Inf, req.satisfied_by.version))
if file_versions:
file_versions.sort(reverse=True)
logger.info('Local files found: %s' % ', '.join([url_to_path(link.url) for parsed, link, version in file_versions]))
found_versions = file_versions + found_versions
all_versions = found_versions + page_versions + dependency_versions
applicable_versions = []
for (parsed_version, link, version) in all_versions:
if version not in req.req:
logger.info("Ignoring link %s, version %s doesn't match %s"
% (link, version, ','.join([''.join(s) for s in req.req.specs])))
continue
applicable_versions.append((link, version))
applicable_versions = sorted(applicable_versions, key=lambda v: pkg_resources.parse_version(v[1]), reverse=True)
existing_applicable = bool([link for link, version in applicable_versions if link is Inf])
if not upgrade and existing_applicable:
if applicable_versions[0][1] is Inf:
logger.info('Existing installed version (%s) is most up-to-date and satisfies requirement'
% req.satisfied_by.version)
raise BestVersionAlreadyInstalled
else:
logger.info('Existing installed version (%s) satisfies requirement (most up-to-date version is %s)'
% (req.satisfied_by.version, applicable_versions[0][1]))
return None
if not applicable_versions:
logger.fatal('Could not find a version that satisfies the requirement %s (from versions: %s)'
% (req, ', '.join([version for parsed_version, link, version in found_versions])))
raise DistributionNotFound('No distributions matching the version for %s' % req)
if applicable_versions[0][0] is Inf:
# We have an existing version, and its the best version
logger.info('Installed version (%s) is most up-to-date (past versions: %s)'
% (req.satisfied_by.version, ', '.join([version for link, version in applicable_versions[1:]]) or 'none'))
raise BestVersionAlreadyInstalled
if len(applicable_versions) > 1:
logger.info('Using version %s (newest of versions: %s)' %
(applicable_versions[0][1], ', '.join([version for link, version in applicable_versions])))
return applicable_versions[0][0]
def _find_url_name(self, index_url, url_name, req):
"""Finds the true URL name of a package, when the given name isn't quite correct.
This is usually used to implement case-insensitivity."""
if not index_url.url.endswith('/'):
# Vaguely part of the PyPI API... weird but true.
## FIXME: bad to modify this?
index_url.url += '/'
page = self._get_page(index_url, req)
if page is None:
logger.fatal('Cannot fetch index base URL %s' % index_url)
return
norm_name = normalize_name(req.url_name)
for link in page.links:
base = posixpath.basename(link.path.rstrip('/'))
if norm_name == normalize_name(base):
logger.notify('Real name of requirement %s is %s' % (url_name, base))
return base
return None
def _get_pages(self, locations, req):
"""Yields (page, page_url) from the given locations, skipping
locations that have errors, and adding download/homepage links"""
pending_queue = Queue()
for location in locations:
pending_queue.put(location)
done = []
seen = set()
threads = []
for i in range(min(10, len(locations))):
t = threading.Thread(target=self._get_queued_page, args=(req, pending_queue, done, seen))
t.setDaemon(True)
threads.append(t)
t.start()
for t in threads:
t.join()
return done
_log_lock = threading.Lock()
def _get_queued_page(self, req, pending_queue, done, seen):
while 1:
try:
location = pending_queue.get(False)
except QueueEmpty:
return
if location in seen:
continue
seen.add(location)
page = self._get_page(location, req)
if page is None:
continue
done.append(page)
for link in page.rel_links():
pending_queue.put(link)
_egg_fragment_re = re.compile(r'#egg=([^&]*)')
_egg_info_re = re.compile(r'([a-z0-9_.]+)-([a-z0-9_.-]+)', re.I)
_py_version_re = re.compile(r'-py([123]\.?[0-9]?)$')
def _sort_links(self, links):
"Returns elements of links in order, non-egg links first, egg links second, while eliminating duplicates"
eggs, no_eggs = [], []
seen = set()
for link in links:
if link not in seen:
seen.add(link)
if link.egg_fragment:
eggs.append(link)
else:
no_eggs.append(link)
return no_eggs + eggs
def _package_versions(self, links, search_name):
for link in self._sort_links(links):
for v in self._link_package_versions(link, search_name):
yield v
def _link_package_versions(self, link, search_name):
"""
Return an iterable of triples (pkg_resources_version_key,
link, python_version) that can be extracted from the given
link.
Meant to be overridden by subclasses, not called by clients.
"""
if link.egg_fragment:
egg_info = link.egg_fragment
else:
egg_info, ext = link.splitext()
if not ext:
if link not in self.logged_links:
logger.debug('Skipping link %s; not a file' % link)
self.logged_links.add(link)
return []
if egg_info.endswith('.tar'):
# Special double-extension case:
egg_info = egg_info[:-4]
ext = '.tar' + ext
if ext not in ('.tar.gz', '.tar.bz2', '.tar', '.tgz', '.zip'):
if link not in self.logged_links:
logger.debug('Skipping link %s; unknown archive format: %s' % (link, ext))
self.logged_links.add(link)
return []
if "macosx10" in link.path and ext == '.zip':
if link not in self.logged_links:
logger.debug('Skipping link %s; macosx10 one' % (link))
self.logged_links.add(link)
return []
version = self._egg_info_matches(egg_info, search_name, link)
if version is None:
logger.debug('Skipping link %s; wrong project name (not %s)' % (link, search_name))
return []
match = self._py_version_re.search(version)
if match:
version = version[:match.start()]
py_version = match.group(1)
if py_version != sys.version[:3]:
logger.debug('Skipping %s because Python version is incorrect' % link)
return []
logger.debug('Found link %s, version: %s' % (link, version))
return [(pkg_resources.parse_version(version),
link,
version)]
def _egg_info_matches(self, egg_info, search_name, link):
match = self._egg_info_re.search(egg_info)
if not match:
logger.debug('Could not parse version from link: %s' % link)
return None
name = match.group(0).lower()
# To match the "safe" name that pkg_resources creates:
name = name.replace('_', '-')
# project name and version must be separated by a dash
look_for = search_name.lower() + "-"
if name.startswith(look_for):
return match.group(0)[len(look_for):]
else:
return None
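# Editor's sketch: _egg_info_matches('flask-0.9', 'flask', link) returns
# '0.9' -- the lowered, dash-normalised name must start with 'flask-' for
# the version part to be accepted.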
def _get_page(self, link, req):
return HTMLPage.get_page(link, req, cache=self.cache)
def _get_mirror_urls(self, mirrors=None, main_mirror_url=None):
"""Retrieves a list of URLs from the main mirror DNS entry
unless a list of mirror URLs are passed.
"""
if not mirrors:
mirrors = get_mirrors(main_mirror_url)
# Should this be made "less random"? E.g. netselect like?
random.shuffle(mirrors)
mirror_urls = set()
for mirror_url in mirrors:
# Make sure we have a valid URL. (The original test,
# `("http://" or "https://" or "file://") in mirror_url`, always
# evaluated its parenthesised operand to just "http://", so only that
# one scheme was ever checked; test each scheme explicitly instead.)
if not any(scheme in mirror_url
           for scheme in ("http://", "https://", "file://")):
    mirror_url = "http://%s" % mirror_url
if not mirror_url.endswith("/simple"):
mirror_url = "%s/simple/" % mirror_url
mirror_urls.add(mirror_url)
return list(mirror_urls)
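# Editor's usage sketch for this pip vintage (names hedged; InstallRequirement
# lives in pip.req in contemporaneous releases):
#
#   from pip.req import InstallRequirement
#   finder = PackageFinder(find_links=[],
#                          index_urls=['http://pypi.python.org/simple/'])
#   req = InstallRequirement.from_line('Flask==0.9')
#   link = finder.find_requirement(req, upgrade=False)
#
# find_requirement() returns the best matching Link (or None when the
# installed version already satisfies the requirement without upgrade),
# raises DistributionNotFound when nothing matches, and raises
# BestVersionAlreadyInstalled when the installed version is the best one.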
class PageCache(object):
"""Cache of HTML pages"""
failure_limit = 3
def __init__(self):
self._failures = {}
self._pages = {}
self._archives = {}
def too_many_failures(self, url):
return self._failures.get(url, 0) >= self.failure_limit
def get_page(self, url):
return self._pages.get(url)
def is_archive(self, url):
return self._archives.get(url, False)
def set_is_archive(self, url, value=True):
self._archives[url] = value
def add_page_failure(self, url, level):
self._failures[url] = self._failures.get(url, 0)+level
def add_page(self, urls, page):
for url in urls:
self._pages[url] = page
class HTMLPage(object):
"""Represents one page, along with its URL"""
## FIXME: these regexes are horrible hacks:
_homepage_re = re.compile(r'<th>\s*home\s*page', re.I)
_download_re = re.compile(r'<th>\s*download\s+url', re.I)
## These aren't so awful:
_rel_re = re.compile("""<[^>]*\srel\s*=\s*['"]?([^'">]+)[^>]*>""", re.I)
_href_re = re.compile('href=(?:"([^"]*)"|\'([^\']*)\'|([^>\\s\\n]*))', re.I|re.S)
_base_re = re.compile(r"""<base\s+href\s*=\s*['"]?([^'">]+)""", re.I)
def __init__(self, content, url, headers=None):
self.content = content
self.url = url
self.headers = headers
def __str__(self):
return self.url
@classmethod
def get_page(cls, link, req, cache=None, skip_archives=True):
url = link.url
url = url.split('#', 1)[0]
if cache.too_many_failures(url):
return None
# Check for VCS schemes that do not support lookup as web pages.
from pip.vcs import VcsSupport
for scheme in VcsSupport.schemes:
if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
logger.debug('Cannot look at %(scheme)s URL %(link)s' % locals())
return None
if cache is not None:
inst = cache.get_page(url)
if inst is not None:
return inst
try:
if skip_archives:
if cache is not None:
if cache.is_archive(url):
return None
filename = link.filename
for bad_ext in ['.tar', '.tar.gz', '.tar.bz2', '.tgz', '.zip']:
if filename.endswith(bad_ext):
content_type = cls._get_content_type(url)
if content_type.lower().startswith('text/html'):
break
else:
logger.debug('Skipping page %s because of Content-Type: %s' % (link, content_type))
if cache is not None:
cache.set_is_archive(url)
return None
logger.debug('Getting page %s' % url)
# Tack index.html onto file:// URLs that point to directories
(scheme, netloc, path, params, query, fragment) = urlparse.urlparse(url)
if scheme == 'file' and os.path.isdir(url2pathname(path)):
# add trailing slash if not present so urljoin doesn't trim final segment
if not url.endswith('/'):
url += '/'
url = urlparse.urljoin(url, 'index.html')
logger.debug(' file: URL is directory, getting %s' % url)
resp = urlopen(url)
real_url = geturl(resp)
headers = resp.info()
contents = resp.read()
encoding = headers.get('Content-Encoding', None)
#XXX need to handle exceptions and add testing for this
if encoding is not None:
if encoding == 'gzip':
contents = gzip.GzipFile(fileobj=BytesIO(contents)).read()
if encoding == 'deflate':
contents = zlib.decompress(contents)
inst = cls(u(contents), real_url, headers)
except (HTTPError, URLError, socket.timeout, socket.error, OSError, WindowsError):
e = sys.exc_info()[1]
desc = str(e)
if isinstance(e, socket.timeout):
log_meth = logger.info
level = 1
desc = 'timed out'
elif isinstance(e, URLError):
log_meth = logger.info
if hasattr(e, 'reason') and isinstance(e.reason, socket.timeout):
desc = 'timed out'
level = 1
else:
level = 2
elif isinstance(e, HTTPError) and e.code == 404:
## FIXME: notify?
log_meth = logger.info
level = 2
else:
log_meth = logger.info
level = 1
log_meth('Could not fetch URL %s: %s' % (link, desc))
log_meth('Will skip URL %s when looking for download links for %s' % (link.url, req))
if cache is not None:
cache.add_page_failure(url, level)
return None
if cache is not None:
cache.add_page([url, real_url], inst)
return inst
@staticmethod
def _get_content_type(url):
"""Get the Content-Type of the given url, using a HEAD request"""
scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
if not scheme in ('http', 'https', 'ftp', 'ftps'):
## FIXME: some warning or something?
## assertion error?
return ''
req = Urllib2HeadRequest(url, headers={'Host': netloc})
resp = urlopen(req)
try:
if hasattr(resp, 'code') and resp.code != 200 and scheme not in ('ftp', 'ftps'):
## FIXME: doesn't handle redirects
return ''
return resp.info().get('content-type', '')
finally:
resp.close()
@property
def base_url(self):
if not hasattr(self, "_base_url"):
match = self._base_re.search(self.content)
if match:
self._base_url = match.group(1)
else:
self._base_url = self.url
return self._base_url
@property
def links(self):
"""Yields all links in the page"""
for match in self._href_re.finditer(self.content):
url = match.group(1) or match.group(2) or match.group(3)
url = self.clean_link(urlparse.urljoin(self.base_url, url))
yield Link(url, self)
def rel_links(self):
for url in self.explicit_rel_links():
yield url
for url in self.scraped_rel_links():
yield url
def explicit_rel_links(self, rels=('homepage', 'download')):
"""Yields all links with the given relations"""
for match in self._rel_re.finditer(self.content):
found_rels = match.group(1).lower().split()
for rel in rels:
if rel in found_rels:
break
else:
continue
match = self._href_re.search(match.group(0))
if not match:
continue
url = match.group(1) or match.group(2) or match.group(3)
url = self.clean_link(urlparse.urljoin(self.base_url, url))
yield Link(url, self)
def scraped_rel_links(self):
for regex in (self._homepage_re, self._download_re):
match = regex.search(self.content)
if not match:
continue
href_match = self._href_re.search(self.content, pos=match.end())
if not href_match:
continue
url = href_match.group(1) or href_match.group(2) or href_match.group(3)
if not url:
continue
url = self.clean_link(urlparse.urljoin(self.base_url, url))
yield Link(url, self)
_clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)
def clean_link(self, url):
"""Makes sure a link is fully encoded. That is, if a ' ' shows up in
the link, it will be rewritten to %20 (while not over-quoting
% or other characters)."""
return self._clean_re.sub(
lambda match: '%%%2x' % ord(match.group(0)), url)
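# Editor's sketch: clean_link() percent-encodes anything outside the safe
# set, e.g.
#
#   HTMLPage('', 'http://x/').clean_link('http://x/a b')
#   # -> 'http://x/a%20b'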
class Link(object):
def __init__(self, url, comes_from=None):
self.url = url
self.comes_from = comes_from
def __str__(self):
if self.comes_from:
return '%s (from %s)' % (self.url, self.comes_from)
else:
return self.url
def __repr__(self):
return '<Link %s>' % self
def __eq__(self, other):
return self.url == other.url
def __hash__(self):
return hash(self.url)
@property
def filename(self):
_, netloc, path, _, _ = urlparse.urlsplit(self.url)
name = posixpath.basename(path.rstrip('/')) or netloc
assert name, ('URL %r produced no filename' % self.url)
return name
@property
def scheme(self):
return urlparse.urlsplit(self.url)[0]
@property
def path(self):
return urlparse.urlsplit(self.url)[2]
def splitext(self):
return splitext(posixpath.basename(self.path.rstrip('/')))
@property
def url_without_fragment(self):
scheme, netloc, path, query, fragment = urlparse.urlsplit(self.url)
return urlparse.urlunsplit((scheme, netloc, path, query, None))
_egg_fragment_re = re.compile(r'#egg=([^&]*)')
@property
def egg_fragment(self):
match = self._egg_fragment_re.search(self.url)
if not match:
return None
return match.group(1)
_hash_re = re.compile(r'(sha1|sha224|sha384|sha256|sha512|md5)=([a-f0-9]+)')
@property
def hash(self):
match = self._hash_re.search(self.url)
if match:
return match.group(2)
return None
@property
def hash_name(self):
match = self._hash_re.search(self.url)
if match:
return match.group(1)
return None
@property
def show_url(self):
return posixpath.basename(self.url.split('#', 1)[0].split('?', 1)[0])
def get_requirement_from_url(url):
"""Get a requirement from the URL, if possible. This looks for #egg
in the URL"""
link = Link(url)
egg_info = link.egg_fragment
if not egg_info:
egg_info = splitext(link.filename)[0]
return package_to_requirement(egg_info)
def package_to_requirement(package_name):
"""Translate a name like Foo-1.2 to Foo==1.3"""
match = re.search(r'^(.*?)-(dev|\d.*)', package_name)
if match:
name = match.group(1)
version = match.group(2)
else:
name = package_name
version = ''
if version:
return '%s==%s' % (name, version)
else:
return name
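# Editor's sketch:
#
#   package_to_requirement('Foo-1.2')   # -> 'Foo==1.2'
#   package_to_requirement('Foo-dev')   # -> 'Foo==dev'
#   package_to_requirement('Foo')       # -> 'Foo'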
def get_mirrors(hostname=None):
"""Return the list of mirrors from the last record found on the DNS
entry::
>>> from pip.index import get_mirrors
>>> get_mirrors()
['a.pypi.python.org', 'b.pypi.python.org', 'c.pypi.python.org',
'd.pypi.python.org']
Originally written for the distutils2 project by Alexis Metaireau.
"""
if hostname is None:
hostname = DEFAULT_MIRROR_HOSTNAME
# return the last mirror registered on PyPI.
last_mirror_hostname = None
try:
last_mirror_hostname = socket.gethostbyname_ex(hostname)[0]
except socket.gaierror:
return []
if not last_mirror_hostname or last_mirror_hostname == DEFAULT_MIRROR_HOSTNAME:
last_mirror_hostname = "z.pypi.python.org"
end_letter = last_mirror_hostname.split(".", 1)
# determine the list from the last one.
return ["%s.%s" % (s, end_letter[1]) for s in string_range(end_letter[0])]
def string_range(last):
"""Compute the range of string between "a" and last.
This works for simple "a to z" lists, but also for "a to zz" lists.
"""
for k in range(len(last)):
for x in product(string.ascii_lowercase, repeat=k+1):
result = ''.join(x)
yield result
if result == last:
return
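if __name__ == '__main__':
    # Editor's sketch: a quick self-check of string_range(). The generator
    # walks 'a'..'z', then 'aa'..'zz', stopping once `last` is produced.
    assert list(string_range('c')) == ['a', 'b', 'c']
    assert list(string_range('aa'))[-2:] == ['z', 'aa']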
| gpl-2.0 |
terrycojones/dark-matter | dark/mutations.py | 1 | 16454 | import os
from collections import defaultdict
import numpy as np
try:
import matplotlib
if not os.environ.get('DISPLAY'):
# Use non-interactive Agg backend
matplotlib.use('Agg')
import matplotlib.pyplot as plt
except ImportError:
import platform
if platform.python_implementation() == 'PyPy':
# PyPy doesn't have a version of matplotlib. Make a fake
# class that raises if it is used. This allows us to use other
# 'dark' code that happens to import dark.mutations but not use the
# functions that rely on matplotlib.
class plt(object):
def __getattr__(self, _):
raise NotImplementedError(
'matplotlib is not supported under pypy')
else:
raise
from random import choice, uniform
from dark import ncbidb
def basePlotter(blastHits, title):
"""
Plot the reads and the subject, so that bases in the reads which are
different from the subject are shown. Else a '.' is shown.
like so:
subject_gi ATGCGTACGTACGACACC
read_1 A......TTC..T
@param blastHits: A L{dark.blast.BlastHits} instance.
@param title: A C{str} sequence title that was matched by BLAST. We plot
the reads that matched this title.
"""
result = []
params = blastHits.plotParams
assert params is not None, ('Oops, it looks like you forgot to run '
'computePlotInfo.')
sequence = ncbidb.getSequence(title, blastHits.records.blastDb)
subject = sequence.seq
gi = title.split('|')[1]
sub = '%s\t \t \t%s' % (gi, subject)
result.append(sub)
plotInfo = blastHits.titles[title]['plotInfo']
assert plotInfo is not None, ('Oops, it looks like you forgot to run '
'computePlotInfo.')
items = plotInfo['items']
count = 0
for item in items:
count += 1
hsp = item['hsp']
queryTitle = blastHits.fasta[item['readNum']].id
# If the product of the subject and query frame values is +ve,
# then they're either both +ve or both -ve, so we just use the
# query as is. Otherwise, we need to reverse complement it.
if item['frame']['subject'] * item['frame']['query'] > 0:
query = blastHits.fasta[item['readNum']].seq
reverse = False
else:
# One of the subject or query has negative sense.
query = blastHits.fasta[
item['readNum']].reverse_complement().seq
reverse = True
query = query.upper()
queryStart = hsp['queryStart']
subjectStart = hsp['subjectStart']
queryEnd = hsp['queryEnd']
subjectEnd = hsp['subjectEnd']
# Before comparing the read to the subject, make a string of the
# same length as the subject, which contains the read and
# has ' ' where the read does not match.
# 3 parts need to be taken into account:
# 1) the left offset (if the query doesn't stick out to the left)
# 2) the query. if the frame is -1, it has to be reversed.
# The query consists of 3 parts: left, middle (control for gaps)
# 3) the right offset
# Do part 1) and 2).
if queryStart < 0:
# The query is sticking out to the left.
leftQuery = ''
if subjectStart == 0:
# The match starts at the first base of the subject.
middleLeftQuery = ''
else:
# The match starts into the subject.
# Determine the length of the not matching query
# part to the left.
leftOffset = -1 * queryStart
rightOffset = subjectStart + leftOffset
middleLeftQuery = query[leftOffset:rightOffset]
else:
# The query is not sticking out to the left
# make the left offset.
leftQuery = queryStart * ' '
leftQueryOffset = subjectStart - queryStart
middleLeftQuery = query[:leftQueryOffset]
# Do part 3).
# Disregard gaps in subject while adding.
matchQuery = item['origHsp'].query
matchSubject = item['origHsp'].sbjct
index = 0
mid = ''
for item in range(len(matchQuery)):
if matchSubject[index] != ' ':
mid += matchQuery[index]
index += 1
# if the query has been reversed, turn the matched part around
if reverse:
rev = ''
toReverse = mid
reverseDict = {' ': ' ', '-': '-', 'A': 'T', 'T': 'A',
'C': 'G', 'G': 'C', '.': '.', 'N': 'N'}
for item in toReverse:
newItem = reverseDict[item]
rev += newItem
mid = rev[::-1]
middleQuery = middleLeftQuery + mid
# add right not-matching part of the query
rightQueryOffset = queryEnd - subjectEnd
rightQuery = query[-rightQueryOffset:]
middleQuery += rightQuery
read = leftQuery + middleQuery
# do part 3)
offset = len(subject) - len(read)
# if the read is sticking out to the right
# chop it off
if offset < 0:
read = read[:offset]
# if it's not sticking out, fill the space with ' '
elif offset > 0:
read += offset * ' '
# compare the subject and the read, make a string
# called 'comparison', which contains a '.' if the bases
# are equal and the letter of the read if they are not.
comparison = ''
for readBase, subjectBase in zip(read, subject):
if readBase == ' ':
comparison += ' '
elif readBase == subjectBase:
comparison += '.'
elif readBase != subjectBase:
comparison += readBase
que = '%s \t %s' % (queryTitle, comparison)
result.append(que)
# sanity checks
assert (len(comparison) == len(subject)), (
'%d != %d' % (len(comparison), len(subject)))
index = 0
if comparison[index] == ' ':
index += 1
else:
start = index - 1
assert (start == queryStart or start == -1), (
'%s != %s or %s != -1' % (start, queryStart, start))
return result
def getAPOBECFrequencies(dotAlignment, orig, new, pattern):
"""
Gets mutation frequencies if they are in a certain pattern.
@param dotAlignment: result from calling basePlotter
@param orig: A C{str}, naming the original base
@param new: A C{str}, what orig was mutated to
@param pattern: A C{str}m which pattern we're looking for
(must be one of 'cPattern', 'tPattern')
"""
cPattern = ['ACA', 'ACC', 'ACG', 'ACT', 'CCA', 'CCC', 'CCG', 'CCT',
'GCA', 'GCC', 'GCG', 'GCT', 'TCA', 'TCC', 'TCG', 'TCT']
tPattern = ['ATA', 'ATC', 'ATG', 'ATT', 'CTA', 'CTC', 'CTG', 'CTT',
'GTA', 'GTC', 'GTG', 'GTT', 'TTA', 'TTC', 'TTG', 'TTT']
# choose the right pattern
if pattern == 'cPattern':
patterns = cPattern
middleBase = 'C'
else:
patterns = tPattern
middleBase = 'T'
# generate the freqs dict with the right pattern
freqs = defaultdict(int)
for pattern in patterns:
freqs[pattern] = 0
# get the subject sequence from dotAlignment
subject = dotAlignment[0].split('\t')[3]
# exclude the subject from the dotAlignment, so just the queries
# are left over
queries = dotAlignment[1:]
for item in queries:
query = item.split('\t')[1]
index = 0
for queryBase in query:
qBase = query[index]
sBase = subject[index]
if qBase == new and sBase == orig:
# Note: at index 0, subject[index - 1] is the last base of the
# subject (Python negative indexing). Look up the left neighbour
# before the right one so minusSb is always bound even when the
# match ends at the final base.
minusSb = subject[index - 1]
try:
plusSb = subject[index + 1]
except IndexError:
plusSb = 'end'
motif = '%s%s%s' % (minusSb, middleBase, plusSb)
if motif in freqs:
freqs[motif] += 1
index += 1
return freqs
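# Worked example (editor's note): with orig='C', new='T' and
# pattern='cPattern', a query base 'T' aligned over a subject 'C' whose
# neighbours are 'A' (left) and 'G' (right) increments freqs['ACG'].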
def getCompleteFreqs(blastHits):
"""
Make a dictionary which collects all mutation frequencies from
all reads.
Calls basePlotter to get dotAlignment, which is passed to
getAPOBECFrequencies with the respective parameter, to collect
the frequencies.
@param blastHits: A L{dark.blast.BlastHits} instance.
"""
allFreqs = {}
for title in blastHits.titles:
allFreqs[title] = {
'C>A': {},
'C>G': {},
'C>T': {},
'T>A': {},
'T>C': {},
'T>G': {},
}
basesPlotted = basePlotter(blastHits, title)
for mutation in allFreqs[title]:
orig = mutation[0]
new = mutation[2]
if orig == 'C':
pattern = 'cPattern'
else:
pattern = 'tPattern'
freqs = getAPOBECFrequencies(basesPlotted, orig, new, pattern)
allFreqs[title][mutation] = freqs
numberOfReads = len(blastHits.titles[title]['plotInfo']['items'])
allFreqs[title]['numberOfReads'] = numberOfReads
allFreqs[title]['bitScoreMax'] = blastHits.titles[
title]['plotInfo']['bitScoreMax']
return allFreqs
def makeFrequencyGraph(allFreqs, title, substitution, pattern,
color='blue', createFigure=True, showFigure=True,
readsAx=False):
"""
For a title, make a graph showing the frequencies.
@param allFreqs: result from getCompleteFreqs
@param title: A C{str}, title of virus of which frequencies should be
plotted.
@param substitution: A C{str}, which substitution should be plotted;
must be one of 'C>A', 'C>G', 'C>T', 'T>A', 'T>C', 'T>G'.
@param pattern: A C{str}, which pattern we're looking for ( must be
one of 'cPattern', 'tPattern')
@param color: A C{str}, color of bars.
@param createFigure: If C{True}, create a figure.
@param showFigure: If C{True}, show the created figure.
@param readsAx: If not None, use this as the subplot for displaying reads.
"""
cPattern = ['ACA', 'ACC', 'ACG', 'ACT', 'CCA', 'CCC', 'CCG', 'CCT',
'GCA', 'GCC', 'GCG', 'GCT', 'TCA', 'TCC', 'TCG', 'TCT']
tPattern = ['ATA', 'ATC', 'ATG', 'ATT', 'CTA', 'CTC', 'CTG', 'CTT',
'GTA', 'GTC', 'GTG', 'GTT', 'TTA', 'TTC', 'TTG', 'TTT']
# choose the right pattern
if pattern == 'cPattern':
patterns = cPattern
else:
patterns = tPattern
fig = plt.figure(figsize=(10, 10))
ax = readsAx or fig.add_subplot(111)
# how many bars
N = 16
ind = np.arange(N)
width = 0.4
# make a list in the right order, so that it can be plotted easily
divisor = allFreqs[title]['numberOfReads']
toPlot = allFreqs[title][substitution]
index = 0
data = []
for item in patterns:
newData = toPlot[patterns[index]] / divisor
data.append(newData)
index += 1
# create the bars
ax.bar(ind, data, width, color=color)
maxY = np.max(data) + 5
# axes and labels
if createFigure:
title = title.split('|')[4][:50]
ax.set_title('%s \n %s' % (title, substitution), fontsize=20)
ax.set_ylim(0, maxY)
ax.set_ylabel('Absolute Number of Mutations', fontsize=16)
ax.set_xticks(ind + width)
ax.set_xticklabels(patterns, rotation=45, fontsize=8)
if createFigure is False:
ax.set_xticks(ind + width)
ax.set_xticklabels(patterns, rotation=45, fontsize=0)
else:
if showFigure:
plt.show()
return maxY
def makeFrequencyPanel(allFreqs, patientName):
"""
For a title, make a graph showing the frequencies.
@param allFreqs: result from getCompleteFreqs
@param patientName: A C{str}, title for the panel
"""
titles = sorted(
iter(allFreqs.keys()),
key=lambda title: (allFreqs[title]['bitScoreMax'], title))
origMaxY = 0
cols = 6
rows = len(allFreqs)
figure, ax = plt.subplots(rows, cols, squeeze=False)
substitutions = ['C>A', 'C>G', 'C>T', 'T>A', 'T>C', 'T>G']
colors = ['blue', 'black', 'red', 'yellow', 'green', 'orange']
for i, title in enumerate(titles):
for index in range(6):
for subst in allFreqs[str(title)]:
substitution = substitutions[index]
print(i, index, title, 'substitution', substitutions[index])
if substitution[0] == 'C':
pattern = 'cPattern'
else:
pattern = 'tPattern'
maxY = makeFrequencyGraph(allFreqs, title, substitution,
pattern, color=colors[index],
createFigure=False, showFigure=False,
readsAx=ax[i][index])
if maxY > origMaxY:
origMaxY = maxY
# add title for individual plot.
# if used for other viruses, this will have to be adapted.
if index == 0:
gi = title.split('|')[1]
titles = title.split(' ')
try:
typeIndex = titles.index('type')
except ValueError:
typeNumber = 'gi: %s' % gi
else:
typeNumber = titles[typeIndex + 1]
ax[i][index].set_ylabel(('Type %s \n maxBitScore: %s' % (
typeNumber, allFreqs[title]['bitScoreMax'])), fontsize=10)
# add xAxis tick labels
if i == 0:
ax[i][index].set_title(substitution, fontsize=13)
if i == len(allFreqs) - 1 or i == (len(allFreqs) - 1) / 2:
if index < 3:
pat = ['ACA', 'ACC', 'ACG', 'ACT', 'CCA', 'CCC', 'CCG',
'CCT', 'GCA', 'GCC', 'GCG', 'GCT', 'TCA', 'TCC',
'TCG', 'TCT']
else:
pat = ['ATA', 'ATC', 'ATG', 'ATT', 'CTA', 'CTC', 'CTG',
'CTT', 'GTA', 'GTC', 'GTG', 'GTT', 'TTA', 'TTC',
'TTG', 'TTT']
ax[i][index].set_xticklabels(pat, rotation=45, fontsize=8)
# make Y-axis equal
for i, title in enumerate(allFreqs):
for index in range(6):
a = ax[i][index]
a.set_ylim([0, origMaxY])
# add title of whole panel
figure.suptitle('Mutation Signatures in %s' % patientName, fontsize=20)
figure.set_size_inches(5 * cols, 3 * rows, forward=True)
figure.show()
return allFreqs
def mutateString(original, n, replacements='acgt'):
"""
Mutate C{original} in C{n} places with chars chosen from C{replacements}.
@param original: The original C{str} to mutate.
@param n: The C{int} number of locations to mutate.
@param replacements: The C{str} of replacement letters.
@return: A new C{str} with C{n} places of C{original} mutated.
@raises ValueError: if C{n} is too high, or C{replacement} contains
duplicates, or if no replacement can be made at a certain locus
because C{replacements} is of length one, or if C{original} is of
zero length.
"""
if not original:
raise ValueError('Empty original string passed.')
if n > len(original):
raise ValueError('Cannot make %d mutations in a string of length %d' %
(n, len(original)))
if len(replacements) != len(set(replacements)):
raise ValueError('Replacement string contains duplicates')
if len(replacements) == 1 and original.find(replacements) != -1:
raise ValueError('Impossible replacement')
result = list(original)
length = len(original)
for offset in range(length):
if uniform(0.0, 1.0) < float(n) / (length - offset):
# Mutate.
while True:
new = choice(replacements)
if new != result[offset]:
result[offset] = new
break
n -= 1
if n == 0:
break
return ''.join(result)
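if __name__ == '__main__':
    # Editor's sketch: mutate a short sequence in exactly two positions.
    # (Running this file directly still requires the matplotlib/dark
    # imports at the top to succeed.)
    seq = 'acgtacgt'
    mutated = mutateString(seq, 2)
    assert sum(a != b for a, b in zip(seq, mutated)) == 2, (seq, mutated)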
| mit |
rampage644/impala-cut | thirdparty/hive-0.10.0-cdh4.5.0/lib/py/fb303/FacebookService.py | 54 | 57351 | #
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
from thrift.Thrift import *
from ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class Iface:
"""
Standard base service
"""
def getName(self, ):
"""
Returns a descriptive name of the service
"""
pass
def getVersion(self, ):
"""
Returns the version of the service
"""
pass
def getStatus(self, ):
"""
Gets the status of this service
"""
pass
def getStatusDetails(self, ):
"""
User friendly description of status, such as why the service is in
the dead or warning state, or what is being started or stopped.
"""
pass
def getCounters(self, ):
"""
Gets the counters for this service
"""
pass
def getCounter(self, key):
"""
Gets the value of a single counter
Parameters:
- key
"""
pass
def setOption(self, key, value):
"""
Sets an option
Parameters:
- key
- value
"""
pass
def getOption(self, key):
"""
Gets an option
Parameters:
- key
"""
pass
def getOptions(self, ):
"""
Gets all options
"""
pass
def getCpuProfile(self, profileDurationInSec):
"""
Returns a CPU profile over the given time interval (client and server
must agree on the profile format).
Parameters:
- profileDurationInSec
"""
pass
def aliveSince(self, ):
"""
Returns the unix time that the server has been running since
"""
pass
def reinitialize(self, ):
"""
Tell the server to reload its configuration, reopen log files, etc
"""
pass
def shutdown(self, ):
"""
Suggest a shutdown to the server
"""
pass
class Client(Iface):
"""
Standard base service
"""
def __init__(self, iprot, oprot=None):
self._iprot = self._oprot = iprot
if oprot != None:
self._oprot = oprot
self._seqid = 0
def getName(self, ):
"""
Returns a descriptive name of the service
"""
self.send_getName()
return self.recv_getName()
def send_getName(self, ):
self._oprot.writeMessageBegin('getName', TMessageType.CALL, self._seqid)
args = getName_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getName(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getName_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "getName failed: unknown result");
def getVersion(self, ):
"""
Returns the version of the service
"""
self.send_getVersion()
return self.recv_getVersion()
def send_getVersion(self, ):
self._oprot.writeMessageBegin('getVersion', TMessageType.CALL, self._seqid)
args = getVersion_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getVersion(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getVersion_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "getVersion failed: unknown result");
def getStatus(self, ):
"""
Gets the status of this service
"""
self.send_getStatus()
return self.recv_getStatus()
def send_getStatus(self, ):
self._oprot.writeMessageBegin('getStatus', TMessageType.CALL, self._seqid)
args = getStatus_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getStatus(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getStatus_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "getStatus failed: unknown result");
def getStatusDetails(self, ):
"""
User friendly description of status, such as why the service is in
the dead or warning state, or what is being started or stopped.
"""
self.send_getStatusDetails()
return self.recv_getStatusDetails()
def send_getStatusDetails(self, ):
self._oprot.writeMessageBegin('getStatusDetails', TMessageType.CALL, self._seqid)
args = getStatusDetails_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getStatusDetails(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getStatusDetails_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "getStatusDetails failed: unknown result");
def getCounters(self, ):
"""
Gets the counters for this service
"""
self.send_getCounters()
return self.recv_getCounters()
def send_getCounters(self, ):
self._oprot.writeMessageBegin('getCounters', TMessageType.CALL, self._seqid)
args = getCounters_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getCounters(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getCounters_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "getCounters failed: unknown result");
def getCounter(self, key):
"""
Gets the value of a single counter
Parameters:
- key
"""
self.send_getCounter(key)
return self.recv_getCounter()
def send_getCounter(self, key):
self._oprot.writeMessageBegin('getCounter', TMessageType.CALL, self._seqid)
args = getCounter_args()
args.key = key
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getCounter(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getCounter_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "getCounter failed: unknown result");
def setOption(self, key, value):
"""
Sets an option
Parameters:
- key
- value
"""
self.send_setOption(key, value)
self.recv_setOption()
def send_setOption(self, key, value):
self._oprot.writeMessageBegin('setOption', TMessageType.CALL, self._seqid)
args = setOption_args()
args.key = key
args.value = value
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_setOption(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = setOption_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
return
def getOption(self, key):
"""
Gets an option
Parameters:
- key
"""
self.send_getOption(key)
return self.recv_getOption()
def send_getOption(self, key):
self._oprot.writeMessageBegin('getOption', TMessageType.CALL, self._seqid)
args = getOption_args()
args.key = key
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getOption(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getOption_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "getOption failed: unknown result");
def getOptions(self, ):
"""
Gets all options
"""
self.send_getOptions()
return self.recv_getOptions()
def send_getOptions(self, ):
self._oprot.writeMessageBegin('getOptions', TMessageType.CALL, self._seqid)
args = getOptions_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getOptions(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getOptions_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "getOptions failed: unknown result");
def getCpuProfile(self, profileDurationInSec):
"""
Returns a CPU profile over the given time interval (client and server
must agree on the profile format).
Parameters:
- profileDurationInSec
"""
self.send_getCpuProfile(profileDurationInSec)
return self.recv_getCpuProfile()
def send_getCpuProfile(self, profileDurationInSec):
self._oprot.writeMessageBegin('getCpuProfile', TMessageType.CALL, self._seqid)
args = getCpuProfile_args()
args.profileDurationInSec = profileDurationInSec
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getCpuProfile(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getCpuProfile_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "getCpuProfile failed: unknown result");
def aliveSince(self, ):
"""
Returns the unix time that the server has been running since
"""
self.send_aliveSince()
return self.recv_aliveSince()
def send_aliveSince(self, ):
self._oprot.writeMessageBegin('aliveSince', TMessageType.CALL, self._seqid)
args = aliveSince_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_aliveSince(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = aliveSince_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "aliveSince failed: unknown result");
def reinitialize(self, ):
"""
Tell the server to reload its configuration, reopen log files, etc
"""
self.send_reinitialize()
def send_reinitialize(self, ):
self._oprot.writeMessageBegin('reinitialize', TMessageType.CALL, self._seqid)
args = reinitialize_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def shutdown(self, ):
"""
Suggest a shutdown to the server
"""
self.send_shutdown()
def send_shutdown(self, ):
self._oprot.writeMessageBegin('shutdown', TMessageType.CALL, self._seqid)
args = shutdown_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
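# Illustrative client wiring (an assumption for exposition, not part of the
# generated file): a typical fb303 client opens a buffered socket transport
# with a binary protocol and then calls the service methods defined above.
#
#     from thrift.transport import TSocket, TTransport
#     from thrift.protocol import TBinaryProtocol
#
#     transport = TTransport.TBufferedTransport(TSocket.TSocket('localhost', 9090))
#     protocol = TBinaryProtocol.TBinaryProtocol(transport)
#     client = Client(protocol)
#     transport.open()
#     print client.getName(), client.getVersion()  # host/port are assumptions
#     transport.close()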
class Processor(Iface, TProcessor):
def __init__(self, handler):
self._handler = handler
self._processMap = {}
self._processMap["getName"] = Processor.process_getName
self._processMap["getVersion"] = Processor.process_getVersion
self._processMap["getStatus"] = Processor.process_getStatus
self._processMap["getStatusDetails"] = Processor.process_getStatusDetails
self._processMap["getCounters"] = Processor.process_getCounters
self._processMap["getCounter"] = Processor.process_getCounter
self._processMap["setOption"] = Processor.process_setOption
self._processMap["getOption"] = Processor.process_getOption
self._processMap["getOptions"] = Processor.process_getOptions
self._processMap["getCpuProfile"] = Processor.process_getCpuProfile
self._processMap["aliveSince"] = Processor.process_aliveSince
self._processMap["reinitialize"] = Processor.process_reinitialize
self._processMap["shutdown"] = Processor.process_shutdown
def process(self, iprot, oprot):
(name, type, seqid) = iprot.readMessageBegin()
if name not in self._processMap:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
x.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
return
else:
self._processMap[name](self, seqid, iprot, oprot)
return True
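  # Each process_<method> helper below reads the request struct off the wire,
  # invokes the user-supplied handler, and writes the reply struct back onto
  # the output protocol (one-way calls such as reinitialize and shutdown send
  # no reply).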
def process_getName(self, seqid, iprot, oprot):
args = getName_args()
args.read(iprot)
iprot.readMessageEnd()
result = getName_result()
result.success = self._handler.getName()
oprot.writeMessageBegin("getName", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getVersion(self, seqid, iprot, oprot):
args = getVersion_args()
args.read(iprot)
iprot.readMessageEnd()
result = getVersion_result()
result.success = self._handler.getVersion()
oprot.writeMessageBegin("getVersion", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getStatus(self, seqid, iprot, oprot):
args = getStatus_args()
args.read(iprot)
iprot.readMessageEnd()
result = getStatus_result()
result.success = self._handler.getStatus()
oprot.writeMessageBegin("getStatus", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getStatusDetails(self, seqid, iprot, oprot):
args = getStatusDetails_args()
args.read(iprot)
iprot.readMessageEnd()
result = getStatusDetails_result()
result.success = self._handler.getStatusDetails()
oprot.writeMessageBegin("getStatusDetails", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getCounters(self, seqid, iprot, oprot):
args = getCounters_args()
args.read(iprot)
iprot.readMessageEnd()
result = getCounters_result()
result.success = self._handler.getCounters()
oprot.writeMessageBegin("getCounters", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getCounter(self, seqid, iprot, oprot):
args = getCounter_args()
args.read(iprot)
iprot.readMessageEnd()
result = getCounter_result()
result.success = self._handler.getCounter(args.key)
oprot.writeMessageBegin("getCounter", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_setOption(self, seqid, iprot, oprot):
args = setOption_args()
args.read(iprot)
iprot.readMessageEnd()
result = setOption_result()
self._handler.setOption(args.key, args.value)
oprot.writeMessageBegin("setOption", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getOption(self, seqid, iprot, oprot):
args = getOption_args()
args.read(iprot)
iprot.readMessageEnd()
result = getOption_result()
result.success = self._handler.getOption(args.key)
oprot.writeMessageBegin("getOption", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getOptions(self, seqid, iprot, oprot):
args = getOptions_args()
args.read(iprot)
iprot.readMessageEnd()
result = getOptions_result()
result.success = self._handler.getOptions()
oprot.writeMessageBegin("getOptions", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getCpuProfile(self, seqid, iprot, oprot):
args = getCpuProfile_args()
args.read(iprot)
iprot.readMessageEnd()
result = getCpuProfile_result()
result.success = self._handler.getCpuProfile(args.profileDurationInSec)
oprot.writeMessageBegin("getCpuProfile", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_aliveSince(self, seqid, iprot, oprot):
args = aliveSince_args()
args.read(iprot)
iprot.readMessageEnd()
result = aliveSince_result()
result.success = self._handler.aliveSince()
oprot.writeMessageBegin("aliveSince", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_reinitialize(self, seqid, iprot, oprot):
args = reinitialize_args()
args.read(iprot)
iprot.readMessageEnd()
self._handler.reinitialize()
return
def process_shutdown(self, seqid, iprot, oprot):
args = shutdown_args()
args.read(iprot)
iprot.readMessageEnd()
self._handler.shutdown()
return
# HELPER FUNCTIONS AND STRUCTURES
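# Each RPC gets a generated pair of structs: <method>_args carries the
# request fields and <method>_result stores the return value in field 0
# ('success'). Their read()/write() methods use the C-accelerated fastbinary
# codec when it imported successfully, and otherwise fall back to the
# generic field-by-field protocol loop.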
class getName_args:
thrift_spec = (
)
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getName_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getName_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.STRING, 'success', None, None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getName_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getVersion_args:
thrift_spec = (
)
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getVersion_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getVersion_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.STRING, 'success', None, None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getVersion_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getStatus_args:
thrift_spec = (
)
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getStatus_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getStatus_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.I32, 'success', None, None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.I32:
self.success = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getStatus_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.I32, 0)
oprot.writeI32(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getStatusDetails_args:
thrift_spec = (
)
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getStatusDetails_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getStatusDetails_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.STRING, 'success', None, None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getStatusDetails_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getCounters_args:
thrift_spec = (
)
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getCounters_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getCounters_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.MAP, 'success', (TType.STRING,None,TType.I64,None), None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.MAP:
self.success = {}
(_ktype1, _vtype2, _size0 ) = iprot.readMapBegin()
for _i4 in xrange(_size0):
_key5 = iprot.readString();
_val6 = iprot.readI64();
self.success[_key5] = _val6
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getCounters_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.MAP, 0)
oprot.writeMapBegin(TType.STRING, TType.I64, len(self.success))
for kiter7,viter8 in self.success.items():
oprot.writeString(kiter7)
oprot.writeI64(viter8)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getCounter_args:
"""
Attributes:
- key
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'key', None, None, ), # 1
)
def __init__(self, key=None,):
self.key = key
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.key = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getCounter_args')
if self.key != None:
oprot.writeFieldBegin('key', TType.STRING, 1)
oprot.writeString(self.key)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getCounter_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.I64, 'success', None, None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.I64:
self.success = iprot.readI64();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getCounter_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.I64, 0)
oprot.writeI64(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class setOption_args:
"""
Attributes:
- key
- value
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'key', None, None, ), # 1
(2, TType.STRING, 'value', None, None, ), # 2
)
def __init__(self, key=None, value=None,):
self.key = key
self.value = value
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.key = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.value = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('setOption_args')
if self.key != None:
oprot.writeFieldBegin('key', TType.STRING, 1)
oprot.writeString(self.key)
oprot.writeFieldEnd()
if self.value != None:
oprot.writeFieldBegin('value', TType.STRING, 2)
oprot.writeString(self.value)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class setOption_result:
thrift_spec = (
)
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('setOption_result')
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getOption_args:
"""
Attributes:
- key
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'key', None, None, ), # 1
)
def __init__(self, key=None,):
self.key = key
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.key = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getOption_args')
if self.key != None:
oprot.writeFieldBegin('key', TType.STRING, 1)
oprot.writeString(self.key)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getOption_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.STRING, 'success', None, None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getOption_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getOptions_args:
thrift_spec = (
)
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getOptions_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getOptions_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.MAP, 'success', (TType.STRING,None,TType.STRING,None), None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.MAP:
self.success = {}
(_ktype10, _vtype11, _size9 ) = iprot.readMapBegin()
for _i13 in xrange(_size9):
_key14 = iprot.readString();
_val15 = iprot.readString();
self.success[_key14] = _val15
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getOptions_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.MAP, 0)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.success))
for kiter16,viter17 in self.success.items():
oprot.writeString(kiter16)
oprot.writeString(viter17)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getCpuProfile_args:
"""
Attributes:
- profileDurationInSec
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'profileDurationInSec', None, None, ), # 1
)
def __init__(self, profileDurationInSec=None,):
self.profileDurationInSec = profileDurationInSec
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.profileDurationInSec = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getCpuProfile_args')
if self.profileDurationInSec != None:
oprot.writeFieldBegin('profileDurationInSec', TType.I32, 1)
oprot.writeI32(self.profileDurationInSec)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getCpuProfile_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.STRING, 'success', None, None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getCpuProfile_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class aliveSince_args:
thrift_spec = (
)
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('aliveSince_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class aliveSince_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.I64, 'success', None, None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.I64:
self.success = iprot.readI64();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('aliveSince_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.I64, 0)
oprot.writeI64(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class reinitialize_args:
thrift_spec = (
)
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('reinitialize_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class shutdown_args:
thrift_spec = (
)
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('shutdown_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
| apache-2.0 |
FNCS/ns-3.26 | wutils.py | 64 | 8869 | import os
import os.path
import re
import sys
import subprocess
import shlex
# WAF modules
from waflib import Options, Utils, Logs, TaskGen, Build, Context
from waflib.Errors import WafError
# these are set from the main wscript file
APPNAME=None
VERSION=None
bld=None
def get_command_template(env, arguments=()):
cmd = Options.options.command_template or '%s'
for arg in arguments:
cmd = cmd + " " + arg
return cmd
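# For instance, with no --command-template option given,
# get_command_template(env, ('--verbose',)) evaluates to '%s --verbose'
# (illustrative).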
if hasattr(os.path, "relpath"):
relpath = os.path.relpath # since Python 2.6
else:
def relpath(path, start=os.path.curdir):
"""Return a relative version of a path"""
if not path:
raise ValueError("no path specified")
start_list = os.path.abspath(start).split(os.path.sep)
path_list = os.path.abspath(path).split(os.path.sep)
# Work out how much of the filepath is shared by start and path.
i = len(os.path.commonprefix([start_list, path_list]))
rel_list = [os.path.pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return os.path.curdir
return os.path.join(*rel_list)
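# e.g. on POSIX, relpath('/a/b/c', start='/a/d') -> '../b/c' (illustrative).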
def find_program(program_name, env):
launch_dir = os.path.abspath(Context.launch_dir)
#top_dir = os.path.abspath(Options.cwd_launch)
found_programs = []
for obj in bld.all_task_gen:
if not getattr(obj, 'is_ns3_program', False):
continue
## filter out programs not in the subtree starting at the launch dir
if not (obj.path.abspath().startswith(launch_dir)
or obj.path.get_bld().abspath().startswith(launch_dir)):
continue
name1 = obj.name
name2 = os.path.join(relpath(obj.path.abspath(), launch_dir), obj.name)
names = [name1, name2]
found_programs.extend(names)
if program_name in names:
return obj
raise ValueError("program '%s' not found; available programs are: %r"
% (program_name, found_programs))
def get_proc_env(os_env=None):
env = bld.env
if sys.platform == 'linux2' or sys.platform == 'linux':
pathvar = 'LD_LIBRARY_PATH'
elif sys.platform == 'darwin':
pathvar = 'DYLD_LIBRARY_PATH'
elif sys.platform == 'win32':
pathvar = 'PATH'
elif sys.platform == 'cygwin':
pathvar = 'PATH'
elif sys.platform.startswith('freebsd'):
pathvar = 'LD_LIBRARY_PATH'
else:
Logs.warn(("Don't know how to configure "
"dynamic library path for the platform %r;"
" assuming it's LD_LIBRARY_PATH.") % (sys.platform,))
pathvar = 'LD_LIBRARY_PATH'
proc_env = dict(os.environ)
if os_env is not None:
proc_env.update(os_env)
if pathvar is not None:
if pathvar in proc_env:
proc_env[pathvar] = os.pathsep.join(list(env['NS3_MODULE_PATH']) + [proc_env[pathvar]])
else:
proc_env[pathvar] = os.pathsep.join(list(env['NS3_MODULE_PATH']))
pymoddir = bld.path.find_dir('bindings/python').get_bld().abspath()
pyvizdir = bld.path.find_dir('src/visualizer').abspath()
if 'PYTHONPATH' in proc_env:
proc_env['PYTHONPATH'] = os.pathsep.join([pymoddir, pyvizdir] + [proc_env['PYTHONPATH']])
else:
proc_env['PYTHONPATH'] = os.pathsep.join([pymoddir, pyvizdir])
if 'PATH' in proc_env:
proc_env['PATH'] = os.pathsep.join(list(env['NS3_EXECUTABLE_PATH']) + [proc_env['PATH']])
else:
proc_env['PATH'] = os.pathsep.join(list(env['NS3_EXECUTABLE_PATH']))
return proc_env
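# get_proc_env() thus returns a copy of os.environ extended so that freshly
# built ns-3 shared libraries, the Python bindings, and executables are all
# visible to the child process.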
def run_argv(argv, env, os_env=None, cwd=None, force_no_valgrind=False):
proc_env = get_proc_env(os_env)
if Options.options.valgrind and not force_no_valgrind:
if Options.options.command_template:
raise WafError("Options --command-template and --valgrind are conflicting")
if not env['VALGRIND']:
raise WafError("valgrind is not installed")
# Use the first program found in the env['VALGRIND'] list
argv = [env['VALGRIND'][0], "--leak-check=full", "--show-reachable=yes", "--error-exitcode=1"] + argv
proc = subprocess.Popen(argv, env=proc_env, cwd=cwd, stderr=subprocess.PIPE)
stderrdata = proc.communicate()[1]
stderrdata = stderrdata.decode('utf-8')
error = False
        # Iterate over lines, not characters: iterating a str yields single
        # characters, so the "== LEAK SUMMARY" check below would never match.
        for line in stderrdata.splitlines(True):
            sys.stderr.write(line)
            if "== LEAK SUMMARY" in line:
                error = True
retval = proc.wait()
if retval == 0 and error:
retval = 1
else:
try:
WindowsError
except NameError:
retval = subprocess.Popen(argv, env=proc_env, cwd=cwd).wait()
else:
try:
retval = subprocess.Popen(argv, env=proc_env, cwd=cwd).wait()
except WindowsError as ex:
raise WafError("Command %s raised exception %s" % (argv, ex))
if retval:
signame = None
if retval < 0: # signal?
import signal
for name, val in vars(signal).items():
if len(name) > 3 and name[:3] == 'SIG' and name[3] != '_':
if val == -retval:
signame = name
break
if signame:
raise WafError("Command %s terminated with signal %s."
" Run it under a debugger to get more information "
"(./waf --run <program> --command-template=\"gdb --args %%s <args>\")." % (argv, signame))
else:
raise WafError("Command %s exited with code %i" % (argv, retval))
return retval
def get_run_program(program_string, command_template=None):
"""
Return the program name and argv of the process that would be executed by
run_program(program_string, command_template).
"""
#print "get_run_program_argv(program_string=%r, command_template=%r)" % (program_string, command_template)
env = bld.env
if command_template in (None, '%s'):
argv = shlex.split(program_string)
#print "%r ==shlex.split==> %r" % (program_string, argv)
program_name = argv[0]
try:
program_obj = find_program(program_name, env)
except ValueError as ex:
raise WafError(str(ex))
program_node = program_obj.path.find_or_declare(program_obj.target)
#try:
# program_node = program_obj.path.find_build(ccroot.get_target_name(program_obj))
#except AttributeError:
# raise Utils.WafError("%s does not appear to be a program" % (program_name,))
execvec = [program_node.abspath()] + argv[1:]
else:
program_name = program_string
try:
program_obj = find_program(program_name, env)
except ValueError as ex:
raise WafError(str(ex))
program_node = program_obj.path.find_or_declare(program_obj.target)
#try:
# program_node = program_obj.path.find_build(ccroot.get_target_name(program_obj))
#except AttributeError:
# raise Utils.WafError("%s does not appear to be a program" % (program_name,))
tmpl = command_template % (program_node.abspath(),)
execvec = shlex.split(tmpl.replace('\\', '\\\\'))
#print "%r ==shlex.split==> %r" % (command_template % (program_node.abspath(env),), execvec)
return program_name, execvec
def run_program(program_string, env, command_template=None, cwd=None, visualize=False):
"""
    If command_template is not None, program_string is the program name
    and argv is derived from command_template with %s replaced by the
    full path to the program.  Otherwise, program_string is interpreted
    as a shell command whose first word is the program name.
"""
dummy_program_name, execvec = get_run_program(program_string, command_template)
if cwd is None:
if (Options.options.cwd_launch):
cwd = Options.options.cwd_launch
else:
cwd = Options.cwd_launch
if visualize:
execvec.append("--SimulatorImplementationType=ns3::VisualSimulatorImpl")
return run_argv(execvec, env, cwd=cwd)
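# Illustrative calls (program names are assumptions):
#     run_program('scratch/my-sim --n=3', env)                # plain run
#     run_program('scratch/my-sim', env,
#                 command_template='gdb --args %s')           # run under gdb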
def run_python_program(program_string, env, visualize=False):
env = bld.env
execvec = shlex.split(program_string)
if (Options.options.cwd_launch):
cwd = Options.options.cwd_launch
else:
cwd = Options.cwd_launch
if visualize:
execvec.append("--SimulatorImplementationType=ns3::VisualSimulatorImpl")
return run_argv([env['PYTHON'][0]] + execvec, env, cwd=cwd)
def uniquify_list(seq):
"""Remove duplicates while preserving order
From Dave Kirby http://www.peterbe.com/plog/uniqifiers-benchmark
"""
seen = set()
return [ x for x in seq if x not in seen and not seen.add(x)]
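# e.g. uniquify_list([3, 1, 3, 2, 1]) -> [3, 1, 2]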
| gpl-2.0 |
bregman-arie/ansible | lib/ansible/module_utils/facts/hardware/dragonfly.py | 232 | 1090 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.facts.hardware.base import HardwareCollector
from ansible.module_utils.facts.hardware.freebsd import FreeBSDHardware
class DragonFlyHardwareCollector(HardwareCollector):
# Note: This uses the freebsd fact class, there is no dragonfly hardware fact class
_fact_class = FreeBSDHardware
_platform = 'DragonFly'
| gpl-3.0 |
nati/fun | cube.py | 1 | 4119 | import copy
import math
import re
import subprocess
import sys
import time
ret = subprocess.check_output(["resize"])
m = re.match(r"COLUMNS=(\d+);\nLINES=(\d+);", ret)
WIDTH = int(m.group(1))
HEIGHT = int(m.group(2))
SCALE = 7
X = 0
Y = 1
Z = 2
POINTS = [
[-1, -1, 1],
[-1, 1, 1],
[1, 1, 1],
[1, -1, 1],
[-1, -1, -1],
[-1, 1, -1],
[1, 1, -1],
[1, -1, -1]
]
LINES = [
[0, 1],
[1, 2],
[2, 3],
[0, 3],
[4, 5],
[5, 6],
[6, 7],
[7, 4],
[0, 4],
[1, 5],
[2, 6],
[3, 7],
]
POINTS2 = [
[-1, -1, 0],
[-1, 1, 0],
[1, 1, 0],
[1, -1, 0],
[0, 0, 3],
]
LINES2 = [
[0, 1],
[1, 2],
[2, 3],
[3, 0],
[0, 4],
[1, 4],
[2, 4],
[3, 4]
]
class Campas(object):
def draw_line(self, p1, p2):
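# Bresenham-style rasterisation: walk the major axis one cell at a time
# and use the accumulated error term to decide when to step the minor axis.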
steep = abs(p2[Y] - p1[Y]) > abs(p2[X] - p1[X])
if steep:
p1[X], p1[Y] = p1[Y], p1[X]
p2[X], p2[Y] = p2[Y], p2[X]
if p1[X] > p2[X]:
p1[X], p2[X] = p2[X], p1[X]
p1[Y], p2[Y] = p2[Y], p1[Y]
dx = p2[X] - p1[X]
dy = abs(p2[Y] - p1[Y])
error = dx / 2.0
y = p1[Y]
if p1[Y] < p2[Y]:
ystep = 1
else:
ystep = -1
for x in range(p1[X], p2[X]):
if steep:
self.draw_point([y, x])
else:
self.draw_point([x, y])
error = error - dy
if error < 0:
y = y + ystep
error = error + dx
def draw_point(self, p, char="#"):
if p[X] >= WIDTH or 0 > p[X]:
return
if p[Y] >= HEIGHT or 0 > p[Y]:
return
sys.stdout.write("\033[%i;%iH%s" % (p[Y], p[X], char))
def clear_screen(self):
sys.stdout.write("\033[2J")
def flush(self):
sys.stdout.flush()
class Poly(object):
points = []
lines = []
def __init__(self, points, lines, campas):
self.points = copy.deepcopy(points)
self.lines = copy.deepcopy(lines)
self.campas = campas
self.base_point = [0, 0, 1]
def mult(self, transform):
self.points = [self.mult_m_p(transform, p) for p in self.points]
def move(self, axis, distance):
self.base_point[axis] = distance
def mult_m_p(self, m, p):
x, y, z = p
r1 = sum([m[0][0] * x, m[0][1] * y, m[0][2] * z])
r2 = sum([m[1][0] * x, m[1][1] * y, m[1][2] * z])
r3 = sum([m[2][0] * x, m[2][1] * y, m[2][2] * z])
return [r1, r2, r3]
def projection(self, p):
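# Simple pinhole projection: x/y are scaled by SCALE, divided by the
# object's base_point Z (its distance from the viewer), then recentred
# on the middle of the terminal.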
cx, cy = WIDTH / 2, HEIGHT / 2
x = (p[X] + self.base_point[X]) * SCALE / self.base_point[Z] + cx
y = (p[Y] + self.base_point[Y]) * SCALE / self.base_point[Z] + cy
return [int(x), int(y)]
def draw(self):
if self.base_point[Z] <= 0:
return
for point in self.points:
self.campas.draw_point(self.projection(point))
for line in self.lines:
self.campas.draw_line(self.projection(self.points[line[0]]),
self.projection(self.points[line[1]]))
def matrix_rotate_x(a):
return [[1, 0, 0],
[0, math.cos(a), -math.sin(a)],
[0, math.sin(a), math.cos(a)]]
def matrix_rotate_y(a):
return [[math.cos(a), 0, math.sin(a)],
[0, 1, 0],
[-math.sin(a), 0, math.cos(a)]]
campas = Campas()
campas.clear_screen()
cube = Poly(POINTS, LINES, campas)
cube2 = Poly(POINTS2, LINES2, campas)
cube3 = Poly(POINTS, LINES, campas)
i = math.pi / 100.0
j = 0
mx = matrix_rotate_x(i * 1)
my = matrix_rotate_y(i * 5)
while True:
campas.clear_screen()
cube.mult(mx)
cube.mult(my)
cube3.mult(mx)
cube3.mult(my)
cube.move(Z, math.sin(j) + 1.5)
cube.move(X, 10 * math.cos(j))
cube3.move(Z, math.sin(j + math.pi / 2) + 1.5)
cube3.move(Y, 3 * math.cos(j + math.pi / 2))
j += math.pi / 50.0
cube2.mult(mx)
cube2.mult(my)
cube2.move(Z, 1.5)
cube.draw()
cube2.draw()
cube3.draw()
campas.flush()
time.sleep(0.1)
| apache-2.0 |
nuclear-wizard/moose | test/tests/time_integrators/scalar/run.py | 12 | 4487 | #!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import subprocess
import sys
import csv
import matplotlib.pyplot as plt
import numpy as np
# Use fonts that match LaTeX
from matplotlib import rcParams
rcParams['font.family'] = 'serif'
rcParams['font.size'] = 17
rcParams['font.serif'] = ['Computer Modern Roman']
rcParams['text.usetex'] = True
# Small font size for the legend
from matplotlib.font_manager import FontProperties
fontP = FontProperties()
fontP.set_size('x-small')
def get_last_row(csv_filename):
'''
Function which returns just the last row of a CSV file. We have to
read every line of the file; there was no Stack Overflow example of
reading just the last line.
http://stackoverflow.com/questions/20296955/reading-last-row-from-csv-file-python-error
'''
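# Illustrative (values assumed): for a results file whose final non-blank
# row is "0.125,3.2e-05", get_last_row('scalar_out.csv') returns
# ['0.125', '3.2e-05'].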
with open(csv_filename, 'r') as f:
lastrow = None
for row in csv.reader(f):
if (row != []): # skip blank lines at end of file.
lastrow = row
return lastrow
def run_moose(dt, time_integrator):
'''
Function which actually runs MOOSE.
'''
implicit_flag = 'true'
explicit_methods = ['ExplicitEuler', 'ExplicitMidpoint', 'Heun', 'Ralston']
# Set implicit_flag based on TimeIntegrator name
if (time_integrator in explicit_methods):
implicit_flag = 'false'
command_line_args = ['../../../moose_test-opt', '-i', 'scalar.i',
'Executioner/dt={}'.format(dt),
'Executioner/TimeIntegrator/type={}'.format(time_integrator),
'GlobalParams/implicit={}'.format(implicit_flag)]
try:
child = subprocess.Popen(command_line_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
# communicate() waits for the process to terminate, so there's no
# need to wait() for it. It also sets the returncode attribute on
# child.
(stdoutdata, stderrdata) = child.communicate()
if (child.returncode != 0):
print('Running MOOSE failed: program output is below:')
print(stdoutdata)
raise RuntimeError('MOOSE returned a nonzero exit code')
except:
print('Error executing moose_test')
sys.exit(1)
# Parse the last line of the output file to get the error at the final time.
last_row = get_last_row('scalar_out.csv')
return float(last_row[1])
#
# Main program
#
fig = plt.figure()
ax1 = fig.add_subplot(111)
# Lists of timesteps and TimeIntegrators to plot.
time_integrators = ['ImplicitEuler', 'ImplicitMidpoint', 'LStableDirk2', 'BDF2', 'CrankNicolson',
'LStableDirk3', 'LStableDirk4', 'AStableDirk4',
'ExplicitEuler', 'ExplicitMidpoint', 'Heun', 'Ralston']
dts = [.125, .0625, .03125, .015625]
# Plot colors
colors = ['maroon', 'blue', 'green', 'black', 'burlywood', 'olivedrab', 'midnightblue',
'tomato', 'darkmagenta', 'chocolate', 'lightslategray', 'skyblue']
# Plot line markers
markers = ['v', 'o', 'x', '^', 'H', 'h', '+', 'D', '*', '4', 'd', '8']
# Plot line styles
linestyles = [':', '-', '-.', '--', ':', '-.', '--', ':', '--', '-', '-.', '-']
for i in range(len(time_integrators)):
time_integrator = time_integrators[i]
# Place to store the results for this TimeIntegrator
results = []
# Call MOOSE to compute the results
for dt in dts:
results.append(run_moose(dt, time_integrator))
# Make plot
xdata = np.log10(np.reciprocal(dts))
ydata = np.log10(results)
# Compute linear fit of last three points.
start_fit = len(xdata) - 3
end_fit = len(xdata)
fit = np.polyfit(xdata[start_fit:end_fit], ydata[start_fit:end_fit], 1)
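# The slope of this log-log fit is the observed order of accuracy of the
# scheme, e.g. roughly 2 for BDF2 or CrankNicolson on a smooth problem.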
# Make the plot -- unpack the user's additional plotting arguments
# from kwargs by prepending with **.
ax1.plot(xdata, ydata, label=time_integrator + ", $" + "{:.2f}".format(fit[0]) + "$",
color=colors[i], marker=markers[i], linestyle=linestyles[i])
# Set up the axis labels.
ax1.set_xlabel(r'$\log (\Delta t^{-1})$')
ax1.set_ylabel(r'$\log \|e(T)\|_{L^2}$')
# Add a legend
plt.legend(loc='lower left', prop=fontP)
# Save a PDF
plt.savefig('plot.pdf', format='pdf')
# Local Variables:
# python-indent: 2
# End:
| lgpl-2.1 |
mpvismer/pyqtgraph | pyqtgraph/graphicsItems/GraphicsWidget.py | 52 | 2151 | from ..Qt import QtGui, QtCore
from ..GraphicsScene import GraphicsScene
from .GraphicsItem import GraphicsItem
__all__ = ['GraphicsWidget']
class GraphicsWidget(GraphicsItem, QtGui.QGraphicsWidget):
_qtBaseClass = QtGui.QGraphicsWidget
def __init__(self, *args, **kargs):
"""
**Bases:** :class:`GraphicsItem <pyqtgraph.GraphicsItem>`, :class:`QtGui.QGraphicsWidget`
Extends QGraphicsWidget with several helpful methods and workarounds for PyQt bugs.
Most of the extra functionality is inherited from :class:`GraphicsItem <pyqtgraph.GraphicsItem>`.
"""
QtGui.QGraphicsWidget.__init__(self, *args, **kargs)
GraphicsItem.__init__(self)
## done by GraphicsItem init
#GraphicsScene.registerObject(self) ## workaround for pyqt bug in graphicsscene.items()
# Removed due to https://bugreports.qt-project.org/browse/PYSIDE-86
#def itemChange(self, change, value):
## BEWARE: Calling QGraphicsWidget.itemChange can lead to crashing!
##ret = QtGui.QGraphicsWidget.itemChange(self, change, value) ## segv occurs here
## The default behavior is just to return the value argument, so we'll do that
## without calling the original method.
#ret = value
#if change in [self.ItemParentHasChanged, self.ItemSceneHasChanged]:
#self._updateView()
#return ret
def setFixedHeight(self, h):
self.setMaximumHeight(h)
self.setMinimumHeight(h)
def setFixedWidth(self, h):
self.setMaximumWidth(h)
self.setMinimumWidth(h)
def height(self):
return self.geometry().height()
def width(self):
return self.geometry().width()
def boundingRect(self):
br = self.mapRectFromParent(self.geometry()).normalized()
#print "bounds:", br
return br
def shape(self): ## No idea why this is necessary, but rotated items do not receive clicks otherwise.
p = QtGui.QPainterPath()
p.addRect(self.boundingRect())
#print "shape:", p.boundingRect()
return p
| mit |
wavelets/chainer | chainer/cudnn/cudnn.py | 4 | 3454 | """Common routines to use CuDNN."""
import atexit
import ctypes
import os
import numpy
from chainer import cuda
import libcudnn
enabled = int(os.environ.get('CHAINER_CUDNN', '1')) != 0
available = True
def get_ptr(x):
return ctypes.c_void_p(x.ptr)
class Auto(object):
"""Object to be destroyed automatically."""
def __init__(self, value, destroyer):
self.value = value
self.destroyer = destroyer
def __del__(self):
try:
self.destroyer(self.value)
except Exception:
pass
_handles = {}
_pid = None
def get_default_handle():
"""Get the default handle of CuDNN."""
global _handles, _pid
pid = os.getpid()
if _pid != pid: # not initialized yet
_handles = {}
atexit.register(shutdown)
_pid = pid
device = cuda.Context.get_device()
if device in _handles:
return _handles[device]
handle = libcudnn.cudnnCreate()
_handles[device] = handle
return handle
def shutdown():
global _handles, _pid
pid = os.getpid()
if _pid != pid: # not initialized
return
for handle in _handles.itervalues():
libcudnn.cudnnDestroy(handle)
_handles = {}
_pid = None # mark as uninitialized
_dtypes = {numpy.dtype('float32'): libcudnn.cudnnDataType['CUDNN_DATA_FLOAT'],
numpy.dtype('float64'): libcudnn.cudnnDataType['CUDNN_DATA_DOUBLE']}
def get_tensor_desc(x, h, w, form='CUDNN_TENSOR_NCHW'):
"""Create a tensor descriptor for given settings."""
n = x.shape[0] if len(x.shape) >= 1 else 1
c = x.size // (n * h * w)
desc = libcudnn.cudnnCreateTensorDescriptor()
libcudnn.cudnnSetTensor4dDescriptor(
desc, libcudnn.cudnnTensorFormat[form], _dtypes[x.dtype], n, c, h, w)
return Auto(desc, libcudnn.cudnnDestroyTensorDescriptor)
def get_conv_bias_desc(x):
"""Create a bias tensor descriptor."""
desc = libcudnn.cudnnCreateTensorDescriptor()
libcudnn.cudnnSetTensor4dDescriptor(
desc, libcudnn.cudnnTensorFormat[
'CUDNN_TENSOR_NCHW'], _dtypes[x.dtype],
1, x.size, 1, 1)
return Auto(desc, libcudnn.cudnnDestroyTensorDescriptor)
_default_conv_mode = libcudnn.cudnnConvolutionMode['CUDNN_CROSS_CORRELATION']
def get_filter4d_desc(x, mode=_default_conv_mode):
"""Create a 2d convolution filter descriptor."""
k, c, h, w = x.shape
desc = libcudnn.cudnnCreateFilterDescriptor()
libcudnn.cudnnSetFilter4dDescriptor(desc, _dtypes[x.dtype], k, c, h, w)
return Auto(desc, libcudnn.cudnnDestroyFilterDescriptor)
def get_conv2d_desc(pad, stride, mode=_default_conv_mode):
"""Create a 2d convolution descriptor."""
desc = libcudnn.cudnnCreateConvolutionDescriptor()
libcudnn.cudnnSetConvolution2dDescriptor(
desc, pad[0], pad[1], stride[0], stride[1], 1, 1, mode)
return Auto(desc, libcudnn.cudnnDestroyConvolutionDescriptor)
_pool_mode = {
'MAX': libcudnn.cudnnPoolingMode['CUDNN_POOLING_MAX'],
'AVE': libcudnn.cudnnPoolingMode[
'CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING']
}
def get_pool2d_desc(ksize, stride, pad, mode):
"""Create a 2d pooling descriptor."""
desc = libcudnn.cudnnCreatePoolingDescriptor()
libcudnn.cudnnSetPooling2dDescriptor(
desc, libcudnn.cudnnPoolingMode[mode], ksize[0], ksize[1],
pad[0], pad[1], stride[0], stride[1])
return Auto(desc, libcudnn.cudnnDestroyPoolingDescriptor)
| mit |
benracine/opencomparison | apps/package/repos/sourceforge.py | 4 | 2752 | import re
from urllib import urlopen
try:
import simplejson as json
except ImportError:
import json
from .base_handler import BaseHandler
from warnings import warn
API_TARGET = "https://sourceforge.net/api"
class SourceforgeError(Exception):
"""An error occurred when making a request to the Sourceforge API"""
class SourceforgeHandler(BaseHandler):
"""
The Sourceforge API has some tricky stuff in it - some sections are fed
via xml/rss, some are via json. As of 03/16/2011, the xml API is the most
up-to-date, but a bug has been opened to fix the json side. This API is
on hold until it is fixed.
"""
title = "Sourceforge"
url_regex = "https://sourceforge.net/"
url = "https://sourceforge.net"
repo_regex = r'https://sourceforge.com/[\w\-\_]+/([\w\-\_]+)/{0,1}'
slug_regex = r'https://sourceforge.com/[\w\-\_]+/([\w\-\_]+)/{0,1}'
def fetch_metadata(self, package):
sourceforge = ''
repo_name = package.repo_name()
target = API_TARGET + "/projects/name/" + repo_name
if not target.endswith("/"):
target += "/"
# the sourceforge project API takes the response format as a trailing
# path component; request the JSON variant here
target += "json/"
# open the target and read the content
response = urlopen(target)
response_text = response.read()
# dejson the results
try:
data = json.loads(response_text)
except json.decoder.JSONDecodeError:
raise SourceforgeError("unexpected response from sourceforge.net %d: %r" % (
response.status, response_text))
# sourceforge has both developers and maintainers in a list
participants = data.get("developers", []) + data.get("maintainers", [])
package.participants = [p['name'] for p in participants]
package.repo_description = data.get("description")
project_name = _name_from_pypi_home_page(package.pypi_home_page)
# dejsonify the results
try:
sf_package_data = _get_project_data(project_name)
except json.decoder.JSONDecodeError:
message = "%s had a JSONDecodeError while loading %s" % (package.title,
target)
warn(message)
return package
package.repo_watchers = len(sf_package_data.get('maintainers', [])) + len(sf_package_data.get('developers', []))
package.repo_description = sf_package_data.get('description', '')
# TODO - remove the line below and use repo_url as your foundation
package.repo_url = _get_repo_url(sf_package_data)
package.repo_forks = None
return package
repo_handler = SourceforgeHandler()
| mit |
pombredanne/django-avocado | avocado/modeltree.py | 1 | 22222 | import inspect
from django.db import models
from django.db.models import Q
from django.core.exceptions import ImproperlyConfigured
from avocado.conf import settings
__all__ = ('ModelTree',)
DEFAULT_MODELTREE_ALIAS = 'default'
class ModelTreeNode(object):
def __init__(self, model, parent=None, rel_type=None, rel_reversed=None,
related_name=None, accessor_name=None, depth=0):
"""Defines attributes of a `model' and the relationship to the parent
model.
`name' - the `model's class name
`db_table' - the model's database table name
`pk_field' - the model's primary key field
`parent' - a reference to the parent ModelTreeNode
`parent_model' - a reference to the `parent' model, since it may be
None
`rel_type' - denotes the _kind_ of relationship with the
following possibilities: 'manytomany', 'onetoone', or 'foreignkey'.
`rel_reversed' - denotes whether this node was derived from a
forward relationship (an attribute lives on the parent model) or
a reverse relationship (an attribute lives on this model).
`related_name' - is the query string representation which is used
when querying via the ORM.
`accessor_name' - can be used when accessing the model object's
attributes e.g. getattr(obj, accessor_name). This is relative to
the parent model.
`depth' - the depth of this node relative to the root (zero-based
index)
`children' - a list containing the child nodes
"""
self.model = model
self.name = model.__name__
self.db_table = model._meta.db_table
self.pk_field = model._meta.pk.column
self.parent = parent
self.parent_model = parent and parent.model or None
self.rel_type = rel_type
self.rel_reversed = rel_reversed
self.related_name = related_name
self.accessor_name = accessor_name
self.depth = depth
self.children = []
def __str__(self):
return '%s via %s' % (self.name, self.parent_model.__name__)
def _get_m2m_db_table(self):
f = getattr(self.parent_model, self.accessor_name)
if self.rel_reversed:
return f.related.field.m2m_db_table()
else:
return f.field.m2m_db_table()
m2m_db_table = property(_get_m2m_db_table)
def _get_m2m_field(self):
f = getattr(self.parent_model, self.accessor_name)
if self.rel_reversed:
return f.related.field.m2m_column_name()
else:
return f.field.m2m_column_name()
m2m_field = property(_get_m2m_field)
def _get_m2m_reverse_field(self):
f = getattr(self.parent_model, self.accessor_name)
if self.rel_reversed:
return f.related.field.m2m_reverse_name()
else:
return f.field.m2m_reverse_name()
m2m_reverse_field = property(_get_m2m_reverse_field)
def _get_foreignkey_field(self):
f = getattr(self.parent_model, self.accessor_name)
if self.rel_reversed:
return f.related.field.column
else:
return f.field.column
foreignkey_field = property(_get_foreignkey_field)
def _get_join_connections(self):
"""Returns a list of connections that need to be added to a
QuerySet object that properly joins this model and the parent.
"""
if not hasattr(self, '_join_connections'):
connections = []
# setup initial FROM clause
connections.append((None, self.parent.db_table, None, None))
# setup two connections for m2m
if self.rel_type == 'manytomany':
c1 = (
self.parent.db_table,
self.m2m_db_table,
self.parent.pk_field,
self.rel_reversed and self.m2m_reverse_field or \
self.m2m_field,
)
c2 = (
self.m2m_db_table,
self.db_table,
self.rel_reversed and self.m2m_field or \
self.m2m_reverse_field,
self.pk_field,
)
connections.append(c1)
connections.append(c2)
else:
c1 = (
self.parent.db_table,
self.db_table,
self.rel_reversed and self.parent.pk_field or \
self.foreignkey_field,
self.rel_reversed and self.foreignkey_field or \
self.parent.pk_field,
)
connections.append(c1)
self._join_connections = connections
return self._join_connections
join_connections = property(_get_join_connections)
def remove_child(self, model):
for i, cnode in enumerate(self.children):
if cnode.model is model:
return self.children.pop(i)
class ModelTree(object):
"""A class to handle building and parsing a tree structure given a model.
`root_model' - the model of interest in which everything is relatively
defined
`exclude' - a list of models that are not to be added to the tree
"""
def __init__(self, root_model, exclude=(), routes=()):
self.root_model = self._get_model(root_model)
self.exclude = map(self._get_model, exclude)
self._rts, self._tos = self._build_routes(routes)
self._tree_hash = {}
def check(self, queryset):
if queryset.model is self.root_model:
return True
return False
def _get_model(self, label):
# model class
if inspect.isclass(label) and issubclass(label, models.Model):
return label
# passed as a label string
elif isinstance(label, basestring):
app_label, model_label = label.lower().split('.')
model = models.get_model(app_label, model_label)
if model:
return model
raise TypeError, 'model "%s" could not be found' % label
def _build_routes(self, routes):
"""
Routes provide a means of specifying JOINs between two tables.
The minimum information necessary to define an explicit JOIN is as
follows:
'from_label' - defines the model on the left side of the join
'to_label' - defines the model on the right side of the join
'join_field' - defines the field in which the join will occur
'symmetrical' - defines whether the same join will be constructed
if the 'from_model' and 'to_model' are reversed
"""
rts = {}
tos = {}
for route in routes:
# unpack
from_label, to_label, join_field, symmetrical = route
# get models
from_model = self._get_model(from_label)
to_model = self._get_model(to_label)
# get field
if join_field is not None:
model_name, field_name = join_field.split('.')
model_name = model_name.lower()
if model_name == from_model.__name__.lower():
field = from_model._meta.get_field_by_name(field_name)[0]
elif model_name == to_model.__name__.lower():
field = to_model._meta.get_field_by_name(field_name)[0]
else:
raise TypeError, 'model for join_field, "%s", does not match' % field_name
if field is None:
raise TypeError, 'field "%s" not found' % field_name
else:
field = None
if field:
rts[(from_model, to_model)] = field
if symmetrical:
rts[(to_model, from_model)] = field
else:
tos[to_model] = from_model
return rts, tos
def _filter_one2one(self, field):
"""Tests if this field is a OneToOneField. If a route exists for this
field's model and its target model, ensure this is the field that
should be used to join the two tables.
"""
if isinstance(field, models.OneToOneField):
# route has been defined with a specific field required
tup = (field.model, field.rel.to)
# skip if not the correct field
if self._rts.has_key(tup) and self._rts.get(tup) is not field:
return
return field
def _filter_related_one2one(self, rel):
"""Tests if this RelatedObject represents a OneToOneField. If a route
exists for this field's model and its target model, ensure this is
the field that should be used to join the two tables.
"""
field = rel.field
if isinstance(field, models.OneToOneField):
# route has been defined with a specific field required
tup = (rel.model, field.model)
# skip if not the correct field
if self._rts.has_key(tup) and self._rts.get(tup) is not field:
return
return rel
def _filter_fk(self, field):
"""Tests if this field is a ForeignKey. If a route exists for this
field's model and its target model, ensure this is the field that
should be used to join the two tables.
"""
if isinstance(field, models.ForeignKey):
# route has been defined with a specific field required
tup = (field.model, field.rel.to)
# skip if not the correct field
if self._rts.has_key(tup) and self._rts.get(tup) is not field:
return
return field
def _filter_related_fk(self, rel):
"""Tests if this RelatedObject represents a ForeignKey. If a route
exists for this field's model and its target model, ensure this is
the field that should be used to join the two tables.
"""
field = rel.field
if isinstance(field, models.ForeignKey):
# route has been defined with a specific field required
tup = (rel.model, field.model)
# skip if not the correct field
if self._rts.has_key(tup) and self._rts.get(tup) is not field:
return
return rel
def _filter_m2m(self, field):
"""Tests if this field is a ManyToManyField. If a route exists for this
field's model and its target model, ensure this is the field that
should be used to join the two tables.
"""
if isinstance(field, models.ManyToManyField):
# route has been defined with a specific field required
tup = (field.model, field.rel.to)
# skip if not the correct field
if self._rts.has_key(tup) and self._rts.get(tup) is not field:
return
return field
def _filter_related_m2m(self, rel):
"""Tests if this RelatedObject represents a ManyToManyField. If a route
exists for this field's model and its target model, ensure this is
the field that should be used to join the two tables.
"""
field = rel.field
if isinstance(field, models.ManyToManyField):
# route has been defined with a specific field required
tup = (rel.model, field.model)
# skip if not the correct field
if self._rts.has_key(tup) and self._rts.get(tup) is not field:
return
return rel
def _add_node(self, parent, model, rel_type, rel_reversed, related_name,
accessor_name, depth):
"""Adds a node to the tree only if a node of the same `model' does not
already exist in the tree with smaller depth. If the node is added, the
tree traversal continues finding the node's relations.
Conditions in which the node will fail to be added:
- the model is excluded completely
- the model is going back the same path it came from
- the model is circling back to the root_model
- the model does not come from the parent.model (via _tos)
"""
exclude = set(self.exclude + [parent.parent_model, self.root_model])
# ignore excluded models and prevent circular paths
if model in exclude:
return
# if a route exists, only allow the model to be added if coming from the
# specified parent.model
if self._tos.has_key(model) and self._tos.get(model) is not parent.model:
return
node_hash = self._tree_hash.get(model, None)
# don't add node if a path with a shorter depth exists. this is applied
# after the correct join has been determined. generally if a route is
# defined for relation, this will never be an issue since there would
# only be one path available. if a route is not defined, the shorter
# path will be found
if not node_hash or node_hash['depth'] > depth:
if node_hash:
node_hash['parent'].remove_child(model)
node = ModelTreeNode(model, parent, rel_type, rel_reversed,
related_name, accessor_name, depth)
self._tree_hash[model] = {'parent': parent, 'depth': depth,
'node': node}
node = self._find_relations(node, depth)
parent.children.append(node)
del node
def _find_relations(self, node, depth=0):
"""Finds all relations given a node.
NOTE: the many-to-many relations are evaluated first to prevent
'through' models being bound as a ForeignKey relationship.
"""
depth += 1
model = node.model
# determine relational fields to determine paths
forward_fields = model._meta.fields
reverse_fields = model._meta.get_all_related_objects()
forward_o2o = filter(self._filter_one2one, forward_fields)
reverse_o2o = filter(self._filter_related_one2one, reverse_fields)
forward_fk = filter(self._filter_fk, forward_fields)
reverse_fk = filter(self._filter_related_fk, reverse_fields)
forward_m2m = filter(self._filter_m2m, model._meta.many_to_many)
reverse_m2m = filter(self._filter_related_m2m, model._meta.get_all_related_many_to_many_objects())
# iterate m2m relations
for f in forward_m2m:
kwargs = {
'parent': node,
'model': f.rel.to,
'rel_type': 'manytomany',
'rel_reversed': False,
'related_name': f.name,
'accessor_name': f.name,
'depth': depth,
}
self._add_node(**kwargs)
# iterate over related m2m fields
for r in reverse_m2m:
kwargs = {
'parent': node,
'model': r.model,
'rel_type': 'manytomany',
'rel_reversed': True,
'related_name': r.field.related_query_name(),
'accessor_name': r.get_accessor_name(),
'depth': depth,
}
self._add_node(**kwargs)
# iterate over one2one fields
for f in forward_o2o:
kwargs = {
'parent': node,
'model': f.rel.to,
'rel_type': 'onetoone',
'rel_reversed': False,
'related_name': f.name,
'accessor_name': f.name,
'depth': depth,
}
self._add_node(**kwargs)
# iterate over related one2one fields
for r in reverse_o2o:
kwargs = {
'parent': node,
'model': r.model,
'rel_type': 'onetoone',
'rel_reversed': True,
'related_name': r.field.related_query_name(),
'accessor_name': r.get_accessor_name(),
'depth': depth,
}
self._add_node(**kwargs)
# iterate over fk fields
for f in forward_fk:
kwargs = {
'parent': node,
'model': f.rel.to,
'rel_type': 'foreignkey',
'rel_reversed': False,
'related_name': f.name,
'accessor_name': f.name,
'depth': depth,
}
self._add_node(**kwargs)
# iterate over related foreign keys
for r in reverse_fk:
kwargs = {
'parent': node,
'model': r.model,
'rel_type': 'foreignkey',
'rel_reversed': True,
'related_name': r.field.related_query_name(),
'accessor_name': r.get_accessor_name(),
'depth': depth,
}
self._add_node(**kwargs)
return node
def _get_root_node(self):
"Sets the `root_node' and implicitly builds the entire tree."
if not hasattr(self, '_root_node'):
node = ModelTreeNode(self.root_model)
self._root_node = self._find_relations(node)
self._tree_hash[self.root_model] = {'parent': None, 'depth': 0,
'node': self._root_node}
return self._root_node
root_node = property(_get_root_node)
def _find_path(self, model, node, node_path=[]):
if node.model == model:
return node_path
for cnode in node.children:
mpath = self._find_path(model, cnode, node_path + [cnode])
if mpath:
return mpath
def path_to(self, model):
"Returns a list of nodes thats defines the path of traversal."
model = self._get_model(model)
return self._find_path(model, self.root_node)
def path_to_with_root(self, model):
"""Returns a list of nodes thats defines the path of traversal
including the root node.
"""
model = self._get_model(model)
return self._find_path(model, self.root_node, [self.root_node])
def get_node_by_model(self, model):
"Finds the node with the specified model."
model = self._get_model(model)
if not self._tree_hash:
self.root_node
val = self._tree_hash.get(model, None)
if val is None:
return
return val['node']
def query_string(self, node_path, field_name, operator=None):
"Returns a query string given a path"
toks = [n.related_name for n in node_path] + [field_name]
if operator is not None:
toks.append(operator)
return str('__'.join(toks))
def q(self, node_path, field_name, value, operator=None):
"Returns a Q object."
key = self.query_string(node_path, field_name, operator)
return Q(**{key: value})
def accessor_names(self, node_path):
"""Returns a list of the accessor names given a list of nodes. This is
most useful when needing to dynamically access attributes starting from
an instance of the `root_node' object.
"""
return [n.accessor_name for n in node_path]
def get_all_join_connections(self, node_path):
"""Returns a list of JOIN connections that can be manually applied to a
QuerySet object, e.g.:
queryset = SomeModel.objects.all()
modeltree = ModelTree(SomeModel)
nodes = modeltree.path_to(SomeOtherModel)
conns = modeltree.get_all_join_connections(nodes)
for c in conns:
queryset.query.join(c, promote=True)
This allows for the ORM to handle setting up the JOINs which may be
different depending the QuerySet being altered.
"""
connections = []
for i,node in enumerate(node_path):
if i == 0:
connections.extend(node.join_connections)
else:
connections.extend(node.join_connections[1:])
return connections
def add_joins(self, model, queryset, **kwargs):
model = self._get_model(model)
clone = queryset._clone()
nodes = self.path_to(model)
conns = self.get_all_join_connections(nodes)
for c in conns:
clone.query.join(c, **kwargs)
return clone
def print_path(self, node=None, depth=0):
"Traverses the entire tree and prints a hierarchical view to stdout."
if node is None:
node = self.root_node
if node:
print '- ' * depth * 2, '"%s"' % node.name, 'at a depth of', node.depth
if node.children:
depth += 1
for x in node.children:
self.print_path(x, depth)
def get_accessor_pairs(self, node_path):
"Used for testing purposes."
accessor_names = self.accessor_names(node_path)
node_path = node_path[:-1] # don't need the last item
if len(node_path) == 0 or node_path[0] is not self.root_node:
node_path = [self.root_node] + node_path
else:
accessor_names = accessor_names[1:]
return zip(node_path, accessor_names)
def get_queryset(self):
"Returns a QuerySet relative to the ``root_model``."
return self.root_model.objects.all()
class LazyModelTree(object):
def __init__(self, modeltrees):
self.modeltrees = modeltrees
self._modeltrees = {}
def __getitem__(self, alias):
if not self.modeltrees:
raise ImproperlyConfigured, 'You must at least specify the "%s" ' \
'modeltree config' % DEFAULT_MODELTREE_ALIAS
if alias not in self._modeltrees:
try:
kwargs = self.modeltrees[alias]
except KeyError:
raise KeyError, 'No modeltree settings defined for "%s"' % alias
self._modeltrees[alias] = ModelTree(**kwargs)
return self._modeltrees[alias]
trees = LazyModelTree(settings.MODELTREES)
| bsd-3-clause |
bryan-lunt/execnet | doc/example/taskserver.py | 2 | 1281 |
import execnet
group = execnet.Group()
for i in range(4): # 4 CPUs
group.makegateway()
def process_item(channel):
# task processor, sits on each CPU
import time
import random
channel.send("ready")
for x in channel:
if x is None: # we can shutdown
break
# sleep random time, send result
time.sleep(random.randrange(3))
channel.send(x*10)
# execute taskprocessor everywhere
mch = group.remote_exec(process_item)
# get a queue that gives us results
q = mch.make_receive_queue(endmarker=-1)
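# endmarker=-1 makes the queue yield (channel, -1) once a remote channel
# closes; the loop below uses that to count terminated workers.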
tasks = range(10) # a list of tasks, here just integers
terminated = 0
while 1:
channel, item = q.get()
if item == -1:
terminated += 1
print "terminated %s" % channel.gateway.id
if terminated == len(mch):
print "got all results, terminating"
break
continue
if item != "ready":
print "other side %s returned %r" % (channel.gateway.id, item)
if not tasks:
print "no tasks remain, sending termination request to all"
mch.send_each(None)
tasks = -1
if tasks and tasks != -1:
task = tasks.pop()
channel.send(task)
print "sent task %r to %s" % (task, channel.gateway.id)
group.terminate()
| mit |
wkentaro/chainer | tests/chainer_tests/dataset_tests/tabular_tests/test_join.py | 5 | 3492 | import unittest
import numpy as np
import six
import chainer
from chainer import testing
from chainer_tests.dataset_tests.tabular_tests import dummy_dataset
def _filter_params(params):
for param in params:
key_size = 0
key_size += 3 if param['mode_a'] else 1
key_size += 2 if param['mode_b'] else 1
if param['key_indices'] and \
any(key_size <= key_index for key_index in param['key_indices']):
continue
yield param
@testing.parameterize(*_filter_params(testing.product({
'mode_a': [tuple, dict, None],
'mode_b': [tuple, dict, None],
'return_array': [True, False],
'key_indices': [None, (0, 4, 1), (0, 2), (1, 0), ()],
})))
class TestJoin(unittest.TestCase):
def setUp(self):
if self.key_indices is None:
self.expected_key_indices_a = None
self.expected_key_indices_b = None
return
key_size_a = 3 if self.mode_a else 1
key_indices_a = tuple(
key_index
for key_index in self.key_indices
if key_index < key_size_a)
key_indices_b = tuple(
key_index - key_size_a
for key_index in self.key_indices
if key_size_a <= key_index)
if key_indices_a:
self.expected_key_indices_a = key_indices_a
if key_indices_b:
self.expected_key_indices_b = key_indices_b
def test_join(self):
def callback_a(indices, key_indices):
self.assertIsNone(indices)
self.assertEqual(key_indices, self.expected_key_indices_a)
dataset_a = dummy_dataset.DummyDataset(
mode=self.mode_a,
return_array=self.return_array, callback=callback_a,
convert=True)
def callback_b(indices, key_indices):
self.assertIsNone(indices)
self.assertEqual(key_indices, self.expected_key_indices_b)
dataset_b = dummy_dataset.DummyDataset(
keys=('d', 'e'), mode=self.mode_b,
return_array=self.return_array, callback=callback_b)
view = dataset_a.join(dataset_b)
self.assertIsInstance(view, chainer.dataset.TabularDataset)
self.assertEqual(len(view), len(dataset_a))
self.assertEqual(view.keys, dataset_a.keys + dataset_b.keys)
self.assertEqual(view.mode, dataset_a.mode or dataset_b.mode or tuple)
output = view.get_examples(None, self.key_indices)
data = np.vstack((dataset_a.data, dataset_b.data))
if self.key_indices is not None:
data = data[list(self.key_indices)]
for out, d in six.moves.zip_longest(output, data):
np.testing.assert_equal(out, d)
if self.return_array:
self.assertIsInstance(out, np.ndarray)
else:
self.assertIsInstance(out, list)
self.assertEqual(view.convert(output), 'converted')
class TestJoinInvalid(unittest.TestCase):
def test_join_length(self):
dataset_a = dummy_dataset.DummyDataset()
dataset_b = dummy_dataset.DummyDataset(size=5, keys=('d', 'e'))
with self.assertRaises(ValueError):
dataset_a.join(dataset_b)
def test_join_conflict_key(self):
dataset_a = dummy_dataset.DummyDataset()
dataset_b = dummy_dataset.DummyDataset(keys=('a', 'd'))
with self.assertRaises(ValueError):
dataset_a.join(dataset_b)
testing.run_module(__name__, __file__)
| mit |
ohio813/pyflag | src/plugins/Flash/ExportCommands.py | 7 | 3391 | """ These commands allow us to create reports from pyflash """
import pyflag.pyflagsh as pyflagsh
import pyflag.conf
config=pyflag.conf.ConfObject()
import pyflag.Registry as Registry
import pyflag.TEXTUI as TEXTUI
class export(pyflagsh.command):
""" Export a custom report """
long_opts = ['filter=']
def help(self):
return "export filename [ column ... ] [ options ]. Exports the table constructed by columns into the filename provided. Columns must be specified in fully qualified form (table.column name) and options must be specified in standard form too (key=value)."
def execute(self):
## Derive an element list
elements = []
for t in self.args[2:]:
yield t
if '.' in t:
class_name , column_name = t.split(".")
cls = Registry.CASE_TABLES.dispatch(class_name)()
elements.append(cls.bind_column(self.environment._CASE, column_name))
elif "=" in t:
key,value = t.split("=",1)
print t
self.opts.set(key, value)
exporter = Registry.TABLE_RENDERERS.dispatch(self.args[1])
exporter = exporter(elements=elements,
case=self.environment._CASE)
## Set the filename
self.opts.set('filename', self.args[0])
self.opts.set('include_extra_files',1)
self.opts.set('explain_inodes',1)
## Render it
ui = TEXTUI.TEXTUI(query=self.opts)
## Render the table:
exporter.render(self.opts, ui)
try:
for i in ui.generator.generator:
print i
except TypeError:
print ui
#return ui.generator
def complete(self, text, state):
args = self.args
if len(args)<2: return
if len(args)==2 or (text and len(args)==3):
exporters = [t for t in Registry.TABLE_RENDERERS.class_names_ex if t.startswith(text)]
return exporters[state]
elif '.' in text:
## complete the column name
table, column = text.split(".")
tbl = Registry.CASE_TABLES.dispatch(table)()
if tbl:
columns = [ c.name for c in tbl.instantiate_columns() if c.name.startswith(column)]
return "%s.%s" % (table,columns[state])
else:
tables = [ t for t in Registry.CASE_TABLES.class_names_ex if t.startswith(text)]
return tables[state]
import pyflag.tests
import pyflag.pyflagsh as pyflagsh
class HTMLExportTest(pyflag.tests.ScannerTest):
""" Test that exporting a HTML directory works """
test_case = "PyFlagTestCase"
test_file = "pyflag_stdimage_0.5.e01"
subsystem = 'EWF'
offset = "16128s"
def test01TypeScan(self):
""" Check the type scanner works """
env = pyflagsh.environment(case=self.test_case)
pyflagsh.shell_execv(env=env, command="scan",
argv=["*",'TypeScan'])
pyflagsh.shell_execv(env=env, command="export",
argv=["Images","HTMLDirectoryRenderer",
"TypeCaseTable.Thumbnail",
"TypeCaseTable.Type","InodeTable.Size",
#'filter=Type contains JPEG',
])
| gpl-2.0 |
gaiaresources/biosys | biosys/apps/main/tests/api/test_observation.py | 2 | 78736 | import datetime
import io
import json
import re
from os import path
from django.contrib.gis.geos import Point
from django.urls import reverse
from django.utils import timezone
from openpyxl import load_workbook
from rest_framework import status
from main import constants
from main.models import Site, Dataset, Record
from main.tests import factories
from main.tests.api import helpers
from main.tests.test_data_package import clone
class TestPermissions(helpers.BaseUserTestCase):
"""
Test Permissions
Get: authenticated
Update: admin, data_engineer, custodians
Create: admin, data_engineer, custodians
Delete: admin, data_engineer, custodians
"""
def setUp(self):
super(TestPermissions, self).setUp()
rows = [
['What', 'When', 'Latitude', 'Longitude', 'Comments'],
['Chubby bat', '2018-06-01', -32, 115.75, 'It is huge!']
]
self.ds_1 = self._create_dataset_and_records_from_rows(rows)
self.assertEqual(self.ds_1.type, Dataset.TYPE_OBSERVATION)
self.record_1 = self.ds_1.record_set.first()
self.assertIsNotNone(self.record_1)
def test_get(self):
urls = [
reverse('api:record-list'),
reverse('api:record-detail', kwargs={'pk': self.record_1.pk})
]
access = {
"forbidden": [self.anonymous_client],
"allowed": [
self.readonly_client,
self.custodian_1_client,
self.custodian_2_client,
self.admin_client,
self.data_engineer_1_client,
self.data_engineer_2_client
]
}
for client in access['forbidden']:
for url in urls:
self.assertIn(
client.get(url).status_code,
[status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]
)
for client in access['allowed']:
for url in urls:
self.assertEqual(
client.get(url).status_code,
status.HTTP_200_OK
)
def test_create(self):
"""
Admin, custodians and data engineers
:return:
"""
urls = [reverse('api:record-list')]
ds = self.ds_1
rec = self.record_1
data = {
"dataset": rec.dataset.pk,
"data": rec.data,
"datetime": rec.datetime,
"geometry": rec.geometry.geojson
}
access = {
"forbidden": [
self.anonymous_client,
self.readonly_client,
self.custodian_2_client,
self.data_engineer_2_client
],
"allowed": [
self.admin_client,
self.custodian_1_client
]
}
for client in access['forbidden']:
for url in urls:
self.assertIn(
client.post(url, data, format='json').status_code,
[status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]
)
for client in access['allowed']:
for url in urls:
count = ds.record_queryset.count()
self.assertEqual(
client.post(url, data, format='json').status_code,
status.HTTP_201_CREATED
)
self.assertEqual(ds.record_queryset.count(), count + 1)
def test_bulk_create(self):
"""
Bulk creation is not allowed through this endpoint
:return:
"""
urls = [reverse('api:record-list')]
rec = self.record_1
ds = self.ds_1
data = [
{
"dataset": rec.dataset.pk,
"data": rec.data
},
{
"dataset": rec.dataset.pk,
"data": rec.data
}
]
access = {
"forbidden": [
self.anonymous_client,
self.readonly_client,
self.custodian_2_client,
self.admin_client,
self.custodian_1_client,
self.data_engineer_1_client,
self.data_engineer_2_client
],
"allowed": []
}
for client in access['forbidden']:
for url in urls:
self.assertIn(
client.post(url, data, format='json').status_code,
[status.HTTP_400_BAD_REQUEST, status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]
)
for client in access['allowed']:
for url in urls:
count = ds.record_queryset.count()
self.assertEqual(
client.post(url, data, format='json').status_code,
status.HTTP_201_CREATED
)
self.assertEqual(ds.record_queryset.count(), count + len(data))
def test_update(self):
"""
admin + custodian of project for site 1
:return:
"""
rec = self.record_1
previous_data = clone(rec.data)
updated_data = clone(previous_data)
updated_data['Longitude'] = '118.78'
urls = [reverse('api:record-detail', kwargs={'pk': rec.pk})]
data = {
"data": updated_data,
}
access = {
"forbidden": [
self.anonymous_client,
self.readonly_client,
self.custodian_2_client,
self.data_engineer_2_client
],
"allowed": [self.admin_client, self.custodian_1_client, self.data_engineer_1_client]
}
for client in access['forbidden']:
for url in urls:
self.assertIn(
client.patch(url, data, format='json').status_code,
[status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]
)
for client in access['allowed']:
for url in urls:
rec.data = previous_data
rec.save()
self.assertEqual(
client.patch(url, data, format='json').status_code,
status.HTTP_200_OK
)
rec.refresh_from_db()
self.assertEqual(rec.data, updated_data)
def test_delete(self):
"""
Currently admin, custodians and data engineers
:return:
"""
rec = self.record_1
urls = [reverse('api:record-detail', kwargs={'pk': rec.pk})]
data = None
access = {
"forbidden": [
self.anonymous_client,
self.readonly_client,
self.custodian_2_client,
self.data_engineer_2_client
],
"allowed": [
self.admin_client,
self.custodian_1_client,
self.data_engineer_1_client
]
}
for client in access['forbidden']:
for url in urls:
self.assertIn(
client.delete(url, data, format='json').status_code,
[status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]
)
for client in access['allowed']:
for url in urls:
rec.save()
count = Dataset.objects.count()
self.assertEqual(
client.delete(url, data, format='json').status_code,
status.HTTP_204_NO_CONTENT
)
self.assertTrue(Dataset.objects.count(), count - 1)
def test_options(self):
urls = [
reverse('api:record-list'),
reverse('api:record-detail', kwargs={'pk': 1})
]
access = {
"forbidden": [self.anonymous_client],
"allowed": [
self.readonly_client,
self.custodian_1_client,
self.custodian_2_client,
self.admin_client,
self.data_engineer_1_client,
self.data_engineer_2_client
]
}
for client in access['forbidden']:
for url in urls:
self.assertIn(
client.options(url).status_code,
[status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]
)
# authenticated
for client in access['allowed']:
for url in urls:
self.assertEqual(
client.options(url).status_code,
status.HTTP_200_OK
)
class TestDataValidation(helpers.BaseUserTestCase):
def setUp(self):
super(TestDataValidation, self).setUp()
self.ds_1 = self._create_dataset_with_schema(
self.project_1,
self.data_engineer_1_client,
self.observation_schema_with_with_all_possible_geometry_fields(),
dataset_type=Dataset.TYPE_OBSERVATION
)
# set the date
self.record_1 = self._create_record(
self.custodian_1_client,
self.ds_1,
{
'What': 'Chubby bat',
'When': '2018-06-30',
'Latitude': -32.0,
'Longitude': 115.75
}
)
self.assertIsNotNone(self.record_1)
def test_create_one_happy_path(self):
"""
Test the create of one record
:return:
"""
# grab one existing an re-inject it
record = self.record_1
ds = self.ds_1
data = {
"dataset": record.dataset.pk,
"data": record.data
}
url = reverse('api:record-list')
client = self.custodian_1_client
count = ds.record_queryset.count()
self.assertEqual(
client.post(url, data, format='json').status_code,
status.HTTP_201_CREATED
)
self.assertEqual(ds.record_queryset.count(), count + 1)
def test_empty_not_allowed(self):
ds = self.ds_1
record = self.record_1
data = {
"dataset": record.dataset.pk,
"data": {}
}
url = reverse('api:record-list')
client = self.custodian_1_client
count = ds.record_queryset.count()
self.assertEqual(
client.post(url, data, format='json').status_code,
status.HTTP_400_BAD_REQUEST
)
self.assertEqual(ds.record_queryset.count(), count)
def test_create_column_not_in_schema(self):
"""
Test that if we introduce a column not in the the dataset it will not validate
:return:
"""
ds = self.ds_1
record = self.record_1
incorrect_data = clone(record.data)
incorrect_data['Extra Column'] = "Extra Value"
data = {
"dataset": record.dataset.pk,
"data": incorrect_data
}
url = reverse('api:record-list')
# set strict mode
url = helpers.set_strict_mode(url)
client = self.custodian_1_client
count = ds.record_queryset.count()
self.assertEqual(
client.post(url, data, format='json').status_code,
status.HTTP_400_BAD_REQUEST
)
self.assertEqual(ds.record_queryset.count(), count)
def test_update_column_not_in_schema(self):
"""
Test that if we introduce a column not in the the dataset it will not validate
:return:
"""
ds = self.ds_1
record = self.record_1
incorrect_data = clone(record.data)
incorrect_data['Extra Column'] = "Extra Value"
data = {
"dataset": record.dataset.pk,
"data": incorrect_data
}
url = reverse('api:record-detail', kwargs={"pk": record.pk})
client = self.custodian_1_client
count = ds.record_queryset.count()
# set strict mode
url = helpers.set_strict_mode(url)
self.assertEqual(
client.put(url, data, format='json').status_code,
status.HTTP_400_BAD_REQUEST
)
self.assertEqual(ds.record_queryset.count(), count)
self.assertEqual(
client.patch(url, data, format='json').status_code,
status.HTTP_400_BAD_REQUEST
)
self.assertEqual(ds.record_queryset.count(), count)
def test_date_error(self):
"""
Test date values
:return:
"""
ds = self.ds_1
record = self.record_1
date_column = ds.schema.observation_date_field.name
# ensure the date field is set as required
self.assertTrue(ds.schema.observation_date_field.required)
new_data = clone(record.data)
url_post = reverse('api:record-list')
url_update = reverse('api:record-detail', kwargs={'pk': record.pk})
valid_values = ['15/08/2008']
for value in valid_values:
new_data[date_column] = value
data = {
"dataset": record.dataset.pk,
"data": new_data
}
client = self.custodian_1_client
count = ds.record_queryset.count()
self.assertEqual(
client.post(url_post, data, format='json').status_code,
status.HTTP_201_CREATED
)
self.assertEqual(ds.record_queryset.count(), count + 1)
invalid_values = [None, '', 'abcd']
for value in invalid_values:
new_data[date_column] = value
data = {
"dataset": record.dataset.pk,
"data": new_data
}
client = self.custodian_1_client
count = ds.record_queryset.count()
self.assertEqual(
client.post(url_post, data, format='json').status_code,
status.HTTP_400_BAD_REQUEST
)
self.assertEqual(
client.put(url_update, data, format='json').status_code,
status.HTTP_400_BAD_REQUEST
)
self.assertEqual(
client.patch(url_update, data, format='json').status_code,
status.HTTP_400_BAD_REQUEST
)
self.assertEqual(ds.record_queryset.count(), count)
def test_geometry_error(self):
"""
An observation must have a valid geometry
:return:
"""
ds = self.ds_1
record = self.record_1
lat_column = ds.schema.latitude_field.name
new_data = clone(record.data)
url_post = reverse('api:record-list')
url_update = reverse('api:record-detail', kwargs={'pk': record.pk})
valid_values = [-34.125]
for value in valid_values:
new_data[lat_column] = value
data = {
"dataset": record.dataset.pk,
"data": new_data
}
client = self.custodian_1_client
count = ds.record_queryset.count()
self.assertEqual(
client.post(url_post, data, format='json').status_code,
status.HTTP_201_CREATED
)
self.assertEqual(ds.record_queryset.count(), count + 1)
invalid_values = [None, '', 'abcd']
for value in invalid_values:
new_data[lat_column] = value
data = {
"dataset": record.dataset.pk,
"data": new_data
}
client = self.custodian_1_client
count = ds.record_queryset.count()
self.assertEqual(
client.post(url_post, data, format='json').status_code,
status.HTTP_400_BAD_REQUEST
)
self.assertEqual(
client.put(url_update, data, format='json').status_code,
status.HTTP_400_BAD_REQUEST
)
self.assertEqual(
client.patch(url_update, data, format='json').status_code,
status.HTTP_400_BAD_REQUEST
)
self.assertEqual(ds.record_queryset.count(), count)
class TestSiteExtraction(helpers.BaseUserTestCase):
def setUp(self):
super(TestSiteExtraction, self).setUp()
self.site_1 = factories.SiteFactory.create(
project=self.project_1,
code='COTT',
geometry="SRID=4326;"
"LINESTRING (124.18701171875 -17.6484375, 126.38427734375 -18.615234375, 123.35205078125 "
"-20.65869140625, 124.1650390625 -17.71435546875)",)
self.ds_1 = self._create_dataset_with_schema(
self.project_1,
self.data_engineer_1_client,
self.observation_schema_with_with_all_possible_geometry_fields(),
dataset_type=Dataset.TYPE_OBSERVATION
)
# set the date
self.record_1 = self._create_record(
self.custodian_1_client,
self.ds_1,
{
'What': 'Chubby bat',
'When': '2018-06-30',
'Site Code': 'COTT',
}
)
self.assertIsNotNone(self.record_1)
def test_create_with_site(self):
"""
The descriptor contains a foreign key to the site.
Test that the site is extracted from the data
:return:
"""
# clear all records
ds = self.ds_1
ds.record_queryset.delete()
self.assertEqual(ds.record_queryset.count(), 0)
record = self.record_1
data = {
"dataset": record.dataset.pk,
"data": record.data
}
schema = ds.schema
self.assertTrue(schema.has_fk_for_model('Site'))
expected_site = record.site
url = reverse('api:record-list')
client = self.custodian_1_client
self.assertEqual(
client.post(url, data, format='json').status_code,
status.HTTP_201_CREATED
)
self.assertEqual(ds.record_queryset.count(), 1)
self.assertEqual(ds.record_queryset.first().site, expected_site)
def test_update_site(self):
ds = self.ds_1
record = self.record_1
site = factories.SiteFactory.create(code='NEW-SITE', project=self.project_1, geometry=Point(117, 33))
# need to test if the site belongs to the dataset project or the update won't happen
self.assertIsNotNone(site)
self.assertEqual(site.project, record.dataset.project)
self.assertNotEqual(record.site, site)
# update site value
schema = record.dataset.schema
site_column = schema.get_fk_for_model('Site').data_field
self.assertIsNotNone(site_column)
r_data = record.data
r_data[site_column] = site.code
data = {
"data": r_data
}
url = reverse('api:record-detail', kwargs={"pk": record.pk})
client = self.custodian_1_client
resp = client.patch(url, data, format='json')
self.assertEqual(resp.status_code, status.HTTP_200_OK)
record.refresh_from_db()
self.assertEqual(record.site, site)
class TestDateTimeAndGeometryExtraction(helpers.BaseUserTestCase):
@staticmethod
def schema_with_lat_long_and_date():
schema_fields = [
{
"name": "What",
"type": "string",
"constraints": helpers.REQUIRED_CONSTRAINTS
},
{
"name": "When",
"type": "date",
"constraints": helpers.NOT_REQUIRED_CONSTRAINTS,
"format": "any",
"biosys": {
'type': 'observationDate'
}
},
{
"name": "Latitude",
"type": "number",
"biosys": {
"type": "latitude"
},
"constraints": {
"required": True,
"minimum": -90.0,
"maximum": 90.0,
}
},
{
"name": "Longitude",
"type": "number",
"biosys": {
"type": "longitude"
},
"constraints": {
"required": True,
"minimum": -180.0,
"maximum": 180.0,
}
},
]
schema = helpers.create_schema_from_fields(schema_fields)
return schema
@staticmethod
def schema_with_no_date():
schema_fields = [
{
"name": "What",
"type": "string",
"constraints": helpers.REQUIRED_CONSTRAINTS
},
{
"name": "Latitude",
"type": "number",
"biosys": {
"type": "latitude"
},
"constraints": {
"required": True,
"minimum": -90.0,
"maximum": 90.0,
}
},
{
"name": "Longitude",
"type": "number",
"biosys": {
"type": "longitude"
},
"constraints": {
"required": True,
"minimum": -180.0,
"maximum": 180.0,
}
},
]
schema = helpers.create_schema_from_fields(schema_fields)
return schema
def test_create(self):
"""
Test that the date and geometry are extracted from the data
and saved in DB
:return:
"""
project = self.project_1
client = self.custodian_1_client
schema = self.schema_with_lat_long_and_date()
dataset = self._create_dataset_with_schema(
project, self.data_engineer_1_client, schema,
dataset_type=Dataset.TYPE_OBSERVATION)
self.assertEqual(dataset.record_queryset.count(), 0)
record_data = {
'What': 'A test',
'When': '01/06/2017',
'Latitude': -32.0,
'Longitude': 116.0
}
payload = {
"dataset": dataset.pk,
"data": record_data
}
expected_date = datetime.date(2017, 6, 1)
url = reverse('api:record-list')
self.assertEqual(
client.post(url, payload, format='json').status_code,
status.HTTP_201_CREATED
)
self.assertEqual(dataset.record_queryset.count(), 1)
record = dataset.record_queryset.first()
self.assertEqual(timezone.localtime(record.datetime).date(), expected_date)
geometry = record.geometry
self.assertIsInstance(geometry, Point)
self.assertEqual(geometry.x, 116.0)
self.assertEqual(geometry.y, -32.0)
def test_update(self):
"""
Test that the date and geometry are extracted from the data
and saved in DB
:return:
"""
project = self.project_1
client = self.custodian_1_client
schema = self.schema_with_lat_long_and_date()
dataset = self._create_dataset_with_schema(
project, self.data_engineer_1_client, schema, dataset_type=Dataset.TYPE_OBSERVATION
)
self.assertEqual(dataset.record_queryset.count(), 0)
record_data = {
'What': 'A test',
'When': '01/06/2017',
'Latitude': -32.0,
'Longitude': 116.0
}
payload = {
"dataset": dataset.pk,
"data": record_data
}
url = reverse('api:record-list')
self.assertEqual(
client.post(url, payload, format='json').status_code,
status.HTTP_201_CREATED
)
record = dataset.record_queryset.first()
# change date
new_date = '20/4/2016'
# change lat/lon
new_long = 111.111
new_lat = 22.222
record_data = {
'When': new_date,
'Latitude': new_lat,
'Longitude': new_long
}
payload = {
"dataset": dataset.pk,
"data": record_data
}
url = reverse('api:record-detail', kwargs={"pk": record.pk})
self.assertEqual(
client.patch(url, data=payload, format='json').status_code,
status.HTTP_200_OK
)
self.assertEqual(dataset.record_queryset.count(), 1)
record.refresh_from_db()
expected_date = datetime.date(2016, 4, 20)
self.assertEqual(timezone.localtime(record.datetime).date(), expected_date)
geometry = record.geometry
self.assertIsInstance(geometry, Point)
self.assertEqual(geometry.x, new_long)
self.assertEqual(geometry.y, new_lat)
def test_create_without_date(self):
"""
As of 29/06/2017. Date are not mandatory to create a Observation record
"""
project = self.project_1
client = self.custodian_1_client
schema = self.schema_with_no_date()
dataset = self._create_dataset_with_schema(
project, self.data_engineer_1_client, schema, dataset_type=Dataset.TYPE_OBSERVATION)
self.assertEqual(dataset.record_queryset.count(), 0)
record_data = {
'What': 'A test',
'Latitude': -32.0,
'Longitude': 116.0
}
payload = {
"dataset": dataset.pk,
"data": record_data
}
url = reverse('api:record-list')
self.assertEqual(
client.post(url, payload, format='json').status_code,
status.HTTP_201_CREATED
)
self.assertEqual(dataset.record_queryset.count(), 1)
record = dataset.record_queryset.first()
self.assertIsNone(record.datetime)
geometry = record.geometry
self.assertIsInstance(geometry, Point)
self.assertEqual(geometry.x, 116.0)
self.assertEqual(geometry.y, -32.0)
def test_update_without_date(self):
"""
        As of 29/06/2017, dates are not mandatory when updating an Observation record.
"""
project = self.project_1
client = self.custodian_1_client
schema = self.schema_with_no_date()
dataset = self._create_dataset_with_schema(
project, self.data_engineer_1_client, schema, dataset_type=Dataset.TYPE_OBSERVATION
)
self.assertEqual(dataset.record_queryset.count(), 0)
record_data = {
'What': 'A test',
'Latitude': -32.0,
'Longitude': 116.0
}
payload = {
"dataset": dataset.pk,
"data": record_data
}
url = reverse('api:record-list')
self.assertEqual(
client.post(url, payload, format='json').status_code,
status.HTTP_201_CREATED
)
record = dataset.record_queryset.first()
new_long = 111.111
new_lat = 22.222
record_data = {
'Latitude': new_lat,
'Longitude': new_long
}
payload = {
"dataset": dataset.pk,
"data": record_data
}
url = reverse('api:record-detail', kwargs={"pk": record.pk})
self.assertEqual(
client.patch(url, data=payload, format='json').status_code,
status.HTTP_200_OK
)
self.assertEqual(dataset.record_queryset.count(), 1)
record.refresh_from_db()
self.assertIsNone(record.datetime)
geometry = record.geometry
self.assertIsInstance(geometry, Point)
self.assertEqual(geometry.x, new_long)
self.assertEqual(geometry.y, new_lat)
class TestEastingNorthing(helpers.BaseUserTestCase):
"""
    Use case: the schema contains datum and zone fields as well as easting/northing.
"""
@staticmethod
def schema_with_easting_northing():
schema_fields = [
{
"name": "What",
"type": "string",
"constraints": helpers.REQUIRED_CONSTRAINTS
},
{
"name": "When",
"type": "date",
"constraints": helpers.REQUIRED_CONSTRAINTS,
"format": "any",
"biosys": {
'type': 'observationDate'
}
},
{
"name": "Northing",
"type": "number",
"constraints": helpers.REQUIRED_CONSTRAINTS,
"biosys": {
"type": "northing"
}
},
{
"name": "Easting",
"type": "number",
"constraints": helpers.REQUIRED_CONSTRAINTS,
"biosys": {
"type": "easting"
}
},
{
"name": "Datum",
"type": "string",
"constraints": helpers.REQUIRED_CONSTRAINTS
},
{
"name": "Zone",
"type": "integer",
"constraints": helpers.REQUIRED_CONSTRAINTS
}
]
return helpers.create_schema_from_fields(schema_fields)
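    # The reprojection exercised below is assumed to behave like this GeoDjango
    # sketch (illustrative only; the real conversion happens server side):
    #   point = Point(easting, northing, srid=28350)  # GDA94 / MGA zone 50
    #   point.transform(4326)  # reprojects in place to WGS84 lon/lat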
def test_create_happy_path(self):
project = self.project_1
client = self.custodian_1_client
schema = self.schema_with_easting_northing()
dataset = self._create_dataset_with_schema(
project, self.data_engineer_1_client, schema, dataset_type=Dataset.TYPE_OBSERVATION
)
self.assertIsNotNone(dataset.schema.datum_field)
self.assertIsNotNone(dataset.schema.zone_field)
easting = 405542.537
northing = 6459127.469
datum = 'GDA94'
zone = 50
record_data = {
'What': 'Chubby Bat',
'When': '12/12/2017',
'Easting': easting,
'Northing': northing,
'Datum': datum,
'Zone': zone
}
payload = {
'dataset': dataset.pk,
'data': record_data
}
url = reverse('api:record-list')
resp = client.post(url, data=payload, format='json')
self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
qs = dataset.record_queryset
self.assertEqual(qs.count(), 1)
record = qs.first()
geom = record.geometry
# should be in WGS84 -> srid = 4326
self.assertEqual(geom.srid, 4326)
        # convert it back to GDA94 / MGA zone 50 -> srid = 28350
geom.transform(28350)
# compare with 2 decimal place precision
self.assertAlmostEqual(geom.x, easting, places=2)
self.assertAlmostEqual(geom.y, northing, places=2)
def test_update_happy_path(self):
project = self.project_1
client = self.custodian_1_client
schema = self.schema_with_easting_northing()
dataset = self._create_dataset_with_schema(
project, self.data_engineer_1_client, schema, dataset_type=Dataset.TYPE_OBSERVATION
)
self.assertIsNotNone(dataset.schema.datum_field)
self.assertIsNotNone(dataset.schema.zone_field)
# first create record with wrong zone
easting = 405542.537
northing = 6459127.469
datum = 'GDA94'
zone = 58
record_data = {
'What': 'Chubby Bat',
'When': '12/12/2017',
'Easting': easting,
'Northing': northing,
'Datum': datum,
'Zone': zone
}
payload = {
'dataset': dataset.pk,
'data': record_data
}
url = reverse('api:record-list')
resp = client.post(url, data=payload, format='json')
self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
qs = dataset.record_queryset
self.assertEqual(qs.count(), 1)
record = qs.first()
geom = record.geometry
# should be in WGS84 -> srid = 4326
self.assertEqual(geom.srid, 4326)
        # convert it back to GDA94 / MGA zone 50 -> srid = 28350
geom.transform(28350)
        # compare with 2 decimal place precision: the values should differ from
        # the expected ones because the zone is wrong
self.assertNotAlmostEqual(geom.x, easting, places=2)
self.assertNotAlmostEqual(geom.y, northing, places=2)
        # send a patch to update the zone
record_data = {
'What': 'Chubby Bat',
'When': '12/12/2017',
'Easting': easting,
'Northing': northing,
'Datum': datum,
'Zone': 50
}
payload = {
'data': record_data
}
url = reverse('api:record-detail', kwargs={'pk': record.pk})
resp = client.patch(url, data=payload, format='json')
self.assertEqual(resp.status_code, status.HTTP_200_OK)
record.refresh_from_db()
geom = record.geometry
# should be in WGS84 -> srid = 4326
self.assertEqual(geom.srid, 4326)
        # convert it back to GDA94 / MGA zone 50 -> srid = 28350
geom.transform(28350)
self.assertAlmostEqual(geom.x, easting, places=2)
self.assertAlmostEqual(geom.y, northing, places=2)
def test_default_datum(self):
"""
If only easting and northing are provided the project's datum/zone should be used
"""
project = self.project_1
srid = constants.get_datum_srid('GDA94 / MGA zone 50')
self.assertEqual(srid, 28350)
project.datum = srid
project.save()
client = self.custodian_1_client
# schema with datum and zone not required
schema_fields = [
{
"name": "What",
"type": "string",
"constraints": helpers.REQUIRED_CONSTRAINTS
},
{
"name": "When",
"type": "date",
"constraints": helpers.REQUIRED_CONSTRAINTS,
"format": "any",
"biosys": {
'type': 'observationDate'
}
},
{
"name": "Northing",
"type": "number",
"constraints": helpers.REQUIRED_CONSTRAINTS,
"biosys": {
"type": "northing"
}
},
{
"name": "Easting",
"type": "number",
"constraints": helpers.REQUIRED_CONSTRAINTS,
"biosys": {
"type": "easting"
}
},
{
"name": "Datum",
"type": "string",
"constraints": helpers.NOT_REQUIRED_CONSTRAINTS
},
{
"name": "Zone",
"type": "integer",
"constraints": helpers.NOT_REQUIRED_CONSTRAINTS
}
]
schema = helpers.create_schema_from_fields(schema_fields)
dataset = self._create_dataset_with_schema(
project, self.data_engineer_1_client, schema, dataset_type=Dataset.TYPE_OBSERVATION
)
self.assertIsNotNone(dataset.schema.datum_field)
self.assertIsNotNone(dataset.schema.zone_field)
easting = 405542.537
northing = 6459127.469
record_data = {
'What': 'Chubby Bat',
'When': '12/12/2017',
'Easting': easting,
'Northing': northing,
}
payload = {
'dataset': dataset.pk,
'data': record_data
}
url = reverse('api:record-list') + '?strict=true'
resp = client.post(url, data=payload, format='json')
self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
qs = dataset.record_queryset
self.assertEqual(qs.count(), 1)
record = qs.first()
geom = record.geometry
# should be in WGS84 -> srid = 4326
self.assertEqual(geom.srid, 4326)
self.assertIsInstance(geom, Point)
self.assertAlmostEqual(geom.x, 116, places=2)
self.assertAlmostEqual(geom.y, -32, places=2)
        # convert it back to GDA94 / MGA zone 50 -> srid = 28350
geom.transform(srid)
# compare with 2 decimal place precision
self.assertAlmostEqual(geom.x, easting, places=2)
self.assertAlmostEqual(geom.y, northing, places=2)
class TestGeometryFromSite(helpers.BaseUserTestCase):
"""
    Use case: the observation dataset doesn't contain any geometry columns/fields
    but has a reference (foreign key) to the site code. In this case, when the user
    uploads observations with a site reference only, the observation geometry should
    be copied (not referenced) from the site geometry.
"""
@staticmethod
def schema_with_site_code_fk():
schema_fields = [
{
"name": "What",
"type": "string",
"constraints": helpers.REQUIRED_CONSTRAINTS
},
{
"name": "When",
"type": "date",
"constraints": helpers.REQUIRED_CONSTRAINTS,
"format": "any",
"biosys": {
'type': 'observationDate'
}
},
{
"name": "Site Code",
"type": "string",
"constraints": helpers.REQUIRED_CONSTRAINTS
},
]
schema = helpers.create_schema_from_fields(schema_fields)
schema = helpers.add_model_field_foreign_key_to_schema(schema, {
'schema_field': 'Site Code',
'model': 'Site',
'model_field': 'code'
})
return schema
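    # Note: the geometry is expected to be copied by value from the site at
    # record creation/update time (roughly record.geometry = site.geometry),
    # not kept as a live reference; test_site_geometry_updated below probes
    # the consequences of that design choice.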
@staticmethod
def schema_with_latlong_and_site_code_fk():
schema_fields = [
{
"name": "What",
"type": "string",
"constraints": helpers.REQUIRED_CONSTRAINTS
},
{
"name": "When",
"type": "date",
"constraints": helpers.REQUIRED_CONSTRAINTS,
"format": "any",
"biosys": {
'type': 'observationDate'
}
},
{
"name": "Latitude",
"type": "number",
"constraints": helpers.NOT_REQUIRED_CONSTRAINTS,
"biosys": {
"type": 'latitude'
}
},
{
"name": "Longitude",
"type": "number",
"constraints": helpers.NOT_REQUIRED_CONSTRAINTS,
"biosys": {
"type": 'longitude'
}
},
{
"name": "Site Code",
"type": "string",
"constraints": helpers.NOT_REQUIRED_CONSTRAINTS
},
]
schema = helpers.create_schema_from_fields(schema_fields)
schema = helpers.add_model_field_foreign_key_to_schema(schema, {
'schema_field': 'Site Code',
'model': 'Site',
'model_field': 'code'
})
return schema
def test_observation_schema_valid_with_site_foreign_key(self):
"""
        An observation schema should be valid without geometry fields as long as
        it has a foreign key to the site.
"""
schema_fields = [
{
"name": "What",
"type": "string",
"constraints": helpers.REQUIRED_CONSTRAINTS
},
{
"name": "When",
"type": "date",
"constraints": helpers.REQUIRED_CONSTRAINTS,
"biosys": {
'type': 'observationDate'
}
},
{
"name": "Site Code",
"type": "string",
"constraints": helpers.REQUIRED_CONSTRAINTS
},
]
schema = helpers.create_schema_from_fields(schema_fields)
schema = helpers.add_model_field_foreign_key_to_schema(schema, {
'schema_field': 'Site Code',
'model': 'Site',
'model_field': 'code'
})
data_package = helpers.create_data_package_from_schema(schema)
# create data set
url = reverse('api:dataset-list')
project = self.project_1
client = self.data_engineer_1_client
dataset_name = "Observation with site foreign key and no geometry"
payload = {
"name": dataset_name,
"type": Dataset.TYPE_OBSERVATION,
"project": project.pk,
'data_package': data_package
}
resp = client.post(url, data=payload, format='json')
self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
# double check
self.assertIsNotNone(Dataset.objects.filter(project=project, name=dataset_name).first())
def test_observation_schema_not_valid_with_other_foreign_key(self):
"""
        Only a foreign key to the site code is accepted.
"""
schema_fields = [
{
"name": "What",
"type": "string",
"constraints": helpers.REQUIRED_CONSTRAINTS
},
{
"name": "When",
"type": "date",
"constraints": helpers.REQUIRED_CONSTRAINTS,
"biosys": {
'type': 'observationDate'
}
},
{
"name": "Project", # project not site
"type": "string",
"constraints": helpers.REQUIRED_CONSTRAINTS
},
]
schema = helpers.create_schema_from_fields(schema_fields)
schema = helpers.add_model_field_foreign_key_to_schema(schema, {
'schema_field': 'Project', # project not site
'model': 'Project',
'model_field': 'title'
})
data_package = helpers.create_data_package_from_schema(schema)
# create data set
url = reverse('api:dataset-list')
project = self.project_1
client = self.data_engineer_1_client
dataset_name = "Observation with project foreign key and no geometry"
payload = {
"name": dataset_name,
"type": Dataset.TYPE_OBSERVATION,
"project": project.pk,
'data_package': data_package
}
resp = client.post(url, data=payload, format='json')
self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
def test_geometry_extracted_create(self):
"""
Test that the record geometry is properly copied from the site when posting through api
"""
project = self.project_1
client = self.custodian_1_client
schema = self.schema_with_site_code_fk()
dataset = self._create_dataset_with_schema(
project, self.data_engineer_1_client, schema, dataset_type=Dataset.TYPE_OBSERVATION
)
site_code = 'Cottesloe'
site_geometry = Point(115.76, -32.0)
# create the site
site = factories.SiteFactory(code=site_code, geometry=site_geometry, project=project)
record_data = {
'What': 'Hello! This is a test.',
'When': '12/12/2017',
'Site Code': site_code
}
payload = {
'dataset': dataset.pk,
'data': record_data
}
url = reverse('api:record-list')
resp = client.post(url, data=payload, format='json')
self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
record = Record.objects.filter(id=resp.json().get('id')).first()
self.assertIsNotNone(record)
self.assertEqual(record.site, site)
self.assertEqual(record.geometry, site_geometry)
def test_geometry_extracted_update(self):
"""
Test that the record geometry is properly copied from the site when updating/patching
"""
# create the record
project = self.project_1
client = self.custodian_1_client
schema = self.schema_with_site_code_fk()
dataset = self._create_dataset_with_schema(
project, self.data_engineer_1_client, schema, dataset_type=Dataset.TYPE_OBSERVATION
)
site_code = 'Cottesloe'
site_geometry = Point(115.76, -32.0)
# create the site
factories.SiteFactory(code=site_code, geometry=site_geometry, project=project)
record_data = {
'What': 'Hello! This is a test.',
'When': '12/12/2017',
'Site Code': site_code
}
payload = {
'dataset': dataset.pk,
'data': record_data
}
url = reverse('api:record-list')
resp = client.post(url, data=payload, format='json')
self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
record = Record.objects.filter(id=resp.json().get('id')).first()
self.assertIsNotNone(record)
self.assertEqual(record.geometry, site_geometry)
# update record with new site
site_code = 'Somewhere'
site_geometry = Point(116.0, -30.0)
# create the site
factories.SiteFactory(code=site_code, geometry=site_geometry, project=project)
record_data = {
'What': 'Yellow!',
'When': '01/01/2017',
'Site Code': site_code
}
payload = {
'data': record_data
}
url = reverse('api:record-detail', kwargs={'pk': record.pk})
resp = client.patch(url, data=payload, format='json')
self.assertEqual(resp.status_code, status.HTTP_200_OK)
record.refresh_from_db()
self.assertIsNotNone(record)
self.assertEqual(timezone.make_naive(record.datetime), datetime.datetime(2017, 1, 1, 0, 0))
self.assertEqual(record.geometry, site_geometry)
def test_record_rejected_if_site_has_no_geometry_api(self):
"""
        When posting through the API, if the referenced site has no geometry
        the record should be rejected.
"""
project = self.project_1
client = self.custodian_1_client
schema = self.schema_with_site_code_fk()
dataset = self._create_dataset_with_schema(
project, self.data_engineer_1_client, schema, dataset_type=Dataset.TYPE_OBSERVATION
)
site_code = 'Cottesloe'
# create the site
site = factories.SiteFactory(code=site_code, geometry=None, project=project)
self.assertIsNone(site.geometry)
record_data = {
'What': 'Hello! This is a test.',
'When': '12/12/2017',
'Site Code': site_code
}
payload = {
'dataset': dataset.pk,
'data': record_data
}
url = reverse('api:record-list')
resp = client.post(url, data=payload, format='json')
self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
# check error
errors = resp.json().get('data')
        # errors is a list of strings in the format 'field_name::message'
self.assertIsNotNone(errors)
self.assertTrue(isinstance(errors, list))
self.assertEqual(len(errors), 1)
field_name, message = errors[0].split('::')
self.assertEqual(field_name, 'Site Code')
# message should be something like:
expected_message = 'The site Cottesloe has no geometry'
self.assertEqual(message, expected_message)
def test_schema_with_lat_long_and_site_fk(self):
"""
Use case:
        The schema contains classic lat/long fields and a site_code foreign key.
Test that:
1 - the lat/long provided takes precedence over the site geometry
2 - if lat/long not provided the site geometry is used
"""
project = self.project_1
client = self.custodian_1_client
schema = self.schema_with_latlong_and_site_code_fk()
dataset = self._create_dataset_with_schema(
project, self.data_engineer_1_client, schema, dataset_type=Dataset.TYPE_OBSERVATION
)
self.assertIsNotNone(dataset.schema.latitude_field)
self.assertIsNotNone(dataset.schema.longitude_field)
site_code = 'Cottesloe'
site_geometry = Point(115.76, -32.0)
# create the site
site = factories.SiteFactory(code=site_code, geometry=site_geometry, project=project)
        # the observation geometry differs from the site geometry
observation_geometry = Point(site_geometry.x + 2, site_geometry.y + 2)
self.assertNotEqual(site.geometry, observation_geometry)
# lat/long + site
record_data = {
'What': 'Hello! This is a test.',
'When': '12/12/2017',
'Longitude': observation_geometry.x,
'Latitude': observation_geometry.y,
'Site Code': site_code
}
payload = {
'dataset': dataset.pk,
'data': record_data
}
url = reverse('api:record-list')
resp = client.post(url, data=payload, format='json')
self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
record = Record.objects.filter(id=resp.json().get('id')).first()
self.assertIsNotNone(record)
self.assertEqual(record.site, site)
self.assertEqual(record.geometry.geojson, observation_geometry.geojson)
# lat/long no site
record_data = {
'What': 'Hello! This is a test.',
'When': '12/12/2017',
'Longitude': observation_geometry.x,
'Latitude': observation_geometry.y,
'Site Code': None
}
payload = {
'dataset': dataset.pk,
'data': record_data
}
url = reverse('api:record-list')
resp = client.post(url, data=payload, format='json')
self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
record = Record.objects.filter(id=resp.json().get('id')).first()
self.assertIsNotNone(record)
self.assertIsNone(record.site)
self.assertEqual(record.geometry.geojson, observation_geometry.geojson)
# site without lat/long
record_data = {
'What': 'Hello! This is a test.',
'When': '12/12/2017',
'Longitude': None,
'Latitude': None,
'Site Code': site_code
}
payload = {
'dataset': dataset.pk,
'data': record_data
}
url = reverse('api:record-list')
resp = client.post(url, data=payload, format='json')
self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
record = Record.objects.filter(id=resp.json().get('id')).first()
self.assertIsNotNone(record)
self.assertEqual(record.site, site)
self.assertEqual(record.geometry.geojson, site_geometry.geojson)
# no lat/long no site -> error
record_data = {
'What': 'Hello! This is a test.',
'When': '12/12/2017',
'Longitude': None,
'Latitude': None,
'Site Code': None
}
payload = {
'dataset': dataset.pk,
'data': record_data
}
url = reverse('api:record-list')
resp = client.post(url, data=payload, format='json')
self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
def test_geometry_extracted_upload(self):
"""
Test that the record geometry is properly copied from the site when using an xlsx upload
"""
project = self.project_1
client = self.custodian_1_client
schema = self.schema_with_site_code_fk()
dataset = self._create_dataset_with_schema(
project, self.data_engineer_1_client, schema, dataset_type=Dataset.TYPE_OBSERVATION
)
# create two sites
site_1_code = 'Cottesloe'
site_1_geometry = Point(115.76, -32.0)
site_1 = factories.SiteFactory(code=site_1_code, geometry=site_1_geometry, project=project)
site_2_code = 'Somewhere'
site_2_geometry = Point(116.0, -30.0)
# create the site
site_2 = factories.SiteFactory(code=site_2_code, geometry=site_2_geometry, project=project)
# data
csv_data = [
['What', 'When', 'Site Code'],
['what_1', '01/01/2017', site_1_code],
['what_2', '02/02/2017', site_2_code]
]
file_ = helpers.rows_to_xlsx_file(csv_data)
self.assertEqual(0, Record.objects.filter(dataset=dataset).count())
url = reverse('api:dataset-upload', kwargs={'pk': dataset.pk})
with open(file_, 'rb') as fp:
payload = {
'file': fp
}
resp = client.post(url, data=payload, format='multipart')
self.assertEqual(status.HTTP_200_OK, resp.status_code)
records = Record.objects.filter(dataset=dataset)
self.assertEqual(records.count(), len(csv_data) - 1)
r = [r for r in records if r.data['What'] == 'what_1'][0]
self.assertEqual(r.site, site_1)
self.assertEqual(r.geometry, site_1_geometry)
r = [r for r in records if r.data['What'] == 'what_2'][0]
self.assertEqual(r.site, site_2)
self.assertEqual(r.geometry, site_2_geometry)
def test_record_rejected_if_site_has_no_geometry_upload(self):
"""
        When uploading with Excel, if the referenced site has no geometry
        the record should be rejected.
"""
# same as above but site_2 has no geometry
project = self.project_1
client = self.custodian_1_client
schema = self.schema_with_site_code_fk()
dataset = self._create_dataset_with_schema(
project, self.data_engineer_1_client, schema, dataset_type=Dataset.TYPE_OBSERVATION
)
        # create two sites, the second one without a geometry
site_1_code = 'Cottesloe'
site_1_geometry = Point(115.76, -32.0)
site_1 = factories.SiteFactory(code=site_1_code, geometry=site_1_geometry, project=project)
site_2_code = 'Somewhere'
site_2_geometry = None
factories.SiteFactory(code=site_2_code, geometry=site_2_geometry, project=project)
csv_data = [
['What', 'When', 'Site Code'],
['what_1', '01/01/2017', site_1_code],
['what_2', '02/02/2017', site_2_code]
]
file_ = helpers.rows_to_xlsx_file(csv_data)
self.assertEqual(0, Record.objects.filter(dataset=dataset).count())
url = reverse('api:dataset-upload', kwargs={'pk': dataset.pk})
with open(file_, 'rb') as fp:
payload = {
'file': fp
}
resp = client.post(url, data=payload, format='multipart')
self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
# Check that the good record is there.
records = Record.objects.filter(dataset=dataset)
self.assertEqual(records.count(), 1)
r = records.first()
self.assertEqual(r.site, site_1)
self.assertEqual(r.geometry, site_1_geometry)
def test_site_geometry_updated(self):
"""
Use case:
        observations have been created with a site geometry and the user updates
        the site location, expecting the associated observations to have their
        geometry updated.
        This can happen only if the observation has the site as a FK (of course)
        and exactly the same geometry.
"""
project = self.project_1
client = self.custodian_1_client
schema = self.schema_with_site_code_fk()
dataset = self._create_dataset_with_schema(
project, self.data_engineer_1_client, schema, dataset_type=Dataset.TYPE_OBSERVATION
)
# create two sites
site_1_code = 'Cottesloe'
site_1_geometry = Point(115.76, -32.0)
site_1 = factories.SiteFactory(code=site_1_code, geometry=site_1_geometry, project=project)
site_2_code = 'Somewhere'
site_2_geometry = Point(116.0, -30.0)
# create the site
site_2 = factories.SiteFactory(code=site_2_code, geometry=site_2_geometry, project=project)
# data
csv_data = [
['What', 'When', 'Site Code'],
['what_1', '01/01/2017', site_1_code],
['what_2', '02/02/2017', site_2_code]
]
file_ = helpers.rows_to_xlsx_file(csv_data)
self.assertEqual(0, Record.objects.filter(dataset=dataset).count())
url = reverse('api:dataset-upload', kwargs={'pk': dataset.pk})
with open(file_, 'rb') as fp:
payload = {
'file': fp
}
resp = client.post(url, data=payload, format='multipart')
self.assertEqual(status.HTTP_200_OK, resp.status_code)
records = Record.objects.filter(dataset=dataset)
self.assertEqual(records.count(), len(csv_data) - 1)
record_1 = [r for r in records if r.data['What'] == 'what_1'][0]
self.assertEqual(record_1.site, site_1)
self.assertEqual(record_1.geometry, site_1_geometry)
record_2 = [r for r in records if r.data['What'] == 'what_2'][0]
self.assertEqual(record_2.site, site_2)
self.assertEqual(record_2.geometry, site_2_geometry)
# Change the site_1 geometry and expect the record_1 to have its geometry updated
previous_geometry = site_1_geometry
new_geometry = Point(previous_geometry.x + 2, previous_geometry.y + 2)
self.assertNotEqual(previous_geometry, new_geometry)
url = reverse('api:site-detail', kwargs={'pk': site_1.pk})
payload = {
"geometry": new_geometry.wkt
}
resp = client.patch(url, data=payload, format='json')
self.assertEqual(resp.status_code, 200)
# check that the record has been updated
record_1.refresh_from_db()
self.assertEqual(record_1.geometry.geojson, new_geometry.geojson)
# site_2 record should be untouched
record_2.refresh_from_db()
self.assertEqual(record_2.geometry.geojson, site_2_geometry.geojson)
# Use case: the record geometry should be updated ONLY if it matches exactly the site geometry
# new geometry for record_1
new_site_geometry = Point(179, -30)
self.assertNotEqual(new_site_geometry, site_1.geometry)
new_record_geometry = Point(180, -35)
self.assertNotEqual(new_record_geometry, new_site_geometry)
record_1.geometry = new_record_geometry
record_1.save()
self.assertNotEqual(record_1.geometry.geojson, record_1.site.geometry.geojson)
site = record_1.site
site.geometry = new_site_geometry
site.save()
# check record not changed
record_1.refresh_from_db()
self.assertEqual(record_1.geometry, new_record_geometry)
self.assertNotEqual(record_1.geometry.geojson, record_1.site.geometry.geojson)
class TestMultipleGeometrySource(helpers.BaseUserTestCase):
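    # These tests pin down the precedence between geometry sources when a schema
    # carries all of them. The order, inferred from the observed behaviour rather
    # than from any documented contract, is:
    #   easting/northing > latitude/longitude > site code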
def test_geometry_easting_northing_precedence(self):
"""
If all fields are provided easting and northing have precedence over lat/long and site code.
"""
project = self.project_1
client = self.custodian_1_client
schema = self.observation_schema_with_with_all_possible_geometry_fields()
dataset = self._create_dataset_with_schema(
project, self.data_engineer_1_client, schema, dataset_type=Dataset.TYPE_OBSERVATION
)
self.assertIsNotNone(dataset.schema.datum_field)
self.assertIsNotNone(dataset.schema.zone_field)
# site geometry
site_code = 'Cottesloe'
site_geometry = Point(115.76, -32.0)
# create the site
site = factories.SiteFactory(code=site_code, geometry=site_geometry, project=project)
# easting/northing: nearly (116.0, -32.0)
easting = 405542.537
northing = 6459127.469
east_north_datum = 'GDA94'
zone = 50
east_north_srid = 28350
# lat/long
longitude = 117.0
latitude = -33.0
lat_long_datum = 'WGS84'
lat_long_srid = 4326
record_data = {
'What': 'A record with all geometry fields populated',
'When': '12/12/2017',
'Site Code': site_code,
'Easting': easting,
'Northing': northing,
'Datum': east_north_datum,
'Zone': zone,
'Latitude': latitude,
'Longitude': longitude
}
payload = {
'dataset': dataset.pk,
'data': record_data
}
url = reverse('api:record-list')
resp = client.post(url, data=payload, format='json')
self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
record = Record.objects.filter(id=resp.json().get('id')).first()
self.assertIsNotNone(record)
geometry = record.geometry
self.assertIsNotNone(geometry)
self.assertIsInstance(geometry, Point)
# it should be the easting/northing geometry
geometry.transform(east_north_srid)
self.assertAlmostEqual(geometry.x, easting, places=2)
self.assertAlmostEqual(geometry.y, northing, places=2)
def test_geometry_lat_long_precedence(self):
"""
Lat/long takes precedence over site code
"""
project = self.project_1
client = self.custodian_1_client
schema = self.observation_schema_with_with_all_possible_geometry_fields()
dataset = self._create_dataset_with_schema(
project, self.data_engineer_1_client, schema, dataset_type=Dataset.TYPE_OBSERVATION
)
self.assertIsNotNone(dataset.schema.datum_field)
self.assertIsNotNone(dataset.schema.zone_field)
# site geometry
site_code = 'Cottesloe'
site_geometry = Point(115.76, -32.0)
# create the site
site = factories.SiteFactory(code=site_code, geometry=site_geometry, project=project)
# lat/long
longitude = 117.0
latitude = -33.0
lat_long_datum = 'WGS84'
lat_long_srid = 4326
record_data = {
'What': 'A record with all geometry fields populated',
'When': '12/12/2017',
'Site Code': site_code,
'Easting': None,
'Northing': None,
'Datum': lat_long_datum,
'Zone': None,
'Latitude': latitude,
'Longitude': longitude
}
payload = {
'dataset': dataset.pk,
'data': record_data
}
url = reverse('api:record-list')
resp = client.post(url, data=payload, format='json')
self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
record = Record.objects.filter(id=resp.json().get('id')).first()
self.assertIsNotNone(record)
geometry = record.geometry
self.assertIsNotNone(geometry)
self.assertIsInstance(geometry, Point)
# it should be the lat/long geometry
geometry.transform(lat_long_srid)
self.assertAlmostEqual(geometry.x, longitude, places=4)
self.assertAlmostEqual(geometry.y, latitude, places=4)
# and not the site
self.assertNotAlmostEqual(geometry.x, site_geometry.x, places=4)
self.assertNotAlmostEqual(geometry.y, site_geometry.y, places=4)
def test_easting_northing_and_site(self):
"""
Easting/Northing > site_code
"""
project = self.project_1
client = self.custodian_1_client
schema = self.observation_schema_with_with_all_possible_geometry_fields()
dataset = self._create_dataset_with_schema(
project, self.data_engineer_1_client, schema, dataset_type=Dataset.TYPE_OBSERVATION
)
self.assertIsNotNone(dataset.schema.datum_field)
self.assertIsNotNone(dataset.schema.zone_field)
# site geometry
site_code = 'Cottesloe'
site_geometry = Point(115.76, -32.0)
# create the site
site = factories.SiteFactory(code=site_code, geometry=site_geometry, project=project)
# easting/northing: nearly (116.0, -32.0)
easting = 405542.537
northing = 6459127.469
east_north_datum = 'GDA94'
zone = 50
east_north_srid = 28350
record_data = {
'What': 'A record with all geometry fields populated',
'When': '12/12/2017',
'Site Code': site_code,
'Easting': easting,
'Northing': northing,
'Datum': east_north_datum,
'Zone': zone,
'Latitude': None,
'Longitude': None
}
payload = {
'dataset': dataset.pk,
'data': record_data
}
url = reverse('api:record-list')
resp = client.post(url, data=payload, format='json')
self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
record = Record.objects.filter(id=resp.json().get('id')).first()
self.assertIsNotNone(record)
geometry = record.geometry
self.assertIsNotNone(geometry)
self.assertIsInstance(geometry, Point)
# it should be the easting/northing geometry
geometry.transform(east_north_srid)
self.assertAlmostEqual(geometry.x, easting, places=2)
self.assertAlmostEqual(geometry.y, northing, places=2)
def test_site_only(self):
project = self.project_1
client = self.custodian_1_client
schema = self.observation_schema_with_with_all_possible_geometry_fields()
dataset = self._create_dataset_with_schema(
project, self.data_engineer_1_client, schema, dataset_type=Dataset.TYPE_OBSERVATION
)
self.assertIsNotNone(dataset.schema.datum_field)
self.assertIsNotNone(dataset.schema.zone_field)
# site geometry
site_code = 'Cottesloe'
site_geometry = Point(115.76, -32.0)
# create the site
site = factories.SiteFactory(code=site_code, geometry=site_geometry, project=project)
record_data = {
'What': 'A record with all geometry fields populated',
'When': '12/12/2017',
'Site Code': site_code,
'Easting': None,
'Northing': None,
'Datum': None,
'Zone': None,
'Latitude': None,
'Longitude': None
}
payload = {
'dataset': dataset.pk,
'data': record_data
}
url = reverse('api:record-list')
resp = client.post(url, data=payload, format='json')
self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
record = Record.objects.filter(id=resp.json().get('id')).first()
self.assertIsNotNone(record)
geometry = record.geometry
self.assertIsNotNone(geometry)
self.assertIsInstance(geometry, Point)
        # it should be the site geometry
self.assertAlmostEqual(geometry.x, site_geometry.x, places=4)
self.assertAlmostEqual(geometry.y, site_geometry.y, places=4)
class TestGeometryConversion(helpers.BaseUserTestCase):
def test_lat_long_with_projected_project_datum(self):
"""
see: https://youtrack.gaiaresources.com.au/youtrack/issue/BIOSYS-152
Use case:
- Project datum set to be a projected one, e.g GDA/Zone 56.
- Schema has a latitude, longitude, datum, zone, easting and northing field (the whole shebang)
- Post a record with lat=-32.0 long=115.75 and Datum=WGS84
Success if record.geometry is Point(115.75, -32.0)
"""
# Create project with projected datum
program = factories.ProgramFactory.create()
program.data_engineers.add(self.data_engineer_1_user)
datum_srid = constants.get_datum_srid('GDA94 / MGA zone 56')
project = factories.ProjectFactory.create(
program=program,
datum=datum_srid
)
self.assertTrue(constants.is_projected_srid(project.datum))
project.custodians.add(self.custodian_1_user)
self.assertTrue(project.is_custodian(self.custodian_1_user))
# Dataset and records in lat/long
schema = self.observation_schema_with_with_all_possible_geometry_fields()
client = self.custodian_1_client
dataset = self._create_dataset_with_schema(
project,
self.data_engineer_1_client,
schema,
dataset_type=Dataset.TYPE_OBSERVATION
)
# post record
record_data = {
'When': "2018-05-25",
'Datum': 'WGS84',
'Latitude': -32.0,
'Longitude': 115.75
}
record = self._create_record(
client,
dataset,
record_data
)
self.assertIsNotNone(record)
self.assertIsNotNone(record.geometry)
self.assertEqual(record.geometry.x, 115.75)
self.assertEqual(record.geometry.y, -32.0)
# try with the upload end-point
dataset.record_set.all().delete()
rows = [
['When', 'Datum', 'Latitude', 'Longitude'],
['2018-05-25', 'WGS84', -32.0, 115.75]
]
response = self._upload_records_from_rows(
rows,
dataset.pk,
strict=True
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
record = dataset.record_set.last()
self.assertIsNotNone(record)
self.assertIsNotNone(record.geometry)
self.assertEqual(record.geometry.x, 115.75)
self.assertEqual(record.geometry.y, -32.0)
class TestSerialization(helpers.BaseUserTestCase):
def test_date_serialization_uses_project_timezone(self):
# TODO: implement this
pass
class TestExport(helpers.BaseUserTestCase):
def setUp(self):
super(TestExport, self).setUp()
rows = [
['When', 'Species', 'How Many', 'Latitude', 'Longitude', 'Comments'],
['2018-02-07', 'Canis lupus', 1, -32.0, 115.75, ''],
['2018-01-12', 'Chubby bat', 10, -32.0, 115.75, 'Awesome'],
['2018-02-02', 'Canis dingo', 2, -32.0, 115.75, 'Watch out kids'],
['2018-02-10', 'Unknown', 3, -32.0, 115.75, 'Canis?'],
]
self.ds_1 = self._create_dataset_and_records_from_rows(rows)
self.assertEqual(self.ds_1.type, Dataset.TYPE_OBSERVATION)
def test_happy_path_no_filter(self):
client = self.custodian_1_client
dataset = self.ds_1
all_records = Record.objects.filter(dataset=dataset)
self.assertTrue(all_records.count() > 0)
url = reverse('api:record-list')
query = {
'dataset__id': dataset.pk,
'output': 'xlsx'
}
try:
resp = client.get(url, query)
except Exception as e:
self.fail("Export should not raise an exception: {}".format(e))
self.assertEqual(resp.status_code, status.HTTP_200_OK)
# check headers
self.assertEqual(resp.get('content-type'),
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
content_disposition = resp.get('content-disposition')
# should be something like:
        # 'attachment; filename=DatasetName_YYYY_MM_DD-HHMMSS.xlsx'
match = re.match('attachment; filename=(.+)', content_disposition)
self.assertIsNotNone(match)
filename, ext = path.splitext(match.group(1))
self.assertEqual(ext, '.xlsx')
        self.assertTrue(filename.startswith(dataset.name))
# read content
wb = load_workbook(io.BytesIO(resp.content), read_only=True)
# one datasheet named from dataset
sheet_names = wb.sheetnames
self.assertEqual(1, len(sheet_names))
self.assertEqual(dataset.name, sheet_names[0])
ws = wb[dataset.name]
rows = list(ws.rows)
expected_records = Record.objects.filter(dataset=dataset)
self.assertEqual(len(rows), expected_records.count() + 1)
headers = [c.value for c in rows[0]]
schema = dataset.schema
# all the columns of the schema should be in the excel
self.assertEqual(schema.headers, headers)
def test_permission_ok_for_not_custodian(self):
"""
Export is a read action. Should be authorised for every logged-in user.
"""
client = self.custodian_2_client
dataset = self.ds_1
url = reverse('api:record-list')
query = {
'dataset__id': dataset.pk,
'output': 'xlsx'
}
try:
resp = client.get(url, query)
except Exception as e:
self.fail("Export should not raise an exception: {}".format(e))
self.assertEqual(resp.status_code, status.HTTP_200_OK)
def test_permission_denied_if_not_logged_in(self):
"""Must be logged-in."""
client = self.anonymous_client
dataset = self.ds_1
url = reverse('api:record-list')
query = {
'dataset__id': dataset.pk,
'output': 'xlsx'
}
try:
resp = client.get(url, query)
except Exception as e:
self.fail("Export should not raise an exception: {}".format(e))
self.assertEqual(resp.status_code, status.HTTP_401_UNAUTHORIZED)
class TestDateNotMandatory(helpers.BaseUserTestCase):
date_easting_northing_site_nothing_required_schema = [
{
"name": "What",
"type": "string",
"constraints": helpers.NOT_REQUIRED_CONSTRAINTS
},
{
"name": "When",
"type": "date",
"constraints": helpers.NOT_REQUIRED_CONSTRAINTS,
"format": "any",
"biosys": {
'type': 'observationDate'
}
},
{
"name": 'Site',
"type": "string",
"constraints": helpers.NOT_REQUIRED_CONSTRAINTS,
"biosys": {
'type': 'siteCode'
}
},
{
"name": "Northing",
"type": "number",
"constraints": helpers.NOT_REQUIRED_CONSTRAINTS,
"biosys": {
"type": "northing"
}
},
{
"name": "Easting",
"type": "number",
"constraints": helpers.NOT_REQUIRED_CONSTRAINTS,
"biosys": {
"type": "easting"
}
},
{
"name": "Datum",
"type": "string",
"constraints": helpers.NOT_REQUIRED_CONSTRAINTS
},
{
"name": "Zone",
"type": "integer",
"constraints": helpers.NOT_REQUIRED_CONSTRAINTS
}
]
def test_record_without_date(self):
project = self.project_1
client = self.custodian_1_client
dataset = self._create_dataset_with_schema(
project,
self.data_engineer_1_client,
self.date_easting_northing_site_nothing_required_schema,
Dataset.TYPE_OBSERVATION
)
# easting/northing: nearly (116.0, -32.0)
easting = 405542.537
northing = 6459127.469
datum = 'GDA94'
zone = 50
east_north_srid = 28350
record_data = {
'What': 'Whaaat?',
'Easting': easting,
'Northing': northing,
'Datum': datum,
'Zone': zone
}
record = self._create_record(client, dataset, record_data)
self.assertIsNone(record.datetime)
geometry = record.geometry
self.assertIsNotNone(geometry)
self.assertIsInstance(geometry, Point)
geometry.transform(east_north_srid)
self.assertAlmostEqual(geometry.x, easting, places=2)
self.assertAlmostEqual(geometry.y, northing, places=2)
class TestPatch(helpers.BaseUserTestCase):
def test_patch_validated(self):
"""
Test that we can patch just the 'validated' flag
:return:
"""
rows = [
['What', 'When', 'Latitude', 'Longitude', 'Comments'],
['Chubby bat', '2018-06-01', -32, 115.75, 'It is huge!']
]
dataset = self._create_dataset_and_records_from_rows(rows)
self.assertEqual(dataset.type, Dataset.TYPE_OBSERVATION)
records = dataset.record_set.all()
record = records.last()
self.assertIsNotNone(record)
self.assertFalse(record.validated)
previous_data = json.dumps(record.data)
# patch
url = reverse('api:record-detail', kwargs={"pk": record.pk})
client = self.custodian_1_client
payload = {
'validated': True
}
resp = client.patch(url, payload)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
record.refresh_from_db()
self.assertTrue(record.validated)
        # the record data itself should be unchanged
        self.assertEqual(json.dumps(record.data), previous_data)
def test_patch_locked(self):
"""
Test that we can patch just the 'locked' flag
:return:
"""
rows = [
['What', 'When', 'Latitude', 'Longitude', 'Comments'],
['Chubby bat', '2018-06-01', -32, 115.75, 'It is huge!']
]
dataset = self._create_dataset_and_records_from_rows(rows)
self.assertEqual(dataset.type, Dataset.TYPE_OBSERVATION)
records = dataset.record_set.all()
record = records.last()
self.assertIsNotNone(record)
self.assertFalse(record.locked)
previous_data = json.dumps(record.data)
# patch
url = reverse('api:record-detail', kwargs={"pk": record.pk})
client = self.custodian_1_client
payload = {
'locked': True
}
resp = client.patch(url, payload)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
record.refresh_from_db()
self.assertTrue(record.locked)
        # the record data itself should be unchanged
        self.assertEqual(json.dumps(record.data), previous_data)
| apache-2.0 |