repo_name | path | copies | size | content | license
|---|---|---|---|---|---|
markeTIC/OCB | addons/account/edi/invoice.py | 342 | 13984 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011-2012 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from openerp.addons.edi import EDIMixin
from werkzeug import url_encode
INVOICE_LINE_EDI_STRUCT = {
'name': True,
'origin': True,
'uos_id': True,
'product_id': True,
'price_unit': True,
'quantity': True,
'discount': True,
# fields used for web preview only - discarded on import
'price_subtotal': True,
}
INVOICE_TAX_LINE_EDI_STRUCT = {
'name': True,
'base': True,
'amount': True,
'manual': True,
'sequence': True,
'base_amount': True,
'tax_amount': True,
}
INVOICE_EDI_STRUCT = {
'name': True,
'origin': True,
'company_id': True, # -> to be changed into partner
'type': True, # -> reversed at import
'internal_number': True, # -> reference at import
'comment': True,
'date_invoice': True,
'date_due': True,
'partner_id': True,
'payment_term': True,
#custom: currency_id
'invoice_line': INVOICE_LINE_EDI_STRUCT,
'tax_line': INVOICE_TAX_LINE_EDI_STRUCT,
# fields used for web preview only - discarded on import
#custom: 'partner_ref'
'amount_total': True,
'amount_untaxed': True,
'amount_tax': True,
}
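# Illustrative only (not part of the original module): a hypothetical EDI
# document shaped by the structs above is a plain dict, e.g.
#
#     {'name': 'SAJ/2012/002', 'type': 'out_invoice',
#      'internal_number': 'SAJ/2012/002', 'amount_total': 120.0,
#      'invoice_line': [{'name': 'Service', 'quantity': 1.0,
#                        'price_unit': 120.0, 'price_subtotal': 120.0}],
#      'tax_line': []}
#
# plus the extra keys (company_address, currency, partner_ref, ...) that
# edi_export() below merges in.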
class account_invoice(osv.osv, EDIMixin):
_inherit = 'account.invoice'
def edi_export(self, cr, uid, records, edi_struct=None, context=None):
"""Exports a supplier or customer invoice"""
edi_struct = dict(edi_struct or INVOICE_EDI_STRUCT)
res_company = self.pool.get('res.company')
res_partner = self.pool.get('res.partner')
edi_doc_list = []
for invoice in records:
# generate the main report
self._edi_generate_report_attachment(cr, uid, invoice, context=context)
edi_doc = super(account_invoice,self).edi_export(cr, uid, [invoice], edi_struct, context)[0]
edi_doc.update({
'company_address': res_company.edi_export_address(cr, uid, invoice.company_id, context=context),
'company_paypal_account': invoice.company_id.paypal_account,
'partner_address': res_partner.edi_export(cr, uid, [invoice.partner_id], context=context)[0],
'currency': self.pool.get('res.currency').edi_export(cr, uid, [invoice.currency_id], context=context)[0],
'partner_ref': invoice.reference or False,
})
edi_doc_list.append(edi_doc)
return edi_doc_list
def _edi_tax_account(self, cr, uid, invoice_type='out_invoice', context=None):
#TODO/FIXME: should select proper Tax Account
account_pool = self.pool.get('account.account')
account_ids = account_pool.search(cr, uid, [('type','<>','view'),('type','<>','income'), ('type', '<>', 'closed')])
tax_account = False
if account_ids:
tax_account = account_pool.browse(cr, uid, account_ids[0])
return tax_account
def _edi_invoice_account(self, cr, uid, partner_id, invoice_type, context=None):
res_partner = self.pool.get('res.partner')
partner = res_partner.browse(cr, uid, partner_id, context=context)
if invoice_type in ('out_invoice', 'out_refund'):
invoice_account = partner.property_account_receivable
else:
invoice_account = partner.property_account_payable
return invoice_account
def _edi_product_account(self, cr, uid, product_id, invoice_type, context=None):
product_pool = self.pool.get('product.product')
product = product_pool.browse(cr, uid, product_id, context=context)
if invoice_type in ('out_invoice','out_refund'):
account = product.property_account_income or product.categ_id.property_account_income_categ
else:
account = product.property_account_expense or product.categ_id.property_account_expense_categ
return account
def _edi_import_company(self, cr, uid, edi_document, context=None):
# TODO: for multi-company setups, we currently import the document in the
# user's current company, but we should perhaps foresee a way to select
# the desired company among the user's allowed companies
self._edi_requires_attributes(('company_id','company_address','type'), edi_document)
res_partner = self.pool.get('res.partner')
xid, company_name = edi_document.pop('company_id')
# Retrofit address info into a unified partner info (changed in v7 - used to keep them separate)
company_address_edi = edi_document.pop('company_address')
company_address_edi['name'] = company_name
company_address_edi['is_company'] = True
company_address_edi['__import_model'] = 'res.partner'
company_address_edi['__id'] = xid # override address ID, as of v7 they should be the same anyway
if company_address_edi.get('logo'):
company_address_edi['image'] = company_address_edi.pop('logo')
invoice_type = edi_document['type']
if invoice_type.startswith('out_'):
company_address_edi['customer'] = True
else:
company_address_edi['supplier'] = True
partner_id = res_partner.edi_import(cr, uid, company_address_edi, context=context)
# modify edi_document to refer to new partner
partner = res_partner.browse(cr, uid, partner_id, context=context)
partner_edi_m2o = self.edi_m2o(cr, uid, partner, context=context)
edi_document['partner_id'] = partner_edi_m2o
edi_document.pop('partner_address', None) # ignored, that's supposed to be our own address!
return partner_id
def edi_import(self, cr, uid, edi_document, context=None):
""" During import, invoices will import the company that is provided in the invoice as
        a new partner (e.g. the supplier company on a customer invoice will become a supplier
        record for the new invoice).
Summary of tasks that need to be done:
- import company as a new partner, if type==in then supplier=1, else customer=1
- partner_id field is modified to point to the new partner
- company_address data used to add address to new partner
        - change type: 'out_invoice' <-> 'in_invoice', 'out_refund' <-> 'in_refund'
- reference: should contain the value of the 'internal_number'
- reference_type: 'none'
- internal number: reset to False, auto-generated
        - journal_id: should be selected based on type: simply put the 'type'
          in the context when calling create(), and it will be selected correctly
- payment_term: if set, create a default one based on name...
- for invoice lines, the account_id value should be taken from the
product's default, i.e. from the default category, as it will not
be provided.
- for tax lines, we disconnect from the invoice.line, so all tax lines
will be of type 'manual', and default accounts should be picked based
on the tax config of the DB where it is imported.
"""
if context is None:
context = {}
self._edi_requires_attributes(('company_id','company_address','type','invoice_line','currency'), edi_document)
# extract currency info
res_currency = self.pool.get('res.currency')
currency_info = edi_document.pop('currency')
currency_id = res_currency.edi_import(cr, uid, currency_info, context=context)
currency = res_currency.browse(cr, uid, currency_id)
edi_document['currency_id'] = self.edi_m2o(cr, uid, currency, context=context)
        # change type: 'out_invoice' <-> 'in_invoice', 'out_refund' <-> 'in_refund'
invoice_type = edi_document['type']
invoice_type = invoice_type.startswith('in_') and invoice_type.replace('in_','out_') or invoice_type.replace('out_','in_')
edi_document['type'] = invoice_type
# import company as a new partner
partner_id = self._edi_import_company(cr, uid, edi_document, context=context)
# Set Account
invoice_account = self._edi_invoice_account(cr, uid, partner_id, invoice_type, context=context)
edi_document['account_id'] = invoice_account and self.edi_m2o(cr, uid, invoice_account, context=context) or False
# reference: should contain the value of the 'internal_number'
edi_document['reference'] = edi_document.get('internal_number', False)
# reference_type: 'none'
edi_document['reference_type'] = 'none'
# internal number: reset to False, auto-generated
edi_document['internal_number'] = False
# discard web preview fields, if present
edi_document.pop('partner_ref', None)
# journal_id: should be selected based on type: simply put the 'type' in the context when calling create(), will be selected correctly
context = dict(context, type=invoice_type)
# for invoice lines, the account_id value should be taken from the product's default, i.e. from the default category, as it will not be provided.
for edi_invoice_line in edi_document['invoice_line']:
product_info = edi_invoice_line['product_id']
product_id = self.edi_import_relation(cr, uid, 'product.product', product_info[1],
product_info[0], context=context)
account = self._edi_product_account(cr, uid, product_id, invoice_type, context=context)
# TODO: could be improved with fiscal positions perhaps
# account = fpos_obj.map_account(cr, uid, fiscal_position_id, account.id)
edi_invoice_line['account_id'] = self.edi_m2o(cr, uid, account, context=context) if account else False
# discard web preview fields, if present
edi_invoice_line.pop('price_subtotal', None)
# for tax lines, we disconnect from the invoice.line, so all tax lines will be of type 'manual', and default accounts should be picked based
# on the tax config of the DB where it is imported.
tax_account = self._edi_tax_account(cr, uid, context=context)
tax_account_info = self.edi_m2o(cr, uid, tax_account, context=context)
for edi_tax_line in edi_document.get('tax_line', []):
edi_tax_line['account_id'] = tax_account_info
edi_tax_line['manual'] = True
return super(account_invoice,self).edi_import(cr, uid, edi_document, context=context)
def _edi_record_display_action(self, cr, uid, id, context=None):
"""Returns an appropriate action definition dict for displaying
        the record with ID ``id``.
:param int id: database ID of record to display
:return: action definition dict
"""
action = super(account_invoice,self)._edi_record_display_action(cr, uid, id, context=context)
try:
invoice = self.browse(cr, uid, id, context=context)
if 'out_' in invoice.type:
view_ext_id = 'invoice_form'
journal_type = 'sale'
else:
view_ext_id = 'invoice_supplier_form'
journal_type = 'purchase'
ctx = "{'type': '%s', 'journal_type': '%s'}" % (invoice.type, journal_type)
action.update(context=ctx)
view_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'account', view_ext_id)[1]
action.update(views=[(view_id,'form'), (False, 'tree')])
except ValueError:
# ignore if views are missing
pass
return action
def _edi_paypal_url(self, cr, uid, ids, field, arg, context=None):
res = dict.fromkeys(ids, False)
for inv in self.browse(cr, uid, ids, context=context):
if inv.type == 'out_invoice' and inv.company_id.paypal_account:
params = {
"cmd": "_xclick",
"business": inv.company_id.paypal_account,
"item_name": "%s Invoice %s" % (inv.company_id.name, inv.number or ''),
"invoice": inv.number,
"amount": inv.residual,
"currency_code": inv.currency_id.name,
"button_subtype": "services",
"no_note": "1",
"bn": "OpenERP_Invoice_PayNow_" + inv.currency_id.name,
}
res[inv.id] = "https://www.paypal.com/cgi-bin/webscr?" + url_encode(params)
return res
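    # Hedged example (values hypothetical): for an out_invoice numbered
    # 'SAJ/2012/002' with a residual of 120.0 EUR, the function above yields
    # a URL of the form
    # https://www.paypal.com/cgi-bin/webscr?cmd=_xclick&business=...&amount=120.0&currency_code=EUR&...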
_columns = {
'paypal_url': fields.function(_edi_paypal_url, type='char', string='Paypal Url'),
}
class account_invoice_line(osv.osv, EDIMixin):
_inherit='account.invoice.line'
class account_invoice_tax(osv.osv, EDIMixin):
_inherit = "account.invoice.tax"
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
charbeljc/hr | hr_contract_reference/__openerp__.py | 9 | 1484 | # -*- coding:utf-8 -*-
##############################################################################
#
# Copyright (C) 2011,2013 Michael Telahun Makonnen <mmakonnen@gmail.com>.
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "HR Contract Reference",
"version": "1.0",
"category": "Generic Modules/Human Resources",
"author": 'Michael Telahun Makonnen, '
'Fekete Mihai (Forest and Biomass Services Romania), '
'Odoo Community Association (OCA)',
"website": "http://miketelahun.wordpress.com",
"license": "AGPL-3",
"depends": ["hr_contract"],
"data": [
'views/hr_contract_view.xml',
'data/hr_contract_sequence.xml',
],
'installable': True,
}
| agpl-3.0 |
MQQiang/kbengine | kbe/src/lib/python/Lib/curses/textpad.py | 155 | 7339 | """Simple textbox editing widget with Emacs-like keybindings."""
import curses
import curses.ascii
def rectangle(win, uly, ulx, lry, lrx):
"""Draw a rectangle with corners at the provided upper-left
and lower-right coordinates.
"""
win.vline(uly+1, ulx, curses.ACS_VLINE, lry - uly - 1)
win.hline(uly, ulx+1, curses.ACS_HLINE, lrx - ulx - 1)
win.hline(lry, ulx+1, curses.ACS_HLINE, lrx - ulx - 1)
win.vline(uly+1, lrx, curses.ACS_VLINE, lry - uly - 1)
win.addch(uly, ulx, curses.ACS_ULCORNER)
win.addch(uly, lrx, curses.ACS_URCORNER)
win.addch(lry, lrx, curses.ACS_LRCORNER)
win.addch(lry, ulx, curses.ACS_LLCORNER)
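# A minimal usage sketch (assumes a curses screen is already initialised,
# e.g. inside curses.wrapper): rectangle(stdscr, 5, 10, 15, 40) draws a box
# whose interior spans rows 6..14 and columns 11..39.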
class Textbox:
"""Editing widget using the interior of a window object.
Supports the following Emacs-like key bindings:
Ctrl-A Go to left edge of window.
Ctrl-B Cursor left, wrapping to previous line if appropriate.
Ctrl-D Delete character under cursor.
Ctrl-E Go to right edge (stripspaces off) or end of line (stripspaces on).
Ctrl-F Cursor right, wrapping to next line when appropriate.
Ctrl-G Terminate, returning the window contents.
Ctrl-H Delete character backward.
Ctrl-J Terminate if the window is 1 line, otherwise insert newline.
Ctrl-K If line is blank, delete it, otherwise clear to end of line.
Ctrl-L Refresh screen.
Ctrl-N Cursor down; move down one line.
Ctrl-O Insert a blank line at cursor location.
Ctrl-P Cursor up; move up one line.
Move operations do nothing if the cursor is at an edge where the movement
is not possible. The following synonyms are supported where possible:
KEY_LEFT = Ctrl-B, KEY_RIGHT = Ctrl-F, KEY_UP = Ctrl-P, KEY_DOWN = Ctrl-N
KEY_BACKSPACE = Ctrl-h
"""
def __init__(self, win, insert_mode=False):
self.win = win
self.insert_mode = insert_mode
(self.maxy, self.maxx) = win.getmaxyx()
self.maxy = self.maxy - 1
self.maxx = self.maxx - 1
self.stripspaces = 1
self.lastcmd = None
win.keypad(1)
def _end_of_line(self, y):
"""Go to the location of the first blank on the given line,
returning the index of the last non-blank character."""
last = self.maxx
while True:
if curses.ascii.ascii(self.win.inch(y, last)) != curses.ascii.SP:
last = min(self.maxx, last+1)
break
elif last == 0:
break
last = last - 1
return last
def _insert_printable_char(self, ch):
(y, x) = self.win.getyx()
if y < self.maxy or x < self.maxx:
if self.insert_mode:
oldch = self.win.inch()
            # The try-except ignores the error we trigger from some curses
# versions by trying to write into the lowest-rightmost spot
# in the window.
try:
self.win.addch(ch)
except curses.error:
pass
if self.insert_mode:
(backy, backx) = self.win.getyx()
if curses.ascii.isprint(oldch):
self._insert_printable_char(oldch)
self.win.move(backy, backx)
def do_command(self, ch):
"Process a single editing command."
(y, x) = self.win.getyx()
self.lastcmd = ch
if curses.ascii.isprint(ch):
if y < self.maxy or x < self.maxx:
self._insert_printable_char(ch)
elif ch == curses.ascii.SOH: # ^a
self.win.move(y, 0)
elif ch in (curses.ascii.STX,curses.KEY_LEFT, curses.ascii.BS,curses.KEY_BACKSPACE):
if x > 0:
self.win.move(y, x-1)
elif y == 0:
pass
elif self.stripspaces:
self.win.move(y-1, self._end_of_line(y-1))
else:
self.win.move(y-1, self.maxx)
if ch in (curses.ascii.BS, curses.KEY_BACKSPACE):
self.win.delch()
elif ch == curses.ascii.EOT: # ^d
self.win.delch()
elif ch == curses.ascii.ENQ: # ^e
if self.stripspaces:
self.win.move(y, self._end_of_line(y))
else:
self.win.move(y, self.maxx)
elif ch in (curses.ascii.ACK, curses.KEY_RIGHT): # ^f
if x < self.maxx:
self.win.move(y, x+1)
elif y == self.maxy:
pass
else:
self.win.move(y+1, 0)
elif ch == curses.ascii.BEL: # ^g
return 0
elif ch == curses.ascii.NL: # ^j
if self.maxy == 0:
return 0
elif y < self.maxy:
self.win.move(y+1, 0)
elif ch == curses.ascii.VT: # ^k
if x == 0 and self._end_of_line(y) == 0:
self.win.deleteln()
else:
# first undo the effect of self._end_of_line
self.win.move(y, x)
self.win.clrtoeol()
elif ch == curses.ascii.FF: # ^l
self.win.refresh()
elif ch in (curses.ascii.SO, curses.KEY_DOWN): # ^n
if y < self.maxy:
self.win.move(y+1, x)
if x > self._end_of_line(y+1):
self.win.move(y+1, self._end_of_line(y+1))
elif ch == curses.ascii.SI: # ^o
self.win.insertln()
elif ch in (curses.ascii.DLE, curses.KEY_UP): # ^p
if y > 0:
self.win.move(y-1, x)
if x > self._end_of_line(y-1):
self.win.move(y-1, self._end_of_line(y-1))
return 1
def gather(self):
"Collect and return the contents of the window."
result = ""
for y in range(self.maxy+1):
self.win.move(y, 0)
stop = self._end_of_line(y)
if stop == 0 and self.stripspaces:
continue
for x in range(self.maxx+1):
if self.stripspaces and x > stop:
break
result = result + chr(curses.ascii.ascii(self.win.inch(y, x)))
if self.maxy > 0:
result = result + "\n"
return result
def edit(self, validate=None):
"Edit in the widget window and collect the results."
while 1:
ch = self.win.getch()
if validate:
ch = validate(ch)
if not ch:
continue
if not self.do_command(ch):
break
self.win.refresh()
return self.gather()
if __name__ == '__main__':
def test_editbox(stdscr):
ncols, nlines = 9, 4
uly, ulx = 15, 20
stdscr.addstr(uly-2, ulx, "Use Ctrl-G to end editing.")
win = curses.newwin(nlines, ncols, uly, ulx)
rectangle(stdscr, uly-1, ulx-1, uly + nlines, ulx + ncols)
stdscr.refresh()
return Textbox(win).edit()
str = curses.wrapper(test_editbox)
print('Contents of text box:', repr(str))
| lgpl-3.0 |
aldanor/blox | blox/utils.py | 1 | 1944 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import six
import struct
import functools
import numpy as np
try:
import ujson as json
json_dumps = json.dumps
except ImportError:
import json
    json_dumps = functools.partial(json.dumps, separators=(',', ':'))
def flatten_dtype(dtype):
dtype = np.dtype(dtype)
if dtype.fields is not None:
if dtype.type is np.record:
return ('record', list(dtype.descr))
return list(dtype.descr)
return str(dtype)
def restore_dtype(dtype):
def _convert_dtype(dt):
# workaround for a long-standing bug in numpy:
# https://github.com/numpy/numpy/issues/2407
is_string = lambda s: isinstance(s, (six.text_type, six.string_types))
if isinstance(dt, list):
if len(dt) == 2 and is_string(dt[0]):
return _convert_dtype(tuple(dt))
return [_convert_dtype(subdt) for subdt in dt]
elif isinstance(dt, tuple):
return tuple(_convert_dtype(subdt) for subdt in dt)
elif isinstance(dt, six.text_type) and six.PY2:
return dt.encode('ascii')
return dt
dtype = _convert_dtype(dtype)
if isinstance(dtype, (list, tuple)) and len(dtype) == 2 and dtype[0] == 'record':
return np.dtype((np.record, np.dtype(dtype[1])))
return np.dtype(dtype)
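# Hedged round-trip sketch (not part of the original module): flatten_dtype
# produces a JSON-serialisable description that restore_dtype inverts, e.g.
#
#     dt = np.dtype([('x', '<f8'), ('y', '<i4')])
#     spec = json.loads(json_dumps(flatten_dtype(dt)))
#     assert restore_dtype(spec) == dt
#
# the JSON round trip turns tuples into lists, which _convert_dtype above
# normalises back into tuples before rebuilding the dtype.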
def write_i64(stream, *values):
for value in values:
stream.write(struct.pack('<Q', value))
def read_i64(stream, count=None):
if count is None:
return struct.unpack('<Q', stream.read(8))[0]
return tuple(struct.unpack('<Q', stream.read(8))[0] for _ in range(count))
def write_json(stream, data):
payload = json_dumps(data).encode('utf-8')
write_i64(stream, len(payload))
stream.write(payload)
return len(payload) + 8
def read_json(stream):
length = read_i64(stream)
return json.loads(stream.read(length).decode('utf-8'))
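# Hedged usage sketch (not in the original module): the helpers above
# implement simple length-prefixed framing over any binary stream, e.g.
#
#     import io
#     buf = io.BytesIO()
#     write_json(buf, {'a': 1})   # 8-byte little-endian length, then payload
#     buf.seek(0)
#     assert read_json(buf) == {'a': 1}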
| mit |
PythonCharmers/bokeh | bokeh/palettes.py | 52 | 24061 | YlGn3 = ["#31a354", "#addd8e", "#f7fcb9"]
YlGn4 = ["#238443", "#78c679", "#c2e699", "#ffffcc"]
YlGn5 = ["#006837", "#31a354", "#78c679", "#c2e699", "#ffffcc"]
YlGn6 = ["#006837", "#31a354", "#78c679", "#addd8e", "#d9f0a3", "#ffffcc"]
YlGn7 = ["#005a32", "#238443", "#41ab5d", "#78c679", "#addd8e", "#d9f0a3", "#ffffcc"]
YlGn8 = ["#005a32", "#238443", "#41ab5d", "#78c679", "#addd8e", "#d9f0a3", "#f7fcb9", "#ffffe5"]
YlGn9 = ["#004529", "#006837", "#238443", "#41ab5d", "#78c679", "#addd8e", "#d9f0a3", "#f7fcb9", "#ffffe5"]
YlGnBu3 = ["#2c7fb8", "#7fcdbb", "#edf8b1"]
YlGnBu4 = ["#225ea8", "#41b6c4", "#a1dab4", "#ffffcc"]
YlGnBu5 = ["#253494", "#2c7fb8", "#41b6c4", "#a1dab4", "#ffffcc"]
YlGnBu6 = ["#253494", "#2c7fb8", "#41b6c4", "#7fcdbb", "#c7e9b4", "#ffffcc"]
YlGnBu7 = ["#0c2c84", "#225ea8", "#1d91c0", "#41b6c4", "#7fcdbb", "#c7e9b4", "#ffffcc"]
YlGnBu8 = ["#0c2c84", "#225ea8", "#1d91c0", "#41b6c4", "#7fcdbb", "#c7e9b4", "#edf8b1", "#ffffd9"]
YlGnBu9 = ["#081d58", "#253494", "#225ea8", "#1d91c0", "#41b6c4", "#7fcdbb", "#c7e9b4", "#edf8b1", "#ffffd9"]
GnBu3 = ["#43a2ca", "#a8ddb5", "#e0f3db"]
GnBu4 = ["#2b8cbe", "#7bccc4", "#bae4bc", "#f0f9e8"]
GnBu5 = ["#0868ac", "#43a2ca", "#7bccc4", "#bae4bc", "#f0f9e8"]
GnBu6 = ["#0868ac", "#43a2ca", "#7bccc4", "#a8ddb5", "#ccebc5", "#f0f9e8"]
GnBu7 = ["#08589e", "#2b8cbe", "#4eb3d3", "#7bccc4", "#a8ddb5", "#ccebc5", "#f0f9e8"]
GnBu8 = ["#08589e", "#2b8cbe", "#4eb3d3", "#7bccc4", "#a8ddb5", "#ccebc5", "#e0f3db", "#f7fcf0"]
GnBu9 = ["#084081", "#0868ac", "#2b8cbe", "#4eb3d3", "#7bccc4", "#a8ddb5", "#ccebc5", "#e0f3db", "#f7fcf0"]
BuGn3 = ["#2ca25f", "#99d8c9", "#e5f5f9"]
BuGn4 = ["#238b45", "#66c2a4", "#b2e2e2", "#edf8fb"]
BuGn5 = ["#006d2c", "#2ca25f", "#66c2a4", "#b2e2e2", "#edf8fb"]
BuGn6 = ["#006d2c", "#2ca25f", "#66c2a4", "#99d8c9", "#ccece6", "#edf8fb"]
BuGn7 = ["#005824", "#238b45", "#41ae76", "#66c2a4", "#99d8c9", "#ccece6", "#edf8fb"]
BuGn8 = ["#005824", "#238b45", "#41ae76", "#66c2a4", "#99d8c9", "#ccece6", "#e5f5f9", "#f7fcfd"]
BuGn9 = ["#00441b", "#006d2c", "#238b45", "#41ae76", "#66c2a4", "#99d8c9", "#ccece6", "#e5f5f9", "#f7fcfd"]
PuBuGn3 = ["#1c9099", "#a6bddb", "#ece2f0"]
PuBuGn4 = ["#02818a", "#67a9cf", "#bdc9e1", "#f6eff7"]
PuBuGn5 = ["#016c59", "#1c9099", "#67a9cf", "#bdc9e1", "#f6eff7"]
PuBuGn6 = ["#016c59", "#1c9099", "#67a9cf", "#a6bddb", "#d0d1e6", "#f6eff7"]
PuBuGn7 = ["#016450", "#02818a", "#3690c0", "#67a9cf", "#a6bddb", "#d0d1e6", "#f6eff7"]
PuBuGn8 = ["#016450", "#02818a", "#3690c0", "#67a9cf", "#a6bddb", "#d0d1e6", "#ece2f0", "#fff7fb"]
PuBuGn9 = ["#014636", "#016c59", "#02818a", "#3690c0", "#67a9cf", "#a6bddb", "#d0d1e6", "#ece2f0", "#fff7fb"]
PuBu3 = ["#2b8cbe", "#a6bddb", "#ece7f2"]
PuBu4 = ["#0570b0", "#74a9cf", "#bdc9e1", "#f1eef6"]
PuBu5 = ["#045a8d", "#2b8cbe", "#74a9cf", "#bdc9e1", "#f1eef6"]
PuBu6 = ["#045a8d", "#2b8cbe", "#74a9cf", "#a6bddb", "#d0d1e6", "#f1eef6"]
PuBu7 = ["#034e7b", "#0570b0", "#3690c0", "#74a9cf", "#a6bddb", "#d0d1e6", "#f1eef6"]
PuBu8 = ["#034e7b", "#0570b0", "#3690c0", "#74a9cf", "#a6bddb", "#d0d1e6", "#ece7f2", "#fff7fb"]
PuBu9 = ["#023858", "#045a8d", "#0570b0", "#3690c0", "#74a9cf", "#a6bddb", "#d0d1e6", "#ece7f2", "#fff7fb"]
BuPu3 = ["#8856a7", "#9ebcda", "#e0ecf4"]
BuPu4 = ["#88419d", "#8c96c6", "#b3cde3", "#edf8fb"]
BuPu5 = ["#810f7c", "#8856a7", "#8c96c6", "#b3cde3", "#edf8fb"]
BuPu6 = ["#810f7c", "#8856a7", "#8c96c6", "#9ebcda", "#bfd3e6", "#edf8fb"]
BuPu7 = ["#6e016b", "#88419d", "#8c6bb1", "#8c96c6", "#9ebcda", "#bfd3e6", "#edf8fb"]
BuPu8 = ["#6e016b", "#88419d", "#8c6bb1", "#8c96c6", "#9ebcda", "#bfd3e6", "#e0ecf4", "#f7fcfd"]
BuPu9 = ["#4d004b", "#810f7c", "#88419d", "#8c6bb1", "#8c96c6", "#9ebcda", "#bfd3e6", "#e0ecf4", "#f7fcfd"]
RdPu3 = ["#c51b8a", "#fa9fb5", "#fde0dd"]
RdPu4 = ["#ae017e", "#f768a1", "#fbb4b9", "#feebe2"]
RdPu5 = ["#7a0177", "#c51b8a", "#f768a1", "#fbb4b9", "#feebe2"]
RdPu6 = ["#7a0177", "#c51b8a", "#f768a1", "#fa9fb5", "#fcc5c0", "#feebe2"]
RdPu7 = ["#7a0177", "#ae017e", "#dd3497", "#f768a1", "#fa9fb5", "#fcc5c0", "#feebe2"]
RdPu8 = ["#7a0177", "#ae017e", "#dd3497", "#f768a1", "#fa9fb5", "#fcc5c0", "#fde0dd", "#fff7f3"]
RdPu9 = ["#49006a", "#7a0177", "#ae017e", "#dd3497", "#f768a1", "#fa9fb5", "#fcc5c0", "#fde0dd", "#fff7f3"]
PuRd3 = ["#dd1c77", "#c994c7", "#e7e1ef"]
PuRd4 = ["#ce1256", "#df65b0", "#d7b5d8", "#f1eef6"]
PuRd5 = ["#980043", "#dd1c77", "#df65b0", "#d7b5d8", "#f1eef6"]
PuRd6 = ["#980043", "#dd1c77", "#df65b0", "#c994c7", "#d4b9da", "#f1eef6"]
PuRd7 = ["#91003f", "#ce1256", "#e7298a", "#df65b0", "#c994c7", "#d4b9da", "#f1eef6"]
PuRd8 = ["#91003f", "#ce1256", "#e7298a", "#df65b0", "#c994c7", "#d4b9da", "#e7e1ef", "#f7f4f9"]
PuRd9 = ["#67001f", "#980043", "#ce1256", "#e7298a", "#df65b0", "#c994c7", "#d4b9da", "#e7e1ef", "#f7f4f9"]
OrRd3 = ["#e34a33", "#fdbb84", "#fee8c8"]
OrRd4 = ["#d7301f", "#fc8d59", "#fdcc8a", "#fef0d9"]
OrRd5 = ["#b30000", "#e34a33", "#fc8d59", "#fdcc8a", "#fef0d9"]
OrRd6 = ["#b30000", "#e34a33", "#fc8d59", "#fdbb84", "#fdd49e", "#fef0d9"]
OrRd7 = ["#990000", "#d7301f", "#ef6548", "#fc8d59", "#fdbb84", "#fdd49e", "#fef0d9"]
OrRd8 = ["#990000", "#d7301f", "#ef6548", "#fc8d59", "#fdbb84", "#fdd49e", "#fee8c8", "#fff7ec"]
OrRd9 = ["#7f0000", "#b30000", "#d7301f", "#ef6548", "#fc8d59", "#fdbb84", "#fdd49e", "#fee8c8", "#fff7ec"]
YlOrRd3 = ["#f03b20", "#feb24c", "#ffeda0"]
YlOrRd4 = ["#e31a1c", "#fd8d3c", "#fecc5c", "#ffffb2"]
YlOrRd5 = ["#bd0026", "#f03b20", "#fd8d3c", "#fecc5c", "#ffffb2"]
YlOrRd6 = ["#bd0026", "#f03b20", "#fd8d3c", "#feb24c", "#fed976", "#ffffb2"]
YlOrRd7 = ["#b10026", "#e31a1c", "#fc4e2a", "#fd8d3c", "#feb24c", "#fed976", "#ffffb2"]
YlOrRd8 = ["#b10026", "#e31a1c", "#fc4e2a", "#fd8d3c", "#feb24c", "#fed976", "#ffeda0", "#ffffcc"]
YlOrRd9 = ["#800026", "#bd0026", "#e31a1c", "#fc4e2a", "#fd8d3c", "#feb24c", "#fed976", "#ffeda0", "#ffffcc"]
YlOrBr3 = ["#d95f0e", "#fec44f", "#fff7bc"]
YlOrBr4 = ["#cc4c02", "#fe9929", "#fed98e", "#ffffd4"]
YlOrBr5 = ["#993404", "#d95f0e", "#fe9929", "#fed98e", "#ffffd4"]
YlOrBr6 = ["#993404", "#d95f0e", "#fe9929", "#fec44f", "#fee391", "#ffffd4"]
YlOrBr7 = ["#8c2d04", "#cc4c02", "#ec7014", "#fe9929", "#fec44f", "#fee391", "#ffffd4"]
YlOrBr8 = ["#8c2d04", "#cc4c02", "#ec7014", "#fe9929", "#fec44f", "#fee391", "#fff7bc", "#ffffe5"]
YlOrBr9 = ["#662506", "#993404", "#cc4c02", "#ec7014", "#fe9929", "#fec44f", "#fee391", "#fff7bc", "#ffffe5"]
Purples3 = ["#756bb1", "#bcbddc", "#efedf5"]
Purples4 = ["#6a51a3", "#9e9ac8", "#cbc9e2", "#f2f0f7"]
Purples5 = ["#54278f", "#756bb1", "#9e9ac8", "#cbc9e2", "#f2f0f7"]
Purples6 = ["#54278f", "#756bb1", "#9e9ac8", "#bcbddc", "#dadaeb", "#f2f0f7"]
Purples7 = ["#4a1486", "#6a51a3", "#807dba", "#9e9ac8", "#bcbddc", "#dadaeb", "#f2f0f7"]
Purples8 = ["#4a1486", "#6a51a3", "#807dba", "#9e9ac8", "#bcbddc", "#dadaeb", "#efedf5", "#fcfbfd"]
Purples9 = ["#3f007d", "#54278f", "#6a51a3", "#807dba", "#9e9ac8", "#bcbddc", "#dadaeb", "#efedf5", "#fcfbfd"]
Blues3 = ["#3182bd", "#9ecae1", "#deebf7"]
Blues4 = ["#2171b5", "#6baed6", "#bdd7e7", "#eff3ff"]
Blues5 = ["#08519c", "#3182bd", "#6baed6", "#bdd7e7", "#eff3ff"]
Blues6 = ["#08519c", "#3182bd", "#6baed6", "#9ecae1", "#c6dbef", "#eff3ff"]
Blues7 = ["#084594", "#2171b5", "#4292c6", "#6baed6", "#9ecae1", "#c6dbef", "#eff3ff"]
Blues8 = ["#084594", "#2171b5", "#4292c6", "#6baed6", "#9ecae1", "#c6dbef", "#deebf7", "#f7fbff"]
Blues9 = ["#08306b", "#08519c", "#2171b5", "#4292c6", "#6baed6", "#9ecae1", "#c6dbef", "#deebf7", "#f7fbff"]
Greens3 = ["#31a354", "#a1d99b", "#e5f5e0"]
Greens4 = ["#238b45", "#74c476", "#bae4b3", "#edf8e9"]
Greens5 = ["#006d2c", "#31a354", "#74c476", "#bae4b3", "#edf8e9"]
Greens6 = ["#006d2c", "#31a354", "#74c476", "#a1d99b", "#c7e9c0", "#edf8e9"]
Greens7 = ["#005a32", "#238b45", "#41ab5d", "#74c476", "#a1d99b", "#c7e9c0", "#edf8e9"]
Greens8 = ["#005a32", "#238b45", "#41ab5d", "#74c476", "#a1d99b", "#c7e9c0", "#e5f5e0", "#f7fcf5"]
Greens9 = ["#00441b", "#006d2c", "#238b45", "#41ab5d", "#74c476", "#a1d99b", "#c7e9c0", "#e5f5e0", "#f7fcf5"]
Oranges3 = ["#e6550d", "#fdae6b", "#fee6ce"]
Oranges4 = ["#d94701", "#fd8d3c", "#fdbe85", "#feedde"]
Oranges5 = ["#a63603", "#e6550d", "#fd8d3c", "#fdbe85", "#feedde"]
Oranges6 = ["#a63603", "#e6550d", "#fd8d3c", "#fdae6b", "#fdd0a2", "#feedde"]
Oranges7 = ["#8c2d04", "#d94801", "#f16913", "#fd8d3c", "#fdae6b", "#fdd0a2", "#feedde"]
Oranges8 = ["#8c2d04", "#d94801", "#f16913", "#fd8d3c", "#fdae6b", "#fdd0a2", "#fee6ce", "#fff5eb"]
Oranges9 = ["#7f2704", "#a63603", "#d94801", "#f16913", "#fd8d3c", "#fdae6b", "#fdd0a2", "#fee6ce", "#fff5eb"]
Reds3 = ["#de2d26", "#fc9272", "#fee0d2"]
Reds4 = ["#cb181d", "#fb6a4a", "#fcae91", "#fee5d9"]
Reds5 = ["#a50f15", "#de2d26", "#fb6a4a", "#fcae91", "#fee5d9"]
Reds6 = ["#a50f15", "#de2d26", "#fb6a4a", "#fc9272", "#fcbba1", "#fee5d9"]
Reds7 = ["#99000d", "#cb181d", "#ef3b2c", "#fb6a4a", "#fc9272", "#fcbba1", "#fee5d9"]
Reds8 = ["#99000d", "#cb181d", "#ef3b2c", "#fb6a4a", "#fc9272", "#fcbba1", "#fee0d2", "#fff5f0"]
Reds9 = ["#67000d", "#a50f15", "#cb181d", "#ef3b2c", "#fb6a4a", "#fc9272", "#fcbba1", "#fee0d2", "#fff5f0"]
Greys3 = ["#636363", "#bdbdbd", "#f0f0f0"]
Greys4 = ["#525252", "#969696", "#cccccc", "#f7f7f7"]
Greys5 = ["#252525", "#636363", "#969696", "#cccccc", "#f7f7f7"]
Greys6 = ["#252525", "#636363", "#969696", "#bdbdbd", "#d9d9d9", "#f7f7f7"]
Greys7 = ["#252525", "#525252", "#737373", "#969696", "#bdbdbd", "#d9d9d9", "#f7f7f7"]
Greys8 = ["#252525", "#525252", "#737373", "#969696", "#bdbdbd", "#d9d9d9", "#f0f0f0", "#ffffff"]
Greys9 = ["#000000", "#252525", "#525252", "#737373", "#969696", "#bdbdbd", "#d9d9d9", "#f0f0f0", "#ffffff"]
PuOr3 = ["#998ec3", "#f7f7f7", "#f1a340"]
PuOr4 = ["#5e3c99", "#b2abd2", "#fdb863", "#e66101"]
PuOr5 = ["#5e3c99", "#b2abd2", "#f7f7f7", "#fdb863", "#e66101"]
PuOr6 = ["#542788", "#998ec3", "#d8daeb", "#fee0b6", "#f1a340", "#b35806"]
PuOr7 = ["#542788", "#998ec3", "#d8daeb", "#f7f7f7", "#fee0b6", "#f1a340", "#b35806"]
PuOr8 = ["#542788", "#8073ac", "#b2abd2", "#d8daeb", "#fee0b6", "#fdb863", "#e08214", "#b35806"]
PuOr9 = ["#542788", "#8073ac", "#b2abd2", "#d8daeb", "#f7f7f7", "#fee0b6", "#fdb863", "#e08214", "#b35806"]
PuOr10 = ["#2d004b", "#542788", "#8073ac", "#b2abd2", "#d8daeb", "#fee0b6", "#fdb863", "#e08214", "#b35806", "#7f3b08"]
PuOr11 = ["#2d004b", "#542788", "#8073ac", "#b2abd2", "#d8daeb", "#f7f7f7", "#fee0b6", "#fdb863", "#e08214", "#b35806", "#7f3b08"]
BrBG3 = ["#5ab4ac", "#f5f5f5", "#d8b365"]
BrBG4 = ["#018571", "#80cdc1", "#dfc27d", "#a6611a"]
BrBG5 = ["#018571", "#80cdc1", "#f5f5f5", "#dfc27d", "#a6611a"]
BrBG6 = ["#01665e", "#5ab4ac", "#c7eae5", "#f6e8c3", "#d8b365", "#8c510a"]
BrBG7 = ["#01665e", "#5ab4ac", "#c7eae5", "#f5f5f5", "#f6e8c3", "#d8b365", "#8c510a"]
BrBG8 = ["#01665e", "#35978f", "#80cdc1", "#c7eae5", "#f6e8c3", "#dfc27d", "#bf812d", "#8c510a"]
BrBG9 = ["#01665e", "#35978f", "#80cdc1", "#c7eae5", "#f5f5f5", "#f6e8c3", "#dfc27d", "#bf812d", "#8c510a"]
BrBG10 = ["#003c30", "#01665e", "#35978f", "#80cdc1", "#c7eae5", "#f6e8c3", "#dfc27d", "#bf812d", "#8c510a", "#543005"]
BrBG11 = ["#003c30", "#01665e", "#35978f", "#80cdc1", "#c7eae5", "#f5f5f5", "#f6e8c3", "#dfc27d", "#bf812d", "#8c510a", "#543005"]
PRGn3 = ["#7fbf7b", "#f7f7f7", "#af8dc3"]
PRGn4 = ["#008837", "#a6dba0", "#c2a5cf", "#7b3294"]
PRGn5 = ["#008837", "#a6dba0", "#f7f7f7", "#c2a5cf", "#7b3294"]
PRGn6 = ["#1b7837", "#7fbf7b", "#d9f0d3", "#e7d4e8", "#af8dc3", "#762a83"]
PRGn7 = ["#1b7837", "#7fbf7b", "#d9f0d3", "#f7f7f7", "#e7d4e8", "#af8dc3", "#762a83"]
PRGn8 = ["#1b7837", "#5aae61", "#a6dba0", "#d9f0d3", "#e7d4e8", "#c2a5cf", "#9970ab", "#762a83"]
PRGn9 = ["#1b7837", "#5aae61", "#a6dba0", "#d9f0d3", "#f7f7f7", "#e7d4e8", "#c2a5cf", "#9970ab", "#762a83"]
PRGn10 = ["#00441b", "#1b7837", "#5aae61", "#a6dba0", "#d9f0d3", "#e7d4e8", "#c2a5cf", "#9970ab", "#762a83", "#40004b"]
PRGn11 = ["#00441b", "#1b7837", "#5aae61", "#a6dba0", "#d9f0d3", "#f7f7f7", "#e7d4e8", "#c2a5cf", "#9970ab", "#762a83", "#40004b"]
PiYG3 = ["#a1d76a", "#f7f7f7", "#e9a3c9"]
PiYG4 = ["#4dac26", "#b8e186", "#f1b6da", "#d01c8b"]
PiYG5 = ["#4dac26", "#b8e186", "#f7f7f7", "#f1b6da", "#d01c8b"]
PiYG6 = ["#4d9221", "#a1d76a", "#e6f5d0", "#fde0ef", "#e9a3c9", "#c51b7d"]
PiYG7 = ["#4d9221", "#a1d76a", "#e6f5d0", "#f7f7f7", "#fde0ef", "#e9a3c9", "#c51b7d"]
PiYG8 = ["#4d9221", "#7fbc41", "#b8e186", "#e6f5d0", "#fde0ef", "#f1b6da", "#de77ae", "#c51b7d"]
PiYG9 = ["#4d9221", "#7fbc41", "#b8e186", "#e6f5d0", "#f7f7f7", "#fde0ef", "#f1b6da", "#de77ae", "#c51b7d"]
PiYG10 = ["#276419", "#4d9221", "#7fbc41", "#b8e186", "#e6f5d0", "#fde0ef", "#f1b6da", "#de77ae", "#c51b7d", "#8e0152"]
PiYG11 = ["#276419", "#4d9221", "#7fbc41", "#b8e186", "#e6f5d0", "#f7f7f7", "#fde0ef", "#f1b6da", "#de77ae", "#c51b7d", "#8e0152"]
RdBu3 = ["#67a9cf", "#f7f7f7", "#ef8a62"]
RdBu4 = ["#0571b0", "#92c5de", "#f4a582", "#ca0020"]
RdBu5 = ["#0571b0", "#92c5de", "#f7f7f7", "#f4a582", "#ca0020"]
RdBu6 = ["#2166ac", "#67a9cf", "#d1e5f0", "#fddbc7", "#ef8a62", "#b2182b"]
RdBu7 = ["#2166ac", "#67a9cf", "#d1e5f0", "#f7f7f7", "#fddbc7", "#ef8a62", "#b2182b"]
RdBu8 = ["#2166ac", "#4393c3", "#92c5de", "#d1e5f0", "#fddbc7", "#f4a582", "#d6604d", "#b2182b"]
RdBu9 = ["#2166ac", "#4393c3", "#92c5de", "#d1e5f0", "#f7f7f7", "#fddbc7", "#f4a582", "#d6604d", "#b2182b"]
RdBu10 = ["#053061", "#2166ac", "#4393c3", "#92c5de", "#d1e5f0", "#fddbc7", "#f4a582", "#d6604d", "#b2182b", "#67001f"]
RdBu11 = ["#053061", "#2166ac", "#4393c3", "#92c5de", "#d1e5f0", "#f7f7f7", "#fddbc7", "#f4a582", "#d6604d", "#b2182b", "#67001f"]
RdGy3 = ["#999999", "#ffffff", "#ef8a62"]
RdGy4 = ["#404040", "#bababa", "#f4a582", "#ca0020"]
RdGy5 = ["#404040", "#bababa", "#ffffff", "#f4a582", "#ca0020"]
RdGy6 = ["#4d4d4d", "#999999", "#e0e0e0", "#fddbc7", "#ef8a62", "#b2182b"]
RdGy7 = ["#4d4d4d", "#999999", "#e0e0e0", "#ffffff", "#fddbc7", "#ef8a62", "#b2182b"]
RdGy8 = ["#4d4d4d", "#878787", "#bababa", "#e0e0e0", "#fddbc7", "#f4a582", "#d6604d", "#b2182b"]
RdGy9 = ["#4d4d4d", "#878787", "#bababa", "#e0e0e0", "#ffffff", "#fddbc7", "#f4a582", "#d6604d", "#b2182b"]
RdGy10 = ["#1a1a1a", "#4d4d4d", "#878787", "#bababa", "#e0e0e0", "#fddbc7", "#f4a582", "#d6604d", "#b2182b", "#67001f"]
RdGy11 = ["#1a1a1a", "#4d4d4d", "#878787", "#bababa", "#e0e0e0", "#ffffff", "#fddbc7", "#f4a582", "#d6604d", "#b2182b", "#67001f"]
RdYlBu3 = ["#91bfdb", "#ffffbf", "#fc8d59"]
RdYlBu4 = ["#2c7bb6", "#abd9e9", "#fdae61", "#d7191c"]
RdYlBu5 = ["#2c7bb6", "#abd9e9", "#ffffbf", "#fdae61", "#d7191c"]
RdYlBu6 = ["#4575b4", "#91bfdb", "#e0f3f8", "#fee090", "#fc8d59", "#d73027"]
RdYlBu7 = ["#4575b4", "#91bfdb", "#e0f3f8", "#ffffbf", "#fee090", "#fc8d59", "#d73027"]
RdYlBu8 = ["#4575b4", "#74add1", "#abd9e9", "#e0f3f8", "#fee090", "#fdae61", "#f46d43", "#d73027"]
RdYlBu9 = ["#4575b4", "#74add1", "#abd9e9", "#e0f3f8", "#ffffbf", "#fee090", "#fdae61", "#f46d43", "#d73027"]
RdYlBu10 = ["#313695", "#4575b4", "#74add1", "#abd9e9", "#e0f3f8", "#fee090", "#fdae61", "#f46d43", "#d73027", "#a50026"]
RdYlBu11 = ["#313695", "#4575b4", "#74add1", "#abd9e9", "#e0f3f8", "#ffffbf", "#fee090", "#fdae61", "#f46d43", "#d73027", "#a50026"]
Spectral3 = ["#99d594", "#ffffbf", "#fc8d59"]
Spectral4 = ["#2b83ba", "#abdda4", "#fdae61", "#d7191c"]
Spectral5 = ["#2b83ba", "#abdda4", "#ffffbf", "#fdae61", "#d7191c"]
Spectral6 = ["#3288bd", "#99d594", "#e6f598", "#fee08b", "#fc8d59", "#d53e4f"]
Spectral7 = ["#3288bd", "#99d594", "#e6f598", "#ffffbf", "#fee08b", "#fc8d59", "#d53e4f"]
Spectral8 = ["#3288bd", "#66c2a5", "#abdda4", "#e6f598", "#fee08b", "#fdae61", "#f46d43", "#d53e4f"]
Spectral9 = ["#3288bd", "#66c2a5", "#abdda4", "#e6f598", "#ffffbf", "#fee08b", "#fdae61", "#f46d43", "#d53e4f"]
Spectral10 = ["#5e4fa2", "#3288bd", "#66c2a5", "#abdda4", "#e6f598", "#fee08b", "#fdae61", "#f46d43", "#d53e4f", "#9e0142"]
Spectral11 = ["#5e4fa2", "#3288bd", "#66c2a5", "#abdda4", "#e6f598", "#ffffbf", "#fee08b", "#fdae61", "#f46d43", "#d53e4f", "#9e0142"]
RdYlGn3 = ["#91cf60", "#ffffbf", "#fc8d59"]
RdYlGn4 = ["#1a9641", "#a6d96a", "#fdae61", "#d7191c"]
RdYlGn5 = ["#1a9641", "#a6d96a", "#ffffbf", "#fdae61", "#d7191c"]
RdYlGn6 = ["#1a9850", "#91cf60", "#d9ef8b", "#fee08b", "#fc8d59", "#d73027"]
RdYlGn7 = ["#1a9850", "#91cf60", "#d9ef8b", "#ffffbf", "#fee08b", "#fc8d59", "#d73027"]
RdYlGn8 = ["#1a9850", "#66bd63", "#a6d96a", "#d9ef8b", "#fee08b", "#fdae61", "#f46d43", "#d73027"]
RdYlGn9 = ["#1a9850", "#66bd63", "#a6d96a", "#d9ef8b", "#ffffbf", "#fee08b", "#fdae61", "#f46d43", "#d73027"]
RdYlGn10 = ["#006837", "#1a9850", "#66bd63", "#a6d96a", "#d9ef8b", "#fee08b", "#fdae61", "#f46d43", "#d73027", "#a50026"]
RdYlGn11 = ["#006837", "#1a9850", "#66bd63", "#a6d96a", "#d9ef8b", "#ffffbf", "#fee08b", "#fdae61", "#f46d43", "#d73027", "#a50026"]
__palettes__ = [
"YlGn3", "YlGn4", "YlGn5", "YlGn6", "YlGn7", "YlGn8", "YlGn9",
"YlGnBu3", "YlGnBu4", "YlGnBu5", "YlGnBu6", "YlGnBu7", "YlGnBu8", "YlGnBu9",
"GnBu3", "GnBu4", "GnBu5", "GnBu6", "GnBu7", "GnBu8", "GnBu9",
"BuGn3", "BuGn4", "BuGn5", "BuGn6", "BuGn7", "BuGn8", "BuGn9",
"PuBuGn3", "PuBuGn4", "PuBuGn5", "PuBuGn6", "PuBuGn7", "PuBuGn8", "PuBuGn9",
"PuBu3", "PuBu4", "PuBu5", "PuBu6", "PuBu7", "PuBu8", "PuBu9",
"BuPu3", "BuPu4", "BuPu5", "BuPu6", "BuPu7", "BuPu8", "BuPu9",
"RdPu3", "RdPu4", "RdPu5", "RdPu6", "RdPu7", "RdPu8", "RdPu9",
"PuRd3", "PuRd4", "PuRd5", "PuRd6", "PuRd7", "PuRd8", "PuRd9",
"OrRd3", "OrRd4", "OrRd5", "OrRd6", "OrRd7", "OrRd8", "OrRd9",
"YlOrRd3", "YlOrRd4", "YlOrRd5", "YlOrRd6", "YlOrRd7", "YlOrRd8", "YlOrRd9",
"YlOrBr3", "YlOrBr4", "YlOrBr5", "YlOrBr6", "YlOrBr7", "YlOrBr8", "YlOrBr9",
"Purples3", "Purples4", "Purples5", "Purples6", "Purples7", "Purples8", "Purples9",
"Blues3", "Blues4", "Blues5", "Blues6", "Blues7", "Blues8", "Blues9",
"Greens3", "Greens4", "Greens5", "Greens6", "Greens7", "Greens8", "Greens9",
"Oranges3", "Oranges4", "Oranges5", "Oranges6", "Oranges7", "Oranges8", "Oranges9",
"Reds3", "Reds4", "Reds5", "Reds6", "Reds7", "Reds8", "Reds9",
"Greys3", "Greys4", "Greys5", "Greys6", "Greys7", "Greys8", "Greys9",
"PuOr3", "PuOr4", "PuOr5", "PuOr6", "PuOr7", "PuOr8", "PuOr9", "PuOr10", "PuOr11",
"BrBG3", "BrBG4", "BrBG5", "BrBG6", "BrBG7", "BrBG8", "BrBG9", "BrBG10", "BrBG11",
"PRGn3", "PRGn4", "PRGn5", "PRGn6", "PRGn7", "PRGn8", "PRGn9", "PRGn10", "PRGn11",
"PiYG3", "PiYG4", "PiYG5", "PiYG6", "PiYG7", "PiYG8", "PiYG9", "PiYG10", "PiYG11",
"RdBu3", "RdBu4", "RdBu5", "RdBu6", "RdBu7", "RdBu8", "RdBu9", "RdBu10", "RdBu11",
"RdGy3", "RdGy4", "RdGy5", "RdGy6", "RdGy7", "RdGy8", "RdGy9", "RdGy10", "RdGy11",
"RdYlBu3", "RdYlBu4", "RdYlBu5", "RdYlBu6", "RdYlBu7", "RdYlBu8", "RdYlBu9", "RdYlBu10", "RdYlBu11",
"Spectral3", "Spectral4", "Spectral5", "Spectral6", "Spectral7", "Spectral8", "Spectral9", "Spectral10", "Spectral11",
"RdYlGn3", "RdYlGn4", "RdYlGn5", "RdYlGn6", "RdYlGn7", "RdYlGn8", "RdYlGn9", "RdYlGn10", "RdYlGn11",
]
brewer = {
"YlGn" : { 3: YlGn3, 4: YlGn4, 5: YlGn5, 6: YlGn6, 7: YlGn7, 8: YlGn8, 9: YlGn9 },
"YlGnBu" : { 3: YlGnBu3, 4: YlGnBu4, 5: YlGnBu5, 6: YlGnBu6, 7: YlGnBu7, 8: YlGnBu8, 9: YlGnBu9 },
"GnBu" : { 3: GnBu3, 4: GnBu4, 5: GnBu5, 6: GnBu6, 7: GnBu7, 8: GnBu8, 9: GnBu9 },
"BuGn" : { 3: BuGn3, 4: BuGn4, 5: BuGn5, 6: BuGn6, 7: BuGn7, 8: BuGn8, 9: BuGn9 },
"PuBuGn" : { 3: PuBuGn3, 4: PuBuGn4, 5: PuBuGn5, 6: PuBuGn6, 7: PuBuGn7, 8: PuBuGn8, 9: PuBuGn9 },
"PuBu" : { 3: PuBu3, 4: PuBu4, 5: PuBu5, 6: PuBu6, 7: PuBu7, 8: PuBu8, 9: PuBu9 },
"BuPu" : { 3: BuPu3, 4: BuPu4, 5: BuPu5, 6: BuPu6, 7: BuPu7, 8: BuPu8, 9: BuPu9 },
"RdPu" : { 3: RdPu3, 4: RdPu4, 5: RdPu5, 6: RdPu6, 7: RdPu7, 8: RdPu8, 9: RdPu9 },
"PuRd" : { 3: PuRd3, 4: PuRd4, 5: PuRd5, 6: PuRd6, 7: PuRd7, 8: PuRd8, 9: PuRd9 },
"OrRd" : { 3: OrRd3, 4: OrRd4, 5: OrRd5, 6: OrRd6, 7: OrRd7, 8: OrRd8, 9: OrRd9 },
"YlOrRd" : { 3: YlOrRd3, 4: YlOrRd4, 5: YlOrRd5, 6: YlOrRd6, 7: YlOrRd7, 8: YlOrRd8, 9: YlOrRd9 },
"YlOrBr" : { 3: YlOrBr3, 4: YlOrBr4, 5: YlOrBr5, 6: YlOrBr6, 7: YlOrBr7, 8: YlOrBr8, 9: YlOrBr9 },
"Purples" : { 3: Purples3, 4: Purples4, 5: Purples5, 6: Purples6, 7: Purples7, 8: Purples8, 9: Purples9 },
"Blues" : { 3: Blues3, 4: Blues4, 5: Blues5, 6: Blues6, 7: Blues7, 8: Blues8, 9: Blues9 },
"Greens" : { 3: Greens3, 4: Greens4, 5: Greens5, 6: Greens6, 7: Greens7, 8: Greens8, 9: Greens9 },
"Oranges" : { 3: Oranges3, 4: Oranges4, 5: Oranges5, 6: Oranges6, 7: Oranges7, 8: Oranges8, 9: Oranges9 },
"Reds" : { 3: Reds3, 4: Reds4, 5: Reds5, 6: Reds6, 7: Reds7, 8: Reds8, 9: Reds9 },
"Greys" : { 3: Greys3, 4: Greys4, 5: Greys5, 6: Greys6, 7: Greys7, 8: Greys8, 9: Greys9 },
"PuOr" : { 3: PuOr3, 4: PuOr4, 5: PuOr5, 6: PuOr6, 7: PuOr7, 8: PuOr8, 9: PuOr9, 10: PuOr10, 11: PuOr11 },
"BrBG" : { 3: BrBG3, 4: BrBG4, 5: BrBG5, 6: BrBG6, 7: BrBG7, 8: BrBG8, 9: BrBG9, 10: BrBG10, 11: BrBG11 },
"PRGn" : { 3: PRGn3, 4: PRGn4, 5: PRGn5, 6: PRGn6, 7: PRGn7, 8: PRGn8, 9: PRGn9, 10: PRGn10, 11: PRGn11 },
"PiYG" : { 3: PiYG3, 4: PiYG4, 5: PiYG5, 6: PiYG6, 7: PiYG7, 8: PiYG8, 9: PiYG9, 10: PiYG10, 11: PiYG11 },
"RdBu" : { 3: RdBu3, 4: RdBu4, 5: RdBu5, 6: RdBu6, 7: RdBu7, 8: RdBu8, 9: RdBu9, 10: RdBu10, 11: RdBu11 },
"RdGy" : { 3: RdGy3, 4: RdGy4, 5: RdGy5, 6: RdGy6, 7: RdGy7, 8: RdGy8, 9: RdGy9, 10: RdGy10, 11: RdGy11 },
"RdYlBu" : { 3: RdYlBu3, 4: RdYlBu4, 5: RdYlBu5, 6: RdYlBu6, 7: RdYlBu7, 8: RdYlBu8, 9: RdYlBu9, 10: RdYlBu10, 11: RdYlBu11 },
"Spectral" : { 3: Spectral3, 4: Spectral4, 5: Spectral5, 6: Spectral6, 7: Spectral7, 8: Spectral8, 9: Spectral9, 10: Spectral10, 11: Spectral11 },
"RdYlGn" : { 3: RdYlGn3, 4: RdYlGn4, 5: RdYlGn5, 6: RdYlGn6, 7: RdYlGn7, 8: RdYlGn8, 9: RdYlGn9, 10: RdYlGn10, 11: RdYlGn11 },
}
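# A hedged usage note (not part of the original module): each palette is a
# plain list of hex colours, addressable either by name or via the brewer
# dict, e.g. brewer["Blues"][5] is the five-colour Blues palette above.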
| bsd-3-clause |
basimr/snoop-dogg-number | filter_graph_by_sdn.py | 1 | 1253 | #!/usr/bin/python2
# TODO: Add description.
import psycopg2
import networkx as nx
# TODO: Create a class for storing artists' SDN and path to avoid doing this.
SDN = 0
PATH = 1
# Load graph from disk
graph = nx.read_gexf("graph/sdn-unweighted.gexf")
# Initialize dictionary with the Snoop Dogg as the base case
artists = {"Snoop Dogg" : (0, ["Snoop Dogg"])}
# Traverse the graph breadth-first and compute every artist's Snoop Dogg Number in O(V + E)
for edge in nx.bfs_edges(graph, "Snoop Dogg"):
parent = edge[0]
child = edge[1]
dist_to_snoopdogg = artists[parent][SDN] + 1
path_to_snoopdogg = artists[parent][PATH] + [child]
artists[child] = (dist_to_snoopdogg, path_to_snoopdogg)
# Remove artists far from Snoop Dogg and save a separate graph for each iteration
# TODO: Can I use comprehensions to simplify these loops?
for sdn in [5, 4, 3, 2, 1]:
distant_artists = []
for a in artists:
if artists[a][SDN] > sdn:
distant_artists.append(a)
for a in distant_artists:
del artists[a]
graph.remove_node(a)
filename = "graph/sdn-" + str(sdn) + ".gexf"
nx.write_gexf(graph, filename)
print("Wrote graph of artists with SDN of " + sdn + " or less at " + filename)
print(nx.info(graph))
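# A hedged sketch (not part of the original script) addressing the TODO
# above: the first inner loop collapses to a comprehension,
#
#     distant_artists = [a for a in artists if artists[a][SDN] > sdn]
#
# while the deletion loop stays explicit because it mutates both the
# artists dict and the graph.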
| mit |
kurokid/connme | connme/connme.py | 1 | 2726 | #!/usr/bin/env python2
import sip
sip.setapi('QString', 2)
from PyQt4 import QtGui, QtCore, QtNetwork
from connmeMain import connme
import sys,os
class SingleApplicationWithMessaging(QtGui.QApplication):
messageAvailable = QtCore.pyqtSignal(object)
def __init__(self, argv, key):
QtGui.QApplication.__init__(self, argv)
self._key = key
self._memory = QtCore.QSharedMemory(self)
self._memory.setKey(self._key)
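        # QSharedMemory.attach() succeeds only if another process already
        # created a segment under this key, so a successful attach means an
        # instance of this application is already running.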
if self._memory.attach():
self._running = True
else:
self._running = False
if not self._memory.create(1):
raise RuntimeError(self._memory.errorString())
self._timeout = 1000
self._server = QtNetwork.QLocalServer(self)
if not self.isRunning():
self._server.newConnection.connect(self.handleMessage)
self._server.listen(self._key)
def handleMessage(self):
socket = self._server.nextPendingConnection()
if socket.waitForReadyRead(self._timeout):
self.messageAvailable.emit(
socket.readAll().data().decode('utf-8'))
socket.disconnectFromServer()
else:
QtCore.qDebug(socket.errorString())
def isRunning(self):
return self._running
def sendMessage(self, message):
if self.isRunning():
socket = QtNetwork.QLocalSocket(self)
socket.connectToServer(self._key, QtCore.QIODevice.WriteOnly)
if not socket.waitForConnected(self._timeout):
print(socket.errorString())
return False
if not isinstance(message, bytes):
message = message.encode('utf-8')
socket.write(message)
if not socket.waitForBytesWritten(self._timeout):
print(socket.errorString())
return False
socket.disconnectFromServer()
return True
return False
def main():
key = 'connme'
app = SingleApplicationWithMessaging(sys.argv, key)
if app.isRunning():
app.sendMessage(' '.join(sys.argv[1:]))
sys.exit(1)
gui = connme()
gui.address = os.path.realpath(__file__)
app.messageAvailable.connect(gui.processClient)
gui.showGui()
sys.exit(app.exec_())
if __name__ == '__main__':
euid = os.geteuid()
os.chdir(sys.path[0])
if euid != 0:
if os.path.exists("/usr/bin/gksu"):
args = ['gksu', sys.executable] + sys.argv + [os.environ]
os.execlpe('gksu', *args)
elif os.path.exists("/usr/bin/kdesudo"):
args = ['kdesudo', sys.executable] + sys.argv + [os.environ]
os.execlpe('kdesudo', *args)
    main()
| gpl-3.0 |
gooddata/openstack-nova | nova/tests/functional/notification_sample_tests/test_exception_notification.py | 4 | 1803 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.tests.functional.api import client as api_client
from nova.tests.functional.notification_sample_tests \
import notification_sample_base
from nova.tests.unit import fake_notifier
class TestExceptionNotificationSample(
notification_sample_base.NotificationSampleTestBase):
def test_versioned_exception_notification_with_correct_params(
self):
post = {
"aggregate": {
"name": "versioned_exc_aggregate",
"availability_zone": "nova"
}
}
self.admin_api.api_post('os-aggregates', post)
# recreating the aggregate raises exception
self.assertRaises(api_client.OpenStackApiException,
self.admin_api.api_post, 'os-aggregates', post)
self.assertEqual(4, len(fake_notifier.VERSIONED_NOTIFICATIONS))
traceback = fake_notifier.VERSIONED_NOTIFICATIONS[3][
'payload']['nova_object.data']['traceback']
self.assertIn('AggregateNameExists', traceback)
self._verify_notification(
'compute-exception',
replacements={
'traceback': self.ANY},
actual=fake_notifier.VERSIONED_NOTIFICATIONS[3])
| apache-2.0 |
BaseBot/Triangula | src/python/triangula/navigation.py | 1 | 1101 | class TaskWaypoint:
"""
Consists of a target Pose defining a location and orientation, and a Task which should be run when the robot reaches
the target position. The task can be None, in which case the robot won't attempt to do anything at the target point.
"""
def __init__(self, pose, task=None, stop=False):
"""
Constructor
:param triangula.chassis.Pose pose:
The target Pose, defining the location and orientation of this waypoint
:param triangula.task.Task task:
A Task to run when the target point is reached. The task will be run until a non-None value is returned from
the poll method. Defaults to None, in which case no task will be invoked and the robot will proceed
immediately to the next waypoint.
:param stop:
Defaults to False, if this is set to True then the robot will come to a complete stop before either running
the sub-task or proceeding to the next waypoint.
"""
self.pose = pose
self.task = task
        self.stop = stop
| apache-2.0 |
ehashman/oh-mainline | vendor/packages/python-social-auth/social/tests/backends/test_strava.py | 87 | 1882 | import json
from social.tests.backends.oauth import OAuth2Test
class StravaOAuthTest(OAuth2Test):
backend_path = 'social.backends.strava.StravaOAuth'
user_data_url = 'https://www.strava.com/api/v3/athlete'
expected_username = '227615'
access_token_body = json.dumps({
"access_token": "83ebeabdec09f6670863766f792ead24d61fe3f9",
"athlete": {
"id": 227615,
"resource_state": 3,
"firstname": "John",
"lastname": "Applestrava",
"profile_medium": "http://pics.com/227615/medium.jpg",
"profile": "http://pics.com/227615/large.jpg",
"city": "San Francisco",
"state": "California",
"country": "United States",
"sex": "M",
"friend": "null",
"follower": "null",
"premium": "true",
"created_at": "2008-01-01T17:44:00Z",
"updated_at": "2013-09-04T20:00:50Z",
"follower_count": 273,
"friend_count": 19,
"mutual_friend_count": 0,
"date_preference": "%m/%d/%Y",
"measurement_preference": "feet",
"email": "john@applestrava.com",
"clubs": [],
"bikes": [],
"shoes": []
}
})
user_data_body = json.dumps({
"id": 227615,
"resource_state": 2,
"firstname": "John",
"lastname": "Applestrava",
"profile_medium": "http://pics.com/227615/medium.jpg",
"profile": "http://pics.com/227615/large.jpg",
"city": "San Francisco",
"state": "CA",
"country": "United States",
"sex": "M",
"friend": "null",
"follower": "accepted",
"premium": "true",
"created_at": "2011-03-19T21:59:57Z",
"updated_at": "2013-09-05T16:46:54Z",
"approve_followers": "false"
})
def test_login(self):
self.do_login()
def test_partial_pipeline(self):
self.do_partial_pipeline()
| agpl-3.0 |
nathanaevitas/odoo | openerp/addons/account_analytic_analysis/__init__.py | 425 | 1107 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_analytic_analysis
import res_config
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
EmreAtes/spack | var/spack/repos/builtin/packages/gnupg/package.py | 5 | 2213 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Gnupg(AutotoolsPackage):
"""GnuPG is a complete and free implementation of the OpenPGP
standard as defined by RFC4880 """
homepage = "https://gnupg.org/index.html"
url = "https://gnupg.org/ftp/gcrypt/gnupg/gnupg-2.2.3.tar.bz2"
version('2.2.3', '6911c0127e4231ce52d60f26029dba68')
version('2.1.21', '685ebf4c3a7134ba0209c96b18b2f064')
depends_on('libgcrypt')
depends_on('libassuan')
depends_on('libksba')
depends_on('libgpg-error')
depends_on('npth')
def configure_args(self):
        args = ['--with-npth-prefix=%s' % self.spec['npth'].prefix,
                '--with-libgcrypt-prefix=%s' % self.spec['libgcrypt'].prefix,
                '--with-libksba-prefix=%s' % self.spec['libksba'].prefix,
                '--with-libassuan-prefix=%s' % self.spec['libassuan'].prefix,
                '--with-libgpg-error-prefix=%s' %
                self.spec['libgpg-error'].prefix]
return args
| lgpl-2.1 |
cliqz/socorro | socorro/unittest/collector/test_collector_app.py | 10 | 1977 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import mock
from nose.tools import eq_
from socorro.collector.collector_app import CollectorApp
from socorro.collector.wsgi_breakpad_collector import BreakpadCollector
from socorro.unittest.testbase import TestCase
from configman.dotdict import DotDict
class TestCollectorApp(TestCase):
def get_standard_config(self):
config = DotDict()
config.logger = mock.MagicMock()
config.collector = DotDict()
config.collector.collector_class = BreakpadCollector
config.collector.dump_id_prefix = 'bp-'
config.collector.dump_field = 'dump'
config.collector.accept_submitted_crash_id = False
config.throttler = DotDict()
self.mocked_throttler = mock.MagicMock()
config.throttler.throttler_class = mock.MagicMock(
return_value=self.mocked_throttler)
config.storage = mock.MagicMock()
self.mocked_crash_storage = mock.MagicMock()
config.storage.crashstorage_class = mock.MagicMock(
return_value=self.mocked_crash_storage
)
config.web_server = mock.MagicMock()
self.mocked_web_server = mock.MagicMock()
config.web_server.wsgi_server_class = mock.MagicMock(
return_value=self.mocked_web_server
)
return config
def test_main(self):
config = self.get_standard_config()
c = CollectorApp(config)
c.main()
eq_(config.crash_storage, self.mocked_crash_storage)
eq_(config.throttler, self.mocked_throttler)
eq_(c.web_server, self.mocked_web_server)
config.storage.crashstorage_class.assert_called_with(config.storage)
config.web_server.wsgi_server_class.assert_called_with(
config,
(BreakpadCollector, )
)
| mpl-2.0 |
mcollins12321/anita | venv/lib/python2.7/site-packages/flask/exthook.py | 783 | 5087 | # -*- coding: utf-8 -*-
"""
flask.exthook
~~~~~~~~~~~~~
Redirect imports for extensions. This module basically makes it possible
for us to transition from flaskext.foo to flask_foo without having to
force all extensions to upgrade at the same time.
When a user does ``from flask.ext.foo import bar`` it will attempt to
import ``from flask_foo import bar`` first and when that fails it will
try to import ``from flaskext.foo import bar``.
We're switching from namespace packages because it was just too painful for
everybody involved.
This is used by `flask.ext`.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import sys
import os
from ._compat import reraise
class ExtensionImporter(object):
"""This importer redirects imports from this submodule to other locations.
This makes it possible to transition from the old flaskext.name to the
newer flask_name without people having a hard time.
"""
def __init__(self, module_choices, wrapper_module):
self.module_choices = module_choices
self.wrapper_module = wrapper_module
self.prefix = wrapper_module + '.'
self.prefix_cutoff = wrapper_module.count('.') + 1
def __eq__(self, other):
return self.__class__.__module__ == other.__class__.__module__ and \
self.__class__.__name__ == other.__class__.__name__ and \
self.wrapper_module == other.wrapper_module and \
self.module_choices == other.module_choices
def __ne__(self, other):
return not self.__eq__(other)
def install(self):
sys.meta_path[:] = [x for x in sys.meta_path if self != x] + [self]
def find_module(self, fullname, path=None):
if fullname.startswith(self.prefix):
return self
def load_module(self, fullname):
if fullname in sys.modules:
return sys.modules[fullname]
modname = fullname.split('.', self.prefix_cutoff)[self.prefix_cutoff]
for path in self.module_choices:
realname = path % modname
try:
__import__(realname)
except ImportError:
exc_type, exc_value, tb = sys.exc_info()
                # since we only establish the entry in sys.modules at the
                # very end, this seems to be redundant, but if recursive
                # imports happen we will call into the same import a second
                # time.
# On the second invocation we still don't have an entry for
# fullname in sys.modules, but we will end up with the same
# fake module name and that import will succeed since this
# one already has a temporary entry in the modules dict.
# Since this one "succeeded" temporarily that second
# invocation now will have created a fullname entry in
# sys.modules which we have to kill.
sys.modules.pop(fullname, None)
# If it's an important traceback we reraise it, otherwise
# we swallow it and try the next choice. The skipped frame
# is the one from __import__ above which we don't care about
if self.is_important_traceback(realname, tb):
reraise(exc_type, exc_value, tb.tb_next)
continue
module = sys.modules[fullname] = sys.modules[realname]
if '.' not in modname:
setattr(sys.modules[self.wrapper_module], modname, module)
return module
raise ImportError('No module named %s' % fullname)
def is_important_traceback(self, important_module, tb):
"""Walks a traceback's frames and checks if any of the frames
originated in the given important module. If that is the case then we
were able to import the module itself but apparently something went
wrong when the module was imported. (Eg: import of an import failed).
"""
while tb is not None:
if self.is_important_frame(important_module, tb):
return True
tb = tb.tb_next
return False
def is_important_frame(self, important_module, tb):
"""Checks a single frame if it's important."""
g = tb.tb_frame.f_globals
if '__name__' not in g:
return False
module_name = g['__name__']
# Python 2.7 Behavior. Modules are cleaned up late so the
# name shows up properly here. Success!
if module_name == important_module:
return True
        # Some Python versions will clean up modules so early that the
# module name at that point is no longer set. Try guessing from
# the filename then.
filename = os.path.abspath(tb.tb_frame.f_code.co_filename)
test_string = os.path.sep + important_module.replace('.', os.path.sep)
return test_string + '.py' in filename or \
test_string + os.path.sep + '__init__.py' in filename
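# A minimal wiring sketch, mirroring how flask/ext/__init__.py is expected to
# install this importer (shown for illustration only):
#
#     importer = ExtensionImporter(['flask_%s', 'flaskext.%s'], 'flask.ext')
#     importer.install()
#
# After install(), any ``flask.ext.<name>`` import is redirected as above.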
| mit |
TNT-Samuel/Coding-Projects | DNS Server/Source/Lib/site-packages/pip/_vendor/chardet/euckrfreq.py | 342 | 13546 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Sampling from about 20M text materials include literature and computer technology
# 128 --> 0.79
# 256 --> 0.92
# 512 --> 0.986
# 1024 --> 0.99944
# 2048 --> 0.99999
#
# Ideal Distribution Ratio = 0.98653 / (1-0.98653) = 73.24
# Random Distribution Ratio = 512 / (2350-512) = 0.279.
#
# Typical Distribution Ratio
EUCKR_TYPICAL_DISTRIBUTION_RATIO = 6.0
EUCKR_TABLE_SIZE = 2352
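# Worked example of the figures above (a sketch for clarity; the numbers are
# the sampled coverage values quoted in the comments):
#
#     ideal_ratio = 0.98653 / (1 - 0.98653)    # ~73.24, top 512 chars vs rest
#     random_ratio = 512.0 / (2350 - 512)      # ~0.279, uniform distribution
#
# EUCKR_TYPICAL_DISTRIBUTION_RATIO (6.0) sits between these two extremes and
# is the constant chardet's distribution analyser scores against.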
# Char to FreqOrder table
EUCKR_CHAR_TO_FREQ_ORDER = (
13, 130, 120,1396, 481,1719,1720, 328, 609, 212,1721, 707, 400, 299,1722, 87,
1397,1723, 104, 536,1117,1203,1724,1267, 685,1268, 508,1725,1726,1727,1728,1398,
1399,1729,1730,1731, 141, 621, 326,1057, 368,1732, 267, 488, 20,1733,1269,1734,
945,1400,1735, 47, 904,1270,1736,1737, 773, 248,1738, 409, 313, 786, 429,1739,
116, 987, 813,1401, 683, 75,1204, 145,1740,1741,1742,1743, 16, 847, 667, 622,
708,1744,1745,1746, 966, 787, 304, 129,1747, 60, 820, 123, 676,1748,1749,1750,
1751, 617,1752, 626,1753,1754,1755,1756, 653,1757,1758,1759,1760,1761,1762, 856,
344,1763,1764,1765,1766, 89, 401, 418, 806, 905, 848,1767,1768,1769, 946,1205,
709,1770,1118,1771, 241,1772,1773,1774,1271,1775, 569,1776, 999,1777,1778,1779,
1780, 337, 751,1058, 28, 628, 254,1781, 177, 906, 270, 349, 891,1079,1782, 19,
1783, 379,1784, 315,1785, 629, 754,1402, 559,1786, 636, 203,1206,1787, 710, 567,
1788, 935, 814,1789,1790,1207, 766, 528,1791,1792,1208,1793,1794,1795,1796,1797,
1403,1798,1799, 533,1059,1404,1405,1156,1406, 936, 884,1080,1800, 351,1801,1802,
1803,1804,1805, 801,1806,1807,1808,1119,1809,1157, 714, 474,1407,1810, 298, 899,
885,1811,1120, 802,1158,1812, 892,1813,1814,1408, 659,1815,1816,1121,1817,1818,
1819,1820,1821,1822, 319,1823, 594, 545,1824, 815, 937,1209,1825,1826, 573,1409,
1022,1827,1210,1828,1829,1830,1831,1832,1833, 556, 722, 807,1122,1060,1834, 697,
1835, 900, 557, 715,1836,1410, 540,1411, 752,1159, 294, 597,1211, 976, 803, 770,
1412,1837,1838, 39, 794,1413, 358,1839, 371, 925,1840, 453, 661, 788, 531, 723,
544,1023,1081, 869, 91,1841, 392, 430, 790, 602,1414, 677,1082, 457,1415,1416,
1842,1843, 475, 327,1024,1417, 795, 121,1844, 733, 403,1418,1845,1846,1847, 300,
119, 711,1212, 627,1848,1272, 207,1849,1850, 796,1213, 382,1851, 519,1852,1083,
893,1853,1854,1855, 367, 809, 487, 671,1856, 663,1857,1858, 956, 471, 306, 857,
1859,1860,1160,1084,1861,1862,1863,1864,1865,1061,1866,1867,1868,1869,1870,1871,
282, 96, 574,1872, 502,1085,1873,1214,1874, 907,1875,1876, 827, 977,1419,1420,
1421, 268,1877,1422,1878,1879,1880, 308,1881, 2, 537,1882,1883,1215,1884,1885,
127, 791,1886,1273,1423,1887, 34, 336, 404, 643,1888, 571, 654, 894, 840,1889,
0, 886,1274, 122, 575, 260, 908, 938,1890,1275, 410, 316,1891,1892, 100,1893,
1894,1123, 48,1161,1124,1025,1895, 633, 901,1276,1896,1897, 115, 816,1898, 317,
1899, 694,1900, 909, 734,1424, 572, 866,1425, 691, 85, 524,1010, 543, 394, 841,
1901,1902,1903,1026,1904,1905,1906,1907,1908,1909, 30, 451, 651, 988, 310,1910,
1911,1426, 810,1216, 93,1912,1913,1277,1217,1914, 858, 759, 45, 58, 181, 610,
269,1915,1916, 131,1062, 551, 443,1000, 821,1427, 957, 895,1086,1917,1918, 375,
1919, 359,1920, 687,1921, 822,1922, 293,1923,1924, 40, 662, 118, 692, 29, 939,
887, 640, 482, 174,1925, 69,1162, 728,1428, 910,1926,1278,1218,1279, 386, 870,
217, 854,1163, 823,1927,1928,1929,1930, 834,1931, 78,1932, 859,1933,1063,1934,
1935,1936,1937, 438,1164, 208, 595,1938,1939,1940,1941,1219,1125,1942, 280, 888,
1429,1430,1220,1431,1943,1944,1945,1946,1947,1280, 150, 510,1432,1948,1949,1950,
1951,1952,1953,1954,1011,1087,1955,1433,1043,1956, 881,1957, 614, 958,1064,1065,
1221,1958, 638,1001, 860, 967, 896,1434, 989, 492, 553,1281,1165,1959,1282,1002,
1283,1222,1960,1961,1962,1963, 36, 383, 228, 753, 247, 454,1964, 876, 678,1965,
1966,1284, 126, 464, 490, 835, 136, 672, 529, 940,1088,1435, 473,1967,1968, 467,
50, 390, 227, 587, 279, 378, 598, 792, 968, 240, 151, 160, 849, 882,1126,1285,
639,1044, 133, 140, 288, 360, 811, 563,1027, 561, 142, 523,1969,1970,1971, 7,
103, 296, 439, 407, 506, 634, 990,1972,1973,1974,1975, 645,1976,1977,1978,1979,
1980,1981, 236,1982,1436,1983,1984,1089, 192, 828, 618, 518,1166, 333,1127,1985,
818,1223,1986,1987,1988,1989,1990,1991,1992,1993, 342,1128,1286, 746, 842,1994,
1995, 560, 223,1287, 98, 8, 189, 650, 978,1288,1996,1437,1997, 17, 345, 250,
423, 277, 234, 512, 226, 97, 289, 42, 167,1998, 201,1999,2000, 843, 836, 824,
532, 338, 783,1090, 182, 576, 436,1438,1439, 527, 500,2001, 947, 889,2002,2003,
2004,2005, 262, 600, 314, 447,2006, 547,2007, 693, 738,1129,2008, 71,1440, 745,
619, 688,2009, 829,2010,2011, 147,2012, 33, 948,2013,2014, 74, 224,2015, 61,
191, 918, 399, 637,2016,1028,1130, 257, 902,2017,2018,2019,2020,2021,2022,2023,
2024,2025,2026, 837,2027,2028,2029,2030, 179, 874, 591, 52, 724, 246,2031,2032,
2033,2034,1167, 969,2035,1289, 630, 605, 911,1091,1168,2036,2037,2038,1441, 912,
2039, 623,2040,2041, 253,1169,1290,2042,1442, 146, 620, 611, 577, 433,2043,1224,
719,1170, 959, 440, 437, 534, 84, 388, 480,1131, 159, 220, 198, 679,2044,1012,
819,1066,1443, 113,1225, 194, 318,1003,1029,2045,2046,2047,2048,1067,2049,2050,
2051,2052,2053, 59, 913, 112,2054, 632,2055, 455, 144, 739,1291,2056, 273, 681,
499,2057, 448,2058,2059, 760,2060,2061, 970, 384, 169, 245,1132,2062,2063, 414,
1444,2064,2065, 41, 235,2066, 157, 252, 877, 568, 919, 789, 580,2067, 725,2068,
2069,1292,2070,2071,1445,2072,1446,2073,2074, 55, 588, 66,1447, 271,1092,2075,
1226,2076, 960,1013, 372,2077,2078,2079,2080,2081,1293,2082,2083,2084,2085, 850,
2086,2087,2088,2089,2090, 186,2091,1068, 180,2092,2093,2094, 109,1227, 522, 606,
2095, 867,1448,1093, 991,1171, 926, 353,1133,2096, 581,2097,2098,2099,1294,1449,
1450,2100, 596,1172,1014,1228,2101,1451,1295,1173,1229,2102,2103,1296,1134,1452,
949,1135,2104,2105,1094,1453,1454,1455,2106,1095,2107,2108,2109,2110,2111,2112,
2113,2114,2115,2116,2117, 804,2118,2119,1230,1231, 805,1456, 405,1136,2120,2121,
2122,2123,2124, 720, 701,1297, 992,1457, 927,1004,2125,2126,2127,2128,2129,2130,
22, 417,2131, 303,2132, 385,2133, 971, 520, 513,2134,1174, 73,1096, 231, 274,
962,1458, 673,2135,1459,2136, 152,1137,2137,2138,2139,2140,1005,1138,1460,1139,
2141,2142,2143,2144, 11, 374, 844,2145, 154,1232, 46,1461,2146, 838, 830, 721,
1233, 106,2147, 90, 428, 462, 578, 566,1175, 352,2148,2149, 538,1234, 124,1298,
2150,1462, 761, 565,2151, 686,2152, 649,2153, 72, 173,2154, 460, 415,2155,1463,
2156,1235, 305,2157,2158,2159,2160,2161,2162, 579,2163,2164,2165,2166,2167, 747,
2168,2169,2170,2171,1464, 669,2172,2173,2174,2175,2176,1465,2177, 23, 530, 285,
2178, 335, 729,2179, 397,2180,2181,2182,1030,2183,2184, 698,2185,2186, 325,2187,
2188, 369,2189, 799,1097,1015, 348,2190,1069, 680,2191, 851,1466,2192,2193, 10,
2194, 613, 424,2195, 979, 108, 449, 589, 27, 172, 81,1031, 80, 774, 281, 350,
1032, 525, 301, 582,1176,2196, 674,1045,2197,2198,1467, 730, 762,2199,2200,2201,
2202,1468,2203, 993,2204,2205, 266,1070, 963,1140,2206,2207,2208, 664,1098, 972,
2209,2210,2211,1177,1469,1470, 871,2212,2213,2214,2215,2216,1471,2217,2218,2219,
2220,2221,2222,2223,2224,2225,2226,2227,1472,1236,2228,2229,2230,2231,2232,2233,
2234,2235,1299,2236,2237, 200,2238, 477, 373,2239,2240, 731, 825, 777,2241,2242,
2243, 521, 486, 548,2244,2245,2246,1473,1300, 53, 549, 137, 875, 76, 158,2247,
1301,1474, 469, 396,1016, 278, 712,2248, 321, 442, 503, 767, 744, 941,1237,1178,
1475,2249, 82, 178,1141,1179, 973,2250,1302,2251, 297,2252,2253, 570,2254,2255,
2256, 18, 450, 206,2257, 290, 292,1142,2258, 511, 162, 99, 346, 164, 735,2259,
1476,1477, 4, 554, 343, 798,1099,2260,1100,2261, 43, 171,1303, 139, 215,2262,
2263, 717, 775,2264,1033, 322, 216,2265, 831,2266, 149,2267,1304,2268,2269, 702,
1238, 135, 845, 347, 309,2270, 484,2271, 878, 655, 238,1006,1478,2272, 67,2273,
295,2274,2275, 461,2276, 478, 942, 412,2277,1034,2278,2279,2280, 265,2281, 541,
2282,2283,2284,2285,2286, 70, 852,1071,2287,2288,2289,2290, 21, 56, 509, 117,
432,2291,2292, 331, 980, 552,1101, 148, 284, 105, 393,1180,1239, 755,2293, 187,
2294,1046,1479,2295, 340,2296, 63,1047, 230,2297,2298,1305, 763,1306, 101, 800,
808, 494,2299,2300,2301, 903,2302, 37,1072, 14, 5,2303, 79, 675,2304, 312,
2305,2306,2307,2308,2309,1480, 6,1307,2310,2311,2312, 1, 470, 35, 24, 229,
2313, 695, 210, 86, 778, 15, 784, 592, 779, 32, 77, 855, 964,2314, 259,2315,
501, 380,2316,2317, 83, 981, 153, 689,1308,1481,1482,1483,2318,2319, 716,1484,
2320,2321,2322,2323,2324,2325,1485,2326,2327, 128, 57, 68, 261,1048, 211, 170,
1240, 31,2328, 51, 435, 742,2329,2330,2331, 635,2332, 264, 456,2333,2334,2335,
425,2336,1486, 143, 507, 263, 943,2337, 363, 920,1487, 256,1488,1102, 243, 601,
1489,2338,2339,2340,2341,2342,2343,2344, 861,2345,2346,2347,2348,2349,2350, 395,
2351,1490,1491, 62, 535, 166, 225,2352,2353, 668, 419,1241, 138, 604, 928,2354,
1181,2355,1492,1493,2356,2357,2358,1143,2359, 696,2360, 387, 307,1309, 682, 476,
2361,2362, 332, 12, 222, 156,2363, 232,2364, 641, 276, 656, 517,1494,1495,1035,
416, 736,1496,2365,1017, 586,2366,2367,2368,1497,2369, 242,2370,2371,2372,1498,
2373, 965, 713,2374,2375,2376,2377, 740, 982,1499, 944,1500,1007,2378,2379,1310,
1501,2380,2381,2382, 785, 329,2383,2384,1502,2385,2386,2387, 932,2388,1503,2389,
2390,2391,2392,1242,2393,2394,2395,2396,2397, 994, 950,2398,2399,2400,2401,1504,
1311,2402,2403,2404,2405,1049, 749,2406,2407, 853, 718,1144,1312,2408,1182,1505,
2409,2410, 255, 516, 479, 564, 550, 214,1506,1507,1313, 413, 239, 444, 339,1145,
1036,1508,1509,1314,1037,1510,1315,2411,1511,2412,2413,2414, 176, 703, 497, 624,
593, 921, 302,2415, 341, 165,1103,1512,2416,1513,2417,2418,2419, 376,2420, 700,
2421,2422,2423, 258, 768,1316,2424,1183,2425, 995, 608,2426,2427,2428,2429, 221,
2430,2431,2432,2433,2434,2435,2436,2437, 195, 323, 726, 188, 897, 983,1317, 377,
644,1050, 879,2438, 452,2439,2440,2441,2442,2443,2444, 914,2445,2446,2447,2448,
915, 489,2449,1514,1184,2450,2451, 515, 64, 427, 495,2452, 583,2453, 483, 485,
1038, 562, 213,1515, 748, 666,2454,2455,2456,2457, 334,2458, 780, 996,1008, 705,
1243,2459,2460,2461,2462,2463, 114,2464, 493,1146, 366, 163,1516, 961,1104,2465,
291,2466,1318,1105,2467,1517, 365,2468, 355, 951,1244,2469,1319,2470, 631,2471,
2472, 218,1320, 364, 320, 756,1518,1519,1321,1520,1322,2473,2474,2475,2476, 997,
2477,2478,2479,2480, 665,1185,2481, 916,1521,2482,2483,2484, 584, 684,2485,2486,
797,2487,1051,1186,2488,2489,2490,1522,2491,2492, 370,2493,1039,1187, 65,2494,
434, 205, 463,1188,2495, 125, 812, 391, 402, 826, 699, 286, 398, 155, 781, 771,
585,2496, 590, 505,1073,2497, 599, 244, 219, 917,1018, 952, 646,1523,2498,1323,
2499,2500, 49, 984, 354, 741,2501, 625,2502,1324,2503,1019, 190, 357, 757, 491,
95, 782, 868,2504,2505,2506,2507,2508,2509, 134,1524,1074, 422,1525, 898,2510,
161,2511,2512,2513,2514, 769,2515,1526,2516,2517, 411,1325,2518, 472,1527,2519,
2520,2521,2522,2523,2524, 985,2525,2526,2527,2528,2529,2530, 764,2531,1245,2532,
2533, 25, 204, 311,2534, 496,2535,1052,2536,2537,2538,2539,2540,2541,2542, 199,
704, 504, 468, 758, 657,1528, 196, 44, 839,1246, 272, 750,2543, 765, 862,2544,
2545,1326,2546, 132, 615, 933,2547, 732,2548,2549,2550,1189,1529,2551, 283,1247,
1053, 607, 929,2552,2553,2554, 930, 183, 872, 616,1040,1147,2555,1148,1020, 441,
249,1075,2556,2557,2558, 466, 743,2559,2560,2561, 92, 514, 426, 420, 526,2562,
2563,2564,2565,2566,2567,2568, 185,2569,2570,2571,2572, 776,1530, 658,2573, 362,
2574, 361, 922,1076, 793,2575,2576,2577,2578,2579,2580,1531, 251,2581,2582,2583,
2584,1532, 54, 612, 237,1327,2585,2586, 275, 408, 647, 111,2587,1533,1106, 465,
3, 458, 9, 38,2588, 107, 110, 890, 209, 26, 737, 498,2589,1534,2590, 431,
202, 88,1535, 356, 287,1107, 660,1149,2591, 381,1536, 986,1150, 445,1248,1151,
974,2592,2593, 846,2594, 446, 953, 184,1249,1250, 727,2595, 923, 193, 883,2596,
2597,2598, 102, 324, 539, 817,2599, 421,1041,2600, 832,2601, 94, 175, 197, 406,
2602, 459,2603,2604,2605,2606,2607, 330, 555,2608,2609,2610, 706,1108, 389,2611,
2612,2613,2614, 233,2615, 833, 558, 931, 954,1251,2616,2617,1537, 546,2618,2619,
1009,2620,2621,2622,1538, 690,1328,2623, 955,2624,1539,2625,2626, 772,2627,2628,
2629,2630,2631, 924, 648, 863, 603,2632,2633, 934,1540, 864, 865,2634, 642,1042,
670,1190,2635,2636,2637,2638, 168,2639, 652, 873, 542,1054,1541,2640,2641,2642, # 512, 256
)
| gpl-3.0 |
osstech-jp/samba | python/samba/__init__.py | 17 | 12439 | # Unix SMB/CIFS implementation.
# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007-2008
#
# Based on the original in EJS:
# Copyright (C) Andrew Tridgell <tridge@samba.org> 2005
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Samba 4."""
__docformat__ = "restructuredText"
import os
import sys
import time
import samba.param
def source_tree_topdir():
"""Return the top level source directory."""
paths = ["../../..", "../../../.."]
for p in paths:
topdir = os.path.normpath(os.path.join(os.path.dirname(__file__), p))
if os.path.exists(os.path.join(topdir, 'source4')):
return topdir
raise RuntimeError("unable to find top level source directory")
def in_source_tree():
"""Return True if we are running from within the samba source tree"""
try:
topdir = source_tree_topdir()
except RuntimeError:
return False
return True
import ldb
from samba._ldb import Ldb as _Ldb
class Ldb(_Ldb):
"""Simple Samba-specific LDB subclass that takes care
of setting up the modules dir, credentials pointers, etc.
Please note that this is intended to be for all Samba LDB files,
not necessarily the Sam database. For Sam-specific helper
functions see samdb.py.
"""
def __init__(self, url=None, lp=None, modules_dir=None, session_info=None,
credentials=None, flags=0, options=None):
"""Opens a Samba Ldb file.
:param url: Optional LDB URL to open
:param lp: Optional loadparm object
:param modules_dir: Optional modules directory
:param session_info: Optional session information
:param credentials: Optional credentials, defaults to anonymous.
:param flags: Optional LDB flags
:param options: Additional options (optional)
This is different from a regular Ldb file in that the Samba-specific
modules-dir is used by default and that credentials and session_info
can be passed through (required by some modules).
"""
if modules_dir is not None:
self.set_modules_dir(modules_dir)
else:
self.set_modules_dir(os.path.join(samba.param.modules_dir(), "ldb"))
if session_info is not None:
self.set_session_info(session_info)
if credentials is not None:
self.set_credentials(credentials)
if lp is not None:
self.set_loadparm(lp)
# This must be done before we load the schema, as these handlers for
# objectSid and objectGUID etc must take precedence over the 'binary
# attribute' declaration in the schema
self.register_samba_handlers()
# TODO set debug
def msg(l, text):
print text
#self.set_debug(msg)
self.set_utf8_casefold()
# Allow admins to force non-sync ldb for all databases
if lp is not None:
nosync_p = lp.get("nosync", "ldb")
if nosync_p is not None and nosync_p:
flags |= ldb.FLG_NOSYNC
self.set_create_perms(0600)
if url is not None:
self.connect(url, flags, options)
def searchone(self, attribute, basedn=None, expression=None,
scope=ldb.SCOPE_BASE):
"""Search for one attribute as a string.
:param basedn: BaseDN for the search.
:param attribute: Name of the attribute
:param expression: Optional search expression.
:param scope: Search scope (defaults to base).
:return: Value of attribute as a string or None if it wasn't found.
"""
res = self.search(basedn, scope, expression, [attribute])
if len(res) != 1 or res[0][attribute] is None:
return None
values = set(res[0][attribute])
assert len(values) == 1
return self.schema_format_value(attribute, values.pop())
def erase_users_computers(self, dn):
"""Erases user and computer objects from our AD.
        This is needed since the 'samldb' module denies the deletion of primary
        groups. Deleting the users and computers first ensures that no group
        is still the primary group of some object.
"""
try:
res = self.search(base=dn, scope=ldb.SCOPE_SUBTREE, attrs=[],
expression="(|(objectclass=user)(objectclass=computer))")
except ldb.LdbError, (errno, _):
if errno == ldb.ERR_NO_SUCH_OBJECT:
# Ignore no such object errors
return
else:
raise
try:
for msg in res:
self.delete(msg.dn, ["relax:0"])
except ldb.LdbError, (errno, _):
if errno != ldb.ERR_NO_SUCH_OBJECT:
# Ignore no such object errors
raise
def erase_except_schema_controlled(self):
"""Erase this ldb.
:note: Removes all records, except those that are controlled by
Samba4's schema.
"""
basedn = ""
# Try to delete user/computer accounts to allow deletion of groups
self.erase_users_computers(basedn)
        # Delete the 'visible' records, and the invisible 'deleted' records (if
# this DB supports it)
for msg in self.search(basedn, ldb.SCOPE_SUBTREE,
"(&(|(objectclass=*)(distinguishedName=*))(!(distinguishedName=@BASEINFO)))",
[], controls=["show_deleted:0", "show_recycled:0"]):
try:
self.delete(msg.dn, ["relax:0"])
except ldb.LdbError, (errno, _):
if errno != ldb.ERR_NO_SUCH_OBJECT:
# Ignore no such object errors
raise
res = self.search(basedn, ldb.SCOPE_SUBTREE,
"(&(|(objectclass=*)(distinguishedName=*))(!(distinguishedName=@BASEINFO)))",
[], controls=["show_deleted:0", "show_recycled:0"])
assert len(res) == 0
# delete the specials
for attr in ["@SUBCLASSES", "@MODULES",
"@OPTIONS", "@PARTITION", "@KLUDGEACL"]:
try:
self.delete(attr, ["relax:0"])
except ldb.LdbError, (errno, _):
if errno != ldb.ERR_NO_SUCH_OBJECT:
# Ignore missing dn errors
raise
def erase(self):
"""Erase this ldb, removing all records."""
self.erase_except_schema_controlled()
# delete the specials
for attr in ["@INDEXLIST", "@ATTRIBUTES"]:
try:
self.delete(attr, ["relax:0"])
except ldb.LdbError, (errno, _):
if errno != ldb.ERR_NO_SUCH_OBJECT:
# Ignore missing dn errors
raise
def load_ldif_file_add(self, ldif_path):
"""Load a LDIF file.
:param ldif_path: Path to LDIF file.
"""
self.add_ldif(open(ldif_path, 'r').read())
def add_ldif(self, ldif, controls=None):
"""Add data based on a LDIF string.
:param ldif: LDIF text.
"""
for changetype, msg in self.parse_ldif(ldif):
assert changetype == ldb.CHANGETYPE_NONE
self.add(msg, controls)
def modify_ldif(self, ldif, controls=None):
"""Modify database based on a LDIF string.
:param ldif: LDIF text.
"""
for changetype, msg in self.parse_ldif(ldif):
if changetype == ldb.CHANGETYPE_ADD:
self.add(msg, controls)
else:
self.modify(msg, controls)
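# A hedged usage sketch for the Ldb wrapper above (the file path and LDIF
# content are placeholders, not taken from this module):
#
#     import samba.param
#     lp = samba.param.LoadParm()
#     db = Ldb("/path/to/example.ldb", lp=lp)
#     db.add_ldif("dn: CN=example\ncn: example\n")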
def substitute_var(text, values):
"""Substitute strings of the form ${NAME} in str, replacing
with substitutions from values.
:param text: Text in which to subsitute.
:param values: Dictionary with keys and values.
"""
for (name, value) in values.items():
assert isinstance(name, str), "%r is not a string" % name
assert isinstance(value, str), "Value %r for %s is not a string" % (value, name)
text = text.replace("${%s}" % name, value)
return text
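# Example (illustrative only; the key name is arbitrary):
#
#     substitute_var("Hello ${NAME}", {"NAME": "world"})   # -> "Hello world"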
def check_all_substituted(text):
"""Check that all substitution variables in a string have been replaced.
If not, raise an exception.
:param text: The text to search for substitution variables
"""
if not "${" in text:
return
var_start = text.find("${")
var_end = text.find("}", var_start)
raise Exception("Not all variables substituted: %s" %
text[var_start:var_end+1])
def read_and_sub_file(file_name, subst_vars):
"""Read a file and sub in variables found in it
:param file_name: File to be read (typically from setup directory)
param subst_vars: Optional variables to subsitute in the file.
"""
data = open(file_name, 'r').read()
if subst_vars is not None:
data = substitute_var(data, subst_vars)
check_all_substituted(data)
return data
def setup_file(template, fname, subst_vars=None):
"""Setup a file in the private dir.
:param template: Path of the template file.
:param fname: Path of the file to create.
:param subst_vars: Substitution variables.
"""
if os.path.exists(fname):
os.unlink(fname)
data = read_and_sub_file(template, subst_vars)
f = open(fname, 'w')
try:
f.write(data)
finally:
f.close()
MAX_NETBIOS_NAME_LEN = 15
def is_valid_netbios_char(c):
return (c.isalnum() or c in " !#$%&'()-.@^_{}~")
def valid_netbios_name(name):
"""Check whether a name is valid as a NetBIOS name. """
# See crh's book (1.4.1.1)
if len(name) > MAX_NETBIOS_NAME_LEN:
return False
for x in name:
if not is_valid_netbios_char(x):
return False
return True
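# Examples (illustrative):
#
#     valid_netbios_name("FILESERVER01")           # -> True
#     valid_netbios_name("A_NAME_LONGER_THAN_15")  # -> False (over 15 chars)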
def import_bundled_package(modulename, location, source_tree_container,
namespace):
"""Import the bundled version of a package.
:note: This should only be called if the system version of the package
is not adequate.
:param modulename: Module name to import
:param location: Location to add to sys.path (can be relative to
${srcdir}/${source_tree_container})
:param source_tree_container: Directory under source root that
contains the bundled third party modules.
:param namespace: Namespace to import module from, when not in source tree
"""
if in_source_tree():
extra_path = os.path.join(source_tree_topdir(), source_tree_container,
location)
if not extra_path in sys.path:
sys.path.insert(0, extra_path)
sys.modules[modulename] = __import__(modulename)
else:
sys.modules[modulename] = __import__(
"%s.%s" % (namespace, modulename), fromlist=[namespace])
def ensure_third_party_module(modulename, location):
"""Add a location to sys.path if a third party dependency can't be found.
:param modulename: Module name to import
:param location: Location to add to sys.path (can be relative to
${srcdir}/third_party)
"""
try:
__import__(modulename)
except ImportError:
import_bundled_package(modulename, location,
source_tree_container="third_party",
namespace="samba.third_party")
def dn_from_dns_name(dnsdomain):
"""return a DN from a DNS name domain/forest root"""
return "DC=" + ",DC=".join(dnsdomain.split("."))
def current_unix_time():
return int(time.time())
import _glue
version = _glue.version
interface_ips = _glue.interface_ips
set_debug_level = _glue.set_debug_level
get_debug_level = _glue.get_debug_level
unix2nttime = _glue.unix2nttime
nttime2string = _glue.nttime2string
nttime2unix = _glue.nttime2unix
unix2nttime = _glue.unix2nttime
generate_random_password = _glue.generate_random_password
strcasecmp_m = _glue.strcasecmp_m
strstr_m = _glue.strstr_m
| gpl-3.0 |
hoatle/odoo | addons/website_mail/controllers/main.py | 149 | 4279 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.addons.web import http
from openerp.addons.web.http import request
class WebsiteMail(http.Controller):
@http.route(['/website_mail/follow'], type='json', auth="public", website=True)
def website_message_subscribe(self, id=0, object=None, message_is_follower="on", email=False, **post):
cr, uid, context = request.cr, request.uid, request.context
partner_obj = request.registry['res.partner']
user_obj = request.registry['res.users']
_id = int(id)
_message_is_follower = message_is_follower == 'on'
_object = request.registry[object]
# search partner_id
public_id = request.website.user_id.id
if uid != public_id:
partner_ids = [user_obj.browse(cr, uid, uid, context).partner_id.id]
else:
# mail_thread method
partner_ids = _object._find_partner_from_emails(
cr, SUPERUSER_ID, _id, [email], context=context, check_followers=True)
if not partner_ids or not partner_ids[0]:
name = email.split('@')[0]
partner_ids = [partner_obj.create(cr, SUPERUSER_ID, {'name': name, 'email': email}, context=context)]
# add or remove follower
if _message_is_follower:
_object.check_access_rule(cr, uid, [_id], 'read', context)
_object.message_unsubscribe(cr, SUPERUSER_ID, [_id], partner_ids, context=context)
return False
else:
_object.check_access_rule(cr, uid, [_id], 'read', context)
# add partner to session
request.session['partner_id'] = partner_ids[0]
_object.message_subscribe(cr, SUPERUSER_ID, [_id], partner_ids, context=context)
return True
@http.route(['/website_mail/is_follower'], type='json', auth="public", website=True)
def call(self, model, id, **post):
id = int(id)
cr, uid, context = request.cr, request.uid, request.context
partner_obj = request.registry.get('res.partner')
users_obj = request.registry.get('res.users')
obj = request.registry.get(model)
partner_id = None
public_id = request.website.user_id.id
if uid != public_id:
partner_id = users_obj.browse(cr, SUPERUSER_ID, uid, context).partner_id
elif request.session.get('partner_id'):
partner_id = partner_obj.browse(cr, SUPERUSER_ID, request.session.get('partner_id'), context)
email = partner_id and partner_id.email or ""
values = {
'is_user': uid != public_id,
'email': email,
'is_follower': False,
'alias_name': False,
}
if not obj:
return values
obj_ids = obj.exists(cr, SUPERUSER_ID, [id], context=context)
if obj_ids:
if partner_id:
values['is_follower'] = len(
request.registry['mail.followers'].search(
cr, SUPERUSER_ID, [
('res_model', '=', model),
('res_id', '=', obj_ids[0]),
('partner_id', '=', partner_id.id)
], context=context)) == 1
return values
| agpl-3.0 |
ArcherSys/ArcherSys | Lib/site-packages/pip/_vendor/requests/packages/chardet/euctwprober.py | 2994 | 1676 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCTWDistributionAnalysis
from .mbcssm import EUCTWSMModel
class EUCTWProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(EUCTWSMModel)
self._mDistributionAnalyzer = EUCTWDistributionAnalysis()
self.reset()
def get_charset_name(self):
return "EUC-TW"
| mit |
lupyuen/RaspberryPiImage | home/pi/GrovePi/Software/Python/others/temboo/Library/Zoho/Sheet/DownloadSpreadsheet.py | 5 | 4174 | # -*- coding: utf-8 -*-
###############################################################################
#
# DownloadSpreadsheet
# Downloads a specified spreadsheet in a user's Zoho Sheet Account.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class DownloadSpreadsheet(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the DownloadSpreadsheet Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(DownloadSpreadsheet, self).__init__(temboo_session, '/Library/Zoho/Sheet/DownloadSpreadsheet')
def new_input_set(self):
return DownloadSpreadsheetInputSet()
def _make_result_set(self, result, path):
return DownloadSpreadsheetResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return DownloadSpreadsheetChoreographyExecution(session, exec_id, path)
class DownloadSpreadsheetInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the DownloadSpreadsheet
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_APIKey(self, value):
"""
Set the value of the APIKey input for this Choreo. ((required, string) The API Key provided by Zoho)
"""
super(DownloadSpreadsheetInputSet, self)._set_input('APIKey', value)
def set_BookId(self, value):
"""
Set the value of the BookId input for this Choreo. ((required, integer) Specifies the unique spreadsheet id to download.)
"""
super(DownloadSpreadsheetInputSet, self)._set_input('BookId', value)
def set_DownloadFormat(self, value):
"""
Set the value of the DownloadFormat input for this Choreo. ((required, string) Specifies the file format in which the documents need to be downloaded. Possible values for documents: xls, xlsx, ods, sxc, pdf, html, csv, tsv.)
"""
super(DownloadSpreadsheetInputSet, self)._set_input('DownloadFormat', value)
def set_LoginID(self, value):
"""
Set the value of the LoginID input for this Choreo. ((required, string) Your Zoho username (or login id))
"""
super(DownloadSpreadsheetInputSet, self)._set_input('LoginID', value)
def set_Password(self, value):
"""
Set the value of the Password input for this Choreo. ((required, password) Your Zoho password)
"""
super(DownloadSpreadsheetInputSet, self)._set_input('Password', value)
class DownloadSpreadsheetResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the DownloadSpreadsheet Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Zoho. Corresponds to the DownloadFormat input.)
"""
return self._output.get('Response', None)
class DownloadSpreadsheetChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return DownloadSpreadsheetResultSet(response, path)
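# A hedged usage sketch following the general Temboo choreo pattern (the
# account, key names, and credentials below are placeholders, and
# execute_with_results is assumed to be the usual SDK entry point):
#
#     from temboo.core.session import TembooSession
#     session = TembooSession("ACCOUNT", "APP_KEY_NAME", "APP_KEY_VALUE")
#     choreo = DownloadSpreadsheet(session)
#     inputs = choreo.new_input_set()
#     inputs.set_APIKey("ZOHO_API_KEY")
#     inputs.set_BookId("123456")
#     inputs.set_DownloadFormat("csv")
#     inputs.set_LoginID("user@example.com")
#     inputs.set_Password("password")
#     results = choreo.execute_with_results(inputs)
#     print(results.get_Response())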
| apache-2.0 |
ivanlai/Kaggle-Planet-Amazon | PyTorch_models.py | 1 | 5862 | # Reference and ideas from http://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html
from __future__ import print_function
import torch.nn as nn
import torchvision.models as models
import warnings
warnings.filterwarnings("ignore")
##################################################################
## PyTorch Model implementations in
## /usr/local/lib/python2.7/dist-packages/torchvision/models ##
##################################################################
def resnet18(num_classes, pretrained=True, freeze=False):
    model = models.resnet18(pretrained=pretrained)
if freeze:
model = freeze_all_layers(model)
# Parameters of newly constructed modules have requires_grad=True by default
num_features = model.fc.in_features
model.fc = nn.Linear(num_features, num_classes)
model = Add_Sigmoid(model)
return model, 'Resnet18'
def resnet34(num_classes, pretrained=True, freeze=False):
    model = models.resnet34(pretrained=pretrained)
if freeze:
model = freeze_all_layers(model)
num_features = model.fc.in_features
model.fc = nn.Linear(num_features, num_classes)
model = Add_Sigmoid(model)
return model, 'Resnet34'
def resnet50(num_classes, pretrained=True, freeze=False):
model = models.resnet50( pretrained=pretrained)
if freeze:
model = freeze_all_layers(model)
num_features = model.fc.in_features
model.fc = nn.Linear(num_features, num_classes)
model = Add_Sigmoid(model)
return model, 'Resnet50'
def resnet101(num_classes, pretrained=True, freeze=False):
model = models.resnet101( pretrained=pretrained)
if freeze:
model = freeze_all_layers(model)
num_features = model.fc.in_features
model.fc = nn.Linear(num_features, num_classes)
model = Add_Sigmoid(model)
return model, 'Resnet101'
def resnet152(num_classes, pretrained=True, freeze=False):
model = models.resnet152( pretrained=pretrained)
if freeze:
model = freeze_all_layers(model)
num_features = model.fc.in_features
model.fc = nn.Linear(num_features, num_classes)
model = Add_Sigmoid(model)
return model, 'Resnet152'
##################################################################
def densenet121(num_classes, pretrained=True, freeze=False):
model = models.densenet121( pretrained=pretrained)
if freeze:
model = freeze_all_layers(model)
num_features = model.classifier.in_features
model.classifier = nn.Linear(num_features, num_classes)
model = Add_Sigmoid(model)
return model, 'Densenet121'
def densenet161(num_classes, pretrained=True, freeze=False):
model = models.densenet161( pretrained=pretrained)
if freeze:
model = freeze_all_layers(model)
num_features = model.classifier.in_features
model.classifier = nn.Linear(num_features, num_classes)
model = Add_Sigmoid(model)
return model, 'Densenet161'
def densenet169(num_classes, pretrained=True, freeze=False):
model = models.densenet169(pretrained=pretrained)
if freeze:
model = freeze_all_layers(model)
num_features = model.classifier.in_features
model.classifier = nn.Linear(num_features, num_classes)
model = Add_Sigmoid(model)
return model, 'Densenet169'
def densenet201(num_classes, pretrained=True, freeze=False):
model = models.densenet201( pretrained=pretrained)
if freeze:
model = freeze_all_layers(model)
num_features = model.classifier.in_features
model.classifier = nn.Linear(num_features, num_classes)
model = Add_Sigmoid(model)
return model, 'Densenet201'
##################################################################
def inception_v3(num_classes, pretrained=True, freeze=False):
model = models.inception_v3(pretrained=pretrained)
model.aux_logits = False
if freeze:
model = freeze_all_layers(model)
num_features = model.fc.in_features
model.fc = nn.Linear(num_features, num_classes)
model = Add_Sigmoid(model)
return model, 'Inception_v3'
##################################################################
def vgg16(num_classes, pretrained=True, freeze=False):
# Credit: https://discuss.pytorch.org/t/how-to-perform-finetuning-in-pytorch/419/10
    model = models.vgg16(pretrained=pretrained)
if freeze:
model = freeze_all_layers(model)
mod = list(model.classifier.children())
mod.pop()
    mod.append(nn.Linear(4096, num_classes))
new_classifier = nn.Sequential(*mod)
model.classifier = new_classifier
model = Add_Sigmoid(model)
return model, 'VGG16'
##################################################################
def vgg19(num_classes, pretrained=True, freeze=False):
# Credit: https://discuss.pytorch.org/t/how-to-perform-finetuning-in-pytorch/419/10
    model = models.vgg19(pretrained=pretrained)
if freeze:
model = freeze_all_layers(model)
mod = list(model.classifier.children())
mod.pop()
    mod.append(nn.Linear(4096, num_classes))
new_classifier = nn.Sequential(*mod)
model.classifier = new_classifier
model = Add_Sigmoid(model)
return model, 'VGG19'
##################################################################
class Add_Sigmoid(nn.Module):
def __init__(self, pretrained_model):
super(Add_Sigmoid, self).__init__()
self.pretrained_model = pretrained_model
self.sigmoid = nn.Sigmoid()
def forward(self, x):
return self.sigmoid(self.pretrained_model(x))
##################################################################
def freeze_all_layers(model):
    # Freeze all layers except the last during training (the last layer
    # regains requires_grad=True when it is redefined)
for param in model.parameters():
param.requires_grad = False
return model
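# Usage sketch (illustrative; num_classes=17 matches the Planet Amazon label
# set this project targets):
#
#     model, name = resnet50(num_classes=17, pretrained=True, freeze=False)
#     # The trailing Sigmoid makes outputs per-label probabilities, suitable
#     # for a multi-label BCE loss.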
################################################################## | mit |
Dhivyap/ansible | lib/ansible/plugins/action/vyos.py | 7 | 4086 | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import copy
from ansible import constants as C
from ansible.plugins.action.network import ActionModule as ActionNetworkModule
from ansible.module_utils._text import to_text
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.common.utils import load_provider
from ansible.module_utils.network.vyos.vyos import vyos_provider_spec
from ansible.utils.display import Display
display = Display()
class ActionModule(ActionNetworkModule):
def run(self, tmp=None, task_vars=None):
del tmp # tmp no longer has any effect
module_name = self._task.action.split('.')[-1]
self._config_module = True if module_name == 'vyos_config' else False
socket_path = None
if self._play_context.connection == 'network_cli':
provider = self._task.args.get('provider', {})
if any(provider.values()):
display.warning('provider is unnecessary when using network_cli and will be ignored')
del self._task.args['provider']
elif self._play_context.connection == 'local':
provider = load_provider(vyos_provider_spec, self._task.args)
pc = copy.deepcopy(self._play_context)
pc.connection = 'network_cli'
pc.network_os = 'vyos'
pc.remote_addr = provider['host'] or self._play_context.remote_addr
pc.port = int(provider['port'] or self._play_context.port or 22)
pc.remote_user = provider['username'] or self._play_context.connection_user
pc.password = provider['password'] or self._play_context.password
pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
display.vvv('using connection plugin %s (was local)' % pc.connection, pc.remote_addr)
connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)
command_timeout = int(provider['timeout']) if provider['timeout'] else connection.get_option('persistent_command_timeout')
connection.set_options(direct={'persistent_command_timeout': command_timeout})
socket_path = connection.run()
display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
if not socket_path:
return {'failed': True,
'msg': 'unable to open shell. Please see: ' +
'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}
task_vars['ansible_socket'] = socket_path
else:
return {'failed': True, 'msg': 'Connection type %s is not valid for this module' % self._play_context.connection}
# make sure we are in the right cli context which should be
# enable mode and not config module
if socket_path is None:
socket_path = self._connection.socket_path
conn = Connection(socket_path)
out = conn.get_prompt()
if to_text(out, errors='surrogate_then_replace').strip().endswith('#'):
display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr)
conn.send_command('exit discard')
result = super(ActionModule, self).run(task_vars=task_vars)
return result
| gpl-3.0 |
hi-ogawa/hiogawa-blog-ghost | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-2.7/pygments/lexers/text.py | 197 | 68779 | # -*- coding: utf-8 -*-
"""
pygments.lexers.text
~~~~~~~~~~~~~~~~~~~~
Lexers for non-source code file types.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from bisect import bisect
from pygments.lexer import Lexer, LexerContext, RegexLexer, ExtendedRegexLexer, \
bygroups, include, using, this, do_insertions
from pygments.token import Punctuation, Text, Comment, Keyword, Name, String, \
Generic, Operator, Number, Whitespace, Literal
from pygments.util import get_bool_opt, ClassNotFound
from pygments.lexers.other import BashLexer
__all__ = ['IniLexer', 'PropertiesLexer', 'SourcesListLexer', 'BaseMakefileLexer',
'MakefileLexer', 'DiffLexer', 'IrcLogsLexer', 'TexLexer',
'GroffLexer', 'ApacheConfLexer', 'BBCodeLexer', 'MoinWikiLexer',
'RstLexer', 'VimLexer', 'GettextLexer', 'SquidConfLexer',
'DebianControlLexer', 'DarcsPatchLexer', 'YamlLexer',
'LighttpdConfLexer', 'NginxConfLexer', 'CMakeLexer', 'HttpLexer',
'PyPyLogLexer', 'RegeditLexer', 'HxmlLexer', 'EbnfLexer']
class IniLexer(RegexLexer):
"""
Lexer for configuration files in INI style.
"""
name = 'INI'
aliases = ['ini', 'cfg', 'dosini']
filenames = ['*.ini', '*.cfg']
mimetypes = ['text/x-ini']
tokens = {
'root': [
(r'\s+', Text),
(r'[;#].*', Comment.Single),
(r'\[.*?\]$', Keyword),
(r'(.*?)([ \t]*)(=)([ \t]*)(.*(?:\n[ \t].+)*)',
bygroups(Name.Attribute, Text, Operator, Text, String))
]
}
def analyse_text(text):
npos = text.find('\n')
if npos < 3:
return False
return text[0] == '[' and text[npos-1] == ']'
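# A short usage sketch (standard Pygments API, not specific to this module);
# any lexer defined in this file can be driven the same way:
#
#     from pygments import highlight
#     from pygments.formatters import TerminalFormatter
#     print(highlight('[core]\nkey = value\n', IniLexer(), TerminalFormatter()))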
class RegeditLexer(RegexLexer):
"""
Lexer for `Windows Registry
<http://en.wikipedia.org/wiki/Windows_Registry#.REG_files>`_ files produced
by regedit.
*New in Pygments 1.6.*
"""
name = 'reg'
aliases = ['registry']
filenames = ['*.reg']
mimetypes = ['text/x-windows-registry']
tokens = {
'root': [
(r'Windows Registry Editor.*', Text),
(r'\s+', Text),
(r'[;#].*', Comment.Single),
(r'(\[)(-?)(HKEY_[A-Z_]+)(.*?\])$',
bygroups(Keyword, Operator, Name.Builtin, Keyword)),
# String keys, which obey somewhat normal escaping
(r'("(?:\\"|\\\\|[^"])+")([ \t]*)(=)([ \t]*)',
bygroups(Name.Attribute, Text, Operator, Text),
'value'),
# Bare keys (includes @)
(r'(.*?)([ \t]*)(=)([ \t]*)',
bygroups(Name.Attribute, Text, Operator, Text),
'value'),
],
'value': [
(r'-', Operator, '#pop'), # delete value
(r'(dword|hex(?:\([0-9a-fA-F]\))?)(:)([0-9a-fA-F,]+)',
bygroups(Name.Variable, Punctuation, Number), '#pop'),
# As far as I know, .reg files do not support line continuation.
(r'.*', String, '#pop'),
]
}
def analyse_text(text):
return text.startswith('Windows Registry Editor')
class PropertiesLexer(RegexLexer):
"""
Lexer for configuration files in Java's properties format.
*New in Pygments 1.4.*
"""
name = 'Properties'
aliases = ['properties', 'jproperties']
filenames = ['*.properties']
mimetypes = ['text/x-java-properties']
tokens = {
'root': [
(r'\s+', Text),
(r'(?:[;#]|//).*$', Comment),
(r'(.*?)([ \t]*)([=:])([ \t]*)(.*(?:(?<=\\)\n.*)*)',
bygroups(Name.Attribute, Text, Operator, Text, String)),
],
}
class SourcesListLexer(RegexLexer):
"""
Lexer that highlights debian sources.list files.
*New in Pygments 0.7.*
"""
name = 'Debian Sourcelist'
aliases = ['sourceslist', 'sources.list', 'debsources']
filenames = ['sources.list']
mimetype = ['application/x-debian-sourceslist']
tokens = {
'root': [
(r'\s+', Text),
(r'#.*?$', Comment),
(r'^(deb(?:-src)?)(\s+)',
bygroups(Keyword, Text), 'distribution')
],
'distribution': [
(r'#.*?$', Comment, '#pop'),
(r'\$\(ARCH\)', Name.Variable),
(r'[^\s$[]+', String),
(r'\[', String.Other, 'escaped-distribution'),
(r'\$', String),
(r'\s+', Text, 'components')
],
'escaped-distribution': [
(r'\]', String.Other, '#pop'),
(r'\$\(ARCH\)', Name.Variable),
(r'[^\]$]+', String.Other),
(r'\$', String.Other)
],
'components': [
(r'#.*?$', Comment, '#pop:2'),
(r'$', Text, '#pop:2'),
(r'\s+', Text),
(r'\S+', Keyword.Pseudo),
]
}
def analyse_text(text):
for line in text.split('\n'):
line = line.strip()
if not (line.startswith('#') or line.startswith('deb ') or
line.startswith('deb-src ') or not line):
return False
return True
class MakefileLexer(Lexer):
"""
Lexer for BSD and GNU make extensions (lenient enough to handle both in
the same file even).
*Rewritten in Pygments 0.10.*
"""
name = 'Makefile'
aliases = ['make', 'makefile', 'mf', 'bsdmake']
filenames = ['*.mak', 'Makefile', 'makefile', 'Makefile.*', 'GNUmakefile']
mimetypes = ['text/x-makefile']
r_special = re.compile(r'^(?:'
# BSD Make
r'\.\s*(include|undef|error|warning|if|else|elif|endif|for|endfor)|'
# GNU Make
r'\s*(ifeq|ifneq|ifdef|ifndef|else|endif|-?include|define|endef|:))(?=\s)')
r_comment = re.compile(r'^\s*@?#')
def get_tokens_unprocessed(self, text):
ins = []
lines = text.splitlines(True)
done = ''
lex = BaseMakefileLexer(**self.options)
backslashflag = False
for line in lines:
if self.r_special.match(line) or backslashflag:
ins.append((len(done), [(0, Comment.Preproc, line)]))
backslashflag = line.strip().endswith('\\')
elif self.r_comment.match(line):
ins.append((len(done), [(0, Comment, line)]))
else:
done += line
for item in do_insertions(ins, lex.get_tokens_unprocessed(done)):
yield item
class BaseMakefileLexer(RegexLexer):
"""
Lexer for simple Makefiles (no preprocessing).
*New in Pygments 0.10.*
"""
name = 'Base Makefile'
aliases = ['basemake']
filenames = []
mimetypes = []
tokens = {
'root': [
(r'^(?:[\t ]+.*\n|\n)+', using(BashLexer)),
(r'\$\((?:.*\\\n|.*\n)+', using(BashLexer)),
(r'\s+', Text),
(r'#.*?\n', Comment),
(r'(export)(\s+)(?=[a-zA-Z0-9_${}\t -]+\n)',
bygroups(Keyword, Text), 'export'),
(r'export\s+', Keyword),
# assignment
(r'([a-zA-Z0-9_${}.-]+)(\s*)([!?:+]?=)([ \t]*)((?:.*\\\n)+|.*\n)',
bygroups(Name.Variable, Text, Operator, Text, using(BashLexer))),
# strings
(r'(?s)"(\\\\|\\.|[^"\\])*"', String.Double),
(r"(?s)'(\\\\|\\.|[^'\\])*'", String.Single),
# targets
(r'([^\n:]+)(:+)([ \t]*)', bygroups(Name.Function, Operator, Text),
'block-header'),
# TODO: add paren handling (grr)
],
'export': [
(r'[a-zA-Z0-9_${}-]+', Name.Variable),
(r'\n', Text, '#pop'),
(r'\s+', Text),
],
'block-header': [
(r'[^,\\\n#]+', Number),
(r',', Punctuation),
(r'#.*?\n', Comment),
(r'\\\n', Text), # line continuation
(r'\\.', Text),
(r'(?:[\t ]+.*\n|\n)+', using(BashLexer), '#pop'),
],
}
class DiffLexer(RegexLexer):
"""
Lexer for unified or context-style diffs or patches.
"""
name = 'Diff'
aliases = ['diff', 'udiff']
filenames = ['*.diff', '*.patch']
mimetypes = ['text/x-diff', 'text/x-patch']
tokens = {
'root': [
(r' .*\n', Text),
(r'\+.*\n', Generic.Inserted),
(r'-.*\n', Generic.Deleted),
(r'!.*\n', Generic.Strong),
(r'@.*\n', Generic.Subheading),
(r'([Ii]ndex|diff).*\n', Generic.Heading),
(r'=.*\n', Generic.Heading),
(r'.*\n', Text),
]
}
def analyse_text(text):
if text[:7] == 'Index: ':
return True
if text[:5] == 'diff ':
return True
if text[:4] == '--- ':
return 0.9
DPATCH_KEYWORDS = ['hunk', 'addfile', 'adddir', 'rmfile', 'rmdir', 'move',
'replace']
class DarcsPatchLexer(RegexLexer):
"""
DarcsPatchLexer is a lexer for the various versions of the darcs patch
format. Examples of this format are derived by commands such as
``darcs annotate --patch`` and ``darcs send``.
*New in Pygments 0.10.*
"""
name = 'Darcs Patch'
aliases = ['dpatch']
filenames = ['*.dpatch', '*.darcspatch']
tokens = {
'root': [
(r'<', Operator),
(r'>', Operator),
(r'{', Operator),
(r'}', Operator),
(r'(\[)((?:TAG )?)(.*)(\n)(.*)(\*\*)(\d+)(\s?)(\])',
bygroups(Operator, Keyword, Name, Text, Name, Operator,
Literal.Date, Text, Operator)),
(r'(\[)((?:TAG )?)(.*)(\n)(.*)(\*\*)(\d+)(\s?)',
bygroups(Operator, Keyword, Name, Text, Name, Operator,
Literal.Date, Text), 'comment'),
(r'New patches:', Generic.Heading),
(r'Context:', Generic.Heading),
(r'Patch bundle hash:', Generic.Heading),
(r'(\s*)(%s)(.*\n)' % '|'.join(DPATCH_KEYWORDS),
bygroups(Text, Keyword, Text)),
(r'\+', Generic.Inserted, "insert"),
(r'-', Generic.Deleted, "delete"),
(r'.*\n', Text),
],
'comment': [
(r'[^\]].*\n', Comment),
(r'\]', Operator, "#pop"),
],
'specialText': [ # darcs add [_CODE_] special operators for clarity
(r'\n', Text, "#pop"), # line-based
(r'\[_[^_]*_]', Operator),
],
'insert': [
include('specialText'),
(r'\[', Generic.Inserted),
(r'[^\n\[]+', Generic.Inserted),
],
'delete': [
include('specialText'),
(r'\[', Generic.Deleted),
(r'[^\n\[]+', Generic.Deleted),
],
}
class IrcLogsLexer(RegexLexer):
"""
Lexer for IRC logs in *irssi*, *xchat* or *weechat* style.
"""
name = 'IRC logs'
aliases = ['irc']
filenames = ['*.weechatlog']
mimetypes = ['text/x-irclog']
flags = re.VERBOSE | re.MULTILINE
timestamp = r"""
(
# irssi / xchat and others
(?: \[|\()? # Opening bracket or paren for the timestamp
(?: # Timestamp
(?: (?:\d{1,4} [-/]?)+ # Date as - or /-separated groups of digits
[T ])? # Date/time separator: T or space
(?: \d?\d [:.]?)+ # Time as :/.-separated groups of 1 or 2 digits
)
(?: \]|\))?\s+ # Closing bracket or paren for the timestamp
|
# weechat
\d{4}\s\w{3}\s\d{2}\s # Date
\d{2}:\d{2}:\d{2}\s+ # Time + Whitespace
|
# xchat
\w{3}\s\d{2}\s # Date
\d{2}:\d{2}:\d{2}\s+ # Time + Whitespace
)?
"""
tokens = {
'root': [
# log start/end
(r'^\*\*\*\*(.*)\*\*\*\*$', Comment),
# hack
("^" + timestamp + r'(\s*<[^>]*>\s*)$', bygroups(Comment.Preproc, Name.Tag)),
# normal msgs
("^" + timestamp + r"""
(\s*<.*?>\s*) # Nick """,
bygroups(Comment.Preproc, Name.Tag), 'msg'),
# /me msgs
("^" + timestamp + r"""
(\s*[*]\s+) # Star
(\S+\s+.*?\n) # Nick + rest of message """,
bygroups(Comment.Preproc, Keyword, Generic.Inserted)),
# join/part msgs
("^" + timestamp + r"""
(\s*(?:\*{3}|<?-[!@=P]?->?)\s*) # Star(s) or symbols
(\S+\s+) # Nick + Space
(.*?\n) # Rest of message """,
bygroups(Comment.Preproc, Keyword, String, Comment)),
(r"^.*?\n", Text),
],
'msg': [
(r"\S+:(?!//)", Name.Attribute), # Prefix
(r".*\n", Text, '#pop'),
],
}
class BBCodeLexer(RegexLexer):
"""
A lexer that highlights BBCode(-like) syntax.
*New in Pygments 0.6.*
"""
name = 'BBCode'
aliases = ['bbcode']
mimetypes = ['text/x-bbcode']
tokens = {
'root': [
(r'[^[]+', Text),
# tag/end tag begin
(r'\[/?\w+', Keyword, 'tag'),
# stray bracket
(r'\[', Text),
],
'tag': [
(r'\s+', Text),
# attribute with value
(r'(\w+)(=)("?[^\s"\]]+"?)',
bygroups(Name.Attribute, Operator, String)),
# tag argument (a la [color=green])
(r'(=)("?[^\s"\]]+"?)',
bygroups(Operator, String)),
# tag end
(r'\]', Keyword, '#pop'),
],
}
class TexLexer(RegexLexer):
"""
Lexer for the TeX and LaTeX typesetting languages.
"""
name = 'TeX'
aliases = ['tex', 'latex']
filenames = ['*.tex', '*.aux', '*.toc']
mimetypes = ['text/x-tex', 'text/x-latex']
tokens = {
'general': [
(r'%.*?\n', Comment),
(r'[{}]', Name.Builtin),
(r'[&_^]', Name.Builtin),
],
'root': [
(r'\\\[', String.Backtick, 'displaymath'),
(r'\\\(', String, 'inlinemath'),
(r'\$\$', String.Backtick, 'displaymath'),
(r'\$', String, 'inlinemath'),
(r'\\([a-zA-Z]+|.)', Keyword, 'command'),
include('general'),
(r'[^\\$%&_^{}]+', Text),
],
'math': [
(r'\\([a-zA-Z]+|.)', Name.Variable),
include('general'),
(r'[0-9]+', Number),
(r'[-=!+*/()\[\]]', Operator),
(r'[^=!+*/()\[\]\\$%&_^{}0-9-]+', Name.Builtin),
],
'inlinemath': [
(r'\\\)', String, '#pop'),
(r'\$', String, '#pop'),
include('math'),
],
'displaymath': [
(r'\\\]', String, '#pop'),
(r'\$\$', String, '#pop'),
(r'\$', Name.Builtin),
include('math'),
],
'command': [
(r'\[.*?\]', Name.Attribute),
(r'\*', Keyword),
(r'', Text, '#pop'),
],
}
def analyse_text(text):
for start in ("\\documentclass", "\\input", "\\documentstyle",
"\\relax"):
if text[:len(start)] == start:
return True
class GroffLexer(RegexLexer):
"""
Lexer for the (g)roff typesetting language, supporting groff
extensions. Mainly useful for highlighting manpage sources.
*New in Pygments 0.6.*
"""
name = 'Groff'
aliases = ['groff', 'nroff', 'man']
filenames = ['*.[1234567]', '*.man']
mimetypes = ['application/x-troff', 'text/troff']
tokens = {
'root': [
(r'(\.)(\w+)', bygroups(Text, Keyword), 'request'),
(r'\.', Punctuation, 'request'),
# Regular characters, slurp till we find a backslash or newline
(r'[^\\\n]*', Text, 'textline'),
],
'textline': [
include('escapes'),
(r'[^\\\n]+', Text),
(r'\n', Text, '#pop'),
],
'escapes': [
# groff has many ways to write escapes.
(r'\\"[^\n]*', Comment),
(r'\\[fn]\w', String.Escape),
(r'\\\(.{2}', String.Escape),
(r'\\.\[.*\]', String.Escape),
(r'\\.', String.Escape),
(r'\\\n', Text, 'request'),
],
'request': [
(r'\n', Text, '#pop'),
include('escapes'),
(r'"[^\n"]+"', String.Double),
(r'\d+', Number),
(r'\S+', String),
(r'\s+', Text),
],
}
def analyse_text(text):
if text[:1] != '.':
return False
if text[:3] == '.\\"':
return True
if text[:4] == '.TH ':
return True
if text[1:3].isalnum() and text[3].isspace():
return 0.9
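# Illustration (added; return values as normalized by pygments'
# make_analysator wrapper, which converts True to 1.0 and None to 0.0):
#
#     GroffLexer.analyse_text('.TH LS 1\n')    # -> 1.0, certainly a manpage
#     GroffLexer.analyse_text('.SH NAME\n')    # -> 0.9, very likely groff
#     GroffLexer.analyse_text('hello\n')       # -> 0.0, not groff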
class ApacheConfLexer(RegexLexer):
"""
Lexer for configuration files following the Apache config file
format.
*New in Pygments 0.6.*
"""
name = 'ApacheConf'
aliases = ['apacheconf', 'aconf', 'apache']
filenames = ['.htaccess', 'apache.conf', 'apache2.conf']
mimetypes = ['text/x-apacheconf']
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
(r'\s+', Text),
(r'(#.*?)$', Comment),
(r'(<[^\s>]+)(?:(\s+)(.*?))?(>)',
bygroups(Name.Tag, Text, String, Name.Tag)),
(r'([a-zA-Z][a-zA-Z0-9_]*)(\s+)',
bygroups(Name.Builtin, Text), 'value'),
(r'\.+', Text),
],
'value': [
(r'$', Text, '#pop'),
(r'[^\S\n]+', Text),
(r'\d+\.\d+\.\d+\.\d+(?:/\d+)?', Number),
(r'\d+', Number),
(r'/([a-zA-Z0-9][a-zA-Z0-9_./-]+)', String.Other),
(r'(on|off|none|any|all|double|email|dns|min|minimal|'
r'os|productonly|full|emerg|alert|crit|error|warn|'
r'notice|info|debug|registry|script|inetd|standalone|'
r'user|group)\b', Keyword),
(r'"([^"\\]*(?:\\.[^"\\]*)*)"', String.Double),
(r'[^\s"]+', Text)
]
}
class MoinWikiLexer(RegexLexer):
"""
For MoinMoin (and Trac) Wiki markup.
*New in Pygments 0.7.*
"""
name = 'MoinMoin/Trac Wiki markup'
aliases = ['trac-wiki', 'moin']
filenames = []
mimetypes = ['text/x-trac-wiki']
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
(r'^#.*$', Comment),
(r'(!)(\S+)', bygroups(Keyword, Text)), # Ignore-next
# Titles
(r'^(=+)([^=]+)(=+)(\s*#.+)?$',
bygroups(Generic.Heading, using(this), Generic.Heading, String)),
# Literal code blocks, with optional shebang
(r'({{{)(\n#!.+)?', bygroups(Name.Builtin, Name.Namespace), 'codeblock'),
(r'(\'\'\'?|\|\||`|__|~~|\^|,,|::)', Comment), # Formatting
# Lists
(r'^( +)([.*-])( )', bygroups(Text, Name.Builtin, Text)),
(r'^( +)([a-z]{1,5}\.)( )', bygroups(Text, Name.Builtin, Text)),
# Other Formatting
(r'\[\[\w+.*?\]\]', Keyword), # Macro
(r'(\[[^\s\]]+)(\s+[^\]]+?)?(\])',
bygroups(Keyword, String, Keyword)), # Link
(r'^----+$', Keyword), # Horizontal rules
(r'[^\n\'\[{!_~^,|]+', Text),
(r'\n', Text),
(r'.', Text),
],
'codeblock': [
(r'}}}', Name.Builtin, '#pop'),
# these blocks are allowed to be nested in Trac, but not MoinMoin
(r'{{{', Text, '#push'),
(r'[^{}]+', Comment.Preproc), # slurp boring text
(r'.', Comment.Preproc), # allow loose { or }
],
}
class RstLexer(RegexLexer):
"""
For `reStructuredText <http://docutils.sf.net/rst.html>`_ markup.
*New in Pygments 0.7.*
Additional options accepted:
`handlecodeblocks`
      Highlight the contents of ``.. sourcecode:: language`` and
``.. code:: language`` directives with a lexer for the given
language (default: ``True``). *New in Pygments 0.8.*
"""
name = 'reStructuredText'
aliases = ['rst', 'rest', 'restructuredtext']
filenames = ['*.rst', '*.rest']
mimetypes = ["text/x-rst", "text/prs.fallenstein.rst"]
flags = re.MULTILINE
def _handle_sourcecode(self, match):
from pygments.lexers import get_lexer_by_name
# section header
yield match.start(1), Punctuation, match.group(1)
yield match.start(2), Text, match.group(2)
yield match.start(3), Operator.Word, match.group(3)
yield match.start(4), Punctuation, match.group(4)
yield match.start(5), Text, match.group(5)
yield match.start(6), Keyword, match.group(6)
yield match.start(7), Text, match.group(7)
# lookup lexer if wanted and existing
lexer = None
if self.handlecodeblocks:
try:
lexer = get_lexer_by_name(match.group(6).strip())
except ClassNotFound:
pass
indention = match.group(8)
indention_size = len(indention)
code = (indention + match.group(9) + match.group(10) + match.group(11))
# no lexer for this language. handle it like it was a code block
if lexer is None:
yield match.start(8), String, code
return
# highlight the lines with the lexer.
ins = []
codelines = code.splitlines(True)
code = ''
for line in codelines:
if len(line) > indention_size:
ins.append((len(code), [(0, Text, line[:indention_size])]))
code += line[indention_size:]
else:
code += line
for item in do_insertions(ins, lexer.get_tokens_unprocessed(code)):
yield item
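    # Note (added explanation): _handle_sourcecode() re-lexes the body of a
    # ``.. sourcecode:: lang`` / ``.. code:: lang`` directive with the lexer
    # registered for ``lang``, while do_insertions() re-interleaves the
    # stripped indentation as plain Text tokens at the correct offsets.
    # A minimal sketch:
    #
    #     rst = '.. sourcecode:: python\n\n    print(1)\n\n'
    #     tokens = list(RstLexer().get_tokens(rst))  # body lexed as Python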
# from docutils.parsers.rst.states
closers = u'\'")]}>\u2019\u201d\xbb!?'
unicode_delimiters = u'\u2010\u2011\u2012\u2013\u2014\u00a0'
end_string_suffix = (r'((?=$)|(?=[-/:.,; \n\x00%s%s]))'
% (re.escape(unicode_delimiters),
re.escape(closers)))
tokens = {
'root': [
# Heading with overline
(r'^(=+|-+|`+|:+|\.+|\'+|"+|~+|\^+|_+|\*+|\++|#+)([ \t]*\n)'
r'(.+)(\n)(\1)(\n)',
bygroups(Generic.Heading, Text, Generic.Heading,
Text, Generic.Heading, Text)),
# Plain heading
(r'^(\S.*)(\n)(={3,}|-{3,}|`{3,}|:{3,}|\.{3,}|\'{3,}|"{3,}|'
r'~{3,}|\^{3,}|_{3,}|\*{3,}|\+{3,}|#{3,})(\n)',
bygroups(Generic.Heading, Text, Generic.Heading, Text)),
# Bulleted lists
(r'^(\s*)([-*+])( .+\n(?:\1 .+\n)*)',
bygroups(Text, Number, using(this, state='inline'))),
# Numbered lists
(r'^(\s*)([0-9#ivxlcmIVXLCM]+\.)( .+\n(?:\1 .+\n)*)',
bygroups(Text, Number, using(this, state='inline'))),
(r'^(\s*)(\(?[0-9#ivxlcmIVXLCM]+\))( .+\n(?:\1 .+\n)*)',
bygroups(Text, Number, using(this, state='inline'))),
# Numbered, but keep words at BOL from becoming lists
(r'^(\s*)([A-Z]+\.)( .+\n(?:\1 .+\n)+)',
bygroups(Text, Number, using(this, state='inline'))),
(r'^(\s*)(\(?[A-Za-z]+\))( .+\n(?:\1 .+\n)+)',
bygroups(Text, Number, using(this, state='inline'))),
# Line blocks
(r'^(\s*)(\|)( .+\n(?:\| .+\n)*)',
bygroups(Text, Operator, using(this, state='inline'))),
# Sourcecode directives
(r'^( *\.\.)(\s*)((?:source)?code)(::)([ \t]*)([^\n]+)'
r'(\n[ \t]*\n)([ \t]+)(.*)(\n)((?:(?:\8.*|)\n)+)',
_handle_sourcecode),
# A directive
(r'^( *\.\.)(\s*)([\w:-]+?)(::)(?:([ \t]*)(.*))',
bygroups(Punctuation, Text, Operator.Word, Punctuation, Text,
using(this, state='inline'))),
# A reference target
(r'^( *\.\.)(\s*)(_(?:[^:\\]|\\.)+:)(.*?)$',
bygroups(Punctuation, Text, Name.Tag, using(this, state='inline'))),
# A footnote/citation target
(r'^( *\.\.)(\s*)(\[.+\])(.*?)$',
bygroups(Punctuation, Text, Name.Tag, using(this, state='inline'))),
# A substitution def
(r'^( *\.\.)(\s*)(\|.+\|)(\s*)([\w:-]+?)(::)(?:([ \t]*)(.*))',
bygroups(Punctuation, Text, Name.Tag, Text, Operator.Word,
Punctuation, Text, using(this, state='inline'))),
# Comments
(r'^ *\.\..*(\n( +.*\n|\n)+)?', Comment.Preproc),
# Field list
(r'^( *)(:[a-zA-Z-]+:)(\s*)$', bygroups(Text, Name.Class, Text)),
(r'^( *)(:.*?:)([ \t]+)(.*?)$',
bygroups(Text, Name.Class, Text, Name.Function)),
# Definition list
(r'^([^ ].*(?<!::)\n)((?:(?: +.*)\n)+)',
bygroups(using(this, state='inline'), using(this, state='inline'))),
# Code blocks
(r'(::)(\n[ \t]*\n)([ \t]+)(.*)(\n)((?:(?:\3.*|)\n)+)',
bygroups(String.Escape, Text, String, String, Text, String)),
include('inline'),
],
'inline': [
(r'\\.', Text), # escape
(r'``', String, 'literal'), # code
(r'(`.+?)(<.+?>)(`__?)', # reference with inline target
bygroups(String, String.Interpol, String)),
(r'`.+?`__?', String), # reference
(r'(`.+?`)(:[a-zA-Z0-9:-]+?:)?',
bygroups(Name.Variable, Name.Attribute)), # role
(r'(:[a-zA-Z0-9:-]+?:)(`.+?`)',
bygroups(Name.Attribute, Name.Variable)), # role (content first)
(r'\*\*.+?\*\*', Generic.Strong), # Strong emphasis
(r'\*.+?\*', Generic.Emph), # Emphasis
(r'\[.*?\]_', String), # Footnote or citation
(r'<.+?>', Name.Tag), # Hyperlink
(r'[^\\\n\[*`:]+', Text),
(r'.', Text),
],
'literal': [
(r'[^`]+', String),
(r'``' + end_string_suffix, String, '#pop'),
(r'`', String),
]
}
def __init__(self, **options):
self.handlecodeblocks = get_bool_opt(options, 'handlecodeblocks', True)
RegexLexer.__init__(self, **options)
def analyse_text(text):
if text[:2] == '..' and text[2:3] != '.':
return 0.3
p1 = text.find("\n")
p2 = text.find("\n", p1 + 1)
if (p2 > -1 and # has two lines
p1 * 2 + 1 == p2 and # they are the same length
            text[p1+1] in '-=' and       # the next line both starts and ends
            text[p1+1] == text[p2-1]):   # ...with the same underline character
return 0.5
class VimLexer(RegexLexer):
"""
Lexer for VimL script files.
*New in Pygments 0.8.*
"""
name = 'VimL'
aliases = ['vim']
filenames = ['*.vim', '.vimrc', '.exrc', '.gvimrc',
'_vimrc', '_exrc', '_gvimrc', 'vimrc', 'gvimrc']
mimetypes = ['text/x-vim']
flags = re.MULTILINE
tokens = {
'root': [
(r'^\s*".*', Comment),
(r'[ \t]+', Text),
# TODO: regexes can have other delims
(r'/(\\\\|\\/|[^\n/])*/', String.Regex),
(r'"(\\\\|\\"|[^\n"])*"', String.Double),
(r"'(\\\\|\\'|[^\n'])*'", String.Single),
# Who decided that doublequote was a good comment character??
(r'(?<=\s)"[^\-:.%#=*].*', Comment),
(r'-?\d+', Number),
(r'#[0-9a-f]{6}', Number.Hex),
(r'^:', Punctuation),
(r'[()<>+=!|,~-]', Punctuation), # Inexact list. Looks decent.
(r'\b(let|if|else|endif|elseif|fun|function|endfunction)\b',
Keyword),
(r'\b(NONE|bold|italic|underline|dark|light)\b', Name.Builtin),
(r'\b\w+\b', Name.Other), # These are postprocessed below
(r'.', Text),
],
}
def __init__(self, **options):
from pygments.lexers._vimbuiltins import command, option, auto
self._cmd = command
self._opt = option
self._aut = auto
RegexLexer.__init__(self, **options)
def is_in(self, w, mapping):
r"""
It's kind of difficult to decide if something might be a keyword
in VimL because it allows you to abbreviate them. In fact,
'ab[breviate]' is a good example. :ab, :abbre, or :abbreviate are
valid ways to call it so rather than making really awful regexps
like::
\bab(?:b(?:r(?:e(?:v(?:i(?:a(?:t(?:e)?)?)?)?)?)?)?)?\b
we match `\b\w+\b` and then call is_in() on those tokens. See
`scripts/get_vimkw.py` for how the lists are extracted.
"""
p = bisect(mapping, (w,))
if p > 0:
if mapping[p-1][0] == w[:len(mapping[p-1][0])] and \
mapping[p-1][1][:len(w)] == w: return True
if p < len(mapping):
return mapping[p][0] == w[:len(mapping[p][0])] and \
mapping[p][1][:len(w)] == w
return False
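    # Example (added; assumes the sorted (abbreviation, full_name) pairs
    # produced by scripts/get_vimkw.py):
    #
    #     mapping = [('ab', 'abbreviate'), ('au', 'autocmd')]
    #     VimLexer().is_in('abbre', mapping)  # -> True: 'ab' <= 'abbre' <= 'abbreviate'
    #     VimLexer().is_in('abz', mapping)    # -> False: not a valid abbreviation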
def get_tokens_unprocessed(self, text):
# TODO: builtins are only subsequent tokens on lines
# and 'keywords' only happen at the beginning except
# for :au ones
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text):
if token is Name.Other:
if self.is_in(value, self._cmd):
yield index, Keyword, value
elif self.is_in(value, self._opt) or \
self.is_in(value, self._aut):
yield index, Name.Builtin, value
else:
yield index, Text, value
else:
yield index, token, value
class GettextLexer(RegexLexer):
"""
Lexer for Gettext catalog files.
*New in Pygments 0.9.*
"""
name = 'Gettext Catalog'
aliases = ['pot', 'po']
filenames = ['*.pot', '*.po']
mimetypes = ['application/x-gettext', 'text/x-gettext', 'text/gettext']
tokens = {
'root': [
(r'^#,\s.*?$', Keyword.Type),
(r'^#:\s.*?$', Keyword.Declaration),
#(r'^#$', Comment),
(r'^(#|#\.\s|#\|\s|#~\s|#\s).*$', Comment.Single),
(r'^(")([A-Za-z-]+:)(.*")$',
bygroups(String, Name.Property, String)),
(r'^".*"$', String),
(r'^(msgid|msgid_plural|msgstr)(\s+)(".*")$',
bygroups(Name.Variable, Text, String)),
(r'^(msgstr\[)(\d)(\])(\s+)(".*")$',
bygroups(Name.Variable, Number.Integer, Name.Variable, Text, String)),
]
}
class SquidConfLexer(RegexLexer):
"""
Lexer for `squid <http://www.squid-cache.org/>`_ configuration files.
*New in Pygments 0.9.*
"""
name = 'SquidConf'
aliases = ['squidconf', 'squid.conf', 'squid']
filenames = ['squid.conf']
mimetypes = ['text/x-squidconf']
flags = re.IGNORECASE
keywords = [
"access_log", "acl", "always_direct", "announce_host",
"announce_period", "announce_port", "announce_to", "anonymize_headers",
"append_domain", "as_whois_server", "auth_param_basic",
"authenticate_children", "authenticate_program", "authenticate_ttl",
"broken_posts", "buffered_logs", "cache_access_log", "cache_announce",
"cache_dir", "cache_dns_program", "cache_effective_group",
"cache_effective_user", "cache_host", "cache_host_acl",
"cache_host_domain", "cache_log", "cache_mem", "cache_mem_high",
"cache_mem_low", "cache_mgr", "cachemgr_passwd", "cache_peer",
"cache_peer_access", "cahce_replacement_policy", "cache_stoplist",
"cache_stoplist_pattern", "cache_store_log", "cache_swap",
"cache_swap_high", "cache_swap_log", "cache_swap_low", "client_db",
"client_lifetime", "client_netmask", "connect_timeout", "coredump_dir",
"dead_peer_timeout", "debug_options", "delay_access", "delay_class",
"delay_initial_bucket_level", "delay_parameters", "delay_pools",
"deny_info", "dns_children", "dns_defnames", "dns_nameservers",
"dns_testnames", "emulate_httpd_log", "err_html_text",
"fake_user_agent", "firewall_ip", "forwarded_for", "forward_snmpd_port",
"fqdncache_size", "ftpget_options", "ftpget_program", "ftp_list_width",
"ftp_passive", "ftp_user", "half_closed_clients", "header_access",
"header_replace", "hierarchy_stoplist", "high_response_time_warning",
"high_page_fault_warning", "hosts_file", "htcp_port", "http_access",
"http_anonymizer", "httpd_accel", "httpd_accel_host",
"httpd_accel_port", "httpd_accel_uses_host_header",
"httpd_accel_with_proxy", "http_port", "http_reply_access",
"icp_access", "icp_hit_stale", "icp_port", "icp_query_timeout",
"ident_lookup", "ident_lookup_access", "ident_timeout",
"incoming_http_average", "incoming_icp_average", "inside_firewall",
"ipcache_high", "ipcache_low", "ipcache_size", "local_domain",
"local_ip", "logfile_rotate", "log_fqdn", "log_icp_queries",
"log_mime_hdrs", "maximum_object_size", "maximum_single_addr_tries",
"mcast_groups", "mcast_icp_query_timeout", "mcast_miss_addr",
"mcast_miss_encode_key", "mcast_miss_port", "memory_pools",
"memory_pools_limit", "memory_replacement_policy", "mime_table",
"min_http_poll_cnt", "min_icp_poll_cnt", "minimum_direct_hops",
"minimum_object_size", "minimum_retry_timeout", "miss_access",
"negative_dns_ttl", "negative_ttl", "neighbor_timeout",
"neighbor_type_domain", "netdb_high", "netdb_low", "netdb_ping_period",
"netdb_ping_rate", "never_direct", "no_cache", "passthrough_proxy",
"pconn_timeout", "pid_filename", "pinger_program", "positive_dns_ttl",
"prefer_direct", "proxy_auth", "proxy_auth_realm", "query_icmp",
"quick_abort", "quick_abort", "quick_abort_max", "quick_abort_min",
"quick_abort_pct", "range_offset_limit", "read_timeout",
"redirect_children", "redirect_program",
"redirect_rewrites_host_header", "reference_age", "reference_age",
"refresh_pattern", "reload_into_ims", "request_body_max_size",
"request_size", "request_timeout", "shutdown_lifetime",
"single_parent_bypass", "siteselect_timeout", "snmp_access",
"snmp_incoming_address", "snmp_port", "source_ping", "ssl_proxy",
"store_avg_object_size", "store_objects_per_bucket",
"strip_query_terms", "swap_level1_dirs", "swap_level2_dirs",
"tcp_incoming_address", "tcp_outgoing_address", "tcp_recv_bufsize",
"test_reachability", "udp_hit_obj", "udp_hit_obj_size",
"udp_incoming_address", "udp_outgoing_address", "unique_hostname",
"unlinkd_program", "uri_whitespace", "useragent_log",
"visible_hostname", "wais_relay", "wais_relay_host", "wais_relay_port",
]
opts = [
"proxy-only", "weight", "ttl", "no-query", "default", "round-robin",
"multicast-responder", "on", "off", "all", "deny", "allow", "via",
"parent", "no-digest", "heap", "lru", "realm", "children", "q1", "q2",
"credentialsttl", "none", "disable", "offline_toggle", "diskd",
]
actions = [
"shutdown", "info", "parameter", "server_list", "client_list",
r'squid\.conf',
]
actions_stats = [
"objects", "vm_objects", "utilization", "ipcache", "fqdncache", "dns",
"redirector", "io", "reply_headers", "filedescriptors", "netdb",
]
actions_log = ["status", "enable", "disable", "clear"]
acls = [
"url_regex", "urlpath_regex", "referer_regex", "port", "proto",
"req_mime_type", "rep_mime_type", "method", "browser", "user", "src",
"dst", "time", "dstdomain", "ident", "snmp_community",
]
ip_re = (
r'(?:(?:(?:[3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}|0x0*[0-9a-f]{1,2}|'
r'0+[1-3]?[0-7]{0,2})(?:\.(?:[3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}|'
r'0x0*[0-9a-f]{1,2}|0+[1-3]?[0-7]{0,2})){3})|(?!.*::.*::)(?:(?!:)|'
r':(?=:))(?:[0-9a-f]{0,4}(?:(?<=::)|(?<!::):)){6}(?:[0-9a-f]{0,4}'
r'(?:(?<=::)|(?<!::):)[0-9a-f]{0,4}(?:(?<=::)|(?<!:)|(?<=:)(?<!::):)|'
r'(?:25[0-4]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-4]|2[0-4]\d|1\d\d|'
r'[1-9]?\d)){3}))'
)
def makelistre(list):
return r'\b(?:' + '|'.join(list) + r')\b'
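    # For illustration (added): makelistre() only builds a word-bounded
    # alternation over its argument, e.g.
    #
    #     makelistre(['acl', 'http_access'])  # -> r'\b(?:acl|http_access)\b'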
tokens = {
'root': [
(r'\s+', Whitespace),
(r'#', Comment, 'comment'),
(makelistre(keywords), Keyword),
(makelistre(opts), Name.Constant),
# Actions
(makelistre(actions), String),
(r'stats/'+makelistre(actions), String),
(r'log/'+makelistre(actions)+r'=', String),
(makelistre(acls), Keyword),
(ip_re + r'(?:/(?:' + ip_re + r'|\b\d+\b))?', Number.Float),
(r'(?:\b\d+\b(?:-\b\d+|%)?)', Number),
(r'\S+', Text),
],
'comment': [
(r'\s*TAG:.*', String.Escape, '#pop'),
(r'.*', Comment, '#pop'),
],
}
class DebianControlLexer(RegexLexer):
"""
Lexer for Debian ``control`` files and ``apt-cache show <pkg>`` outputs.
*New in Pygments 0.9.*
"""
name = 'Debian Control file'
aliases = ['control', 'debcontrol']
filenames = ['control']
tokens = {
'root': [
(r'^(Description)', Keyword, 'description'),
(r'^(Maintainer)(:\s*)', bygroups(Keyword, Text), 'maintainer'),
(r'^((Build-)?Depends)', Keyword, 'depends'),
(r'^((?:Python-)?Version)(:\s*)(\S+)$',
bygroups(Keyword, Text, Number)),
(r'^((?:Installed-)?Size)(:\s*)(\S+)$',
bygroups(Keyword, Text, Number)),
(r'^(MD5Sum|SHA1|SHA256)(:\s*)(\S+)$',
bygroups(Keyword, Text, Number)),
(r'^([a-zA-Z\-0-9\.]*?)(:\s*)(.*?)$',
bygroups(Keyword, Whitespace, String)),
],
'maintainer': [
(r'<[^>]+>', Generic.Strong),
(r'<[^>]+>$', Generic.Strong, '#pop'),
(r',\n?', Text),
(r'.', Text),
],
'description': [
(r'(.*)(Homepage)(: )(\S+)',
bygroups(Text, String, Name, Name.Class)),
(r':.*\n', Generic.Strong),
(r' .*\n', Text),
('', Text, '#pop'),
],
'depends': [
(r':\s*', Text),
(r'(\$)(\{)(\w+\s*:\s*\w+)', bygroups(Operator, Text, Name.Entity)),
(r'\(', Text, 'depend_vers'),
(r',', Text),
(r'\|', Operator),
(r'[\s]+', Text),
(r'[}\)]\s*$', Text, '#pop'),
(r'}', Text),
(r'[^,]$', Name.Function, '#pop'),
(r'([\+\.a-zA-Z0-9-])(\s*)', bygroups(Name.Function, Text)),
(r'\[.*?\]', Name.Entity),
],
'depend_vers': [
(r'\),', Text, '#pop'),
(r'\)[^,]', Text, '#pop:2'),
(r'([><=]+)(\s*)([^\)]+)', bygroups(Operator, Text, Number))
]
}
class YamlLexerContext(LexerContext):
"""Indentation context for the YAML lexer."""
def __init__(self, *args, **kwds):
super(YamlLexerContext, self).__init__(*args, **kwds)
self.indent_stack = []
self.indent = -1
self.next_indent = 0
self.block_scalar_indent = None
class YamlLexer(ExtendedRegexLexer):
"""
Lexer for `YAML <http://yaml.org/>`_, a human-friendly data serialization
language.
*New in Pygments 0.11.*
"""
name = 'YAML'
aliases = ['yaml']
filenames = ['*.yaml', '*.yml']
mimetypes = ['text/x-yaml']
def something(token_class):
"""Do not produce empty tokens."""
def callback(lexer, match, context):
text = match.group()
if not text:
return
yield match.start(), token_class, text
context.pos = match.end()
return callback
def reset_indent(token_class):
"""Reset the indentation levels."""
def callback(lexer, match, context):
text = match.group()
context.indent_stack = []
context.indent = -1
context.next_indent = 0
context.block_scalar_indent = None
yield match.start(), token_class, text
context.pos = match.end()
return callback
def save_indent(token_class, start=False):
"""Save a possible indentation level."""
def callback(lexer, match, context):
text = match.group()
extra = ''
if start:
context.next_indent = len(text)
if context.next_indent < context.indent:
while context.next_indent < context.indent:
context.indent = context.indent_stack.pop()
if context.next_indent > context.indent:
extra = text[context.indent:]
text = text[:context.indent]
else:
context.next_indent += len(text)
if text:
yield match.start(), token_class, text
if extra:
yield match.start()+len(text), token_class.Error, extra
context.pos = match.end()
return callback
def set_indent(token_class, implicit=False):
"""Set the previously saved indentation level."""
def callback(lexer, match, context):
text = match.group()
if context.indent < context.next_indent:
context.indent_stack.append(context.indent)
context.indent = context.next_indent
if not implicit:
context.next_indent += len(text)
yield match.start(), token_class, text
context.pos = match.end()
return callback
def set_block_scalar_indent(token_class):
"""Set an explicit indentation level for a block scalar."""
def callback(lexer, match, context):
text = match.group()
context.block_scalar_indent = None
if not text:
return
increment = match.group(1)
if increment:
current_indent = max(context.indent, 0)
increment = int(increment)
context.block_scalar_indent = current_indent + increment
if text:
yield match.start(), token_class, text
context.pos = match.end()
return callback
def parse_block_scalar_empty_line(indent_token_class, content_token_class):
"""Process an empty line in a block scalar."""
def callback(lexer, match, context):
text = match.group()
if (context.block_scalar_indent is None or
len(text) <= context.block_scalar_indent):
if text:
yield match.start(), indent_token_class, text
else:
indentation = text[:context.block_scalar_indent]
content = text[context.block_scalar_indent:]
yield match.start(), indent_token_class, indentation
yield (match.start()+context.block_scalar_indent,
content_token_class, content)
context.pos = match.end()
return callback
def parse_block_scalar_indent(token_class):
"""Process indentation spaces in a block scalar."""
def callback(lexer, match, context):
text = match.group()
if context.block_scalar_indent is None:
if len(text) <= max(context.indent, 0):
context.stack.pop()
context.stack.pop()
return
context.block_scalar_indent = len(text)
else:
if len(text) < context.block_scalar_indent:
context.stack.pop()
context.stack.pop()
return
if text:
yield match.start(), token_class, text
context.pos = match.end()
return callback
def parse_plain_scalar_indent(token_class):
"""Process indentation spaces in a plain scalar."""
def callback(lexer, match, context):
text = match.group()
if len(text) <= context.indent:
context.stack.pop()
context.stack.pop()
return
if text:
yield match.start(), token_class, text
context.pos = match.end()
return callback
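    # Note (added explanation): each helper above returns a callback usable
    # in ExtendedRegexLexer rules. Unlike a plain token type, a callback
    # receives (lexer, match, context) and may both yield tokens and mutate
    # the YamlLexerContext indentation state. A minimal callback skeleton:
    #
    #     def emit(token_class):
    #         def callback(lexer, match, context):
    #             yield match.start(), token_class, match.group()
    #             context.pos = match.end()
    #         return callback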
tokens = {
# the root rules
'root': [
# ignored whitespaces
(r'[ ]+(?=#|$)', Text),
# line breaks
(r'\n+', Text),
# a comment
(r'#[^\n]*', Comment.Single),
# the '%YAML' directive
(r'^%YAML(?=[ ]|$)', reset_indent(Name.Tag), 'yaml-directive'),
# the %TAG directive
(r'^%TAG(?=[ ]|$)', reset_indent(Name.Tag), 'tag-directive'),
# document start and document end indicators
(r'^(?:---|\.\.\.)(?=[ ]|$)', reset_indent(Name.Namespace),
'block-line'),
# indentation spaces
(r'[ ]*(?![ \t\n\r\f\v]|$)', save_indent(Text, start=True),
('block-line', 'indentation')),
],
# trailing whitespaces after directives or a block scalar indicator
'ignored-line': [
# ignored whitespaces
(r'[ ]+(?=#|$)', Text),
# a comment
(r'#[^\n]*', Comment.Single),
# line break
(r'\n', Text, '#pop:2'),
],
# the %YAML directive
'yaml-directive': [
# the version number
(r'([ ]+)([0-9]+\.[0-9]+)',
bygroups(Text, Number), 'ignored-line'),
],
        # the %TAG directive
'tag-directive': [
# a tag handle and the corresponding prefix
(r'([ ]+)(!|![0-9A-Za-z_-]*!)'
r'([ ]+)(!|!?[0-9A-Za-z;/?:@&=+$,_.!~*\'()\[\]%-]+)',
bygroups(Text, Keyword.Type, Text, Keyword.Type),
'ignored-line'),
],
# block scalar indicators and indentation spaces
'indentation': [
# trailing whitespaces are ignored
(r'[ ]*$', something(Text), '#pop:2'),
            # whitespaces preceding block collection indicators
(r'[ ]+(?=[?:-](?:[ ]|$))', save_indent(Text)),
# block collection indicators
(r'[?:-](?=[ ]|$)', set_indent(Punctuation.Indicator)),
            # the beginning of a block line
(r'[ ]*', save_indent(Text), '#pop'),
],
# an indented line in the block context
'block-line': [
# the line end
(r'[ ]*(?=#|$)', something(Text), '#pop'),
# whitespaces separating tokens
(r'[ ]+', Text),
            # tags, anchors and aliases
include('descriptors'),
# block collections and scalars
include('block-nodes'),
# flow collections and quoted scalars
include('flow-nodes'),
# a plain scalar
(r'(?=[^ \t\n\r\f\v?:,\[\]{}#&*!|>\'"%@`-]|[?:-][^ \t\n\r\f\v])',
something(Name.Variable),
'plain-scalar-in-block-context'),
],
# tags, anchors, aliases
'descriptors' : [
# a full-form tag
(r'!<[0-9A-Za-z;/?:@&=+$,_.!~*\'()\[\]%-]+>', Keyword.Type),
# a tag in the form '!', '!suffix' or '!handle!suffix'
(r'!(?:[0-9A-Za-z_-]+)?'
r'(?:![0-9A-Za-z;/?:@&=+$,_.!~*\'()\[\]%-]+)?', Keyword.Type),
# an anchor
(r'&[0-9A-Za-z_-]+', Name.Label),
# an alias
(r'\*[0-9A-Za-z_-]+', Name.Variable),
],
# block collections and scalars
'block-nodes': [
# implicit key
(r':(?=[ ]|$)', set_indent(Punctuation.Indicator, implicit=True)),
# literal and folded scalars
(r'[|>]', Punctuation.Indicator,
('block-scalar-content', 'block-scalar-header')),
],
# flow collections and quoted scalars
'flow-nodes': [
# a flow sequence
(r'\[', Punctuation.Indicator, 'flow-sequence'),
# a flow mapping
(r'\{', Punctuation.Indicator, 'flow-mapping'),
# a single-quoted scalar
(r'\'', String, 'single-quoted-scalar'),
# a double-quoted scalar
(r'\"', String, 'double-quoted-scalar'),
],
# the content of a flow collection
'flow-collection': [
# whitespaces
(r'[ ]+', Text),
# line breaks
(r'\n+', Text),
# a comment
(r'#[^\n]*', Comment.Single),
# simple indicators
(r'[?:,]', Punctuation.Indicator),
# tags, anchors and aliases
include('descriptors'),
# nested collections and quoted scalars
include('flow-nodes'),
# a plain scalar
(r'(?=[^ \t\n\r\f\v?:,\[\]{}#&*!|>\'"%@`])',
something(Name.Variable),
'plain-scalar-in-flow-context'),
],
# a flow sequence indicated by '[' and ']'
'flow-sequence': [
# include flow collection rules
include('flow-collection'),
# the closing indicator
(r'\]', Punctuation.Indicator, '#pop'),
],
# a flow mapping indicated by '{' and '}'
'flow-mapping': [
# include flow collection rules
include('flow-collection'),
# the closing indicator
(r'\}', Punctuation.Indicator, '#pop'),
],
# block scalar lines
'block-scalar-content': [
# line break
(r'\n', Text),
# empty line
(r'^[ ]+$',
parse_block_scalar_empty_line(Text, Name.Constant)),
# indentation spaces (we may leave the state here)
(r'^[ ]*', parse_block_scalar_indent(Text)),
# line content
(r'[^\n\r\f\v]+', Name.Constant),
],
        # the header of a literal or folded scalar
'block-scalar-header': [
# indentation indicator followed by chomping flag
(r'([1-9])?[+-]?(?=[ ]|$)',
set_block_scalar_indent(Punctuation.Indicator),
'ignored-line'),
# chomping flag followed by indentation indicator
(r'[+-]?([1-9])?(?=[ ]|$)',
set_block_scalar_indent(Punctuation.Indicator),
'ignored-line'),
],
# ignored and regular whitespaces in quoted scalars
'quoted-scalar-whitespaces': [
# leading and trailing whitespaces are ignored
(r'^[ ]+', Text),
(r'[ ]+$', Text),
# line breaks are ignored
(r'\n+', Text),
# other whitespaces are a part of the value
(r'[ ]+', Name.Variable),
],
# single-quoted scalars
'single-quoted-scalar': [
# include whitespace and line break rules
include('quoted-scalar-whitespaces'),
# escaping of the quote character
(r'\'\'', String.Escape),
# regular non-whitespace characters
(r'[^ \t\n\r\f\v\']+', String),
# the closing quote
(r'\'', String, '#pop'),
],
# double-quoted scalars
'double-quoted-scalar': [
# include whitespace and line break rules
include('quoted-scalar-whitespaces'),
# escaping of special characters
(r'\\[0abt\tn\nvfre "\\N_LP]', String),
# escape codes
(r'\\(?:x[0-9A-Fa-f]{2}|u[0-9A-Fa-f]{4}|U[0-9A-Fa-f]{8})',
String.Escape),
# regular non-whitespace characters
(r'[^ \t\n\r\f\v\"\\]+', String),
# the closing quote
(r'"', String, '#pop'),
],
# the beginning of a new line while scanning a plain scalar
'plain-scalar-in-block-context-new-line': [
# empty lines
(r'^[ ]+$', Text),
# line breaks
(r'\n+', Text),
# document start and document end indicators
(r'^(?=---|\.\.\.)', something(Name.Namespace), '#pop:3'),
# indentation spaces (we may leave the block line state here)
(r'^[ ]*', parse_plain_scalar_indent(Text), '#pop'),
],
# a plain scalar in the block context
'plain-scalar-in-block-context': [
# the scalar ends with the ':' indicator
(r'[ ]*(?=:[ ]|:$)', something(Text), '#pop'),
# the scalar ends with whitespaces followed by a comment
(r'[ ]+(?=#)', Text, '#pop'),
# trailing whitespaces are ignored
(r'[ ]+$', Text),
# line breaks are ignored
(r'\n+', Text, 'plain-scalar-in-block-context-new-line'),
# other whitespaces are a part of the value
(r'[ ]+', Literal.Scalar.Plain),
# regular non-whitespace characters
(r'(?::(?![ \t\n\r\f\v])|[^ \t\n\r\f\v:])+', Literal.Scalar.Plain),
],
        # a plain scalar in the flow context
'plain-scalar-in-flow-context': [
# the scalar ends with an indicator character
(r'[ ]*(?=[,:?\[\]{}])', something(Text), '#pop'),
# the scalar ends with a comment
(r'[ ]+(?=#)', Text, '#pop'),
# leading and trailing whitespaces are ignored
(r'^[ ]+', Text),
(r'[ ]+$', Text),
# line breaks are ignored
(r'\n+', Text),
# other whitespaces are a part of the value
(r'[ ]+', Name.Variable),
# regular non-whitespace characters
(r'[^ \t\n\r\f\v,:?\[\]{}]+', Name.Variable),
],
}
def get_tokens_unprocessed(self, text=None, context=None):
if context is None:
context = YamlLexerContext(text, 0)
return super(YamlLexer, self).get_tokens_unprocessed(text, context)
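# Usage sketch (added): indentation tracking is internal to the context
# object, so tokens are obtained as with any other lexer:
#
#     for token, value in YamlLexer().get_tokens('key:\n  - item\n'):
#         print(token, repr(value))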
class LighttpdConfLexer(RegexLexer):
"""
Lexer for `Lighttpd <http://lighttpd.net/>`_ configuration files.
*New in Pygments 0.11.*
"""
name = 'Lighttpd configuration file'
aliases = ['lighty', 'lighttpd']
filenames = []
mimetypes = ['text/x-lighttpd-conf']
tokens = {
'root': [
(r'#.*\n', Comment.Single),
(r'/\S*', Name), # pathname
(r'[a-zA-Z._-]+', Keyword),
(r'\d+\.\d+\.\d+\.\d+(?:/\d+)?', Number),
(r'[0-9]+', Number),
(r'=>|=~|\+=|==|=|\+', Operator),
(r'\$[A-Z]+', Name.Builtin),
(r'[(){}\[\],]', Punctuation),
(r'"([^"\\]*(?:\\.[^"\\]*)*)"', String.Double),
(r'\s+', Text),
],
}
class NginxConfLexer(RegexLexer):
"""
Lexer for `Nginx <http://nginx.net/>`_ configuration files.
*New in Pygments 0.11.*
"""
name = 'Nginx configuration file'
aliases = ['nginx']
filenames = []
mimetypes = ['text/x-nginx-conf']
tokens = {
'root': [
(r'(include)(\s+)([^\s;]+)', bygroups(Keyword, Text, Name)),
(r'[^\s;#]+', Keyword, 'stmt'),
include('base'),
],
'block': [
(r'}', Punctuation, '#pop:2'),
(r'[^\s;#]+', Keyword.Namespace, 'stmt'),
include('base'),
],
'stmt': [
(r'{', Punctuation, 'block'),
(r';', Punctuation, '#pop'),
include('base'),
],
'base': [
(r'#.*\n', Comment.Single),
(r'on|off', Name.Constant),
(r'\$[^\s;#()]+', Name.Variable),
(r'([a-z0-9.-]+)(:)([0-9]+)',
bygroups(Name, Punctuation, Number.Integer)),
(r'[a-z-]+/[a-z-+]+', String), # mimetype
#(r'[a-zA-Z._-]+', Keyword),
(r'[0-9]+[km]?\b', Number.Integer),
(r'(~)(\s*)([^\s{]+)', bygroups(Punctuation, Text, String.Regex)),
(r'[:=~]', Punctuation),
(r'[^\s;#{}$]+', String), # catch all
(r'/[^\s;#]*', Name), # pathname
(r'\s+', Text),
(r'[$;]', Text), # leftover characters
],
}
class CMakeLexer(RegexLexer):
"""
Lexer for `CMake <http://cmake.org/Wiki/CMake>`_ files.
*New in Pygments 1.2.*
"""
name = 'CMake'
aliases = ['cmake']
filenames = ['*.cmake', 'CMakeLists.txt']
mimetypes = ['text/x-cmake']
tokens = {
'root': [
#(r'(ADD_CUSTOM_COMMAND|ADD_CUSTOM_TARGET|ADD_DEFINITIONS|'
# r'ADD_DEPENDENCIES|ADD_EXECUTABLE|ADD_LIBRARY|ADD_SUBDIRECTORY|'
# r'ADD_TEST|AUX_SOURCE_DIRECTORY|BUILD_COMMAND|BUILD_NAME|'
# r'CMAKE_MINIMUM_REQUIRED|CONFIGURE_FILE|CREATE_TEST_SOURCELIST|'
# r'ELSE|ELSEIF|ENABLE_LANGUAGE|ENABLE_TESTING|ENDFOREACH|'
# r'ENDFUNCTION|ENDIF|ENDMACRO|ENDWHILE|EXEC_PROGRAM|'
# r'EXECUTE_PROCESS|EXPORT_LIBRARY_DEPENDENCIES|FILE|FIND_FILE|'
# r'FIND_LIBRARY|FIND_PACKAGE|FIND_PATH|FIND_PROGRAM|FLTK_WRAP_UI|'
# r'FOREACH|FUNCTION|GET_CMAKE_PROPERTY|GET_DIRECTORY_PROPERTY|'
# r'GET_FILENAME_COMPONENT|GET_SOURCE_FILE_PROPERTY|'
# r'GET_TARGET_PROPERTY|GET_TEST_PROPERTY|IF|INCLUDE|'
# r'INCLUDE_DIRECTORIES|INCLUDE_EXTERNAL_MSPROJECT|'
# r'INCLUDE_REGULAR_EXPRESSION|INSTALL|INSTALL_FILES|'
# r'INSTALL_PROGRAMS|INSTALL_TARGETS|LINK_DIRECTORIES|'
# r'LINK_LIBRARIES|LIST|LOAD_CACHE|LOAD_COMMAND|MACRO|'
# r'MAKE_DIRECTORY|MARK_AS_ADVANCED|MATH|MESSAGE|OPTION|'
# r'OUTPUT_REQUIRED_FILES|PROJECT|QT_WRAP_CPP|QT_WRAP_UI|REMOVE|'
# r'REMOVE_DEFINITIONS|SEPARATE_ARGUMENTS|SET|'
# r'SET_DIRECTORY_PROPERTIES|SET_SOURCE_FILES_PROPERTIES|'
# r'SET_TARGET_PROPERTIES|SET_TESTS_PROPERTIES|SITE_NAME|'
# r'SOURCE_GROUP|STRING|SUBDIR_DEPENDS|SUBDIRS|'
# r'TARGET_LINK_LIBRARIES|TRY_COMPILE|TRY_RUN|UNSET|'
# r'USE_MANGLED_MESA|UTILITY_SOURCE|VARIABLE_REQUIRES|'
# r'VTK_MAKE_INSTANTIATOR|VTK_WRAP_JAVA|VTK_WRAP_PYTHON|'
# r'VTK_WRAP_TCL|WHILE|WRITE_FILE|'
# r'COUNTARGS)\b', Name.Builtin, 'args'),
(r'\b(\w+)([ \t]*)(\()', bygroups(Name.Builtin, Text,
Punctuation), 'args'),
include('keywords'),
include('ws')
],
'args': [
(r'\(', Punctuation, '#push'),
(r'\)', Punctuation, '#pop'),
(r'(\${)(.+?)(})', bygroups(Operator, Name.Variable, Operator)),
(r'(?s)".*?"', String.Double),
(r'\\\S+', String),
(r'[^\)$"# \t\n]+', String),
(r'\n', Text), # explicitly legal
include('keywords'),
include('ws')
],
'string': [
],
'keywords': [
(r'\b(WIN32|UNIX|APPLE|CYGWIN|BORLAND|MINGW|MSVC|MSVC_IDE|MSVC60|'
r'MSVC70|MSVC71|MSVC80|MSVC90)\b', Keyword),
],
'ws': [
(r'[ \t]+', Text),
(r'#.+\n', Comment),
]
}
class HttpLexer(RegexLexer):
"""
Lexer for HTTP sessions.
*New in Pygments 1.5.*
"""
name = 'HTTP'
aliases = ['http']
flags = re.DOTALL
def header_callback(self, match):
if match.group(1).lower() == 'content-type':
content_type = match.group(5).strip()
if ';' in content_type:
content_type = content_type[:content_type.find(';')].strip()
self.content_type = content_type
yield match.start(1), Name.Attribute, match.group(1)
yield match.start(2), Text, match.group(2)
yield match.start(3), Operator, match.group(3)
yield match.start(4), Text, match.group(4)
yield match.start(5), Literal, match.group(5)
yield match.start(6), Text, match.group(6)
def continuous_header_callback(self, match):
yield match.start(1), Text, match.group(1)
yield match.start(2), Literal, match.group(2)
yield match.start(3), Text, match.group(3)
def content_callback(self, match):
content_type = getattr(self, 'content_type', None)
content = match.group()
offset = match.start()
if content_type:
from pygments.lexers import get_lexer_for_mimetype
try:
lexer = get_lexer_for_mimetype(content_type)
except ClassNotFound:
pass
else:
for idx, token, value in lexer.get_tokens_unprocessed(content):
yield offset + idx, token, value
return
yield offset, Text, content
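    # Note (added explanation): content_callback() delegates the message
    # body to a lexer picked from the Content-Type header remembered by
    # header_callback(); e.g. a body following "Content-Type: text/html"
    # would be re-lexed as HTML when such a lexer is registered, and falls
    # back to a single Text token otherwise. A hypothetical example:
    #
    #     http = 'GET / HTTP/1.1\r\nContent-Type: text/html\r\n\r\n<p>hi</p>'
    #     tokens = list(HttpLexer().get_tokens(http))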
tokens = {
'root': [
(r'(GET|POST|PUT|DELETE|HEAD|OPTIONS|TRACE|PATCH)( +)([^ ]+)( +)'
r'(HTTP)(/)(1\.[01])(\r?\n|$)',
bygroups(Name.Function, Text, Name.Namespace, Text,
Keyword.Reserved, Operator, Number, Text),
'headers'),
(r'(HTTP)(/)(1\.[01])( +)(\d{3})( +)([^\r\n]+)(\r?\n|$)',
bygroups(Keyword.Reserved, Operator, Number, Text, Number,
Text, Name.Exception, Text),
'headers'),
],
'headers': [
(r'([^\s:]+)( *)(:)( *)([^\r\n]+)(\r?\n|$)', header_callback),
(r'([\t ]+)([^\r\n]+)(\r?\n|$)', continuous_header_callback),
(r'\r?\n', Text, 'content')
],
'content': [
(r'.+', content_callback)
]
}
class PyPyLogLexer(RegexLexer):
"""
Lexer for PyPy log files.
*New in Pygments 1.5.*
"""
name = "PyPy Log"
aliases = ["pypylog", "pypy"]
filenames = ["*.pypylog"]
mimetypes = ['application/x-pypylog']
tokens = {
"root": [
(r"\[\w+\] {jit-log-.*?$", Keyword, "jit-log"),
(r"\[\w+\] {jit-backend-counts$", Keyword, "jit-backend-counts"),
include("extra-stuff"),
],
"jit-log": [
(r"\[\w+\] jit-log-.*?}$", Keyword, "#pop"),
(r"^\+\d+: ", Comment),
(r"--end of the loop--", Comment),
(r"[ifp]\d+", Name),
(r"ptr\d+", Name),
(r"(\()(\w+(?:\.\w+)?)(\))",
bygroups(Punctuation, Name.Builtin, Punctuation)),
(r"[\[\]=,()]", Punctuation),
(r"(\d+\.\d+|inf|-inf)", Number.Float),
(r"-?\d+", Number.Integer),
(r"'.*'", String),
(r"(None|descr|ConstClass|ConstPtr|TargetToken)", Name),
(r"<.*?>+", Name.Builtin),
(r"(label|debug_merge_point|jump|finish)", Name.Class),
(r"(int_add_ovf|int_add|int_sub_ovf|int_sub|int_mul_ovf|int_mul|"
r"int_floordiv|int_mod|int_lshift|int_rshift|int_and|int_or|"
r"int_xor|int_eq|int_ne|int_ge|int_gt|int_le|int_lt|int_is_zero|"
r"int_is_true|"
r"uint_floordiv|uint_ge|uint_lt|"
r"float_add|float_sub|float_mul|float_truediv|float_neg|"
r"float_eq|float_ne|float_ge|float_gt|float_le|float_lt|float_abs|"
r"ptr_eq|ptr_ne|instance_ptr_eq|instance_ptr_ne|"
r"cast_int_to_float|cast_float_to_int|"
r"force_token|quasiimmut_field|same_as|virtual_ref_finish|"
r"virtual_ref|mark_opaque_ptr|"
r"call_may_force|call_assembler|call_loopinvariant|"
r"call_release_gil|call_pure|call|"
r"new_with_vtable|new_array|newstr|newunicode|new|"
r"arraylen_gc|"
r"getarrayitem_gc_pure|getarrayitem_gc|setarrayitem_gc|"
r"getarrayitem_raw|setarrayitem_raw|getfield_gc_pure|"
r"getfield_gc|getinteriorfield_gc|setinteriorfield_gc|"
r"getfield_raw|setfield_gc|setfield_raw|"
r"strgetitem|strsetitem|strlen|copystrcontent|"
r"unicodegetitem|unicodesetitem|unicodelen|"
r"guard_true|guard_false|guard_value|guard_isnull|"
r"guard_nonnull_class|guard_nonnull|guard_class|guard_no_overflow|"
r"guard_not_forced|guard_no_exception|guard_not_invalidated)",
Name.Builtin),
include("extra-stuff"),
],
"jit-backend-counts": [
(r"\[\w+\] jit-backend-counts}$", Keyword, "#pop"),
(r":", Punctuation),
(r"\d+", Number),
include("extra-stuff"),
],
"extra-stuff": [
(r"\s+", Text),
(r"#.*?$", Comment),
],
}
class HxmlLexer(RegexLexer):
"""
Lexer for `haXe build <http://haxe.org/doc/compiler>`_ files.
*New in Pygments 1.6.*
"""
name = 'Hxml'
aliases = ['haxeml', 'hxml']
filenames = ['*.hxml']
tokens = {
'root': [
            # Separator
(r'(--)(next)', bygroups(Punctuation, Generic.Heading)),
# Compiler switches with one dash
(r'(-)(prompt|debug|v)', bygroups(Punctuation, Keyword.Keyword)),
            # Compiler switches with two dashes
(r'(--)(neko-source|flash-strict|flash-use-stage|no-opt|no-traces|'
r'no-inline|times|no-output)', bygroups(Punctuation, Keyword)),
# Targets and other options that take an argument
(r'(-)(cpp|js|neko|x|as3|swf9?|swf-lib|php|xml|main|lib|D|resource|'
r'cp|cmd)( +)(.+)',
bygroups(Punctuation, Keyword, Whitespace, String)),
# Options that take only numerical arguments
            (r'(-)(swf-version)( +)(\d+)',
             bygroups(Punctuation, Keyword, Whitespace, Number.Integer)),
            # An option that defines the size, the fps and the background
            # color of a flash movie
(r'(-)(swf-header)( +)(\d+)(:)(\d+)(:)(\d+)(:)([A-Fa-f0-9]{6})',
bygroups(Punctuation, Keyword, Whitespace, Number.Integer,
Punctuation, Number.Integer, Punctuation, Number.Integer,
Punctuation, Number.Hex)),
            # Options with two dashes that take arguments
(r'(--)(js-namespace|php-front|php-lib|remap|gen-hx-classes)( +)'
r'(.+)', bygroups(Punctuation, Keyword, Whitespace, String)),
# Single line comment, multiline ones are not allowed.
(r'#.*', Comment.Single)
]
}
class EbnfLexer(RegexLexer):
"""
Lexer for `ISO/IEC 14977 EBNF
<http://en.wikipedia.org/wiki/Extended_Backus%E2%80%93Naur_Form>`_
grammars.
*New in Pygments 1.7.*
"""
name = 'EBNF'
aliases = ['ebnf']
filenames = ['*.ebnf']
mimetypes = ['text/x-ebnf']
tokens = {
'root': [
include('whitespace'),
include('comment_start'),
include('identifier'),
(r'=', Operator, 'production'),
],
'production': [
include('whitespace'),
include('comment_start'),
include('identifier'),
(r'"[^"]*"', String.Double),
(r"'[^']*'", String.Single),
(r'(\?[^?]*\?)', Name.Entity),
(r'[\[\]{}(),|]', Punctuation),
(r'-', Operator),
(r';', Punctuation, '#pop'),
],
'whitespace': [
(r'\s+', Text),
],
'comment_start': [
(r'\(\*', Comment.Multiline, 'comment'),
],
'comment': [
(r'[^*)]', Comment.Multiline),
include('comment_start'),
(r'\*\)', Comment.Multiline, '#pop'),
(r'[*)]', Comment.Multiline),
],
'identifier': [
(r'([a-zA-Z][a-zA-Z0-9 \-]*)', Keyword),
],
}
| mit |
jfillmore/hoops | tests/models_tests/test_model_basekit_site.py | 1 | 4446 | from sqlalchemy.exc import IntegrityError
from tests.api_tests import APITestBase
from tests.models_tests import ModelsTestBase
from test_models.basekit import BaseKitBrand, BaseKitSite
from test_models.core import User
from hoops.common import BaseModel
import time
class TestBaseKitUserModel(ModelsTestBase):
def test_01_populate(self):
        ''' Populate the required tables. '''
APITestBase.populate(self.db)
def test_02_for_fk_reference_to_basekit_brand(self):
''' Check the FK reference of BaseKitSite to BaseKitBrand.'''
my_bk_site = BaseKitSite.query.first()
my_bk_brand = my_bk_site.brand # via backref
bk_sites_for_brand = BaseKitBrand.query.filter_by(
id=my_bk_brand.id).first().basekit_sites # via relationship
assert my_bk_site in bk_sites_for_brand, "Test for checking the FK reference of BaseKitSite to BaseKitBrand failed"
def test_03_for_fk_reference_to_user(self):
''' Check the FK reference of BaseKitSite to User. '''
my_bk_site = BaseKitSite.query.first()
my_bk_user = my_bk_site.user # via backref
bk_sites_for_user = User.query.filter_by(
id=my_bk_user.id).first().basekit_sites # via relationship
assert my_bk_site in bk_sites_for_user, "Test for checking the FK reference of BaseKitSite to User failed"
def test_04_for_repr(self):
'''Test the __repr__ of BaseKitSite model.'''
first_bk_site = BaseKitSite.query.first()
assert "<BaseKitSite('" + str(first_bk_site.id) + "')>" in first_bk_site.__repr__(
), "Test for BaseKitSite __repr__ failed"
def test_05_for_unique_bk_site_id(self):
'''Test the uniqueness of bk_site_id.'''
my_bk_site_id = BaseKitSite.query.first().bk_site_id
bk_brand = BaseKitBrand.query.first()
user = User.query.first()
self.db.session.add(
BaseKitSite(brand=bk_brand, user=user, bk_site_id=my_bk_site_id, basekit_package_id='2', subdomain="com"))
        try:
            self.db.session.commit()
            raised = False
        except IntegrityError:
            self.db.session.rollback()
            raised = True
        assert raised, "Test for checking uniqueness of bk_site_id failed"
def test_06_for_unique_basekit_package_id(self):
'''Test the uniqueness of basekit_package_id.'''
my_basekit_package_id = BaseKitSite.query.first().basekit_package_id
brand = BaseKitBrand.query.first()
user = User.query.first()
self.db.session.add(
BaseKitSite(brand=brand, user=user, bk_site_id='5', basekit_package_id=my_basekit_package_id, subdomain='in'))
        try:
            self.db.session.commit()
            raised = False
        except IntegrityError:
            self.db.session.rollback()
            raised = True
        assert raised, "Test for checking uniqueness of basekit_package_id failed"
def test_07_get_the_inherited_class(self):
''' Check the inherited BaseModel model class.'''
baskekit_site = BaseKitSite()
assert isinstance(
baskekit_site, BaseModel), "Test to check inheritance of BaseKitSite from BaseModel failed"
def test_08_for_bk_user_updation(self):
        ''' Check updating a BaseKitSite. '''
first_bk_site = BaseKitSite.query.first()
my_bk_site_created_at = first_bk_site.created_at
my_bk_site_updated_at = first_bk_site.updated_at
time.sleep(1)
self.db.session.merge(
BaseKitSite(id=first_bk_site.id, bk_site_id='8'))
try:
self.db.session.commit()
assert my_bk_site_created_at == BaseKitSite.query.first().created_at, 'Test for checking whether "created_at" is not changing failed'
assert my_bk_site_updated_at != BaseKitSite.query.first().updated_at, 'Test for checking whether "updated_at" changes failed'
assert True
        except Exception, e:
            self.db.session.rollback()
            raise AssertionError(
                'Test for updating the BaseKitSite fields failed: %s' % e)
def test_to_json(self):
'''Check that serialization works'''
site = BaseKitSite.query.first()
out = site.to_json()
for col in ['status', 'front_end_ip_addresses', 'user_id', 'service', 'created_at', 'updated_at', 'template_id', 'package_id', 'domains', 'id', 'front_end_cnames']:
assert col in out, '%s not found in result of BaseKitSite.to_json()' % col
assert out['service'] == 'builder'
| mit |
trozet/python-tackerclient | tackerclient/shell.py | 1 | 32048 | # Copyright 2012 OpenStack Foundation.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Command-line interface to the Tacker APIs
"""
from __future__ import print_function
import argparse
import getpass
import inspect
import itertools
import logging
import os
import sys
from keystoneclient.auth.identity import v2 as v2_auth
from keystoneclient.auth.identity import v3 as v3_auth
from keystoneclient import discover
from keystoneclient.openstack.common.apiclient import exceptions as ks_exc
from keystoneclient import session
from oslo_utils import encodeutils
import six.moves.urllib.parse as urlparse
from cliff import app
from cliff import commandmanager
from tackerclient.common import clientmanager
from tackerclient.common import command as openstack_command
from tackerclient.common import exceptions as exc
from tackerclient.common import extension as client_extension
from tackerclient.common import utils
from tackerclient.i18n import _
from tackerclient.tacker.v1_0 import extension
from tackerclient.tacker.v1_0.vm import device
from tackerclient.tacker.v1_0.vm import device_template
from tackerclient.tacker.v1_0.vm import vnf
from tackerclient.tacker.v1_0.vm import vnfd
from tackerclient.version import __version__
VERSION = '1.0'
TACKER_API_VERSION = '1.0'
def run_command(cmd, cmd_parser, sub_argv):
_argv = sub_argv
index = -1
values_specs = []
if '--' in sub_argv:
index = sub_argv.index('--')
_argv = sub_argv[:index]
values_specs = sub_argv[index:]
known_args, _values_specs = cmd_parser.parse_known_args(_argv)
cmd.values_specs = (index == -1 and _values_specs or values_specs)
return cmd.run(known_args)
def env(*_vars, **kwargs):
"""Search for the first defined of possibly many env vars.
Returns the first environment variable defined in vars, or
returns the default defined in kwargs.
"""
for v in _vars:
value = os.environ.get(v, None)
if value:
return value
return kwargs.get('default', '')
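# Example (added illustration):
#
#     os.environ['OS_USERNAME'] = 'alice'
#     env('OS_USERNAME', 'OS_USER_ID', default='anon')  # -> 'alice'
#     env('OS_UNSET_VAR', default='anon')               # -> 'anon'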
def check_non_negative_int(value):
try:
value = int(value)
except ValueError:
raise argparse.ArgumentTypeError(_("invalid int value: %r") % value)
if value < 0:
raise argparse.ArgumentTypeError(_("input value %d is negative") %
value)
return value
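# Example behavior (added; used below as an argparse ``type=`` validator):
#
#     check_non_negative_int('3')    # -> 3
#     check_non_negative_int('-1')   # raises argparse.ArgumentTypeError
#     check_non_negative_int('x')    # raises argparse.ArgumentTypeError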
class BashCompletionCommand(openstack_command.OpenStackCommand):
"""Prints all of the commands and options for bash-completion."""
resource = "bash_completion"
COMMAND_V1 = {
'bash-completion': BashCompletionCommand,
'ext-list': extension.ListExt,
'ext-show': extension.ShowExt,
'device-template-create': device_template.CreateDeviceTemplate,
'device-template-list': device_template.ListDeviceTemplate,
'device-template-show': device_template.ShowDeviceTemplate,
'device-template-update': device_template.UpdateDeviceTemplate,
'device-template-delete': device_template.DeleteDeviceTemplate,
'device-create': device.CreateDevice,
'device-list': device.ListDevice,
'device-show': device.ShowDevice,
'device-update': device.UpdateDevice,
'device-delete': device.DeleteDevice,
'interface-attach': device.AttachInterface,
'interface-detach': device.DetachInterface,
# MANO lingo
'vnfd-create': vnfd.CreateVNFD,
'vnfd-delete': vnfd.DeleteVNFD,
'vnfd-list': vnfd.ListVNFD,
'vnfd-show': vnfd.ShowVNFD,
'vnf-create': vnf.CreateVNF,
'vnf-update': vnf.UpdateVNF,
'vnf-delete': vnf.DeleteVNF,
'vnf-list': vnf.ListVNF,
'vnf-show': vnf.ShowVNF,
# 'vnf-config-create'
# 'vnf-config-push'
}
COMMANDS = {'1.0': COMMAND_V1}
class HelpAction(argparse.Action):
"""Provide a custom action so the -h and --help options
to the main app will print a list of the commands.
The commands are determined by checking the CommandManager
instance, passed in as the "default" value for the action.
"""
def __call__(self, parser, namespace, values, option_string=None):
outputs = []
max_len = 0
app = self.default
parser.print_help(app.stdout)
app.stdout.write(_('\nCommands for API v%s:\n') % app.api_version)
command_manager = app.command_manager
for name, ep in sorted(command_manager):
factory = ep.load()
cmd = factory(self, None)
one_liner = cmd.get_description().split('\n')[0]
outputs.append((name, one_liner))
max_len = max(len(name), max_len)
for (name, one_liner) in outputs:
app.stdout.write(' %s %s\n' % (name.ljust(max_len), one_liner))
sys.exit(0)
class TackerShell(app.App):
# verbose logging levels
WARNING_LEVEL = 0
INFO_LEVEL = 1
DEBUG_LEVEL = 2
CONSOLE_MESSAGE_FORMAT = '%(message)s'
DEBUG_MESSAGE_FORMAT = '%(levelname)s: %(name)s %(message)s'
log = logging.getLogger(__name__)
def __init__(self, apiversion):
super(TackerShell, self).__init__(
description=__doc__.strip(),
version=VERSION,
command_manager=commandmanager.CommandManager('tacker.cli'), )
self.commands = COMMANDS
for k, v in self.commands[apiversion].items():
self.command_manager.add_command(k, v)
self._register_extensions(VERSION)
        # Pop the 'complete' command so it does not show up in 'tacker help'.
self.command_manager.commands.pop('complete')
# This is instantiated in initialize_app() only when using
# password flow auth
self.auth_client = None
self.api_version = apiversion
def build_option_parser(self, description, version):
"""Return an argparse option parser for this application.
Subclasses may override this method to extend
the parser with more global options.
:param description: full description of the application
:paramtype description: str
:param version: version number for the application
:paramtype version: str
"""
parser = argparse.ArgumentParser(
description=description,
add_help=False, )
parser.add_argument(
'--version',
action='version',
version=__version__, )
parser.add_argument(
'-v', '--verbose', '--debug',
action='count',
dest='verbose_level',
default=self.DEFAULT_VERBOSE_LEVEL,
help=_('Increase verbosity of output and show tracebacks on'
' errors. You can repeat this option.'))
parser.add_argument(
'-q', '--quiet',
action='store_const',
dest='verbose_level',
const=0,
help=_('Suppress output except warnings and errors.'))
parser.add_argument(
'-h', '--help',
action=HelpAction,
nargs=0,
default=self, # tricky
help=_("Show this help message and exit."))
parser.add_argument(
'-r', '--retries',
metavar="NUM",
type=check_non_negative_int,
default=0,
help=_("How many times the request to the Tacker server should "
"be retried if it fails."))
# FIXME(bklei): this method should come from python-keystoneclient
self._append_global_identity_args(parser)
return parser
def _append_global_identity_args(self, parser):
# FIXME(bklei): these are global identity (Keystone) arguments which
# should be consistent and shared by all service clients. Therefore,
# they should be provided by python-keystoneclient. We will need to
# refactor this code once this functionality is available in
# python-keystoneclient.
#
# Note: At that time we'll need to decide if we can just abandon
# the deprecated args (--service-type and --endpoint-type).
parser.add_argument(
'--os-service-type', metavar='<os-service-type>',
default=env('OS_SERVICEVM_SERVICE_TYPE', default='servicevm'),
help=_('Defaults to env[OS_SERVICEVM_SERVICE_TYPE] or servicevm.'))
parser.add_argument(
'--os-endpoint-type', metavar='<os-endpoint-type>',
default=env('OS_ENDPOINT_TYPE', default='publicURL'),
help=_('Defaults to env[OS_ENDPOINT_TYPE] or publicURL.'))
# FIXME(bklei): --service-type is deprecated but kept in for
# backward compatibility.
parser.add_argument(
'--service-type', metavar='<service-type>',
default=env('OS_SERVICEVM_SERVICE_TYPE', default='servicevm'),
help=_('DEPRECATED! Use --os-service-type.'))
# FIXME(bklei): --endpoint-type is deprecated but kept in for
# backward compatibility.
parser.add_argument(
'--endpoint-type', metavar='<endpoint-type>',
default=env('OS_ENDPOINT_TYPE', default='publicURL'),
help=_('DEPRECATED! Use --os-endpoint-type.'))
parser.add_argument(
'--os-auth-strategy', metavar='<auth-strategy>',
default=env('OS_AUTH_STRATEGY', default='keystone'),
help=_('DEPRECATED! Only keystone is supported.'))
parser.add_argument(
'--os_auth_strategy',
help=argparse.SUPPRESS)
parser.add_argument(
'--os-auth-url', metavar='<auth-url>',
default=env('OS_AUTH_URL'),
help=_('Authentication URL, defaults to env[OS_AUTH_URL].'))
parser.add_argument(
'--os_auth_url',
help=argparse.SUPPRESS)
project_name_group = parser.add_mutually_exclusive_group()
project_name_group.add_argument(
'--os-tenant-name', metavar='<auth-tenant-name>',
default=env('OS_TENANT_NAME'),
help=_('Authentication tenant name, defaults to '
'env[OS_TENANT_NAME].'))
project_name_group.add_argument(
'--os-project-name',
metavar='<auth-project-name>',
default=utils.env('OS_PROJECT_NAME'),
help='Another way to specify tenant name. '
'This option is mutually exclusive with '
            '--os-tenant-name. '
'Defaults to env[OS_PROJECT_NAME].')
parser.add_argument(
'--os_tenant_name',
help=argparse.SUPPRESS)
project_id_group = parser.add_mutually_exclusive_group()
project_id_group.add_argument(
'--os-tenant-id', metavar='<auth-tenant-id>',
default=env('OS_TENANT_ID'),
help=_('Authentication tenant ID, defaults to '
'env[OS_TENANT_ID].'))
project_id_group.add_argument(
'--os-project-id',
metavar='<auth-project-id>',
default=utils.env('OS_PROJECT_ID'),
help='Another way to specify tenant ID. '
'This option is mutually exclusive with '
            '--os-tenant-id. '
'Defaults to env[OS_PROJECT_ID].')
parser.add_argument(
'--os-username', metavar='<auth-username>',
default=utils.env('OS_USERNAME'),
help=_('Authentication username, defaults to env[OS_USERNAME].'))
parser.add_argument(
'--os_username',
help=argparse.SUPPRESS)
parser.add_argument(
'--os-user-id', metavar='<auth-user-id>',
default=env('OS_USER_ID'),
help=_('Authentication user ID (Env: OS_USER_ID)'))
parser.add_argument(
'--os_user_id',
help=argparse.SUPPRESS)
parser.add_argument(
'--os-user-domain-id',
metavar='<auth-user-domain-id>',
default=utils.env('OS_USER_DOMAIN_ID'),
help='OpenStack user domain ID. '
'Defaults to env[OS_USER_DOMAIN_ID].')
parser.add_argument(
'--os_user_domain_id',
help=argparse.SUPPRESS)
parser.add_argument(
'--os-user-domain-name',
metavar='<auth-user-domain-name>',
default=utils.env('OS_USER_DOMAIN_NAME'),
help='OpenStack user domain name. '
'Defaults to env[OS_USER_DOMAIN_NAME].')
parser.add_argument(
'--os_user_domain_name',
help=argparse.SUPPRESS)
parser.add_argument(
'--os_project_id',
help=argparse.SUPPRESS)
parser.add_argument(
'--os_project_name',
help=argparse.SUPPRESS)
parser.add_argument(
'--os-project-domain-id',
metavar='<auth-project-domain-id>',
default=utils.env('OS_PROJECT_DOMAIN_ID'),
help='Defaults to env[OS_PROJECT_DOMAIN_ID].')
parser.add_argument(
'--os-project-domain-name',
metavar='<auth-project-domain-name>',
default=utils.env('OS_PROJECT_DOMAIN_NAME'),
help='Defaults to env[OS_PROJECT_DOMAIN_NAME].')
parser.add_argument(
'--os-cert',
metavar='<certificate>',
default=utils.env('OS_CERT'),
help=_("Path of certificate file to use in SSL "
"connection. This file can optionally be "
"prepended with the private key. Defaults "
"to env[OS_CERT]."))
parser.add_argument(
'--os-cacert',
metavar='<ca-certificate>',
default=env('OS_CACERT', default=None),
help=_("Specify a CA bundle file to use in "
"verifying a TLS (https) server certificate. "
"Defaults to env[OS_CACERT]."))
parser.add_argument(
'--os-key',
metavar='<key>',
default=utils.env('OS_KEY'),
help=_("Path of client key to use in SSL "
"connection. This option is not necessary "
"if your key is prepended to your certificate "
"file. Defaults to env[OS_KEY]."))
parser.add_argument(
'--os-password', metavar='<auth-password>',
default=utils.env('OS_PASSWORD'),
help=_('Authentication password, defaults to env[OS_PASSWORD].'))
parser.add_argument(
'--os_password',
help=argparse.SUPPRESS)
parser.add_argument(
'--os-region-name', metavar='<auth-region-name>',
default=env('OS_REGION_NAME'),
help=_('Authentication region name, defaults to '
'env[OS_REGION_NAME].'))
parser.add_argument(
'--os_region_name',
help=argparse.SUPPRESS)
parser.add_argument(
'--os-token', metavar='<token>',
default=env('OS_TOKEN'),
help=_('Authentication token, defaults to env[OS_TOKEN].'))
parser.add_argument(
'--os_token',
help=argparse.SUPPRESS)
parser.add_argument(
'--http-timeout', metavar='<seconds>',
default=env('OS_NETWORK_TIMEOUT', default=None), type=float,
help=_('Timeout in seconds to wait for an HTTP response. Defaults '
'to env[OS_NETWORK_TIMEOUT] or None if not specified.'))
parser.add_argument(
'--os-url', metavar='<url>',
default=env('OS_URL'),
help=_('Defaults to env[OS_URL].'))
parser.add_argument(
'--os_url',
help=argparse.SUPPRESS)
parser.add_argument(
'--insecure',
action='store_true',
default=env('TACKERCLIENT_INSECURE', default=False),
help=_("Explicitly allow tackerclient to perform \"insecure\" "
"SSL (https) requests. The server's certificate will "
"not be verified against any certificate authorities. "
"This option should be used with caution."))
def _bash_completion(self):
"""Prints all of the commands and options for bash-completion."""
commands = set()
options = set()
for option, _action in self.parser._option_string_actions.items():
options.add(option)
for command_name, command in self.command_manager:
commands.add(command_name)
cmd_factory = command.load()
cmd = cmd_factory(self, None)
cmd_parser = cmd.get_parser('')
for option, _action in cmd_parser._option_string_actions.items():
options.add(option)
print(' '.join(commands | options))
def _register_extensions(self, version):
for name, module in itertools.chain(
client_extension._discover_via_entry_points()):
self._extend_shell_commands(module, version)
def _extend_shell_commands(self, module, version):
classes = inspect.getmembers(module, inspect.isclass)
for cls_name, cls in classes:
if (issubclass(cls, client_extension.TackerClientExtension) and
hasattr(cls, 'shell_command')):
cmd = cls.shell_command
if hasattr(cls, 'versions'):
if version not in cls.versions:
continue
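                # Skip extension commands that cannot be registered
                # (add_command may raise TypeError).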
try:
self.command_manager.add_command(cmd, cls)
self.commands[version][cmd] = cls
except TypeError:
pass
def run(self, argv):
"""Equivalent to the main program for the application.
:param argv: input arguments and options
:paramtype argv: list of str
"""
try:
index = 0
command_pos = -1
help_pos = -1
help_command_pos = -1
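            # Scan argv once to record where the first subcommand and any
            # help flags appear, so the help rewrites below can be applied.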
for arg in argv:
if arg == 'bash-completion' and help_command_pos == -1:
self._bash_completion()
return 0
if arg in self.commands[self.api_version]:
if command_pos == -1:
command_pos = index
elif arg in ('-h', '--help'):
if help_pos == -1:
help_pos = index
elif arg == 'help':
if help_command_pos == -1:
help_command_pos = index
index = index + 1
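            # 'cmd --help' is rewritten as 'help cmd', and a bare 'help'
            # with no subcommand is rewritten as '--help'.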
if command_pos > -1 and help_pos > command_pos:
argv = ['help', argv[command_pos]]
if help_command_pos > -1 and command_pos == -1:
argv[help_command_pos] = '--help'
self.options, remainder = self.parser.parse_known_args(argv)
self.configure_logging()
self.interactive_mode = not remainder
self.initialize_app(remainder)
except Exception as err:
if self.options.verbose_level >= self.DEBUG_LEVEL:
self.log.exception(err)
raise
else:
self.log.error(err)
return 1
if self.interactive_mode:
_argv = [sys.argv[0]]
sys.argv = _argv
return self.interact()
return self.run_subcommand(remainder)
def run_subcommand(self, argv):
subcommand = self.command_manager.find_command(argv)
cmd_factory, cmd_name, sub_argv = subcommand
cmd = cmd_factory(self, self.options)
try:
self.prepare_to_run_command(cmd)
full_name = (cmd_name
if self.interactive_mode
else ' '.join([self.NAME, cmd_name])
)
cmd_parser = cmd.get_parser(full_name)
return run_command(cmd, cmd_parser, sub_argv)
except Exception as e:
if self.options.verbose_level >= self.DEBUG_LEVEL:
self.log.exception("%s", e)
raise
self.log.error("%s", e)
return 1
def authenticate_user(self):
"""Make sure the user has provided all of the authentication
info we need.
"""
if self.options.os_auth_strategy == 'keystone':
if self.options.os_token or self.options.os_url:
# Token flow auth takes priority
if not self.options.os_token:
raise exc.CommandError(
_("You must provide a token via"
" either --os-token or env[OS_TOKEN]"
" when providing a service URL"))
if not self.options.os_url:
raise exc.CommandError(
_("You must provide a service URL via"
" either --os-url or env[OS_URL]"
" when providing a token"))
else:
# Validate password flow auth
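                # Project scope may be given in Keystone v2 terms (tenant
                # name or ID) or v3 terms (project name plus a domain
                # qualifier, or project ID).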
project_info = (self.options.os_tenant_name or
self.options.os_tenant_id or
(self.options.os_project_name and
(self.options.os_project_domain_name or
self.options.os_project_domain_id)) or
self.options.os_project_id)
if (not self.options.os_username
and not self.options.os_user_id):
raise exc.CommandError(
_("You must provide a username or user ID via"
" --os-username, env[OS_USERNAME] or"
" --os-user-id, env[OS_USER_ID]"))
if not self.options.os_password:
                    # No password; if we've got a tty, try prompting for it
if hasattr(sys.stdin, 'isatty') and sys.stdin.isatty():
                        # Check for Ctrl-D
try:
self.options.os_password = getpass.getpass(
'OS Password: ')
except EOFError:
pass
                    # No password because we didn't have a tty, or the user
                    # pressed Ctrl-D when prompted.
if not self.options.os_password:
raise exc.CommandError(
_("You must provide a password via"
" either --os-password or env[OS_PASSWORD]"))
                if not project_info:
                    # tenant is deprecated in Keystone v3. Use the latest
                    # terminology instead.
raise exc.CommandError(
_("You must provide a project_id or project_name ("
"with project_domain_name or project_domain_id) "
"via "
" --os-project-id (env[OS_PROJECT_ID])"
" --os-project-name (env[OS_PROJECT_NAME]),"
" --os-project-domain-id "
"(env[OS_PROJECT_DOMAIN_ID])"
" --os-project-domain-name "
"(env[OS_PROJECT_DOMAIN_NAME])"))
if not self.options.os_auth_url:
raise exc.CommandError(
_("You must provide an auth url via"
" either --os-auth-url or via env[OS_AUTH_URL]"))
auth_session = self._get_keystone_session()
auth = auth_session.auth
else: # not keystone
if not self.options.os_url:
raise exc.CommandError(
_("You must provide a service URL via"
" either --os-url or env[OS_URL]"))
auth_session = None
auth = None
self.client_manager = clientmanager.ClientManager(
token=self.options.os_token,
url=self.options.os_url,
auth_url=self.options.os_auth_url,
tenant_name=self.options.os_tenant_name,
tenant_id=self.options.os_tenant_id,
username=self.options.os_username,
user_id=self.options.os_user_id,
password=self.options.os_password,
region_name=self.options.os_region_name,
api_version=self.api_version,
auth_strategy=self.options.os_auth_strategy,
# FIXME (bklei) honor deprecated service_type and
# endpoint type until they are removed
service_type=self.options.os_service_type or
self.options.service_type,
endpoint_type=self.options.os_endpoint_type or self.endpoint_type,
insecure=self.options.insecure,
ca_cert=self.options.os_cacert,
timeout=self.options.http_timeout,
retries=self.options.retries,
raise_errors=False,
session=auth_session,
auth=auth,
log_credentials=True)
return
def initialize_app(self, argv):
"""Global app init bits:
* set up API versions
* validate authentication info
"""
super(TackerShell, self).initialize_app(argv)
self.api_version = {'servicevm': self.api_version}
# If the user is not asking for help, make sure they
# have given us auth.
cmd_name = None
if argv:
cmd_info = self.command_manager.find_command(argv)
cmd_factory, cmd_name, sub_argv = cmd_info
if self.interactive_mode or cmd_name != 'help':
self.authenticate_user()
def configure_logging(self):
"""Create logging handlers for any log output."""
root_logger = logging.getLogger('')
# Set up logging to a file
root_logger.setLevel(logging.DEBUG)
# Send higher-level messages to the console via stderr
console = logging.StreamHandler(self.stderr)
console_level = {self.WARNING_LEVEL: logging.WARNING,
self.INFO_LEVEL: logging.INFO,
self.DEBUG_LEVEL: logging.DEBUG,
}.get(self.options.verbose_level, logging.DEBUG)
        # The default log level is INFO; in that case, lower the console
        # handler to WARNING to avoid displaying routine messages. This is
        # equivalent to passing "--quiet".
if console_level == logging.INFO:
console.setLevel(logging.WARNING)
else:
console.setLevel(console_level)
if logging.DEBUG == console_level:
formatter = logging.Formatter(self.DEBUG_MESSAGE_FORMAT)
else:
formatter = logging.Formatter(self.CONSOLE_MESSAGE_FORMAT)
logging.getLogger('iso8601.iso8601').setLevel(logging.WARNING)
logging.getLogger('urllib3.connectionpool').setLevel(logging.WARNING)
console.setFormatter(formatter)
root_logger.addHandler(console)
return
def get_v2_auth(self, v2_auth_url):
return v2_auth.Password(
v2_auth_url,
username=self.options.os_username,
password=self.options.os_password,
tenant_id=self.options.os_tenant_id,
tenant_name=self.options.os_tenant_name)
def get_v3_auth(self, v3_auth_url):
project_id = self.options.os_project_id or self.options.os_tenant_id
project_name = (self.options.os_project_name or
self.options.os_tenant_name)
return v3_auth.Password(
v3_auth_url,
username=self.options.os_username,
password=self.options.os_password,
user_id=self.options.os_user_id,
user_domain_name=self.options.os_user_domain_name,
user_domain_id=self.options.os_user_domain_id,
project_id=project_id,
project_name=project_name,
project_domain_name=self.options.os_project_domain_name,
project_domain_id=self.options.os_project_domain_id
)
def _discover_auth_versions(self, session, auth_url):
        # Discover the API versions the server supports, based on the
        # given URL.
try:
ks_discover = discover.Discover(session=session, auth_url=auth_url)
return (ks_discover.url_for('2.0'), ks_discover.url_for('3.0'))
except ks_exc.ClientException:
            # The Identity service may not support API version discovery.
            # Let's try to figure out the version from the original URL.
url_parts = urlparse.urlparse(auth_url)
(scheme, netloc, path, params, query, fragment) = url_parts
path = path.lower()
if path.startswith('/v3'):
return (None, auth_url)
elif path.startswith('/v2'):
return (auth_url, None)
else:
# not enough information to determine the auth version
msg = _('Unable to determine the Keystone version '
'to authenticate with using the given '
'auth_url. Identity service may not support API '
'version discovery. Please provide a versioned '
'auth_url instead.')
raise exc.CommandError(msg)
def _get_keystone_session(self):
# first create a Keystone session
cacert = self.options.os_cacert or None
cert = self.options.os_cert or None
key = self.options.os_key or None
insecure = self.options.insecure or False
ks_session = session.Session.construct(dict(cacert=cacert,
cert=cert,
key=key,
insecure=insecure))
# discover the supported keystone versions using the given url
(v2_auth_url, v3_auth_url) = self._discover_auth_versions(
session=ks_session,
auth_url=self.options.os_auth_url)
# Determine which authentication plugin to use. First inspect the
# auth_url to see the supported version. If both v3 and v2 are
# supported, then use the highest version if possible.
user_domain_name = self.options.os_user_domain_name or None
user_domain_id = self.options.os_user_domain_id or None
project_domain_name = self.options.os_project_domain_name or None
project_domain_id = self.options.os_project_domain_id or None
domain_info = (user_domain_name or user_domain_id or
project_domain_name or project_domain_id)
if (v2_auth_url and not domain_info) or not v3_auth_url:
ks_session.auth = self.get_v2_auth(v2_auth_url)
else:
ks_session.auth = self.get_v3_auth(v3_auth_url)
return ks_session
def main(argv=sys.argv[1:]):
try:
return TackerShell(TACKER_API_VERSION).run(
list(map(encodeutils.safe_decode, argv)))
except KeyboardInterrupt:
print("... terminating tacker client", file=sys.stderr)
return 130
except exc.TackerClientException:
return 1
except Exception as e:
print(e)
return 1
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| apache-2.0 |
supergis/QGIS | python/ext-libs/pygments/styles/autumn.py | 364 | 2144 | # -*- coding: utf-8 -*-
"""
pygments.styles.autumn
~~~~~~~~~~~~~~~~~~~~~~
A colorful style, inspired by the terminal highlighting style.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace
class AutumnStyle(Style):
"""
A colorful style, inspired by the terminal highlighting style.
"""
default_style = ""
styles = {
Whitespace: '#bbbbbb',
Comment: 'italic #aaaaaa',
Comment.Preproc: 'noitalic #4c8317',
Comment.Special: 'italic #0000aa',
Keyword: '#0000aa',
Keyword.Type: '#00aaaa',
Operator.Word: '#0000aa',
Name.Builtin: '#00aaaa',
Name.Function: '#00aa00',
Name.Class: 'underline #00aa00',
Name.Namespace: 'underline #00aaaa',
Name.Variable: '#aa0000',
Name.Constant: '#aa0000',
Name.Entity: 'bold #800',
Name.Attribute: '#1e90ff',
Name.Tag: 'bold #1e90ff',
Name.Decorator: '#888888',
String: '#aa5500',
String.Symbol: '#0000aa',
String.Regex: '#009999',
Number: '#009999',
Generic.Heading: 'bold #000080',
Generic.Subheading: 'bold #800080',
Generic.Deleted: '#aa0000',
Generic.Inserted: '#00aa00',
Generic.Error: '#aa0000',
Generic.Emph: 'italic',
Generic.Strong: 'bold',
Generic.Prompt: '#555555',
Generic.Output: '#888888',
Generic.Traceback: '#aa0000',
Error: '#F00 bg:#FAA'
}
| gpl-2.0 |
valtech-mooc/edx-platform | cms/djangoapps/contentstore/features/course-export.py | 62 | 2771 | # pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
# pylint: disable=unused-argument
from lettuce import world, step
from component_settings_editor_helpers import enter_xml_in_advanced_problem
from nose.tools import assert_true, assert_equal
from contentstore.utils import reverse_usage_url
@step('I go to the export page$')
def i_go_to_the_export_page(step):
world.click_tools()
link_css = 'li.nav-course-tools-export a'
world.css_click(link_css)
@step('I export the course$')
def i_export_the_course(step):
step.given('I go to the export page')
world.css_click('a.action-export')
@step('I edit and enter bad XML$')
def i_enter_bad_xml(step):
enter_xml_in_advanced_problem(
step,
"""<problem><h1>Smallest Canvas</h1>
<p>You want to make the smallest canvas you can.</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice">
<choice correct="false"><verbatim><canvas id="myCanvas" width = 10 height = 100> </canvas></verbatim></choice>
<choice correct="true"><code><canvas id="myCanvas" width = 10 height = 10> </canvas></code></choice>
</choicegroup>
</multiplechoiceresponse>
</problem>"""
)
@step('I edit and enter an ampersand$')
def i_enter_an_ampersand(step):
enter_xml_in_advanced_problem(step, "<problem>&</problem>")
@step('I get an error dialog$')
def get_an_error_dialog(step):
assert_true(world.is_css_present("div.prompt.error"))
@step('I can click to go to the unit with the error$')
def i_click_on_error_dialog(step):
world.wait_for_visible(".button.action-primary")
world.click_link_by_text('Correct failed component')
problem_string = unicode(world.scenario_dict['COURSE'].id.make_usage_key("problem", 'ignore'))
problem_string = u"Problem {}".format(problem_string[:problem_string.rfind('ignore')])
assert_true(
world.css_html("span.inline-error").startswith(problem_string),
u"{} does not start with {}".format(
world.css_html("span.inline-error"), problem_string
))
# we don't know the actual ID of the vertical. So just check that we did go to a
# vertical page in the course (there should only be one).
vertical_usage_key = world.scenario_dict['COURSE'].id.make_usage_key("vertical", "test")
vertical_url = reverse_usage_url('container_handler', vertical_usage_key)
# Remove the trailing "/None" from the URL - we don't know the course ID, so we just want to
# check that we visited a vertical URL.
if vertical_url.endswith("/test") or vertical_url.endswith("@test"):
vertical_url = vertical_url[:-5]
assert_equal(1, world.browser.url.count(vertical_url))
| agpl-3.0 |
skirsdeda/django | django/contrib/admindocs/urls.py | 574 | 1183 | from django.conf.urls import url
from django.contrib.admindocs import views
urlpatterns = [
url('^$',
views.BaseAdminDocsView.as_view(template_name='admin_doc/index.html'),
name='django-admindocs-docroot'),
url('^bookmarklets/$',
views.BookmarkletsView.as_view(),
name='django-admindocs-bookmarklets'),
url('^tags/$',
views.TemplateTagIndexView.as_view(),
name='django-admindocs-tags'),
url('^filters/$',
views.TemplateFilterIndexView.as_view(),
name='django-admindocs-filters'),
url('^views/$',
views.ViewIndexView.as_view(),
name='django-admindocs-views-index'),
url('^views/(?P<view>[^/]+)/$',
views.ViewDetailView.as_view(),
name='django-admindocs-views-detail'),
url('^models/$',
views.ModelIndexView.as_view(),
name='django-admindocs-models-index'),
url('^models/(?P<app_label>[^\.]+)\.(?P<model_name>[^/]+)/$',
views.ModelDetailView.as_view(),
name='django-admindocs-models-detail'),
url('^templates/(?P<template>.*)/$',
views.TemplateDetailView.as_view(),
name='django-admindocs-templates'),
]
| bsd-3-clause |
peterlauri/django | django/contrib/gis/sitemaps/views.py | 144 | 2365 | from __future__ import unicode_literals
from django.apps import apps
from django.contrib.gis.db.models.fields import GeometryField
from django.contrib.gis.db.models.functions import AsKML, Transform
from django.contrib.gis.shortcuts import render_to_kml, render_to_kmz
from django.core.exceptions import FieldDoesNotExist
from django.db import DEFAULT_DB_ALIAS, connections
from django.http import Http404
def kml(request, label, model, field_name=None, compress=False, using=DEFAULT_DB_ALIAS):
"""
This view generates KML for the given app label, model, and field name.
The field name must be that of a geographic field.
"""
placemarks = []
try:
klass = apps.get_model(label, model)
except LookupError:
raise Http404('You must supply a valid app label and module name. Got "%s.%s"' % (label, model))
if field_name:
try:
field = klass._meta.get_field(field_name)
if not isinstance(field, GeometryField):
raise FieldDoesNotExist
except FieldDoesNotExist:
raise Http404('Invalid geometry field.')
connection = connections[using]
if connection.features.has_AsKML_function:
# Database will take care of transformation.
placemarks = klass._default_manager.using(using).annotate(kml=AsKML(field_name))
else:
# If the database offers no KML method, we use the `kml`
# attribute of the lazy geometry instead.
placemarks = []
if connection.features.has_Transform_function:
qs = klass._default_manager.using(using).annotate(
**{'%s_4326' % field_name: Transform(field_name, 4326)})
field_name += '_4326'
else:
qs = klass._default_manager.using(using).all()
for mod in qs:
mod.kml = getattr(mod, field_name).kml
placemarks.append(mod)
    # Get the appropriate render function and render to the correct format.
if compress:
render = render_to_kmz
else:
render = render_to_kml
return render('gis/kml/placemarks.kml', {'places': placemarks})
def kmz(request, label, model, field_name=None, using=DEFAULT_DB_ALIAS):
"""
This view returns KMZ for the given app label, model, and field name.
"""
return kml(request, label, model, field_name, compress=True, using=using)
| bsd-3-clause |
40423248/2017springcd_hw | data/w2/cd_w2a.py | 17 | 1084 | #import os
adata = open("w2a_cadlab.txt", encoding="utf-8").read()
rdata = open("w2a_registered.txt", encoding="utf-8").read().splitlines()
#print(adata)
alist = adata.splitlines()
#print(alist[2])
n = 0
row = 0
final_list = []
w2_list = []
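# Each non-empty seat cell looks like "prefix_studentID"; record it as
# "prefix_studentID_row_column" and collect the student ID for the
# attendance check below.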
for stud_num in alist[2:]:
row = row + 1
blist = stud_num.split("\t")
#print(blist)
column = 0
for i in range(len(blist)):
column = column + 1
if blist[i] != "":
#print(blist[i])
clist = blist[i].split("_")
stud_data = clist[0]+"_"+clist[1]+"_"+str(row)+"_"+str(column)
final_list.append(stud_data)
w2_list.append(clist[1])
            n = n + 1
# Sort by each entry's leading prefix string in order to build the grouping list
group_list = sorted(final_list)
print("分組名單:")
for i in range(len(group_list)):
print(group_list[i])
print("座位列表:")
for i in range(len(final_list)):
print(final_list[i])
for i in range(len(rdata)):
if rdata[i] not in w2_list:
print("缺席學生:", rdata[i])
print("學生總數:", n)
#print(os.environ)
| agpl-3.0 |
jhsenjaliya/incubator-airflow | airflow/hooks/http_hook.py | 15 | 4307 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from builtins import str
import requests
from airflow.hooks.base_hook import BaseHook
from airflow.exceptions import AirflowException
class HttpHook(BaseHook):
"""
Interact with HTTP servers.
"""
def __init__(self, method='POST', http_conn_id='http_default'):
self.http_conn_id = http_conn_id
self.method = method
    # headers has no default value so that callers are forced to pass it
def get_conn(self, headers):
"""
Returns http session for use with requests
"""
conn = self.get_connection(self.http_conn_id)
session = requests.Session()
if "://" in conn.host:
self.base_url = conn.host
else:
# schema defaults to HTTP
schema = conn.schema if conn.schema else "http"
self.base_url = schema + "://" + conn.host
if conn.port:
self.base_url = self.base_url + ":" + str(conn.port) + "/"
if conn.login:
session.auth = (conn.login, conn.password)
if headers:
session.headers.update(headers)
return session
def run(self, endpoint, data=None, headers=None, extra_options=None):
"""
Performs the request
"""
extra_options = extra_options or {}
session = self.get_conn(headers)
url = self.base_url + endpoint
req = None
if self.method == 'GET':
# GET uses params
req = requests.Request(self.method,
url,
params=data,
headers=headers)
elif self.method == 'HEAD':
# HEAD doesn't use params
req = requests.Request(self.method,
url,
headers=headers)
else:
# Others use data
req = requests.Request(self.method,
url,
data=data,
headers=headers)
prepped_request = session.prepare_request(req)
self.log.info("Sending '%s' to url: %s", self.method, url)
return self.run_and_check(session, prepped_request, extra_options)
def run_and_check(self, session, prepped_request, extra_options):
"""
Grabs extra options like timeout and actually runs the request,
checking for the result
"""
extra_options = extra_options or {}
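        # Note that TLS certificate verification defaults to False here
        # unless 'verify' is explicitly set in extra_options.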
response = session.send(
prepped_request,
stream=extra_options.get("stream", False),
verify=extra_options.get("verify", False),
proxies=extra_options.get("proxies", {}),
cert=extra_options.get("cert"),
timeout=extra_options.get("timeout"),
allow_redirects=extra_options.get("allow_redirects", True))
try:
response.raise_for_status()
except requests.exceptions.HTTPError:
            # Re-wrapping the exception isn't supported. This way, callers
            # can still get the failure code and reason by checking the
            # first 3 chars of the message for the code, or splitting on ':'
self.log.error("HTTP error: %s", response.reason)
if self.method not in ('GET', 'HEAD'):
# The sensor uses GET, so this prevents filling up the log
# with the body every time the GET 'misses'.
# That's ok to do, because GETs should be repeatable and
# all data should be visible in the log (no post data)
self.log.error(response.text)
raise AirflowException(str(response.status_code)+":"+response.reason)
return response
| apache-2.0 |
paolodedios/tensorflow | tensorflow/python/keras/distribute/keras_dnn_correctness_test.py | 6 | 13341 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Correctness tests for tf.keras DNN model using DistributionStrategy."""
import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations as ds_combinations
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import multi_process_runner
from tensorflow.python.eager import context
from tensorflow.python.framework import test_combinations as combinations
from tensorflow.python.keras import backend
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.distribute import keras_correctness_test_base
from tensorflow.python.keras.distribute import strategy_combinations
from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_keras
from tensorflow.python.training import gradient_descent
def all_strategy_combinations_with_eager_and_graph_modes():
return (combinations.combine(
distribution=strategy_combinations.all_strategies,
mode=['graph', 'eager']) + combinations.combine(
distribution=strategy_combinations.multi_worker_mirrored_strategies,
mode='eager'))
def all_strategy_combinations_with_graph_mode():
return (combinations.combine(
distribution=keras_correctness_test_base.all_strategies,
mode=['graph']))
def is_default_strategy(strategy):
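  # The default (no-op) strategy does not register itself, so has_strategy()
  # returns False inside its scope; real strategies return True.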
with strategy.scope():
return not distribution_strategy_context.has_strategy()
@testing_utils.run_all_without_tensor_float_32(
'Uses Dense layers, which call matmul')
class TestDistributionStrategyDnnCorrectness(
keras_correctness_test_base.TestDistributionStrategyCorrectnessBase):
def get_model(self,
initial_weights=None,
distribution=None,
input_shapes=None):
with keras_correctness_test_base.MaybeDistributionScope(distribution):
      # We add a few non-linear layers to make it non-trivial.
model = keras.Sequential()
model.add(keras.layers.Dense(10, activation='relu', input_shape=(1,)))
model.add(
keras.layers.Dense(
10,
activation='relu',
kernel_regularizer=keras.regularizers.l2(1e-4)))
model.add(keras.layers.Dense(10, activation='relu'))
model.add(keras.layers.Dense(1))
if initial_weights:
model.set_weights(initial_weights)
model.compile(
loss=keras.losses.mean_squared_error,
optimizer=gradient_descent_keras.SGD(0.05),
metrics=['mse'])
return model
def get_data(self):
x_train = np.random.rand(9984, 1).astype('float32')
y_train = 3 * x_train
x_predict = np.array([[1.], [2.], [3.], [4.]], dtype=np.float32)
return x_train, y_train, x_predict
def get_data_with_partial_last_batch(self):
x_train = np.random.rand(10000, 1).astype('float32')
y_train = 3 * x_train
x_eval = np.random.rand(10000, 1).astype('float32')
y_eval = 3 * x_eval
x_predict = np.array([[1.], [2.], [3.], [4.]], dtype=np.float32)
return x_train, y_train, x_eval, y_eval, x_predict
def get_data_with_partial_last_batch_eval(self):
x_train = np.random.rand(9984, 1).astype('float32')
y_train = 3 * x_train
x_eval = np.random.rand(10000, 1).astype('float32')
y_eval = 3 * x_eval
x_predict = np.array([[1.], [2.], [3.], [4.]], dtype=np.float32)
return x_train, y_train, x_eval, y_eval, x_predict
@ds_combinations.generate(
keras_correctness_test_base.all_strategy_and_input_config_combinations() +
keras_correctness_test_base.multi_worker_mirrored_eager())
def test_dnn_correctness(self, distribution, use_numpy, use_validation_data):
self.run_correctness_test(distribution, use_numpy, use_validation_data)
@ds_combinations.generate(
keras_correctness_test_base
.test_combinations_with_tpu_strategies_graph() +
keras_correctness_test_base.multi_worker_mirrored_eager())
def test_dnn_correctness_with_partial_last_batch_eval(self, distribution,
use_numpy,
use_validation_data):
self.run_correctness_test(
distribution, use_numpy, use_validation_data, partial_last_batch='eval')
@ds_combinations.generate(
keras_correctness_test_base
.strategy_minus_tpu_and_input_config_combinations_eager() +
keras_correctness_test_base.multi_worker_mirrored_eager())
def test_dnn_correctness_with_partial_last_batch(self, distribution,
use_numpy,
use_validation_data):
distribution.extended.experimental_enable_get_next_as_optional = True
self.run_correctness_test(
distribution,
use_numpy,
use_validation_data,
partial_last_batch='train_and_eval',
training_epochs=1)
@ds_combinations.generate(all_strategy_combinations_with_graph_mode())
def test_dnn_with_dynamic_learning_rate(self, distribution):
self.run_dynamic_lr_test(distribution)
class TestDistributionStrategyDnnMetricCorrectness(
keras_correctness_test_base.TestDistributionStrategyCorrectnessBase):
def get_model(self,
distribution=None,
input_shapes=None):
with distribution.scope():
model = keras.Sequential()
model.add(
keras.layers.Dense(1, input_shape=(1,), kernel_initializer='ones'))
model.compile(
loss=keras.losses.mean_squared_error,
optimizer=gradient_descent_keras.SGD(0.05),
metrics=[keras.metrics.BinaryAccuracy()])
return model
def run_metric_correctness_test(self, distribution):
with self.cached_session():
self.set_up_test_config()
x_train, y_train, _ = self.get_data()
model = self.get_model(
distribution=distribution)
batch_size = 64
batch_size = (
keras_correctness_test_base.get_batch_size(batch_size, distribution))
train_dataset = dataset_ops.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = (
keras_correctness_test_base.batch_wrapper(train_dataset, batch_size))
history = model.fit(x=train_dataset, epochs=2, steps_per_epoch=10)
self.assertEqual(history.history['binary_accuracy'], [1.0, 1.0])
@ds_combinations.generate(
all_strategy_combinations_with_eager_and_graph_modes())
def test_simple_dnn_metric_correctness(self, distribution):
self.run_metric_correctness_test(distribution)
class TestDistributionStrategyDnnMetricEvalCorrectness(
keras_correctness_test_base.TestDistributionStrategyCorrectnessBase):
def get_model(self,
distribution=None,
input_shapes=None):
with distribution.scope():
model = keras.Sequential()
model.add(
keras.layers.Dense(
3, activation='relu', input_dim=4, kernel_initializer='ones'))
model.add(
keras.layers.Dense(
1, activation='sigmoid', kernel_initializer='ones'))
model.compile(
loss='mae',
metrics=['accuracy', keras.metrics.BinaryAccuracy()],
optimizer=gradient_descent.GradientDescentOptimizer(0.001))
return model
def run_eval_metrics_correctness_test(self, distribution):
with self.cached_session():
self.set_up_test_config()
model = self.get_model(
distribution=distribution)
# verify correctness of stateful and stateless metrics.
x = np.ones((100, 4)).astype('float32')
y = np.ones((100, 1)).astype('float32')
dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).repeat()
dataset = keras_correctness_test_base.batch_wrapper(dataset, 4)
outs = model.evaluate(dataset, steps=10)
self.assertEqual(outs[1], 1.)
self.assertEqual(outs[2], 1.)
y = np.zeros((100, 1)).astype('float32')
dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).repeat()
dataset = keras_correctness_test_base.batch_wrapper(dataset, 4)
outs = model.evaluate(dataset, steps=10)
self.assertEqual(outs[1], 0.)
self.assertEqual(outs[2], 0.)
@ds_combinations.generate(
all_strategy_combinations_with_eager_and_graph_modes())
def test_identity_model_metric_eval_correctness(self, distribution):
self.run_eval_metrics_correctness_test(distribution)
class SubclassedModel(keras.Model):
def __init__(self, initial_weights, input_shapes):
super(SubclassedModel, self).__init__()
self.dense1 = keras.layers.Dense(10, activation='relu', input_shape=(1,))
self.dense2 = keras.layers.Dense(
10, activation='relu', kernel_regularizer=keras.regularizers.l2(1e-4))
self.dense3 = keras.layers.Dense(10, activation='relu')
self.dense4 = keras.layers.Dense(1)
if input_shapes:
self.build(input_shapes)
else:
# This covers cases when the input is DatasetV1Adapter.
self.build((None, 1))
if initial_weights:
self.set_weights(initial_weights)
def call(self, inputs):
x = self.dense1(inputs)
x = self.dense2(x)
x = self.dense3(x)
return self.dense4(x)
@testing_utils.run_all_without_tensor_float_32(
'Uses Dense layers, which call matmul')
class TestDistributionStrategyDnnCorrectnessWithSubclassedModel(
TestDistributionStrategyDnnCorrectness):
def get_model(self,
initial_weights=None,
distribution=None,
input_shapes=None):
with keras_correctness_test_base.MaybeDistributionScope(distribution):
model = SubclassedModel(initial_weights, input_shapes)
model.compile(
loss=keras.losses.mean_squared_error,
optimizer=gradient_descent_keras.SGD(0.05),
metrics=['mse'])
return model
@ds_combinations.generate(
keras_correctness_test_base.all_strategy_and_input_config_combinations() +
keras_correctness_test_base.multi_worker_mirrored_eager())
def test_dnn_correctness(self, distribution, use_numpy, use_validation_data):
if (context.executing_eagerly()) or is_default_strategy(distribution):
self.run_correctness_test(distribution, use_numpy, use_validation_data)
elif (backend.is_tpu_strategy(distribution)
and not context.executing_eagerly()):
with self.assertRaisesRegex(
ValueError,
'Expected `model` argument to be a functional `Model` instance, '
'but got a subclass model instead.'):
self.run_correctness_test(distribution, use_numpy, use_validation_data)
else:
with self.assertRaisesRegex(
ValueError,
'We currently do not support distribution strategy with a '
'`Sequential` model that is created without `input_shape`/'
'`input_dim` set in its first layer or a subclassed model.'):
self.run_correctness_test(distribution, use_numpy, use_validation_data)
@ds_combinations.generate(all_strategy_combinations_with_graph_mode())
def test_dnn_with_dynamic_learning_rate(self, distribution):
if ((context.executing_eagerly()
and not backend.is_tpu_strategy(distribution))
or is_default_strategy(distribution)):
self.run_dynamic_lr_test(distribution)
elif backend.is_tpu_strategy(distribution):
with self.assertRaisesRegex(
ValueError,
'Expected `model` argument to be a functional `Model` instance, '
'but got a subclass model instead.'):
self.run_dynamic_lr_test(distribution)
else:
with self.assertRaisesRegex(
ValueError,
'We currently do not support distribution strategy with a '
'`Sequential` model that is created without `input_shape`/'
'`input_dim` set in its first layer or a subclassed model.'):
self.run_dynamic_lr_test(distribution)
@ds_combinations.generate(
keras_correctness_test_base.test_combinations_with_tpu_strategies_graph())
def test_dnn_correctness_with_partial_last_batch_eval(self, distribution,
use_numpy,
use_validation_data):
with self.assertRaisesRegex(
ValueError,
'Expected `model` argument to be a functional `Model` instance, '
'but got a subclass model instead.'):
self.run_correctness_test(
distribution,
use_numpy,
use_validation_data,
partial_last_batch='eval')
if __name__ == '__main__':
multi_process_runner.test_main()
| apache-2.0 |
damianmoore/django-cms | cms/migrations/0041_auto__add_usersettings.py | 15 | 16370 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
try:
from django.contrib.auth import get_user_model
except ImportError: # django < 1.5
from django.contrib.auth.models import User
else:
User = get_user_model()
user_orm_label = '%s.%s' % (User._meta.app_label, User._meta.object_name)
user_model_label = '%s.%s' % (User._meta.app_label, User._meta.module_name)
user_ptr_name = '%s_ptr' % User._meta.object_name.lower()
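# These labels let the migration work with a custom user model
# (AUTH_USER_MODEL) instead of hard-coding auth.User in the frozen ORM below.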
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'UserSettings'
db.create_table(u'cms_usersettings', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm[user_orm_label])),
('language', self.gf('django.db.models.fields.CharField')(max_length=10)),
))
db.send_create_signal('cms', ['UserSettings'])
def backwards(self, orm):
# Deleting model 'UserSettings'
db.delete_table(u'cms_usersettings')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
user_model_label: {
'Meta': {'object_name': User.__name__, 'db_table': "'%s'" % User._meta.db_table},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.globalpagepermission': {
'Meta': {'object_name': 'GlobalPagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_recover_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('tree_id', 'lft')", 'object_name': 'Page'},
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.pagemoderatorstate': {
'Meta': {'ordering': "('page', 'action', '-created')", 'object_name': 'PageModeratorState'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'null': 'True'})
},
'cms.pagepermission': {
'Meta': {'object_name': 'PagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grant_on': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
},
'cms.pageuser': {
'Meta': {'object_name': 'PageUser', '_ormbases': [user_orm_label]},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_users'", 'to': "orm['%s']" % user_orm_label}),
u'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['%s']" % user_orm_label, 'unique': 'True', 'primary_key': 'True'})
},
'cms.pageusergroup': {
'Meta': {'object_name': 'PageUserGroup', '_ormbases': [u'auth.Group']},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_usergroups'", 'to': "orm['%s']" % user_orm_label}),
u'group_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.Group']", 'unique': 'True', 'primary_key': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'cms.title': {
'Meta': {'unique_together': "(('language', 'page'),)", 'object_name': 'Title'},
'application_urls': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'has_url_overwrite': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'menu_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_description': ('django.db.models.fields.TextField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_keywords': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'title_set'", 'to': "orm['cms.Page']"}),
'page_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'redirect': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'cms.usersettings': {
'Meta': {'object_name': 'UserSettings'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cms'] | bsd-3-clause |
demisto/content | Packs/DNSDB/Integrations/DNSDB_v2/DNSDB_v2.py | 1 | 30136 | # Copyright (c) 2020 by Farsight Security, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import locale
from typing import Iterator, Dict, List, Tuple, Union, Any, Callable, Iterable
import urllib
import urllib.parse
from CommonServerPython import * # noqa: E402 lgtm [py/polluting-import]
import datetime # type: ignore[no-redef]
import json
import re
import requests
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
INTEGRATION_NAME = 'Farsight DNSDB'
INTEGRATION_COMMAND_NAME = 'dnsdb'
INTEGRATION_CONTEXT_NAME = 'DNSDB'
RECORD_SUBCONTEXT_NAME = 'Record'
SUMMARY_SUBCONTEXT_NAME = 'Summary'
RATE_SUBCONTEXT_NAME = 'Rate'
# CONSTANTS
DEFAULT_DNSDB_SERVER = 'https://api.dnsdb.info'
TIMEOUT = 60
SWCLIENT = "demisto"
VERSION = "v2.1.2"
PATH_PREFIX = 'dnsdb/v2'
IDN_REGEX = re.compile(r'(?:^|(?<=[\s=.:@]))xn--[a-z0-9\-]+\.')
FALSE_REGEX = re.compile(r'^(?i:f(alse)?)$')
COND_BEGIN = 'begin'
COND_ONGOING = 'ongoing'
COND_SUCCEEDED = 'succeeded'
COND_LIMITED = 'limited'
COND_FAILED = 'failed'
locale.setlocale(locale.LC_ALL, '')
''' HELPER FUNCTIONS '''
class QueryError(Exception):
pass
class timeval(int):
pass
class Client(BaseClient):
def __init__(self, base_url: str, apikey: str, verify=None, proxy=None):
BaseClient.__init__(
self,
base_url,
verify=verify,
headers={
'Accept': 'application/x-ndjson',
'X-Api-Key': apikey,
},
proxy=proxy,
ok_codes=(200, ),
)
self.apikey = apikey
@staticmethod
def base_params() -> dict:
return {
'swclient': SWCLIENT,
'version': VERSION,
}
def rate_limit(self) -> Dict:
params = self.base_params()
url_suffix = 'dnsdb/v2/rate_limit'
return self._http_request('GET', url_suffix=url_suffix, params=params)
def lookup_rrset(self, owner_name: str, rrtype: str = None, bailiwick: str = None, limit: int = None,
time_first_before: timeval = None, time_first_after: timeval = None,
time_last_before: timeval = None, time_last_after: timeval = None,
aggr: bool = None, offset: int = None) -> Iterator[Dict]:
return self._query_rrset("lookup",
owner_name=owner_name,
rrtype=rrtype,
bailiwick=bailiwick,
limit=limit,
time_first_before=time_first_before,
time_first_after=time_first_after,
time_last_before=time_last_before,
time_last_after=time_last_after,
aggr=aggr,
offset=offset)
def summarize_rrset(self, owner_name: str, rrtype: str = None, bailiwick: str = None, limit: int = None,
time_first_before: timeval = None, time_first_after: timeval = None,
time_last_before: timeval = None, time_last_after: timeval = None,
aggr: bool = None, max_count: int = None) -> dict:
try:
return next(self._query_rrset("summarize",
owner_name=owner_name,
rrtype=rrtype,
bailiwick=bailiwick,
limit=limit,
time_first_before=time_first_before,
time_first_after=time_first_after,
time_last_before=time_last_before,
time_last_after=time_last_after,
aggr=aggr,
max_count=max_count))
except StopIteration:
raise QueryError("no data")
def _query_rrset(self, mode: str, owner_name: str, rrtype: str = None, bailiwick: str = None, limit: int = None,
time_first_before: timeval = None, time_first_after: timeval = None,
time_last_before: timeval = None, time_last_after: timeval = None,
aggr: bool = None, offset: int = None, max_count: int = None) -> Iterator[Dict]:
owner_name = quote(to_ascii(owner_name))
if bailiwick:
if not rrtype:
rrtype = 'ANY'
bailiwick = quote(to_ascii(bailiwick))
path = f'{PATH_PREFIX}/{mode}/rrset/name/{owner_name}/{rrtype}/{bailiwick}'
elif rrtype:
path = f'{PATH_PREFIX}/{mode}/rrset/name/{owner_name}/{rrtype}'
else:
path = f'{PATH_PREFIX}/{mode}/rrset/name/{owner_name}'
return self._query(path, limit=limit, time_first_before=time_first_before, time_first_after=time_first_after,
time_last_before=time_last_before, time_last_after=time_last_after,
aggr=aggr, offset=offset, max_count=max_count)
def lookup_rdata_name(self, value: str, rrtype: str = None,
limit: int = None, time_first_before: timeval = None, time_first_after: timeval = None,
time_last_before: timeval = None, time_last_after: timeval = None,
aggr: bool = None, offset: int = None) -> Iterator[Dict]:
return self._query_rdata_name("lookup",
name=value,
rrtype=rrtype,
limit=limit,
time_first_before=time_first_before,
time_first_after=time_first_after,
time_last_before=time_last_before,
time_last_after=time_last_after,
aggr=aggr,
offset=offset)
def summarize_rdata_name(self, value: str, rrtype: str = None,
limit: int = None, time_first_before: timeval = None, time_first_after: timeval = None,
time_last_before: timeval = None, time_last_after: timeval = None,
aggr: bool = None, max_count: int = None) -> dict:
try:
return next(self._query_rdata_name("summarize",
name=value,
rrtype=rrtype,
limit=limit,
time_first_before=time_first_before,
time_first_after=time_first_after,
time_last_before=time_last_before,
time_last_after=time_last_after,
aggr=aggr,
max_count=max_count))
except StopIteration:
raise QueryError("no data")
def _query_rdata_name(self, mode: str, name: str, rrtype: str = None,
limit: int = None, time_first_before: timeval = None, time_first_after: timeval = None,
time_last_before: timeval = None, time_last_after: timeval = None,
aggr: bool = None, offset: int = None, max_count: int = None) -> Iterator[Dict]:
rdata_name = quote(to_ascii(name))
if rrtype:
path = f'{PATH_PREFIX}/{mode}/rdata/name/{rdata_name}/{rrtype}'
else:
path = f'{PATH_PREFIX}/{mode}/rdata/name/{rdata_name}'
return self._query(path, limit=limit, time_first_before=time_first_before, time_first_after=time_first_after,
time_last_before=time_last_before, time_last_after=time_last_after,
aggr=aggr, offset=offset, max_count=max_count)
def lookup_rdata_ip(self, value: str, limit: int = None,
time_first_before: timeval = None, time_first_after: timeval = None,
time_last_before: timeval = None, time_last_after: timeval = None,
aggr: bool = None, offset: int = None) -> Iterator[Dict]:
return self._query_rdata_ip("lookup",
ip=value,
limit=limit,
time_first_before=time_first_before,
time_first_after=time_first_after,
time_last_before=time_last_before,
time_last_after=time_last_after,
aggr=aggr,
offset=offset)
def summarize_rdata_ip(self, value: str, limit: int = None,
time_first_before: timeval = None, time_first_after: timeval = None,
time_last_before: timeval = None, time_last_after: timeval = None,
aggr: bool = None, max_count: int = None) -> dict:
try:
return next(self._query_rdata_ip("summarize",
ip=value,
limit=limit,
time_first_before=time_first_before,
time_first_after=time_first_after,
time_last_before=time_last_before,
time_last_after=time_last_after,
aggr=aggr,
max_count=max_count))
except StopIteration:
raise QueryError("no data")
def _query_rdata_ip(self, mode: str, ip: str,
limit: int = None, time_first_before: timeval = None, time_first_after: timeval = None,
time_last_before: timeval = None, time_last_after: timeval = None,
aggr: bool = None, offset: int = None, max_count: int = None) -> Iterator[Dict]:
ip = ip.replace('/', ',')
path = f'{PATH_PREFIX}/{mode}/rdata/ip/{ip}'
return self._query(path, limit=limit, time_first_before=time_first_before, time_first_after=time_first_after,
time_last_before=time_last_before, time_last_after=time_last_after,
aggr=aggr, offset=offset, max_count=max_count)
def lookup_rdata_raw(self, value: str, rrtype: str = None,
limit: int = None, time_first_before: timeval = None, time_first_after: timeval = None,
time_last_before: timeval = None, time_last_after: timeval = None,
aggr: bool = None, offset: int = None) -> Iterator[Dict]:
return self._query_rdata_raw("lookup",
raw=value,
rrtype=rrtype,
limit=limit,
time_first_before=time_first_before,
time_first_after=time_first_after,
time_last_before=time_last_before,
time_last_after=time_last_after,
aggr=aggr,
offset=offset)
def summarize_rdata_raw(self, value: str, rrtype: str = None,
limit: int = None, time_first_before: timeval = None, time_first_after: timeval = None,
time_last_before: timeval = None, time_last_after: timeval = None,
aggr: bool = None, max_count: int = None) -> dict:
try:
return next(self._query_rdata_raw("summarize",
raw=value,
rrtype=rrtype,
limit=limit,
time_first_before=time_first_before,
time_first_after=time_first_after,
time_last_before=time_last_before,
time_last_after=time_last_after,
aggr=aggr,
max_count=max_count))
except StopIteration:
raise QueryError("no data")
def _query_rdata_raw(self, mode: str, raw: str, rrtype: str = None,
limit: int = None, time_first_before: timeval = None, time_first_after: timeval = None,
time_last_before: timeval = None, time_last_after: timeval = None,
aggr: bool = None, offset: int = None, max_count: int = None) -> Iterator[Dict]:
if rrtype:
path = f'{PATH_PREFIX}/{mode}/rdata/raw/{quote(raw)}/{rrtype}'
else:
path = f'{PATH_PREFIX}/{mode}/rdata/raw/{quote(raw)}'
return self._query(path, limit=limit, time_first_before=time_first_before, time_first_after=time_first_after,
time_last_before=time_last_before, time_last_after=time_last_after,
aggr=aggr, offset=offset, max_count=max_count)
def flex(self, method: str, key: str, value: str, rrtype: str = None,
limit: int = None, time_first_before: timeval = None, time_first_after: timeval = None,
time_last_before: timeval = None, time_last_after: timeval = None):
path = f'{PATH_PREFIX}/{method}/{key}/{quote(value)}'
if rrtype:
path += f'/{rrtype}'
return self._query(path, limit=limit, time_first_before=time_first_before, time_first_after=time_first_after,
time_last_before=time_last_before, time_last_after=time_last_after)
def _query(self, path: str, limit: int = None, time_first_before: timeval = None, time_first_after: timeval = None,
time_last_before: timeval = None, time_last_after: timeval = None,
aggr: bool = None, offset: int = None, max_count: int = None) -> Iterator[Dict]:
params = self.base_params()
params.update(
assign_params(
limit=limit,
time_first_before=time_first_before,
time_first_after=time_first_after,
time_last_before=time_last_before,
time_last_after=time_last_after,
aggr=aggr,
offset=offset,
max_count=max_count,
)
)
res = self._http_request('GET', path,
params=params,
stream=True,
resp_type='response',
timeout=TIMEOUT)
return _handle_saf(res.iter_lines(decode_unicode=True))
def _handle_saf(i: Iterable[str]):
for line in i:
if not line:
continue
try:
saf_msg = json.loads(line)
except json.JSONDecodeError as e:
raise DemistoException(f'saf protocol error: could not decode json: {line}') from e
cond = saf_msg.get('cond')
obj = saf_msg.get('obj')
msg = saf_msg.get('msg')
if cond == COND_BEGIN:
continue
elif cond == COND_SUCCEEDED:
return
if obj:
yield obj
if cond == COND_ONGOING or not cond:
continue
elif cond == COND_LIMITED:
return
elif cond == COND_FAILED:
raise QueryError(f'saf query failed: {msg}')
else:
raise QueryError(f'saf protocol error: invalid cond: {cond}')
raise QueryError('saf query truncated')
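# Illustrative sketch of the SAF framing _handle_saf expects: one JSON object
# per line with optional 'cond', 'obj', and 'msg' keys. The input lines below
# are hypothetical example data, and this helper is never called.
def _example_handle_saf_usage():
lines = [
'{"cond": "begin"}',
'{"obj": {"rrname": "example.com.", "rrtype": "A"}}',
'{"cond": "succeeded"}',
]
# yields the single record, then returns cleanly on the "succeeded" cond
return list(_handle_saf(iter(lines)))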
def quote(path: str) -> str:
return urllib.parse.quote(path, safe='')
@logger
def _run_query(f, args):
sig = inspect.signature(f)
kwargs = {} # type: Dict[str, Any]
for name, p in sig.parameters.items():
if name in args:
if p.annotation != p.empty:
if p.annotation == bool:
if FALSE_REGEX.match(args[name]):
kwargs[name] = False
else:
kwargs[name] = True
elif p.annotation == timeval:
try:
kwargs[name] = int(args[name])
except ValueError:
kwargs[name] = date_to_timestamp(args[name])
else:
kwargs[name] = p.annotation(args[name])
else:
kwargs[name] = args[name]
elif p.kind == p.POSITIONAL_ONLY:
raise Exception(f'Missing argument: {name}')
return f(**kwargs)
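# Illustrative sketch (hypothetical args dict): _run_query reads f's signature
# and coerces the string values Demisto supplies to each parameter's annotated
# type, so 'limit' is cast to int and 'aggr' to bool before the call.
def _example_run_query_usage(client):
args = {'value': 'example.com', 'limit': '10', 'aggr': 'true'}
return _run_query(client.lookup_rdata_name, args)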
def to_unicode(domain: str) -> str:
try:
return domain.encode('utf8').decode('idna')
except UnicodeError:
return domain
def to_ascii(domain: str) -> str:
try:
return domain.encode('idna').decode('utf8')
except UnicodeError:
return domain
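# Example (illustrative): these helpers round-trip internationalized domain
# names between Unicode and ASCII (IDNA/punycode) forms, falling back to the
# unmodified input on encoding errors:
# to_ascii('bücher.example') -> 'xn--bcher-kva.example'
# to_unicode('xn--bcher-kva.example') -> 'bücher.example'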
def format_name_for_context(domain: str) -> str:
return domain.rstrip('.')
def format_name_for_markdown(domain: str) -> str:
return to_unicode(domain.rstrip('.'))
def parse_rdata(rdata: Union[str, List[str]]):
if isinstance(rdata, list):
return [parse_rdata(entry) for entry in rdata] # pragma: no cover
def f(m):
return to_unicode(m.group(0))
return str(IDN_REGEX.sub(f, rdata))
def format_rdata_for_markdown(rdata: Union[str, List[str]]):
rdata = parse_rdata(rdata)
if isinstance(rdata, str):
return rdata
return '<br>'.join(rdata)
def parse_rate_limit_int(i):
try:
return int(i)
except ValueError:
return i
def parse_unix_time(ts) -> str:
try:
return datetime.datetime.utcfromtimestamp(ts).strftime("%Y-%m-%dT%H:%M:%SZ") # type: ignore[attr-defined]
except TypeError:
return ts
def nop(x):
return x
@logger
def build_result_context(results: Dict) -> Dict:
ctx = {}
for ckey, rkey, f in (
('RRName', 'rrname', format_name_for_context),
('RRType', 'rrtype', str),
('Bailiwick', 'bailiwick', format_name_for_context),
('RData', 'rdata', nop),
('RawRData', 'raw_rdata', nop),
('Count', 'count', int),
('NumResults', 'num_results', int),
('TimeFirst', 'time_first', parse_unix_time),
('TimeLast', 'time_last', parse_unix_time),
('TimeFirst', 'zone_time_first', parse_unix_time),
('TimeLast', 'zone_time_last', parse_unix_time),
):
if rkey in results:
ctx[ckey] = f(results[rkey]) # type: ignore[operator]
if 'zone_time_first' in results or 'time_first' in results:
ctx['FromZoneFile'] = 'zone_time_first' in results
return ctx
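# Example (illustrative): for a hypothetical record such as
# {'rrname': 'example.com.', 'rrtype': 'A', 'count': 3, 'time_first': 0},
# build_result_context returns {'RRName': 'example.com', 'RRType': 'A',
# 'Count': 3, 'TimeFirst': '1970-01-01T00:00:00Z', 'FromZoneFile': False}.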
@logger
def build_rate_limits_context(results: Dict) -> Dict:
"""Formatting results from Rate Limit API to Demisto Context"""
rate = results.get('rate')
if rate is None:
raise ValueError("Missing rate key")
ctx = {}
if rate['limit'] == 'unlimited':
return {
'Unlimited': True
}
for ckey, rkey, f in (
('Limit', 'limit', parse_rate_limit_int),
('Remaining', 'remaining', parse_rate_limit_int),
('Expires', 'expires', parse_unix_time),
('ResultsMax', 'results_max', parse_rate_limit_int),
('BurstSize', 'burst_size', parse_rate_limit_int),
('BurstWindow', 'burst_window', parse_rate_limit_int),
):
if rkey in rate:
ctx[ckey] = f(rate[rkey])
if 'reset' in rate:
if rate['reset'] == "n/a":
ctx['NeverResets'] = True
else:
ctx['Reset'] = parse_unix_time(rate['reset'])
if 'offset_max' in rate:
if rate['offset_max'] == "n/a":
ctx['OffsetNotAllowed'] = True
else:
ctx['OffsetMax'] = parse_rate_limit_int(rate['offset_max'])
return ctx
@logger
def lookup_to_markdown(results: List[Dict], title: str = 'Farsight DNSDB Lookup', want_bailiwick=True, header_filter=None) -> str:
# TODO this should be more specific, include arguments?
out = []
keys = [
('RRName', 'rrname', format_name_for_context),
('RRType', 'rrtype', str),
('Bailiwick', 'bailiwick', format_name_for_context),
('RData', 'rdata', format_rdata_for_markdown),
('Count', 'count', str),
] # type: List[Tuple[str, str, Callable]]
if not want_bailiwick:
keys = list(filter(lambda r: r[1] != 'bailiwick', keys))
headers = [k[0] for k in keys] + ['TimeFirst', 'TimeLast', 'FromZoneFile']
if header_filter:
headers = list(filter(header_filter, headers))
for result in results:
row = dict() # type: Dict[str, Any]
for ckey, rkey, f in keys:
if rkey in result:
row[ckey] = f(result[rkey])
if 'time_first' in result:
row['TimeFirst'] = parse_unix_time(result['time_first'])
elif 'zone_time_first' in result:
row['TimeFirst'] = parse_unix_time(result['zone_time_first'])
if 'time_last' in result:
row['TimeLast'] = parse_unix_time(result['time_last'])
elif 'zone_time_last' in result:
row['TimeLast'] = parse_unix_time(result['zone_time_last'])
row['FromZoneFile'] = str("zone_time_first" in result)
out.append(row)
return tableToMarkdown(title, out, headers=headers)
@logger
def summarize_to_markdown(summary: Dict) -> str:
headers = []
out = dict() # type: Dict[str, Any]
for ckey, rkey, f in (
('Count', 'count', int),
('NumResults', 'num_results', int),
('TimeFirst', 'time_first', parse_unix_time),
('TimeLast', 'time_last', parse_unix_time),
('ZoneTimeFirst', 'zone_time_first', parse_unix_time),
('ZoneTimeLast', 'zone_time_last', parse_unix_time),
):
if rkey in summary:
headers.append(ckey)
out[ckey] = f(summary[rkey]) # type: ignore[operator]
return tableToMarkdown('Farsight DNSDB Summarize', out, headers=headers)
@logger
def rate_limit_to_markdown(results: Dict) -> str:
rate = results.get('rate')
if rate is None:
return '### Error'
out = dict() # type: Dict[str, Any]
headers = []
if rate['limit'] != "unlimited":
for ckey, rkey, f in (
('Limit', 'limit', parse_rate_limit_int),
('Remaining', 'remaining', parse_rate_limit_int),
('Reset', 'reset', parse_unix_time),
('Expires', 'expires', parse_unix_time),
('ResultsMax', 'results_max', parse_rate_limit_int),
('OffsetMax', 'offset_max', parse_rate_limit_int),
('BurstSize', 'burst_size', parse_rate_limit_int),
('BurstWindow', 'burst_window', parse_rate_limit_int),
):
if rkey in rate:
headers.append(ckey)
if rkey == 'reset':
if rate[rkey] == "n/a":
NEVER_RESETS = 'NeverResets'
out[NEVER_RESETS] = True
headers.append(NEVER_RESETS)
else:
out[f'{ckey}'] = f(rate[rkey])
elif rkey == 'offset_max':
if rate[rkey] == "n/a":
OFFSET_NOT_ALLOWED = 'OffsetNotAllowed'
out[OFFSET_NOT_ALLOWED] = True
headers.append(OFFSET_NOT_ALLOWED)
else:
out[f'{ckey}'] = f(rate[rkey])
else:
out[f'{ckey}'] = f(rate[rkey])
else:
UNLIMITED = 'Unlimited'
out[UNLIMITED] = True
headers.append(UNLIMITED)
return tableToMarkdown('Farsight DNSDB Service Limits', out, headers=headers)
''' COMMANDS '''
@logger
def test_module(client, _):
try:
client.rate_limit()
except DemistoException as e:
if 'forbidden' in str(e):
return 'Authorization Error: make sure API Key is correctly set'
else:
raise e
return 'ok'
@logger
def dnsdb_flex(client, args):
res = list(_run_query(client.flex, args))
def skip_rrname(header) -> bool:
return header.lower() not in ('rrname', 'fromzonefile')
def skip_rdata(header) -> bool:
return header.lower() not in ('rdata', 'fromzonefile')
if args.get('key') == 'rdata':
skip = skip_rrname
else:
skip = skip_rdata
return CommandResults(
readable_output=lookup_to_markdown(res, title='Farsight DNSDB Flex Search', want_bailiwick=False,
header_filter=skip),
outputs_prefix=f'{INTEGRATION_CONTEXT_NAME}.{RECORD_SUBCONTEXT_NAME}',
outputs_key_field='',
outputs=[build_result_context(r) for r in res],
)
@logger
def dnsdb_rdata(client, args):
rdata_type = args.get('type')
if rdata_type == 'name':
res = list(_run_query(client.lookup_rdata_name, args))
elif rdata_type == 'ip':
res = list(_run_query(client.lookup_rdata_ip, args))
elif rdata_type == 'raw':
res = list(_run_query(client.lookup_rdata_raw, args))
else:
raise Exception(f'Invalid rdata query type: {rdata_type}')
return CommandResults(
readable_output=lookup_to_markdown(res, want_bailiwick=False),
outputs_prefix=f'{INTEGRATION_CONTEXT_NAME}.{RECORD_SUBCONTEXT_NAME}',
outputs_key_field='',
outputs=[build_result_context(r) for r in res],
)
@logger
def dnsdb_summarize_rdata(client, args):
rdata_type = args.get('type')
if rdata_type == 'name':
res = _run_query(client.summarize_rdata_name, args)
elif rdata_type == 'ip':
res = _run_query(client.summarize_rdata_ip, args)
elif rdata_type == 'raw':
res = _run_query(client.summarize_rdata_raw, args)
else:
raise Exception(f'Invalid rdata query type: {rdata_type}')
return CommandResults(
readable_output=summarize_to_markdown(res),
outputs_prefix=f'{INTEGRATION_CONTEXT_NAME}.{SUMMARY_SUBCONTEXT_NAME}',
outputs_key_field='',
outputs=build_result_context(res),
)
@logger
def dnsdb_rrset(client, args):
q = _run_query(client.lookup_rrset, args)
res = list(q)
return CommandResults(
readable_output=lookup_to_markdown(res),
outputs_prefix=f'{INTEGRATION_CONTEXT_NAME}.{RECORD_SUBCONTEXT_NAME}',
outputs_key_field='',
outputs=[build_result_context(r) for r in res],
)
@logger
def dnsdb_summarize_rrset(client, args):
res = _run_query(client.summarize_rrset, args)
return CommandResults(
readable_output=summarize_to_markdown(res),
outputs_prefix=f'{INTEGRATION_CONTEXT_NAME}.{SUMMARY_SUBCONTEXT_NAME}',
outputs_key_field='',
outputs=build_result_context(res),
)
@logger
def dnsdb_rate_limit(client, _):
res = client.rate_limit()
return CommandResults(
readable_output=rate_limit_to_markdown(res),
outputs_prefix=f'{INTEGRATION_CONTEXT_NAME}.{RATE_SUBCONTEXT_NAME}',
outputs_key_field='',
outputs=build_rate_limits_context(res),
)
def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
apikey = demisto.params().get('apikey')
base_url = demisto.params().get('url')
if not base_url:
base_url = DEFAULT_DNSDB_SERVER
verify_certificate = not demisto.params().get('insecure', False)
proxy = demisto.params().get('proxy', False)
client = Client(
base_url,
apikey,
verify=verify_certificate,
proxy=proxy)
command = demisto.command()
LOG(f'Command being called is {command}')
try:
if command == 'test-module':
return_results(test_module(client, demisto.args()))
elif command == f'{INTEGRATION_COMMAND_NAME}-flex':
return_results(dnsdb_flex(client, demisto.args()))
elif command == f'{INTEGRATION_COMMAND_NAME}-rdata':
return_results(dnsdb_rdata(client, demisto.args()))
elif command == f'{INTEGRATION_COMMAND_NAME}-summarize-rdata':
return_results(dnsdb_summarize_rdata(client, demisto.args()))
elif command == f'{INTEGRATION_COMMAND_NAME}-rrset':
return_results(dnsdb_rrset(client, demisto.args()))
elif command == f'{INTEGRATION_COMMAND_NAME}-summarize-rrset':
return_results(dnsdb_summarize_rrset(client, demisto.args()))
elif command == f'{INTEGRATION_COMMAND_NAME}-rate-limit':
return_results(dnsdb_rate_limit(client, demisto.args()))
# Log exceptions
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
err_msg = f'Error in {INTEGRATION_NAME} Integration [{e}]'
return_error(err_msg, error=e)
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| mit |
ZazieTheBeast/oscar | oscar/lib/python2.7/site-packages/pip/vcs/mercurial.py | 514 | 3472 | from __future__ import absolute_import
import logging
import os
import tempfile
from pip.utils import display_path, rmtree
from pip.vcs import vcs, VersionControl
from pip.download import path_to_url
from pip._vendor.six.moves import configparser
logger = logging.getLogger(__name__)
class Mercurial(VersionControl):
name = 'hg'
dirname = '.hg'
repo_name = 'clone'
schemes = ('hg', 'hg+http', 'hg+https', 'hg+ssh', 'hg+static-http')
def export(self, location):
"""Export the Hg repository at the url to the destination location"""
temp_dir = tempfile.mkdtemp('-export', 'pip-')
self.unpack(temp_dir)
try:
self.run_command(
['archive', location], show_stdout=False, cwd=temp_dir)
finally:
rmtree(temp_dir)
def switch(self, dest, url, rev_options):
repo_config = os.path.join(dest, self.dirname, 'hgrc')
config = configparser.SafeConfigParser()
try:
config.read(repo_config)
config.set('paths', 'default', url)
with open(repo_config, 'w') as config_file:
config.write(config_file)
except (OSError, configparser.NoSectionError) as exc:
logger.warning(
'Could not switch Mercurial repository to %s: %s', url, exc,
)
else:
self.run_command(['update', '-q'] + rev_options, cwd=dest)
def update(self, dest, rev_options):
self.run_command(['pull', '-q'], cwd=dest)
self.run_command(['update', '-q'] + rev_options, cwd=dest)
def obtain(self, dest):
url, rev = self.get_url_rev()
if rev:
rev_options = [rev]
rev_display = ' (to revision %s)' % rev
else:
rev_options = []
rev_display = ''
if self.check_destination(dest, url, rev_options, rev_display):
logger.info(
'Cloning hg %s%s to %s',
url,
rev_display,
display_path(dest),
)
self.run_command(['clone', '--noupdate', '-q', url, dest])
self.run_command(['update', '-q'] + rev_options, cwd=dest)
def get_url(self, location):
url = self.run_command(
['showconfig', 'paths.default'],
show_stdout=False, cwd=location).strip()
if self._is_local_repository(url):
url = path_to_url(url)
return url.strip()
def get_revision(self, location):
current_revision = self.run_command(
['parents', '--template={rev}'],
show_stdout=False, cwd=location).strip()
return current_revision
def get_revision_hash(self, location):
current_rev_hash = self.run_command(
['parents', '--template={node}'],
show_stdout=False, cwd=location).strip()
return current_rev_hash
def get_src_requirement(self, dist, location):
repo = self.get_url(location)
if not repo.lower().startswith('hg:'):
repo = 'hg+' + repo
egg_project_name = dist.egg_name().split('-', 1)[0]
if not repo:
return None
current_rev_hash = self.get_revision_hash(location)
return '%s@%s#egg=%s' % (repo, current_rev_hash, egg_project_name)
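# Example (illustrative): for a clone of https://hg.example.com/proj pinned
# at revision abc123 whose egg name is 'proj', this returns
# 'hg+https://hg.example.com/proj@abc123#egg=proj'.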
def check_version(self, dest, rev_options):
"""Always assume the versions don't match"""
return False
vcs.register(Mercurial)
| bsd-3-clause |
beacloudgenius/edx-platform | common/djangoapps/external_auth/tests/test_shib.py | 20 | 30727 | # -*- coding: utf-8 -*-
"""
Tests for Shibboleth Authentication
@jbau
"""
import unittest
from ddt import ddt, data
from django.conf import settings
from django.http import HttpResponseRedirect
from django.test import TestCase
from django.test.client import RequestFactory, Client as DjangoTestClient
from django.test.utils import override_settings
from django.core.urlresolvers import reverse
from django.contrib.auth.models import AnonymousUser, User
from django.utils.importlib import import_module
from edxmako.tests import mako_middleware_process_request
from external_auth.models import ExternalAuthMap
from external_auth.views import (
shib_login, course_specific_login, course_specific_register, _flatten_to_ascii
)
from mock import patch
from student.views import create_account, change_enrollment
from student.models import UserProfile, CourseEnrollment
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore import ModuleStoreEnum
# Shib is supposed to provide 'REMOTE_USER', 'givenName', 'sn', 'mail', 'Shib-Identity-Provider'
# attributes via request.META. We can count on 'Shib-Identity-Provider', and 'REMOTE_USER' being present
# b/c of how mod_shib works but should test the behavior with the rest of the attributes present/missing
# For the sake of python convention we'll make all of these variable names ALL_CAPS
# These values would all returned from request.META, so they need to be str, not unicode
IDP = 'https://idp.stanford.edu/'
REMOTE_USER = 'test_user@stanford.edu'
MAILS = [None, '', 'test_user@stanford.edu'] # unicode shouldn't be in emails, would fail django's email validator
DISPLAYNAMES = [None, '', 'Jason 包']
GIVENNAMES = [None, '', 'jasön; John; bob'] # At Stanford, the givenNames can be a list delimited by ';'
SNS = [None, '', '包; smith'] # At Stanford, the sns can be a list delimited by ';'
def gen_all_identities():
"""
A generator for all combinations of test inputs.
Each generated item is a dict that represents what a shib IDP
could potentially pass to django via request.META, i.e.
setting (or not) request.META['givenName'], etc.
"""
def _build_identity_dict(mail, display_name, given_name, surname):
""" Helper function to return a dict of test identity """
meta_dict = {'Shib-Identity-Provider': IDP,
'REMOTE_USER': REMOTE_USER}
if display_name is not None:
meta_dict['displayName'] = display_name
if mail is not None:
meta_dict['mail'] = mail
if given_name is not None:
meta_dict['givenName'] = given_name
if surname is not None:
meta_dict['sn'] = surname
return meta_dict
for mail in MAILS:
for given_name in GIVENNAMES:
for surname in SNS:
for display_name in DISPLAYNAMES:
yield _build_identity_dict(mail, display_name, given_name, surname)
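# Example of one generated identity dict (illustrative):
# {'Shib-Identity-Provider': IDP, 'REMOTE_USER': REMOTE_USER,
# 'mail': 'test_user@stanford.edu', 'givenName': 'jasön; John; bob',
# 'sn': '包; smith', 'displayName': 'Jason 包'}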
@ddt
@override_settings(SESSION_ENGINE='django.contrib.sessions.backends.cache')
class ShibSPTest(ModuleStoreTestCase):
"""
Tests for the Shibboleth SP, which communicates via request.META
(Apache environment variables set by mod_shib)
"""
request_factory = RequestFactory()
def setUp(self):
super(ShibSPTest, self).setUp(create_user=False)
self.test_user_id = ModuleStoreEnum.UserID.test
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
def test_exception_shib_login(self):
"""
Tests that we get the error page when there is no REMOTE_USER
or Shib-Identity-Provider in request.META
"""
no_remote_user_request = self.request_factory.get('/shib-login')
no_remote_user_request.META.update({'Shib-Identity-Provider': IDP})
no_remote_user_request.user = AnonymousUser()
mako_middleware_process_request(no_remote_user_request)
no_remote_user_response = shib_login(no_remote_user_request)
self.assertEqual(no_remote_user_response.status_code, 403)
self.assertIn("identity server did not return your ID information", no_remote_user_response.content)
no_idp_request = self.request_factory.get('/shib-login')
no_idp_request.META.update({'REMOTE_USER': REMOTE_USER})
no_idp_response = shib_login(no_idp_request)
self.assertEqual(no_idp_response.status_code, 403)
self.assertIn("identity server did not return your ID information", no_idp_response.content)
def _assert_shib_login_is_logged(self, audit_log_call, remote_user):
"""Asserts that shibboleth login attempt is being logged"""
remote_user = _flatten_to_ascii(remote_user) # django usernames have to be ascii
method_name, args, _kwargs = audit_log_call
self.assertEquals(method_name, 'info')
self.assertEquals(len(args), 1)
self.assertIn(u'logged in via Shibboleth', args[0])
self.assertIn(remote_user, args[0])
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
def test_shib_login(self):
"""
Tests that:
* shib credentials that match an existing ExternalAuthMap with a linked active user logs the user in
* shib credentials that match an existing ExternalAuthMap with a linked inactive user shows error page
* shib credentials that match an existing ExternalAuthMap without a linked user and also match the email
of an existing user without an existing ExternalAuthMap links the two and log the user in
* shib credentials that match an existing ExternalAuthMap without a linked user and also match the email
of an existing user that already has an ExternalAuthMap causes an error (403)
* shib credentials that do not match an existing ExternalAuthMap causes the registration form to appear
"""
user_w_map = UserFactory.create(email='withmap@stanford.edu')
extauth = ExternalAuthMap(external_id='withmap@stanford.edu',
external_email='',
external_domain='shib:https://idp.stanford.edu/',
external_credentials="",
user=user_w_map)
user_wo_map = UserFactory.create(email='womap@stanford.edu')
user_w_map.save()
user_wo_map.save()
extauth.save()
inactive_user = UserFactory.create(email='inactive@stanford.edu')
inactive_user.is_active = False
inactive_extauth = ExternalAuthMap(external_id='inactive@stanford.edu',
external_email='',
external_domain='shib:https://idp.stanford.edu/',
external_credentials="",
user=inactive_user)
inactive_user.save()
inactive_extauth.save()
idps = ['https://idp.stanford.edu/', 'https://someother.idp.com/']
remote_users = ['withmap@stanford.edu', 'womap@stanford.edu',
'testuser2@someother_idp.com', 'inactive@stanford.edu']
for idp in idps:
for remote_user in remote_users:
request = self.request_factory.get('/shib-login')
request.session = import_module(settings.SESSION_ENGINE).SessionStore() # empty session
request.META.update({'Shib-Identity-Provider': idp,
'REMOTE_USER': remote_user,
'mail': remote_user})
request.user = AnonymousUser()
mako_middleware_process_request(request)
with patch('external_auth.views.AUDIT_LOG') as mock_audit_log:
response = shib_login(request)
audit_log_calls = mock_audit_log.method_calls
if idp == "https://idp.stanford.edu/" and remote_user == 'withmap@stanford.edu':
self.assertIsInstance(response, HttpResponseRedirect)
self.assertEqual(request.user, user_w_map)
self.assertEqual(response['Location'], '/')
# verify logging:
self.assertEquals(len(audit_log_calls), 2)
self._assert_shib_login_is_logged(audit_log_calls[0], remote_user)
method_name, args, _kwargs = audit_log_calls[1]
self.assertEquals(method_name, 'info')
self.assertEquals(len(args), 1)
self.assertIn(u'Login success', args[0])
self.assertIn(remote_user, args[0])
elif idp == "https://idp.stanford.edu/" and remote_user == 'inactive@stanford.edu':
self.assertEqual(response.status_code, 403)
self.assertIn("Account not yet activated: please look for link in your email", response.content)
# verify logging:
self.assertEquals(len(audit_log_calls), 2)
self._assert_shib_login_is_logged(audit_log_calls[0], remote_user)
method_name, args, _kwargs = audit_log_calls[1]
self.assertEquals(method_name, 'warning')
self.assertEquals(len(args), 1)
self.assertIn(u'is not active after external login', args[0])
# self.assertEquals(remote_user, args[1])
elif idp == "https://idp.stanford.edu/" and remote_user == 'womap@stanford.edu':
self.assertIsNotNone(ExternalAuthMap.objects.get(user=user_wo_map))
self.assertIsInstance(response, HttpResponseRedirect)
self.assertEqual(request.user, user_wo_map)
self.assertEqual(response['Location'], '/')
# verify logging:
self.assertEquals(len(audit_log_calls), 2)
self._assert_shib_login_is_logged(audit_log_calls[0], remote_user)
method_name, args, _kwargs = audit_log_calls[1]
self.assertEquals(method_name, 'info')
self.assertEquals(len(args), 1)
self.assertIn(u'Login success', args[0])
self.assertIn(remote_user, args[0])
elif idp == "https://someother.idp.com/" and remote_user in \
['withmap@stanford.edu', 'womap@stanford.edu', 'inactive@stanford.edu']:
self.assertEqual(response.status_code, 403)
self.assertIn("You have already created an account using an external login", response.content)
# no audit logging calls
self.assertEquals(len(audit_log_calls), 0)
else:
self.assertEqual(response.status_code, 200)
self.assertContains(response,
("Preferences for {platform_name}"
.format(platform_name=settings.PLATFORM_NAME)))
# no audit logging calls
self.assertEquals(len(audit_log_calls), 0)
def _base_test_extauth_auto_activate_user_with_flag(self, log_user_string="inactive@stanford.edu"):
"""
Tests that FEATURES['BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH'] means extauth automatically
linked users, activates them, and logs them in
"""
inactive_user = UserFactory.create(email='inactive@stanford.edu')
inactive_user.is_active = False
inactive_user.save()
request = self.request_factory.get('/shib-login')
request.session = import_module(settings.SESSION_ENGINE).SessionStore() # empty session
request.META.update({
'Shib-Identity-Provider': 'https://idp.stanford.edu/',
'REMOTE_USER': 'inactive@stanford.edu',
'mail': 'inactive@stanford.edu'
})
request.user = AnonymousUser()
with patch('external_auth.views.AUDIT_LOG') as mock_audit_log:
response = shib_login(request)
audit_log_calls = mock_audit_log.method_calls
# reload user from db, since the view function works via db side-effects
inactive_user = User.objects.get(id=inactive_user.id)
self.assertIsNotNone(ExternalAuthMap.objects.get(user=inactive_user))
self.assertTrue(inactive_user.is_active)
self.assertIsInstance(response, HttpResponseRedirect)
self.assertEqual(request.user, inactive_user)
self.assertEqual(response['Location'], '/')
# verify logging:
self.assertEquals(len(audit_log_calls), 3)
self._assert_shib_login_is_logged(audit_log_calls[0], log_user_string)
method_name, args, _kwargs = audit_log_calls[2]
self.assertEquals(method_name, 'info')
self.assertEquals(len(args), 1)
self.assertIn(u'Login success', args[0])
self.assertIn(log_user_string, args[0])
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
@patch.dict(settings.FEATURES, {'BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH': True, 'SQUELCH_PII_IN_LOGS': False})
def test_extauth_auto_activate_user_with_flag_no_squelch(self):
"""
Wrapper to run base_test_extauth_auto_activate_user_with_flag with {'SQUELCH_PII_IN_LOGS': False}
"""
self._base_test_extauth_auto_activate_user_with_flag(log_user_string="inactive@stanford.edu")
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
@patch.dict(settings.FEATURES, {'BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH': True, 'SQUELCH_PII_IN_LOGS': True})
def test_extauth_auto_activate_user_with_flag_squelch(self):
"""
Wrapper to run base_test_extauth_auto_activate_user_with_flag with {'SQUELCH_PII_IN_LOGS': True}
"""
self._base_test_extauth_auto_activate_user_with_flag(log_user_string="user.id: 1")
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
@data(*gen_all_identities())
def test_registration_form(self, identity):
"""
Tests the registration form showing up with the proper parameters.
Uses django test client for its session support
"""
client = DjangoTestClient()
# identity k/v pairs will show up in request.META
response = client.get(path='/shib-login/', data={}, follow=False, **identity)
self.assertEquals(response.status_code, 200)
mail_input_HTML = '<input class="" id="email" type="email" name="email"'
if not identity.get('mail'):
self.assertContains(response, mail_input_HTML)
else:
self.assertNotContains(response, mail_input_HTML)
sn_empty = not identity.get('sn')
given_name_empty = not identity.get('givenName')
displayname_empty = not identity.get('displayName')
fullname_input_html = '<input id="name" type="text" name="name"'
if sn_empty and given_name_empty and displayname_empty:
self.assertContains(response, fullname_input_html)
else:
self.assertNotContains(response, fullname_input_html)
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
@data(*gen_all_identities())
def test_registration_form_submit(self, identity):
"""
Tests user creation after the registration form that pops up is submitted. If there is no shib
ExternalAuthMap in the session, then the created user should take the username and email from the
request.
Uses django test client for its session support
"""
# First we pop the registration form
client = DjangoTestClient()
response1 = client.get(path='/shib-login/', data={}, follow=False, **identity)
# Then we have the user answer the registration form
# These are unicode because request.POST returns unicode
postvars = {'email': u'post_email@stanford.edu',
'username': u'post_username', # django usernames can't be unicode
'password': u'post_pássword',
'name': u'post_náme',
'terms_of_service': u'true',
'honor_code': u'true'}
# use RequestFactory instead of TestClient here because we want access to request.user
request2 = self.request_factory.post('/create_account', data=postvars)
request2.session = client.session
request2.user = AnonymousUser()
mako_middleware_process_request(request2)
with patch('student.views.AUDIT_LOG') as mock_audit_log:
_response2 = create_account(request2)
user = request2.user
mail = identity.get('mail')
# verify logging of login happening during account creation:
audit_log_calls = mock_audit_log.method_calls
self.assertEquals(len(audit_log_calls), 3)
method_name, args, _kwargs = audit_log_calls[0]
self.assertEquals(method_name, 'info')
self.assertEquals(len(args), 1)
self.assertIn(u'Login success on new account creation', args[0])
self.assertIn(u'post_username', args[0])
method_name, args, _kwargs = audit_log_calls[1]
self.assertEquals(method_name, 'info')
self.assertEquals(len(args), 2)
self.assertIn(u'User registered with external_auth', args[0])
self.assertEquals(u'post_username', args[1])
method_name, args, _kwargs = audit_log_calls[2]
self.assertEquals(method_name, 'info')
self.assertEquals(len(args), 3)
self.assertIn(u'Updated ExternalAuthMap for ', args[0])
self.assertEquals(u'post_username', args[1])
self.assertEquals(u'test_user@stanford.edu', args[2].external_id)
# check that the created user has the right email, either taken from shib or user input
if mail:
self.assertEqual(user.email, mail)
self.assertEqual(list(User.objects.filter(email=postvars['email'])), [])
self.assertIsNotNone(User.objects.get(email=mail)) # get enforces only 1 such user
else:
self.assertEqual(user.email, postvars['email'])
self.assertEqual(list(User.objects.filter(email=mail)), [])
self.assertIsNotNone(User.objects.get(email=postvars['email'])) # get enforces only 1 such user
# check that the created user profile has the right name, either taken from shib or user input
profile = UserProfile.objects.get(user=user)
sn_empty = not identity.get('sn')
given_name_empty = not identity.get('givenName')
displayname_empty = not identity.get('displayName')
if displayname_empty:
if sn_empty and given_name_empty:
self.assertEqual(profile.name, postvars['name'])
else:
self.assertEqual(profile.name, request2.session['ExternalAuthMap'].external_name)
self.assertNotIn(u';', profile.name)
else:
self.assertEqual(profile.name, request2.session['ExternalAuthMap'].external_name)
self.assertEqual(profile.name, identity.get('displayName').decode('utf-8'))
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
@data(None, "", "shib:https://idp.stanford.edu/")
def test_course_specific_login_and_reg(self, domain):
"""
Tests that the correct course specific login and registration urls work for shib
"""
course = CourseFactory.create(
org='MITx',
number='999',
display_name='Robot Super Course',
user_id=self.test_user_id,
)
# Test for cases where course is found
# set domains
# temporarily set the branch to draft-preferred so we can update the course
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, course.id):
course.enrollment_domain = domain
self.store.update_item(course, self.test_user_id)
# setting location to test that GET params get passed through
login_request = self.request_factory.get('/course_specific_login/MITx/999/Robot_Super_Course' +
'?course_id=MITx/999/Robot_Super_Course' +
'&enrollment_action=enroll')
_reg_request = self.request_factory.get('/course_specific_register/MITx/999/Robot_Super_Course' +
'?course_id=MITx/999/course/Robot_Super_Course' +
'&enrollment_action=enroll')
login_response = course_specific_login(login_request, 'MITx/999/Robot_Super_Course')
reg_response = course_specific_register(login_request, 'MITx/999/Robot_Super_Course')
if domain and "shib" in domain:
self.assertIsInstance(login_response, HttpResponseRedirect)
self.assertEqual(login_response['Location'],
reverse('shib-login') +
'?course_id=MITx/999/Robot_Super_Course' +
'&enrollment_action=enroll')
self.assertIsInstance(reg_response, HttpResponseRedirect)
self.assertEqual(reg_response['Location'],
reverse('shib-login') +
'?course_id=MITx/999/Robot_Super_Course' +
'&enrollment_action=enroll')
else:
self.assertIsInstance(login_response, HttpResponseRedirect)
self.assertEqual(login_response['Location'],
reverse('signin_user') +
'?course_id=MITx/999/Robot_Super_Course' +
'&enrollment_action=enroll')
self.assertIsInstance(reg_response, HttpResponseRedirect)
self.assertEqual(reg_response['Location'],
reverse('register_user') +
'?course_id=MITx/999/Robot_Super_Course' +
'&enrollment_action=enroll')
# Now test for non-existent course
# setting location to test that GET params get passed through
login_request = self.request_factory.get('/course_specific_login/DNE/DNE/DNE' +
'?course_id=DNE/DNE/DNE' +
'&enrollment_action=enroll')
_reg_request = self.request_factory.get('/course_specific_register/DNE/DNE/DNE' +
'?course_id=DNE/DNE/DNE/Robot_Super_Course' +
'&enrollment_action=enroll')
login_response = course_specific_login(login_request, 'DNE/DNE/DNE')
reg_response = course_specific_register(login_request, 'DNE/DNE/DNE')
self.assertIsInstance(login_response, HttpResponseRedirect)
self.assertEqual(login_response['Location'],
reverse('signin_user') +
'?course_id=DNE/DNE/DNE' +
'&enrollment_action=enroll')
self.assertIsInstance(reg_response, HttpResponseRedirect)
self.assertEqual(reg_response['Location'],
reverse('register_user') +
'?course_id=DNE/DNE/DNE' +
'&enrollment_action=enroll')
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
def test_enrollment_limit_by_domain(self):
"""
Tests that the enrollmentDomain setting is properly limiting enrollment to those who have
the proper external auth
"""
# create 2 courses, one with limited enrollment, one without
shib_course = CourseFactory.create(
org='Stanford',
number='123',
display_name='Shib Only',
enrollment_domain='shib:https://idp.stanford.edu/',
user_id=self.test_user_id,
)
open_enroll_course = CourseFactory.create(
org='MITx',
number='999',
display_name='Robot Super Course',
enrollment_domain='',
user_id=self.test_user_id,
)
# create 3 kinds of students, external_auth matching shib_course, external_auth not matching, no external auth
shib_student = UserFactory.create()
shib_student.save()
extauth = ExternalAuthMap(external_id='testuser@stanford.edu',
external_email='',
external_domain='shib:https://idp.stanford.edu/',
external_credentials="",
user=shib_student)
extauth.save()
other_ext_student = UserFactory.create()
other_ext_student.username = "teststudent2"
other_ext_student.email = "teststudent2@other.edu"
other_ext_student.save()
extauth = ExternalAuthMap(external_id='testuser1@other.edu',
external_email='',
external_domain='shib:https://other.edu/',
external_credentials="",
user=other_ext_student)
extauth.save()
int_student = UserFactory.create()
int_student.username = "teststudent3"
int_student.email = "teststudent3@gmail.com"
int_student.save()
# Tests the two case for courses, limited and not
for course in [shib_course, open_enroll_course]:
for student in [shib_student, other_ext_student, int_student]:
request = self.request_factory.post('/change_enrollment')
request.POST.update({'enrollment_action': 'enroll',
'course_id': course.id.to_deprecated_string()})
request.user = student
response = change_enrollment(request)
# If course is not limited or student has correct shib extauth then enrollment should be allowed
if course is open_enroll_course or student is shib_student:
self.assertEqual(response.status_code, 200)
self.assertTrue(CourseEnrollment.is_enrolled(student, course.id))
else:
self.assertEqual(response.status_code, 400)
self.assertFalse(CourseEnrollment.is_enrolled(student, course.id))
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
def test_shib_login_enrollment(self):
"""
A functionality test that a student with an existing shib login
can auto-enroll in a class with GET or POST params. Also tests the redirection functionality of
the 'next' GET/POST param
"""
student = UserFactory.create()
extauth = ExternalAuthMap(external_id='testuser@stanford.edu',
external_email='',
external_domain='shib:https://idp.stanford.edu/',
external_credentials="",
internal_password="password",
user=student)
student.set_password("password")
student.save()
extauth.save()
course = CourseFactory.create(
org='Stanford',
number='123',
display_name='Shib Only',
enrollment_domain='shib:https://idp.stanford.edu/',
user_id=self.test_user_id,
)
# use django test client for sessions and url processing
# no enrollment before trying
self.assertFalse(CourseEnrollment.is_enrolled(student, course.id))
self.client.logout()
request_kwargs = {'path': '/shib-login/',
'data': {'enrollment_action': 'enroll', 'course_id': course.id.to_deprecated_string(), 'next': '/testredirect'},
'follow': False,
'REMOTE_USER': 'testuser@stanford.edu',
'Shib-Identity-Provider': 'https://idp.stanford.edu/'}
response = self.client.get(**request_kwargs)
# successful login is a redirect to "/"
self.assertEqual(response.status_code, 302)
self.assertEqual(response['location'], 'http://testserver/testredirect')
# now there is enrollment
self.assertTrue(CourseEnrollment.is_enrolled(student, course.id))
# Clean up and try again with POST (doesn't happen with real production shib, doing this for test coverage)
self.client.logout()
CourseEnrollment.unenroll(student, course.id)
self.assertFalse(CourseEnrollment.is_enrolled(student, course.id))
response = self.client.post(**request_kwargs)
# successful login is a redirect to "/"
self.assertEqual(response.status_code, 302)
self.assertEqual(response['location'], 'http://testserver/testredirect')
# now there is enrollment
self.assertTrue(CourseEnrollment.is_enrolled(student, course.id))
class ShibUtilFnTest(TestCase):
"""
Tests util functions in shib module
"""
def test__flatten_to_ascii(self):
DIACRITIC = u"àèìòùÀÈÌÒÙáéíóúýÁÉÍÓÚÝâêîôûÂÊÎÔÛãñõÃÑÕäëïöüÿÄËÏÖÜŸåÅçÇ" # pylint: disable=invalid-name
STR_DIACRI = "àèìòùÀÈÌÒÙáéíóúýÁÉÍÓÚÝâêîôûÂÊÎÔÛãñõÃÑÕäëïöüÿÄËÏÖÜŸåÅçÇ" # pylint: disable=invalid-name
FLATTENED = u"aeiouAEIOUaeiouyAEIOUYaeiouAEIOUanoANOaeiouyAEIOUYaAcC" # pylint: disable=invalid-name
self.assertEqual(_flatten_to_ascii('jasön'), 'jason') # umlaut
self.assertEqual(_flatten_to_ascii('Jason包'), 'Jason') # mandarin, so it just gets dropped
self.assertEqual(_flatten_to_ascii('abc'), 'abc') # pass through
unicode_test = _flatten_to_ascii(DIACRITIC)
self.assertEqual(unicode_test, FLATTENED)
self.assertIsInstance(unicode_test, unicode)
str_test = _flatten_to_ascii(STR_DIACRI)
self.assertEqual(str_test, FLATTENED)
self.assertIsInstance(str_test, str)
| agpl-3.0 |
xujun10110/golismero | thirdparty_libs/geopy/geocoders/yahoo.py | 46 | 3135 | """
Wrapper for Yahoo's PlaceFinder API (the docs describe it as API RELEASE 1.0, 22 JUNE 2010).
"""
import xml.dom.minidom
from geopy import util
from geopy import Point
from urllib import urlencode
from urllib2 import urlopen
from geopy.geocoders.base import Geocoder
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
from django.utils import simplejson as json
class Yahoo(Geocoder):
BASE_URL = "http://where.yahooapis.com/geocode?%s"
def __init__(self, app_id, format_string='%s', output_format=None):
self.app_id = app_id
self.format_string = format_string
if output_format != None:
from warnings import warn
warn('geopy.geocoders.yahoo.Yahoo: The `output_format` parameter is deprecated '+
'and now ignored. JSON will be used internally.', DeprecationWarning)
def geocode(self, string, exactly_one=True):
if isinstance(string, unicode):
string = string.encode('utf-8')
params = {'location': self.format_string % string,
'appid': self.app_id,
'flags': 'J'
}
url = self.BASE_URL % urlencode(params)
util.logger.debug("Fetching %s..." % url)
return self.geocode_url(url, exactly_one)
def geocode_url(self, url, exactly_one=True):
page = urlopen(url)
return self.parse_json(page, exactly_one)
def parse_json(self, page, exactly_one=True):
if not isinstance(page, basestring):
page = util.decode_page(page)
doc = json.loads(page)
results = doc.get('ResultSet', {}).get('Results', [])
if not results:
raise ValueError("No results found")
elif exactly_one and len(results) != 1:
raise ValueError("Didn't find exactly one placemark! " \
"(Found %d.)" % len(results))
def parse_result(place):
line1, line2, line3, line4 = place.get('line1'), place.get('line2'), place.get('line3'), place.get('line4')
address = util.join_filter(", ", [line1, line2, line3, line4])
city = place.get('city')
state = place.get('state')
country = place.get('country')
location = util.join_filter(", ", [address, city, country])
lat, lng = place.get('latitude'), place.get('longitude')
#if lat and lng:
# point = Point(float(lat), float(lng))
#else:
# point = None
return (location, (float(lat), float(lng)))
if exactly_one:
return parse_result(results[0])
else:
return [parse_result(result) for result in results]
def reverse(self, coord, exactly_one=True):
(lat, lng) = coord
params = {'location': '%s,%s' % (lat, lng),
'gflags' : 'R',
'appid': self.app_id,
'flags': 'J'
}
url = self.BASE_URL % urlencode(params)
return self.geocode_url(url, exactly_one)
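# Illustrative usage sketch (assumes a valid PlaceFinder app id and network
# access, so it is not exercised here):
# geocoder = Yahoo(app_id='YOUR_APP_ID')
# place, (lat, lng) = geocoder.geocode('701 First Ave, Sunnyvale, CA')
# address, point = geocoder.reverse((37.416, -122.025))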
| gpl-2.0 |
haveal/googleads-python-lib | examples/dfp/v201505/report_service/run_report_with_custom_fields.py | 4 | 3264 | #!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example runs a report that with custom fields found in the line
items of an order.
"""
import tempfile
# Import appropriate modules from the client library.
from googleads import dfp
from googleads import errors
# Set the ID of the order to get line items from.
ORDER_ID = 'INSERT_ORDER_ID_HERE'
def main(client, order_id):
# Initialize appropriate service.
line_item_service = client.GetService('LineItemService', version='v201505')
# Initialize a DataDownloader.
report_downloader = client.GetDataDownloader(version='v201505')
# Filter for line items of a given order.
values = [{
'key': 'orderId',
'value': {
'xsi_type': 'NumberValue',
'value': order_id
}
}]
query = 'WHERE orderId = :orderId'
# Create a filter statement.
statement = dfp.FilterStatement(query, values)
# Collect all line item custom field IDs for an order.
custom_field_ids = set()
# Get users by statement.
while True:
response = line_item_service.getLineItemsByStatement(
statement.ToStatement())
if 'results' in response:
# Get custom field IDs from the line items of an order.
for line_item in response['results']:
if 'customFieldValues' in line_item:
for custom_field_value in line_item['customFieldValues']:
custom_field_ids.add(custom_field_value['customFieldId'])
statement.offset += dfp.SUGGESTED_PAGE_LIMIT
else:
break
# Create statement object to filter for an order.
filter_statement = {'query': query, 'values': values}
# Create report job.
report_job = {
'reportQuery': {
'dimensions': ['LINE_ITEM_ID', 'LINE_ITEM_NAME'],
'statement': filter_statement,
'columns': ['AD_SERVER_IMPRESSIONS'],
'dateRangeType': 'LAST_MONTH',
'customFieldIds': list(custom_field_ids)
}
}
try:
# Run the report and wait for it to finish.
report_job_id = report_downloader.WaitForReport(report_job)
except errors.DfpReportError, e:
print 'Failed to generate report. Error was: %s' % e
# Change to your preferred export format.
export_format = 'CSV_DUMP'
report_file = tempfile.NamedTemporaryFile(suffix='.csv.gz', delete=False)
# Download report data.
report_downloader.DownloadReportToFile(
report_job_id, export_format, report_file)
report_file.close()
# Display results.
print 'Report job with id \'%s\' downloaded to:\n%s' % (
report_job_id, report_file.name)
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client, ORDER_ID)
| apache-2.0 |
risent/unidecode | unidecode/x0b2.py | 253 | 4680 | data = (
'nyok', # 0x00
'nyot', # 0x01
'nyop', # 0x02
'nyoh', # 0x03
'nu', # 0x04
'nug', # 0x05
'nugg', # 0x06
'nugs', # 0x07
'nun', # 0x08
'nunj', # 0x09
'nunh', # 0x0a
'nud', # 0x0b
'nul', # 0x0c
'nulg', # 0x0d
'nulm', # 0x0e
'nulb', # 0x0f
'nuls', # 0x10
'nult', # 0x11
'nulp', # 0x12
'nulh', # 0x13
'num', # 0x14
'nub', # 0x15
'nubs', # 0x16
'nus', # 0x17
'nuss', # 0x18
'nung', # 0x19
'nuj', # 0x1a
'nuc', # 0x1b
'nuk', # 0x1c
'nut', # 0x1d
'nup', # 0x1e
'nuh', # 0x1f
'nweo', # 0x20
'nweog', # 0x21
'nweogg', # 0x22
'nweogs', # 0x23
'nweon', # 0x24
'nweonj', # 0x25
'nweonh', # 0x26
'nweod', # 0x27
'nweol', # 0x28
'nweolg', # 0x29
'nweolm', # 0x2a
'nweolb', # 0x2b
'nweols', # 0x2c
'nweolt', # 0x2d
'nweolp', # 0x2e
'nweolh', # 0x2f
'nweom', # 0x30
'nweob', # 0x31
'nweobs', # 0x32
'nweos', # 0x33
'nweoss', # 0x34
'nweong', # 0x35
'nweoj', # 0x36
'nweoc', # 0x37
'nweok', # 0x38
'nweot', # 0x39
'nweop', # 0x3a
'nweoh', # 0x3b
'nwe', # 0x3c
'nweg', # 0x3d
'nwegg', # 0x3e
'nwegs', # 0x3f
'nwen', # 0x40
'nwenj', # 0x41
'nwenh', # 0x42
'nwed', # 0x43
'nwel', # 0x44
'nwelg', # 0x45
'nwelm', # 0x46
'nwelb', # 0x47
'nwels', # 0x48
'nwelt', # 0x49
'nwelp', # 0x4a
'nwelh', # 0x4b
'nwem', # 0x4c
'nweb', # 0x4d
'nwebs', # 0x4e
'nwes', # 0x4f
'nwess', # 0x50
'nweng', # 0x51
'nwej', # 0x52
'nwec', # 0x53
'nwek', # 0x54
'nwet', # 0x55
'nwep', # 0x56
'nweh', # 0x57
'nwi', # 0x58
'nwig', # 0x59
'nwigg', # 0x5a
'nwigs', # 0x5b
'nwin', # 0x5c
'nwinj', # 0x5d
'nwinh', # 0x5e
'nwid', # 0x5f
'nwil', # 0x60
'nwilg', # 0x61
'nwilm', # 0x62
'nwilb', # 0x63
'nwils', # 0x64
'nwilt', # 0x65
'nwilp', # 0x66
'nwilh', # 0x67
'nwim', # 0x68
'nwib', # 0x69
'nwibs', # 0x6a
'nwis', # 0x6b
'nwiss', # 0x6c
'nwing', # 0x6d
'nwij', # 0x6e
'nwic', # 0x6f
'nwik', # 0x70
'nwit', # 0x71
'nwip', # 0x72
'nwih', # 0x73
'nyu', # 0x74
'nyug', # 0x75
'nyugg', # 0x76
'nyugs', # 0x77
'nyun', # 0x78
'nyunj', # 0x79
'nyunh', # 0x7a
'nyud', # 0x7b
'nyul', # 0x7c
'nyulg', # 0x7d
'nyulm', # 0x7e
'nyulb', # 0x7f
'nyuls', # 0x80
'nyult', # 0x81
'nyulp', # 0x82
'nyulh', # 0x83
'nyum', # 0x84
'nyub', # 0x85
'nyubs', # 0x86
'nyus', # 0x87
'nyuss', # 0x88
'nyung', # 0x89
'nyuj', # 0x8a
'nyuc', # 0x8b
'nyuk', # 0x8c
'nyut', # 0x8d
'nyup', # 0x8e
'nyuh', # 0x8f
'neu', # 0x90
'neug', # 0x91
'neugg', # 0x92
'neugs', # 0x93
'neun', # 0x94
'neunj', # 0x95
'neunh', # 0x96
'neud', # 0x97
'neul', # 0x98
'neulg', # 0x99
'neulm', # 0x9a
'neulb', # 0x9b
'neuls', # 0x9c
'neult', # 0x9d
'neulp', # 0x9e
'neulh', # 0x9f
'neum', # 0xa0
'neub', # 0xa1
'neubs', # 0xa2
'neus', # 0xa3
'neuss', # 0xa4
'neung', # 0xa5
'neuj', # 0xa6
'neuc', # 0xa7
'neuk', # 0xa8
'neut', # 0xa9
'neup', # 0xaa
'neuh', # 0xab
'nyi', # 0xac
'nyig', # 0xad
'nyigg', # 0xae
'nyigs', # 0xaf
'nyin', # 0xb0
'nyinj', # 0xb1
'nyinh', # 0xb2
'nyid', # 0xb3
'nyil', # 0xb4
'nyilg', # 0xb5
'nyilm', # 0xb6
'nyilb', # 0xb7
'nyils', # 0xb8
'nyilt', # 0xb9
'nyilp', # 0xba
'nyilh', # 0xbb
'nyim', # 0xbc
'nyib', # 0xbd
'nyibs', # 0xbe
'nyis', # 0xbf
'nyiss', # 0xc0
'nying', # 0xc1
'nyij', # 0xc2
'nyic', # 0xc3
'nyik', # 0xc4
'nyit', # 0xc5
'nyip', # 0xc6
'nyih', # 0xc7
'ni', # 0xc8
'nig', # 0xc9
'nigg', # 0xca
'nigs', # 0xcb
'nin', # 0xcc
'ninj', # 0xcd
'ninh', # 0xce
'nid', # 0xcf
'nil', # 0xd0
'nilg', # 0xd1
'nilm', # 0xd2
'nilb', # 0xd3
'nils', # 0xd4
'nilt', # 0xd5
'nilp', # 0xd6
'nilh', # 0xd7
'nim', # 0xd8
'nib', # 0xd9
'nibs', # 0xda
'nis', # 0xdb
'niss', # 0xdc
'ning', # 0xdd
'nij', # 0xde
'nic', # 0xdf
'nik', # 0xe0
'nit', # 0xe1
'nip', # 0xe2
'nih', # 0xe3
'da', # 0xe4
'dag', # 0xe5
'dagg', # 0xe6
'dags', # 0xe7
'dan', # 0xe8
'danj', # 0xe9
'danh', # 0xea
'dad', # 0xeb
'dal', # 0xec
'dalg', # 0xed
'dalm', # 0xee
'dalb', # 0xef
'dals', # 0xf0
'dalt', # 0xf1
'dalp', # 0xf2
'dalh', # 0xf3
'dam', # 0xf4
'dab', # 0xf5
'dabs', # 0xf6
'das', # 0xf7
'dass', # 0xf8
'dang', # 0xf9
'daj', # 0xfa
'dac', # 0xfb
'dak', # 0xfc
'dat', # 0xfd
'dap', # 0xfe
'dah', # 0xff
)
| gpl-2.0 |
asadziach/tensorflow | tensorflow/python/saved_model/builder_impl.py | 14 | 18751 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SavedModel builder implementation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from google.protobuf.any_pb2 import Any
from tensorflow.core.framework import types_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import saved_model_pb2
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging
from tensorflow.python.saved_model import constants
from tensorflow.python.training import saver as tf_saver
from tensorflow.python.util import compat
class SavedModelBuilder(object):
"""Builds the `SavedModel` protocol buffer and saves variables and assets.
The `SavedModelBuilder` class provides functionality to build a `SavedModel`
protocol buffer. Specifically, this allows multiple meta graphs to be saved as
part of a single language-neutral `SavedModel`, while sharing variables and
assets.
To build a SavedModel, the first meta graph must be saved with variables.
Subsequent meta graphs will simply be saved with their graph definitions. If
assets need to be saved and written or copied to disk, they can be provided
when the meta graph def is added. If multiple meta graph defs are associated
with an asset of the same name, only the first version is retained.
Each meta graph added to the SavedModel must be annotated with tags. The tags
provide a means to identify the specific meta graph to load and restore, along
with the shared set of variables and assets.
Typical usage for the `SavedModelBuilder`:
```python
...
builder = saved_model.builder.SavedModelBuilder(export_dir)
with tf.Session(graph=tf.Graph()) as sess:
...
builder.add_meta_graph_and_variables(sess,
["foo-tag"],
signature_def_map=foo_signatures,
assets_collection=foo_assets)
...
with tf.Session(graph=tf.Graph()) as sess:
...
builder.add_meta_graph(["bar-tag", "baz-tag"])
...
builder.save()
```
"""
def __init__(self, export_dir):
self._saved_model = saved_model_pb2.SavedModel()
self._saved_model.saved_model_schema_version = (
constants.SAVED_MODEL_SCHEMA_VERSION)
self._export_dir = export_dir
if file_io.file_exists(export_dir):
raise AssertionError(
"Export directory already exists. Please specify a different export "
"directory: %s" % export_dir)
file_io.recursive_create_dir(self._export_dir)
# Boolean to track whether variables and assets corresponding to the
# SavedModel have been saved. Specifically, the first meta graph to be added
# MUST use the add_meta_graph_and_variables() API. Subsequent add operations
# on the SavedModel MUST use the add_meta_graph() API which does not save
# weights.
self._has_saved_variables = False
def _asset_path_from_tensor(self, path_tensor):
"""Returns the filepath value stored in constant `path_tensor`.
Args:
path_tensor: Tensor of a file-path.
Returns:
The string value i.e. path of the tensor, if valid.
Raises:
TypeError if tensor does not match expected op type, dtype or value.
"""
if not isinstance(path_tensor, ops.Tensor):
raise TypeError("Asset path tensor must be a Tensor.")
if path_tensor.op.type != "Const":
raise TypeError("Asset path tensor must be of type constant.")
if path_tensor.dtype != dtypes.string:
raise TypeError("Asset path tensor must be of dtype string.")
str_values = path_tensor.op.get_attr("value").string_val
if len(str_values) != 1:
raise TypeError("Asset path tensor must be a scalar.")
return str_values[0]
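# Example (illustrative): a valid asset path tensor is a string constant
# such as tf.constant("vocab.txt"); its single string value is returned.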
def _add_asset_to_collection(self, asset_filename, asset_tensor):
"""Builds an asset proto and adds it to the asset collection of the graph.
Args:
asset_filename: The filename of the asset to be added.
asset_tensor: The asset tensor used to populate the tensor info of the
asset proto.
"""
asset_proto = meta_graph_pb2.AssetFileDef()
asset_proto.filename = asset_filename
asset_proto.tensor_info.name = asset_tensor.name
asset_any_proto = Any()
asset_any_proto.Pack(asset_proto)
ops.add_to_collection(constants.ASSETS_KEY, asset_any_proto)
def _save_and_write_assets(self, assets_collection_to_add=None):
"""Saves asset to the meta graph and writes asset files to disk.
Args:
assets_collection_to_add: The collection where the asset paths are setup.
"""
asset_source_filepath_list = self._maybe_save_assets(
assets_collection_to_add)
# Return if there are no assets to write.
if not asset_source_filepath_list:
tf_logging.info("No assets to write.")
return
assets_destination_dir = os.path.join(
compat.as_bytes(self._export_dir),
compat.as_bytes(constants.ASSETS_DIRECTORY))
if not file_io.file_exists(assets_destination_dir):
file_io.recursive_create_dir(assets_destination_dir)
# Copy each asset from source path to destination path.
for asset_source_filepath in asset_source_filepath_list:
asset_source_filename = os.path.basename(asset_source_filepath)
asset_destination_filepath = os.path.join(
compat.as_bytes(assets_destination_dir),
compat.as_bytes(asset_source_filename))
# Only copy the asset file to the destination if it does not already
# exist. This is to ensure that an asset with the same name defined as
# part of multiple graphs is only copied the first time.
if not file_io.file_exists(asset_destination_filepath):
file_io.copy(asset_source_filepath, asset_destination_filepath)
tf_logging.info("Assets written to: %s", assets_destination_dir)
def _maybe_add_legacy_init_op(self, legacy_init_op=None):
"""Add legacy init op to the SavedModel.
Args:
legacy_init_op: Optional legacy init op to support backward compatibility.
Raises:
TypeError: If the legacy init op is not of type `Operation`.
"""
if legacy_init_op is not None:
if not isinstance(legacy_init_op, ops.Operation):
raise TypeError("legacy_init_op needs to be an Operation: %r" %
legacy_init_op)
ops.add_to_collection(constants.LEGACY_INIT_OP_KEY, legacy_init_op)
def _add_main_op(self, main_op):
"""Add main op to the SavedModel.
Args:
main_op: Main op to run as part of graph initialization.
Raises:
TypeError: If the main op is not of type `Operation`.
"""
if main_op is not None:
if not isinstance(main_op, ops.Operation):
raise TypeError("main_op needs to be an Operation: %r" % main_op)
ops.add_to_collection(constants.MAIN_OP_KEY, main_op)
def _maybe_save_assets(self, assets_collection_to_add=None):
"""Saves assets to the meta graph.
Args:
assets_collection_to_add: The collection where the asset paths are setup.
Returns:
The list of filepaths to the assets in the assets collection.
Raises:
ValueError: Indicating an invalid filepath tensor.
"""
asset_source_filepath_list = []
if assets_collection_to_add is None:
tf_logging.info("No assets to save.")
return asset_source_filepath_list
# Iterate over the supplied asset collection, build the `AssetFileDef` proto
# and add them to the collection with key `constants.ASSETS_KEY`, in the
# graph.
for asset_tensor in assets_collection_to_add:
asset_source_filepath = self._asset_path_from_tensor(asset_tensor)
if not asset_source_filepath:
raise ValueError("Invalid asset filepath tensor %s" % asset_tensor)
asset_source_filename = os.path.basename(asset_source_filepath)
# Build `AssetFileDef` proto and add it to the asset collection in the graph.
self._add_asset_to_collection(asset_source_filename, asset_tensor)
asset_source_filepath_list.append(asset_source_filepath)
tf_logging.info("Assets added to graph.")
return asset_source_filepath_list
def _tag_and_add_meta_graph(self, meta_graph_def, tags, signature_def_map):
"""Tags the meta graph def and adds it to the SavedModel.
Tags the meta graph def with the supplied tags, adds signature defs to it if
provided and appends the meta graph def to the SavedModel proto.
Args:
meta_graph_def: The meta graph def to add to the SavedModel.
tags: The set of tags to annotate the meta graph def with.
signature_def_map: The map of signature defs to be added to the meta graph
def.
"""
for tag in tags:
meta_graph_def.meta_info_def.tags.append(tag)
if signature_def_map is not None:
for key in signature_def_map:
meta_graph_def.signature_def[key].CopyFrom(signature_def_map[key])
proto_meta_graph_def = self._saved_model.meta_graphs.add()
proto_meta_graph_def.CopyFrom(meta_graph_def)
def _validate_tensor_info(self, tensor_info):
"""Validates the `TensorInfo` proto.
Checks if the `name` and `dtype` fields exist and are non-empty.
Args:
tensor_info: `TensorInfo` protocol buffer to validate.
Raises:
AssertionError: If the `name` or `dtype` fields of the supplied
`TensorInfo` proto are not populated.
"""
if tensor_info is None:
raise AssertionError(
"All TensorInfo protos used in the SignatureDefs must have the name "
"and dtype fields set.")
if not tensor_info.name:
raise AssertionError(
"All TensorInfo protos used in the SignatureDefs must have the name "
"field set: %s" % tensor_info)
if tensor_info.dtype is types_pb2.DT_INVALID:
raise AssertionError(
"All TensorInfo protos used in the SignatureDefs must have the dtype "
"field set: %s" % tensor_info)
def _validate_signature_def_map(self, signature_def_map):
"""Validates the `SignatureDef` entries in the signature def map.
Validation of entries in the signature def map includes ensuring that the
`name` and `dtype` fields of the TensorInfo protos of the `inputs` and
`outputs` of each `SignatureDef` are populated.
Args:
signature_def_map: The map of signature defs to be validated.
"""
if signature_def_map is not None:
for signature_def_key in signature_def_map:
signature_def = signature_def_map[signature_def_key]
inputs = signature_def.inputs
outputs = signature_def.outputs
for inputs_key in inputs:
self._validate_tensor_info(inputs[inputs_key])
for outputs_key in outputs:
self._validate_tensor_info(outputs[outputs_key])
def add_meta_graph(self,
tags,
signature_def_map=None,
assets_collection=None,
legacy_init_op=None,
clear_devices=False,
main_op=None):
"""Adds the current meta graph to the SavedModel.
Creates a Saver in the current scope and uses the Saver to export the meta
graph def. Invoking this API requires the `add_meta_graph_and_variables()`
API to have been invoked before.
Args:
tags: The set of tags to annotate the meta graph def with.
signature_def_map: The map of signature defs to be added to the meta graph
def.
assets_collection: Assets collection to be saved with SavedModel. Note
that this collection should be a subset of the assets saved as part of
the first meta graph in the SavedModel.
legacy_init_op: Legacy support for op or group of ops to execute after the
restore op upon a load.
clear_devices: Set to true if the device info on the default graph should
be cleared.
main_op: Op or group of ops to execute when the graph is loaded.
Raises:
AssertionError: If the variables for the SavedModel have not been saved
yet.
"""
if not self._has_saved_variables:
raise AssertionError(
"Graph state including variables and assets has not been saved yet. "
"Please invoke `add_meta_graph_and_variables()` first.")
# Validate the signature def map to ensure all included TensorInfos are
# properly populated.
self._validate_signature_def_map(signature_def_map)
# Save asset files and write them to disk, if any.
self._save_and_write_assets(assets_collection)
if main_op is None:
# Add legacy init op to the SavedModel.
self._maybe_add_legacy_init_op(legacy_init_op)
else:
self._add_main_op(main_op)
# Initialize a saver to generate a sharded output for all saveables in the
# current scope.
saver = tf_saver.Saver(
variables._all_saveable_objects(), # pylint: disable=protected-access
sharded=True,
write_version=saver_pb2.SaverDef.V2,
allow_empty=True)
meta_graph_def = saver.export_meta_graph(clear_devices=clear_devices)
# Tag the meta graph def and add it to the SavedModel.
self._tag_and_add_meta_graph(meta_graph_def, tags, signature_def_map)
def add_meta_graph_and_variables(self,
sess,
tags,
signature_def_map=None,
assets_collection=None,
legacy_init_op=None,
clear_devices=False,
main_op=None):
"""Adds the current meta graph to the SavedModel and saves variables.
Creates a Saver to save the variables from the provided session. Exports the
corresponding meta graph def. This function assumes that the variables to be
saved have been initialized. For a given `SavedModelBuilder`, this API must
be called exactly once and for the first meta graph to save. For subsequent
meta graph defs to be added, the `add_meta_graph()` API must be used.
Args:
sess: The TensorFlow session from which to save the meta graph and
variables.
tags: The set of tags with which to save the meta graph.
signature_def_map: The map of signature defs to be added to the meta graph
def.
assets_collection: Assets collection to be saved with SavedModel.
legacy_init_op: Legacy support for op or group of ops to execute after the
restore op upon a load.
clear_devices: Set to true if the device info on the default graph should
be cleared.
main_op: Op or group of ops to execute when the graph is loaded.
"""
if self._has_saved_variables:
raise AssertionError("Graph state including variables and assets has "
"already been saved. Please invoke "
"`add_meta_graph()` instead.")
# Validate the signature def map to ensure all included TensorInfos are
# properly populated.
self._validate_signature_def_map(signature_def_map)
# Save asset files and write them to disk, if any.
self._save_and_write_assets(assets_collection)
# Create the variables sub-directory, if it does not exist.
variables_dir = os.path.join(
compat.as_text(self._export_dir),
compat.as_text(constants.VARIABLES_DIRECTORY))
if not file_io.file_exists(variables_dir):
file_io.recursive_create_dir(variables_dir)
variables_path = os.path.join(
compat.as_text(variables_dir),
compat.as_text(constants.VARIABLES_FILENAME))
if main_op is None:
# Add legacy init op to the SavedModel.
self._maybe_add_legacy_init_op(legacy_init_op)
else:
self._add_main_op(main_op)
# Initialize a saver to generate a sharded output for all saveables in the
# current scope.
saver = tf_saver.Saver(
variables._all_saveable_objects(), # pylint: disable=protected-access
sharded=True,
write_version=saver_pb2.SaverDef.V2,
allow_empty=True)
# Save the variables. Also, disable writing the checkpoint state proto. The
# file is not used during SavedModel loading. In addition, since a
# SavedModel can be copied or moved, this prevents the checkpoint state
# from becoming outdated.
saver.save(sess, variables_path, write_meta_graph=False, write_state=False)
# Export the meta graph def.
meta_graph_def = saver.export_meta_graph(clear_devices=clear_devices)
# Tag the meta graph def and add it to the SavedModel.
self._tag_and_add_meta_graph(meta_graph_def, tags, signature_def_map)
# Mark this instance of SavedModel as having saved variables, such that
# subsequent attempts to save variables will fail.
self._has_saved_variables = True
def save(self, as_text=False):
"""Writes a `SavedModel` protocol buffer to disk.
The function writes the SavedModel protocol buffer to the export directory
in serialized format.
Args:
as_text: Writes the SavedModel protocol buffer in text format to disk.
Returns:
The path to which the SavedModel protocol buffer was written.
"""
if not file_io.file_exists(self._export_dir):
file_io.recursive_create_dir(self._export_dir)
if as_text:
path = os.path.join(
compat.as_bytes(self._export_dir),
compat.as_bytes(constants.SAVED_MODEL_FILENAME_PBTXT))
file_io.write_string_to_file(path, str(self._saved_model))
else:
path = os.path.join(
compat.as_bytes(self._export_dir),
compat.as_bytes(constants.SAVED_MODEL_FILENAME_PB))
file_io.write_string_to_file(path, self._saved_model.SerializeToString())
tf_logging.info("SavedModel written to: %s", path)
return path
| apache-2.0 |
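Pulling the builder API above together, here is a minimal, hedged usage sketch against the TF 1.x `tf.saved_model.builder` interface; the export path and tag names are illustrative, not taken from the source:

```python
# Minimal sketch, assuming the TF 1.x SavedModelBuilder API shown above.
import tensorflow as tf

builder = tf.saved_model.builder.SavedModelBuilder("/tmp/saved_model_demo")

with tf.Session(graph=tf.Graph()) as sess:
    x = tf.placeholder(tf.float32, name="x")
    w = tf.Variable(2.0, name="w")
    y = tf.multiply(x, w, name="y")
    sess.run(tf.global_variables_initializer())
    # The first meta graph must be added together with the variables.
    builder.add_meta_graph_and_variables(sess, ["serve"])

with tf.Session(graph=tf.Graph()) as sess:
    # Later meta graphs reuse the already-saved variables.
    builder.add_meta_graph(["train"])

builder.save()  # writes saved_model.pb under the export directory
```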
ran5515/DeepDecision | tensorflow/examples/tutorials/input_fn/boston.py | 76 | 2920 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DNNRegressor with custom input_fn for Housing dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import pandas as pd
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
COLUMNS = ["crim", "zn", "indus", "nox", "rm", "age",
"dis", "tax", "ptratio", "medv"]
FEATURES = ["crim", "zn", "indus", "nox", "rm",
"age", "dis", "tax", "ptratio"]
LABEL = "medv"
def get_input_fn(data_set, num_epochs=None, shuffle=True):
return tf.estimator.inputs.pandas_input_fn(
x=pd.DataFrame({k: data_set[k].values for k in FEATURES}),
y=pd.Series(data_set[LABEL].values),
num_epochs=num_epochs,
shuffle=shuffle)
def main(unused_argv):
# Load datasets
training_set = pd.read_csv("boston_train.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
test_set = pd.read_csv("boston_test.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
# Set of 6 examples for which to predict median house values
prediction_set = pd.read_csv("boston_predict.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
# Feature cols
feature_cols = [tf.feature_column.numeric_column(k) for k in FEATURES]
# Build a 2-layer fully connected DNN with 10 units in each hidden layer.
regressor = tf.estimator.DNNRegressor(feature_columns=feature_cols,
hidden_units=[10, 10],
model_dir="/tmp/boston_model")
# Train
regressor.train(input_fn=get_input_fn(training_set), steps=5000)
# Evaluate loss over one epoch of test_set.
ev = regressor.evaluate(
input_fn=get_input_fn(test_set, num_epochs=1, shuffle=False))
loss_score = ev["loss"]
print("Loss: {0:f}".format(loss_score))
# Print out predictions over a slice of prediction_set.
y = regressor.predict(
input_fn=get_input_fn(prediction_set, num_epochs=1, shuffle=False))
# .predict() returns an iterator of dicts; convert to a list and print
# predictions
predictions = list(p["predictions"] for p in itertools.islice(y, 6))
print("Predictions: {}".format(str(predictions)))
if __name__ == "__main__":
tf.app.run()
| apache-2.0 |
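As a quick illustration of the `input_fn` contract used above, here is a hedged sketch of what `pandas_input_fn` yields when invoked; the column values are made up for the example:

```python
import pandas as pd
import tensorflow as tf

df = pd.DataFrame({"crim": [0.1, 0.2], "zn": [18.0, 0.0],
                   "medv": [24.0, 21.6]})
input_fn = tf.estimator.inputs.pandas_input_fn(
    x=df[["crim", "zn"]], y=df["medv"], num_epochs=1, shuffle=False)
features, labels = input_fn()   # dict of feature tensors, label tensor
print(sorted(features.keys()))  # ['crim', 'zn']
```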
Erebot/Module_GoF | docs/src/conf.py | 31 | 1241 | # -*- coding: utf-8 -*-
import os
import stat
from os.path import join, abspath
from subprocess import call
def prepare(globs, locs):
# RTD defaults the current working directory to where conf.py resides.
# In our case, that means <root>/docs/src/.
cwd = os.getcwd()
root = abspath(join(cwd, '..', '..'))
os.chdir(root)
# Download the PHP binary & composer.phar if necessary
base = 'https://github.com/Erebot/Buildenv/releases/download/1.4.0'
for f in ('php', 'composer.phar'):
call(['curl', '-L', '-z', f, '-o', f, '%s/%s' % (base, f)])
# Make sure the PHP interpreter is executable
os.chmod('./php', stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
# Call composer to download/update dependencies as necessary
os.environ['COMPOSER_CACHE_DIR'] = './cache'
call(['./php', 'composer.phar', 'update', '-n', '--ignore-platform-reqs',
'--no-progress'], env=os.environ)
# Load the second-stage configuration file.
os.chdir(cwd)
conf = join(root, 'vendor', 'erebot', 'buildenv', 'sphinx', 'rtd.py')
print "Including the second configuration file (%s)..." % (conf, )
exec(compile(open(conf).read(), conf, 'exec'), globs, locs)
prepare(globals(), locals())
| gpl-3.0 |
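The `exec(compile(...))` call above is a common two-stage configuration trick: the first-stage conf.py downloads dependencies and then runs a second file so that the names it defines land in the caller's namespace. A hedged, minimal reproduction (the file name is hypothetical):

```python
# Minimal sketch of the pattern: run a second config file so the names it
# defines become visible here (as Sphinx expects of a conf.py module).
second_stage = "rtd_stage2.py"  # hypothetical path
with open(second_stage) as fh:
    code = compile(fh.read(), second_stage, 'exec')
exec(code, globals(), locals())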
pywirrarika/smtwixes | wixnlp/seg.py | 2 | 2752 | #!/usr/bin/env python3
# Copyright (C) 2016.
# Author: Jesús Manuel Mager Hois
# e-mail: <fongog@gmail.com>
# Project website: http://turing.iimas.unam.mx/wix/
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import codecs
from .wmorph import Verb
class Word:
def __init__(self):
F = codecs.open("data/dic", mode="r", encoding="utf-8")
self.dic = {}
self.symbols = '!¡"¿?,.'
line = F.readline()
while line:
line = line.split()
if line:
wr = line[0].replace('\n', '')
self.dic[wr] = line
line = F.readline()
def checkdic(self,word):
if word in self.dic.keys():
return (word, 1)
else:
if word in self.symbols:
return (word, 1)
else:
return (word, 0)
def segment(line, joinm=1, s=0):
debug=0
F = codecs.open("data/dic", mode="r", encoding="utf-8")
w = Word()
tokens = line.split()
words = []
for token in tokens:
word = Verb(token, debug=debug)
typ = w.checkdic(token)
if debug:
print(typ)
if typ[1] == 1:
if debug:
print("NOT VERB:", token)
words.append(token)
else:
print("VERB:", token, str(word.paths))
pathsize = 100000
chpath=0
for p in word.paths:
if len(p) < pathsize:
chpath = p
pathsize = len(p)
if chpath:
for affix in chpath:
if not s:
words.append(str(affix[1])+str(affix[0]))
else:
words.append(str(affix[1]))
else:
words.append(token)
if joinm:
return " ".join(words)
return words
def segtext(text, s=0):
lines = text.split("\n")
seglines = []
for line in lines:
seglines.append(segment(line, s=s))
return "\n".join(seglines)
if __name__ == "__main__":
alo = "nanait+a"
words = segment(alo)
print(words)
| gpl-3.0 |
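A hedged usage sketch for the segmenter above; the actual output depends on `data/dic` and the `Verb` analyzer, so the calls below are illustrative only:

```python
from wixnlp.seg import segment, segtext

print(segment("nanait+a"))            # one line, morphemes space-joined
print(segment("nanait+a", s=1))       # s=1 emits a reduced form per morpheme
print(segtext("line one\nline two"))  # segments a whole text line by line
```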
codeforsanjose/calischools | schools/serializers.py | 1 | 1123 | from rest_framework import serializers
from .models import County, District, School
class CountySerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = County
class CountyMixin(serializers.Serializer):
county = CountySerializer(read_only=True)
class DistrictCompactSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = District
exclude = ('county',)
class DistrictSerializer(CountyMixin,
serializers.HyperlinkedModelSerializer):
class Meta:
model = District
class DistrictCompactMixin(serializers.Serializer):
district = DistrictCompactSerializer(read_only=True)
class SchoolCompactSerializer(serializers.HyperlinkedModelSerializer):
short_code = serializers.ReadOnlyField()
class Meta:
model = School
fields = ('url', 'short_code', 'name',)
class SchoolSerializer(DistrictCompactMixin,
CountyMixin,
serializers.HyperlinkedModelSerializer):
short_code = serializers.ReadOnlyField()
class Meta:
model = School
| mit |
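A hedged sketch of how these hyperlinked serializers would be driven from a DRF view; the `request` in the context is required so the URL fields can be built, and the view function itself is hypothetical:

```python
from rest_framework.decorators import api_view
from rest_framework.response import Response

from schools.models import School
from schools.serializers import SchoolSerializer

@api_view(['GET'])
def school_detail(request, pk):
    school = School.objects.get(pk=pk)
    serializer = SchoolSerializer(school, context={'request': request})
    # Nested county/district data comes from the mixins above.
    return Response(serializer.data)
```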
mdaniel/intellij-community | python/helpers/py2only/docutils/parsers/rst/directives/tables.py | 86 | 20344 | # $Id: tables.py 7747 2014-03-20 10:51:10Z milde $
# Authors: David Goodger <goodger@python.org>; David Priest
# Copyright: This module has been placed in the public domain.
"""
Directives for table elements.
"""
__docformat__ = 'reStructuredText'
import sys
import os.path
import csv
from docutils import io, nodes, statemachine, utils
from docutils.utils.error_reporting import SafeString
from docutils.utils import SystemMessagePropagation
from docutils.parsers.rst import Directive
from docutils.parsers.rst import directives
class Table(Directive):
"""
Generic table base class.
"""
optional_arguments = 1
final_argument_whitespace = True
option_spec = {'class': directives.class_option,
'name': directives.unchanged}
has_content = True
def make_title(self):
if self.arguments:
title_text = self.arguments[0]
text_nodes, messages = self.state.inline_text(title_text,
self.lineno)
title = nodes.title(title_text, '', *text_nodes)
else:
title = None
messages = []
return title, messages
def process_header_option(self):
source = self.state_machine.get_source(self.lineno - 1)
table_head = []
max_header_cols = 0
if 'header' in self.options: # separate table header in option
rows, max_header_cols = self.parse_csv_data_into_rows(
self.options['header'].split('\n'), self.HeaderDialect(),
source)
table_head.extend(rows)
return table_head, max_header_cols
def check_table_dimensions(self, rows, header_rows, stub_columns):
if len(rows) < header_rows:
error = self.state_machine.reporter.error(
'%s header row(s) specified but only %s row(s) of data '
'supplied ("%s" directive).'
% (header_rows, len(rows), self.name), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
if len(rows) == header_rows > 0:
error = self.state_machine.reporter.error(
'Insufficient data supplied (%s row(s)); no data remaining '
'for table body, required by "%s" directive.'
% (len(rows), self.name), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
for row in rows:
if len(row) < stub_columns:
error = self.state_machine.reporter.error(
'%s stub column(s) specified but only %s columns(s) of '
'data supplied ("%s" directive).' %
(stub_columns, len(row), self.name), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
if len(row) == stub_columns > 0:
error = self.state_machine.reporter.error(
'Insufficient data supplied (%s columns(s)); no data remaining '
'for table body, required by "%s" directive.'
% (len(row), self.name), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
def get_column_widths(self, max_cols):
if 'widths' in self.options:
col_widths = self.options['widths']
if len(col_widths) != max_cols:
error = self.state_machine.reporter.error(
'"%s" widths do not match the number of columns in table '
'(%s).' % (self.name, max_cols), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
elif max_cols:
col_widths = [100 // max_cols] * max_cols
else:
error = self.state_machine.reporter.error(
'No table data detected in CSV file.', nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
return col_widths
def extend_short_rows_with_empty_cells(self, columns, parts):
for part in parts:
for row in part:
if len(row) < columns:
row.extend([(0, 0, 0, [])] * (columns - len(row)))
class RSTTable(Table):
def run(self):
if not self.content:
warning = self.state_machine.reporter.warning(
'Content block expected for the "%s" directive; none found.'
% self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
return [warning]
title, messages = self.make_title()
node = nodes.Element() # anonymous container for parsing
self.state.nested_parse(self.content, self.content_offset, node)
if len(node) != 1 or not isinstance(node[0], nodes.table):
error = self.state_machine.reporter.error(
'Error parsing content block for the "%s" directive: exactly '
'one table expected.' % self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
return [error]
table_node = node[0]
table_node['classes'] += self.options.get('class', [])
self.add_name(table_node)
if title:
table_node.insert(0, title)
return [table_node] + messages
class CSVTable(Table):
option_spec = {'header-rows': directives.nonnegative_int,
'stub-columns': directives.nonnegative_int,
'header': directives.unchanged,
'widths': directives.positive_int_list,
'file': directives.path,
'url': directives.uri,
'encoding': directives.encoding,
'class': directives.class_option,
'name': directives.unchanged,
# field delimiter char
'delim': directives.single_char_or_whitespace_or_unicode,
# treat whitespace after delimiter as significant
'keepspace': directives.flag,
# text field quote/unquote char:
'quote': directives.single_char_or_unicode,
# char used to escape delim & quote as-needed:
'escape': directives.single_char_or_unicode,}
class DocutilsDialect(csv.Dialect):
"""CSV dialect for `csv_table` directive."""
delimiter = ','
quotechar = '"'
doublequote = True
skipinitialspace = True
strict = True
lineterminator = '\n'
quoting = csv.QUOTE_MINIMAL
def __init__(self, options):
if 'delim' in options:
self.delimiter = CSVTable.encode_for_csv(options['delim'])
if 'keepspace' in options:
self.skipinitialspace = False
if 'quote' in options:
self.quotechar = CSVTable.encode_for_csv(options['quote'])
if 'escape' in options:
self.doublequote = False
self.escapechar = CSVTable.encode_for_csv(options['escape'])
csv.Dialect.__init__(self)
class HeaderDialect(csv.Dialect):
"""CSV dialect to use for the "header" option data."""
delimiter = ','
quotechar = '"'
escapechar = '\\'
doublequote = False
skipinitialspace = True
strict = True
lineterminator = '\n'
quoting = csv.QUOTE_MINIMAL
def check_requirements(self):
pass
def run(self):
try:
if (not self.state.document.settings.file_insertion_enabled
and ('file' in self.options
or 'url' in self.options)):
warning = self.state_machine.reporter.warning(
'File and URL access deactivated; ignoring "%s" '
'directive.' % self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
return [warning]
self.check_requirements()
title, messages = self.make_title()
csv_data, source = self.get_csv_data()
table_head, max_header_cols = self.process_header_option()
rows, max_cols = self.parse_csv_data_into_rows(
csv_data, self.DocutilsDialect(self.options), source)
max_cols = max(max_cols, max_header_cols)
header_rows = self.options.get('header-rows', 0)
stub_columns = self.options.get('stub-columns', 0)
self.check_table_dimensions(rows, header_rows, stub_columns)
table_head.extend(rows[:header_rows])
table_body = rows[header_rows:]
col_widths = self.get_column_widths(max_cols)
self.extend_short_rows_with_empty_cells(max_cols,
(table_head, table_body))
except SystemMessagePropagation, detail:
return [detail.args[0]]
except csv.Error, detail:
message = str(detail)
if sys.version_info < (3,) and '1-character string' in message:
message += '\nwith Python 2.x this must be an ASCII character.'
error = self.state_machine.reporter.error(
'Error with CSV data in "%s" directive:\n%s'
% (self.name, message), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
return [error]
table = (col_widths, table_head, table_body)
table_node = self.state.build_table(table, self.content_offset,
stub_columns)
table_node['classes'] += self.options.get('class', [])
self.add_name(table_node)
if title:
table_node.insert(0, title)
return [table_node] + messages
def get_csv_data(self):
"""
Get CSV data from the directive content, from an external
file, or from a URL reference.
"""
encoding = self.options.get(
'encoding', self.state.document.settings.input_encoding)
error_handler = self.state.document.settings.input_encoding_error_handler
if self.content:
# CSV data is from directive content.
if 'file' in self.options or 'url' in self.options:
error = self.state_machine.reporter.error(
'"%s" directive may not both specify an external file and'
' have content.' % self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
source = self.content.source(0)
csv_data = self.content
elif 'file' in self.options:
# CSV data is from an external file.
if 'url' in self.options:
error = self.state_machine.reporter.error(
'The "file" and "url" options may not be simultaneously'
' specified for the "%s" directive.' % self.name,
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(error)
source_dir = os.path.dirname(
os.path.abspath(self.state.document.current_source))
source = os.path.normpath(os.path.join(source_dir,
self.options['file']))
source = utils.relative_path(None, source)
try:
self.state.document.settings.record_dependencies.add(source)
csv_file = io.FileInput(source_path=source,
encoding=encoding,
error_handler=error_handler)
csv_data = csv_file.read().splitlines()
except IOError, error:
severe = self.state_machine.reporter.severe(
u'Problems with "%s" directive path:\n%s.'
% (self.name, SafeString(error)),
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(severe)
elif 'url' in self.options:
# CSV data is from a URL.
# Do not import urllib2 at the top of the module because
# it may fail due to broken SSL dependencies, and it takes
# about 0.15 seconds to load.
import urllib2
source = self.options['url']
try:
csv_text = urllib2.urlopen(source).read()
except (urllib2.URLError, IOError, OSError, ValueError), error:
severe = self.state_machine.reporter.severe(
'Problems with "%s" directive URL "%s":\n%s.'
% (self.name, self.options['url'], SafeString(error)),
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(severe)
csv_file = io.StringInput(
source=csv_text, source_path=source, encoding=encoding,
error_handler=(self.state.document.settings.\
input_encoding_error_handler))
csv_data = csv_file.read().splitlines()
else:
error = self.state_machine.reporter.warning(
'The "%s" directive requires content; none supplied.'
% self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
return csv_data, source
if sys.version_info < (3,):
# 2.x csv module doesn't do Unicode
def decode_from_csv(s):
return s.decode('utf-8')
def encode_for_csv(s):
return s.encode('utf-8')
else:
def decode_from_csv(s):
return s
def encode_for_csv(s):
return s
decode_from_csv = staticmethod(decode_from_csv)
encode_for_csv = staticmethod(encode_for_csv)
def parse_csv_data_into_rows(self, csv_data, dialect, source):
# csv.py doesn't do Unicode; encode temporarily as UTF-8
csv_reader = csv.reader([self.encode_for_csv(line + '\n')
for line in csv_data],
dialect=dialect)
rows = []
max_cols = 0
for row in csv_reader:
row_data = []
for cell in row:
# decode UTF-8 back to Unicode
cell_text = self.decode_from_csv(cell)
cell_data = (0, 0, 0, statemachine.StringList(
cell_text.splitlines(), source=source))
row_data.append(cell_data)
rows.append(row_data)
max_cols = max(max_cols, len(row))
return rows, max_cols
class ListTable(Table):
"""
Implement tables whose data is encoded as a uniform two-level bullet list.
For further ideas, see
http://docutils.sf.net/docs/dev/rst/alternatives.html#list-driven-tables
"""
option_spec = {'header-rows': directives.nonnegative_int,
'stub-columns': directives.nonnegative_int,
'widths': directives.positive_int_list,
'class': directives.class_option,
'name': directives.unchanged}
def run(self):
if not self.content:
error = self.state_machine.reporter.error(
'The "%s" directive is empty; content required.' % self.name,
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
return [error]
title, messages = self.make_title()
node = nodes.Element() # anonymous container for parsing
self.state.nested_parse(self.content, self.content_offset, node)
try:
num_cols, col_widths = self.check_list_content(node)
table_data = [[item.children for item in row_list[0]]
for row_list in node[0]]
header_rows = self.options.get('header-rows', 0)
stub_columns = self.options.get('stub-columns', 0)
self.check_table_dimensions(table_data, header_rows, stub_columns)
except SystemMessagePropagation, detail:
return [detail.args[0]]
table_node = self.build_table_from_list(table_data, col_widths,
header_rows, stub_columns)
table_node['classes'] += self.options.get('class', [])
self.add_name(table_node)
if title:
table_node.insert(0, title)
return [table_node] + messages
def check_list_content(self, node):
if len(node) != 1 or not isinstance(node[0], nodes.bullet_list):
error = self.state_machine.reporter.error(
'Error parsing content block for the "%s" directive: '
'exactly one bullet list expected.' % self.name,
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(error)
list_node = node[0]
# Check for a uniform two-level bullet list:
for item_index in range(len(list_node)):
item = list_node[item_index]
if len(item) != 1 or not isinstance(item[0], nodes.bullet_list):
error = self.state_machine.reporter.error(
'Error parsing content block for the "%s" directive: '
'two-level bullet list expected, but row %s does not '
'contain a second-level bullet list.'
% (self.name, item_index + 1), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
elif item_index:
# ATTN pychecker users: num_cols is guaranteed to be set in the
# "else" clause below for item_index==0, before this branch is
# triggered.
if len(item[0]) != num_cols:
error = self.state_machine.reporter.error(
'Error parsing content block for the "%s" directive: '
'uniform two-level bullet list expected, but row %s '
'does not contain the same number of items as row 1 '
'(%s vs %s).'
% (self.name, item_index + 1, len(item[0]), num_cols),
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(error)
else:
num_cols = len(item[0])
col_widths = self.get_column_widths(num_cols)
return num_cols, col_widths
def build_table_from_list(self, table_data, col_widths, header_rows, stub_columns):
table = nodes.table()
tgroup = nodes.tgroup(cols=len(col_widths))
table += tgroup
for col_width in col_widths:
colspec = nodes.colspec(colwidth=col_width)
if stub_columns:
colspec.attributes['stub'] = 1
stub_columns -= 1
tgroup += colspec
rows = []
for row in table_data:
row_node = nodes.row()
for cell in row:
entry = nodes.entry()
entry += cell
row_node += entry
rows.append(row_node)
if header_rows:
thead = nodes.thead()
thead.extend(rows[:header_rows])
tgroup += thead
tbody = nodes.tbody()
tbody.extend(rows[header_rows:])
tgroup += tbody
return table
| apache-2.0 |
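To see the `CSVTable` directive above in action, a hedged sketch that renders a small csv-table through the docutils publisher; the source string and writer choice are illustrative:

```python
from docutils.core import publish_string

source = '''\
.. csv-table:: Example table
   :header-rows: 1

   "name", "value"
   "foo", 1
   "bar", 2
'''
# 'pseudoxml' prints the parsed document tree, table node included.
print(publish_string(source, writer_name='pseudoxml'))
```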
0k/OpenUpgrade | addons/hr_expense/__openerp__.py | 52 | 2896 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Expense Tracker',
'version': '1.0',
'category': 'Human Resources',
'sequence': 29,
'summary': 'Expenses Validation, Invoicing',
'description': """
Manage expenses by Employees
============================
This application allows you to manage your employees' daily expenses. It gives you access to your employees’ fee notes and gives you the right to complete and validate or refuse the notes. After validation, it creates an invoice for the employee.
Employee can encode their own expenses and the validation flow puts it automatically in the accounting after validation by managers.
The whole flow is implemented as:
---------------------------------
* Draft expense
* Confirmation of the sheet by the employee
* Validation by his manager
* Validation by the accountant and accounting entries creation
This module also uses analytic accounting and is compatible with the invoice on timesheet module, so that you are able to automatically re-invoice your customers' expenses if you work by project.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/expenses',
'images': ['images/hr_expenses_analysis.jpeg', 'images/hr_expenses.jpeg'],
'depends': ['hr', 'account_accountant', 'report'],
'data': [
'security/ir.model.access.csv',
'hr_expense_data.xml',
'hr_expense_sequence.xml',
'hr_expense_workflow.xml',
'hr_expense_view.xml',
'hr_expense_report.xml',
'security/ir_rule.xml',
'report/hr_expense_report_view.xml',
'hr_expense_installer_view.xml',
'views/report_expense.xml',
],
'demo': ['hr_expense_demo.xml'],
'test': [
'test/expense_demo.yml',
'test/expense_process.yml',
],
'installable': True,
'auto_install': False,
'application': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ddzialak/boto | boto/sqs/bigmessage.py | 170 | 4729 | # Copyright (c) 2013 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import uuid
import boto
from boto.sqs.message import RawMessage
from boto.exception import SQSDecodeError
class BigMessage(RawMessage):
"""
The BigMessage class provides large payloads (up to 5GB)
by storing the payload itself in S3 and then placing a reference
to the S3 object in the actual SQS message payload.
To create a BigMessage, you should create a BigMessage object
and pass in a file-like object as the ``body`` param and also
pass in the an S3 URL specifying the bucket in which to store
the message body::
import boto.sqs
from boto.sqs.bigmessage import BigMessage
sqs = boto.sqs.connect_to_region('us-west-2')
queue = sqs.get_queue('myqueue')
fp = open('/path/to/bigmessage/data')
msg = BigMessage(queue, fp, 's3://mybucket')
queue.write(msg)
Passing in a fully-qualified S3 URL (e.g. s3://mybucket/foo)
is interpreted to mean that the body of the message is already
stored in S3 and that S3 URL is then used directly with no
content uploaded by BigMessage.
"""
def __init__(self, queue=None, body=None, s3_url=None):
self.s3_url = s3_url
super(BigMessage, self).__init__(queue, body)
def _get_bucket_key(self, s3_url):
bucket_name = key_name = None
if s3_url:
if s3_url.startswith('s3://'):
# We need to split out the bucket from the key (if
# supplied). We also have to be aware that someone
# may provide a trailing '/' character as in:
# s3://foo/ and we want to handle that.
s3_components = s3_url[5:].split('/', 1)
bucket_name = s3_components[0]
if len(s3_components) > 1:
if s3_components[1]:
key_name = s3_components[1]
else:
msg = 's3_url parameter should start with s3://'
raise SQSDecodeError(msg, self)
return bucket_name, key_name
def encode(self, value):
"""
:type value: file-like object
:param value: A file-like object containing the content
of the message. The actual content will be stored
in S3 and a link to the S3 object will be stored in
the message body.
"""
bucket_name, key_name = self._get_bucket_key(self.s3_url)
if bucket_name and key_name:
return self.s3_url
key_name = uuid.uuid4()
s3_conn = boto.connect_s3()
s3_bucket = s3_conn.get_bucket(bucket_name)
key = s3_bucket.new_key(key_name)
key.set_contents_from_file(value)
self.s3_url = 's3://%s/%s' % (bucket_name, key_name)
return self.s3_url
def _get_s3_object(self, s3_url):
bucket_name, key_name = self._get_bucket_key(s3_url)
if bucket_name and key_name:
s3_conn = boto.connect_s3()
s3_bucket = s3_conn.get_bucket(bucket_name)
key = s3_bucket.get_key(key_name)
return key
else:
msg = 'Unable to decode S3 URL: %s' % s3_url
raise SQSDecodeError(msg, self)
def decode(self, value):
self.s3_url = value
key = self._get_s3_object(value)
return key.get_contents_as_string()
def delete(self):
# Delete the object in S3 first, then delete the SQS message
if self.s3_url:
key = self._get_s3_object(self.s3_url)
key.delete()
super(BigMessage, self).delete()
| mit |
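Complementing the write example in the class docstring, a hedged sketch of reading a `BigMessage` back; the queue and region names are illustrative:

```python
import boto.sqs
from boto.sqs.bigmessage import BigMessage

sqs = boto.sqs.connect_to_region('us-west-2')
queue = sqs.get_queue('myqueue')
queue.set_message_class(BigMessage)  # so reads decode via S3

messages = queue.get_messages(1)
if messages:
    body = messages[0].get_body()  # fetched transparently from S3
    messages[0].delete()           # deletes the S3 object, then the message
```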
stiartsly/pjsip | pjsip-apps/src/swig/importsym.py | 20 | 5108 | # $Id: importsym.py 4704 2014-01-16 05:30:46Z ming $
#
# importsym.py: Import C symbol decls (structs, enums, etc) and write them
# to another file
#
# Copyright (C)2013 Teluu Inc. (http://www.teluu.com)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import pycparser
from pycparser import c_generator
import sys
import os
def which(program):
import os
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
if sys.platform == 'win32' and not program.endswith(".exe"):
program += ".exe"
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
#
PJ_ROOT_PATH = "../../../"
# CPP is needed by pycparser.
CPP_PATH = which("cpp")
if not CPP_PATH:
print 'Error: need to have cpp in PATH'
sys.exit(1)
# Hardcoded!
if sys.platform == 'win32':
PYCPARSER_DIR="C:/devs/tools/pycparser"
elif sys.platform == "linux2":
PYCPARSER_DIR="/home/bennylp/Desktop/opt/src/pycparser-master"
else:
PYCPARSER_DIR="/Library/Python/2.7/site-packages/pycparser"
if not os.path.exists(PYCPARSER_DIR + '/utils/fake_libc_include'):
print "Error: couldn't find pycparser utils in '%s'" % PYPARSER_DIR
sys.exit(1)
# Heading, to be placed before the source files
C_HEADING_SECTION = """
#define PJ_AUTOCONF 1
#define jmp_buf int
#define __attribute__(x)
"""
# CPP (C preprocessor) settings
CPP_CFLAGS = [
'-I' + PYCPARSER_DIR + '/utils/fake_libc_include',
"-I" + PJ_ROOT_PATH + "pjlib/include",
"-I" + PJ_ROOT_PATH + "pjlib-util/include",
"-I" + PJ_ROOT_PATH + "pjnath/include",
"-I" + PJ_ROOT_PATH + "pjmedia/include",
"-I" + PJ_ROOT_PATH + "pjsip/include"
]
class SymbolVisitor(pycparser.c_ast.NodeVisitor):
def __init__(self, names):
self.nodeDict = {}
for name in names:
self.nodeDict[name] = None
def _add(self, node):
if self.nodeDict.has_key(node.name):
self.nodeDict[node.name] = node
def visit_Struct(self, node):
self._add(node)
def visit_Enum(self, node):
self._add(node)
def visit_Typename(self, node):
self._add(node)
def visit_Typedef(self, node):
self._add(node)
TEMP_FILE="tmpsrc.h"
class SymbolImporter:
"""
Import C selected declarations from C source file and move it
to another file.
Parameters:
- listfile Path of file containing list of C source file
and identifier names to be imported. The format
of the listfile is:
filename name1 name2 name3
for example:
pj/sock_qos.h pj_qos_type pj_qos_flag
pj/types.h pj_status_t PJ_SUCCESS
"""
def __init__(self):
pass
def process(self, listfile, outfile):
# Read listfile
f = open(listfile)
lines = f.readlines()
f.close()
# Process each line in list file, while generating the
# temporary C file to be processed by pycparser
f = open(TEMP_FILE, "w")
f.write(C_HEADING_SECTION)
names = []
fcnt = 0
for line in lines:
spec = line.split()
if len(spec) < 2:
continue
fcnt += 1
f.write("#include <%s>\n" % spec[0])
names.extend(spec[1:])
f.close()
print 'Parsing %d symbols from %d files..' % (len(names), fcnt)
# Parse the temporary C file
ast = pycparser.parse_file(TEMP_FILE, use_cpp=True, cpp_path=CPP_PATH, cpp_args=CPP_CFLAGS)
os.remove(TEMP_FILE)
# Filter the declarations that we wanted
print 'Filtering..'
visitor = SymbolVisitor(names)
visitor.visit(ast)
# Print symbol declarations to outfile
print 'Writing declarations..'
f = open(outfile, 'w')
f.write("// This file is autogenerated by importsym script, do not modify!\n\n")
gen = pycparser.c_generator.CGenerator()
for name in names:
node = visitor.nodeDict[name]
if not node:
print " ** Warning: declaration for '%s' is not found **" % k
else:
print " writing '%s'.." % name
output = gen.visit(node) + ";\n\n"
f.write(output)
f.close()
print "Done."
if __name__ == "__main__":
print "Importing symbols: 'symbols.lst' --> 'symbols.i'"
si = SymbolImporter()
si.process("symbols.lst", "symbols.i")
try:
os.remove("lextab.py")
except OSError:
pass
try:
os.remove("yacctab.py")
except OSError:
pass
| gpl-2.0 |
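A hedged sketch of driving `SymbolImporter` by hand; the list-file contents follow the format documented in the class docstring, and the header paths and symbol names are illustrative:

```python
listing = ("pj/sock_qos.h pj_qos_type pj_qos_flag\n"
           "pj/types.h pj_status_t\n")
with open("symbols.lst", "w") as f:
    f.write(listing)

si = SymbolImporter()
si.process("symbols.lst", "symbols.i")  # writes the filtered declarations
```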
marcoantoniooliveira/labweb | tests/integration/core/validator_tests.py | 62 | 2408 | from django.test import TestCase
from django.core.exceptions import ValidationError
from django.contrib.flatpages.models import FlatPage
from oscar.core.validators import ExtendedURLValidator
from oscar.core.validators import URLDoesNotExistValidator
class TestExtendedURLValidatorWithVerifications(TestCase):
"""
ExtendedURLValidator with verify_exists=True
"""
def setUp(self):
self.validator = ExtendedURLValidator(verify_exists=True)
def test_validates_local_url(self):
try:
self.validator('/')
except ValidationError:
self.fail('ExtendedURLValidator raised ValidationError'
'unexpectedly!')
def test_validates_local_url_with_query_strings(self):
try:
self.validator('/?q=test') # Query strings shouldn't affect validation
except ValidationError:
self.fail('ExtendedURLValidator raised ValidationError'
'unexpectedly!')
def test_raises_validation_error_for_missing_urls(self):
with self.assertRaises(ValidationError):
self.validator('/invalid/')
def test_validates_urls_missing_preceding_slash(self):
try:
self.validator('catalogue/')
except ValidationError:
self.fail('ExtendedURLValidator raised ValidationError'
'unexpectedly!')
def test_raises_validation_error_for_urls_without_trailing_slash(self):
with self.assertRaises(ValidationError):
self.validator('/catalogue') # Missing the / is bad
def test_validates_flatpages_urls(self):
FlatPage.objects.create(title='test page', url='/test/page/')
try:
self.validator('/test/page/')
except ValidationError:
self.fail('ExtendedURLValidator raises ValidationError'
'unexpectedly!')
class TestExtendedURLValidatorWithoutVerifyExists(TestCase):
"""
URLDoesNotExistValidator: validation fails for URLs that do resolve
"""
def setUp(self):
self.validator = URLDoesNotExistValidator()
def test_raises_exception_for_local_urls(self):
self.assertRaises(ValidationError, self.validator, '/')
def test_raises_exception_for_flatpages(self):
FlatPage.objects.create(title='test page', url='/test/page/')
self.assertRaises(ValidationError, self.validator, '/test/page/')
| bsd-3-clause |
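Outside the test case, the validators above are used like any Django validator; a hedged sketch (the URLs are illustrative):

```python
from django.core.exceptions import ValidationError
from oscar.core.validators import ExtendedURLValidator

validator = ExtendedURLValidator(verify_exists=True)
try:
    validator('/catalogue/')  # resolvable local URL: passes silently
    validator('/missing/')    # unresolvable: raises
except ValidationError as exc:
    print(exc.messages)
```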
MungoRae/home-assistant | homeassistant/components/device_tracker/luci.py | 12 | 5084 | """
Support for OpenWRT (luci) routers.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.luci/
"""
import json
import logging
import re
import requests
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.exceptions import HomeAssistantError
from homeassistant.components.device_tracker import (
DOMAIN, PLATFORM_SCHEMA, DeviceScanner)
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string
})
class InvalidLuciTokenError(HomeAssistantError):
"""When an invalid token is detected."""
pass
def get_scanner(hass, config):
"""Validate the configuration and return a Luci scanner."""
scanner = LuciDeviceScanner(config[DOMAIN])
return scanner if scanner.success_init else None
class LuciDeviceScanner(DeviceScanner):
"""This class queries a wireless router running OpenWrt firmware."""
def __init__(self, config):
"""Initialize the scanner."""
self.host = config[CONF_HOST]
self.username = config[CONF_USERNAME]
self.password = config[CONF_PASSWORD]
self.parse_api_pattern = re.compile(r"(?P<param>\w*) = (?P<value>.*);")
self.last_results = {}
self.refresh_token()
self.mac2name = None
self.success_init = self.token is not None
def refresh_token(self):
"""Get a new token."""
self.token = _get_token(self.host, self.username, self.password)
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
self._update_info()
return self.last_results
def get_device_name(self, device):
"""Return the name of the given device or None if we don't know."""
if self.mac2name is None:
url = 'http://{}/cgi-bin/luci/rpc/uci'.format(self.host)
result = _req_json_rpc(url, 'get_all', 'dhcp',
params={'auth': self.token})
if result:
hosts = [x for x in result.values()
if x['.type'] == 'host' and
'mac' in x and 'name' in x]
mac2name_list = [
(x['mac'].upper(), x['name']) for x in hosts]
self.mac2name = dict(mac2name_list)
else:
# Error, handled in the _req_json_rpc
return
return self.mac2name.get(device.upper(), None)
def _update_info(self):
"""Ensure the information from the Luci router is up to date.
Returns boolean if scanning successful.
"""
if not self.success_init:
return False
_LOGGER.info("Checking ARP")
url = 'http://{}/cgi-bin/luci/rpc/sys'.format(self.host)
try:
result = _req_json_rpc(url, 'net.arptable',
params={'auth': self.token})
except InvalidLuciTokenError:
_LOGGER.info("Refreshing token")
self.refresh_token()
return False
if result:
self.last_results = []
for device_entry in result:
# Check if the Flags for each device contain
# NUD_REACHABLE and if so, add it to last_results
if int(device_entry['Flags'], 16) & 0x2:
self.last_results.append(device_entry['HW address'])
return True
return False
def _req_json_rpc(url, method, *args, **kwargs):
"""Perform one JSON RPC operation."""
data = json.dumps({'method': method, 'params': args})
try:
res = requests.post(url, data=data, timeout=5, **kwargs)
except requests.exceptions.Timeout:
_LOGGER.exception("Connection to the router timed out")
return
if res.status_code == 200:
try:
result = res.json()
except ValueError:
# If json decoder could not parse the response
_LOGGER.exception("Failed to parse response from luci")
return
try:
return result['result']
except KeyError:
_LOGGER.exception("No result in response from luci")
return
elif res.status_code == 401:
# Authentication error
_LOGGER.exception(
"Failed to authenticate, check your username and password")
return
elif res.status_code == 403:
_LOGGER.error("Luci responded with a 403 Invalid token")
raise InvalidLuciTokenError
else:
_LOGGER.error('Invalid response from luci: %s', res)
def _get_token(host, username, password):
"""Get authentication token for the given host+username+password."""
url = 'http://{}/cgi-bin/luci/rpc/auth'.format(host)
return _req_json_rpc(url, 'login', username, password)
| apache-2.0 |
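A hedged sketch of driving the scanner directly; Home Assistant normally builds the config dict from YAML, and the host and credentials here are illustrative:

```python
from homeassistant.components.device_tracker.luci import get_scanner

config = {'device_tracker': {'host': '192.168.1.1',
                             'username': 'root',
                             'password': 'secret'}}
scanner = get_scanner(None, config)
if scanner:
    for mac in scanner.scan_devices():
        print(mac, scanner.get_device_name(mac))
```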
RIOT-OS/RIOT | dist/tools/kconfiglib/genconfig.py | 7 | 15262 | #!/usr/bin/env python3
# Copyright (c) 2018-2019, Ulf Magnusson
# 2020 HAW Hamburg
# SPDX-License-Identifier: ISC
"""
This script is used to merge multiple configuration sources and generate
different outputs related to Kconfig:
- Generate a header file with #defines from the configuration, matching the
format of include/generated/autoconf.h in the Linux kernel.
- Write the configuration output as a .config file. See --config-out.
- The --sync-deps, --file-list, and --env-list options generate information that
can be used to avoid needless rebuilds/reconfigurations.
Before writing a header or configuration file, Kconfiglib compares the old
contents of the file against the new contents. If there's no change, the write
is skipped. This avoids updating file metadata like the modification time, and
might save work depending on your build setup.
A custom header string can be inserted at the beginning of generated
configuration and header files by setting the KCONFIG_CONFIG_HEADER and
KCONFIG_AUTOHEADER_HEADER environment variables, respectively. The string is
not automatically made a comment (this is by design, to allow anything to be
added), and no trailing newline is added, so add '/* */', '#', and newlines as
appropriate.
"""
import argparse
import logging
import os
import sys
from riot_kconfig import RiotKconfig
import kconfiglib
DEFAULT_SYNC_DEPS_PATH = "deps/"
class Colors:
"""
ANSI color escape sequences for logging.
"""
GREEN = "\033[1;32m"
RED = "\033[1;31m"
YELLOW = "\033[1;33m"
PURPLE = "\033[1;35m"
RESET = "\033[0m"
class NoConfigurationFile(Exception):
"""
Raised when an operation that requires a configuration input file is
executed but the file is not specified.
"""
pass
def is_module(symbol):
"""
Checks if a given symbol represents a module, depending on its prefix.
"""
return symbol.name.startswith("MODULE_")
def is_error(symbol):
"""
Checks if a given symbol represents an error, depending on its prefix.
"""
return symbol.name.startswith("ERROR_")
def log_error(message):
"""
Convenience function to log an error.
"""
log(message, level=logging.ERROR)
def log(message, level=logging.DEBUG, color=None):
"""
Logs a message using 'logging', with a given color and level. If no level is
passed the message is logged using debug level. If no color is passed the
following rules apply:
- error messages are RED
- warning messages are YELLOW
- all other messages have no color
"""
if sys.stdout.isatty():
# running in a real terminal
if color is None:
if level == logging.ERROR:
color = Colors.RED
elif level == logging.WARNING:
color = Colors.YELLOW
else:
color = Colors.RESET
logging.log(level, "{}{}{}".format(color, message, Colors.RESET))
else:
# being piped or redirected
logging.log(level, "{}".format(message))
def merge_configs(kconf, configs=[]):
"""
Merges multiple configuration files given a Kconfig tree. configs should be
an array of paths to the files that need to be merged.
"""
# Enable warnings for assignments to undefined symbols
kconf.warn_assign_undef = True
# (This script uses alldefconfig as the base. Other starting states could be
# set up here as well. The approach in examples/allnoconfig_simpler.py could
# provide an allnoconfig starting state for example.)
# Disable warnings generated for multiple assignments to the same symbol within
# a (set of) configuration files. Assigning a symbol multiple times might be
# done intentionally when merging configuration files.
kconf.warn_assign_override = False
kconf.warn_assign_redun = False
# Create a merged configuration by loading the fragments with replace=False.
if configs:
for config in configs:
log(kconf.load_config(config, replace=False))
def check_config_symbols(kconf):
"""
Verifies that symbols got the values assigned by the user. This does not
check choices. For that, please refer to check_config_choices.
"""
ret = True
for sym in kconf.unique_defined_syms:
# choices are evaluated separately because when merging configurations
# the choice could be overridden
if sym.choice:
continue
# Was the symbol assigned to?
if sym.user_value is None:
continue
# Tristate values are represented as 0, 1, 2. Having them as
# "n", "m", "y" is more convenient here, so convert.
if sym.type in (kconfiglib.BOOL, kconfiglib.TRISTATE):
user_value = kconfiglib.TRI_TO_STR[sym.user_value]
else:
user_value = sym.user_value
if user_value != sym.str_value:
log("{} was assigned the value '{}' but got the value"
" '{}'. Check the dependencies.".format(sym.name, user_value,
sym.str_value))
symbol_type = "module" if is_module(sym) else "parameter"
log_error("=> The {} {} could not be set to {}."
.format(symbol_type, sym.name, user_value))
msg = ""
missing_deps = get_sym_missing_deps(sym)
if len(missing_deps):
msg = " Check the following unmet dependencies: "
msg += ", ".join(missing_deps) + "\n"
elif sym.type == kconfiglib.HEX or sym.type == kconfiglib.INT:
rng = get_sym_applying_range(sym)
if rng is not None:
msg = " Check that the value is in the correct range:"
msg += "[{} - {}] {}\n".format(rng[0], rng[1], rng[2])
log_error(msg)
ret = False
return ret
def check_config_choices(kconf):
"""
Verifies that the choice options that have been selected after processing
the configuration match what the user selected.
This is verified separately from the rest of the symbols because as we
are merging multiple configuration files the choice selection can be
overridden, so this check needs a different logic.
"""
ret = True
for choice in kconf.unique_choices:
if choice.user_selection and choice.user_selection is not choice.selection:
ret = False
log("{} choice option could not be set".format(choice.name))
log_error("=> The choice {} was selected but was not set.\n"
.format(choice.user_selection.name_and_loc))
return ret
def check_application_symbol(kconf):
"""
Check that the APPLICATION symbol is set.
If the special symbol APPLICATION is present it should be set to 'y'. It is
used to:
- imply modules that are optional for the application
- add dependencies on other symbols or conditions (e.g. only use 32-bits
architectures on this application)
"""
app_sym = list(filter(lambda s: s.name == "APPLICATION", kconf.unique_defined_syms))
if len(app_sym) > 1:
log("=> The special symbol APPLICATION is defined more than once",
level=logging.WARNING)
log_error("=> The special symbol APPLICATION is defined more than once")
return False
elif len(app_sym) == 1:
app = app_sym[0]
if app.str_value != 'y':
log("=> The application symbol (APPLICATION) is not set.",
level=logging.WARNING)
log_error("=> The application symbol (APPLICATION) is not set.")
log_error(" Check that the symbol defaults to 'y'.")
missing_deps = get_sym_missing_deps(app)
if missing_deps:
msg = " Check the following unmet dependencies: "
msg += ", ".join(missing_deps) + "\n"
log_error(msg)
return False
return True
def check_configs(kconf):
"""
Verifies that the generated configuration is valid.
A configuration is not valid when:
- A module could not be set to the value defined by the user.
- A configuration parameter could not be set to the value defined by the
user.
"""
test_kconfig = os.getenv("TEST_KCONFIG")
if test_kconfig:
app_check = check_application_symbol(kconf)
else:
app_check = True
sym_check = check_config_symbols(kconf)
choice_check = check_config_choices(kconf)
return app_check and sym_check and choice_check
def get_sym_missing_deps(sym):
"""
Returns a list of strings, one for each unmet expression among the direct
dependencies of `sym`.
"""
# split the top-level expression on AND (&&); dependencies declared across
# multiple lines end up joined this way
top_deps = kconfiglib.split_expr(sym.direct_dep, kconfiglib.AND)
# keep only the sub-expressions that are not met
unmet = [dep for dep in top_deps if kconfiglib.expr_value(dep) == 0]
# convert each expression to a string and append its value for a friendlier
# output message
expr_str = []
for dep in unmet:
s = kconfiglib.expr_str(dep)
if isinstance(dep, tuple):
s = "({})".format(s)
expr_str.append("{} (={})".format(s, kconfiglib.TRI_TO_STR[kconfiglib.expr_value(dep)]))
return expr_str
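# Example return value (illustrative symbol names), following the format above:
#   ["HAS_BAR (=n)", "(FOO || BAZ) (=n)"]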
def get_sym_applying_range(sym):
"""
Returns the first range that applies to a symbol (the active range) and the
condition when it is not a constant.
The return value is a tuple holding string representations of the range like
so:
(low, high, condition)
When the condition is a constant (e.g. 'y' when there is no condition) it
will be an empty string.
"""
# multiple ranges could apply to a symbol
for rng in sym.ranges:
(low, high, cond) = rng
# get the first active one
if kconfiglib.expr_value(cond):
cond_str = ""
if cond is not sym.kconfig.y:
cond_str = "if {}".format(kconfiglib.expr_str(cond))
return (low.str_value, high.str_value, cond_str)
return None
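# Example return values (illustrative), for a symbol declared with
# `range 0 64 if MODULE_FOO`:
#   ("0", "64", "if MODULE_FOO")  when the conditional range is active
#   None                          when no range currently applies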
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=__doc__)
parser.add_argument(
"--header-path",
metavar="HEADER_FILE",
help="""
Path to write the generated header file to. If not specified, the header file is
not written.
""")
parser.add_argument(
"--config-out",
metavar="CONFIG_FILE",
help="""
Write the configuration to CONFIG_FILE. If not specified, the file is not
written.
""")
parser.add_argument(
"--kconfig-filename",
metavar="KCONFIG_FILENAME",
nargs="?",
default="Kconfig",
help="Top-level Kconfig file (default: Kconfig)")
parser.add_argument(
"--sync-deps",
metavar="OUTPUT_DIR",
nargs="?",
const=DEFAULT_SYNC_DEPS_PATH,
help="""
Enable generation of symbol dependency information for incremental builds,
optionally specifying the output directory (default: {}). See the docstring of
Kconfig.sync_deps() in Kconfiglib for more information.
""".format(DEFAULT_SYNC_DEPS_PATH))
parser.add_argument(
"--file-list",
metavar="FILE_LIST_FILE",
help="""
Write a makefile listing all the Kconfig files used, and adding them as
dependencies of HEADER_FILE. The paths are absolute. Files appear in the order
they're 'source'd.
""")
parser.add_argument(
"--env-list",
metavar="ENV_LIST_FILE",
help="""
Write a list of all environment variables referenced in Kconfig files to
ENV_LIST_FILE, with one variable per line. Each line has the format NAME=VALUE.
Only environment variables referenced with the preprocessor $(VAR) syntax are
included, and not variables referenced with the older $VAR syntax (which is
only supported for backwards compatibility).
""")
parser.add_argument(
"--ignore-config-errors",
action="store_true",
help="""Configuration errors are reported but the script does not exit
with error.""")
parser.add_argument(
"--warnings-are-not-errors",
action="store_true",
help="Kconfig warnings are not considered errors")
parser.add_argument(
"-d", "--debug",
action="store_true",
help="Enable debug messages")
parser.add_argument(
"--config-sources",
metavar="CONFIG_SOURCES",
nargs='*',
help="List of configuration files to merge and apply. May be empty.")
args = parser.parse_args()
log_level = logging.DEBUG if args.debug else logging.ERROR
logging.basicConfig(format='[genconfig.py]:%(levelname)s-%(message)s',
level=log_level)
kconf = RiotKconfig(args.kconfig_filename, warn_to_stderr=False)
merge_configs(kconf, args.config_sources)
# HACK: Force all symbols to be evaluated, to catch warnings generated
# during evaluation (such as out-of-range integers)
kconf.write_config(os.devnull)
if not check_configs(kconf) and not args.ignore_config_errors:
sys.exit(1)
if kconf.warnings:
if args.warnings_are_not_errors:
for warning in kconf.warnings:
log(warning, level=logging.WARNING)
else:
log_error("Treating Kconfig warnings as errors:")
for warning in kconf.warnings:
log_error("=> {}".format(warning))
if not args.ignore_config_errors:
sys.exit(1)
if args.config_out is not None:
logging.debug(kconf.write_config(args.config_out, save_old=False))
if args.header_path is not None:
logging.debug(kconf.write_autoconf(args.header_path))
if args.sync_deps is not None:
log("Incremental build header files generated at '{}'".format(args.sync_deps))
kconf.sync_deps(args.sync_deps)
if args.file_list is not None:
if args.config_out is None:
raise NoConfigurationFile("Can't generate Kconfig dependency file without configuration file")
log("Kconfig dependencies written to '{}'".format(args.file_list))
with open(args.file_list, "w", encoding="utf-8") as f:
f.write("{}: \\\n".format(args.config_out))
# add dependencies
for path in kconf.kconfig_filenames:
f.write(" {} \\\n".format(os.path.abspath(path)))
# add empty recipes for dependencies
f.write("\n\n")
for path in kconf.kconfig_filenames:
f.write("{}:\n\n".format(os.path.abspath(path)))
if args.env_list is not None:
log("Kconfig environmental variables written to '{}'".format(args.env_list))
with open(args.env_list, "w", encoding="utf-8") as f:
for env_var in kconf.env_vars:
f.write("{}={}\n".format(env_var, os.environ[env_var]))
if __name__ == "__main__":
main()
| lgpl-2.1 |
mdavoodi/konkourse-python | conversation/views.py | 1 | 4095 | from django.http import HttpResponse
from django.utils import simplejson
from account.models import UserProfile
from conversation.models import ConversationPost, ConvoWall, ConversationComment
from notification.views import notifyComment, notifyPost
def post(request):
results = {'success': False}
if request.user.is_authenticated() and request.user.is_active:
if request.method == 'POST':
POST = request.POST
wall = ConvoWall.objects.get(id=POST['id'])
message = POST['message']
if message == '':
results = {'success': False}
json = simplejson.dumps(results)
return HttpResponse(json, mimetype='application/json')
elif len(message) > 5000:
results = {'success': False, 'error': 'invalid length'}
json = simplejson.dumps(results)
return HttpResponse(json, mimetype='application/json')
post_type = POST['type']
wallPost = ConversationPost(creator=request.user, wall=wall, message=message, post_type=post_type)
wallPost.save()
notifyPost(request=request, wall=wall, post=wallPost)
results = {'success': True}
json = simplejson.dumps(results)
return HttpResponse(json, mimetype='application/json')
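# Example exchange for post() (illustrative; field values are hypothetical):
#   POST {'id': <wall id>, 'message': 'hello wall', 'type': 'status'}
#   -> '{"success": true}'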
def comment(request):
results = {'success': False}
if request.user.is_authenticated() and request.user.is_active:
if request.method == 'POST':
POST = request.POST
convoPost = ConversationPost.objects.get(id=POST['id'])
message = POST['message']
if message == '':
results = {'success': False}
json = simplejson.dumps(results)
return HttpResponse(json, mimetype='application/json')
elif len(message) > 5000:
results = {'success': False, 'error': 'invalid length'}
json = simplejson.dumps(results)
return HttpResponse(json, mimetype='application/json')
comment = ConversationComment(creator=request.user, message=message, post=convoPost)
comment.save()
convoPost.comments.add(comment)
convoPost.save()
notifyComment(request=request, post=convoPost, comment=comment)
results = {'success': True}
json = simplejson.dumps(results)
return HttpResponse(json, mimetype='application/json')
def deletePost(request):
results = {'success': False}
if request.user.is_authenticated() and request.user.is_active:
if request.method == 'POST':
POST = request.POST
convoPost = ConversationPost.objects.get(id=POST['id'])
parent = convoPost.wall.getParent
# permission check (assumed intent): only the post creator or the wall owner may delete
if convoPost.creator != request.user and not (isinstance(parent, UserProfile) and parent.user == request.user):
results = {'success': False}
json = simplejson.dumps(results)
return HttpResponse(json, mimetype='application/json')
convoPost.deleted = True
convoPost.save()
results = {'success': True}
json = simplejson.dumps(results)
return HttpResponse(json, mimetype='application/json')
def deleteComment(request):
results = {'success': False}
if request.user.is_authenticated() and request.user.is_active:
if request.method == 'POST':
POST = request.POST
comment = ConversationComment.objects.get(id=POST['id'])
parent = comment.post.wall.getParent
# permission check (assumed intent): the comment creator, the post creator, or the
# wall owner may delete; the original compared comment.post (a ConversationPost)
# to request.user, which can never be equal, so deletion always failed
if comment.creator != request.user and comment.post.creator != request.user and not (isinstance(parent, UserProfile) and parent.user == request.user):
results = {'success': False}
json = simplejson.dumps(results)
return HttpResponse(json, mimetype='application/json')
comment.deleted = True
comment.save()
results = {'success': True}
json = simplejson.dumps(results)
return HttpResponse(json, mimetype='application/json')
| mit |
p4datasystems/CarnotKEdist | dist/Lib/xml/dom/pulldom.py | 109 | 11972 | import xml.sax
import xml.sax.handler
import types
try:
_StringTypes = [types.StringType, types.UnicodeType]
except AttributeError:
_StringTypes = [types.StringType]
START_ELEMENT = "START_ELEMENT"
END_ELEMENT = "END_ELEMENT"
COMMENT = "COMMENT"
START_DOCUMENT = "START_DOCUMENT"
END_DOCUMENT = "END_DOCUMENT"
PROCESSING_INSTRUCTION = "PROCESSING_INSTRUCTION"
IGNORABLE_WHITESPACE = "IGNORABLE_WHITESPACE"
CHARACTERS = "CHARACTERS"
class PullDOM(xml.sax.ContentHandler):
_locator = None
document = None
def __init__(self, documentFactory=None):
from xml.dom import XML_NAMESPACE
self.documentFactory = documentFactory
self.firstEvent = [None, None]
self.lastEvent = self.firstEvent
self.elementStack = []
self.push = self.elementStack.append
try:
self.pop = self.elementStack.pop
except AttributeError:
# use class' pop instead
pass
self._ns_contexts = [{XML_NAMESPACE:'xml'}] # contains uri -> prefix dicts
self._current_context = self._ns_contexts[-1]
self.pending_events = []
def pop(self):
result = self.elementStack[-1]
del self.elementStack[-1]
return result
def setDocumentLocator(self, locator):
self._locator = locator
def startPrefixMapping(self, prefix, uri):
if not hasattr(self, '_xmlns_attrs'):
self._xmlns_attrs = []
self._xmlns_attrs.append((prefix or 'xmlns', uri))
self._ns_contexts.append(self._current_context.copy())
self._current_context[uri] = prefix or None
def endPrefixMapping(self, prefix):
self._current_context = self._ns_contexts.pop()
def startElementNS(self, name, tagName, attrs):
uri, localname = name
if uri:
# When using namespaces, the reader may or may not
# provide us with the original name. If not, create
# *a* valid tagName from the current context.
if tagName is None:
prefix = self._current_context[uri]
if prefix:
tagName = prefix + ":" + localname
else:
tagName = localname
if self.document:
node = self.document.createElementNS(uri, tagName)
else:
node = self.buildDocument(uri, tagName)
else:
# When the tagname is not prefixed, it just appears as
# localname
if self.document:
node = self.document.createElement(localname)
else:
node = self.buildDocument(None, localname)
# Retrieve xml namespace declaration attributes.
xmlns_uri = 'http://www.w3.org/2000/xmlns/'
xmlns_attrs = getattr(self, '_xmlns_attrs', None)
if xmlns_attrs is not None:
for aname, value in xmlns_attrs:
if aname == 'xmlns':
qname = aname
else:
qname = 'xmlns:' + aname
attr = self.document.createAttributeNS(xmlns_uri, qname)
attr.value = value
node.setAttributeNodeNS(attr)
self._xmlns_attrs = []
for aname,value in attrs.items():
a_uri, a_localname = aname
if a_uri:
prefix = self._current_context[a_uri]
if prefix:
qname = prefix + ":" + a_localname
else:
qname = a_localname
attr = self.document.createAttributeNS(a_uri, qname)
node.setAttributeNodeNS(attr)
else:
attr = self.document.createAttribute(a_localname)
node.setAttributeNode(attr)
attr.value = value
self.lastEvent[1] = [(START_ELEMENT, node), None]
self.lastEvent = self.lastEvent[1]
self.push(node)
def endElementNS(self, name, tagName):
self.lastEvent[1] = [(END_ELEMENT, self.pop()), None]
self.lastEvent = self.lastEvent[1]
def startElement(self, name, attrs):
if self.document:
node = self.document.createElement(name)
else:
node = self.buildDocument(None, name)
for aname,value in attrs.items():
attr = self.document.createAttribute(aname)
attr.value = value
node.setAttributeNode(attr)
self.lastEvent[1] = [(START_ELEMENT, node), None]
self.lastEvent = self.lastEvent[1]
self.push(node)
def endElement(self, name):
self.lastEvent[1] = [(END_ELEMENT, self.pop()), None]
self.lastEvent = self.lastEvent[1]
def comment(self, s):
if self.document:
node = self.document.createComment(s)
self.lastEvent[1] = [(COMMENT, node), None]
self.lastEvent = self.lastEvent[1]
else:
event = [(COMMENT, s), None]
self.pending_events.append(event)
def processingInstruction(self, target, data):
if self.document:
node = self.document.createProcessingInstruction(target, data)
self.lastEvent[1] = [(PROCESSING_INSTRUCTION, node), None]
self.lastEvent = self.lastEvent[1]
else:
event = [(PROCESSING_INSTRUCTION, target, data), None]
self.pending_events.append(event)
def ignorableWhitespace(self, chars):
node = self.document.createTextNode(chars)
self.lastEvent[1] = [(IGNORABLE_WHITESPACE, node), None]
self.lastEvent = self.lastEvent[1]
def characters(self, chars):
node = self.document.createTextNode(chars)
self.lastEvent[1] = [(CHARACTERS, node), None]
self.lastEvent = self.lastEvent[1]
def startDocument(self):
if self.documentFactory is None:
import xml.dom.minidom
self.documentFactory = xml.dom.minidom.Document.implementation
def buildDocument(self, uri, tagname):
# Can't do that in startDocument, since we need the tagname
# XXX: obtain DocumentType
node = self.documentFactory.createDocument(uri, tagname, None)
self.document = node
self.lastEvent[1] = [(START_DOCUMENT, node), None]
self.lastEvent = self.lastEvent[1]
self.push(node)
# Put everything we have seen so far into the document
for e in self.pending_events:
if e[0][0] == PROCESSING_INSTRUCTION:
_,target,data = e[0]
n = self.document.createProcessingInstruction(target, data)
e[0] = (PROCESSING_INSTRUCTION, n)
elif e[0][0] == COMMENT:
n = self.document.createComment(e[0][1])
e[0] = (COMMENT, n)
else:
raise AssertionError("Unknown pending event " + repr(e[0][0]))
self.lastEvent[1] = e
self.lastEvent = e
self.pending_events = None
return node.firstChild
def endDocument(self):
self.lastEvent[1] = [(END_DOCUMENT, self.document), None]
self.pop()
def clear(self):
"clear(): Explicitly release parsing structures"
self.document = None
class ErrorHandler:
def warning(self, exception):
print exception
def error(self, exception):
raise exception
def fatalError(self, exception):
raise exception
class DOMEventStream:
def __init__(self, stream, parser, bufsize):
self.stream = stream
self.parser = parser
self.bufsize = bufsize
if not hasattr(self.parser, 'feed'):
self.getEvent = self._slurp
self.reset()
def reset(self):
self.pulldom = PullDOM()
# This content handler relies on namespace support
self.parser.setFeature(xml.sax.handler.feature_namespaces, 1)
self.parser.setContentHandler(self.pulldom)
def __getitem__(self, pos):
rc = self.getEvent()
if rc:
return rc
raise IndexError
def next(self):
rc = self.getEvent()
if rc:
return rc
raise StopIteration
def __iter__(self):
return self
def expandNode(self, node):
event = self.getEvent()
parents = [node]
while event:
token, cur_node = event
if cur_node is node:
return
if token != END_ELEMENT:
parents[-1].appendChild(cur_node)
if token == START_ELEMENT:
parents.append(cur_node)
elif token == END_ELEMENT:
del parents[-1]
event = self.getEvent()
def getEvent(self):
# use IncrementalParser interface, so we get the desired
# pull effect
if not self.pulldom.firstEvent[1]:
self.pulldom.lastEvent = self.pulldom.firstEvent
while not self.pulldom.firstEvent[1]:
buf = self.stream.read(self.bufsize)
if not buf:
self.parser.close()
return None
self.parser.feed(buf)
rc = self.pulldom.firstEvent[1][0]
self.pulldom.firstEvent[1] = self.pulldom.firstEvent[1][1]
return rc
def _slurp(self):
""" Fallback replacement for getEvent() using the
standard SAX2 interface, which means we slurp the
SAX events into memory (no performance gain, but
we are compatible with all SAX parsers).
"""
self.parser.parse(self.stream)
self.getEvent = self._emit
return self._emit()
def _emit(self):
""" Fallback replacement for getEvent() that emits
the events that _slurp() read previously.
"""
if self.pulldom.firstEvent[1] is None:
return None
rc = self.pulldom.firstEvent[1][0]
self.pulldom.firstEvent[1] = self.pulldom.firstEvent[1][1]
return rc
def clear(self):
"""clear(): Explicitly release parsing objects"""
self.pulldom.clear()
del self.pulldom
self.parser = None
self.stream = None
class SAX2DOM(PullDOM):
def startElementNS(self, name, tagName, attrs):
PullDOM.startElementNS(self, name, tagName, attrs)
curNode = self.elementStack[-1]
parentNode = self.elementStack[-2]
parentNode.appendChild(curNode)
def startElement(self, name, attrs):
PullDOM.startElement(self, name, attrs)
curNode = self.elementStack[-1]
parentNode = self.elementStack[-2]
parentNode.appendChild(curNode)
def processingInstruction(self, target, data):
PullDOM.processingInstruction(self, target, data)
node = self.lastEvent[0][1]
parentNode = self.elementStack[-1]
parentNode.appendChild(node)
def ignorableWhitespace(self, chars):
PullDOM.ignorableWhitespace(self, chars)
node = self.lastEvent[0][1]
parentNode = self.elementStack[-1]
parentNode.appendChild(node)
def characters(self, chars):
PullDOM.characters(self, chars)
node = self.lastEvent[0][1]
parentNode = self.elementStack[-1]
parentNode.appendChild(node)
default_bufsize = (2 ** 14) - 20
def parse(stream_or_string, parser=None, bufsize=None):
if bufsize is None:
bufsize = default_bufsize
if type(stream_or_string) in _StringTypes:
stream = open(stream_or_string)
else:
stream = stream_or_string
if not parser:
parser = xml.sax.make_parser()
return DOMEventStream(stream, parser, bufsize)
def parseString(string, parser=None):
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
bufsize = len(string)
buf = StringIO(string)
if not parser:
parser = xml.sax.make_parser()
return DOMEventStream(buf, parser, bufsize)
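# Example usage (illustrative): stream events and expand selected elements.
#   from xml.dom import pulldom
#   events = pulldom.parse("document.xml")
#   for event, node in events:
#       if event == pulldom.START_ELEMENT and node.tagName == "item":
#           events.expandNode(node)  # pull the whole subtree into `node`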
| apache-2.0 |
tadebayo/myedge | edge/src/edge/settings/production.py | 2 | 1789 | # In production set the environment variable like this:
# DJANGO_SETTINGS_MODULE=edge.settings.production
from .base import * # NOQA
import logging.config
# For security and performance reasons, DEBUG is turned off
DEBUG = False
TEMPLATE_DEBUG = False
# Must mention ALLOWED_HOSTS in production!
# ALLOWED_HOSTS = ["edge.com"]
# Cache the templates in memory for speed-up
loaders = [
('django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]),
]
TEMPLATES[0]['OPTIONS'].update({"loaders": loaders})
TEMPLATES[0].update({"APP_DIRS": False})
# Define STATIC_ROOT for the collectstatic command
STATIC_ROOT = join(BASE_DIR, '..', 'site', 'static')
# Log everything to the logs directory at the top
LOGFILE_ROOT = join(dirname(BASE_DIR), 'logs')
# Reset logging
LOGGING_CONFIG = None
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': "[%(asctime)s] %(levelname)s [%(pathname)s:%(lineno)s] %(message)s",
'datefmt': "%d/%b/%Y %H:%M:%S"
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'proj_log_file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': join(LOGFILE_ROOT, 'project.log'),
'formatter': 'verbose'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
}
},
'loggers': {
'project': {
'handlers': ['proj_log_file'],
'level': 'DEBUG',
},
}
}
logging.config.dictConfig(LOGGING)
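# Example (illustrative): application code can write to logs/project.log via
# the 'project' logger configured above:
#   import logging
#   logger = logging.getLogger('project')
#   logger.debug('message written to the project log')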
| mit |
XENON1T/processing | montecarlo/fax_waveform/CreateFakeCSV.py | 1 | 4806 | #################################
## Sub-code used in WF simulation
## It creates a csv file for the input of fax
## by Qing Lin
## @ 2016-09-12
##
## HARDCODE WARNING: The FV dimensions below need to be modified
## according to the detector you wish to simulate
##
## Ref: http://xenon1t.github.io/pax/simulator.html#instruction-file-format
## Code: https://github.com/XENON1T/pax/blob/master/pax/plugins/io/WaveformSimulator.py#L244
##
#################################
import sys
import numpy as np
import scipy as sp
if len(sys.argv)<2:
print("========= Syntax ==========")
print("python CreateFakeCSV.py ..... ")
print("<detector: XENON100, XENON1T>")
print("<number of events>")
print("<photon number lower>")
print("<photon number upper>")
print("<electron number lower>")
print("<electron number upper>")
print("<recoil type: ER, NR>")
print("<output file (abs. path)>")
print("<If force S1-S2 correlation (0 for no; 1 for yes)>")
exit()
Detector = sys.argv[1]
NumEvents = int(sys.argv[2])
PhotonNumLower = float(sys.argv[3])
PhotonNumUpper = float(sys.argv[4])
ElectronNumLower = float(sys.argv[5])
ElectronNumUpper = float(sys.argv[6])
DefaultType = sys.argv[7]
OutputFilename = sys.argv[8]
IfS1S2Correlation = True
if int(sys.argv[9])==0:
IfS1S2Correlation = False
####################################
## Some nuisance parameters (HARDCODE WARNING):
####################################
MaxDriftTime = 650. # us
####################################
## Some functions (HARDCODE WARNING):
####################################
# Current FV cut for Xe1T
scalecmtomm=1
def radius2_cut(zpos):
return 1400*scalecmtomm**2+(zpos+100*scalecmtomm)*(2250-1900)*scalecmtomm/100
def IfPassFV(x,y,z):
if Detector == "XENON100":
# check if the x,y,z passing X48kg0
I = np.power( (z+15.)/14.6, 4.)
I += np.power( (x**2+y**2)/20000., 4.)
if I<1:
return True
elif Detector == "XENON1T": # NEED TO UPDATE THIS
Zlower, Zupper = -90*scalecmtomm, -15*scalecmtomm
Zcut = ((z>=Zlower) & (z<=Zupper))
R2upper=radius2_cut(z)
Rcut = (x**2+y**2<R2upper)
if(Zcut & Rcut):
return True
return False
def RandomizeFV():
# randomize the X, Y, Z according to X48kg FV
if Detector == "XENON100":
Zlower, Zupper = -14.6-15.0, -14.6+15.0
Rlower, Rupper = -np.sqrt(200.), np.sqrt(200.)
elif Detector == "XENON1T": # NEED TO UPDATE THIS
Zlower, Zupper = -90*scalecmtomm, -15*scalecmtomm
Rlower, Rupper = -46*scalecmtomm, 46*scalecmtomm
for i in range(100000):
x = np.random.uniform(Rlower,Rupper)
y = np.random.uniform(Rlower,Rupper)
z = np.random.uniform(Zlower,Zupper)
if IfPassFV(x,y,z):
return (x,y,z)
return (0,0,0)
####################################
## Starts to create
####################################
# Some default
DefaultEventTime = MaxDriftTime*1000.
##########
fout = open(OutputFilename, 'w')
# headers
fout.write("instruction,recoil_type,x,y,depth,s1_photons,s2_electrons,t\n")
if IfS1S2Correlation:
# events loop
for i in range(NumEvents):
fout.write(str(i)+",")
fout.write(DefaultType+",")
X, Y, Z = RandomizeFV()
fout.write(str(X)+",")
fout.write(str(Y)+",")
#fout.write("random,")
#fout.write("random,")
fout.write(str(-Z)+",")
NumPhoton = int( np.random.uniform(PhotonNumLower, PhotonNumUpper) )
fout.write(str(NumPhoton)+",")
NumElectron = int( np.random.uniform(ElectronNumLower, ElectronNumUpper) )
fout.write(str(NumElectron)+",")
fout.write(str(DefaultEventTime)+"\n")
else:
# events loop: S1 and S2 written as separate, uncorrelated instructions
for i in range(NumEvents):
# first for S1
fout.write(str(i)+",")
fout.write(DefaultType+",")
X, Y, Z = RandomizeFV()
fout.write(str(X)+",")
fout.write(str(Y)+",")
fout.write(str(-Z)+",")
NumPhoton = int( np.random.uniform(PhotonNumLower, PhotonNumUpper) )
fout.write(str(NumPhoton)+",")
fout.write("0,")
fout.write(str(DefaultEventTime)+"\n")
# second for S2
fout.write(str(i)+",")
fout.write(DefaultType+",")
X, Y, Z = RandomizeFV()
fout.write(str(X)+",")
fout.write(str(Y)+",")
fout.write(str(-Z)+",")
fout.write("0,")
NumElectron = int( np.random.uniform(ElectronNumLower, ElectronNumUpper) )
fout.write(str(NumElectron)+",")
TimeOffset = np.random.uniform(-MaxDriftTime*1000., MaxDriftTime*1000.)
S2EventTime = DefaultEventTime+TimeOffset
fout.write(str(S2EventTime)+"\n")
fout.close()
| apache-2.0 |
gdooper/scipy | scipy/sparse/compressed.py | 14 | 43146 | """Base class for sparse matrix formats using compressed storage."""
from __future__ import division, print_function, absolute_import
__all__ = []
from warnings import warn
import operator
import numpy as np
from scipy._lib.six import zip as izip
from .base import spmatrix, isspmatrix, SparseEfficiencyWarning
from .data import _data_matrix, _minmax_mixin
from .dia import dia_matrix
from . import _sparsetools
from .sputils import (upcast, upcast_char, to_native, isdense, isshape,
getdtype, isscalarlike, IndexMixin, get_index_dtype,
downcast_intp_index, get_sum_dtype)
class _cs_matrix(_data_matrix, _minmax_mixin, IndexMixin):
"""base matrix class for compressed row and column oriented matrices"""
def __init__(self, arg1, shape=None, dtype=None, copy=False):
_data_matrix.__init__(self)
if isspmatrix(arg1):
if arg1.format == self.format and copy:
arg1 = arg1.copy()
else:
arg1 = arg1.asformat(self.format)
self._set_self(arg1)
elif isinstance(arg1, tuple):
if isshape(arg1):
# It's a tuple of matrix dimensions (M, N)
# create empty matrix
self.shape = arg1 # spmatrix checks for errors here
M, N = self.shape
# Select index dtype large enough to pass array and
# scalar parameters to sparsetools
idx_dtype = get_index_dtype(maxval=max(M,N))
self.data = np.zeros(0, getdtype(dtype, default=float))
self.indices = np.zeros(0, idx_dtype)
self.indptr = np.zeros(self._swap((M,N))[0] + 1, dtype=idx_dtype)
else:
if len(arg1) == 2:
# (data, ij) format
from .coo import coo_matrix
other = self.__class__(coo_matrix(arg1, shape=shape))
self._set_self(other)
elif len(arg1) == 3:
# (data, indices, indptr) format
(data, indices, indptr) = arg1
# Select index dtype large enough to pass array and
# scalar parameters to sparsetools
maxval = None
if shape is not None:
maxval = max(shape)
idx_dtype = get_index_dtype((indices, indptr), maxval=maxval, check_contents=True)
self.indices = np.array(indices, copy=copy, dtype=idx_dtype)
self.indptr = np.array(indptr, copy=copy, dtype=idx_dtype)
self.data = np.array(data, copy=copy, dtype=dtype)
else:
raise ValueError("unrecognized %s_matrix constructor usage" %
self.format)
else:
# must be dense
try:
arg1 = np.asarray(arg1)
except:
raise ValueError("unrecognized %s_matrix constructor usage" %
self.format)
from .coo import coo_matrix
self._set_self(self.__class__(coo_matrix(arg1, dtype=dtype)))
# Read matrix dimensions given, if any
if shape is not None:
self.shape = shape # spmatrix will check for errors
else:
if self.shape is None:
# shape not already set, try to infer dimensions
try:
major_dim = len(self.indptr) - 1
minor_dim = self.indices.max() + 1
except:
raise ValueError('unable to infer matrix dimensions')
else:
self.shape = self._swap((major_dim,minor_dim))
if dtype is not None:
self.data = np.asarray(self.data, dtype=dtype)
self.check_format(full_check=False)
def getnnz(self, axis=None):
if axis is None:
return int(self.indptr[-1])
else:
if axis < 0:
axis += 2
axis, _ = self._swap((axis, 1 - axis))
_, N = self._swap(self.shape)
if axis == 0:
return np.bincount(downcast_intp_index(self.indices),
minlength=N)
elif axis == 1:
return np.diff(self.indptr)
raise ValueError('axis out of bounds')
getnnz.__doc__ = spmatrix.getnnz.__doc__
def _set_self(self, other, copy=False):
"""take the member variables of other and assign them to self"""
if copy:
other = other.copy()
self.data = other.data
self.indices = other.indices
self.indptr = other.indptr
self.shape = other.shape
def check_format(self, full_check=True):
"""check whether the matrix format is valid
Parameters
----------
full_check : bool, optional
If `True`, rigorous check, O(N) operations. Otherwise
basic check, O(1) operations (default True).
"""
# use _swap to determine proper bounds
major_name,minor_name = self._swap(('row','column'))
major_dim,minor_dim = self._swap(self.shape)
# index arrays should have integer data types
if self.indptr.dtype.kind != 'i':
warn("indptr array has non-integer dtype (%s)"
% self.indptr.dtype.name)
if self.indices.dtype.kind != 'i':
warn("indices array has non-integer dtype (%s)"
% self.indices.dtype.name)
idx_dtype = get_index_dtype((self.indptr, self.indices))
self.indptr = np.asarray(self.indptr, dtype=idx_dtype)
self.indices = np.asarray(self.indices, dtype=idx_dtype)
self.data = to_native(self.data)
# check array shapes
if self.data.ndim != 1 or self.indices.ndim != 1 or self.indptr.ndim != 1:
raise ValueError('data, indices, and indptr should be 1-D')
# check index pointer
if (len(self.indptr) != major_dim + 1):
raise ValueError("index pointer size (%d) should be (%d)" %
(len(self.indptr), major_dim + 1))
if (self.indptr[0] != 0):
raise ValueError("index pointer should start with 0")
# check index and data arrays
if (len(self.indices) != len(self.data)):
raise ValueError("indices and data should have the same size")
if (self.indptr[-1] > len(self.indices)):
raise ValueError("Last value of index pointer should be less than "
"the size of index and data arrays")
self.prune()
if full_check:
# check format validity (more expensive)
if self.nnz > 0:
if self.indices.max() >= minor_dim:
raise ValueError("%s index values must be < %d" %
(minor_name,minor_dim))
if self.indices.min() < 0:
raise ValueError("%s index values must be >= 0" %
minor_name)
if np.diff(self.indptr).min() < 0:
raise ValueError("index pointer values must form a "
"non-decreasing sequence")
# if not self.has_sorted_indices():
# warn('Indices were not in sorted order. Sorting indices.')
# self.sort_indices()
# assert(self.has_sorted_indices())
# TODO check for duplicates?
#######################
# Boolean comparisons #
#######################
def _scalar_binopt(self, other, op):
"""Scalar version of self._binopt, for cases in which no new nonzeros
are added. Produces a new spmatrix in canonical form.
"""
self.sum_duplicates()
res = self._with_data(op(self.data, other), copy=True)
res.eliminate_zeros()
return res
def __eq__(self, other):
# Scalar other.
if isscalarlike(other):
if np.isnan(other):
return self.__class__(self.shape, dtype=np.bool_)
if other == 0:
warn("Comparing a sparse matrix with 0 using == is inefficient"
", try using != instead.", SparseEfficiencyWarning)
all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
inv = self._scalar_binopt(other, operator.ne)
return all_true - inv
else:
return self._scalar_binopt(other, operator.eq)
# Dense other.
elif isdense(other):
return self.todense() == other
# Sparse other.
elif isspmatrix(other):
warn("Comparing sparse matrices using == is inefficient, try using"
" != instead.", SparseEfficiencyWarning)
#TODO sparse broadcasting
if self.shape != other.shape:
return False
elif self.format != other.format:
other = other.asformat(self.format)
res = self._binopt(other,'_ne_')
all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
return all_true - res
else:
return False
def __ne__(self, other):
# Scalar other.
if isscalarlike(other):
if np.isnan(other):
warn("Comparing a sparse matrix with nan using != is inefficient",
SparseEfficiencyWarning)
all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
return all_true
elif other != 0:
warn("Comparing a sparse matrix with a nonzero scalar using !="
" is inefficient, try using == instead.", SparseEfficiencyWarning)
all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
inv = self._scalar_binopt(other, operator.eq)
return all_true - inv
else:
return self._scalar_binopt(other, operator.ne)
# Dense other.
elif isdense(other):
return self.todense() != other
# Sparse other.
elif isspmatrix(other):
#TODO sparse broadcasting
if self.shape != other.shape:
return True
elif self.format != other.format:
other = other.asformat(self.format)
return self._binopt(other,'_ne_')
else:
return True
def _inequality(self, other, op, op_name, bad_scalar_msg):
# Scalar other.
if isscalarlike(other):
if 0 == other and op_name in ('_le_', '_ge_'):
raise NotImplementedError(" >= and <= don't work with 0.")
elif op(0, other):
warn(bad_scalar_msg, SparseEfficiencyWarning)
other_arr = np.empty(self.shape, dtype=np.result_type(other))
other_arr.fill(other)
other_arr = self.__class__(other_arr)
return self._binopt(other_arr, op_name)
else:
return self._scalar_binopt(other, op)
# Dense other.
elif isdense(other):
return op(self.todense(), other)
# Sparse other.
elif isspmatrix(other):
#TODO sparse broadcasting
if self.shape != other.shape:
raise ValueError("inconsistent shapes")
elif self.format != other.format:
other = other.asformat(self.format)
if op_name not in ('_ge_', '_le_'):
return self._binopt(other, op_name)
warn("Comparing sparse matrices using >= and <= is inefficient, "
"using <, >, or !=, instead.", SparseEfficiencyWarning)
all_true = self.__class__(np.ones(self.shape))
res = self._binopt(other, '_gt_' if op_name == '_le_' else '_lt_')
return all_true - res
else:
raise ValueError("Operands could not be compared.")
def __lt__(self, other):
return self._inequality(other, operator.lt, '_lt_',
"Comparing a sparse matrix with a scalar "
"greater than zero using < is inefficient, "
"try using >= instead.")
def __gt__(self, other):
return self._inequality(other, operator.gt, '_gt_',
"Comparing a sparse matrix with a scalar "
"less than zero using > is inefficient, "
"try using <= instead.")
def __le__(self, other):
return self._inequality(other, operator.le, '_le_',
"Comparing a sparse matrix with a scalar "
"greater than zero using <= is inefficient, "
"try using > instead.")
def __ge__(self,other):
return self._inequality(other, operator.ge, '_ge_',
"Comparing a sparse matrix with a scalar "
"less than zero using >= is inefficient, "
"try using < instead.")
#################################
# Arithmetic operator overrides #
#################################
def __add__(self,other):
# First check if argument is a scalar
if isscalarlike(other):
if other == 0:
return self.copy()
else: # Now we would add this scalar to every element.
raise NotImplementedError('adding a nonzero scalar to a '
'sparse matrix is not supported')
elif isspmatrix(other):
if (other.shape != self.shape):
raise ValueError("inconsistent shapes")
return self._binopt(other,'_plus_')
elif isdense(other):
# Convert this matrix to a dense matrix and add them
return self.todense() + other
else:
return NotImplemented
def __radd__(self,other):
return self.__add__(other)
def __sub__(self,other):
# First check if argument is a scalar
if isscalarlike(other):
if other == 0:
return self.copy()
else: # Now we would subtract this scalar from every element.
raise NotImplementedError('subtracting a nonzero scalar from a '
'sparse matrix is not supported')
elif isspmatrix(other):
if (other.shape != self.shape):
raise ValueError("inconsistent shapes")
return self._binopt(other,'_minus_')
elif isdense(other):
# Convert this matrix to a dense matrix and subtract them
return self.todense() - other
else:
return NotImplemented
def __rsub__(self,other): # other - self
# note: this can't be replaced by other + (-self) for unsigned types
if isscalarlike(other):
if other == 0:
return -self.copy()
else: # Now we would subtract every element from this scalar.
raise NotImplementedError('subtracting a sparse matrix from a '
'nonzero scalar is not supported')
elif isdense(other):
# Convert this matrix to a dense matrix and subtract them
return other - self.todense()
else:
return NotImplemented
def multiply(self, other):
"""Point-wise multiplication by another matrix, vector, or
scalar.
"""
# Scalar multiplication.
if isscalarlike(other):
return self._mul_scalar(other)
# Sparse matrix or vector.
if isspmatrix(other):
if self.shape == other.shape:
other = self.__class__(other)
return self._binopt(other, '_elmul_')
# Single element.
elif other.shape == (1,1):
return self._mul_scalar(other.toarray()[0, 0])
elif self.shape == (1,1):
return other._mul_scalar(self.toarray()[0, 0])
# A row times a column.
elif self.shape[1] == other.shape[0] and self.shape[1] == 1:
return self._mul_sparse_matrix(other.tocsc())
elif self.shape[0] == other.shape[1] and self.shape[0] == 1:
return other._mul_sparse_matrix(self.tocsc())
# Row vector times matrix. other is a row.
elif other.shape[0] == 1 and self.shape[1] == other.shape[1]:
other = dia_matrix((other.toarray().ravel(), [0]),
shape=(other.shape[1], other.shape[1]))
return self._mul_sparse_matrix(other)
# self is a row.
elif self.shape[0] == 1 and self.shape[1] == other.shape[1]:
copy = dia_matrix((self.toarray().ravel(), [0]),
shape=(self.shape[1], self.shape[1]))
return other._mul_sparse_matrix(copy)
# Column vector times matrix. other is a column.
elif other.shape[1] == 1 and self.shape[0] == other.shape[0]:
other = dia_matrix((other.toarray().ravel(), [0]),
shape=(other.shape[0], other.shape[0]))
return other._mul_sparse_matrix(self)
# self is a column.
elif self.shape[1] == 1 and self.shape[0] == other.shape[0]:
copy = dia_matrix((self.toarray().ravel(), [0]),
shape=(self.shape[0], self.shape[0]))
return copy._mul_sparse_matrix(other)
else:
raise ValueError("inconsistent shapes")
# Dense matrix.
if isdense(other):
if self.shape == other.shape:
ret = self.tocoo()
ret.data = np.multiply(ret.data, other[ret.row, ret.col]
).view(np.ndarray).ravel()
return ret
# Single element.
elif other.size == 1:
return self._mul_scalar(other.flat[0])
# Anything else.
return np.multiply(self.todense(), other)
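# Example (illustrative): for same-shaped sparse matrices A and B,
# A.multiply(B) returns a sparse matrix whose (i, j) entry is A[i, j] * B[i, j].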
###########################
# Multiplication handlers #
###########################
def _mul_vector(self, other):
M,N = self.shape
# output array
result = np.zeros(M, dtype=upcast_char(self.dtype.char,
other.dtype.char))
# csr_matvec or csc_matvec
fn = getattr(_sparsetools,self.format + '_matvec')
fn(M, N, self.indptr, self.indices, self.data, other, result)
return result
def _mul_multivector(self, other):
M,N = self.shape
n_vecs = other.shape[1] # number of column vectors
result = np.zeros((M,n_vecs), dtype=upcast_char(self.dtype.char,
other.dtype.char))
# csr_matvecs or csc_matvecs
fn = getattr(_sparsetools,self.format + '_matvecs')
fn(M, N, n_vecs, self.indptr, self.indices, self.data, other.ravel(), result.ravel())
return result
def _mul_sparse_matrix(self, other):
M, K1 = self.shape
K2, N = other.shape
major_axis = self._swap((M,N))[0]
other = self.__class__(other) # convert to this format
idx_dtype = get_index_dtype((self.indptr, self.indices,
other.indptr, other.indices),
maxval=M*N)
indptr = np.empty(major_axis + 1, dtype=idx_dtype)
fn = getattr(_sparsetools, self.format + '_matmat_pass1')
fn(M, N,
np.asarray(self.indptr, dtype=idx_dtype),
np.asarray(self.indices, dtype=idx_dtype),
np.asarray(other.indptr, dtype=idx_dtype),
np.asarray(other.indices, dtype=idx_dtype),
indptr)
nnz = indptr[-1]
idx_dtype = get_index_dtype((self.indptr, self.indices,
other.indptr, other.indices),
maxval=nnz)
indptr = np.asarray(indptr, dtype=idx_dtype)
indices = np.empty(nnz, dtype=idx_dtype)
data = np.empty(nnz, dtype=upcast(self.dtype, other.dtype))
fn = getattr(_sparsetools, self.format + '_matmat_pass2')
fn(M, N, np.asarray(self.indptr, dtype=idx_dtype),
np.asarray(self.indices, dtype=idx_dtype),
self.data,
np.asarray(other.indptr, dtype=idx_dtype),
np.asarray(other.indices, dtype=idx_dtype),
other.data,
indptr, indices, data)
return self.__class__((data,indices,indptr),shape=(M,N))
def diagonal(self):
"""Returns the main diagonal of the matrix
"""
# TODO support k-th diagonal
fn = getattr(_sparsetools, self.format + "_diagonal")
y = np.empty(min(self.shape), dtype=upcast(self.dtype))
fn(self.shape[0], self.shape[1], self.indptr, self.indices, self.data, y)
return y
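# Example (illustrative):
#   csr_matrix([[1, 0], [0, 2]]).diagonal() -> array([1, 2])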
#####################
# Other binary ops #
#####################
def _maximum_minimum(self, other, npop, op_name, dense_check):
if isscalarlike(other):
if dense_check(other):
warn("Taking maximum (minimum) with > 0 (< 0) number results to "
"a dense matrix.",
SparseEfficiencyWarning)
other_arr = np.empty(self.shape, dtype=np.asarray(other).dtype)
other_arr.fill(other)
other_arr = self.__class__(other_arr)
return self._binopt(other_arr, op_name)
else:
self.sum_duplicates()
new_data = npop(self.data, np.asarray(other))
mat = self.__class__((new_data, self.indices, self.indptr),
dtype=new_data.dtype, shape=self.shape)
return mat
elif isdense(other):
return npop(self.todense(), other)
elif isspmatrix(other):
return self._binopt(other, op_name)
else:
raise ValueError("Operands not compatible.")
def maximum(self, other):
return self._maximum_minimum(other, np.maximum, '_maximum_', lambda x: np.asarray(x) > 0)
def minimum(self, other):
return self._maximum_minimum(other, np.minimum, '_minimum_', lambda x: np.asarray(x) < 0)
#####################
# Reduce operations #
#####################
def sum(self, axis=None, dtype=None, out=None):
"""Sum the matrix over the given axis. If the axis is None, sum
over both rows and columns, returning a scalar.
"""
# The spmatrix base class already does axis=0 and axis=1 efficiently
# so we only do the case axis=None here
if (not hasattr(self, 'blocksize') and
axis in self._swap(((1, -1), (0, 2)))[0]):
# faster than multiplication for large minor axis in CSC/CSR
res_dtype = get_sum_dtype(self.dtype)
ret = np.zeros(len(self.indptr) - 1, dtype=res_dtype)
major_index, value = self._minor_reduce(np.add)
ret[major_index] = value
ret = np.asmatrix(ret)
if axis % 2 == 1:
ret = ret.T
if out is not None and out.shape != ret.shape:
raise ValueError('dimensions do not match')
return ret.sum(axis=(), dtype=dtype, out=out)
# spmatrix will handle the remaining situations when axis
# is in {None, -1, 0, 1}
else:
return spmatrix.sum(self, axis=axis, dtype=dtype, out=out)
sum.__doc__ = spmatrix.sum.__doc__
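# Examples (illustrative):
#   csr_matrix([[1, 2], [3, 4]]).sum()       -> 10
#   csr_matrix([[1, 2], [3, 4]]).sum(axis=0) -> matrix([[4, 6]])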
def _minor_reduce(self, ufunc):
"""Reduce nonzeros with a ufunc over the minor axis when non-empty
Warning: this does not call sum_duplicates()
Returns
-------
major_index : array of ints
Major indices where nonzero
value : array of self.dtype
Reduce result for nonzeros in each major_index
"""
major_index = np.flatnonzero(np.diff(self.indptr))
if self.data.size == 0 and major_index.size == 0:
# NumPy < 1.8.0 doesn't handle empty arrays in reduceat
value = np.zeros_like(self.data)
else:
value = ufunc.reduceat(self.data,
downcast_intp_index(self.indptr[major_index]))
return major_index, value
#######################
# Getting and Setting #
#######################
def __setitem__(self, index, x):
# Process arrays from IndexMixin
i, j = self._unpack_index(index)
i, j = self._index_to_arrays(i, j)
if isspmatrix(x):
broadcast_row = x.shape[0] == 1 and i.shape[0] != 1
broadcast_col = x.shape[1] == 1 and i.shape[1] != 1
if not ((broadcast_row or x.shape[0] == i.shape[0]) and
(broadcast_col or x.shape[1] == i.shape[1])):
raise ValueError("shape mismatch in assignment")
# clear entries that will be overwritten
ci, cj = self._swap((i.ravel(), j.ravel()))
self._zero_many(ci, cj)
x = x.tocoo()
r, c = x.row, x.col
x = np.asarray(x.data, dtype=self.dtype)
if broadcast_row:
r = np.repeat(np.arange(i.shape[0]), len(r))
c = np.tile(c, i.shape[0])
x = np.tile(x, i.shape[0])
if broadcast_col:
r = np.repeat(r, i.shape[1])
c = np.tile(np.arange(i.shape[1]), len(c))
x = np.repeat(x, i.shape[1])
# only assign entries in the new sparsity structure
i = i[r, c]
j = j[r, c]
else:
# Make x and i into the same shape
x = np.asarray(x, dtype=self.dtype)
x, _ = np.broadcast_arrays(x, i)
if x.shape != i.shape:
raise ValueError("shape mismatch in assignment")
if np.size(x) == 0:
return
i, j = self._swap((i.ravel(), j.ravel()))
self._set_many(i, j, x.ravel())
def _setdiag(self, values, k):
if 0 in self.shape:
return
M, N = self.shape
broadcast = (values.ndim == 0)
if k < 0:
if broadcast:
max_index = min(M + k, N)
else:
max_index = min(M + k, N, len(values))
i = np.arange(max_index, dtype=self.indices.dtype)
j = np.arange(max_index, dtype=self.indices.dtype)
i -= k
else:
if broadcast:
max_index = min(M, N - k)
else:
max_index = min(M, N - k, len(values))
i = np.arange(max_index, dtype=self.indices.dtype)
j = np.arange(max_index, dtype=self.indices.dtype)
j += k
if not broadcast:
values = values[:len(i)]
self[i, j] = values
def _prepare_indices(self, i, j):
M, N = self._swap(self.shape)
def check_bounds(indices, bound):
idx = indices.max()
if idx >= bound:
raise IndexError('index (%d) out of range (>= %d)' %
(idx, bound))
idx = indices.min()
if idx < -bound:
raise IndexError('index (%d) out of range (< -%d)' %
(idx, bound))
check_bounds(i, M)
check_bounds(j, N)
i = np.asarray(i, dtype=self.indices.dtype)
j = np.asarray(j, dtype=self.indices.dtype)
return i, j, M, N
def _set_many(self, i, j, x):
"""Sets value at each (i, j) to x
Here (i,j) index major and minor respectively.
"""
i, j, M, N = self._prepare_indices(i, j)
n_samples = len(x)
offsets = np.empty(n_samples, dtype=self.indices.dtype)
ret = _sparsetools.csr_sample_offsets(M, N, self.indptr, self.indices,
n_samples, i, j, offsets)
if ret == 1:
# rinse and repeat
self.sum_duplicates()
_sparsetools.csr_sample_offsets(M, N, self.indptr,
self.indices, n_samples, i, j,
offsets)
if -1 not in offsets:
# only affects existing non-zero cells
self.data[offsets] = x
return
else:
warn("Changing the sparsity structure of a %s_matrix is expensive. "
"lil_matrix is more efficient." % self.format,
SparseEfficiencyWarning)
# replace where possible
mask = offsets > -1
self.data[offsets[mask]] = x[mask]
# only insertions remain
mask = ~mask
i = i[mask]
i[i < 0] += M
j = j[mask]
j[j < 0] += N
self._insert_many(i, j, x[mask])
def _zero_many(self, i, j):
"""Sets value at each (i, j) to zero, preserving sparsity structure.
Here (i,j) index major and minor respectively.
"""
i, j, M, N = self._prepare_indices(i, j)
n_samples = len(i)
offsets = np.empty(n_samples, dtype=self.indices.dtype)
ret = _sparsetools.csr_sample_offsets(M, N, self.indptr, self.indices,
n_samples, i, j, offsets)
if ret == 1:
# rinse and repeat
self.sum_duplicates()
_sparsetools.csr_sample_offsets(M, N, self.indptr,
self.indices, n_samples, i, j,
offsets)
# only assign zeros to the existing sparsity structure
self.data[offsets[offsets > -1]] = 0
def _insert_many(self, i, j, x):
"""Inserts new nonzero at each (i, j) with value x
Here (i,j) index major and minor respectively.
i, j and x must be non-empty, 1d arrays.
Inserts one major group (e.g. all entries in one row) at a time.
Maintains has_sorted_indices property.
Modifies i, j, x in place.
"""
order = np.argsort(i, kind='mergesort') # stable for duplicates
i = i.take(order, mode='clip')
j = j.take(order, mode='clip')
x = x.take(order, mode='clip')
do_sort = self.has_sorted_indices
# Update index data type
idx_dtype = get_index_dtype((self.indices, self.indptr),
maxval=(self.indptr[-1] + x.size))
self.indptr = np.asarray(self.indptr, dtype=idx_dtype)
self.indices = np.asarray(self.indices, dtype=idx_dtype)
i = np.asarray(i, dtype=idx_dtype)
j = np.asarray(j, dtype=idx_dtype)
# Collate old and new in chunks by major index
indices_parts = []
data_parts = []
ui, ui_indptr = np.unique(i, return_index=True)
ui_indptr = np.append(ui_indptr, len(j))
new_nnzs = np.diff(ui_indptr)
prev = 0
for c, (ii, js, je) in enumerate(izip(ui, ui_indptr, ui_indptr[1:])):
# old entries
start = self.indptr[prev]
stop = self.indptr[ii]
indices_parts.append(self.indices[start:stop])
data_parts.append(self.data[start:stop])
# handle duplicate j: keep last setting
uj, uj_indptr = np.unique(j[js:je][::-1], return_index=True)
if len(uj) == je - js:
indices_parts.append(j[js:je])
data_parts.append(x[js:je])
else:
indices_parts.append(j[js:je][::-1][uj_indptr])
data_parts.append(x[js:je][::-1][uj_indptr])
new_nnzs[c] = len(uj)
prev = ii
# remaining old entries
start = self.indptr[ii]
indices_parts.append(self.indices[start:])
data_parts.append(self.data[start:])
# update attributes
self.indices = np.concatenate(indices_parts)
self.data = np.concatenate(data_parts)
nnzs = np.asarray(np.ediff1d(self.indptr, to_begin=0), dtype=idx_dtype)
nnzs[1:][ui] += new_nnzs
self.indptr = np.cumsum(nnzs, out=nnzs)
if do_sort:
# TODO: only sort where necessary
self.has_sorted_indices = False
self.sort_indices()
self.check_format(full_check=False)
def _get_single_element(self,row,col):
M, N = self.shape
if (row < 0):
row += M
if (col < 0):
col += N
if not (0 <= row < M) or not (0 <= col < N):
raise IndexError("index out of bounds")
major_index, minor_index = self._swap((row,col))
# TODO make use of sorted indices (if present)
start = self.indptr[major_index]
end = self.indptr[major_index+1]
# can use np.add(..., where) from numpy 1.7
return np.compress(minor_index == self.indices[start:end],
self.data[start:end]).sum(dtype=self.dtype)
def _get_submatrix(self, slice0, slice1):
"""Return a submatrix of this matrix (new matrix is created)."""
slice0, slice1 = self._swap((slice0,slice1))
shape0, shape1 = self._swap(self.shape)
def _process_slice(sl, num):
if isinstance(sl, slice):
i0, i1 = sl.start, sl.stop
if i0 is None:
i0 = 0
elif i0 < 0:
i0 = num + i0
if i1 is None:
i1 = num
elif i1 < 0:
i1 = num + i1
return i0, i1
elif np.isscalar(sl):
if sl < 0:
sl += num
return sl, sl + 1
else:
return sl[0], sl[1]
def _in_bounds(i0, i1, num):
if not (0 <= i0 < num) or not (0 < i1 <= num) or not (i0 < i1):
raise IndexError("index out of bounds: 0<=%d<%d, 0<=%d<%d, %d<%d" %
(i0, num, i1, num, i0, i1))
i0, i1 = _process_slice(slice0, shape0)
j0, j1 = _process_slice(slice1, shape1)
_in_bounds(i0, i1, shape0)
_in_bounds(j0, j1, shape1)
aux = _sparsetools.get_csr_submatrix(shape0, shape1,
self.indptr, self.indices,
self.data,
i0, i1, j0, j1)
data, indices, indptr = aux[2], aux[1], aux[0]
shape = self._swap((i1 - i0, j1 - j0))
return self.__class__((data, indices, indptr), shape=shape)
######################
# Conversion methods #
######################
def tocoo(self, copy=True):
major_dim, minor_dim = self._swap(self.shape)
minor_indices = self.indices
major_indices = np.empty(len(minor_indices), dtype=self.indices.dtype)
_sparsetools.expandptr(major_dim, self.indptr, major_indices)
row, col = self._swap((major_indices, minor_indices))
from .coo import coo_matrix
return coo_matrix((self.data, (row, col)), self.shape, copy=copy,
dtype=self.dtype)
tocoo.__doc__ = spmatrix.tocoo.__doc__
def toarray(self, order=None, out=None):
"""See the docstring for `spmatrix.toarray`."""
return self.tocoo(copy=False).toarray(order=order, out=out)
##############################################################
# methods that examine or modify the internal data structure #
##############################################################
def eliminate_zeros(self):
"""Remove zero entries from the matrix
This is an *in place* operation
"""
M, N = self._swap(self.shape)
_sparsetools.csr_eliminate_zeros(M, N, self.indptr, self.indices,
self.data)
self.prune() # nnz may have changed
def __get_has_canonical_format(self):
"""Determine whether the matrix has sorted indices and no duplicates
Returns
- True: if the above applies
- False: otherwise
has_canonical_format implies has_sorted_indices, so if the latter flag
is False, so will the former be; if the former is found True, the
latter flag is also set.
"""
# first check to see if result was cached
if not getattr(self, '_has_sorted_indices', True):
# not sorted => not canonical
self._has_canonical_format = False
elif not hasattr(self, '_has_canonical_format'):
self.has_canonical_format = _sparsetools.csr_has_canonical_format(
len(self.indptr) - 1, self.indptr, self.indices)
return self._has_canonical_format
def __set_has_canonical_format(self, val):
self._has_canonical_format = bool(val)
if val:
self.has_sorted_indices = True
has_canonical_format = property(fget=__get_has_canonical_format,
fset=__set_has_canonical_format)
def sum_duplicates(self):
"""Eliminate duplicate matrix entries by adding them together
This is an *in place* operation
"""
if self.has_canonical_format:
return
self.sort_indices()
M, N = self._swap(self.shape)
_sparsetools.csr_sum_duplicates(M, N, self.indptr, self.indices,
self.data)
self.prune() # nnz may have changed
self.has_canonical_format = True
def __get_sorted(self):
"""Determine whether the matrix has sorted indices
Returns
- True: if the indices of the matrix are in sorted order
- False: otherwise
"""
# first check to see if result was cached
if not hasattr(self,'_has_sorted_indices'):
self._has_sorted_indices = _sparsetools.csr_has_sorted_indices(
len(self.indptr) - 1, self.indptr, self.indices)
return self._has_sorted_indices
def __set_sorted(self, val):
self._has_sorted_indices = bool(val)
has_sorted_indices = property(fget=__get_sorted, fset=__set_sorted)
def sorted_indices(self):
"""Return a copy of this matrix with sorted indices
"""
A = self.copy()
A.sort_indices()
return A
# an alternative that has linear complexity is the following
# although the previous option is typically faster
# return self.toother().toother()
def sort_indices(self):
"""Sort the indices of this matrix *in place*
"""
if not self.has_sorted_indices:
_sparsetools.csr_sort_indices(len(self.indptr) - 1, self.indptr,
self.indices, self.data)
self.has_sorted_indices = True
def prune(self):
"""Remove empty space after all non-zero elements.
"""
major_dim = self._swap(self.shape)[0]
if len(self.indptr) != major_dim + 1:
raise ValueError('index pointer has invalid length')
if len(self.indices) < self.nnz:
raise ValueError('indices array has fewer than nnz elements')
if len(self.data) < self.nnz:
raise ValueError('data array has fewer than nnz elements')
self.data = self.data[:self.nnz]
self.indices = self.indices[:self.nnz]
###################
# utility methods #
###################
# needed by _data_matrix
def _with_data(self,data,copy=True):
"""Returns a matrix with the same sparsity structure as self,
but with different data. By default the structure arrays
(i.e. .indptr and .indices) are copied.
"""
if copy:
return self.__class__((data,self.indices.copy(),self.indptr.copy()),
shape=self.shape,dtype=data.dtype)
else:
return self.__class__((data,self.indices,self.indptr),
shape=self.shape,dtype=data.dtype)
def _binopt(self, other, op):
"""apply the binary operation fn to two sparse matrices."""
other = self.__class__(other)
# e.g. csr_plus_csr, csr_minus_csr, etc.
fn = getattr(_sparsetools, self.format + op + self.format)
maxnnz = self.nnz + other.nnz
idx_dtype = get_index_dtype((self.indptr, self.indices,
other.indptr, other.indices),
maxval=maxnnz)
indptr = np.empty(self.indptr.shape, dtype=idx_dtype)
indices = np.empty(maxnnz, dtype=idx_dtype)
bool_ops = ['_ne_', '_lt_', '_gt_', '_le_', '_ge_']
if op in bool_ops:
data = np.empty(maxnnz, dtype=np.bool_)
else:
data = np.empty(maxnnz, dtype=upcast(self.dtype, other.dtype))
fn(self.shape[0], self.shape[1],
np.asarray(self.indptr, dtype=idx_dtype),
np.asarray(self.indices, dtype=idx_dtype),
self.data,
np.asarray(other.indptr, dtype=idx_dtype),
np.asarray(other.indices, dtype=idx_dtype),
other.data,
indptr, indices, data)
actual_nnz = indptr[-1]
indices = indices[:actual_nnz]
data = data[:actual_nnz]
if actual_nnz < maxnnz // 2:
# too much waste, trim arrays
indices = indices.copy()
data = data.copy()
A = self.__class__((data, indices, indptr), shape=self.shape)
return A
def _divide_sparse(self, other):
"""
Divide this matrix by a second sparse matrix.
"""
if other.shape != self.shape:
raise ValueError('inconsistent shapes')
r = self._binopt(other, '_eldiv_')
if np.issubdtype(r.dtype, np.inexact):
# Eldiv leaves entries outside the combined sparsity
# pattern empty, so they must be filled manually.
# Everything outside of other's sparsity is NaN, and everything
# inside it is either zero or defined by eldiv.
out = np.empty(self.shape, dtype=self.dtype)
out.fill(np.nan)
row, col = other.nonzero()
out[row, col] = 0
r = r.tocoo()
out[r.row, r.col] = r.data
out = np.matrix(out)
else:
# integer types go with nan <-> 0
out = r
return out
| bsd-3-clause |
SporkCoin/spork | spork/users/tests/test_views.py | 367 | 1840 | from django.test import RequestFactory
from test_plus.test import TestCase
from ..views import (
UserRedirectView,
UserUpdateView
)
class BaseUserTestCase(TestCase):
def setUp(self):
self.user = self.make_user()
self.factory = RequestFactory()
class TestUserRedirectView(BaseUserTestCase):
def test_get_redirect_url(self):
# Instantiate the view directly. Never do this outside a test!
view = UserRedirectView()
# Generate a fake request
request = self.factory.get('/fake-url')
# Attach the user to the request
request.user = self.user
# Attach the request to the view
view.request = request
# Expect: '/users/testuser/', as that is the default username for
# self.make_user()
self.assertEqual(
view.get_redirect_url(),
'/users/testuser/'
)
class TestUserUpdateView(BaseUserTestCase):
def setUp(self):
# call BaseUserTestCase.setUp()
super(TestUserUpdateView, self).setUp()
# Instantiate the view directly. Never do this outside a test!
self.view = UserUpdateView()
# Generate a fake request
request = self.factory.get('/fake-url')
# Attach the user to the request
request.user = self.user
# Attach the request to the view
self.view.request = request
def test_get_success_url(self):
# Expect: '/users/testuser/', as that is the default username for
# self.make_user()
self.assertEqual(
self.view.get_success_url(),
'/users/testuser/'
)
def test_get_object(self):
# Expect: self.user, as that is the request's user object
self.assertEqual(
self.view.get_object(),
self.user
)
| mit |
harshhemani/keras | tests/auto/keras/test_constraints.py | 78 | 2446 | import unittest
import numpy as np
from numpy.testing import assert_allclose
from theano import tensor as T
class TestConstraints(unittest.TestCase):
def setUp(self):
self.some_values = [0.1, 0.5, 3, 8, 1e-7]
np.random.seed(3537)
self.example_array = np.random.random((100, 100)) * 100. - 50.
self.example_array[0, 0] = 0. # 0 could possibly cause trouble
def test_maxnorm(self):
from keras.constraints import maxnorm
for m in self.some_values:
norm_instance = maxnorm(m)
normed = norm_instance(self.example_array)
assert (np.all(normed.eval() < m))
# a more explicit example
norm_instance = maxnorm(2.0)
x = np.array([[0, 0, 0], [1.0, 0, 0], [3, 0, 0], [3, 3, 3]]).T
x_normed_target = np.array([[0, 0, 0], [1.0, 0, 0], [2.0, 0, 0], [2./np.sqrt(3), 2./np.sqrt(3), 2./np.sqrt(3)]]).T
x_normed_actual = norm_instance(x).eval()
assert_allclose(x_normed_actual, x_normed_target)
def test_nonneg(self):
from keras.constraints import nonneg
nonneg_instance = nonneg()
normed = nonneg_instance(self.example_array)
assert (np.all(np.min(normed.eval(), axis=1) == 0.))
def test_identity(self):
from keras.constraints import identity
identity_instance = identity()
normed = identity_instance(self.example_array)
assert (np.all(normed == self.example_array))
def test_identity_oddballs(self):
"""
        Test the identity constraint on some more exotic input.
        This does not need to pass for the desired real-life behaviour,
        but it should in the current implementation.
"""
from keras.constraints import identity
identity_instance = identity()
oddball_examples = ["Hello", [1], -1, None]
assert(oddball_examples == identity_instance(oddball_examples))
def test_unitnorm(self):
from keras.constraints import unitnorm
unitnorm_instance = unitnorm()
normalized = unitnorm_instance(self.example_array)
norm_of_normalized = np.sqrt(np.sum(normalized.eval()**2, axis=1))
        difference = norm_of_normalized - 1.  # in the unit norm constraint, this should equal 1.
largest_difference = np.max(np.abs(difference))
self.assertAlmostEqual(largest_difference, 0.)
if __name__ == '__main__':
unittest.main()
| mit |
akhan7/servo | tests/wpt/web-platform-tests/tools/py/py/_io/terminalwriter.py | 175 | 12542 | """
Helper functions for writing to terminals and files.
"""
import sys, os
import py
py3k = sys.version_info[0] >= 3
from py.builtin import text, bytes
win32_and_ctypes = False
colorama = None
if sys.platform == "win32":
try:
import colorama
except ImportError:
try:
import ctypes
win32_and_ctypes = True
except ImportError:
pass
def _getdimensions():
    import termios, fcntl, struct
    call = fcntl.ioctl(1, termios.TIOCGWINSZ, "\000" * 8)
    height, width = struct.unpack("hhhh", call)[:2]
    return height, width
def get_terminal_width():
height = width = 0
try:
height, width = _getdimensions()
except py.builtin._sysex:
raise
except:
# pass to fallback below
pass
if width == 0:
# FALLBACK:
# * some exception happened
# * or this is emacs terminal which reports (0,0)
width = int(os.environ.get('COLUMNS', 80))
    # XXX the windows getdimensions may be bogus, so sanity-check the value
if width < 40:
width = 80
return width
terminal_width = get_terminal_width()
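# Fallback sketch: when the ioctl fails or reports 0 (e.g. stdout is piped),
# the width comes from the COLUMNS environment variable, defaulting to 80;
# any width below 40 is then reset to 80 by the check above.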
# XXX unify with _escaped func below
def ansi_print(text, esc, file=None, newline=True, flush=False):
if file is None:
file = sys.stderr
text = text.rstrip()
if esc and not isinstance(esc, tuple):
esc = (esc,)
if esc and sys.platform != "win32" and file.isatty():
text = (''.join(['\x1b[%sm' % cod for cod in esc]) +
text +
'\x1b[0m') # ANSI color code "reset"
if newline:
text += '\n'
if esc and win32_and_ctypes and file.isatty():
if 1 in esc:
bold = True
esc = tuple([x for x in esc if x != 1])
else:
bold = False
esctable = {() : FOREGROUND_WHITE, # normal
(31,): FOREGROUND_RED, # red
(32,): FOREGROUND_GREEN, # green
(33,): FOREGROUND_GREEN|FOREGROUND_RED, # yellow
(34,): FOREGROUND_BLUE, # blue
(35,): FOREGROUND_BLUE|FOREGROUND_RED, # purple
(36,): FOREGROUND_BLUE|FOREGROUND_GREEN, # cyan
(37,): FOREGROUND_WHITE, # white
(39,): FOREGROUND_WHITE, # reset
}
attr = esctable.get(esc, FOREGROUND_WHITE)
if bold:
attr |= FOREGROUND_INTENSITY
STD_OUTPUT_HANDLE = -11
STD_ERROR_HANDLE = -12
if file is sys.stderr:
handle = GetStdHandle(STD_ERROR_HANDLE)
else:
handle = GetStdHandle(STD_OUTPUT_HANDLE)
oldcolors = GetConsoleInfo(handle).wAttributes
attr |= (oldcolors & 0x0f0)
SetConsoleTextAttribute(handle, attr)
while len(text) > 32768:
file.write(text[:32768])
text = text[32768:]
if text:
file.write(text)
SetConsoleTextAttribute(handle, oldcolors)
else:
file.write(text)
if flush:
file.flush()
def should_do_markup(file):
if os.environ.get('PY_COLORS') == '1':
return True
if os.environ.get('PY_COLORS') == '0':
return False
return hasattr(file, 'isatty') and file.isatty() \
and os.environ.get('TERM') != 'dumb' \
and not (sys.platform.startswith('java') and os._name == 'nt')
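# For example: PY_COLORS=1 forces markup even when output is piped, and
# PY_COLORS=0 suppresses it even on a real terminal; otherwise the tty,
# TERM and jython-on-nt checks above decide.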
class TerminalWriter(object):
_esctable = dict(black=30, red=31, green=32, yellow=33,
blue=34, purple=35, cyan=36, white=37,
Black=40, Red=41, Green=42, Yellow=43,
Blue=44, Purple=45, Cyan=46, White=47,
bold=1, light=2, blink=5, invert=7)
# XXX deprecate stringio argument
def __init__(self, file=None, stringio=False, encoding=None):
if file is None:
if stringio:
self.stringio = file = py.io.TextIO()
else:
file = py.std.sys.stdout
elif py.builtin.callable(file) and not (
hasattr(file, "write") and hasattr(file, "flush")):
file = WriteFile(file, encoding=encoding)
if hasattr(file, "isatty") and file.isatty() and colorama:
file = colorama.AnsiToWin32(file).stream
self.encoding = encoding or getattr(file, 'encoding', "utf-8")
self._file = file
self.fullwidth = get_terminal_width()
self.hasmarkup = should_do_markup(file)
self._lastlen = 0
def _escaped(self, text, esc):
if esc and self.hasmarkup:
text = (''.join(['\x1b[%sm' % cod for cod in esc]) +
text +'\x1b[0m')
return text
def markup(self, text, **kw):
esc = []
for name in kw:
if name not in self._esctable:
raise ValueError("unknown markup: %r" %(name,))
if kw[name]:
esc.append(self._esctable[name])
return self._escaped(text, tuple(esc))
def sep(self, sepchar, title=None, fullwidth=None, **kw):
if fullwidth is None:
fullwidth = self.fullwidth
# the goal is to have the line be as long as possible
# under the condition that len(line) <= fullwidth
if sys.platform == "win32":
# if we print in the last column on windows we are on a
# new line but there is no way to verify/neutralize this
# (we may not know the exact line width)
# so let's be defensive to avoid empty lines in the output
fullwidth -= 1
if title is not None:
# we want 2 + 2*len(fill) + len(title) <= fullwidth
# i.e. 2 + 2*len(sepchar)*N + len(title) <= fullwidth
# 2*len(sepchar)*N <= fullwidth - len(title) - 2
# N <= (fullwidth - len(title) - 2) // (2*len(sepchar))
N = (fullwidth - len(title) - 2) // (2*len(sepchar))
fill = sepchar * N
line = "%s %s %s" % (fill, title, fill)
else:
# we want len(sepchar)*N <= fullwidth
# i.e. N <= fullwidth // len(sepchar)
line = sepchar * (fullwidth // len(sepchar))
# in some situations there is room for an extra sepchar at the right,
# in particular if we consider that with a sepchar like "_ " the
# trailing space is not important at the end of the line
if len(line) + len(sepchar.rstrip()) <= fullwidth:
line += sepchar.rstrip()
self.line(line, **kw)
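    # Worked example of the width arithmetic in sep() (non-Windows case):
    # fullwidth=20, sepchar='=', title='hi' gives N = (20 - 2 - 2) // 2 = 8,
    # producing '======== hi ========', exactly 20 characters wide.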
def write(self, msg, **kw):
if msg:
if not isinstance(msg, (bytes, text)):
msg = text(msg)
if self.hasmarkup and kw:
markupmsg = self.markup(msg, **kw)
else:
markupmsg = msg
write_out(self._file, markupmsg)
def line(self, s='', **kw):
self.write(s, **kw)
self._checkfill(s)
self.write('\n')
def reline(self, line, **kw):
if not self.hasmarkup:
raise ValueError("cannot use rewrite-line without terminal")
self.write(line, **kw)
self._checkfill(line)
self.write('\r')
self._lastlen = len(line)
def _checkfill(self, line):
diff2last = self._lastlen - len(line)
if diff2last > 0:
self.write(" " * diff2last)
class Win32ConsoleWriter(TerminalWriter):
def write(self, msg, **kw):
if msg:
if not isinstance(msg, (bytes, text)):
msg = text(msg)
oldcolors = None
if self.hasmarkup and kw:
handle = GetStdHandle(STD_OUTPUT_HANDLE)
oldcolors = GetConsoleInfo(handle).wAttributes
default_bg = oldcolors & 0x00F0
attr = default_bg
if kw.pop('bold', False):
attr |= FOREGROUND_INTENSITY
if kw.pop('red', False):
attr |= FOREGROUND_RED
elif kw.pop('blue', False):
attr |= FOREGROUND_BLUE
elif kw.pop('green', False):
attr |= FOREGROUND_GREEN
elif kw.pop('yellow', False):
attr |= FOREGROUND_GREEN|FOREGROUND_RED
else:
attr |= oldcolors & 0x0007
SetConsoleTextAttribute(handle, attr)
write_out(self._file, msg)
if oldcolors:
SetConsoleTextAttribute(handle, oldcolors)
class WriteFile(object):
def __init__(self, writemethod, encoding=None):
self.encoding = encoding
self._writemethod = writemethod
def write(self, data):
if self.encoding:
data = data.encode(self.encoding, "replace")
self._writemethod(data)
def flush(self):
return
if win32_and_ctypes:
TerminalWriter = Win32ConsoleWriter
import ctypes
from ctypes import wintypes
# ctypes access to the Windows console
STD_OUTPUT_HANDLE = -11
STD_ERROR_HANDLE = -12
FOREGROUND_BLACK = 0x0000 # black text
FOREGROUND_BLUE = 0x0001 # text color contains blue.
FOREGROUND_GREEN = 0x0002 # text color contains green.
FOREGROUND_RED = 0x0004 # text color contains red.
FOREGROUND_WHITE = 0x0007
FOREGROUND_INTENSITY = 0x0008 # text color is intensified.
BACKGROUND_BLACK = 0x0000 # background color black
BACKGROUND_BLUE = 0x0010 # background color contains blue.
BACKGROUND_GREEN = 0x0020 # background color contains green.
BACKGROUND_RED = 0x0040 # background color contains red.
BACKGROUND_WHITE = 0x0070
BACKGROUND_INTENSITY = 0x0080 # background color is intensified.
SHORT = ctypes.c_short
class COORD(ctypes.Structure):
_fields_ = [('X', SHORT),
('Y', SHORT)]
class SMALL_RECT(ctypes.Structure):
_fields_ = [('Left', SHORT),
('Top', SHORT),
('Right', SHORT),
('Bottom', SHORT)]
class CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure):
_fields_ = [('dwSize', COORD),
('dwCursorPosition', COORD),
('wAttributes', wintypes.WORD),
('srWindow', SMALL_RECT),
('dwMaximumWindowSize', COORD)]
_GetStdHandle = ctypes.windll.kernel32.GetStdHandle
_GetStdHandle.argtypes = [wintypes.DWORD]
_GetStdHandle.restype = wintypes.HANDLE
def GetStdHandle(kind):
return _GetStdHandle(kind)
SetConsoleTextAttribute = ctypes.windll.kernel32.SetConsoleTextAttribute
SetConsoleTextAttribute.argtypes = [wintypes.HANDLE, wintypes.WORD]
SetConsoleTextAttribute.restype = wintypes.BOOL
_GetConsoleScreenBufferInfo = \
ctypes.windll.kernel32.GetConsoleScreenBufferInfo
_GetConsoleScreenBufferInfo.argtypes = [wintypes.HANDLE,
ctypes.POINTER(CONSOLE_SCREEN_BUFFER_INFO)]
_GetConsoleScreenBufferInfo.restype = wintypes.BOOL
def GetConsoleInfo(handle):
info = CONSOLE_SCREEN_BUFFER_INFO()
_GetConsoleScreenBufferInfo(handle, ctypes.byref(info))
return info
def _getdimensions():
handle = GetStdHandle(STD_OUTPUT_HANDLE)
info = GetConsoleInfo(handle)
        # Subtract one from the width, otherwise the cursor wraps
# and the ending \n causes an empty line to display.
return info.dwSize.Y, info.dwSize.X - 1
def write_out(fil, msg):
# XXX sometimes "msg" is of type bytes, sometimes text which
# complicates the situation. Should we try to enforce unicode?
try:
# on py27 and above writing out to sys.stdout with an encoding
# should usually work for unicode messages (if the encoding is
# capable of it)
fil.write(msg)
except UnicodeEncodeError:
# on py26 it might not work because stdout expects bytes
if fil.encoding:
try:
fil.write(msg.encode(fil.encoding))
except UnicodeEncodeError:
# it might still fail if the encoding is not capable
pass
else:
fil.flush()
return
# fallback: escape all unicode characters
msg = msg.encode("unicode-escape").decode("ascii")
fil.write(msg)
fil.flush()
| mpl-2.0 |
teheavy/AMA3D | Nh3D/3_CathTopo_uploader.py | 1 | 1726 | # Script Version: 1.0
# Author: Te Chen
# Project: AMA3D
# Task Step: 1
# This script loads the CATH Names file and records every topology-level node in the database.
# CathNames file format: CATH Names File (CNF) 2.0; for more info, visit www.cathdb.info
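# An illustrative CathNames line (field values assumed, not taken from the
# real file):
#     1.10.8    1oaiA00    :Helix non-globular
# i.e. node id, representative domain, then a colon-prefixed description.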
import MySQLdb
import os
import sys
# Connect to Database by reading Account File.
with open("Account", "r") as file:
parsed = file.readline().split()
DB = MySQLdb.connect(host=parsed[0], user=parsed[1], passwd=parsed[2], db=parsed[3])
cursor = DB.cursor()
# Read the node list and register CATH topology into database.
os.getcwd()
node_file = open("./Nh3D/CathNames", "r")
line = node_file.readline()
trigger = ''
while line:
if line.startswith("#") == False and line != "":
node_info = line.split(" ")
if len(node_info) == 3:
if node_info[0].count('.') == 2:
print "Working on Node: " + node_info[0]
cursor.execute("""INSERT INTO Topology(Node, Description, Comment, Representative) VALUES (\'%s\', \'%s\', \'%s\', \'%s\')"""
% (node_info[0], str(MySQLdb.escape_string(node_info[2][1:-1])), 'from CathNames', node_info[1]))
# print """INSERT INTO Topology(Node, Description, Comment, Representative) VALUES (\'%s\', \'%s\', \'%s\', \'%s\')"""\
# % (node_info[0], (node_info[2][1:-1]).replace(";", ""), 'from CathNames', node_info[1])
# Trigger a new TC
print trigger
sys.stdout.flush()
trigger = "trigger\t%s\t%d\t%d"%(node_info[0], 4, 0)
elif node_info[0].count('.') == 3:
# Trigger a new TC but leave last flag on.
print trigger[:-1] + "1"
sys.stdout.flush()
break
line = node_file.readline()
# Wrap up and close connection.
DB.commit()
DB.close() | gpl-2.0 |
mustafat/odoo-1 | addons/pos_restaurant/restaurant.py | 325 | 2246 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import openerp
from openerp import tools
from openerp.osv import fields, osv
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class restaurant_printer(osv.osv):
_name = 'restaurant.printer'
_columns = {
'name' : fields.char('Printer Name', size=32, required=True, help='An internal identification of the printer'),
'proxy_ip': fields.char('Proxy IP Address', size=32, help="The IP Address or hostname of the Printer's hardware proxy"),
'product_categories_ids': fields.many2many('pos.category','printer_category_rel', 'printer_id','category_id',string='Printed Product Categories'),
}
_defaults = {
'name' : 'Printer',
}
class pos_config(osv.osv):
_inherit = 'pos.config'
_columns = {
'iface_splitbill': fields.boolean('Bill Splitting', help='Enables Bill Splitting in the Point of Sale'),
'iface_printbill': fields.boolean('Bill Printing', help='Allows to print the Bill before payment'),
'printer_ids': fields.many2many('restaurant.printer','pos_config_printer_rel', 'config_id','printer_id',string='Order Printers'),
}
_defaults = {
'iface_splitbill': False,
'iface_printbill': False,
}
| agpl-3.0 |
juliengdt/juliengdt-resources | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/lexers/hdl.py | 363 | 16209 | # -*- coding: utf-8 -*-
"""
pygments.lexers.hdl
~~~~~~~~~~~~~~~~~~~
Lexers for hardware descriptor languages.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, include, using, this
from pygments.token import \
Text, Comment, Operator, Keyword, Name, String, Number, Punctuation, \
Error
__all__ = ['VerilogLexer', 'SystemVerilogLexer', 'VhdlLexer']
class VerilogLexer(RegexLexer):
"""
For verilog source code with preprocessor directives.
*New in Pygments 1.4.*
"""
name = 'verilog'
aliases = ['verilog', 'v']
filenames = ['*.v']
mimetypes = ['text/x-verilog']
#: optional Comment or Whitespace
_ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'
tokens = {
'root': [
(r'^\s*`define', Comment.Preproc, 'macro'),
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'[{}#@]', Punctuation),
(r'L?"', String, 'string'),
(r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'([0-9]+)|(\'h)[0-9a-fA-F]+', Number.Hex),
(r'([0-9]+)|(\'b)[0-1]+', Number.Hex), # should be binary
(r'([0-9]+)|(\'d)[0-9]+', Number.Integer),
(r'([0-9]+)|(\'o)[0-7]+', Number.Oct),
(r'\'[01xz]', Number),
(r'\d+[Ll]?', Number.Integer),
(r'\*/', Error),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r'[()\[\],.;\']', Punctuation),
(r'`[a-zA-Z_][a-zA-Z0-9_]*', Name.Constant),
(r'^(\s*)(package)(\s+)', bygroups(Text, Keyword.Namespace, Text)),
(r'^(\s*)(import)(\s+)', bygroups(Text, Keyword.Namespace, Text),
'import'),
(r'(always|always_comb|always_ff|always_latch|and|assign|automatic|'
r'begin|break|buf|bufif0|bufif1|case|casex|casez|cmos|const|'
r'continue|deassign|default|defparam|disable|do|edge|else|end|endcase|'
r'endfunction|endgenerate|endmodule|endpackage|endprimitive|endspecify|'
r'endtable|endtask|enum|event|final|for|force|forever|fork|function|'
r'generate|genvar|highz0|highz1|if|initial|inout|input|'
r'integer|join|large|localparam|macromodule|medium|module|'
r'nand|negedge|nmos|nor|not|notif0|notif1|or|output|packed|'
r'parameter|pmos|posedge|primitive|pull0|pull1|pulldown|pullup|rcmos|'
r'ref|release|repeat|return|rnmos|rpmos|rtran|rtranif0|'
r'rtranif1|scalared|signed|small|specify|specparam|strength|'
r'string|strong0|strong1|struct|table|task|'
r'tran|tranif0|tranif1|type|typedef|'
r'unsigned|var|vectored|void|wait|weak0|weak1|while|'
r'xnor|xor)\b', Keyword),
(r'`(accelerate|autoexpand_vectornets|celldefine|default_nettype|'
r'else|elsif|endcelldefine|endif|endprotect|endprotected|'
r'expand_vectornets|ifdef|ifndef|include|noaccelerate|noexpand_vectornets|'
r'noremove_gatenames|noremove_netnames|nounconnected_drive|'
r'protect|protected|remove_gatenames|remove_netnames|resetall|'
r'timescale|unconnected_drive|undef)\b', Comment.Preproc),
(r'\$(bits|bitstoreal|bitstoshortreal|countdrivers|display|fclose|'
r'fdisplay|finish|floor|fmonitor|fopen|fstrobe|fwrite|'
r'getpattern|history|incsave|input|itor|key|list|log|'
r'monitor|monitoroff|monitoron|nokey|nolog|printtimescale|'
r'random|readmemb|readmemh|realtime|realtobits|reset|reset_count|'
r'reset_value|restart|rtoi|save|scale|scope|shortrealtobits|'
r'showscopes|showvariables|showvars|sreadmemb|sreadmemh|'
r'stime|stop|strobe|time|timeformat|write)\b', Name.Builtin),
(r'(byte|shortint|int|longint|integer|time|'
r'bit|logic|reg|'
             r'supply0|supply1|tri|triand|trior|tri0|tri1|trireg|uwire|wire|wand|wor|'
r'shortreal|real|realtime)\b', Keyword.Type),
('[a-zA-Z_][a-zA-Z0-9_]*:(?!:)', Name.Label),
('[a-zA-Z_][a-zA-Z0-9_]*', Name),
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # stray backslash
],
'macro': [
(r'[^/\n]+', Comment.Preproc),
(r'/[*](.|\n)*?[*]/', Comment.Multiline),
(r'//.*?\n', Comment.Single, '#pop'),
(r'/', Comment.Preproc),
(r'(?<=\\)\n', Comment.Preproc),
(r'\n', Comment.Preproc, '#pop'),
],
'import': [
(r'[a-zA-Z0-9_:]+\*?', Name.Namespace, '#pop')
]
}
def get_tokens_unprocessed(self, text):
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text):
# Convention: mark all upper case names as constants
if token is Name:
if value.isupper():
token = Name.Constant
yield index, token, value
class SystemVerilogLexer(RegexLexer):
"""
    Extends the Verilog lexer to recognise all SystemVerilog keywords from the
    IEEE 1800-2009 standard.
*New in Pygments 1.5.*
"""
name = 'systemverilog'
aliases = ['systemverilog', 'sv']
filenames = ['*.sv', '*.svh']
mimetypes = ['text/x-systemverilog']
#: optional Comment or Whitespace
_ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'
tokens = {
'root': [
(r'^\s*`define', Comment.Preproc, 'macro'),
(r'^(\s*)(package)(\s+)', bygroups(Text, Keyword.Namespace, Text)),
(r'^(\s*)(import)(\s+)', bygroups(Text, Keyword.Namespace, Text), 'import'),
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'[{}#@]', Punctuation),
(r'L?"', String, 'string'),
(r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'([0-9]+)|(\'h)[0-9a-fA-F]+', Number.Hex),
(r'([0-9]+)|(\'b)[0-1]+', Number.Hex), # should be binary
(r'([0-9]+)|(\'d)[0-9]+', Number.Integer),
(r'([0-9]+)|(\'o)[0-7]+', Number.Oct),
(r'\'[01xz]', Number),
(r'\d+[Ll]?', Number.Integer),
(r'\*/', Error),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r'[()\[\],.;\']', Punctuation),
(r'`[a-zA-Z_][a-zA-Z0-9_]*', Name.Constant),
(r'(accept_on|alias|always|always_comb|always_ff|always_latch|'
r'and|assert|assign|assume|automatic|before|begin|bind|bins|'
r'binsof|bit|break|buf|bufif0|bufif1|byte|case|casex|casez|'
r'cell|chandle|checker|class|clocking|cmos|config|const|constraint|'
r'context|continue|cover|covergroup|coverpoint|cross|deassign|'
r'default|defparam|design|disable|dist|do|edge|else|end|endcase|'
r'endchecker|endclass|endclocking|endconfig|endfunction|endgenerate|'
r'endgroup|endinterface|endmodule|endpackage|endprimitive|'
r'endprogram|endproperty|endsequence|endspecify|endtable|'
r'endtask|enum|event|eventually|expect|export|extends|extern|'
r'final|first_match|for|force|foreach|forever|fork|forkjoin|'
r'function|generate|genvar|global|highz0|highz1|if|iff|ifnone|'
r'ignore_bins|illegal_bins|implies|import|incdir|include|'
r'initial|inout|input|inside|instance|int|integer|interface|'
r'intersect|join|join_any|join_none|large|let|liblist|library|'
r'local|localparam|logic|longint|macromodule|matches|medium|'
r'modport|module|nand|negedge|new|nexttime|nmos|nor|noshowcancelled|'
r'not|notif0|notif1|null|or|output|package|packed|parameter|'
r'pmos|posedge|primitive|priority|program|property|protected|'
r'pull0|pull1|pulldown|pullup|pulsestyle_ondetect|pulsestyle_onevent|'
r'pure|rand|randc|randcase|randsequence|rcmos|real|realtime|'
r'ref|reg|reject_on|release|repeat|restrict|return|rnmos|'
r'rpmos|rtran|rtranif0|rtranif1|s_always|s_eventually|s_nexttime|'
r's_until|s_until_with|scalared|sequence|shortint|shortreal|'
r'showcancelled|signed|small|solve|specify|specparam|static|'
r'string|strong|strong0|strong1|struct|super|supply0|supply1|'
r'sync_accept_on|sync_reject_on|table|tagged|task|this|throughout|'
r'time|timeprecision|timeunit|tran|tranif0|tranif1|tri|tri0|'
r'tri1|triand|trior|trireg|type|typedef|union|unique|unique0|'
r'unsigned|until|until_with|untyped|use|uwire|var|vectored|'
r'virtual|void|wait|wait_order|wand|weak|weak0|weak1|while|'
r'wildcard|wire|with|within|wor|xnor|xor)\b', Keyword ),
(r'(`__FILE__|`__LINE__|`begin_keywords|`celldefine|`default_nettype|'
r'`define|`else|`elsif|`end_keywords|`endcelldefine|`endif|'
r'`ifdef|`ifndef|`include|`line|`nounconnected_drive|`pragma|'
r'`resetall|`timescale|`unconnected_drive|`undef|`undefineall)\b',
Comment.Preproc ),
(r'(\$display|\$displayb|\$displayh|\$displayo|\$dumpall|\$dumpfile|'
r'\$dumpflush|\$dumplimit|\$dumpoff|\$dumpon|\$dumpports|'
r'\$dumpportsall|\$dumpportsflush|\$dumpportslimit|\$dumpportsoff|'
r'\$dumpportson|\$dumpvars|\$fclose|\$fdisplay|\$fdisplayb|'
r'\$fdisplayh|\$fdisplayo|\$feof|\$ferror|\$fflush|\$fgetc|'
r'\$fgets|\$fmonitor|\$fmonitorb|\$fmonitorh|\$fmonitoro|'
r'\$fopen|\$fread|\$fscanf|\$fseek|\$fstrobe|\$fstrobeb|\$fstrobeh|'
r'\$fstrobeo|\$ftell|\$fwrite|\$fwriteb|\$fwriteh|\$fwriteo|'
r'\$monitor|\$monitorb|\$monitorh|\$monitoro|\$monitoroff|'
r'\$monitoron|\$plusargs|\$readmemb|\$readmemh|\$rewind|\$sformat|'
r'\$sformatf|\$sscanf|\$strobe|\$strobeb|\$strobeh|\$strobeo|'
r'\$swrite|\$swriteb|\$swriteh|\$swriteo|\$test|\$ungetc|'
r'\$value\$plusargs|\$write|\$writeb|\$writeh|\$writememb|'
r'\$writememh|\$writeo)\b' , Name.Builtin ),
(r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
(r'(byte|shortint|int|longint|integer|time|'
r'bit|logic|reg|'
             r'supply0|supply1|tri|triand|trior|tri0|tri1|trireg|uwire|wire|wand|wor|'
r'shortreal|real|realtime)\b', Keyword.Type),
('[a-zA-Z_][a-zA-Z0-9_]*:(?!:)', Name.Label),
('[a-zA-Z_][a-zA-Z0-9_]*', Name),
],
'classname': [
(r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop'),
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # stray backslash
],
'macro': [
(r'[^/\n]+', Comment.Preproc),
(r'/[*](.|\n)*?[*]/', Comment.Multiline),
(r'//.*?\n', Comment.Single, '#pop'),
(r'/', Comment.Preproc),
(r'(?<=\\)\n', Comment.Preproc),
(r'\n', Comment.Preproc, '#pop'),
],
'import': [
(r'[a-zA-Z0-9_:]+\*?', Name.Namespace, '#pop')
]
}
def get_tokens_unprocessed(self, text):
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text):
# Convention: mark all upper case names as constants
if token is Name:
if value.isupper():
token = Name.Constant
yield index, token, value
def analyse_text(text):
if text.startswith('//') or text.startswith('/*'):
return 0.5
class VhdlLexer(RegexLexer):
"""
For VHDL source code.
*New in Pygments 1.5.*
"""
name = 'vhdl'
aliases = ['vhdl']
filenames = ['*.vhdl', '*.vhd']
mimetypes = ['text/x-vhdl']
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'--(?![!#$%&*+./<=>?@\^|_~]).*?$', Comment.Single),
(r"'(U|X|0|1|Z|W|L|H|-)'", String.Char),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r"'[a-zA-Z_][a-zA-Z0-9_]*", Name.Attribute),
(r'[()\[\],.;\']', Punctuation),
(r'"[^\n\\]*"', String),
(r'(library)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)',
bygroups(Keyword, Text, Name.Namespace)),
(r'(use)(\s+)(entity)', bygroups(Keyword, Text, Keyword)),
(r'(use)(\s+)([a-zA-Z_][\.a-zA-Z0-9_]*)',
bygroups(Keyword, Text, Name.Namespace)),
(r'(entity|component)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)',
bygroups(Keyword, Text, Name.Class)),
(r'(architecture|configuration)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)(\s+)'
r'(of)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)(\s+)(is)',
bygroups(Keyword, Text, Name.Class, Text, Keyword, Text,
Name.Class, Text, Keyword)),
(r'(end)(\s+)', bygroups(using(this), Text), 'endblock'),
include('types'),
include('keywords'),
include('numbers'),
(r'[a-zA-Z_][a-zA-Z0-9_]*', Name),
],
'endblock': [
include('keywords'),
(r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Class),
(r'(\s+)', Text),
(r';', Punctuation, '#pop'),
],
'types': [
(r'(boolean|bit|character|severity_level|integer|time|delay_length|'
r'natural|positive|string|bit_vector|file_open_kind|'
r'file_open_status|std_ulogic|std_ulogic_vector|std_logic|'
r'std_logic_vector)\b', Keyword.Type),
],
'keywords': [
(r'(abs|access|after|alias|all|and|'
r'architecture|array|assert|attribute|begin|block|'
r'body|buffer|bus|case|component|configuration|'
r'constant|disconnect|downto|else|elsif|end|'
r'entity|exit|file|for|function|generate|'
r'generic|group|guarded|if|impure|in|'
r'inertial|inout|is|label|library|linkage|'
r'literal|loop|map|mod|nand|new|'
r'next|nor|not|null|of|on|'
r'open|or|others|out|package|port|'
r'postponed|procedure|process|pure|range|record|'
r'register|reject|return|rol|ror|select|'
             r'severity|signal|shared|sla|sll|sra|'
r'srl|subtype|then|to|transport|type|'
r'units|until|use|variable|wait|when|'
r'while|with|xnor|xor)\b', Keyword),
],
'numbers': [
(r'\d{1,2}#[0-9a-fA-F_]+#?', Number.Integer),
(r'[0-1_]+(\.[0-1_])', Number.Integer),
(r'\d+', Number.Integer),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float),
(r'H"[0-9a-fA-F_]+"', Number.Oct),
(r'O"[0-7_]+"', Number.Oct),
(r'B"[0-1_]+"', Number.Oct),
],
}
| mit |
jwlawson/tensorflow | tensorflow/python/framework/graph_io.py | 46 | 2434 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for reading/writing graphs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import os.path
from google.protobuf import text_format
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
def write_graph(graph_or_graph_def, logdir, name, as_text=True):
"""Writes a graph proto to a file.
The graph is written as a text proto unless `as_text` is `False`.
```python
v = tf.Variable(0, name='my_variable')
sess = tf.Session()
tf.train.write_graph(sess.graph_def, '/tmp/my-model', 'train.pbtxt')
```
or
```python
v = tf.Variable(0, name='my_variable')
sess = tf.Session()
tf.train.write_graph(sess.graph, '/tmp/my-model', 'train.pbtxt')
```
Args:
graph_or_graph_def: A `Graph` or a `GraphDef` protocol buffer.
logdir: Directory where to write the graph. This can refer to remote
filesystems, such as Google Cloud Storage (GCS).
name: Filename for the graph.
as_text: If `True`, writes the graph as an ASCII proto.
Returns:
The path of the output proto file.
"""
if isinstance(graph_or_graph_def, ops.Graph):
graph_def = graph_or_graph_def.as_graph_def()
else:
graph_def = graph_or_graph_def
# gcs does not have the concept of directory at the moment.
if not file_io.file_exists(logdir) and not logdir.startswith('gs:'):
file_io.recursive_create_dir(logdir)
path = os.path.join(logdir, name)
if as_text:
file_io.atomic_write_string_to_file(path,
text_format.MessageToString(graph_def))
else:
file_io.atomic_write_string_to_file(path, graph_def.SerializeToString())
return path
| apache-2.0 |
fangxingli/hue | desktop/core/ext-py/openpyxl-2.3.0-b2/openpyxl/chart/_3d.py | 10 | 3063 | from __future__ import absolute_import
# Copyright (c) 2010-2015 openpyxl
from openpyxl.descriptors import Typed, Alias
from openpyxl.descriptors.serialisable import Serialisable
from openpyxl.descriptors.nested import (
NestedBool,
NestedInteger,
NestedMinMax,
)
from openpyxl.descriptors.excel import ExtensionList
from .marker import PictureOptions
from .shapes import ShapeProperties
class View3D(Serialisable):
tagname = "view3D"
rotX = NestedMinMax(min=-90, max=90, allow_none=True)
x_rotation = Alias('rotX')
hPercent = NestedMinMax(min=5, max=500, allow_none=True)
height_percent = Alias('hPercent')
rotY = NestedInteger(min=-90, max=90, allow_none=True)
y_rotation = Alias('rotY')
depthPercent = NestedInteger(allow_none=True)
rAngAx = NestedBool(allow_none=True)
right_angle_axes = Alias('rAngAx')
perspective = NestedInteger(allow_none=True)
extLst = Typed(expected_type=ExtensionList, allow_none=True)
__elements__ = ('rotX', 'hPercent', 'rotY', 'depthPercent', 'rAngAx',
'perspective',)
def __init__(self,
rotX=15,
hPercent=None,
rotY=20,
depthPercent=None,
rAngAx=True,
perspective=None,
extLst=None,
):
self.rotX = rotX
self.hPercent = hPercent
self.rotY = rotY
self.depthPercent = depthPercent
self.rAngAx = rAngAx
self.perspective = perspective
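# Hedged usage sketch for the descriptors above (values made up):
#
#   v = View3D(rotX=30, rotY=45)
#   v.x_rotation             # -> 30, read through Alias('rotX')
#   v.height_percent = 150   # writes hPercent, validated against 5..500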
class Surface(Serialisable):
tagname = "surface"
thickness = NestedInteger(allow_none=True)
spPr = Typed(expected_type=ShapeProperties, allow_none=True)
shapeProperties = Alias('spPr')
pictureOptions = Typed(expected_type=PictureOptions, allow_none=True)
extLst = Typed(expected_type=ExtensionList, allow_none=True)
__elements__ = ('thickness', 'spPr', 'pictureOptions',)
def __init__(self,
thickness=None,
spPr=None,
pictureOptions=None,
extLst=None,
):
self.thickness = thickness
self.spPr = spPr
self.pictureOptions = pictureOptions
class _3DBase(Serialisable):
"""
Base class for 3D charts
"""
view3D = Typed(expected_type=View3D, allow_none=True)
floor = Typed(expected_type=Surface, allow_none=True)
sideWall = Typed(expected_type=Surface, allow_none=True)
backWall = Typed(expected_type=Surface, allow_none=True)
def __init__(self,
view3D=None,
floor=None,
sideWall=None,
backWall=None
):
if view3D is None:
view3D = View3D()
self.view3D = view3D
if floor is None:
floor = Surface()
self.floor = floor
if sideWall is None:
sideWall = Surface()
self.sideWall = sideWall
if backWall is None:
backWall = Surface()
self.backWall = backWall
| apache-2.0 |
grevutiu-gabriel/phantomjs | src/qt/qtwebkit/Source/WebCore/inspector/generate_protocol_externs.py | 117 | 9058 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
type_traits = {
"any": "*",
"string": "string",
"integer": "number",
"number": "number",
"boolean": "boolean",
"array": "Array.<*>",
"object": "Object",
}
ref_types = {}
def full_qualified_type_id(domain_name, type_id):
if type_id.find(".") == -1:
return "%s.%s" % (domain_name, type_id)
return type_id
def fix_camel_case(name):
refined = re.sub(r'-(\w)', lambda pat: pat.group(1).upper(), name)
refined = to_title_case(refined)
return re.sub(r'(?i)HTML|XML|WML|API', lambda pat: pat.group(0).upper(), refined)
def to_title_case(name):
return name[:1].upper() + name[1:]
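# For example, fix_camel_case("html-viewer") -> "HTMLViewer": the first
# regex upper-cases the letter after '-', the result is title-cased, and
# the final substitution upper-cases the HTML/XML/WML/API acronyms.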
def generate_enum(name, json):
enum_members = []
for member in json["enum"]:
enum_members.append(" %s: \"%s\"" % (fix_camel_case(member), member))
return "\n/** @enum {string} */\n%s = {\n%s\n};\n" % (name, (",\n".join(enum_members)))
def param_type(domain_name, param):
if "type" in param:
if param["type"] == "array":
items = param["items"]
return "Array.<%s>" % param_type(domain_name, items)
else:
return type_traits[param["type"]]
if "$ref" in param:
type_id = full_qualified_type_id(domain_name, param["$ref"])
if type_id in ref_types:
return ref_types[type_id]
else:
print "Type not found: " + type_id
return "!! Type not found: " + type_id
def generate_protocol_externs(output_path, input_path):
input_file = open(input_path, "r")
json_string = input_file.read()
json_string = json_string.replace(": true", ": True")
json_string = json_string.replace(": false", ": False")
json_api = eval(json_string)["domains"]
output_file = open(output_path, "w")
output_file.write(
"""
var Protocol = {};
/** @typedef {string}*/
Protocol.Error;
""")
for domain in json_api:
domain_name = domain["domain"]
if "types" in domain:
for type in domain["types"]:
type_id = full_qualified_type_id(domain_name, type["id"])
ref_types[type_id] = "%sAgent.%s" % (domain_name, type["id"])
for domain in json_api:
domain_name = domain["domain"]
output_file.write("\n\n\nvar %sAgent = {};\n" % domain_name)
if "types" in domain:
for type in domain["types"]:
if type["type"] == "object":
typedef_args = []
if "properties" in type:
for property in type["properties"]:
suffix = ""
if ("optional" in property):
suffix = "|undefined"
if "enum" in property:
enum_name = "%sAgent.%s%s" % (domain_name, type["id"], to_title_case(property["name"]))
output_file.write(generate_enum(enum_name, property))
typedef_args.append("%s:(%s%s)" % (property["name"], enum_name, suffix))
else:
typedef_args.append("%s:(%s%s)" % (property["name"], param_type(domain_name, property), suffix))
if (typedef_args):
output_file.write("\n/** @typedef {{%s}|null} */\n%sAgent.%s;\n" % (", ".join(typedef_args), domain_name, type["id"]))
else:
output_file.write("\n/** @typedef {Object} */\n%sAgent.%s;\n" % (domain_name, type["id"]))
elif type["type"] == "string" and "enum" in type:
output_file.write(generate_enum("%sAgent.%s" % (domain_name, type["id"]), type))
elif type["type"] == "array":
suffix = ""
if ("optional" in property):
suffix = "|undefined"
output_file.write("\n/** @typedef {Array.<%s>%s} */\n%sAgent.%s;\n" % (param_type(domain_name, type["items"]), suffix, domain_name, type["id"]))
else:
output_file.write("\n/** @typedef {%s} */\n%sAgent.%s;\n" % (type_traits[type["type"]], domain_name, type["id"]))
if "commands" in domain:
for command in domain["commands"]:
output_file.write("\n/**\n")
params = []
if ("parameters" in command):
for in_param in command["parameters"]:
if ("optional" in in_param):
params.append("opt_%s" % in_param["name"])
output_file.write(" * @param {%s=} opt_%s\n" % (param_type(domain_name, in_param), in_param["name"]))
else:
params.append(in_param["name"])
output_file.write(" * @param {%s} %s\n" % (param_type(domain_name, in_param), in_param["name"]))
returns = ["?Protocol.Error"]
if ("returns" in command):
for out_param in command["returns"]:
if ("optional" in out_param):
returns.append("%s=" % param_type(domain_name, out_param))
else:
returns.append("%s" % param_type(domain_name, out_param))
output_file.write(" * @param {function(%s):void=} opt_callback\n" % ", ".join(returns))
output_file.write(" */\n")
params.append("opt_callback")
output_file.write("%sAgent.%s = function(%s) {}\n" % (domain_name, command["name"], ", ".join(params)))
output_file.write("/** @param {function(%s):void=} opt_callback */\n" % ", ".join(returns))
output_file.write("%sAgent.%s.invoke = function(obj, opt_callback) {}\n" % (domain_name, command["name"]))
output_file.write("/** @interface */\n")
output_file.write("%sAgent.Dispatcher = function() {};\n" % domain_name)
if "events" in domain:
for event in domain["events"]:
params = []
if ("parameters" in event):
output_file.write("/**\n")
for param in event["parameters"]:
if ("optional" in param):
params.append("opt_%s" % param["name"])
output_file.write(" * @param {%s=} opt_%s\n" % (param_type(domain_name, param), param["name"]))
else:
params.append(param["name"])
output_file.write(" * @param {%s} %s\n" % (param_type(domain_name, param), param["name"]))
output_file.write(" */\n")
output_file.write("%sAgent.Dispatcher.prototype.%s = function(%s) {};\n" % (domain_name, event["name"], ", ".join(params)))
output_file.write("/**\n * @param {%sAgent.Dispatcher} dispatcher\n */\n" % domain_name)
output_file.write("InspectorBackend.register%sDispatcher = function(dispatcher) {}\n" % domain_name)
output_file.close()
if __name__ == "__main__":
import sys
import os.path
program_name = os.path.basename(__file__)
if len(sys.argv) < 4 or sys.argv[1] != "-o":
sys.stderr.write("Usage: %s -o OUTPUT_FILE INPUT_FILE\n" % program_name)
exit(1)
output_path = sys.argv[2]
input_path = sys.argv[3]
generate_protocol_externs(output_path, input_path)
| bsd-3-clause |
TwinkleChawla/nova | nova/api/auth.py | 13 | 5832 | # Copyright (c) 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Common Auth Middleware.
"""
from oslo_config import cfg
from oslo_log import log as logging
from oslo_middleware import request_id
from oslo_serialization import jsonutils
import webob.dec
import webob.exc
from nova import context
from nova.i18n import _
from nova import wsgi
auth_opts = [
cfg.BoolOpt('api_rate_limit',
default=False,
help='Whether to use per-user rate limiting for the api. '
'This option is only used by v2 api. Rate limiting '
'is removed from v2.1 api.'),
cfg.StrOpt('auth_strategy',
default='keystone',
help='''
The strategy to use for auth: keystone or noauth2. noauth2 is designed for
testing only, as it does no actual credential checking. noauth2 provides
administrative credentials only if 'admin' is specified as the username.
'''),
cfg.BoolOpt('use_forwarded_for',
default=False,
help='Treat X-Forwarded-For as the canonical remote address. '
'Only enable this if you have a sanitizing proxy.'),
]
CONF = cfg.CONF
CONF.register_opts(auth_opts)
LOG = logging.getLogger(__name__)
def _load_pipeline(loader, pipeline):
filters = [loader.get_filter(n) for n in pipeline[:-1]]
app = loader.get_app(pipeline[-1])
filters.reverse()
for filter in filters:
app = filter(app)
return app
def pipeline_factory(loader, global_conf, **local_conf):
"""A paste pipeline replica that keys off of auth_strategy."""
pipeline = local_conf[CONF.auth_strategy]
if not CONF.api_rate_limit:
limit_name = CONF.auth_strategy + '_nolimit'
pipeline = local_conf.get(limit_name, pipeline)
pipeline = pipeline.split()
return _load_pipeline(loader, pipeline)
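# Illustrative api-paste.ini fragment this factory is wired to (section and
# filter names assumed, not taken from this tree):
#
#   [composite:openstack_compute_api_v2]
#   use = call:nova.api.auth:pipeline_factory
#   keystone = faultwrap authtoken keystonecontext ratelimit osapi_compute_app_v2
#   keystone_nolimit = faultwrap authtoken keystonecontext osapi_compute_app_v2
#   noauth2 = faultwrap noauth2 ratelimit osapi_compute_app_v2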
def pipeline_factory_v21(loader, global_conf, **local_conf):
"""A paste pipeline replica that keys off of auth_strategy."""
return _load_pipeline(loader, local_conf[CONF.auth_strategy].split())
# NOTE(oomichi): This pipeline_factory_v3 is for passing check-grenade-dsvm.
pipeline_factory_v3 = pipeline_factory_v21
class InjectContext(wsgi.Middleware):
"""Add a 'nova.context' to WSGI environ."""
def __init__(self, context, *args, **kwargs):
self.context = context
super(InjectContext, self).__init__(*args, **kwargs)
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
req.environ['nova.context'] = self.context
return self.application
class NovaKeystoneContext(wsgi.Middleware):
"""Make a request context from keystone headers."""
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
user_id = req.headers.get('X_USER')
user_id = req.headers.get('X_USER_ID', user_id)
if user_id is None:
LOG.debug("Neither X_USER_ID nor X_USER found in request")
return webob.exc.HTTPUnauthorized()
roles = self._get_roles(req)
if 'X_TENANT_ID' in req.headers:
# This is the new header since Keystone went to ID/Name
project_id = req.headers['X_TENANT_ID']
else:
# This is for legacy compatibility
project_id = req.headers['X_TENANT']
project_name = req.headers.get('X_TENANT_NAME')
user_name = req.headers.get('X_USER_NAME')
req_id = req.environ.get(request_id.ENV_REQUEST_ID)
# Get the auth token
auth_token = req.headers.get('X_AUTH_TOKEN',
req.headers.get('X_STORAGE_TOKEN'))
# Build a context, including the auth_token...
remote_address = req.remote_addr
if CONF.use_forwarded_for:
remote_address = req.headers.get('X-Forwarded-For', remote_address)
service_catalog = None
if req.headers.get('X_SERVICE_CATALOG') is not None:
try:
catalog_header = req.headers.get('X_SERVICE_CATALOG')
service_catalog = jsonutils.loads(catalog_header)
except ValueError:
raise webob.exc.HTTPInternalServerError(
_('Invalid service catalog json.'))
# NOTE(jamielennox): This is a full auth plugin set by auth_token
# middleware in newer versions.
user_auth_plugin = req.environ.get('keystone.token_auth')
ctx = context.RequestContext(user_id,
project_id,
user_name=user_name,
project_name=project_name,
roles=roles,
auth_token=auth_token,
remote_address=remote_address,
service_catalog=service_catalog,
request_id=req_id,
user_auth_plugin=user_auth_plugin)
req.environ['nova.context'] = ctx
return self.application
def _get_roles(self, req):
"""Get the list of roles."""
roles = req.headers.get('X_ROLES', '')
return [r.strip() for r in roles.split(',')]
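    # e.g. an 'X_ROLES: admin, member' header yields ['admin', 'member'].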
| apache-2.0 |
patrickwind/My_Blog | venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/packages/ssl_match_hostname/_implementation.py | 2360 | 3778 | """The match_hostname() function from Python 3.3.3, essential when using SSL."""
# Note: This file is under the PSF license as the code comes from the python
# stdlib. http://docs.python.org/3/license.html
import re
__version__ = '3.4.0.2'
class CertificateError(ValueError):
pass
def _dnsname_match(dn, hostname, max_wildcards=1):
"""Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3
"""
pats = []
if not dn:
return False
# Ported from python3-syntax:
# leftmost, *remainder = dn.split(r'.')
parts = dn.split(r'.')
leftmost = parts[0]
remainder = parts[1:]
wildcards = leftmost.count('*')
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
# than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError(
"too many wildcards in certificate DNS name: " + repr(dn))
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a presented identifier in which
# the wildcard character comprises a label other than the left-most label.
if leftmost == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
return pat.match(hostname)
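# Illustrative results of the rules above:
#   _dnsname_match('*.example.com', 'www.example.com')  -> match
#   _dnsname_match('*.example.com', 'a.b.example.com')  -> no match,
#     because the wildcard covers exactly one non-empty label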
def match_hostname(cert, hostname):
"""Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
rules are followed, but IP addresses are not accepted for *hostname*.
CertificateError is raised on failure. On success, the function
returns nothing.
"""
if not cert:
raise ValueError("empty or no certificate")
dnsnames = []
san = cert.get('subjectAltName', ())
for key, value in san:
if key == 'DNS':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if not dnsnames:
# The subject is only checked when there is no dNSName entry
# in subjectAltName
for sub in cert.get('subject', ()):
for key, value in sub:
# XXX according to RFC 2818, the most specific Common Name
# must be used.
if key == 'commonName':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if len(dnsnames) > 1:
raise CertificateError("hostname %r "
"doesn't match either of %s"
% (hostname, ', '.join(map(repr, dnsnames))))
elif len(dnsnames) == 1:
raise CertificateError("hostname %r "
"doesn't match %r"
% (hostname, dnsnames[0]))
else:
raise CertificateError("no appropriate commonName or "
"subjectAltName fields were found")
| gpl-2.0 |
andris210296/andris-projeto | backend/venv/test/lib/python2.7/site-packages/unidecode/x05b.py | 252 | 4668 | data = (
'Gui ', # 0x00
'Deng ', # 0x01
'Zhi ', # 0x02
'Xu ', # 0x03
'Yi ', # 0x04
'Hua ', # 0x05
'Xi ', # 0x06
'Hui ', # 0x07
'Rao ', # 0x08
'Xi ', # 0x09
'Yan ', # 0x0a
'Chan ', # 0x0b
'Jiao ', # 0x0c
'Mei ', # 0x0d
'Fan ', # 0x0e
'Fan ', # 0x0f
'Xian ', # 0x10
'Yi ', # 0x11
'Wei ', # 0x12
'Jiao ', # 0x13
'Fu ', # 0x14
'Shi ', # 0x15
'Bi ', # 0x16
'Shan ', # 0x17
'Sui ', # 0x18
'Qiang ', # 0x19
'Lian ', # 0x1a
'Huan ', # 0x1b
'Xin ', # 0x1c
'Niao ', # 0x1d
'Dong ', # 0x1e
'Yi ', # 0x1f
'Can ', # 0x20
'Ai ', # 0x21
'Niang ', # 0x22
'Neng ', # 0x23
'Ma ', # 0x24
'Tiao ', # 0x25
'Chou ', # 0x26
'Jin ', # 0x27
'Ci ', # 0x28
'Yu ', # 0x29
'Pin ', # 0x2a
'Yong ', # 0x2b
'Xu ', # 0x2c
'Nai ', # 0x2d
'Yan ', # 0x2e
'Tai ', # 0x2f
'Ying ', # 0x30
'Can ', # 0x31
'Niao ', # 0x32
'Wo ', # 0x33
'Ying ', # 0x34
'Mian ', # 0x35
'Kaka ', # 0x36
'Ma ', # 0x37
'Shen ', # 0x38
'Xing ', # 0x39
'Ni ', # 0x3a
'Du ', # 0x3b
'Liu ', # 0x3c
'Yuan ', # 0x3d
'Lan ', # 0x3e
'Yan ', # 0x3f
'Shuang ', # 0x40
'Ling ', # 0x41
'Jiao ', # 0x42
'Niang ', # 0x43
'Lan ', # 0x44
'Xian ', # 0x45
'Ying ', # 0x46
'Shuang ', # 0x47
'Shuai ', # 0x48
'Quan ', # 0x49
'Mi ', # 0x4a
'Li ', # 0x4b
'Luan ', # 0x4c
'Yan ', # 0x4d
'Zhu ', # 0x4e
'Lan ', # 0x4f
'Zi ', # 0x50
'Jie ', # 0x51
'Jue ', # 0x52
'Jue ', # 0x53
'Kong ', # 0x54
'Yun ', # 0x55
'Zi ', # 0x56
'Zi ', # 0x57
'Cun ', # 0x58
'Sun ', # 0x59
'Fu ', # 0x5a
'Bei ', # 0x5b
'Zi ', # 0x5c
'Xiao ', # 0x5d
'Xin ', # 0x5e
'Meng ', # 0x5f
'Si ', # 0x60
'Tai ', # 0x61
'Bao ', # 0x62
'Ji ', # 0x63
'Gu ', # 0x64
'Nu ', # 0x65
'Xue ', # 0x66
'[?] ', # 0x67
'Zhuan ', # 0x68
'Hai ', # 0x69
'Luan ', # 0x6a
'Sun ', # 0x6b
'Huai ', # 0x6c
'Mie ', # 0x6d
'Cong ', # 0x6e
'Qian ', # 0x6f
'Shu ', # 0x70
'Chan ', # 0x71
'Ya ', # 0x72
'Zi ', # 0x73
'Ni ', # 0x74
'Fu ', # 0x75
'Zi ', # 0x76
'Li ', # 0x77
'Xue ', # 0x78
'Bo ', # 0x79
'Ru ', # 0x7a
'Lai ', # 0x7b
'Nie ', # 0x7c
'Nie ', # 0x7d
'Ying ', # 0x7e
'Luan ', # 0x7f
'Mian ', # 0x80
'Zhu ', # 0x81
'Rong ', # 0x82
'Ta ', # 0x83
'Gui ', # 0x84
'Zhai ', # 0x85
'Qiong ', # 0x86
'Yu ', # 0x87
'Shou ', # 0x88
'An ', # 0x89
'Tu ', # 0x8a
'Song ', # 0x8b
'Wan ', # 0x8c
'Rou ', # 0x8d
'Yao ', # 0x8e
'Hong ', # 0x8f
'Yi ', # 0x90
'Jing ', # 0x91
'Zhun ', # 0x92
'Mi ', # 0x93
'Zhu ', # 0x94
'Dang ', # 0x95
'Hong ', # 0x96
'Zong ', # 0x97
'Guan ', # 0x98
'Zhou ', # 0x99
'Ding ', # 0x9a
'Wan ', # 0x9b
'Yi ', # 0x9c
'Bao ', # 0x9d
'Shi ', # 0x9e
'Shi ', # 0x9f
'Chong ', # 0xa0
'Shen ', # 0xa1
'Ke ', # 0xa2
'Xuan ', # 0xa3
'Shi ', # 0xa4
'You ', # 0xa5
'Huan ', # 0xa6
'Yi ', # 0xa7
'Tiao ', # 0xa8
'Shi ', # 0xa9
'Xian ', # 0xaa
'Gong ', # 0xab
'Cheng ', # 0xac
'Qun ', # 0xad
'Gong ', # 0xae
'Xiao ', # 0xaf
'Zai ', # 0xb0
'Zha ', # 0xb1
'Bao ', # 0xb2
'Hai ', # 0xb3
'Yan ', # 0xb4
'Xiao ', # 0xb5
'Jia ', # 0xb6
'Shen ', # 0xb7
'Chen ', # 0xb8
'Rong ', # 0xb9
'Huang ', # 0xba
'Mi ', # 0xbb
'Kou ', # 0xbc
'Kuan ', # 0xbd
'Bin ', # 0xbe
'Su ', # 0xbf
'Cai ', # 0xc0
'Zan ', # 0xc1
'Ji ', # 0xc2
'Yuan ', # 0xc3
'Ji ', # 0xc4
'Yin ', # 0xc5
'Mi ', # 0xc6
'Kou ', # 0xc7
'Qing ', # 0xc8
'Que ', # 0xc9
'Zhen ', # 0xca
'Jian ', # 0xcb
'Fu ', # 0xcc
'Ning ', # 0xcd
'Bing ', # 0xce
'Huan ', # 0xcf
'Mei ', # 0xd0
'Qin ', # 0xd1
'Han ', # 0xd2
'Yu ', # 0xd3
'Shi ', # 0xd4
'Ning ', # 0xd5
'Qin ', # 0xd6
'Ning ', # 0xd7
'Zhi ', # 0xd8
'Yu ', # 0xd9
'Bao ', # 0xda
'Kuan ', # 0xdb
'Ning ', # 0xdc
'Qin ', # 0xdd
'Mo ', # 0xde
'Cha ', # 0xdf
'Ju ', # 0xe0
'Gua ', # 0xe1
'Qin ', # 0xe2
'Hu ', # 0xe3
'Wu ', # 0xe4
'Liao ', # 0xe5
'Shi ', # 0xe6
'Zhu ', # 0xe7
'Zhai ', # 0xe8
'Shen ', # 0xe9
'Wei ', # 0xea
'Xie ', # 0xeb
'Kuan ', # 0xec
'Hui ', # 0xed
'Liao ', # 0xee
'Jun ', # 0xef
'Huan ', # 0xf0
'Yi ', # 0xf1
'Yi ', # 0xf2
'Bao ', # 0xf3
'Qin ', # 0xf4
'Chong ', # 0xf5
'Bao ', # 0xf6
'Feng ', # 0xf7
'Cun ', # 0xf8
'Dui ', # 0xf9
'Si ', # 0xfa
'Xun ', # 0xfb
'Dao ', # 0xfc
'Lu ', # 0xfd
'Dui ', # 0xfe
'Shou ', # 0xff
)
| mit |
avihad/ARP-Storm | src/arp_open_flow/pox/samples/pretty_log.py | 47 | 1166 | # Copyright 2012-2013 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This is a very simple component which provides some kind of nice
log formatting.
It demonstrates launching another component (there should eventually
be a nice interface for doing this), and formatting color log messages.
Also, any arguments are passed to log.level, so you can use it as a
shortcut for that too.
"""
def launch (**kw):
import pox.log.color
pox.log.color.launch()
import pox.log
pox.log.launch(format="[@@@bold@@@level%(name)-23s@@@reset] " +
"@@@bold%(message)s@@@normal")
import pox.log.level
pox.log.level.launch(**kw)
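# Example invocation (sketch): ./pox.py samples.pretty_log --DEBUG
# -- the extra argument is forwarded to log.level via **kw, which is the
# shortcut described in the module docstring above.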
| apache-2.0 |
Intel-Corporation/tensorflow | tensorflow/contrib/slim/python/slim/data/dataset_data_provider.py | 56 | 4253 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A DataProvider that provides data from a Dataset.
DatasetDataProviders provide data from datasets. The provide can be configured
to use multiple readers simultaneously or read via a single reader.
Additionally, the data being read can be optionally shuffled.
For example, to read data using a single thread without shuffling:
pascal_voc_data_provider = DatasetDataProvider(
slim.datasets.pascal_voc.get_split('train'),
shuffle=False)
images, labels = pascal_voc_data_provider.get(['images', 'labels'])
To read data using multiple readers simultaneous with shuffling:
pascal_voc_data_provider = DatasetDataProvider(
slim.datasets.pascal_voc.Dataset(),
num_readers=10,
shuffle=True)
images, labels = pascal_voc_data_provider.get(['images', 'labels'])
Equivalently, one may request different fields of the same sample separately:
[images] = pascal_voc_data_provider.get(['images'])
[labels] = pascal_voc_data_provider.get(['labels'])
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.slim.python.slim.data import data_provider
from tensorflow.contrib.slim.python.slim.data import parallel_reader
class DatasetDataProvider(data_provider.DataProvider):
def __init__(self,
dataset,
num_readers=1,
reader_kwargs=None,
shuffle=True,
num_epochs=None,
common_queue_capacity=256,
common_queue_min=128,
record_key='record_key',
seed=None,
scope=None):
"""Creates a DatasetDataProvider.
Note: if `num_epochs` is not `None`, a local counter `epochs` will be created
by the relevant reader function. Use `local_variables_initializer()` to initialize
local variables.
Args:
dataset: An instance of the Dataset class.
num_readers: The number of parallel readers to use.
reader_kwargs: An optional dict of kwargs for the reader.
shuffle: Whether to shuffle the data sources and common queue when
reading.
num_epochs: The number of times each data source is read. If left as None,
the data will be cycled through indefinitely.
common_queue_capacity: The capacity of the common queue.
common_queue_min: The minimum number of elements in the common queue after
a dequeue.
record_key: The item name to use for the dataset record keys in the
provided tensors.
seed: The seed to use if shuffling.
scope: Optional name scope for the ops.
Raises:
ValueError: If `record_key` matches one of the items in the dataset.
"""
key, data = parallel_reader.parallel_read(
dataset.data_sources,
reader_class=dataset.reader,
num_epochs=num_epochs,
num_readers=num_readers,
reader_kwargs=reader_kwargs,
shuffle=shuffle,
capacity=common_queue_capacity,
min_after_dequeue=common_queue_min,
seed=seed,
scope=scope)
items = dataset.decoder.list_items()
tensors = dataset.decoder.decode(data, items)
items_to_tensors = dict(zip(items, tensors))
if record_key in items_to_tensors:
raise ValueError('The item name used for `record_key` cannot also be '
'used for a dataset item: %s' % record_key)
items_to_tensors[record_key] = key
super(DatasetDataProvider, self).__init__(
items_to_tensors=items_to_tensors,
num_samples=dataset.num_samples)
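# A minimal end-to-end sketch, not part of the original module, left as
# comments so the module stays importable. It assumes `my_dataset` is an
# already-constructed slim Dataset whose decoder defines the items 'images'
# and 'labels' (both names are assumptions here).
#
# import tensorflow as tf
#
# provider = DatasetDataProvider(
#     my_dataset, num_readers=4, shuffle=True, num_epochs=1)
# [image, label] = provider.get(['images', 'labels'])
#
# with tf.Session() as sess:
#     # num_epochs creates a local `epochs` counter, hence this initializer:
#     sess.run(tf.local_variables_initializer())
#     coord = tf.train.Coordinator()
#     threads = tf.train.start_queue_runners(sess=sess, coord=coord)
#     img, lbl = sess.run([image, label])
#     coord.request_stop()
#     coord.join(threads)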
| apache-2.0 |
brandond/ansible | test/units/modules/network/f5/test_bigip_asm_policy.py | 17 | 20611 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules._bigip_asm_policy import V1Parameters
from library.modules._bigip_asm_policy import V2Parameters
from library.modules._bigip_asm_policy import ModuleManager
from library.modules._bigip_asm_policy import V1Manager
from library.modules._bigip_asm_policy import V2Manager
from library.modules._bigip_asm_policy import ArgumentSpec
from library.module_utils.network.f5.common import F5ModuleError
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5._bigip_asm_policy import V1Parameters
from ansible.modules.network.f5._bigip_asm_policy import V2Parameters
from ansible.modules.network.f5._bigip_asm_policy import ModuleManager
from ansible.modules.network.f5._bigip_asm_policy import V1Manager
from ansible.modules.network.f5._bigip_asm_policy import V2Manager
from ansible.modules.network.f5._bigip_asm_policy import ArgumentSpec
from ansible.module_utils.network.f5.common import F5ModuleError
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
name='fake_policy',
state='present',
file='/var/fake/fake.xml'
)
p = V1Parameters(params=args)
assert p.name == 'fake_policy'
assert p.state == 'present'
assert p.file == '/var/fake/fake.xml'
def test_module_parameters_template(self):
args = dict(
name='fake_policy',
state='present',
template='LotusDomino 6.5 (http)'
)
p = V1Parameters(params=args)
assert p.name == 'fake_policy'
assert p.state == 'present'
assert p.template == 'POLICY_TEMPLATE_LOTUSDOMINO_6_5_HTTP'
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
self.policy = os.path.join(fixture_path, 'fake_policy.xml')
self.patcher1 = patch('time.sleep')
self.patcher1.start()
try:
self.p1 = patch('library.modules._bigip_asm_policy.module_provisioned')
self.m1 = self.p1.start()
self.m1.return_value = True
except Exception:
self.p1 = patch('ansible.modules.network.f5._bigip_asm_policy.module_provisioned')
self.m1 = self.p1.start()
self.m1.return_value = True
def tearDown(self):
self.patcher1.stop()
self.p1.stop()
def test_activate_import_from_file(self, *args):
set_module_args(dict(
name='fake_policy',
file=self.policy,
state='present',
active='yes',
server='localhost',
password='password',
user='admin',
))
current = V1Parameters(params=load_fixture('load_asm_policy_inactive.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode,
)
v1 = V1Manager(module=module)
v1.exists = Mock(return_value=False)
v1.import_to_device = Mock(return_value=True)
v1.wait_for_task = Mock(side_effect=[True, True])
v1.read_current_from_device = Mock(return_value=current)
v1.apply_on_device = Mock(return_value=True)
v1.remove_temp_policy_from_device = Mock(return_value=True)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.version_is_less_than_13 = Mock(return_value=False)
mm.get_manager = Mock(return_value=v1)
results = mm.exec_module()
assert results['changed'] is True
assert results['name'] == 'fake_policy'
assert results['file'] == self.policy
assert results['active'] is True
def test_activate_import_from_template(self, *args):
set_module_args(dict(
name='fake_policy',
template='OWA Exchange 2007 (https)',
state='present',
active='yes',
server='localhost',
password='password',
user='admin',
))
current = V1Parameters(params=load_fixture('load_asm_policy_inactive.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
v1 = V1Manager(module=module)
v1.exists = Mock(return_value=False)
v1.import_to_device = Mock(return_value=True)
v1.wait_for_task = Mock(side_effect=[True, True])
v1.read_current_from_device = Mock(return_value=current)
v1.apply_on_device = Mock(return_value=True)
v1.create_from_template_on_device = Mock(return_value=True)
v1._file_is_missing = Mock(return_value=False)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.version_is_less_than_13 = Mock(return_value=False)
mm.get_manager = Mock(return_value=v1)
results = mm.exec_module()
assert results['changed'] is True
assert results['name'] == 'fake_policy'
assert results['template'] == 'OWA Exchange 2007 (https)'
assert results['active'] is True
def test_activate_create_by_name(self, *args):
set_module_args(dict(
name='fake_policy',
state='present',
active='yes',
server='localhost',
password='password',
user='admin',
))
current = V1Parameters(params=load_fixture('load_asm_policy_inactive.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
v1 = V1Manager(module=module)
v1.exists = Mock(return_value=False)
v1.import_to_device = Mock(return_value=True)
v1.wait_for_task = Mock(side_effect=[True, True])
v1.create_on_device = Mock(return_value=True)
v1.create_blank = Mock(return_value=True)
v1.read_current_from_device = Mock(return_value=current)
v1.apply_on_device = Mock(return_value=True)
v1._file_is_missing = Mock(return_value=False)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.version_is_less_than_13 = Mock(return_value=False)
mm.get_manager = Mock(return_value=v1)
results = mm.exec_module()
assert results['changed'] is True
assert results['name'] == 'fake_policy'
assert results['active'] is True
def test_activate_policy_exists_inactive(self, *args):
set_module_args(dict(
name='fake_policy',
state='present',
active='yes',
server='localhost',
password='password',
user='admin',
))
current = V1Parameters(params=load_fixture('load_asm_policy_inactive.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
v1 = V1Manager(module=module)
v1.exists = Mock(return_value=True)
v1.update_on_device = Mock(return_value=True)
v1.wait_for_task = Mock(side_effect=[True, True])
v1.read_current_from_device = Mock(return_value=current)
v1.apply_on_device = Mock(return_value=True)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.version_is_less_than_13 = Mock(return_value=False)
mm.get_manager = Mock(return_value=v1)
results = mm.exec_module()
assert results['changed'] is True
assert results['active'] is True
def test_activate_policy_exists_active(self, *args):
set_module_args(dict(
name='fake_policy',
state='present',
active='yes',
server='localhost',
password='password',
user='admin',
))
current = V1Parameters(params=load_fixture('load_asm_policy_active.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
v1 = V1Manager(module=module)
v1.exists = Mock(return_value=True)
v1.read_current_from_device = Mock(return_value=current)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.version_is_less_than_13 = Mock(return_value=False)
mm.get_manager = Mock(return_value=v1)
results = mm.exec_module()
assert results['changed'] is False
def test_deactivate_policy_exists_active(self, *args):
set_module_args(dict(
name='fake_policy',
state='present',
server='localhost',
password='password',
user='admin',
active='no'
))
current = V1Parameters(params=load_fixture('load_asm_policy_active.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
v1 = V1Manager(module=module)
v1.exists = Mock(return_value=True)
v1.read_current_from_device = Mock(return_value=current)
v1.update_on_device = Mock(return_value=True)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.version_is_less_than_13 = Mock(return_value=False)
mm.get_manager = Mock(return_value=v1)
results = mm.exec_module()
assert results['changed'] is True
assert results['active'] is False
def test_deactivate_policy_exists_inactive(self, *args):
set_module_args(dict(
name='fake_policy',
state='present',
server='localhost',
password='password',
user='admin',
active='no'
))
current = V1Parameters(params=load_fixture('load_asm_policy_inactive.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
v1 = V1Manager(module=module)
v1.exists = Mock(return_value=True)
v1.read_current_from_device = Mock(return_value=current)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.version_is_less_than_13 = Mock(return_value=False)
mm.get_manager = Mock(return_value=v1)
results = mm.exec_module()
assert results['changed'] is False
def test_import_from_file(self, *args):
set_module_args(dict(
name='fake_policy',
file=self.policy,
state='present',
server='localhost',
password='password',
user='admin',
))
current = V1Parameters(params=load_fixture('load_asm_policy_inactive.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
v1 = V1Manager(module=module)
v1.exists = Mock(return_value=False)
v1.import_to_device = Mock(return_value=True)
v1.wait_for_task = Mock(side_effect=[True, True])
v1.read_current_from_device = Mock(return_value=current)
v1.remove_temp_policy_from_device = Mock(return_value=True)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.version_is_less_than_13 = Mock(return_value=False)
mm.get_manager = Mock(return_value=v1)
results = mm.exec_module()
assert results['changed'] is True
assert results['name'] == 'fake_policy'
assert results['file'] == self.policy
assert results['active'] is False
def test_import_from_template(self, *args):
set_module_args(dict(
name='fake_policy',
template='LotusDomino 6.5 (http)',
state='present',
server='localhost',
password='password',
user='admin',
))
current = V1Parameters(params=load_fixture('load_asm_policy_inactive.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
v1 = V1Manager(module=module)
v1.exists = Mock(return_value=False)
v1.create_from_template_on_device = Mock(return_value=True)
v1.wait_for_task = Mock(side_effect=[True, True])
v1.read_current_from_device = Mock(return_value=current)
v1._file_is_missing = Mock(return_value=False)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.version_is_less_than_13 = Mock(return_value=False)
mm.get_manager = Mock(return_value=v1)
results = mm.exec_module()
assert results['changed'] is True
assert results['name'] == 'fake_policy'
assert results['template'] == 'LotusDomino 6.5 (http)'
assert results['active'] is False
def test_create_by_name(self, *args):
set_module_args(dict(
name='fake_policy',
state='present',
server='localhost',
password='password',
user='admin',
))
current = V1Parameters(params=load_fixture('load_asm_policy_inactive.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
v1 = V1Manager(module=module)
v1.exists = Mock(return_value=False)
v1.import_to_device = Mock(return_value=True)
v1.wait_for_task = Mock(side_effect=[True, True])
v1.create_on_device = Mock(return_value=True)
v1.create_blank = Mock(return_value=True)
v1.read_current_from_device = Mock(return_value=current)
v1.apply_on_device = Mock(return_value=True)
v1._file_is_missing = Mock(return_value=False)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.version_is_less_than_13 = Mock(return_value=False)
mm.get_manager = Mock(return_value=v1)
results = mm.exec_module()
assert results['changed'] is True
assert results['name'] == 'fake_policy'
assert results['active'] is False
def test_delete_policy(self, *args):
set_module_args(dict(
name='fake_policy',
state='absent',
server='localhost',
password='password',
user='admin',
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
v1 = V1Manager(module=module)
v1.exists = Mock(side_effect=[True, False])
v1.remove_from_device = Mock(return_value=True)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.version_is_less_than_13 = Mock(return_value=False)
mm.get_manager = Mock(return_value=v1)
results = mm.exec_module()
assert results['changed'] is True
def test_activate_policy_raises(self, *args):
set_module_args(dict(
name='fake_policy',
state='present',
active='yes',
server='localhost',
password='password',
user='admin',
))
current = V1Parameters(params=load_fixture('load_asm_policy_inactive.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
msg = 'Apply policy task failed.'
# Override methods to force specific logic in the module to happen
v1 = V1Manager(module=module)
v1.exists = Mock(return_value=True)
v1.wait_for_task = Mock(return_value=False)
v1.update_on_device = Mock(return_value=True)
v1.read_current_from_device = Mock(return_value=current)
v1.apply_on_device = Mock(return_value=True)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.version_is_less_than_13 = Mock(return_value=False)
mm.get_manager = Mock(return_value=v1)
with pytest.raises(F5ModuleError) as err:
mm.exec_module()
assert str(err.value) == msg
def test_create_policy_raises(self, *args):
set_module_args(dict(
name='fake_policy',
state='present',
server='localhost',
password='password',
user='admin',
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
msg = 'Failed to create ASM policy: fake_policy'
# Override methods to force specific logic in the module to happen
v1 = V1Manager(module=module)
v1.exists = Mock(return_value=False)
v1.create_on_device = Mock(return_value=False)
v1._file_is_missing = Mock(return_value=False)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.version_is_less_than_13 = Mock(return_value=False)
mm.get_manager = Mock(return_value=v1)
with pytest.raises(F5ModuleError) as err:
mm.exec_module()
assert str(err.value) == msg
def test_delete_policy_raises(self, *args):
set_module_args(dict(
name='fake_policy',
state='absent',
server='localhost',
password='password',
user='admin',
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
msg = 'Failed to delete ASM policy: fake_policy'
# Override methods to force specific logic in the module to happen
v1 = V1Manager(module=module)
v1.exists = Mock(side_effect=[True, True])
v1.remove_from_device = Mock(return_value=True)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.version_is_less_than_13 = Mock(return_value=False)
mm.get_manager = Mock(return_value=v1)
with pytest.raises(F5ModuleError) as err:
mm.exec_module()
assert str(err.value) == msg
| gpl-3.0 |
Copenbacon/code-katas | katas/baker.py | 1 | 1095 | """Pete likes to bake some cakes. He has some recipes and ingredients. Unfortunately he is not good at maths. Can you help him find out how many cakes he could bake given his recipes and ingredients?
Write a function cakes(), which takes the recipe (a dict) and the available ingredients (also a dict) and returns the maximum number of cakes Pete can bake (an integer). For simplicity there are no units for the amounts (e.g. 1 lb of flour or 200 g of sugar are simply 1 or 200). Ingredients that are not present in the objects can be considered as 0.
Examples (keys quoted so the calls are valid Python):
# must return 2
cakes({'flour': 500, 'sugar': 200, 'eggs': 1}, {'flour': 1200, 'sugar': 1200, 'eggs': 5, 'milk': 200})
# must return 0
cakes({'apples': 3, 'flour': 300, 'sugar': 150, 'milk': 100, 'oil': 100}, {'sugar': 500, 'flour': 2000, 'milk': 2000})"""
def cakes(recipe, available):
match = {}
for item in recipe.keys():
if item not in available.keys():
return 0
if available[item]//recipe[item] == 0:
return 0
else:
match[item] = available[item]//recipe[item]
return min(match.values())
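# A quick self-check against the examples from the kata description above
# (a sketch, not part of the original solution; keys are quoted so the
# calls are valid Python):
if __name__ == '__main__':
    assert cakes({'flour': 500, 'sugar': 200, 'eggs': 1},
                 {'flour': 1200, 'sugar': 1200, 'eggs': 5, 'milk': 200}) == 2
    assert cakes({'apples': 3, 'flour': 300, 'sugar': 150, 'milk': 100, 'oil': 100},
                 {'sugar': 500, 'flour': 2000, 'milk': 2000}) == 0
| mit |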
holmes/intellij-community | python/testData/codeInsight/classMRO/TangledInheritance.py | 79 | 6841 |
class Class001(object):
unique_attr = 1
class Class002(Class001):
unique_attr = 2
class Class003(Class002, Class001):
unique_attr = 3
class Class004(Class003, Class002, Class001):
unique_attr = 4
class Class005(Class004, Class003, Class002):
unique_attr = 5
class Class006(Class005, Class004, Class003):
unique_attr = 6
class Class007(Class006, Class005, Class004):
unique_attr = 7
class Class008(Class007, Class006, Class005):
unique_attr = 8
class Class009(Class008, Class007, Class006):
unique_attr = 9
class Class010(Class009, Class008, Class007):
unique_attr = 10
class Class011(Class010, Class009, Class008):
unique_attr = 11
class Class012(Class011, Class010, Class009):
unique_attr = 12
class Class013(Class012, Class011, Class010):
unique_attr = 13
class Class014(Class013, Class012, Class011):
unique_attr = 14
class Class015(Class014, Class013, Class012):
unique_attr = 15
class Class016(Class015, Class014, Class013):
unique_attr = 16
class Class017(Class016, Class015, Class014):
unique_attr = 17
class Class018(Class017, Class016, Class015):
unique_attr = 18
class Class019(Class018, Class017, Class016):
unique_attr = 19
class Class020(Class019, Class018, Class017):
unique_attr = 20
class Class021(Class020, Class019, Class018):
unique_attr = 21
class Class022(Class021, Class020, Class019):
unique_attr = 22
class Class023(Class022, Class021, Class020):
unique_attr = 23
class Class024(Class023, Class022, Class021):
unique_attr = 24
class Class025(Class024, Class023, Class022):
unique_attr = 25
class Class026(Class025, Class024, Class023):
unique_attr = 26
class Class027(Class026, Class025, Class024):
unique_attr = 27
class Class028(Class027, Class026, Class025):
unique_attr = 28
class Class029(Class028, Class027, Class026):
unique_attr = 29
class Class030(Class029, Class028, Class027):
unique_attr = 30
class Class031(Class030, Class029, Class028):
unique_attr = 31
class Class032(Class031, Class030, Class029):
unique_attr = 32
class Class033(Class032, Class031, Class030):
unique_attr = 33
class Class034(Class033, Class032, Class031):
unique_attr = 34
class Class035(Class034, Class033, Class032):
unique_attr = 35
class Class036(Class035, Class034, Class033):
unique_attr = 36
class Class037(Class036, Class035, Class034):
unique_attr = 37
class Class038(Class037, Class036, Class035):
unique_attr = 38
class Class039(Class038, Class037, Class036):
unique_attr = 39
class Class040(Class039, Class038, Class037):
unique_attr = 40
class Class041(Class040, Class039, Class038):
unique_attr = 41
class Class042(Class041, Class040, Class039):
unique_attr = 42
class Class043(Class042, Class041, Class040):
unique_attr = 43
class Class044(Class043, Class042, Class041):
unique_attr = 44
class Class045(Class044, Class043, Class042):
unique_attr = 45
class Class046(Class045, Class044, Class043):
unique_attr = 46
class Class047(Class046, Class045, Class044):
unique_attr = 47
class Class048(Class047, Class046, Class045):
unique_attr = 48
class Class049(Class048, Class047, Class046):
unique_attr = 49
class Class050(Class049, Class048, Class047):
unique_attr = 50
class Class051(Class050, Class049, Class048):
unique_attr = 51
class Class052(Class051, Class050, Class049):
unique_attr = 52
class Class053(Class052, Class051, Class050):
unique_attr = 53
class Class054(Class053, Class052, Class051):
unique_attr = 54
class Class055(Class054, Class053, Class052):
unique_attr = 55
class Class056(Class055, Class054, Class053):
unique_attr = 56
class Class057(Class056, Class055, Class054):
unique_attr = 57
class Class058(Class057, Class056, Class055):
unique_attr = 58
class Class059(Class058, Class057, Class056):
unique_attr = 59
class Class060(Class059, Class058, Class057):
unique_attr = 60
class Class061(Class060, Class059, Class058):
unique_attr = 61
class Class062(Class061, Class060, Class059):
unique_attr = 62
class Class063(Class062, Class061, Class060):
unique_attr = 63
class Class064(Class063, Class062, Class061):
unique_attr = 64
class Class065(Class064, Class063, Class062):
unique_attr = 65
class Class066(Class065, Class064, Class063):
unique_attr = 66
class Class067(Class066, Class065, Class064):
unique_attr = 67
class Class068(Class067, Class066, Class065):
unique_attr = 68
class Class069(Class068, Class067, Class066):
unique_attr = 69
class Class070(Class069, Class068, Class067):
unique_attr = 70
class Class071(Class070, Class069, Class068):
unique_attr = 71
class Class072(Class071, Class070, Class069):
unique_attr = 72
class Class073(Class072, Class071, Class070):
unique_attr = 73
class Class074(Class073, Class072, Class071):
unique_attr = 74
class Class075(Class074, Class073, Class072):
unique_attr = 75
class Class076(Class075, Class074, Class073):
unique_attr = 76
class Class077(Class076, Class075, Class074):
unique_attr = 77
class Class078(Class077, Class076, Class075):
unique_attr = 78
class Class079(Class078, Class077, Class076):
unique_attr = 79
class Class080(Class079, Class078, Class077):
unique_attr = 80
class Class081(Class080, Class079, Class078):
unique_attr = 81
class Class082(Class081, Class080, Class079):
unique_attr = 82
class Class083(Class082, Class081, Class080):
unique_attr = 83
class Class084(Class083, Class082, Class081):
unique_attr = 84
class Class085(Class084, Class083, Class082):
unique_attr = 85
class Class086(Class085, Class084, Class083):
unique_attr = 86
class Class087(Class086, Class085, Class084):
unique_attr = 87
class Class088(Class087, Class086, Class085):
unique_attr = 88
class Class089(Class088, Class087, Class086):
unique_attr = 89
class Class090(Class089, Class088, Class087):
unique_attr = 90
class Class091(Class090, Class089, Class088):
unique_attr = 91
class Class092(Class091, Class090, Class089):
unique_attr = 92
class Class093(Class092, Class091, Class090):
unique_attr = 93
class Class094(Class093, Class092, Class091):
unique_attr = 94
class Class095(Class094, Class093, Class092):
unique_attr = 95
class Class096(Class095, Class094, Class093):
unique_attr = 96
class Class097(Class096, Class095, Class094):
unique_attr = 97
class Class098(Class097, Class096, Class095):
unique_attr = 98
class Class099(Class098, Class097, Class096):
unique_attr = 99
class Class100(Class099, Class098, Class097):
unique_attr = 100
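# Not part of the fixture (comments only, so the definitions above are
# unchanged): the C3 linearization that this test data exercises can be
# inspected at runtime, e.g. under CPython:
# >>> [c.__name__ for c in Class005.__mro__]
# ['Class005', 'Class004', 'Class003', 'Class002', 'Class001', 'object']
# >>> Class005.unique_attr  # resolved via the first class in the MRO
# 5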
| apache-2.0 |
openshift/openshift-tools | openshift/installer/vendored/openshift-ansible-3.11.28-1/roles/lib_openshift/src/test/unit/test_oc_secret.py | 62 | 6303 | '''
Unit tests for oc secret
'''
import os
import six
import sys
import unittest
import mock
# Removing invalid variable names for tests so that I can
# keep them brief
# pylint: disable=invalid-name,no-name-in-module
# Disable import-error b/c our libraries aren't loaded in jenkins
# pylint: disable=import-error,wrong-import-position
# place class in our python path
module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
sys.path.insert(0, module_path)
from oc_secret import OCSecret, locate_oc_binary # noqa: E402
class OCSecretTest(unittest.TestCase):
'''
Test class for OCSecret
'''
@mock.patch('oc_secret.locate_oc_binary')
@mock.patch('oc_secret.Utils.create_tmpfile_copy')
@mock.patch('oc_secret.Utils._write')
@mock.patch('oc_secret.OCSecret._run')
def test_adding_a_secret(self, mock_cmd, mock_write, mock_tmpfile_copy, mock_oc_binary):
''' Testing adding a secret '''
# Arrange
# run_ansible input parameters
params = {
'state': 'present',
'namespace': 'default',
'name': 'testsecretname',
'type': 'Opaque',
'contents': [{
'path': "/tmp/somesecret.json",
'data': "{'one': 1, 'two': 2, 'three': 3}",
}],
'decode': False,
'kubeconfig': '/etc/origin/master/admin.kubeconfig',
'debug': False,
'files': None,
'delete_after': True,
'force': False,
}
# Return values of our mocked function call. These get returned once per call.
mock_cmd.side_effect = [
(1, '', 'Error from server: secrets "testsecretname" not found'),
(0, 'secret/testsecretname', ''),
]
mock_oc_binary.side_effect = [
'oc'
]
mock_tmpfile_copy.side_effect = [
'/tmp/mocked_kubeconfig',
]
# Act
results = OCSecret.run_ansible(params, False)
# Assert
self.assertTrue(results['changed'])
self.assertEqual(results['results']['returncode'], 0)
self.assertEqual(results['state'], 'present')
# Making sure our mock was called as we expected
mock_cmd.assert_has_calls([
mock.call(['oc', 'get', 'secrets', 'testsecretname', '-o', 'json', '-n', 'default'], None),
mock.call(['oc', 'secrets', 'new', 'testsecretname', '--type=Opaque', mock.ANY, '-n', 'default'], None),
])
mock_write.assert_has_calls([
mock.call(mock.ANY, "{'one': 1, 'two': 2, 'three': 3}"),
])
@unittest.skipIf(six.PY3, 'py2 test only')
@mock.patch('os.path.exists')
@mock.patch('os.environ.get')
def test_binary_lookup_fallback(self, mock_env_get, mock_path_exists):
''' Testing binary lookup fallback '''
mock_env_get.side_effect = lambda _v, _d: ''
mock_path_exists.side_effect = lambda _: False
self.assertEqual(locate_oc_binary(), 'oc')
@unittest.skipIf(six.PY3, 'py2 test only')
@mock.patch('os.path.exists')
@mock.patch('os.environ.get')
def test_binary_lookup_in_path(self, mock_env_get, mock_path_exists):
''' Testing binary lookup in path '''
oc_bin = '/usr/bin/oc'
mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
mock_path_exists.side_effect = lambda f: f == oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
@unittest.skipIf(six.PY3, 'py2 test only')
@mock.patch('os.path.exists')
@mock.patch('os.environ.get')
def test_binary_lookup_in_usr_local(self, mock_env_get, mock_path_exists):
''' Testing binary lookup in /usr/local/bin '''
oc_bin = '/usr/local/bin/oc'
mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
mock_path_exists.side_effect = lambda f: f == oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
@unittest.skipIf(six.PY3, 'py2 test only')
@mock.patch('os.path.exists')
@mock.patch('os.environ.get')
def test_binary_lookup_in_home(self, mock_env_get, mock_path_exists):
''' Testing binary lookup in ~/bin '''
oc_bin = os.path.expanduser('~/bin/oc')
mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
mock_path_exists.side_effect = lambda f: f == oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
@unittest.skipIf(six.PY2, 'py3 test only')
@mock.patch('shutil.which')
@mock.patch('os.environ.get')
def test_binary_lookup_fallback_py3(self, mock_env_get, mock_shutil_which):
''' Testing binary lookup fallback '''
mock_env_get.side_effect = lambda _v, _d: ''
mock_shutil_which.side_effect = lambda _f, path=None: None
self.assertEqual(locate_oc_binary(), 'oc')
@unittest.skipIf(six.PY2, 'py3 test only')
@mock.patch('shutil.which')
@mock.patch('os.environ.get')
def test_binary_lookup_in_path_py3(self, mock_env_get, mock_shutil_which):
''' Testing binary lookup in path '''
oc_bin = '/usr/bin/oc'
mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
@unittest.skipIf(six.PY2, 'py3 test only')
@mock.patch('shutil.which')
@mock.patch('os.environ.get')
def test_binary_lookup_in_usr_local_py3(self, mock_env_get, mock_shutil_which):
''' Testing binary lookup in /usr/local/bin '''
oc_bin = '/usr/local/bin/oc'
mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
@unittest.skipIf(six.PY2, 'py3 test only')
@mock.patch('shutil.which')
@mock.patch('os.environ.get')
def test_binary_lookup_in_home_py3(self, mock_env_get, mock_shutil_which):
''' Testing binary lookup in ~/bin '''
oc_bin = os.path.expanduser('~/bin/oc')
mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
| apache-2.0 |
andela-earinde/bellatrix-py | app/js/lib/lib/modules/test/test_sys.py | 2 | 29154 | # -*- coding: utf-8 -*-
import unittest, test.test_support
from test.script_helper import assert_python_ok, assert_python_failure
import sys, os, cStringIO
import struct
import operator
class SysModuleTest(unittest.TestCase):
def tearDown(self):
test.test_support.reap_children()
def test_original_displayhook(self):
import __builtin__
savestdout = sys.stdout
out = cStringIO.StringIO()
sys.stdout = out
dh = sys.__displayhook__
self.assertRaises(TypeError, dh)
if hasattr(__builtin__, "_"):
del __builtin__._
dh(None)
self.assertEqual(out.getvalue(), "")
self.assertTrue(not hasattr(__builtin__, "_"))
dh(42)
self.assertEqual(out.getvalue(), "42\n")
self.assertEqual(__builtin__._, 42)
del sys.stdout
self.assertRaises(RuntimeError, dh, 42)
sys.stdout = savestdout
def test_lost_displayhook(self):
olddisplayhook = sys.displayhook
del sys.displayhook
code = compile("42", "<string>", "single")
self.assertRaises(RuntimeError, eval, code)
sys.displayhook = olddisplayhook
def test_custom_displayhook(self):
olddisplayhook = sys.displayhook
def baddisplayhook(obj):
raise ValueError
sys.displayhook = baddisplayhook
code = compile("42", "<string>", "single")
self.assertRaises(ValueError, eval, code)
sys.displayhook = olddisplayhook
def test_original_excepthook(self):
savestderr = sys.stderr
err = cStringIO.StringIO()
sys.stderr = err
eh = sys.__excepthook__
self.assertRaises(TypeError, eh)
try:
raise ValueError(42)
except ValueError, exc:
eh(*sys.exc_info())
sys.stderr = savestderr
self.assertTrue(err.getvalue().endswith("ValueError: 42\n"))
# FIXME: testing the code for a lost or replaced excepthook in
# Python/pythonrun.c::PyErr_PrintEx() is tricky.
def test_exc_clear(self):
self.assertRaises(TypeError, sys.exc_clear, 42)
# Verify that exc_info is present and matches exc, then clear it, and
# check that it worked.
def clear_check(exc):
typ, value, traceback = sys.exc_info()
self.assertTrue(typ is not None)
self.assertTrue(value is exc)
self.assertTrue(traceback is not None)
with test.test_support.check_py3k_warnings():
sys.exc_clear()
typ, value, traceback = sys.exc_info()
self.assertTrue(typ is None)
self.assertTrue(value is None)
self.assertTrue(traceback is None)
def clear():
try:
raise ValueError, 42
except ValueError, exc:
clear_check(exc)
# Raise an exception and check that it can be cleared
clear()
# Verify that a frame currently handling an exception is
# unaffected by calling exc_clear in a nested frame.
try:
raise ValueError, 13
except ValueError, exc:
typ1, value1, traceback1 = sys.exc_info()
clear()
typ2, value2, traceback2 = sys.exc_info()
self.assertTrue(typ1 is typ2)
self.assertTrue(value1 is exc)
self.assertTrue(value1 is value2)
self.assertTrue(traceback1 is traceback2)
# Check that an exception can be cleared outside of an except block
clear_check(exc)
def test_exit(self):
# call with two arguments
self.assertRaises(TypeError, sys.exit, 42, 42)
# call without argument
with self.assertRaises(SystemExit) as cm:
sys.exit()
self.assertIsNone(cm.exception.code)
rc, out, err = assert_python_ok('-c', 'import sys; sys.exit()')
self.assertEqual(rc, 0)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
# call with integer argument
with self.assertRaises(SystemExit) as cm:
sys.exit(42)
self.assertEqual(cm.exception.code, 42)
# call with tuple argument with one entry
# entry will be unpacked
with self.assertRaises(SystemExit) as cm:
sys.exit((42,))
self.assertEqual(cm.exception.code, 42)
# call with string argument
with self.assertRaises(SystemExit) as cm:
sys.exit("exit")
self.assertEqual(cm.exception.code, "exit")
# call with tuple argument with two entries
with self.assertRaises(SystemExit) as cm:
sys.exit((17, 23))
self.assertEqual(cm.exception.code, (17, 23))
# test that the exit machinery handles SystemExits properly
# both unnormalized...
rc, out, err = assert_python_failure('-c', 'raise SystemExit, 46')
self.assertEqual(rc, 46)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
# ... and normalized
rc, out, err = assert_python_failure('-c', 'raise SystemExit(47)')
self.assertEqual(rc, 47)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
def check_exit_message(code, expected, **env_vars):
rc, out, err = assert_python_failure('-c', code, **env_vars)
self.assertEqual(rc, 1)
self.assertEqual(out, b'')
self.assertTrue(err.startswith(expected),
"%s doesn't start with %s" % (repr(err), repr(expected)))
# test that stderr buffer is flushed before the exit message is written
# into stderr
check_exit_message(
r'import sys; sys.stderr.write("unflushed,"); sys.exit("message")',
b"unflushed,message")
# test that the unicode message is encoded to the stderr encoding
check_exit_message(
r'import sys; sys.exit(u"h\xe9")',
b"h\xe9", PYTHONIOENCODING='latin-1')
def test_getdefaultencoding(self):
if test.test_support.have_unicode:
self.assertRaises(TypeError, sys.getdefaultencoding, 42)
# can't check more than the type, as the user might have changed it
self.assertIsInstance(sys.getdefaultencoding(), str)
# testing sys.settrace() is done in test_sys_settrace.py
# testing sys.setprofile() is done in test_sys_setprofile.py
def test_setcheckinterval(self):
self.assertRaises(TypeError, sys.setcheckinterval)
orig = sys.getcheckinterval()
for n in 0, 100, 120, orig: # orig last to restore starting state
sys.setcheckinterval(n)
self.assertEqual(sys.getcheckinterval(), n)
def test_recursionlimit(self):
self.assertRaises(TypeError, sys.getrecursionlimit, 42)
oldlimit = sys.getrecursionlimit()
self.assertRaises(TypeError, sys.setrecursionlimit)
self.assertRaises(ValueError, sys.setrecursionlimit, -42)
sys.setrecursionlimit(10000)
self.assertEqual(sys.getrecursionlimit(), 10000)
sys.setrecursionlimit(oldlimit)
self.assertRaises(OverflowError, sys.setrecursionlimit, 1 << 31)
try:
sys.setrecursionlimit((1 << 31) - 5)
try:
# issue13546: isinstance(e, ValueError) used to fail
# when the recursion limit is close to 1<<31
raise ValueError()
except ValueError, e:
pass
finally:
sys.setrecursionlimit(oldlimit)
def test_getwindowsversion(self):
# Raise SkipTest if sys doesn't have getwindowsversion attribute
test.test_support.get_attribute(sys, "getwindowsversion")
v = sys.getwindowsversion()
self.assertEqual(len(v), 5)
self.assertIsInstance(v[0], int)
self.assertIsInstance(v[1], int)
self.assertIsInstance(v[2], int)
self.assertIsInstance(v[3], int)
self.assertIsInstance(v[4], str)
self.assertRaises(IndexError, operator.getitem, v, 5)
self.assertIsInstance(v.major, int)
self.assertIsInstance(v.minor, int)
self.assertIsInstance(v.build, int)
self.assertIsInstance(v.platform, int)
self.assertIsInstance(v.service_pack, str)
self.assertIsInstance(v.service_pack_minor, int)
self.assertIsInstance(v.service_pack_major, int)
self.assertIsInstance(v.suite_mask, int)
self.assertIsInstance(v.product_type, int)
self.assertEqual(v[0], v.major)
self.assertEqual(v[1], v.minor)
self.assertEqual(v[2], v.build)
self.assertEqual(v[3], v.platform)
self.assertEqual(v[4], v.service_pack)
# This is how platform.py calls it. Make sure tuple
# still has 5 elements
maj, min, buildno, plat, csd = sys.getwindowsversion()
@unittest.skipUnless(hasattr(sys, "setdlopenflags"),
'test needs sys.setdlopenflags()')
def test_dlopenflags(self):
self.assertTrue(hasattr(sys, "getdlopenflags"))
self.assertRaises(TypeError, sys.getdlopenflags, 42)
oldflags = sys.getdlopenflags()
self.assertRaises(TypeError, sys.setdlopenflags)
sys.setdlopenflags(oldflags+1)
self.assertEqual(sys.getdlopenflags(), oldflags+1)
sys.setdlopenflags(oldflags)
@test.test_support.impl_detail("reference counting")
def test_refcount(self):
# n here must be a global in order for this test to pass while
# tracing with a python function. Tracing calls PyFrame_FastToLocals
# which will add a copy of any locals to the frame object, causing
# the reference count to increase by 2 instead of 1.
global n
self.assertRaises(TypeError, sys.getrefcount)
c = sys.getrefcount(None)
n = None
self.assertEqual(sys.getrefcount(None), c+1)
del n
self.assertEqual(sys.getrefcount(None), c)
if hasattr(sys, "gettotalrefcount"):
self.assertIsInstance(sys.gettotalrefcount(), int)
def test_getframe(self):
self.assertRaises(TypeError, sys._getframe, 42, 42)
self.assertRaises(ValueError, sys._getframe, 2000000000)
self.assertTrue(
SysModuleTest.test_getframe.im_func.func_code \
is sys._getframe().f_code
)
@test.test_support.impl_detail("current_frames")
def test_current_frames(self):
have_threads = True
try:
import thread
except ImportError:
have_threads = False
if have_threads:
self.current_frames_with_threads()
else:
self.current_frames_without_threads()
# Test sys._current_frames() in a WITH_THREADS build.
@test.test_support.reap_threads
def current_frames_with_threads(self):
import threading, thread
import traceback
# Spawn a thread that blocks at a known place. Then the main
# thread does sys._current_frames(), and verifies that the frames
# returned make sense.
entered_g = threading.Event()
leave_g = threading.Event()
thread_info = [] # the thread's id
def f123():
g456()
def g456():
thread_info.append(thread.get_ident())
entered_g.set()
leave_g.wait()
t = threading.Thread(target=f123)
t.start()
entered_g.wait()
# At this point, t has finished its entered_g.set(), although it's
# impossible to guess whether it's still on that line or has moved on
# to its leave_g.wait().
self.assertEqual(len(thread_info), 1)
thread_id = thread_info[0]
d = sys._current_frames()
main_id = thread.get_ident()
self.assertIn(main_id, d)
self.assertIn(thread_id, d)
# Verify that the captured main-thread frame is _this_ frame.
frame = d.pop(main_id)
self.assertTrue(frame is sys._getframe())
# Verify that the captured thread frame is blocked in g456, called
# from f123. This is a little tricky, since various bits of
# threading.py are also in the thread's call stack.
frame = d.pop(thread_id)
stack = traceback.extract_stack(frame)
for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
if funcname == "f123":
break
else:
self.fail("didn't find f123() on thread's call stack")
self.assertEqual(sourceline, "g456()")
# And the next record must be for g456().
filename, lineno, funcname, sourceline = stack[i+1]
self.assertEqual(funcname, "g456")
self.assertIn(sourceline, ["leave_g.wait()", "entered_g.set()"])
# Reap the spawned thread.
leave_g.set()
t.join()
# Test sys._current_frames() when thread support doesn't exist.
def current_frames_without_threads(self):
# Not much happens here: there is only one thread, with artificial
# "thread id" 0.
d = sys._current_frames()
self.assertEqual(len(d), 1)
self.assertIn(0, d)
self.assertTrue(d[0] is sys._getframe())
def test_attributes(self):
self.assertIsInstance(sys.api_version, int)
self.assertIsInstance(sys.argv, list)
self.assertIn(sys.byteorder, ("little", "big"))
self.assertIsInstance(sys.builtin_module_names, tuple)
self.assertIsInstance(sys.copyright, basestring)
self.assertIsInstance(sys.exec_prefix, basestring)
self.assertIsInstance(sys.executable, basestring)
self.assertEqual(len(sys.float_info), 11)
self.assertEqual(sys.float_info.radix, 2)
self.assertEqual(len(sys.long_info), 2)
if test.test_support.check_impl_detail(cpython=True):
self.assertTrue(sys.long_info.bits_per_digit % 5 == 0)
else:
self.assertTrue(sys.long_info.bits_per_digit >= 1)
self.assertTrue(sys.long_info.sizeof_digit >= 1)
self.assertEqual(type(sys.long_info.bits_per_digit), int)
self.assertEqual(type(sys.long_info.sizeof_digit), int)
self.assertIsInstance(sys.hexversion, int)
self.assertIsInstance(sys.maxint, int)
if test.test_support.have_unicode:
self.assertIsInstance(sys.maxunicode, int)
self.assertIsInstance(sys.platform, basestring)
self.assertIsInstance(sys.prefix, basestring)
self.assertIsInstance(sys.version, basestring)
vi = sys.version_info
self.assertIsInstance(vi[:], tuple)
self.assertEqual(len(vi), 5)
self.assertIsInstance(vi[0], int)
self.assertIsInstance(vi[1], int)
self.assertIsInstance(vi[2], int)
self.assertIn(vi[3], ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi[4], int)
self.assertIsInstance(vi.major, int)
self.assertIsInstance(vi.minor, int)
self.assertIsInstance(vi.micro, int)
self.assertIn(vi.releaselevel, ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi.serial, int)
self.assertEqual(vi[0], vi.major)
self.assertEqual(vi[1], vi.minor)
self.assertEqual(vi[2], vi.micro)
self.assertEqual(vi[3], vi.releaselevel)
self.assertEqual(vi[4], vi.serial)
self.assertTrue(vi > (1,0,0))
self.assertIsInstance(sys.float_repr_style, str)
self.assertIn(sys.float_repr_style, ('short', 'legacy'))
def test_43581(self):
# Can't use sys.stdout, as this is a cStringIO object when
# the test runs under regrtest.
self.assertTrue(sys.__stdout__.encoding == sys.__stderr__.encoding)
def test_sys_flags(self):
self.assertTrue(sys.flags)
attrs = ("debug", "py3k_warning", "division_warning", "division_new",
"inspect", "interactive", "optimize", "dont_write_bytecode",
"no_site", "ignore_environment", "tabcheck", "verbose",
"unicode", "bytes_warning", "hash_randomization")
for attr in attrs:
self.assertTrue(hasattr(sys.flags, attr), attr)
self.assertEqual(type(getattr(sys.flags, attr)), int, attr)
self.assertTrue(repr(sys.flags))
@test.test_support.impl_detail("sys._clear_type_cache")
def test_clear_type_cache(self):
sys._clear_type_cache()
def test_ioencoding(self):
import subprocess
env = dict(os.environ)
# Test character: cent sign, encoded as 0x4A (ASCII J) in CP424,
# not representable in ASCII.
env["PYTHONIOENCODING"] = "cp424"
p = subprocess.Popen([sys.executable, "-c", 'print unichr(0xa2)'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, unichr(0xa2).encode("cp424"))
env["PYTHONIOENCODING"] = "ascii:replace"
p = subprocess.Popen([sys.executable, "-c", 'print unichr(0xa2)'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, '?')
def test_call_tracing(self):
self.assertEqual(sys.call_tracing(str, (2,)), "2")
self.assertRaises(TypeError, sys.call_tracing, str, 2)
def test_executable(self):
# sys.executable should be absolute
self.assertEqual(os.path.abspath(sys.executable), sys.executable)
# Issue #7774: Ensure that sys.executable is an empty string if argv[0]
# has been set to an non existent program name and Python is unable to
# retrieve the real program name
import subprocess
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
python_dir = os.path.dirname(os.path.realpath(sys.executable))
p = subprocess.Popen(
["nonexistent", "-c", 'import sys; print repr(sys.executable)'],
executable=sys.executable, stdout=subprocess.PIPE, cwd=python_dir)
executable = p.communicate()[0].strip()
p.wait()
self.assertIn(executable, ["''", repr(sys.executable)])
@test.test_support.cpython_only
class SizeofTest(unittest.TestCase):
def setUp(self):
self.P = struct.calcsize('P')
self.longdigit = sys.long_info.sizeof_digit
import _testcapi
self.gc_headsize = _testcapi.SIZEOF_PYGC_HEAD
self.file = open(test.test_support.TESTFN, 'wb')
def tearDown(self):
self.file.close()
test.test_support.unlink(test.test_support.TESTFN)
check_sizeof = test.test_support.check_sizeof
def test_gc_head_size(self):
# Check that the gc header size is added to objects tracked by the gc.
size = test.test_support.calcobjsize
gc_header_size = self.gc_headsize
# bool objects are not gc tracked
self.assertEqual(sys.getsizeof(True), size('l'))
# but lists are
self.assertEqual(sys.getsizeof([]), size('P PP') + gc_header_size)
def test_errors(self):
class BadSizeof(object):
def __sizeof__(self):
raise ValueError
self.assertRaises(ValueError, sys.getsizeof, BadSizeof())
class InvalidSizeof(object):
def __sizeof__(self):
return None
self.assertRaises(TypeError, sys.getsizeof, InvalidSizeof())
sentinel = ["sentinel"]
self.assertIs(sys.getsizeof(InvalidSizeof(), sentinel), sentinel)
class OverflowSizeof(long):
def __sizeof__(self):
return int(self)
self.assertEqual(sys.getsizeof(OverflowSizeof(sys.maxsize)),
sys.maxsize + self.gc_headsize)
with self.assertRaises(OverflowError):
sys.getsizeof(OverflowSizeof(sys.maxsize + 1))
with self.assertRaises(ValueError):
sys.getsizeof(OverflowSizeof(-1))
with self.assertRaises((ValueError, OverflowError)):
sys.getsizeof(OverflowSizeof(-sys.maxsize - 1))
def test_default(self):
size = test.test_support.calcobjsize
self.assertEqual(sys.getsizeof(True, -1), size('l'))
def test_objecttypes(self):
# check all types defined in Objects/
size = test.test_support.calcobjsize
vsize = test.test_support.calcvobjsize
check = self.check_sizeof
# bool
check(True, size('l'))
# buffer
with test.test_support.check_py3k_warnings():
check(buffer(''), size('2P2Pil'))
# builtin_function_or_method
check(len, size('3P'))
# bytearray
samples = ['', 'u'*100000]
for sample in samples:
x = bytearray(sample)
check(x, vsize('iPP') + x.__alloc__())
# bytearray_iterator
check(iter(bytearray()), size('PP'))
# cell
def get_cell():
x = 42
def inner():
return x
return inner
check(get_cell().func_closure[0], size('P'))
# classobj (old-style class)
class class_oldstyle():
def method():
pass
check(class_oldstyle, size('7P'))
# instance (old-style class)
check(class_oldstyle(), size('3P'))
# instancemethod (old-style class)
check(class_oldstyle().method, size('4P'))
# complex
check(complex(0,1), size('2d'))
# code
check(get_cell().func_code, size('4i8Pi3P'))
# BaseException
check(BaseException(), size('3P'))
# UnicodeEncodeError
check(UnicodeEncodeError("", u"", 0, 0, ""), size('5P2PP'))
# UnicodeDecodeError
check(UnicodeDecodeError("", "", 0, 0, ""), size('5P2PP'))
# UnicodeTranslateError
check(UnicodeTranslateError(u"", 0, 1, ""), size('5P2PP'))
# method_descriptor (descriptor object)
check(str.lower, size('2PP'))
# classmethod_descriptor (descriptor object)
# XXX
# member_descriptor (descriptor object)
import datetime
check(datetime.timedelta.days, size('2PP'))
# getset_descriptor (descriptor object)
import __builtin__
check(__builtin__.file.closed, size('2PP'))
# wrapper_descriptor (descriptor object)
check(int.__add__, size('2P2P'))
# dictproxy
class C(object): pass
check(C.__dict__, size('P'))
# method-wrapper (descriptor object)
check({}.__iter__, size('2P'))
# dict
check({}, size('3P2P' + 8*'P2P'))
x = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8}
check(x, size('3P2P' + 8*'P2P') + 16*struct.calcsize('P2P'))
# dictionary-keyiterator
check({}.iterkeys(), size('P2PPP'))
# dictionary-valueiterator
check({}.itervalues(), size('P2PPP'))
# dictionary-itemiterator
check({}.iteritems(), size('P2PPP'))
# ellipses
check(Ellipsis, size(''))
# EncodingMap
import codecs, encodings.iso8859_3
x = codecs.charmap_build(encodings.iso8859_3.decoding_table)
check(x, size('32B2iB'))
# enumerate
check(enumerate([]), size('l3P'))
# file
check(self.file, size('4P2i4P3i3P3i'))
# float
check(float(0), size('d'))
# sys.floatinfo
check(sys.float_info, vsize('') + self.P * len(sys.float_info))
# frame
import inspect
CO_MAXBLOCKS = 20
x = inspect.currentframe()
ncells = len(x.f_code.co_cellvars)
nfrees = len(x.f_code.co_freevars)
extras = x.f_code.co_stacksize + x.f_code.co_nlocals +\
ncells + nfrees - 1
check(x, vsize('12P3i' + CO_MAXBLOCKS*'3i' + 'P' + extras*'P'))
# function
def func(): pass
check(func, size('9P'))
class c():
@staticmethod
def foo():
pass
@classmethod
def bar(cls):
pass
# staticmethod
check(foo, size('P'))
# classmethod
check(bar, size('P'))
# generator
def get_gen(): yield 1
check(get_gen(), size('Pi2P'))
# integer
check(1, size('l'))
check(100, size('l'))
# iterator
check(iter('abc'), size('lP'))
# callable-iterator
import re
check(re.finditer('',''), size('2P'))
# list
samples = [[], [1,2,3], ['1', '2', '3']]
for sample in samples:
check(sample, vsize('PP') + len(sample)*self.P)
# sortwrapper (list)
# XXX
# cmpwrapper (list)
# XXX
# listiterator (list)
check(iter([]), size('lP'))
# listreverseiterator (list)
check(reversed([]), size('lP'))
# long
check(0L, vsize(''))
check(1L, vsize('') + self.longdigit)
check(-1L, vsize('') + self.longdigit)
PyLong_BASE = 2**sys.long_info.bits_per_digit
check(long(PyLong_BASE), vsize('') + 2*self.longdigit)
check(long(PyLong_BASE**2-1), vsize('') + 2*self.longdigit)
check(long(PyLong_BASE**2), vsize('') + 3*self.longdigit)
# module
check(unittest, size('P'))
# None
check(None, size(''))
# object
check(object(), size(''))
# property (descriptor object)
class C(object):
def getx(self): return self.__x
def setx(self, value): self.__x = value
def delx(self): del self.__x
x = property(getx, setx, delx, "")
check(x, size('4Pi'))
# PyCObject
# PyCapsule
# XXX
# rangeiterator
check(iter(xrange(1)), size('4l'))
# reverse
check(reversed(''), size('PP'))
# set
# frozenset
PySet_MINSIZE = 8
samples = [[], range(10), range(50)]
s = size('3P2P' + PySet_MINSIZE*'lP' + 'lP')
for sample in samples:
minused = len(sample)
if minused == 0: tmp = 1
# the computation of minused is actually a bit more complicated
# but this suffices for the sizeof test
minused = minused*2
newsize = PySet_MINSIZE
while newsize <= minused:
newsize = newsize << 1
if newsize <= 8:
check(set(sample), s)
check(frozenset(sample), s)
else:
check(set(sample), s + newsize*struct.calcsize('lP'))
check(frozenset(sample), s + newsize*struct.calcsize('lP'))
# setiterator
check(iter(set()), size('P3P'))
# slice
check(slice(1), size('3P'))
# str
vh = test.test_support._vheader
check('', struct.calcsize(vh + 'lic'))
check('abc', struct.calcsize(vh + 'lic') + 3)
# super
check(super(int), size('3P'))
# tuple
check((), vsize(''))
check((1,2,3), vsize('') + 3*self.P)
# tupleiterator
check(iter(()), size('lP'))
# type
# (PyTypeObject + PyNumberMethods + PyMappingMethods +
# PySequenceMethods + PyBufferProcs)
s = vsize('P2P15Pl4PP9PP11PI') + struct.calcsize('41P 10P 3P 6P')
class newstyleclass(object):
pass
check(newstyleclass, s)
# builtin type
check(int, s)
# NotImplementedType
import types
check(types.NotImplementedType, s)
# unicode
usize = len(u'\0'.encode('unicode-internal'))
samples = [u'', u'1'*100]
# we need to test for both sizes, because we don't know if the string
# has been cached
for s in samples:
check(s, size('PPlP') + usize * (len(s) + 1))
# weakref
import weakref
check(weakref.ref(int), size('2Pl2P'))
# weakproxy
# XXX
# weakcallableproxy
check(weakref.proxy(int), size('2Pl2P'))
# xrange
check(xrange(1), size('3l'))
check(xrange(66000), size('3l'))
def test_pythontypes(self):
# check all types defined in Python/
size = test.test_support.calcobjsize
vsize = test.test_support.calcvobjsize
check = self.check_sizeof
# _ast.AST
import _ast
check(_ast.AST(), size(''))
# imp.NullImporter
import imp
check(imp.NullImporter(self.file.name), size(''))
try:
raise TypeError
except TypeError:
tb = sys.exc_info()[2]
# traceback
if tb != None:
check(tb, size('2P2i'))
# symtable entry
# XXX
# sys.flags
check(sys.flags, vsize('') + self.P * len(sys.flags))
def test_main():
test_classes = (SysModuleTest, SizeofTest)
test.test_support.run_unittest(*test_classes)
if __name__ == "__main__":
test_main()
| mit |
HiSPARC/station-software | user/python/Lib/ctypes/test/test_find.py | 9 | 2116 | import unittest
import os.path
import sys
from test import test_support
from ctypes import *
from ctypes.util import find_library
from ctypes.test import is_resource_enabled
if sys.platform == "win32":
lib_gl = find_library("OpenGL32")
lib_glu = find_library("Glu32")
lib_gle = None
elif sys.platform == "darwin":
lib_gl = lib_glu = find_library("OpenGL")
lib_gle = None
else:
lib_gl = find_library("GL")
lib_glu = find_library("GLU")
lib_gle = find_library("gle")
## print, for debugging
if is_resource_enabled("printing"):
if lib_gl or lib_glu or lib_gle:
print "OpenGL libraries:"
for item in (("GL", lib_gl),
("GLU", lib_glu),
("gle", lib_gle)):
print "\t", item
# On some systems, loading the OpenGL libraries needs the RTLD_GLOBAL mode.
class Test_OpenGL_libs(unittest.TestCase):
def setUp(self):
self.gl = self.glu = self.gle = None
if lib_gl:
try:
self.gl = CDLL(lib_gl, mode=RTLD_GLOBAL)
except OSError:
pass
if lib_glu:
try:
self.glu = CDLL(lib_glu, RTLD_GLOBAL)
except OSError:
pass
if lib_gle:
try:
self.gle = CDLL(lib_gle)
except OSError:
pass
def tearDown(self):
self.gl = self.glu = self.gle = None
@unittest.skipUnless(lib_gl, 'lib_gl not available')
def test_gl(self):
if self.gl:
self.gl.glClearIndex
@unittest.skipUnless(lib_glu, 'lib_glu not available')
def test_glu(self):
if self.glu:
self.glu.gluBeginCurve
@unittest.skipUnless(lib_gle, 'lib_gle not available')
def test_gle(self):
if self.gle:
self.gle.gleGetJoinStyle
def test_shell_injection(self):
result = find_library('; echo Hello shell > ' + test_support.TESTFN)
self.assertFalse(os.path.lexists(test_support.TESTFN))
self.assertIsNone(result)
if __name__ == "__main__":
unittest.main()
| gpl-3.0 |
pmghalvorsen/gramps_branch | gramps/plugins/view/view.gpr.py | 1 | 8180 | # encoding:utf-8
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2009 Benny Malengier
# Copyright (C) 2009 Douglas S. Blank
# Copyright (C) 2009 Nick Hall
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
MODULE_VERSION="4.2"
#------------------------------------------------------------------------
#
# default views of Gramps
#
#------------------------------------------------------------------------
register(VIEW,
id = 'eventview',
name = _("Events"),
description = _("The view showing all the events"),
version = '1.0',
gramps_target_version = MODULE_VERSION,
status = STABLE,
fname = 'eventview.py',
authors = ["The Gramps project"],
authors_email = ["http://gramps-project.org"],
category = ("Events", _("Events")),
viewclass = 'EventView',
order = START,
)
register(VIEW,
id = 'familyview',
name = _("Families"),
description = _("The view showing all families"),
version = '1.0',
gramps_target_version = MODULE_VERSION,
status = STABLE,
fname = 'familyview.py',
authors = ["The Gramps project"],
authors_email = ["http://gramps-project.org"],
category = ("Families", _("Families")),
viewclass = 'FamilyView',
order = START,
)
register(VIEW,
id = 'dashboardview',
name = _("Dashboard"),
description = _("The view showing Gramplets"),
version = '1.0',
gramps_target_version = MODULE_VERSION,
status = STABLE,
fname = 'dashboardview.py',
authors = ["The Gramps project"],
authors_email = ["http://gramps-project.org"],
category = ("Dashboard", _("Dashboard")),
viewclass = 'DashboardView',
order = START,
)
register(VIEW,
id = 'mediaview',
name = _("Media"),
description = _("The view showing all the media objects"),
version = '1.0',
gramps_target_version = MODULE_VERSION,
status = STABLE,
fname = 'mediaview.py',
authors = ["The Gramps project"],
authors_email = ["http://gramps-project.org"],
category = ("Media", _("Media")),
viewclass = 'MediaView',
order = START,
)
register(VIEW,
id = 'noteview',
name = _("Notes"),
description = _("The view showing all the notes"),
version = '1.0',
gramps_target_version = MODULE_VERSION,
status = STABLE,
fname = 'noteview.py',
authors = ["The Gramps project"],
authors_email = ["http://gramps-project.org"],
category = ("Notes", _("Notes")),
viewclass = 'NoteView',
order = START,
)
register(VIEW,
id = 'relview',
name = _("Relationships"),
description = _("The view showing all relationships of the selected person"),
version = '1.0',
gramps_target_version = MODULE_VERSION,
status = STABLE,
fname = 'relview.py',
authors = ["The Gramps project"],
authors_email = ["http://gramps-project.org"],
category = ("Relationships", _("Relationships")),
viewclass = 'RelationshipView',
order = START,
)
register(VIEW,
id = 'pedigreeview',
name = _("Pedigree"),
description = _("The view showing an ancestor pedigree of the selected person"),
version = '1.0',
gramps_target_version = MODULE_VERSION,
status = STABLE,
fname = 'pedigreeview.py',
authors = ["The Gramps project"],
authors_email = ["http://gramps-project.org"],
category = ("Ancestry", _("Charts")),
viewclass = 'PedigreeView',
order = START,
stock_icon = 'gramps-pedigree',
)
register(VIEW,
id = 'fanchartview',
name = _("Fan Chart"),
category = ("Ancestry", _("Charts")),
description = _("A view showing parents through a fanchart"),
version = '1.0',
gramps_target_version = MODULE_VERSION,
status = STABLE,
fname = 'fanchartview.py',
authors = ["Douglas S. Blank", "B. Malengier"],
authors_email = ["doug.blank@gmail.com", "benny.malengier@gmail.com"],
viewclass = 'FanChartView',
stock_icon = 'gramps-fanchart',
)
register(VIEW,
id = 'fanchartdescview',
name = _("Descendant Fan"),
category = ("Ancestry", _("Charts")),
description = _("Showing descendants through a fanchart"),
version = '1.0',
gramps_target_version = MODULE_VERSION,
status = STABLE,
fname = 'fanchartdescview.py',
authors = ["B. Malengier"],
authors_email = ["benny.malengier@gmail.com"],
viewclass = 'FanChartDescView',
stock_icon = 'gramps-fanchartdesc',
)
register(VIEW,
id = 'personview',
name = _("Grouped People"),
description = _("The view showing all people in the Family Tree grouped per"
" family name"),
version = '1.0',
gramps_target_version = MODULE_VERSION,
status = STABLE,
fname = 'persontreeview.py',
authors = ["The Gramps project"],
authors_email = ["http://gramps-project.org"],
category = ("People", _("People")),
viewclass = 'PersonTreeView',
order = START,
stock_icon = 'gramps-tree-group',
)
register(VIEW,
id = 'personlistview',
name = _("People"),
description = _("The view showing all people in the Family Tree"
" in a flat list"),
version = '1.0',
gramps_target_version = MODULE_VERSION,
status = STABLE,
fname = 'personlistview.py',
authors = ["The Gramps project"],
authors_email = ["http://gramps-project.org"],
category = ("People", _("People")),
viewclass = 'PersonListView',
order = START,
stock_icon = 'gramps-tree-list',
)
register(VIEW,
id = 'placelistview',
name = _("Places"),
description = _("The view showing all the places of the Family Tree"),
version = '1.0',
gramps_target_version = MODULE_VERSION,
status = STABLE,
fname = 'placelistview.py',
authors = ["The Gramps project"],
authors_email = ["http://gramps-project.org"],
category = ("Places", _("Places")),
viewclass = 'PlaceListView',
order = START,
stock_icon = 'gramps-tree-list',
)
register(VIEW,
id = 'placetreeview',
name = _("Place Tree"),
description = _("A view displaying places in a tree format."),
version = '1.0',
gramps_target_version = MODULE_VERSION,
status = STABLE,
fname = 'placetreeview.py',
authors = ["Donald N. Allingham", "Gary Burton", "Nick Hall"],
authors_email = [""],
category = ("Places", _("Places")),
viewclass = 'PlaceTreeView',
stock_icon = 'gramps-tree-group',
)
register(VIEW,
id = 'repoview',
name = _("Repositories"),
description = _("The view showing all the repositories"),
version = '1.0',
gramps_target_version = MODULE_VERSION,
status = STABLE,
fname = 'repoview.py',
authors = ["The Gramps project"],
authors_email = ["http://gramps-project.org"],
category = ("Repositories", _("Repositories")),
viewclass = 'RepositoryView',
order = START,
)
register(VIEW,
id = 'sourceview',
name = _("Sources"),
description = _("The view showing all the sources"),
version = '1.0',
gramps_target_version = MODULE_VERSION,
status = STABLE,
fname = 'sourceview.py',
authors = ["The Gramps project"],
authors_email = ["http://gramps-project.org"],
category = ("Sources", _("Sources")),
viewclass = 'SourceView',
order = START,
stock_icon = 'gramps-tree-list',
)
register(VIEW,
id = 'citationlistview',
name = _("Citations"),
description = _("The view showing all the citations"),
version = '1.0',
gramps_target_version = MODULE_VERSION,
status = STABLE,
fname = 'citationlistview.py',
authors = ["The Gramps project"],
authors_email = ["http://gramps-project.org"],
category = ("Citations", _("Citations")),
viewclass = 'CitationListView',
order = START,
)
register(VIEW,
id = 'citationtreeview',
name = _("Citation Tree"),
description = _("A view displaying citations and sources in a tree format."),
version = '1.0',
gramps_target_version = MODULE_VERSION,
status = STABLE,
fname = 'citationtreeview.py',
authors = ["Tim G L Lyons", "Nick Hall"],
authors_email = [""],
category = ("Sources", _("Sources")),
viewclass = 'CitationTreeView',
stock_icon = 'gramps-tree-select',
)
| gpl-2.0 |
ackalker/ocrfeeder | src/ocrfeeder/odf/presentation.py | 96 | 2714 | # -*- coding: utf-8 -*-
# Copyright (C) 2006-2007 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
from namespaces import PRESENTATIONNS
from element import Element
# ODF 1.0 section 9.6 and 9.7
# Autogenerated
def AnimationGroup(**args):
return Element(qname = (PRESENTATIONNS,'animation-group'), **args)
def Animations(**args):
return Element(qname = (PRESENTATIONNS,'animations'), **args)
def DateTime(**args):
return Element(qname = (PRESENTATIONNS,'date-time'), **args)
def DateTimeDecl(**args):
return Element(qname = (PRESENTATIONNS,'date-time-decl'), **args)
def Dim(**args):
return Element(qname = (PRESENTATIONNS,'dim'), **args)
def EventListener(**args):
return Element(qname = (PRESENTATIONNS,'event-listener'), **args)
def Footer(**args):
return Element(qname = (PRESENTATIONNS,'footer'), **args)
def FooterDecl(**args):
return Element(qname = (PRESENTATIONNS,'footer-decl'), **args)
def Header(**args):
return Element(qname = (PRESENTATIONNS,'header'), **args)
def HeaderDecl(**args):
return Element(qname = (PRESENTATIONNS,'header-decl'), **args)
def HideShape(**args):
return Element(qname = (PRESENTATIONNS,'hide-shape'), **args)
def HideText(**args):
return Element(qname = (PRESENTATIONNS,'hide-text'), **args)
def Notes(**args):
return Element(qname = (PRESENTATIONNS,'notes'), **args)
def Placeholder(**args):
return Element(qname = (PRESENTATIONNS,'placeholder'), **args)
def Play(**args):
return Element(qname = (PRESENTATIONNS,'play'), **args)
def Settings(**args):
return Element(qname = (PRESENTATIONNS,'settings'), **args)
def Show(**args):
return Element(qname = (PRESENTATIONNS,'show'), **args)
def ShowShape(**args):
return Element(qname = (PRESENTATIONNS,'show-shape'), **args)
def ShowText(**args):
return Element(qname = (PRESENTATIONNS,'show-text'), **args)
def Sound(**args):
return Element(qname = (PRESENTATIONNS,'sound'), **args)
| gpl-3.0 |
phassoa/openelisglobal-core | liquibase/OE5.1/testCatalogKenya/Scripts/resultLimits.py | 15 | 4081 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
def get_split_names( name ):
split_name_list = name.split("/")
for i in range(0, len(split_name_list)):
split_name_list[i] = split_name_list[i].strip()
return split_name_list
def esc_char(name):
if "'" in name:
return "$$" + name + "$$"
else:
return "'" + name + "'"
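# e.g. esc_char("O'Neill") returns $$O'Neill$$ (PostgreSQL dollar-quoting,
# safe for embedded apostrophes) while esc_char("CD4") returns 'CD4'.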
test_names = []
sample_types = []
type = []
result_type = []
norm_min = []
norm_max = []
valid_min = []
valid_max = []
dict_normal = []
dict_normal_results =[]
descriptions = []
name_file = open('testName.txt','r')
sample_type_file = open("sampleType.txt")
result_type_file = open("resultType.txt", 'r')
norm_min_file = open("minNormal.txt", 'r')
norm_max_file = open("maxNormal.txt", 'r')
valid_min_file = open("minValid.txt", 'r')
valid_max_file = open("maxValid.txt", 'r')
dict_normal_file = open("dictNormal.txt", 'r')
result = open("output/resultLimitResults.txt", 'w')
for line in name_file:
test_names.append(line.strip())
name_file.close()
for line in sample_type_file:
sample_types.append(line.strip())
sample_type_file.close()
for line in result_type_file:
result_type.append(line.strip())
result_type_file.close()
for line in norm_min_file:
norm_min.append( line.strip())
norm_min_file.close()
for line in norm_max_file:
norm_max.append( line.strip())
norm_max_file.close()
for line in valid_min_file:
valid_min.append( line.strip())
valid_min_file.close()
for line in valid_max_file:
valid_max.append( line.strip())
valid_max_file.close()
for line in dict_normal_file:
dict_normal.append( line.strip())
dict_normal_file.close()
insert_head = "INSERT INTO result_limits( id, test_id, test_result_type_id, min_age, max_age, gender, low_normal, high_normal, low_valid, high_valid, lastupdated) \n\t"
insert_dict_head = "INSERT INTO result_limits( id, test_id, test_result_type_id, lastupdated, normal_dictionary_id) \n\t"
next_val = " VALUES ( nextval( 'result_limits_seq' ) "
select_type_ages = " (select id from clinlims.type_of_test_result where test_result_type = 'N' ) , 0, 'Infinity' "
select_dict_type = " (select id from clinlims.type_of_test_result where test_result_type = 'D' ) "
result.write("Copy the following into ResultLimits.sql\n")
for row in range(0, len(test_names)):
if result_type[row] == 'Numeric' or result_type[row] == 'numeric': #is this a new test with numeric results
description = esc_char(test_names[row] + "(" + sample_types[row] + ")")
if description not in descriptions:
split_min_limit = get_split_names(norm_min[row])
split_max_limit = get_split_names(norm_max[row])
for i in range(0, len(split_min_limit)):
descriptions.append(description)
result.write( insert_head )
result.write( next_val + ", ( select id from clinlims.test where description = " + description + " ) , \n\t\t\t" + select_type_ages + ", " )
result.write( "'gender' ," + split_min_limit[i] + "," + split_max_limit[i] + "," + valid_min[row] + "," + valid_max[row] + ", now() );\n")
elif result_type[row] == 'Select List' and len(dict_normal[row]) > 1: #is this a new test with dictionary
description = esc_char(test_names[row] + "(" + sample_types[row] + ")")
if description not in descriptions:
descriptions.append(description)
dict_normal_results.append( insert_dict_head )
dict_normal_results.append( next_val + ", ( select id from clinlims.test where description = " + description + " ) ," + select_dict_type + ", " )
dict_normal_results.append( " now() , (select max(id) from clinlims.dictionary where dict_entry = '" + dict_normal[row] + "' ) );\n")
result.write("\n\nCopy the following into DictResultLimits.sql\n")
for line in dict_normal_results:
result.write(line)
print "Done. Look for results in output/resultLimitResults.txt" | mpl-2.0 |
joeythesaint/yocto-autobuilder | lib/python2.7/site-packages/buildbot_slave-0.8.8-py2.7.egg/buildslave/monkeypatches/bug5079.py | 16 | 1886 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from twisted.spread import pb
from twisted.python import log
from twisted.spread.interfaces import IJellyable
def patch():
log.msg("Applying patch for http://twistedmatrix.com/trac/ticket/5079")
if not hasattr(pb, '_JellyableAvatarMixin'):
log.msg("..patch not applicable; please file a bug at buildbot.net")
else:
pb._JellyableAvatarMixin._cbLogin = _fixed_cbLogin
def _fixed_cbLogin(self, (interface, avatar, logout)):
"""
Ensure that the avatar to be returned to the client is jellyable and
set up disconnection notification to call the realm's logout object.
"""
if not IJellyable.providedBy(avatar):
avatar = pb.AsReferenceable(avatar, "perspective")
puid = avatar.processUniqueID()
# only call logout once, whether the connection is dropped (disconnect)
# or a logout occurs (cleanup), and be careful to drop the reference to
# it in either case
logout = [ logout ]
def maybeLogout():
if not logout: return
fn = logout[0]
del logout[0]
fn()
self.broker._localCleanup[puid] = maybeLogout
self.broker.notifyOnDisconnect(maybeLogout)
return avatar
| gpl-2.0 |
jruiperezv/ANALYSE | common/djangoapps/terrain/stubs/tests/test_youtube_stub.py | 51 | 2492 | """
Unit test for stub YouTube implementation.
"""
import unittest
import requests
from ..youtube import StubYouTubeService
class StubYouTubeServiceTest(unittest.TestCase):
def setUp(self):
self.server = StubYouTubeService()
self.url = "http://127.0.0.1:{0}/".format(self.server.port)
self.server.config['time_to_response'] = 0.0
self.addCleanup(self.server.shutdown)
def test_unused_url(self):
response = requests.get(self.url + 'unused_url')
self.assertEqual("Unused url", response.content)
def test_video_url(self):
response = requests.get(
self.url + 'test_youtube/OEoXaMPEzfM?v=2&alt=jsonc&callback=callback_func'
)
# YouTube metadata for video `OEoXaMPEzfM` states that duration is 116.
self.assertEqual(
'callback_func({"data": {"duration": 116, "message": "I\'m youtube.", "id": "OEoXaMPEzfM"}})',
response.content
)
def test_transcript_url_equal(self):
response = requests.get(
self.url + 'test_transcripts_youtube/t__eq_exist'
)
self.assertEqual(
"".join([
'<?xml version="1.0" encoding="utf-8" ?>',
'<transcript><text start="1.0" dur="1.0">',
'Equal transcripts</text></transcript>'
]), response.content
)
def test_transcript_url_not_equal(self):
response = requests.get(
self.url + 'test_transcripts_youtube/t_neq_exist',
)
self.assertEqual(
"".join([
'<?xml version="1.0" encoding="utf-8" ?>',
'<transcript><text start="1.1" dur="5.5">',
'Transcripts sample, different that on server',
'</text></transcript>'
]), response.content
)
def test_transcript_not_found(self):
response = requests.get(self.url + 'test_transcripts_youtube/some_id')
self.assertEqual(404, response.status_code)
def test_reset_configuration(self):
reset_config_url = self.url + 'del_config'
# add some configuration data
self.server.config['test_reset'] = 'This is a reset config test'
# reset server configuration
response = requests.delete(reset_config_url)
self.assertEqual(response.status_code, 200)
# ensure that server config dict is empty after successful reset
self.assertEqual(self.server.config, {})
| agpl-3.0 |
minghuascode/pyj | library/pyjamas/ui/BuilderPanel.py | 1 | 3810 | """ Pyjamas UI BuilderPanel: takes a PyJsGlade builder spec and adds widgets
requested using the methods just like in any other Panel class.
Copyright (C) 2010 Luke Kenneth Casson Leighton <lkcl@lkcl.net>
The purpose of this class is to be able to set up a Panel of any type
that can be dynamically created using Builder, and then add child widgets
once again by their name as specified in the Builder spec file.
This class therefore has all of the usual Panel functions (add,
remove, insert, __iter__, getWidget) as well as those required
for it to be instantiable via Builder itself (!) such as
addIndexedItem, getIndex and getIndexedChild.
"""
from pyjamas.ui.BuilderWidget import BuilderWidget
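# A minimal usage sketch (the builder spec and instance names below are
# hypothetical, as are the exact keyword arguments accepted by
# BuilderWidget's **kwargs constructor):
#
# panel = BuilderPanel(builder=b, instance_name='main_vpanel')
# label = panel.add('status_label') # add a child by its spec name
# panel.remove(label)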
class BuilderPanel(BuilderWidget):
def __init__(self, **kwargs):
self.panel_instance_name = None
BuilderWidget.__init__(self, **kwargs)
def add(self, child_instance_name, *args, **kwargs):
""" versatile adding-function, copes with:
HTMLPanel.add(widget, id)
HTMLTable.add(item, row, col)
HorizontalPanel.add(item)
VerticalPanel.add(item)
VerticalSplitPanel.add(item)
HorizontalSplitPanel.add(item)
DeckPanel.add(item)
TabPanel.add(item)
DockPanel.add(widget, direction)
StackPanel.add(widget, stackText, asHTML)
AbsolutePanel.add(widget, left, top)
FlowPanel.add(widget)
CaptionPanel.add(widget)
ScrollPanel.add(widget)
"""
widget = self.b.createInstance(child_instance_name, self.event_receiver)
self.getPanel().add(widget, *args, **kwargs)
return widget
def insert(self, child_instance_name, *args, **kwargs):
widget = self.b.createInstance(child_instance_name, self.event_receiver)
self.getPanel().insert(widget, *args, **kwargs)
return widget
def remove(self, widget, *args, **kwargs):
""" versatile removing-function, copes with:
HTMLPanel.remove(widget) # if it had one
HTMLTable.remove(item)
HorizontalPanel.remove(item)
VerticalPanel.remove(item)
VerticalSplitPanel.remove(item) # if it had one
HorizontalSplitPanel.remove(item) # if it had one
DeckPanel.remove(item)
TabPanel.remove(item)
DockPanel.remove(item)
StackPanel.remove(item, index=None)
AbsolutePanel.remove(item)
FlowPanel.add(widget)
"""
self.getPanel().remove(widget, *args, **kwargs)
def __iter__(self):
return self.b.__iter__()
def getChildren(self):
return self.b.getChildren()
def setPanelInstanceName(self, panel_instance_name):
self.panel_instance_name = panel_instance_name
def getPanel(self):
if self.panel_instance_name is None:
return self.widget
wids = self.b.widget_instances[self.instance_name]
return wids[self.panel_instance_name]
# these next three functions are part of the standard Builder API
# and are required for panels to be manageable by PyJsGlade.
def addIndexedItem(self, index, instance_name):
widget = self.b.createInstance(instance_name, self.event_receiver)
self.getPanel().addIndexedItem(index, widget)
def getIndexedChild(self, index):
return self.getPanel().getIndexedChild(index)
def getWidgetIndex(self, widget):
return self.getPanel().getWidgetIndex(widget)
def getWidget(self, *args):
return self.getPanel().getWidget(*args)
def getWidgetCount(self):
return self.getPanel().getWidgetCount()
def setWidgetPosition(self, *args):
return self.getPanel().setWidgetPosition(*args)
| apache-2.0 |
erikdejonge/newsrivr | daemons/python2/hn.py | 1 | 17227 | """
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import division
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import chr
from builtins import str
from builtins import range
from past.utils import old_div
from xml.sax.saxutils import escape
import urllib.request, urllib.parse, urllib.error, re, os
import html.parser, feedparser
from BeautifulSoup import BeautifulSoup, Comment
from pprint import pprint
import codecs
import sys
import html.entities
streamWriter = codecs.lookup("utf-8")[-1]
sys.stdout = streamWriter(sys.stdout)
HN_RSS_FEED = "http://news.ycombinator.com/rss"
negative_str = "([A-Z,a-z,0-9,-,_ ]*comments[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*comment[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*bcomments[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*meta[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*footer[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*footnote[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*foot[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*bottom[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*klasbox[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*side[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*inner[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*sidebar[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*hide[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*component[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*reactie[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*ad[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*ads[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*transcript[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*react[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*transcript[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*transcriptText[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*error[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*related[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*also[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*share[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*sideblock[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*policy[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*related[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*social[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*reflist[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*postmetadata[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*references[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*promo[A-Z,a-z,0-9,-,_ ]*)"
NEGATIVE = re.compile(negative_str)
super_negative_str = "([A-Z,a-z,0-9,-,_ ]*comment[A-Z,a-z,0-9,-,_ ]*)|"
super_negative_str += "([A-Z,a-z,0-9,-,_ ]*voting[A-Z,a-z,0-9,-,_ ]*)|"
super_negative_str += "([A-Z,a-z,0-9,-,_ ]*reactie[A-Z,a-z,0-9,-,_ ]*)|"
super_negative_str += "([A-Z,a-z,0-9,-,_ ]*reaction[A-Z,a-z,0-9,-,_ ]*)|"
super_negative_str += "([A-Z,a-z,0-9,-,_ ]*idgedragregelsusercontent[A-Z,a-z,0-9,-,_ ]*)|"
super_negative_str += "([A-Z,a-z,0-9,-,_ ]*vote[A-Z,a-z,0-9,-,_ ]*)"
SUPERNEGATIVE = re.compile(super_negative_str)
positive_str = "([A-Z,a-z,0-9,-,_ ]*summary[A-Z,a-z,0-9,-,_ ]*)|"
positive_str += "([A-Z,a-z,0-9,-,_ ]*post[A-Z,a-z,0-9,-,_ ]*)|"
positive_str += "([A-Z,a-z,0-9,-,_ ]*hentry[A-Z,a-z,0-9,-,_ ]*)|"
positive_str += "([A-Z,a-z,0-9,-,_ ]*entry[A-Z,a-z,0-9,-,_ ]*)|"
positive_str += "([A-Z,a-z,0-9,-,_ ]*content[A-Z,a-z,0-9,-,_ ]*)|"
positive_str += "([A-Z,a-z,0-9,-,_ ]*text[A-Z,a-z,0-9,-,_ ]*)|"
positive_str += "([A-Z,a-z,0-9,-,_ ]*tekst[A-Z,a-z,0-9,-,_ ]*)|"
positive_str += "([A-Z,a-z,0-9,-,_ ]*venue[A-Z,a-z,0-9,-,_ ]*)|"
positive_str += "([A-Z,a-z,0-9,-,_ ]*venueInfo[A-Z,a-z,0-9,-,_ ]*)|"
positive_str += "([A-Z,a-z,0-9,-,_ ]*venueDetails[A-Z,a-z,0-9,-,_ ]*)|"
positive_str += "([A-Z,a-z,0-9,-,_ ]*body[A-Z,a-z,0-9,-,_ ]*)|"
positive_str += "([A-Z,a-z,0-9,-,_ ]*bodycontent[A-Z,a-z,0-9,-,_ ]*)|"
positive_str += "([A-Z,a-z,0-9,-,_ ]*content permalink[A-Z,a-z,0-9,-,_ ]*)|"
positive_str += "([A-Z,a-z,0-9,-,_ ]*wrapper[A-Z,a-z,0-9,-,_ ]*)|"
positive_str += "([A-Z,a-z,0-9,-,_ ]*article[A-Z,a-z,0-9,-,_ ]*)|"
positive_str += "([A-Z,a-z,0-9,-,_ ]*articleblock[A-Z,a-z,0-9,-,_ ]*)|"
positive_str += "([A-Z,a-z,0-9,-,_ ]*text[A-Z,a-z,0-9,-,_ ]*)|"
positive_str += "([A-Z,a-z,0-9,-,_ ]*tekst[A-Z,a-z,0-9,-,_ ]*)|"
positive_str += "([A-Z,a-z,0-9,-,_ ]*lead[A-Z,a-z,0-9,-,_ ]*)|"
positive_str += "([A-Z,a-z,0-9,-,_ ]*leadarticle[A-Z,a-z,0-9,-,_ ]*)|"
positive_str += "([A-Z,a-z,0-9,-,_ ]*story[A-Z,a-z,0-9,-,_ ]*)|"
positive_str += "([A-Z,a-z,0-9,-,_ ]*permalink[A-Z,a-z,0-9,-,_ ]*)"
POSITIVE = re.compile(positive_str)
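# Example: an element with class="sidebar" matches NEGATIVE and is
# penalized in the scoring loop below, while class="article" matches
# POSITIVE and is boosted, steering extraction toward the main story body.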
PUNCTUATION = re.compile("""[!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~]""")
MAXLINKS = 50
def latin1_to_ascii (unicrap):
xlate={0xc0:'A', 0xc1:'A', 0xc2:'A', 0xc3:'A', 0xc4:'A', 0xc5:'A',
0xc6:'Ae', 0xc7:'C',
0xc8:'E', 0xc9:'E', 0xca:'E', 0xcb:'E',
0xcc:'I', 0xcd:'I', 0xce:'I', 0xcf:'I',
0xd0:'Th', 0xd1:'N',
0xd2:'O', 0xd3:'O', 0xd4:'O', 0xd5:'O', 0xd6:'O', 0xd8:'O',
0xd9:'U', 0xda:'U', 0xdb:'U', 0xdc:'U',
0xdd:'Y', 0xde:'th', 0xdf:'ss',
0xe0:'a', 0xe1:'a', 0xe2:'a', 0xe3:'a', 0xe4:'a', 0xe5:'a',
0xe6:'ae', 0xe7:'c',
0xe8:'e', 0xe9:'e', 0xea:'e', 0xeb:'e',
0xec:'i', 0xed:'i', 0xee:'i', 0xef:'i',
0xf0:'th', 0xf1:'n',
0xf2:'o', 0xf3:'o', 0xf4:'o', 0xf5:'o', 0xf6:'o', 0xf8:'o',
0xf9:'u', 0xfa:'u', 0xfb:'u', 0xfc:'u',
0xfd:'y', 0xfe:'th', 0xff:'y',
0xa1:'!', 0xa2:'{cent}', 0xa3:'{pound}', 0xa4:'{currency}',
0xa5:'{yen}', 0xa6:'|', 0xa7:'{section}', 0xa8:'{umlaut}',
0xa9:'{C}', 0xaa:'{^a}', 0xab:'<<', 0xac:'{not}',
0xad:'-', 0xae:'{R}', 0xaf:'_', 0xb0:'{degrees}',
0xb1:'{+/-}', 0xb2:'{^2}', 0xb3:'{^3}', 0xb4:"'",
0xb5:'{micro}', 0xb6:'{paragraph}', 0xb7:'*', 0xb8:'{cedilla}',
0xb9:'{^1}', 0xba:'{^o}', 0xbb:'>>',
0xbc:'{1/4}', 0xbd:'{1/2}', 0xbe:'{3/4}', 0xbf:'?',
0xd7:'*', 0xf7:'/'
}
r = ''
for i in unicrap:
if ord(i) in xlate:
r += xlate[ord(i)]
elif ord(i) >= 0x80:
pass
else:
r += str(i)
return r
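# e.g. latin1_to_ascii(u'\xe9t\xe9') -> 'ete' and
# latin1_to_ascii(u'\xbd') -> '{1/2}'; characters >= 0x80 with no
# mapping in xlate are silently dropped.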
def toUTF8(data):
try:
data = data.encode("utf-8")
except:
data = latin1_to_ascii(data)
return data
def text2simpleHtml(data):
data = data.replace("<h1"," <b").replace("</h1>","</b><br><br>")
data = data.replace("<h2"," <b").replace("</h2>","</b><br>")
data = data.replace("<h3>","").replace("</h3>","<br>")
VALID_TAGS = ["strong", "b", "i", "table", "th", "tr", "td", "a", "code", "em", "p", "ul", "li", "br"]
soup = BeautifulSoup(data)
for tag in soup.findAll(True):
if tag.name not in VALID_TAGS:
tag.hidden = True
return soup.renderContents()
def _text(node):
return " ".join(node.findAll(text=True))
def get_link_density(elem):
link_length = len("".join([i.text or "" for i in elem.findAll("a")]))
text_length = len(_text(elem))
return old_div(float(link_length), max(text_length, 1))
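# e.g. an element whose text is 200 chars with 150 of them inside <a>
# tags has a link density of 0.75; callers below treat anything over
# roughly 0.3-0.5 as navigation rather than article content.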
def removeFrontBreaks(s):
try:
soup = BeautifulSoup(s)
whitespace = True
for tag in soup.findAll(True):
tagname = str(tag.name)
if tagname != "br" and tagname != "p":
whitespace = False
if (tagname == "br" or tagname == "p") and whitespace:
tag.extract()
return str(soup).strip()
except Exception as e:
clog(e)
return s
def convertentity(m):
"""Convert a HTML entity into normal string (ISO-8859-1)"""
if m.group(1)=='#':
try:
return chr(int(m.group(2)))
except ValueError:
return '&#%s;' % m.group(2)
try:
return html.entities.entitydefs[m.group(2)]
except KeyError:
return '&%s;' % m.group(2)
def unquotehtml(s):
"""Convert a HTML quoted string into normal string (ISO-8859-1).
Works with &#XX; and with > etc."""
return re.sub(r'&(#?)(.+?);',convertentity,s)
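# e.g. unquotehtml('&gt; &#65;') -> '> A'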
def getNumLinks(s):
try:
cnt = 0
soup = BeautifulSoup(s)
for a in soup.findAll("a"):
if "href" in a:
#print a
cnt += 1
return cnt
except:
return 0
def removeEmptyParas(html):
foundempty = False
soup = BeautifulSoup(html)
for p in soup.findAll("p"):
if "id" in p:
if "error_" in p["id"]:
p.extract()
if 0==len(p.text.strip().replace("\n", "")):
if foundempty:
p.extract()
foundempty = True
else:
foundempty = False
return soup.renderContents()
def removeEmptyLis(html):
soup = BeautifulSoup(html)
for li in soup.findAll("li"):
for a in li.findAll("a"):
if len(a.contents)>0:
if len(a.contents[0])<5:
a.extract()
if len(li.renderContents().strip())==0:
li.extract()
else:
for x in li.findAll():
if len(x.renderContents().strip())==0:
li.extract()
for ul in soup.findAll("ul"):
if 0==len(ul.findAll("li")):
ul.extract()
return soup.renderContents()
def removeExtraBreaks(s):
try:
l = []
brcnt = 0
soup = BeautifulSoup(s)
for tag in soup.findAll():
if tag.name=="p":
if len(tag.text.strip().replace("\n", ""))<1:
tag.extract()
brcnt += 1
if tag.name=="br":
brcnt += 1
if brcnt>1:
tag.extract()
else:
brcnt = 0
return str(soup)
except Exception as e:
clog(e)
return s
def grabContent(link, page_html):
# the markup parameter must not be named "html": that name would shadow
# the imported html module and break the HTMLParseError handlers below
if ">" in page_html:
page_html = unquotehtml(page_html)
page_html = "<!DOCTYPE html><html><head><meta charset=\"utf-8\"></head><body>"+page_html+"</body></html>"
#open("usedforscoring.html", "w").write(page_html)
#exit(1)
replaceBrs = re.compile("<br */? *>[ \r\n]*<br */? *>")
page_html = re.sub(replaceBrs, "</p><p>", page_html)
try:
soup = BeautifulSoup(page_html)
except html.parser.HTMLParseError as e:
try:
soup = BeautifulSoup(text2simpleHtml(page_html))
except html.parser.HTMLParseError:
return ""
#print str(soup)
# REMOVE SCRIPTS
for s in soup.findAll("div"):
if get_link_density(s)>0.5 and len(s.renderContents())>1000:
s.extract()
if "id" in s:
if SUPERNEGATIVE.match(str(s["id"]).lower()):
s.extract()
if "class" in s:
if SUPERNEGATIVE.match(str(s["class"]).lower()):
s.extract()
for s in soup.findAll("script"):
s.extract()
for a in soup.findAll("a"):
if "href" in a:
if "javascript:" in a["href"]:
a.extract()
if "onclick" in a:
if "return " in a["onclick"]:
a.extract()
allParagraphs = soup.findAll("p")
topParent = None
parents = []
for paragraph in allParagraphs:
parent = paragraph.parent
if (parent not in parents):
parents.append(parent)
parent.score = 0
if ("class" in parent):
if (NEGATIVE.match(parent["class"].lower())):
#print parent["class"]
if len(parent.findAll('a'))>MAXLINKS:
parent.score -= 500
parent.score -= 50
if (POSITIVE.match(parent["class"].lower())):
if len(parent.findAll('a'))<MAXLINKS:
parent.score += 25
else:
parent.score -= 150
parent.score += 50
if ("id" in parent):
if (NEGATIVE.match(parent["id"].lower())):
#print parent["id"]
if len(parent.findAll('a'))>MAXLINKS:
parent.score -= 500
parent.score -= 50
if (POSITIVE.match(parent["id"].lower())):
if len(parent.findAll('a'))<MAXLINKS:
parent.score += 25
else:
parent.score -= 150
parent.score += 50
if (parent.score is None):
parent.score = 0
innerText = paragraph.renderContents() #"".join(paragraph.findAll(text=True))
if (len(innerText) > 10):
parent.score += 1
if (len(innerText) > 300):
parent.score += 2
parent.score += innerText.count(",")*3
parent.score += innerText.count(".")*3
for parent in parents:
#print parent.score
#print str(parent )
#print "-------------"
if ((not topParent) or (parent.score > topParent.score)):
topParent = parent
if (not topParent):
return ""
# REMOVE LINK"D STYLES
styleLinks = soup.findAll("link", attrs={"type" : "text/css"})
for s in styleLinks:
s.extract()
# REMOVE ON PAGE STYLES
for s in soup.findAll("style"):
s.extract()
# CLEAN STYLES FROM ELEMENTS IN TOP PARENT
for ele in topParent.findAll(True):
del(ele["style"])
del(ele["class"])
#print str(ele)
#print "-----"
killDivs(topParent)
clean(topParent, "form")
clean(topParent, "object")
clean(topParent, "iframe")
fixLinks(topParent, link)
for s in topParent.findAll("ul"):
if get_link_density(s)>0.3:
s.extract()
lis = topParent.findAll("li")
if len(lis)>50:
for li in lis:
li.extract()
for li in lis:
if len(li)>1:
contents = str(li.contents[1]).replace("\n", "").replace(" ", "").replace("<br>", "").replace("<br/>", "").replace("<br />", "").replace("<p></p>", "")
#print "c", contents
if len(contents)==0:
li.extract()
comments = topParent.findAll(text=lambda text:isinstance(text, Comment))
[comment.extract() for comment in comments]
html2 = topParent.renderContents()
html2 = removeFrontBreaks(html2)
html2 = html2.replace("\n", " ")
for i in range(0, 10):
html2 = html2.replace("  ", " ")
html2 = html2.replace("<div></div>", "")
html2 = html2.replace("<p>\xc2\xa0</p>", "")
html2 = html2.replace("<p></p>", "<br/>")
html2 = html2.replace("<p><br /></p>", "")
#html2 = html2.replace("\xc2\xa9", "")#
html2 = re.sub(r'© (\w+.\w+)', "", html2)
html2 = re.sub(r'© (\w+)', "", html2)
html2 = re.sub(r'\xc2\xa9 (\w+.\w+)', "", html2)
html2 = re.sub(r'\xc2\xa9 (\w+)', "", html2)
#if getNumLinks(html2)>25:
# html2 = "html ignored, more then 25 links"
#print get_link_density(BeautifulSoup(html2))
html2 = removeEmptyLis(html2)
html2 = toUTF8(text2simpleHtml(html2)).replace("a href", "a target='blank' href")
html2 = removeEmptyParas(html2)
html2 = removeExtraBreaks(html2)
html2 = html2.replace("</strong>", "</strong><br/>")
html2 = html2.replace("</b>", "</b><br/>")
#detect
return html2
def fixLinks(parent, link):
tags = parent.findAll(True)
for t in tags:
if ("href" in t):
t["href"] = urllib.parse.urljoin(link, t["href"])
if ("src" in t):
t["src"] = urllib.parse.urljoin(link, t["src"])
def clean(top, tag, minWords=10000):
tags = top.findAll(tag)
for t in tags:
if (t.renderContents().count(" ") < minWords):
t.extract()
def killDivs(parent):
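# Heuristic sketch: a div whose text has fewer than 10 commas and is
# dominated by images, list items or links (or has embeds, or no
# paragraphs at all) is assumed to be navigation/boilerplate and removed,
# unless it contains <pre> blocks.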
divs = parent.findAll("div")
for d in divs:
p = len(d.findAll("p"))
img = len(d.findAll("img"))
li = len(d.findAll("li"))
a = len(d.findAll("a"))
embed = len(d.findAll("embed"))
pre = len(d.findAll("pre"))
#code = len(d.findAll("code"))
if (d.renderContents().count(",") < 10):
if (pre == 0):# and (code == 0)):
if ((img > p ) or (li > p) or (a > p) or (p == 0) or (embed > 0)):
d.extract()
def upgradeLink(link):
link = link.encode("utf-8")
if (not (link.startswith("http://news.ycombinator.com") or link.endswith(".pdf"))):
linkFile = "upgraded/" + re.sub(PUNCTUATION, "_", link)
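# e.g. the page for http://example.com/a?b=1 is cached as
# upgraded/http___example_com_a_b_1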
if (os.path.exists(linkFile)):
return open(linkFile).read()
else:
content = ""
try:
html = urllib.request.urlopen(link).read()
content = grabContent(link, html)
filp = open(linkFile, "w")
filp.write(content)
filp.close()
except IOError:
pass
return content
else:
return ""
def upgradeFeed(feedUrl):
feedData = urllib.request.urlopen(feedUrl).read()
upgradedLinks = []
parsedFeed = feedparser.parse(feedData)
for entry in parsedFeed.entries:
upgradedLinks.append((entry, upgradeLink(entry.link)))
rss = """<rss version="2.0">
<channel>
<title>Hacker News</title>
<link>http://news.ycombinator.com/</link>
<description>Links for the intellectually curious, ranked by readers.</description>
"""
for entry, content in upgradedLinks:
rss += u"""
<item>
<title>%s</title>
<link>%s</link>
<comments>%s</comments>
<description>
<![CDATA[<a href="%s">Comments</a><br/>%s<br/><a href="%s">Comments</a>]]>
</description>
</item>
""" % (entry.title, escape(entry.link), escape(entry.comments), entry.comments, content.decode("utf-8"), entry.comments)
rss += """
</channel>
</rss>"""
return rss
def clog(s):
from time import gmtime, strftime
s = str(s)
print('\033[93m' + strftime("%Y-%m-%d %H:%M:%S", gmtime()) + ": " + s + '\033[0m')
if __name__ == "__main__":
c = open("usedforscoring.html", "r").read()
soup = BeautifulSoup(grabContent('x', c))
clog(soup.prettify())
| gpl-2.0 |
lmazuel/azure-sdk-for-python | azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2017_10_01/models/webhook_create_parameters.py | 2 | 2739 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class WebhookCreateParameters(Model):
"""The parameters for creating a webhook.
:param tags: The tags for the webhook.
:type tags: dict[str, str]
:param location: The location of the webhook. This cannot be changed after
the resource is created.
:type location: str
:param service_uri: The service URI for the webhook to post notifications.
:type service_uri: str
:param custom_headers: Custom headers that will be added to the webhook
notifications.
:type custom_headers: dict[str, str]
:param status: The status of the webhook at the time the operation was
called. Possible values include: 'enabled', 'disabled'
:type status: str or
~azure.mgmt.containerregistry.v2017_10_01.models.WebhookStatus
:param scope: The scope of repositories where the event can be triggered.
For example, 'foo:*' means events for all tags under repository 'foo'.
'foo:bar' means events for 'foo:bar' only. 'foo' is equivalent to
'foo:latest'. Empty means all events.
:type scope: str
:param actions: The list of actions that trigger the webhook to post
notifications.
:type actions: list[str or
~azure.mgmt.containerregistry.v2017_10_01.models.WebhookAction]
"""
_validation = {
'location': {'required': True},
'service_uri': {'required': True},
'actions': {'required': True},
}
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
'service_uri': {'key': 'properties.serviceUri', 'type': 'str'},
'custom_headers': {'key': 'properties.customHeaders', 'type': '{str}'},
'status': {'key': 'properties.status', 'type': 'str'},
'scope': {'key': 'properties.scope', 'type': 'str'},
'actions': {'key': 'properties.actions', 'type': '[str]'},
}
def __init__(self, location, service_uri, actions, tags=None, custom_headers=None, status=None, scope=None):
self.tags = tags
self.location = location
self.service_uri = service_uri
self.custom_headers = custom_headers
self.status = status
self.scope = scope
self.actions = actions
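# A minimal usage sketch (the URI and action values are placeholders;
# location, service_uri and actions are the required constructor fields):
#
# params = WebhookCreateParameters(
# location='westus',
# service_uri='https://example.com/hook',
# actions=['push'],
# scope='foo:*')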
| mit |
havard024/prego | venv/lib/python2.7/site-packages/unidecode/x0b7.py | 253 | 4833 | data = (
'ddwim', # 0x00
'ddwib', # 0x01
'ddwibs', # 0x02
'ddwis', # 0x03
'ddwiss', # 0x04
'ddwing', # 0x05
'ddwij', # 0x06
'ddwic', # 0x07
'ddwik', # 0x08
'ddwit', # 0x09
'ddwip', # 0x0a
'ddwih', # 0x0b
'ddyu', # 0x0c
'ddyug', # 0x0d
'ddyugg', # 0x0e
'ddyugs', # 0x0f
'ddyun', # 0x10
'ddyunj', # 0x11
'ddyunh', # 0x12
'ddyud', # 0x13
'ddyul', # 0x14
'ddyulg', # 0x15
'ddyulm', # 0x16
'ddyulb', # 0x17
'ddyuls', # 0x18
'ddyult', # 0x19
'ddyulp', # 0x1a
'ddyulh', # 0x1b
'ddyum', # 0x1c
'ddyub', # 0x1d
'ddyubs', # 0x1e
'ddyus', # 0x1f
'ddyuss', # 0x20
'ddyung', # 0x21
'ddyuj', # 0x22
'ddyuc', # 0x23
'ddyuk', # 0x24
'ddyut', # 0x25
'ddyup', # 0x26
'ddyuh', # 0x27
'ddeu', # 0x28
'ddeug', # 0x29
'ddeugg', # 0x2a
'ddeugs', # 0x2b
'ddeun', # 0x2c
'ddeunj', # 0x2d
'ddeunh', # 0x2e
'ddeud', # 0x2f
'ddeul', # 0x30
'ddeulg', # 0x31
'ddeulm', # 0x32
'ddeulb', # 0x33
'ddeuls', # 0x34
'ddeult', # 0x35
'ddeulp', # 0x36
'ddeulh', # 0x37
'ddeum', # 0x38
'ddeub', # 0x39
'ddeubs', # 0x3a
'ddeus', # 0x3b
'ddeuss', # 0x3c
'ddeung', # 0x3d
'ddeuj', # 0x3e
'ddeuc', # 0x3f
'ddeuk', # 0x40
'ddeut', # 0x41
'ddeup', # 0x42
'ddeuh', # 0x43
'ddyi', # 0x44
'ddyig', # 0x45
'ddyigg', # 0x46
'ddyigs', # 0x47
'ddyin', # 0x48
'ddyinj', # 0x49
'ddyinh', # 0x4a
'ddyid', # 0x4b
'ddyil', # 0x4c
'ddyilg', # 0x4d
'ddyilm', # 0x4e
'ddyilb', # 0x4f
'ddyils', # 0x50
'ddyilt', # 0x51
'ddyilp', # 0x52
'ddyilh', # 0x53
'ddyim', # 0x54
'ddyib', # 0x55
'ddyibs', # 0x56
'ddyis', # 0x57
'ddyiss', # 0x58
'ddying', # 0x59
'ddyij', # 0x5a
'ddyic', # 0x5b
'ddyik', # 0x5c
'ddyit', # 0x5d
'ddyip', # 0x5e
'ddyih', # 0x5f
'ddi', # 0x60
'ddig', # 0x61
'ddigg', # 0x62
'ddigs', # 0x63
'ddin', # 0x64
'ddinj', # 0x65
'ddinh', # 0x66
'ddid', # 0x67
'ddil', # 0x68
'ddilg', # 0x69
'ddilm', # 0x6a
'ddilb', # 0x6b
'ddils', # 0x6c
'ddilt', # 0x6d
'ddilp', # 0x6e
'ddilh', # 0x6f
'ddim', # 0x70
'ddib', # 0x71
'ddibs', # 0x72
'ddis', # 0x73
'ddiss', # 0x74
'dding', # 0x75
'ddij', # 0x76
'ddic', # 0x77
'ddik', # 0x78
'ddit', # 0x79
'ddip', # 0x7a
'ddih', # 0x7b
'ra', # 0x7c
'rag', # 0x7d
'ragg', # 0x7e
'rags', # 0x7f
'ran', # 0x80
'ranj', # 0x81
'ranh', # 0x82
'rad', # 0x83
'ral', # 0x84
'ralg', # 0x85
'ralm', # 0x86
'ralb', # 0x87
'rals', # 0x88
'ralt', # 0x89
'ralp', # 0x8a
'ralh', # 0x8b
'ram', # 0x8c
'rab', # 0x8d
'rabs', # 0x8e
'ras', # 0x8f
'rass', # 0x90
'rang', # 0x91
'raj', # 0x92
'rac', # 0x93
'rak', # 0x94
'rat', # 0x95
'rap', # 0x96
'rah', # 0x97
'rae', # 0x98
'raeg', # 0x99
'raegg', # 0x9a
'raegs', # 0x9b
'raen', # 0x9c
'raenj', # 0x9d
'raenh', # 0x9e
'raed', # 0x9f
'rael', # 0xa0
'raelg', # 0xa1
'raelm', # 0xa2
'raelb', # 0xa3
'raels', # 0xa4
'raelt', # 0xa5
'raelp', # 0xa6
'raelh', # 0xa7
'raem', # 0xa8
'raeb', # 0xa9
'raebs', # 0xaa
'raes', # 0xab
'raess', # 0xac
'raeng', # 0xad
'raej', # 0xae
'raec', # 0xaf
'raek', # 0xb0
'raet', # 0xb1
'raep', # 0xb2
'raeh', # 0xb3
'rya', # 0xb4
'ryag', # 0xb5
'ryagg', # 0xb6
'ryags', # 0xb7
'ryan', # 0xb8
'ryanj', # 0xb9
'ryanh', # 0xba
'ryad', # 0xbb
'ryal', # 0xbc
'ryalg', # 0xbd
'ryalm', # 0xbe
'ryalb', # 0xbf
'ryals', # 0xc0
'ryalt', # 0xc1
'ryalp', # 0xc2
'ryalh', # 0xc3
'ryam', # 0xc4
'ryab', # 0xc5
'ryabs', # 0xc6
'ryas', # 0xc7
'ryass', # 0xc8
'ryang', # 0xc9
'ryaj', # 0xca
'ryac', # 0xcb
'ryak', # 0xcc
'ryat', # 0xcd
'ryap', # 0xce
'ryah', # 0xcf
'ryae', # 0xd0
'ryaeg', # 0xd1
'ryaegg', # 0xd2
'ryaegs', # 0xd3
'ryaen', # 0xd4
'ryaenj', # 0xd5
'ryaenh', # 0xd6
'ryaed', # 0xd7
'ryael', # 0xd8
'ryaelg', # 0xd9
'ryaelm', # 0xda
'ryaelb', # 0xdb
'ryaels', # 0xdc
'ryaelt', # 0xdd
'ryaelp', # 0xde
'ryaelh', # 0xdf
'ryaem', # 0xe0
'ryaeb', # 0xe1
'ryaebs', # 0xe2
'ryaes', # 0xe3
'ryaess', # 0xe4
'ryaeng', # 0xe5
'ryaej', # 0xe6
'ryaec', # 0xe7
'ryaek', # 0xe8
'ryaet', # 0xe9
'ryaep', # 0xea
'ryaeh', # 0xeb
'reo', # 0xec
'reog', # 0xed
'reogg', # 0xee
'reogs', # 0xef
'reon', # 0xf0
'reonj', # 0xf1
'reonh', # 0xf2
'reod', # 0xf3
'reol', # 0xf4
'reolg', # 0xf5
'reolm', # 0xf6
'reolb', # 0xf7
'reols', # 0xf8
'reolt', # 0xf9
'reolp', # 0xfa
'reolh', # 0xfb
'reom', # 0xfc
'reob', # 0xfd
'reobs', # 0xfe
'reos', # 0xff
)
| mit |
jaywreddy/django | tests/defer_regress/models.py | 282 | 2692 | """
Regression tests for defer() / only() behavior.
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
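# These models back regression tests for queries such as
# Item.objects.defer("text") (fetch rows without loading the text column
# until it is accessed) and Item.objects.only("name") (the inverse: load
# only name plus the primary key).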
@python_2_unicode_compatible
class Item(models.Model):
name = models.CharField(max_length=15)
text = models.TextField(default="xyzzy")
value = models.IntegerField()
other_value = models.IntegerField(default=0)
def __str__(self):
return self.name
class RelatedItem(models.Model):
item = models.ForeignKey(Item, models.CASCADE)
class ProxyRelated(RelatedItem):
class Meta:
proxy = True
class Child(models.Model):
name = models.CharField(max_length=10)
value = models.IntegerField()
@python_2_unicode_compatible
class Leaf(models.Model):
name = models.CharField(max_length=10)
child = models.ForeignKey(Child, models.CASCADE)
second_child = models.ForeignKey(Child, models.SET_NULL, related_name="other", null=True)
value = models.IntegerField(default=42)
def __str__(self):
return self.name
class ResolveThis(models.Model):
num = models.FloatField()
name = models.CharField(max_length=16)
class Proxy(Item):
class Meta:
proxy = True
@python_2_unicode_compatible
class SimpleItem(models.Model):
name = models.CharField(max_length=15)
value = models.IntegerField()
def __str__(self):
return self.name
class Feature(models.Model):
item = models.ForeignKey(SimpleItem, models.CASCADE)
class SpecialFeature(models.Model):
feature = models.ForeignKey(Feature, models.CASCADE)
class OneToOneItem(models.Model):
item = models.OneToOneField(Item, models.CASCADE, related_name="one_to_one_item")
name = models.CharField(max_length=15)
class ItemAndSimpleItem(models.Model):
item = models.ForeignKey(Item, models.CASCADE)
simple = models.ForeignKey(SimpleItem, models.CASCADE)
class Profile(models.Model):
profile1 = models.CharField(max_length=1000, default='profile1')
class Location(models.Model):
location1 = models.CharField(max_length=1000, default='location1')
class Request(models.Model):
profile = models.ForeignKey(Profile, models.SET_NULL, null=True, blank=True)
location = models.ForeignKey(Location, models.CASCADE)
items = models.ManyToManyField(Item)
request1 = models.CharField(default='request1', max_length=1000)
request2 = models.CharField(default='request2', max_length=1000)
request3 = models.CharField(default='request3', max_length=1000)
request4 = models.CharField(default='request4', max_length=1000)
class Base(models.Model):
text = models.TextField()
class Derived(Base):
other_text = models.TextField()
| bsd-3-clause |
Russell-IO/ansible | lib/ansible/modules/network/cnos/cnos_factory.py | 35 | 5267 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
#
# Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module to Reset to factory settings of Lenovo Switches
# Lenovo Networking
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cnos_factory
author: "Dave Kasberg (@dkasberg)"
short_description: Reset the switch's startup configuration to default (factory) on devices running Lenovo CNOS
description:
- This module allows you to reset a switch's startup configuration. The method provides a way to reset the
startup configuration to its factory settings. This is helpful when you want to move the switch to another
topology as a new network device.
This module uses SSH to manage network device configuration.
The results of the operation can be viewed in results directory.
For more information about this module from Lenovo and customizing it usage for your
use cases, please visit U(http://systemx.lenovofiles.com/help/index.jsp?topic=%2Fcom.lenovo.switchmgt.ansible.doc%2Fcnos_factory.html)
version_added: "2.3"
extends_documentation_fragment: cnos
options: {}
'''
EXAMPLES = '''
Tasks : The following are examples of using the module cnos_reload. These are written in the main.yml file of the tasks directory.
---
- name: Test Reset to factory
cnos_factory:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
outputfile: "./results/test_factory_{{ inventory_hostname }}_output.txt"
'''
RETURN = '''
msg:
description: Success or failure message
returned: always
type: string
sample: "Switch Startup Config is Reset to factory settings"
'''
import sys
try:
import paramiko
HAS_PARAMIKO = True
except ImportError:
HAS_PARAMIKO = False
import time
import socket
import array
import json
import re
try:
from ansible.module_utils.network.cnos import cnos
HAS_LIB = True
except:
HAS_LIB = False
from ansible.module_utils.basic import AnsibleModule
from collections import defaultdict
def main():
module = AnsibleModule(
argument_spec=dict(
outputfile=dict(required=True),
host=dict(required=True),
username=dict(required=True),
password=dict(required=True, no_log=True),
enablePassword=dict(required=False, no_log=True),
deviceType=dict(required=True),),
supports_check_mode=False)
username = module.params['username']
password = module.params['password']
enablePassword = module.params['enablePassword']
cliCommand = "save erase \n"
outputfile = module.params['outputfile']
hostIP = module.params['host']
deviceType = module.params['deviceType']
output = ""
if not HAS_PARAMIKO:
module.fail_json(msg='paramiko is required for this module')
# Create instance of SSHClient object
remote_conn_pre = paramiko.SSHClient()
# Automatically add untrusted hosts (make sure okay for security policy in your environment)
remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# initiate SSH connection with the switch
remote_conn_pre.connect(hostIP, username=username, password=password)
time.sleep(2)
# Use invoke_shell to establish an 'interactive session'
remote_conn = remote_conn_pre.invoke_shell()
time.sleep(2)
# Enable and enter configure terminal then send command
output = output + cnos.waitForDeviceResponse("\n", ">", 2, remote_conn)
output = output + cnos.enterEnableModeForDevice(enablePassword, 3, remote_conn)
# Make terminal length = 0
output = output + cnos.waitForDeviceResponse("terminal length 0\n", "#", 2, remote_conn)
# cnos.debugOutput(cliCommand)
# Send the CLi command
output = output + cnos.waitForDeviceResponse(cliCommand, "[n]", 2, remote_conn)
output = output + cnos.waitForDeviceResponse("y" + "\n", "#", 2, remote_conn)
# Save it into the file
file = open(outputfile, "a")
file.write(output)
file.close()
errorMsg = cnos.checkOutputForError(output)
if(errorMsg is None):
module.exit_json(changed=True, msg="Switch Startup Config is Reset to factory settings ")
else:
module.fail_json(msg=errorMsg)
if __name__ == '__main__':
main()
| gpl-3.0 |
ChristinaZografou/sympy | sympy/core/exprtools.py | 9 | 49175 | """Tools for manipulating of large commutative expressions. """
from __future__ import print_function, division
from sympy.core.add import Add
from sympy.core.compatibility import iterable, is_sequence, SYMPY_INTS, range
from sympy.core.mul import Mul, _keep_coeff
from sympy.core.power import Pow
from sympy.core.basic import Basic, preorder_traversal
from sympy.core.expr import Expr
from sympy.core.sympify import sympify
from sympy.core.numbers import Rational, Integer, Number, I
from sympy.core.singleton import S
from sympy.core.symbol import Dummy
from sympy.core.coreerrors import NonCommutativeExpression
from sympy.core.containers import Tuple, Dict
from sympy.utilities import default_sort_key
from sympy.utilities.iterables import (common_prefix, common_suffix,
variations, ordered)
from collections import defaultdict
_eps = Dummy(positive=True)
def _isnumber(i):
return isinstance(i, (SYMPY_INTS, float)) or i.is_Number
def _monotonic_sign(self):
"""Return the value closest to 0 that ``self`` may have if all symbols
are signed and the result is uniformly the same sign for all values of symbols.
If a symbol is only signed but not known to be an
integer or the result is 0 then a symbol representative of the sign of self
will be returned. Otherwise, None is returned if a) the sign could be positive
or negative or b) self is not in one of the following forms:
- L(x, y, ...) + A: a function linear in all symbols x, y, ... with an
additive constant; if A is zero then the function can be a monomial whose
sign is monotonic over the range of the variables, e.g. (x + 1)**3 if x is
nonnegative.
- A/L(x, y, ...) + B: the inverse of a function linear in all symbols x, y, ...
that does not have a sign change from positive to negative for any set
of values for the variables.
- M(x, y, ...) + A: a monomial M whose factors are all signed and a constant, A.
- A/M(x, y, ...) + B: the inverse of a monomial and constants A and B.
- P(x): a univariate polynomial
Examples
========
>>> from sympy.core.exprtools import _monotonic_sign as F
>>> from sympy import Dummy, S
>>> nn = Dummy(integer=True, nonnegative=True)
>>> p = Dummy(integer=True, positive=True)
>>> p2 = Dummy(integer=True, positive=True)
>>> F(nn + 1)
1
>>> F(p - 1)
_nneg
>>> F(nn*p + 1)
1
>>> F(p2*p + 1)
2
>>> F(nn - 1) # could be negative, zero or positive
"""
if not self.is_real:
return
if (-self).is_Symbol:
rv = _monotonic_sign(-self)
return rv if rv is None else -rv
if not self.is_Add and self.as_numer_denom()[1].is_number:
s = self
if s.is_prime:
if s.is_odd:
return S(3)
else:
return S(2)
elif s.is_positive:
if s.is_even:
return S(2)
elif s.is_integer:
return S.One
else:
return _eps
elif s.is_negative:
if s.is_even:
return S(-2)
elif s.is_integer:
return S.NegativeOne
else:
return -_eps
if s.is_zero or s.is_nonpositive or s.is_nonnegative:
return S.Zero
return None
# univariate polynomial
free = self.free_symbols
if len(free) == 1:
if self.is_polynomial():
from sympy.polys.polytools import real_roots
from sympy.polys.polyroots import roots
from sympy.polys.polyerrors import PolynomialError
x = free.pop()
x0 = _monotonic_sign(x)
if x0 == _eps or x0 == -_eps:
x0 = S.Zero
if x0 is not None:
d = self.diff(x)
if d.is_number:
roots = []
else:
try:
roots = real_roots(d)
except (PolynomialError, NotImplementedError):
roots = [r for r in roots(d, x) if r.is_real]
y = self.subs(x, x0)
if x.is_nonnegative and all(r <= x0 for r in roots):
if y.is_nonnegative and d.is_positive:
if y:
return y if y.is_positive else Dummy('pos', positive=True)
else:
return Dummy('nneg', nonnegative=True)
if y.is_nonpositive and d.is_negative:
if y:
return y if y.is_negative else Dummy('neg', negative=True)
else:
return Dummy('npos', nonpositive=True)
elif x.is_nonpositive and all(r >= x0 for r in roots):
if y.is_nonnegative and d.is_negative:
if y:
return Dummy('pos', positive=True)
else:
return Dummy('nneg', nonnegative=True)
if y.is_nonpositive and d.is_positive:
if y:
return Dummy('neg', negative=True)
else:
return Dummy('npos', nonpositive=True)
else:
n, d = self.as_numer_denom()
den = None
if n.is_number:
den = _monotonic_sign(d)
elif not d.is_number:
if _monotonic_sign(n) is not None:
den = _monotonic_sign(d)
if den is not None and (den.is_positive or den.is_negative):
v = n*den
if v.is_positive:
return Dummy('pos', positive=True)
elif v.is_nonnegative:
return Dummy('nneg', nonnegative=True)
elif v.is_negative:
return Dummy('neg', negative=True)
elif v.is_nonpositive:
return Dummy('npos', nonpositive=True)
return None
# multivariate
c, a = self.as_coeff_Add()
v = None
if not a.is_polynomial():
# F/A or A/F where A is a number and F is a signed, rational monomial
n, d = a.as_numer_denom()
if not (n.is_number or d.is_number):
return
if (
a.is_Mul or a.is_Pow) and \
a.is_rational and \
all(p.exp.is_Integer for p in a.atoms(Pow) if p.is_Pow) and \
(a.is_positive or a.is_negative):
v = S(1)
for ai in Mul.make_args(a):
if ai.is_number:
v *= ai
continue
reps = {}
for x in ai.free_symbols:
reps[x] = _monotonic_sign(x)
if reps[x] is None:
return
v *= ai.subs(reps)
elif c:
# signed linear expression
if not any(p for p in a.atoms(Pow) if not p.is_number) and (a.is_nonpositive or a.is_nonnegative):
free = list(a.free_symbols)
p = {}
for i in free:
v = _monotonic_sign(i)
if v is None:
return
p[i] = v or (_eps if i.is_nonnegative else -_eps)
v = a.xreplace(p)
if v is not None:
rv = v + c
if v.is_nonnegative and rv.is_positive:
return rv.subs(_eps, 0)
if v.is_nonpositive and rv.is_negative:
return rv.subs(_eps, 0)
def decompose_power(expr):
"""
Decompose power into symbolic base and integer exponent.
This is strictly only valid if the exponent from which
the integer is extracted is itself an integer or the
base is positive. These conditions are assumed and not
checked here.
Examples
========
>>> from sympy.core.exprtools import decompose_power
>>> from sympy.abc import x, y
>>> decompose_power(x)
(x, 1)
>>> decompose_power(x**2)
(x, 2)
>>> decompose_power(x**(2*y))
(x**y, 2)
>>> decompose_power(x**(2*y/3))
(x**(y/3), 2)
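
The extracted integer can also be negative; these illustrative cases
follow directly from the extraction rules above:

>>> decompose_power(x**(-2*y))
(x**y, -2)
>>> decompose_power(x**(-2*y/3))
(x**(y/3), -2)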
"""
base, exp = expr.as_base_exp()
if exp.is_Number:
if exp.is_Rational:
if not exp.is_Integer:
base = Pow(base, Rational(1, exp.q))
exp = exp.p
else:
base, exp = expr, 1
else:
exp, tail = exp.as_coeff_Mul(rational=True)
if exp is S.NegativeOne:
base, exp = Pow(base, tail), -1
elif exp is not S.One:
tail = _keep_coeff(Rational(1, exp.q), tail)
base, exp = Pow(base, tail), exp.p
else:
base, exp = expr, 1
return base, exp
def decompose_power_rat(expr):
"""
Decompose power into symbolic base and rational exponent.
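
Examples
========

These cases are illustrative; the conventions match ``decompose_power``
except that a non-integer Rational exponent is kept as the exponent
rather than being folded into the base:

>>> from sympy.core.exprtools import decompose_power_rat
>>> from sympy import Rational
>>> from sympy.abc import x
>>> decompose_power_rat(x**2)
(x, 2)
>>> decompose_power_rat(x**Rational(1, 2))
(x, 1/2)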
"""
base, exp = expr.as_base_exp()
if exp.is_Number:
if not exp.is_Rational:
base, exp = expr, 1
else:
exp, tail = exp.as_coeff_Mul(rational=True)
if exp is S.NegativeOne:
base, exp = Pow(base, tail), -1
elif exp is not S.One:
tail = _keep_coeff(Rational(1, exp.q), tail)
base, exp = Pow(base, tail), exp.p
else:
base, exp = expr, 1
return base, exp
class Factors(object):
"""Efficient representation of ``f_1*f_2*...*f_n``."""
__slots__ = ['factors', 'gens']
def __init__(self, factors=None): # Factors
"""Initialize Factors from dict or expr.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x
>>> from sympy import I
>>> e = 2*x**3
>>> Factors(e)
Factors({2: 1, x: 3})
>>> Factors(e.as_powers_dict())
Factors({2: 1, x: 3})
>>> f = _
>>> f.factors # underlying dictionary
{2: 1, x: 3}
>>> f.gens # base of each factor
frozenset([2, x])
>>> Factors(0)
Factors({0: 1})
>>> Factors(I)
Factors({I: 1})
Notes
=====
Although a dictionary can be passed, only minimal checking is
performed: powers of -1 and I are made canonical.
"""
if isinstance(factors, (SYMPY_INTS, float)):
factors = S(factors)
if isinstance(factors, Factors):
factors = factors.factors.copy()
elif factors is None or factors is S.One:
factors = {}
elif factors is S.Zero or factors == 0:
factors = {S.Zero: S.One}
elif isinstance(factors, Number):
n = factors
factors = {}
if n < 0:
factors[S.NegativeOne] = S.One
n = -n
if n is not S.One:
if n.is_Float or n.is_Integer or n is S.Infinity:
factors[n] = S.One
elif n.is_Rational:
# since we're processing Numbers, the denominator is
# stored with a negative exponent; all other factors
# are left as they are.
if n.p != 1:
factors[Integer(n.p)] = S.One
factors[Integer(n.q)] = S.NegativeOne
else:
raise ValueError('Expected Float|Rational|Integer, not %s' % n)
elif isinstance(factors, Basic) and not factors.args:
factors = {factors: S.One}
elif isinstance(factors, Expr):
c, nc = factors.args_cnc()
i = c.count(I)
for _ in range(i):
c.remove(I)
factors = dict(Mul._from_args(c).as_powers_dict())
if i:
factors[I] = S.One*i
if nc:
factors[Mul(*nc, evaluate=False)] = S.One
else:
factors = factors.copy() # /!\ should be dict-like
# tidy up -/+1 and I exponents if Rational
handle = []
for k in factors:
if k is I or k in (-1, 1):
handle.append(k)
if handle:
i1 = S.One
for k in handle:
if not _isnumber(factors[k]):
continue
i1 *= k**factors.pop(k)
if i1 is not S.One:
for a in i1.args if i1.is_Mul else [i1]: # at worst, -1.0*I*(-1)**e
if a is S.NegativeOne:
factors[a] = S.One
elif a is I:
factors[I] = S.One
elif a.is_Pow:
if S.NegativeOne not in factors:
factors[S.NegativeOne] = S.Zero
factors[S.NegativeOne] += a.exp
elif a == 1:
factors[a] = S.One
elif a == -1:
factors[-a] = S.One
factors[S.NegativeOne] = S.One
else:
raise ValueError('unexpected factor in i1: %s' % a)
self.factors = factors
try:
self.gens = frozenset(factors.keys())
except AttributeError:
raise TypeError('expecting Expr or dictionary')
def __hash__(self): # Factors
keys = tuple(ordered(self.factors.keys()))
values = [self.factors[k] for k in keys]
return hash((keys, values))
def __repr__(self): # Factors
return "Factors({%s})" % ', '.join(
['%s: %s' % (k, v) for k, v in ordered(self.factors.items())])
@property
def is_zero(self): # Factors
"""
>>> from sympy.core.exprtools import Factors
>>> Factors(0).is_zero
True
"""
f = self.factors
return len(f) == 1 and S.Zero in f
@property
def is_one(self): # Factors
"""
>>> from sympy.core.exprtools import Factors
>>> Factors(1).is_one
True
"""
return not self.factors
def as_expr(self): # Factors
"""Return the underlying expression.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x, y
>>> Factors((x*y**2).as_powers_dict()).as_expr()
x*y**2
"""
args = []
for factor, exp in self.factors.items():
if exp != 1:
b, e = factor.as_base_exp()
if isinstance(exp, int):
e = _keep_coeff(Integer(exp), e)
elif isinstance(exp, Rational):
e = _keep_coeff(exp, e)
else:
e *= exp
args.append(b**e)
else:
args.append(factor)
return Mul(*args)
def mul(self, other): # Factors
"""Return Factors of ``self * other``.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x, y, z
>>> a = Factors((x*y**2).as_powers_dict())
>>> b = Factors((x*y/z).as_powers_dict())
>>> a.mul(b)
Factors({x: 2, y: 3, z: -1})
>>> a*b
Factors({x: 2, y: 3, z: -1})
"""
if not isinstance(other, Factors):
other = Factors(other)
if any(f.is_zero for f in (self, other)):
return Factors(S.Zero)
factors = dict(self.factors)
for factor, exp in other.factors.items():
if factor in factors:
exp = factors[factor] + exp
if not exp:
del factors[factor]
continue
factors[factor] = exp
return Factors(factors)
def normal(self, other):
"""Return ``self`` and ``other`` with ``gcd`` removed from each.
The only differences between this and the ``div`` method are that
this 1) is optimized for the case when there are few factors in
common and 2) does not raise an error if ``other`` is zero.
See Also
========
div
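
Examples
========

An illustrative case (same setup as in ``div``):

>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x, y, z
>>> a = Factors((x*y**2).as_powers_dict())
>>> b = Factors((x*y/z).as_powers_dict())
>>> a.normal(b)
(Factors({y: 1}), Factors({z: -1}))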
"""
if not isinstance(other, Factors):
other = Factors(other)
if other.is_zero:
return (Factors(), Factors(S.Zero))
if self.is_zero:
return (Factors(S.Zero), Factors())
self_factors = dict(self.factors)
other_factors = dict(other.factors)
for factor, self_exp in self.factors.items():
try:
other_exp = other.factors[factor]
except KeyError:
continue
exp = self_exp - other_exp
if not exp:
del self_factors[factor]
del other_factors[factor]
elif _isnumber(exp):
if exp > 0:
self_factors[factor] = exp
del other_factors[factor]
else:
del self_factors[factor]
other_factors[factor] = -exp
else:
r = self_exp.extract_additively(other_exp)
if r is not None:
if r:
self_factors[factor] = r
del other_factors[factor]
else: # should be handled already
del self_factors[factor]
del other_factors[factor]
else:
sc, sa = self_exp.as_coeff_Add()
if sc:
oc, oa = other_exp.as_coeff_Add()
diff = sc - oc
if diff > 0:
self_factors[factor] -= oc
other_exp = oa
elif diff < 0:
self_factors[factor] -= sc
other_factors[factor] -= sc
other_exp = oa - diff
else:
self_factors[factor] = sa
other_exp = oa
if other_exp:
other_factors[factor] = other_exp
else:
del other_factors[factor]
return Factors(self_factors), Factors(other_factors)
def div(self, other): # Factors
"""Return ``self`` and ``other`` with ``gcd`` removed from each.
This is optimized for the case when there are many factors in common.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x, y, z
>>> from sympy import S
>>> a = Factors((x*y**2).as_powers_dict())
>>> a.div(a)
(Factors({}), Factors({}))
>>> a.div(x*z)
(Factors({y: 2}), Factors({z: 1}))
The ``/`` operator only gives ``quo``:
>>> a/x
Factors({y: 2})
Factors treats its factors as though they are all in the numerator, so
if you violate this assumption the results will be correct but will
not strictly correspond to the numerator and denominator of the ratio:
>>> a.div(x/z)
(Factors({y: 2}), Factors({z: -1}))
Factors is also naive about bases: it does not attempt any denesting
of Rational-base terms, for example the following does not become
2**(2*x)/2.
>>> Factors(2**(2*x + 2)).div(S(8))
(Factors({2: 2*x + 2}), Factors({8: 1}))
factor_terms can clean up such Rational-bases powers:
>>> from sympy.core.exprtools import factor_terms
>>> n, d = Factors(2**(2*x + 2)).div(S(8))
>>> n.as_expr()/d.as_expr()
2**(2*x + 2)/8
>>> factor_terms(_)
2**(2*x)/2
"""
quo, rem = dict(self.factors), {}
if not isinstance(other, Factors):
other = Factors(other)
if other.is_zero:
raise ZeroDivisionError
if self.is_zero:
return (Factors(S.Zero), Factors())
for factor, exp in other.factors.items():
if factor in quo:
d = quo[factor] - exp
if _isnumber(d):
if d <= 0:
del quo[factor]
if d >= 0:
if d:
quo[factor] = d
continue
exp = -d
else:
r = quo[factor].extract_additively(exp)
if r is not None:
if r:
quo[factor] = r
else: # should be handled already
del quo[factor]
else:
other_exp = exp
sc, sa = quo[factor].as_coeff_Add()
if sc:
oc, oa = other_exp.as_coeff_Add()
diff = sc - oc
if diff > 0:
quo[factor] -= oc
other_exp = oa
elif diff < 0:
quo[factor] -= sc
other_exp = oa - diff
else:
quo[factor] = sa
other_exp = oa
if other_exp:
rem[factor] = other_exp
else:
assert factor not in rem
continue
rem[factor] = exp
return Factors(quo), Factors(rem)
def quo(self, other): # Factors
"""Return numerator Factor of ``self / other``.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x, y, z
>>> a = Factors((x*y**2).as_powers_dict())
>>> b = Factors((x*y/z).as_powers_dict())
>>> a.quo(b) # same as a/b
Factors({y: 1})
"""
return self.div(other)[0]
def rem(self, other): # Factors
"""Return denominator Factors of ``self / other``.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x, y, z
>>> a = Factors((x*y**2).as_powers_dict())
>>> b = Factors((x*y/z).as_powers_dict())
>>> a.rem(b)
Factors({z: -1})
>>> a.rem(a)
Factors({})
"""
return self.div(other)[1]
def pow(self, other): # Factors
"""Return self raised to a non-negative integer power.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x, y
>>> a = Factors((x*y**2).as_powers_dict())
>>> a**2
Factors({x: 2, y: 4})
"""
if isinstance(other, Factors):
other = other.as_expr()
if other.is_Integer:
other = int(other)
if isinstance(other, SYMPY_INTS) and other >= 0:
factors = {}
if other:
for factor, exp in self.factors.items():
factors[factor] = exp*other
return Factors(factors)
else:
raise ValueError("expected non-negative integer, got %s" % other)
def gcd(self, other): # Factors
"""Return Factors of ``gcd(self, other)``. The keys are
the intersection of factors with the minimum exponent for
each factor.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x, y, z
>>> a = Factors((x*y**2).as_powers_dict())
>>> b = Factors((x*y/z).as_powers_dict())
>>> a.gcd(b)
Factors({x: 1, y: 1})
"""
if not isinstance(other, Factors):
other = Factors(other)
if other.is_zero:
return Factors(self.factors)
factors = {}
for factor, exp in self.factors.items():
factor, exp = sympify(factor), sympify(exp)
if factor in other.factors:
lt = (exp - other.factors[factor]).is_negative
if lt == True:
factors[factor] = exp
elif lt == False:
factors[factor] = other.factors[factor]
return Factors(factors)
def lcm(self, other): # Factors
"""Return Factors of ``lcm(self, other)`` which are
the union of factors with the maximum exponent for
each factor.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x, y, z
>>> a = Factors((x*y**2).as_powers_dict())
>>> b = Factors((x*y/z).as_powers_dict())
>>> a.lcm(b)
Factors({x: 1, y: 2, z: -1})
"""
if not isinstance(other, Factors):
other = Factors(other)
if any(f.is_zero for f in (self, other)):
return Factors(S.Zero)
factors = dict(self.factors)
for factor, exp in other.factors.items():
if factor in factors:
exp = max(exp, factors[factor])
factors[factor] = exp
return Factors(factors)
def __mul__(self, other): # Factors
return self.mul(other)
def __divmod__(self, other): # Factors
return self.div(other)
def __div__(self, other): # Factors
return self.quo(other)
__truediv__ = __div__
def __mod__(self, other): # Factors
return self.rem(other)
def __pow__(self, other): # Factors
return self.pow(other)
def __eq__(self, other): # Factors
if not isinstance(other, Factors):
other = Factors(other)
return self.factors == other.factors
def __ne__(self, other): # Factors
return not self.__eq__(other)
class Term(object):
"""Efficient representation of ``coeff*(numer/denom)``. """
__slots__ = ['coeff', 'numer', 'denom']
def __init__(self, term, numer=None, denom=None): # Term
if numer is None and denom is None:
if not term.is_commutative:
raise NonCommutativeExpression(
'commutative expression expected')
coeff, factors = term.as_coeff_mul()
numer, denom = defaultdict(int), defaultdict(int)
for factor in factors:
base, exp = decompose_power(factor)
if base.is_Add:
cont, base = base.primitive()
coeff *= cont**exp
if exp > 0:
numer[base] += exp
else:
denom[base] += -exp
numer = Factors(numer)
denom = Factors(denom)
else:
coeff = term
if numer is None:
numer = Factors()
if denom is None:
denom = Factors()
self.coeff = coeff
self.numer = numer
self.denom = denom
def __hash__(self): # Term
return hash((self.coeff, self.numer, self.denom))
def __repr__(self): # Term
return "Term(%s, %s, %s)" % (self.coeff, self.numer, self.denom)
def as_expr(self): # Term
return self.coeff*(self.numer.as_expr()/self.denom.as_expr())
def mul(self, other): # Term
coeff = self.coeff*other.coeff
numer = self.numer.mul(other.numer)
denom = self.denom.mul(other.denom)
numer, denom = numer.normal(denom)
return Term(coeff, numer, denom)
def inv(self): # Term
return Term(1/self.coeff, self.denom, self.numer)
def quo(self, other): # Term
return self.mul(other.inv())
def pow(self, other): # Term
if other < 0:
return self.inv().pow(-other)
else:
return Term(self.coeff ** other,
self.numer.pow(other),
self.denom.pow(other))
def gcd(self, other): # Term
return Term(self.coeff.gcd(other.coeff),
self.numer.gcd(other.numer),
self.denom.gcd(other.denom))
def lcm(self, other): # Term
return Term(self.coeff.lcm(other.coeff),
self.numer.lcm(other.numer),
self.denom.lcm(other.denom))
def __mul__(self, other): # Term
if isinstance(other, Term):
return self.mul(other)
else:
return NotImplemented
def __div__(self, other): # Term
if isinstance(other, Term):
return self.quo(other)
else:
return NotImplemented
__truediv__ = __div__
def __pow__(self, other): # Term
if isinstance(other, SYMPY_INTS):
return self.pow(other)
else:
return NotImplemented
def __eq__(self, other): # Term
return (self.coeff == other.coeff and
self.numer == other.numer and
self.denom == other.denom)
def __ne__(self, other): # Term
return not self.__eq__(other)
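# Illustrative sketch of how Term decomposes an expression (x, y, z
# stand for ordinary commutative Symbols from sympy.abc):
#
#     >>> t = Term(4*x*y**2/z)
#     >>> t.coeff
#     4
#     >>> t.numer.factors, t.denom.factors
#     ({x: 1, y: 2}, {z: 1})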
def _gcd_terms(terms, isprimitive=False, fraction=True):
"""Helper function for :func:`gcd_terms`.
If ``isprimitive`` is True then the call to primitive
for an Add will be skipped. This is useful when the
content has already been extracted.
If ``fraction`` is True then the expression will appear over a common
denominator, the lcm of all term denominators.
"""
if isinstance(terms, Basic) and not isinstance(terms, Tuple):
terms = Add.make_args(terms)
terms = list(map(Term, [t for t in terms if t]))
# there is some simplification that may happen if we leave this
# here rather than duplicate it before the mapping of Term onto
# the terms
if len(terms) == 0:
return S.Zero, S.Zero, S.One
if len(terms) == 1:
cont = terms[0].coeff
numer = terms[0].numer.as_expr()
denom = terms[0].denom.as_expr()
else:
cont = terms[0]
for term in terms[1:]:
cont = cont.gcd(term)
for i, term in enumerate(terms):
terms[i] = term.quo(cont)
if fraction:
denom = terms[0].denom
for term in terms[1:]:
denom = denom.lcm(term.denom)
numers = []
for term in terms:
numer = term.numer.mul(denom.quo(term.denom))
numers.append(term.coeff*numer.as_expr())
else:
numers = [t.as_expr() for t in terms]
denom = Term(S(1)).numer
cont = cont.as_expr()
numer = Add(*numers)
denom = denom.as_expr()
if not isprimitive and numer.is_Add:
_cont, numer = numer.primitive()
cont *= _cont
return cont, numer, denom
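# Illustrative contract (a sketch, not a doctest): the common content
# is split off and the remainder is returned as numer/denom, e.g.
#
#     _gcd_terms(4*x + 8*x*y)  ->  (4*x, 2*y + 1, 1)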
def gcd_terms(terms, isprimitive=False, clear=True, fraction=True):
"""Compute the GCD of ``terms`` and put them together.
``terms`` can be an expression or a non-Basic sequence of expressions
which will be handled as though they are terms from a sum.
If ``isprimitive`` is True the _gcd_terms will not run the primitive
method on the terms.
``clear`` controls the removal of integers from the denominator of an Add
expression. When True (default), all numerical denominators will be cleared;
when False the denominators will be cleared only if all terms had numerical
denominators other than 1.
``fraction``, when True (default), will put the expression over a common
denominator.
Examples
========
>>> from sympy.core import gcd_terms
>>> from sympy.abc import x, y
>>> gcd_terms((x + 1)**2*y + (x + 1)*y**2)
y*(x + 1)*(x + y + 1)
>>> gcd_terms(x/2 + 1)
(x + 2)/2
>>> gcd_terms(x/2 + 1, clear=False)
x/2 + 1
>>> gcd_terms(x/2 + y/2, clear=False)
(x + y)/2
>>> gcd_terms(x/2 + 1/x)
(x**2 + 2)/(2*x)
>>> gcd_terms(x/2 + 1/x, fraction=False)
(x + 2/x)/2
>>> gcd_terms(x/2 + 1/x, fraction=False, clear=False)
x/2 + 1/x
>>> gcd_terms(x/2/y + 1/x/y)
(x**2 + 2)/(2*x*y)
>>> gcd_terms(x/2/y + 1/x/y, clear=False)
(x**2/2 + 1)/(x*y)
>>> gcd_terms(x/2/y + 1/x/y, clear=False, fraction=False)
(x/2 + 1/x)/y
The ``clear`` flag was ignored in this case because the returned
expression was a rational expression, not a simple sum.
See Also
========
factor_terms, sympy.polys.polytools.terms_gcd
"""
def mask(terms):
"""replace nc portions of each term with a unique Dummy symbols
and return the replacements to restore them"""
args = [(a, []) if a.is_commutative else a.args_cnc() for a in terms]
reps = []
for i, (c, nc) in enumerate(args):
if nc:
nc = Mul._from_args(nc)
d = Dummy()
reps.append((d, nc))
c.append(d)
args[i] = Mul._from_args(c)
else:
args[i] = c
return args, dict(reps)
isadd = isinstance(terms, Add)
addlike = isadd or not isinstance(terms, Basic) and \
is_sequence(terms, include=set) and \
not isinstance(terms, Dict)
if addlike:
if isadd: # i.e. an Add
terms = list(terms.args)
else:
terms = sympify(terms)
terms, reps = mask(terms)
cont, numer, denom = _gcd_terms(terms, isprimitive, fraction)
numer = numer.xreplace(reps)
coeff, factors = cont.as_coeff_Mul()
if not clear:
c, _coeff = coeff.as_coeff_Mul()
if not c.is_Integer and not clear and numer.is_Add:
n, d = c.as_numer_denom()
_numer = numer/d
if any(a.as_coeff_Mul()[0].is_Integer
for a in _numer.args):
numer = _numer
coeff = n*_coeff
return _keep_coeff(coeff, factors*numer/denom, clear=clear)
if not isinstance(terms, Basic):
return terms
if terms.is_Atom:
return terms
if terms.is_Mul:
c, args = terms.as_coeff_mul()
return _keep_coeff(c, Mul(*[gcd_terms(i, isprimitive, clear, fraction)
for i in args]), clear=clear)
def handle(a):
# don't treat internal args like terms of an Add
if not isinstance(a, Expr):
if isinstance(a, Basic):
return a.func(*[handle(i) for i in a.args])
return type(a)([handle(i) for i in a])
return gcd_terms(a, isprimitive, clear, fraction)
if isinstance(terms, Dict):
return Dict(*[(k, handle(v)) for k, v in terms.args])
return terms.func(*[handle(i) for i in terms.args])
def factor_terms(expr, radical=False, clear=False, fraction=False, sign=True):
"""Remove common factors from terms in all arguments without
changing the underlying structure of the expr. No expansion or
simplification (and no processing of non-commutatives) is performed.
If radical=True then a radical common to all terms will be factored
out of any Add sub-expressions of the expr.
If clear=False (default) then coefficients will not be separated
from a single Add if they can be distributed to leave one or more
terms with integer coefficients.
If fraction=True (default is False) then a common denominator will be
constructed for the expression.
If sign=True (default) then even if the only factor in common is a -1,
it will be factored out of the expression.
Examples
========
>>> from sympy import factor_terms, Symbol
>>> from sympy.abc import x, y
>>> factor_terms(x + x*(2 + 4*y)**3)
x*(8*(2*y + 1)**3 + 1)
>>> A = Symbol('A', commutative=False)
>>> factor_terms(x*A + x*A + x*y*A)
x*(y*A + 2*A)
When ``clear`` is False, a rational will only be factored out of an
Add expression if all terms of the Add have coefficients that are
fractions:
>>> factor_terms(x/2 + 1, clear=False)
x/2 + 1
>>> factor_terms(x/2 + 1, clear=True)
(x + 2)/2
If a -1 is all that can be factored out, to *not* factor it out, the
flag ``sign`` must be False:
>>> factor_terms(-x - y)
-(x + y)
>>> factor_terms(-x - y, sign=False)
-x - y
>>> factor_terms(-2*x - 2*y, sign=False)
-2*(x + y)
See Also
========
gcd_terms, sympy.polys.polytools.terms_gcd
"""
def do(expr):
is_iterable = iterable(expr)
if not isinstance(expr, Basic) or expr.is_Atom:
if is_iterable:
return type(expr)([do(i) for i in expr])
return expr
if expr.is_Pow or expr.is_Function or \
is_iterable or not hasattr(expr, 'args_cnc'):
args = expr.args
newargs = tuple([do(i) for i in args])
if newargs == args:
return expr
return expr.func(*newargs)
cont, p = expr.as_content_primitive(radical=radical, clear=clear)
if p.is_Add:
list_args = [do(a) for a in Add.make_args(p)]
# get a common negative (if there) which gcd_terms does not remove
if all(a.as_coeff_Mul()[0] < 0 for a in list_args):
cont = -cont
list_args = [-a for a in list_args]
# watch out for exp(-(x+2)) which gcd_terms will change to exp(-x-2)
special = {}
for i, a in enumerate(list_args):
b, e = a.as_base_exp()
if e.is_Mul and e != Mul(*e.args):
list_args[i] = Dummy()
special[list_args[i]] = a
# rebuild p not worrying about the order which gcd_terms will fix
p = Add._from_args(list_args)
p = gcd_terms(p,
isprimitive=True,
clear=clear,
fraction=fraction).xreplace(special)
elif p.args:
p = p.func(
*[do(a) for a in p.args])
rv = _keep_coeff(cont, p, clear=clear, sign=sign)
return rv
expr = sympify(expr)
return do(expr)
def _mask_nc(eq, name=None):
"""
Return ``eq`` with non-commutative objects replaced with Dummy
symbols. A dictionary that can be used to restore the original
values is returned: if it is None, the expression is noncommutative
and cannot be made commutative. The third value returned is a list
of any non-commutative symbols that appear in the returned equation.
``name``, if given, is the name that will be used with numbered Dummy
variables that replace the non-commutative objects; it is mainly
used for doctesting purposes.
Notes
=====
All non-commutative objects other than Symbols are replaced with
a non-commutative Symbol. Identical objects will be identified
by identical symbols.
If there is only 1 non-commutative object in an expression it will
be replaced with a commutative symbol. Otherwise, the non-commutative
entities are retained and the calling routine should handle
replacements in this case since some care must be taken to keep
track of the ordering of symbols when they occur within Muls.
Examples
========
>>> from sympy.physics.secondquant import Commutator, NO, F, Fd
>>> from sympy import symbols, Mul
>>> from sympy.core.exprtools import _mask_nc
>>> from sympy.abc import x, y
>>> A, B, C = symbols('A,B,C', commutative=False)
One nc-symbol:
>>> _mask_nc(A**2 - x**2, 'd')
(_d0**2 - x**2, {_d0: A}, [])
Multiple nc-symbols:
>>> _mask_nc(A**2 - B**2, 'd')
(A**2 - B**2, None, [A, B])
An nc-object with nc-symbols but no others outside of it:
>>> _mask_nc(1 + x*Commutator(A, B), 'd')
(_d0*x + 1, {_d0: Commutator(A, B)}, [])
>>> _mask_nc(NO(Fd(x)*F(y)), 'd')
(_d0, {_d0: NO(CreateFermion(x)*AnnihilateFermion(y))}, [])
Multiple nc-objects:
>>> eq = x*Commutator(A, B) + x*Commutator(A, C)*Commutator(A, B)
>>> _mask_nc(eq, 'd')
(x*_d0 + x*_d1*_d0, {_d0: Commutator(A, B), _d1: Commutator(A, C)}, [_d0, _d1])
Multiple nc-objects and nc-symbols:
>>> eq = A*Commutator(A, B) + B*Commutator(A, C)
>>> _mask_nc(eq, 'd')
(A*_d0 + B*_d1, {_d0: Commutator(A, B), _d1: Commutator(A, C)}, [_d0, _d1, A, B])
If there is an object that:
- doesn't contain nc-symbols
- but has arguments which derive from Basic, not Expr
- and doesn't define an _eval_is_commutative routine
then it may give False (or None) for the is_commutative test. Such
objects are also removed by this routine:
>>> from sympy import Basic
>>> eq = (1 + Mul(Basic(), Basic(), evaluate=False))
>>> eq.is_commutative
False
>>> _mask_nc(eq, 'd')
(_d0**2 + 1, {_d0: Basic()}, [])
"""
name = name or 'mask'
# Make Dummy() append sequential numbers to the name
def numbered_names():
i = 0
while True:
yield name + str(i)
i += 1
names = numbered_names()
def Dummy(*args, **kwargs):
from sympy import Dummy
return Dummy(next(names), *args, **kwargs)
expr = eq
if expr.is_commutative:
return eq, {}, []
# identify nc-objects; symbols and other
rep = []
nc_obj = set()
nc_syms = set()
pot = preorder_traversal(expr, keys=default_sort_key)
for i, a in enumerate(pot):
if any(a == r[0] for r in rep):
pot.skip()
elif not a.is_commutative:
if a.is_Symbol:
nc_syms.add(a)
elif not (a.is_Add or a.is_Mul or a.is_Pow):
if all(s.is_commutative for s in a.free_symbols):
rep.append((a, Dummy()))
else:
nc_obj.add(a)
pot.skip()
# If there is only one nc symbol or object, it can be factored regularly
# but polys is going to complain, so replace it with a Dummy.
if len(nc_obj) == 1 and not nc_syms:
rep.append((nc_obj.pop(), Dummy()))
elif len(nc_syms) == 1 and not nc_obj:
rep.append((nc_syms.pop(), Dummy()))
# Any remaining nc-objects will be replaced with an nc-Dummy and
# identified as an nc-Symbol to watch out for
nc_obj = sorted(nc_obj, key=default_sort_key)
for n in nc_obj:
nc = Dummy(commutative=False)
rep.append((n, nc))
nc_syms.add(nc)
expr = expr.subs(rep)
nc_syms = list(nc_syms)
nc_syms.sort(key=default_sort_key)
return expr, {v: k for k, v in rep} or None, nc_syms
def factor_nc(expr):
"""Return the factored form of ``expr`` while handling non-commutative
expressions.
Examples
========
>>> from sympy.core.exprtools import factor_nc
>>> from sympy import Symbol
>>> from sympy.abc import x
>>> A = Symbol('A', commutative=False)
>>> B = Symbol('B', commutative=False)
>>> factor_nc((x**2 + 2*A*x + A**2).expand())
(x + A)**2
>>> factor_nc(((x + A)*(x + B)).expand())
(x + A)*(x + B)
"""
from sympy.simplify.simplify import powsimp
from sympy.polys import gcd, factor
def _pemexpand(expr):
"Expand with the minimal set of hints necessary to check the result."
return expr.expand(deep=True, mul=True, power_exp=True,
power_base=False, basic=False, multinomial=True, log=False)
expr = sympify(expr)
if not isinstance(expr, Expr) or not expr.args:
return expr
if not expr.is_Add:
return expr.func(*[factor_nc(a) for a in expr.args])
expr, rep, nc_symbols = _mask_nc(expr)
if rep:
return factor(expr).subs(rep)
else:
args = [a.args_cnc() for a in Add.make_args(expr)]
c = g = l = r = S.One
hit = False
# find any commutative gcd term
for i, a in enumerate(args):
if i == 0:
c = Mul._from_args(a[0])
elif a[0]:
c = gcd(c, Mul._from_args(a[0]))
else:
c = S.One
if c is not S.One:
hit = True
c, g = c.as_coeff_Mul()
if g is not S.One:
for i, (cc, _) in enumerate(args):
cc = list(Mul.make_args(Mul._from_args(list(cc))/g))
args[i][0] = cc
for i, (cc, _) in enumerate(args):
cc[0] = cc[0]/c
args[i][0] = cc
# find any noncommutative common prefix
for i, a in enumerate(args):
if i == 0:
n = a[1][:]
else:
n = common_prefix(n, a[1])
if not n:
# is there a power that can be extracted?
if not args[0][1]:
break
b, e = args[0][1][0].as_base_exp()
ok = False
if e.is_Integer:
for t in args:
if not t[1]:
break
bt, et = t[1][0].as_base_exp()
if et.is_Integer and bt == b:
e = min(e, et)
else:
break
else:
ok = hit = True
l = b**e
il = b**-e
for i, a in enumerate(args):
args[i][1][0] = il*args[i][1][0]
break
if not ok:
break
else:
hit = True
lenn = len(n)
l = Mul(*n)
for i, a in enumerate(args):
args[i][1] = args[i][1][lenn:]
# find any noncommutative common suffix
for i, a in enumerate(args):
if i == 0:
n = a[1][:]
else:
n = common_suffix(n, a[1])
if not n:
# is there a power that can be extracted?
if not args[0][1]:
break
b, e = args[0][1][-1].as_base_exp()
ok = False
if e.is_Integer:
for t in args:
if not t[1]:
break
bt, et = t[1][-1].as_base_exp()
if et.is_Integer and bt == b:
e = min(e, et)
else:
break
else:
ok = hit = True
r = b**e
il = b**-e
for i, a in enumerate(args):
args[i][1][-1] = args[i][1][-1]*il
break
if not ok:
break
else:
hit = True
lenn = len(n)
r = Mul(*n)
for i, a in enumerate(args):
args[i][1] = a[1][:len(a[1]) - lenn]
if hit:
mid = Add(*[Mul(*cc)*Mul(*nc) for cc, nc in args])
else:
mid = expr
# sort the symbols so the Dummys would appear in the same
# order as the original symbols, otherwise you may introduce
# a factor of -1, e.g. A**2 - B**2 -- {A: y, B: x} --> y**2 - x**2
# and the former factors into two terms, (A - B)*(A + B) while the
# latter factors into 3 terms, (-1)*(x - y)*(x + y)
rep1 = [(n, Dummy()) for n in sorted(nc_symbols, key=default_sort_key)]
unrep1 = [(v, k) for k, v in rep1]
unrep1.reverse()
new_mid, r2, _ = _mask_nc(mid.subs(rep1))
new_mid = powsimp(factor(new_mid))
new_mid = new_mid.subs(r2).subs(unrep1)
if new_mid.is_Pow:
return _keep_coeff(c, g*l*new_mid*r)
if new_mid.is_Mul:
# XXX TODO there should be a way to inspect what order the terms
# must be in and just select the plausible ordering without
# checking permutations
cfac = []
ncfac = []
for f in new_mid.args:
if f.is_commutative:
cfac.append(f)
else:
b, e = f.as_base_exp()
if e.is_Integer:
ncfac.extend([b]*e)
else:
ncfac.append(f)
pre_mid = g*Mul(*cfac)*l
target = _pemexpand(expr/c)
for s in variations(ncfac, len(ncfac)):
ok = pre_mid*Mul(*s)*r
if _pemexpand(ok) == target:
return _keep_coeff(c, ok)
# mid was an Add that didn't factor successfully
return _keep_coeff(c, g*l*mid*r)
| bsd-3-clause |
regispl/cassandra | pylib/cqlshlib/sslhandling.py | 85 | 3725 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import ConfigParser
import ssl
def ssl_settings(host, config_file, env=os.environ):
"""
Function which generates SSL setting for cassandra.Cluster
Params:
* host .........: hostname of Cassandra node.
* config_file ..: path to cqlsh config file (usually ~/.cqlshrc).
SSL factory will use, if set, certfile and validate
options in [ssl] section, as well as host to certfile
mapping in [certfiles] section.
* env ..........: environment variables. SSL factory will use, if passed,
SSL_CERTFILE and SSL_VALIDATE variables.
[certfiles] section is optional, 'validate' setting in [ssl] section is
optional too. If validation is enabled then SSL certfile must be provided
either in the config file or as an environment variable.
Environment variables override any options set in cqlsh config file.
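
For illustration only, a minimal config might look like this (the
paths and host below are hypothetical placeholders):

[ssl]
certfile = ~/keys/node.cert.pem
validate = true

[certfiles]
192.0.2.10 = ~/keys/node01.cert.pem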
"""
configs = ConfigParser.SafeConfigParser()
configs.read(config_file)
def get_option(section, option):
try:
return configs.get(section, option)
except ConfigParser.Error:
return None
ssl_validate = env.get('SSL_VALIDATE')
if ssl_validate is None:
ssl_validate = get_option('ssl', 'validate')
ssl_validate = ssl_validate is None or ssl_validate.lower() != 'false'
ssl_version_str = env.get('SSL_VERSION')
if ssl_version_str is None:
ssl_version_str = get_option('ssl', 'version')
if ssl_version_str is None:
ssl_version_str = "TLSv1"
ssl_version = getattr(ssl, "PROTOCOL_%s" % ssl_version_str, None)
if ssl_version is None:
sys.exit("%s is not a valid SSL protocol, please use one of SSLv23, "
"TLSv1, TLSv1.1, or TLSv1.2" % (ssl_version_str,))
ssl_certfile = env.get('SSL_CERTFILE')
if ssl_certfile is None:
ssl_certfile = get_option('certfiles', host)
if ssl_certfile is None:
ssl_certfile = get_option('ssl', 'certfile')
if ssl_validate and ssl_certfile is None:
sys.exit("Validation is enabled; SSL transport factory requires a valid certfile "
"to be specified. Please provide path to the certfile in [ssl] section "
"as 'certfile' option in %s (or use [certfiles] section) or set SSL_CERTFILE "
"environment variable." % (config_file,))
if ssl_certfile is not None:
ssl_certfile = os.path.expanduser(ssl_certfile)
userkey = get_option('ssl', 'userkey')
if userkey:
userkey = os.path.expanduser(userkey)
usercert = get_option('ssl', 'usercert')
if usercert:
usercert = os.path.expanduser(usercert)
return dict(ca_certs=ssl_certfile,
cert_reqs=ssl.CERT_REQUIRED if ssl_validate else ssl.CERT_NONE,
ssl_version=ssl_version,
keyfile=userkey, certfile=usercert)
| apache-2.0 |
looker/sentry | src/sentry/plugins/bases/issue2.py | 1 | 15435 | from __future__ import absolute_import
import six
from rest_framework.response import Response
from social_auth.models import UserSocialAuth
from django.conf import settings
from django.conf.urls import url
from django.core.urlresolvers import reverse
from django.utils.html import format_html
from sentry.api.serializers.models.plugin import PluginSerializer
# api compat
from sentry.exceptions import PluginError # NOQA
from sentry.models import Activity, Event, GroupMeta
from sentry.plugins import Plugin
from sentry.plugins.base.configuration import react_plugin_config
from sentry.plugins.endpoints import PluginGroupEndpoint
from sentry.signals import issue_tracker_used
from sentry.utils.auth import get_auth_providers
from sentry.utils.http import absolute_uri
from sentry.utils.safe import safe_execute
# TODO(dcramer): remove this in favor of GroupEndpoint
class IssueGroupActionEndpoint(PluginGroupEndpoint):
view_method_name = None
plugin = None
def _handle(self, request, group, *args, **kwargs):
GroupMeta.objects.populate_cache([group])
return getattr(self.plugin, self.view_method_name)(request, group, *args, **kwargs)
class IssueTrackingPlugin2(Plugin):
auth_provider = None
allowed_actions = ('create', 'link', 'unlink')
# we default this to None to support legacy integrations, but newer style
# should explicitly call out what is stored
issue_fields = None
# issue_fields = frozenset(['id', 'title', 'url'])
def configure(self, project, request):
return react_plugin_config(self, project, request)
def get_plugin_type(self):
return 'issue-tracking'
def has_project_conf(self):
return True
def get_group_body(self, request, group, event, **kwargs):
result = []
for interface in six.itervalues(event.interfaces):
output = safe_execute(interface.to_string, event, _with_transaction=False)
if output:
result.append(output)
return '\n\n'.join(result)
def get_group_description(self, request, group, event):
output = [
absolute_uri(group.get_absolute_url()),
]
body = self.get_group_body(request, group, event)
if body:
output.extend([
'',
'```',
body,
'```',
])
return '\n'.join(output)
def get_group_title(self, request, group, event):
return event.error()
def is_configured(self, request, project, **kwargs):
raise NotImplementedError
def get_group_urls(self):
_urls = []
for action in self.allowed_actions:
view_method_name = 'view_%s' % action
_urls.append(
url(
r'^%s/' % action,
PluginGroupEndpoint.as_view(
view=getattr(self, view_method_name),
),
)
)
return _urls
def get_auth_for_user(self, user, **kwargs):
"""
Return a ``UserSocialAuth`` object for the given user based on this plugin's ``auth_provider``.
"""
assert self.auth_provider, 'There is no auth provider configured for this plugin.'
if not user.is_authenticated():
return None
try:
return UserSocialAuth.objects.filter(user=user, provider=self.auth_provider)[0]
except IndexError:
return None
def needs_auth(self, request, project, **kwargs):
"""
Return ``True`` if the authenticated user needs to associate an auth service before
performing actions with this plugin.
"""
if self.auth_provider is None:
return False
if not request.user.is_authenticated():
return True
return not UserSocialAuth.objects.filter(
user=request.user, provider=self.auth_provider
).exists()
def get_new_issue_fields(self, request, group, event, **kwargs):
"""
If overriding, supported properties include 'readonly': true
"""
return [
{
'name': 'title',
'label': 'Title',
'default': self.get_group_title(request, group, event),
'type': 'text'
}, {
'name': 'description',
'label': 'Description',
'default': self.get_group_description(request, group, event),
'type': 'textarea'
}
]
def get_link_existing_issue_fields(self, request, group, event, **kwargs):
return []
def _get_issue_url_compat(self, group, issue, **kwargs):
if self.issue_fields is None:
return self.get_issue_url(group, issue['id'])
return self.get_issue_url(group, issue)
def _get_issue_label_compat(self, group, issue, **kwargs):
if self.issue_fields is None:
return self.get_issue_label(group, issue['id'])
return self.get_issue_label(group, issue)
def get_issue_url(self, group, issue, **kwargs):
"""
Given an issue context (issue_id string or issue dict) return an absolute URL to the issue's details
page.
"""
raise NotImplementedError
def get_issue_label(self, group, issue, **kwargs):
"""
Given an issue context (issue_id string or issue dict) return a string representing the issue.
e.g. GitHub represents issues as GH-XXX
"""
if isinstance(issue, dict):
return '#{}'.format(issue['id'])
return '#{}'.format(issue)
def create_issue(self, request, group, form_data, **kwargs):
"""
Creates the issue on the remote service and returns an issue ID.
Returns ``{'id': '1', 'title': issue_title}``
"""
raise NotImplementedError
def link_issue(self, request, group, form_data, **kwargs):
"""
Can be overridden for any actions needed when linking issues
(like adding a comment to an existing issue).
Returns ``{'id': '1', 'title': issue_title}``
"""
pass
def has_auth_configured(self, **kwargs):
if not self.auth_provider:
return True
return self.auth_provider in get_auth_providers()
def validate_form(self, fields, form_data):
errors = {}
for field in fields:
if field.get('required', True) and not field.get('readonly'):
value = form_data.get(field['name'])
if value is None or value == '':
errors[field['name']] = u'%s is a required field.' % field['label']
return errors
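# Illustrative behavior (hypothetical field spec): a missing required
# value yields an error keyed by the field name, e.g.
#
#     fields = [{'name': 'title', 'label': 'Title', 'type': 'text'}]
#     validate_form(fields, {'title': ''})
#     # -> {'title': u'Title is a required field.'}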
def get_issue_field_map(self):
# XXX(dcramer): legacy support
conf_key = self.get_conf_key()
if self.issue_fields is None:
return {
'id': '{}:tid'.format(conf_key)
}
return {
key: '{}:issue_{}'.format(
conf_key,
key,
)
for key in self.issue_fields
}
def build_issue(self, group):
issue_field_map = self.get_issue_field_map()
issue = {}
for key, meta_name in six.iteritems(issue_field_map):
issue[key] = GroupMeta.objects.get_value(group, meta_name, None)
if not any(issue.values()):
return None
return issue
def has_linked_issue(self, group):
return bool(self.build_issue(group))
def unlink_issue(self, request, group, issue, **kwargs):
issue_field_map = self.get_issue_field_map()
for meta_name in six.itervalues(issue_field_map):
GroupMeta.objects.unset_value(group, meta_name)
return self.redirect(group.get_absolute_url())
def view_create(self, request, group, **kwargs):
auth_errors = self.check_config_and_auth(request, group)
if auth_errors:
return Response(auth_errors, status=400)
event = group.get_latest_event()
if event is None:
return Response({
'message': 'Unable to create issues: there are '
'no events associated with this group',
}, status=400)
Event.objects.bind_nodes([event], 'data')
try:
fields = self.get_new_issue_fields(request, group, event, **kwargs)
except Exception as e:
return self.handle_api_error(e)
if request.method == 'GET':
return Response(fields)
errors = self.validate_form(fields, request.DATA)
if errors:
return Response({'error_type': 'validation', 'errors': errors}, status=400)
try:
issue = self.create_issue(
group=group,
form_data=request.DATA,
request=request,
)
except Exception as e:
return self.handle_api_error(e)
if not isinstance(issue, dict):
issue = {'id': issue}
issue_field_map = self.get_issue_field_map()
for key, meta_name in six.iteritems(issue_field_map):
if key in issue:
GroupMeta.objects.set_value(group, meta_name, issue[key])
else:
GroupMeta.objects.unset_value(group, meta_name)
issue_information = {
'title': issue.get('title') or request.DATA.get('title') or self._get_issue_label_compat(group, issue),
'provider': self.get_title(),
'location': self._get_issue_url_compat(group, issue),
'label': self._get_issue_label_compat(group, issue),
}
Activity.objects.create(
project=group.project,
group=group,
type=Activity.CREATE_ISSUE,
user=request.user,
data=issue_information,
)
issue_tracker_used.send(
plugin=self, project=group.project, user=request.user,
sender=type(self)
)
return Response({'issue_url': self.get_issue_url(group, issue)})
def view_link(self, request, group, **kwargs):
auth_errors = self.check_config_and_auth(request, group)
if auth_errors:
return Response(auth_errors, status=400)
event = group.get_latest_event()
if event is None:
return Response({
'message': 'Unable to link issues: there are '
'no events associated with this group',
}, status=400)
Event.objects.bind_nodes([event], 'data')
try:
fields = self.get_link_existing_issue_fields(request, group, event, **kwargs)
except Exception as e:
return self.handle_api_error(e)
if request.method == 'GET':
return Response(fields)
errors = self.validate_form(fields, request.DATA)
if errors:
return Response({'error_type': 'validation', 'errors': errors}, status=400)
try:
issue = self.link_issue(
group=group,
form_data=request.DATA,
request=request,
) or {}
except Exception as e:
return self.handle_api_error(e)
# HACK(dcramer): maintain data for legacy issues
if 'id' not in issue and 'issue_id' in request.DATA:
issue['id'] = request.DATA['issue_id']
issue_field_map = self.get_issue_field_map()
for key, meta_name in six.iteritems(issue_field_map):
if key in issue:
GroupMeta.objects.set_value(group, meta_name, issue[key])
else:
GroupMeta.objects.unset_value(group, meta_name)
issue_information = {
'title': issue.get('title') or self._get_issue_label_compat(group, issue),
'provider': self.get_title(),
'location': self._get_issue_url_compat(group, issue),
'label': self._get_issue_label_compat(group, issue),
}
Activity.objects.create(
project=group.project,
group=group,
type=Activity.CREATE_ISSUE,
user=request.user,
data=issue_information,
)
return Response({'message': 'Successfully linked issue.'})
def view_unlink(self, request, group, **kwargs):
auth_errors = self.check_config_and_auth(request, group)
if auth_errors:
return Response(auth_errors, status=400)
issue = self.build_issue(group)
if issue and 'unlink' in self.allowed_actions:
self.unlink_issue(request, group, issue)
return Response({'message': 'Successfully unlinked issue.'})
return Response({'message': 'No issues to unlink.'}, status=400)
def plugin_issues(self, request, group, plugin_issues, **kwargs):
if not self.is_configured(request=request, project=group.project):
return plugin_issues
item = {
'slug': self.slug,
'allowed_actions': self.allowed_actions,
'title': self.get_title()
}
issue = self.build_issue(group)
if issue:
item['issue'] = {
'issue_id': issue.get('id'),
'url': self._get_issue_url_compat(group, issue),
'label': self._get_issue_label_compat(group, issue),
}
item.update(PluginSerializer(group.project).serialize(self, None, request.user))
plugin_issues.append(item)
return plugin_issues
def get_config(self, *args, **kwargs):
# TODO(dcramer): update existing plugins to just use get_config
# TODO(dcramer): remove request kwarg after sentry-plugins has been
# updated
kwargs.setdefault('request', None)
return self.get_configure_plugin_fields(*args, **kwargs)
def check_config_and_auth(self, request, group):
has_auth_configured = self.has_auth_configured()
if not (has_auth_configured and self.is_configured(
project=group.project, request=request)):
if self.auth_provider:
required_auth_settings = settings.AUTH_PROVIDERS[self.auth_provider]
else:
required_auth_settings = None
return {
'error_type': 'config',
'has_auth_configured': has_auth_configured,
'auth_provider': self.auth_provider,
'required_auth_settings': required_auth_settings,
}
if self.needs_auth(project=group.project, request=request):
return {
'error_type': 'auth',
'auth_url': reverse('socialauth_associate', args=[self.auth_provider])
}
# TODO: should we get rid of this (move it to react?)
def tags(self, request, group, tag_list, **kwargs):
if not self.is_configured(request=request, project=group.project):
return tag_list
issue = self.build_issue(group)
if not issue:
return tag_list
tag_list.append(
format_html(
'<a href="{}">{}</a>',
self._get_issue_url_compat(group, issue),
self._get_issue_label_compat(group, issue),
)
)
return tag_list
IssuePlugin2 = IssueTrackingPlugin2
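# --- Illustrative sketch only (not part of Sentry): a minimal concrete
# plugin built on IssueTrackingPlugin2. The tracker endpoint and the
# `hypothetical_session` HTTP client below are placeholders, not a real
# API, so the class is left commented out.
#
# class ExampleIssuePlugin(IssueTrackingPlugin2):
#     slug = 'example-tracker'
#     title = 'Example Tracker'
#     issue_fields = frozenset(['id', 'title', 'url'])
#
#     def is_configured(self, request, project, **kwargs):
#         return bool(self.get_option('base_url', project))
#
#     def create_issue(self, request, group, form_data, **kwargs):
#         base_url = self.get_option('base_url', group.project)
#         resp = hypothetical_session.post(base_url + '/issues', json={
#             'title': form_data['title'],
#             'description': form_data['description'],
#         })
#         data = resp.json()
#         return {'id': data['id'], 'title': data['title'],
#                 'url': data['html_url']}
#
#     def get_issue_url(self, group, issue, **kwargs):
#         return issue['url']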
| bsd-3-clause |
olgabot/poshsplice | docs/conf.py | 1 | 8434 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# poshsplice documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import poshsplice
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'PoshSplice'
copyright = u'2015, Olga Botvinnik'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = poshsplice.__version__
# The full version, including alpha/beta/rc tags.
release = poshsplice.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'poshsplicedoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'poshsplice.tex',
u'PoshSplice Documentation',
u'Olga Botvinnik', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'poshsplice',
u'PoshSplice Documentation',
[u'Olga Botvinnik'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'poshsplice',
u'PoshSplice Documentation',
u'Olga Botvinnik',
'poshsplice',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| bsd-3-clause |
Freso/listenbrainz-server | listenbrainz/tests/utils.py | 2 | 1125 | # coding=utf-8
from datetime import datetime
import time
import sys
import os
from listenbrainz.listen import Listen
import uuid
import listenbrainz.db.user as db_user
def generate_data(from_date, num_records, user_name):
test_data = []
current_date = to_epoch(from_date)
artist_msid = str(uuid.uuid4())
user = db_user.get_by_mb_id(user_name)
if not user:
db_user.create(user_name)
user = db_user.get_by_mb_id(user_name)
for i in range(num_records):
current_date += 1 # Add one second
item = Listen(
user_id=user['id'],
user_name=user_name,
timestamp=datetime.utcfromtimestamp(current_date),
artist_msid=artist_msid,
recording_msid=str(uuid.uuid4()),
release_msid=str(uuid.uuid4()),
data={
'artist_name': 'Test Artist Pls ignore',
'track_name': 'Hello Goodbye',
'additional_info': {},
},
)
test_data.append(item)
return test_data
def to_epoch(date):
return int(time.mktime(date.timetuple()))
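# Illustrative usage (a sketch; assumes a test database where
# db_user.create/get_by_mb_id work):
#
#     from datetime import datetime
#     listens = generate_data(datetime(2017, 1, 1), 5, 'testuser')
#     # yields 5 Listen objects, each timestamped one second apart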
| gpl-2.0 |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/pandas/util/testing.py | 3 | 92623 | from __future__ import division
# pylint: disable-msg=W0402
import re
import string
import sys
import tempfile
import warnings
import inspect
import os
import subprocess
import locale
import traceback
from datetime import datetime
from functools import wraps, partial
from contextlib import contextmanager
from distutils.version import LooseVersion
from numpy.random import randn, rand
import numpy as np
import pandas as pd
from pandas.core.dtypes.missing import array_equivalent
from pandas.core.dtypes.common import (
is_datetimelike_v_numeric,
is_datetimelike_v_object,
is_number, is_bool,
needs_i8_conversion,
is_categorical_dtype,
is_interval_dtype,
is_sequence,
is_list_like)
from pandas.io.formats.printing import pprint_thing
from pandas.core.algorithms import take_1d
import pandas.compat as compat
from pandas.compat import (
filter, map, zip, range, unichr, lrange, lmap, lzip, u, callable, Counter,
raise_with_traceback, httplib, is_platform_windows, is_platform_32bit,
StringIO, PY3
)
from pandas.core.computation import expressions as expr
from pandas import (bdate_range, CategoricalIndex, Categorical, IntervalIndex,
DatetimeIndex, TimedeltaIndex, PeriodIndex, RangeIndex,
Index, MultiIndex,
Series, DataFrame, Panel, Panel4D)
from pandas._libs import testing as _testing
from pandas.io.common import urlopen
try:
import pytest
slow = pytest.mark.slow
except ImportError:
# Should be ok to just ignore. If you actually need
# slow then you'll hit an import error long before getting here.
pass
N = 30
K = 4
_RAISE_NETWORK_ERROR_DEFAULT = False
# set testing_mode
_testing_mode_warnings = (DeprecationWarning, compat.ResourceWarning)
def set_testing_mode():
# set the testing mode filters
testing_mode = os.environ.get('PANDAS_TESTING_MODE', 'None')
if 'deprecate' in testing_mode:
warnings.simplefilter('always', _testing_mode_warnings)
def reset_testing_mode():
# reset the testing mode filters
testing_mode = os.environ.get('PANDAS_TESTING_MODE', 'None')
if 'deprecate' in testing_mode:
warnings.simplefilter('ignore', _testing_mode_warnings)
set_testing_mode()
def reset_display_options():
"""
Reset the display options for printing and representing objects.
"""
pd.reset_option('^display.', silent=True)
def round_trip_pickle(obj, path=None):
"""
Pickle an object and then read it again.
Parameters
----------
obj : pandas object
The object to pickle and then re-read.
path : str, default None
The path where the pickled object is written and then read.
Returns
-------
round_trip_pickled_object : pandas object
The original object that was pickled and then re-read.
"""
if path is None:
path = u('__%s__.pickle' % rands(10))
with ensure_clean(path) as path:
pd.to_pickle(obj, path)
return pd.read_pickle(path)
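# --- Usage sketch (added for illustration; `_demo_round_trip_pickle` is a
# hypothetical helper, not part of the original module).
def _demo_round_trip_pickle():
    original = pd.Series([1, 2, 3], name='demo')
    result = round_trip_pickle(original)
    # the temporary pickle file is cleaned up by ensure_clean()
    assert_series_equal(result, original)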
def round_trip_pathlib(writer, reader, path=None):
"""
Write an object to file specified by a pathlib.Path and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
round_trip_object : pandas object
The original object that was serialized and then re-read.
"""
import pytest
Path = pytest.importorskip('pathlib').Path
if path is None:
path = '___pathlib___'
with ensure_clean(path) as path:
writer(Path(path))
obj = reader(Path(path))
return obj
def round_trip_localpath(writer, reader, path=None):
"""
Write an object to file specified by a py.path LocalPath and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
round_trip_object : pandas object
The original object that was serialized and then re-read.
"""
import pytest
LocalPath = pytest.importorskip('py.path').local
if path is None:
path = '___localpath___'
with ensure_clean(path) as path:
writer(LocalPath(path))
obj = reader(LocalPath(path))
return obj
def assert_almost_equal(left, right, check_exact=False,
check_dtype='equiv', check_less_precise=False,
**kwargs):
"""
Check that the left and right objects are approximately equal.
Parameters
----------
left : object
right : object
check_exact : bool, default False
Whether to compare numbers exactly.
check_dtype : bool / string {'equiv'}, default 'equiv'
Check dtype if both a and b are the same type.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare
"""
if isinstance(left, pd.Index):
return assert_index_equal(left, right, check_exact=check_exact,
exact=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
elif isinstance(left, pd.Series):
return assert_series_equal(left, right, check_exact=check_exact,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
elif isinstance(left, pd.DataFrame):
return assert_frame_equal(left, right, check_exact=check_exact,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
else:
# other sequences
if check_dtype:
if is_number(left) and is_number(right):
# do not compare numeric classes, like np.float64 and float
pass
elif is_bool(left) and is_bool(right):
# do not compare bool classes, like np.bool_ and bool
pass
else:
if (isinstance(left, np.ndarray) or
isinstance(right, np.ndarray)):
obj = 'numpy array'
else:
obj = 'Input'
assert_class_equal(left, right, obj=obj)
return _testing.assert_almost_equal(
left, right,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
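# --- Usage sketch (added for illustration; `_demo_assert_almost_equal` is a
# hypothetical helper, not part of the original module).
def _demo_assert_almost_equal():
    # scalars: check_less_precise=True compares 3 digits after the decimal
    assert_almost_equal(1.000001, 1.000002, check_less_precise=True)
    # numpy arrays: the default tolerance is 5 digits after the decimal
    assert_almost_equal(np.array([1.0, 2.0]), np.array([1.0, 2.0000001]))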
def _check_isinstance(left, right, cls):
"""
Helper method for our assert_* methods that ensures that
the two objects being compared have the right type before
proceeding with the comparison.
Parameters
----------
left : The first object being compared.
right : The second object being compared.
cls : The class type to check against.
Raises
------
AssertionError : Either `left` or `right` is not an instance of `cls`.
"""
err_msg = "{0} Expected type {1}, found {2} instead"
cls_name = cls.__name__
if not isinstance(left, cls):
raise AssertionError(err_msg.format(cls_name, cls, type(left)))
if not isinstance(right, cls):
raise AssertionError(err_msg.format(cls_name, cls, type(right)))
def assert_dict_equal(left, right, compare_keys=True):
_check_isinstance(left, right, dict)
return _testing.assert_dict_equal(left, right, compare_keys=compare_keys)
def randbool(size=(), p=0.5):
return rand(*size) <= p
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits),
dtype=(np.str_, 1))
RANDU_CHARS = np.array(list(u("").join(map(unichr, lrange(1488, 1488 + 26))) +
string.digits), dtype=(np.unicode_, 1))
def rands_array(nchars, size, dtype='O'):
"""Generate an array of byte strings."""
retval = (np.random.choice(RANDS_CHARS, size=nchars * np.prod(size))
.view((np.str_, nchars)).reshape(size))
if dtype is None:
return retval
else:
return retval.astype(dtype)
def randu_array(nchars, size, dtype='O'):
"""Generate an array of unicode strings."""
retval = (np.random.choice(RANDU_CHARS, size=nchars * np.prod(size))
.view((np.unicode_, nchars)).reshape(size))
if dtype is None:
return retval
else:
return retval.astype(dtype)
def rands(nchars):
"""
Generate one random byte string.
See `rands_array` if you want to create an array of random strings.
"""
return ''.join(np.random.choice(RANDS_CHARS, nchars))
def randu(nchars):
"""
Generate one random unicode string.
See `randu_array` if you want to create an array of random unicode strings.
"""
return ''.join(np.random.choice(RANDU_CHARS, nchars))
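# --- Usage sketch (added for illustration; `_demo_random_strings` is a
# hypothetical helper, not part of the original module).
def _demo_random_strings():
    assert len(rands(10)) == 10        # one 10-character random string
    arr = rands_array(nchars=5, size=3)
    assert arr.shape == (3,)           # three 5-character random strings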
def close(fignum=None):
from matplotlib.pyplot import get_fignums, close as _close
if fignum is None:
for fignum in get_fignums():
_close(fignum)
else:
_close(fignum)
def _skip_if_32bit():
import pytest
if is_platform_32bit():
pytest.skip("skipping for 32 bit")
def _skip_module_if_no_mpl():
import pytest
mpl = pytest.importorskip("matplotlib")
mpl.use("Agg", warn=False)
def _skip_if_no_mpl():
try:
import matplotlib as mpl
mpl.use("Agg", warn=False)
except ImportError:
import pytest
pytest.skip("matplotlib not installed")
def _skip_if_mpl_1_5():
import matplotlib as mpl
v = mpl.__version__
if v > LooseVersion('1.4.3') or v[0] == '0':
import pytest
pytest.skip("matplotlib 1.5")
else:
mpl.use("Agg", warn=False)
def _skip_if_no_scipy():
try:
import scipy.stats # noqa
except ImportError:
import pytest
pytest.skip("no scipy.stats module")
try:
import scipy.interpolate # noqa
except ImportError:
import pytest
pytest.skip('scipy.interpolate missing')
try:
import scipy.sparse # noqa
except ImportError:
import pytest
pytest.skip('scipy.sparse missing')
def _check_if_lzma():
try:
return compat.import_lzma()
except ImportError:
return False
def _skip_if_no_lzma():
import pytest
return _check_if_lzma() or pytest.skip('need backports.lzma to run')
def _skip_if_no_xarray():
try:
import xarray
except ImportError:
import pytest
pytest.skip("xarray not installed")
v = xarray.__version__
if v < LooseVersion('0.7.0'):
import pytest
pytest.skip("xarray not version is too low: {0}".format(v))
def _skip_if_no_pytz():
try:
import pytz # noqa
except ImportError:
import pytest
pytest.skip("pytz not installed")
def _skip_if_no_dateutil():
try:
import dateutil # noqa
except ImportError:
import pytest
pytest.skip("dateutil not installed")
def _skip_if_windows_python_3():
if PY3 and is_platform_windows():
import pytest
pytest.skip("not used on python 3/win32")
def _skip_if_windows():
if is_platform_windows():
import pytest
pytest.skip("Running on Windows")
def _skip_if_no_pathlib():
try:
from pathlib import Path # noqa
except ImportError:
import pytest
pytest.skip("pathlib not available")
def _skip_if_no_localpath():
try:
from py.path import local as LocalPath # noqa
except ImportError:
import pytest
pytest.skip("py.path not installed")
def _incompat_bottleneck_version(method):
""" skip if we have bottleneck installed
and its >= 1.0
as we don't match the nansum/nanprod behavior for all-nan
ops, see GH9422
"""
if method not in ['sum', 'prod']:
return False
try:
import bottleneck as bn
return bn.__version__ >= LooseVersion('1.0')
except ImportError:
return False
def skip_if_no_ne(engine='numexpr'):
from pandas.core.computation.expressions import (
_USE_NUMEXPR,
_NUMEXPR_INSTALLED)
if engine == 'numexpr':
if not _USE_NUMEXPR:
import pytest
pytest.skip("numexpr enabled->{enabled}, "
"installed->{installed}".format(
enabled=_USE_NUMEXPR,
installed=_NUMEXPR_INSTALLED))
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
import pytest
pytest.skip("Specific locale is set {0}".format(lang))
def _skip_if_not_us_locale():
import locale
lang, _ = locale.getlocale()
if lang != 'en_US':
import pytest
pytest.skip("Specific locale is set {0}".format(lang))
def _skip_if_no_mock():
try:
import mock # noqa
except ImportError:
try:
from unittest import mock # noqa
except ImportError:
import nose
raise nose.SkipTest("mock is not installed")
def _skip_if_no_ipython():
try:
import IPython # noqa
except ImportError:
import nose
raise nose.SkipTest("IPython not installed")
# -----------------------------------------------------------------------------
# locale utilities
def check_output(*popenargs, **kwargs):
# shamelessly taken from Python 2.7 source
r"""Run command with arguments and return its output as a byte string.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> check_output(["ls", "-l", "/dev/null"])
'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=STDOUT.
>>> check_output(["/bin/sh", "-c",
... "ls -l non_existent_file ; exit 0"],
... stderr=STDOUT)
'ls: non_existent_file: No such file or directory\n'
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, stderr=subprocess.PIPE,
*popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
def _default_locale_getter():
try:
raw_locales = check_output(['locale -a'], shell=True)
except subprocess.CalledProcessError as e:
raise type(e)("%s, the 'locale -a' command cannot be found on your "
"system" % e)
return raw_locales
def get_locales(prefix=None, normalize=True,
locale_getter=_default_locale_getter):
"""Get all the locales that are available on the system.
Parameters
----------
prefix : str
If not ``None`` then return only those locales with the prefix
provided. For example to get all English language locales (those that
start with ``"en"``), pass ``prefix="en"``.
normalize : bool
Call ``locale.normalize`` on the resulting list of available locales.
If ``True``, only locales that can be set without throwing an
``Exception`` are returned.
locale_getter : callable
The function to use to retrieve the current locales. This should return
a string with each locale separated by a newline character.
Returns
-------
locales : list of strings
A list of locale strings that can be set with ``locale.setlocale()``.
For example::
locale.setlocale(locale.LC_ALL, locale_string)
On error will return None (no locale available, e.g. Windows)
"""
try:
raw_locales = locale_getter()
except:
return None
try:
# raw_locales is "\n" separated list of locales
# it may contain non-decodable parts, so split
# extract what we can and then rejoin.
raw_locales = raw_locales.split(b'\n')
out_locales = []
for x in raw_locales:
if PY3:
out_locales.append(str(
x, encoding=pd.options.display.encoding))
else:
out_locales.append(str(x))
except TypeError:
pass
if prefix is None:
return _valid_locales(out_locales, normalize)
found = re.compile('%s.*' % prefix).findall('\n'.join(out_locales))
return _valid_locales(found, normalize)
@contextmanager
def set_locale(new_locale, lc_var=locale.LC_ALL):
"""Context manager for temporarily setting a locale.
Parameters
----------
new_locale : str or tuple
A string of the form <language_country>.<encoding>. For example to set
the current locale to US English with a UTF8 encoding, you would pass
"en_US.UTF-8".
Notes
-----
This is useful when you want to run a particular block of code under a
particular locale, without globally setting the locale. This probably isn't
thread-safe.
"""
current_locale = locale.getlocale()
try:
locale.setlocale(lc_var, new_locale)
try:
normalized_locale = locale.getlocale()
except ValueError:
yield new_locale
else:
if all(lc is not None for lc in normalized_locale):
yield '.'.join(normalized_locale)
else:
yield new_locale
finally:
locale.setlocale(lc_var, current_locale)
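# --- Usage sketch (added for illustration; `_demo_set_locale` is a
# hypothetical helper, not part of the original module). It assumes the
# en_US.UTF-8 locale may or may not be installed on the host.
def _demo_set_locale():
    try:
        with set_locale('en_US.UTF-8') as normalized:
            assert 'en_US' in normalized
    except locale.Error:
        pass  # locale not available on this system; nothing to demonstrate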
def _can_set_locale(lc):
"""Check to see if we can set a locale without throwing an exception.
Parameters
----------
lc : str
The locale to attempt to set.
Returns
-------
isvalid : bool
Whether the passed locale can be set
"""
try:
with set_locale(lc):
pass
except locale.Error: # horrible name for an Exception subclass
return False
else:
return True
def _valid_locales(locales, normalize):
"""Return a list of normalized locales that do not throw an ``Exception``
when set.
Parameters
----------
locales : str
A string where each locale is separated by a newline.
normalize : bool
Whether to call ``locale.normalize`` on each locale.
Returns
-------
valid_locales : list
A list of valid locales.
"""
if normalize:
normalizer = lambda x: locale.normalize(x.strip())
else:
normalizer = lambda x: x.strip()
return list(filter(_can_set_locale, map(normalizer, locales)))
# -----------------------------------------------------------------------------
# Stdout / stderr decorators
def capture_stdout(f):
"""
Decorator to capture stdout in a buffer so that it can be checked
(or suppressed) during testing.
Parameters
----------
f : callable
The test that is capturing stdout.
Returns
-------
f : callable
The decorated test ``f``, which captures stdout.
Examples
--------
>>> from pandas.util.testing import capture_stdout
>>>
>>> import sys
>>>
>>> @capture_stdout
... def test_print_pass():
... print("foo")
... out = sys.stdout.getvalue()
... assert out == "foo\n"
>>>
>>> @capture_stdout
... def test_print_fail():
... print("foo")
... out = sys.stdout.getvalue()
... assert out == "bar\n"
...
AssertionError: assert 'foo\n' == 'bar\n'
"""
@wraps(f)
def wrapper(*args, **kwargs):
try:
sys.stdout = StringIO()
f(*args, **kwargs)
finally:
sys.stdout = sys.__stdout__
return wrapper
def capture_stderr(f):
"""
Decorator to capture stderr in a buffer so that it can be checked
(or suppressed) during testing.
Parameters
----------
f : callable
The test that is capturing stderr.
Returns
-------
f : callable
The decorated test ``f``, which captures stderr.
Examples
--------
>>> from pandas.util.testing import capture_stderr
>>>
>>> import sys
>>>
>>> @capture_stderr
... def test_stderr_pass():
... sys.stderr.write("foo")
... out = sys.stderr.getvalue()
... assert out == "foo\n"
>>>
>>> @capture_stderr
... def test_stderr_fail():
... sys.stderr.write("foo")
... out = sys.stderr.getvalue()
... assert out == "bar\n"
...
AssertionError: assert 'foo\n' == 'bar\n'
"""
@wraps(f)
def wrapper(*args, **kwargs):
try:
sys.stderr = StringIO()
f(*args, **kwargs)
finally:
sys.stderr = sys.__stderr__
return wrapper
# -----------------------------------------------------------------------------
# Console debugging tools
def debug(f, *args, **kwargs):
from pdb import Pdb as OldPdb
try:
from IPython.core.debugger import Pdb
kw = dict(color_scheme='Linux')
except ImportError:
Pdb = OldPdb
kw = {}
pdb = Pdb(**kw)
return pdb.runcall(f, *args, **kwargs)
def pudebug(f, *args, **kwargs):
import pudb
return pudb.runcall(f, *args, **kwargs)
def set_trace():
from IPython.core.debugger import Pdb
try:
Pdb(color_scheme='Linux').set_trace(sys._getframe().f_back)
except:
from pdb import Pdb as OldPdb
OldPdb().set_trace(sys._getframe().f_back)
# -----------------------------------------------------------------------------
# contextmanager to ensure the file cleanup
@contextmanager
def ensure_clean(filename=None, return_filelike=False):
"""Gets a temporary path and agrees to remove on close.
Parameters
----------
filename : str (optional)
if None, creates a temporary file which is then removed when out of
scope. if passed, creates temporary file with filename as ending.
return_filelike : bool (default False)
if True, returns a file-like which is *always* cleaned. Necessary for
savefig and other functions which want to append extensions.
"""
filename = filename or ''
fd = None
if return_filelike:
f = tempfile.TemporaryFile(suffix=filename)
try:
yield f
finally:
f.close()
else:
# don't generate tempfile if using a path with directory specified
if len(os.path.dirname(filename)):
raise ValueError("Can't pass a qualified name to ensure_clean()")
try:
fd, filename = tempfile.mkstemp(suffix=filename)
except UnicodeEncodeError:
import pytest
pytest.skip('no unicode file names on this system')
try:
yield filename
finally:
try:
os.close(fd)
except Exception as e:
print("Couldn't close file descriptor: %d (file: %s)" %
(fd, filename))
try:
if os.path.exists(filename):
os.remove(filename)
except Exception as e:
print("Exception on removing file: %s" % e)
def get_data_path(f=''):
"""Return the path of a data file, these are relative to the current test
directory.
"""
# get our callers file
_, filename, _, _, _, _ = inspect.getouterframes(inspect.currentframe())[1]
base_dir = os.path.abspath(os.path.dirname(filename))
return os.path.join(base_dir, 'data', f)
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2):
"""Checks if the set of unique elements of arr1 and arr2 are equivalent.
"""
return frozenset(arr1) == frozenset(arr2)
def assert_index_equal(left, right, exact='equiv', check_names=True,
check_less_precise=False, check_exact=True,
check_categorical=True, obj='Index'):
"""Check that left and right Index are equal.
Parameters
----------
left : Index
right : Index
exact : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
check_names : bool, default True
Whether to check the names attribute.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare
check_exact : bool, default True
Whether to compare numbers exactly.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
obj : str, default 'Index'
Specify object name being compared, internally used to show appropriate
assertion message
"""
def _check_types(l, r, obj='Index'):
if exact:
assert_class_equal(left, right, exact=exact, obj=obj)
assert_attr_equal('dtype', l, r, obj=obj)
# allow string-like to have different inferred_types
if l.inferred_type in ('string', 'unicode'):
assert r.inferred_type in ('string', 'unicode')
else:
assert_attr_equal('inferred_type', l, r, obj=obj)
def _get_ilevel_values(index, level):
# accept level number only
unique = index.levels[level]
labels = index.labels[level]
filled = take_1d(unique.values, labels, fill_value=unique._na_value)
values = unique._shallow_copy(filled, name=index.names[level])
return values
# instance validation
_check_isinstance(left, right, Index)
# class / dtype comparison
_check_types(left, right, obj=obj)
# level comparison
if left.nlevels != right.nlevels:
raise_assert_detail(obj, '{0} levels are different'.format(obj),
'{0}, {1}'.format(left.nlevels, left),
'{0}, {1}'.format(right.nlevels, right))
# length comparison
if len(left) != len(right):
raise_assert_detail(obj, '{0} lengths are different'.format(obj),
'{0}, {1}'.format(len(left), left),
'{0}, {1}'.format(len(right), right))
# MultiIndex special comparison for more friendly error messages
if left.nlevels > 1:
for level in range(left.nlevels):
# cannot use get_level_values here because it can change dtype
llevel = _get_ilevel_values(left, level)
rlevel = _get_ilevel_values(right, level)
lobj = 'MultiIndex level [{0}]'.format(level)
assert_index_equal(llevel, rlevel,
exact=exact, check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact, obj=lobj)
# get_level_values may change dtype
_check_types(left.levels[level], right.levels[level], obj=obj)
if check_exact:
if not left.equals(right):
diff = np.sum((left.values != right.values)
.astype(int)) * 100.0 / len(left)
msg = '{0} values are different ({1} %)'\
.format(obj, np.round(diff, 5))
raise_assert_detail(obj, msg, left, right)
else:
_testing.assert_almost_equal(left.values, right.values,
check_less_precise=check_less_precise,
check_dtype=exact,
obj=obj, lobj=left, robj=right)
# metadata comparison
if check_names:
assert_attr_equal('names', left, right, obj=obj)
if isinstance(left, pd.PeriodIndex) or isinstance(right, pd.PeriodIndex):
assert_attr_equal('freq', left, right, obj=obj)
if (isinstance(left, pd.IntervalIndex) or
isinstance(right, pd.IntervalIndex)):
assert_attr_equal('closed', left, right, obj=obj)
if check_categorical:
if is_categorical_dtype(left) or is_categorical_dtype(right):
assert_categorical_equal(left.values, right.values,
obj='{0} category'.format(obj))
def assert_class_equal(left, right, exact=True, obj='Input'):
"""checks classes are equal."""
def repr_class(x):
if isinstance(x, Index):
# return Index as it is to include values in the error message
return x
try:
return x.__class__.__name__
except AttributeError:
return repr(type(x))
if exact == 'equiv':
if type(left) != type(right):
# allow equivalence of Int64Index/RangeIndex
types = set([type(left).__name__, type(right).__name__])
if len(types - set(['Int64Index', 'RangeIndex'])):
msg = '{0} classes are not equivalent'.format(obj)
raise_assert_detail(obj, msg, repr_class(left),
repr_class(right))
elif exact:
if type(left) != type(right):
msg = '{0} classes are different'.format(obj)
raise_assert_detail(obj, msg, repr_class(left),
repr_class(right))
def assert_attr_equal(attr, left, right, obj='Attributes'):
"""checks attributes are equal. Both objects must have attribute.
Parameters
----------
attr : str
Attribute name being compared.
left : object
right : object
obj : str, default 'Attributes'
Specify object name being compared, internally used to show appropriate
assertion message
"""
left_attr = getattr(left, attr)
right_attr = getattr(right, attr)
if left_attr is right_attr:
return True
elif (is_number(left_attr) and np.isnan(left_attr) and
is_number(right_attr) and np.isnan(right_attr)):
# np.nan
return True
try:
result = left_attr == right_attr
except TypeError:
# datetimetz on rhs may raise TypeError
result = False
if not isinstance(result, bool):
result = result.all()
if result:
return True
else:
raise_assert_detail(obj, 'Attribute "{0}" are different'.format(attr),
left_attr, right_attr)
def assert_is_valid_plot_return_object(objs):
import matplotlib.pyplot as plt
if isinstance(objs, (pd.Series, np.ndarray)):
for el in objs.ravel():
msg = ('one of \'objs\' is not a matplotlib Axes instance, '
'type encountered {0!r}')
assert isinstance(el, (plt.Axes, dict)), msg.format(
el.__class__.__name__)
else:
assert isinstance(objs, (plt.Artist, tuple, dict)), \
('objs is neither an ndarray of Artist instances nor a '
'single Artist instance, tuple, or dict, "objs" is a {0!r} '
''.format(objs.__class__.__name__))
def isiterable(obj):
return hasattr(obj, '__iter__')
def is_sorted(seq):
if isinstance(seq, (Index, Series)):
seq = seq.values
# sorting does not change precisions
return assert_numpy_array_equal(seq, np.sort(np.array(seq)))
def assert_categorical_equal(left, right, check_dtype=True,
obj='Categorical', check_category_order=True):
"""Test that Categoricals are equivalent.
Parameters
----------
left, right : Categorical
Categoricals to compare
check_dtype : bool, default True
Check that integer dtype of the codes are the same
obj : str, default 'Categorical'
Specify object name being compared, internally used to show appropriate
assertion message
check_category_order : bool, default True
Whether the order of the categories should be compared, which
implies identical integer codes. If False, only the resulting
values are compared. The ordered attribute is
checked regardless.
"""
_check_isinstance(left, right, Categorical)
if check_category_order:
assert_index_equal(left.categories, right.categories,
obj='{0}.categories'.format(obj))
assert_numpy_array_equal(left.codes, right.codes,
check_dtype=check_dtype,
obj='{0}.codes'.format(obj))
else:
assert_index_equal(left.categories.sort_values(),
right.categories.sort_values(),
obj='{0}.categories'.format(obj))
assert_index_equal(left.categories.take(left.codes),
right.categories.take(right.codes),
obj='{0}.values'.format(obj))
assert_attr_equal('ordered', left, right, obj=obj)
def raise_assert_detail(obj, message, left, right, diff=None):
if isinstance(left, np.ndarray):
left = pprint_thing(left)
if isinstance(right, np.ndarray):
right = pprint_thing(right)
msg = """{0} are different
{1}
[left]: {2}
[right]: {3}""".format(obj, message, left, right)
if diff is not None:
msg = msg + "\n[diff]: {diff}".format(diff=diff)
raise AssertionError(msg)
def assert_numpy_array_equal(left, right, strict_nan=False,
check_dtype=True, err_msg=None,
obj='numpy array', check_same=None):
""" Checks that 'np.ndarray' is equivalent
Parameters
----------
left : np.ndarray or iterable
right : np.ndarray or iterable
strict_nan : bool, default False
If True, consider NaN and None to be different.
check_dtype : bool, default True
Check dtype if both a and b are np.ndarray.
err_msg : str, default None
If provided, used as assertion message
obj : str, default 'numpy array'
Specify object name being compared, internally used to show appropriate
assertion message
check_same : None|'copy'|'same', default None
Ensure left and right refer/do not refer to the same memory area
"""
# instance validation
# Show a detailed error message when classes are different
assert_class_equal(left, right, obj=obj)
# both classes must be an np.ndarray
_check_isinstance(left, right, np.ndarray)
def _get_base(obj):
return obj.base if getattr(obj, 'base', None) is not None else obj
left_base = _get_base(left)
right_base = _get_base(right)
if check_same == 'same':
if left_base is not right_base:
msg = "%r is not %r" % (left_base, right_base)
raise AssertionError(msg)
elif check_same == 'copy':
if left_base is right_base:
msg = "%r is %r" % (left_base, right_base)
raise AssertionError(msg)
def _raise(left, right, err_msg):
if err_msg is None:
if left.shape != right.shape:
raise_assert_detail(obj, '{0} shapes are different'
.format(obj), left.shape, right.shape)
diff = 0
for l, r in zip(left, right):
# count up differences
if not array_equivalent(l, r, strict_nan=strict_nan):
diff += 1
diff = diff * 100.0 / left.size
msg = '{0} values are different ({1} %)'\
.format(obj, np.round(diff, 5))
raise_assert_detail(obj, msg, left, right)
raise AssertionError(err_msg)
# compare shape and values
if not array_equivalent(left, right, strict_nan=strict_nan):
_raise(left, right, err_msg)
if check_dtype:
if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
assert_attr_equal('dtype', left, right, obj=obj)
return True
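# --- Usage sketch (added for illustration; `_demo_assert_numpy_array_equal`
# is a hypothetical helper, not part of the original module).
def _demo_assert_numpy_array_equal():
    arr = np.array([1, 2, 3])
    # 'copy' insists the two arrays do not share memory
    assert_numpy_array_equal(arr, arr.copy(), check_same='copy')
    # 'same' insists they refer to the same underlying base
    assert_numpy_array_equal(arr, arr, check_same='same')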
# This could be refactored to use the NDFrame.equals method
def assert_series_equal(left, right, check_dtype=True,
check_index_type='equiv',
check_series_type=True,
check_less_precise=False,
check_names=True,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
obj='Series'):
"""Check that left and right Series are equal.
Parameters
----------
left : Series
right : Series
check_dtype : bool, default True
Whether to check the Series dtype is identical.
check_index_type : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_series_type : bool, default True
Whether to check the Series class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare
check_exact : bool, default False
Whether to compare numbers exactly.
check_names : bool, default True
Whether to check the Series and Index names attribute.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
obj : str, default 'Series'
Specify object name being compared, internally used to show appropriate
assertion message
"""
# instance validation
_check_isinstance(left, right, Series)
if check_series_type:
# ToDo: There are some tests using rhs is sparse
# lhs is dense. Should use assert_class_equal in future
assert isinstance(left, type(right))
# assert_class_equal(left, right, obj=obj)
# length comparison
if len(left) != len(right):
raise_assert_detail(obj, 'Series lengths are different',
'{0}, {1}'.format(len(left), left.index),
'{0}, {1}'.format(len(right), right.index))
# index comparison
assert_index_equal(left.index, right.index, exact=check_index_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj='{0}.index'.format(obj))
if check_dtype:
assert_attr_equal('dtype', left, right)
if check_exact:
assert_numpy_array_equal(left.get_values(), right.get_values(),
check_dtype=check_dtype,
obj='{0}'.format(obj),)
elif check_datetimelike_compat:
# we want to check only if we have compat dtypes
# e.g. integer and M|m are NOT compat, but we can simply check
# the values in that case
if (is_datetimelike_v_numeric(left, right) or
is_datetimelike_v_object(left, right) or
needs_i8_conversion(left) or
needs_i8_conversion(right)):
# datetimelike may have different objects (e.g. datetime.datetime
# vs Timestamp) but will compare equal
if not Index(left.values).equals(Index(right.values)):
msg = '[datetimelike_compat=True] {0} is not equal to {1}.'
raise AssertionError(msg.format(left.values, right.values))
else:
assert_numpy_array_equal(left.get_values(), right.get_values(),
check_dtype=check_dtype)
elif is_interval_dtype(left) or is_interval_dtype(right):
# TODO: big hack here
l = pd.IntervalIndex(left)
r = pd.IntervalIndex(right)
assert_index_equal(l, r, obj='{0}.index'.format(obj))
else:
_testing.assert_almost_equal(left.get_values(), right.get_values(),
check_less_precise=check_less_precise,
check_dtype=check_dtype,
obj='{0}'.format(obj))
# metadata comparison
if check_names:
assert_attr_equal('name', left, right, obj=obj)
if check_categorical:
if is_categorical_dtype(left) or is_categorical_dtype(right):
assert_categorical_equal(left.values, right.values,
obj='{0} category'.format(obj))
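# --- Usage sketch (added for illustration; `_demo_assert_series_equal` is a
# hypothetical helper, not part of the original module).
def _demo_assert_series_equal():
    left = Series([1.0, 2.0], name='x')
    right = Series([1.0, 2.0 + 1e-8], name='x')
    # passes: check_exact defaults to False, so tiny float noise is tolerated
    assert_series_equal(left, right)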
# This could be refactored to use the NDFrame.equals method
def assert_frame_equal(left, right, check_dtype=True,
check_index_type='equiv',
check_column_type='equiv',
check_frame_type=True,
check_less_precise=False,
check_names=True,
by_blocks=False,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
check_like=False,
obj='DataFrame'):
"""Check that left and right DataFrame are equal.
Parameters
----------
left : DataFrame
right : DataFrame
check_dtype : bool, default True
Whether to check the DataFrame dtype is identical.
check_index_type : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_column_type : bool / string {'equiv'}, default 'equiv'
Whether to check the columns class, dtype and inferred_type
are identical.
check_frame_type : bool, default True
Whether to check the DataFrame class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare
check_names : bool, default True
Whether to check the Index names attribute.
by_blocks : bool, default False
Specify how to compare internal data. If False, compare by columns.
If True, compare by blocks.
check_exact : bool, default False
Whether to compare numbers exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_like : bool, default False
If true, ignore the order of rows & columns
obj : str, default 'DataFrame'
Specify object name being compared, internally used to show appropriate
assertion message
"""
# instance validation
_check_isinstance(left, right, DataFrame)
if check_frame_type:
# ToDo: There are some tests using rhs is SparseDataFrame
# lhs is DataFrame. Should use assert_class_equal in future
assert isinstance(left, type(right))
# assert_class_equal(left, right, obj=obj)
# shape comparison
if left.shape != right.shape:
raise_assert_detail(obj,
'DataFrame shape mismatch',
'({0}, {1})'.format(*left.shape),
'({0}, {1})'.format(*right.shape))
if check_like:
left, right = left.reindex_like(right), right
# index comparison
assert_index_equal(left.index, right.index, exact=check_index_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj='{0}.index'.format(obj))
# column comparison
assert_index_equal(left.columns, right.columns, exact=check_column_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj='{0}.columns'.format(obj))
# compare by blocks
if by_blocks:
rblocks = right.blocks
lblocks = left.blocks
for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
assert dtype in lblocks
assert dtype in rblocks
assert_frame_equal(lblocks[dtype], rblocks[dtype],
check_dtype=check_dtype, obj='DataFrame.blocks')
# compare by columns
else:
for i, col in enumerate(left.columns):
assert col in right
lcol = left.iloc[:, i]
rcol = right.iloc[:, i]
assert_series_equal(
lcol, rcol, check_dtype=check_dtype,
check_index_type=check_index_type,
check_less_precise=check_less_precise,
check_exact=check_exact, check_names=check_names,
check_datetimelike_compat=check_datetimelike_compat,
check_categorical=check_categorical,
obj='DataFrame.iloc[:, {0}]'.format(i))
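# --- Usage sketch (added for illustration; `_demo_assert_frame_equal` is a
# hypothetical helper, not part of the original module).
def _demo_assert_frame_equal():
    left = DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})
    right = left[['b', 'a']]  # same data, different column order
    # check_like=True reindexes before comparing, ignoring row/column order
    assert_frame_equal(left, right, check_like=True)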
def assert_panelnd_equal(left, right,
check_dtype=True,
check_panel_type=False,
check_less_precise=False,
assert_func=assert_frame_equal,
check_names=False,
by_blocks=False,
obj='Panel'):
"""Check that left and right Panels are equal.
Parameters
----------
left : Panel (or nd)
right : Panel (or nd)
check_dtype : bool, default True
Whether to check the Panel dtype is identical.
check_panel_type : bool, default False
Whether to check the Panel class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare
assert_func : function for comparing data
check_names : bool, default True
Whether to check the Index names attribute.
by_blocks : bool, default False
Specify how to compare internal data. If False, compare by columns.
If True, compare by blocks.
obj : str, default 'Panel'
Specify the object name being compared, internally used to show
the appropriate assertion message.
"""
if check_panel_type:
assert_class_equal(left, right, obj=obj)
for axis in left._AXIS_ORDERS:
left_ind = getattr(left, axis)
right_ind = getattr(right, axis)
assert_index_equal(left_ind, right_ind, check_names=check_names)
if by_blocks:
rblocks = right.blocks
lblocks = left.blocks
for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
assert dtype in lblocks
assert dtype in rblocks
array_equivalent(lblocks[dtype].values, rblocks[dtype].values)
else:
# can potentially be slow
for i, item in enumerate(left._get_axis(0)):
assert item in right, "non-matching item (right) '%s'" % item
litem = left.iloc[i]
ritem = right.iloc[i]
assert_func(litem, ritem, check_less_precise=check_less_precise)
for i, item in enumerate(right._get_axis(0)):
assert item in left, "non-matching item (left) '%s'" % item
# TODO: strangely check_names fails in py3 ?
_panel_frame_equal = partial(assert_frame_equal, check_names=False)
assert_panel_equal = partial(assert_panelnd_equal,
assert_func=_panel_frame_equal)
assert_panel4d_equal = partial(assert_panelnd_equal,
assert_func=assert_panel_equal)
# -----------------------------------------------------------------------------
# Sparse
def assert_sp_array_equal(left, right, check_dtype=True):
"""Check that the left and right SparseArray are equal.
Parameters
----------
left : SparseArray
right : SparseArray
check_dtype : bool, default True
Whether to check the data dtype is identical.
"""
_check_isinstance(left, right, pd.SparseArray)
assert_numpy_array_equal(left.sp_values, right.sp_values,
check_dtype=check_dtype)
# SparseIndex comparison
assert isinstance(left.sp_index, pd._libs.sparse.SparseIndex)
assert isinstance(right.sp_index, pd._libs.sparse.SparseIndex)
if not left.sp_index.equals(right.sp_index):
raise_assert_detail('SparseArray.index', 'index are not equal',
left.sp_index, right.sp_index)
assert_attr_equal('fill_value', left, right)
if check_dtype:
assert_attr_equal('dtype', left, right)
assert_numpy_array_equal(left.values, right.values,
check_dtype=check_dtype)
def assert_sp_series_equal(left, right, check_dtype=True, exact_indices=True,
check_series_type=True, check_names=True,
obj='SparseSeries'):
"""Check that the left and right SparseSeries are equal.
Parameters
----------
left : SparseSeries
right : SparseSeries
check_dtype : bool, default True
Whether to check the Series dtype is identical.
exact_indices : bool, default True
check_series_type : bool, default True
Whether to check the SparseSeries class is identical.
check_names : bool, default True
Whether to check the SparseSeries name attribute.
obj : str, default 'SparseSeries'
Specify the object name being compared, internally used to show
the appropriate assertion message.
"""
_check_isinstance(left, right, pd.SparseSeries)
if check_series_type:
assert_class_equal(left, right, obj=obj)
assert_index_equal(left.index, right.index,
obj='{0}.index'.format(obj))
assert_sp_array_equal(left.block.values, right.block.values)
if check_names:
assert_attr_equal('name', left, right)
if check_dtype:
assert_attr_equal('dtype', left, right)
assert_numpy_array_equal(left.values, right.values)
def assert_sp_frame_equal(left, right, check_dtype=True, exact_indices=True,
check_frame_type=True, obj='SparseDataFrame'):
"""Check that the left and right SparseDataFrame are equal.
Parameters
----------
left : SparseDataFrame
right : SparseDataFrame
check_dtype : bool, default True
Whether to check the Series dtype is identical.
exact_indices : bool, default True
SparseSeries SparseIndex objects must be exactly the same,
otherwise just compare dense representations.
check_frame_type : bool, default True
Whether to check the SparseDataFrame class is identical.
obj : str, default 'SparseDataFrame'
Specify the object name being compared, internally used to show
the appropriate assertion message.
"""
_check_isinstance(left, right, pd.SparseDataFrame)
if check_frame_type:
assert_class_equal(left, right, obj=obj)
assert_index_equal(left.index, right.index,
obj='{0}.index'.format(obj))
assert_index_equal(left.columns, right.columns,
obj='{0}.columns'.format(obj))
for col, series in compat.iteritems(left):
assert (col in right)
# trade-off?
if exact_indices:
assert_sp_series_equal(series, right[col],
check_dtype=check_dtype)
else:
assert_series_equal(series.to_dense(), right[col].to_dense(),
check_dtype=check_dtype)
assert_attr_equal('default_fill_value', left, right, obj=obj)
# do I care?
# assert(left.default_kind == right.default_kind)
for col in right:
assert (col in left)
def assert_sp_list_equal(left, right):
assert isinstance(left, pd.SparseList)
assert isinstance(right, pd.SparseList)
assert_sp_array_equal(left.to_array(), right.to_array())
# -----------------------------------------------------------------------------
# Others
def assert_contains_all(iterable, dic):
for k in iterable:
assert k in dic, "Did not contain item: '%r'" % k
def assert_copy(iter1, iter2, **eql_kwargs):
"""
iter1, iter2: iterables that produce elements
comparable with assert_almost_equal
Checks that the elements are equal, but not
the same object. (Does not check that items
in sequences are also not the same object)
"""
for elem1, elem2 in zip(iter1, iter2):
assert_almost_equal(elem1, elem2, **eql_kwargs)
assert elem1 is not elem2, ("Expected object %r and "
"object %r to be different "
"objects, were same."
% (type(elem1), type(elem2)))
def getCols(k):
return string.ascii_uppercase[:k]
def getArangeMat():
return np.arange(N * K).reshape((N, K))
# make index
def makeStringIndex(k=10, name=None):
return Index(rands_array(nchars=10, size=k), name=name)
def makeUnicodeIndex(k=10, name=None):
return Index(randu_array(nchars=10, size=k), name=name)
def makeCategoricalIndex(k=10, n=3, name=None):
""" make a length k index or n categories """
x = rands_array(nchars=4, size=n)
return CategoricalIndex(np.random.choice(x, k), name=name)
def makeIntervalIndex(k=10, name=None):
""" make a length k IntervalIndex """
x = np.linspace(0, 100, num=(k + 1))
return IntervalIndex.from_breaks(x, name=name)
def makeBoolIndex(k=10, name=None):
if k == 1:
return Index([True], name=name)
elif k == 2:
return Index([False, True], name=name)
return Index([False, True] + [False] * (k - 2), name=name)
def makeIntIndex(k=10, name=None):
return Index(lrange(k), name=name)
def makeUIntIndex(k=10, name=None):
return Index([2**63 + i for i in lrange(k)], name=name)
def makeRangeIndex(k=10, name=None):
return RangeIndex(0, k, 1, name=name)
def makeFloatIndex(k=10, name=None):
values = sorted(np.random.random_sample(k)) - np.random.random_sample(1)
return Index(values * (10 ** np.random.randint(0, 9)), name=name)
def makeDateIndex(k=10, freq='B', name=None):
dt = datetime(2000, 1, 1)
dr = bdate_range(dt, periods=k, freq=freq, name=name)
return DatetimeIndex(dr, name=name)
def makeTimedeltaIndex(k=10, freq='D', name=None):
return TimedeltaIndex(start='1 day', periods=k, freq=freq, name=name)
def makePeriodIndex(k=10, name=None):
dt = datetime(2000, 1, 1)
dr = PeriodIndex(start=dt, periods=k, freq='B', name=name)
return dr
def all_index_generator(k=10):
"""Generator which can be iterated over to get instances of all the various
index classes.
Parameters
----------
k: length of each of the index instances
"""
all_make_index_funcs = [makeIntIndex, makeFloatIndex, makeStringIndex,
makeUnicodeIndex, makeDateIndex, makePeriodIndex,
makeTimedeltaIndex, makeBoolIndex,
makeCategoricalIndex]
for make_index_func in all_make_index_funcs:
yield make_index_func(k=k)
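# --- Usage sketch (added for illustration; `_demo_all_index_generator` is a
# hypothetical helper, not part of the original module).
def _demo_all_index_generator():
    for idx in all_index_generator(k=5):
        assert isinstance(idx, Index)
        assert len(idx) == 5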
def all_timeseries_index_generator(k=10):
"""Generator which can be iterated over to get instances of all the classes
which represent time series.
Parameters
----------
k: length of each of the index instances
"""
make_index_funcs = [makeDateIndex, makePeriodIndex, makeTimedeltaIndex]
for make_index_func in make_index_funcs:
yield make_index_func(k=k)
# make series
def makeFloatSeries(name=None):
index = makeStringIndex(N)
return Series(randn(N), index=index, name=name)
def makeStringSeries(name=None):
index = makeStringIndex(N)
return Series(randn(N), index=index, name=name)
def makeObjectSeries(name=None):
dateIndex = makeDateIndex(N)
dateIndex = Index(dateIndex, dtype=object)
index = makeStringIndex(N)
return Series(dateIndex, index=index, name=name)
def getSeriesData():
index = makeStringIndex(N)
return dict((c, Series(randn(N), index=index)) for c in getCols(K))
def makeTimeSeries(nper=None, freq='B', name=None):
if nper is None:
nper = N
return Series(randn(nper), index=makeDateIndex(nper, freq=freq), name=name)
def makePeriodSeries(nper=None, name=None):
if nper is None:
nper = N
return Series(randn(nper), index=makePeriodIndex(nper), name=name)
def getTimeSeriesData(nper=None, freq='B'):
return dict((c, makeTimeSeries(nper, freq)) for c in getCols(K))
def getPeriodData(nper=None):
return dict((c, makePeriodSeries(nper)) for c in getCols(K))
# make frame
def makeTimeDataFrame(nper=None, freq='B'):
data = getTimeSeriesData(nper, freq)
return DataFrame(data)
def makeDataFrame():
data = getSeriesData()
return DataFrame(data)
def getMixedTypeDict():
index = Index(['a', 'b', 'c', 'd', 'e'])
data = {
'A': [0., 1., 2., 3., 4.],
'B': [0., 1., 0., 1., 0.],
'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
'D': bdate_range('1/1/2009', periods=5)
}
return index, data
def makeMixedDataFrame():
return DataFrame(getMixedTypeDict()[1])
def makePeriodFrame(nper=None):
data = getPeriodData(nper)
return DataFrame(data)
def makePanel(nper=None):
cols = ['Item' + c for c in string.ascii_uppercase[:K - 1]]
data = dict((c, makeTimeDataFrame(nper)) for c in cols)
return Panel.fromDict(data)
def makePeriodPanel(nper=None):
cols = ['Item' + c for c in string.ascii_uppercase[:K - 1]]
data = dict((c, makePeriodFrame(nper)) for c in cols)
return Panel.fromDict(data)
def makePanel4D(nper=None):
with warnings.catch_warnings(record=True):
d = dict(l1=makePanel(nper), l2=makePanel(nper),
l3=makePanel(nper))
return Panel4D(d)
def makeCustomIndex(nentries, nlevels, prefix='#', names=False, ndupe_l=None,
idx_type=None):
"""Create an index/multindex with given dimensions, levels, names, etc'
nentries - number of entries in index
nlevels - number of levels (> 1 produces multindex)
prefix - a string prefix for labels
names - (Optional), bool or list of strings. if True will use default
names, if false will use no names, if a list is given, the name of
each level in the index will be taken from the list.
ndupe_l - (Optional), list of ints, the number of rows for which the
label will repeated at the corresponding level, you can specify just
the first few, the rest will use the default ndupe_l of 1.
len(ndupe_l) <= nlevels.
idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
"dt" create a datetime index.
"td" create a datetime index.
if unspecified, string labels will be generated.
"""
if ndupe_l is None:
ndupe_l = [1] * nlevels
assert (is_sequence(ndupe_l) and len(ndupe_l) <= nlevels)
assert (names is None or names is False or
names is True or len(names) is nlevels)
assert idx_type is None or \
(idx_type in ('i', 'f', 's', 'u', 'dt', 'p', 'td') and nlevels == 1)
if names is True:
# build default names
names = [prefix + str(i) for i in range(nlevels)]
if names is False:
# pass None to index constructor for no name
names = None
# make singleton case uniform
if isinstance(names, compat.string_types) and nlevels == 1:
names = [names]
# specific 1D index type requested?
idx_func = dict(i=makeIntIndex, f=makeFloatIndex,
s=makeStringIndex, u=makeUnicodeIndex,
dt=makeDateIndex, td=makeTimedeltaIndex,
p=makePeriodIndex).get(idx_type)
if idx_func:
idx = idx_func(nentries)
# but we need to fill in the name
if names:
idx.name = names[0]
return idx
elif idx_type is not None:
raise ValueError('"%s" is not a legal value for `idx_type`, use '
'"i"/"f"/"s"/"u"/"dt/"p"/"td".' % idx_type)
if len(ndupe_l) < nlevels:
ndupe_l.extend([1] * (nlevels - len(ndupe_l)))
assert len(ndupe_l) == nlevels
assert all([x > 0 for x in ndupe_l])
tuples = []
for i in range(nlevels):
def keyfunc(x):
import re
numeric_tuple = re.sub("[^\d_]_?", "", x).split("_")
return lmap(int, numeric_tuple)
# build a list of lists to create the index from
div_factor = nentries // ndupe_l[i] + 1
cnt = Counter()
for j in range(div_factor):
label = prefix + '_l%d_g' % i + str(j)
cnt[label] = ndupe_l[i]
# cute Counter trick
result = list(sorted(cnt.elements(), key=keyfunc))[:nentries]
tuples.append(result)
tuples = lzip(*tuples)
# convert tuples to index
if nentries == 1:
index = Index(tuples[0], name=names[0])
else:
index = MultiIndex.from_tuples(tuples, names=names)
return index
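# --- Usage sketch (added for illustration; `_demo_make_custom_index` is a
# hypothetical helper, not part of the original module).
def _demo_make_custom_index():
    # 2-level MultiIndex, first-level labels duplicated twice
    mi = makeCustomIndex(nentries=6, nlevels=2, ndupe_l=[2])
    assert mi.nlevels == 2 and len(mi) == 6
    # flat DatetimeIndex of length 4
    dti = makeCustomIndex(4, nlevels=1, idx_type='dt')
    assert len(dti) == 4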
def makeCustomDataframe(nrows, ncols, c_idx_names=True, r_idx_names=True,
c_idx_nlevels=1, r_idx_nlevels=1, data_gen_f=None,
c_ndupe_l=None, r_ndupe_l=None, dtype=None,
c_idx_type=None, r_idx_type=None):
"""
nrows, ncols - number of data rows/cols
c_idx_names, r_idx_names - False/True/list of strings, yields no names,
default names or uses the provided names for the levels of the
corresponding index. You can provide a single string when
c_idx_nlevels ==1.
c_idx_nlevels - number of levels in columns index. > 1 will yield MultiIndex
r_idx_nlevels - number of levels in rows index. > 1 will yield MultiIndex
data_gen_f - a function f(row,col) which return the data value
at that position, the default generator used yields values of the form
"RxCy" based on position.
c_ndupe_l, r_ndupe_l - list of integers, determines the number
of duplicates for each label at a given level of the corresponding
index. The default `None` value produces a multiplicity of 1 across
all levels, i.e. a unique index. Will accept a partial list of length
N < idx_nlevels, for just the first N levels. If ndupe doesn't divide
nrows/ncol, the last label might have lower multiplicity.
dtype - passed to the DataFrame constructor as is, in case you wish to
have more control in conjunction with a custom `data_gen_f`
r_idx_type, c_idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
"dt" create a datetime index.
"td" create a timedelta index.
if unspecified, string labels will be generated.
Examples:
# 5 row, 3 columns, default names on both, single index on both axis
>> makeCustomDataframe(5,3)
# make the data a random int between 1 and 100
>> mkdf(5,3,data_gen_f=lambda r,c:randint(1,100))
# 2-level multiindex on rows with each label duplicated
# twice on first level, default names on both axis, single
# index on both axis
>> a=makeCustomDataframe(5,3,r_idx_nlevels=2,r_ndupe_l=[2])
# DatetimeIndex on row, index with unicode labels on columns
# no names on either axis
>> a=makeCustomDataframe(5,3,c_idx_names=False,r_idx_names=False,
r_idx_type="dt",c_idx_type="u")
# 4-level multiindex on rows with names provided, 2-level multiindex
# on columns with default labels and default names.
>> a=makeCustomDataframe(5,3,r_idx_nlevels=4,
r_idx_names=["FEE","FI","FO","FAM"],
c_idx_nlevels=2)
>> a=mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
"""
assert c_idx_nlevels > 0
assert r_idx_nlevels > 0
assert r_idx_type is None or \
(r_idx_type in ('i', 'f', 's',
'u', 'dt', 'p', 'td') and r_idx_nlevels == 1)
assert c_idx_type is None or \
(c_idx_type in ('i', 'f', 's',
'u', 'dt', 'p', 'td') and c_idx_nlevels == 1)
columns = makeCustomIndex(ncols, nlevels=c_idx_nlevels, prefix='C',
names=c_idx_names, ndupe_l=c_ndupe_l,
idx_type=c_idx_type)
index = makeCustomIndex(nrows, nlevels=r_idx_nlevels, prefix='R',
names=r_idx_names, ndupe_l=r_ndupe_l,
idx_type=r_idx_type)
# by default, generate data based on location
if data_gen_f is None:
data_gen_f = lambda r, c: "R%dC%d" % (r, c)
data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)]
return DataFrame(data, index, columns, dtype=dtype)
def _create_missing_idx(nrows, ncols, density, random_state=None):
if random_state is None:
random_state = np.random
else:
random_state = np.random.RandomState(random_state)
# below is cribbed from scipy.sparse
size = int(np.round((1 - density) * nrows * ncols))
# generate a few more to ensure unique values
min_rows = 5
fac = 1.02
extra_size = min(size + min_rows, fac * size)
def _gen_unique_rand(rng, _extra_size):
ind = rng.rand(int(_extra_size))
return np.unique(np.floor(ind * nrows * ncols))[:size]
ind = _gen_unique_rand(random_state, extra_size)
while ind.size < size:
extra_size *= 1.05
ind = _gen_unique_rand(random_state, extra_size)
j = np.floor(ind * 1. / nrows).astype(int)
i = (ind - j * nrows).astype(int)
return i.tolist(), j.tolist()
def makeMissingCustomDataframe(nrows, ncols, density=.9, random_state=None,
c_idx_names=True, r_idx_names=True,
c_idx_nlevels=1, r_idx_nlevels=1,
data_gen_f=None,
c_ndupe_l=None, r_ndupe_l=None, dtype=None,
c_idx_type=None, r_idx_type=None):
"""
Parameters
----------
density : float, optional
Float in (0, 1) giving the fraction of non-missing values in
the DataFrame.
random_state : {np.random.RandomState, int}, optional
Random number generator or random seed.
See makeCustomDataframe for descriptions of the rest of the parameters.
"""
df = makeCustomDataframe(nrows, ncols, c_idx_names=c_idx_names,
r_idx_names=r_idx_names,
c_idx_nlevels=c_idx_nlevels,
r_idx_nlevels=r_idx_nlevels,
data_gen_f=data_gen_f,
c_ndupe_l=c_ndupe_l, r_ndupe_l=r_ndupe_l,
dtype=dtype, c_idx_type=c_idx_type,
r_idx_type=r_idx_type)
i, j = _create_missing_idx(nrows, ncols, density, random_state)
df.values[i, j] = np.nan
return df
def makeMissingDataframe(density=.9, random_state=None):
df = makeDataFrame()
i, j = _create_missing_idx(*df.shape, density=density,
random_state=random_state)
df.values[i, j] = np.nan
return df
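# --- Usage sketch (added for illustration; `_demo_make_missing_dataframe` is
# a hypothetical helper, not part of the original module).
def _demo_make_missing_dataframe():
    # roughly 10% of the values are replaced with NaN, reproducibly
    df = makeMissingDataframe(density=.9, random_state=42)
    assert df.isnull().values.any()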
def add_nans(panel):
I, J, N = panel.shape
for i, item in enumerate(panel.items):
dm = panel[item]
for j, col in enumerate(dm.columns):
dm[col][:i + j] = np.NaN
return panel
def add_nans_panel4d(panel4d):
for l, label in enumerate(panel4d.labels):
panel = panel4d[label]
add_nans(panel)
return panel4d
class TestSubDict(dict):
def __init__(self, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
# Dependency checker when running tests.
#
# Copied this from nipy/nipype
# Copyright of respective developers, License: BSD-3
def skip_if_no_package(pkg_name, min_version=None, max_version=None,
app='pandas', checker=LooseVersion):
"""Check that the min/max version of the required package is installed.
If the package check fails, the test is automatically skipped.
Parameters
----------
pkg_name : string
Name of the required package.
min_version : string, optional
Minimal version number for required package.
max_version : string, optional
Max version number for required package.
app : string, optional
Application that is performing the check. For instance, the
name of the tutorial being executed that depends on specific
packages.
checker : object, optional
The class that will perform the version checking. Default is
distutils.version.LooseVersion.
Examples
--------
skip_if_no_package('numpy', min_version='1.3')
"""
import pytest
if app:
msg = '%s requires %s' % (app, pkg_name)
else:
msg = 'module requires %s' % pkg_name
if min_version:
msg += ' with version >= %s' % (min_version,)
if max_version:
msg += ' with version < %s' % (max_version,)
try:
mod = __import__(pkg_name)
except ImportError:
mod = None
try:
have_version = mod.__version__
except AttributeError:
pytest.skip('Cannot find version for %s' % pkg_name)
if min_version and checker(have_version) < checker(min_version):
pytest.skip(msg)
if max_version and checker(have_version) >= checker(max_version):
pytest.skip(msg)
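# --- Usage sketch (added for illustration; `_demo_skip_if_no_package` is a
# hypothetical helper, not part of the original module). Call it from inside
# a test: pytest.skip() raises if the version requirement is not met.
def _demo_skip_if_no_package():
    skip_if_no_package('numpy', min_version='1.9', app='demo test')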
def optional_args(decorator):
"""allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
it is decorating a function, i.e. something like this::
@my_decorator
def function(): pass
Calls decorator with decorator(f, *args, **kwargs)"""
@wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = not kwargs and len(args) == 1 and callable(args[0])
if is_decorating:
f = args[0]
args = []
return dec(f)
else:
return dec
return wrapper
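# Hedged illustration of @optional_args (the decorator and label are
# hypothetical): the same decorator works both bare and with keyword arguments.
@optional_args
def _example_tag(f, label="default"):
f._label = label
return f
@_example_tag
def _tagged_plain():
pass
@_example_tag(label="custom")
def _tagged_custom():
pass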
# skip tests on exceptions with this message
_network_error_messages = (
# 'urlopen error timed out',
# 'timeout: timed out',
# 'socket.timeout: timed out',
'timed out',
'Server Hangup',
'HTTP Error 503: Service Unavailable',
'502: Proxy Error',
'HTTP Error 502: internal error',
'HTTP Error 502',
'HTTP Error 503',
'HTTP Error 403',
'HTTP Error 400',
'Temporary failure in name resolution',
'Name or service not known',
'Connection refused',
'certificate verify',
)
# or this e.errno/e.reason.errno
_network_errno_vals = (
101, # Network is unreachable
111, # Connection refused
110, # Connection timed out
104, # Connection reset Error
54, # Connection reset by peer
60, # urllib.error.URLError: [Errno 60] Connection timed out
)
# Both of the above shouldn't mask real issues such as 404's
# or refused connections (changed DNS).
# But some tests (test_data yahoo) contact incredibly flaky
# servers.
# and conditionally raise on these exception types
_network_error_classes = (IOError, httplib.HTTPException)
if sys.version_info >= (3, 3):
_network_error_classes += (TimeoutError,) # noqa
def can_connect(url, error_classes=_network_error_classes):
"""Try to connect to the given url. True if succeeds, False if IOError
raised
Parameters
----------
url : basestring
The URL to try to connect to
Returns
-------
connectable : bool
Return True if no IOError (unable to connect) or URLError (bad url) was
raised
"""
try:
with urlopen(url):
pass
except error_classes:
return False
else:
return True
@optional_args
def network(t, url="http://www.google.com",
raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT,
check_before_test=False,
error_classes=_network_error_classes,
skip_errnos=_network_errno_vals,
_skip_on_messages=_network_error_messages,
):
"""
Label a test as requiring network connection and, if an error is
encountered, only raise if it does not find a network connection.
This decorator assumes an added contract for your test: under normal
conditions, the test must ONLY fail if it does not have network
connectivity.
You can call this in 3 ways: as a standard decorator, with keyword
arguments, or with a positional argument that is the url to check.
Parameters
----------
t : callable
The test requiring network connectivity.
url : path
The url to test via ``pandas.io.common.urlopen`` to check
for connectivity. Defaults to 'http://www.google.com'.
raise_on_error : bool
If True, never catches errors.
check_before_test : bool
If True, checks connectivity before running the test case.
error_classes : tuple or Exception
error classes to ignore. If not in ``error_classes``, raises the error.
defaults to IOError. Be careful about changing the error classes here.
skip_errnos : iterable of int
Any exception that has .errno or .reason.errno set to one
of these values will be skipped with an appropriate
message.
_skip_on_messages : iterable of string
Any exception e for which one of the strings is
a substring of str(e) will be skipped with an appropriate
message. Intended to suppress errors where an errno isn't available.
Notes
-----
* ``raise_on_error`` supersedes ``check_before_test``
Returns
-------
t : callable
The decorated test ``t``, with checks for connectivity errors.
Example
-------
Errors raised by tests decorated with @network are only re-raised if it is
possible to make a network connection to the check URL (defaults to
google.com)::
>>> from pandas.util.testing import network
>>> from pandas.io.common import urlopen
>>> @network
... def test_network():
... with urlopen("rabbit://bonanza.com"):
... pass
Traceback (most recent call last):
...
URLError: <urlopen error unknown url type: rabbit>
You can specify alternative URLs::
>>> @network("http://www.yahoo.com")
... def test_something_with_yahoo():
... raise IOError("Failure Message")
>>> test_something_with_yahoo()
Traceback (most recent call last):
...
IOError: Failure Message
If you set check_before_test, it will check the url first and not run the
test on failure::
>>> @network("failing://url.blaher", check_before_test=True)
... def test_something():
... print("I ran!")
... raise ValueError("Failure")
>>> test_something()
Traceback (most recent call last):
...
Errors not related to networking will always be raised.
"""
from pytest import skip
t.network = True
@wraps(t)
def wrapper(*args, **kwargs):
if check_before_test and not raise_on_error:
if not can_connect(url, error_classes):
skip()
try:
return t(*args, **kwargs)
except Exception as e:
errno = getattr(e, 'errno', None)
if errno is None and hasattr(e, "reason"):
errno = getattr(e.reason, 'errno', None)
if errno in skip_errnos:
skip("Skipping test due to known errno"
" and error %s" % e)
try:
e_str = traceback.format_exc()
except Exception:
e_str = str(e)
if any([m.lower() in e_str.lower() for m in _skip_on_messages]):
skip("Skipping test because exception "
"message is known and error %s" % e)
if not isinstance(e, error_classes):
raise
if raise_on_error or can_connect(url, error_classes):
raise
else:
skip("Skipping test due to lack of connectivity"
" and error %s" % e)
return wrapper
with_connectivity_check = network
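# Hedged usage sketch (the URL and test body are hypothetical; urlopen is
# assumed to be the one imported earlier in this module): errors raised inside
# the test are only re-raised when the check URL is reachable.
@network(url="http://www.example.com", check_before_test=True)
def _example_network_test():
with urlopen("http://www.example.com"):
pass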
class SimpleMock(object):
"""
Poor man's mocking object
Note: only works for new-style classes, assumes __getattribute__ exists.
>>> a = type("Duck",(),{})
>>> a.attr1,a.attr2 ="fizz","buzz"
>>> b = SimpleMock(a,"attr1","bar")
>>> b.attr1 == "bar" and b.attr2 == "buzz"
True
>>> a.attr1 == "fizz" and a.attr2 == "buzz"
True
"""
def __init__(self, obj, *args, **kwds):
assert(len(args) % 2 == 0)
attrs = kwds.get("attrs", {})
for k, v in zip(args[::2], args[1::2]):
# dict comprehensions break 2.6
attrs[k] = v
self.attrs = attrs
self.obj = obj
def __getattribute__(self, name):
attrs = object.__getattribute__(self, "attrs")
obj = object.__getattribute__(self, "obj")
return attrs.get(name, type(obj).__getattribute__(obj, name))
@contextmanager
def stdin_encoding(encoding=None):
"""
Context manager for running bits of code while emulating an arbitrary
stdin encoding.
>>> import sys
>>> _encoding = sys.stdin.encoding
>>> with stdin_encoding('AES'): sys.stdin.encoding
'AES'
>>> sys.stdin.encoding==_encoding
True
"""
import sys
_stdin = sys.stdin
sys.stdin = SimpleMock(sys.stdin, "encoding", encoding)
yield
sys.stdin = _stdin
def assert_raises_regex(_exception, _regexp, _callable=None,
*args, **kwargs):
"""
Check that the specified Exception is raised and that the error message
matches a given regular expression pattern. This may be a regular
expression object or a string containing a regular expression suitable
for use by `re.search()`.
This is a port of the `assertRaisesRegexp` function from unittest in
Python 2.7. However, with our migration to `pytest`, please refrain
from using this. Instead, use the following paradigm:
with pytest.raises(_exception) as exc_info:
func(*args, **kwargs)
exc_info.match(reg_exp)
Examples
--------
>>> assert_raises_regex(ValueError, 'invalid literal for.*XYZ', int, 'XYZ')
>>> import re
>>> assert_raises_regex(ValueError, re.compile('literal'), int, 'XYZ')
If an exception of a different type is raised, it bubbles up.
>>> assert_raises_regex(TypeError, 'literal', int, 'XYZ')
Traceback (most recent call last):
...
ValueError: invalid literal for int() with base 10: 'XYZ'
>>> dct = dict()
>>> assert_raises_regex(KeyError, 'pear', dct.__getitem__, 'apple')
Traceback (most recent call last):
...
AssertionError: "pear" does not match "'apple'"
You can also use this in a with statement.
>>> with assert_raises_regex(TypeError, 'unsupported operand type\(s\)'):
... 1 + {}
>>> with assert_raises_regex(TypeError, 'banana'):
... 'apple'[0] = 'b'
Traceback (most recent call last):
...
AssertionError: "banana" does not match "'str' object does not support \
item assignment"
"""
manager = _AssertRaisesContextmanager(exception=_exception, regexp=_regexp)
if _callable is not None:
with manager:
_callable(*args, **kwargs)
else:
return manager
class _AssertRaisesContextmanager(object):
"""
Context manager behind `assert_raises_regex`.
"""
def __init__(self, exception, regexp=None):
"""
Initialize an _AssertRaisesContextManager instance.
Parameters
----------
exception : class
The expected Exception class.
regexp : str, default None
The regex to compare against the Exception message.
"""
self.exception = exception
if regexp is not None and not hasattr(regexp, "search"):
regexp = re.compile(regexp, re.DOTALL)
self.regexp = regexp
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, trace_back):
expected = self.exception
if not exc_type:
exp_name = getattr(expected, "__name__", str(expected))
raise AssertionError("{0} not raised.".format(exp_name))
return self.exception_matches(exc_type, exc_value, trace_back)
def exception_matches(self, exc_type, exc_value, trace_back):
"""
Check that the Exception raised matches the expected Exception
and expected error message regular expression.
Parameters
----------
exc_type : class
The type of Exception raised.
exc_value : Exception
The instance of `exc_type` raised.
trace_back : stack trace object
The traceback object associated with `exc_value`.
Returns
-------
is_matched : bool
Whether or not the Exception raised matches the expected
Exception class and expected error message regular expression.
Raises
------
AssertionError : The error message provided does not match
the expected error message regular expression.
"""
if issubclass(exc_type, self.exception):
if self.regexp is not None:
val = str(exc_value)
if not self.regexp.search(val):
e = AssertionError('"%s" does not match "%s"' %
(self.regexp.pattern, str(val)))
raise_with_traceback(e, trace_back)
return True
else:
# Failed, so allow Exception to bubble up.
return False
@contextmanager
def assert_produces_warning(expected_warning=Warning, filter_level="always",
clear=None, check_stacklevel=True):
"""
Context manager for running code that expects to raise (or not raise)
warnings. Checks that code raises the expected warning and only the
expected warning. Pass ``False`` or ``None`` to check that it does *not*
raise a warning. Defaults to the builtin ``Warning``, the base class of all
warnings (basically a wrapper around ``warnings.catch_warnings``).
>>> import warnings
>>> with assert_produces_warning():
... warnings.warn(UserWarning())
...
>>> with assert_produces_warning(False):
... warnings.warn(RuntimeWarning())
...
Traceback (most recent call last):
...
AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].
>>> with assert_produces_warning(UserWarning):
... warnings.warn(RuntimeWarning())
Traceback (most recent call last):
...
AssertionError: Did not see expected warning of class 'UserWarning'.
.. warning:: This is *not* thread-safe.
"""
with warnings.catch_warnings(record=True) as w:
if clear is not None:
# make sure that we are clearing these warnings
# if they have happened before
# to guarantee that we will catch them
if not is_list_like(clear):
clear = [clear]
for m in clear:
try:
m.__warningregistry__.clear()
except Exception:
pass
saw_warning = False
warnings.simplefilter(filter_level)
yield w
extra_warnings = []
for actual_warning in w:
if (expected_warning and issubclass(actual_warning.category,
expected_warning)):
saw_warning = True
if check_stacklevel and issubclass(actual_warning.category,
(FutureWarning,
DeprecationWarning)):
from inspect import getframeinfo, stack
caller = getframeinfo(stack()[2][0])
msg = ("Warning not set with correct stacklevel. "
"File where warning is raised: {0} != {1}. "
"Warning message: {2}".format(
actual_warning.filename, caller.filename,
actual_warning.message))
assert actual_warning.filename == caller.filename, msg
else:
extra_warnings.append(actual_warning.category.__name__)
if expected_warning:
assert saw_warning, ("Did not see expected warning of class %r."
% expected_warning.__name__)
assert not extra_warnings, ("Caused unexpected warning(s): %r."
% extra_warnings)
class RNGContext(object):
"""
Context manager to set the numpy random number generator seed. Returns
to the original value upon exiting the context manager.
Parameters
----------
seed : int
Seed for numpy.random.seed
Examples
--------
with RNGContext(42):
np.random.randn()
"""
def __init__(self, seed):
self.seed = seed
def __enter__(self):
self.start_state = np.random.get_state()
np.random.seed(self.seed)
def __exit__(self, exc_type, exc_value, traceback):
np.random.set_state(self.start_state)
@contextmanager
def use_numexpr(use, min_elements=expr._MIN_ELEMENTS):
olduse = expr._USE_NUMEXPR
oldmin = expr._MIN_ELEMENTS
expr.set_use_numexpr(use)
expr._MIN_ELEMENTS = min_elements
yield
expr._MIN_ELEMENTS = oldmin
expr.set_use_numexpr(olduse)
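# Hedged usage sketch (assumes numexpr is installed): temporarily force
# numexpr-backed evaluation even for a tiny frame by lowering the threshold.
def _example_use_numexpr():
df = makeDataFrame()
with use_numexpr(True, min_elements=0):
return df + df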
def test_parallel(num_threads=2, kwargs_list=None):
"""Decorator to run the same function multiple times in parallel.
Parameters
----------
num_threads : int, optional
The number of times the function is run in parallel.
kwargs_list : list of dicts, optional
The list of kwargs to update original
function kwargs on different threads.
Notes
-----
This decorator does not pass the return value of the decorated function.
Original from scikit-image:
https://github.com/scikit-image/scikit-image/pull/1519
"""
assert num_threads > 0
has_kwargs_list = kwargs_list is not None
if has_kwargs_list:
assert len(kwargs_list) == num_threads
import threading
def wrapper(func):
@wraps(func)
def inner(*args, **kwargs):
if has_kwargs_list:
update_kwargs = lambda i: dict(kwargs, **kwargs_list[i])
else:
update_kwargs = lambda i: kwargs
threads = []
for i in range(num_threads):
updated_kwargs = update_kwargs(i)
thread = threading.Thread(target=func, args=args,
kwargs=updated_kwargs)
threads.append(thread)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return inner
return wrapper
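# Hedged usage sketch (the worker body and kwargs are hypothetical): run the
# same append in four threads, each with its own offset; list.append is atomic
# in CPython, so no extra locking is needed here.
def _example_test_parallel():
results = []
@test_parallel(num_threads=4, kwargs_list=[{'offset': i} for i in range(4)])
def worker(offset=0):
results.append(offset)
worker()
return sorted(results) # [0, 1, 2, 3]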
class SubclassedSeries(Series):
_metadata = ['testattr', 'name']
@property
def _constructor(self):
return SubclassedSeries
@property
def _constructor_expanddim(self):
return SubclassedDataFrame
class SubclassedDataFrame(DataFrame):
_metadata = ['testattr']
@property
def _constructor(self):
return SubclassedDataFrame
@property
def _constructor_sliced(self):
return SubclassedSeries
class SubclassedSparseSeries(pd.SparseSeries):
_metadata = ['testattr']
@property
def _constructor(self):
return SubclassedSparseSeries
@property
def _constructor_expanddim(self):
return SubclassedSparseDataFrame
class SubclassedSparseDataFrame(pd.SparseDataFrame):
_metadata = ['testattr']
@property
def _constructor(self):
return SubclassedSparseDataFrame
@property
def _constructor_sliced(self):
return SubclassedSparseSeries
class SubclassedCategorical(Categorical):
@property
def _constructor(self):
return SubclassedCategorical
@contextmanager
def patch(ob, attr, value):
"""Temporarily patch an attribute of an object.
Parameters
----------
ob : any
The object to patch. This must support attribute assignment for `attr`.
attr : str
The name of the attribute to patch.
value : any
The temporary attribute to assign.
Examples
--------
>>> class C(object):
... attribute = 'original'
...
>>> C.attribute
'original'
>>> with patch(C, 'attribute', 'patched'):
... in_context = C.attribute
...
>>> in_context
'patched'
>>> C.attribute # the value is reset when the context manager exists
'original'
Correctly replaces attribute when the manager exits with an exception.
>>> with patch(C, 'attribute', 'patched'):
... in_context = C.attribute
... raise ValueError()
Traceback (most recent call last):
...
ValueError
>>> in_context
'patched'
>>> C.attribute
'original'
"""
noattr = object() # mark that the attribute never existed
old = getattr(ob, attr, noattr)
setattr(ob, attr, value)
try:
yield
finally:
if old is noattr:
delattr(ob, attr)
else:
setattr(ob, attr, old)
@contextmanager
def set_timezone(tz):
"""Context manager for temporarily setting a timezone.
Parameters
----------
tz : str
A string representing a valid timezone.
Examples
--------
>>> from datetime import datetime
>>> from dateutil.tz import tzlocal
>>> tzlocal().tzname(datetime.now())
'IST'
>>> with set_timezone('US/Eastern'):
... tzlocal().tzname(datetime.now())
...
'EDT'
"""
if is_platform_windows():
import pytest
pytest.skip("timezone setting not supported on windows")
import os
import time
def setTZ(tz):
if tz is None:
try:
del os.environ['TZ']
except KeyError:
pass
else:
os.environ['TZ'] = tz
time.tzset()
orig_tz = os.environ.get('TZ')
setTZ(tz)
try:
yield
finally:
setTZ(orig_tz)
| mit |
harry-7/Plinth | plinth/modules/upgrades/forms.py | 9 | 1246 | #
# This file is part of Plinth.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Forms for configuring unattended-upgrades.
"""
from django import forms
from django.utils.translation import ugettext_lazy as _
class ConfigureForm(forms.Form):
"""Configuration form to enable/disable automatic upgrades."""
auto_upgrades_enabled = forms.BooleanField(
label=_('Enable automatic upgrades'), required=False,
help_text=_('When enabled, the unattended-upgrades program will be run '
'once per day. It will attempt to perform any package '
'upgrades that are available.'))
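# Hedged usage sketch (not part of the original module, requires a configured
# Django environment): how a view might bind and validate this form.
def _example_configure_form():
form = ConfigureForm(data={'auto_upgrades_enabled': 'on'})
return form.is_valid() and form.cleaned_data['auto_upgrades_enabled']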
| agpl-3.0 |
pixelmatix/fadecandy | firmware/benchmark.py | 16 | 2605 | #!/usr/bin/env python
#
# Read the device's performance counters, calculate benchmark values.
#
# Micah Elizabeth Scott
# This example code is released into the public domain.
#
import usb.core
import usb.util
import time, struct, sys
dev = usb.core.find(idVendor=0x1d50, idProduct=0x607a)
if not dev:
raise IOError("No Fadecandy interfaces found")
dev.set_configuration()
print "Serial number: %s" % usb.util.get_string(dev, 255, dev.iSerialNumber)
def readCounter(index):
return struct.unpack('<I', dev.ctrl_transfer(0xC0, 0x01, 0, index, 4, 1000))[0]
def counterDiff(c1, c2):
return (c2 - c1) & 0xFFFFFFFF
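# Hedged illustration: counterDiff() masks to 32 bits, so a device counter
# that wrapped from near 2**32 back past zero still yields the true delta.
def _example_counter_wrap():
assert counterDiff(0xFFFFFFFE, 5) == 7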
def initLUT():
# Set up a default color LUT
lut = [0] * (64 * 25)
for index in range(25):
lut[index*64] = index | 0x40
lut[24*64] |= 0x20
for channel in range(3):
for row in range(257):
value = min(0xFFFF, int((row / 256.0) * 0x10000))
i = channel * 257 + row
packetNum = i / 31
packetIndex = i % 31
lut[packetNum*64 + 2 + packetIndex*2] = value & 0xFF
lut[packetNum*64 + 3 + packetIndex*2] = value >> 8
lutPackets = ''.join(map(chr, lut))
dev.write(1, lutPackets)
print "LUT programmed"
def dummyFrame(byte):
buffer = ''
for i in range(25):
final = i == 24
buffer += chr( (final << 5) | i ) + chr(byte) * 63
return buffer
def measureFrameRate():
sys.stderr.write("Calculating frame rate average...\n")
duration = 8.0
t1 = t2 = time.time()
c1 = readCounter(0) # Rendered frames
k1 = readCounter(1) # Received keyframes
# Dummy frames
data = (dummyFrame(0) + dummyFrame(0x80)) * 200
# Use number of keyframes handled per second to estimate USB bandwidth
megabitsPerFrame = len(dummyFrame(0)) * (8.0 / 1e6)
# Average the frame rate for up to 'duration' seconds.
fps = 0.0
try:
while (t2 - t1) < duration:
# Keep the EP1 OUT pipe full-ish, so we measure it while the USB driver is busy
dev.write(1, data)
t2 = time.time()
c2 = readCounter(0)
k2 = readCounter(1)
fps = counterDiff(c1, c2) / (t2 - t1)
kfps = counterDiff(k1, k2) / (t2 - t1)
mbps = kfps * megabitsPerFrame
# Not sure how accurate this USB throughput is, the blocking write() is likely a problem.
sys.stderr.write("\r %.2f FPS, %.2f KFPS, %.2f Mbps " % (fps, kfps, mbps))
except KeyboardInterrupt:
pass
sys.stderr.write("\n")
return fps
initLUT()
measureFrameRate()
| mit |
DIRACGrid/DIRAC | integration_tests.py | 1 | 25030 | #!/usr/bin/env python
import fnmatch
import os
from pathlib import Path
import re
import shlex
import subprocess
import sys
import tempfile
import time
from concurrent.futures import ThreadPoolExecutor
from contextlib import contextmanager
from typing import Optional
import click
import git
import typer
import yaml
from packaging.version import Version
from typer import colors as c
# Editable configuration
DEFAULT_HOST_OS = "cc7"
DEFAULT_MYSQL_VER = "8.0"
DEFAULT_ES_VER = "7.9.1"
FEATURE_VARIABLES = {
"DIRACOSVER": "master",
"DIRACOS_TARBALL_PATH": None,
"TEST_HTTPS": "No",
"DIRAC_FEWER_CFG_LOCKS": None,
"DIRAC_USE_NEWTHREADPOOL": None,
"USE_PYTHON3": None,
}
DEFAULT_MODULES = {
"DIRAC": Path(__file__).parent.absolute(),
}
# Static configuration
DB_USER = "Dirac"
DB_PASSWORD = "Dirac"
DB_ROOTUSER = "root"
DB_ROOTPWD = "password"
DB_HOST = "mysql"
DB_PORT = "3306"
# Implementation details
LOG_LEVEL_MAP = {
"ALWAYS": (c.BLACK, c.WHITE),
"NOTICE": (None, c.MAGENTA),
"INFO": (None, c.GREEN),
"VERBOSE": (None, c.CYAN),
"DEBUG": (None, c.BLUE),
"WARN": (None, c.YELLOW),
"ERROR": (None, c.RED),
"FATAL": (c.RED, c.BLACK),
}
LOG_PATTERN = re.compile(r"^[\d\-]{10} [\d:]{8} UTC [^\s]+ ([A-Z]+):")
class NaturalOrderGroup(click.Group):
"""Group for showing subcommands in the correct order"""
def list_commands(self, ctx):
return self.commands.keys()
app = typer.Typer(
cls=NaturalOrderGroup,
help=f"""Run the DIRAC integration tests.
A local DIRAC setup can be created and tested by running:
\b
./integration_tests.py create
This is equivalent to running:
\b
./integration_tests.py prepare-environment
./integration_tests.py install-server
./integration_tests.py install-client
./integration_tests.py test-server
./integration_tests.py test-client
The test setup can be shutdown using:
\b
./integration_tests.py destroy
See below for additional subcommands which are useful during local development.
## Features
The currently known features and their default values are:
\b
HOST_OS: {DEFAULT_HOST_OS!r}
MYSQL_VER: {DEFAULT_MYSQL_VER!r}
ES_VER: {DEFAULT_ES_VER!r}
{(os.linesep + ' ').join(['%s: %r' % x for x in FEATURE_VARIABLES.items()])}
All features can be prefixed with "SERVER_" or "CLIENT_" to limit their scope.
## Extensions
Integration tests can be run for extensions to DIRAC by specifying the module
name and path such as:
\b
./integration_tests.py create --extra-module MyDIRAC=/path/to/MyDIRAC
This will modify the setup process based on the contents of
`MyDIRAC/tests/.dirac-ci-config.yaml`. See the Vanilla DIRAC file for the
available options.
## Command completion
Command completion of typer-based scripts can be enabled by running:
typer --install-completion
After restarting your terminal, command completion is available using:
typer ./integration_tests.py run ...
"""
)
@app.command()
def create(
flags: Optional[list[str]] = typer.Argument(None),
editable: Optional[bool] = None,
extra_module: Optional[list[str]] = None,
release_var: Optional[str] = None,
run_server_tests: bool = True,
run_client_tests: bool = True,
):
"""Start a local instance of the integration tests"""
prepare_environment(flags, editable, extra_module, release_var)
install_server()
install_client()
exit_code = 0
if run_server_tests:
try:
test_server()
except TestExit as e:
exit_code += e.exit_code
else:
# test_server() always raises TestExit, so reaching here indicates a bug
raise NotImplementedError()
if run_client_tests:
try:
test_client()
except TestExit as e:
exit_code += e.exit_code
else:
# test_client() always raises TestExit, so reaching here indicates a bug
raise NotImplementedError()
if exit_code != 0:
typer.secho("One or more tests failed", err=True, fg=c.RED)
raise typer.Exit(exit_code)
@app.command()
def destroy():
"""Destroy a local instance of the integration tests"""
typer.secho("Shutting down and removing containers", err=True, fg=c.GREEN)
with _gen_docker_compose(DEFAULT_MODULES) as docker_compose_fn:
os.execvpe(
"docker-compose",
["docker-compose", "-f", docker_compose_fn, "down", "--remove-orphans", "-t", "0"],
_make_env({}),
)
@app.command()
def prepare_environment(
flags: Optional[list[str]] = typer.Argument(None),
editable: Optional[bool] = None,
extra_module: Optional[list[str]] = None,
release_var: Optional[str] = None,
):
"""Prepare the local environment for installing DIRAC."""
_check_containers_running(is_up=False)
if editable is None:
editable = sys.stdout.isatty()
typer.secho(
f"No value passed for --[no-]editable, automatically detected: {editable}",
fg=c.YELLOW,
)
typer.echo(f"Preparing environment")
modules = DEFAULT_MODULES | dict(f.split("=", 1) for f in extra_module)
modules = {k: Path(v).absolute() for k, v in modules.items()}
flags = dict(f.split("=", 1) for f in flags)
docker_compose_env = _make_env(flags)
server_flags = {}
client_flags = {}
for key, value in flags.items():
if key.startswith("SERVER_"):
server_flags[key[len("SERVER_"):]] = value
elif key.startswith("CLIENT_"):
client_flags[key[len("CLIENT_"):]] = value
else:
server_flags[key] = value
client_flags[key] = value
server_config = _make_config(modules, server_flags, release_var, editable)
client_config = _make_config(modules, client_flags, release_var, editable)
typer.secho("Running docker-compose to create containers", fg=c.GREEN)
with _gen_docker_compose(modules) as docker_compose_fn:
subprocess.run(
["docker-compose", "-f", docker_compose_fn, "up", "-d"],
check=True,
env=docker_compose_env,
)
typer.secho("Creating users in server and client containers", fg=c.GREEN)
for container_name in ["server", "client"]:
if os.getuid() == 0:
continue
cmd = _build_docker_cmd(container_name, use_root=True, cwd="/")
gid = str(os.getgid())
uid = str(os.getuid())
ret = subprocess.run(cmd + ["groupadd", "--gid", gid, "dirac"], check=False)
if ret.returncode != 0:
typer.secho(f"Failed to add add group dirac with id={gid}", fg=c.YELLOW)
subprocess.run(
cmd
+ [
"useradd",
"--uid",
uid,
"--gid",
gid,
"-s",
"/bin/bash",
"-d",
"/home/dirac",
"dirac",
],
check=True,
)
subprocess.run(cmd + ["chown", "dirac", "/home/dirac"], check=True)
typer.secho("Creating MySQL user", fg=c.GREEN)
cmd = ["docker", "exec", "mysql", "mysql", f"--password={DB_ROOTPWD}", "-e"]
# It sometimes takes a while for MySQL to be ready so wait for a while if needed
for _ in range(10):
ret = subprocess.run(
cmd + [f"CREATE USER '{DB_USER}'@'%' IDENTIFIED BY '{DB_PASSWORD}';"],
check=False,
)
if ret.returncode == 0:
break
typer.secho("Failed to connect to MySQL, will retry in 10 seconds", fg=c.YELLOW)
time.sleep(10)
else:
raise Exception(ret)
subprocess.run(
cmd + [f"CREATE USER '{DB_USER}'@'localhost' IDENTIFIED BY '{DB_PASSWORD}';"],
check=True,
)
subprocess.run(
cmd + [f"CREATE USER '{DB_USER}'@'mysql' IDENTIFIED BY '{DB_PASSWORD}';"],
check=True,
)
typer.secho("Copying files to containers", fg=c.GREEN)
for name, config in [("server", server_config), ("client", client_config)]:
if path := config.get("DIRACOS_TARBALL_PATH"):
path = Path(path)
if config["USE_PYTHON3"]:
config["DIRACOS_TARBALL_PATH"] = f"/{path.name}"
subprocess.run(
["docker", "cp", str(path), f"{name}:/{config['DIRACOS_TARBALL_PATH']}"],
check=True,
)
else:
md5_fn = Path(str(path).replace(".tar.gz", ".md5"))
if not md5_fn.exists():
typer.secho(
"Failed to find MD5 filename for DIRACOS_TARBALL_PATH. "
f"Expected at: {md5_fn}",
err=True,
fg=c.RED,
)
raise typer.Exit(1)
subprocess.run(["docker", "cp", str(path), f"{name}:/{path.name}"], check=True)
subprocess.run(["docker", "cp", str(md5_fn), f"{name}:/{md5_fn.name}"], check=True)
config["DIRACOS_TARBALL_PATH"] = "/"
config["DIRACOSVER"] = md5_fn.stem.split("-", 1)[1]
config_as_shell = _dict_to_shell(config)
typer.secho(f"## {name.title()} config is:", fg=c.BRIGHT_WHITE, bg=c.BLACK)
typer.secho(config_as_shell)
with tempfile.TemporaryDirectory() as tmpdir:
path = Path(tmpdir) / "CONFIG"
path.write_text(config_as_shell)
subprocess.run(
["docker", "cp", str(path), f"{name}:/home/dirac"],
check=True,
)
for module_name, module_configs in _load_module_configs(modules).items():
for command in module_configs.get("commands", {}).get("post-prepare", []):
typer.secho(
f"Running post-prepare command for {module_name}: {command}",
err=True,
fg=c.GREEN,
)
subprocess.run(command, check=True, shell=True)
@app.command()
def install_server():
"""Install DIRAC in the server container."""
_check_containers_running()
typer.secho("Running server installation", fg=c.GREEN)
base_cmd = _build_docker_cmd("server", tty=False)
subprocess.run(
base_cmd
+ ["bash", "/home/dirac/LocalRepo/TestCode/DIRAC/tests/CI/install_server.sh"],
check=True,
)
typer.secho("Copying credentials and certificates", fg=c.GREEN)
base_cmd = _build_docker_cmd("client", tty=False)
subprocess.run(
base_cmd
+ [
"mkdir",
"-p",
"/home/dirac/ServerInstallDIR/user",
"/home/dirac/ClientInstallDIR/etc",
"/home/dirac/.globus",
],
check=True,
)
for path in [
"etc/grid-security",
"user/client.pem",
"user/client.key",
f"/tmp/x509up_u{os.getuid()}",
]:
source = os.path.join("/home/dirac/ServerInstallDIR", path)
ret = subprocess.run(
["docker", "cp", f"server:{source}", "-"],
check=True,
text=False,
stdout=subprocess.PIPE,
)
if path.startswith("user/"):
dest = f"client:/home/dirac/ServerInstallDIR/{os.path.dirname(path)}"
elif path.startswith("/"):
dest = f"client:{os.path.dirname(path)}"
else:
dest = f"client:/home/dirac/ClientInstallDIR/{os.path.dirname(path)}"
subprocess.run(
["docker", "cp", "-", dest], check=True, text=False, input=ret.stdout
)
subprocess.run(
base_cmd
+ [
"bash",
"-c",
"cp /home/dirac/ServerInstallDIR/user/client.* /home/dirac/.globus/",
],
check=True,
)
@app.command()
def install_client():
"""Install DIRAC in the client container."""
_check_containers_running()
typer.secho("Running client installation", fg=c.GREEN)
base_cmd = _build_docker_cmd("client")
subprocess.run(
base_cmd
+ ["bash", "/home/dirac/LocalRepo/TestCode/DIRAC/tests/CI/install_client.sh"],
check=True,
)
@app.command()
def test_server():
"""Run the server integration tests."""
_check_containers_running()
typer.secho("Running server tests", err=True, fg=c.GREEN)
base_cmd = _build_docker_cmd("server")
ret = subprocess.run(
base_cmd + ["bash", "TestCode/DIRAC/tests/CI/run_tests.sh"], check=False
)
color = c.GREEN if ret.returncode == 0 else c.RED
typer.secho(f"Server tests finished with {ret.returncode}", err=True, fg=color)
raise TestExit(ret.returncode)
@app.command()
def test_client():
"""Run the client integration tests."""
_check_containers_running()
typer.secho("Running client tests", err=True, fg=c.GREEN)
base_cmd = _build_docker_cmd("client")
ret = subprocess.run(
base_cmd + ["bash", "TestCode/DIRAC/tests/CI/run_tests.sh"], check=False
)
color = c.GREEN if ret.returncode == 0 else c.RED
typer.secho(f"Client tests finished with {ret.returncode}", err=True, fg=color)
raise TestExit(ret.returncode)
@app.command()
def exec_server():
"""Start an interactive session in the server container."""
_check_containers_running()
cmd = _build_docker_cmd("server")
cmd += [
"bash",
"-c",
". $HOME/CONFIG && . $HOME/ServerInstallDIR/bashrc && exec bash",
]
typer.secho("Opening prompt inside server container", err=True, fg=c.GREEN)
os.execvp(cmd[0], cmd)
@app.command()
def exec_client():
"""Start an interactive session in the client container."""
_check_containers_running()
cmd = _build_docker_cmd("client")
cmd += [
"bash",
"-c",
". $HOME/CONFIG && . $HOME/ClientInstallDIR/bashrc && exec bash",
]
typer.secho("Opening prompt inside client container", err=True, fg=c.GREEN)
os.execvp(cmd[0], cmd)
@app.command()
def exec_mysql():
"""Start an interactive session in the server container."""
_check_containers_running()
cmd = _build_docker_cmd("mysql", use_root=True, cwd='/')
cmd += [
"bash",
"-c",
f"exec mysql --user={DB_USER} --password={DB_PASSWORD}",
]
typer.secho("Opening prompt inside server container", err=True, fg=c.GREEN)
os.execvp(cmd[0], cmd)
@app.command()
def list_services():
"""List the services which have been running.
Only the services for which /log/current exists are shown.
"""
_check_containers_running()
typer.secho("Known services:", err=True)
for service in _list_services():
typer.secho(f"* {service}", err=True)
@app.command()
def runsvctrl(command: str, pattern: str):
"""Execute runsvctrl inside the server container."""
_check_containers_running()
cmd = _build_docker_cmd("server", cwd="/home/dirac/ServerInstallDIR/runit")
services = fnmatch.filter(_list_services(), pattern)
if not services:
typer.secho(f"No services match {pattern!r}", fg=c.RED)
raise typer.Exit(code=1)
cmd += ["runsvctrl", command] + services
os.execvp(cmd[0], cmd)
@app.command()
def logs(pattern: str = "*", lines: int = 10, follow: bool = True):
"""Show DIRAC's logs from the service container.
For services matching [--pattern] show the most recent [--lines] from the
logs. If [--follow] is True, continuously stream the logs.
"""
_check_containers_running()
services = _list_services()
base_cmd = _build_docker_cmd("server", tty=False) + ["tail"]
base_cmd += [f"--lines={lines}"]
if follow:
base_cmd += ["-f"]
with ThreadPoolExecutor(len(services)) as pool:
for service in fnmatch.filter(services, pattern):
cmd = base_cmd + [f"ServerInstallDIR/runit/{service}/log/current"]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=None, text=True)
pool.submit(_log_popen_stdout, p)
class TestExit(typer.Exit):
pass
@contextmanager
def _gen_docker_compose(modules):
# Load the docker-compose configuration and mount the necessary volumes
input_fn = Path(__file__).parent / "tests/CI/docker-compose.yml"
docker_compose = yaml.safe_load(input_fn.read_text())
volumes = [
f"{path}:/home/dirac/LocalRepo/ALTERNATIVE_MODULES/{name}"
for name, path in modules.items()
]
volumes += [
f"{path}:/home/dirac/LocalRepo/TestCode/{name}"
for name, path in modules.items()
]
docker_compose["services"]["dirac-server"]["volumes"] = volumes[:]
docker_compose["services"]["dirac-client"]["volumes"] = volumes[:]
# Add any extension services
for module_name, module_configs in _load_module_configs(modules).items():
for service_name, service_config in module_configs["extra-services"].items():
typer.secho(f"Adding service {service_name} for {module_name}", err=True, fg=c.GREEN)
docker_compose["services"][service_name] = service_config.copy()
docker_compose["services"][service_name]["volumes"] = volumes[:]
# Write to a temporary file with the appropriate profile name
prefix = "ci"
with tempfile.TemporaryDirectory() as tmpdir:
output_fn = Path(tmpdir) / prefix / "docker-compose.yml"
output_fn.parent.mkdir()
output_fn.write_text(yaml.safe_dump(docker_compose, sort_keys=False))
yield output_fn
def _check_containers_running(*, is_up=True):
with _gen_docker_compose(DEFAULT_MODULES) as docker_compose_fn:
running_containers = subprocess.run(
["docker-compose", "-f", docker_compose_fn, "ps", "-q", "-a"],
stdout=subprocess.PIPE,
check=True,
text=True,
).stdout.split("\n")
if is_up:
if not any(running_containers):
typer.secho(
f"No running containers found, environment must be prepared first!",
err=True,
fg=c.RED,
)
raise typer.Exit(code=1)
else:
if any(running_containers):
typer.secho(
f"Running instance already found, it must be destroyed first!",
err=True,
fg=c.RED,
)
raise typer.Exit(code=1)
def _find_dirac_release_and_branch():
# Start by looking for the GitHub/GitLab environment variables
ref = os.environ.get("CI_COMMIT_REF_NAME", os.environ.get("GITHUB_REF"))
if ref == "refs/heads/integration":
return "integration", ""
ref = os.environ.get(
"CI_MERGE_REQUEST_TARGET_BRANCH_NAME", os.environ.get("GITHUB_BASE_REF")
)
if ref == "integration":
return "integration", ""
repo = git.Repo(os.getcwd())
# Try to make sure the upstream remote is up to date
try:
upstream = repo.remote("upstream")
except ValueError:
typer.secho("No upstream remote found, adding", err=True, fg=c.YELLOW)
upstream = repo.create_remote(
"upstream", "https://github.com/DIRACGrid/DIRAC.git"
)
try:
upstream.fetch()
except Exception:
typer.secho("Failed to fetch from remote 'upstream'", err=True, fg=c.YELLOW)
# Find the most recent tag on the current branch
version = Version(
repo.git.describe(
dirty=True,
tags=True,
long=True,
match="*[0-9]*",
exclude=["v[0-9]r*", "v[0-9][0-9]r*"],
).split("-")[0]
)
# See if there is a remote branch named "rel-vXrY"
version_branch = f"rel-v{version.major}r{version.minor}"
try:
upstream.refs[version_branch]
except IndexError:
typer.secho(
f"Failed to find branch for {version_branch}, defaulting to integration",
err=True,
fg=c.YELLOW,
)
return "integration", ""
else:
return "", f"v{version.major}r{version.minor}"
def _make_env(flags):
env = os.environ.copy()
env["DIRAC_UID"] = str(os.getuid())
env["DIRAC_GID"] = str(os.getgid())
env["HOST_OS"] = flags.pop("HOST_OS", DEFAULT_HOST_OS)
env["CI_REGISTRY_IMAGE"] = flags.pop("CI_REGISTRY_IMAGE", "diracgrid")
env["MYSQL_VER"] = flags.pop("MYSQL_VER", DEFAULT_MYSQL_VER)
env["ES_VER"] = flags.pop("ES_VER", DEFAULT_ES_VER)
return env
def _dict_to_shell(variables):
lines = []
for name, value in variables.items():
if value is None:
continue
elif isinstance(value, list):
lines += [f"declare -a {name}"]
lines += [f"{name}+=({shlex.quote(v)})" for v in value]
elif isinstance(value, bool):
lines += [f"export {name}={'Yes' if value else 'No'}"]
elif isinstance(value, str):
lines += [f"export {name}={shlex.quote(value)}"]
else:
raise NotImplementedError(name, value, type(value))
return "\n".join(lines)
def _make_config(modules, flags, release_var, editable):
config = {
"DEBUG": "True",
# MYSQL Settings
"DB_USER": DB_USER,
"DB_PASSWORD": DB_PASSWORD,
"DB_ROOTUSER": DB_ROOTUSER,
"DB_ROOTPWD": DB_ROOTPWD,
"DB_HOST": DB_HOST,
"DB_PORT": DB_PORT,
# ElasticSearch settings
"NoSQLDB_HOST": "elasticsearch",
"NoSQLDB_PORT": "9200",
# Hostnames
"SERVER_HOST": "server",
"CLIENT_HOST": "client",
# Test specific variables
"WORKSPACE": "/home/dirac",
}
if editable:
config["PIP_INSTALL_EXTRA_ARGS"] = "-e"
required_feature_flags = []
for module_name, module_ci_config in _load_module_configs(modules).items():
config |= module_ci_config["config"]
required_feature_flags += module_ci_config.get("required-feature-flags", [])
config["DIRAC_CI_SETUP_SCRIPT"] = "/home/dirac/LocalRepo/TestCode/" + config["DIRAC_CI_SETUP_SCRIPT"]
# This can likely be removed after the Python 3 migration
if release_var:
config |= dict([release_var.split("=", 1)])
else:
config["DIRAC_RELEASE"], config["DIRACBRANCH"] = _find_dirac_release_and_branch()
for key, default_value in FEATURE_VARIABLES.items():
config[key] = flags.pop(key, default_value)
for key in required_feature_flags:
try:
config[key] = flags.pop(key)
except KeyError:
typer.secho(f"Required feature variable {key!r} is missing", err=True, fg=c.RED)
raise typer.Exit(code=1)
config["TESTREPO"] = [
f"/home/dirac/LocalRepo/TestCode/{name}" for name in modules
]
config["ALTERNATIVE_MODULES"] = [
f"/home/dirac/LocalRepo/ALTERNATIVE_MODULES/{name}" for name in modules
]
if not config["USE_PYTHON3"]:
config["ALTERNATIVE_MODULES"] = [
f"{x}/src/{Path(x).name}" for x in config["ALTERNATIVE_MODULES"]
]
# Exit with an error if there are unused feature flags remaining
if flags:
typer.secho(f"Unrecognised feature flags {flags!r}", err=True, fg=c.RED)
raise typer.Exit(code=1)
return config
def _load_module_configs(modules):
module_ci_configs = {}
for module_name, module_path in modules.items():
module_ci_config_path = module_path / "tests/.dirac-ci-config.yaml"
if not module_ci_config_path.exists():
continue
module_ci_configs[module_name] = yaml.safe_load(module_ci_config_path.read_text())
return module_ci_configs
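# Hedged sketch of the per-module structure _load_module_configs() returns,
# i.e. what tests/.dirac-ci-config.yaml is expected to contain. The keys are
# inferred from their use above; every concrete value is hypothetical.
_EXAMPLE_CI_CONFIG = {
"config": {"DIRAC_CI_SETUP_SCRIPT": "MyDIRAC/tests/CI/my_config.sh"},
"required-feature-flags": ["MYDIRAC_FLAG"],
"extra-services": {"my-db": {"image": "mysql:8.0"}},
"commands": {"post-prepare": ["echo post-prepare done"]},
}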
def _build_docker_cmd(container_name, *, use_root=False, cwd="/home/dirac", tty=True):
if use_root or os.getuid() == 0:
user = "root"
else:
user = "dirac"
cmd = ["docker", "exec"]
if tty:
if sys.stdout.isatty():
cmd += ["-it"]
else:
typer.secho(
'Not passing "-it" to docker as stdout is not a tty',
err=True,
fg=c.YELLOW,
)
cmd += [
"-e=TERM=xterm-color",
"-e=INSTALLROOT=/home/dirac",
f"-e=INSTALLTYPE={container_name}",
f"-u={user}",
f"-w={cwd}",
container_name,
]
return cmd
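# Hedged illustration: for a non-root caller, _build_docker_cmd("server",
# tty=False) resembles ["docker", "exec", "-e=TERM=xterm-color",
# "-e=INSTALLROOT=/home/dirac", "-e=INSTALLTYPE=server", "-u=dirac",
# "-w=/home/dirac", "server"]; "-it" is added only for interactive ttys.
def _example_build_docker_cmd():
return _build_docker_cmd("server", tty=False)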
def _list_services():
cmd = _build_docker_cmd("server")
cmd += [
"bash",
"-c",
'cd ServerInstallDIR/runit/ && for fn in */*/log/current; do echo "$(dirname "$(dirname "$fn")")"; done'
]
ret = subprocess.run(cmd, check=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
if ret.returncode:
typer.secho("Failed to find list of available services", err=True, fg=c.RED)
typer.secho(f"stdout was: {ret.stdout!r}", err=True)
typer.secho(f"stderr was: {ret.stderr!r}", err=True)
raise typer.Exit(1)
return ret.stdout.split()
def _log_popen_stdout(p):
while p.poll() is None:
line = p.stdout.readline().rstrip()
if not line:
continue
bg, fg = None, None
if match := LOG_PATTERN.match(line):
bg, fg = LOG_LEVEL_MAP.get(match.groups()[0], (bg, fg))
typer.secho(line, err=True, bg=bg, fg=fg)
if __name__ == "__main__":
app()
| gpl-3.0 |
mostaphaRoudsari/Honeybee | src/Honeybee_EnergyPlus Window Air Gap.py | 1 | 3032 | #
# Honeybee: A Plugin for Environmental Analysis (GPL) started by Mostapha Sadeghipour Roudsari
#
# This file is part of Honeybee.
#
# Copyright (c) 2013-2020, Mostapha Sadeghipour Roudsari <mostapha@ladybug.tools>
# Honeybee is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 3 of the License,
# or (at your option) any later version.
#
# Honeybee is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Honeybee; If not, see <http://www.gnu.org/licenses/>.
#
# @license GPL-3.0+ <http://spdx.org/licenses/GPL-3.0+>
"""
Use this component to create a custom material for a window air gap, which can be plugged into the "Honeybee_EnergyPlus Construction" component.
_
It is important to note that this component only creates gaps of air and not other gasses.
Also, the material out of this component represents only a single layer of air, which can be combined with the "Honeybee_EnergyPlus Glass Material" to make multi-pane windows.
If you have specifications for a whole window element and not individual panes of glass and gas, you are better-off using the "Honeybee_EnergyPlus Window Material" component instead of this one.
-
Provided by Honeybee 0.0.66
Args:
_name: A text name for your window air gap material.
_thickness_: A number that represents the thickness of the air gap in meters. The default is set to 0.0125 meters (1.25 cm).
Returns:
EPMaterial: A window air gap material that can be plugged into the "Honeybee_EnergyPlus Construction" component.
"""
ghenv.Component.Name = "Honeybee_EnergyPlus Window Air Gap"
ghenv.Component.NickName = 'EPWindowAirGap'
ghenv.Component.Message = 'VER 0.0.66\nJUL_07_2020'
ghenv.Component.IconDisplayMode = ghenv.Component.IconDisplayMode.application
ghenv.Component.Category = "HB-Legacy"
ghenv.Component.SubCategory = "06 | Energy | Material | Construction"
#compatibleHBVersion = VER 0.0.56\nFEB_01_2015
#compatibleLBVersion = VER 0.0.59\nFEB_01_2015
try: ghenv.Component.AdditionalHelpFromDocStrings = "0"
except: pass
def main(name, thickness):
if name == None: name = "AIRGAP"
gasType = "AIR"
if thickness == None: thickness = .0125
values = [name.upper(), gasType, thickness]
comments = ["Name", "Gas type", "Thickness {m}"]
materialStr = "WindowMaterial:Gas,\n"
for count, (value, comment) in enumerate(zip(values, comments)):
if count != len(values) - 1:
materialStr += str(value) + ", !-" + str(comment) + "\n"
else:
materialStr += str(value) + "; !-" + str(comment)
return materialStr
EPMaterial = main(_name, _thickness_)
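# Hedged illustration: with the defaults above (no inputs supplied),
# EPMaterial resembles:
# WindowMaterial:Gas,
# AIRGAP, !-Name
# AIR, !-Gas type
# 0.0125; !-Thickness {m}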
| gpl-3.0 |
orlov-vo/mtasa | vendor/google-breakpad/src/tools/gyp/test/copies/gyptest-slash.py | 249 | 1433 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies file copies with a trailing slash in the destination directory.
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('copies-slash.gyp', chdir='src')
test.relocate('src', 'relocate/src')
test.build('copies-slash.gyp', chdir='relocate/src')
test.built_file_must_match('copies-out-slash/directory/file3',
'file3 contents\n',
chdir='relocate/src')
test.built_file_must_match('copies-out-slash/directory/file4',
'file4 contents\n',
chdir='relocate/src')
test.built_file_must_match('copies-out-slash/directory/subdir/file5',
'file5 contents\n',
chdir='relocate/src')
test.built_file_must_match('copies-out-slash-2/directory/file3',
'file3 contents\n',
chdir='relocate/src')
test.built_file_must_match('copies-out-slash-2/directory/file4',
'file4 contents\n',
chdir='relocate/src')
test.built_file_must_match('copies-out-slash-2/directory/subdir/file5',
'file5 contents\n',
chdir='relocate/src')
test.pass_test()
| gpl-3.0 |
huchoi/edx-platform | common/lib/xmodule/xmodule/modulestore/split_mongo/caching_descriptor_system.py | 3 | 7547 | import sys
import logging
from xblock.runtime import KvsFieldData
from xblock.fields import ScopeIds
from opaque_keys.edx.locator import BlockUsageLocator, LocalId, CourseLocator
from xmodule.mako_module import MakoDescriptorSystem
from xmodule.error_module import ErrorDescriptor
from xmodule.errortracker import exc_info_to_str
from xmodule.modulestore.split_mongo import encode_key_for_mongo
from ..exceptions import ItemNotFoundError
from .split_mongo_kvs import SplitMongoKVS
log = logging.getLogger(__name__)
class CachingDescriptorSystem(MakoDescriptorSystem):
"""
A system that keeps a cache of a course version's json, which it uses to load
modules, falling back to the underlying modulestore for additional data.
Computes the settings (nee 'metadata') inheritance upon creation.
"""
def __init__(self, modulestore, course_entry, default_class, module_data, lazy, **kwargs):
"""
Computes the settings inheritance and sets up the cache.
modulestore: the module store that can be used to retrieve additional
modules
course_entry: the originally fetched enveloped course_structure w/ branch and course id info.
Callers to _load_item provide an override but that function ignores the provided structure and
only looks at the branch and course id
module_data: a dict mapping Location -> json that was cached from the
underlying modulestore
"""
super(CachingDescriptorSystem, self).__init__(
field_data=None,
load_item=self._load_item,
**kwargs
)
self.modulestore = modulestore
self.course_entry = course_entry
self.lazy = lazy
self.module_data = module_data
# Compute inheritance
modulestore.inherit_settings(
course_entry['structure'].get('blocks', {}),
course_entry['structure'].get('blocks', {}).get(
encode_key_for_mongo(course_entry['structure'].get('root'))
)
)
self.default_class = default_class
self.local_modules = {}
def _load_item(self, block_id, course_entry_override=None):
if isinstance(block_id, BlockUsageLocator):
if isinstance(block_id.block_id, LocalId):
try:
return self.local_modules[block_id]
except KeyError:
raise ItemNotFoundError
else:
block_id = block_id.block_id
json_data = self.module_data.get(block_id)
if json_data is None:
# deeper than initial descendant fetch or doesn't exist
course_info = course_entry_override or self.course_entry
course_key = CourseLocator(
course_info.get('org'), course_info.get('course'), course_info.get('run'), course_info.get('branch'),
course_info['structure']['_id']
)
self.modulestore.cache_items(self, [block_id], course_key, lazy=self.lazy)
json_data = self.module_data.get(block_id)
if json_data is None:
raise ItemNotFoundError(block_id)
class_ = self.load_block_type(json_data.get('category'))
return self.xblock_from_json(class_, block_id, json_data, course_entry_override)
# xblock's runtime does not always pass enough contextual information to figure out
# which named container (course x branch) or which parent is requesting an item. Because split allows
# a many:1 mapping from named containers to structures and because item's identities encode
# context as well as unique identity, this function must sometimes infer whether the access is
# within an unspecified named container. In most cases, course_entry_override will give the
# explicit context; however, runtime.get_block(), e.g., does not. HOWEVER, there are simple heuristics
# which will work 99.999% of the time: a runtime is thread & even context specific. The likelihood that
# the thread is working with more than one named container pointing to the same specific structure is
# low; thus, the course_entry is most likely correct. If the thread is looking at > 1 named container
# pointing to the same structure, the access is likely to be chunky enough that the last known container
# is the intended one when not given a course_entry_override; thus, the caching of the last branch/course id.
def xblock_from_json(self, class_, block_id, json_data, course_entry_override=None):
if course_entry_override is None:
course_entry_override = self.course_entry
else:
# most recent retrieval is most likely the right one for next caller (see comment above fn)
self.course_entry['branch'] = course_entry_override['branch']
self.course_entry['org'] = course_entry_override['org']
self.course_entry['course'] = course_entry_override['course']
self.course_entry['run'] = course_entry_override['run']
# most likely a lazy loader or the id directly
definition = json_data.get('definition', {})
definition_id = self.modulestore.definition_locator(definition)
# If no usage id is provided, generate an in-memory id
if block_id is None:
block_id = LocalId()
block_locator = BlockUsageLocator(
CourseLocator(
version_guid=course_entry_override['structure']['_id'],
org=course_entry_override.get('org'),
course=course_entry_override.get('course'),
run=course_entry_override.get('run'),
branch=course_entry_override.get('branch'),
),
block_type=json_data.get('category'),
block_id=block_id,
)
converted_fields = self.modulestore.convert_references_to_keys(
block_locator.course_key, class_, json_data.get('fields', {}), self.course_entry['structure']['blocks'],
)
kvs = SplitMongoKVS(
definition,
converted_fields,
json_data.get('_inherited_settings'),
)
field_data = KvsFieldData(kvs)
try:
module = self.construct_xblock_from_class(
class_,
ScopeIds(None, json_data.get('category'), definition_id, block_locator),
field_data,
)
except Exception:
log.warning("Failed to load descriptor", exc_info=True)
return ErrorDescriptor.from_json(
json_data,
self,
BlockUsageLocator(
CourseLocator(version_guid=course_entry_override['structure']['_id']),
block_type='error',
block_id=block_id
),
error_msg=exc_info_to_str(sys.exc_info())
)
edit_info = json_data.get('edit_info', {})
module.edited_by = edit_info.get('edited_by')
module.edited_on = edit_info.get('edited_on')
module.previous_version = edit_info.get('previous_version')
module.update_version = edit_info.get('update_version')
module.definition_locator = definition_id
# decache any pending field settings
module.save()
# If this is an in-memory block, store it in this system
if isinstance(block_locator.block_id, LocalId):
self.local_modules[block_locator] = module
return module
| agpl-3.0 |
justajeffy/anim-studio-tools | probe/sources/probe/fixtures/mock_tank.py | 5 | 6419 | #
# Copyright 2009 Dr D Studios Pty Limited (ACN 127 184 954) (Dr. D Studios), its
# affiliates and/or its licensors.
#
class MockLabels(object):
"""
Mock labels object, to aid in unit testing.
Emulates Tank behavior where labels are also properties!
.. versionadded:: 0.2.0
"""
def __init__(self, labels={}):
self.labels = labels
@property
def RoleType(self):
return self.labels['RoleType']
class MockSystemObject(object):
"""
Mock tank revision system object, to aid in unit testing.
.. note::
Presently only supports the system name and type name.
.. versionadded:: 0.2.0
.. versionchanged:: 0.15.1
Added support for tkid property.
"""
def __init__(self, tkid="@ABC_123", name="test_name", type_name="test_type"):
self.tkid = tkid
self.name = name
self.type_name = type_name
class MockAssetObject(object):
"""
Mock tank revision asset object, to aid in unit testing.
.. note::
Presently only supports the asset container, system object and labels.
.. versionadded:: 0.2.0
.. versionchanged:: 0.12.0
Added support for asset container properties.
"""
def __init__(self, container="test_asset", labels={}):
self.container = container # supports strings or objects for simplified testing ...
self.system = MockSystemObject()
self.labels = MockLabels(labels)
self.properties = MockProperties()
def save(self):
"""
Pretend to save changes to the asset to Tank.
.. versionadded:: 0.12.0
"""
return True
def __repr__(self):
if type(self.container) == str:
return self.container
else:
return str(self.container)
class MockProperties(object):
"""
Mock tank revision properties, to aid in unit testing.
>>> revision = MockRevision({'system.type_name':'RevisionA', 'properties':{'pipeline_data':None}})
>>>
>>> assert revision.properties.pipeline_data == None
.. note::
Presently only supports the "contents" and "pipeline_data" properties.
.. versionadded:: 0.2.0
.. versionchanged:: 0.7.0
Added support for the "is_locked" property (currently only available on department roles)
"""
is_locked = None
def __init__(self, properties={}):
self.properties = properties
if self.properties.has_key('is_locked'):
self.is_locked = self.properties['is_locked']
@property
def contents(self):
return self.properties['contents']
@property
def pipeline_data(self):
return self.properties['pipeline_data']
def keys(self):
return self.properties.keys()
class MockRevision(object):
"""
Mock tank revision, to aid in unit testing. This mock object implements (often
in quite a hacky way) a very limited subset of the behaviour supported by regular
tank revision objects.
>>> revision = MockRevision({'system.type_name':'RevisionA', 'asset.system.type_name':'ContainerA_v1'})
>>>
>>> assert revision.system.type_name == 'RevisionA' # check the type of the revision
>>> assert revision.asset.system.type_name == 'ContainerA_v1' # check the type of the revision's container
.. versionadded:: 0.2.0
.. versionchanged:: 0.7.0
Added an empty save() wrapper method.
.. versionchanged:: 0.12.0
Added support for asset properties.
.. versionchanged:: 0.15.1
Added support for tkid property.
.. todo::
Ultimately, the need for this mock object may be removed via the implementation
of an "automated" tank scratch for unit testing environment. In such a case,
actual tank revision objects against a controlled set of test data may be used
instead.
.. todo::
Is the behaviour of __repr__ actually correct (with regard to equivalent tank functionality) ...?
"""
def __init__(self, data):
self.asset = MockAssetObject()
self.system = MockSystemObject()
if data.has_key('asset'):
self.asset.container = data['asset']
if data.has_key('asset.labels'):
self.asset.labels = MockLabels(data['asset.labels'])
if data.has_key('asset.properties'):
self.asset.properties = MockProperties(data['asset.properties'])
if data.has_key('asset.system.type_name'):
self.asset.system.type_name = data['asset.system.type_name']
if data.has_key('system.tkid'):
self.system.tkid = data['system.tkid']
if data.has_key('system.name'):
self.system.name = data['system.name']
if data.has_key('system.type_name'):
self.system.type_name = data['system.type_name']
if data.has_key('properties'):
self.properties = MockProperties(data['properties'])
def save(self):
"""
Pretend to save changes to the revision to Tank.
.. versionadded:: 0.7.0
"""
return True
def __repr__(self):
"""
Return a representation of this revision.
.. versionchanged:: 0.15.1
Updated implementation to be consistent with behaviour in Tank.
"""
return "%s(%s, %s)" % (self.system.type_name, self.system.name, self.asset)
# Copyright 2008-2012 Dr D Studios Pty Limited (ACN 127 184 954) (Dr. D Studios)
#
# This file is part of anim-studio-tools.
#
# anim-studio-tools is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# anim-studio-tools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with anim-studio-tools. If not, see <http://www.gnu.org/licenses/>.
| gpl-3.0 |
klmitch/neutron | neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ovs_bridge.py | 4 | 3317 | # Copyright (C) 2014,2015 VA Linux Systems Japan K.K.
# Copyright (C) 2014,2015 YAMAMOTO Takashi <yamamoto at valinux co jp>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_utils import excutils
from neutron._i18n import _LI
from neutron.agent.common import ovs_lib
from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants \
as ovs_consts
from neutron.plugins.ml2.drivers.openvswitch.agent.openflow \
import br_cookie
from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native \
import ofswitch
LOG = logging.getLogger(__name__)
class OVSAgentBridge(ofswitch.OpenFlowSwitchMixin,
br_cookie.OVSBridgeCookieMixin, ovs_lib.OVSBridge):
"""Common code for bridges used by OVS agent"""
_cached_dpid = None
def _get_dp(self):
"""Get (dp, ofp, ofpp) tuple for the switch.
A convenient method for openflow message composers.
"""
while True:
dpid_int = self._cached_dpid
if dpid_int is None:
dpid_str = self.get_datapath_id()
LOG.info(_LI("Bridge %(br_name)s has datapath-ID %(dpid)s"),
{"br_name": self.br_name, "dpid": dpid_str})
dpid_int = int(dpid_str, 16)
try:
dp = self._get_dp_by_dpid(dpid_int)
except RuntimeError:
with excutils.save_and_reraise_exception() as ctx:
self._cached_dpid = None
# Retry if dpid has been changed.
# NOTE(yamamoto): Open vSwitch change its dpid on
# some events.
# REVISIT(yamamoto): Consider to set dpid statically.
new_dpid_str = self.get_datapath_id()
if new_dpid_str != dpid_str:
LOG.info(_LI("Bridge %(br_name)s changed its "
"datapath-ID from %(old)s to %(new)s"), {
"br_name": self.br_name,
"old": dpid_str,
"new": new_dpid_str,
})
ctx.reraise = False
else:
self._cached_dpid = dpid_int
return dp, dp.ofproto, dp.ofproto_parser
def setup_controllers(self, conf):
controllers = [
"tcp:%(address)s:%(port)s" % {
"address": conf.OVS.of_listen_address,
"port": conf.OVS.of_listen_port,
}
]
self.set_protocols(ovs_consts.OPENFLOW13)
self.set_controller(controllers)
def drop_port(self, in_port):
self.install_drop(priority=2, in_port=in_port)
| apache-2.0 |