repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
franek/weboob | modules/cragr/web/pages.py | 1 | 7464 | # -*- coding: utf-8 -*-
# Copyright(C) 2013 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import re
from decimal import Decimal
from weboob.capabilities.bank import Account
from weboob.tools.browser import BasePage
from weboob.tools.capabilities.bank.transactions import FrenchTransaction as Transaction
__all__ = ['HomePage', 'LoginPage', 'LoginErrorPage', 'AccountsPage', 'TransactionsPage']
class HomePage(BasePage):
    """Landing page of the Crédit Agricole web site.

    Its only job is to extract the URL the login form must be POSTed to,
    which the site hides inside an inline <script> element.
    """

    def get_post_url(self):
        """Return the login POST URL found in the page scripts, or None.

        The URL is stored in a JavaScript variable ``chemin`` inside the
        <script> block that also references the 'CCPTE' login field.
        """
        for script in self.document.xpath('//script'):
            text = script.text
            # Skip empty scripts and scripts unrelated to the login form.
            if text is None or 'CCPTE' not in text:
                continue
            m = re.search(r'var chemin = "([^"]+)"', text, re.MULTILINE)
            if m:
                return m.group(1)
        return None
class LoginPage(BasePage):
    """Page with the randomized numeric keypad used to enter the password."""

    def login(self, password):
        """Fill and submit the login form for a 6-digit numeric password.

        The keypad shuffles digits on every load: each <a> cell shows a
        digit and carries a tabindex giving its position, so the password
        must be translated into keypad positions before being POSTed.
        """
        assert password.isdigit()
        assert len(password) == 6
        # Map displayed digit -> 0-based keypad position (tabindex - 1).
        imgmap = {}
        for td in self.document.xpath('//table[@id="pave-saisie-code"]/tr/td'):
            a = td.find('a')
            num = a.text.strip()
            if num.isdigit():
                imgmap[num] = int(a.attrib['tabindex']) - 1
        self.browser.select_form(name='formulaire')
        self.browser.set_all_readonly(False)
        # CCCRYC holds the comma-separated keypad positions of each digit,
        # zero-padded to two characters.
        self.browser['CCCRYC'] = ','.join(['%02d' % imgmap[c] for c in password])
        self.browser['CCCRYC2'] = '0' * len(password)
        self.browser.submit(nologin=True)

    def get_result_url(self):
        """Return the post-login redirect URL (the page body is the URL)."""
        return self.parser.tocleanstring(self.document.getroot())
class LoginErrorPage(BasePage):
    # Marker page: reaching it signals a failed login; no parsing needed.
    pass
class _AccountsPage(BasePage):
    """Shared parser for the account-list tables (checking and savings)."""

    # Column indexes inside a 'colcelligne' table row.
    COL_LABEL = 0
    COL_ID = 2
    COL_VALUE = 4
    COL_CURRENCY = 5

    def get_list(self):
        """Yield an Account object for every account row of the page."""
        clean = self.parser.tocleanstring
        for row in self.document.xpath('//table[@class="ca-table"]/tr'):
            # Account rows are tagged with a 'colcelligne*' CSS class.
            if not row.attrib.get('class', '').startswith('colcelligne'):
                continue
            cells = row.findall('td')
            if not cells:
                continue
            account = Account()
            account.id = clean(cells[self.COL_ID])
            account.label = clean(cells[self.COL_LABEL])
            account.balance = Decimal(Transaction.clean_amount(clean(cells[self.COL_VALUE])))
            account.currency = account.get_currency(clean(cells[self.COL_CURRENCY]))
            # The first cell may link to the account's history page.
            link = cells[0].find('a')
            account._link = link.attrib['href'].replace(' ', '%20') if link is not None else None
            yield account
class AccountsPage(_AccountsPage):
    # Checking accounts use the default column layout of _AccountsPage.
    pass
class SavingsPage(_AccountsPage):
    # Savings tables place the account id in the second column.
    COL_ID = 1
class TransactionsPage(BasePage):
    """Parser for an account's transaction history pages."""

    def get_next_url(self):
        """Return the URL of the next history page, or None on the last one."""
        links = self.document.xpath('//span[@class="pager"]/a[@class="liennavigationcorpspage"]')
        if len(links) < 1:
            return None
        # The last pager link is "next page" when its image says so.
        img = links[-1].find('img')
        if img.attrib.get('alt', '') == 'Page suivante':
            return links[-1].attrib['href']
        return None

    # Default column positions. The debit/credit/label columns are relocated
    # from the table header inside get_history(), since their position varies
    # between account types; debit/credit are stored as negative (end-relative)
    # indexes because some rows carry extra leading cells.
    COL_DATE = 0
    COL_TEXT = 1
    COL_DEBIT = None
    COL_CREDIT = -1

    # Category label (as shown by the bank) -> weboob transaction type.
    TYPES = {'Paiement Par Carte': Transaction.TYPE_CARD,
             'Retrait Au Distributeur': Transaction.TYPE_WITHDRAWAL,
             'Frais': Transaction.TYPE_BANK,
             'Cotisation': Transaction.TYPE_BANK,
             'Virement Emis': Transaction.TYPE_TRANSFER,
             'Virement': Transaction.TYPE_TRANSFER,
             'Cheque Emis': Transaction.TYPE_CHECK,
             'Remise De Cheque': Transaction.TYPE_DEPOSIT,
             'Prelevement': Transaction.TYPE_ORDER,
            }

    def get_history(self, date_guesser):
        """Yield Transaction objects parsed from the history table.

        date_guesser: resolves day/month pairs (the page omits the year)
        into full dates.
        """
        i = 0
        for tr in self.document.xpath('//table[@class="ca-table"]//tr'):
            # Only keep rows whose nearest enclosing table is the ca-table
            # itself (skips rows of nested tables).
            parent = tr.getparent()
            while parent is not None and parent.tag != 'table':
                parent = parent.getparent()
            if parent.attrib.get('class', '') != 'ca-table':
                continue
            if tr.attrib.get('class', '') == 'tr-thead':
                # Header row: locate the debit/credit/label columns.
                # NOTE(review): this reuses 'i', clobbering the transaction
                # counter — kept as-is to preserve existing ids.
                heads = tr.findall('th')
                for i, head in enumerate(heads):
                    key = self.parser.tocleanstring(head)
                    if key == u'Débit':
                        self.COL_DEBIT = i - len(heads)
                    if key == u'Crédit':
                        self.COL_CREDIT = i - len(heads)
                    if key == u'Libellé':
                        self.COL_TEXT = i
            if not tr.attrib.get('class', '').startswith('ligne-'):
                continue
            cols = tr.findall('td')
            # On loan accounts, there is a ca-table with a summary. Skip it.
            if tr.find('th') is not None or len(cols) < 3:
                continue
            t = Transaction(i)
            date = self.parser.tocleanstring(cols[self.COL_DATE])
            raw = self.parser.tocleanstring(cols[self.COL_TEXT])
            credit = self.parser.tocleanstring(cols[self.COL_CREDIT])
            if self.COL_DEBIT is not None:
                debit = self.parser.tocleanstring(cols[self.COL_DEBIT])
            else:
                debit = ''
            day, month = map(int, date.split('/', 1))
            t.date = date_guesser.guess_date(day, month)
            t.rdate = t.date
            t.raw = raw
            # On some accounts' history page, there is a <font> tag in columns.
            col_text = cols[self.COL_TEXT]
            if col_text.find('font') is not None:
                col_text = col_text.find('font')
            t.category = unicode(col_text.text.strip())
            t.label = col_text.find('br').tail
            if t.label is not None:
                t.label = t.label.strip()
            else:
                # If there is only one line, try to separate category from label.
                # (raw strings used for all regexes below to avoid invalid
                # escape-sequence warnings on modern Python)
                t.label = re.sub(r'(.*) (.*)', r'\2', t.category).strip()
            # Sometimes, the category contains the label, even if there is another line with it again.
            t.category = re.sub(r'(.*) .*', r'\1', t.category).strip()
            t.type = self.TYPES.get(t.category, t.TYPE_UNKNOWN)
            # Parse operation date in label (for card transactions for example)
            m = re.match(r'(.*) (\d{2})/(\d{2})$', t.label)
            if m:
                if t.type == t.TYPE_CARD:
                    t.rdate = date_guesser.guess_date(int(m.group(2)), int(m.group(3)), change_current_date=False)
                t.label = m.group(1).strip()
            # Strip city or other useless information from label.
            t.label = re.sub(r'(.*) .*', r'\1', t.label).strip()
            t.set_amount(credit, debit)
            yield t
            i += 1
| agpl-3.0 |
anokata/pythonPetProjects | paging.py | 1 | 3409 | input_file = 'input'
DEBUG = True
DEBUG = False
input_file = '/home/ksi/Downloads/dataset_44327_15(2).txt'
memory = {}
qr = list()
with open(input_file) as fin:
m = fin.read
memory_n, queries_n, table_r_addr = [int(x) for x in next(fin).split()]
for i in range(memory_n):
paddr, value = [int(x) for x in next(fin).split()]
memory[paddr] = value
for i in range(queries_n):
qr.append(int(fin.readline()))
#memory_n = m # phys addr = value(8byte)
def mem(phys_addr):
    """Read the 8-byte table record stored at *phys_addr*.

    Cells absent from the sparse `memory` dict read as 0 (i.e. a
    not-present page-table entry).
    """
    value = memory.get(phys_addr, 0)
    # Single consolidated debug trace (the original printed three times
    # for one read: before, after, and with the value).
    if DEBUG:
        print(' mem @ ', phys_addr, 'value ', value)
    return value
# Debug sanity check: show where the root (P4) table lives and the first
# record stored in it.
if DEBUG:
    print('root table @', table_r_addr, hex(table_r_addr))
    print('TEST root table first rec', mem(table_r_addr))
def get_addr_from_record(r):
    """Extract the next-level physical address from page-table record *r*.

    Bit 0 is the present flag; if clear, prints 'fault' and raises.
    The returned address keeps bits 12..47 of the record (the 4 KiB-aligned
    frame address in the x86-64 long-mode entry format).
    """
    if DEBUG:
        print(" get record at", r)
    # NOTE(review): the x86-64 huge-page (PS) flag is bit 7 (0x80);
    # 0x40 is bit 6 (the dirty flag) — confirm which bit was intended.
    # 'ps' is only used for debug output, so behavior is unaffected.
    ps = r & 0x40
    is_present = r & 1
    if DEBUG:
        print("PS", ps)
    if not is_present:
        # Page fault: reported on stdout and signalled to the caller.
        print('fault')
        raise Exception()
    #addr = r & 0xFFFFF000
    # 48-bit physical address space: mask off the low 12 flag bits.
    addr = r & 0xFFFFFFFFF000
    if DEBUG:
        print(" value from record (next addr)", addr, hex(addr))
    return addr
def get_addr(laddr):
    """Translate logical address *laddr* through the 4-level page table
    rooted at *table_r_addr* and print the resulting physical address.

    Propagates the exception raised by get_addr_from_record() when any
    level's record has its present bit clear (a page fault).
    """
    # Remember where addr and where value, where logic addr
    # parse logic addr
    #Use bits 39-47 (9 bits) as an index into P4
    #laddr & 0x01FF
    # test 0x?
    # 2^9 = 512 records
    # offset 12 bit!!
    # x86-64 long-mode split: 12-bit page offset, then four 9-bit indexes.
    offset = laddr & 0xFFF
    p1_idx = (laddr >> 12) & 0x1FF
    p2_idx = (laddr >> 21) & 0x1FF
    p3_idx = (laddr >> 30) & 0x1FF
    p4_idx = (laddr >> 39) & 0x1FF
    # get record from root table at p4 idx:
    if DEBUG:
        print()
        print('LOGIC: ', laddr, hex(laddr))
        print('offset: ', offset)
        print("p4 idx", p4_idx)
        print("p3 idx", p3_idx)
        print("p2 idx", p2_idx)
        print("p1 idx", p1_idx)
    # Each table record is 8 bytes wide, hence the '* 8' when indexing.
    rec = mem(table_r_addr + (p4_idx * 8))
    if DEBUG:
        print('get rec at ', table_r_addr, ' idx ', p4_idx)
    t3addr = get_addr_from_record(rec)
    if DEBUG:
        print('rec ', rec)
        print('table 3 phys addr', t3addr)
    rec = mem(t3addr + (p3_idx * 8))
    if DEBUG:
        print('get rec at ', t3addr, ' idx ', p3_idx)
    t2addr = get_addr_from_record(rec)
    if DEBUG:
        print('rec ', rec)
        print('table 2 phys addr', t2addr)
    rec = mem(t2addr + (p2_idx * 8))
    t1addr = get_addr_from_record(rec)
    if DEBUG:
        # NOTE(review): the second 'table 2' label below looks like a
        # copy-paste slip — the values shown are t2addr then t1addr.
        print('rec ', hex(rec))
        print('table 2 phys addr', t2addr)
        print('rec ', hex(rec))
        print('table 1 phys addr', t1addr)
    rec = mem(t1addr + (p1_idx * 8))
    # print('rec ', hex(rec), rec)
    phys = get_addr_from_record(rec)
    phys += offset
    #print('PHYS ', hex(phys), phys)
    # The translated physical address is the program's only regular output.
    print(phys)
# https://github.com/0xAX/linux-insides/blob/master/Theory/Paging.md
# http://os.phil-opp.com/entering-longmode.html#paging
# https://stepik.org/lesson/%D0%A1%D1%82%D1%80%D0%B0%D0%BD%D0%B8%D1%87%D0%BD%D0%B0%D1%8F-%D0%BE%D1%80%D0%B3%D0%B0%D0%BD%D0%B8%D0%B7%D0%B0%D1%86%D0%B8%D1%8F-%D0%BF%D0%B0%D0%BC%D1%8F%D1%82%D0%B8-44327/step/15?course=%D0%9E%D0%BF%D0%B5%D1%80%D0%B0%D1%86%D0%B8%D0%BE%D0%BD%D0%BD%D1%8B%D0%B5-%D1%81%D0%B8%D1%81%D1%82%D0%B5%D0%BC%D1%8B&unit=22137
# Translate every queried logical address. Page faults are reported by
# get_addr() itself (it prints 'fault' before raising), so a failed query
# must not abort the remaining ones.
i = 2
for q in qr:
    try:
        get_addr(q)
    except Exception:
        # Catch only real errors: a bare 'except:' would also swallow
        # KeyboardInterrupt and SystemExit.
        pass
    # Leftover debug scaffolding (originally stopped after two queries).
    i -= 1
    if not i:
        pass
        #exit()
| mit |
jart/tensorflow | tensorflow/python/ops/sets_impl.py | 27 | 11713 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of tf.sets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import gen_set_ops
from tensorflow.python.util.tf_export import tf_export
# Dtypes accepted by the set-op kernels: fixed-width integers and strings.
_VALID_DTYPES = set([
    dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64,
    dtypes.uint8, dtypes.uint16, dtypes.string])
@tf_export("sets.set_size")
def set_size(a, validate_indices=True):
  """Compute the number of unique elements along the last dimension of `a`.

  Args:
    a: `SparseTensor`, with indices sorted in row-major order.
    validate_indices: Whether to validate the order and range of sparse
      indices in `a`.

  Returns:
    An `int32` `Tensor` of set sizes: for `a` of rank `n`, a `Tensor` of
    rank `n-1` with the same first `n-1` dimensions as `a`, each value being
    the number of unique elements in the corresponding `[0...n-1]`
    dimension of `a`.

  Raises:
    TypeError: If `a` is an invalid types.
  """
  a = sparse_tensor.convert_to_tensor_or_sparse_tensor(a, name="a")
  if not isinstance(a, sparse_tensor.SparseTensor):
    raise TypeError("Expected `SparseTensor`, got %s." % a)
  dtype = a.values.dtype.base_dtype
  if dtype not in _VALID_DTYPES:
    raise TypeError("Invalid dtype %s." % a.values.dtype)
  # pylint: disable=protected-access
  return gen_set_ops.set_size(
      a.indices, a.values, a.dense_shape, validate_indices)
# Set operations produce discrete outputs; no gradients are defined.
ops.NotDifferentiable("SetSize")
ops.NotDifferentiable("DenseToDenseSetOperation")
ops.NotDifferentiable("DenseToSparseSetOperation")
ops.NotDifferentiable("SparseToSparseSetOperation")
def _convert_to_tensors_or_sparse_tensors(a, b):
  """Convert `a` and `b` to tensor types, swapping their order if needed.

  Args:
    a: `Tensor` or `SparseTensor` of the same type as `b`.
    b: `Tensor` or `SparseTensor` of the same type as `a`.

  Returns:
    A `(a, b, flipped)` tuple: `a` and `b` converted to `Tensor` or
    `SparseTensor`, and `flipped` telling whether the pair was swapped to
    turn a sparse,dense argument order into dense,sparse (the set ops do
    not support the former).

  Raises:
    TypeError: if either input has an invalid dtype, or the dtypes differ.
  """
  a = sparse_tensor.convert_to_tensor_or_sparse_tensor(a, name="a")
  if a.dtype.base_dtype not in _VALID_DTYPES:
    raise TypeError("'a' invalid dtype %s." % a.dtype)
  b = sparse_tensor.convert_to_tensor_or_sparse_tensor(b, name="b")
  if b.dtype.base_dtype != a.dtype.base_dtype:
    raise TypeError("Types don't match, %s vs %s." % (a.dtype, b.dtype))
  a_is_sparse = isinstance(a, sparse_tensor.SparseTensor)
  b_is_sparse = isinstance(b, sparse_tensor.SparseTensor)
  if a_is_sparse and not b_is_sparse:
    return b, a, True
  return a, b, False
def _set_operation(a, b, set_operation, validate_indices=True):
  """Compute set operation of elements in last dimension of `a` and `b`.

  All but the last dimension of `a` and `b` must match.

  Args:
    a: `Tensor` or `SparseTensor` of the same type as `b`. If sparse, indices
      must be sorted in row-major order.
    b: `Tensor` or `SparseTensor` of the same type as `a`. Must be
      `SparseTensor` if `a` is `SparseTensor`. If sparse, indices must be
      sorted in row-major order.
    set_operation: String indicating set operation. See
      SetOperationOp::SetOperationFromContext for valid values.
    validate_indices: Whether to validate the order and range of sparse indices
      in `a` and `b`.

  Returns:
    A `SparseTensor` with the same rank as `a` and `b`, and all but the last
    dimension the same. Elements along the last dimension contain the results
    of the set operation.

  Raises:
    TypeError: If inputs are invalid types.
    ValueError: If `a` is sparse and `b` is dense.
  """
  # Dispatch to the kernel matching the sparse/dense combination of the
  # inputs; sparse,dense has no kernel, hence the explicit error below.
  if isinstance(a, sparse_tensor.SparseTensor):
    if isinstance(b, sparse_tensor.SparseTensor):
      indices, values, shape = gen_set_ops.sparse_to_sparse_set_operation(
          a.indices, a.values, a.dense_shape,
          b.indices, b.values, b.dense_shape,
          set_operation, validate_indices)
    else:
      raise ValueError("Sparse,Dense is not supported, but Dense,Sparse is. "
                       "Please flip the order of your inputs.")
  elif isinstance(b, sparse_tensor.SparseTensor):
    indices, values, shape = gen_set_ops.dense_to_sparse_set_operation(
        a, b.indices, b.values, b.dense_shape, set_operation, validate_indices)
  else:
    indices, values, shape = gen_set_ops.dense_to_dense_set_operation(
        a, b, set_operation, validate_indices)
  return sparse_tensor.SparseTensor(indices, values, shape)
@tf_export("sets.set_intersection")
def set_intersection(a, b, validate_indices=True):
  """Compute set intersection of elements in last dimension of `a` and `b`.

  All but the last dimension of `a` and `b` must match.

  Each aligned pair of sets (one from `a`, one from `b`) is intersected
  independently; the result holds, along its last dimension, the elements
  common to both sets of the pair.  For example, with
  `a = [[{1, 2}, {3}], [{4}, {5, 6}]]` and
  `b = [[{1}, {}], [{4}, {5, 6, 7, 8}]]` (encoded as `SparseTensor`s),
  `tf.sets.set_intersection(a, b)` is `[[{1}, {}], [{4}, {5, 6}]]`.

  Args:
    a: `Tensor` or `SparseTensor` of the same type as `b`. If sparse, indices
      must be sorted in row-major order.
    b: `Tensor` or `SparseTensor` of the same type as `a`. If sparse, indices
      must be sorted in row-major order.
    validate_indices: Whether to validate the order and range of sparse indices
      in `a` and `b`.

  Returns:
    A `SparseTensor` whose shape is the same rank as `a` and `b`, and all but
    the last dimension the same. Elements along the last dimension contain the
    intersections.
  """
  a, b, _ = _convert_to_tensors_or_sparse_tensors(a, b)
  return _set_operation(a, b, "intersection", validate_indices)
@tf_export("sets.set_difference")
def set_difference(a, b, aminusb=True, validate_indices=True):
  """Compute set difference of elements in last dimension of `a` and `b`.

  All but the last dimension of `a` and `b` must match.

  Each aligned pair of sets is differenced independently.  For example, with
  `a = [[{1, 2}, {3}], [{4}, {5, 6}]]` and
  `b = [[{1, 3}, {2}], [{4, 5}, {5, 6, 7, 8}]]` (encoded as `SparseTensor`s),
  `tf.sets.set_difference(a, b)` is `[[{2}, {3}], [{}, {}]]`.

  Args:
    a: `Tensor` or `SparseTensor` of the same type as `b`. If sparse, indices
      must be sorted in row-major order.
    b: `Tensor` or `SparseTensor` of the same type as `a`. If sparse, indices
      must be sorted in row-major order.
    aminusb: Whether to subtract `b` from `a`, vs vice versa.
    validate_indices: Whether to validate the order and range of sparse indices
      in `a` and `b`.

  Returns:
    A `SparseTensor` whose shape is the same rank as `a` and `b`, and all but
    the last dimension the same. Elements along the last dimension contain the
    differences.
  """
  a, b, flipped = _convert_to_tensors_or_sparse_tensors(a, b)
  if flipped:
    # The operands were swapped (dense one first), so invert the requested
    # direction to keep the result unchanged.
    aminusb = not aminusb
  operation = "a-b" if aminusb else "b-a"
  return _set_operation(a, b, operation, validate_indices)
@tf_export("sets.set_union")
def set_union(a, b, validate_indices=True):
  """Compute set union of elements in last dimension of `a` and `b`.

  All but the last dimension of `a` and `b` must match.

  Each aligned pair of sets is united independently.  For example, with
  `a = [[{1, 2}, {3}], [{4}, {5, 6}]]` and
  `b = [[{1, 3}, {2}], [{4, 5}, {5, 6, 7, 8}]]` (encoded as `SparseTensor`s),
  `tf.sets.set_union(a, b)` is
  `[[{1, 2, 3}, {2, 3}], [{4, 5}, {5, 6, 7, 8}]]`.

  Args:
    a: `Tensor` or `SparseTensor` of the same type as `b`. If sparse, indices
      must be sorted in row-major order.
    b: `Tensor` or `SparseTensor` of the same type as `a`. If sparse, indices
      must be sorted in row-major order.
    validate_indices: Whether to validate the order and range of sparse indices
      in `a` and `b`.

  Returns:
    A `SparseTensor` whose shape is the same rank as `a` and `b`, and all but
    the last dimension the same. Elements along the last dimension contain the
    unions.
  """
  a, b, _ = _convert_to_tensors_or_sparse_tensors(a, b)
  return _set_operation(a, b, "union", validate_indices)
| apache-2.0 |
ludwiktrammer/odoo | addons/sale_timesheet/models/sale_timesheet.py | 3 | 6624 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp import models, api, fields
from openerp.tools.translate import _
from openerp.exceptions import UserError
class ResCompany(models.Model):
    _inherit = 'res.company'

    @api.model
    def _get_uom_hours(self):
        """Default the timesheet UoM to hours; the XML record may be
        missing (module data not loaded), in which case no default is set."""
        try:
            return self.env.ref("product.product_uom_hour")
        except ValueError:
            return False

    # Unit of measure used to value timesheet lines company-wide.
    project_time_mode_id = fields.Many2one('product.uom', string='Timesheet UoM', default=_get_uom_hours)
class HrEmployee(models.Model):
    _inherit = 'hr.employee'

    # FIXME: this field should be in module hr_timesheet, not sale_timesheet
    # Hourly cost used by AccountAnalyticLine._get_timesheet_cost() to value
    # timesheet lines (stored as negative analytic amounts).
    timesheet_cost = fields.Float(string='Timesheet Cost', default=0.0)
class ProductTemplate(models.Model):
    _inherit = 'product.template'

    track_service = fields.Selection(selection_add=[('timesheet', 'Timesheets on contract')])

    @api.onchange('type', 'invoice_policy')
    def onchange_type_timesheet(self):
        """Keep track_service consistent with the product type and invoicing
        policy: cost-invoiced services are tracked by timesheet, and
        non-services fall back to manual tracking."""
        if self.type == 'service' and self.invoice_policy == 'cost':
            self.track_service = 'timesheet'
        if self.type != 'service':
            self.track_service = 'manual'
        return {}
class AccountAnalyticLine(models.Model):
    _inherit = 'account.analytic.line'

    def _get_sale_order_line(self, vals=None):
        """Attach a timesheet line to the sale order line it should be
        invoiced through, then propagate to the parent implementation.

        If no sale order line is given/set, falls back to the (single)
        confirmed timesheet-service line of the sale order tied to this
        analytic account.
        """
        result = dict(vals or {})
        if self.is_timesheet:
            if result.get('so_line'):
                sol = self.env['sale.order.line'].browse([result['so_line']])
            else:
                sol = self.so_line
            if not sol and self.account_id:
                sol = self.env['sale.order.line'].search([
                    ('order_id.project_id', '=', self.account_id.id),
                    ('state', '=', 'sale'),
                    ('product_id.track_service', '=', 'timesheet'),
                    ('product_id.type', '=', 'service')],
                    limit=1)
            if sol:
                result.update({
                    'so_line': sol.id,
                    'product_id': sol.product_id.id,
                })
            # Recompute the cost once the product/so_line are known.
            result = self._get_timesheet_cost(result)
        result = super(AccountAnalyticLine, self)._get_sale_order_line(vals=result)
        return result

    def _get_timesheet_cost(self, vals=None):
        """Value a timesheet line: amount = -unit_amount * employee hourly
        cost, expressed in the company's timesheet UoM.  Leaves the values
        untouched for non-timesheet lines or when an amount is already set.
        """
        result = dict(vals or {})
        if result.get('is_timesheet') or self.is_timesheet:
            if result.get('amount'):
                return result
            unit_amount = result.get('unit_amount', 0.0) or self.unit_amount
            user_id = result.get('user_id') or self.user_id.id
            user = self.env['res.users'].browse([user_id])
            emp = self.env['hr.employee'].search([('user_id', '=', user_id)], limit=1)
            cost = emp and emp.timesheet_cost or 0.0
            uom = (emp or user).company_id.project_time_mode_id
            # Nominal employee cost = 1 * company project UoM (project_time_mode_id)
            result.update(
                amount=(-unit_amount * cost),
                product_uom_id=uom.id
            )
        return result

    @api.multi
    def write(self, values):
        # Revalue each line individually, since the cost depends on the
        # line's own employee/unit_amount.
        for line in self:
            values = line._get_timesheet_cost(vals=values)
            super(AccountAnalyticLine, line).write(values)
        return True

    @api.model
    def create(self, values):
        values = self._get_timesheet_cost(vals=values)
        return super(AccountAnalyticLine, self).create(values)
class SaleOrder(models.Model):
    _inherit = 'sale.order'

    timesheet_ids = fields.Many2many('account.analytic.line', compute='_compute_timesheet_ids', string='Timesheet activities associated to this sale')
    timesheet_count = fields.Float(string='Timesheet activities', compute='_compute_timesheet_ids')

    @api.multi
    @api.depends('project_id.line_ids')
    def _compute_timesheet_ids(self):
        """Collect the timesheet lines booked on the order's analytic
        account, and their total quantity (rounded to 2 digits)."""
        for order in self:
            order.timesheet_ids = self.env['account.analytic.line'].search([('is_timesheet', '=', True), ('account_id', '=', order.project_id.id)]) if order.project_id else []
            order.timesheet_count = round(sum([line.unit_amount for line in order.timesheet_ids]), 2)

    @api.multi
    @api.constrains('order_line')
    def _check_multi_timesheet(self):
        """Forbid more than one timesheet-tracked product per sale order."""
        for order in self:
            count = 0
            for line in order.order_line:
                if line.product_id.track_service == 'timesheet':
                    count += 1
                if count > 1:
                    raise UserError(_("You can use only one product on timesheet within the same sale order. You should split your order to include only one contract based on time and material."))
        return {}

    @api.multi
    def action_confirm(self):
        """On confirmation, create an analytic account for orders containing
        a timesheet-tracked service (if none exists yet)."""
        result = super(SaleOrder, self).action_confirm()
        for order in self:
            if not order.project_id:
                for line in order.order_line:
                    if line.product_id.track_service == 'timesheet':
                        # Bug fix: 'order' has no product_id field; the
                        # default_code must come from the matched line.
                        order._create_analytic_account(prefix=line.product_id.default_code or None)
                        break
        return result

    @api.multi
    def action_view_timesheet(self):
        """Open the timesheet list/form action filtered on this order's
        timesheet lines, or close the window when there are none."""
        self.ensure_one()
        imd = self.env['ir.model.data']
        action = imd.xmlid_to_object('hr_timesheet.act_hr_timesheet_line_evry1_all_form')
        list_view_id = imd.xmlid_to_res_id('hr_timesheet.hr_timesheet_line_tree')
        form_view_id = imd.xmlid_to_res_id('hr_timesheet.hr_timesheet_line_form')
        result = {
            'name': action.name,
            'help': action.help,
            'type': action.type,
            'views': [[list_view_id, 'tree'], [form_view_id, 'form']],
            'target': action.target,
            'context': action.context,
            'res_model': action.res_model,
        }
        if self.timesheet_count > 0:
            result['domain'] = "[('id','in',%s)]" % self.timesheet_ids.ids
        else:
            result = {'type': 'ir.actions.act_window_close'}
        return result
class SaleOrderLine(models.Model):
    _inherit = "sale.order.line"

    @api.multi
    def _compute_analytic(self, domain=None):
        if not domain:
            # Default domain: analytic lines linked to these order lines that
            # are either costs (amount <= 0, e.g. expenses) or timesheets.
            domain = [('so_line', 'in', self.ids), '|', ('amount', '<=', 0.0), ('is_timesheet', '=', True)]
        return super(SaleOrderLine, self)._compute_analytic(domain=domain)

    @api.model
    def _get_analytic_track_service(self):
        # Extend the tracked services with the timesheet mode added here.
        return super(SaleOrderLine, self)._get_analytic_track_service() + ['timesheet']
| agpl-3.0 |
leadbrick/django-lfs | lfs/customer/forms.py | 3 | 4537 | # payment imports
import datetime
# django imports
from django import forms
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.models import User
from django.db.models import Q
from django.forms.util import ErrorList
from django.utils.translation import ugettext_lazy as _
# lfs imports
import lfs.payment.settings
from lfs.customer.models import BankAccount
from lfs.customer.models import CreditCard
from lfs.payment.models import PaymentMethod
class BankAccountForm(forms.ModelForm):
    """
    The default bank account form which is displayed within the checkout form
    if a shop customer selects a payment method of type ``bank``.
    """
    class Meta:
        model = BankAccount
        exclude = ("customer", )

    def clean(self):
        """Require all bank account fields when the selected payment method
        is of type ``bank``; otherwise they stay optional."""
        msg = _(u"This field is required.")
        payment_method_id = self.data.get("payment_method")
        payment_method = PaymentMethod.objects.get(pk=payment_method_id)
        if payment_method.type == lfs.payment.settings.PM_BANK:
            # All account fields are mandatory for direct debit; the original
            # code repeated this check four times field by field.
            for field in ("account_number", "bank_identification_code",
                          "bank_name", "depositor"):
                if self.cleaned_data.get(field, "") == "":
                    self._errors[field] = ErrorList([msg])
        return self.cleaned_data
class CreditCardForm(forms.ModelForm):
    """
    The default credit card form which is displayed within the checkout form
    if a shop customer selects a payment method of type ``credit card``.
    """
    verification = forms.CharField(label=_(u"Verification Number"), max_length=4, required=False, widget=forms.TextInput(attrs={"size": 4}))

    class Meta:
        model = CreditCard
        exclude = ("customer", )

    def __init__(self, *args, **kwargs):
        super(CreditCardForm, self).__init__(*args, **kwargs)
        # Build the selects dynamically so the expiration years always start
        # at the current year.
        year = datetime.datetime.now().year
        self.fields["type"].widget = forms.Select(choices=lfs.payment.settings.CREDIT_CARD_TYPE_CHOICES)
        self.fields["expiration_date_month"].widget = forms.Select(choices=[(i, i) for i in range(1, 13)])
        self.fields["expiration_date_year"].widget = forms.Select(choices=[(i, i) for i in range(year, year + 10)])

    def clean(self):
        """Require the card fields only when the selected payment method is
        of type ``credit card``."""
        msg = _(u"This field is required.")
        # Check data of selected payment method
        payment_method_id = self.data.get("payment_method")
        payment_method = PaymentMethod.objects.get(pk=payment_method_id)
        if payment_method.type == lfs.payment.settings.PM_CREDIT_CARD:
            # The original code repeated this required check per field.
            for field in ("owner", "number", "verification"):
                if self.cleaned_data.get(field, "") == "":
                    self._errors[field] = ErrorList([msg])
        return self.cleaned_data
class EmailForm(forms.Form):
    """Form to edit email address
    """
    # 75 matches the max_length of Django's auth User email field.
    email = forms.EmailField(label=_(u"E-mail"), max_length=75)
class CustomerAuthenticationForm(AuthenticationForm):
    # Relabel the username field: customers log in with their e-mail address.
    username = forms.CharField(label=_("E-mail"), max_length=75)
class RegisterForm(forms.Form):
    """Form to register a customer.
    """
    email = forms.EmailField(label=_(u"E-mail"), max_length=75)
    password_1 = forms.CharField(
        label=_(u"Password"), widget=forms.PasswordInput(), max_length=20)
    password_2 = forms.CharField(
        label=_(u"Confirm password"), widget=forms.PasswordInput(), max_length=20)

    def clean_password_2(self):
        """Validates that password 1 and password 2 are the same.
        """
        p1 = self.cleaned_data.get('password_1')
        p2 = self.cleaned_data.get('password_2')
        # Also rejects the case where either password is missing/empty.
        if not (p1 and p2 and p1 == p2):
            raise forms.ValidationError(_(u"The two passwords do not match."))
        return p2

    def clean_email(self):
        """Validates that the entered e-mail is unique.

        Both the email and username columns are checked — apparently the
        e-mail address doubles as the auth username here (TODO confirm
        against the registration view).
        """
        email = self.cleaned_data.get("email")
        if email and User.objects.filter(Q(email=email) | Q(username=email)).count() > 0:
            raise forms.ValidationError(
                _(u"That email address is already in use."))
        return email
| bsd-3-clause |
Galexrt/zulip | zerver/tests/test_outgoing_webhook_system.py | 2 | 6144 | # -*- coding: utf-8 -*-
import logging
import mock
import requests
from builtins import object
from django.test import override_settings
from requests import Response
from typing import Any, Dict, Tuple, Text, Optional
from zerver.lib.outgoing_webhook import do_rest_call, OutgoingWebhookServiceInterface
from zerver.lib.test_classes import ZulipTestCase
from zerver.models import get_realm, get_user
class ResponseMock(object):
    """Minimal stand-in for `requests.Response` as used by these tests."""

    def __init__(self, status_code, data, content):
        # type: (int, Any, str) -> None
        # Expose exactly the attributes the code under test reads.
        self.status_code, self.data, self.content = status_code, data, content
def request_exception_error(http_method, final_url, data, **request_kwargs):
    # type: (Any, Any, Any, **Any) -> Any
    """mock.patch side_effect stub: always raises a generic RequestException."""
    raise requests.exceptions.RequestException("I'm a generic exception :(")
def timeout_error(http_method, final_url, data, **request_kwargs):
    # type: (Any, Any, Any, **Any) -> Any
    """mock.patch side_effect stub: always raises a requests Timeout."""
    raise requests.exceptions.Timeout("Time is up!")
class MockServiceHandler(OutgoingWebhookServiceInterface):
    """Service handler stub whose success hook returns a fixed message."""
    def process_success(self, response, event):
        # type: (Response, Dict[Text, Any]) -> Optional[str]
        return "Success!"
# Shared handler instance used by the do_rest_call tests below; the real
# constructor arguments are irrelevant for these tests.
service_handler = MockServiceHandler(None, None, None, None)
class DoRestCallTests(ZulipTestCase):
    """Tests for do_rest_call(), driving it with the module-level
    service_handler and a synthetic outgoing-webhook queue event."""

    def setUp(self):
        # type: () -> None
        realm = get_realm("zulip")
        user_profile = get_user("outgoing-webhook@zulip.com", realm)
        self.mock_event = {
            # In the tests there is no active queue processor, so retries don't get processed.
            # Therefore, we need to emulate `retry_event` in the last stage when the maximum
            # retries have been exceeded.
            'failed_tries': 3,
            'message': {'display_recipient': 'Verona',
                        'subject': 'Foo',
                        'id': '',
                        'type': 'stream'},
            'user_profile_id': user_profile.id,
            'command': '',
            'service_name': ''}
        self.rest_operation = {'method': "POST",
                               'relative_url_path': "",
                               'request_kwargs': {},
                               'base_url': ""}
        self.bot_user = self.example_user('outgoing_webhook_bot')
        # Silence the expected warning-level log output from the failure paths.
        logging.disable(logging.WARNING)

    @mock.patch('zerver.lib.outgoing_webhook.succeed_with_message')
    def test_successful_request(self, mock_succeed_with_message):
        # type: (mock.Mock) -> None
        # A 200 response must be routed to succeed_with_message().
        response = ResponseMock(200, {"message": "testing"}, '')
        with mock.patch('requests.request', return_value=response):
            do_rest_call(self.rest_operation, None, self.mock_event, service_handler, None)
            self.assertTrue(mock_succeed_with_message.called)

    def test_retry_request(self):
        # type: () -> None
        # NOTE(review): fixed the type comment above -- this test takes no
        # mock arguments (requests.request is patched via the `with` below).
        response = ResponseMock(500, {"message": "testing"}, '')

        # failed_tries is already at the maximum, so a 5xx response should
        # notify the bot owner instead of scheduling another retry.
        self.mock_event['failed_tries'] = 3
        with mock.patch('requests.request', return_value=response):
            do_rest_call(self.rest_operation, None, self.mock_event, service_handler, None)
            bot_owner_notification = self.get_last_message()
            self.assertEqual(bot_owner_notification.content,
                             '''[A message](http://zulip.testserver/#narrow/stream/Verona/subject/Foo/near/) triggered an outgoing webhook.
The webhook got a response with status code *500*.''')
            self.assertEqual(bot_owner_notification.recipient_id, self.bot_user.bot_owner.id)
        # Reset shared state for any later assertions in this test object.
        self.mock_event['failed_tries'] = 0

    @mock.patch('zerver.lib.outgoing_webhook.fail_with_message')
    def test_fail_request(self, mock_fail_with_message):
        # type: (mock.Mock) -> None
        # A 4xx response must call fail_with_message() and notify the owner.
        response = ResponseMock(400, {"message": "testing"}, '')
        with mock.patch('requests.request', return_value=response):
            do_rest_call(self.rest_operation, None, self.mock_event, service_handler, None)
            bot_owner_notification = self.get_last_message()
            self.assertTrue(mock_fail_with_message.called)
            self.assertEqual(bot_owner_notification.content,
                             '''[A message](http://zulip.testserver/#narrow/stream/Verona/subject/Foo/near/) triggered an outgoing webhook.
The webhook got a response with status code *400*.''')
            self.assertEqual(bot_owner_notification.recipient_id, self.bot_user.bot_owner.id)

    @mock.patch('logging.info')
    @mock.patch('requests.request', side_effect=timeout_error)
    def test_timeout_request(self, mock_requests_request, mock_logger):
        # type: (mock.Mock, mock.Mock) -> None
        # NOTE(review): fixed the type comment above -- only two mocks are
        # injected (one per @mock.patch decorator on this method).
        do_rest_call(self.rest_operation, None, self.mock_event, service_handler, None)
        bot_owner_notification = self.get_last_message()
        self.assertEqual(bot_owner_notification.content,
                         '''[A message](http://zulip.testserver/#narrow/stream/Verona/subject/Foo/near/) triggered an outgoing webhook.
When trying to send a request to the webhook service, an exception of type Timeout occured:
```
Time is up!
```''')
        self.assertEqual(bot_owner_notification.recipient_id, self.bot_user.bot_owner.id)

    @mock.patch('logging.exception')
    @mock.patch('requests.request', side_effect=request_exception_error)
    @mock.patch('zerver.lib.outgoing_webhook.fail_with_message')
    def test_request_exception(self, mock_fail_with_message, mock_requests_request, mock_logger):
        # type: (mock.Mock, mock.Mock, mock.Mock) -> None
        # A generic RequestException must both fail the message and notify
        # the bot owner with the exception details.
        do_rest_call(self.rest_operation, None, self.mock_event, service_handler, None)
        bot_owner_notification = self.get_last_message()
        self.assertTrue(mock_fail_with_message.called)
        self.assertEqual(bot_owner_notification.content,
                         '''[A message](http://zulip.testserver/#narrow/stream/Verona/subject/Foo/near/) triggered an outgoing webhook.
When trying to send a request to the webhook service, an exception of type RequestException occured:
```
I'm a generic exception :(
```''')
        self.assertEqual(bot_owner_notification.recipient_id, self.bot_user.bot_owner.id)
| apache-2.0 |
bzamecnik/sms-tools | lectures/06-Harmonic-model/plots-code/oboe-autocorrelation.py | 1 | 1074 | import essentia.standard as ess
# matplotlib without any blocking GUI
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from smst.utils import audio
# Plot a 500-sample excerpt of an oboe note and its autocorrelation
# function with the detected peaks, saving the result as a PNG.
(fs, x) = audio.read_wav('../../../sounds/oboe-A4.wav')

M = 500  # analysis window length in samples
# BUG FIX: slice indices must be integers -- `.8 * fs` is a float, which
# raises TypeError when used to index the numpy sample array.
start = int(.8 * fs)
# Normalize the excerpt to a peak amplitude of 1 (hoisted so the slice is
# only computed once).
segment = x[start:start + M]
xp = segment / float(max(segment))
r = ess.AutoCorrelation(normalization='standard')(xp)
r = r / max(r)  # normalize so the zero-lag value is 1
peaks = ess.PeakDetection(threshold=.2, interpolate=False, minPosition=.01)(r)

plt.figure(1, figsize=(9, 7))
plt.subplot(211)
plt.plot(np.arange(M) / float(fs), xp, lw=1.5)
plt.axis([0, (M - 1) / float(fs), min(xp), max(xp)])
plt.xlabel('time (sec)')
plt.ylabel('amplitude')
plt.title('x (oboe-A4.wav)')

plt.subplot(212)
plt.plot(np.arange(M) / float(fs), r, 'r', lw=1.5)
plt.plot(peaks[0] * (M - 1) / float(fs), peaks[1], 'x', color='k', markeredgewidth=1.5)
plt.axis([0, (M - 1) / float(fs), min(r), max(r)])
plt.title('autocorrelation function + peaks')
plt.xlabel('lag time (sec)')
plt.ylabel('correlation')

plt.tight_layout()
plt.savefig('oboe-autocorrelation.png')
| agpl-3.0 |
louietsai/python-for-android | python-build/python-libs/gdata/samples/oauth/oauth_on_appengine/main_rsa.py | 126 | 7137 | #!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'e.bidelman (Eric Bidelman)'
import cgi
import os
import gdata.auth
import gdata.docs
import gdata.docs.service
import gdata.alt.appengine
from appengine_utilities.sessions import Session
from django.utils import simplejson
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp.util import run_wsgi_app
# Application-wide OAuth configuration. CONSUMER_KEY must be replaced with
# the key registered with Google for this app.
SETTINGS = {
  'APP_NAME': 'google-GDataOAuthAppEngine-v1',
  'CONSUMER_KEY': 'YOUR_CONSUMER_KEY',
  'SIG_METHOD': gdata.auth.OAuthSignatureMethod.RSA_SHA1,
  'SCOPES': ['http://docs.google.com/feeds/',
             'https://docs.google.com/feeds/']
}

# Read the RSA private key used for RSA-SHA1 request signing.
# BUG FIX: use a context manager so the file handle is closed even if
# read() raises (the original open()/read()/close() leaked on error).
with open('/path/to/your/rsa_private_key.pem') as f:
  RSA_KEY = f.read()

# Module-level Docs client, shared by all request handlers below.
gdocs = gdata.docs.service.DocsService(source=SETTINGS['APP_NAME'])
gdocs.SetOAuthInputParameters(SETTINGS['SIG_METHOD'], SETTINGS['CONSUMER_KEY'],
                              rsa_key=RSA_KEY)
gdata.alt.appengine.run_on_appengine(gdocs)
class MainPage(webapp.RequestHandler):
  """Main page displayed to user."""

  # GET /
  def get(self):
    """Renders index.html, varying the form by whether a token is stored."""
    if not users.get_current_user():
      self.redirect(users.create_login_url(self.request.uri))
      # BUG FIX: stop handling after the login redirect. The original fell
      # through and executed the rest of the handler for an anonymous user.
      return

    # Look up a stored access token covering all of our scopes.
    access_token = gdocs.token_store.find_token('%20'.join(SETTINGS['SCOPES']))
    if isinstance(access_token, gdata.auth.OAuthToken):
      form_action = '/fetch_data'
      form_value = 'Now fetch my docs!'
      revoke_token_link = True
    else:
      form_action = '/get_oauth_token'
      form_value = 'Give this website access to my Google Docs'
      revoke_token_link = None

    template_values = {
      'form_action': form_action,
      'form_value': form_value,
      'user': users.get_current_user(),
      'revoke_token_link': revoke_token_link,
      'oauth_token': access_token,
      'consumer': gdocs.GetOAuthInputParameters().GetConsumer(),
      'sig_method': gdocs.GetOAuthInputParameters().GetSignatureMethod().get_name()
    }

    path = os.path.join(os.path.dirname(__file__), 'index.html')
    self.response.out.write(template.render(path, template_values))
class OAuthDance(webapp.RequestHandler):
  """Handler for the 3 legged OAuth dance, v1.0a.

  This handler is responsible for fetching an initial OAuth request token,
  redirecting the user to the approval page. When the user grants access, they
  will be redirected back to this GET handler and their authorized request token
  will be exchanged for a long-lived access token.

  STYLE FIX: the original class body had a second bare string literal after
  the docstring (a no-op expression statement); it is merged in here.
  """

  # GET /get_oauth_token
  def get(self):
    """Invoked after we're redirected back from the approval page."""
    self.session = Session()

    oauth_token = gdata.auth.OAuthTokenFromUrl(self.request.uri)

    if oauth_token:
      oauth_token.oauth_input_params = gdocs.GetOAuthInputParameters()
      gdocs.SetOAuthToken(oauth_token)

      # 3.) Exchange the authorized request token for an access token
      oauth_verifier = self.request.get('oauth_verifier', default_value='')
      access_token = gdocs.UpgradeToOAuthAccessToken(
          oauth_verifier=oauth_verifier)

      # Remember the access token in the current user's token store
      if access_token and users.get_current_user():
        gdocs.token_store.add_token(access_token)
      elif access_token:
        gdocs.current_token = access_token
        gdocs.SetOAuthToken(access_token)

    self.redirect('/')

  # POST /get_oauth_token
  def post(self):
    """Fetches a request token and redirects the user to the approval page."""
    self.session = Session()

    if users.get_current_user():
      # 1.) REQUEST TOKEN STEP. Provide the data scope(s) and the page we'll
      # be redirected back to after the user grants access on the approval page.
      req_token = gdocs.FetchOAuthRequestToken(
          scopes=SETTINGS['SCOPES'], oauth_callback=self.request.uri)

      # Generate the URL to redirect the user to. Add the hd parameter for a
      # better user experience. Leaving it off will give the user the choice
      # of what account (Google vs. Google Apps) to login with.
      domain = self.request.get('domain', default_value='default')
      approval_page_url = gdocs.GenerateOAuthAuthorizationURL(
          extra_params={'hd': domain})

      # 2.) APPROVAL STEP. Redirect to user to Google's OAuth approval page.
      self.redirect(approval_page_url)
class FetchData(OAuthDance):
"""Fetches the user's data."""
"""This class inherits from OAuthDance in order to utilize OAuthDance.post()
in case of a request error (e.g. the user has a bad token)."""
# GET /fetch_data
def get(self):
self.redirect('/')
# POST /fetch_data
def post(self):
"""Fetches the user's data."""
try:
feed = gdocs.GetDocumentListFeed()
json = []
for entry in feed.entry:
if entry.lastModifiedBy is not None:
last_modified_by = entry.lastModifiedBy.email.text
else:
last_modified_by = ''
if entry.lastViewed is not None:
last_viewed = entry.lastViewed.text
else:
last_viewed = ''
json.append({'title': entry.title.text,
'links': {'alternate': entry.GetHtmlLink().href},
'published': entry.published.text,
'updated': entry.updated.text,
'resourceId': entry.resourceId.text,
'type': entry.GetDocumentType(),
'lastModifiedBy': last_modified_by,
'lastViewed': last_viewed
})
self.response.out.write(simplejson.dumps(json))
except gdata.service.RequestError, error:
OAuthDance.post(self)
class RevokeToken(webapp.RequestHandler):
  """Handler that lets the signed-in user revoke this app's OAuth token."""

  # GET /revoke_token
  def get(self):
    """Revokes the current user's OAuth access token."""
    try:
      gdocs.RevokeOAuthToken()
    except gdata.service.RevokingOAuthTokenFailed:
      # Best effort: even if the remote revocation fails we still clear
      # the locally stored tokens below.
      pass

    gdocs.token_store.remove_all_tokens()
    self.redirect('/')
def main():
  """Builds the WSGI application and hands it to App Engine's runner."""
  routes = [
    ('/', MainPage),
    ('/get_oauth_token', OAuthDance),
    ('/fetch_data', FetchData),
    ('/revoke_token', RevokeToken),
  ]
  run_wsgi_app(webapp.WSGIApplication(routes, debug=True))
| apache-2.0 |
vvuk/skia | PRESUBMIT.py | 11 | 7822 | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for Skia.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
import fnmatch
import os
import re
import sys
import traceback
REVERT_CL_SUBJECT_PREFIX = 'Revert '
SKIA_TREE_STATUS_URL = 'http://skia-tree-status.appspot.com'
PUBLIC_API_OWNERS = (
'reed@chromium.org',
'reed@google.com',
'bsalomon@chromium.org',
'bsalomon@google.com',
)
AUTHORS_FILE_NAME = 'AUTHORS'
def _CheckChangeHasEol(input_api, output_api, source_file_filter=None):
  """Checks that files end with at least one \n (LF).

  Returns a single PresubmitPromptWarning listing the offending files, or an
  empty list when every affected file ends with a newline.
  """
  eof_files = []
  for f in input_api.AffectedSourceFiles(source_file_filter):
    contents = input_api.ReadFile(f, 'rb')
    # Check that the file ends in at least one newline character.
    # NOTE(review): files of exactly one byte are skipped by the len() > 1
    # guard -- presumably intentional, but worth confirming.
    if len(contents) > 1 and contents[-1:] != '\n':
      eof_files.append(f.LocalPath())

  if eof_files:
    return [output_api.PresubmitPromptWarning(
      'These files should end in a newline character:',
      items=eof_files)]
  return []
def _CommonChecks(input_api, output_api):
  """Presubmit checks common to upload and commit.

  Currently this runs the trailing-newline check over source files.
  """
  results = []
  # str.endswith accepts a tuple of suffixes; this replaces the original
  # chain of six or-ed endswith() calls with a single membership test.
  sources = lambda x: x.LocalPath().endswith(
      ('.h', '.gypi', '.gyp', '.py', '.sh', '.cpp'))
  results.extend(
      _CheckChangeHasEol(
          input_api, output_api, source_file_filter=sources))
  return results
def CheckChangeOnUpload(input_api, output_api):
  """Presubmit checks run at upload time.

  The following are the presubmit checks:
  * Check change has one and only one EOL.
  """
  return list(_CommonChecks(input_api, output_api))
def _CheckTreeStatus(input_api, output_api, json_url):
  """Check whether to allow commit.

  Args:
    input_api: input related apis.
    output_api: output related apis.
    json_url: url to download json style status.

  Returns:
    An empty list when the tree is open, a PresubmitPromptWarning when the
    tree is in 'Caution' state (interactive shells only), or the canned
    closed-tree error augmented with the current sheriff's contact info.
  """
  tree_status_results = input_api.canned_checks.CheckTreeIsOpen(
      input_api, output_api, json_url=json_url)
  if not tree_status_results:
    # Check for caution state only if tree is not closed.
    connection = input_api.urllib2.urlopen(json_url)
    status = input_api.json.loads(connection.read())
    connection.close()
    if ('caution' in status['message'].lower() and
        os.isatty(sys.stdout.fileno())):
      # Display a prompt only if we are in an interactive shell. Without this
      # check the commit queue behaves incorrectly because it considers
      # prompts to be failures.
      short_text = 'Tree state is: ' + status['general_state']
      long_text = status['message'] + '\n' + json_url
      tree_status_results.append(
          output_api.PresubmitPromptWarning(
              message=short_text, long_text=long_text))
  else:
    # Tree status is closed. Put in message about contacting sheriff.
    connection = input_api.urllib2.urlopen(
        SKIA_TREE_STATUS_URL + '/current-sheriff')
    sheriff_details = input_api.json.loads(connection.read())
    if sheriff_details:
      # Mutates the private _message of the canned closed-tree result.
      tree_status_results[0]._message += (
          '\n\nPlease contact the current Skia sheriff (%s) if you are trying '
          'to submit a build fix\nand do not know how to submit because the '
          'tree is closed') % sheriff_details['username']
  return tree_status_results
def _CheckOwnerIsInAuthorsFile(input_api, output_api):
  """Checks that the issue owner's email matches an entry in AUTHORS.

  Entries in AUTHORS are '<pattern>' fnmatch patterns; returns a
  PresubmitError when the owner matches none of them.
  """
  results = []
  issue = input_api.change.issue
  if issue and input_api.rietveld:
    issue_properties = input_api.rietveld.get_issue_properties(
        issue=int(issue), messages=False)
    owner_email = issue_properties['owner_email']

    try:
      # BUG FIX: the original iterated `for line in open(...)` and never
      # closed the file handle; a context manager guarantees cleanup.
      with open(AUTHORS_FILE_NAME) as authors_file:
        authors_content = ''
        for line in authors_file:
          if not line.startswith('#'):
            authors_content += line
      email_fnmatches = re.findall('<(.*)>', authors_content)
      for email_fnmatch in email_fnmatches:
        if fnmatch.fnmatch(owner_email, email_fnmatch):
          # Found a match, the user is in the AUTHORS file break out of the loop
          break
      else:
        # for/else: no pattern matched the owner's email.
        # TODO(rmistry): Remove the below CLA messaging once a CLA checker has
        # been added to the CQ.
        results.append(
          output_api.PresubmitError(
            'The email %s is not in Skia\'s AUTHORS file.\n'
            'Issue owner, this CL must include an addition to the Skia AUTHORS '
            'file.\n'
            'Googler reviewers, please check that the AUTHORS entry '
            'corresponds to an email address in http://goto/cla-signers. If it '
            'does not then ask the issue owner to sign the CLA at '
            'https://developers.google.com/open-source/cla/individual '
            '(individual) or '
            'https://developers.google.com/open-source/cla/corporate '
            '(corporate).'
            % owner_email))
    except IOError:
      # Do not fail if authors file cannot be found.
      traceback.print_exc()
      input_api.logging.error('AUTHORS file not found!')

  return results
def _CheckLGTMsForPublicAPI(input_api, output_api):
  """Check LGTMs for public API changes.

  For public API files make sure there is an LGTM from the list of owners in
  PUBLIC_API_OWNERS.
  """
  results = []
  requires_owner_check = False
  for affected_svn_file in input_api.AffectedFiles():
    affected_file_path = affected_svn_file.AbsoluteLocalPath()
    file_path, file_ext = os.path.splitext(affected_file_path)
    # We only care about files that end in .h and are under the include dir.
    if file_ext == '.h' and 'include' in file_path.split(os.path.sep):
      requires_owner_check = True
      # One public header is enough to require the check; stop scanning.
      break

  if not requires_owner_check:
    return results

  lgtm_from_owner = False
  issue = input_api.change.issue
  if issue and input_api.rietveld:
    issue_properties = input_api.rietveld.get_issue_properties(
        issue=int(issue), messages=True)
    if re.match(REVERT_CL_SUBJECT_PREFIX, issue_properties['subject'], re.I):
      # It is a revert CL, ignore the public api owners check.
      return results

    if issue_properties['owner_email'] in PUBLIC_API_OWNERS:
      # An owner created the CL that is an automatic LGTM.
      lgtm_from_owner = True

    messages = issue_properties.get('messages')
    if messages:
      for message in messages:
        if (message['sender'] in PUBLIC_API_OWNERS and
            'lgtm' in message['text'].lower()):
          # Found an lgtm in a message from an owner.
          lgtm_from_owner = True
          # STYLE FIX: removed the stray trailing semicolon after `break`.
          break

  if not lgtm_from_owner:
    results.append(
        output_api.PresubmitError(
            'Since the CL is editing public API, you must have an LGTM from '
            'one of: %s' % str(PUBLIC_API_OWNERS)))
  return results
def CheckChangeOnCommit(input_api, output_api):
  """Presubmit checks run at commit time.

  Runs the common checks (single trailing EOL), verifies the Skia tree is
  open on http://skia-tree-status.appspot.com/ (warning in 'Caution' state,
  error in 'Closed' state), and applies the public-API LGTM and AUTHORS-file
  checks.
  """
  return (_CommonChecks(input_api, output_api)
          + _CheckTreeStatus(input_api, output_api, json_url=(
              SKIA_TREE_STATUS_URL + '/banner-status?format=json'))
          + _CheckLGTMsForPublicAPI(input_api, output_api)
          + _CheckOwnerIsInAuthorsFile(input_api, output_api))
| bsd-3-clause |
simone-campagna/zirkon | tests/integration/test_schema_validation.py | 2 | 1832 | # -*- coding: utf-8 -*-
#
# Copyright 2013 Simone Campagna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = "Simone Campagna"
from zirkon.schema import Schema
from zirkon.config import Config
def test_Schema_validate_depend_up():
    # 'ax' has no default: the config derives it from a value that lives in
    # the nested [sub] section (a dependency "up" from root into a child).
    schema = Schema.from_string("""\
ax = Int()
[sub]
sx = Int(default=10)
""")
    config = Config.from_string("""\
ax = SECTION["sub"]["sx"] * 5
""")
    validation = schema.validate(config)
    # A falsy validation result means no validation errors were recorded.
    assert not validation
    assert config["sub"]["sx"] == 10
    assert config["ax"] == 50
def test_Schema_validate_depend_down():
    # [sub].sx is derived from the parent section's 'ax' via SECTION.parent
    # (a dependency "down" from a child section into the root).
    schema = Schema.from_string("""\
ax = Int(default=10)
[sub]
sx = Int()
""")
    config = Config.from_string("""\
[sub]
sx = SECTION.parent["ax"] * 5
""")
    validation = schema.validate(config)
    # A falsy validation result means no validation errors were recorded.
    assert not validation
    assert config["ax"] == 10
    assert config["sub"]["sx"] == 50
def test_Schema_validate_depend_up_down():
    # Defaults reference values in both directions at once: 'ay' depends on
    # [sub].sy while [sub].sx depends on the parent's 'ax'. Validating an
    # empty config must fill in every default correctly.
    schema = Schema.from_string("""\
ax = Int(default=10)
ay = Int(default=SECTION["sub"]["sy"] * 7)
[sub]
sx = Int(default=SECTION.parent["ax"] * 3)
sy = Int(default=100)
""")
    config = Config()
    validation = schema.validate(config)
    # A falsy validation result means no validation errors were recorded.
    assert not validation
    assert config["ax"] == 10
    assert config["ay"] == 700
    assert config["sub"]["sx"] == 30
    assert config["sub"]["sy"] == 100
| apache-2.0 |
yephper/django | tests/signals/models.py | 1 | 1032 | """
Testing signals before/after saving and deleting.
"""
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Person(models.Model):
    # Minimal model used by the save/delete signal tests (see module docstring).
    first_name = models.CharField(max_length=20)
    last_name = models.CharField(max_length=20)

    def __str__(self):
        # Human-readable form: "<first> <last>".
        return "%s %s" % (self.first_name, self.last_name)
@python_2_unicode_compatible
class Car(models.Model):
    # Second simple model for the signal tests, structurally parallel to Person.
    make = models.CharField(max_length=20)
    model = models.CharField(max_length=20)

    def __str__(self):
        # Human-readable form: "<make> <model>".
        return "%s %s" % (self.make, self.model)
@python_2_unicode_compatible
class Author(models.Model):
    # One side of the Book.authors many-to-many relation below.
    name = models.CharField(max_length=20)

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Book(models.Model):
    # Model with an M2M field, so signal behavior on related managers is covered.
    name = models.CharField(max_length=20)
    authors = models.ManyToManyField(Author)

    def __str__(self):
        return self.name
| bsd-3-clause |
overtherain/scriptfile | software/googleAppEngine/lib/django_1_3/django/contrib/auth/tests/models.py | 318 | 1493 | from django.conf import settings
from django.test import TestCase
from django.contrib.auth.models import User, SiteProfileNotAvailable
class ProfileTestCase(TestCase):
    """Tests for User.get_profile() failure modes around AUTH_PROFILE_MODULE."""
    fixtures = ['authtestdata.json']

    def setUp(self):
        """Backs up the AUTH_PROFILE_MODULE setting (None when unset)."""
        self.old_AUTH_PROFILE_MODULE = getattr(
            settings, 'AUTH_PROFILE_MODULE', None)

    def tearDown(self):
        """Restores AUTH_PROFILE_MODULE, deleting it if it was never set."""
        if self.old_AUTH_PROFILE_MODULE is not None:
            settings.AUTH_PROFILE_MODULE = self.old_AUTH_PROFILE_MODULE
        elif hasattr(settings, 'AUTH_PROFILE_MODULE'):
            del settings.AUTH_PROFILE_MODULE

    def test_site_profile_not_available(self):
        """get_profile() raises SiteProfileNotAvailable for every bad config."""
        # Calling get_profile without AUTH_PROFILE_MODULE set at all.
        if hasattr(settings, 'AUTH_PROFILE_MODULE'):
            del settings.AUTH_PROFILE_MODULE
        user = User.objects.get(username='testclient')
        self.assertRaises(SiteProfileNotAvailable, user.get_profile)

        # Bad syntax in AUTH_PROFILE_MODULE (no "app.model" dot).
        settings.AUTH_PROFILE_MODULE = 'foobar'
        self.assertRaises(SiteProfileNotAvailable, user.get_profile)

        # A module path that doesn't exist.
        settings.AUTH_PROFILE_MODULE = 'foo.bar'
        self.assertRaises(SiteProfileNotAvailable, user.get_profile)
| mit |
noamelf/Open-Knesset | committees/migrations/0008_auto__add_topic.py | 14 | 23038 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
depends_on = (
('links', '0001_initial'),
('events', '0001_initial'),
)
    def forwards(self, orm):
        """Creates the 'Topic' model table and its three M2M join tables."""
        # Adding model 'Topic'
        db.create_table('committees_topic', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('creator', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=256)),
            ('description', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('status', self.gf('django.db.models.fields.IntegerField')(default=0)),
            ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('log', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
        ))
        db.send_create_signal('committees', ['Topic'])

        # Adding M2M table for field editors on 'Topic'
        db.create_table('committees_topic_editors', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('topic', models.ForeignKey(orm['committees.topic'], null=False)),
            ('user', models.ForeignKey(orm['auth.user'], null=False))
        ))
        db.create_unique('committees_topic_editors', ['topic_id', 'user_id'])

        # Adding M2M table for field committees on 'Topic'
        db.create_table('committees_topic_committees', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('topic', models.ForeignKey(orm['committees.topic'], null=False)),
            ('committee', models.ForeignKey(orm['committees.committee'], null=False))
        ))
        db.create_unique('committees_topic_committees', ['topic_id', 'committee_id'])

        # Adding M2M table for field meetings on 'Topic'
        db.create_table('committees_topic_meetings', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('topic', models.ForeignKey(orm['committees.topic'], null=False)),
            ('committeemeeting', models.ForeignKey(orm['committees.committeemeeting'], null=False))
        ))
        db.create_unique('committees_topic_meetings', ['topic_id', 'committeemeeting_id'])
def backwards(self, orm):
    """Undo forwards(): drop the 'Topic' table and its three M2M join tables."""
    db.delete_table('committees_topic')
    # Join tables are dropped in the same order they were created.
    for field_name in ('editors', 'committees', 'meetings'):
        db.delete_table('committees_topic_%s' % field_name)
# South "frozen ORM": an auto-generated snapshot of every model this migration
# touches (and their related models) at the time the migration was written.
# South uses it to build the `orm` object passed to forwards()/backwards(), so
# the migration runs against these historical definitions rather than the
# current models.py. Auto-generated -- do not edit by hand.
models = {
    'auth.group': {
        'Meta': {'object_name': 'Group'},
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
        'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
    },
    'auth.permission': {
        'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
        'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
        'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
    },
    'auth.user': {
        'Meta': {'object_name': 'User'},
        'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
        'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
        'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
        'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
        'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
        'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
        'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
        'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
        'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
    },
    'committees.committee': {
        'Meta': {'object_name': 'Committee'},
        'chairpersons': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'chaired_committees'", 'symmetrical': 'False', 'to': "orm['mks.Member']"}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'committees'", 'symmetrical': 'False', 'to': "orm['mks.Member']"}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
        'replacements': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'replacing_in_committees'", 'symmetrical': 'False', 'to': "orm['mks.Member']"})
    },
    'committees.committeemeeting': {
        'Meta': {'ordering': "('-date',)", 'object_name': 'CommitteeMeeting'},
        'committee': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'meetings'", 'to': "orm['committees.Committee']"}),
        'date': ('django.db.models.fields.DateField', [], {}),
        'date_string': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'mks_attended': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'committee_meetings'", 'symmetrical': 'False', 'to': "orm['mks.Member']"}),
        'protocol_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
        'src_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
        'topics': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
        'votes_mentioned': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'committee_meetings'", 'blank': 'True', 'to': "orm['laws.Vote']"})
    },
    'committees.protocolpart': {
        'Meta': {'object_name': 'ProtocolPart'},
        'body': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
        'header': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'meeting': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parts'", 'to': "orm['committees.CommitteeMeeting']"}),
        'order': ('django.db.models.fields.IntegerField', [], {}),
        'speaker': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'protocol_parts'", 'null': 'True', 'to': "orm['persons.Person']"})
    },
    # The model this migration introduces (see forwards()).
    'committees.topic': {
        'Meta': {'object_name': 'Topic'},
        'committees': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'topics'", 'symmetrical': 'False', 'to': "orm['committees.Committee']"}),
        'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
        'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
        'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
        'editors': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'editing_topics'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.User']"}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'log': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
        'meetings': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['committees.CommitteeMeeting']", 'null': 'True', 'blank': 'True'}),
        'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
        'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
        'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
    },
    'contenttypes.contenttype': {
        'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
        'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
    },
    'events.event': {
        'Meta': {'object_name': 'Event'},
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'what': ('django.db.models.fields.TextField', [], {}),
        'when': ('django.db.models.fields.DateTimeField', [], {}),
        'where': ('django.db.models.fields.TextField', [], {}),
        'which_pk': ('django.db.models.fields.TextField', [], {'null': 'True'}),
        'which_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'event_for_event'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
        'who': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['persons.Person']", 'symmetrical': 'False'})
    },
    'laws.vote': {
        'Meta': {'ordering': "('-time', '-id')", 'object_name': 'Vote'},
        'against_party': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
        'controversy': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
        'full_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
        'full_text_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'importance': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
        'meeting_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
        'src_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
        'src_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
        'summary': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
        'time': ('django.db.models.fields.DateTimeField', [], {}),
        'time_string': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
        'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
        'vote_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
        'votes': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'votes'", 'blank': 'True', 'through': "orm['laws.VoteAction']", 'to': "orm['mks.Member']"}),
        'votes_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
    },
    'laws.voteaction': {
        'Meta': {'object_name': 'VoteAction'},
        'against_coalition': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        'against_opposition': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        'against_own_bill': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        'against_party': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'member': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Member']"}),
        'type': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
        'vote': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['laws.Vote']"})
    },
    'links.link': {
        'Meta': {'object_name': 'Link'},
        'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
        'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_link'", 'to': "orm['contenttypes.ContentType']"}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'link_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['links.LinkType']", 'null': 'True', 'blank': 'True'}),
        'object_pk': ('django.db.models.fields.TextField', [], {}),
        'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
        'url': ('django.db.models.fields.URLField', [], {'max_length': '1000'})
    },
    'links.linktype': {
        'Meta': {'object_name': 'LinkType'},
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
        'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
    },
    'mks.member': {
        'Meta': {'ordering': "['name']", 'object_name': 'Member'},
        'area_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
        'average_monthly_committee_presence': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
        'average_weekly_presence_hours': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
        'backlinks_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
        'bills_stats_approved': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
        'bills_stats_first': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
        'bills_stats_pre': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
        'bills_stats_proposed': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
        'blog': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['planet.Blog']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
        'current_party': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'members'", 'null': 'True', 'to': "orm['mks.Party']"}),
        'current_role_descriptions': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
        'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
        'date_of_death': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
        'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
        'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
        'family_status': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
        'fax': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
        'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'img_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
        'is_current': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
        'number_of_children': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
        'parties': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'all_members'", 'symmetrical': 'False', 'through': "orm['mks.Membership']", 'to': "orm['mks.Party']"}),
        'phone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
        'place_of_birth': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
        'place_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
        'place_of_residence_lat': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
        'place_of_residence_lon': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
        'residence_centrality': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
        'residence_economy': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
        'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
        'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
        'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
        'year_of_aliyah': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
    },
    'mks.membership': {
        'Meta': {'object_name': 'Membership'},
        'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'member': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Member']"}),
        'party': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Party']"}),
        'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
    },
    'mks.party': {
        'Meta': {'ordering': "('-number_of_seats',)", 'object_name': 'Party'},
        'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'is_coalition': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
        'number_of_members': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
        'number_of_seats': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
        'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
    },
    'persons.person': {
        'Meta': {'ordering': "('name',)", 'object_name': 'Person'},
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'mk': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'person'", 'null': 'True', 'to': "orm['mks.Member']"}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
        'titles': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'persons'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['persons.Title']"})
    },
    'persons.title': {
        'Meta': {'object_name': 'Title'},
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '64'})
    },
    'planet.blog': {
        'Meta': {'ordering': "('title', 'url')", 'object_name': 'Blog'},
        'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'title': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
        'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '1024', 'db_index': 'True'})
    },
    'tagging.tag': {
        'Meta': {'ordering': "('name',)", 'object_name': 'Tag'},
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
    },
    'tagging.taggeditem': {
        'Meta': {'unique_together': "(('tag', 'content_type', 'object_id'),)", 'object_name': 'TaggedItem'},
        'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
        'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['tagging.Tag']"})
    }
}
# App(s) whose models are fully frozen in the dict above.
complete_apps = ['committees']
| bsd-3-clause |
yugangw-msft/azure-cli | src/azure-cli/azure/cli/command_modules/storage/operations/account.py | 1 | 46055 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
"""Custom operations for storage account commands"""
import os
from azure.cli.command_modules.storage._client_factory import storage_client_factory, cf_sa_for_keys
from azure.cli.core.util import get_file_json, shell_safe_json_parse, find_child_item
from knack.log import get_logger
from knack.util import CLIError
# Module-level logger used by the storage-account commands below.
logger = get_logger(__name__)
def check_name_availability(cmd, client, name):
    """Ask the management plane whether *name* is free to use as a storage account name."""
    params_cls = cmd.get_models('StorageAccountCheckNameAvailabilityParameters')
    return client.check_name_availability(params_cls(name=name))
def regenerate_key(cmd, client, account_name, key_name, resource_group_name=None):
    """Regenerate the given access key ('key1'/'key2') of a storage account."""
    params_cls = cmd.get_models('StorageAccountRegenerateKeyParameters')
    payload = params_cls(key_name=key_name)
    return client.regenerate_key(resource_group_name, account_name, payload)
def generate_sas(client, services, resource_types, permission, expiry, start=None, ip=None, protocol=None):
    """Generate an account-level shared access signature.

    The data-plane *client* must carry both an account name and an account key;
    otherwise the SAS cannot be signed and a RequiredArgumentMissingError is raised.
    """
    from azure.cli.core.azclierror import RequiredArgumentMissingError
    if client.account_name and client.account_key:
        return client.generate_shared_access_signature(services, resource_types, permission, expiry,
                                                       start=start, ip=ip, protocol=protocol)
    error_msg = """
Missing/Invalid credentials to access storage service. The following variations are accepted:
(1) account name and key (--account-name and --account-key options or
set AZURE_STORAGE_ACCOUNT and AZURE_STORAGE_KEY environment variables)
(2) account name (--account-name option or AZURE_STORAGE_ACCOUNT environment variable;
this will make calls to query for a storage account key using login credentials)
(3) connection string (--connection-string option or
set AZURE_STORAGE_CONNECTION_STRING environment variable); some shells will require
quoting to preserve literal character interpretation.
"""
    raise RequiredArgumentMissingError(error_msg)
# pylint: disable=too-many-locals, too-many-statements, too-many-branches
def create_storage_account(cmd, resource_group_name, account_name, sku=None, location=None, kind=None,
                           tags=None, custom_domain=None, encryption_services=None, encryption_key_source=None,
                           encryption_key_name=None, encryption_key_vault=None, encryption_key_version=None,
                           access_tier=None, https_only=None,
                           enable_files_aadds=None, bypass=None, default_action=None, assign_identity=False,
                           enable_large_file_share=None, enable_files_adds=None, domain_name=None,
                           net_bios_domain_name=None, forest_name=None, domain_guid=None, domain_sid=None,
                           azure_storage_sid=None, enable_hierarchical_namespace=None,
                           encryption_key_type_for_table=None, encryption_key_type_for_queue=None,
                           routing_choice=None, publish_microsoft_endpoints=None, publish_internet_endpoints=None,
                           require_infrastructure_encryption=None, allow_blob_public_access=None,
                           min_tls_version=None, allow_shared_key_access=None, edge_zone=None,
                           identity_type=None, user_identity_id=None, key_vault_user_identity_id=None,
                           sas_expiration_period=None, key_expiration_period_in_days=None):
    """Create a storage account.

    Maps the flat CLI option set onto a ``StorageAccountCreateParameters``
    payload (encryption, identity, networking, Azure Files auth, routing,
    key/SAS policies, ...) and kicks off the long-running create operation.

    Returns the LRO poller from ``storage_accounts.begin_create``.
    Raises ``CLIError`` when the AD-for-files arguments are inconsistent with
    --enable-files-adds, or when --bypass is given without --default-action.
    """
    # Model classes are resolved through the command's SDK profile so the
    # right API version is used.
    StorageAccountCreateParameters, Kind, Sku, CustomDomain, AccessTier, Identity, Encryption, NetworkRuleSet = \
        cmd.get_models('StorageAccountCreateParameters', 'Kind', 'Sku', 'CustomDomain', 'AccessTier', 'Identity',
                       'Encryption', 'NetworkRuleSet')
    scf = storage_client_factory(cmd.cli_ctx)
    if kind is None:
        logger.warning("The default kind for created storage account will change to 'StorageV2' from 'Storage' "
                       "in the future")
    params = StorageAccountCreateParameters(sku=Sku(name=sku), kind=Kind(kind), location=location, tags=tags,
                                            encryption=Encryption())
    # TODO: remove this part when server side remove the constraint
    if encryption_services is None:
        params.encryption.services = {'blob': {}}
    if custom_domain:
        params.custom_domain = CustomDomain(name=custom_domain, use_sub_domain=None)
    # Encryption: services, key source, and (for Key Vault CMK) key vault properties.
    if encryption_services:
        params.encryption = Encryption(services=encryption_services)
    if encryption_key_source is not None:
        params.encryption.key_source = encryption_key_source
    if params.encryption.key_source and params.encryption.key_source == "Microsoft.Keyvault":
        if params.encryption.key_vault_properties is None:
            KeyVaultProperties = cmd.get_models('KeyVaultProperties')
            params.encryption.key_vault_properties = KeyVaultProperties(key_name=encryption_key_name,
                                                                        key_vault_uri=encryption_key_vault,
                                                                        key_version=encryption_key_version)
    # Managed identity: user-assigned identities are attached as a dict keyed
    # by resource id; the key-vault encryption identity is set separately.
    if identity_type and 'UserAssigned' in identity_type and user_identity_id:
        params.identity = Identity(type=identity_type, user_assigned_identities={user_identity_id: {}})
    elif identity_type:
        params.identity = Identity(type=identity_type)
    if key_vault_user_identity_id is not None:
        EncryptionIdentity = cmd.get_models('EncryptionIdentity')
        params.encryption.encryption_identity = EncryptionIdentity(
            encryption_user_assigned_identity=key_vault_user_identity_id)
    if access_tier:
        params.access_tier = AccessTier(access_tier)
    # NOTE(review): --assign-identity overwrites any identity set from
    # --identity-type above -- presumably the two options are mutually
    # exclusive at the argument layer; confirm.
    if assign_identity:
        params.identity = Identity(type='SystemAssigned')
    if https_only is not None:
        params.enable_https_traffic_only = https_only
    if enable_hierarchical_namespace is not None:
        params.is_hns_enabled = enable_hierarchical_namespace
    # Azure Files identity-based authentication: AADDS is a simple toggle;
    # classic AD requires the full set of domain arguments.
    AzureFilesIdentityBasedAuthentication = cmd.get_models('AzureFilesIdentityBasedAuthentication')
    if enable_files_aadds is not None:
        params.azure_files_identity_based_authentication = AzureFilesIdentityBasedAuthentication(
            directory_service_options='AADDS' if enable_files_aadds else 'None')
    if enable_files_adds is not None:
        ActiveDirectoryProperties = cmd.get_models('ActiveDirectoryProperties')
        if enable_files_adds:  # enable AD
            if not (domain_name and net_bios_domain_name and forest_name and domain_guid and domain_sid and
                    azure_storage_sid):
                raise CLIError("To enable ActiveDirectoryDomainServicesForFile, user must specify all of: "
                               "--domain-name, --net-bios-domain-name, --forest-name, --domain-guid, --domain-sid and "
                               "--azure_storage_sid arguments in Azure Active Directory Properties Argument group.")
            active_directory_properties = ActiveDirectoryProperties(domain_name=domain_name,
                                                                    net_bios_domain_name=net_bios_domain_name,
                                                                    forest_name=forest_name, domain_guid=domain_guid,
                                                                    domain_sid=domain_sid,
                                                                    azure_storage_sid=azure_storage_sid)
            # TODO: Enabling AD will automatically disable AADDS. Maybe we should throw error message
            params.azure_files_identity_based_authentication = AzureFilesIdentityBasedAuthentication(
                directory_service_options='AD',
                active_directory_properties=active_directory_properties)
        else:  # disable AD
            if domain_name or net_bios_domain_name or forest_name or domain_guid or domain_sid or azure_storage_sid:  # pylint: disable=too-many-boolean-expressions
                raise CLIError("To disable ActiveDirectoryDomainServicesForFile, user can't specify any of: "
                               "--domain-name, --net-bios-domain-name, --forest-name, --domain-guid, --domain-sid and "
                               "--azure_storage_sid arguments in Azure Active Directory Properties Argument group.")
            params.azure_files_identity_based_authentication = AzureFilesIdentityBasedAuthentication(
                directory_service_options='None')
    if enable_large_file_share:
        LargeFileSharesState = cmd.get_models('LargeFileSharesState')
        params.large_file_shares_state = LargeFileSharesState("Enabled")
    # Network rules: --bypass is only meaningful together with --default-action.
    if NetworkRuleSet and (bypass or default_action):
        if bypass and not default_action:
            raise CLIError('incorrect usage: --default-action ACTION [--bypass SERVICE ...]')
        params.network_rule_set = NetworkRuleSet(bypass=bypass, default_action=default_action, ip_rules=None,
                                                 virtual_network_rules=None)
    # NOTE(review): this branch replaces params.encryption wholesale, which
    # discards any services/key-source/key-vault/identity encryption settings
    # applied above when table/queue key types are also requested -- confirm
    # whether that combination is excluded at the argument layer.
    if encryption_key_type_for_table is not None or encryption_key_type_for_queue is not None:
        EncryptionServices = cmd.get_models('EncryptionServices')
        EncryptionService = cmd.get_models('EncryptionService')
        params.encryption = Encryption()
        params.encryption.services = EncryptionServices()
        if encryption_key_type_for_table is not None:
            table_encryption_service = EncryptionService(enabled=True, key_type=encryption_key_type_for_table)
            params.encryption.services.table = table_encryption_service
        if encryption_key_type_for_queue is not None:
            queue_encryption_service = EncryptionService(enabled=True, key_type=encryption_key_type_for_queue)
            params.encryption.services.queue = queue_encryption_service
    # Routing preference (Microsoft vs Internet routing, endpoint publishing).
    if any([routing_choice, publish_microsoft_endpoints, publish_internet_endpoints]):
        RoutingPreference = cmd.get_models('RoutingPreference')
        params.routing_preference = RoutingPreference(
            routing_choice=routing_choice,
            publish_microsoft_endpoints=publish_microsoft_endpoints,
            publish_internet_endpoints=publish_internet_endpoints
        )
    if allow_blob_public_access is not None:
        params.allow_blob_public_access = allow_blob_public_access
    if require_infrastructure_encryption:
        params.encryption.require_infrastructure_encryption = require_infrastructure_encryption
    if min_tls_version:
        params.minimum_tls_version = min_tls_version
    if allow_shared_key_access is not None:
        params.allow_shared_key_access = allow_shared_key_access
    if edge_zone is not None:
        ExtendedLocation, ExtendedLocationTypes = cmd.get_models('ExtendedLocation', 'ExtendedLocationTypes')
        params.extended_location = ExtendedLocation(name=edge_zone,
                                                    type=ExtendedLocationTypes.EDGE_ZONE)
    # Key and SAS expiration policies.
    if key_expiration_period_in_days is not None:
        KeyPolicy = cmd.get_models('KeyPolicy')
        params.key_policy = KeyPolicy(key_expiration_period_in_days=key_expiration_period_in_days)
    if sas_expiration_period:
        SasPolicy = cmd.get_models('SasPolicy')
        params.sas_policy = SasPolicy(sas_expiration_period=sas_expiration_period)
    return scf.storage_accounts.begin_create(resource_group_name, account_name, params)
def list_storage_accounts(cmd, resource_group_name=None):
    """List storage accounts in the subscription, or only in *resource_group_name* if given."""
    scf = storage_client_factory(cmd.cli_ctx)
    if resource_group_name:
        return list(scf.storage_accounts.list_by_resource_group(resource_group_name))
    return list(scf.storage_accounts.list())
def show_storage_account_connection_string(cmd, resource_group_name, account_name, protocol='https', blob_endpoint=None,
                                           file_endpoint=None, queue_endpoint=None, table_endpoint=None, sas_token=None,
                                           key_name='primary'):
    """Build a storage connection string, fetching an account key when an account name is given.

    Returns a dict of the form ``{'connectionString': '...'}``.
    """
    endpoint_suffix = cmd.cli_ctx.cloud.suffixes.storage_endpoint
    segments = ['DefaultEndpointsProtocol={}'.format(protocol),
                'EndpointSuffix={}'.format(endpoint_suffix)]
    if account_name is not None:
        scf = cf_sa_for_keys(cmd.cli_ctx, None)
        obj = scf.list_keys(resource_group_name, account_name, logging_enable=False)  # pylint: disable=no-member
        try:
            keys = [obj.keys[0].value, obj.keys[1].value]  # pylint: disable=no-member
        except AttributeError:
            # Older API versions have a slightly different structure
            keys = [obj.key1, obj.key2]  # pylint: disable=no-member
        segments.append('AccountName={}'.format(account_name))
        segments.append('AccountKey={}'.format(keys[0] if key_name == 'primary' else keys[1]))
    # Optional explicit endpoints / SAS token are appended only when provided,
    # in the same fixed order the service expects.
    optional_parts = (('BlobEndpoint', blob_endpoint),
                      ('FileEndpoint', file_endpoint),
                      ('QueueEndpoint', queue_endpoint),
                      ('TableEndpoint', table_endpoint),
                      ('SharedAccessSignature', sas_token))
    for label, value in optional_parts:
        if value:
            segments.append('{}={}'.format(label, value))
    return {'connectionString': ';'.join(segments)}
def show_storage_account_usage(cmd, location):
    """Return the 'StorageAccounts' usage entry for *location*, or None if absent."""
    scf = storage_client_factory(cmd.cli_ctx)
    # Older SDK profiles expose `usage` instead of `usages`.
    try:
        usage_client = scf.usages
    except NotImplementedError:
        usage_client = scf.usage
    for entry in usage_client.list_by_location(location):  # pylint: disable=no-member
        if entry.name.value == 'StorageAccounts':
            return entry
    return None
def show_storage_account_usage_no_location(cmd):
    """Return the subscription-wide 'StorageAccounts' usage entry, or None if absent."""
    scf = storage_client_factory(cmd.cli_ctx)
    matches = (entry for entry in scf.usage.list() if entry.name.value == 'StorageAccounts')  # pylint: disable=no-member
    return next(matches, None)
def get_storage_account_properties(cli_ctx, account_id):
    """Fetch full properties of the storage account identified by ARM resource id *account_id*."""
    from msrestazure.tools import parse_resource_id
    id_parts = parse_resource_id(account_id)
    scf = storage_client_factory(cli_ctx)
    return scf.storage_accounts.get_properties(id_parts['resource_group'], id_parts['name'])
# pylint: disable=too-many-locals, too-many-statements, too-many-branches, too-many-boolean-expressions
def update_storage_account(cmd, instance, sku=None, tags=None, custom_domain=None, use_subdomain=None,
                           encryption_services=None, encryption_key_source=None, encryption_key_version=None,
                           encryption_key_name=None, encryption_key_vault=None,
                           access_tier=None, https_only=None, enable_files_aadds=None, assign_identity=False,
                           bypass=None, default_action=None, enable_large_file_share=None, enable_files_adds=None,
                           domain_name=None, net_bios_domain_name=None, forest_name=None, domain_guid=None,
                           domain_sid=None, azure_storage_sid=None, routing_choice=None,
                           publish_microsoft_endpoints=None, publish_internet_endpoints=None,
                           allow_blob_public_access=None, min_tls_version=None, allow_shared_key_access=None,
                           identity_type=None, user_identity_id=None, key_vault_user_identity_id=None,
                           sas_expiration_period=None, key_expiration_period_in_days=None):
    """Build a StorageAccountUpdateParameters object from the CLI flags.

    *instance* is the current storage-account model; every parameter left as
    None falls back to the corresponding value already on *instance*.
    Returns the update-parameters object (it does not call the service; the
    generic update machinery sends it).
    """
    StorageAccountUpdateParameters, Sku, CustomDomain, AccessTier, Identity, Encryption, NetworkRuleSet = \
        cmd.get_models('StorageAccountUpdateParameters', 'Sku', 'CustomDomain', 'AccessTier', 'Identity', 'Encryption',
                       'NetworkRuleSet')
    # --- custom domain -----------------------------------------------------
    domain = instance.custom_domain
    if custom_domain is not None:
        domain = CustomDomain(name=custom_domain)
        if use_subdomain is not None:
            # use_subdomain arrives as the string 'true'/'false' from argparse.
            domain.use_sub_domain_name = use_subdomain == 'true'

    # --- encryption --------------------------------------------------------
    encryption = instance.encryption
    # encryption_key_version may legitimately be '' (meaning "clear the pinned
    # version"), hence the `is not None` test inside any().
    if not encryption and any((encryption_services, encryption_key_source, encryption_key_name,
                               encryption_key_vault, encryption_key_version is not None)):
        encryption = Encryption()
    if encryption_services:
        encryption.services = encryption_services

    if encryption_key_source:
        encryption.key_source = encryption_key_source

    # NOTE(review): if *instance* carried no encryption block and no encryption
    # flag was passed, `encryption` is still None here and the attribute access
    # below would fail — presumably the service always populates
    # instance.encryption; confirm.
    if encryption.key_source and encryption.key_source == "Microsoft.Keyvault":
        if encryption.key_vault_properties is None:
            KeyVaultProperties = cmd.get_models('KeyVaultProperties')
            encryption.key_vault_properties = KeyVaultProperties()
    else:
        # Key-vault details make no sense unless the key source is Key Vault.
        if any([encryption_key_name, encryption_key_vault, encryption_key_version]):
            raise ValueError(
                'Specify `--encryption-key-source=Microsoft.Keyvault` to configure key vault properties.')
        if encryption.key_vault_properties is not None:
            encryption.key_vault_properties = None

    # These rely on key_vault_properties having been created above.
    if encryption_key_name:
        encryption.key_vault_properties.key_name = encryption_key_name
    if encryption_key_vault:
        encryption.key_vault_properties.key_vault_uri = encryption_key_vault
    if encryption_key_version is not None:
        encryption.key_vault_properties.key_version = encryption_key_version

    # --- base update object ------------------------------------------------
    params = StorageAccountUpdateParameters(
        sku=Sku(name=sku) if sku is not None else instance.sku,
        tags=tags if tags is not None else instance.tags,
        custom_domain=domain,
        encryption=encryption,
        access_tier=AccessTier(access_tier) if access_tier is not None else instance.access_tier,
        enable_https_traffic_only=https_only if https_only is not None else instance.enable_https_traffic_only
    )
    # --- managed identity --------------------------------------------------
    if identity_type and 'UserAssigned' in identity_type and user_identity_id:
        user_assigned_identities = {user_identity_id: {}}
        if instance.identity.user_assigned_identities:
            # Map every previously-assigned identity (other than the new one)
            # to None, which the service interprets as "remove this identity".
            for item in instance.identity.user_assigned_identities:
                if item != user_identity_id:
                    user_assigned_identities[item] = None
        params.identity = Identity(type=identity_type, user_assigned_identities=user_assigned_identities)
    elif identity_type:
        params.identity = Identity(type=identity_type)

    if key_vault_user_identity_id is not None:
        EncryptionIdentity = cmd.get_models('EncryptionIdentity')
        params.encryption.encryption_identity = EncryptionIdentity(
            encryption_user_assigned_identity=key_vault_user_identity_id)

    # --- Azure Files identity-based auth (AADDS) ---------------------------
    AzureFilesIdentityBasedAuthentication = cmd.get_models('AzureFilesIdentityBasedAuthentication')
    if enable_files_aadds is not None:
        if enable_files_aadds:  # enable AADDS
            origin_storage_account = get_storage_account_properties(cmd.cli_ctx, instance.id)
            # AD and AADDS are mutually exclusive on an account.
            if origin_storage_account.azure_files_identity_based_authentication and \
                    origin_storage_account.azure_files_identity_based_authentication.directory_service_options == 'AD':
                raise CLIError("The Storage account already enabled ActiveDirectoryDomainServicesForFile, "
                               "please disable it by running this cmdlets with \"--enable-files-adds false\" "
                               "before enable AzureActiveDirectoryDomainServicesForFile.")
            params.azure_files_identity_based_authentication = AzureFilesIdentityBasedAuthentication(
                directory_service_options='AADDS' if enable_files_aadds else 'None')
        else:  # Only disable AADDS and keep others unchanged
            origin_storage_account = get_storage_account_properties(cmd.cli_ctx, instance.id)
            if not origin_storage_account.azure_files_identity_based_authentication or \
                    origin_storage_account.azure_files_identity_based_authentication.directory_service_options \
                    == 'AADDS':
                params.azure_files_identity_based_authentication = AzureFilesIdentityBasedAuthentication(
                    directory_service_options='None')
            else:
                # A different directory service (AD) is active; leave it as-is.
                params.azure_files_identity_based_authentication = \
                    origin_storage_account.azure_files_identity_based_authentication
    # --- Azure Files identity-based auth (AD) ------------------------------
    if enable_files_adds is not None:
        ActiveDirectoryProperties = cmd.get_models('ActiveDirectoryProperties')
        if enable_files_adds:  # enable AD
            # All six AD properties are required together.
            if not(domain_name and net_bios_domain_name and forest_name and domain_guid and domain_sid and
                   azure_storage_sid):
                raise CLIError("To enable ActiveDirectoryDomainServicesForFile, user must specify all of: "
                               "--domain-name, --net-bios-domain-name, --forest-name, --domain-guid, --domain-sid and "
                               "--azure_storage_sid arguments in Azure Active Directory Properties Argument group.")
            origin_storage_account = get_storage_account_properties(cmd.cli_ctx, instance.id)
            if origin_storage_account.azure_files_identity_based_authentication and \
                    origin_storage_account.azure_files_identity_based_authentication.directory_service_options \
                    == 'AADDS':
                raise CLIError("The Storage account already enabled AzureActiveDirectoryDomainServicesForFile, "
                               "please disable it by running this cmdlets with \"--enable-files-aadds false\" "
                               "before enable ActiveDirectoryDomainServicesForFile.")
            active_directory_properties = ActiveDirectoryProperties(domain_name=domain_name,
                                                                    net_bios_domain_name=net_bios_domain_name,
                                                                    forest_name=forest_name, domain_guid=domain_guid,
                                                                    domain_sid=domain_sid,
                                                                    azure_storage_sid=azure_storage_sid)
            # TODO: Enabling AD will automatically disable AADDS. Maybe we should throw error message
            params.azure_files_identity_based_authentication = AzureFilesIdentityBasedAuthentication(
                directory_service_options='AD',
                active_directory_properties=active_directory_properties)
        else:  # disable AD
            # Rejecting stray AD properties avoids silently ignoring user input.
            if domain_name or net_bios_domain_name or forest_name or domain_guid or domain_sid or azure_storage_sid:
                raise CLIError("To disable ActiveDirectoryDomainServicesForFile, user can't specify any of: "
                               "--domain-name, --net-bios-domain-name, --forest-name, --domain-guid, --domain-sid and "
                               "--azure_storage_sid arguments in Azure Active Directory Properties Argument group.")
            # Only disable AD and keep others unchanged
            origin_storage_account = get_storage_account_properties(cmd.cli_ctx, instance.id)
            if not origin_storage_account.azure_files_identity_based_authentication or \
                    origin_storage_account.azure_files_identity_based_authentication.directory_service_options == 'AD':
                params.azure_files_identity_based_authentication = AzureFilesIdentityBasedAuthentication(
                    directory_service_options='None')
            else:
                params.azure_files_identity_based_authentication = \
                    origin_storage_account.azure_files_identity_based_authentication

    # Note: --assign-identity overrides any identity set from --identity-type above.
    if assign_identity:
        params.identity = Identity(type='SystemAssigned')
    if enable_large_file_share:
        LargeFileSharesState = cmd.get_models('LargeFileSharesState')
        params.large_file_shares_state = LargeFileSharesState("Enabled")

    # --- network rules (only when the API version supports them) -----------
    if NetworkRuleSet:
        acl = instance.network_rule_set
        if acl:
            if bypass:
                acl.bypass = bypass
            if default_action:
                acl.default_action = default_action
        elif default_action:
            acl = NetworkRuleSet(bypass=bypass, virtual_network_rules=None, ip_rules=None,
                                 default_action=default_action)
        elif bypass:
            # --bypass without --default-action on an account with no ACL yet.
            raise CLIError('incorrect usage: --default-action ACTION [--bypass SERVICE ...]')
        params.network_rule_set = acl

    # --- routing preference ------------------------------------------------
    if hasattr(params, 'routing_preference') and any([routing_choice, publish_microsoft_endpoints,
                                                      publish_internet_endpoints]):
        if params.routing_preference is None:
            RoutingPreference = cmd.get_models('RoutingPreference')
            params.routing_preference = RoutingPreference()
        if routing_choice is not None:
            params.routing_preference.routing_choice = routing_choice
        if publish_microsoft_endpoints is not None:
            params.routing_preference.publish_microsoft_endpoints = publish_microsoft_endpoints
        if publish_internet_endpoints is not None:
            params.routing_preference.publish_internet_endpoints = publish_internet_endpoints

    # --- misc scalar settings ----------------------------------------------
    if allow_blob_public_access is not None:
        params.allow_blob_public_access = allow_blob_public_access

    if min_tls_version:
        params.minimum_tls_version = min_tls_version

    if allow_shared_key_access is not None:
        params.allow_shared_key_access = allow_shared_key_access

    if key_expiration_period_in_days is not None:
        KeyPolicy = cmd.get_models('KeyPolicy')
        params.key_policy = KeyPolicy(key_expiration_period_in_days=key_expiration_period_in_days)

    if sas_expiration_period:
        SasPolicy = cmd.get_models('SasPolicy')
        params.sas_policy = SasPolicy(sas_expiration_period=sas_expiration_period)

    return params
def list_network_rules(client, resource_group_name, account_name):
    """Return the account's network rule set with the scalar bypass/default_action
    attributes stripped, leaving only the rule collections for display."""
    rule_set = client.get_properties(resource_group_name, account_name).network_rule_set
    for scalar in ('bypass', 'default_action'):
        delattr(rule_set, scalar)
    return rule_set
def add_network_rule(cmd, client, resource_group_name, account_name, action='Allow', subnet=None,
                     vnet_name=None, ip_address=None, tenant_id=None, resource_id=None):  # pylint: disable=unused-argument
    """Add (or replace) a virtual-network, IP, or resource-access rule on the
    account's network ACL, then push the updated rule set to the service."""
    rule_set = client.get_properties(resource_group_name, account_name).network_rule_set

    if subnet:
        from msrestazure.tools import is_valid_resource_id
        if not is_valid_resource_id(subnet):
            raise CLIError("Expected fully qualified resource ID: got '{}'".format(subnet))
        VirtualNetworkRule = cmd.get_models('VirtualNetworkRule')
        existing = rule_set.virtual_network_rules or []
        # Drop any rule for the same subnet (case-insensitive) before re-adding it.
        kept = [r for r in existing if r.virtual_network_resource_id.lower() != subnet.lower()]
        kept.append(VirtualNetworkRule(virtual_network_resource_id=subnet, action=action))
        rule_set.virtual_network_rules = kept

    if ip_address:
        IpRule = cmd.get_models('IPRule')
        existing = rule_set.ip_rules or []
        kept = [r for r in existing if r.ip_address_or_range != ip_address]
        kept.append(IpRule(ip_address_or_range=ip_address, action=action))
        rule_set.ip_rules = kept

    if resource_id:
        ResourceAccessRule = cmd.get_models('ResourceAccessRule')
        existing = rule_set.resource_access_rules or []
        # A rule is a duplicate only when both resource id and tenant id match.
        kept = [r for r in existing
                if r.resource_id != resource_id or r.tenant_id != tenant_id]
        kept.append(ResourceAccessRule(tenant_id=tenant_id, resource_id=resource_id))
        rule_set.resource_access_rules = kept

    StorageAccountUpdateParameters = cmd.get_models('StorageAccountUpdateParameters')
    return client.update(resource_group_name, account_name,
                         StorageAccountUpdateParameters(network_rule_set=rule_set))
def remove_network_rule(cmd, client, resource_group_name, account_name, ip_address=None, subnet=None,
                        vnet_name=None, tenant_id=None, resource_id=None):  # pylint: disable=unused-argument
    """Remove matching virtual-network, IP, and/or resource-access rules from the
    account's network ACL, then push the updated rule set to the service."""
    rule_set = client.get_properties(resource_group_name, account_name).network_rule_set
    if subnet:
        # Suffix match lets callers pass either a full id or a trailing fragment.
        rule_set.virtual_network_rules = [
            r for r in rule_set.virtual_network_rules
            if not r.virtual_network_resource_id.endswith(subnet)]
    if ip_address:
        rule_set.ip_rules = [
            r for r in rule_set.ip_rules if r.ip_address_or_range != ip_address]
    if resource_id:
        rule_set.resource_access_rules = [
            r for r in rule_set.resource_access_rules
            if r.tenant_id != tenant_id or r.resource_id != resource_id]
    StorageAccountUpdateParameters = cmd.get_models('StorageAccountUpdateParameters')
    return client.update(resource_group_name, account_name,
                         StorageAccountUpdateParameters(network_rule_set=rule_set))
def _update_private_endpoint_connection_status(cmd, client, resource_group_name, account_name,
                                               private_endpoint_connection_name, is_approved=True, description=None):
    """Set a private endpoint connection's state to Approved or Rejected.

    Fetches the connection, rewrites its private_link_service_connection_state
    in place, and PUTs it back. HTTP 400 responses for the two known invalid
    transitions (re-approving, approving after rejection) are turned into
    friendlier CLIErrors; everything else is re-raised.
    """
    from azure.core.exceptions import HttpResponseError
    PrivateEndpointServiceConnectionStatus = cmd.get_models('PrivateEndpointServiceConnectionStatus')

    private_endpoint_connection = client.get(resource_group_name=resource_group_name, account_name=account_name,
                                             private_endpoint_connection_name=private_endpoint_connection_name)
    # Remember the pre-update status so the error path can explain what happened.
    old_status = private_endpoint_connection.private_link_service_connection_state.status
    new_status = PrivateEndpointServiceConnectionStatus.approved \
        if is_approved else PrivateEndpointServiceConnectionStatus.rejected
    private_endpoint_connection.private_link_service_connection_state.status = new_status
    private_endpoint_connection.private_link_service_connection_state.description = description
    try:
        return client.put(resource_group_name=resource_group_name,
                          account_name=account_name,
                          private_endpoint_connection_name=private_endpoint_connection_name,
                          properties=private_endpoint_connection)
    except HttpResponseError as ex:
        if ex.response.status_code == 400:
            # NOTE(review): CLIError is called with two positional args here
            # (response + message), which concatenates oddly in output —
            # presumably only the message was intended; confirm.
            if new_status == "Approved" and old_status == "Rejected":
                raise CLIError(ex.response, "You cannot approve the connection request after rejection. Please create "
                                            "a new connection for approval.")
            if new_status == "Approved" and old_status == "Approved":
                raise CLIError(ex.response, "Your connection is already approved. No need to approve again.")
        raise ex
def approve_private_endpoint_connection(cmd, client, resource_group_name, account_name,
                                        private_endpoint_connection_name, description=None):
    """Approve a pending private endpoint connection on the storage account."""
    return _update_private_endpoint_connection_status(
        cmd, client, resource_group_name=resource_group_name, account_name=account_name,
        private_endpoint_connection_name=private_endpoint_connection_name,
        description=description, is_approved=True)
def reject_private_endpoint_connection(cmd, client, resource_group_name, account_name, private_endpoint_connection_name,
                                       description=None):
    """Reject a pending private endpoint connection on the storage account."""
    return _update_private_endpoint_connection_status(
        cmd, client, resource_group_name=resource_group_name, account_name=account_name,
        private_endpoint_connection_name=private_endpoint_connection_name,
        description=description, is_approved=False)
def create_management_policies(cmd, client, resource_group_name, account_name, policy):
    """Create or replace the account's (single, 'default') management policy.

    *policy* is either a path to a JSON file or an inline JSON string.
    """
    policy_document = get_file_json(policy) if os.path.exists(policy) else shell_safe_json_parse(policy)
    ManagementPolicy = cmd.get_models('ManagementPolicy')
    ManagementPolicyName = cmd.get_models('ManagementPolicyName')
    return client.create_or_update(resource_group_name, account_name,
                                   ManagementPolicyName.DEFAULT,
                                   properties=ManagementPolicy(policy=policy_document))
def get_management_policy(cmd, client, resource_group_name, account_name):
    """Show the account's 'default' management policy."""
    default_name = cmd.get_models('ManagementPolicyName').DEFAULT
    return client.get(resource_group_name, account_name, default_name)
def delete_management_policy(cmd, client, resource_group_name, account_name):
    """Delete the account's 'default' management policy."""
    ManagementPolicyName = cmd.get_models('ManagementPolicyName')
    return client.delete(resource_group_name, account_name, ManagementPolicyName.DEFAULT)
def update_management_policies(cmd, client, resource_group_name, account_name, parameters=None):
    """Replace the account's 'default' management policy with *parameters*."""
    default_name = cmd.get_models('ManagementPolicyName').DEFAULT
    return client.create_or_update(resource_group_name, account_name,
                                   default_name, properties=parameters)
# TODO: support updating other properties besides 'enable_change_feed,delete_retention_policy'
def update_blob_service_properties(cmd, instance, enable_change_feed=None, change_feed_retention_days=None,
                                   enable_delete_retention=None, delete_retention_days=None,
                                   enable_restore_policy=None, restore_days=None,
                                   enable_versioning=None, enable_container_delete_retention=None,
                                   container_delete_retention_days=None, default_service_version=None):
    """Mutate *instance* (the blob service properties model) per the CLI flags
    and return it. For every enable_* switch that is explicitly False, the
    associated day count is discarded so the disabled policy carries no value."""
    if enable_change_feed is not None:
        retention = None if enable_change_feed is False else change_feed_retention_days
        instance.change_feed = cmd.get_models('ChangeFeed')(
            enabled=enable_change_feed, retention_in_days=retention)

    if enable_container_delete_retention is not None:
        days = None if enable_container_delete_retention is False else container_delete_retention_days
        instance.container_delete_retention_policy = cmd.get_models('DeleteRetentionPolicy')(
            enabled=enable_container_delete_retention, days=days)

    if enable_delete_retention is not None:
        days = None if enable_delete_retention is False else delete_retention_days
        instance.delete_retention_policy = cmd.get_models('DeleteRetentionPolicy')(
            enabled=enable_delete_retention, days=days)

    if enable_restore_policy is not None:
        days = None if enable_restore_policy is False else restore_days
        instance.restore_policy = cmd.get_models('RestorePolicyProperties')(
            enabled=enable_restore_policy, days=days)

    if enable_versioning is not None:
        instance.is_versioning_enabled = enable_versioning

    if default_service_version is not None:
        instance.default_service_version = default_service_version

    return instance
def update_file_service_properties(cmd, instance, enable_delete_retention=None,
                                   delete_retention_days=None, enable_smb_multichannel=None):
    """Build the kwargs for a file-service-properties update from the CLI flags.

    Returns a dict containing only the property groups that should be sent to
    the service ('share_delete_retention_policy' and/or 'protocol_settings').
    Raises ValidationError when retention days are given without the policy
    being (or becoming) enabled.
    """
    from azure.cli.core.azclierror import ValidationError
    params = {}
    # set delete retention policy according input
    if enable_delete_retention is not None:
        if enable_delete_retention is False:
            # Disabling retention discards any stale day count.
            delete_retention_days = None
        instance.share_delete_retention_policy = cmd.get_models('DeleteRetentionPolicy')(
            enabled=enable_delete_retention, days=delete_retention_days)

    # If already enabled, only update days
    if enable_delete_retention is None and delete_retention_days is not None:
        if instance.share_delete_retention_policy is not None and instance.share_delete_retention_policy.enabled:
            instance.share_delete_retention_policy.days = delete_retention_days
        else:
            raise ValidationError(
                "Delete Retention Policy hasn't been enabled, and you cannot set delete retention days. "
                "Please set --enable-delete-retention as true to enable Delete Retention Policy.")

    # Fix the issue in server when delete_retention_policy.enabled=False, the returned days is 0
    # TODO: remove it when server side return null not 0 for days
    if instance.share_delete_retention_policy is not None and instance.share_delete_retention_policy.enabled is False:
        instance.share_delete_retention_policy.days = None

    if instance.share_delete_retention_policy:
        params['share_delete_retention_policy'] = instance.share_delete_retention_policy

    # set protocol settings
    if enable_smb_multichannel is not None:
        instance.protocol_settings = cmd.get_models('ProtocolSettings')()
        instance.protocol_settings.smb = cmd.get_models('SmbSetting')(
            multichannel=cmd.get_models('Multichannel')(enabled=enable_smb_multichannel))
        # NOTE(review): a Multichannel model instance is always truthy, so this
        # forwards protocol_settings whenever the flag was given — confirm
        # whether `.multichannel.enabled` was intended instead.
        if instance.protocol_settings.smb.multichannel:
            params['protocol_settings'] = instance.protocol_settings
    return params
def create_encryption_scope(cmd, client, resource_group_name, account_name, encryption_scope_name,
                            key_source=None, key_uri=None, require_infrastructure_encryption=None):
    """Create an encryption scope on the storage account.

    :param key_source: encryption key source (e.g. 'Microsoft.Storage' or
        'Microsoft.KeyVault'); when omitted the service default applies.
    :param key_uri: Key Vault key URI to use when the source is Key Vault.
    :param require_infrastructure_encryption: enable a secondary layer of
        encryption with platform-managed keys.
    """
    EncryptionScope = cmd.get_models('EncryptionScope')
    # Bug fix: the scope object was previously only constructed when
    # --key-source was supplied, so passing --key-uri (or
    # --require-infrastructure-encryption) alone raised UnboundLocalError.
    encryption_scope = EncryptionScope(source=key_source) if key_source else EncryptionScope()

    if key_uri:
        EncryptionScopeKeyVaultProperties = cmd.get_models('EncryptionScopeKeyVaultProperties')
        encryption_scope.key_vault_properties = EncryptionScopeKeyVaultProperties(key_uri=key_uri)

    if require_infrastructure_encryption is not None:
        encryption_scope.require_infrastructure_encryption = require_infrastructure_encryption

    return client.put(resource_group_name=resource_group_name, account_name=account_name,
                      encryption_scope_name=encryption_scope_name, encryption_scope=encryption_scope)
def update_encryption_scope(cmd, client, resource_group_name, account_name, encryption_scope_name,
                            key_source=None, key_uri=None, state=None):
    """Patch an existing encryption scope's key source, key URI, and/or state."""
    EncryptionScope, EncryptionScopeState = cmd.get_models('EncryptionScope', 'EncryptionScopeState')
    scope_patch = EncryptionScope()

    if key_source:
        scope_patch.source = key_source
    if key_uri:
        kv_props_model = cmd.get_models('EncryptionScopeKeyVaultProperties')
        scope_patch.key_vault_properties = kv_props_model(key_uri=key_uri)
    if state is not None:
        scope_patch.state = EncryptionScopeState(state)

    return client.patch(resource_group_name=resource_group_name, account_name=account_name,
                        encryption_scope_name=encryption_scope_name, encryption_scope=scope_patch)
# pylint: disable=no-member
def create_or_policy(cmd, client, account_name, resource_group_name=None, properties=None, source_account=None,
                     destination_account=None, policy_id="default", rule_id=None, source_container=None,
                     destination_container=None, min_creation_time=None, prefix_match=None):
    """Create or set an object replication policy on the account.

    Either pass a full *properties* payload, or let the function assemble one
    from the source/destination account and (optionally) a single rule built
    from the container arguments.

    Bug fix: an HttpResponseError that did not match the known
    InvalidRequestPropertyValue/default-policy-id case used to be silently
    swallowed (the function returned None); it is now re-raised.
    """
    from azure.core.exceptions import HttpResponseError
    ObjectReplicationPolicy = cmd.get_models('ObjectReplicationPolicy')
    if properties is None:
        rules = []
        ObjectReplicationPolicyRule, ObjectReplicationPolicyFilter = \
            cmd.get_models('ObjectReplicationPolicyRule', 'ObjectReplicationPolicyFilter')
        if source_container and destination_container:
            rule = ObjectReplicationPolicyRule(
                rule_id=rule_id,
                source_container=source_container,
                destination_container=destination_container,
                filters=ObjectReplicationPolicyFilter(prefix_match=prefix_match, min_creation_time=min_creation_time)
            )
            rules.append(rule)
        or_policy = ObjectReplicationPolicy(source_account=source_account,
                                            destination_account=destination_account,
                                            rules=rules)
    else:
        or_policy = properties

    try:
        return client.create_or_update(resource_group_name=resource_group_name, account_name=account_name,
                                       object_replication_policy_id=policy_id, properties=or_policy)
    except HttpResponseError as ex:
        # On the destination account the service generates the policy id;
        # 'default' is only accepted on the source side.
        # NOTE(review): when *properties* is a raw dict this attribute access
        # would fail — presumably properties is always a model here; confirm.
        if ex.error.code == 'InvalidRequestPropertyValue' and policy_id == 'default' \
                and account_name == or_policy.source_account:
            raise CLIError(
                'ValueError: Please specify --policy-id with auto-generated policy id value on destination account.')
        raise  # do not swallow unrelated service errors
def update_or_policy(client, parameters, resource_group_name, account_name, object_replication_policy_id=None,
                     properties=None, source_account=None, destination_account=None):
    """Update an object replication policy.

    *parameters* is the existing policy model; a raw *properties* payload,
    when given, replaces it wholesale and may override the policy id via its
    'policyId' field.
    """
    if source_account is not None:
        parameters.source_account = source_account
    if destination_account is not None:
        parameters.destination_account = destination_account
    if properties is not None:
        parameters = properties
        # A raw JSON payload may carry its own policy id; prefer it over the CLI arg.
        # (idiom fix: membership test directly on the mapping, not on .keys())
        if "policyId" in properties and properties["policyId"]:
            object_replication_policy_id = properties["policyId"]

    return client.create_or_update(resource_group_name=resource_group_name, account_name=account_name,
                                   object_replication_policy_id=object_replication_policy_id, properties=parameters)
def get_or_policy(client, resource_group_name, account_name, policy_id='default'):
    """Show a single object replication policy (the 'default' one unless specified)."""
    return client.get(resource_group_name=resource_group_name, account_name=account_name,
                      object_replication_policy_id=policy_id)
def add_or_rule(cmd, client, resource_group_name, account_name, policy_id,
                source_container, destination_container, min_creation_time=None, prefix_match=None):
    """Append a new rule to an existing object replication policy and push the update."""
    policy = client.get(resource_group_name, account_name, policy_id)
    RuleModel, FilterModel = \
        cmd.get_models('ObjectReplicationPolicyRule', 'ObjectReplicationPolicyFilter')
    policy.rules.append(RuleModel(
        source_container=source_container,
        destination_container=destination_container,
        filters=FilterModel(prefix_match=prefix_match, min_creation_time=min_creation_time)))
    return client.create_or_update(resource_group_name, account_name, policy_id, policy)
def remove_or_rule(client, resource_group_name, account_name, policy_id, rule_id):
    """Delete a single rule from an object replication policy and push the update."""
    policy = client.get(resource_group_name=resource_group_name,
                        account_name=account_name,
                        object_replication_policy_id=policy_id)
    # find_child_item raises if the rule id is unknown, so remove() is safe.
    doomed = find_child_item(policy, rule_id, path='rules', key_path='rule_id')
    policy.rules.remove(doomed)
    return client.create_or_update(resource_group_name, account_name, policy_id, policy)
def get_or_rule(client, resource_group_name, account_name, policy_id, rule_id):
    """Return the rule with *rule_id* from the policy; fail if it does not exist."""
    policy = client.get(resource_group_name, account_name, policy_id)
    match = next((r for r in policy.rules if r.rule_id == rule_id), None)
    if match is None:
        raise CLIError("{} does not exist.".format(rule_id))
    return match
def list_or_rules(client, resource_group_name, account_name, policy_id):
    """List every rule of the given object replication policy."""
    return client.get(resource_group_name, account_name, policy_id).rules
def update_or_rule(client, resource_group_name, account_name, policy_id, rule_id, source_container=None,
                   destination_container=None, min_creation_time=None, prefix_match=None):
    """Update selected fields of one rule inside an object replication policy,
    push the change, and return the rule as stored on the service."""
    policy = client.get(resource_group_name, account_name, policy_id)
    for rule in policy.rules:
        if rule.rule_id != rule_id:
            continue
        if destination_container is not None:
            rule.destination_container = destination_container
        if source_container is not None:
            rule.source_container = source_container
        if min_creation_time is not None:
            rule.filters.min_creation_time = min_creation_time
        if prefix_match is not None:
            rule.filters.prefix_match = prefix_match
    client.create_or_update(resource_group_name=resource_group_name, account_name=account_name,
                            object_replication_policy_id=policy_id, properties=policy)
    # Re-fetch so the caller sees the service's view of the updated rule.
    return get_or_rule(client, resource_group_name=resource_group_name, account_name=account_name,
                       policy_id=policy_id, rule_id=rule_id)
| mit |
danielshahaf/offlineimap | offlineimap/repository/IMAP.py | 4 | 15123 | # IMAP repository support
# Copyright (C) 2002-2011 John Goerzen & contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from offlineimap.repository.Base import BaseRepository
from offlineimap import folder, imaputil, imapserver, OfflineImapError
from offlineimap.folder.UIDMaps import MappedIMAPFolder
from offlineimap.threadutil import ExitNotifyThread
from threading import Event
import os
from sys import exc_info
import netrc
import errno
class IMAPRepository(BaseRepository):
    def __init__(self, reposname, account):
        """Initialize an IMAPRepository object."""
        BaseRepository.__init__(self, reposname, account)
        # self.ui is being set by the BaseRepository
        # Cache for gethost(); populated lazily on first lookup.
        self._host = None
        self.imapserver = imapserver.IMAPServer(self)
        # Cache for getfolders(); None means "not fetched yet".
        self.folders = None
        # The IMAP folder separator is autodetected from the server, so a
        # user-configured 'sep' would be misleading — warn and ignore it.
        if self.getconf('sep', None):
            self.ui.info("The 'sep' setting is being ignored for IMAP "
                         "repository '%s' (it's autodetected)" % self)
def startkeepalive(self):
keepalivetime = self.getkeepalive()
if not keepalivetime: return
self.kaevent = Event()
self.kathread = ExitNotifyThread(target = self.imapserver.keepalive,
name = "Keep alive " + self.getname(),
args = (keepalivetime, self.kaevent))
self.kathread.setDaemon(1)
self.kathread.start()
def stopkeepalive(self):
if not hasattr(self, 'kaevent'):
# Keepalive is not active.
return
self.kaevent.set()
del self.kathread
del self.kaevent
def holdordropconnections(self):
if not self.getholdconnectionopen():
self.dropconnections()
    def dropconnections(self):
        """Close all cached connections to the IMAP server."""
        self.imapserver.close()
def getholdconnectionopen(self):
if self.getidlefolders():
return 1
return self.getconfboolean("holdconnectionopen", 0)
def getkeepalive(self):
num = self.getconfint("keepalive", 0)
if num == 0 and self.getidlefolders():
return 29*60
else:
return num
def getsep(self):
"""Return the folder separator for the IMAP repository
This requires that self.imapserver has been initialized with an
acquireconnection() or it will still be `None`"""
assert self.imapserver.delim != None, "'%s' " \
"repository called getsep() before the folder separator was " \
"queried from the server" % self
return self.imapserver.delim
def gethost(self):
"""Return the configured hostname to connect to
:returns: hostname as string or throws Exception"""
if self._host: # use cached value if possible
return self._host
# 1) check for remotehosteval setting
if self.config.has_option(self.getsection(), 'remotehosteval'):
host = self.getconf('remotehosteval')
try:
host = self.localeval.eval(host)
except Exception as e:
raise OfflineImapError("remotehosteval option for repository "\
"'%s' failed:\n%s" % (self, e),
OfflineImapError.ERROR.REPO)
if host:
self._host = host
return self._host
# 2) check for plain remotehost setting
host = self.getconf('remotehost', None)
if host != None:
self._host = host
return self._host
# no success
raise OfflineImapError("No remote host for repository "\
"'%s' specified." % self,
OfflineImapError.ERROR.REPO)
    def get_remote_identity(self):
        """
        Remote identity is used for certain SASL mechanisms
        (currently -- PLAIN) to inform server about the ID
        we want to authorize as instead of our login name.

        Returns the 'remote_identity' config value, or None if unset.
        """
        return self.getconf('remote_identity', default=None)
def get_auth_mechanisms(self):
supported = ["GSSAPI", "CRAM-MD5", "PLAIN", "LOGIN"]
# Mechanisms are ranged from the strongest to the
# weakest ones.
# TODO: we need DIGEST-MD5, it must come before CRAM-MD5
# TODO: due to the chosen-plaintext resistance.
default = ["GSSAPI", "CRAM-MD5", "PLAIN", "LOGIN"]
mechs = self.getconflist('auth_mechanisms', r',\s*',
default)
for m in mechs:
if m not in supported:
raise OfflineImapError("Repository %s: " % self + \
"unknown authentication mechanism '%s'" % m,
OfflineImapError.ERROR.REPO)
self.ui.debug('imap', "Using authentication mechanisms %s" % mechs)
return mechs
    def getuser(self):
        """Return the remote user name to log in with.

        Tried in order: the 'remoteusereval' expression, the plain
        'remoteuser' setting, ~/.netrc, then /etc/netrc. Returns None
        implicitly when no strategy yields a user.
        """
        user = None
        localeval = self.localeval

        # 1. evaluated expression from the config
        if self.config.has_option(self.getsection(), 'remoteusereval'):
            user = self.getconf('remoteusereval')
            if user != None:
                return localeval.eval(user)

        # 2. plain config setting
        user = self.getconf('remoteuser')
        if user != None:
            return user

        # 3. ~/.netrc — a missing file is fine, other IOErrors are real.
        try:
            netrcentry = netrc.netrc().authenticators(self.gethost())
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
        else:
            if netrcentry:
                # authenticators() returns (login, account, password).
                return netrcentry[0]

        # 4. /etc/netrc — may also be unreadable (EACCES) for this user.
        try:
            netrcentry = netrc.netrc('/etc/netrc').authenticators(self.gethost())
        except IOError as inst:
            if inst.errno not in (errno.ENOENT, errno.EACCES):
                raise
        else:
            if netrcentry:
                return netrcentry[0]
def getport(self):
port = None
if self.config.has_option(self.getsection(), 'remoteporteval'):
port = self.getconf('remoteporteval')
if port != None:
return self.localeval.eval(port)
return self.getconfint('remoteport', None)
    def getssl(self):
        """Return whether SSL is enabled for this repository (config 'ssl', default off)."""
        return self.getconfboolean('ssl', 0)
    def getsslclientcert(self):
        """Return the path of the SSL client certificate, or None if unset."""
        return self.getconf('sslclientcert', None)
    def getsslclientkey(self):
        """Return the path of the SSL client key, or None if unset."""
        return self.getconf('sslclientkey', None)
def getsslcacertfile(self):
"""Return the absolute path of the CA certfile to use, if any"""
cacertfile = self.getconf('sslcacertfile', None)
if cacertfile is None:
return None
cacertfile = os.path.expanduser(cacertfile)
cacertfile = os.path.abspath(cacertfile)
if not os.path.isfile(cacertfile):
raise SyntaxWarning("CA certfile for repository '%s' could "
"not be found. No such file: '%s'" \
% (self.name, cacertfile))
return cacertfile
    def getsslversion(self):
        """Return the configured SSL/TLS protocol version string, or None."""
        return self.getconf('ssl_version', None)
    def get_ssl_fingerprint(self):
        """Return the expected server certificate fingerprint, or None."""
        return self.getconf('cert_fingerprint', None)
    def getpreauthtunnel(self):
        """Return the pre-authenticated tunnel command, or None."""
        return self.getconf('preauthtunnel', None)
    def gettransporttunnel(self):
        """Return the transport tunnel command, or None."""
        return self.getconf('transporttunnel', None)
    def getreference(self):
        """Return the IMAP reference (folder-name prefix), empty string by default."""
        return self.getconf('reference', '')
def getidlefolders(self):
localeval = self.localeval
return localeval.eval(self.getconf('idlefolders', '[]'))
def getmaxconnections(self):
num1 = len(self.getidlefolders())
num2 = self.getconfint('maxconnections', 1)
return max(num1, num2)
def getexpunge(self):
    """Return True if deleted messages should be expunged (default: on)."""
    return self.getconfboolean('expunge', 1)
def getpassword(self):
    """Return the IMAP password for this repository.

    It tries to get passwords in the following order:
    1. evaluate Repository 'remotepasseval'
    2. read password from Repository 'remotepass'
    3. read password from file specified in Repository 'remotepassfile'
    4. read password from ~/.netrc
    5. read password from /etc/netrc

    On success we return the password.
    If all strategies fail we return None.
    """
    # 1. evaluate Repository 'remotepasseval'
    passwd = self.getconf('remotepasseval', None)
    if passwd is not None:
        return self.localeval.eval(passwd)
    # 2. read password from Repository 'remotepass'
    password = self.getconf('remotepass', None)
    if password is not None:
        return password
    # 3. read password from file specified in Repository 'remotepassfile'
    passfile = self.getconf('remotepassfile', None)
    if passfile is not None:
        # 'with' guarantees the file handle is closed even if
        # readline() raises (the old open/close pair leaked it)
        with open(os.path.expanduser(passfile)) as fd:
            return fd.readline().strip()
    # 4. read password from ~/.netrc
    try:
        netrcentry = netrc.netrc().authenticators(self.gethost())
    except IOError as inst:
        # a missing ~/.netrc is fine; anything else is a real error
        if inst.errno != errno.ENOENT:
            raise
    else:
        if netrcentry:
            # only use the entry if its login matches our remoteuser
            user = self.getconf('remoteuser')
            if user is None or user == netrcentry[0]:
                return netrcentry[2]
    # 5. read password from /etc/netrc
    try:
        netrcentry = netrc.netrc('/etc/netrc').authenticators(self.gethost())
    except IOError as inst:
        # the system file may be absent or unreadable for this user
        if inst.errno not in (errno.ENOENT, errno.EACCES):
            raise
    else:
        if netrcentry:
            user = self.getconf('remoteuser')
            if user is None or user == netrcentry[0]:
                return netrcentry[2]
    # no strategy yielded a password!
    return None
def getfolder(self, foldername):
    """Return a folder object of the proper type for *foldername*."""
    return self.getfoldertype()(self.imapserver, foldername, self)
def getfoldertype(self):
    """Return the class used to instantiate folders of this repository."""
    return folder.IMAP.IMAPFolder
def connect(self):
    """Verify connectivity by acquiring (and immediately releasing)
    a connection from the server's connection pool."""
    imapobj = self.imapserver.acquireconnection()
    self.imapserver.releaseconnection(imapobj)
def forgetfolders(self):
    """Drop the cached folder list so getfolders() re-queries the server."""
    self.folders = None
def getfolders(self):
    """Return the (cached) list of folder objects for this repository.

    Lists folders from the server (all, or subscribed-only depending
    on configuration), appends explicitly configured 'folderincludes',
    sorts the result, and caches it in self.folders.
    """
    # serve from cache when a previous call already built the list
    if self.folders != None:
        return self.folders
    retval = []
    imapobj = self.imapserver.acquireconnection()
    # check whether to list all folders, or subscribed only
    listfunction = imapobj.list
    if self.getconfboolean('subscribedonly', False):
        listfunction = imapobj.lsub
    try:
        listresult = listfunction(directory = self.imapserver.reference)[1]
    finally:
        # always hand the connection back, even if LIST/LSUB failed
        self.imapserver.releaseconnection(imapobj)
    for string in listresult:
        if string == None or \
            (isinstance(string, basestring) and string == ''):
            # Bug in imaplib: empty strings in results from
            # literals. TODO: still relevant?
            continue
        flags, delim, name = imaputil.imapsplit(string)
        flaglist = [x.lower() for x in imaputil.flagsplit(flags)]
        # \Noselect folders cannot hold messages; skip them
        if '\\noselect' in flaglist:
            continue
        foldername = imaputil.dequote(name)
        retval.append(self.getfoldertype()(self.imapserver, foldername,
                                           self))
    # Add all folderincludes
    if len(self.folderincludes):
        imapobj = self.imapserver.acquireconnection()
        try:
            for foldername in self.folderincludes:
                try:
                    # probe the folder read-only to validate it exists
                    imapobj.select(foldername, readonly = True)
                except OfflineImapError as e:
                    # couldn't select this folderinclude, so ignore folder.
                    if e.severity > OfflineImapError.ERROR.FOLDER:
                        raise
                    self.ui.error(e, exc_info()[2],
                                  'Invalid folderinclude:')
                    continue
                retval.append(self.getfoldertype()(self.imapserver,
                                                   foldername,
                                                   self))
        finally:
            self.imapserver.releaseconnection(imapobj)
    if self.foldersort is None:
        # default sorting by case insensitive transposed name
        retval.sort(key=lambda x: str.lower(x.getvisiblename()))
    else:
        # do foldersort in a python3-compatible way
        # http://bytes.com/topic/python/answers/844614-python-3-sorting-comparison-function
        # NOTE(review): K defines __cmp__, which Python 3 ignores; this
        # file also uses 'basestring', so it presumably targets Python 2
        # only -- confirm before porting.
        def cmp2key(mycmp):
            """Converts a cmp= function into a key= function
            We need to keep cmp functions for backward compatibility"""
            class K:
                def __init__(self, obj, *args):
                    self.obj = obj
                def __cmp__(self, other):
                    return mycmp(self.obj.getvisiblename(), other.obj.getvisiblename())
            return K
        retval.sort(key=cmp2key(self.foldersort))
    self.folders = retval
    return self.folders
def makefolder(self, foldername):
    """Create a folder on the IMAP server

    This will not update the list cached in :meth:`getfolders`. You
    will need to invoke :meth:`forgetfolders` to force new caching
    when you are done creating folders yourself.

    :param foldername: Full path of the folder to be created."""
    # prepend the configured reference prefix, if any
    if self.getreference():
        foldername = self.getreference() + self.getsep() + foldername
    if not foldername: # Create top level folder as folder separator
        foldername = self.getsep()
    self.ui.makefolder(self, foldername)
    if self.account.dryrun:
        # dry-run mode: report only, do not touch the server
        return
    imapobj = self.imapserver.acquireconnection()
    try:
        result = imapobj.create(foldername)
        if result[0] != 'OK':
            raise OfflineImapError("Folder '%s'[%s] could not be created. "
                                   "Server responded: %s" % \
                                   (foldername, self, str(result)),
                                   OfflineImapError.ERROR.FOLDER)
    finally:
        self.imapserver.releaseconnection(imapobj)
class MappedIMAPRepository(IMAPRepository):
    # IMAPRepository variant whose folders perform local<->remote
    # folder-name translation; only the folder class differs.
    def getfoldertype(self):
        """Return the name-mapping folder class."""
        return MappedIMAPFolder
| gpl-2.0 |
wfxiang08/sqlalchemy | test/orm/test_immediate_load.py | 33 | 1693 | """basic tests of lazy loaded attributes"""
from sqlalchemy import testing
from sqlalchemy.orm import mapper, relationship, create_session, immediateload
from sqlalchemy.testing import eq_
from test.orm import _fixtures
class ImmediateTest(_fixtures.FixtureTest):
    """Exercise the 'immediate' relationship loader, applied either as a
    per-query option or as the mapper-configured loading strategy."""

    run_inserts = 'once'
    run_deletes = None

    def test_basic_option(self):
        User = self.classes.User
        Address = self.classes.Address
        users = self.tables.users
        addresses = self.tables.addresses

        mapper(Address, addresses)
        mapper(User, users,
               properties={'addresses': relationship(Address)})

        sess = create_session()
        result = (sess.query(User).
                  options(immediateload(User.addresses)).
                  filter(users.c.id == 7).all())
        # the user row plus its immediately-loaded address
        eq_(len(sess.identity_map), 2)

        sess.close()
        eq_(
            [User(id=7,
                  addresses=[Address(id=1,
                                     email_address='jack@bean.com')])],
            result
        )

    def test_basic(self):
        User = self.classes.User
        Address = self.classes.Address
        users = self.tables.users
        addresses = self.tables.addresses

        mapper(Address, addresses)
        mapper(User, users,
               properties={'addresses': relationship(Address,
                                                     lazy='immediate')})

        sess = create_session()
        result = sess.query(User).filter(users.c.id == 7).all()
        # the user row plus its immediately-loaded address
        eq_(len(sess.identity_map), 2)

        sess.close()
        eq_(
            [User(id=7,
                  addresses=[Address(id=1,
                                     email_address='jack@bean.com')])],
            result
        )
| mit |
garyfeng/pybrain | pybrain/rl/environments/simple/tasks.py | 26 | 1027 | __author__ = 'Thomas Rueckstiess, ruecksti@in.tum.de'
from pybrain.rl.environments import EpisodicTask
class MinimizeTask(EpisodicTask):
    """Fixed-length episodic task rewarding minimization of the
    environment's objective function f."""

    def __init__(self, environment):
        EpisodicTask.__init__(self, environment)
        # N: steps per episode; t: current step counter
        self.N = 15
        self.t = 0
        self.state = [0.0] * environment.dim
        self.action = [0.0] * environment.dim

    def reset(self):
        EpisodicTask.reset(self)
        self.t = 0

    def isFinished(self):
        # count a step per call; episode ends after N steps
        if self.t < self.N:
            self.t += 1
            return False
        self.t = 0
        return True

    def getObservation(self):
        self.state = EpisodicTask.getObservation(self)
        return self.state

    def performAction(self, action):
        EpisodicTask.performAction(self, action)
        self.action = action

    def getReward(self):
        # probe the objective at state nudged by 0.1 * action; negate
        # so that lower objective values yield higher reward
        probe = [s + 0.1 * a for s, a in zip(self.state, self.action)]
        return -sum(self.env.f(probe))
| bsd-3-clause |
Marcdnd/electrum-cesc | gui/qt/address_list.py | 1 | 8918 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import webbrowser
from util import *
from electrum_cesc.i18n import _
from electrum_cesc.util import block_explorer_URL, format_satoshis, format_time
from electrum_cesc.plugins import run_hook
from electrum_cesc.bitcoin import is_address
class AddressList(MyTreeWidget):
    """Tree widget listing wallet addresses grouped by account and by
    receiving/change sequence, showing label, balance and tx count,
    with a context menu of per-address operations."""

    def __init__(self, parent=None):
        MyTreeWidget.__init__(self, parent, self.create_menu, [ _('Address'), _('Label'), _('Balance'), _('Tx')], 1)
        self.setSelectionMode(QAbstractItemView.ExtendedSelection)

    def on_update(self):
        """Rebuild the entire tree from the wallet's current state."""
        self.wallet = self.parent.wallet
        self.accounts_expanded = self.wallet.storage.get('accounts_expanded', {})
        # remember the selected address so selection survives the rebuild
        item = self.currentItem()
        current_address = item.data(0, Qt.UserRole).toString() if item else None
        self.clear()
        accounts = self.wallet.get_accounts()
        if self.parent.current_account is None:
            account_items = sorted(accounts.items())
        else:
            account_items = [(self.parent.current_account, accounts.get(self.parent.current_account))]
        for k, account in account_items:
            # only show an account-level node when there are several accounts
            if len(accounts) > 1:
                name = self.wallet.get_account_name(k)
                c, u, x = self.wallet.get_account_balance(k)
                account_item = QTreeWidgetItem([ name, '', self.parent.format_amount(c + u + x), ''])
                account_item.setData(0, Qt.UserRole, k)
                self.addTopLevelItem(account_item)
                account_item.setExpanded(self.accounts_expanded.get(k, True))
            else:
                account_item = self
            # 0 = receiving sequence, 1 = change sequence
            sequences = [0,1] if account.has_change() else [0]
            for is_change in sequences:
                if len(sequences) > 1:
                    name = _("Receiving") if not is_change else _("Change")
                    seq_item = QTreeWidgetItem( [ name, '', '', '', ''] )
                    account_item.addChild(seq_item)
                    if not is_change:
                        seq_item.setExpanded(True)
                else:
                    seq_item = account_item
                # used addresses get tucked under a "Used" sub-node,
                # inserted lazily on first used address found
                used_item = QTreeWidgetItem( [ _("Used"), '', '', '', ''] )
                used_flag = False
                addr_list = account.get_addresses(is_change)
                for address in addr_list:
                    num = len(self.wallet.history.get(address,[]))
                    is_used = self.wallet.is_used(address)
                    label = self.wallet.labels.get(address,'')
                    c, u, x = self.wallet.get_addr_balance(address)
                    balance = self.parent.format_amount(c + u + x)
                    address_item = QTreeWidgetItem([address, label, balance, "%d"%num])
                    address_item.setFont(0, QFont(MONOSPACE_FONT))
                    address_item.setData(0, Qt.UserRole, address)
                    address_item.setData(0, Qt.UserRole+1, True) # label can be edited
                    # visual cues: frozen = light blue, beyond gap limit = red
                    if self.wallet.is_frozen(address):
                        address_item.setBackgroundColor(0, QColor('lightblue'))
                    if self.wallet.is_beyond_limit(address, account, is_change):
                        address_item.setBackgroundColor(0, QColor('red'))
                    if is_used:
                        if not used_flag:
                            seq_item.insertChild(0, used_item)
                            used_flag = True
                        used_item.addChild(address_item)
                    else:
                        seq_item.addChild(address_item)
                    if address == current_address:
                        self.setCurrentItem(address_item)
                    # add utxos
                    utxos = self.wallet.get_addr_utxo(address)
                    for x in utxos:
                        h = x.get('prevout_hash')
                        s = h + ":%d"%x.get('prevout_n')
                        label = self.wallet.get_label(h)
                        utxo_item = QTreeWidgetItem([s, label, self.parent.format_amount(x['value'])])
                        utxo_item.setFont(0, QFont(MONOSPACE_FONT))
                        address_item.addChild(utxo_item)

    def create_menu(self, position):
        """Build and show the context menu for the selected item(s)."""
        from electrum_cesc.wallet import Multisig_Wallet
        is_multisig = isinstance(self.wallet, Multisig_Wallet)
        selected = self.selectedItems()
        multi_select = len(selected) > 1
        addrs = [unicode(item.text(0)) for item in selected]
        if not addrs:
            return
        if not multi_select:
            item = self.itemAt(position)
            col = self.currentColumn()
            if not item:
                return
            addr = addrs[0]
            if not is_address(addr):
                # not an address row: either an account header (show its
                # menu) or a grouping node (toggle expansion)
                k = str(item.data(0,32).toString())
                if k:
                    self.create_account_menu(position, k, item)
                else:
                    item.setExpanded(not item.isExpanded())
                return
        menu = QMenu()
        # single-address actions
        if not multi_select:
            column_title = self.headerItem().text(col)
            menu.addAction(_("Copy %s")%column_title, lambda: self.parent.app.clipboard().setText(item.text(col)))
            if col in self.editable_columns:
                menu.addAction(_("Edit %s")%column_title, lambda: self.editItem(item, col))
            menu.addAction(_("Request payment"), lambda: self.parent.receive_at(addr))
            menu.addAction(_('History'), lambda: self.parent.show_address(addr))
            menu.addAction(_('Public Keys'), lambda: self.parent.show_public_keys(addr))
            if self.wallet.can_export():
                menu.addAction(_("Private key"), lambda: self.parent.show_private_key(addr))
            if not is_multisig and not self.wallet.is_watching_only():
                menu.addAction(_("Sign/verify message"), lambda: self.parent.sign_verify_message(addr))
                menu.addAction(_("Encrypt/decrypt message"), lambda: self.parent.encrypt_message(addr))
            if self.wallet.is_imported(addr):
                menu.addAction(_("Remove from wallet"), lambda: self.parent.delete_imported_key(addr))
            # NOTE(review): self.config is presumably provided by
            # MyTreeWidget/parent -- not set in this class; confirm.
            addr_URL = block_explorer_URL(self.config, 'addr', addr)
            if addr_URL:
                menu.addAction(_("View on block explorer"), lambda: webbrowser.open(addr_URL))
        # multi-address actions (freeze/unfreeze/send-from)
        if any(not self.wallet.is_frozen(addr) for addr in addrs):
            menu.addAction(_("Freeze"), lambda: self.parent.set_frozen_state(addrs, True))
        if any(self.wallet.is_frozen(addr) for addr in addrs):
            menu.addAction(_("Unfreeze"), lambda: self.parent.set_frozen_state(addrs, False))
        def can_send(addr):
            # spendable = not frozen and confirmed+unconfirmed balance > 0
            return not self.wallet.is_frozen(addr) and sum(self.wallet.get_addr_balance(addr)[:2])
        if any(can_send(addr) for addr in addrs):
            menu.addAction(_("Send From"), lambda: self.parent.send_from_addresses(addrs))
        run_hook('receive_menu', menu, addrs, self.wallet)
        menu.exec_(self.viewport().mapToGlobal(position))

    def create_account_menu(self, position, k, item):
        """Show the context menu for an account header row."""
        menu = QMenu()
        exp = item.isExpanded()
        menu.addAction(_("Minimize") if exp else _("Maximize"), lambda: self.set_account_expanded(item, k, not exp))
        menu.addAction(_("Rename"), lambda: self.parent.edit_account_label(k))
        if self.wallet.seed_version > 4:
            menu.addAction(_("View details"), lambda: self.parent.show_account_details(k))
        menu.exec_(self.viewport().mapToGlobal(position))

    def set_account_expanded(self, item, k, b):
        """Expand/collapse an account node and remember the choice."""
        item.setExpanded(b)
        self.accounts_expanded[k] = b

    def on_close(self):
        """Persist the per-account expansion state to wallet storage."""
        self.wallet.storage.put('accounts_expanded', self.accounts_expanded)
| mit |
yencarnacion/jaikuengine | .google_appengine/lib/django-1.5/tests/regressiontests/generic_views/list.py | 41 | 9756 | from __future__ import absolute_import
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from django.test.utils import override_settings
from django.views.generic.base import View
from django.utils.encoding import force_str
from .models import Author, Artist
class ListViewTests(TestCase):
    """Tests for the generic ListView: context variables, pagination
    (query-string and URL-var page selection, custom paginators and
    page kwargs), template selection and error behavior."""

    fixtures = ['generic-views-test-data.json']
    urls = 'regressiontests.generic_views.urls'

    def test_items(self):
        res = self.client.get('/list/dict/')
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/list.html')
        self.assertEqual(res.context['object_list'][0]['first'], 'John')

    def test_queryset(self):
        res = self.client.get('/list/authors/')
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/author_list.html')
        self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
        self.assertTrue(isinstance(res.context['view'], View))
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertIsNone(res.context['paginator'])
        self.assertIsNone(res.context['page_obj'])
        self.assertFalse(res.context['is_paginated'])

    def test_paginated_queryset(self):
        self._make_authors(100)
        res = self.client.get('/list/authors/paginated/')
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/author_list.html')
        # page size is 30 -> 100 authors give 4 pages
        self.assertEqual(len(res.context['object_list']), 30)
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertTrue(res.context['is_paginated'])
        self.assertEqual(res.context['page_obj'].number, 1)
        self.assertEqual(res.context['paginator'].num_pages, 4)
        self.assertEqual(res.context['author_list'][0].name, 'Author 00')
        self.assertEqual(list(res.context['author_list'])[-1].name, 'Author 29')

    def test_paginated_queryset_shortdata(self):
        # Test that short datasets ALSO result in a paginated view.
        res = self.client.get('/list/authors/paginated/')
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/author_list.html')
        self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertEqual(res.context['page_obj'].number, 1)
        self.assertEqual(res.context['paginator'].num_pages, 1)
        self.assertFalse(res.context['is_paginated'])

    def test_paginated_get_page_by_query_string(self):
        self._make_authors(100)
        res = self.client.get('/list/authors/paginated/', {'page': '2'})
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/author_list.html')
        self.assertEqual(len(res.context['object_list']), 30)
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertEqual(res.context['author_list'][0].name, 'Author 30')
        self.assertEqual(res.context['page_obj'].number, 2)

    def test_paginated_get_last_page_by_query_string(self):
        self._make_authors(100)
        # 'page=last' selects the final (partial) page
        res = self.client.get('/list/authors/paginated/', {'page': 'last'})
        self.assertEqual(res.status_code, 200)
        self.assertEqual(len(res.context['object_list']), 10)
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertEqual(res.context['author_list'][0].name, 'Author 90')
        self.assertEqual(res.context['page_obj'].number, 4)

    def test_paginated_get_page_by_urlvar(self):
        self._make_authors(100)
        res = self.client.get('/list/authors/paginated/3/')
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/author_list.html')
        self.assertEqual(len(res.context['object_list']), 30)
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertEqual(res.context['author_list'][0].name, 'Author 60')
        self.assertEqual(res.context['page_obj'].number, 3)

    def test_paginated_page_out_of_range(self):
        self._make_authors(100)
        res = self.client.get('/list/authors/paginated/42/')
        self.assertEqual(res.status_code, 404)

    def test_paginated_invalid_page(self):
        self._make_authors(100)
        res = self.client.get('/list/authors/paginated/?page=frog')
        self.assertEqual(res.status_code, 404)

    def test_paginated_custom_paginator_class(self):
        self._make_authors(7)
        res = self.client.get('/list/authors/paginated/custom_class/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['paginator'].num_pages, 1)
        # Custom pagination allows for 2 orphans on a page size of 5
        self.assertEqual(len(res.context['object_list']), 7)

    def test_paginated_custom_page_kwarg(self):
        self._make_authors(100)
        res = self.client.get('/list/authors/paginated/custom_page_kwarg/', {'pagina': '2'})
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/author_list.html')
        self.assertEqual(len(res.context['object_list']), 30)
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertEqual(res.context['author_list'][0].name, 'Author 30')
        self.assertEqual(res.context['page_obj'].number, 2)

    def test_paginated_custom_paginator_constructor(self):
        self._make_authors(7)
        res = self.client.get('/list/authors/paginated/custom_constructor/')
        self.assertEqual(res.status_code, 200)
        # Custom pagination allows for 2 orphans on a page size of 5
        self.assertEqual(len(res.context['object_list']), 7)

    def test_paginated_non_queryset(self):
        res = self.client.get('/list/dict/paginated/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(len(res.context['object_list']), 1)

    def test_verbose_name(self):
        res = self.client.get('/list/artists/')
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/list.html')
        self.assertEqual(list(res.context['object_list']), list(Artist.objects.all()))
        self.assertIs(res.context['artist_list'], res.context['object_list'])
        self.assertIsNone(res.context['paginator'])
        self.assertIsNone(res.context['page_obj'])
        self.assertFalse(res.context['is_paginated'])

    def test_allow_empty_false(self):
        res = self.client.get('/list/authors/notempty/')
        self.assertEqual(res.status_code, 200)
        Author.objects.all().delete()
        # with allow_empty=False an empty list must 404
        res = self.client.get('/list/authors/notempty/')
        self.assertEqual(res.status_code, 404)

    def test_template_name(self):
        res = self.client.get('/list/authors/template_name/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertTemplateUsed(res, 'generic_views/list.html')

    def test_template_name_suffix(self):
        res = self.client.get('/list/authors/template_name_suffix/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertTemplateUsed(res, 'generic_views/author_objects.html')

    def test_context_object_name(self):
        res = self.client.get('/list/authors/context_object_name/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
        self.assertNotIn('authors', res.context)
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertTemplateUsed(res, 'generic_views/author_list.html')

    def test_duplicate_context_object_name(self):
        res = self.client.get('/list/authors/dupe_context_object_name/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
        self.assertNotIn('authors', res.context)
        self.assertNotIn('author_list', res.context)
        self.assertTemplateUsed(res, 'generic_views/author_list.html')

    def test_missing_items(self):
        self.assertRaises(ImproperlyConfigured, self.client.get, '/list/authors/invalid/')

    def test_paginated_list_view_does_not_load_entire_table(self):
        # Regression test for #17535
        self._make_authors(3)
        # 1 query for authors
        with self.assertNumQueries(1):
            self.client.get('/list/authors/notempty/')
        # same as above + 1 query to test if authors exist + 1 query for pagination
        with self.assertNumQueries(3):
            self.client.get('/list/authors/notempty/paginated/')

    @override_settings(DEBUG=True)
    def test_paginated_list_view_returns_useful_message_on_invalid_page(self):
        # test for #19240
        # tests that source exception's message is included in page
        self._make_authors(1)
        res = self.client.get('/list/authors/paginated/2/')
        self.assertEqual(res.status_code, 404)
        self.assertEqual(force_str(res.context.get('reason')),
                         "Invalid page (2): That page contains no results")

    def _make_authors(self, n):
        # helper: replace all authors with n predictably-named ones
        Author.objects.all().delete()
        for i in range(n):
            Author.objects.create(name='Author %02i' % i, slug='a%s' % i)
| apache-2.0 |
kurtrr/open-numismat | OpenNumismat/TabView.py | 1 | 9619 | from PyQt5.QtCore import pyqtSignal, Qt
from PyQt5.QtGui import QKeySequence
from PyQt5.QtWidgets import *
from OpenNumismat.PageView import PageView
from OpenNumismat.Tools.Gui import createIcon
class TabBar(QTabBar):
    """QTabBar that reports tab double-clicks via doubleClicked(index)
    and enables custom context menus."""

    doubleClicked = pyqtSignal(int)

    def __init__(self, parent):
        super(TabBar, self).__init__(parent)
        self.setContextMenuPolicy(Qt.CustomContextMenu)

    def mouseDoubleClickEvent(self, event):
        # translate the click position into a tab index and emit it
        tab_index = self.tabAt(event.pos())
        self.doubleClicked.emit(tab_index)
class TabView(QTabWidget):
    """Tab widget hosting collection pages: creation, cloning, renaming,
    closing (close keeps the page stored) and permanent removal."""

    def __init__(self, parent):
        super(TabView, self).__init__(parent)
        tabBar = TabBar(self)
        self.setTabBar(tabBar)
        self.setMovable(True)
        self.setTabsClosable(True)
        self.tabCloseRequested.connect(self.closePage)
        self.currentChanged.connect(self.activatedPage)
        self.tabBar().customContextMenuRequested.connect(
            self.tabBarContextMenuEvent)
        self.tabBar().doubleClicked.connect(self.tabDClicked)
        self.oldPage = None
        self.__createActions()

    def mouseDoubleClickEvent(self, event):
        # double-click on the empty area next to the tabs -> new list
        self.newList()

    def tabDClicked(self, index):
        self.renamePage(index)

    def actions(self):
        """Return the dict of shared QActions (new/clone/open/...)."""
        return self.__actions

    def __createActions(self):
        """Create all QActions offered by this widget's menus."""
        self.__actions = {}
        newListAct = QAction(self.tr("&New..."), self)
        newListAct.triggered.connect(self.newList)
        self.__actions['new'] = newListAct
        cloneListAct = QAction(self.tr("Clone"), self)
        cloneListAct.triggered.connect(self._clone)
        self.__actions['clone'] = cloneListAct
        openPageMenu = QMenu(self.tr("Open"), self)
        self.__actions['open'] = openPageMenu
        removeAllAct = QAction(createIcon('cross.png'),
                               self.tr("Remove all"), self)
        removeAllAct.triggered.connect(self.removeClosedPages)
        self.__actions['removeAll'] = removeAllAct
        renameListAct = QAction(self.tr("Rename..."), self)
        renameListAct.triggered.connect(self.renamePage)
        self.__actions['rename'] = renameListAct
        selectColumnsAct = QAction(self.tr("Select columns..."), self)
        selectColumnsAct.triggered.connect(self.selectColumns)
        self.__actions['select'] = selectColumnsAct
        closeListAct = QAction(self.tr("Close"), self)
        closeListAct.setShortcut(QKeySequence.Close)
        closeListAct.triggered.connect(self.closePage)
        self.__actions['close'] = closeListAct
        removeListAct = QAction(createIcon('cross.png'),
                                self.tr("Remove"), self)
        removeListAct.triggered.connect(self.removePage)
        self.__actions['remove'] = removeListAct

    def tabBarContextMenuEvent(self, pos):
        """Show the per-tab context menu (rename/clone/remove)."""
        index = self.tabBar().tabAt(pos)
        self.setCurrentIndex(index)
        menu = QMenu(self)
        menu.addAction(self.__actions['rename'])
        menu.setDefaultAction(self.__actions['rename'])
        menu.addAction(self.__actions['clone'])
        menu.addSeparator()
        menu.addAction(self.__actions['remove'])
        menu.exec_(self.mapToGlobal(pos))

    def _clone(self):
        """Duplicate the current page (including its list parameters)."""
        index = self.currentIndex()
        oldLabel = self.tabText(index)
        oldWidget = self.widget(index)
        pageTitle = oldLabel + self.tr(" (clone)")
        pageParam = self.collection.pages().addPage(pageTitle)
        pageParam.listParam = oldWidget.listView.listParam.clone()
        pageParam.listParam.pageId = pageParam.id
        pageParam.listParam.save()
        pageView = PageView(pageParam, self)
        pageView.setModel(self.collection.model())
        self.addTab(pageView, pageParam.title)
        self.setCurrentWidget(pageView)
        self.collection.pages().openPage(pageView)

    def activatedPage(self, index):
        """React to tab switches: toggle actions, move status widgets."""
        enabled = (index >= 0)
        self.__actions['rename'].setEnabled(enabled)
        self.__actions['close'].setEnabled(enabled)
        self.__actions['remove'].setEnabled(enabled)
        statusBar = self.parent().statusBar()
        # detach status-bar widgets belonging to the previous page
        if self.oldPage:
            statusBar.removeWidget(self.oldPage.listView.listCountLabel)
            statusBar.removeWidget(self.oldPage.listView.listSelectedLabel)
        if index >= 0:
            page = self.widget(index)
            page.model().select()
            statusBar.addPermanentWidget(page.listView.listCountLabel)
            page.listView.listCountLabel.show()
            statusBar.addPermanentWidget(page.listView.listSelectedLabel)
            page.listView.listSelectedLabel.show()
            self.oldPage = page

    def setCollection(self, collection):
        """Replace all tabs with the open pages of *collection*."""
        self.collection = collection
        for _ in range(self.count()):
            self.removeTab(0)
        for pageParam in collection.pages().pagesParam():
            if pageParam.isopen:
                pageView = PageView(pageParam, self)
                pageView.setModel(self.collection.model())
                self.addTab(pageView, pageParam.title)
        # If no pages exists => create default page
        if self.count() == 0:
            self.__createListPage(self.tr("Coins"))

    def currentModel(self):
        """Return the data model of the currently visible page."""
        index = self.currentIndex()
        page = self.widget(index)
        return page.model()

    def newList(self):
        """Ask the user for a title and create a new list page."""
        label, ok = QInputDialog.getText(self, self.tr("New list"),
                self.tr("Enter list title"), text=self.tr("New list"),
                flags=(Qt.WindowCloseButtonHint | Qt.WindowSystemMenuHint))
        if ok and label:
            self.__createListPage(label)

    def renamePage(self, index=None):
        """Prompt for a new title for the tab at *index*.

        *index* may be an int (from the tab bar's doubleClicked
        signal), False (a QAction's triggered(checked) argument) or
        None; in the latter two cases the current tab is used.
        """
        if index is None or index is False:
            index = self.currentIndex()
        oldLabel = self.tabText(index)
        label, ok = QInputDialog.getText(self, self.tr("Rename list"),
                self.tr("Enter new list title"), text=oldLabel,
                flags=(Qt.WindowCloseButtonHint | Qt.WindowSystemMenuHint))
        if ok and label:
            self.setTabText(index, label)
            page = self.widget(index)
            self.collection.pages().renamePage(page, label)
            self.setCurrentIndex(index)

    def selectColumns(self, index=None):
        """Open the column-selection dialog of the current list view."""
        listView = self.currentListView()
        listView.selectColumns()

    def closePage(self, index=None):
        """Close the tab at *index* (the page stays stored and can be
        re-opened later); refuses to close the last remaining tab."""
        if self.count() <= 1:
            QMessageBox.information(self, self.tr("Remove page"),
                        self.tr("Can't close latest opened page.\n"
                                "Add a new one first."))
            return
        # BUGFIX: the old check was 'if not index:', which treated tab
        # index 0 as "no index" — clicking the close button of the first
        # tab closed the *current* tab instead.  index may also be False
        # here because QAction.triggered passes its 'checked' bool.
        if index is None or index is False:
            index = self.currentIndex()
        page = self.widget(index)
        self.removeTab(index)
        self.collection.pages().closePage(page)

    def removePage(self):
        """Permanently delete the current page after confirmation."""
        if self.count() <= 1:
            QMessageBox.information(self, self.tr("Remove page"),
                        self.tr("Can't remove latest opened page.\n"
                                "Add a new one first."))
            return
        index = self.currentIndex()
        pageTitle = self.tabText(index)
        result = QMessageBox.question(self, self.tr("Remove page"),
                        self.tr("Remove the page '%s' permanently?") % pageTitle,
                        QMessageBox.Yes | QMessageBox.No,
                        QMessageBox.No)
        if result == QMessageBox.Yes:
            page = self.widget(index)
            self.removeTab(index)
            self.collection.pages().removePage(page.param)

    def removeClosedPages(self):
        """Permanently delete all closed (stored) pages after confirmation."""
        result = QMessageBox.question(self, self.tr("Remove pages"),
                        self.tr("Remove all closed pages permanently?"),
                        QMessageBox.Yes | QMessageBox.No,
                        QMessageBox.No)
        if result == QMessageBox.Yes:
            closedPages = self.collection.pages().closedPages()
            for pageParam in closedPages:
                self.collection.pages().removePage(pageParam)

    def savePagePositions(self):
        """Persist the current left-to-right tab order."""
        pages = []
        for i in range(self.count()):
            pages.append(self.widget(i))
        self.collection.pages().savePositions(pages)

    def openPage(self, pageParam):
        """Re-open a previously closed page described by *pageParam*."""
        pageView = PageView(pageParam, self)
        pageView.setModel(self.collection.model())
        self.addTab(pageView, pageParam.title)
        self.setCurrentWidget(pageView)
        self.collection.pages().openPage(pageView)

    def updateOpenPageMenu(self):
        """Rebuild the 'Open' submenu from the list of closed pages."""
        menu = self.__actions['open']
        menu.clear()
        closedPages = self.collection.pages().closedPages()
        hasClosedPages = len(closedPages)
        menu.setEnabled(hasClosedPages)
        if hasClosedPages:
            for param in closedPages:
                act = OpenPageAction(param, self)
                act.openPageTriggered.connect(self.openPage)
                menu.addAction(act)
            menu.addSeparator()
            menu.addAction(self.__actions['removeAll'])

    def currentListView(self):
        """Return the list view of the currently visible page."""
        return self.currentWidget().listView

    def __createListPage(self, title):
        """Create, register and show a brand-new page titled *title*."""
        pageParam = self.collection.pages().addPage(title)
        pageView = PageView(pageParam, self)
        pageView.setModel(self.collection.model())
        self.addTab(pageView, title)
        self.setCurrentWidget(pageView)
class OpenPageAction(QAction):
    """Menu action that re-opens a closed page: when activated it emits
    openPageTriggered with the page's parameters."""

    openPageTriggered = pyqtSignal(object)

    def __init__(self, pageParam, parent=None):
        super(OpenPageAction, self).__init__(pageParam.title, parent)
        self.pageParam = pageParam
        self.triggered.connect(self.trigger)

    def trigger(self):
        # NOTE(review): this overrides QAction.trigger(); confirm the
        # shadowing of the base-class slot is intentional.
        self.openPageTriggered.emit(self.pageParam)
| gpl-3.0 |
weaver-viii/h2o-3 | py2/jenkins_h2o_port_allocate.py | 30 | 8319 | #!/usr/bin/python
# "Avoid locker or centralized resource by hard-wiring the port mapping within range"
# "implied by max # of ports used per job, max # of executors per machine, and # of machines."
# "Map of source determines port. in/out using env variables"
print "\njenkins_h2o_port_allocate...."
import socket, os, subprocess
# All machines (jenkins masters and slaves) that may run jobs; the index of
# the local hostname in this list determines the port slot.
USED_HOSTNAMES = [
    'mr-0xb1',
    'mr-0xb4',
    'mr-0x2',
    'mr-0x3',
    'mr-0x4',
    'mr-0x5',
    'mr-0x6',
    'mr-0x7',
    'mr-0x8',
    'mr-0x9',
    'mr-0x10',
    'mr-0xd4',
    'mr-0xd5',
    'mr-0xd6',
    'mr-0xd7',
    'mr-0xd8',
    'mr-0xd9',
    'mr-0xd10',
    'Kevin-Ubuntu3',
]

# Maximum number of ports a job uses: 10 = 5 jvms * 2 ports per h2o jvm
# (current max known).
PORTS_PER_SLOT = 10
DEFAULT_BASE_PORT = 54340
# Maximum number of jenkins executors expected per machine.
EXECUTOR_NUM = 8


def jenkins_h2o_port_allocate():
    """Compute a per-(machine, executor) h2o base port and write it to a file.

    input: jenkins environment variable EXECUTOR_NUMBER
    output: creates ./H2O_BASE_PORT.sh, that you should 'source ./H2O_BASE_PORT.sh'
        (can't see the env. variables directly from python?)
        which will create os environment variables H2O_PORT and H2O_PORT_OFFSET (legacy)

    Internal state for this script that can be updated:
        USED_HOSTNAMES (list of machine names),
        PORTS_PER_SLOT (max per any job),
        DEFAULT_BASE_PORT

    If you modify any of the internal state, you may introduce contention between
    new jenkins jobs and running jenkins jobs. (might not!)
    You should stop/start all jobs (or ignore failures) if you modify internal state here.
    Hence, no parameters to avoid living dangerously!
    """
    # 'key in os.environ' replaces the deprecated dict.has_key().
    if "EXECUTOR_NUMBER" in os.environ:
        # This will fail loudly if the value is not an integer.
        executor = int(os.environ["EXECUTOR_NUMBER"])
    else:
        executor = 1  # jenkins starts with 1
    # print() with a single pre-formatted argument works on Python 2 and 3.
    print("jenkins EXECUTOR_NUMBER: %s" % executor)

    if executor < 0 or executor >= EXECUTOR_NUM:
        raise Exception("executor: %s wrong? Expecting 1-8 jenkins executors on a machine (0-7 exp.)" % executor)

    h2oPort = DEFAULT_BASE_PORT
    h2oPortOffset = 0
    hostname = socket.gethostname()
    if hostname not in USED_HOSTNAMES:
        print("WARNING: this hostname: %s isn't in my list. You should add it?" % hostname)
        print("Will use default base port")
    else:
        hostnameIndex = USED_HOSTNAMES.index(hostname)
        # NOTE(review): 'executor + hostnameIndex' can collide across machines
        # (host 0/executor 1 overlaps host 1/executor 0). The ranges printed
        # below assume 'hostnameIndex * EXECUTOR_NUM + executor' — confirm
        # before changing, since a change re-maps ports for running jobs.
        h2oPortOffset = PORTS_PER_SLOT * (executor + hostnameIndex)
        h2oPort += h2oPortOffset

    print("Possible h2o base_port range is %s to %s" % (
        DEFAULT_BASE_PORT,
        DEFAULT_BASE_PORT + (PORTS_PER_SLOT * EXECUTOR_NUM * len(USED_HOSTNAMES)) - 2))
    print("Possible h2o ports used ranged is %s to %s" % (
        DEFAULT_BASE_PORT,
        DEFAULT_BASE_PORT + (PORTS_PER_SLOT * EXECUTOR_NUM * len(USED_HOSTNAMES)) - 1))
    print("want to 'export H2O_PORT=%s'" % h2oPort)
    print("want to 'export H2O_PORT_OFFSET=%s # legacy'" % h2oPortOffset)

    # 'with' guarantees the file is closed even if a write fails.
    with open('H2O_BASE_PORT.sh', 'w') as f:
        f.write('export H2O_PORT=%s\n' % h2oPort)
        f.write('export H2O_PORT_OFFSET=%s # legacy\n' % h2oPortOffset)
    print("\nNow please:\nsource ./H2O_BASE_PORT.sh")
if __name__ == "__main__":
jenkins_h2o_port_allocate()
"""
This auto-magics the manual allocation I did when parallelized the current 8-way jenkins jobs,
2 per machine, on the jenkins mr-0xd4 that dispatches to mr-0xd5 thru mr-0xd9
The rationale for a global allocate requires understanding what machines a jenkins master/slave can be on,
and what machines they send h2o jars to.
at 0xdata:
A jenkins master is a member of a group of machines. Jenkins can send the python or other test to another slave machine, and then the test can dispatch h2o either locally, or to other machines in the group.
it can target h2o.jar's anywhere in that group, or dispatch a job to a slave in that group that might do the same.
We currently have two such groups, with one jenkins master in each group (mr-0xb4 and mr-0xd4)
(update: let's just say it's all one big group. Not worth optimizing for subgroup knowlege)
So using
(hostname offset in the list of total hostnames) * (EXECUTOR_NUMBER-1 * PORTS_PER_SLOT)
Will give a unique offset from the default 54340 base, for the job, regardless of which jenkins (master or slave) starts it in the group and where the h2o targest are (which is controlled by the config.json used in the job)
all cloud builds done in a job (one or more) use the same offset.
Dispatching tests from your laptop..will they collide with jenkins?
If the host machine is not in the list, like a laptop, then the offset is 0. (54340 will be used). I suppose jenkins could shift it's base_port to be at least 10 above 54340, so existing scripts that users have, that use 54340, won't be stepped on by jenkins. 54340 could be the jenkins base port.
EC2:
I suppose if the tests are used in ec2, we only do one h2o jar per machine, (or multijvm) so no conflict if 54340 is used. (or 54340). We typically want fast EC2 results, so don't overload target machines?. I suppose an EC2 machine list could be created in this script if we started overloading EC2 machines also
PORTS_PER_SLOT is 10 right now, since the most a job will do is 5 h2o jvms.
I guess to ease the transition, I could leave the H2O_PORT_OFFSET as the api to build_cloud(), and have another python script look at the current ho2 IP and EXECUTOR_NUMBER env variable from jenkins
Notes:
Right now, assuming the subnet octet range from a group is 160-180 or 181-190 works. 164 is an oddball case (out of the ten range for its group)
I guess I could just put a list of IPs for the jenkins groups that exist, and find the group you're in, and then get a "group index" from that list. That's robust and easily maintainable.
This algorithm keeps the total port range in use = (max # of executors per jenkins master or slave) * PORTS_PER_SLOT * (# of machines in a group)
Using 2 executors per machine is nice. 4 is about the max that works well with h2o. so 4 * 10 * 10 = 400 ports
that would be 54340 thru 54721
NICE POSSIBILITES: If we know that ubuntu or other services need to reserve ports that are in our range, we can put in mappings to other ports for those values, or shift the port range or whatever...i.e. we can adjust the algorithm in one place. If the 54340 base is not good, that's set in h2o.py..currently tests don't modify base_port (except for some cloud tests we don't run in jenkins, that do more than 5 jvms on a single machine)
I suppose the tool could output the exact port to use, rather than an offset to h2o.py's default. Maybe initially will output both, so h2o.py can migrate
i.e. environment variables H2O_PORT_OFFSET and H2O_PORT (= 5321 + H2O_PORT_OFFSET)
UPDATE: To allow for dispatching h2o to any machine in any jenkins group, we can have just one group list that has all possible machines. Makes the used port range twice as big (800) but that's okay. It's like going to a 255.255.0.0 network!
Detail:
Jenkins has global environment variables
This one is useful
EXECUTOR_NUMBER The unique number that identifies the current executor (among executors of the same machine) that's carrying out this build. This is the number you see in the "build executor status", except that the number starts from 0, not 1.
Now each slave machine can have multiple executors, in addition to the master.
So since in a grand scheme, we don't know who's creating h2o.jars on target machines, from which machine, (jenkins master or slave)...
it means we want a global h2o port allocation (assuming that scraping an h2o port from OS allocation is ugly)
I have cases on 164 jenkins that send the python job to jenkins slave 174, which dispatches h2o jars to 175-180, Or dispatch to YARN on hadoop clusters, but we don't care about ports there, we get told the ip/port by the h2odriver.
Since the pool of machines in a group is fixed, we have the EXECUTOR_NUMBER which is the parallelism per machine (jenkins master or slave), and we
Will give a unique offset to 54340
We can call it a "PORT_SLOT" and pass it as a environment variable like the current "export H2O_PORT_OFFSET=40"
that the build_cloud() uses to offset the default base_port. I suppose PORTS_PER_SLOT can be fixed in build_cloud() so it's the same for all jobs (so jobs don't step over each other.
"""
| apache-2.0 |
Tesla-Redux/android_external_skia | tools/reformat-json.py | 208 | 1741 | #!/usr/bin/python
'''
Copyright 2013 Google Inc.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
'''
'''
Rewrites a JSON file to use Python's standard JSON pretty-print format,
so that subsequent runs of rebaseline.py will generate useful diffs
(only the actual checksum differences will show up as diffs, not obscured
by format differences).
Should not modify the JSON contents in any meaningful way.
'''
# System-level imports
import argparse
import os
import sys
# Imports from within Skia
#
# We need to add the 'gm' directory, so that we can import gm_json.py within
# that directory. That script allows us to parse the actual-results.json file
# written out by the GM tool.
# Make sure that the 'gm' dir is in the PYTHONPATH, but add it at the *end*
# so any dirs that are already in the PYTHONPATH will be preferred.
#
# This assumes that the 'gm' directory has been checked out as a sibling of
# the 'tools' directory containing this script, which will be the case if
# 'trunk' was checked out as a single unit.
# Resolve <checkout-root>/gm relative to this script's own location.
GM_DIRECTORY = os.path.realpath(
    os.path.join(os.path.dirname(os.path.dirname(__file__)), 'gm'))
if GM_DIRECTORY not in sys.path:
  sys.path.append(GM_DIRECTORY)
import gm_json
def Reformat(filename):
  """Rewrite the JSON file at *filename* in canonical pretty-printed form.

  The file is loaded and written back in place; only formatting changes.
  """
  # print() form works under both Python 2 and Python 3 for a single argument.
  print('Reformatting file %s...' % filename)
  gm_json.WriteToFile(gm_json.LoadFromFile(filename), filename)
def _Main():
  """Parse the FILENAME command-line arguments and reformat each file."""
  arg_parser = argparse.ArgumentParser(
      description='Reformat JSON files in-place.')
  arg_parser.add_argument('filenames', metavar='FILENAME', nargs='+',
                          help='file to reformat')
  parsed = arg_parser.parse_args()
  for path in parsed.filenames:
    Reformat(path)
  sys.exit(0)
if __name__ == '__main__':
_Main()
| bsd-3-clause |
alexgorban/models | official/nlp/bert/run_squad.py | 1 | 16603 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Run BERT on SQuAD 1.1 and SQuAD 2.0 in TF 2.x."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
from official.modeling import model_training_utils
from official.nlp import optimization
from official.nlp.albert import configs as albert_configs
from official.nlp.bert import bert_models
from official.nlp.bert import common_flags
from official.nlp.bert import configs as bert_configs
from official.nlp.bert import input_pipeline
from official.nlp.bert import model_saving_utils
from official.nlp.bert import squad_lib as squad_lib_wp
from official.nlp.bert import squad_lib_sp
from official.nlp.bert import tokenization
from official.utils.misc import distribution_utils
from official.utils.misc import keras_utils
flags.DEFINE_enum(
'mode', 'train_and_predict',
['train_and_predict', 'train', 'predict', 'export_only'],
'One of {"train_and_predict", "train", "predict", "export_only"}. '
'`train_and_predict`: both train and predict to a json file. '
'`train`: only trains the model. '
'`predict`: predict answers from the squad json file. '
'`export_only`: will take the latest checkpoint inside '
'model_dir and export a `SavedModel`.')
flags.DEFINE_string('train_data_path', '',
'Training data path with train tfrecords.')
flags.DEFINE_string(
'input_meta_data_path', None,
'Path to file that contains meta data about input '
'to be used for training and evaluation.')
# Model training specific flags.
flags.DEFINE_integer('train_batch_size', 32, 'Total batch size for training.')
# Predict processing related.
flags.DEFINE_string('predict_file', None,
'Prediction data path with train tfrecords.')
flags.DEFINE_string('vocab_file', None,
'The vocabulary file that the BERT model was trained on.')
flags.DEFINE_bool(
'do_lower_case', True,
'Whether to lower case the input text. Should be True for uncased '
'models and False for cased models.')
flags.DEFINE_float(
'null_score_diff_threshold', 0.0,
'If null_score - best_non_null is greater than the threshold, '
'predict null. This is only used for SQuAD v2.')
flags.DEFINE_bool(
'verbose_logging', False,
'If true, all of the warnings related to data processing will be printed. '
'A number of warnings are expected for a normal SQuAD evaluation.')
flags.DEFINE_integer('predict_batch_size', 8,
'Total batch size for prediction.')
flags.DEFINE_integer(
'n_best_size', 20,
'The total number of n-best predictions to generate in the '
'nbest_predictions.json output file.')
flags.DEFINE_integer(
'max_answer_length', 30,
'The maximum length of an answer that can be generated. This is needed '
'because the start and end predictions are not conditioned on one another.')
flags.DEFINE_string(
'sp_model_file', None,
'The path to the sentence piece model. Used by sentence piece tokenizer '
'employed by ALBERT.')
common_flags.define_common_bert_flags()
FLAGS = flags.FLAGS
# Maps --model_type to (config class, squad helper module, tokenizer class).
MODEL_CLASSES = {
    'bert': (bert_configs.BertConfig, squad_lib_wp, tokenization.FullTokenizer),
    'albert': (albert_configs.AlbertConfig, squad_lib_sp,
               tokenization.FullSentencePieceTokenizer),
}
def squad_loss_fn(start_positions,
                  end_positions,
                  start_logits,
                  end_logits,
                  loss_factor=1.0):
  """Computes the SQuAD span loss.

  The loss is the sparse categorical crossentropy of the start and end logits
  against their gold positions, averaged over the two heads and scaled by
  `loss_factor`.
  """
  crossentropy = tf.keras.backend.sparse_categorical_crossentropy
  loss_start = tf.reduce_mean(
      crossentropy(start_positions, start_logits, from_logits=True))
  loss_end = tf.reduce_mean(
      crossentropy(end_positions, end_logits, from_logits=True))
  return loss_factor * (loss_start + loss_end) / 2
def get_loss_fn(loss_factor=1.0):
  """Returns a closure computing the SQuAD loss from labels and outputs."""

  def _loss_fn(labels, model_outputs):
    """Unpacks the label dict and logits pair, then delegates the math."""
    start_logits, end_logits = model_outputs
    return squad_loss_fn(
        labels['start_positions'],
        labels['end_positions'],
        start_logits,
        end_logits,
        loss_factor=loss_factor)

  return _loss_fn
def get_raw_results(predictions):
  """Converts multi-replica predictions to RawResult.

  Yields one RawResult per example, flattening the per-replica structure
  produced by the distributed predict step.
  """
  # Helper module matching the configured model type ('bert' or 'albert').
  squad_lib = MODEL_CLASSES[FLAGS.model_type][1]
  # Outer loop walks per-replica tensors; inner loop walks rows of a replica.
  for unique_ids, start_logits, end_logits in zip(predictions['unique_ids'],
                                                  predictions['start_logits'],
                                                  predictions['end_logits']):
    for values in zip(unique_ids.numpy(), start_logits.numpy(),
                      end_logits.numpy()):
      yield squad_lib.RawResult(
          unique_id=values[0],
          start_logits=values[1].tolist(),
          end_logits=values[2].tolist())
def get_dataset_fn(input_file_pattern, max_seq_length, global_batch_size,
                   is_training):
  """Returns a closure that builds the SQuAD tf.data.Dataset."""

  def _dataset_fn(ctx=None):
    """Creates the dataset, splitting the batch per replica when distributed."""
    if ctx:
      batch_size = ctx.get_per_replica_batch_size(global_batch_size)
    else:
      batch_size = global_batch_size
    return input_pipeline.create_squad_dataset(
        input_file_pattern,
        max_seq_length,
        batch_size,
        is_training=is_training,
        input_pipeline_context=ctx)

  return _dataset_fn
def predict_squad_customized(strategy, input_meta_data, bert_config,
                             predict_tfrecord_path, num_steps):
  """Make predictions using a Bert-based squad model.

  Restores the latest checkpoint from FLAGS.model_dir, then runs `num_steps`
  distributed predict steps over the given tfrecord file and collects the
  per-example RawResult objects.
  """
  predict_dataset_fn = get_dataset_fn(
      predict_tfrecord_path,
      input_meta_data['max_seq_length'],
      FLAGS.predict_batch_size,
      is_training=False)
  predict_iterator = iter(
      strategy.experimental_distribute_datasets_from_function(
          predict_dataset_fn))

  with strategy.scope():
    # Prediction always uses float32, even if training uses mixed precision.
    tf.keras.mixed_precision.experimental.set_policy('float32')
    squad_model, _ = bert_models.squad_model(
        bert_config, input_meta_data['max_seq_length'])

  checkpoint_path = tf.train.latest_checkpoint(FLAGS.model_dir)
  logging.info('Restoring checkpoints from %s', checkpoint_path)
  checkpoint = tf.train.Checkpoint(model=squad_model)
  # expect_partial(): the checkpoint also holds optimizer slots not used here.
  checkpoint.restore(checkpoint_path).expect_partial()

  @tf.function
  def predict_step(iterator):
    """Predicts on distributed devices."""

    def _replicated_step(inputs):
      """Replicated prediction calculation."""
      x, _ = inputs
      # unique_ids only identify examples; keep them out of the model inputs.
      unique_ids = x.pop('unique_ids')
      start_logits, end_logits = squad_model(x, training=False)
      return dict(
          unique_ids=unique_ids,
          start_logits=start_logits,
          end_logits=end_logits)

    outputs = strategy.experimental_run_v2(
        _replicated_step, args=(next(iterator),))
    # Flatten PerReplica values into plain per-replica tensor lists.
    return tf.nest.map_structure(strategy.experimental_local_results, outputs)

  all_results = []
  for _ in range(num_steps):
    predictions = predict_step(predict_iterator)
    for result in get_raw_results(predictions):
      all_results.append(result)
    if len(all_results) % 100 == 0:
      logging.info('Made predictions for %d records.', len(all_results))
  return all_results
def train_squad(strategy,
                input_meta_data,
                custom_callbacks=None,
                run_eagerly=False):
  """Run bert squad training.

  Args:
    strategy: tf.distribute.Strategy used by the custom training loop.
    input_meta_data: dict with dataset metadata ('train_data_size',
      'max_seq_length', ...).
    custom_callbacks: optional callbacks forwarded to the training loop.
    run_eagerly: if True, train steps are not compiled with tf.function.
  """
  if strategy:
    logging.info('Training using customized training loop with distribution'
                 ' strategy.')
  # Enables XLA in Session Config. Should not be set for TPU.
  keras_utils.set_config_v2(FLAGS.enable_xla)

  use_float16 = common_flags.use_float16()
  if use_float16:
    tf.keras.mixed_precision.experimental.set_policy('mixed_float16')

  bert_config = MODEL_CLASSES[FLAGS.model_type][0].from_json_file(
      FLAGS.bert_config_file)
  epochs = FLAGS.num_train_epochs
  num_train_examples = input_meta_data['train_data_size']
  max_seq_length = input_meta_data['max_seq_length']
  steps_per_epoch = int(num_train_examples / FLAGS.train_batch_size)
  # Linear warmup over the first 10% of all training steps.
  warmup_steps = int(epochs * num_train_examples * 0.1 / FLAGS.train_batch_size)
  train_input_fn = get_dataset_fn(
      FLAGS.train_data_path,
      max_seq_length,
      FLAGS.train_batch_size,
      is_training=True)

  def _get_squad_model():
    """Get Squad model and optimizer."""
    squad_model, core_model = bert_models.squad_model(
        bert_config,
        max_seq_length,
        hub_module_url=FLAGS.hub_module_url)
    squad_model.optimizer = optimization.create_optimizer(
        FLAGS.learning_rate, steps_per_epoch * epochs, warmup_steps)
    if use_float16:
      # Wraps optimizer with a LossScaleOptimizer. This is done automatically
      # in compile() with the "mixed_float16" policy, but since we do not call
      # compile(), we must wrap the optimizer manually.
      squad_model.optimizer = (
          tf.keras.mixed_precision.experimental.LossScaleOptimizer(
              squad_model.optimizer, loss_scale=common_flags.get_loss_scale()))
    if FLAGS.fp16_implementation == 'graph_rewrite':
      # Note: when flags_obj.fp16_implementation == "graph_rewrite", dtype as
      # determined by flags_core.get_tf_dtype(flags_obj) would be 'float32'
      # which will ensure tf.compat.v2.keras.mixed_precision and
      # tf.train.experimental.enable_mixed_precision_graph_rewrite do not double
      # up.
      squad_model.optimizer = tf.train.experimental.enable_mixed_precision_graph_rewrite(
          squad_model.optimizer)
    return squad_model, core_model

  # The original BERT model does not scale the loss by
  # 1/num_replicas_in_sync. It could be an accident. So, in order to use
  # the same hyper parameter, we do the same thing here by keeping each
  # replica loss as it is.
  loss_fn = get_loss_fn(
      loss_factor=1.0 /
      strategy.num_replicas_in_sync if FLAGS.scale_loss else 1.0)

  model_training_utils.run_customized_training_loop(
      strategy=strategy,
      model_fn=_get_squad_model,
      loss_fn=loss_fn,
      model_dir=FLAGS.model_dir,
      steps_per_epoch=steps_per_epoch,
      steps_per_loop=FLAGS.steps_per_loop,
      epochs=epochs,
      train_input_fn=train_input_fn,
      init_checkpoint=FLAGS.init_checkpoint,
      run_eagerly=run_eagerly,
      custom_callbacks=custom_callbacks)
def predict_squad(strategy, input_meta_data):
  """Makes predictions for a squad dataset.

  Reads FLAGS.predict_file, converts examples to features, runs distributed
  prediction, and writes predictions/nbest/null-odds JSON files into
  FLAGS.model_dir.
  """
  config_cls, squad_lib, tokenizer_cls = MODEL_CLASSES[FLAGS.model_type]
  bert_config = config_cls.from_json_file(FLAGS.bert_config_file)
  if tokenizer_cls == tokenization.FullTokenizer:
    tokenizer = tokenizer_cls(
        vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
  else:
    assert tokenizer_cls == tokenization.FullSentencePieceTokenizer
    tokenizer = tokenizer_cls(sp_model_file=FLAGS.sp_model_file)
  doc_stride = input_meta_data['doc_stride']
  max_query_length = input_meta_data['max_query_length']
  # Whether data should be in Ver 2.0 format.
  version_2_with_negative = input_meta_data.get('version_2_with_negative',
                                                False)
  eval_examples = squad_lib.read_squad_examples(
      input_file=FLAGS.predict_file,
      is_training=False,
      version_2_with_negative=version_2_with_negative)
  eval_writer = squad_lib.FeatureWriter(
      filename=os.path.join(FLAGS.model_dir, 'eval.tf_record'),
      is_training=False)
  eval_features = []

  def _append_feature(feature, is_padding):
    # Padding features only round out the last batch: they are written to the
    # record file but excluded from post-processing.
    if not is_padding:
      eval_features.append(feature)
    eval_writer.process_feature(feature)

  # TPU requires a fixed batch size for all batches, therefore the number
  # of examples must be a multiple of the batch size, or else examples
  # will get dropped. So we pad with fake examples which are ignored
  # later on.
  kwargs = dict(
      examples=eval_examples,
      tokenizer=tokenizer,
      max_seq_length=input_meta_data['max_seq_length'],
      doc_stride=doc_stride,
      max_query_length=max_query_length,
      is_training=False,
      output_fn=_append_feature,
      batch_size=FLAGS.predict_batch_size)

  # squad_lib_sp requires one more argument 'do_lower_case'.
  if squad_lib == squad_lib_sp:
    kwargs['do_lower_case'] = FLAGS.do_lower_case
  dataset_size = squad_lib.convert_examples_to_features(**kwargs)
  eval_writer.close()

  logging.info('***** Running predictions *****')
  logging.info('  Num orig examples = %d', len(eval_examples))
  logging.info('  Num split examples = %d', len(eval_features))
  logging.info('  Batch size = %d', FLAGS.predict_batch_size)

  num_steps = int(dataset_size / FLAGS.predict_batch_size)
  all_results = predict_squad_customized(strategy, input_meta_data, bert_config,
                                         eval_writer.filename, num_steps)

  output_prediction_file = os.path.join(FLAGS.model_dir, 'predictions.json')
  output_nbest_file = os.path.join(FLAGS.model_dir, 'nbest_predictions.json')
  output_null_log_odds_file = os.path.join(FLAGS.model_dir, 'null_odds.json')

  squad_lib.write_predictions(
      eval_examples,
      eval_features,
      all_results,
      FLAGS.n_best_size,
      FLAGS.max_answer_length,
      FLAGS.do_lower_case,
      output_prediction_file,
      output_nbest_file,
      output_null_log_odds_file,
      version_2_with_negative=version_2_with_negative,
      null_score_diff_threshold=FLAGS.null_score_diff_threshold,
      verbose=FLAGS.verbose_logging)
def export_squad(model_export_path, input_meta_data):
  """Exports a trained model as a `SavedModel` for inference.

  Args:
    model_export_path: a string specifying the path to the SavedModel directory.
    input_meta_data: dictionary containing meta data about input and model.

  Raises:
    Export path is not specified, got an empty string or None.
  """
  if not model_export_path:
    raise ValueError('Export path is not specified: %s' % model_export_path)
  bert_config = MODEL_CLASSES[FLAGS.model_type][0].from_json_file(
      FLAGS.bert_config_file)
  # Export uses float32 for now, even if training uses mixed precision.
  tf.keras.mixed_precision.experimental.set_policy('float32')
  squad_model, _ = bert_models.squad_model(bert_config,
                                           input_meta_data['max_seq_length'])
  # Weights come from the latest checkpoint found in FLAGS.model_dir.
  model_saving_utils.export_bert_model(
      model_export_path, model=squad_model, checkpoint_dir=FLAGS.model_dir)
def main(_):
  """Dispatches training / prediction / export according to FLAGS.mode."""
  # Users should always run this script under TF 2.x. Use an explicit check
  # instead of `assert`, which is silently stripped under `python -O`.
  if not tf.version.VERSION.startswith('2.'):
    raise RuntimeError('This script requires TensorFlow 2.x, found %s' %
                       tf.version.VERSION)
  with tf.io.gfile.GFile(FLAGS.input_meta_data_path, 'rb') as reader:
    input_meta_data = json.loads(reader.read().decode('utf-8'))

  if FLAGS.mode == 'export_only':
    export_squad(FLAGS.model_export_path, input_meta_data)
    return

  # Configures cluster spec for multi-worker distribution strategy.
  if FLAGS.num_gpus > 0:
    _ = distribution_utils.configure_cluster(FLAGS.worker_hosts,
                                             FLAGS.task_index)
  strategy = distribution_utils.get_distribution_strategy(
      distribution_strategy=FLAGS.distribution_strategy,
      num_gpus=FLAGS.num_gpus,
      all_reduce_alg=FLAGS.all_reduce_alg,
      tpu_address=FLAGS.tpu)
  if FLAGS.mode in ('train', 'train_and_predict'):
    train_squad(strategy, input_meta_data, run_eagerly=FLAGS.run_eagerly)
  if FLAGS.mode in ('predict', 'train_and_predict'):
    predict_squad(strategy, input_meta_data)
if __name__ == '__main__':
flags.mark_flag_as_required('bert_config_file')
flags.mark_flag_as_required('model_dir')
app.run(main)
| apache-2.0 |
miniconfig/home-assistant | homeassistant/components/sensor/random.py | 3 | 1991 | """
Support for showing random numbers.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.random/
"""
import asyncio
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (CONF_NAME, CONF_MINIMUM, CONF_MAXIMUM)
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
# Fallbacks used when the corresponding configuration keys are omitted.
DEFAULT_NAME = 'Random Sensor'
DEFAULT_MIN = 0
DEFAULT_MAX = 20
# NOTE(review): 'mdi:hanger' looks unrelated to random numbers — confirm icon.
ICON = 'mdi:hanger'
# Platform configuration: optional name and inclusive [minimum, maximum] range.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_MAXIMUM, default=DEFAULT_MAX): cv.positive_int,
    vol.Optional(CONF_MINIMUM, default=DEFAULT_MIN): cv.positive_int,
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
    """Set up the random number sensor platform."""
    sensor = RandomSensor(
        config.get(CONF_NAME), config.get(CONF_MINIMUM), config.get(CONF_MAXIMUM))
    # Add the entity and request an immediate first update.
    async_add_devices([sensor], True)
    return True
class RandomSensor(Entity):
    """Sensor whose state is a random integer within a configured range."""

    def __init__(self, name, minimum, maximum):
        """Store the display name and the inclusive value bounds."""
        self._name = name
        self._minimum = minimum
        self._maximum = maximum
        # No value until the first update has run.
        self._state = None

    @property
    def name(self):
        """Name shown in the frontend."""
        return self._name

    @property
    def state(self):
        """Most recently drawn value (None before the first update)."""
        return self._state

    @property
    def icon(self):
        """Frontend icon for this sensor."""
        return ICON

    @asyncio.coroutine
    def async_update(self):
        """Draw a fresh value in [minimum, maximum] (both inclusive)."""
        import random
        # randint(a, b) is inclusive of both ends, matching randrange(a, b + 1).
        self._state = random.randint(self._minimum, self._maximum)
| mit |
abligh/xen4.2-minideb | tools/python/xen/xm/setenforce.py | 43 | 2560 | #============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Author: Machon Gregory <mbgrego@tycho.ncsc.mil>
#============================================================================
"""Modify the current mode of the Flask XSM module.
"""
from xen.xm.opts import OptionError
from xen.xm import main as xm_main
from xen.xm.main import server
from xen.util import xsconstants
def help():
    # Returned text is displayed by the xm command framework; keep it verbatim.
    return """
    Usage: xm setenforce [ Enforcing | Permissive | 1 | 0 ]
    Modifies the current mode of the Flask XSM module to be permissive or
    enforcing. Using Enforcing or 1 will put the Flask module in enforcing
    mode. Using Permissive or 0 will put the Flask module in permissive
    mode."""
def setenforce(mode):
    """Set the Flask XSM module to enforcing (1) or permissive (0) mode.

    *mode* may be "0", "1", or (case-insensitively) "Enforcing"/"Permissive".
    Raises OptionError for any other value, or when the active policy type
    is not Flask. Returns the server's reply.
    """
    # Membership test replaces the redundant len()+equality chain.
    if mode in ("0", "1"):
        val = int(mode)
    elif mode.lower() == "enforcing":
        val = 1
    elif mode.lower() == "permissive":
        val = 0
    else:
        raise OptionError("%s is an unsupported mode" % mode)
    if xm_main.serverType == xm_main.SERVER_XEN_API:
        if xsconstants.XS_POLICY_FLASK != \
           int(server.xenapi.XSPolicy.get_xstype()):
            raise OptionError("Unsupported policy type")
        ret = server.xenapi.XSPolicy.setenforce(val)
    else:
        if server.xend.security.on() != xsconstants.XS_POLICY_FLASK:
            raise OptionError("Unsupported policy type")
        ret = server.xend.security.setenforce(val)
    # Previously computed but discarded; returning it is backward compatible
    # (existing callers ignore the return value).
    return ret
def main(argv):
    """Entry point: validate arguments and apply the requested mode."""
    if len(argv) != 2:
        raise OptionError("Invalid arguments")
    if "-?" in argv:
        # BUG FIX: help() only returns the usage text; it was previously
        # called and its result silently discarded, so '-?' printed nothing.
        # print(...) with one argument behaves the same on Python 2 and 3.
        print(help())
        return
    mode = argv[1]
    setenforce(mode)
if __name__ == '__main__':
    # BUG FIX: 'sys' is never imported at module level in this file, so the
    # original guard raised NameError on every code path; import it here.
    import sys
    try:
        main(sys.argv)
    except Exception as e:  # 'as' syntax works on Python 2.6+ and Python 3
        sys.stderr.write('Error: %s\n' % str(e))
        sys.exit(-1)
| gpl-2.0 |
cgvarela/Impala | tests/query_test/test_hdfs_caching.py | 13 | 8247 | # Copyright (c) 2012 Cloudera, Inc. All rights reserved.
# Validates limit on scan nodes
#
import logging
import os
import pytest
from copy import copy
from subprocess import call
from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
from tests.common.impala_test_suite import *
from tests.common.test_vector import *
from tests.common.impala_cluster import ImpalaCluster
from tests.common.test_dimensions import create_exec_option_dimension
from tests.common.skip import SkipIfS3, SkipIfIsilon
from tests.util.shell_util import exec_process
# End to end test that hdfs caching is working.
@SkipIfS3.caching # S3: missing coverage: verify SET CACHED gives error
@SkipIfIsilon.caching
class TestHdfsCaching(ImpalaTestSuite):
  """End-to-end checks that HDFS caching is active for cached tables."""

  @classmethod
  def get_workload(self):
    return 'tpch'

  @classmethod
  def add_test_dimensions(cls):
    # Restrict the matrix to uncompressed text with the default batch size.
    super(TestHdfsCaching, cls).add_test_dimensions()
    cls.TestMatrix.add_constraint(lambda v:\
        v.get_value('exec_option')['batch_size'] == 0)
    cls.TestMatrix.add_constraint(lambda v:\
        v.get_value('table_format').file_format == "text")

  # The tpch nation table is cached as part of data loading. We'll issue a query
  # against this table and verify the metric is updated correctly.
  @pytest.mark.execute_serially
  def test_table_is_cached(self, vector):
    cached_read_metric = "impala-server.io-mgr.cached-bytes-read"
    query_string = "select count(*) from tpch.nation"
    # Size in bytes of the nation table data read from the cache.
    expected_bytes_delta = 2199
    impala_cluster = ImpalaCluster()

    # Collect the cached read metric on all the impalads before running the query
    cached_bytes_before = list()
    for impalad in impala_cluster.impalads:
      cached_bytes_before.append(impalad.service.get_metric_value(cached_read_metric))

    # Execute the query.
    result = self.execute_query(query_string)
    assert(len(result.data) == 1)
    assert(result.data[0] == '25')

    # Read the metrics again.
    cached_bytes_after = list()
    for impalad in impala_cluster.impalads:
      cached_bytes_after.append(impalad.service.get_metric_value(cached_read_metric))

    # Verify that the cached bytes increased by the expected number on exactly one of
    # the impalads (only the scanning node reads from the cache).
    num_metrics_increased = 0
    assert(len(cached_bytes_before) == len(cached_bytes_after))
    for i in range(0, len(cached_bytes_before)):
      assert(cached_bytes_before[i] == cached_bytes_after[i] or\
             cached_bytes_before[i] + expected_bytes_delta == cached_bytes_after[i])
      if cached_bytes_after[i] > cached_bytes_before[i]:
        num_metrics_increased = num_metrics_increased + 1

    if num_metrics_increased != 1:
      # Test failed, print the metrics
      for i in range(0, len(cached_bytes_before)):
        print "%d %d" % (cached_bytes_before[i], cached_bytes_after[i])
      assert(False)

  def test_cache_cancellation(self, vector):
    """ This query runs on some mix of cached and not cached tables. The query has
    a limit so it exercises the cancellation paths. Regression test for
    IMPALA-1019. """
    num_iters = 100
    query_string = """
    with t1 as (select int_col x, bigint_col y from functional.alltypes limit 2),
    t2 as (select int_col x, bigint_col y from functional.alltypestiny limit 2),
    t3 as (select int_col x, bigint_col y from functional.alltypessmall limit 2)
    select * from t1, t2, t3 where t1.x = t2.x and t2.x = t3.x """

    # Run this query for some iterations since it is timing dependent.
    # NOTE(review): xrange(1, num_iters) runs 99 iterations, not 100 —
    # confirm whether the off-by-one is intentional.
    for x in xrange(1, num_iters):
      result = self.execute_query(query_string)
      assert(len(result.data) == 2)
@SkipIfS3.caching
@SkipIfIsilon.caching
class TestHdfsCachingDdl(ImpalaTestSuite):
@classmethod
def get_workload(self):
return 'functional-query'
  @classmethod
  def add_test_dimensions(cls):
    """Restrict this suite to one exec option and uncompressed text tables."""
    super(TestHdfsCachingDdl, cls).add_test_dimensions()
    cls.TestMatrix.add_dimension(create_single_exec_option_dimension())
    cls.TestMatrix.add_constraint(lambda v:\
        v.get_value('table_format').file_format == 'text' and \
        v.get_value('table_format').compression_codec == 'none')
  def setup_method(self, method):
    # Start each test from a clean slate: drop then recreate the database.
    self.cleanup_db("cachedb")
    self.client.execute("create database cachedb")
def teardown_method(self, method):
self.cleanup_db("cachedb")
@pytest.mark.execute_serially
def test_caching_ddl(self, vector):
# Get the number of cache requests before starting the test
num_entries_pre = get_num_cache_requests()
self.run_test_case('QueryTest/hdfs-caching', vector)
# After running this test case we should be left with 8 cache requests.
# In this case, 1 for each table + 7 more for each cached partition.
assert num_entries_pre == get_num_cache_requests() - 8
self.client.execute("drop table cachedb.cached_tbl_part")
self.client.execute("drop table cachedb.cached_tbl_nopart")
# Dropping the tables should cleanup cache entries leaving us with the same
# total number of entries
assert num_entries_pre == get_num_cache_requests()
@pytest.mark.execute_serially
def test_cache_reload_validation(self, vector):
"""This is a set of tests asserting that cache directives modified
outside of Impala are picked up after reload, cf IMPALA-1645"""
num_entries_pre = get_num_cache_requests()
create_table = ("create table cachedb.cached_tbl_reload "
"(id int) cached in 'testPool' with replication = 8")
self.client.execute(create_table)
# Access the table once to load the metadata
self.client.execute("select count(*) from cachedb.cached_tbl_reload")
create_table = ("create table cachedb.cached_tbl_reload_part (i int) "
"partitioned by (j int) cached in 'testPool' with replication = 8")
self.client.execute(create_table)
# Add two partitions
self.client.execute("alter table cachedb.cached_tbl_reload_part add partition (j=1)")
self.client.execute("alter table cachedb.cached_tbl_reload_part add partition (j=2)")
assert num_entries_pre + 4 == get_num_cache_requests(), \
"Adding the tables should be reflected by the number of cache directives."
# Modify the cache directive outside of Impala and reload the table to verify
# that changes are visible
drop_cache_directives_for_path("/test-warehouse/cachedb.db/cached_tbl_reload")
drop_cache_directives_for_path("/test-warehouse/cachedb.db/cached_tbl_reload_part")
drop_cache_directives_for_path(
"/test-warehouse/cachedb.db/cached_tbl_reload_part/j=1")
change_cache_directive_repl_for_path(
"/test-warehouse/cachedb.db/cached_tbl_reload_part/j=2", 3)
# Create a bogus cached table abusing an existing cache directive ID, IMPALA-1750
dirid = get_cache_directive_for_path("/test-warehouse/cachedb.db/cached_tbl_reload_part/j=2")
self.client.execute(("create table cachedb.no_replication_factor (id int) " \
"tblproperties(\"cache_directive_id\"=\"%s\")" % dirid))
self.run_test_case('QueryTest/hdfs-caching-validation', vector)
def drop_cache_directives_for_path(path):
  """Remove every HDFS cache directive registered for the given path."""
  returncode, out, err = exec_process(
      "hdfs cacheadmin -removeDirectives -path %s" % path)
  assert returncode == 0, (
      "Error removing cache directive for path %s (%s, %s)" % (path, out, err))
def get_cache_directive_for_path(path):
  """Return the id of the 'testPool' cache directive registered for `path`.

  Asserts that `hdfs cacheadmin` succeeds; raises AttributeError (on the
  failed re.search) if no matching directive line is found.
  """
  rc, stdout, stderr = exec_process("hdfs cacheadmin -listDirectives -path %s" % path)
  assert rc == 0
  # Raw string so \s and \d are regex escapes rather than (deprecated,
  # eventually invalid) string-literal escapes.
  dirid = re.search(r'^\s+?(\d+)\s+?testPool\s+?.*?$', stdout, re.MULTILINE).group(1)
  return dirid
def change_cache_directive_repl_for_path(path, repl):
  """Change the replication factor of the cache directive for a given path.

  (The original docstring said 'Drop the cache directive' — copy/paste from
  drop_cache_directives_for_path; this function modifies, not drops.)
  """
  dirid = get_cache_directive_for_path(path)
  rc, stdout, stderr = exec_process(
      "hdfs cacheadmin -modifyDirective -id %s -replication %s" % (dirid, repl))
  assert rc == 0, \
      "Error modifying cache directive for path %s (%s, %s)" % (path, stdout, stderr)
def get_num_cache_requests():
  """Return the number of outstanding HDFS cache directives.

  Counts every line of the `hdfs cacheadmin` output (headers included), so
  callers should only compare deltas between two invocations.
  """
  returncode, out, err = exec_process("hdfs cacheadmin -listDirectives -stats")
  assert returncode == 0, 'Error executing hdfs cacheadmin: %s %s' % (out, err)
  return len(out.split('\n'))
| apache-2.0 |
fu-berlin-swp-2014/center-points | tests/cli_test.py | 1 | 1288 | import unittest
from tempfile import NamedTemporaryFile
from centerpoints.cli import parse_arguments
class TestArgumentParser(unittest.TestCase):
    """Tests for centerpoints.cli.parse_arguments: algorithm flag
    exclusivity and output-format selection/defaults."""

    @classmethod
    def setUpClass(cls):
        # A real (temporary) points file, since parse_arguments validates
        # that the positional path exists.
        cls.ptsf = NamedTemporaryFile()

    @classmethod
    def tearDownClass(cls):
        cls.ptsf.truncate()
        cls.ptsf.close()

    def test_algorithm_choice(self):
        # The algorithm switches -1/-2/-3 are mutually exclusive; argparse
        # exits with SystemExit when more than one is given.
        self.assertRaises(SystemExit, parse_arguments, ["-1", "-2"])
        self.assertRaises(SystemExit, parse_arguments, ["-1", "-3"])
        self.assertRaises(SystemExit, parse_arguments, ["-2", "-3"])

    def test_format_choice(self):
        # --json and --csv are mutually exclusive.
        args = ["-1", "--json", "--csv", "", self.ptsf.name]
        self.assertRaises(SystemExit, parse_arguments, args)

        options = parse_arguments(["-1", "--json", self.ptsf.name])
        self.assertEqual("json", options.format)

        # csv is the default format...
        options = parse_arguments(["-1", self.ptsf.name])
        self.assertEqual("csv", options.format)

        # ...and --csv takes an explicit separator argument.
        options = parse_arguments(["-1", "--csv", ",", self.ptsf.name])
        self.assertEqual("csv", options.format)
        self.assertEqual(options.separator, ",", )

        # Default separator is a tab.
        options = parse_arguments(["-1", self.ptsf.name])
        self.assertEqual(options.format, "csv", )
        self.assertEqual(options.separator, "\t")
| mit |
mistio/mist.client | src/mistcommand/helpers/clouds.py | 1 | 11828 | import sys
from prettytable import PrettyTable
from mistcommand.helpers.login import authenticate
def show_cloud(cloud):
x = PrettyTable(["Title", "ID", "Provider", "State"])
x.add_row([cloud.title, cloud.id, cloud.provider, cloud.info['state']])
print x
print
print "Machines:"
x = PrettyTable(["Name", "ID", "State", "Public Ips"])
machines = cloud.machines()
for machine in machines:
try:
public_ips = machine.info['public_ips']
ips = " -- ".join(public_ips)
except:
ips = ""
x.add_row([machine.name, machine.id, machine.info['state'], ips])
print x
def list_clouds(client, pretty):
clouds = client.clouds()
if not client.clouds():
print "No clouds found"
sys.exit(0)
if pretty:
x = PrettyTable(["Name", "ID", "Provider", "State"])
for cloud in clouds:
x.add_row([cloud.title, cloud.id, cloud.provider, cloud.info['state']])
print x
else:
for cloud in clouds:
# print cloud.title, cloud.id, cloud.provider, cloud.info['state']
print "%-40s %-40s %-30s %-20s" % (cloud.title, cloud.id, cloud.provider, cloud.info['state'])
def choose_cloud(client, args):
    """Resolve a single cloud from the parsed CLI arguments.

    Precedence: args.id, then args.name, then a free-text search on
    args.cloud. Returns the first match, or None when nothing matches.
    (Removed the redundant `or None` the original applied before an
    identical truthiness test.)
    """
    if args.id:
        clouds = client.clouds(id=args.id)
    elif args.name:
        clouds = client.clouds(name=args.name)
    else:
        clouds = client.clouds(search=args.cloud)
    return clouds[0] if clouds else None
def return_cloud(client, args):
    """Resolve a single cloud from args that use the cloud_* prefix.

    Same lookup as choose_cloud(), but reads args.cloud_id/args.cloud_name
    (used by subcommands where --id/--name refer to another resource).
    Returns the first match, or None when nothing matches. (Removed the
    redundant `or None` before an identical truthiness test.)
    """
    if args.cloud_id:
        clouds = client.clouds(id=args.cloud_id)
    elif args.cloud_name:
        clouds = client.clouds(name=args.cloud_name)
    else:
        clouds = client.clouds(search=args.cloud)
    return clouds[0] if clouds else None
def add_cloud(client, args):
    """Dispatch to the provider-specific add_*_cloud helper.

    The substring matches mirror the original provider aliases (e.g. any
    provider containing 'ec2' selects the EC2 helper). Rule order matters:
    the first match wins (so the exact 'vcloud' test runs before the
    'libvirt' substring test, and 'indonesian_vcloud' falls through to its
    own exact rule). Unknown providers are silently ignored.
    """
    provider = args.provider
    rules = (
        (lambda p: "ec2" in p, add_ec2_cloud),
        (lambda p: "rackspace" in p, add_rackspace_cloud),
        (lambda p: "nepho" in p, add_nepho_cloud),
        (lambda p: "digi" in p, add_digital_cloud),
        (lambda p: "linode" in p, add_linode_cloud),
        (lambda p: "openstack" in p, add_openstack_cloud),
        (lambda p: "softlayer" in p, add_softlayer_cloud),
        (lambda p: "docker" in p, add_docker_cloud),
        (lambda p: "azure" in p, add_azure_cloud),
        (lambda p: "bare" in p, add_bare_metal_cloud),
        (lambda p: p == "coreos", add_coreos_cloud),
        (lambda p: "gce" in p, add_gce_cloud),
        (lambda p: p == "vcloud", add_vcloud_cloud),
        (lambda p: p == "indonesian_vcloud", add_indonesian_cloud),
        (lambda p: p == "vsphere", add_vsphere_cloud),
        (lambda p: "libvirt" in p, add_libvirt_cloud),
        (lambda p: p == "hostvirtual", add_hostvirtual_cloud),
        (lambda p: p == "vultr", add_vultr_cloud),
        (lambda p: p == "packet", add_packet_cloud),
    )
    for matches, handler in rules:
        if matches(provider):
            handler(client, args)
            return
def add_gce_cloud(client, args):
    """Register a Google Compute Engine cloud.

    The service-account private key is read from the file named by
    args.gce_private_key when given; otherwise an empty key is sent.
    """
    if args.gce_private_key:
        with open(args.gce_private_key, 'r') as key_file:
            private_key = key_file.read()
    else:
        private_key = ""
    client.add_cloud(title=args.name,
                     provider=args.provider,
                     email=args.gce_email,
                     project_id=args.gce_project_id,
                     private_key=private_key)
def add_libvirt_cloud(client, args):
    """Register a KVM/libvirt host as a cloud."""
    client.add_cloud(title=args.name,
                     provider=args.provider,
                     machine_hostname=args.libvirt_hostname,
                     machine_user=args.libvirt_user,
                     machine_key=args.libvirt_key,
                     images_location=args.libvirt_images,
                     ssh_port=args.libvirt_ssh_port)
def add_vcloud_cloud(client, args):
    """Register a VMware vCloud endpoint."""
    client.add_cloud(title=args.name,
                     provider=args.provider,
                     username=args.vcloud_username,
                     password=args.vcloud_password,
                     organization=args.vcloud_organization,
                     host=args.vcloud_host)
def add_vsphere_cloud(client, args):
    """Register a VMware vSphere endpoint."""
    client.add_cloud(title=args.name,
                     provider=args.provider,
                     username=args.vsphere_username,
                     password=args.vsphere_password,
                     host=args.vsphere_host)
def add_indonesian_cloud(client, args):
    """Register an Indonesian vCloud endpoint (vCloud plus a region)."""
    client.add_cloud(title=args.name,
                     provider=args.provider,
                     username=args.indonesian_username,
                     password=args.indonesian_password,
                     organization=args.indonesian_organization,
                     indonesianRegion=args.indonesian_region)
def _read_optional_file(path):
    """Return the contents of `path`, or '' when no path was given."""
    if not path:
        return ""
    with open(path, 'r') as f:
        return f.read()


def add_docker_cloud(client, args):
    """Register a Docker host as a cloud.

    The TLS material (client key, client cert, CA cert) is read from the
    given file paths; missing paths are sent as empty strings. The three
    identical read-if-given blocks of the original are folded into the
    _read_optional_file helper.
    """
    client.add_cloud(title=args.name,
                     provider=args.provider,
                     docker_host=args.docker_host,
                     docker_port=args.docker_port,
                     auth_user=args.docker_auth_user,
                     auth_password=args.docker_auth_password,
                     key_file=_read_optional_file(args.docker_key_file),
                     cert_file=_read_optional_file(args.docker_cert_file),
                     ca_cert_file=_read_optional_file(args.docker_ca_cert_file))
def add_azure_cloud(client, args):
    """Register a Microsoft Azure cloud from a subscription id and the
    management certificate file at args.azure_cert_path."""
    with open(args.azure_cert_path) as cert_file:
        certificate = cert_file.read()
    client.add_cloud(title=args.name,
                     provider=args.provider,
                     subscription_id=args.azure_sub_id,
                     certificate=certificate)
def add_ec2_cloud(client, args):
    """Register an Amazon EC2 cloud using API key/secret credentials."""
    client.add_cloud(title=args.name,
                     provider=args.provider,
                     api_key=args.ec2_api_key,
                     api_secret=args.ec2_api_secret,
                     region=args.ec2_region)
def add_rackspace_cloud(client, args):
    """Register a Rackspace cloud using username/API-key credentials."""
    client.add_cloud(title=args.name,
                     provider=args.provider,
                     username=args.rackspace_username,
                     api_key=args.rackspace_api_key,
                     region=args.rackspace_region)
def add_nepho_cloud(client, args):
    """Register a NephoScale cloud using username/password credentials."""
    client.add_cloud(title=args.name,
                     provider=args.provider,
                     username=args.nepho_username,
                     password=args.nepho_password)
def add_digital_cloud(client, args):
    """Register a DigitalOcean cloud using an API token."""
    client.add_cloud(title=args.name,
                     provider=args.provider,
                     token=args.digi_token)
def add_linode_cloud(client, args):
    """Register a Linode cloud using an API key."""
    client.add_cloud(title=args.name,
                     provider=args.provider,
                     api_key=args.linode_api_key)
def add_openstack_cloud(client, args):
    """Register an OpenStack cloud (keystone credentials + endpoint)."""
    client.add_cloud(title=args.name,
                     provider=args.provider,
                     username=args.openstack_username,
                     password=args.openstack_password,
                     auth_url=args.openstack_auth_url,
                     tenant_name=args.openstack_tenant,
                     region=args.openstack_region)
def add_softlayer_cloud(client, args):
    """Register an IBM SoftLayer cloud using username/API-key credentials."""
    client.add_cloud(title=args.name,
                     provider=args.provider,
                     username=args.softlayer_username,
                     api_key=args.softlayer_api_key)
# def add_hp_cloud(client, args):
# title = args.name
# provider = args.provider
# username = args.hp_username
# password = args.hp_password
# tenant_name = args.hp_tenant
# region = args.hp_region
#
# client.add_cloud(title=title, provider=provider, username=username,
# password=password, tenant_name=tenant_name, region=region)
def add_bare_metal_cloud(client, args):
    """Register an arbitrary SSH-reachable server ('bare metal') as a cloud."""
    client.add_cloud(title=args.name,
                     provider=args.provider,
                     machine_ip=args.bare_hostname,
                     machine_user=args.bare_user,
                     machine_port=args.bare_port,
                     machine_key=args.bare_ssh_key_id)
def add_coreos_cloud(client, args):
    """Register a CoreOS host as a cloud (same shape as bare metal, but
    reading the core_* argument prefix)."""
    client.add_cloud(title=args.name,
                     provider=args.provider,
                     machine_ip=args.core_hostname,
                     machine_user=args.core_user,
                     machine_port=args.core_port,
                     machine_key=args.core_ssh_key_id)
def add_hostvirtual_cloud(client, args):
    """Register a HostVirtual cloud using an API key."""
    client.add_cloud(title=args.name,
                     provider=args.provider,
                     api_key=args.hostvirtual_api_key)
def add_vultr_cloud(client, args):
    """Register a Vultr cloud using an API key."""
    client.add_cloud(title=args.name,
                     provider=args.provider,
                     api_key=args.vultr_api_key)
def add_packet_cloud(client, args):
    """Register a Packet.net cloud using an API key and project id."""
    client.add_cloud(title=args.name,
                     provider=args.provider,
                     api_key=args.packet_api_key,
                     project_id=args.packet_project)
def cloud_action(args):
client = authenticate()
if args.action == 'list-clouds':
pretty = args.pretty
list_clouds(client, pretty)
elif args.action == 'rename-cloud':
cloud = choose_cloud(client, args)
cloud.rename(args.new_name)
print "Renamed cloud to %s" % args.new_name
elif args.action == 'delete-cloud':
cloud = choose_cloud(client, args)
if cloud:
cloud.delete()
print "Deleted cloud %s" % cloud.title
else:
print "Could not find cloud"
elif args.action == 'describe-cloud':
cloud = choose_cloud(client, args)
if cloud:
show_cloud(cloud)
else:
print "Could not find cloud"
elif args.action == 'add-cloud':
add_cloud(client, args)
print "New cloud added"
| gpl-3.0 |
sileht/deb-openstack-quantum | quantum/extensions/_pprofiles.py | 3 | 2300 | """
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2011 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Ying Liu, Cisco Systems, Inc.
#
"""
def get_view_builder(req):
    """Return a ViewBuilder anchored at the WSGI application's root URL."""
    return ViewBuilder(req.application_url)
class ViewBuilder(object):
    """Renders port-profile entities as plain dictionaries.

    Modelled after quantum.views.networks.
    """

    def __init__(self, base_url):
        """:param base_url: url of the root wsgi application"""
        self.base_url = base_url

    def build(self, portprofile_data, is_detail=False):
        """Render `portprofile_data` as a simple or detailed entity."""
        if is_detail:
            return self._build_detail(portprofile_data)
        return self._build_simple(portprofile_data)

    def _build_simple(self, portprofile_data):
        """Simple view: only the profile id."""
        return {'portprofile': {'id': portprofile_data['profile_id']}}

    def _build_detail(self, portprofile_data):
        """Detailed view: id, name, qos and (when set) the assignment."""
        detail = {'id': portprofile_data['profile_id'],
                  'name': portprofile_data['profile_name'],
                  'qos_name': portprofile_data['qos_name']}
        if portprofile_data['assignment'] is not None:
            detail['assignment'] = portprofile_data['assignment']
        return {'portprofile': detail}
| apache-2.0 |
ryano144/intellij-community | python/helpers/profiler/thriftpy3/TMultiplexedProcessor.py | 44 | 2242 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from thriftpy3.Thrift import TProcessor, TMessageType, TException
from thriftpy3.protocol import TProtocolDecorator, TMultiplexedProtocol
class TMultiplexedProcessor(TProcessor):
  """Processor that demultiplexes requests for several services sharing one
  transport.

  Clients must wrap their protocol in TMultiplexedProtocol so that each
  message name carries a "<serviceName><SEPARATOR>" prefix; process() strips
  the prefix and forwards the call to the registered processor.
  """

  def __init__(self):
    # Maps service name -> TProcessor handling that service.
    self.services = {}

  def registerProcessor(self, serviceName, processor):
    """Register `processor` to handle calls addressed to `serviceName`."""
    self.services[serviceName] = processor

  def process(self, iprot, oprot):
    """Route one incoming message to the processor it is addressed to.

    Raises TException for message types other than CALL/ONEWAY, for names
    missing the multiplex separator, and for unknown service names.
    """
    (name, type, seqid) = iprot.readMessageBegin()
    # BUG FIX: the original wrote `type != CALL & type != ONEWAY`, where `&`
    # binds tighter than `!=`, producing a chained comparison with a bitwise
    # AND instead of the intended boolean test.
    if type != TMessageType.CALL and type != TMessageType.ONEWAY:
      raise TException("TMultiplex protocol only supports CALL & ONEWAY")

    index = name.find(TMultiplexedProtocol.SEPARATOR)
    if index < 0:
      raise TException("Service name not found in message name: " + name + ". Did you forget to use TMultiplexProtocol in your client?")

    serviceName = name[0:index]
    call = name[index + len(TMultiplexedProtocol.SEPARATOR):]
    if serviceName not in self.services:
      raise TException("Service name not found: " + serviceName + ". Did you forget to call registerProcessor()?")

    # Replay the header (without the service prefix) to the real processor.
    standardMessage = (call, type, seqid)
    return self.services[serviceName].process(
        StoredMessageProtocol(iprot, standardMessage), oprot)
class StoredMessageProtocol(TProtocolDecorator.TProtocolDecorator):
  # Protocol decorator that replays an already-consumed message header:
  # readMessageBegin() returns the stored (name, type, seqid) tuple instead
  # of reading from the transport again. Used by TMultiplexedProcessor to
  # hand the demultiplexed header to the per-service processor.
  def __init__(self, protocol, messageBegin):
    TProtocolDecorator.TProtocolDecorator.__init__(self, protocol)
    # Header tuple captured by TMultiplexedProcessor.process().
    self.messageBegin = messageBegin
  def readMessageBegin(self):
    # Replay the stored header rather than touching the transport.
    return self.messageBegin
| apache-2.0 |
mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/eggs/boto-2.27.0-py2.7.egg/boto/rds2/exceptions.py | 163 | 4451 | # Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.exception import JSONResponseError
# One exception class per error code returned by the Amazon RDS API.
# boto's RDS connection maps the error type of a JSON error response to one
# of these classes by name; they add no behaviour beyond JSONResponseError.
class InvalidSubnet(JSONResponseError):
    pass
class DBParameterGroupQuotaExceeded(JSONResponseError):
    pass
class DBSubnetGroupAlreadyExists(JSONResponseError):
    pass
class DBSubnetGroupQuotaExceeded(JSONResponseError):
    pass
class InstanceQuotaExceeded(JSONResponseError):
    pass
class InvalidRestore(JSONResponseError):
    pass
class InvalidDBParameterGroupState(JSONResponseError):
    pass
class AuthorizationQuotaExceeded(JSONResponseError):
    pass
class DBSecurityGroupAlreadyExists(JSONResponseError):
    pass
class InsufficientDBInstanceCapacity(JSONResponseError):
    pass
class ReservedDBInstanceQuotaExceeded(JSONResponseError):
    pass
class DBSecurityGroupNotFound(JSONResponseError):
    pass
class DBInstanceAlreadyExists(JSONResponseError):
    pass
class ReservedDBInstanceNotFound(JSONResponseError):
    pass
class DBSubnetGroupDoesNotCoverEnoughAZs(JSONResponseError):
    pass
class InvalidDBSecurityGroupState(JSONResponseError):
    pass
class InvalidVPCNetworkState(JSONResponseError):
    pass
class ReservedDBInstancesOfferingNotFound(JSONResponseError):
    pass
class SNSTopicArnNotFound(JSONResponseError):
    pass
class SNSNoAuthorization(JSONResponseError):
    pass
class SnapshotQuotaExceeded(JSONResponseError):
    pass
class OptionGroupQuotaExceeded(JSONResponseError):
    pass
class DBParameterGroupNotFound(JSONResponseError):
    pass
class SNSInvalidTopic(JSONResponseError):
    pass
class InvalidDBSubnetGroupState(JSONResponseError):
    pass
class DBSubnetGroupNotFound(JSONResponseError):
    pass
class InvalidOptionGroupState(JSONResponseError):
    pass
class SourceNotFound(JSONResponseError):
    pass
class SubscriptionCategoryNotFound(JSONResponseError):
    pass
class EventSubscriptionQuotaExceeded(JSONResponseError):
    pass
class DBSecurityGroupNotSupported(JSONResponseError):
    pass
class InvalidEventSubscriptionState(JSONResponseError):
    pass
class InvalidDBSubnetState(JSONResponseError):
    pass
class InvalidDBSnapshotState(JSONResponseError):
    pass
class SubscriptionAlreadyExist(JSONResponseError):
    pass
class DBSecurityGroupQuotaExceeded(JSONResponseError):
    pass
class ProvisionedIopsNotAvailableInAZ(JSONResponseError):
    pass
class AuthorizationNotFound(JSONResponseError):
    pass
class OptionGroupAlreadyExists(JSONResponseError):
    pass
class SubscriptionNotFound(JSONResponseError):
    pass
class DBUpgradeDependencyFailure(JSONResponseError):
    pass
class PointInTimeRestoreNotEnabled(JSONResponseError):
    pass
class AuthorizationAlreadyExists(JSONResponseError):
    pass
class DBSubnetQuotaExceeded(JSONResponseError):
    pass
class OptionGroupNotFound(JSONResponseError):
    pass
class DBParameterGroupAlreadyExists(JSONResponseError):
    pass
class DBInstanceNotFound(JSONResponseError):
    pass
class ReservedDBInstanceAlreadyExists(JSONResponseError):
    pass
class InvalidDBInstanceState(JSONResponseError):
    pass
class DBSnapshotNotFound(JSONResponseError):
    pass
class DBSnapshotAlreadyExists(JSONResponseError):
    pass
class StorageQuotaExceeded(JSONResponseError):
    pass
class SubnetAlreadyInUse(JSONResponseError):
    pass
| gpl-3.0 |
todaychi/hue | desktop/core/ext-py/MySQL-python-1.2.5/setup_posix.py | 39 | 3392 | import os, sys
from ConfigParser import SafeConfigParser
# This dequote() business is required for some older versions
# of mysql_config
def dequote(s):
    """Strip one layer of matching single or double quotes from `s`.

    Required for some older versions of mysql_config, which emit quoted
    values. Returns `s` unchanged when it is not quoted.
    """
    # len(s) > 1 guards against '' (IndexError in the original) and against
    # a lone quote character collapsing to the empty string.
    if len(s) > 1 and s[0] in "\"'" and s[0] == s[-1]:
        s = s[1:-1]
    return s
def compiler_flag(f):
    """Prefix `f` with '-' to form a command-line compiler flag."""
    return "-{0}".format(f)
def mysql_config(what):
    """Run ``mysql_config --<what>`` and return its output split into tokens.

    Returns an empty list when the command exits non-zero, and raises
    EnvironmentError when mysql_config itself cannot be run (exit status
    greater than 1). `mysql_config.path` selects the executable.
    """
    from os import popen
    f = popen("%s --%s" % (mysql_config.path, what))
    data = f.read().strip().split()
    # On Python 2, popen().close() returns None on success, otherwise the
    # raw wait() status; the exit code is in the high byte (hence ret/256).
    ret = f.close()
    if ret:
        if ret/256:
            data = []
        if ret/256 > 1:
            raise EnvironmentError("%s not found" % (mysql_config.path,))
    return data
# Default executable name; get_config() may override this from site.cfg.
mysql_config.path = "mysql_config"
def get_config():
    """Build the distutils metadata and Extension options for _mysql on POSIX.

    Consults site.cfg (via setup_common) and the mysql_config tool for
    compiler, linker and library settings. Returns (metadata, ext_options).
    """
    from setup_common import get_metadata_and_options, enabled, create_release_file

    metadata, options = get_metadata_and_options()

    if 'mysql_config' in options:
        mysql_config.path = options['mysql_config']

    extra_objects = []
    static = enabled(options, 'static')
    # Pick the client library flavour: embedded server, thread-safe client,
    # or the plain client library.
    if enabled(options, 'embedded'):
        libs = mysql_config("libmysqld-libs")
        client = "mysqld"
    elif enabled(options, 'threadsafe'):
        libs = mysql_config("libs_r")
        client = "mysqlclient_r"
        if not libs:
            # Older MySQL builds ship no _r library; fall back to the
            # plain client.
            libs = mysql_config("libs")
            client = "mysqlclient"
    else:
        libs = mysql_config("libs")
        client = "mysqlclient"

    # Split mysql_config's linker line into -L directories and -l libraries.
    library_dirs = [ dequote(i[2:]) for i in libs if i.startswith(compiler_flag("L")) ]
    libraries = [ dequote(i[2:]) for i in libs if i.startswith(compiler_flag("l")) ]

    # Compiler args minus include/library flags (handled separately above);
    # '%' is doubled so distutils does not treat it as a format character.
    removable_compile_args = [ compiler_flag(f) for f in "ILl" ]
    extra_compile_args = [ i.replace("%", "%%") for i in mysql_config("cflags")
                           if i[:2] not in removable_compile_args ]

    # Copy the arch flags for linking as well
    extra_link_args = list()
    for i in range(len(extra_compile_args)):
        if extra_compile_args[i] == '-arch':
            extra_link_args += ['-arch', extra_compile_args[i + 1]]

    include_dirs = [ dequote(i[2:])
                     for i in mysql_config('include')
                     if i.startswith(compiler_flag('I')) ]
    if not include_dirs: # fix for MySQL-3.23
        include_dirs = [ dequote(i[2:])
                         for i in mysql_config('cflags')
                         if i.startswith(compiler_flag('I')) ]

    if static:
        # Link the static archive directly and drop the matching -l flag.
        extra_objects.append(os.path.join(library_dirs[0],'lib%s.a' % client))
        if client in libraries:
            libraries.remove(client)

    name = "MySQL-python"
    if enabled(options, 'embedded'):
        name = name + "-embedded"
    metadata['name'] = name

    define_macros = [
        ('version_info', metadata['version_info']),
        ('__version__', metadata['version']),
        ]
    create_release_file(metadata)
    # version_info is only consumed by create_release_file / define_macros.
    del metadata['version_info']
    ext_options = dict(
        name = "_mysql",
        library_dirs = library_dirs,
        libraries = libraries,
        extra_compile_args = extra_compile_args,
        extra_link_args = extra_link_args,
        include_dirs = include_dirs,
        extra_objects = extra_objects,
        define_macros = define_macros,
        )
    return metadata, ext_options
if __name__ == "__main__":
    # Guard: this module only provides helpers consumed by setup.py.
    sys.stderr.write("""You shouldn't be running this directly; it is used by setup.py.""")
| apache-2.0 |
rhdedgar/openshift-tools | ansible/roles/lib_git/build/src/git_push.py | 13 | 2595 | # pylint: skip-file
class GitPush(GitCLI):
    ''' Wrap the git command line steps needed to push a branch to a remote
        (checkout, remote update, status check, push).
    '''
    # pylint: disable=too-many-arguments
    def __init__(self,
                 path,
                 remote,
                 src_branch,
                 dest_branch,
                 ssh_key=None):
        ''' Constructor for GitPush

            path: local git checkout to operate on
            remote: name of the git remote to push to
            src_branch: local branch to push
            dest_branch: remote branch to push into
            ssh_key: optional ssh key used by the underlying git commands
        '''
        super(GitPush, self).__init__(path, ssh_key=ssh_key)
        self.path = path
        self.remote = remote
        self.src_branch = src_branch
        self.dest_branch = dest_branch
        # Accumulates intermediate command results for error reporting.
        self.debug = []
        # NOTE(review): changes the cwd of the whole process as a side effect.
        os.chdir(path)

    def checkout_branch(self):
        ''' check out the desired branch; returns True on success '''
        current_branch_results = self._get_current_branch()

        # Already on the source branch: nothing to do.
        if current_branch_results['results'] == self.src_branch:
            return True

        current_branch_results = self._checkout(self.src_branch)
        self.debug.append(current_branch_results)
        if current_branch_results['returncode'] == 0:
            return True

        return False

    def remote_update(self):
        ''' update the git remotes; returns True on success '''
        remote_update_results = self._remote_update()
        self.debug.append(remote_update_results)
        if remote_update_results['returncode'] == 0:
            return True

        return False

    def need_push(self):
        ''' checks to see if push is needed; returns True when the local
            branch is ahead of the remote '''
        git_status_results = self._status(show_untracked=False)
        self.debug.append(git_status_results)

        # git prints this when local commits have not been pushed yet.
        status_msg = "Your branch is ahead of '%s" %self.remote
        if status_msg in git_status_results['results']:
            return True

        return False

    def push(self):
        '''perform a git push

           Returns a result dict with 'returncode' and 'results'; when the
           remote is already up to date the dict carries no_push_needed=True,
           and failures include the accumulated 'debug' history.
        '''
        if not self.src_branch or not self.dest_branch or not self.remote:
            return {'returncode': 1,
                    'results':
                    'Invalid variables being passed in. Please investigate remote, src_branc, and/or dest_branch',
                   }

        if self.checkout_branch():
            if self.remote_update():
                if self.need_push():
                    push_results = self._push(self.remote, self.src_branch, self.dest_branch)
                    push_results['debug'] = self.debug
                    return push_results
                else:
                    return {'returncode': 0,
                            'results': {},
                            'no_push_needed': True
                           }
        return {'returncode': 1,
                'results': {},
                'debug': self.debug
               }
| apache-2.0 |
daj0ker/BinPy | BinPy/examples/source/ic/Series_7400/IC7404.py | 5 | 1235 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <headingcell level=2>
# Usage of IC 7404
# <codecell>
from __future__ import print_function
from BinPy import *
# <codecell>
# Usage of IC 7404:
# Instantiate the 7404 hex-inverter IC and show its built-in documentation.
ic = IC_7404()
print(ic.__doc__)

# The Pin configuration is:
inp = {1: 1, 3: 0, 5: 0, 7: 0, 9: 0, 11: 0, 13: 1, 14: 1}

# Pin initialization

# Powering up the IC - using -- ic.setIC({14: 1, 7: 0})
ic.setIC({14: 1, 7: 0})

# Setting the inputs of the ic
ic.setIC(inp)

# Draw the IC with the current configuration
ic.drawIC()

# Run the IC with the current configuration using -- print ic.run() --
# Note that the ic.run() returns a dict of pin configuration similar to inp
print (ic.run())

# Setting the outputs to the current IC configuration using --
# ic.setIC(ic.run()) --
ic.setIC(ic.run())
# Draw the final configuration
ic.drawIC()

# Setting the outputs to the current IC configuration using --
# ic.setIC(ic.run()) --
ic.setIC(ic.run())
# Draw the final configuration
ic.drawIC()
# Run the IC
print (ic.run())

# Connector Outputs
c = Connector()

# Set the output connector to a particular pin of the ic
ic.setOutput(8, c)
print(c)
| bsd-3-clause |
PieXcoin/PieXcoin | contrib/zmq/zmq_sub.py | 69 | 1726 | #!/usr/bin/env python
import array
import binascii
import zmq
import struct
# Port where the node publishes its ZMQ notifications.
port = 28332

zmqContext = zmq.Context()
zmqSubSocket = zmqContext.socket(zmq.SUB)
# Subscribe to every notification topic published by the node.
zmqSubSocket.setsockopt(zmq.SUBSCRIBE, b"hashblock")
zmqSubSocket.setsockopt(zmq.SUBSCRIBE, b"hashtx")
zmqSubSocket.setsockopt(zmq.SUBSCRIBE, b"hashtxlock")
zmqSubSocket.setsockopt(zmq.SUBSCRIBE, b"rawblock")
zmqSubSocket.setsockopt(zmq.SUBSCRIBE, b"rawtx")
zmqSubSocket.setsockopt(zmq.SUBSCRIBE, b"rawtxlock")
zmqSubSocket.connect("tcp://127.0.0.1:%i" % port)

try:
    while True:
        # Each notification is a multipart message: [topic, body, ...];
        # the final frame may carry a 4-byte little-endian sequence number.
        msg = zmqSubSocket.recv_multipart()
        topic = str(msg[0].decode("utf-8"))
        body = msg[1]
        sequence = "Unknown";
        if len(msg[-1]) == 4:
            msgSequence = struct.unpack('<I', msg[-1])[-1]
            sequence = str(msgSequence)
        if topic == "hashblock":
            print('- HASH BLOCK ('+sequence+') -')
            print(binascii.hexlify(body).decode("utf-8"))
        elif topic == "hashtx":
            print ('- HASH TX ('+sequence+') -')
            print(binascii.hexlify(body).decode("utf-8"))
        elif topic == "hashtxlock":
            print('- HASH TX LOCK ('+sequence+') -')
            print(binascii.hexlify(body).decode("utf-8"))
        elif topic == "rawblock":
            # Only the 80-byte block header is printed, not the whole block.
            print('- RAW BLOCK HEADER ('+sequence+') -')
            print(binascii.hexlify(body[:80]).decode("utf-8"))
        elif topic == "rawtx":
            print('- RAW TX ('+sequence+') -')
            print(binascii.hexlify(body).decode("utf-8"))
        elif topic == "rawtxlock":
            print('- RAW TX LOCK ('+sequence+') -')
            print(binascii.hexlify(body).decode("utf-8"))
except KeyboardInterrupt:
    # Ctrl-C: tear down the ZMQ context (closing the socket) and exit.
    zmqContext.destroy()
| mit |
robertmattmueller/sdac-compiler | sympy/liealgebras/root_system.py | 24 | 6973 | # -*- coding: utf-8 -*-
from .cartan_type import CartanType
from sympy.core import Basic
class RootSystem(Basic):
    """Represent the root system of a simple Lie algebra.

    Every simple Lie algebra g has a unique root system, obtained from the
    adjoint action of g on its Cartan subalgebra (the maximal abelian
    subalgebra).  A root system over a vector space V is a finite set of
    vectors Phi (the roots) satisfying:

    1.  The roots span V.
    2.  The only scalar multiples of a root x in Phi are x and -x.
    3.  Phi is closed under reflection through the hyperplane
        perpendicular to any of its elements.
    4.  For roots x and y, the projection of y onto the line through x
        is a half-integral multiple of x.

    There is a subset Delta of Phi which is a basis of V and in which every
    root of Phi is an integral combination of elements of Delta; the
    members of Delta are called the simple roots, and they span the root
    space of the algebra.

    References: https://en.wikipedia.org/wiki/Root_system
    Lie Algebras and Representation Theory - Humphreys
    """

    def __new__(cls, cartantype):
        """Create a RootSystem for the named simple Lie algebra.

        The argument identifies a simple Lie algebra (for example "A3");
        its CartanType is stored on the instance as ``cartan_type``.
        """
        obj = Basic.__new__(cls, cartantype)
        obj.cartan_type = CartanType(cartantype)
        return obj

    def simple_roots(self):
        """Return the simple roots keyed by the integers 1..rank.

        The rank of the algebra fixes the number of simple roots; each
        one is produced by the Cartan type's ``simple_root`` method.

        Example
        ====

        >>> from sympy.liealgebras.root_system import RootSystem
        >>> c = RootSystem("A3")
        >>> roots = c.simple_roots()
        >>> roots
        {1: [1, -1, 0, 0], 2: [0, 1, -1, 0], 3: [0, 0, 1, -1]}

        """
        rank = self.cartan_type.rank()
        return {index: self.cartan_type.simple_root(index)
                for index in range(1, rank + 1)}

    def all_roots(self):
        """Return every root of the system in a dict with integer keys.

        Starts from the positive roots provided by the Cartan type and
        appends the negation of each one under a fresh key, so the result
        contains both the positive and the negative roots.
        """
        roots = self.cartan_type.positive_roots()
        next_key = max(roots.keys())
        for key in list(roots.keys()):
            next_key += 1
            roots[next_key] = [-component for component in roots[key]]
        return roots

    def root_space(self):
        """Return a string showing the root space as the span of the
        simple roots alpha[1], ..., alpha[n].

        Example
        =======

        >>> from sympy.liealgebras.root_system import RootSystem
        >>> c = RootSystem("A3")
        >>> c.root_space()
        'alpha[1] + alpha[2] + alpha[3]'

        """
        rank = self.cartan_type.rank()
        return " + ".join("alpha[%d]" % index for index in range(1, rank + 1))

    def add_simple_roots(self, root1, root2):
        """Return the componentwise sum of two simple roots.

        ``root1`` and ``root2`` are integer keys into the simple-root
        dictionary; a ValueError is raised when either key is out of
        range.

        Example
        =======

        >>> from sympy.liealgebras.root_system import RootSystem
        >>> c = RootSystem("A3")
        >>> newroot = c.add_simple_roots(1, 2)
        >>> newroot
        [1, 0, -1, 0]

        """
        simple = self.simple_roots()
        if root1 > len(simple) or root2 > len(simple):
            raise ValueError("You've used a root that doesn't exist!")
        return [a + b for a, b in zip(simple[root1], simple[root2])]

    def add_as_roots(self, root1, root2):
        """Add two root vectors, but only if their sum is itself a root.

        The componentwise sum is returned when it occurs among all roots
        of the system; otherwise an explanatory string is returned.

        Example
        =======

        >>> from sympy.liealgebras.root_system import RootSystem
        >>> c = RootSystem("A3")
        >>> c.add_as_roots([1, 0, -1, 0], [0, 0, 1, -1])
        [1, 0, 0, -1]
        >>> c.add_as_roots([1, -1, 0, 0], [0, 0, -1, 1])
        'The sum of these two roots is not a root'

        """
        candidate = [a + b for a, b in zip(root1, root2)]
        if candidate in self.all_roots().values():
            return candidate
        return "The sum of these two roots is not a root"

    def cartan_matrix(self):
        """Return the Cartan matrix of the algebra behind this root system.

        Example
        =======

        >>> from sympy.liealgebras.root_system import RootSystem
        >>> c = RootSystem("A3")
        >>> c.cartan_matrix()
        Matrix([
        [ 2, -1,  0],
        [-1,  2, -1],
        [ 0, -1,  2]])

        """
        return self.cartan_type.cartan_matrix()

    def dynkin_diagram(self):
        """Return the Dynkin diagram of the algebra behind this root system.

        Example
        =======

        >>> from sympy.liealgebras.root_system import RootSystem
        >>> c = RootSystem("A3")
        >>> print(c.dynkin_diagram())
        0---0---0
        1   2   3

        """
        return self.cartan_type.dynkin_diagram()
| gpl-3.0 |
hermanliang/gitinspector | gitinspector/metrics.py | 48 | 9526 | # coding: utf-8
#
# Copyright © 2012-2014 Ejwa Software. All rights reserved.
#
# This file is part of gitinspector.
#
# gitinspector is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# gitinspector is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with gitinspector. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
from __future__ import unicode_literals
from localization import N_
from outputable import Outputable
from changes import FileDiff
import comment
import filtering
import interval
import re
import subprocess
# Per-extension "estimated lines of code" ceilings; a file whose eloc exceeds
# its ceiling is reported as suspiciously big.
__metric_eloc__ = {"java": 500, "c": 500, "cpp": 500, "h": 300, "hpp": 300, "php": 500, "py": 500, "glsl": 1000,
                   "rb": 500, "js": 500, "sql": 1000, "xml": 1000}

# Cyclomatic-complexity token tables: [extensions, entry tokens, exit tokens].
# Entry tokens (branching constructs) add 2 to the counter, exit tokens add 1.
# NOTE(review): the patterns use "\s" inside non-raw strings; this works today
# but raw strings (r"...") would be safer when porting to Python 3.
__metric_cc_tokens__ = [[["java", "js", "c", "cc", "cpp"], ["else", "for\s+\(.*\)", "if\s+\(.*\)", "case\s+\w+:",
                                                            "default:", "while\s+\(.*\)"],
                         ["assert", "break", "continue", "return"]],
                        [["py"], ["^\s+elif .*:$", "^\s+else:$", "^\s+for .*:", "^\s+if .*:$", "^\s+while .*:$"],
                         ["^\s+assert", "break", "continue", "return"]]]

# Reporting thresholds: absolute cyclomatic complexity, and cyclomatic
# complexity divided by estimated lines of code.
METRIC_CYCLOMATIC_COMPLEXITY_THRESHOLD = 50
METRIC_CYCLOMATIC_COMPLEXITY_DENSITY_THRESHOLD = 0.75
class MetricsLogic:
    """Scan the git tree at the configured ref and record every file that
    violates the size or cyclomatic-complexity metrics.

    After construction, ``eloc``, ``cyclomatic_complexity`` and
    ``cyclomatic_complexity_density`` map file names to the offending value.
    """

    def __init__(self):
        self.eloc = {}
        self.cyclomatic_complexity = {}
        self.cyclomatic_complexity_density = {}

        # List every tracked file name at the chosen ref.
        ls_tree_r = subprocess.Popen("git ls-tree --name-only -r " + interval.get_ref(), shell=True, bufsize=1,
                                     stdout=subprocess.PIPE).stdout

        for i in ls_tree_r.readlines():
            # git escapes non-ASCII path bytes; round-trip through
            # unicode_escape -> latin-1 -> utf-8 to recover the real name,
            # then drop any surrounding quotes.
            i = i.strip().decode("unicode_escape", "ignore")
            i = i.encode("latin-1", "replace")
            i = i.decode("utf-8", "replace").strip("\"").strip("'").strip()

            if FileDiff.is_valid_extension(i) and not filtering.set_filtered(FileDiff.get_filename(i)):
                # Fetch the file content at the ref without checking it out.
                file_r = subprocess.Popen("git show " + interval.get_ref() + ":\"{0}\"".format(i.strip()),
                                          shell=True, bufsize=1, stdout=subprocess.PIPE).stdout.readlines()
                extension = FileDiff.get_extension(i)
                lines = MetricsLogic.get_eloc(file_r, extension)
                cycc = MetricsLogic.get_cyclomatic_complexity(file_r, extension)

                # Each violation type is recorded independently.
                if __metric_eloc__.get(extension, None) != None and __metric_eloc__[extension] < lines:
                    self.eloc[i.strip()] = lines

                if METRIC_CYCLOMATIC_COMPLEXITY_THRESHOLD < cycc:
                    self.cyclomatic_complexity[i.strip()] = cycc

                if lines > 0 and METRIC_CYCLOMATIC_COMPLEXITY_DENSITY_THRESHOLD < cycc / float(lines):
                    self.cyclomatic_complexity_density[i.strip()] = cycc / float(lines)

    @staticmethod
    def get_cyclomatic_complexity(file_r, extension):
        """Return a token-counting cyclomatic-complexity estimate for the
        given file lines, or -1 when the extension has no token table."""
        is_inside_comment = False
        cc_counter = 0
        entry_tokens = None
        exit_tokens = None

        # Select the token table that covers this file extension.
        for i in __metric_cc_tokens__:
            if extension in i[0]:
                entry_tokens = i[1]
                exit_tokens = i[2]

        if entry_tokens or exit_tokens:
            for i in file_r:
                i = i.decode("utf-8", "replace")
                (_, is_inside_comment) = comment.handle_comment_block(is_inside_comment, extension, i)

                if not is_inside_comment and not comment.is_comment(extension, i):
                    # Branching tokens weigh 2, early-exit tokens weigh 1.
                    for j in entry_tokens:
                        if re.search(j, i, re.DOTALL):
                            cc_counter += 2
                    for j in exit_tokens:
                        if re.search(j, i, re.DOTALL):
                            cc_counter += 1
            return cc_counter

        return -1

    @staticmethod
    def get_eloc(file_r, extension):
        """Return the number of non-comment lines in the given file lines."""
        is_inside_comment = False
        eloc_counter = 0

        for i in file_r:
            i = i.decode("utf-8", "replace")
            (_, is_inside_comment) = comment.handle_comment_block(is_inside_comment, extension, i)

            if not is_inside_comment and not comment.is_comment(extension, i):
                eloc_counter += 1

        return eloc_counter
# User-facing report strings; N_() marks them for extraction by the
# translation tooling, and they are translated with _() at output time.
ELOC_INFO_TEXT = N_("The following files are suspiciously big (in order of severity)")
CYCLOMATIC_COMPLEXITY_TEXT = N_("The following files have an elevated cyclomatic complexity (in order of severity)")
CYCLOMATIC_COMPLEXITY_DENSITY_TEXT = N_("The following files have an elevated cyclomatic complexity density " \
                                        "(in order of severity)")
METRICS_MISSING_INFO_TEXT = N_("No metrics violations were found in the repository")

# (multiplier, css-class-name) severity brackets used by __get_metrics_score__;
# ordered mildest to most severe.
METRICS_VIOLATION_SCORES = [[1.0, "minimal"], [1.25, "minor"], [1.5, "medium"], [2.0, "bad"], [3.0, "severe"]]
def __get_metrics_score__(ceiling, value):
    """Map how far *value* exceeds *ceiling* onto a severity CSS class name.

    Scans METRICS_VIOLATION_SCORES from most to least severe and returns the
    first bracket whose multiplier the value exceeds.  Previously the function
    fell through and implicitly returned None when value <= ceiling, which
    would raise TypeError in the HTML callers that concatenate the result;
    it now falls back to the mildest bracket ("minimal") instead.
    """
    for multiplier, score_name in reversed(METRICS_VIOLATION_SCORES):
        if value > ceiling * multiplier:
            return score_name

    # Defensive fallback: callers only invoke this for values above the
    # ceiling, but returning a string keeps string concatenation safe.
    return METRICS_VIOLATION_SCORES[0][1]
class Metrics(Outputable):
    """Output adapter that renders the violations collected by MetricsLogic
    in plain text, HTML or XML (one output_* method per format)."""

    def output_text(self):
        metrics_logic = MetricsLogic()

        if not metrics_logic.eloc and not metrics_logic.cyclomatic_complexity and not metrics_logic.cyclomatic_complexity_density:
            print("\n" + _(METRICS_MISSING_INFO_TEXT) + ".")

        if metrics_logic.eloc:
            print("\n" + _(ELOC_INFO_TEXT) + ":")
            # Flip (file, value) pairs so sorting orders by value, descending.
            for i in sorted(set([(j, i) for (i, j) in metrics_logic.eloc.items()]), reverse = True):
                print(_("{0} ({1} estimated lines of code)").format(i[1], str(i[0])))

        if metrics_logic.cyclomatic_complexity:
            print("\n" + _(CYCLOMATIC_COMPLEXITY_TEXT) + ":")
            for i in sorted(set([(j, i) for (i, j) in metrics_logic.cyclomatic_complexity.items()]), reverse = True):
                print(_("{0} ({1} in cyclomatic complexity)").format(i[1], str(i[0])))

        if metrics_logic.cyclomatic_complexity_density:
            print("\n" + _(CYCLOMATIC_COMPLEXITY_DENSITY_TEXT) + ":")
            for i in sorted(set([(j, i) for (i, j) in metrics_logic.cyclomatic_complexity_density.items()]), reverse = True):
                print(_("{0} ({1:.3f} in cyclomatic complexity density)").format(i[1], i[0]))

    def output_html(self):
        metrics_logic = MetricsLogic()
        metrics_xml = "<div><div class=\"box\" id=\"metrics\">"

        if not metrics_logic.eloc and not metrics_logic.cyclomatic_complexity and not metrics_logic.cyclomatic_complexity_density:
            metrics_xml += "<p>" + _(METRICS_MISSING_INFO_TEXT) + ".</p>"

        if metrics_logic.eloc:
            metrics_xml += "<div><h4>" + _(ELOC_INFO_TEXT) + ".</h4>"
            # __get_metrics_score__ picks the severity css class; odd rows
            # get an extra "odd" class for striping.
            for num, i in enumerate(sorted(set([(j, i) for (i, j) in metrics_logic.eloc.items()]), reverse = True)):
                metrics_xml += "<div class=\"" + __get_metrics_score__(__metric_eloc__[FileDiff.get_extension(i[1])], i[0]) + \
                               (" odd\">" if num % 2 == 1 else "\">") + \
                               _("{0} ({1} estimated lines of code)").format(i[1], str(i[0])) + "</div>"
            metrics_xml += "</div>"

        if metrics_logic.cyclomatic_complexity:
            metrics_xml += "<div><h4>" + _(CYCLOMATIC_COMPLEXITY_TEXT) + "</h4>"
            for num, i in enumerate(sorted(set([(j, i) for (i, j) in metrics_logic.cyclomatic_complexity.items()]), reverse = True)):
                metrics_xml += "<div class=\"" + __get_metrics_score__(METRIC_CYCLOMATIC_COMPLEXITY_THRESHOLD, i[0]) + \
                               (" odd\">" if num % 2 == 1 else "\">") + \
                               _("{0} ({1} in cyclomatic complexity)").format(i[1], str(i[0])) + "</div>"
            metrics_xml += "</div>"

        if metrics_logic.cyclomatic_complexity_density:
            metrics_xml += "<div><h4>" + _(CYCLOMATIC_COMPLEXITY_DENSITY_TEXT) + "</h4>"
            for num, i in enumerate(sorted(set([(j, i) for (i, j) in metrics_logic.cyclomatic_complexity_density.items()]), reverse = True)):
                metrics_xml += "<div class=\"" + __get_metrics_score__(METRIC_CYCLOMATIC_COMPLEXITY_DENSITY_THRESHOLD, i[0]) + \
                               (" odd\">" if num % 2 == 1 else "\">") + \
                               _("{0} ({1:.3f} in cyclomatic complexity density)").format(i[1], i[0]) + "</div>"
            metrics_xml += "</div>"

        metrics_xml += "</div></div>"
        print(metrics_xml)

    def output_xml(self):
        metrics_logic = MetricsLogic()

        if not metrics_logic.eloc and not metrics_logic.cyclomatic_complexity and not metrics_logic.cyclomatic_complexity_density:
            print("\t<metrics>\n\t\t<message>" + _(METRICS_MISSING_INFO_TEXT) + "</message>\n\t</metrics>")
        else:
            # All violation types are accumulated into one <violations> body.
            eloc_xml = ""

            if metrics_logic.eloc:
                for i in sorted(set([(j, i) for (i, j) in metrics_logic.eloc.items()]), reverse = True):
                    eloc_xml += "\t\t\t<estimated-lines-of-code>\n"
                    eloc_xml += "\t\t\t\t<file-name>" + i[1] + "</file-name>\n"
                    eloc_xml += "\t\t\t\t<value>" + str(i[0]) + "</value>\n"
                    eloc_xml += "\t\t\t</estimated-lines-of-code>\n"

            if metrics_logic.cyclomatic_complexity:
                for i in sorted(set([(j, i) for (i, j) in metrics_logic.cyclomatic_complexity.items()]), reverse = True):
                    eloc_xml += "\t\t\t<cyclomatic-complexity>\n"
                    eloc_xml += "\t\t\t\t<file-name>" + i[1] + "</file-name>\n"
                    eloc_xml += "\t\t\t\t<value>" + str(i[0]) + "</value>\n"
                    eloc_xml += "\t\t\t</cyclomatic-complexity>\n"

            if metrics_logic.cyclomatic_complexity_density:
                for i in sorted(set([(j, i) for (i, j) in metrics_logic.cyclomatic_complexity_density.items()]), reverse = True):
                    eloc_xml += "\t\t\t<cyclomatic-complexity-density>\n"
                    eloc_xml += "\t\t\t\t<file-name>" + i[1] + "</file-name>\n"
                    eloc_xml += "\t\t\t\t<value>{0:.3f}</value>\n".format(i[0])
                    eloc_xml += "\t\t\t</cyclomatic-complexity-density>\n"

            print("\t<metrics>\n\t\t<violations>\n" + eloc_xml + "\t\t</violations>\n\t</metrics>")
| gpl-3.0 |
liangyaozhan/rt-thread | bsp/beaglebone/rtconfig.py | 7 | 2654 | import os
# toolchains options
# Target architecture/CPU for the BeagleBone (TI AM335x, Cortex-A8) BSP.
ARCH='arm'
CPU='am335x'
CROSS_TOOL='gcc'

# The RTT_CC environment variable overrides the default toolchain choice.
if os.getenv('RTT_CC'):
    CROSS_TOOL = os.getenv('RTT_CC')

if CROSS_TOOL == 'gcc':
    PLATFORM = 'gcc'
    EXEC_PATH = r'C:\Program Files (x86)\CodeSourcery\Sourcery_CodeBench_Lite_for_ARM_EABI\bin'
elif CROSS_TOOL == 'keil':
    PLATFORM = 'armcc'
    EXEC_PATH = 'C:/Keil'
elif CROSS_TOOL == 'iar':
    # NOTE(review): Python 2 print statements -- this SCons config is run by
    # the Python 2 build toolchain of this era.
    print '================ERROR============================'
    print 'Not support IAR yet!'
    print '================================================='
    exit(0)

# RTT_EXEC_PATH overrides the toolchain installation directory.
if os.getenv('RTT_EXEC_PATH'):
    EXEC_PATH = os.getenv('RTT_EXEC_PATH')

BUILD = 'release'

if PLATFORM == 'gcc':
    # toolchains
    PREFIX = 'arm-none-eabi-'
    CC = PREFIX + 'gcc'
    CXX = PREFIX + 'g++'
    AS = PREFIX + 'gcc'
    AR = PREFIX + 'ar'
    LINK = PREFIX + 'gcc'
    TARGET_EXT = 'elf'
    SIZE = PREFIX + 'size'
    OBJDUMP = PREFIX + 'objdump'
    OBJCPY = PREFIX + 'objcopy'

    # Cortex-A8 tuning with VFPv3 soft-float ABI.
    DEVICE = ' -Wall -march=armv7-a -mtune=cortex-a8'+\
             ' -ftree-vectorize -ffast-math -mfpu=vfpv3-d16 -mfloat-abi=softfp'
    #DEVICE = ' '
    CFLAGS = DEVICE
    AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -D__ASSEMBLY__'
    LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rtthread-beaglebone.map,-cref,-u,Reset_Handler -T beaglebone_ram.lds'

    CPATH = ''
    LPATH = ''

    if BUILD == 'debug':
        CFLAGS += ' -O0 -gdwarf-2 -Wall'
        AFLAGS += ' -gdwarf-2'
    else:
        CFLAGS += ' -O2 -Wall'

    # Produce a raw binary and report section sizes after linking.
    POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' +\
                  SIZE + ' $TARGET \n'

elif PLATFORM == 'armcc':
    # toolchains
    CC = 'armcc'
    CXX = 'armcc'
    AS = 'armasm'
    AR = 'armar'
    LINK = 'armlink'
    TARGET_EXT = 'axf'

    DEVICE = ' --device DARMP'
    CFLAGS = DEVICE + ' --apcs=interwork'
    AFLAGS = DEVICE
    LFLAGS = DEVICE + ' --info sizes --info totals --info unused --info veneers --list rtthread-beaglebone.map --scatter beaglebone_ram.sct'

    CFLAGS += ' -I' + EXEC_PATH + '/ARM/RV31/INC'
    LFLAGS += ' --libpath ' + EXEC_PATH + '/ARM/RV31/LIB'

    EXEC_PATH += '/arm/bin40/'

    if BUILD == 'debug':
        CFLAGS += ' -g -O0'
        AFLAGS += ' -g'
    else:
        CFLAGS += ' -O2'

    POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'

elif PLATFORM == 'iar':
    # toolchains
    # NOTE(review): unreachable in practice -- the CROSS_TOOL == 'iar' branch
    # above exits before PLATFORM can be set to 'iar'.
    CC = 'iccarm'
    AS = 'iasmarm'
    AR = 'iarchive'
    LINK = 'ilinkarm'
    TARGET_EXT = 'out'

    DEVICE = ' --cpu DARMP'

    CFLAGS = ''
    AFLAGS = ''
    LFLAGS = ' --config beaglebone_ram.icf'

    EXEC_PATH += '/arm/bin/'

    RT_USING_MINILIBC = False
    POST_ACTION = ''
| gpl-2.0 |
peterneher/peters-stuff | CaffeScripts/CaffeNetworks/CaffeUNet_2D.py | 1 | 10711 | __author__ = "Peter F. Neher"
import numpy as np
import caffe
from caffe import layers as L
from caffe import params as P
from Layers.DiceLoss import DiceLossLayer
from Layers.DiceIndex import DiceIndexLayer
class CaffeUNet_2D :
    """Builder for a 2D U-Net segmentation network as a Caffe NetSpec.

    Constructs a contracting path, a bottleneck ("encode") pair, and an
    expanding path with skip connections, plus the loss/accuracy layers
    selected by ``loss_func`` ('xent', 'dice' or 'both').
    """

    def __init__(self, batch_size, num_classes, input_size, data_channels, label_channels, base_n_filters, num_blocks, loss_func='xent', ignore_label=None) :
        # Network hyper-parameters.
        self.data_channels = data_channels
        self.label_channels = label_channels
        self.batch_size = batch_size
        self.num_classes = num_classes
        self.input_size = input_size
        self.base_n_filters = base_n_filters
        self.num_blocks = num_blocks
        self.loss_func = loss_func
        self.do_dropout = True
        self.kernel_size = 3
        self.weight_filler = dict(type='msra')
        self.bias_filler = dict(type='constant')
        self.net_spec = None
        # (lr_mult, decay_mult) pairs for weights and bias respectively.
        self.param = [dict(lr_mult=1,decay_mult=1),dict(lr_mult=2,decay_mult=0)]
        self.ignore_label = ignore_label
        self.use_batchnorm = False
        self.create_network_structure()

    def save_net_spec(self, output_file):
        """Write the generated network prototxt to *output_file*."""
        f = open(output_file, 'w')
        f.write(str(self.net_spec.to_proto()))
        f.close()

    def print_all_available_layers(self):
        """Print every layer type registered in this Caffe build (Python 2)."""
        layer_ist = caffe.layer_type_list()
        for el in layer_ist:
            print el

    def print_network_sizes(self, model_file):
        """Load *model_file* and print each blob's shape (Python 2)."""
        net = caffe.Net(model_file, caffe.TRAIN)
        for k, v in net.blobs.items():
            print k, v.data.shape

    def add_contraction_block(self, input, block_number):
        """Append one contracting block (two 3x3 convs + 2x2 max pool).

        The number of filters doubles with each block (base * 2^(n-1)).
        Returns the pooling layer so the next block can chain onto it.
        """
        if self.use_batchnorm :
            l = self.add_batchnormscale(name='contr_' + str(block_number) + '_1', input=L.ReLU(L.Convolution(input, pad=self.pad, kernel_size=self.kernel_size, num_output=self.base_n_filters * pow(2, block_number-1), weight_filler=self.weight_filler, bias_filler=self.bias_filler, param=self.param), in_place=True))
            l = self.add_batchnormscale(name='contr_' + str(block_number) + '_2', input=L.ReLU(L.Convolution( l, pad=self.pad, kernel_size=self.kernel_size, num_output=self.base_n_filters * pow(2, block_number-1), weight_filler=self.weight_filler, bias_filler=self.bias_filler, param=self.param), in_place=True))
        else:
            l = self.add_conv(input=input, name='contr_' + str(block_number) + '_1', filter_mult=block_number-1)
            l = self.add_conv(input=l, name='contr_' + str(block_number) + '_2', filter_mult=block_number-1)
        l = L.Pooling( l, kernel_size=2, stride=2, pool=P.Pooling.MAX)
        setattr(self.net_spec, 'pool_' + str(block_number), l)
        return l

    def add_expansion_block(self, input, block_number):
        """Append one expanding block: 2x2 grouped deconvolution upsample,
        concat with the matching contraction block's features (skip
        connection), then two 3x3 convs."""
        # Fixed (non-learned) upsampling: constant weights, lr_mult=0.
        l = L.Deconvolution(input, convolution_param=dict(num_output=self.base_n_filters * pow(2, block_number), group=self.base_n_filters * pow(2, block_number), kernel_size=2, stride=2, weight_filler=dict(type='constant', value=1), bias_term=False), param = dict(lr_mult=0, decay_mult=0))
        setattr(self.net_spec, 'upscale_' + str(block_number), l)
        # Skip connection to the same-depth contraction features.
        l = L.Concat(l, getattr(self.net_spec, 'contr_' + str(block_number) + '_2'), axis=1)
        setattr(self.net_spec, 'concat_' + str(block_number), l)
        if self.use_batchnorm:
            l = self.add_batchnormscale(name='expand_' + str(block_number) + '_1', input=L.ReLU(L.Convolution(l, pad=self.pad, kernel_size=self.kernel_size, num_output=self.base_n_filters * pow(2, block_number-1), weight_filler=self.weight_filler, bias_filler=self.bias_filler, param=self.param), in_place=True))
            l = self.add_batchnormscale(name='expand_' + str(block_number) + '_2', input=L.ReLU(L.Convolution(l, pad=self.pad, kernel_size=self.kernel_size, num_output=self.base_n_filters * pow(2, block_number-1), weight_filler=self.weight_filler, bias_filler=self.bias_filler, param=self.param), in_place=True))
        else:
            l = self.add_conv(input=l, name='expand_' + str(block_number) + '_1', filter_mult=block_number-1)
            l = self.add_conv(input=l, name='expand_' + str(block_number) + '_2', filter_mult=block_number-1)
        return l

    def add_batchnormscale(self, input, name):
        """Attach a BatchNorm+Scale pair named *name* to the net spec.

        Two BatchNorm layers share the same name/top: the TEST phase uses
        global statistics, the TRAIN phase uses batch statistics.
        """
        if True : # necessary?
            batch_norm_param={'moving_average_fraction': 0.95, 'use_global_stats': True }
            # BatchNorm's three blobs (mean, variance, scale factor) are
            # updated by the running average, not by gradients: lr_mult=0.
            param = [dict(lr_mult=0),dict(lr_mult=0),dict(lr_mult=0)]
            l = L.BatchNorm(input, name=name+'_bn', batch_norm_param=batch_norm_param, param=param, include={'phase': caffe.TEST}, ntop=1)
            setattr(self.net_spec, name+'_bn', l)
            batch_norm_param={'moving_average_fraction': 0.95, 'use_global_stats': False }
            l = L.BatchNorm(input, name=name+'_bn', top=name+'_bn', batch_norm_param=batch_norm_param, param=param, include={'phase': caffe.TRAIN}, ntop=0)
            setattr(self.net_spec, name+'_bn' + '_train', l)
            l = L.Scale(getattr(self.net_spec, name+'_bn'), scale_param = { 'bias_term': True } )
            setattr(self.net_spec, name, l)
        else : # here without split in use_global_stats True/False
            l = L.Scale(L.BatchNorm(input), scale_param={'bias_term': True})
            setattr(self.net_spec, name, l)
        return l

    def add_conv(self, input, name, filter_mult):
        """Attach a 3x3 convolution named *name* with base * 2^filter_mult
        outputs, followed by an in-place ReLU."""
        l = L.Convolution(input, pad=self.pad, kernel_size=self.kernel_size,
                          num_output=self.base_n_filters * pow(2, filter_mult),
                          weight_filler=self.weight_filler)
        setattr(self.net_spec, name, l)
        return L.ReLU(l, in_place=True)

    def create_network_structure(self):
        """Assemble the full U-Net plus loss/accuracy layers into net_spec."""
        # NOTE(review): Python 2 integer division; under Python 3 this would
        # yield a float -- use // when porting.
        self.pad = (self.kernel_size - 1) / 2
        self.net_spec = caffe.NetSpec()
        self.net_spec.data = L.Input(ntop=1, input_param={'shape': {'dim': [self.batch_size, self.data_channels, self.input_size, self.input_size]}})
        self.net_spec.target = L.Input(ntop=1, input_param={'shape': {'dim': [self.batch_size, self.label_channels, self.input_size, self.input_size]}}, exclude={'stage': 'deploy'})

        last_layer = self.net_spec.data
        # Contracting path.
        for i in range(1,self.num_blocks+1) :
            last_layer = self.add_contraction_block(last_layer, i)

        if self.do_dropout:
            last_layer = L.Dropout(last_layer, dropout_ratio=0.4, in_place=True)

        # Bottleneck ("encode") pair at the deepest resolution.
        if self.use_batchnorm:
            last_layer = self.add_batchnormscale(name='encode_1', input=L.ReLU(L.Convolution(last_layer, pad=self.pad, kernel_size=self.kernel_size, num_output=self.base_n_filters * pow(2, self.num_blocks), weight_filler=self.weight_filler), in_place=True))
            last_layer = self.add_batchnormscale(name='encode_2', input=L.ReLU(L.Convolution(last_layer, pad=self.pad, kernel_size=self.kernel_size, num_output=self.base_n_filters * pow(2, self.num_blocks), weight_filler=self.weight_filler), in_place=True))
        else:
            last_layer = self.add_conv(last_layer, name='encode_1', filter_mult=self.num_blocks)
            last_layer = self.add_conv(last_layer, name='encode_2', filter_mult=self.num_blocks)

        # Expanding path, deepest block first.
        for i in range(1,self.num_blocks+1)[::-1] :
            last_layer = self.add_expansion_block(last_layer, i)

        # 1x1 conv to per-class scores, softmax probabilities, argmax labels.
        self.net_spec.seg = L.Convolution(last_layer, pad=0, kernel_size=1, num_output=self.num_classes, weight_filler=self.weight_filler)
        self.net_spec.softmax = L.Softmax(self.net_spec.seg)
        self.net_spec.argmax = L.ArgMax(self.net_spec.softmax, axis=1)
        self.net_spec.silence = L.Silence(self.net_spec.argmax, ntop=0, include={'phase': caffe.TRAIN})
        self.net_spec.target_argmax = L.ArgMax(self.net_spec.target, axis=1, exclude={'stage': 'deploy'})

        if self.loss_func=='xent' :
            if self.ignore_label is None :
                self.net_spec.loss = L.SoftmaxWithLoss(self.net_spec.seg, self.net_spec.target_argmax, exclude={'stage': 'deploy'})
                self.net_spec.accuracy = L.Accuracy(self.net_spec.seg, self.net_spec.target_argmax, exclude={'stage': 'deploy'})
            else :
                self.net_spec.loss = L.SoftmaxWithLoss(self.net_spec.seg, self.net_spec.target_argmax, exclude={'stage': 'deploy'}, loss_param={'ignore_label': self.ignore_label})
                self.net_spec.accuracy = L.Accuracy(self.net_spec.seg, self.net_spec.target_argmax, exclude={'stage': 'deploy'}, accuracy_param={'ignore_label': self.ignore_label})
        elif self.loss_func=='dice' :
            if self.ignore_label is None :
                self.net_spec.loss = L.Python(self.net_spec.softmax, self.net_spec.target, loss_weight=1, python_param=dict( module='DiceLoss', layer='DiceLossLayer' ), exclude={'stage': 'deploy'} )
                self.net_spec.accuracy = L.Accuracy(self.net_spec.seg, self.net_spec.target_argmax, exclude={'stage': 'deploy'})
            else :
                self.net_spec.loss = L.Python(self.net_spec.softmax, self.net_spec.target, loss_weight=1, python_param=dict(module='DiceLoss', layer='DiceLossLayer', param_str="{'param1': " + str(self.ignore_label) + "}"), exclude={'stage': 'deploy'})
                self.net_spec.accuracy = L.Accuracy(self.net_spec.seg, self.net_spec.target_argmax, exclude={'stage': 'deploy'}, accuracy_param={'ignore_label': self.ignore_label})
        elif self.loss_func=='both' :
            if self.ignore_label is None :
                self.net_spec.xent_loss = L.SoftmaxWithLoss(self.net_spec.seg, self.net_spec.target_argmax, exclude={'stage': 'deploy'}, loss_weight=10)
                self.net_spec.loss = L.Python(self.net_spec.softmax, self.net_spec.target, loss_weight=1, python_param=dict( module='DiceLoss', layer='DiceLossLayer' ), exclude={'stage': 'deploy'})
                self.net_spec.accuracy = L.Accuracy(self.net_spec.seg, self.net_spec.target_argmax, exclude={'stage': 'deploy'})
            else :
                self.net_spec.xent_loss = L.SoftmaxWithLoss(self.net_spec.seg, self.net_spec.target_argmax, exclude={'stage': 'deploy'}, loss_weight=10, loss_param={'ignore_label': self.ignore_label})
                self.net_spec.loss = L.Python(self.net_spec.softmax, self.net_spec.target, loss_weight=1, python_param=dict(module='DiceLoss', layer='DiceLossLayer', param_str="{'param1': " + str(self.ignore_label) + "}"), exclude={'stage': 'deploy'})
                self.net_spec.accuracy = L.Accuracy(self.net_spec.seg, self.net_spec.target_argmax, exclude={'stage': 'deploy'}, accuracy_param={'ignore_label': self.ignore_label})
        # NOTE(review): original indentation was lost; this Dice-index metric
        # layer is placed at method level (added for every loss_func) -- confirm
        # against the upstream repository.
        self.net_spec.dice = L.Python(self.net_spec.softmax, self.net_spec.target, loss_weight=1, python_param=dict(module='DiceIndex', layer='DiceIndexLayer'), exclude={'stage': 'deploy'})
EuropeanSocialInnovationDatabase/ESID-main | TextMining/Classifiers/Trainers/NaiveBayesOutputs.py | 1 | 16411 | from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
from sklearn import metrics
from sklearn.pipeline import Pipeline
import numpy as np
import pandas as pd
import re
from os import listdir
from os.path import join,isdir
from sklearn.utils import resample
from sklearn.model_selection import cross_val_score
import pickle
from sklearn.utils import resample
class DataSet:
    """Root container of the annotated corpus: one Annotator per rater."""

    def __init__(self):
        # Per-instance list. The former class-level ``Annotators = []`` was
        # removed: a mutable class attribute is shared by every instance and
        # invites cross-instance state leaks.
        self.Annotators = []
class Annotator:
    """A single human rater: their name, annotation files and parsed documents."""

    def __init__(self):
        # Per-instance containers; the redundant class-level mutable
        # attributes were removed to avoid shared state between instances.
        self.files = []
        self.documents = []
        self.Name = ""
class Document:
    """One annotated project description with its gold labels and the labels
    later predicted by the classifiers.

    Attribute groups:
    - raw content: Text, Lines, DocumentName, DatabaseID, Annotations, isSpam
    - annotator marks per ESID criterion (0 = not satisfied)
    - gold-standard satisfied-flags derived from the marks
    - ``*_predicted`` flags filled in by the classifiers
    """

    def __init__(self):
        # Raw content. Per-instance containers; the redundant class-level
        # mutable attributes were removed to avoid shared state.
        self.Text = ""
        self.Lines = []
        self.DocumentName = ""
        self.DatabaseID = ""
        self.Annotations = []
        self.isSpam = False
        # Per-criterion annotator marks (0 = not satisfied).
        self.Project_Mark_Objective_1A = 0
        self.Project_Mark_Objective_1B = 0
        self.Project_Mark_Objective_1C = 0
        self.Project_Mark_Actors_2A = 0
        self.Project_Mark_Actors_2B = 0
        self.Project_Mark_Actors_2C = 0
        self.Project_Mark_Outputs_3A = 0
        # Was assigned twice in the original __init__; duplicate removed.
        self.Project_Mark_Innovativeness_3A = 0
        # Gold-standard flags derived from the marks above.
        self.isProjectObjectiveSatisfied = False
        self.isProjectActorSatisfied = False
        self.isProjectOutputSatisfied = False
        self.isProjectInnovativenessSatisfied = False
        # Classifier predictions for the same four criteria.
        self.isProjectObjectiveSatisfied_predicted = False
        self.isProjectActorSatisfied_predicted = False
        self.isProjectOutputSatisfied_predicted = False
        self.isProjectInnovativenessSatisfied_predicted = False
class Line:
    """A physical line of a document: its character span within the file,
    its text, and the sentences/tokens/annotations it contains."""

    def __init__(self):
        # Per-instance values; the redundant class-level mutable attributes
        # were removed to avoid shared state between instances.
        self.StartSpan = 0
        self.EndSpan = 0
        self.Text = ""
        self.Sentences = []
        self.Tokens = []
        self.Annotations = []
class Sentence:
    """A sentence with its character span (-1 = unset) and its annotations."""

    def __init__(self):
        # Per-instance values; the redundant class-level mutable attributes
        # were removed to avoid shared state between instances.
        self.SentenceText = ""
        self.StartSpan = -1
        self.EndSpan = -1
        self.Annotations = []
class Annotation:
    """A single stand-off annotation: a labelled character span in a document.

    Unlike its sibling classes, the original relied solely on class-level
    attributes; an ``__init__`` was added for per-instance state, consistent
    with Annotator/Document/Line/Sentence (callers still just do
    ``Annotation()`` and assign fields, so the interface is unchanged).
    """

    def __init__(self):
        self.FromFile = ""        # .txt file the annotation came from
        self.FromAnnotator = ""   # name of the annotator who created it
        self.AnnotationText = ""  # surface text covered by the span
        self.StartSpan = -1
        self.EndSpan = -1
        self.HighLevelClass = ""  # coarse class: Objectives/Actors/Outputs/Innovativeness
        self.LowLevelClass = ""   # fine-grained label from the .ann file
# Root folder containing one sub-directory of .txt/.ann file pairs per annotator.
data_folder = "../../../Helpers/FullDataset_Alina/"
ds = DataSet()  # NOTE(review): DataSet is defined elsewhere in this module
total_num_spam = 0
sentences = []  # every raw text line across all documents
total_num_files = 0
# job = aetros.backend.start_job('nikolamilosevic86/GloveModel')
# Each sub-directory of data_folder is one annotator's output.
annotators = [f for f in listdir(data_folder) if isdir(join(data_folder, f))]
for ann in annotators:
    folder = data_folder + "/" + ann
    Annot = Annotator()
    Annot.Name = ann
    ds.Annotators.append(Annot)
    onlyfiles = [f for f in listdir(folder) if (f.endswith(".txt"))]
    for file in onlyfiles:
        Annot.files.append(data_folder + "/" + ann + '/' + file)
        doc = Document()
        total_num_files = total_num_files + 1
        doc.Lines = []
        # doc.Annotations = []
        doc.DocumentName = file
        Annot.documents.append(doc)
        # Skip auxiliary files whose names start with 'a' or 't'.
        if (file.startswith('a') or file.startswith('t')):
            continue
        print file
        # File names look like "<prefix>_<DatabaseID>.txt".
        doc.DatabaseID = file.split("_")[1].split(".")[0]
        fl = open(data_folder + "/" + ann + '/' + file, 'r')
        content = fl.read()
        doc.Text = content
        lines = content.split('\n')
        line_index = 0
        # Record the character span of each physical line in the document.
        for line in lines:
            l = Line()
            l.StartSpan = line_index
            l.EndSpan = line_index + len(line)
            l.Text = line
            line_index = line_index + len(line) + 1  # +1 for the '\n'
            sentences.append(line)
            doc.Lines.append(l)
        # Parse the brat-style .ann file that accompanies each .txt file.
        an = open(data_folder + "/" + ann + '/' + file.replace(".txt", ".ann"), 'r')
        annotations = an.readlines()
        for a in annotations:
            # Drop discontinuous-span fragments ("12;34") and collapse the
            # resulting double spaces.
            a = re.sub(r'\d+;\d+', '', a).replace('  ', ' ')
            split_ann = a.split('\t')
            if (split_ann[0].startswith("T")):
                # "T" lines are text-bound annotations:
                #   id <TAB> class start end <TAB> covered text
                id = split_ann[0]
                sp_split_ann = split_ann[1].split(' ')
                low_level_ann = sp_split_ann[0]
                if low_level_ann == "ProjectMark":
                    continue
                span_start = sp_split_ann[1]
                span_end = sp_split_ann[2]
                ann_text = split_ann[2]
                Ann = Annotation()
                Ann.AnnotationText = ann_text
                Ann.StartSpan = int(span_start)
                Ann.EndSpan = int(span_end)
                Ann.FromAnnotator = Annot.Name
                Ann.FromFile = file
                Ann.LowLevelClass = low_level_ann
                # Map fine-grained sentence-level labels onto coarse classes.
                if (low_level_ann == "SL_Outputs_3a"):
                    Ann.HighLevelClass = "Outputs"
                if (
                        low_level_ann == "SL_Objective_1a" or low_level_ann == "SL_Objective_1b" or low_level_ann == "SL_Objective_1c"):
                    Ann.HighLevelClass = "Objectives"
                if (
                        low_level_ann == "SL_Actors_2a" or low_level_ann == "SL_Actors_2b" or low_level_ann == "SL_Actors_2c"):
                    Ann.HighLevelClass = "Actors"
                if (low_level_ann == "SL_Innovativeness_4a"):
                    Ann.HighLevelClass = "Innovativeness"
                doc.Annotations.append(Ann)
                # Attach the annotation to the line that fully contains its span.
                for line in doc.Lines:
                    if line.StartSpan <= Ann.StartSpan and line.EndSpan >= Ann.EndSpan:
                        line.Annotations.append(Ann)
            else:
                # Attribute lines carry document-level marks:
                #   id <TAB> mark_name target value
                id = split_ann[0]
                sp_split_ann = split_ann[1].split(' ')
                mark_name = sp_split_ann[0]
                if (len(sp_split_ann) <= 2):
                    continue
                mark = sp_split_ann[2].replace('\n', '')
                # A mark >= 1 means the corresponding criterion is satisfied.
                if (mark_name == "DL_Outputs_3a"):
                    doc.Project_Mark_Outputs_3A = int(mark)
                    if int(mark) >= 1:
                        doc.isProjectOutputSatisfied = True
                if (mark_name == "DL_Objective_1a"):
                    doc.Project_Mark_Objective_1A = int(mark)
                    if int(mark) >= 1:
                        doc.isProjectObjectiveSatisfied = True
                if (mark_name == "DL_Objective_1b" or mark_name == "DL_Objective"):
                    doc.Project_Mark_Objective_1B = int(mark)
                    if int(mark) >= 1:
                        doc.isProjectObjectiveSatisfied = True
                if (mark_name == "DL_Objective_1c"):
                    doc.Project_Mark_Objective_1C = int(mark)
                    if int(mark) >= 1:
                        doc.isProjectObjectiveSatisfied = True
                if (mark_name == "DL_Innovativeness_4a" or mark_name == "DL_Innovativeness"):
                    doc.Project_Mark_Innovativeness_3A = int(mark)
                    if int(mark) >= 1:
                        doc.isProjectInnovativenessSatisfied = True
                if (mark_name == "DL_Actors_2a" or mark_name == "DL_Actors"):
                    doc.Project_Mark_Actors_2A = int(mark)
                    if int(mark) >= 1:
                        doc.isProjectActorSatisfied = True
                if (mark_name == "DL_Actors_2b"):
                    doc.Project_Mark_Actors_2B = int(mark)
                    if int(mark) >= 1:
                        doc.isProjectActorSatisfied = True
                if (mark_name == "DL_Actors_2c"):
                    doc.Project_Mark_Actors_2C = int(mark)
                    if int(mark) >= 1:
                        doc.isProjectActorSatisfied = True
        # A document with all-zero marks is treated as spam.
        # NOTE(review): Project_Mark_Actors_2B is tested twice below; the
        # duplicate is redundant (every other mark is already covered).
        if (
                doc.Project_Mark_Objective_1A == 0 and doc.Project_Mark_Objective_1B == 0 and doc.Project_Mark_Objective_1C == 0 and doc.Project_Mark_Actors_2A == 0
                and doc.Project_Mark_Actors_2B == 0 and doc.Project_Mark_Actors_2B == 0 and doc.Project_Mark_Actors_2C == 0 and doc.Project_Mark_Outputs_3A == 0
                and doc.Project_Mark_Innovativeness_3A == 0):
            doc.isSpam = True
            total_num_spam = total_num_spam + 1
# Pairwise inter-annotator agreement bookkeeping.
i = 0
j = i + 1
kappa_files = 0       # number of shared documents compared
done_documents = []   # document names already compared, to avoid repeats
num_overlap_spam = 0  # pairs agreeing on the spam flag
num_spam = 0
total_objectives = 0
total_outputs = 0
total_actors = 0
total_innovativeness = 0
# Concatenated per-line 0/1 indicator vectors for each annotator of a pair.
ann1_annotations_objectives = []
ann2_annotations_objectives = []
ann1_annotations_actors = []
ann2_annotations_actors = []
ann1_annotations_outputs = []
ann2_annotations_outputs = []
ann1_annotations_innovativeness = []
ann2_annotations_innovativeness = []
match_objectives = 0
match_outputs = 0
match_actors = 0
match_innovativeness = 0
# Compare every annotator pair (i, j) on the documents they both annotated.
while i < len(ds.Annotators) - 1:
    while j < len(ds.Annotators):
        annotator1 = ds.Annotators[i]
        annotator2 = ds.Annotators[j]
        for doc1 in annotator1.documents:
            for doc2 in annotator2.documents:
                if doc1.DocumentName == doc2.DocumentName and doc1.DocumentName not in done_documents:
                    done_documents.append(doc1.DocumentName)
                    line_num = 0
                    # 0/1 per-line indicator vectors per class and annotator.
                    ann1_objective = [0] * len(doc1.Lines)
                    ann2_objective = [0] * len(doc2.Lines)
                    ann1_output = [0] * len(doc1.Lines)
                    ann2_output = [0] * len(doc2.Lines)
                    ann1_actor = [0] * len(doc1.Lines)
                    ann2_actor = [0] * len(doc2.Lines)
                    ann1_innovativeness = [0] * len(doc1.Lines)
                    ann2_innovativeness = [0] * len(doc2.Lines)
                    # NOTE(review): iterating over doc1.Lines while indexing
                    # doc2.Lines assumes both copies have the same line count
                    # -- TODO confirm the source files are identical.
                    while line_num < len(doc1.Lines):
                        if len(doc1.Lines[line_num].Annotations) > 0:
                            for a in doc1.Lines[line_num].Annotations:
                                if a.HighLevelClass == "Objectives":
                                    ann1_objective[line_num] = 1
                                    total_objectives = total_objectives + 1
                                if a.HighLevelClass == "Outputs":
                                    ann1_output[line_num] = 1
                                    total_outputs = total_outputs + 1
                                if a.HighLevelClass == "Actors":
                                    ann1_actor[line_num] = 1
                                    total_actors = total_actors + 1
                                if a.HighLevelClass == "Innovativeness":
                                    ann1_innovativeness[line_num] = 1
                                    total_innovativeness = total_innovativeness + 1
                                # Count annotator2 tags of the same class on
                                # the same line as matches.
                                for a1 in doc2.Lines[line_num].Annotations:
                                    if a1.HighLevelClass == a.HighLevelClass:
                                        if a1.HighLevelClass == "Objectives":
                                            match_objectives = match_objectives + 1
                                        if a1.HighLevelClass == "Outputs":
                                            match_outputs = match_outputs + 1
                                        if a1.HighLevelClass == "Actors":
                                            match_actors = match_actors + 1
                                        if a1.HighLevelClass == "Innovativeness":
                                            match_innovativeness = match_innovativeness + 1
                        if len(doc2.Lines[line_num].Annotations) > 0:
                            for a in doc2.Lines[line_num].Annotations:
                                if a.HighLevelClass == "Objectives":
                                    ann2_objective[line_num] = 1
                                    total_objectives = total_objectives + 1
                                if a.HighLevelClass == "Outputs":
                                    ann2_output[line_num] = 1
                                    total_outputs = total_outputs + 1
                                if a.HighLevelClass == "Actors":
                                    ann2_actor[line_num] = 1
                                    total_actors = total_actors + 1
                                if a.HighLevelClass == "Innovativeness":
                                    ann2_innovativeness[line_num] = 1
                                    total_innovativeness = total_innovativeness + 1
                        line_num = line_num + 1
                    # Append this document's vectors to the corpus-wide ones.
                    ann1_annotations_outputs.extend(ann1_output)
                    ann2_annotations_outputs.extend(ann2_output)
                    ann1_annotations_objectives.extend(ann1_objective)
                    ann2_annotations_objectives.extend(ann2_objective)
                    ann1_annotations_actors.extend(ann1_actor)
                    ann2_annotations_actors.extend(ann2_actor)
                    ann1_annotations_innovativeness.extend(ann1_innovativeness)
                    ann2_annotations_innovativeness.extend(ann2_innovativeness)
                    print "Statistics for document:" + doc1.DocumentName
                    print "Annotators " + annotator1.Name + " and " + annotator2.Name
                    print "Spam by " + annotator1.Name + ":" + str(doc1.isSpam)
                    print "Spam by " + annotator2.Name + ":" + str(doc2.isSpam)
                    if (doc1.isSpam == doc2.isSpam):
                        num_overlap_spam = num_overlap_spam + 1
                    if doc1.isSpam:
                        num_spam = num_spam + 1
                    if doc2.isSpam:
                        num_spam = num_spam + 1
                    kappa_files = kappa_files + 1
        j = j + 1
    i = i + 1
    j = i + 1
print annotators
# Flatten every annotator's documents into parallel text/label arrays.
doc_array = []
text_array = []
objectives = []
actors = []
outputs = []
innovativeness = []
for ann in ds.Annotators:
    for doc in ann.documents:
        doc_array.append(
            [doc.Text, doc.isProjectObjectiveSatisfied, doc.isProjectActorSatisfied, doc.isProjectOutputSatisfied,
             doc.isProjectInnovativenessSatisfied])
        objectives.append(doc.isProjectObjectiveSatisfied)
        actors.append(doc.isProjectActorSatisfied)
        outputs.append(doc.isProjectOutputSatisfied)
        innovativeness.append(doc.isProjectInnovativenessSatisfied)
        text_array.append(doc.Text)
# Upsample the minority class ("outputs satisfied") to balance the data.
df = pd.DataFrame({'text': text_array, 'classa': outputs})
df_majority = df[df.classa == 0]
df_minority = df[df.classa == 1]
df_minority_upsampled = resample(df_minority,
                                 replace=True,      # sample with replacement
                                 n_samples=160,     # to match majority class
                                 random_state=83293)  # reproducible results
df_upsampled = pd.concat([df_majority, df_minority_upsampled])
# Display new class counts
print df_upsampled.classa.value_counts()
# NOTE(review): these 80/20 splits are computed but unused below; the model
# is evaluated by cross-validation and then fit on the full upsampled frame.
train = text_array[0:int(0.8 * len(text_array))]
train_Y = outputs[0:int(0.8 * len(actors))]
test = text_array[int(0.8 * len(text_array)):]
test_Y = outputs[int(0.8 * len(actors)):]
# categories = ['non actor', 'actor']
# Bag-of-words -> TF-IDF -> multinomial Naive Bayes pipeline.
text_clf = Pipeline([('vect', CountVectorizer()),
                     ('tfidf', TfidfTransformer()),
                     ('clf', MultinomialNB()),
                     ])
scores = cross_val_score(text_clf, df_upsampled.text, df_upsampled.classa, cv=10, scoring='f1')
final = 0
for score in scores:
    final = final + score
print scores
print "Final:" + str(final / 10)  # mean F1 over the 10 folds
text_clf.fit(df_upsampled.text, df_upsampled.classa)
filename = '../Models/naive_bayes_outputs.sav'
# NOTE(review): the file handle from open() is never closed explicitly.
pickle.dump(text_clf, open(filename, 'wb'))
Em-Pan/swift | test/unit/common/middleware/test_healthcheck.py | 35 | 2901 | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import tempfile
import unittest
from swift.common.swob import Request, Response
from swift.common.middleware import healthcheck
class FakeApp(object):
    """Minimal WSGI application that always answers with the body 'FAKE APP'."""

    def __call__(self, env, start_response):
        request = Request(env)
        response = Response(request=request, body='FAKE APP')
        return response(env, start_response)
class TestHealthCheck(unittest.TestCase):
    """Tests for the healthcheck WSGI middleware.

    Fixes over the original: the deprecated ``assertEquals`` alias (removed in
    Python 3.12) is replaced with ``assertEqual``, the disable-flag file is no
    longer leaked open, and the 'healtcheck' typo in a test name is corrected.
    """

    def setUp(self):
        # Temp dir holds the optional "disable" flag file used by some tests.
        self.tempdir = tempfile.mkdtemp()
        self.disable_path = os.path.join(self.tempdir, 'dont-taze-me-bro')
        self.got_statuses = []

    def tearDown(self):
        shutil.rmtree(self.tempdir, ignore_errors=True)

    def get_app(self, app, global_conf, **local_conf):
        """Wrap *app* with the healthcheck middleware and return it."""
        factory = healthcheck.filter_factory(global_conf, **local_conf)
        return factory(app)

    def start_response(self, status, headers):
        # WSGI start_response stub that records the status line for assertions.
        self.got_statuses.append(status)

    def test_healthcheck(self):
        # /healthcheck is intercepted by the middleware and answered directly.
        req = Request.blank('/healthcheck', environ={'REQUEST_METHOD': 'GET'})
        app = self.get_app(FakeApp(), {})
        resp = app(req.environ, self.start_response)
        self.assertEqual(['200 OK'], self.got_statuses)
        self.assertEqual(resp, ['OK'])

    def test_healthcheck_pass(self):
        # Any other path passes through to the wrapped application.
        req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
        app = self.get_app(FakeApp(), {})
        resp = app(req.environ, self.start_response)
        self.assertEqual(['200 OK'], self.got_statuses)
        self.assertEqual(resp, ['FAKE APP'])

    def test_healthcheck_pass_not_disabled(self):
        # With disable_path configured but no flag file, healthcheck is OK.
        req = Request.blank('/healthcheck', environ={'REQUEST_METHOD': 'GET'})
        app = self.get_app(FakeApp(), {}, disable_path=self.disable_path)
        resp = app(req.environ, self.start_response)
        self.assertEqual(['200 OK'], self.got_statuses)
        self.assertEqual(resp, ['OK'])

    def test_healthcheck_pass_disabled(self):
        # Create the (empty) flag file without leaking the handle.
        with open(self.disable_path, 'w'):
            pass
        req = Request.blank('/healthcheck', environ={'REQUEST_METHOD': 'GET'})
        app = self.get_app(FakeApp(), {}, disable_path=self.disable_path)
        resp = app(req.environ, self.start_response)
        self.assertEqual(['503 Service Unavailable'], self.got_statuses)
        self.assertEqual(resp, ['DISABLED BY FILE'])
# Allow running this test module directly: `python test_healthcheck.py`.
if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
wagnermarkd/presto | presto-docs/src/main/sphinx/ext/download.py | 45 | 2206 | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# noinspection PyUnresolvedReferences
from docutils import nodes, utils
# noinspection PyUnresolvedReferences
from sphinx.errors import SphinxError
# Maven group id shared by all downloadable Presto artifacts.
GROUP_ID = 'com.facebook.presto'

# Role text -> (artifact id, packaging, classifier); classifier may be None.
ARTIFACTS = {
    'server': ('presto-server', 'tar.gz', None),
    'cli': ('presto-cli', 'jar', 'executable'),
    'jdbc': ('presto-jdbc', 'jar', None),
    'verifier': ('presto-verifier', 'jar', 'executable'),
    'benchmark-driver': ('presto-benchmark-driver', 'jar', 'executable'),
}
def maven_filename(artifact, version, packaging, classifier):
    """Return the Maven artifact file name, e.g. ``presto-cli-0.100-executable.jar``.

    An empty/None classifier is omitted from the name.
    """
    suffix = '-{0}'.format(classifier) if classifier else ''
    return '{0}-{1}{2}.{3}'.format(artifact, version, suffix, packaging)
def maven_download(group, artifact, version, packaging, classifier):
    """Return the Maven Central download URL for the given artifact coordinates."""
    repo_root = 'https://repo1.maven.org/maven2/'
    group_path = group.replace('.', '/')
    filename = maven_filename(artifact, version, packaging, classifier)
    path_parts = (group_path, artifact, version, filename)
    return repo_root + '/'.join(path_parts)
def setup(app):
    """Sphinx extension entry point: registers the ``:maven_download:`` role.

    The role takes one of the keys in ARTIFACTS and renders a link to the
    corresponding Maven Central download, using ``app.config.release`` as the
    artifact version.
    """
    # noinspection PyDefaultArgument,PyUnusedLocal
    def download_link_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
        # The mutable defaults follow the docutils role-function signature
        # convention; they are never mutated here.
        version = app.config.release
        if text not in ARTIFACTS:
            inliner.reporter.error('Unsupported download type: ' + text)
            return [], []
        artifact, packaging, classifier = ARTIFACTS[text]
        title = maven_filename(artifact, version, packaging, classifier)
        uri = maven_download(GROUP_ID, artifact, version, packaging, classifier)
        node = nodes.reference(title, title, internal=False, refuri=uri)
        return [node], []

    app.add_role('maven_download', download_link_role)
| apache-2.0 |
andykimpe/chromium-test-npapi | third_party/tlslite/tlslite/utils/aes.py | 149 | 1064 | # Author: Trevor Perrin
# See the LICENSE file for legal information regarding use of this file.
"""Abstract class for AES."""
class AES(object):
    """Abstract AES cipher in CBC mode (mode 2).

    Validates construction parameters and exposes metadata; concrete
    subclasses provide the actual encrypt()/decrypt() implementations.
    """

    def __init__(self, key, mode, IV, implementation):
        if len(key) not in (16, 24, 32):
            raise AssertionError()
        if mode != 2:
            raise AssertionError()
        if len(IV) != 16:
            raise AssertionError()
        self.isBlockCipher = True
        self.block_size = 16
        self.implementation = implementation
        # The cipher name is determined by the key length in bytes.
        self.name = {16: "aes128", 24: "aes192", 32: "aes256"}[len(key)]

    def encrypt(self, plaintext):
        """CBC-Mode encryption, returns ciphertext.

        WARNING: *MAY* modify the input as well.
        """
        assert len(plaintext) % 16 == 0

    def decrypt(self, ciphertext):
        """CBC-Mode decryption, returns plaintext.

        WARNING: *MAY* modify the input as well.
        """
        assert len(ciphertext) % 16 == 0
CalcMan/PokemonGo-Map | docs/conf.py | 19 | 9973 | # -*- coding: utf-8 -*-
#
# PokemonGo-Map documentation build configuration file, created by
# sphinx-quickstart on Tue Aug 2 02:20:19 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# No Sphinx extensions are enabled; markdown support comes from recommonmark below.
extensions = []

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# Add markdown support
from recommonmark.parser import CommonMarkParser

source_parsers = {
    '.md': CommonMarkParser,
}

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md']
#source_suffix = '.md'

# The encoding of source files.
#
# source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'PokemonGo-Map'
copyright = u'2016, Pokemon Masters'
author = u'Pokemon Masters'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'3.1'
# The full version, including alpha/beta/rc tags.
release = u'3.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'PokemonGo-Map v3.1.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'PokemonGo-Mapdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'PokemonGo-Map.tex', u'PokemonGo-Map Documentation',
u'Pokemon Masters', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... This helps avoid clashes with
# user-added packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pokemongo-map', u'PokemonGo-Map Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'PokemonGo-Map', u'PokemonGo-Map Documentation',
author, 'PokemonGo-Map', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
| agpl-3.0 |
CUCWD/edx-platform | lms/djangoapps/commerce/tests/mocks.py | 17 | 5933 | """ Commerce app tests package. """
import json
import httpretty
from django.conf import settings
from . import factories
# pylint: disable=invalid-name
class mock_ecommerce_api_endpoint(object):
    """
    Base class for contextmanagers used to mock calls to api endpoints.

    The contextmanager internally activates and deactivates httpretty as
    required, therefore it is not advised to use this mock endpoint in
    test cases where httpretty is being used directly.
    """

    # Response body used when no explicit response is given; override in subclasses.
    default_response = None
    # Override this in subclasses, using one of httpretty's method constants.
    method = None

    # All mocked endpoints live under the configured E-Commerce API root.
    host = settings.ECOMMERCE_API_URL.strip('/')

    def __init__(self, response=None, status=200, expect_called=True, exception=None, reset_on_exit=True):
        """
        Keyword Arguments:
            response: a JSON-serializable Python type representing the desired response body.
            status: desired HTTP status for the response.
            expect_called: a boolean indicating whether an API request was expected; set
                to False if we should ensure that no request arrived.
            exception: raise this exception instead of returning an HTTP response when called.
            reset_on_exit (bool): Indicates if `httpretty` should be reset after the decorator exits.
        """
        self.response = response or self.default_response
        self.status = status
        self.expect_called = expect_called
        self.exception = exception
        self.reset_on_exit = reset_on_exit

    def get_uri(self):
        """
        Returns the uri to register with httpretty for this contextmanager.
        """
        return self.host + '/' + self.get_path().lstrip('/')

    def get_path(self):
        """
        Returns the path of the URI to register with httpretty for this contextmanager.

        Subclasses must override this method.

        Returns:
            str
        """
        raise NotImplementedError

    def _exception_body(self, request, uri, headers):  # pylint: disable=unused-argument
        """Helper used to create callbacks in order to have httpretty raise Exceptions."""
        raise self.exception  # pylint: disable=raising-bad-type

    def __enter__(self):
        httpretty.enable()
        httpretty.register_uri(
            self.method,
            self.get_uri(),
            status=self.status,
            # When an exception is configured, register a callback that raises
            # it instead of returning a canned JSON body.
            body=self._exception_body if self.exception is not None else json.dumps(self.response),
            adding_headers={'Content-Type': 'application/json'},
        )

    def __exit__(self, exc_type, exc_val, exc_tb):
        # NOTE(review): assumes httpretty.last_request() returns a sentinel
        # object with empty headers when no request was made -- confirm this
        # holds for the pinned httpretty version.
        called_if_expected = self.expect_called == (httpretty.last_request().headers != {})
        httpretty.disable()
        if self.reset_on_exit:
            httpretty.reset()
        assert called_if_expected
class mock_basket_order(mock_ecommerce_api_endpoint):
    """Mocks the E-Commerce API basket order endpoint (GET /baskets/<id>/order/)."""

    default_response = {'number': 1}
    method = httpretty.GET

    def __init__(self, basket_id, **kwargs):
        self.basket_id = basket_id
        super(mock_basket_order, self).__init__(**kwargs)

    def get_path(self):
        return '/baskets/{0}/order/'.format(self.basket_id)
class mock_create_refund(mock_ecommerce_api_endpoint):
    """ Mocks calls to E-Commerce API client refund creation method. """

    # POST /refunds/ answers with an empty list (no refunds created) by default.
    default_response = []
    method = httpretty.POST

    def get_path(self):
        return '/refunds/'
class mock_process_refund(mock_ecommerce_api_endpoint):
    """Mocks the E-Commerce API refund processing endpoint (PUT /refunds/<id>/process/)."""

    default_response = []
    method = httpretty.PUT

    def __init__(self, refund_id, **kwargs):
        self.refund_id = refund_id
        super(mock_process_refund, self).__init__(**kwargs)

    def get_path(self):
        return '/refunds/{0}/process/'.format(self.refund_id)
class mock_order_endpoint(mock_ecommerce_api_endpoint):
    """Mocks the E-Commerce API order detail endpoint (GET /orders/<number>/)."""

    default_response = {'number': 'EDX-100001'}
    method = httpretty.GET

    def __init__(self, order_number, **kwargs):
        super(mock_order_endpoint, self).__init__(**kwargs)
        self.order_number = order_number

    def get_path(self):
        return '/orders/{}/'.format(self.order_number)
class mock_get_orders(mock_ecommerce_api_endpoint):
    """ Mocks calls to E-Commerce API client order get method. """

    # Canned order list: one order with a single verified seat, one with two
    # verified seats, and one containing only a coupon product.
    default_response = {
        'results': [
            factories.OrderFactory(
                lines=[
                    factories.OrderLineFactory(
                        product=factories.ProductFactory(attribute_values=[factories.ProductAttributeFactory(
                            name='certificate_type',
                            value='verified'
                        )])
                    )
                ]
            ),
            factories.OrderFactory(
                lines=[
                    factories.OrderLineFactory(
                        product=factories.ProductFactory(attribute_values=[factories.ProductAttributeFactory(
                            name='certificate_type',
                            value='verified'
                        )])
                    ),
                    factories.OrderLineFactory(
                        product=factories.ProductFactory(attribute_values=[factories.ProductAttributeFactory(
                            name='certificate_type',
                            value='verified'
                        )])
                    ),
                ]
            ),
            factories.OrderFactory(
                lines=[
                    factories.OrderLineFactory(product=factories.ProductFactory(product_class='Coupon'))
                ]
            ),
        ]
    }
    method = httpretty.GET

    def get_path(self):
        return '/orders/'
| agpl-3.0 |
agushuley/django-multihost | gu_multihost/middleware.py | 1 | 1737 | import re
from django.contrib import sites
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponsePermanentRedirect
from django.utils import cache
from django.conf import settings
from . import models, get_default_site, set_default_site, set_current_site
def _process_request(request):
    """Resolve the multihost Site for this request and install it.

    Side effects: may set ``request.urlconf`` and always calls
    ``set_current_site``; may instead return a permanent redirect for
    "www."-prefixed hosts.
    """
    # Lazily cache the fall-back Site wrapping django.contrib.sites SITE_ID.
    if not get_default_site():
        set_default_site(models.Site(
            site=sites.models.Site.objects.get(id=settings.SITE_ID)
        ))
    _host = request.META["HTTP_HOST"]
    site = None
    # First Site (by "order") whose regexp matches the request host wins.
    for host in models.Site.objects.order_by("order").all():
        r = re.compile(host.host_regexp)
        if r.match(_host):
            # Swap in the host-specific URLconf, or restore the default one.
            if host.urls_module:
                setattr(request, "urlconf", str(host.urls_module))
            else:
                if hasattr(request, "urlconf"):
                    delattr(request, "urlconf")
            site = host
            break
        elif _host.startswith("www.") and r.match(_host[4:]):
            # Redirect "www.<host>" to the bare host, preserving path/query.
            # NOTE(review): the redirect target has no scheme, so it is sent
            # as a relative URL ("example.com/path") -- confirm intended.
            path = u'%s%s' % (host.site.domain, request.META["PATH_INFO"],)
            if request.META["QUERY_STRING"]:
                path = u'%s?%s' % (path, request.META["QUERY_STRING"], )
            return HttpResponsePermanentRedirect(path)
    # No pattern matched: fall back to the SITE_ID site, then the default.
    if not site:
        try:
            site = models.Site.objects.get(site__id__exact=settings.SITE_ID)
        except ObjectDoesNotExist:
            site = get_default_site()
    set_current_site(site)
# end _process_request
class MultiHostMiddleware:
    """Old-style Django middleware that routes each request to its host's Site."""

    def __init__(self):
        pass

    def process_request(self, request):
        # Delegates to the module-level helper; may return a redirect response.
        return _process_request(request)

    def process_response(self, request, response):
        # Responses differ per requested host, so make HTTP caches vary on it.
        cache.patch_vary_headers(response, ('Host',))
        return response
jmaas/cobbler | cobbler/modules/managers/genders.py | 2 | 3539 | import distutils.sysconfig
import sys
import os
import time
import cobbler.templar
from cobbler.utils import _
from cobbler.cexceptions import CX
# Make the installed cobbler package importable regardless of caller's cwd.
plib = distutils.sysconfig.get_python_lib()
mod_path = "%s/cobbler" % plib
sys.path.insert(0, mod_path)

# Input template and the genders(1) output file this module manages.
template_file = "/etc/cobbler/genders.template"
settings_file = "/etc/genders"
def register():
    """Cobbler trigger-registration hook.

    This manager should run whenever anything inside of cobbler changes, so
    it registers for the generic "change" trigger glob.
    """
    return "/var/lib/cobbler/triggers/change/*"
def write_genders_file(config, profiles_genders, distros_genders, mgmtcls_genders):
    """
    Render /etc/genders from the cobbler genders template.

    The genders file is over-written when manage_genders is set in
    /var/lib/cobbler/settings.

    Args:
        config: cobbler config object handed to the Templar engine.
        profiles_genders: dict of profile name -> comma-separated system names.
        distros_genders: dict of distro name -> comma-separated system names.
        mgmtcls_genders: dict of mgmt class name -> comma-separated system names.

    Raises:
        CX: when the template file cannot be opened.
    """
    templar_inst = cobbler.templar.Templar(config)
    try:
        f2 = open(template_file, "r")
    except (IOError, OSError):
        # Narrowed from a bare except: only file-access errors are expected here.
        raise CX(_("error reading template: %s") % template_file)
    # Guarantee the handle is closed even if read() fails (the original
    # leaked it in that case), and drop the dead template_data = "" assignment.
    with f2:
        template_data = f2.read()
    metadata = {
        "date": time.asctime(time.gmtime()),
        "profiles_genders": profiles_genders,
        "distros_genders": distros_genders,
        "mgmtcls_genders": mgmtcls_genders
    }
    templar_inst.render(template_data, metadata, settings_file, None)
def _collect_genders(api, items, filter_key):
    """
    Map each item name to a comma-separated list of its attached systems.

    Items with no attached systems are omitted entirely (matching the old
    behavior of popping empty entries).

    TODO: the lists that are created here are strictly comma separated.
    /etc/genders allows for host lists that are in the notation
    similar to: node00[01-07,08,09,70-71]
    at some point, need to come up with code to generate these types of lists.

    :param api: cobbler API handle used to look up systems.
    :param items: iterable of cobbler items (profiles, distros or mgmt classes).
    :param filter_key: keyword used by api.find_system to filter on item name.
    :returns: dict of item name -> "sys1,sys2,..." (no trailing comma).
    """
    genders = dict()
    for item in items:
        systems = api.find_system(return_list=True, **{filter_key: item.name})
        names = ",".join(system.name for system in systems)
        if names:
            genders[item.name] = names
    return genders
def run(api, args, logger):
    """
    Change-trigger entry point: regenerate /etc/genders when enabled.

    :returns: 0 on success or when manage_genders is off; 1 if the
        target file does not exist.
    """
    # do not run if we are not enabled.
    if not api.settings().manage_genders:
        return 0
    # populate one name->hostlist map per item type
    profiles_genders = _collect_genders(api, api.profiles(), "profile")
    distros_genders = _collect_genders(api, api.distros(), "distro")
    mgmtcls_genders = _collect_genders(api, api.mgmtclasses(), "mgmt_classes")
    # the file doesn't exist and for some reason the template engine
    # won't create it, so spit out an error and tell the user what to do.
    if not os.path.isfile(settings_file):
        logger.info("Error: " + settings_file + " does not exist.")
        logger.info("Please run: touch " + settings_file + " as root and try again.")
        return 1
    write_genders_file(api._collection_mgr, profiles_genders, distros_genders, mgmtcls_genders)
    return 0
| gpl-2.0 |
# Copyright 2015 Intel
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Coordination and locking utilities."""
import inspect
import random
import threading
import uuid
import eventlet
from eventlet import tpool
import itertools
from oslo_config import cfg
from oslo_log import log
import six
from tooz import coordination
from tooz import locking
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
LOG = log.getLogger(__name__)
# Options controlling the tooz coordination backend connection; registered
# below under the [coordination] group.
coordination_opts = [
    cfg.StrOpt('backend_url',
               default='file://$state_path',
               help='The backend URL to use for distributed coordination.'),
    cfg.FloatOpt('heartbeat',
                 default=1.0,
                 help='Number of seconds between heartbeats for distributed '
                      'coordination.'),
    cfg.FloatOpt('initial_reconnect_backoff',
                 default=0.1,
                 help='Initial number of seconds to wait after failed '
                      'reconnection.'),
    cfg.FloatOpt('max_reconnect_backoff',
                 default=60.0,
                 help='Maximum number of seconds between sequential '
                      'reconnection retries.'),
]
CONF = cfg.CONF
CONF.register_opts(coordination_opts, group='coordination')
class Coordinator(object):
    """Tooz coordination wrapper.

    Coordination member id is created from concatenated
    `prefix` and `agent_id` parameters.

    :param str agent_id: Agent identifier
    :param str prefix: Used to provide member identifier with a
        meaningful prefix.
    """
    def __init__(self, agent_id=None, prefix=''):
        self.coordinator = None
        self.agent_id = agent_id or str(uuid.uuid4())
        self.started = False
        self.prefix = prefix
        self._ev = None    # greenthread running heartbeat(), if spawned
        self._dead = None  # threading.Event set to stop/pace heartbeats
    def is_active(self):
        # True once _start() has established a backend connection.
        return self.coordinator is not None
    def start(self):
        """Connect to coordination backend and start heartbeat."""
        if not self.started:
            try:
                self._dead = threading.Event()
                self._start()
                self.started = True
                # NOTE(bluex): Start heartbeat in separate thread to avoid
                # being blocked by long coroutines.
                if self.coordinator and self.coordinator.requires_beating:
                    self._ev = eventlet.spawn(
                        lambda: tpool.execute(self.heartbeat))
            except coordination.ToozError:
                LOG.exception(_LE('Error starting coordination backend.'))
                raise
            LOG.info(_LI('Coordination backend started successfully.'))
    def stop(self):
        """Disconnect from coordination backend and stop heartbeat."""
        if self.started:
            self.coordinator.stop()
            self._dead.set()
            if self._ev is not None:
                # Wait for the heartbeat greenthread to exit cleanly.
                self._ev.wait()
            self._ev = None
            self.coordinator = None
            self.started = False
    def get_lock(self, name):
        """Return a Tooz backend lock.

        :param str name: The lock name that is used to identify it
            across all nodes.
        :raises exception.LockCreationFailed: if start() was not called.
        """
        # NOTE(bluex): Tooz expects lock name as a byte string.
        lock_name = (self.prefix + name).encode('ascii')
        if self.coordinator is not None:
            return self.coordinator.get_lock(lock_name)
        else:
            raise exception.LockCreationFailed(_('Coordinator uninitialized.'))
    def heartbeat(self):
        """Coordinator heartbeat.

        Method that every couple of seconds (config: `coordination.heartbeat`)
        sends heartbeat to prove that the member is not dead.

        If connection to coordination backend is broken it tries to
        reconnect every couple of seconds
        (config: `coordination.initial_reconnect_backoff` up to
        `coordination.max_reconnect_backoff`)
        """
        while self.coordinator is not None and not self._dead.is_set():
            try:
                self._heartbeat()
            except coordination.ToozConnectionError:
                self._reconnect()
            else:
                # Sleep between beats; wakes immediately when stop() is called.
                self._dead.wait(cfg.CONF.coordination.heartbeat)
    def _start(self):
        # NOTE(bluex): Tooz expects member_id as a byte string.
        member_id = (self.prefix + self.agent_id).encode('ascii')
        self.coordinator = coordination.get_coordinator(
            cfg.CONF.coordination.backend_url, member_id)
        self.coordinator.start()
    def _heartbeat(self):
        try:
            self.coordinator.heartbeat()
            return True
        except coordination.ToozConnectionError:
            # Connection errors are re-raised so heartbeat() can reconnect.
            LOG.exception(_LE('Connection error while sending a heartbeat '
                              'to coordination backend.'))
            raise
        except coordination.ToozError:
            # Other backend errors are logged but do not trigger a reconnect.
            LOG.exception(_LE('Error sending a heartbeat to coordination '
                              'backend.'))
            return False
    def _reconnect(self):
        """Reconnect with jittered exponential backoff increase."""
        LOG.info(_LI('Reconnecting to coordination backend.'))
        cap = cfg.CONF.coordination.max_reconnect_backoff
        backoff = base = cfg.CONF.coordination.initial_reconnect_backoff
        for attempt in itertools.count(1):
            try:
                self._start()
                break
            except coordination.ToozError:
                # Jittered exponential backoff, capped at max_reconnect_backoff.
                backoff = min(cap, random.uniform(base, backoff * 3))
                msg = _LW('Reconnect attempt %(attempt)s failed. '
                          'Next try in %(backoff).2fs.')
                LOG.warning(msg, {'attempt': attempt, 'backoff': backoff})
                self._dead.wait(backoff)
        LOG.info(_LI('Reconnected to coordination backend.'))
# Module-level singleton used by default in Lock and the @synchronized
# decorator below.
COORDINATOR = Coordinator(prefix='cinder-')
class Lock(locking.Lock):
    """Lock with dynamic name.

    :param str lock_name: Lock name.
    :param dict lock_data: Data for lock name formatting.
    :param coordinator: Coordinator class to use when creating lock.
        Defaults to the global coordinator.

    Using it like so::

        with Lock('mylock'):
            ...

    ensures that only one process at a time will execute code in context.

    Lock name can be formatted using Python format string syntax::

        Lock('foo-{volume.id}, {'volume': ...,})

    Available field names are keys of lock_data.
    """
    def __init__(self, lock_name, lock_data=None, coordinator=None):
        # The base-class name is a placeholder; the real backend lock is
        # created below and stored in self.lock.
        super(Lock, self).__init__(str(id(self)))
        lock_data = lock_data or {}
        self.coordinator = coordinator or COORDINATOR
        self.blocking = True
        self.lock = self._prepare_lock(lock_name, lock_data)
    def _prepare_lock(self, lock_name, lock_data):
        # Validate the name, then expand format fields from lock_data.
        if not isinstance(lock_name, six.string_types):
            raise ValueError(_('Not a valid string: %s') % lock_name)
        return self.coordinator.get_lock(lock_name.format(**lock_data))
    def acquire(self, blocking=None):
        """Attempts to acquire lock.

        :param blocking: If True, blocks until the lock is acquired. If False,
            returns right away. Otherwise, the value is used as a timeout
            value and the call returns maximum after this number of seconds.
        :return: returns true if acquired (false if not)
        :rtype: bool
        """
        blocking = self.blocking if blocking is None else blocking
        return self.lock.acquire(blocking=blocking)
    def release(self):
        """Attempts to release lock.

        The behavior of releasing a lock which was not acquired in the first
        place is undefined.
        """
        self.lock.release()
def synchronized(lock_name, blocking=True, coordinator=None):
    """Synchronization decorator.

    :param str lock_name: Lock name.
    :param blocking: If True, blocks until the lock is acquired.
        If False, raises exception when not acquired. Otherwise,
        the value is used as a timeout value and if lock is not acquired
        after this number of seconds exception is raised.
    :param coordinator: Coordinator class to use when creating lock.
        Defaults to the global coordinator.
    :raises tooz.coordination.LockAcquireFailed: if lock is not acquired

    Decorating a method like so::

        @synchronized('mylock')
        def foo(self, *args):
            ...

    ensures that only one process will execute the foo method at a time.

    Different methods can share the same lock::

        @synchronized('mylock')
        def foo(self, *args):
            ...

        @synchronized('mylock')
        def bar(self, *args):
            ...

    This way only one of either foo or bar can be executing at a time.

    Lock name can be formatted using Python format string syntax::

        @synchronized('{f_name}-{vol.id}-{snap[name]}')
        def foo(self, vol, snap):
            ...

    Available field names are: decorated function parameters and
    `f_name` as a decorated function name.
    """
    def wrap(f):
        @six.wraps(f)
        def wrapped(*a, **k):
            # Map the call's actual arguments to parameter names so they can
            # be used as format fields in the lock name.
            call_args = inspect.getcallargs(f, *a, **k)
            call_args['f_name'] = f.__name__
            lock = Lock(lock_name, call_args, coordinator)
            with lock(blocking):
                return f(*a, **k)
        return wrapped
    return wrap
| apache-2.0 |
# -*- coding: utf-8 -*-
#
# OpenERP Technical Documentation configuration file, created by
# sphinx-quickstart on Fri Feb 17 16:14:06 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# NOTE: generated by sphinx-quickstart; the commented-out assignments below
# document each setting's default and are kept for easy opt-in.
sys.path.append(os.path.abspath('_themes'))
sys.path.insert(0, os.path.abspath('../addons'))
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'sphinx.ext.autodoc', 'sphinx.ext.intersphinx',
    'sphinx.ext.todo', 'sphinx.ext.viewcode',
    'patchqueue'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'OpenERP Web Developers Documentation'
copyright = u'2012, OpenERP s.a.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '7.0'
# The full version, including alpha/beta/rc tags.
release = '7.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'flask'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
    'index': ['sidebarintro.html', 'sourcelink.html', 'searchbox.html'],
    '**': ['sidebarlogo.html', 'localtoc.html', 'relations.html',
           'sourcelink.html', 'searchbox.html']
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'openerp-web-doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'openerp-web-doc.tex', u'OpenERP Web Developers Documentation',
     u'OpenERP s.a.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'openerp-web-doc', u'OpenERP Web Developers Documentation',
     [u'OpenERP s.a.'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'OpenERPWebDocumentation', u'OpenERP Web Developers Documentation',
     u'OpenERP s.a.', 'OpenERPWebDocumentation', 'Developers documentation for the openerp-web project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
todo_include_todos = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
    'python': ('http://docs.python.org/', None),
    'openerpserver': ('http://doc.openerp.com/trunk/developers/server', None),
}
| agpl-3.0 |
import eventlet
from github import Github
from st2reactor.sensor.base import PollingSensor
# Patch blocking stdlib modules so the sensor cooperates with eventlet's
# green threads instead of blocking the whole process on I/O.
eventlet.monkey_patch(
    os=True,
    select=True,
    socket=True,
    thread=True,
    time=True)
# strftime format used when serializing GitHub event timestamps in payloads.
DATE_FORMAT_STRING = '%Y-%m-%d %H:%M:%S'
class GithubRepositorySensor(PollingSensor):
    """Polls configured GitHub repositories and dispatches triggers for
    whitelisted new events, tracking the last processed event id per repo."""
    # Only these event types are dispatched; everything else returned by the
    # GitHub events API is silently skipped in _handle_event().
    EVENT_TYPE_WHITELIST = [
        'IssuesEvent',  # Triggered when an issue is assigned, unassigned, labeled, unlabeled,
                        # opened, closed, or reopened
        'IssueCommentEvent',  # Triggered when an issue comment is created
        'ForkEvent',  # Triggered when a user forks a repository,
        'WatchEvent'  # Triggered when a user stars a repository
    ]
    def __init__(self, sensor_service, config=None, poll_interval=None):
        super(GithubRepositorySensor, self).__init__(sensor_service=sensor_service,
                                                     config=config,
                                                     poll_interval=poll_interval)
        self._trigger_ref = 'github.repository_event'
        self._logger = self._sensor_service.get_logger(__name__)
        self._client = None        # authenticated Github API client (see setup)
        self._repositories = []    # list of (name, Repository) tuples
        self._last_event_ids = {}  # repo name -> id of last processed event
    def setup(self):
        """Authenticate and resolve the configured repositories once."""
        self._client = Github(self._config['token'])
        for repository_dict in self._config['repository_sensor']['repositories']:
            user = self._client.get_user(repository_dict['user'])
            repository = user.get_repo(repository_dict['name'])
            self._repositories.append((repository_dict['name'], repository))
    def poll(self):
        """Poll every configured repository for new events."""
        for repository_name, repository_obj in self._repositories:
            self._logger.debug('Processing repository "%s"' %
                               (repository_name))
            self._process_repository(name=repository_name,
                                     repository=repository_obj)
    def _process_repository(self, name, repository):
        """
        Retrieve events for the provided repository and dispatch triggers for
        new events.

        :param name: Repository name.
        :type name: ``str``

        :param repository: Repository object.
        :type repository: :class:`Repository`
        """
        assert(isinstance(name, basestring))
        count = self._config['repository_sensor']['count']
        events = repository.get_events()[:count]
        # Process oldest first so the stored last-seen id stays monotonic.
        events = list(reversed(list(events)))
        last_event_id = self._get_last_id(name=name)
        for event in events:
            if last_event_id and int(event.id) <= int(last_event_id):
                # This event has already been processed
                continue
            self._handle_event(repository=name, event=event)
        if events:
            self._set_last_id(name=name, last_id=events[-1].id)
    def cleanup(self):
        # Nothing to tear down; the GitHub client needs no explicit close.
        pass
    def add_trigger(self, trigger):
        pass
    def update_trigger(self, trigger):
        pass
    def remove_trigger(self, trigger):
        pass
    def _get_last_id(self, name):
        """
        Return the id of the last processed event for a repository (or None),
        reloading from the sensor service's datastore when not cached.

        :param name: Repository name.
        :type name: ``str``
        """
        if not self._last_event_ids.get(name, None) and hasattr(self._sensor_service, 'get_value'):
            key_name = 'last_id.%s' % (name)
            self._last_event_ids[name] = self._sensor_service.get_value(name=key_name)
        return self._last_event_ids.get(name, None)
    def _set_last_id(self, name, last_id):
        """
        Record (and persist, when supported) the last processed event id.

        :param name: Repository name.
        :type name: ``str``
        """
        self._last_event_ids[name] = last_id
        if hasattr(self._sensor_service, 'set_value'):
            key_name = 'last_id.%s' % (name)
            self._sensor_service.set_value(name=key_name, value=last_id)
    def _handle_event(self, repository, event):
        # Filter out event types that are not whitelisted.
        if event.type not in self.EVENT_TYPE_WHITELIST:
            self._logger.debug('Skipping ignored event (type=%s)' % (event.type))
            return
        self._dispatch_trigger_for_event(repository=repository, event=event)
    def _dispatch_trigger_for_event(self, repository, event):
        """Build the trigger payload for an event and dispatch it."""
        trigger = self._trigger_ref
        created_at = event.created_at
        if created_at:
            created_at = created_at.strftime(DATE_FORMAT_STRING)
        # Common attributes
        payload = {
            'repository': repository,
            'id': event.id,
            'created_at': created_at,
            'type': event.type,
            'actor': {
                'id': event.actor.id,
                'login': event.actor.login,
                'name': event.actor.name,
                'email': event.actor.email,
                # NOTE(review): 'loaction' is a typo of 'location', but fixing
                # it changes the published payload schema that downstream
                # rules may reference -- confirm consumers before renaming.
                'loaction': event.actor.location,
                'bio': event.actor.bio,
                'url': event.actor.html_url
            },
            'payload': {}
        }
        event_specific_payload = self._get_payload_for_event(event=event)
        payload['payload'] = event_specific_payload
        self._sensor_service.dispatch(trigger=trigger, payload=payload)
    def _get_payload_for_event(self, event):
        # Pass GitHub's event-specific payload through untouched.
        payload = event.payload or {}
        return payload
| apache-2.0 |
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.document import Document
# Raised when a second Activity Cost would duplicate an existing
# (employee, activity type) pair or the default record for an activity type.
class DuplicationError(frappe.ValidationError): pass
class ActivityCost(Document):
	"""Per-employee (or default) cost record for an Activity Type."""
	def validate(self):
		self.set_title()
		self.check_unique()
	def set_title(self):
		# Title is "<employee> for <activity>" when employee-specific,
		# otherwise just the activity type (the default cost record).
		if self.employee:
			if not self.employee_name:
				self.employee_name = frappe.db.get_value("Employee", self.employee, "employee_name")
			self.title = _("{0} for {1}").format(self.employee_name, self.activity_type)
		else:
			self.title = self.activity_type
	def check_unique(self):
		# Enforce one Activity Cost per (employee, activity type) pair, and a
		# single default (employee-less) record per activity type.
		if self.employee:
			if frappe.db.sql("""select name from `tabActivity Cost` where employee_name= %s and activity_type= %s and name != %s""",
				(self.employee_name, self.activity_type, self.name)):
				frappe.throw(_("Activity Cost exists for Employee {0} against Activity Type - {1}")
					.format(self.employee, self.activity_type), DuplicationError)
		else:
			if frappe.db.sql("""select name from `tabActivity Cost` where ifnull(employee, '')='' and activity_type= %s and name != %s""",
				(self.activity_type, self.name)):
				frappe.throw(_("Default Activity Cost exists for Activity Type - {0}")
					.format(self.activity_type), DuplicationError)
| gpl-3.0 |
import sys
from distutils.errors import DistutilsOptionError
from distutils.util import strtobool
from distutils.debug import DEBUG
class Distribution_parse_config_files:
    """
    Mix-in providing forward-compatibility for functionality to be
    included by default on Python 3.7.

    Do not edit the code in this class except to update functionality
    as implemented in distutils.
    """
    def parse_config_files(self, filenames=None):
        # Mirrors distutils.dist.Distribution.parse_config_files as shipped
        # with Python 3.7 (hence the deliberate verbatim style below).
        from configparser import ConfigParser
        # Ignore install directory options if we have a venv
        if sys.prefix != sys.base_prefix:
            ignore_options = [
                'install-base', 'install-platbase', 'install-lib',
                'install-platlib', 'install-purelib', 'install-headers',
                'install-scripts', 'install-data', 'prefix', 'exec-prefix',
                'home', 'user', 'root']
        else:
            ignore_options = []
        ignore_options = frozenset(ignore_options)
        if filenames is None:
            filenames = self.find_config_files()
        if DEBUG:
            self.announce("Distribution.parse_config_files():")
        parser = ConfigParser(interpolation=None)
        for filename in filenames:
            if DEBUG:
                self.announce("  reading %s" % filename)
            parser.read(filename)
            for section in parser.sections():
                options = parser.options(section)
                opt_dict = self.get_option_dict(section)
                for opt in options:
                    if opt != '__name__' and opt not in ignore_options:
                        val = parser.get(section,opt)
                        opt = opt.replace('-', '_')
                        opt_dict[opt] = (filename, val)
            # Make the ConfigParser forget everything (so we retain
            # the original filenames that options come from)
            parser.__init__()
        # If there was a "global" section in the config file, use it
        # to set Distribution options.
        if 'global' in self.command_options:
            for (opt, (src, val)) in self.command_options['global'].items():
                alias = self.negative_opt.get(opt)
                try:
                    if alias:
                        setattr(self, alias, not strtobool(val))
                    elif opt in ('verbose', 'dry_run'): # ugh!
                        setattr(self, opt, strtobool(val))
                    else:
                        setattr(self, opt, val)
                except ValueError as msg:
                    raise DistutilsOptionError(msg)
# On Python 2 (and, below, when upstream gains the behavior) the mix-in is
# replaced with an empty class so it contributes nothing to Distribution.
if sys.version_info < (3,):
    # Python 2 behavior is sufficient
    class Distribution_parse_config_files:
        pass
if False:
    # When updated behavior is available upstream,
    # disable override here.
    class Distribution_parse_config_files:
        pass
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from selenium.webdriver.common import utils
from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.remote.command import Command
from selenium.common.exceptions import WebDriverException
import base64
from .service import Service
# Defaults for the WebDriver constructor below.
DEFAULT_TIMEOUT = 30
# Port 0 means "pick any free ephemeral port" (see WebDriver.__init__).
DEFAULT_PORT = 0
DEFAULT_HOST = None
DEFAULT_LOG_LEVEL = None
DEFAULT_LOG_FILE = None
class WebDriver(RemoteWebDriver):
    """Drives Internet Explorer through a locally spawned IEDriverServer."""
    def __init__(self, executable_path='IEDriverServer.exe', capabilities=None,
                 port=DEFAULT_PORT, timeout=DEFAULT_TIMEOUT, host=DEFAULT_HOST,
                 log_level=DEFAULT_LOG_LEVEL, log_file=DEFAULT_LOG_FILE):
        self.port = port
        if self.port == 0:
            # Port 0: ask the OS for any free ephemeral port.
            self.port = utils.free_port()
        self.host = host
        self.log_level = log_level
        self.log_file = log_file
        # Launch the IEDriverServer executable before connecting to it.
        self.iedriver = Service(executable_path, port=self.port,
            host=self.host, log_level=self.log_level, log_file=self.log_file)
        self.iedriver.start()
        if capabilities is None:
            capabilities = DesiredCapabilities.INTERNETEXPLORER
        RemoteWebDriver.__init__(
            self,
            command_executor='http://localhost:%d' % self.port,
            desired_capabilities=capabilities)
        self._is_remote = False
    def quit(self):
        """End the remote session, then stop the local driver service."""
        RemoteWebDriver.quit(self)
        self.iedriver.stop()
| apache-2.0 |
"""
Common utilities for fovea diagnostics.
"""
from __future__ import division, absolute_import
from numpy import tan
import os, sys
import PyDSTool as dst
from PyDSTool.Toolbox.phaseplane import bisection
class gen_versioner(object):
    """Manage versioned builds of PyDSTool Generator objects on disk,
    reusing previously compiled builds whose dsargs match."""
    # Maps generator type to the target language its build produces.
    _targetlangs = {'dopri': 'c', 'radau': 'c',
                    'vode': 'python', 'euler': 'python'}
    def __init__(self, cwd, model_name, name_base, gen_type, gen_version=0):
        """
        Internal utility to manage versions of Generator objects within single
        session.

        cwd = current working directory (string)

        Option to set known gen version # to reuse:
        Version 0 means not yet created.
        Works across saved and restarted sessions
        """
        self.cwd = cwd
        self.model_name = model_name
        self.name_base = name_base
        self.gen_version = gen_version
        self.gen_type = gen_type
        # keyed by version
        self.used_dsargs = {}
        self.logfile = os.path.join(self.cwd, 'models', 'gen_dsargs_log.sav')
        if os.path.exists(self.logfile):
            # reload previous list of versions
            self.used_dsargs = dst.loadObjects(self.logfile)[0]
        self.classes = {'vode': dst.Generator.Vode_ODEsystem,
                        'dopri': dst.Generator.Dopri_ODEsystem,
                        'radau': dst.Generator.Radau_ODEsystem,
                        'euler': dst.Generator.Euler_ODEsystem}
        self.targetlang = self._targetlangs[gen_type]
    def make(self, dsargs):
        """Return a cached build matching dsargs if one exists, else build."""
        # use cache if available
        for gen_ver, prev_dsargs in self.used_dsargs.items():
            if dst.filteredDict(dsargs, 'name', True) == dst.filteredDict(prev_dsargs,
                                                                      'name', True):
                # compare everything but the name, but check all up to final '_ver<X>'
                parts1 = dsargs.name.split('_')
                parts2 = prev_dsargs.name.split('_') # will have one more part
                if parts1 == parts2[:-2]:
                    print("Reusing identical build")
                    return dst.loadObjects(os.path.join(self.cwd, 'models',
                                                        prev_dsargs.name+'.sav'))[0]
        # no matches
        return self.build(dsargs)
    def build(self, dsargs, is_stiff=False):
        """Build a new versioned model; bumps gen_version to a fresh slot."""
        # re-compute in case gen type has been changed
        self.targetlang = self._targetlangs[self.gen_type]
        if is_stiff and self.targetlang == 'python' and self.gen_type == 'vode':
            dsargs.algparams['stiff'] = True
        name = dsargs.name
        if self.gen_version == 0:
            self.gen_version = 1
        # assume it's sufficient to check if .sav file there rather than .so
        found_new = False
        while not found_new:
            filename = os.path.join(self.cwd, 'models', name + '_' + \
                                    self.gen_type + \
                                    '_ver%i'%self.gen_version+'.sav')
            if not os.path.exists(filename):
                found_new = True
            else:
                print(filename + ' already exists')
                self.gen_version += 1
        dsargs.name = name+'_'+self.gen_type+'_ver%i'%self.gen_version
        gen = self.classes[self.gen_type](dsargs)
        model = dst.embed(gen, name=self.model_name, dsi_name='gen')
        self.used_dsargs[self.gen_version] = dsargs.copy()
        self.save_gen(model, name)
        return model
    def load_gen(self, name):
        """Load the saved model for `name` at the current gen_version."""
        if self.gen_version == 0:
            raise ValueError("No current version known: set gen_version")
        return dst.loadObjects(os.path.join(self.cwd, 'models', name + '_' + \
                                            self.gen_type + \
                                            '_ver%i'%self.gen_version+'.sav'))[0]
    def save_gen(self, model, name):
        """Persist the model and the dsargs log for cross-session reuse."""
        dst.saveObjects(model, os.path.join(self.cwd, 'models', name + '_' + \
                                            self.gen_type + \
                                            '_ver%i'%self.gen_version+'.sav'))
        dst.saveObjects(self.used_dsargs, self.logfile, force=True)
def fdiff(pts, ix):
    """Forward finite-difference estimate of the derivative at index ix.

    Approximates d(coords)/dt for all coordinates of the pointset between
    samples ix and ix+1, using the pointset's own time values
    (returning a Point).
    """
    times = pts.indepvararray
    dt = times[ix + 1] - times[ix]
    dvals = pts[ix + 1] - pts[ix]
    return dvals / dt
class queue(object):
    """
    Create a queue of items that has a limited size.

    As more items than the limit are added, the oldest items are
    evicted from the front of the queue (not the end, as the previous
    docstring claimed).
    """
    def __init__(self, size, data=None):
        """size: maximum retained items; data: optional initial list
        (must not exceed size)."""
        self.size = size
        if data is None:
            self.data = []
        elif type(data) is not list:
            # message corrected: it is the data argument, not the queue,
            # that must be a list
            raise TypeError('queue data must be a list')
        else:
            assert len(data) <= size, 'Initial data must be smaller than queue size'
            self.data = data

    def __call__(self, i=None):
        """Return the whole backing list, or the item at index i."""
        if i is None:
            return self.data
        return self.data[i]

    def append(self, item):
        """Append item and return the list of evicted oldest items
        ([] when still under capacity)."""
        self.data.append(item)
        if len(self.data) <= self.size:
            return []
        extra = len(self.data) - self.size
        # Always pop index 0: the old loop popped index i for
        # i in range(extra), which skips entries as the list shifts
        # whenever more than one item must be evicted.
        return [self.data.pop(0) for _ in range(extra)]
class hashedQ(object):
    """
    Bounded queue with key-based lookup.

    Keys and values are kept in parallel lists; when capacity is
    exceeded the oldest entries are evicted from the front.
    """
    def __init__(self, size):
        self.size = size
        self.keys = []
        self.data = []

    def __call__(self, key=None):
        """Return the value for *key*, or a dict of all entries when
        key is None (duplicate keys resolve to the first occurrence,
        mirroring list.index semantics)."""
        if key is None:
            ret = {}
            for key in self.keys:
                ix = self.keys.index(key)
                ret.update({key: self.data[ix]})
        else:
            ix = self.keys.index(key)
            ret = self.data[ix]
        return ret

    def append(self, item):
        """Append all key/value pairs of mapping *item* (dict-like).

        Returns False when the append overflowed the queue (oldest
        entries were evicted), True otherwise.
        """
        for key in item:
            self.keys.append(key)
            self.data.append(item[key])
        extra = len(self.keys) - self.size
        if extra > 0:
            # BUG FIX: the old loop popped index i for i in range(extra);
            # once one element is popped the lists shift, so for extra > 1
            # (a multi-key item) it evicted the wrong entries. Popping
            # index 0 each time removes exactly the oldest entries.
            for _ in range(extra):
                self.keys.pop(0)
                self.data.pop(0)
            return False
        return True
def checked_scale(sc):
    """Internal utility to verify linear scale syntax:
    None or [None, (ylo,yhi)] or [(xlo,xhi), None]
    or [(xlo,xhi), (ylo,yhi)]
    """
    if sc is None:
        return sc
    if len(sc) != 2:
        raise ValueError("Invalid argument for axis scales")
    x_scale, y_scale = sc
    if x_scale is not None and len(x_scale) != 2:
        raise ValueError("X-axis scale must have two components")
    if y_scale is not None and len(y_scale) != 2:
        raise ValueError("Y-axis scale must have two components")
    return sc
def castNullArray(null):
    """
    Convert nullcline array data to a list of pairs of arrays usable by plotter.
    """
    x_coords = null[:, 0]
    y_coords = null[:, 1]
    return [x_coords, y_coords]
def castNull(null):
    """
    Convert nullcline object to a set of points usable by plotter.
    """
    arr = null.array
    return [arr[:, 0], arr[:, 1]]
| bsd-3-clause |
ProfessionalIT/maxigenios-website | sdk/google_appengine/lib/django-1.5/django/contrib/humanize/tests.py | 100 | 9960 | from __future__ import unicode_literals
import datetime
try:
import pytz
except ImportError:
pytz = None
from django.conf import settings
from django.contrib.humanize.templatetags import humanize
from django.template import Template, Context, defaultfilters
from django.test import TestCase
from django.test.utils import override_settings
from django.utils.html import escape
from django.utils.timezone import utc
from django.utils import translation
from django.utils.translation import ugettext as _
from django.utils import tzinfo
from django.utils.unittest import skipIf
# Mock out datetime in some tests so they don't fail occasionally when they
# run too slow. Use a fixed datetime for datetime.now(). DST change in
# America/Chicago (the default time zone) happened on March 11th in 2012.
# 'now' is two days before that switch; MockDateTime below returns it.
now = datetime.datetime(2012, 3, 9, 22, 30)
class MockDateTime(datetime.datetime):
    """datetime.datetime stand-in whose now() always returns the fixed
    module-level `now`, keeping time-sensitive tests deterministic."""
    @classmethod
    def now(cls, tz=None):
        # First parameter renamed self -> cls: a classmethod receives the
        # class object (PEP 8 convention). Behavior is unchanged.
        if tz is None or tz.utcoffset(now) is None:
            return now
        else:
            # equals now.replace(tzinfo=utc)
            return now.replace(tzinfo=tz) + tz.utcoffset(now)
class HumanizeTests(TestCase):
    def humanize_tester(self, test_list, result_list, method):
        # Render each test value through the given humanize filter and
        # compare with the expected (escaped) result.
        # NOTE: zip() stops at the shorter list, so a length mismatch is
        # silently truncated rather than raising (the old comment about
        # max() predates the switch to zip).
        for test_content, result in zip(test_list, result_list):
            t = Template('{%% load humanize %%}{{ test_content|%s }}' % method)
            rendered = t.render(Context(locals())).strip()
            self.assertEqual(rendered, escape(result),
                             msg="%s test failed, produced '%s', should've produced '%s'" % (method, rendered, result))
    def test_ordinal(self):
        # Non-numeric strings and None must pass through unchanged.
        test_list = ('1', '2', '3', '4', '11', '12',
                     '13', '101', '102', '103', '111',
                     'something else', None)
        result_list = ('1st', '2nd', '3rd', '4th', '11th',
                       '12th', '13th', '101st', '102nd', '103rd',
                       '111th', 'something else', None)
        self.humanize_tester(test_list, result_list, 'ordinal')
    def test_intcomma(self):
        # Numbers and numeric strings are grouped; None passes through.
        test_list = (100, 1000, 10123, 10311, 1000000, 1234567.25,
                     '100', '1000', '10123', '10311', '1000000', '1234567.1234567',
                     None)
        result_list = ('100', '1,000', '10,123', '10,311', '1,000,000', '1,234,567.25',
                       '100', '1,000', '10,123', '10,311', '1,000,000', '1,234,567.1234567',
                       None)
        self.humanize_tester(test_list, result_list, 'intcomma')
    def test_l10n_intcomma(self):
        # With USE_L10N on but USE_THOUSAND_SEPARATOR off, output matches
        # the plain test_intcomma expectations.
        test_list = (100, 1000, 10123, 10311, 1000000, 1234567.25,
                     '100', '1000', '10123', '10311', '1000000', '1234567.1234567',
                     None)
        result_list = ('100', '1,000', '10,123', '10,311', '1,000,000', '1,234,567.25',
                       '100', '1,000', '10,123', '10,311', '1,000,000', '1,234,567.1234567',
                       None)
        with self.settings(USE_L10N=True, USE_THOUSAND_SEPARATOR=False):
            self.humanize_tester(test_list, result_list, 'intcomma')
def test_intcomma_without_number_grouping(self):
# Regression for #17414
with translation.override('ja'):
with self.settings(USE_L10N=True):
self.humanize_tester([100], ['100'], 'intcomma')
    def test_intword(self):
        # Large numbers are abbreviated with scale names (million ...
        # decillion); small values and None pass through unchanged.
        test_list = ('100', '1000000', '1200000', '1290000',
                     '1000000000', '2000000000', '6000000000000',
                     '1300000000000000', '3500000000000000000000',
                     '8100000000000000000000000000000000', None)
        result_list = ('100', '1.0 million', '1.2 million', '1.3 million',
                       '1.0 billion', '2.0 billion', '6.0 trillion',
                       '1.3 quadrillion', '3.5 sextillion',
                       '8.1 decillion', None)
        self.humanize_tester(test_list, result_list, 'intword')
    def test_i18n_intcomma(self):
        # German locale with thousand separators: '.' groups thousands and
        # ',' is the decimal separator.
        test_list = (100, 1000, 10123, 10311, 1000000, 1234567.25,
                     '100', '1000', '10123', '10311', '1000000', None)
        result_list = ('100', '1.000', '10.123', '10.311', '1.000.000', '1.234.567,25',
                       '100', '1.000', '10.123', '10.311', '1.000.000', None)
        with self.settings(USE_L10N=True, USE_THOUSAND_SEPARATOR=True):
            with translation.override('de'):
                self.humanize_tester(test_list, result_list, 'intcomma')
    def test_i18n_intword(self):
        # German scale names are pluralized and use ',' as decimal mark.
        test_list = ('100', '1000000', '1200000', '1290000',
                     '1000000000', '2000000000', '6000000000000')
        result_list = ('100', '1,0 Million', '1,2 Millionen', '1,3 Millionen',
                       '1,0 Milliarde', '2,0 Milliarden', '6,0 Billionen')
        with self.settings(USE_L10N=True, USE_THOUSAND_SEPARATOR=True):
            with translation.override('de'):
                self.humanize_tester(test_list, result_list, 'intword')
def test_apnumber(self):
test_list = [str(x) for x in range(1, 11)]
test_list.append(None)
result_list = ('one', 'two', 'three', 'four', 'five', 'six',
'seven', 'eight', 'nine', '10', None)
self.humanize_tester(test_list, result_list, 'apnumber')
    def test_naturalday(self):
        # today/yesterday/tomorrow get words; other dates fall back to the
        # default date filter; non-dates and None pass through.
        today = datetime.date.today()
        yesterday = today - datetime.timedelta(days=1)
        tomorrow = today + datetime.timedelta(days=1)
        someday = today - datetime.timedelta(days=10)
        notdate = "I'm not a date value"
        test_list = (today, yesterday, tomorrow, someday, notdate, None)
        someday_result = defaultfilters.date(someday)
        result_list = (_('today'), _('yesterday'), _('tomorrow'),
                       someday_result, "I'm not a date value", None)
        self.humanize_tester(test_list, result_list, 'naturalday')
    def test_naturalday_tz(self):
        # Two aware datetimes 24h apart can never humanize to the same day.
        today = datetime.date.today()
        tz_one = tzinfo.FixedOffset(datetime.timedelta(hours=-12))
        tz_two = tzinfo.FixedOffset(datetime.timedelta(hours=12))
        # Can be today or yesterday
        date_one = datetime.datetime(today.year, today.month, today.day, tzinfo=tz_one)
        naturalday_one = humanize.naturalday(date_one)
        # Can be today or tomorrow
        date_two = datetime.datetime(today.year, today.month, today.day, tzinfo=tz_two)
        naturalday_two = humanize.naturalday(date_two)
        # As 24h of difference they will never be the same
        self.assertNotEqual(naturalday_one, naturalday_two)
    @skipIf(settings.TIME_ZONE != "America/Chicago" and pytz is None,
            "this test requires pytz when a non-default time zone is set")
    def test_naturalday_uses_localtime(self):
        # Regression for #18504
        # This is 2012-03-08T19:30:00-06:00 in America/Chicago
        dt = datetime.datetime(2012, 3, 9, 1, 30, tzinfo=utc)
        # Swap in MockDateTime so 'now' is fixed, restore afterwards.
        orig_humanize_datetime, humanize.datetime = humanize.datetime, MockDateTime
        try:
            with override_settings(TIME_ZONE="America/Chicago", USE_TZ=True):
                self.humanize_tester([dt], ['yesterday'], 'naturalday')
        finally:
            humanize.datetime = orig_humanize_datetime
def test_naturaltime(self):
class naive(datetime.tzinfo):
def utcoffset(self, dt):
return None
test_list = [
now,
now - datetime.timedelta(seconds=1),
now - datetime.timedelta(seconds=30),
now - datetime.timedelta(minutes=1, seconds=30),
now - datetime.timedelta(minutes=2),
now - datetime.timedelta(hours=1, minutes=30, seconds=30),
now - datetime.timedelta(hours=23, minutes=50, seconds=50),
now - datetime.timedelta(days=1),
now - datetime.timedelta(days=500),
now + datetime.timedelta(seconds=1),
now + datetime.timedelta(seconds=30),
now + datetime.timedelta(minutes=1, seconds=30),
now + datetime.timedelta(minutes=2),
now + datetime.timedelta(hours=1, minutes=30, seconds=30),
now + datetime.timedelta(hours=23, minutes=50, seconds=50),
now + datetime.timedelta(days=1),
now + datetime.timedelta(days=2, hours=6),
now + datetime.timedelta(days=500),
now.replace(tzinfo=naive()),
now.replace(tzinfo=utc),
]
result_list = [
'now',
'a second ago',
'30 seconds ago',
'a minute ago',
'2 minutes ago',
'an hour ago',
'23 hours ago',
'1 day ago',
'1 year, 4 months ago',
'a second from now',
'30 seconds from now',
'a minute from now',
'2 minutes from now',
'an hour from now',
'23 hours from now',
'1 day from now',
'2 days, 6 hours from now',
'1 year, 4 months from now',
'now',
'now',
]
# Because of the DST change, 2 days and 6 hours after the chosen
# date in naive arithmetic is only 2 days and 5 hours after in
# aware arithmetic.
result_list_with_tz_support = result_list[:]
assert result_list_with_tz_support[-4] == '2 days, 6 hours from now'
result_list_with_tz_support[-4] == '2 days, 5 hours from now'
orig_humanize_datetime, humanize.datetime = humanize.datetime, MockDateTime
try:
self.humanize_tester(test_list, result_list, 'naturaltime')
with override_settings(USE_TZ=True):
self.humanize_tester(test_list, result_list_with_tz_support, 'naturaltime')
finally:
humanize.datetime = orig_humanize_datetime
| mit |
ge0rgi/cinder | cinder/volume/drivers/nexenta/nfs.py | 5 | 34331 | # Copyright 2016 Nexenta Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import os
import re
import six
from eventlet import greenthread
from oslo_log import log as logging
from oslo_utils import units
from cinder import context
from cinder import db
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder import interface
from cinder.volume.drivers.nexenta import jsonrpc
from cinder.volume.drivers.nexenta import options
from cinder.volume.drivers.nexenta import utils
from cinder.volume.drivers import nfs
VERSION = '1.3.1'
LOG = logging.getLogger(__name__)
@interface.volumedriver
class NexentaNfsDriver(nfs.NfsDriver):  # pylint: disable=R0921
    """Executes volume driver commands on Nexenta Appliance.

    Version history:

    .. code-block:: none

        1.0.0 - Initial driver version.
        1.1.0 - Auto sharing for enclosing folder.
        1.1.1 - Added caching for NexentaStor appliance 'volroot' value.
        1.1.2 - Ignore "folder does not exist" error in delete_volume and
                delete_snapshot method.
        1.1.3 - Redefined volume_backend_name attribute inherited from
                RemoteFsDriver.
        1.2.0 - Added migrate and retype methods.
        1.3.0 - Extend volume method.
        1.3.1 - Cache capacity info and check shared folders on setup.
    """
    # Prefix used to look up '<prefix>_*' options on self.configuration.
    driver_prefix = 'nexenta'
    volume_backend_name = 'NexentaNfsDriver'
    VERSION = VERSION
    # Name of the backing file created inside each per-volume folder.
    VOLUME_FILE_NAME = 'volume'
    # ThirdPartySystems wiki page
    CI_WIKI_NAME = "Nexenta_CI"
    def __init__(self, *args, **kwargs):
        # Register all Nexenta option groups and cache frequently used
        # configuration values as attributes.
        super(NexentaNfsDriver, self).__init__(*args, **kwargs)
        if self.configuration:
            self.configuration.append_config_values(
                options.NEXENTA_CONNECTION_OPTS)
            self.configuration.append_config_values(
                options.NEXENTA_NFS_OPTS)
            self.configuration.append_config_values(
                options.NEXENTA_DATASET_OPTS)
            self.configuration.append_config_values(
                options.NEXENTA_RRMGR_OPTS)
        self.nms_cache_volroot = self.configuration.nexenta_nms_cache_volroot
        self.rrmgr_compression = self.configuration.nexenta_rrmgr_compression
        self.rrmgr_tcp_buf_size = self.configuration.nexenta_rrmgr_tcp_buf_size
        self.rrmgr_connections = self.configuration.nexenta_rrmgr_connections
        self.nfs_mount_point_base = self.configuration.nexenta_mount_point_base
        self.volume_compression = (
            self.configuration.nexenta_dataset_compression)
        self.volume_deduplication = self.configuration.nexenta_dataset_dedup
        self.volume_description = (
            self.configuration.nexenta_dataset_description)
        self.sparsed_volumes = self.configuration.nexenta_sparsed_volumes
        # Per-appliance / per-share caches: NMS volroot, share->NMS proxy,
        # share->NFS protocol version, share->capacity info.
        self._nms2volroot = {}
        self.share2nms = {}
        self.nfs_versions = {}
        self.shares_with_capacities = {}
@property
def backend_name(self):
backend_name = None
if self.configuration:
backend_name = self.configuration.safe_get('volume_backend_name')
if not backend_name:
backend_name = self.__class__.__name__
return backend_name
def do_setup(self, context):
shares_config = getattr(self.configuration, self.driver_prefix +
'_shares_config')
if shares_config:
self.configuration.nfs_shares_config = shares_config
super(NexentaNfsDriver, self).do_setup(context)
self._load_shares_config(shares_config)
self._mount_subfolders()
def check_for_setup_error(self):
"""Verify that the volume for our folder exists.
:raise: :py:exc:`LookupError`
"""
if self.share2nms:
for nfs_share in self.share2nms:
nms = self.share2nms[nfs_share]
volume_name, dataset = self._get_share_datasets(nfs_share)
if not nms.volume.object_exists(volume_name):
raise LookupError(_("Volume %s does not exist in Nexenta "
"Store appliance"), volume_name)
folder = '%s/%s' % (volume_name, dataset)
if not nms.folder.object_exists(folder):
raise LookupError(_("Folder %s does not exist in Nexenta "
"Store appliance"), folder)
if (folder not in nms.netstorsvc.get_shared_folders(
'svc:/network/nfs/server:default', '')):
self._share_folder(nms, volume_name, dataset)
self._get_capacity_info(nfs_share)
    def migrate_volume(self, ctxt, volume, host):
        """Migrate if volume and host are managed by Nexenta appliance.

        :param ctxt: context
        :param volume: a dictionary describing the volume to migrate
        :param host: a dictionary describing the host to migrate to
        :returns: tuple (migrated, model_update); (False, None) when the
            request cannot be handled by this driver.
        """
        LOG.debug('Enter: migrate_volume: id=%(id)s, host=%(host)s',
                  {'id': volume['id'], 'host': host})
        false_ret = (False, None)
        if volume['status'] not in ('available', 'retyping'):
            LOG.warning(_LW("Volume status must be 'available' or 'retyping'."
                            " Current volume status: %s"), volume['status'])
            return false_ret
        if 'capabilities' not in host:
            LOG.warning(_LW("Unsupported host. No capabilities found"))
            return false_ret
        capabilities = host['capabilities']
        ns_shares = capabilities['ns_shares']
        dst_parts = capabilities['location_info'].split(':')
        dst_host, dst_volume = dst_parts[1:]
        # Only migrate between NexentaNfsDriver backends with enough space.
        if (capabilities.get('vendor_name') != 'Nexenta' or
                dst_parts[0] != self.__class__.__name__ or
                capabilities['free_capacity_gb'] < volume['size']):
            return false_ret
        nms = self.share2nms[volume['provider_location']]
        ssh_bindings = nms.appliance.ssh_list_bindings()
        shares = []
        for bind in ssh_bindings:
            for share in ns_shares:
                if (share.startswith(ssh_bindings[bind][3]) and
                        ns_shares[share] >= volume['size']):
                    shares.append(share)
        if len(shares) == 0:
            # NOTE(review): 'share' here is the last value left over from
            # the loop above and is undefined when ns_shares is empty --
            # verify before relying on this log message.
            LOG.warning(_LW("Remote NexentaStor appliance at %s should be "
                            "SSH-bound."), share)
            return false_ret
        # Pick the SSH-bound share with the most free space.
        share = sorted(shares, key=ns_shares.get, reverse=True)[0]
        snapshot = {
            'volume_name': volume['name'],
            'volume_id': volume['id'],
            'name': utils.get_migrate_snapshot_name(volume)
        }
        self.create_snapshot(snapshot)
        location = volume['provider_location']
        src = '%(share)s/%(volume)s@%(snapshot)s' % {
            'share': location.split(':')[1].split('volumes/')[1],
            'volume': volume['name'],
            'snapshot': snapshot['name']
        }
        dst = ':'.join([dst_host, dst_volume.split('/volumes/')[1]])
        try:
            # Replicate the snapshot to the destination via rrmgr.
            nms.appliance.execute(self._get_zfs_send_recv_cmd(src, dst))
        except exception.NexentaException as exc:
            LOG.warning(_LW("Cannot send source snapshot %(src)s to "
                            "destination %(dst)s. Reason: %(exc)s"),
                        {'src': src, 'dst': dst, 'exc': exc})
            return false_ret
        finally:
            try:
                self.delete_snapshot(snapshot)
            except exception.NexentaException as exc:
                LOG.warning(_LW("Cannot delete temporary source snapshot "
                                "%(src)s on NexentaStor Appliance: %(exc)s"),
                            {'src': src, 'exc': exc})
        try:
            self.delete_volume(volume)
        except exception.NexentaException as exc:
            LOG.warning(_LW("Cannot delete source volume %(volume)s on "
                            "NexentaStor Appliance: %(exc)s"),
                        {'volume': volume['name'], 'exc': exc})
        dst_nms = self._get_nms_for_url(capabilities['nms_url'])
        dst_snapshot = '%s/%s@%s' % (dst_volume.split('volumes/')[1],
                                     volume['name'], snapshot['name'])
        try:
            dst_nms.snapshot.destroy(dst_snapshot, '')
        except exception.NexentaException as exc:
            LOG.warning(_LW("Cannot delete temporary destination snapshot "
                            "%(dst)s on NexentaStor Appliance: %(exc)s"),
                        {'dst': dst_snapshot, 'exc': exc})
        return True, {'provider_location': share}
def _get_zfs_send_recv_cmd(self, src, dst):
"""Returns rrmgr command for source and destination."""
return utils.get_rrmgr_cmd(src, dst,
compression=self.rrmgr_compression,
tcp_buf_size=self.rrmgr_tcp_buf_size,
connections=self.rrmgr_connections)
def initialize_connection(self, volume, connector):
"""Allow connection to connector and return connection info.
:param volume: volume reference
:param connector: connector reference
"""
export = '%s/%s' % (volume['provider_location'], volume['name'])
data = {'export': export, 'name': 'volume'}
if volume['provider_location'] in self.shares:
data['options'] = self.shares[volume['provider_location']]
return {
'driver_volume_type': self.driver_volume_type,
'data': data
}
    def retype(self, context, volume, new_type, diff, host):
        """Convert the volume to be of the new type.

        :param context: Context
        :param volume: A dictionary describing the volume to migrate
        :param new_type: A dictionary describing the volume type to convert to
        :param diff: A dictionary with the difference between the two types
        :param host: A dictionary describing the host to migrate to, where
                     host['host'] is its name, and host['capabilities'] is a
                     dictionary of its reported capabilities.
        :returns: tuple (retyped or migrated, model_update) on success,
                  False / (False, None) on failure.
        """
        LOG.debug('Retype volume request %(vol)s to be %(type)s '
                  '(host: %(host)s), diff %(diff)s.',
                  {'vol': volume['name'],
                   'type': new_type,
                   'host': host,
                   'diff': diff})
        # NOTE(review): this local mapping (extra-spec name -> NMS folder
        # property) shadows the imported `options` module for the rest of
        # the method.
        options = dict(
            compression='compression',
            dedup='dedup',
            description='nms:description'
        )
        retyped = False
        migrated = False
        model_update = None
        src_backend = self.__class__.__name__
        dst_backend = host['capabilities']['location_info'].split(':')[0]
        if src_backend != dst_backend:
            LOG.warning(_LW('Cannot retype from %(src_backend)s to '
                            '%(dst_backend)s.'),
                        {
                            'src_backend': src_backend,
                            'dst_backend': dst_backend
                        })
            return False
        hosts = (volume['host'], host['host'])
        old, new = hosts
        if old != new:
            # Destination host differs: migrate first, then retype there.
            migrated, provider_location = self.migrate_volume(
                context, volume, host)
        if not migrated:
            provider_location = volume['provider_location']
            nms = self.share2nms[provider_location]
        else:
            nms_url = host['capabilities']['nms_url']
            nms = self._get_nms_for_url(nms_url)
            model_update = provider_location
            provider_location = provider_location['provider_location']
        share = provider_location.split(':')[1].split('volumes/')[1]
        folder = '%(share)s/%(volume)s' % {
            'share': share,
            'volume': volume['name']
        }
        for opt in options:
            old, new = diff.get('extra_specs').get(opt, (False, False))
            if old != new:
                LOG.debug('Changing %(opt)s from %(old)s to %(new)s.',
                          {'opt': opt, 'old': old, 'new': new})
                try:
                    nms.folder.set_child_prop(
                        folder, options[opt], new)
                    retyped = True
                except exception.NexentaException:
                    LOG.error(_LE('Error trying to change %(opt)s'
                                  ' from %(old)s to %(new)s'),
                              {'opt': opt, 'old': old, 'new': new})
                    return False, None
        return retyped or migrated, model_update
    def _do_create_volume(self, volume):
        """Create the backing folder, share it, and create the volume file.

        Rolls the folder back on any NexentaException and re-raises.

        :param volume: volume reference (uses 'provider_location', 'name'
            and 'size')
        """
        nfs_share = volume['provider_location']
        nms = self.share2nms[nfs_share]
        vol, dataset = self._get_share_datasets(nfs_share)
        folder = '%s/%s' % (dataset, volume['name'])
        LOG.debug('Creating folder on Nexenta Store %s', folder)
        nms.folder.create_with_props(
            vol, folder,
            {'compression': self.configuration.nexenta_dataset_compression}
        )
        volume_path = self.remote_path(volume)
        volume_size = volume['size']
        try:
            self._share_folder(nms, vol, folder)
            if getattr(self.configuration,
                       self.driver_prefix + '_sparsed_volumes'):
                self._create_sparsed_file(nms, volume_path, volume_size)
            else:
                folder_path = '%s/%s' % (vol, folder)
                compression = nms.folder.get_child_prop(
                    folder_path, 'compression')
                if compression != 'off':
                    # Disable compression, because otherwise will not use space
                    # on disk.
                    nms.folder.set_child_prop(
                        folder_path, 'compression', 'off')
                try:
                    self._create_regular_file(nms, volume_path, volume_size)
                finally:
                    if compression != 'off':
                        # Backup default compression value if it was changed.
                        nms.folder.set_child_prop(
                            folder_path, 'compression', compression)
            self._set_rw_permissions_for_all(nms, volume_path)
            # NFS < v4 cannot traverse into sub-shares automatically.
            if self._get_nfs_server_version(nfs_share) < 4:
                sub_share, mnt_path = self._get_subshare_mount_point(nfs_share,
                                                                     volume)
                self._ensure_share_mounted(sub_share, mnt_path)
            self._get_capacity_info(nfs_share)
        except exception.NexentaException:
            # Best-effort rollback of the folder created above.
            try:
                nms.folder.destroy('%s/%s' % (vol, folder))
            except exception.NexentaException:
                LOG.warning(_LW("Cannot destroy created folder: "
                                "%(vol)s/%(folder)s"),
                            {'vol': vol, 'folder': folder})
            raise
    def create_volume_from_snapshot(self, volume, snapshot):
        """Create new volume from other's snapshot on appliance.

        :param volume: reference of volume to be created
        :param snapshot: reference of source snapshot
        :returns: model update dict with 'provider_location'
        """
        self._ensure_shares_mounted()
        snapshot_vol = self._get_snapshot_volume(snapshot)
        nfs_share = snapshot_vol['provider_location']
        # New volume lives on the same share as the snapshot's volume.
        volume['provider_location'] = nfs_share
        nms = self.share2nms[nfs_share]
        vol, dataset = self._get_share_datasets(nfs_share)
        snapshot_name = '%s/%s/%s@%s' % (vol, dataset, snapshot['volume_name'],
                                         snapshot['name'])
        folder = '%s/%s' % (dataset, volume['name'])
        nms.folder.clone(snapshot_name, '%s/%s' % (vol, folder))
        try:
            self._share_folder(nms, vol, folder)
        except exception.NexentaException:
            # Roll back the clone if sharing failed.
            try:
                nms.folder.destroy('%s/%s' % (vol, folder), '')
            except exception.NexentaException:
                LOG.warning(_LW("Cannot destroy cloned folder: "
                                "%(vol)s/%(folder)s"),
                            {'vol': vol, 'folder': folder})
            raise
        # NFS < v4 cannot traverse into sub-shares automatically.
        if self._get_nfs_server_version(nfs_share) < 4:
            sub_share, mnt_path = self._get_subshare_mount_point(nfs_share,
                                                                 volume)
            self._ensure_share_mounted(sub_share, mnt_path)
        # Grow the clone when the requested size exceeds the snapshot's.
        if (('size' in volume) and (
                volume['size'] > snapshot['volume_size'])):
            self.extend_volume(volume, volume['size'])
        return {'provider_location': volume['provider_location']}
    def create_cloned_volume(self, volume, src_vref):
        """Creates a clone of the specified volume.

        :param volume: new volume reference
        :param src_vref: source volume reference
        :returns: model update dict from create_volume_from_snapshot
        """
        LOG.info(_LI('Creating clone of volume: %s'), src_vref['id'])
        snapshot = {'volume_name': src_vref['name'],
                    'volume_id': src_vref['id'],
                    'volume_size': src_vref['size'],
                    'name': self._get_clone_snapshot_name(volume)}
        # We don't delete this snapshot, because this snapshot will be origin
        # of new volume. This snapshot will be automatically promoted by NMS
        # when user will delete its origin.
        self.create_snapshot(snapshot)
        try:
            return self.create_volume_from_snapshot(volume, snapshot)
        except exception.NexentaException:
            # Clone failed: best-effort cleanup of the snapshot we created.
            LOG.error(_LE('Volume creation failed, deleting created snapshot '
                          '%(volume_name)s@%(name)s'), snapshot)
            try:
                self.delete_snapshot(snapshot)
            except (exception.NexentaException, exception.SnapshotIsBusy):
                LOG.warning(_LW('Failed to delete zfs snapshot '
                                '%(volume_name)s@%(name)s'), snapshot)
            raise
def delete_volume(self, volume):
"""Deletes a logical volume.
:param volume: volume reference
"""
nfs_share = volume.get('provider_location')
if nfs_share:
nms = self.share2nms[nfs_share]
vol, parent_folder = self._get_share_datasets(nfs_share)
folder = '%s/%s/%s' % (vol, parent_folder, volume['name'])
mount_path = self.remote_path(volume).strip(
'/%s' % self.VOLUME_FILE_NAME)
if mount_path in self._remotefsclient._read_mounts():
self._execute('umount', mount_path, run_as_root=True)
try:
props = nms.folder.get_child_props(folder, 'origin') or {}
nms.folder.destroy(folder, '-r')
except exception.NexentaException as exc:
if 'does not exist' in exc.args[0]:
LOG.info(_LI('Folder %s does not exist, it was '
'already deleted.'), folder)
return
raise
self._get_capacity_info(nfs_share)
origin = props.get('origin')
if origin and self._is_clone_snapshot_name(origin):
try:
nms.snapshot.destroy(origin, '')
except exception.NexentaException as exc:
if 'does not exist' in exc.args[0]:
LOG.info(_LI('Snapshot %s does not exist, it was '
'already deleted.'), origin)
return
raise
    def extend_volume(self, volume, new_size):
        """Extend an existing volume.

        :param volume: volume reference
        :param new_size: volume new size in GB
        """
        LOG.info(_LI('Extending volume: %(id)s New size: %(size)s GB'),
                 {'id': volume['id'], 'size': new_size})
        nfs_share = volume['provider_location']
        nms = self.share2nms[nfs_share]
        volume_path = self.remote_path(volume)
        if getattr(self.configuration,
                   self.driver_prefix + '_sparsed_volumes'):
            # Sparse volume: simply truncate the file up to the new size.
            self._create_sparsed_file(nms, volume_path, new_size)
        else:
            block_size_mb = 1
            # NOTE(review): written for Python 2 integer division; under
            # Python 3 these are floats truncated by the %d format -- verify.
            block_count = ((new_size - volume['size']) * units.Gi /
                           (block_size_mb * units.Mi))
            # Append zeros after the current end of the file.
            nms.appliance.execute(
                'dd if=/dev/zero seek=%(seek)d of=%(path)s'
                ' bs=%(bs)dM count=%(count)d' % {
                    'seek': volume['size'] * units.Gi / block_size_mb,
                    'path': volume_path,
                    'bs': block_size_mb,
                    'count': block_count
                }
            )
def create_snapshot(self, snapshot):
"""Creates a snapshot.
:param snapshot: snapshot reference
"""
volume = self._get_snapshot_volume(snapshot)
nfs_share = volume['provider_location']
nms = self.share2nms[nfs_share]
vol, dataset = self._get_share_datasets(nfs_share)
folder = '%s/%s/%s' % (vol, dataset, volume['name'])
nms.folder.create_snapshot(folder, snapshot['name'], '-r')
def delete_snapshot(self, snapshot):
"""Deletes a snapshot.
:param snapshot: snapshot reference
"""
volume = self._get_snapshot_volume(snapshot)
nfs_share = volume['provider_location']
nms = self.share2nms[nfs_share]
vol, dataset = self._get_share_datasets(nfs_share)
folder = '%s/%s/%s' % (vol, dataset, volume['name'])
try:
nms.snapshot.destroy('%s@%s' % (folder, snapshot['name']), '')
except exception.NexentaException as exc:
if 'does not exist' in exc.args[0]:
LOG.info(_LI('Snapshot %(folder)s@%(snapshot)s does not '
'exist, it was already deleted.'),
{
'folder': folder,
'snapshot': snapshot,
})
return
elif 'has dependent clones' in exc.args[0]:
LOG.info(_LI('Snapshot %(folder)s@%(snapshot)s has dependent '
'clones, it will be deleted later.'),
{
'folder': folder,
'snapshot': snapshot,
})
return
def _create_sparsed_file(self, nms, path, size):
"""Creates file with 0 disk usage.
:param nms: nms object
:param path: path to new file
:param size: size of file
"""
nms.appliance.execute(
'truncate --size %(size)dG %(path)s' % {
'path': path,
'size': size
}
)
def _create_regular_file(self, nms, path, size):
"""Creates regular file of given size.
Takes a lot of time for large files.
:param nms: nms object
:param path: path to new file
:param size: size of file
"""
block_size_mb = 1
block_count = size * units.Gi / (block_size_mb * units.Mi)
LOG.info(_LI('Creating regular file: %s.'
'This may take some time.'), path)
nms.appliance.execute(
'dd if=/dev/zero of=%(path)s bs=%(bs)dM count=%(count)d' % {
'path': path,
'bs': block_size_mb,
'count': block_count
}
)
LOG.info(_LI('Regular file: %s created.'), path)
def _set_rw_permissions_for_all(self, nms, path):
"""Sets 666 permissions for the path.
:param nms: nms object
:param path: path to file
"""
nms.appliance.execute('chmod ugo+rw %s' % path)
def local_path(self, volume):
"""Get volume path (mounted locally fs path) for given volume.
:param volume: volume reference
"""
nfs_share = volume['provider_location']
return os.path.join(self._get_mount_point_for_share(nfs_share),
volume['name'], 'volume')
def _get_mount_point_for_share(self, nfs_share):
"""Returns path to mount point NFS share.
:param nfs_share: example 172.18.194.100:/var/nfs
"""
nfs_share = nfs_share.encode('utf-8')
return os.path.join(self.configuration.nexenta_mount_point_base,
hashlib.md5(nfs_share).hexdigest())
def remote_path(self, volume):
"""Get volume path (mounted remotely fs path) for given volume.
:param volume: volume reference
"""
nfs_share = volume['provider_location']
share = nfs_share.split(':')[1].rstrip('/')
return '%s/%s/volume' % (share, volume['name'])
def _share_folder(self, nms, volume, folder):
"""Share NFS folder on NexentaStor Appliance.
:param nms: nms object
:param volume: volume name
:param folder: folder name
"""
path = '%s/%s' % (volume, folder.lstrip('/'))
share_opts = {
'read_write': '*',
'read_only': '',
'root': 'nobody',
'extra_options': 'anon=0',
'recursive': 'true',
'anonymous_rw': 'true',
}
LOG.debug('Sharing folder %s on Nexenta Store', folder)
nms.netstorsvc.share_folder('svc:/network/nfs/server:default', path,
share_opts)
    def _load_shares_config(self, share_file):
        """Parse the shares config file into self.shares and self.share2nms.

        Each non-comment line:
        '<host>:/<share> <nms_url> [mount options]'.
        """
        self.shares = {}
        self.share2nms = {}
        for share in self._read_config_file(share_file):
            # A configuration line may be either:
            #  host:/share_name  http://user:pass@host:[port]/
            # or, with explicit mount options appended:
            #  host:/share_name  http://user:pass@host:[port]/
            #      -o options=123,rw --other
            if not share.strip():
                continue
            if share.startswith('#'):
                continue
            # Split into at most 3 fields: address, NMS URL, mount options.
            share_info = re.split(r'\s+', share, 2)
            share_address = share_info[0].strip()
            nms_url = share_info[1].strip()
            share_opts = share_info[2].strip() if len(share_info) > 2 else None
            if not re.match(r'.+:/.+', share_address):
                LOG.warning(_LW("Share %s ignored due to invalid format. "
                                "Must be of form address:/export."),
                            share_address)
                continue
            self.shares[share_address] = share_opts
            self.share2nms[share_address] = self._get_nms_for_url(nms_url)
        LOG.debug('Shares loaded: %s', self.shares)
def _get_subshare_mount_point(self, nfs_share, volume):
mnt_path = '%s/%s' % (
self._get_mount_point_for_share(nfs_share), volume['name'])
sub_share = '%s/%s' % (nfs_share, volume['name'])
return sub_share, mnt_path
    def _ensure_share_mounted(self, nfs_share, mount_path=None):
        """Ensure that NFS share is mounted on the host.

        Unlike the parent method this one accepts mount_path as an optional
        parameter and uses it as a mount point if provided. Retries up to
        nfs_mount_attempts times, sleeping 1s between attempts.

        :param nfs_share: NFS share name
        :param mount_path: mount path on the host
        :raises: exception.NfsException after the final attempt fails
        """
        mnt_flags = []
        if self.shares.get(nfs_share) is not None:
            mnt_flags = self.shares[nfs_share].split()
        num_attempts = max(1, self.configuration.nfs_mount_attempts)
        for attempt in range(num_attempts):
            try:
                if mount_path is None:
                    self._remotefsclient.mount(nfs_share, mnt_flags)
                else:
                    # Explicit mount point: create it and mount there,
                    # unless it is already mounted.
                    if mount_path in self._remotefsclient._read_mounts():
                        LOG.info(_LI('Already mounted: %s'), mount_path)
                        return
                    self._execute('mkdir', '-p', mount_path,
                                  check_exit_code=False)
                    self._remotefsclient._mount_nfs(nfs_share, mount_path,
                                                    mnt_flags)
                return
            except Exception as e:
                # Only the last failed attempt is fatal; earlier failures
                # log a warning and retry.
                if attempt == (num_attempts - 1):
                    LOG.error(_LE('Mount failure for %(share)s after '
                                  '%(count)d attempts.'), {
                        'share': nfs_share,
                        'count': num_attempts})
                    raise exception.NfsException(six.text_type(e))
                LOG.warning(
                    _LW('Mount attempt %(attempt)d failed: %(error)s. '
                        'Retrying mount ...'), {
                        'attempt': attempt,
                        'error': e})
                greenthread.sleep(1)
def _mount_subfolders(self):
    """Mount the per-volume sub-folder of every volume that lives on a
    configured share whose NFS server predates protocol version 4.
    """
    admin_ctxt = context.get_admin_context()
    for volume in self.db.volume_get_all_by_host(admin_ctxt, self.host):
        share = volume['provider_location']
        if share not in self.shares:
            continue
        if self._get_nfs_server_version(share) >= 4:
            continue
        sub_share, mount_point = self._get_subshare_mount_point(share, volume)
        self._ensure_share_mounted(sub_share, mount_point)
def _get_nfs_server_version(self, share):
if not self.nfs_versions.get(share):
nms = self.share2nms[share]
nfs_opts = nms.netsvc.get_confopts(
'svc:/network/nfs/server:default', 'configure')
try:
self.nfs_versions[share] = int(
nfs_opts['nfs_server_versmax']['current'])
except KeyError:
self.nfs_versions[share] = int(
nfs_opts['server_versmax']['current'])
return self.nfs_versions[share]
def _get_capacity_info(self, nfs_share):
    """Calculate available space on the NFS share.

    Queries the share's NMS appliance for the backing folder's
    'used'/'available' properties, caches GiB-sized figures in
    ``self.shares_with_capacities``, and returns a tuple of
    ``(total, free, allocated)`` in bytes.

    :param nfs_share: example 172.18.194.100:/var/nfs
    """
    nms = self.share2nms[nfs_share]
    ns_volume, ns_folder = self._get_share_datasets(nfs_share)
    # 'used|available' selects just the two properties we need.
    folder_props = nms.folder.get_child_props('%s/%s' % (ns_volume,
                                                         ns_folder),
                                              'used|available')
    # utils.str2size converts human-readable sizes (e.g. '10G') to bytes.
    free = utils.str2size(folder_props['available'])
    allocated = utils.str2size(folder_props['used'])
    # Cache GiB figures for _update_volume_stats.
    self.shares_with_capacities[nfs_share] = {
        'free': utils.str2gib_size(free),
        'total': utils.str2gib_size(free + allocated)}
    return free + allocated, free, allocated
def _get_nms_for_url(self, url):
    """Returns initialized nms object for url.

    The URL is parsed into its connection components (scheme, credentials,
    host, port, path) and used to build a JSON-RPC proxy to the appliance.
    """
    auto, scheme, user, password, host, port, path = (
        utils.parse_nms_url(url))
    return jsonrpc.NexentaJSONProxy(scheme, host, port, path, user,
                                    password, auto=auto)
def _get_snapshot_volume(self, snapshot):
    """Fetch the volume that *snapshot* belongs to, via an admin context."""
    admin_ctxt = context.get_admin_context()
    return db.volume_get(admin_ctxt, snapshot['volume_id'])
def _get_volroot(self, nms):
"""Returns volroot property value from NexentaStor appliance."""
if not self.nms_cache_volroot:
return nms.server.get_prop('volroot')
if nms not in self._nms2volroot:
self._nms2volroot[nms] = nms.server.get_prop('volroot')
return self._nms2volroot[nms]
def _get_share_datasets(self, nfs_share):
nms = self.share2nms[nfs_share]
volroot = self._get_volroot(nms)
path = nfs_share.split(':')[1][len(volroot):].strip('/')
volume_name = path.split('/')[0]
folder_name = '/'.join(path.split('/')[1:])
return volume_name, folder_name
def _get_clone_snapshot_name(self, volume):
"""Return name for snapshot that will be used to clone the volume."""
return 'cinder-clone-snapshot-%(id)s' % volume
def _is_clone_snapshot_name(self, snapshot):
"""Check if snapshot is created for cloning."""
name = snapshot.split('@')[-1]
return name.startswith('cinder-clone-snapshot-')
def _update_volume_stats(self):
    """Retrieve stats info for NexentaStor appliance.

    Picks the mounted share with the most free space (per the cached
    figures in ``self.shares_with_capacities``) and reports its capacity
    as the backend's totals in ``self._stats``.
    """
    LOG.debug('Updating volume stats')
    total_space = 0
    free_space = 0
    share = None
    # Select the share with the largest free capacity.
    for _share in self._mounted_shares:
        if self.shares_with_capacities[_share]['free'] > free_space:
            free_space = self.shares_with_capacities[_share]['free']
            total_space = self.shares_with_capacities[_share]['total']
            share = _share
    # NOTE(review): if _mounted_shares is empty, 'share' stays None and the
    # share2nms lookup below will raise KeyError — confirm callers guarantee
    # at least one mounted share.
    location_info = '%(driver)s:%(share)s' % {
        'driver': self.__class__.__name__,
        'share': share
    }
    nms_url = self.share2nms[share].url
    self._stats = {
        'vendor_name': 'Nexenta',
        'dedup': self.volume_deduplication,
        'compression': self.volume_compression,
        'description': self.volume_description,
        'nms_url': nms_url,
        'ns_shares': self.shares_with_capacities,
        'driver_version': self.VERSION,
        'storage_protocol': 'NFS',
        'total_capacity_gb': total_space,
        'free_capacity_gb': free_space,
        'reserved_percentage': self.configuration.reserved_percentage,
        'QoS_support': False,
        'location_info': location_info,
        'volume_backend_name': self.backend_name,
        'nfs_mount_point_base': self.nfs_mount_point_base
    }
| apache-2.0 |
Alwnikrotikz/pygowave-server | pygowave_server/common/pycow/decorators.py | 5 | 3006 |
#
# PyCow - Python to JavaScript with MooTools translator
# Copyright 2009 Patrick Schneider <patrick.p2k.schneider@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# This file contains some compatibility decorators which implement parts of
# MooTool's functionality.
#
import copy
from types import ClassType, FunctionType
__all__ = ["Implements", "Class"]
def Implements(*args):
    """Class decorator that implements the methods of the given classes
    into the decorated class.

    The decorated class must provide an ``implement`` method for this to
    work; see the ``Class`` decorator.
    """
    def decorator(target):
        for mixin in args:
            target.implement(mixin)
        return target
    return decorator
# NOTE: Python 2 only — relies on `types.ClassType` (old-style classes) and
# `dict.iteritems`. This function is attached to classes by the `Class`
# decorator, which is why it is written as a standalone @classmethod.
@classmethod
def implement(cls, properties):
    """
    Implements the passed in properties into the base Class, altering the
    base Class.

    The same as creating a new Class with the Implements decorator, but
    handy when you need to modify existing classes.

    Note: `properties` must be either a dictionary or a class.
    """
    # Old-style classes need direct __dict__ assignment; setattr works for
    # new-style classes.
    oldstyle = isinstance(cls, ClassType)
    if isinstance(properties, type):
        # A class was passed in: copy its attribute dictionary.
        properties = properties.__dict__
    for name, value in properties.iteritems():
        # Skip dunder attributes (__init__, __doc__, ...).
        if name.startswith("__") and name.endswith("__"): continue
        if oldstyle:
            cls.__dict__[name] = value
        else:
            setattr(cls, name, value)
    return cls
def parent(self, *args):
    """
    Call the parent class' method. This uses a stacktrace and thus may not be
    very efficient. Use super() instead.
    """
    import traceback
    # extract_stack(None, 2)[0][2] is the *caller's* function name; look up
    # the same-named method on the superclass and invoke it with *args.
    getattr(super(self.__class__, self), traceback.extract_stack(None, 2)[0][2])(*args)
def Class(cls):
    """
    This class decorator patches some things in your classes for MooTools
    compatibility. Namely it modifies the constructor to create shallow copies
    of class-bound properties (to reflect MooTools' behaviour) and adds a
    `implement` and `parent` method to the class.
    """
    # Keep a reference to the original constructor so the wrapper can chain.
    __orig_init__ = cls.__init__
    def __moo__init__(self, *args, **kwargs):
        """
        MooTools compatibility constructor. Makes shallow copies of all class
        properties and calls the original constructor.
        """
        # Copy every non-dunder, non-function class attribute onto the
        # instance so mutations don't leak between instances (Python 2:
        # iteritems).
        for name, value in self.__class__.__dict__.iteritems():
            if name.startswith("__") and name.endswith("__"): continue
            if isinstance(value, FunctionType): continue
            setattr(self, name, copy.copy(value))
        __orig_init__(self, *args, **kwargs)
    cls.__init__ = __moo__init__
    # Old-style classes (ClassType) need direct __dict__ assignment.
    if isinstance(cls, ClassType):
        cls.__dict__["implement"] = implement
    else:
        cls.implement = implement
    cls.parent = parent
    return cls
| apache-2.0 |
wkrzemien/DIRAC | Core/Utilities/Graphs/BarGraph.py | 5 | 5858 | """ BarGraph represents bar graphs with vertical bars both simple
and stacked.
The DIRAC Graphs package is derived from the GraphTool plotting package of the
CMS/Phedex Project by ... <to be added>
"""
import datetime
from pylab import setp
from matplotlib.patches import Polygon
from matplotlib.dates import date2num
from DIRAC.Core.Utilities.Graphs.PlotBase import PlotBase
from DIRAC.Core.Utilities.Graphs.GraphUtilities import to_timestamp, pixelToPoint, \
PrettyDateLocator, PrettyDateFormatter, \
PrettyScalarFormatter
__RCSID__ = "$Id$"
class BarGraph( PlotBase ):

  """
  The BarGraph class is a straightforward bar graph; given a dictionary
  of values, it takes the keys as the independent variable and the values
  as the dependent variable.

  NOTE: this is Python-2-era code (list-returning zip(), dict.values()
  with in-place .sort(), cross-type min()).
  """

  def __init__(self,data,ax,prefs,*args,**kw):
    PlotBase.__init__(self,data,ax,prefs,*args,**kw)
    # Bar width: explicit 'span' preference wins, else 1.0.
    if 'span' in self.prefs:
      self.width = self.prefs['span']
    else:
      self.width = 1.0
    if self.gdata.key_type == "time":
      # Try to guess the time bin span
      # NOTE(review): divides by (nKeys-1) — raises ZeroDivisionError when
      # there is exactly one key. Confirm callers never pass a single-key
      # time series.
      nKeys = self.gdata.getNumberOfKeys()
      self.width = (max(self.gdata.all_keys)-min(self.gdata.all_keys))/(nKeys-1)

  def draw( self ):
    """Render the (possibly stacked) bars onto self.ax.

    Builds one matplotlib Polygon per label, stacking each label's values
    on top of the running baseline tmp_b, then fixes the axis limits.
    """
    PlotBase.draw(self)
    self.x_formatter_cb(self.ax)

    if self.gdata.isEmpty():
      return None

    tmp_x = []
    tmp_y = []

    # Evaluate the bar width
    width = float(self.width)
    if self.gdata.key_type == 'time':
      # Time keys are in seconds; matplotlib date units are days.
      #width = (1 - self.bar_graph_space) * width / 86400.0
      width = width / 86400.0
      offset = 0
    elif self.gdata.key_type == 'string':
      # Leave a small gap between bars for categorical keys.
      self.bar_graph_space = 0.1
      width = (1 - self.bar_graph_space) * width
      offset = self.bar_graph_space / 2.0
    else:
      offset = 0

    start_plot = 0
    end_plot = 0
    if "starttime" in self.prefs and "endtime" in self.prefs:
      start_plot = date2num( datetime.datetime.fromtimestamp(to_timestamp(self.prefs['starttime'])))
      end_plot = date2num( datetime.datetime.fromtimestamp(to_timestamp(self.prefs['endtime'])))

    nKeys = self.gdata.getNumberOfKeys()
    tmp_b = []
    # Baseline of the stack; a tiny positive floor is used on log axes.
    if 'log_yaxis' in self.prefs:
      tmp_b = [0.001]*nKeys
      ymin = 0.001
    else:
      tmp_b = [0.]*nKeys
      ymin = 0.

    self.polygons = []
    self.lines = []
    labels = self.gdata.getLabels()
    labels.reverse()

    # If it is a simple plot, no labels are used
    # Evaluate the most appropriate color in this case
    if self.gdata.isSimplePlot():
      labels = [('SimplePlot',0.)]
      color = self.prefs.get('plot_color','Default')
      if color.find('#') != -1:
        self.palette.setColor('SimplePlot',color)
      else:
        labels = [(color,0.)]

    # Bottom edge of every polygon (closes the outline back to y=0).
    seq_b = [(self.gdata.max_num_key+width,0.0),(self.gdata.min_num_key,0.0)]
    zorder = 0.0
    dpi = self.prefs.get('dpi',100)
    for label,num in labels:
      color = self.palette.getColor(label)
      ind = 0
      tmp_x = []
      tmp_y = []
      tmp_t = []
      plot_data = self.gdata.getPlotNumData(label)
      for key, value, error in plot_data:
        if value is None:
          value = 0.
        # Four corners per bar: up the left edge, across the top, down the
        # right edge (bottom is supplied by seq_b).
        tmp_x.append( offset+key )
        #tmp_y.append( ymin )
        tmp_y.append( 0.001 )
        tmp_x.append( offset+key )
        tmp_y.append( float(value)+tmp_b[ind] )
        tmp_x.append( offset+key+width )
        tmp_y.append( float(value)+tmp_b[ind] )
        tmp_x.append( offset+key+width )
        #tmp_y.append( ymin )
        tmp_y.append( 0.001 )
        tmp_t.append(float(value)+tmp_b[ind])
        ind += 1
      # Python 2: zip returns a list, so list concatenation works here.
      seq_t = zip(tmp_x,tmp_y)
      seq = seq_t+seq_b
      poly = Polygon( seq, facecolor=color, fill=True,
                      linewidth=pixelToPoint(0.2,dpi),
                      zorder=zorder)
      self.ax.add_patch( poly )
      self.polygons.append( poly )
      # The tops of this layer become the baseline of the next one (stacking).
      tmp_b = list(tmp_t)
      zorder -= 0.1

    tight_bars_flag = self.prefs.get('tight_bars',False)
    if tight_bars_flag:
      setp( self.polygons, linewidth=0. )

    #pivots = keys
    #for idx in range(len(pivots)):
    #  self.coords[ pivots[idx] ] = self.bars[idx]

    ymax = max(tmp_b)
    ymax *= 1.1
    if 'log_yaxis' in self.prefs:
      ymin = 0.001
    else:
      # NOTE(review): min(tmp_b, 0.) compares a *list* with a float — on
      # Python 2 this always yields 0.0 (cross-type comparison), on Python 3
      # it raises TypeError. Probably intended min(min(tmp_b), 0.); confirm.
      ymin = min( tmp_b, 0. )
      ymin *= 1.1
    xmax=max(tmp_x)
    if self.log_xaxis:
      xmin = 0.001
    else:
      xmin = 0

    # Explicit preferences override the computed limits.
    ymin = self.prefs.get( 'ymin', ymin )
    ymax = self.prefs.get( 'ymax', ymax )
    xmin = self.prefs.get( 'xmin', xmin )
    xmax = self.prefs.get( 'xmax', xmax )

    self.ax.set_xlim( xmin=xmin, xmax=xmax+offset )
    self.ax.set_ylim( ymin=ymin, ymax=ymax )
    if self.gdata.key_type == 'time':
      if start_plot and end_plot:
        self.ax.set_xlim( xmin=start_plot, xmax=end_plot)
      else:
        self.ax.set_xlim( xmin=min(tmp_x), xmax=max(tmp_x))

  def x_formatter_cb( self, ax ):
    """Configure x-axis ticks/formatters for string or time keys."""
    if self.gdata.key_type == "string":
      smap = self.gdata.getStringMap()
      reverse_smap = {}
      for key, val in smap.items():
        reverse_smap[val] = key
      # Python 2: dict.values() returns a list, so in-place sort() works.
      ticks = smap.values()
      ticks.sort()
      # Center each label under its bar.
      ax.set_xticks( [i+.5 for i in ticks] )
      ax.set_xticklabels( [reverse_smap[i] for i in ticks] )
      labels = ax.get_xticklabels()
      ax.grid( False )
      if self.log_xaxis:
        xmin = 0.001
      else:
        xmin = 0
      ax.set_xlim( xmin=xmin,xmax=len(ticks) )
    elif self.gdata.key_type == "time":
      # Date-aware locator/formatter pair from GraphUtilities.
      #ax.set_xlim( xmin=self.begin_num,xmax=self.end_num )
      dl = PrettyDateLocator()
      df = PrettyDateFormatter( dl )
      ax.xaxis.set_major_locator( dl )
      ax.xaxis.set_major_formatter( df )
      ax.xaxis.set_clip_on(False)
      sf = PrettyScalarFormatter( )
      ax.yaxis.set_major_formatter( sf )
      #labels = ax.get_xticklabels()
    else:
      return None
| gpl-3.0 |
Baaaaam/cyCLASS | install.py | 20 | 4950 | #! /usr/bin/env python
import os
import sys
import subprocess
import shutil
try:
import argparse as ap
except ImportError:
import pyne._argparse as ap
def absexpanduser(x):
    """Return the absolute path of *x* with a leading '~' expanded.

    Replaces a lambda bound to a name (PEP 8 discourages that form);
    behavior is unchanged.
    """
    return os.path.abspath(os.path.expanduser(x))
def check_windows_cmake(cmake_cmd):
    """On Windows, append an appropriate CMake generator flag to *cmake_cmd*.

    Scans the directories on PATH (last entry wins) for a toolchain:
    MSVC ('cl.exe') uses the default generator, MSYS ('sh.exe') and MinGW
    ('gcc.exe') get explicit '-G' flags. Mutates *cmake_cmd* in place and
    returns None; no-op on non-Windows platforms.

    Bug fix: the original ended with ``cmake_cmd = ' '.join(cmake_cmd)``,
    which only rebound the local name and had no effect on the caller's
    list — that dead statement is removed.
    """
    if os.name != 'nt':
        return
    files_on_path = set()
    for p in os.environ['PATH'].split(';')[::-1]:
        if os.path.exists(p):
            files_on_path.update(os.listdir(p))
    if 'cl.exe' in files_on_path:
        pass  # MSVC present: CMake's default generator is correct.
    elif 'sh.exe' in files_on_path:
        cmake_cmd += ['-G "MSYS Makefiles"']
    elif 'gcc.exe' in files_on_path:
        cmake_cmd += ['-G "MinGW Makefiles"']
def install(args):
    """Configure (CMake), build, and install/test Cyclus per *args*.

    NOTE(review): indentation was reconstructed from context; confirm the
    final make invocation sits at function level (it reruns make even for
    --build-only, which is a harmless no-op rebuild).
    """
    if not os.path.exists(args.build_dir):
        os.mkdir(args.build_dir)
    elif args.clean_build:
        # --clean-build: wipe and recreate the build tree.
        shutil.rmtree(args.build_dir)
        os.mkdir(args.build_dir)
    root_dir = os.path.split(__file__)[0]
    makefile = os.path.join(args.build_dir, 'Makefile')
    if not os.path.exists(makefile):
        # First configure: make sure cmake is available.
        rtn = subprocess.call(['which', 'cmake'], shell=(os.name == 'nt'))
        if rtn != 0:
            sys.exit("CMake could not be found, "
                     "please install CMake before developing Cyclus.")
        # Translate CLI options into -D cache variables.
        cmake_cmd = ['cmake', os.path.abspath(root_dir)]
        if args.prefix:
            cmake_cmd += ['-DCMAKE_INSTALL_PREFIX=' +
                          absexpanduser(args.prefix)]
        if args.cmake_prefix_path:
            cmake_cmd += ['-DCMAKE_PREFIX_PATH=' +
                          absexpanduser(args.cmake_prefix_path)]
        if args.coin_root:
            cmake_cmd += ['-DCOIN_ROOT_DIR=' + absexpanduser(args.coin_root)]
        if args.cyclus_root:
            cmake_cmd += ['-DCYCLUS_ROOT_DIR='+absexpanduser(args.cyclus_root)]
        if args.boost_root:
            cmake_cmd += ['-DBOOST_ROOT=' + absexpanduser(args.boost_root)]
        if args.build_type:
            cmake_cmd += ['-DCMAKE_BUILD_TYPE=' + args.build_type]
        # Pick a generator on Windows based on the available toolchain.
        check_windows_cmake(cmake_cmd)
        rtn = subprocess.check_call(cmake_cmd, cwd=args.build_dir,
                                    shell=(os.name == 'nt'))
    # Build, then either test or install depending on the flags.
    make_cmd = ['make']
    if args.threads:
        make_cmd += ['-j' + str(args.threads)]
    rtn = subprocess.check_call(make_cmd, cwd=args.build_dir,
                                shell=(os.name == 'nt'))
    if args.test:
        make_cmd += ['test']
    elif not args.build_only:
        make_cmd += ['install']
    rtn = subprocess.check_call(make_cmd, cwd=args.build_dir,
                                shell=(os.name == 'nt'))
def uninstall(args):
    """Run ``make uninstall`` in the build directory.

    Exits with an error message if the project has never been built
    (no build directory or no generated Makefile).
    """
    makefile = os.path.join(args.build_dir, 'Makefile')
    if not os.path.exists(args.build_dir) or not os.path.exists(makefile):
        # Bug fix: this previously called sys.exist(), a typo that raised
        # AttributeError instead of exiting with the intended message.
        sys.exit("May not uninstall Cyclus since it has not yet been built.")
    subprocess.check_call(['make', 'uninstall'], cwd=args.build_dir,
                          shell=(os.name == 'nt'))
def main():
    """Parse command-line options and dispatch to install() or uninstall()."""
    localdir = absexpanduser('~/.local')
    description = "An installation helper script. " +\
        "For more information, please see fuelcycle.org."
    parser = ap.ArgumentParser(description=description)

    # Each option's help text is bound to a local first, then registered.
    build_dir = 'where to place the build directory'
    parser.add_argument('--build_dir', help=build_dir, default='build')

    uninst = 'uninstall'
    parser.add_argument('--uninstall', action='store_true', help=uninst, default=False)

    clean = 'attempt to remove the build directory before building'
    parser.add_argument('--clean-build', action='store_true', help=clean)

    threads = "the number of threads to use in the make step"
    parser.add_argument('-j', '--threads', type=int, help=threads)

    prefix = "the relative path to the installation directory"
    parser.add_argument('--prefix', help=prefix, default=localdir)

    build_only = 'only build the package, do not install'
    parser.add_argument('--build-only', action='store_true', help=build_only)

    test = 'run tests after building'
    parser.add_argument('--test', action='store_true', help=test)

    coin = "the relative path to the Coin-OR libraries directory"
    parser.add_argument('--coin_root', help=coin)

    cyclus = "the relative path to Cyclus installation directory"
    parser.add_argument('--cyclus_root',help=cyclus, default=localdir)

    boost = "the relative path to the Boost libraries directory"
    parser.add_argument('--boost_root', help=boost)

    cmake_prefix_path = "the cmake prefix path for use with FIND_PACKAGE, " + \
        "FIND_PATH, FIND_PROGRAM, or FIND_LIBRARY macros"
    parser.add_argument('--cmake_prefix_path', help=cmake_prefix_path)

    build_type = "the CMAKE_BUILD_TYPE"
    parser.add_argument('--build_type', help=build_type)

    args = parser.parse_args()
    if args.uninstall:
        uninstall(args)
    else:
        install(args)
| bsd-3-clause |
TomAugspurger/pandas | pandas/tests/generic/methods/test_reorder_levels.py | 2 | 2804 | import numpy as np
import pytest
from pandas import DataFrame, MultiIndex, Series
import pandas._testing as tm
class TestReorderLevels:
    """Tests for Series/DataFrame.reorder_levels on MultiIndex axes."""

    @pytest.mark.parametrize("klass", [Series, DataFrame])
    def test_reorder_levels(self, klass):
        # 3-level index: one 'bar', three middle labels, two codes.
        index = MultiIndex(
            levels=[["bar"], ["one", "two", "three"], [0, 1]],
            codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]],
            names=["L0", "L1", "L2"],
        )
        df = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=index)
        obj = df if klass is DataFrame else df["A"]

        # no change, position
        result = obj.reorder_levels([0, 1, 2])
        tm.assert_equal(obj, result)

        # no change, labels
        result = obj.reorder_levels(["L0", "L1", "L2"])
        tm.assert_equal(obj, result)

        # rotate, position
        result = obj.reorder_levels([1, 2, 0])
        e_idx = MultiIndex(
            levels=[["one", "two", "three"], [0, 1], ["bar"]],
            codes=[[0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1], [0, 0, 0, 0, 0, 0]],
            names=["L1", "L2", "L0"],
        )
        expected = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=e_idx)
        expected = expected if klass is DataFrame else expected["A"]
        tm.assert_equal(result, expected)

        # repeating a level is allowed: all three levels become L0.
        result = obj.reorder_levels([0, 0, 0])
        e_idx = MultiIndex(
            levels=[["bar"], ["bar"], ["bar"]],
            codes=[[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]],
            names=["L0", "L0", "L0"],
        )
        expected = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=e_idx)
        expected = expected if klass is DataFrame else expected["A"]
        tm.assert_equal(result, expected)

        # same repetition by label.
        result = obj.reorder_levels(["L0", "L0", "L0"])
        tm.assert_equal(result, expected)

    def test_reorder_levels_swaplevel_equivalence(
        self, multiindex_year_month_day_dataframe_random_data
    ):
        # reorder_levels should match the equivalent chain of swaplevel calls.
        ymd = multiindex_year_month_day_dataframe_random_data

        result = ymd.reorder_levels(["month", "day", "year"])
        expected = ymd.swaplevel(0, 1).swaplevel(1, 2)
        tm.assert_frame_equal(result, expected)

        result = ymd["A"].reorder_levels(["month", "day", "year"])
        expected = ymd["A"].swaplevel(0, 1).swaplevel(1, 2)
        tm.assert_series_equal(result, expected)

        result = ymd.T.reorder_levels(["month", "day", "year"], axis=1)
        expected = ymd.T.swaplevel(0, 1, axis=1).swaplevel(1, 2, axis=1)
        tm.assert_frame_equal(result, expected)

        # error cases: non-hierarchical axis and too many levels.
        with pytest.raises(TypeError, match="hierarchical axis"):
            ymd.reorder_levels([1, 2], axis=1)

        with pytest.raises(IndexError, match="Too many levels"):
            ymd.index.reorder_levels([1, 2, 3])
| bsd-3-clause |
fx2003/tensorflow-study | TensorFlow实战/models/syntaxnet/dragnn/tools/evaluator.py | 12 | 6988 | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Runs a DRAGNN model on a given set of CoNLL-formatted sentences.
Sample invocation:
bazel run -c opt <...>:dragnn_eval -- \
--master_spec="/path/to/master-spec" \
--checkpoint_file="/path/to/model/name.checkpoint" \
--input_file="/path/to/input/documents/test.connlu"
"""
import os
import re
import time
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.python.client import timeline
from tensorflow.python.platform import gfile
from dragnn.protos import spec_pb2
from dragnn.python import evaluation
from dragnn.python import graph_builder
from dragnn.python import sentence_io
from dragnn.python import spec_builder
from syntaxnet import sentence_pb2
import dragnn.python.load_dragnn_cc_impl
import syntaxnet.load_parser_ops
# Command-line flags for the evaluator; see the module docstring for a
# sample invocation.
flags = tf.app.flags
FLAGS = flags.FLAGS

flags.DEFINE_string('master_spec', '',
                    'Path to text file containing a DRAGNN master spec to run.')
flags.DEFINE_string('resource_dir', '',
                    'Optional base directory for resources in the master spec.')
flags.DEFINE_bool('complete_master_spec', False, 'Whether the master_spec '
                  'needs the lexicon and other resources added to it.')
flags.DEFINE_string('checkpoint_file', '', 'Path to trained model checkpoint.')
flags.DEFINE_string('input_file', '',
                    'File of CoNLL-formatted sentences to read from.')
flags.DEFINE_string('output_file', '',
                    'File path to write annotated sentences to.')
flags.DEFINE_integer('max_batch_size', 2048, 'Maximum batch size to support.')
flags.DEFINE_string('inference_beam_size', '', 'Comma separated list of '
                    'component_name=beam_size pairs.')
flags.DEFINE_string('locally_normalize', '', 'Comma separated list of '
                    'component names to do local normalization on.')
flags.DEFINE_integer('threads', 10, 'Number of threads used for intra- and '
                     'inter-op parallelism.')
flags.DEFINE_string('timeline_output_file', '', 'Path to save timeline to. '
                    'If specified, the final iteration of the evaluation loop '
                    'will capture and save a TensorFlow timeline.')
flags.DEFINE_string('log_file', '', 'File path to write parser eval results.')
flags.DEFINE_string('language_name', '_', 'Name of language being parsed, '
                    'for logging.')
def main(unused_argv):
  """Annotate the input corpus with the DRAGNN model and report parse metrics."""
  tf.logging.set_verbosity(tf.logging.INFO)

  # Parse the flags containing lists, using regular expressions.
  # This matches and extracts key=value pairs.
  component_beam_sizes = re.findall(r'([^=,]+)=(\d+)',
                                    FLAGS.inference_beam_size)
  # This matches strings separated by a comma. Does not return any empty
  # strings.
  components_to_locally_normalize = re.findall(r'[^,]+',
                                               FLAGS.locally_normalize)

  # Reads master spec.
  master_spec = spec_pb2.MasterSpec()
  with gfile.FastGFile(FLAGS.master_spec) as fin:
    text_format.Parse(fin.read(), master_spec)

  # Rewrite resource locations.
  if FLAGS.resource_dir:
    for component in master_spec.component:
      for resource in component.resource:
        for part in resource.part:
          part.file_pattern = os.path.join(FLAGS.resource_dir,
                                           part.file_pattern)

  if FLAGS.complete_master_spec:
    spec_builder.complete_master_spec(master_spec, None, FLAGS.resource_dir)

  # Graph building.
  tf.logging.info('Building the graph')
  g = tf.Graph()
  with g.as_default(), tf.device('/device:CPU:0'):
    hyperparam_config = spec_pb2.GridPoint()
    hyperparam_config.use_moving_average = True
    builder = graph_builder.MasterBuilder(master_spec, hyperparam_config)
    annotator = builder.add_annotation()
    builder.add_saver()

  tf.logging.info('Reading documents...')
  input_corpus = sentence_io.ConllSentenceReader(FLAGS.input_file).corpus()

  session_config = tf.ConfigProto(
      log_device_placement=False,
      intra_op_parallelism_threads=FLAGS.threads,
      inter_op_parallelism_threads=FLAGS.threads)

  with tf.Session(graph=g, config=session_config) as sess:
    tf.logging.info('Initializing variables...')
    sess.run(tf.global_variables_initializer())

    tf.logging.info('Loading from checkpoint...')
    # Restore via the saver op added by builder.add_saver().
    sess.run('save/restore_all', {'save/Const:0': FLAGS.checkpoint_file})

    tf.logging.info('Processing sentences...')

    processed = []
    start_time = time.time()
    run_metadata = tf.RunMetadata()
    # Annotate the corpus in batches of at most max_batch_size sentences.
    for start in range(0, len(input_corpus), FLAGS.max_batch_size):
      end = min(start + FLAGS.max_batch_size, len(input_corpus))
      feed_dict = {annotator['input_batch']: input_corpus[start:end]}
      for comp, beam_size in component_beam_sizes:
        feed_dict['%s/InferenceBeamSize:0' % comp] = beam_size
      for comp in components_to_locally_normalize:
        feed_dict['%s/LocallyNormalize:0' % comp] = True
      # On the final batch, optionally capture a full trace for a timeline.
      if FLAGS.timeline_output_file and end == len(input_corpus):
        serialized_annotations = sess.run(
            annotator['annotations'], feed_dict=feed_dict,
            options=tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE),
            run_metadata=run_metadata)
        trace = timeline.Timeline(step_stats=run_metadata.step_stats)
        with open(FLAGS.timeline_output_file, 'w') as trace_file:
          trace_file.write(trace.generate_chrome_trace_format())
      else:
        serialized_annotations = sess.run(
            annotator['annotations'], feed_dict=feed_dict)
      processed.extend(serialized_annotations)

    tf.logging.info('Processed %d documents in %.2f seconds.',
                    len(input_corpus), time.time() - start_time)
    # POS / unlabeled / labeled attachment scores.
    pos, uas, las = evaluation.calculate_parse_metrics(input_corpus, processed)
    if FLAGS.log_file:
      with gfile.GFile(FLAGS.log_file, 'w') as f:
        f.write('%s\t%f\t%f\t%f\n' % (FLAGS.language_name, pos, uas, las))

    if FLAGS.output_file:
      with gfile.GFile(FLAGS.output_file, 'w') as f:
        for serialized_sentence in processed:
          sentence = sentence_pb2.Sentence()
          sentence.ParseFromString(serialized_sentence)
          f.write(text_format.MessageToString(sentence) + '\n\n')
if __name__ == '__main__':
tf.app.run()
| mit |
Maronato/aosalunos | misago/apps/threadtype/base.py | 3 | 2867 | from django.core.urlresolvers import reverse
from django.http import Http404
from django.shortcuts import redirect
from misago.conf import settings
from misago.models import Forum, Thread, Post
from misago.utils.pagination import page_number
class ViewBase(object):
    """Base class for Misago thread-type views.

    Defect fixed: a dataset/license artifact ("| gpl-2.0 |") had been fused
    onto the final line of ``retreat_redirect``, breaking the syntax; it is
    removed. All executable code is otherwise unchanged.
    """

    def __new__(cls, request, **kwargs):
        # Instantiating the class immediately dispatches the request:
        # the instance is created and then called.
        obj = super(ViewBase, cls).__new__(cls)
        return obj(request, **kwargs)

    @property
    def search_in(self):
        # Pluralized type prefix, e.g. 'thread' -> 'threads'.
        return '%ss' % self.type_prefix

    def _type_available(self):
        # Subclasses may define type_available(); absence means "available".
        try:
            if not self.type_available():
                raise Http404()
        except AttributeError:
            pass

    def set_forum_context(self):
        # Hook for subclasses; no-op by default.
        pass

    def set_thread_context(self):
        """Load the thread/forum for this request and check view ACLs."""
        self.thread = Thread.objects.get(pk=self.kwargs.get('thread'))
        self.forum = self.thread.forum
        self.proxy = Forum.objects.parents_aware_forum(self.forum)
        self.request.acl.forums.allow_forum_view(self.forum)
        self.request.acl.threads.allow_thread_view(self.request.user, self.thread)

    def set_post_contex(self):
        # Hook for subclasses; no-op by default.
        # NOTE(review): name keeps the historical 'contex' spelling because
        # subclasses override it by that name.
        pass

    def check_forum_type(self):
        """404 unless the forum matches this view's special type."""
        type_prefix = self.type_prefix
        if type_prefix == 'thread':
            type_prefix = 'root'
        else:
            type_prefix = '%ss' % type_prefix
        try:
            if self.parents[0].parent_id != Forum.objects.special_pk(type_prefix):
                raise Http404()
        except (AttributeError, IndexError):
            # No parents list available; fall back to the forum's own flag.
            if self.forum.special != type_prefix:
                raise Http404()

    def _check_permissions(self):
        # Subclasses may define check_permissions(); absence means allowed.
        try:
            self.check_permissions()
        except AttributeError:
            pass

    def redirect_to_post(self, post, type_prefix=None):
        """Redirect to the page containing *post*, anchored at the post."""
        type_prefix = type_prefix or self.type_prefix
        queryset = self.request.acl.threads.filter_posts(self.request, self.thread, self.thread.post_set)
        # Page number is derived from how many visible posts precede (or are) this one.
        page = page_number(queryset.filter(id__lte=post.pk).count(), queryset.count(), settings.posts_per_page)
        if page > 1:
            return redirect(reverse(type_prefix, kwargs={'thread': self.thread.pk, 'slug': self.thread.slug, 'page': page}) + ('#post-%s' % post.pk))
        return redirect(reverse(type_prefix, kwargs={'thread': self.thread.pk, 'slug': self.thread.slug}) + ('#post-%s' % post.pk))

    def template_vars(self, context):
        # Hook for subclasses to extend the template context.
        return context

    def _template_vars(self, context):
        """Add the common context entries, then apply the subclass hook."""
        context.update({
            'type_prefix': self.type_prefix,
            'search_in': self.search_in,
        })
        return self.template_vars(context)

    def retreat_redirect(self):
        """Redirect to an explicit 'retreat' URL from POST data, or back to the thread."""
        if self.request.POST.get('retreat'):
            return redirect(self.request.POST.get('retreat'))
        return redirect(reverse(self.type_prefix, kwargs={'thread': self.thread.pk, 'slug': self.thread.slug}))
af1rst/bite-project | deps/gdata-python-client/src/gdata/tlslite/integration/POP3_TLS.py | 271 | 5466 | """TLS Lite + poplib."""
import socket
from poplib import POP3
from gdata.tlslite.TLSConnection import TLSConnection
from gdata.tlslite.integration.ClientHelper import ClientHelper
# POP TLS PORT
POP3_TLS_PORT = 995
class POP3_TLS(POP3, ClientHelper):
    """This class extends L{poplib.POP3} with TLS support.

    NOTE: Python 2 only — uses the ``except socket.error, msg`` and
    ``raise socket.error, msg`` statement forms.
    """

    def __init__(self, host, port = POP3_TLS_PORT,
                 username=None, password=None, sharedKey=None,
                 certChain=None, privateKey=None,
                 cryptoID=None, protocol=None,
                 x509Fingerprint=None,
                 x509TrustList=None, x509CommonName=None,
                 settings=None):
        """Create a new POP3_TLS.

        For client authentication, use one of these argument
        combinations:
         - username, password (SRP)
         - username, sharedKey (shared-key)
         - certChain, privateKey (certificate)

        For server authentication, you can either rely on the
        implicit mutual authentication performed by SRP or
        shared-keys, or you can do certificate-based server
        authentication with one of these argument combinations:
         - cryptoID[, protocol] (requires cryptoIDlib)
         - x509Fingerprint
         - x509TrustList[, x509CommonName] (requires cryptlib_py)

        Certificate-based server authentication is compatible with
        SRP or certificate-based client authentication. It is
        not compatible with shared-keys.

        The caller should be prepared to handle TLS-specific
        exceptions. See the client handshake functions in
        L{tlslite.TLSConnection.TLSConnection} for details on which
        exceptions might be raised.

        @type host: str
        @param host: Server to connect to.

        @type port: int
        @param port: Port to connect to.

        @type username: str
        @param username: SRP or shared-key username. Requires the
        'password' or 'sharedKey' argument.

        @type password: str
        @param password: SRP password for mutual authentication.
        Requires the 'username' argument.

        @type sharedKey: str
        @param sharedKey: Shared key for mutual authentication.
        Requires the 'username' argument.

        @type certChain: L{tlslite.X509CertChain.X509CertChain} or
        L{cryptoIDlib.CertChain.CertChain}
        @param certChain: Certificate chain for client authentication.
        Requires the 'privateKey' argument. Excludes the SRP or
        shared-key related arguments.

        @type privateKey: L{tlslite.utils.RSAKey.RSAKey}
        @param privateKey: Private key for client authentication.
        Requires the 'certChain' argument. Excludes the SRP or
        shared-key related arguments.

        @type cryptoID: str
        @param cryptoID: cryptoID for server authentication. Mutually
        exclusive with the 'x509...' arguments.

        @type protocol: str
        @param protocol: cryptoID protocol URI for server
        authentication. Requires the 'cryptoID' argument.

        @type x509Fingerprint: str
        @param x509Fingerprint: Hex-encoded X.509 fingerprint for
        server authentication. Mutually exclusive with the 'cryptoID'
        and 'x509TrustList' arguments.

        @type x509TrustList: list of L{tlslite.X509.X509}
        @param x509TrustList: A list of trusted root certificates. The
        other party must present a certificate chain which extends to
        one of these root certificates. The cryptlib_py module must be
        installed to use this parameter. Mutually exclusive with the
        'cryptoID' and 'x509Fingerprint' arguments.

        @type x509CommonName: str
        @param x509CommonName: The end-entity certificate's 'CN' field
        must match this value. For a web server, this is typically a
        server name such as 'www.amazon.com'. Mutually exclusive with
        the 'cryptoID' and 'x509Fingerprint' arguments. Requires the
        'x509TrustList' argument.

        @type settings: L{tlslite.HandshakeSettings.HandshakeSettings}
        @param settings: Various settings which can be used to control
        the ciphersuites, certificate types, and SSL/TLS versions
        offered by the client.
        """
        self.host = host
        self.port = port
        # Default error if getaddrinfo yields nothing usable.
        msg = "getaddrinfo returns an empty list"
        self.sock = None
        # Try each resolved address family until a connection succeeds
        # (same pattern as poplib.POP3.__init__).
        for res in socket.getaddrinfo(self.host, self.port, 0, socket.SOCK_STREAM):
            af, socktype, proto, canonname, sa = res
            try:
                self.sock = socket.socket(af, socktype, proto)
                self.sock.connect(sa)
            except socket.error, msg:
                if self.sock:
                    self.sock.close()
                self.sock = None
                continue
            break
        if not self.sock:
            raise socket.error, msg

        ### New code below (all else copied from poplib)
        # Validate/store the authentication arguments, then wrap the plain
        # socket in a TLSConnection and perform the handshake.
        ClientHelper.__init__(self,
                              username, password, sharedKey,
                              certChain, privateKey,
                              cryptoID, protocol,
                              x509Fingerprint,
                              x509TrustList, x509CommonName,
                              settings)

        self.sock = TLSConnection(self.sock)
        self.sock.closeSocket = True
        ClientHelper._handshake(self, self.sock)
        ###

        # Resume the standard poplib initialization over the TLS socket.
        self.file = self.sock.makefile('rb')
        self._debugging = 0
        self.welcome = self._getresp()
| apache-2.0 |
caseyrollins/osf.io | osf/migrations/0067_auto_20171121_1050.py | 22 | 2916 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-21 16:50
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 1.11.7, per the file header).

    Adds proxy models for the OneDrive addon's file tree and registers the
    new ``osf.onedrive*`` entries in the ``BaseFileNode.type`` choices.
    Do not hand-edit the operation order: the proxies below inherit from
    ``OneDriveFileNode``, which must be created first.
    """

    dependencies = [
        ('osf', '0066_merge_20171121_1050'),
    ]

    operations = [
        # Proxy over osf.basefilenode: no new table, only a new polymorphic
        # type value for the shared base-file-node table.
        migrations.CreateModel(
            name='OneDriveFileNode',
            fields=[
            ],
            options={
                'proxy': True,
                'indexes': [],
            },
            bases=('osf.basefilenode',),
        ),
        # Extend the type-column choices so the new proxy types validate.
        migrations.AlterField(
            model_name='basefilenode',
            name='type',
            field=models.CharField(choices=[('osf.trashedfilenode', 'trashed file node'), ('osf.trashedfile', 'trashed file'), ('osf.trashedfolder', 'trashed folder'), ('osf.osfstoragefilenode', 'osf storage file node'), ('osf.osfstoragefile', 'osf storage file'), ('osf.osfstoragefolder', 'osf storage folder'), ('osf.bitbucketfilenode', 'bitbucket file node'), ('osf.bitbucketfolder', 'bitbucket folder'), ('osf.bitbucketfile', 'bitbucket file'), ('osf.boxfilenode', 'box file node'), ('osf.boxfolder', 'box folder'), ('osf.boxfile', 'box file'), ('osf.dataversefilenode', 'dataverse file node'), ('osf.dataversefolder', 'dataverse folder'), ('osf.dataversefile', 'dataverse file'), ('osf.dropboxfilenode', 'dropbox file node'), ('osf.dropboxfolder', 'dropbox folder'), ('osf.dropboxfile', 'dropbox file'), ('osf.figsharefilenode', 'figshare file node'), ('osf.figsharefolder', 'figshare folder'), ('osf.figsharefile', 'figshare file'), ('osf.githubfilenode', 'github file node'), ('osf.githubfolder', 'github folder'), ('osf.githubfile', 'github file'), ('osf.gitlabfilenode', 'git lab file node'), ('osf.gitlabfolder', 'git lab folder'), ('osf.gitlabfile', 'git lab file'), ('osf.googledrivefilenode', 'google drive file node'), ('osf.googledrivefolder', 'google drive folder'), ('osf.googledrivefile', 'google drive file'), ('osf.onedrivefilenode', 'one drive file node'), ('osf.onedrivefolder', 'one drive folder'), ('osf.onedrivefile', 'one drive file'), ('osf.owncloudfilenode', 'owncloud file node'), ('osf.owncloudfolder', 'owncloud folder'), ('osf.owncloudfile', 'owncloud file'), ('osf.s3filenode', 's3 file node'), ('osf.s3folder', 's3 folder'), ('osf.s3file', 's3 file')], db_index=True, max_length=255),
        ),
        # File/folder specializations proxying the node type created above.
        migrations.CreateModel(
            name='OneDriveFile',
            fields=[
            ],
            options={
                'proxy': True,
                'indexes': [],
            },
            bases=('osf.onedrivefilenode', models.Model),
        ),
        migrations.CreateModel(
            name='OneDriveFolder',
            fields=[
            ],
            options={
                'proxy': True,
                'indexes': [],
            },
            bases=('osf.onedrivefilenode', models.Model),
        ),
    ]
| apache-2.0 |
ryfeus/lambda-packs | Tensorflow_Pandas_Numpy/source3.6/tensorflow/python/tools/strip_unused.py | 180 | 3786 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Removes unneeded nodes from a GraphDef file.
This script is designed to help streamline models, by taking the input and
output nodes that will be used by an application and figuring out the smallest
set of operations that are required to run for those arguments. The resulting
minimal graph is then saved out.
The advantages of running this script are:
- You may be able to shrink the file size.
- Operations that are unsupported on your platform but still present can be
safely removed.
The resulting graph may not be as flexible as the original though, since any
input nodes that weren't explicitly mentioned may not be accessible any more.
An example of command-line usage is:
bazel build tensorflow/python/tools:strip_unused && \
bazel-bin/tensorflow/python/tools/strip_unused \
--input_graph=some_graph_def.pb \
--output_graph=/tmp/stripped_graph.pb \
--input_node_names=input0
--output_node_names=softmax
You can also look at strip_unused_test.py for an example of how to use it.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import app
from tensorflow.python.tools import strip_unused_lib
FLAGS = None


def main(unused_args):
  """Strips unused nodes per FLAGS and writes the stripped GraphDef out."""
  flags = FLAGS
  strip_unused_lib.strip_unused_from_files(
      flags.input_graph, flags.input_binary, flags.output_graph,
      flags.output_binary, flags.input_node_names, flags.output_node_names,
      flags.placeholder_type_enum)
if __name__ == '__main__':
  parser = argparse.ArgumentParser()
  # argparse has no built-in boolean type; register one mapping the literal
  # string 'true' (any case) to True and everything else to False.
  parser.register('type', 'bool', lambda v: v.lower() == 'true')
  parser.add_argument(
      '--input_graph',
      type=str,
      default='',
      help='TensorFlow \'GraphDef\' file to load.')
  # nargs='?' with const=True lets the flag be passed bare (--input_binary)
  # as well as with an explicit value (--input_binary=false).
  parser.add_argument(
      '--input_binary',
      nargs='?',
      const=True,
      type='bool',
      default=False,
      help='Whether the input files are in binary format.')
  parser.add_argument(
      '--output_graph',
      type=str,
      default='',
      help='Output \'GraphDef\' file name.')
  parser.add_argument(
      '--output_binary',
      nargs='?',
      const=True,
      type='bool',
      default=True,
      help='Whether to write a binary format graph.')
  parser.add_argument(
      '--input_node_names',
      type=str,
      default='',
      help='The name of the input nodes, comma separated.')
  parser.add_argument(
      '--output_node_names',
      type=str,
      default='',
      help='The name of the output nodes, comma separated.')
  parser.add_argument(
      '--placeholder_type_enum',
      type=int,
      default=dtypes.float32.as_datatype_enum,
      help='The AttrValue enum to use for placeholders.')
  # Flags we recognize land in the module-global FLAGS that main() reads;
  # anything unrecognized is forwarded to app.run untouched.
  FLAGS, unparsed = parser.parse_known_args()
  app.run(main=main, argv=[sys.argv[0]] + unparsed)
| mit |
gokudomatic/cobiv | cobiv/modules/database/sqlitedb/sqlitesetmanager.py | 1 | 7393 | import logging
from functools import partial
from kivy.clock import Clock
from cobiv.modules.core.sets.setmanager import SetManager
class SqliteSetManager(SetManager):
    """Set manager backed by the sqlite datasource.

    Named sets are persisted in the ``set_head`` / ``set_detail`` tables;
    the working set is the temporary ``current_set`` table. Mutating
    methods run inside ``with self.conn`` so each operation commits (or
    rolls back) as one sqlite transaction.
    """

    logger = logging.getLogger(__name__)

    def ready(self):
        """Acquire the sqlite connection and wire up session actions."""
        super().ready()
        self.conn = self.lookup('sqlite_ds', 'Datasource').get_connection()
        session = self.get_session()
        # Map the user-facing set commands onto the methods below.
        session.set_action("set-add", self.save)
        session.set_action("set-load", self.load)
        session.set_action("set-rm", self.remove)
        session.set_action("set-mv", self.rename)
        session.set_action("set-append", self.add_to_current)
        session.set_action("set-substract", self.remove_from_current)
        # The 'sql' view context replays the last query/set via replay_query.
        view_context = session.get_context('sql')
        view_context.fn = self.replay_query
        view_context.args['current_set_query'] = None
        view_context.args['current_set_id'] = None

    def remove(self, id):
        """Delete the named set: detail rows first, then the head row."""
        with self.conn:
            c = self.conn.execute(
                'delete from set_detail where set_head_key = (select id from set_head where name=?)', (id,))
            c.execute('delete from set_head where name=?', (id,))

    def save(self, id):
        """Snapshot current_set under the given name, replacing any
        previous content of that set."""
        with self.conn:
            c = self.conn.execute(
                'delete from set_detail where set_head_key = (select id from set_head where name=?)', (id,))
            # Create the head row only if a set of that name does not exist.
            c.execute(
                'insert into set_head (name,readonly) select ?,? where not exists(select 1 from set_head where name=?)',
                (id, '0', id))
            row = c.execute('select id from set_head where name=?', (id,)).fetchone()
            key = row[0]
            c.execute(
                'insert into set_detail (set_head_key,position,file_key) select ?,c.position,c.file_key from current_set c',
                (key,))

    def add_to_set(self, set_head_name, file_id):
        """Append file_id at the end (max position + 1) of the named set.

        Silently does nothing when the set does not exist.
        """
        with self.conn:
            row = self.conn.execute('select id from set_head where name=?', (set_head_name,)).fetchone()
            if row is not None:
                head_key = row[0]
                self.conn.execute(
                    'insert into set_detail (set_head_key,position,file_key) select ?,max(position)+1 as position,? from set_detail where set_head_key=?',
                    (head_key, file_id, head_key))

    def rename(self, id, new_id):
        """Rename the set called ``id`` to ``new_id``."""
        with self.conn:
            self.conn.execute('update set_head set name=? where name=?', (new_id, id))

    def add_to_current(self, id):
        """Set union: append files of the named set that are not already in
        current_set, renumber positions and notify listeners."""
        with self.conn:
            self.conn.execute(
                'insert into current_set select d.* from set_detail d,set_head h where d.set_head_key=h.id and h.name=? and not exists(select 1 from current_set c1 where c1.file_key=d.file_key)',
                (id,)
            )
            self.reenumerate()
        self.get_app().fire_event("on_current_set_change")

    def remove_from_current(self, id):
        """Set difference: drop files of the named set from current_set,
        renumber positions and notify listeners."""
        with self.conn:
            self.conn.execute(
                'delete from current_set where file_key in (select file_key from set_detail d, set_head h where d.set_head_key=h.id and h.name=?)',
                (id,))
            self.reenumerate()
        self.get_app().fire_event("on_current_set_change")

    def reenumerate(self):
        """Rewrite ``position`` to a dense 0-based sequence.

        Only rows with position >= 0 are renumbered, in their existing
        position order; rows with negative positions are left untouched.
        """
        with self.conn:
            self.conn.execute(
                'create temporary table renum as select rowid fkey from current_set where position>=0 order by position')
            self.conn.execute('create unique index renum_idx on renum(fkey)')  # improve performance
            self.conn.execute(
                'update current_set set position=(select r.rowid-1 from renum r where r.fkey=current_set.rowid) where exists (select * from renum where renum.fkey=current_set.rowid)')
            self.conn.execute('drop table renum')

    def get_list(self):
        """Return the names of all saved sets except the default '*' set."""
        with self.conn:
            rows = self.conn.execute('select name from set_head where not name="*"').fetchall()
            return [r[0] for r in rows]

    def regenerate_default(self):
        """Rebuild the special '*' set so it lists every row of ``file``."""
        with self.conn:
            c = self.conn.cursor()
            row = c.execute('select id from set_head where name="*"').fetchone()
            if row is not None:
                head_key = row[0]
                c.execute('delete from set_detail where set_head_key=?', (head_key,))
            else:
                c.execute('insert into set_head (name, readonly) values ("*","0")')
                head_key = c.lastrowid
            # rowid-1 of the temp table provides a contiguous 0-based position.
            c.execute("create temporary table map_filekey_pos as select id from file")
            c.execute(
                "insert into set_detail (set_head_key,position,file_key) select ?,rowid-1,id from map_filekey_pos",
                (head_key,))
            c.execute("drop table map_filekey_pos")

    def query_to_current_set(self, query):
        """Replace current_set with the result of an arbitrary SQL query
        selecting file ids, remembering the query for replay.

        NOTE(review): ``query`` is concatenated directly into SQL — callers
        must pass trusted SQL only. Also note this method does not run in a
        ``with self.conn`` transaction, unlike the other mutators.
        """
        view_context = self.get_session().get_context('sql')
        # If a query/set was already active, push the old context so the
        # previous view (and cursor file) can be restored later.
        if view_context.args['current_set_query'] is not None or view_context.args['current_set_id'] is not None:
            view_context.args['current_id'] = self.get_session().cursor.file_id
            self.get_session().push_context('sql')
        view_context.args['current_set_query'] = query
        view_context.args['current_set_id'] = None
        c = self.conn.cursor()
        c.execute("create temporary table map_filekey_pos as " + query)
        c.execute('drop table if exists current_set')
        c.execute(
            'create temporary table current_set as select 0 as set_head_key,rowid-1 as position,id as file_key from map_filekey_pos')
        c.execute('create index cs_index1 on current_set(file_key)')
        c.execute("drop table map_filekey_pos")
        self.get_app().fire_event("on_current_set_change")

    def load(self, id):
        """Replace current_set with the content of the named saved set,
        remembering the set id for replay."""
        view_context = self.get_session().get_context('sql')
        # Same context push as query_to_current_set, for the set-id case.
        if view_context.args['current_set_query'] is not None or view_context.args['current_set_id'] is not None:
            view_context.args['current_id'] = self.get_session().cursor.file_id
            self.get_session().push_context('sql')
        view_context.args['current_set_query'] = None
        view_context.args['current_set_id'] = id
        with self.conn:
            c = self.conn.execute('drop table if exists current_set')
            c.execute(
                'create temporary table current_set as select d.* from set_detail d,set_head h where d.set_head_key=h.id and h.name=? order by d.position',
                (id,))
            c.execute('create index cs_index1 on current_set(file_key)')
        self.get_app().fire_event("on_current_set_change")

    def replay_query(self, current_set_query, current_set_id, current_id):
        """Re-run whichever of query/set-id was active when the 'sql'
        context was saved; exactly one of the two should be non-None."""
        if current_set_query is not None:
            self.query_to_current_set(query=current_set_query)
        elif current_set_id is not None:
            self.load(id=current_set_id)
        else:
            self.logger.error("current_set_query and current_set_id should not be both None!")
        # if current_id is not None:
        #     Clock.schedule_once(lambda dt: self.get_session().cursor.go(idx=current_id, force=True), 0.1)
        #     self.get_session().cursor.go(current_id, force=True)

    def test(self):
        """Debug helper: loads an arbitrary slice of ``file`` as current_set.

        NOTE(review): the table built here lacks the set_head_key/position
        columns the real current_set has — development use only, presumably.
        """
        with self.conn:
            c = self.conn.cursor()
            c.execute('drop table if exists current_set')
            c.execute("create temporary table current_set as select id from file where rowid between 50 and 149")
| mit |
Code4SA/wazimap-za | wazimap_za/management/commands/dumppsql.py | 1 | 1078 | import subprocess
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
class Command(BaseCommand):
    """Dump a single database table via ``pg_dump``, writing SQL to stdout.

    Fixes over the previous revision:
    - the ``-U <user>`` pair was appended twice to the pg_dump argument list;
    - a failing pg_dump was silently ignored (stderr discarded, exit code
      never checked); it now raises CommandError with pg_dump's message.
    """
    help = "Dumps data tables to CSV. Just a thin wrapper around pg_dump."

    def add_arguments(self, parser):
        parser.add_argument(
            '--table',
            action='store',
            dest='table',
            default=None,
            help='Which table to dump.'
        )

    def handle(self, *args, **options):
        table_id = options.get('table')
        if not table_id:
            raise CommandError("You need to specify a table with --table")

        db = settings.DATABASES['default']
        # -O: skip ownership statements; -c --if-exists: emit guarded DROPs.
        cmd = [
            "pg_dump",
            "-O", "-c", "--if-exists",
            "-t", table_id,
            "-d", db['NAME'],
            "-h", db['HOST'],
            "-U", db['USER'],
        ]
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        # Surface failures instead of printing an empty/partial dump.
        if p.returncode != 0:
            raise CommandError("pg_dump failed: %s" % stderr.decode('utf8'))
        self.stdout.write(stdout.decode('utf8'))
| mit |
drpngx/tensorflow | tensorflow/python/data/util/convert.py | 9 | 2910 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helpers constructing Datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
def optional_param_to_tensor(argument_name,
                             argument_value,
                             argument_default=0,
                             argument_dtype=dtypes.int64):
  """Converts an optional argument to a tensor, falling back to a default.

  Args:
    argument_name: Name to give the resulting tensor op.
    argument_value: The value to convert, or `None` to use the default.
    argument_default: Value used when `argument_value` is `None`.
    argument_dtype: Dtype of the resulting tensor.

  Returns:
    A tensor of `argument_dtype` holding either `argument_value` or, when
    that is `None`, `argument_default`.
  """
  if argument_value is None:
    return constant_op.constant(
        argument_default, dtype=argument_dtype, name=argument_name)
  return ops.convert_to_tensor(
      argument_value, dtype=argument_dtype, name=argument_name)
def partial_shape_to_tensor(shape_like):
  """Returns a @{tf.Tensor} that represents the given shape.

  Args:
    shape_like: A value that can be converted to a @{tf.TensorShape} or a
      @{tf.Tensor}.

  Returns:
    A 1-D `tf.Tensor` of `tf.int64` elements representing the given shape,
    where `-1` is substituted for any unknown dimensions.

  Raises:
    ValueError: If the value converts to a tensor that is not 1-D.
    TypeError: If the value converts to a tensor whose element type is not
      `tf.int64`.
  """
  try:
    # Happy path: the input is shape-like. Produce the canonical tensor
    # form, with `-1` standing in for unknown (`None`) dimensions.
    shape_like = tensor_shape.as_shape(shape_like)
    dims = [-1 if dim is None else dim for dim in shape_like.as_list()]
    return ops.convert_to_tensor(dims, dtype=dtypes.int64)
  except (TypeError, ValueError):
    # The input was not trivially shape-like, so fall back on generic
    # tensor conversion and validate rank and element type ourselves.
    result = ops.convert_to_tensor(shape_like, preferred_dtype=dtypes.int64)
    if result.shape.dims is not None and len(result.shape.dims) != 1:
      raise ValueError("The given shape %s must be a 1-D tensor of tf.int64 "
                       "values, but the shape was %s."
                       % (shape_like, result.shape))
    if result.dtype != dtypes.int64:
      raise TypeError("The given shape %s must be a 1-D tensor of tf.int64 "
                      "values, but the element type was %s."
                      % (shape_like, result.dtype.name))
    return result
| apache-2.0 |
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/sklearn/preprocessing/data.py | 5 | 94481 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Eric Martin <eric@ericmart.in>
# Giorgio Patrini <giorgio.patrini@anu.edu.au>
# License: BSD 3 clause
from __future__ import division
from itertools import chain, combinations
import numbers
import warnings
from itertools import combinations_with_replacement as combinations_w_r
import numpy as np
from scipy import sparse
from scipy import stats
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six import string_types
from ..utils import check_array
from ..utils.extmath import row_norms
from ..utils.extmath import _incremental_mean_and_var
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale,
mean_variance_axis, incr_mean_variance_axis,
min_max_axis)
from ..utils.validation import (check_is_fitted, check_random_state,
FLOAT_DTYPES)
# NOTE(review): no consumer visible in this chunk; presumably a clipping
# tolerance used by QuantileTransformer further down the module — confirm.
BOUNDS_THRESHOLD = 1e-7

# Python 2/3 compatibility: shadow the builtins with the iterator variants.
zip = six.moves.zip
map = six.moves.map
range = six.moves.range

# Public API of this module.
__all__ = [
    'Binarizer',
    'KernelCenterer',
    'MinMaxScaler',
    'MaxAbsScaler',
    'Normalizer',
    'OneHotEncoder',
    'RobustScaler',
    'StandardScaler',
    'QuantileTransformer',
    'add_dummy_feature',
    'binarize',
    'normalize',
    'scale',
    'robust_scale',
    'maxabs_scale',
    'minmax_scale',
    'quantile_transform',
]
def _handle_zeros_in_scale(scale, copy=True):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == .0:
scale = 1.
return scale
elif isinstance(scale, np.ndarray):
if copy:
# New array to avoid side-effects
scale = scale.copy()
scale[scale == 0.0] = 1.0
return scale
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
    """Standardize a dataset along any axis
    Center to the mean and component wise scale to unit variance.
    Read more in the :ref:`User Guide <preprocessing_scaler>`.
    Parameters
    ----------
    X : {array-like, sparse matrix}
        The data to center and scale.
    axis : int (0 by default)
        axis used to compute the means and standard deviations along. If 0,
        independently standardize each feature, otherwise (if 1) standardize
        each sample.
    with_mean : boolean, True by default
        If True, center the data before scaling.
    with_std : boolean, True by default
        If True, scale the data to unit variance (or equivalently,
        unit standard deviation).
    copy : boolean, optional, default True
        set to False to perform inplace row normalization and avoid a
        copy (if the input is already a numpy array or a scipy.sparse
        CSC matrix and if axis is 1).
    Notes
    -----
    This implementation will refuse to center scipy.sparse matrices
    since it would make them non-sparse and would potentially crash the
    program with memory exhaustion problems.
    Instead the caller is expected to either set explicitly
    `with_mean=False` (in that case, only variance scaling will be
    performed on the features of the CSC matrix) or to call `X.toarray()`
    if he/she expects the materialized dense array to fit in memory.
    To avoid memory copy the caller should pass a CSC matrix.
    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
    See also
    --------
    StandardScaler: Performs scaling to unit variance using the``Transformer`` API
        (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
    """  # noqa
    X = check_array(X, accept_sparse='csc', copy=copy, ensure_2d=False,
                    warn_on_dtype=True, estimator='the scale function',
                    dtype=FLOAT_DTYPES)
    # Sparse path: only variance scaling is supported (centering would
    # densify the matrix, see the docstring Notes).
    if sparse.issparse(X):
        if with_mean:
            raise ValueError(
                "Cannot center sparse matrices: pass `with_mean=False` instead"
                " See docstring for motivation and alternatives.")
        if axis != 0:
            raise ValueError("Can only scale sparse matrix on axis=0, "
                             " got axis=%d" % axis)
        if with_std:
            _, var = mean_variance_axis(X, axis=0)
            var = _handle_zeros_in_scale(var, copy=False)
            inplace_column_scale(X, 1 / np.sqrt(var))
    else:
        X = np.asarray(X)
        if with_mean:
            mean_ = np.mean(X, axis)
        if with_std:
            scale_ = np.std(X, axis)
        # Xr is a view on the original array that enables easy use of
        # broadcasting on the axis in which we are interested in
        Xr = np.rollaxis(X, axis)
        if with_mean:
            Xr -= mean_
            mean_1 = Xr.mean(axis=0)
            # Verify that mean_1 is 'close to zero'. If X contains very
            # large values, mean_1 can also be very large, due to a lack of
            # precision of mean_. In this case, a pre-scaling of the
            # concerned feature is efficient, for instance by its mean or
            # maximum.
            if not np.allclose(mean_1, 0):
                warnings.warn("Numerical issues were encountered "
                              "when centering the data "
                              "and might not be solved. Dataset may "
                              "contain too large values. You may need "
                              "to prescale your features.")
                Xr -= mean_1
        if with_std:
            scale_ = _handle_zeros_in_scale(scale_, copy=False)
            Xr /= scale_
            if with_mean:
                mean_2 = Xr.mean(axis=0)
                # If mean_2 is not 'close to zero', it comes from the fact that
                # scale_ is very small so that mean_2 = mean_1/scale_ > 0, even
                # if mean_1 was close to zero. The problem is thus essentially
                # due to the lack of precision of mean_. A solution is then to
                # subtract the mean again:
                if not np.allclose(mean_2, 0):
                    warnings.warn("Numerical issues were encountered "
                                  "when scaling the data "
                                  "and might not be solved. The standard "
                                  "deviation of the data is probably "
                                  "very close to 0. ")
                    Xr -= mean_2
    return X
class MinMaxScaler(BaseEstimator, TransformerMixin):
    """Transforms features by scaling each feature to a given range.
    This estimator scales and translates each feature individually such
    that it is in the given range on the training set, i.e. between
    zero and one.
    The transformation is given by::
        X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
        X_scaled = X_std * (max - min) + min
    where min, max = feature_range.
    This transformation is often used as an alternative to zero mean,
    unit variance scaling.
    Read more in the :ref:`User Guide <preprocessing_scaler>`.
    Parameters
    ----------
    feature_range : tuple (min, max), default=(0, 1)
        Desired range of transformed data.
    copy : boolean, optional, default True
        Set to False to perform inplace row normalization and avoid a
        copy (if the input is already a numpy array).
    Attributes
    ----------
    min_ : ndarray, shape (n_features,)
        Per feature adjustment for minimum.
    scale_ : ndarray, shape (n_features,)
        Per feature relative scaling of the data.
        .. versionadded:: 0.17
           *scale_* attribute.
    data_min_ : ndarray, shape (n_features,)
        Per feature minimum seen in the data
        .. versionadded:: 0.17
           *data_min_*
    data_max_ : ndarray, shape (n_features,)
        Per feature maximum seen in the data
        .. versionadded:: 0.17
           *data_max_*
    data_range_ : ndarray, shape (n_features,)
        Per feature range ``(data_max_ - data_min_)`` seen in the data
        .. versionadded:: 0.17
           *data_range_*
    Examples
    --------
    >>> from sklearn.preprocessing import MinMaxScaler
    >>>
    >>> data = [[-1, 2], [-0.5, 6], [0, 10], [1, 18]]
    >>> scaler = MinMaxScaler()
    >>> print(scaler.fit(data))
    MinMaxScaler(copy=True, feature_range=(0, 1))
    >>> print(scaler.data_max_)
    [  1.  18.]
    >>> print(scaler.transform(data))
    [[ 0.    0.  ]
     [ 0.25  0.25]
     [ 0.5   0.5 ]
     [ 1.    1.  ]]
    >>> print(scaler.transform([[2, 2]]))
    [[ 1.5  0. ]]
    See also
    --------
    minmax_scale: Equivalent function without the estimator API.
    Notes
    -----
    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
    """

    def __init__(self, feature_range=(0, 1), copy=True):
        self.feature_range = feature_range
        self.copy = copy

    def _reset(self):
        """Reset internal data-dependent state of the scaler, if necessary.
        __init__ parameters are not touched.
        """
        # Checking one attribute is enough, because they are all set together
        # in partial_fit
        if hasattr(self, 'scale_'):
            del self.scale_
            del self.min_
            del self.n_samples_seen_
            del self.data_min_
            del self.data_max_
            del self.data_range_

    def fit(self, X, y=None):
        """Compute the minimum and maximum to be used for later scaling.
        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            The data used to compute the per-feature minimum and maximum
            used for later scaling along the features axis.
        """
        # Reset internal state before fitting
        self._reset()
        return self.partial_fit(X, y)

    def partial_fit(self, X, y=None):
        """Online computation of min and max on X for later scaling.
        All of X is processed as a single batch. This is intended for cases
        when `fit` is not feasible due to very large number of `n_samples`
        or because X is read from a continuous stream.
        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            The data used to compute the mean and standard deviation
            used for later scaling along the features axis.
        y : Passthrough for ``Pipeline`` compatibility.
        """
        feature_range = self.feature_range
        if feature_range[0] >= feature_range[1]:
            raise ValueError("Minimum of desired feature range must be smaller"
                             " than maximum. Got %s." % str(feature_range))
        if sparse.issparse(X):
            raise TypeError("MinMaxScaler does no support sparse input. "
                            "You may consider to use MaxAbsScaler instead.")
        X = check_array(X, copy=self.copy, warn_on_dtype=True,
                        estimator=self, dtype=FLOAT_DTYPES)
        data_min = np.min(X, axis=0)
        data_max = np.max(X, axis=0)
        # First pass
        if not hasattr(self, 'n_samples_seen_'):
            self.n_samples_seen_ = X.shape[0]
        # Next steps: merge the running min/max with this batch's.
        else:
            data_min = np.minimum(self.data_min_, data_min)
            data_max = np.maximum(self.data_max_, data_max)
            self.n_samples_seen_ += X.shape[0]
        data_range = data_max - data_min
        # Constant features (zero range) get scale 1 via
        # _handle_zeros_in_scale so transform leaves them untouched.
        self.scale_ = ((feature_range[1] - feature_range[0]) /
                       _handle_zeros_in_scale(data_range))
        self.min_ = feature_range[0] - data_min * self.scale_
        self.data_min_ = data_min
        self.data_max_ = data_max
        self.data_range_ = data_range
        return self

    def transform(self, X):
        """Scaling features of X according to feature_range.
        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            Input data that will be transformed.
        """
        check_is_fitted(self, 'scale_')
        X = check_array(X, copy=self.copy, dtype=FLOAT_DTYPES)
        X *= self.scale_
        X += self.min_
        return X

    def inverse_transform(self, X):
        """Undo the scaling of X according to feature_range.
        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            Input data that will be transformed. It cannot be sparse.
        """
        check_is_fitted(self, 'scale_')
        X = check_array(X, copy=self.copy, dtype=FLOAT_DTYPES)
        X -= self.min_
        X /= self.scale_
        return X
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
    """Transforms features by scaling each feature to a given range.

    Functional counterpart of :class:`MinMaxScaler`; see its documentation
    for the underlying transformation. Unlike the estimator, this function
    also accepts 1-D input.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        The data.
    feature_range : tuple (min, max), default=(0, 1)
        Desired range of transformed data.
    axis : int (0 by default)
        axis used to scale along. If 0, independently scale each feature,
        otherwise (if 1) scale each sample.
    copy : boolean, optional, default is True
        Set to False to perform inplace scaling and avoid a copy (if the input
        is already a numpy array).

    See also
    --------
    MinMaxScaler: Performs scaling to a given range using the ``Transformer``
        API (e.g. as part of a preprocessing
        :class:`sklearn.pipeline.Pipeline`).
    """
    # Validation only here; any required copy happens inside MinMaxScaler.
    X = check_array(X, copy=False, ensure_2d=False, warn_on_dtype=True,
                    dtype=FLOAT_DTYPES)
    was_1d = X.ndim == 1
    if was_1d:
        X = X.reshape(X.shape[0], 1)

    scaler = MinMaxScaler(feature_range=feature_range, copy=copy)
    if axis == 0:
        X = scaler.fit_transform(X)
    else:
        X = scaler.fit_transform(X.T).T

    return X.ravel() if was_1d else X
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
individual feature do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
that others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
This scaler can also be applied to sparse CSR or CSC matrices by passing
`with_mean=False` to avoid breaking the sparsity structure of the data.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_*
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
var_ : array of floats with shape [n_features]
The variance for each feature in the training set. Used to compute
`scale_`
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
Examples
--------
>>> from sklearn.preprocessing import StandardScaler
>>>
>>> data = [[0, 0], [0, 0], [1, 1], [1, 1]]
>>> scaler = StandardScaler()
>>> print(scaler.fit(data))
StandardScaler(copy=True, with_mean=True, with_std=True)
>>> print(scaler.mean_)
[ 0.5 0.5]
>>> print(scaler.transform(data))
[[-1. -1.]
[-1. -1.]
[ 1. 1.]
[ 1. 1.]]
>>> print(scaler.transform([[2, 2]]))
[[ 3. 3.]]
See also
--------
scale: Equivalent function without the estimator API.
:class:`sklearn.decomposition.PCA`
Further removes the linear correlation across features with 'whiten=True'.
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
""" # noqa
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
# Checking one attribute is enough, becase they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.n_samples_seen_
del self.mean_
del self.var_
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
    def partial_fit(self, X, y=None):
        """Online computation of mean and std on X for later scaling.
        All of X is processed as a single batch. This is intended for cases
        when `fit` is not feasible due to very large number of `n_samples`
        or because X is read from a continuous stream.
        The algorithm for incremental mean and std is given in Equation 1.5a,b
        in Chan, Tony F., Gene H. Golub, and Randall J. LeVeque. "Algorithms
        for computing the sample variance: Analysis and recommendations."
        The American Statistician 37.3 (1983): 242-247:
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape [n_samples, n_features]
            The data used to compute the mean and standard deviation
            used for later scaling along the features axis.
        y : Passthrough for ``Pipeline`` compatibility.
        """
        X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
                        warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES)
        # Even in the case of `with_mean=False`, we update the mean anyway
        # This is needed for the incremental computation of the var
        # See incr_mean_variance_axis and _incremental_mean_variance_axis
        if sparse.issparse(X):
            if self.with_mean:
                # Centering a sparse matrix would densify it; refuse early.
                raise ValueError(
                    "Cannot center sparse matrices: pass `with_mean=False` "
                    "instead. See docstring for motivation and alternatives.")
            if self.with_std:
                # First pass: no running statistics exist yet.
                if not hasattr(self, 'n_samples_seen_'):
                    self.mean_, self.var_ = mean_variance_axis(X, axis=0)
                    self.n_samples_seen_ = X.shape[0]
                # Next passes: merge this batch into the running statistics.
                else:
                    self.mean_, self.var_, self.n_samples_seen_ = \
                        incr_mean_variance_axis(X, axis=0,
                                                last_mean=self.mean_,
                                                last_var=self.var_,
                                                last_n=self.n_samples_seen_)
            else:
                # Sparse input with with_std=False: nothing to accumulate.
                self.mean_ = None
                self.var_ = None
        else:
            # First pass: seed the accumulators before the incremental update.
            if not hasattr(self, 'n_samples_seen_'):
                self.mean_ = .0
                self.n_samples_seen_ = 0
                if self.with_std:
                    self.var_ = .0
                else:
                    self.var_ = None
            # Dense path: mean/var/count are updated in one fused call.
            self.mean_, self.var_, self.n_samples_seen_ = \
                _incremental_mean_and_var(X, self.mean_, self.var_,
                                          self.n_samples_seen_)
        if self.with_std:
            # Zero-variance features get scale 1 to avoid division by zero.
            self.scale_ = _handle_zeros_in_scale(np.sqrt(self.var_))
        else:
            self.scale_ = None
        return self
def transform(self, X, y='deprecated', copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
y : (ignored)
.. deprecated:: 0.19
This parameter will be removed in 0.21.
copy : bool, optional (default: None)
Copy the input X or not.
"""
if not isinstance(y, string_types) or y != 'deprecated':
warnings.warn("The parameter y on transform() is "
"deprecated since 0.19 and will be removed in 0.21",
DeprecationWarning)
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.scale_ is not None:
inplace_column_scale(X, 1 / self.scale_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.scale_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
copy : bool, optional (default: None)
Copy the input X or not.
Returns
-------
X_tr : array-like, shape [n_samples, n_features]
Transformed array.
"""
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.scale_ is not None:
inplace_column_scale(X, self.scale_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.scale_
if self.with_mean:
X += self.mean_
return X
class MaxAbsScaler(BaseEstimator, TransformerMixin):
    """Scale each feature by its maximum absolute value.

    This estimator scales and translates each feature individually such
    that the maximal absolute value of each feature in the
    training set will be 1.0. It does not shift/center the data, and
    thus does not destroy any sparsity.

    This scaler can also be applied to sparse CSR or CSC matrices.

    .. versionadded:: 0.17

    Parameters
    ----------
    copy : boolean, optional, default is True
        Set to False to perform inplace scaling and avoid a copy (if the input
        is already a numpy array).

    Attributes
    ----------
    scale_ : ndarray, shape (n_features,)
        Per feature relative scaling of the data.

        .. versionadded:: 0.17
           *scale_* attribute.

    max_abs_ : ndarray, shape (n_features,)
        Per feature maximum absolute value.

    n_samples_seen_ : int
        The number of samples processed by the estimator. Will be reset on
        new calls to fit, but increments across ``partial_fit`` calls.

    See also
    --------
    maxabs_scale: Equivalent function without the estimator API.

    Notes
    -----
    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
    """

    def __init__(self, copy=True):
        self.copy = copy

    def _reset(self):
        """Reset internal data-dependent state of the scaler, if necessary.

        __init__ parameters are not touched.
        """
        # All fitted attributes are set together in partial_fit, so the
        # presence of ``scale_`` implies the presence of the others.
        if hasattr(self, 'scale_'):
            for attr in ('scale_', 'n_samples_seen_', 'max_abs_'):
                delattr(self, attr)

    def fit(self, X, y=None):
        """Compute the maximum absolute value to be used for later scaling.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape [n_samples, n_features]
            The data used to compute the per-feature minimum and maximum
            used for later scaling along the features axis.
        """
        # Drop previous statistics, then process X as a single batch.
        self._reset()
        return self.partial_fit(X, y)

    def partial_fit(self, X, y=None):
        """Online computation of max absolute value of X for later scaling.

        All of X is processed as a single batch. This is intended for cases
        when `fit` is not feasible due to very large number of `n_samples`
        or because X is read from a continuous stream.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape [n_samples, n_features]
            The data used to compute the mean and standard deviation
            used for later scaling along the features axis.

        y : Passthrough for ``Pipeline`` compatibility.
        """
        X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
                        estimator=self, dtype=FLOAT_DTYPES)

        if sparse.issparse(X):
            # Per-column extrema give the per-column max magnitude without
            # densifying the matrix.
            col_min, col_max = min_max_axis(X, axis=0)
            batch_max_abs = np.maximum(np.abs(col_min), np.abs(col_max))
        else:
            batch_max_abs = np.abs(X).max(axis=0)

        if hasattr(self, 'n_samples_seen_'):
            # Subsequent batches: merge with the running maximum.
            batch_max_abs = np.maximum(self.max_abs_, batch_max_abs)
            self.n_samples_seen_ += X.shape[0]
        else:
            # First batch.
            self.n_samples_seen_ = X.shape[0]

        self.max_abs_ = batch_max_abs
        # Columns whose max-abs is 0 get scale 1 to avoid division by zero.
        self.scale_ = _handle_zeros_in_scale(batch_max_abs)
        return self

    def transform(self, X):
        """Scale the data

        Parameters
        ----------
        X : {array-like, sparse matrix}
            The data that should be scaled.
        """
        check_is_fitted(self, 'scale_')
        X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
                        estimator=self, dtype=FLOAT_DTYPES)

        if sparse.issparse(X):
            inplace_column_scale(X, 1.0 / self.scale_)
            return X
        X /= self.scale_
        return X

    def inverse_transform(self, X):
        """Scale back the data to the original representation

        Parameters
        ----------
        X : {array-like, sparse matrix}
            The data that should be transformed back.
        """
        check_is_fitted(self, 'scale_')
        X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
                        estimator=self, dtype=FLOAT_DTYPES)

        if sparse.issparse(X):
            inplace_column_scale(X, self.scale_)
            return X
        X *= self.scale_
        return X
def maxabs_scale(X, axis=0, copy=True):
    """Scale each feature to the [-1, 1] range without breaking the sparsity.

    This estimator scales each feature individually such
    that the maximal absolute value of each feature in the
    training set will be 1.0.

    This scaler can also be applied to sparse CSR or CSC matrices.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        The data.

    axis : int (0 by default)
        axis used to scale along. If 0, independently scale each feature,
        otherwise (if 1) scale each sample.

    copy : boolean, optional, default is True
        Set to False to perform inplace scaling and avoid a copy (if the input
        is already a numpy array).

    See also
    --------
    MaxAbsScaler: Performs scaling to the [-1, 1] range using the
        ``Transformer`` API (e.g. as part of a preprocessing
        :class:`sklearn.pipeline.Pipeline`).

    Notes
    -----
    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
    """
    # Unlike the scaler object, this function accepts 1d input. The copy
    # (if requested) is performed inside the scaler object, hence copy=False
    # here.
    X = check_array(X, accept_sparse=('csr', 'csc'), copy=False,
                    ensure_2d=False, dtype=FLOAT_DTYPES)
    was_1d = (X.ndim == 1)
    if was_1d:
        X = X.reshape(X.shape[0], 1)

    scaler = MaxAbsScaler(copy=copy)
    # axis=1 is handled by scaling the transpose and transposing back.
    X = scaler.fit_transform(X) if axis == 0 else scaler.fit_transform(X.T).T

    return X.ravel() if was_1d else X
class RobustScaler(BaseEstimator, TransformerMixin):
    """Scale features using statistics that are robust to outliers.

    This Scaler removes the median and scales the data according to
    the quantile range (defaults to IQR: Interquartile Range).
    The IQR is the range between the 1st quartile (25th quantile)
    and the 3rd quartile (75th quantile).

    Centering and scaling happen independently on each feature (or each
    sample, depending on the ``axis`` argument) by computing the relevant
    statistics on the samples in the training set. Median and interquartile
    range are then stored to be used on later data using the ``transform``
    method.

    Standardization of a dataset is a common requirement for many
    machine learning estimators. Typically this is done by removing the mean
    and scaling to unit variance. However, outliers can often influence the
    sample mean / variance in a negative way. In such cases, the median and
    the interquartile range often give better results.

    .. versionadded:: 0.17

    Read more in the :ref:`User Guide <preprocessing_scaler>`.

    Parameters
    ----------
    with_centering : boolean, True by default
        If True, center the data before scaling.
        This will cause ``transform`` to raise an exception when attempted on
        sparse matrices, because centering them entails building a dense
        matrix which in common use cases is likely to be too large to fit in
        memory.

    with_scaling : boolean, True by default
        If True, scale the data to interquartile range.

    quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0
        Default: (25.0, 75.0) = (1st quantile, 3rd quantile) = IQR
        Quantile range used to calculate ``scale_``.

        .. versionadded:: 0.18

    copy : boolean, optional, default is True
        If False, try to avoid a copy and do inplace scaling instead.
        This is not guaranteed to always work inplace; e.g. if the data is
        not a NumPy array or scipy.sparse CSR matrix, a copy may still be
        returned.

    Attributes
    ----------
    center_ : array of floats
        The median value for each feature in the training set.

    scale_ : array of floats
        The (scaled) interquartile range for each feature in the training set.

        .. versionadded:: 0.17
           *scale_* attribute.

    See also
    --------
    robust_scale: Equivalent function without the estimator API.

    :class:`sklearn.decomposition.PCA`
        Further removes the linear correlation across features with
        'whiten=True'.

    Notes
    -----
    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.

    https://en.wikipedia.org/wiki/Median_(statistics)
    https://en.wikipedia.org/wiki/Interquartile_range
    """

    def __init__(self, with_centering=True, with_scaling=True,
                 quantile_range=(25.0, 75.0), copy=True):
        self.with_centering = with_centering
        self.with_scaling = with_scaling
        self.quantile_range = quantile_range
        self.copy = copy

    def _check_array(self, X, copy):
        """Validate X and make sure centering is not enabled for sparse input.

        Parameters
        ----------
        X : {array-like, sparse matrix}
            Input data to validate.
        copy : bool
            Whether ``check_array`` may copy the input.
        """
        # Fix: honour the ``copy`` argument instead of always reading
        # ``self.copy`` (previously the parameter was silently ignored).
        # Both existing call sites pass ``self.copy``, so their behaviour
        # is unchanged.
        X = check_array(X, accept_sparse=('csr', 'csc'), copy=copy,
                        estimator=self, dtype=FLOAT_DTYPES)

        if sparse.issparse(X):
            if self.with_centering:
                raise ValueError(
                    "Cannot center sparse matrices: use `with_centering=False`"
                    " instead. See docstring for motivation and alternatives.")
        return X

    def fit(self, X, y=None):
        """Compute the median and quantiles to be used for scaling.

        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            The data used to compute the median and quantiles
            used for later scaling along the features axis.
        """
        if sparse.issparse(X):
            raise TypeError("RobustScaler cannot be fitted on sparse inputs")

        X = self._check_array(X, self.copy)
        if self.with_centering:
            self.center_ = np.median(X, axis=0)

        if self.with_scaling:
            q_min, q_max = self.quantile_range
            if not 0 <= q_min <= q_max <= 100:
                raise ValueError("Invalid quantile range: %s" %
                                 str(self.quantile_range))

            # q has shape (2, n_features): the two requested percentiles
            # of each feature.
            q = np.percentile(X, self.quantile_range, axis=0)
            self.scale_ = (q[1] - q[0])
            # Features with a zero quantile range get scale 1 so that
            # transform() does not divide by zero.
            self.scale_ = _handle_zeros_in_scale(self.scale_, copy=False)
        return self

    def transform(self, X):
        """Center and scale the data.

        Can be called on sparse input, provided that ``RobustScaler`` has been
        fitted to dense input and ``with_centering=False``.

        Parameters
        ----------
        X : {array-like, sparse matrix}
            The data used to scale along the specified axis.
        """
        if self.with_centering:
            check_is_fitted(self, 'center_')
        if self.with_scaling:
            check_is_fitted(self, 'scale_')
        X = self._check_array(X, self.copy)

        if sparse.issparse(X):
            # Sparse input reaches here only with with_centering=False.
            if self.with_scaling:
                inplace_column_scale(X, 1.0 / self.scale_)
        else:
            if self.with_centering:
                X -= self.center_
            if self.with_scaling:
                X /= self.scale_
        return X

    def inverse_transform(self, X):
        """Scale back the data to the original representation

        Parameters
        ----------
        X : array-like
            The data used to scale along the specified axis.
        """
        if self.with_centering:
            check_is_fitted(self, 'center_')
        if self.with_scaling:
            check_is_fitted(self, 'scale_')
        X = self._check_array(X, self.copy)

        if sparse.issparse(X):
            if self.with_scaling:
                inplace_column_scale(X, self.scale_)
        else:
            # Invert transform(): multiply back the scale, then re-add
            # the center.
            if self.with_scaling:
                X *= self.scale_
            if self.with_centering:
                X += self.center_
        return X
def robust_scale(X, axis=0, with_centering=True, with_scaling=True,
                 quantile_range=(25.0, 75.0), copy=True):
    """Standardize a dataset along any axis
    Center to the median and component wise scale
    according to the interquartile range.
    Read more in the :ref:`User Guide <preprocessing_scaler>`.
    Parameters
    ----------
    X : array-like
        The data to center and scale.
    axis : int (0 by default)
        axis used to compute the medians and IQR along. If 0,
        independently scale each feature, otherwise (if 1) scale
        each sample.
    with_centering : boolean, True by default
        If True, center the data before scaling.
    with_scaling : boolean, True by default
        If True, scale the data to the quantile range (by default,
        the interquartile range).
    quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0
        Default: (25.0, 75.0) = (1st quantile, 3rd quantile) = IQR
        Quantile range used to calculate ``scale_``.
        .. versionadded:: 0.18
    copy : boolean, optional, default is True
        set to False to perform inplace row normalization and avoid a
        copy (if the input is already a numpy array or a scipy.sparse
        CSR matrix and if axis is 1).
    Notes
    -----
    This implementation will refuse to center scipy.sparse matrices
    since it would make them non-sparse and would potentially crash the
    program with memory exhaustion problems.
    Instead the caller is expected to either set explicitly
    `with_centering=False` (in that case, only variance scaling will be
    performed on the features of the CSR matrix) or to call `X.toarray()`
    if he/she expects the materialized dense array to fit in memory.
    To avoid memory copy the caller should pass a CSR matrix.
    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
    See also
    --------
    RobustScaler: Performs centering and scaling using the ``Transformer`` API
        (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
    """
    # Delegate to the estimator; axis=1 is handled by scaling the transpose
    # and transposing back.
    s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
                     quantile_range=quantile_range, copy=copy)
    if axis == 0:
        return s.fit_transform(X)
    else:
        return s.fit_transform(X.T).T
class PolynomialFeatures(BaseEstimator, TransformerMixin):
    """Generate polynomial and interaction features.

    Generate a new feature matrix consisting of all polynomial combinations
    of the features with degree less than or equal to the specified degree.
    For example, if an input sample is two dimensional and of the form
    [a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].

    Parameters
    ----------
    degree : integer
        The degree of the polynomial features. Default = 2.

    interaction_only : boolean, default = False
        If true, only interaction features are produced: features that are
        products of at most ``degree`` *distinct* input features (so not
        ``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).

    include_bias : boolean
        If True (default), then include a bias column, the feature in which
        all polynomial powers are zero (i.e. a column of ones - acts as an
        intercept term in a linear model).

    Examples
    --------
    >>> X = np.arange(6).reshape(3, 2)
    >>> X
    array([[0, 1],
           [2, 3],
           [4, 5]])
    >>> poly = PolynomialFeatures(2)
    >>> poly.fit_transform(X)
    array([[  1.,   0.,   1.,   0.,   0.,   1.],
           [  1.,   2.,   3.,   4.,   6.,   9.],
           [  1.,   4.,   5.,  16.,  20.,  25.]])
    >>> poly = PolynomialFeatures(interaction_only=True)
    >>> poly.fit_transform(X)
    array([[  1.,   0.,   1.,   0.],
           [  1.,   2.,   3.,   6.],
           [  1.,   4.,   5.,  20.]])

    Attributes
    ----------
    powers_ : array, shape (n_output_features, n_input_features)
        powers_[i, j] is the exponent of the jth input in the ith output.

    n_input_features_ : int
        The total number of input features.

    n_output_features_ : int
        The total number of polynomial output features. The number of output
        features is computed by iterating over all suitably sized combinations
        of input features.

    Notes
    -----
    Be aware that the number of features in the output array scales
    polynomially in the number of features of the input array, and
    exponentially in the degree. High degrees can cause overfitting.

    See :ref:`examples/linear_model/plot_polynomial_interpolation.py
    <sphx_glr_auto_examples_linear_model_plot_polynomial_interpolation.py>`
    """

    def __init__(self, degree=2, interaction_only=False, include_bias=True):
        self.degree = degree
        self.interaction_only = interaction_only
        self.include_bias = include_bias

    @staticmethod
    def _combinations(n_features, degree, interaction_only, include_bias):
        # Each combination is a tuple of input-feature indices whose product
        # forms one output feature; with interaction_only, indices within a
        # combination are distinct (no repeated factors).
        comb = (combinations if interaction_only else combinations_w_r)
        start = int(not include_bias)
        return chain.from_iterable(comb(range(n_features), i)
                                   for i in range(start, degree + 1))

    @property
    def powers_(self):
        check_is_fitted(self, 'n_input_features_')

        combinations = self._combinations(self.n_input_features_, self.degree,
                                          self.interaction_only,
                                          self.include_bias)
        # bincount turns an index tuple into a per-feature exponent vector.
        # Pass a list, not a bare generator: np.vstack on a generator is
        # deprecated in numpy >= 1.16 and removed later.
        return np.vstack([np.bincount(c, minlength=self.n_input_features_)
                          for c in combinations])

    def get_feature_names(self, input_features=None):
        """
        Return feature names for output features

        Parameters
        ----------
        input_features : list of string, length n_features, optional
            String names for input features if available. By default,
            "x0", "x1", ... "xn_features" is used.

        Returns
        -------
        output_feature_names : list of string, length n_output_features
        """
        powers = self.powers_
        if input_features is None:
            input_features = ['x%d' % i for i in range(powers.shape[1])]
        feature_names = []
        for row in powers:
            inds = np.where(row)[0]
            if len(inds):
                # "x0 x1^2" style: omit the exponent when it is 1.
                name = " ".join("%s^%d" % (input_features[ind], exp)
                                if exp != 1 else input_features[ind]
                                for ind, exp in zip(inds, row[inds]))
            else:
                # All-zero powers row is the bias column.
                name = "1"
            feature_names.append(name)
        return feature_names

    def fit(self, X, y=None):
        """
        Compute number of output features.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            The data.

        Returns
        -------
        self : instance
        """
        n_samples, n_features = check_array(X).shape
        combinations = self._combinations(n_features, self.degree,
                                          self.interaction_only,
                                          self.include_bias)
        self.n_input_features_ = n_features
        self.n_output_features_ = sum(1 for _ in combinations)
        return self

    def transform(self, X):
        """Transform data to polynomial features

        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            The data to transform, row by row.

        Returns
        -------
        XP : np.ndarray shape [n_samples, NP]
            The matrix of features, where NP is the number of polynomial
            features generated from the combination of inputs.
        """
        check_is_fitted(self, ['n_input_features_', 'n_output_features_'])

        X = check_array(X, dtype=FLOAT_DTYPES)
        n_samples, n_features = X.shape

        if n_features != self.n_input_features_:
            raise ValueError("X shape does not match training shape")

        # allocate output data
        XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)

        combinations = self._combinations(n_features, self.degree,
                                          self.interaction_only,
                                          self.include_bias)
        for i, c in enumerate(combinations):
            # Product over the selected columns; an empty tuple (bias)
            # yields a column of ones.
            XP[:, i] = X[:, c].prod(1)

        return XP
def normalize(X, norm='l2', axis=1, copy=True, return_norm=False):
    """Scale input vectors individually to unit norm (vector length).
    Read more in the :ref:`User Guide <preprocessing_normalization>`.
    Parameters
    ----------
    X : {array-like, sparse matrix}, shape [n_samples, n_features]
        The data to normalize, element by element.
        scipy.sparse matrices should be in CSR format to avoid an
        un-necessary copy.
    norm : 'l1', 'l2', or 'max', optional ('l2' by default)
        The norm to use to normalize each non zero sample (or each non-zero
        feature if axis is 0).
    axis : 0 or 1, optional (1 by default)
        axis used to normalize the data along. If 1, independently normalize
        each sample, otherwise (if 0) normalize each feature.
    copy : boolean, optional, default True
        set to False to perform inplace row normalization and avoid a
        copy (if the input is already a numpy array or a scipy.sparse
        CSR matrix and if axis is 1).
    return_norm : boolean, default False
        whether to return the computed norms
    Returns
    -------
    X : {array-like, sparse matrix}, shape [n_samples, n_features]
        Normalized input X.
    norms : array, shape [n_samples] if axis=1 else [n_features]
        An array of norms along given axis for X.
        When X is sparse, a NotImplementedError will be raised
        for norm 'l1' or 'l2'.
    See also
    --------
    Normalizer: Performs normalization using the ``Transformer`` API
        (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
    Notes
    -----
    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
    """
    if norm not in ('l1', 'l2', 'max'):
        raise ValueError("'%s' is not a supported norm" % norm)

    # Feature-wise normalization (axis=0) is done by transposing, normalizing
    # the rows, and transposing back; pick the sparse format whose transpose
    # is row-major.
    if axis == 0:
        sparse_format = 'csc'
    elif axis == 1:
        sparse_format = 'csr'
    else:
        raise ValueError("'%d' is not a supported axis" % axis)

    X = check_array(X, sparse_format, copy=copy,
                    estimator='the normalize function', dtype=FLOAT_DTYPES)
    if axis == 0:
        X = X.T

    if sparse.issparse(X):
        # The sparse in-place l1/l2 helpers do not report the norms, so
        # return_norm is only supported for 'max' on sparse input.
        if return_norm and norm in ('l1', 'l2'):
            raise NotImplementedError("return_norm=True is not implemented "
                                      "for sparse matrices with norm 'l1' "
                                      "or norm 'l2'")
        if norm == 'l1':
            inplace_csr_row_normalize_l1(X)
        elif norm == 'l2':
            inplace_csr_row_normalize_l2(X)
        elif norm == 'max':
            _, norms = min_max_axis(X, 1)
            # Repeat each row's norm once per stored entry of that row
            # (CSR layout), so X.data can be divided element-wise.
            norms_elementwise = norms.repeat(np.diff(X.indptr))
            mask = norms_elementwise != 0
            X.data[mask] /= norms_elementwise[mask]
    else:
        if norm == 'l1':
            norms = np.abs(X).sum(axis=1)
        elif norm == 'l2':
            norms = row_norms(X)
        elif norm == 'max':
            norms = np.max(X, axis=1)
        # Zero norms are replaced by 1 so all-zero rows stay unchanged.
        norms = _handle_zeros_in_scale(norms, copy=False)
        X /= norms[:, np.newaxis]

    if axis == 0:
        X = X.T

    if return_norm:
        return X, norms
    else:
        return X
class Normalizer(BaseEstimator, TransformerMixin):
    """Normalize samples individually to unit norm.

    Each sample (i.e. each row of the data matrix) with at least one
    non zero component is rescaled independently of other samples so
    that its norm (l1 or l2) equals one.

    This transformer is able to work both with dense numpy arrays and
    scipy.sparse matrix (use CSR format if you want to avoid the burden of
    a copy / conversion).

    Scaling inputs to unit norms is a common operation for text
    classification or clustering for instance. For instance the dot
    product of two l2-normalized TF-IDF vectors is the cosine similarity
    of the vectors and is the base similarity metric for the Vector
    Space Model commonly used by the Information Retrieval community.

    Read more in the :ref:`User Guide <preprocessing_normalization>`.

    Parameters
    ----------
    norm : 'l1', 'l2', or 'max', optional ('l2' by default)
        The norm to use to normalize each non zero sample.

    copy : boolean, optional, default True
        set to False to perform inplace row normalization and avoid a
        copy (if the input is already a numpy array or a scipy.sparse
        CSR matrix).

    Notes
    -----
    This estimator is stateless (besides constructor parameters), the
    fit method does nothing but is useful when used in a pipeline.

    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.

    See also
    --------
    normalize: Equivalent function without the estimator API.
    """

    def __init__(self, norm='l2', copy=True):
        self.norm = norm
        self.copy = copy

    def fit(self, X, y=None):
        """Do nothing and return the estimator unchanged

        This method is just there to implement the usual API and hence
        work in pipelines.

        Parameters
        ----------
        X : array-like
        """
        # Validation only: this transformer keeps no fitted state.
        check_array(X, accept_sparse='csr')
        return self

    def transform(self, X, y='deprecated', copy=None):
        """Scale each non zero row of X to unit norm

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape [n_samples, n_features]
            The data to normalize, row by row. scipy.sparse matrices should be
            in CSR format to avoid an un-necessary copy.
        y : (ignored)
            .. deprecated:: 0.19
               This parameter will be removed in 0.21.
        copy : bool, optional (default: None)
            Copy the input X or not.
        """
        # The string sentinel marks "y not supplied".
        y_was_passed = not isinstance(y, string_types) or y != 'deprecated'
        if y_was_passed:
            warnings.warn("The parameter y on transform() is "
                          "deprecated since 0.19 and will be removed in 0.21",
                          DeprecationWarning)

        if copy is None:
            copy = self.copy
        X = check_array(X, accept_sparse='csr')
        return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
    """Boolean thresholding of array-like or scipy.sparse matrix

    Read more in the :ref:`User Guide <preprocessing_binarization>`.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape [n_samples, n_features]
        The data to binarize, element by element.
        scipy.sparse matrices should be in CSR or CSC format to avoid an
        un-necessary copy.

    threshold : float, optional (0.0 by default)
        Feature values below or equal to this are replaced by 0, above it by 1.
        Threshold may not be less than 0 for operations on sparse matrices.

    copy : boolean, optional, default True
        set to False to perform inplace binarization and avoid a copy
        (if the input is already a numpy array or a scipy.sparse CSR / CSC
        matrix and if axis is 1).

    See also
    --------
    Binarizer: Performs binarization using the ``Transformer`` API
        (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
    """
    X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
    if sparse.issparse(X):
        # A negative threshold would require materialising the implicit
        # zeros (they would all map to 1), so it is rejected.
        if threshold < 0:
            raise ValueError('Cannot binarize a sparse matrix with threshold '
                             '< 0')
        above = X.data > threshold
        X.data[above] = 1
        X.data[~above] = 0
        # Entries that became 0 are removed from the sparse structure.
        X.eliminate_zeros()
    else:
        above = X > threshold
        X[above] = 1
        X[~above] = 0
    return X
class Binarizer(BaseEstimator, TransformerMixin):
    """Binarize data (set feature values to 0 or 1) according to a threshold

    Values greater than the threshold map to 1, while values less than
    or equal to the threshold map to 0. With the default threshold of 0,
    only positive values map to 1.

    Binarization is a common operation on text count data where the
    analyst can decide to only consider the presence or absence of a
    feature rather than a quantified number of occurrences for instance.

    It can also be used as a pre-processing step for estimators that
    consider boolean random variables (e.g. modelled using the Bernoulli
    distribution in a Bayesian setting).

    Read more in the :ref:`User Guide <preprocessing_binarization>`.

    Parameters
    ----------
    threshold : float, optional (0.0 by default)
        Feature values below or equal to this are replaced by 0, above it by 1.
        Threshold may not be less than 0 for operations on sparse matrices.

    copy : boolean, optional, default True
        set to False to perform inplace binarization and avoid a copy (if
        the input is already a numpy array or a scipy.sparse CSR matrix).

    Notes
    -----
    If the input is a sparse matrix, only the non-zero values are subject
    to update by the Binarizer class.

    This estimator is stateless (besides constructor parameters), the
    fit method does nothing but is useful when used in a pipeline.

    See also
    --------
    binarize: Equivalent function without the estimator API.
    """

    def __init__(self, threshold=0.0, copy=True):
        self.threshold = threshold
        self.copy = copy

    def fit(self, X, y=None):
        """Do nothing and return the estimator unchanged

        This method is just there to implement the usual API and hence
        work in pipelines.

        Parameters
        ----------
        X : array-like
        """
        # Validation only: this transformer keeps no fitted state.
        check_array(X, accept_sparse='csr')
        return self

    def transform(self, X, y='deprecated', copy=None):
        """Binarize each element of X

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape [n_samples, n_features]
            The data to binarize, element by element.
            scipy.sparse matrices should be in CSR format to avoid an
            un-necessary copy.
        y : (ignored)
            .. deprecated:: 0.19
               This parameter will be removed in 0.21.
        copy : bool
            Copy the input X or not.
        """
        # The string sentinel marks "y not supplied".
        if not isinstance(y, string_types) or y != 'deprecated':
            warnings.warn("The parameter y on transform() is "
                          "deprecated since 0.19 and will be removed in 0.21",
                          DeprecationWarning)

        if copy is None:
            copy = self.copy
        return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
    """Center a kernel matrix

    Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
    function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
    normalize to have zero mean) the data without explicitly computing phi(x).
    It is equivalent to centering phi(x) with
    sklearn.preprocessing.StandardScaler(with_std=False).

    Read more in the :ref:`User Guide <kernel_centering>`.
    """

    def fit(self, K, y=None):
        """Fit KernelCenterer

        Parameters
        ----------
        K : numpy array of shape [n_samples, n_samples]
            Kernel matrix.

        Returns
        -------
        self : returns an instance of self.
        """
        K = check_array(K, dtype=FLOAT_DTYPES)
        n_samples = K.shape[0]
        # Column means of the training kernel, and their grand mean.
        self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
        self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
        return self

    def transform(self, K, y='deprecated', copy=True):
        """Center kernel matrix.

        Parameters
        ----------
        K : numpy array of shape [n_samples1, n_samples2]
            Kernel matrix.
        y : (ignored)
            .. deprecated:: 0.19
               This parameter will be removed in 0.21.
        copy : boolean, optional, default True
            Set to False to perform inplace computation.

        Returns
        -------
        K_new : numpy array of shape [n_samples1, n_samples2]
        """
        # The string sentinel marks "y not supplied".
        if not isinstance(y, string_types) or y != 'deprecated':
            warnings.warn("The parameter y on transform() is "
                          "deprecated since 0.19 and will be removed in 0.21",
                          DeprecationWarning)

        check_is_fitted(self, 'K_fit_all_')

        K = check_array(K, copy=copy, dtype=FLOAT_DTYPES)

        # Row means of K against the training samples, as a column vector.
        pred_cols = (np.sum(K, axis=1) /
                     self.K_fit_rows_.shape[0])[:, np.newaxis]

        # Double centering: subtract row and column means, add back the
        # grand mean.
        K -= self.K_fit_rows_
        K -= pred_cols
        K += self.K_fit_all_

        return K

    @property
    def _pairwise(self):
        # Signals to sklearn utilities that this estimator consumes
        # pairwise (kernel) input rather than feature vectors.
        return True
def add_dummy_feature(X, value=1.0):
    """Augment dataset with an additional dummy feature.

    This is useful for fitting an intercept term with implementations which
    cannot otherwise fit it directly.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape [n_samples, n_features]
        Data.
    value : float
        Value to use for the dummy feature.

    Returns
    -------
    X : {array, sparse matrix}, shape [n_samples, n_features + 1]
        Same data with dummy feature added as first column.

    Examples
    --------
    >>> from sklearn.preprocessing import add_dummy_feature
    >>> add_dummy_feature([[0, 1], [1, 0]])
    array([[ 1.,  0.,  1.],
           [ 1.,  1.,  0.]])
    """
    X = check_array(X, accept_sparse=['csc', 'csr', 'coo'], dtype=FLOAT_DTYPES)
    n_samples, n_features = X.shape
    shape = (n_samples, n_features + 1)

    if not sparse.issparse(X):
        # Dense case: prepend one constant column.
        dummy_col = np.ones((n_samples, 1)) * value
        return np.hstack((dummy_col, X))

    if sparse.isspmatrix_coo(X):
        # Shift every existing column one position to the right, then
        # prepend n_samples explicit entries for the dummy column (col 0).
        new_col = np.concatenate((np.zeros(n_samples), X.col + 1))
        new_row = np.concatenate((np.arange(n_samples), X.row))
        new_data = np.concatenate((np.ones(n_samples) * value, X.data))
        return sparse.coo_matrix((new_data, (new_row, new_col)), shape)

    if sparse.isspmatrix_csc(X):
        # The dummy column holds n_samples entries, so every existing
        # column pointer moves up by n_samples; indptr[0] must stay 0.
        new_indptr = np.concatenate((np.array([0]), X.indptr + n_samples))
        new_indices = np.concatenate((np.arange(n_samples), X.indices))
        new_data = np.concatenate((np.ones(n_samples) * value, X.data))
        return sparse.csc_matrix((new_data, new_indices, new_indptr), shape)

    # Any other sparse format: convert through COO and back.
    klass = X.__class__
    return klass(add_dummy_feature(X.tocoo(), value))
def _transform_selected(X, transform, selected="all", copy=True):
    """Apply a transform function to portion of selected features.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape [n_samples, n_features]
        Dense array or sparse matrix.
    transform : callable
        A callable transform(X) -> X_transformed
    copy : boolean, optional
        Copy X even if it could be avoided.
    selected : "all" or array of indices or mask
        Specify which features to apply the transform to.

    Returns
    -------
    X : array or sparse matrix, shape=(n_samples, n_features_new)
    """
    X = check_array(X, accept_sparse='csc', copy=copy, dtype=FLOAT_DTYPES)

    if isinstance(selected, six.string_types) and selected == "all":
        return transform(X)
    if len(selected) == 0:
        return X

    n_features = X.shape[1]
    # Normalize `selected` (index array or boolean mask) into one mask.
    mask = np.zeros(n_features, dtype=bool)
    mask[np.asarray(selected)] = True
    n_selected = np.sum(mask)

    if n_selected == 0:
        # No features selected.
        return X
    if n_selected == n_features:
        # All features selected.
        return transform(X)

    ind = np.arange(n_features)
    X_sel = transform(X[:, ind[mask]])
    X_not_sel = X[:, ind[~mask]]

    # Transformed features come first, untouched features are stacked after.
    if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
        return sparse.hstack((X_sel, X_not_sel))
    return np.hstack((X_sel, X_not_sel))
class OneHotEncoder(BaseEstimator, TransformerMixin):
    """Encode categorical integer features using a one-hot aka one-of-K scheme.
    The input to this transformer should be a matrix of integers, denoting
    the values taken on by categorical (discrete) features. The output will be
    a sparse matrix where each column corresponds to one possible value of one
    feature. It is assumed that input features take on values in the range
    [0, n_values).
    This encoding is needed for feeding categorical data to many scikit-learn
    estimators, notably linear models and SVMs with the standard kernels.
    Note: a one-hot encoding of y labels should use a LabelBinarizer
    instead.
    Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
    Parameters
    ----------
    n_values : 'auto', int or array of ints
        Number of values per feature.
        - 'auto' : determine value range from training data.
        - int : number of categorical values per feature.
                Each feature value should be in ``range(n_values)``
        - array : ``n_values[i]`` is the number of categorical values in
                  ``X[:, i]``. Each feature value should be
                  in ``range(n_values[i])``
    categorical_features : "all" or array of indices or mask
        Specify what features are treated as categorical.
        - 'all' (default): All features are treated as categorical.
        - array of indices: Array of categorical feature indices.
        - mask: Array of length n_features and with dtype=bool.
        Non-categorical features are always stacked to the right of the matrix.
    dtype : number type, default=np.float
        Desired dtype of output.
    sparse : boolean, default=True
        Will return sparse matrix if set True else will return an array.
    handle_unknown : str, 'error' or 'ignore'
        Whether to raise an error or ignore if a unknown categorical feature is
        present during transform.
    Attributes
    ----------
    active_features_ : array
        Indices for active features, meaning values that actually occur
        in the training set. Only available when n_values is ``'auto'``.
    feature_indices_ : array of shape (n_features,)
        Indices to feature ranges.
        Feature ``i`` in the original data is mapped to features
        from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
        (and then potentially masked by `active_features_` afterwards)
    n_values_ : array of shape (n_features,)
        Maximum number of values per feature.
    Examples
    --------
    Given a dataset with three features and four samples, we let the encoder
    find the maximum value per feature and transform the data to a binary
    one-hot encoding.
    >>> from sklearn.preprocessing import OneHotEncoder
    >>> enc = OneHotEncoder()
    >>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]])  # doctest: +ELLIPSIS
    OneHotEncoder(categorical_features='all', dtype=<... 'numpy.float64'>,
           handle_unknown='error', n_values='auto', sparse=True)
    >>> enc.n_values_
    array([2, 3, 4])
    >>> enc.feature_indices_
    array([0, 2, 5, 9])
    >>> enc.transform([[0, 1, 1]]).toarray()
    array([[ 1.,  0.,  0.,  1.,  0.,  0.,  1.,  0.,  0.]])
    See also
    --------
    sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
      dictionary items (also handles string-valued features).
    sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
      encoding of dictionary items or strings.
    sklearn.preprocessing.LabelBinarizer : binarizes labels in a one-vs-all
      fashion.
    sklearn.preprocessing.MultiLabelBinarizer : transforms between iterable of
      iterables and a multilabel format, e.g. a (samples x classes) binary
      matrix indicating the presence of a class label.
    sklearn.preprocessing.LabelEncoder : encodes labels with values between 0
      and n_classes-1.
    """
    def __init__(self, n_values="auto", categorical_features="all",
                 dtype=np.float64, sparse=True, handle_unknown='error'):
        # Parameters are stored untouched; all validation happens in fit,
        # per the scikit-learn estimator contract.
        self.n_values = n_values
        self.categorical_features = categorical_features
        self.dtype = dtype
        self.sparse = sparse
        self.handle_unknown = handle_unknown
    def fit(self, X, y=None):
        """Fit OneHotEncoder to X.
        Parameters
        ----------
        X : array-like, shape [n_samples, n_feature]
            Input array of type int.
        Returns
        -------
        self
        """
        # fit is implemented in terms of fit_transform; the transformed
        # output is simply discarded.
        self.fit_transform(X)
        return self
    def _fit_transform(self, X):
        """Assumes X contains only categorical features."""
        X = check_array(X, dtype=np.int)
        if np.any(X < 0):
            raise ValueError("X needs to contain only non-negative integers.")
        n_samples, n_features = X.shape
        # Resolve self.n_values ('auto' / int / array) into a per-feature
        # array of category counts.
        if (isinstance(self.n_values, six.string_types) and
                self.n_values == 'auto'):
            n_values = np.max(X, axis=0) + 1
        elif isinstance(self.n_values, numbers.Integral):
            if (np.max(X, axis=0) >= self.n_values).any():
                raise ValueError("Feature out of bounds for n_values=%d"
                                 % self.n_values)
            n_values = np.empty(n_features, dtype=np.int)
            n_values.fill(self.n_values)
        else:
            try:
                n_values = np.asarray(self.n_values, dtype=int)
            except (ValueError, TypeError):
                raise TypeError("Wrong type for parameter `n_values`. Expected"
                                " 'auto', int or array of ints, got %r"
                                % type(X))
            if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
                raise ValueError("Shape mismatch: if n_values is an array,"
                                 " it has to be of shape (n_features,).")
        self.n_values_ = n_values
        # Feature i owns output columns indices[i]:indices[i + 1].
        n_values = np.hstack([[0], n_values])
        indices = np.cumsum(n_values)
        self.feature_indices_ = indices
        # Offset each categorical value by its feature's column-block start,
        # giving the flat output column of every (sample, feature) entry.
        column_indices = (X + indices[:-1]).ravel()
        row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
                                n_features)
        data = np.ones(n_samples * n_features)
        out = sparse.coo_matrix((data, (row_indices, column_indices)),
                                shape=(n_samples, indices[-1]),
                                dtype=self.dtype).tocsr()
        if (isinstance(self.n_values, six.string_types) and
                self.n_values == 'auto'):
            # In 'auto' mode, drop columns for category values that never
            # occur in the training data and remember which were kept.
            mask = np.array(out.sum(axis=0)).ravel() != 0
            active_features = np.where(mask)[0]
            out = out[:, active_features]
            self.active_features_ = active_features
        return out if self.sparse else out.toarray()
    def fit_transform(self, X, y=None):
        """Fit OneHotEncoder to X, then transform X.
        Equivalent to self.fit(X).transform(X), but more convenient and more
        efficient. See fit for the parameters, transform for the return value.
        Parameters
        ----------
        X : array-like, shape [n_samples, n_feature]
            Input array of type int.
        """
        # Only the columns named by categorical_features are encoded; the
        # remaining columns are passed through and stacked on the right.
        return _transform_selected(X, self._fit_transform,
                                   self.categorical_features, copy=True)
    def _transform(self, X):
        """Assumes X contains only categorical features."""
        X = check_array(X, dtype=np.int)
        if np.any(X < 0):
            raise ValueError("X needs to contain only non-negative integers.")
        n_samples, n_features = X.shape
        indices = self.feature_indices_
        if n_features != indices.shape[0] - 1:
            raise ValueError("X has different shape than during fitting."
                             " Expected %d, got %d."
                             % (indices.shape[0] - 1, n_features))
        # We use only those categorical features of X that are known using fit.
        # i.e lesser than n_values_ using mask.
        # This means, if self.handle_unknown is "ignore", the row_indices and
        # col_indices corresponding to the unknown categorical feature are
        # ignored.
        mask = (X < self.n_values_).ravel()
        if np.any(~mask):
            # NOTE(review): handle_unknown is only validated when unknown
            # values are actually present, and the message says "error or
            # unknown" although the accepted values are 'error' and 'ignore'
            # — confirm against upstream before changing the string.
            if self.handle_unknown not in ['error', 'ignore']:
                raise ValueError("handle_unknown should be either error or "
                                 "unknown got %s" % self.handle_unknown)
            if self.handle_unknown == 'error':
                raise ValueError("unknown categorical feature present %s "
                                 "during transform." % X.ravel()[~mask])
        # Same flat-column construction as in _fit_transform, but entries
        # for unknown values are filtered out by the mask.
        column_indices = (X + indices[:-1]).ravel()[mask]
        row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
                                n_features)[mask]
        data = np.ones(np.sum(mask))
        out = sparse.coo_matrix((data, (row_indices, column_indices)),
                                shape=(n_samples, indices[-1]),
                                dtype=self.dtype).tocsr()
        if (isinstance(self.n_values, six.string_types) and
                self.n_values == 'auto'):
            # Keep only the columns that were active during fit.
            out = out[:, self.active_features_]
        return out if self.sparse else out.toarray()
    def transform(self, X):
        """Transform X using one-hot encoding.
        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            Input array of type int.
        Returns
        -------
        X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
            Transformed input.
        """
        return _transform_selected(X, self._transform,
                                   self.categorical_features, copy=True)
class QuantileTransformer(BaseEstimator, TransformerMixin):
    """Transform features using quantiles information.
    This method transforms the features to follow a uniform or a normal
    distribution. Therefore, for a given feature, this transformation tends
    to spread out the most frequent values. It also reduces the impact of
    (marginal) outliers: this is therefore a robust preprocessing scheme.
    The transformation is applied on each feature independently.
    The cumulative density function of a feature is used to project the
    original values. Features values of new/unseen data that fall below
    or above the fitted range will be mapped to the bounds of the output
    distribution. Note that this transform is non-linear. It may distort linear
    correlations between variables measured at the same scale but renders
    variables measured at different scales more directly comparable.
    Read more in the :ref:`User Guide <preprocessing_transformer>`.
    Parameters
    ----------
    n_quantiles : int, optional (default=1000)
        Number of quantiles to be computed. It corresponds to the number
        of landmarks used to discretize the cumulative density function.
    output_distribution : str, optional (default='uniform')
        Marginal distribution for the transformed data. The choices are
        'uniform' (default) or 'normal'.
    ignore_implicit_zeros : bool, optional (default=False)
        Only applies to sparse matrices. If True, the sparse entries of the
        matrix are discarded to compute the quantile statistics. If False,
        these entries are treated as zeros.
    subsample : int, optional (default=1e5)
        Maximum number of samples used to estimate the quantiles for
        computational efficiency. Note that the subsampling procedure may
        differ for value-identical sparse and dense matrices.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by np.random. Note that this is used by subsampling and smoothing
        noise.
    copy : boolean, optional, (default=True)
        Set to False to perform inplace transformation and avoid a copy (if the
        input is already a numpy array).
    Attributes
    ----------
    quantiles_ : ndarray, shape (n_quantiles, n_features)
        The values corresponding the quantiles of reference.
    references_ : ndarray, shape(n_quantiles, )
        Quantiles of references.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.preprocessing import QuantileTransformer
    >>> rng = np.random.RandomState(0)
    >>> X = np.sort(rng.normal(loc=0.5, scale=0.25, size=(25, 1)), axis=0)
    >>> qt = QuantileTransformer(n_quantiles=10, random_state=0)
    >>> qt.fit_transform(X) # doctest: +ELLIPSIS
    array([...])
    See also
    --------
    quantile_transform : Equivalent function without the estimator API.
    StandardScaler : perform standardization that is faster, but less robust
        to outliers.
    RobustScaler : perform robust standardization that removes the influence
        of outliers but does not put outliers and inliers on the same scale.
    Notes
    -----
    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
    """
    def __init__(self, n_quantiles=1000, output_distribution='uniform',
                 ignore_implicit_zeros=False, subsample=int(1e5),
                 random_state=None, copy=True):
        # Parameters are stored untouched; validation is deferred to fit.
        self.n_quantiles = n_quantiles
        self.output_distribution = output_distribution
        self.ignore_implicit_zeros = ignore_implicit_zeros
        self.subsample = subsample
        self.random_state = random_state
        self.copy = copy
    def _dense_fit(self, X, random_state):
        """Compute percentiles for dense matrices.
        Parameters
        ----------
        X : ndarray, shape (n_samples, n_features)
            The data used to scale along the features axis.
        """
        if self.ignore_implicit_zeros:
            warnings.warn("'ignore_implicit_zeros' takes effect only with"
                          " sparse matrix. This parameter has no effect.")
        n_samples, n_features = X.shape
        # for compatibility issue with numpy<=1.8.X, references
        # need to be a list scaled between 0 and 100
        references = (self.references_ * 100).tolist()
        self.quantiles_ = []
        for col in X.T:
            if self.subsample < n_samples:
                # Subsample without replacement to bound the cost of
                # np.percentile on very large inputs.
                subsample_idx = random_state.choice(n_samples,
                                                    size=self.subsample,
                                                    replace=False)
                col = col.take(subsample_idx, mode='clip')
            self.quantiles_.append(np.percentile(col, references))
        # Stack into shape (n_quantiles, n_features).
        self.quantiles_ = np.transpose(self.quantiles_)
    def _sparse_fit(self, X, random_state):
        """Compute percentiles for sparse matrices.
        Parameters
        ----------
        X : sparse matrix CSC, shape (n_samples, n_features)
            The data used to scale along the features axis. The sparse matrix
            needs to be nonnegative.
        """
        n_samples, n_features = X.shape
        # for compatibility issue with numpy<=1.8.X, references
        # need to be a list scaled between 0 and 100
        references = list(map(lambda x: x * 100, self.references_))
        self.quantiles_ = []
        for feature_idx in range(n_features):
            # Explicitly stored (non-zero) entries of this CSC column.
            column_nnz_data = X.data[X.indptr[feature_idx]:
                                     X.indptr[feature_idx + 1]]
            if len(column_nnz_data) > self.subsample:
                # Subsample the non-zeros proportionally; unless implicit
                # zeros are ignored, the buffer is padded with zeros to
                # represent them.
                column_subsample = (self.subsample * len(column_nnz_data) //
                                    n_samples)
                if self.ignore_implicit_zeros:
                    column_data = np.zeros(shape=column_subsample,
                                           dtype=X.dtype)
                else:
                    column_data = np.zeros(shape=self.subsample, dtype=X.dtype)
                column_data[:column_subsample] = random_state.choice(
                    column_nnz_data, size=column_subsample, replace=False)
            else:
                if self.ignore_implicit_zeros:
                    column_data = np.zeros(shape=len(column_nnz_data),
                                           dtype=X.dtype)
                else:
                    # Zero-padding to n_samples accounts for the implicit
                    # zeros of the sparse column.
                    column_data = np.zeros(shape=n_samples, dtype=X.dtype)
                column_data[:len(column_nnz_data)] = column_nnz_data
            if not column_data.size:
                # if no nnz, an error will be raised for computing the
                # quantiles. Force the quantiles to be zeros.
                self.quantiles_.append([0] * len(references))
            else:
                self.quantiles_.append(
                        np.percentile(column_data, references))
        # Stack into shape (n_quantiles, n_features).
        self.quantiles_ = np.transpose(self.quantiles_)
    def fit(self, X, y=None):
        """Compute the quantiles used for transforming.
        Parameters
        ----------
        X : ndarray or sparse matrix, shape (n_samples, n_features)
            The data used to scale along the features axis. If a sparse
            matrix is provided, it will be converted into a sparse
            ``csc_matrix``. Additionally, the sparse matrix needs to be
            nonnegative if `ignore_implicit_zeros` is False.
        Returns
        -------
        self : object
            Returns self
        """
        if self.n_quantiles <= 0:
            raise ValueError("Invalid value for 'n_quantiles': %d. "
                             "The number of quantiles must be at least one."
                             % self.n_quantiles)
        if self.subsample <= 0:
            raise ValueError("Invalid value for 'subsample': %d. "
                             "The number of subsamples must be at least one."
                             % self.subsample)
        if self.n_quantiles > self.subsample:
            raise ValueError("The number of quantiles cannot be greater than"
                             " the number of samples used. Got {} quantiles"
                             " and {} samples.".format(self.n_quantiles,
                                                       self.subsample))
        X = self._check_inputs(X)
        rng = check_random_state(self.random_state)
        # Create the quantiles of reference
        # (evenly spaced probabilities in [0, 1], including both endpoints).
        self.references_ = np.linspace(0, 1, self.n_quantiles,
                                       endpoint=True)
        if sparse.issparse(X):
            self._sparse_fit(X, rng)
        else:
            self._dense_fit(X, rng)
        return self
    def _transform_col(self, X_col, quantiles, inverse):
        """Private function to transform a single feature"""
        # scipy's normal distribution is named 'norm', not 'normal'.
        if self.output_distribution == 'normal':
            output_distribution = 'norm'
        else:
            output_distribution = self.output_distribution
        output_distribution = getattr(stats, output_distribution)
        # older version of scipy do not handle tuple as fill_value
        # clipping the value before transform solve the issue
        if not inverse:
            # Forward: data space -> [0, 1] (then the output PPF).
            lower_bound_x = quantiles[0]
            upper_bound_x = quantiles[-1]
            lower_bound_y = 0
            upper_bound_y = 1
        else:
            # Inverse: [0, 1] -> data space.
            lower_bound_x = 0
            upper_bound_x = 1
            lower_bound_y = quantiles[0]
            upper_bound_y = quantiles[-1]
            # for inverse transform, match a uniform PDF
            X_col = output_distribution.cdf(X_col)
        # find index for lower and higher bounds
        # (values beyond the fitted range are pinned to the bounds below).
        lower_bounds_idx = (X_col - BOUNDS_THRESHOLD <
                            lower_bound_x)
        upper_bounds_idx = (X_col + BOUNDS_THRESHOLD >
                            upper_bound_x)
        if not inverse:
            # Interpolate in one direction and in the other and take the
            # mean. This is in case of repeated values in the features
            # and hence repeated quantiles
            #
            # If we don't do this, only one extreme of the duplicated is
            # used (the upper when we do assending, and the
            # lower for descending). We take the mean of these two
            X_col = .5 * (np.interp(X_col, quantiles, self.references_)
                          - np.interp(-X_col, -quantiles[::-1],
                                      -self.references_[::-1]))
        else:
            X_col = np.interp(X_col, self.references_, quantiles)
        X_col[upper_bounds_idx] = upper_bound_y
        X_col[lower_bounds_idx] = lower_bound_y
        # for forward transform, match the output PDF
        if not inverse:
            X_col = output_distribution.ppf(X_col)
            # find the value to clip the data to avoid mapping to
            # infinity. Clip such that the inverse transform will be
            # consistent
            clip_min = output_distribution.ppf(BOUNDS_THRESHOLD -
                                               np.spacing(1))
            clip_max = output_distribution.ppf(1 - (BOUNDS_THRESHOLD -
                                                    np.spacing(1)))
            X_col = np.clip(X_col, clip_min, clip_max)
        return X_col
    def _check_inputs(self, X, accept_sparse_negative=False):
        """Check inputs before fit and transform"""
        X = check_array(X, accept_sparse='csc', copy=self.copy,
                        dtype=[np.float64, np.float32])
        # we only accept positive sparse matrix when ignore_implicit_zeros is
        # false and that we call fit or transform.
        if (not accept_sparse_negative and not self.ignore_implicit_zeros and
                (sparse.issparse(X) and np.any(X.data < 0))):
            raise ValueError('QuantileTransformer only accepts non-negative'
                             ' sparse matrices.')
        # check the output PDF
        if self.output_distribution not in ('normal', 'uniform'):
            raise ValueError("'output_distribution' has to be either 'normal'"
                             " or 'uniform'. Got '{}' instead.".format(
                                 self.output_distribution))
        return X
    def _check_is_fitted(self, X):
        """Check the inputs before transforming"""
        check_is_fitted(self, 'quantiles_')
        # check that the dimension of X are adequate with the fitted data
        if X.shape[1] != self.quantiles_.shape[1]:
            raise ValueError('X does not have the same number of features as'
                             ' the previously fitted data. Got {} instead of'
                             ' {}.'.format(X.shape[1],
                                           self.quantiles_.shape[1]))
    def _transform(self, X, inverse=False):
        """Forward and inverse transform.
        Parameters
        ----------
        X : ndarray, shape (n_samples, n_features)
            The data used to scale along the features axis.
        inverse : bool, optional (default=False)
            If False, apply forward transform. If True, apply
            inverse transform.
        Returns
        -------
        X : ndarray, shape (n_samples, n_features)
            Projected data
        """
        # Transform feature by feature; for sparse input, only the
        # explicitly stored entries of each CSC column are modified in place.
        if sparse.issparse(X):
            for feature_idx in range(X.shape[1]):
                column_slice = slice(X.indptr[feature_idx],
                                     X.indptr[feature_idx + 1])
                X.data[column_slice] = self._transform_col(
                    X.data[column_slice], self.quantiles_[:, feature_idx],
                    inverse)
        else:
            for feature_idx in range(X.shape[1]):
                X[:, feature_idx] = self._transform_col(
                    X[:, feature_idx], self.quantiles_[:, feature_idx],
                    inverse)
        return X
    def transform(self, X):
        """Feature-wise transformation of the data.
        Parameters
        ----------
        X : ndarray or sparse matrix, shape (n_samples, n_features)
            The data used to scale along the features axis. If a sparse
            matrix is provided, it will be converted into a sparse
            ``csc_matrix``. Additionally, the sparse matrix needs to be
            nonnegative if `ignore_implicit_zeros` is False.
        Returns
        -------
        Xt : ndarray or sparse matrix, shape (n_samples, n_features)
            The projected data.
        """
        X = self._check_inputs(X)
        self._check_is_fitted(X)
        return self._transform(X, inverse=False)
    def inverse_transform(self, X):
        """Back-projection to the original space.
        Parameters
        ----------
        X : ndarray or sparse matrix, shape (n_samples, n_features)
            The data used to scale along the features axis. If a sparse
            matrix is provided, it will be converted into a sparse
            ``csc_matrix``. Additionally, the sparse matrix needs to be
            nonnegative if `ignore_implicit_zeros` is False.
        Returns
        -------
        Xt : ndarray or sparse matrix, shape (n_samples, n_features)
            The projected data.
        """
        # Negative sparse input is allowed here: forward-transformed sparse
        # data may legitimately contain negative values.
        X = self._check_inputs(X, accept_sparse_negative=True)
        self._check_is_fitted(X)
        return self._transform(X, inverse=True)
def quantile_transform(X, axis=0, n_quantiles=1000,
                       output_distribution='uniform',
                       ignore_implicit_zeros=False,
                       subsample=int(1e5),
                       random_state=None,
                       copy=False):
    """Transform features using quantiles information.
    This method transforms the features to follow a uniform or a normal
    distribution. Therefore, for a given feature, this transformation tends
    to spread out the most frequent values. It also reduces the impact of
    (marginal) outliers: this is therefore a robust preprocessing scheme.
    The transformation is applied on each feature independently.
    The cumulative density function of a feature is used to project the
    original values. Features values of new/unseen data that fall below
    or above the fitted range will be mapped to the bounds of the output
    distribution. Note that this transform is non-linear. It may distort linear
    correlations between variables measured at the same scale but renders
    variables measured at different scales more directly comparable.
    Read more in the :ref:`User Guide <preprocessing_transformer>`.
    Parameters
    ----------
    X : array-like, sparse matrix
        The data to transform.
    axis : int, (default=0)
        Axis used to compute the means and standard deviations along. If 0,
        transform each feature, otherwise (if 1) transform each sample.
    n_quantiles : int, optional (default=1000)
        Number of quantiles to be computed. It corresponds to the number
        of landmarks used to discretize the cumulative density function.
    output_distribution : str, optional (default='uniform')
        Marginal distribution for the transformed data. The choices are
        'uniform' (default) or 'normal'.
    ignore_implicit_zeros : bool, optional (default=False)
        Only applies to sparse matrices. If True, the sparse entries of the
        matrix are discarded to compute the quantile statistics. If False,
        these entries are treated as zeros.
    subsample : int, optional (default=1e5)
        Maximum number of samples used to estimate the quantiles for
        computational efficiency. Note that the subsampling procedure may
        differ for value-identical sparse and dense matrices.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by np.random. Note that this is used by subsampling and smoothing
        noise.
    copy : boolean, optional, (default=False)
        Set to True to perform a copy instead of an inplace transformation.
        Note that the default differs from :class:`QuantileTransformer`,
        whose ``copy`` parameter defaults to True.
    Attributes
    ----------
    quantiles_ : ndarray, shape (n_quantiles, n_features)
        The values corresponding the quantiles of reference.
    references_ : ndarray, shape(n_quantiles, )
        Quantiles of references.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.preprocessing import quantile_transform
    >>> rng = np.random.RandomState(0)
    >>> X = np.sort(rng.normal(loc=0.5, scale=0.25, size=(25, 1)), axis=0)
    >>> quantile_transform(X, n_quantiles=10, random_state=0)
    ... # doctest: +ELLIPSIS
    array([...])
    See also
    --------
    QuantileTransformer : Performs quantile-based scaling using the
        ``Transformer`` API (e.g. as part of a preprocessing
        :class:`sklearn.pipeline.Pipeline`).
    scale : perform standardization that is faster, but less robust
        to outliers.
    robust_scale : perform robust standardization that removes the influence
        of outliers but does not put outliers and inliers on the same scale.
    Notes
    -----
    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
    """
    # Delegate to the estimator; axis=1 is handled by transposing in and out.
    n = QuantileTransformer(n_quantiles=n_quantiles,
                            output_distribution=output_distribution,
                            subsample=subsample,
                            ignore_implicit_zeros=ignore_implicit_zeros,
                            random_state=random_state,
                            copy=copy)
    if axis == 0:
        return n.fit_transform(X)
    elif axis == 1:
        return n.fit_transform(X.T).T
    else:
        raise ValueError("axis should be either equal to 0 or 1. Got"
                         " axis={}".format(axis))
| mit |
ktnyt/chainer | examples/chainermn/mnist/train_mnist.py | 3 | 4716 | #!/usr/bin/env python
from __future__ import print_function
import argparse
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import training
from chainer.training import extensions
import chainermn
class MLP(chainer.Chain):
    """A simple multi-layer perceptron for MNIST classification."""

    def __init__(self, n_units, n_out):
        # The input size is fixed to 784 (flattened 28x28 MNIST images);
        # hidden width and output size are configurable.
        super(MLP, self).__init__(
            l1=L.Linear(784, n_units),      # n_in -> n_units
            l2=L.Linear(n_units, n_units),  # n_units -> n_units
            l3=L.Linear(n_units, n_out),    # n_units -> n_out
        )

    def __call__(self, x):
        """Forward pass: two ReLU hidden layers and a linear readout."""
        hidden = F.relu(self.l1(x))
        hidden = F.relu(self.l2(hidden))
        return self.l3(hidden)
def main():
    """Train an MLP on MNIST across multiple workers with ChainerMN."""
    parser = argparse.ArgumentParser(description='ChainerMN example: MNIST')
    parser.add_argument('--batchsize', '-b', type=int, default=100,
                        help='Number of images in each mini-batch')
    parser.add_argument('--communicator', type=str,
                        default='hierarchical', help='Type of communicator')
    parser.add_argument('--epoch', '-e', type=int, default=20,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--gpu', '-g', action='store_true',
                        help='Use GPU')
    parser.add_argument('--out', '-o', default='result',
                        help='Directory to output the result')
    parser.add_argument('--resume', '-r', default='',
                        help='Resume the training from snapshot')
    parser.add_argument('--unit', '-u', type=int, default=1000,
                        help='Number of units')
    args = parser.parse_args()
    # Prepare ChainerMN communicator.
    if args.gpu:
        if args.communicator == 'naive':
            print("Error: 'naive' communicator does not support GPU.\n")
            exit(-1)
        comm = chainermn.create_communicator(args.communicator)
        # One device per local (intra-node) rank — assumes the launcher
        # places at most one worker per available GPU; confirm for your setup.
        device = comm.intra_rank
    else:
        if args.communicator != 'naive':
            print('Warning: using naive communicator '
                  'because only naive supports CPU-only execution')
        comm = chainermn.create_communicator('naive')
        device = -1  # -1 selects CPU in Chainer's device convention
    # Only rank 0 prints the run configuration to avoid duplicated output.
    if comm.rank == 0:
        print('==========================================')
        print('Num process (COMM_WORLD): {}'.format(comm.size))
        if args.gpu:
            print('Using GPUs')
        print('Using {} communicator'.format(args.communicator))
        print('Num unit: {}'.format(args.unit))
        print('Num Minibatch-size: {}'.format(args.batchsize))
        print('Num epoch: {}'.format(args.epoch))
        print('==========================================')
    model = L.Classifier(MLP(args.unit, 10))
    if device >= 0:
        chainer.cuda.get_device_from_id(device).use()
        model.to_gpu()
    # Create a multi node optimizer from a standard Chainer optimizer.
    optimizer = chainermn.create_multi_node_optimizer(
        chainer.optimizers.Adam(), comm)
    optimizer.setup(model)
    # Split and distribute the dataset. Only worker 0 loads the whole dataset.
    # Datasets of worker 0 are evenly split and distributed to all workers.
    if comm.rank == 0:
        train, test = chainer.datasets.get_mnist()
    else:
        train, test = None, None
    train = chainermn.scatter_dataset(train, comm, shuffle=True)
    test = chainermn.scatter_dataset(test, comm, shuffle=True)
    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
    test_iter = chainer.iterators.SerialIterator(test, args.batchsize,
                                                 repeat=False, shuffle=False)
    updater = training.StandardUpdater(train_iter, optimizer, device=device)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)
    # Create a multi node evaluator from a standard Chainer evaluator.
    evaluator = extensions.Evaluator(test_iter, model, device=device)
    evaluator = chainermn.create_multi_node_evaluator(evaluator, comm)
    trainer.extend(evaluator)
    # Some display and output extensions are necessary only for one worker.
    # (Otherwise, there would just be repeated outputs.)
    if comm.rank == 0:
        trainer.extend(extensions.dump_graph('main/loss'))
        trainer.extend(extensions.LogReport())
        trainer.extend(extensions.PrintReport(
            ['epoch', 'main/loss', 'validation/main/loss',
             'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))
        trainer.extend(extensions.ProgressBar())
    # Optionally resume training state from a previously saved snapshot.
    if args.resume:
        chainer.serializers.load_npz(args.resume, trainer)
    trainer.run()
# Run the example only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
| mit |
boyuegame/kbengine | kbe/src/lib/python/Lib/nturl2path.py | 89 | 2396 | """Convert a NT pathname to a file URL and vice versa."""
def url2pathname(url):
    """OS-specific conversion from a relative URL of the 'file' scheme
    to a file system path; not recommended for general use."""
    # e.g.
    #   ///C|/foo/bar/spam.foo
    # becomes
    #   C:\foo\bar\spam.foo
    import string, urllib.parse
    # Windows itself uses ":" even in URLs.
    url = url.replace(':', '|')
    if '|' not in url:
        # No drive specifier: translate slashes to backslashes only.
        if url[:4] == '////':
            # ////host/path/on/remote/host -> \\host\path\on\remote\host
            # (the leading slash count is halved)
            url = url[2:]
        return urllib.parse.unquote('\\'.join(url.split('/')))
    pieces = url.split('|')
    if len(pieces) != 2 or pieces[0][-1] not in string.ascii_letters:
        raise OSError('Bad URL: ' + url)
    drive = pieces[0][-1].upper()
    # Prepend a backslash to each non-empty, unquoted path component.
    path = drive + ':' + ''.join(
        '\\' + urllib.parse.unquote(part)
        for part in pieces[1].split('/') if part)
    # Issue #11474 - handing url such as |c/|
    if path.endswith(':') and url.endswith('/'):
        path += '\\'
    return path
def pathname2url(p):
    """OS-specific conversion from a file system path to a relative URL
    of the 'file' scheme; not recommended for general use.

    e.g. C:\\foo\\bar\\spam.foo becomes ///C|/foo/bar/spam.foo
    """
    import urllib.parse
    if ':' not in p:
        # No drive specifier: convert slashes and quote the name.
        if p.startswith('\\\\'):
            # \\host\path\on\remote\host -> ////host/path/on/remote/host
            # (double the slashes at the start of the path)
            p = '\\\\' + p
        return urllib.parse.quote('/'.join(p.split('\\')))
    pieces = p.split(':')
    if len(pieces) != 2 or len(pieces[0]) > 1:
        raise OSError('Bad path: ' + p)
    drive = urllib.parse.quote(pieces[0].upper())
    path = '///' + drive + ':'
    for segment in pieces[1].split('\\'):
        if segment:
            path = path + '/' + urllib.parse.quote(segment)
    return path
| lgpl-3.0 |
joliva/maestro-ng | maestro/plays.py | 1 | 15835 | # Copyright (C) 2013 SignalFuse, Inc.
#
# Docker container orchestration utility.
from __future__ import print_function
import collections
import json
import sys
import time
from . import exceptions
# Some utility functions for output.
def color(cond):
    """Returns 32 (green) or 31 (red) depending on the validity of the given
    condition."""
    return 32 if cond else 31
def up(cond):
    """Returns 'up' or 'down' depending on the validity of the given
    condition."""
    return 'up' if cond else 'down'
class BaseOrchestrationPlay:
    """Base class for orchestration plays; holds the ordered list of
    containers to act on."""
    def __init__(self, containers=None):
        # Use a None sentinel instead of a mutable [] default so instances
        # created without an argument don't all share the same list object.
        self._containers = containers if containers is not None else []
    def run(self):
        """Execute the play; subclasses must override."""
        raise NotImplementedError
class OutputFormatter:
    """Output formatter for nice, progressive terminal output.
    Manages a single, progressively-updated terminal line consisting of a
    "committed" base label plus transient "in progress" labels.
    """
    def __init__(self, prefix=None):
        self._committed = prefix
    def commit(self, s=None):
        # Permanently append s to the committed label, then redraw the line.
        if s:
            self._committed = ('{} {}'.format(self._committed, s)
                               if self._committed else s)
        print('{}\033[K\r'.format(self._committed), end='')
        sys.stdout.flush()
    def pending(self, s):
        # Redraw the line with a transient suffix; the committed label is
        # left untouched.
        if s:
            line = ('{} {}'.format(self._committed, s)
                    if self._committed else s)
            print('{}\033[K\r'.format(line), end='')
        sys.stdout.flush()
    def end(self):
        # Terminate the line for good.
        print('')
        sys.stdout.flush()
class FullStatus(BaseOrchestrationPlay):
    """A Maestro orchestration play that displays the status of the given
    services and/or instance containers."""
    def __init__(self, containers=[]):
        # NOTE(review): the mutable [] default is shared across calls; it is
        # only read here, but prefer passing an explicit list.
        BaseOrchestrationPlay.__init__(self, containers)
    def run(self):
        """Print one line per container (liveness + service ping) and one
        extra line per exposed port with an individual port ping."""
        print('{:>3s} {:<20s} {:<15s} {:<20s} {:<15s} {:<10s}'.format(
            ' #', 'INSTANCE', 'SERVICE', 'SHIP', 'CONTAINER', 'STATUS'))
        for order, container in enumerate(self._containers, 1):
            o = OutputFormatter(
                ('{:>3d}. \033[;1m{:<20.20s}\033[;0m {:<15.15s} ' +
                 '{:<20.20s}').format(order,
                                      container.name,
                                      container.service.name,
                                      container.ship.name))
            try:
                o.pending('checking container...')
                status = container.status()
                # Short container ID when running, 'down' otherwise.
                o.commit('\033[{:d};1m{:<15s}\033[;0m'.format(
                    color(status and status['State']['Running']),
                    (status and status['State']['Running']
                        and container.id[:7] or 'down')))
                o.pending('checking service...')
                # Only ping the application if the container is running.
                ping = status and status['State']['Running'] \
                    and container.ping(1)
                o.commit('\033[{:d};1m{:<4.4s}\033[;0m'.format(color(ping),
                                                               up(ping)))
                # NOTE(review): dict.iteritems() is Python 2 only.
                for name, port in container.ports.iteritems():
                    o.end()
                    o = OutputFormatter('     >>')
                    o.pending('{:>9.9s}:{:s}'.format(port['external'][1],
                                                     name))
                    ping = container.ping_port(name)
                    o.commit('\033[{:d};1m{:>9.9s}\033[;0m:{:s}'.format(
                        color(ping), port['external'][1], name))
            except Exception:
                # A ship that can't be reached at all is reported as down.
                o.commit('\033[31;1m{:<15s} {:<10s}\033[;0m'.format(
                    'host down', 'down'))
            o.end()
class Status(BaseOrchestrationPlay):
    """A less advanced, but faster status display orchestration play that only
    looks at the presence and status of the containers. Status information is
    bulk-polled from each ship's Docker daemon."""
    def __init__(self, containers=[]):
        # NOTE(review): mutable [] default shared across calls; only read here.
        BaseOrchestrationPlay.__init__(self, containers)
    def run(self):
        """Bulk-poll each ship once, then print one status line per
        container using the gathered information."""
        status = {}
        o = OutputFormatter()
        # Poll each distinct ship once, indexing containers by name
        # (Docker prefixes names with '/', hence the [1:]).
        for ship in set([container.ship for container in self._containers]):
            o.pending('Gathering container information from {} ({})...'.format(
                ship.name, ship.ip))
            try:
                status.update(dict((c['Names'][0][1:], c)
                                   for c in ship.backend.containers()))
            except:
                # Best-effort: an unreachable ship simply contributes no
                # entries, so its containers show as 'down' below.
                pass
        o.commit('{:>3s} {:<20s} {:<15s} {:<20s} {:<15s}'.format(
            ' #', 'INSTANCE', 'SERVICE', 'SHIP', 'CONTAINER'))
        o.end()
        for order, container in enumerate(self._containers, 1):
            o = OutputFormatter(
                ('{:>3d}. \033[;1m{:<20.20s}\033[;0m {:<15.15s} ' +
                 '{:<20.20s}').format(order,
                                      container.name,
                                      container.service.name,
                                      container.ship.name))
            s = status.get(container.name)
            if s and s['Status'].startswith('Up'):
                # Key casing differs across Docker API versions.
                cid = s.get('ID', s.get('Id', None))
                o.commit('\033[32;1m{}\033[;0m'.format(cid[:7]))
            else:
                o.commit('\033[31;1mdown\033[;0m')
            o.end()
class Start(BaseOrchestrationPlay):
    """A Maestro orchestration play that will execute the start sequence of the
    requested services, starting each container for each instance of the
    services, in the given start order, waiting for each container's
    application to become available before moving to the next one."""
    def __init__(self, containers=[], registries={}, refresh_images=False):
        # NOTE(review): mutable []/{}  defaults shared across calls; only
        # read here, but prefer passing explicit values.
        BaseOrchestrationPlay.__init__(self, containers)
        self._registries = registries
        self._refresh_images = refresh_images
    def run(self):
        """Start each container in order, printing progress; aborts the
        whole sequence on the first container whose service fails."""
        print('{:>3s} {:<20s} {:<15s} {:<20s} {:<15s} {:<10s}'.format(
            ' #', 'INSTANCE', 'SERVICE', 'SHIP', 'CONTAINER', 'STATUS'))
        for order, container in enumerate(self._containers, 1):
            o = OutputFormatter(
                ('{:>3d}. \033[;1m{:<20.20s}\033[;0m {:<15.15s} ' +
                 '{:<20.20s}').format(order,
                                      container.name,
                                      container.service.name,
                                      container.ship.name))
            error = None
            try:
                # TODO: None is used to indicate that no action was performed
                # because the container and its application were already
                # running. This makes the following code not very nice and this
                # could be improved.
                result = self._start_container(o, container)
                o.commit('\033[{:d};1m{:<10s}\033[;0m'.format(
                    color(result is not False),
                    result is None and 'up' or
                    (result and 'started' or
                        'service did not start!')))
                if result is False:
                    # Include the container's logs to help diagnose the
                    # failed start before halting the sequence.
                    error = [
                        ('Halting start sequence because {} failed to start!'
                         .format(container)),
                        container.ship.backend.logs(container.id)]
                    raise exceptions.OrchestrationException('\n'.join(error))
            except Exception:
                o.commit('\033[31;1mfailed to start container!\033[;0m')
                raise
            finally:
                o.end()
    def _update_pull_progress(self, progress, last):
        """Update an image pull progress map with latest download progress
        information for one of the image layers, and return the average of the
        download progress of all layers as an indication of the overall
        progress of the pull."""
        try:
            last = json.loads(last)
            progress[last['id']] = last['status'] == 'Download complete' \
                and 100 \
                or (100.0 * last['progressDetail']['current'] /
                    last['progressDetail']['total'])
        except:
            # Best-effort: malformed/keyless progress events are ignored.
            pass
        # NOTE(review): reduce is a builtin on Python 2 only; on Python 3
        # this needs functools.reduce (or simply sum()).
        return reduce(lambda x, y: x+y, progress.values()) / len(progress) \
            if progress else 0
    def _wait_for_status(self, container, cond, retries=10):
        """Poll the container's status every 0.5s until cond(status) is
        truthy; return True on success, False after the retries run out."""
        while retries >= 0:
            status = container.status(refresh=True)
            if cond(status):
                return True
            time.sleep(0.5)
            retries -= 1
        return False
    def _login_to_registry(self, o, container):
        """Extract the registry name from the image needed for the container,
        and if authentication data is provided for that registry, login to it
        so a subsequent pull operation can be performed."""
        image = container.service.get_image_details()
        # No registry prefix in the repository name -> nothing to do.
        if image['repository'].find('/') <= 0:
            return
        registry, repo_name = image['repository'].split('/', 1)
        if registry not in self._registries:
            return
        o.pending('logging in to {}...'.format(registry))
        try:
            container.ship.backend.login(**self._registries[registry])
        except Exception as e:
            raise exceptions.OrchestrationException(
                'Login to {} failed: {}'.format(registry, e))
    def _start_container(self, o, container):
        """Start the given container.
        If the container and its application are already running, no action is
        performed and the function returns None to indicate that. Otherwise, a
        new container must be created and started. To achieve this, any
        existing container of the same name is first removed. Then, if
        necessary or if requested, the container image is pulled from its
        registry. Finally, the container is created and started, configured as
        necessary. We then wait for the application to start and return True or
        False depending on whether the start was successful."""
        o.pending('checking service...')
        # NOTE(review): 'status' is currently unused; it fed the
        # commented-out early-return below.
        status = container.status(refresh=True)
        # if container.ping(retries=2):
        #     # If the container pinged, but has no status, it's likely that
        #     # something else that is not our container is listening on that
        #     # part. This could indicate that the environment description
        #     # doesn't match what is running, or supposed to be running, on the
        #     # target host.
        #     if not status:
        #         raise exceptions.OrchestrationException(
        #             'Invalid state, other service found running in place of ' +
        #             'container {}.!'.format(container))
        #
        #     o.commit('\033[34;0m{:<15s}\033[;0m'.format(container.id[:7]))
        #     # We use None as a special marker showing the container and the
        #     # application were already running.
        #     return None
        # Otherwise we need to start it.
        if container.id:
            o.pending('removing old container {}...'.format(container.id[:7]))
            container.ship.backend.remove_container(container.id)
        # Check if the image is available, or if we need to pull it down.
        image = container.service.get_image_details()
        # NOTE(review): relies on Python 2 filter() returning a list; on
        # Python 3 a filter object is always truthy and this check breaks.
        if self._refresh_images or \
                not filter(lambda i: container.service.image in i['RepoTags'],
                           container.ship.backend.images(image['repository'])):
            # First, attempt to login if we can/need to.
            self._login_to_registry(o, container)
            o.pending('pulling image {}...'.format(container.service.image))
            progress = {}
            for dlstatus in container.ship.backend.pull(stream=True, **image):
                o.pending('... {:.1f}%'.format(
                    self._update_pull_progress(progress, dlstatus)))
        # Create and start the container.
        o.pending('creating container from {}...'.format(
            container.service.image))
        # NOTE(review): map()/itervalues() are Python 2 idioms here.
        ports = container.ports \
            and map(lambda p: tuple(p['exposed'].split('/')),
                    container.ports.itervalues()) \
            or None
        container.ship.backend.create_container(
            image=container.service.image,
            hostname=container.name,
            name=container.name,
            environment=container.env,
            volumes=container.volumes.values(),
            ports=ports,
            detach=True)
        o.pending('waiting for container creation...')
        if not self._wait_for_status(container, lambda x: x):
            raise exceptions.OrchestrationException(
                'Container status could not be obtained after creation!')
        o.commit('\033[32;1m{:<15s}\033[;0m'.format(container.id[:7]))
        o.pending('starting container {}...'.format(container.id[:7]))
        # Map each exposed port to its (host interface, host port) bindings;
        # the '/proto' suffix is stripped from the external port.
        ports = collections.defaultdict(list) if container.ports else None
        if ports is not None:
            for port in container.ports.values():
                ports[port['exposed']].append(
                    (port['external'][0], port['external'][1].split('/')[0]))
        container.ship.backend.start(container.id,
                                     binds=container.volumes,
                                     port_bindings=ports,
                                     privileged=container.privileged)
        # Waiting one second and checking container state again to make sure
        # initialization didn't fail.
        o.pending('waiting for container initialization...')
        if not self._wait_for_status(container,
                                     lambda x: x and x['State']['Running']):
            raise exceptions.OrchestrationException(
                'Container status could not be obtained after start!')
        # Wait up for the container's application to come online.
        o.pending('waiting for service...')
        return container.ping(retries=60)
class Stop(BaseOrchestrationPlay):
    """A Maestro orchestration play that will stop and remove the containers of
    the requested services. The list of containers should be provided reversed
    so that dependent services are stopped first."""
    def __init__(self, containers=[]):
        # NOTE(review): mutable [] default shared across calls; only read here.
        BaseOrchestrationPlay.__init__(self, containers)
    def run(self):
        """Stop each container in the (reversed) order given, printing one
        progress line per container."""
        print('{:>3s} {:<20s} {:<15s} {:<20s} {:<15s} {:<10s}'.format(
            ' #', 'INSTANCE', 'SERVICE', 'SHIP', 'CONTAINER', 'STATUS'))
        for order, container in enumerate(self._containers):
            # Number lines in reverse so they match the original start order.
            o = OutputFormatter(
                ('{:>3d}. \033[;1m{:<20.20s}\033[;0m {:<15.15s} ' +
                 '{:<20.20s}').format(len(self._containers) - order,
                                      container.name,
                                      container.service.name,
                                      container.ship.name))
            o.pending('checking container...')
            try:
                status = container.status(refresh=True)
                if not status or not status['State']['Running']:
                    o.commit('{:<15s} {:<10s}'.format('n/a', 'already down'))
                    o.end()
                    continue
            except:
                # Best-effort: an unreachable ship is reported and skipped.
                o.commit('\033[31;1m{:<15s} {:<10s}\033[;0m'.format(
                    'host down', 'down'))
                o.end()
                continue
            o.commit('{:<15s}'.format(container.id[:7]))
            try:
                o.pending('stopping service...')
                container.ship.backend.stop(container.id)
                o.commit('\033[32;1mstopped\033[;0m')
            except:
                # Best-effort: a failed stop is reported but not fatal.
                o.commit('\033[31;1mfail!\033[;0m')
            o.end()
| lgpl-3.0 |
ostree/thefuck | tests/functional/test_tcsh.py | 16 | 1775 | import pytest
from tests.functional.plots import with_confirmation, without_confirmation, \
refuse_with_confirmation, select_command_with_arrows
# Docker image flavours the tcsh functional tests run against:
# (image tag, Dockerfile contents, shell to spawn) — one per Python version.
containers = (('thefuck/ubuntu-python3-tcsh',
               u'''FROM ubuntu:latest
RUN apt-get update
RUN apt-get install -yy python3 python3-pip python3-dev git
RUN pip3 install -U setuptools
RUN ln -s /usr/bin/pip3 /usr/bin/pip
RUN apt-get install -yy tcsh''',
               u'tcsh'),
              ('thefuck/ubuntu-python2-tcsh',
               u'''FROM ubuntu:latest
RUN apt-get update
RUN apt-get install -yy python python-pip python-dev git
RUN pip2 install -U pip setuptools
RUN apt-get install -yy tcsh''',
               u'tcsh'))
@pytest.fixture(params=containers)
def proc(request, spawnu, run_without_docker):
    """Spawn a tcsh shell (one per container flavour) with thefuck
    installed and its alias evaluated, ready for the plot helpers."""
    proc = spawnu(*request.param)
    if not run_without_docker:
        # Inside docker the package isn't installed yet; /src is the
        # mounted source tree.
        proc.sendline(u'pip install /src')
    proc.sendline(u'tcsh')
    proc.sendline(u'eval `thefuck --alias`')
    return proc
@pytest.mark.functional
@pytest.mark.once_without_docker
def test_with_confirmation(proc, TIMEOUT):
    """Accepting the suggested correction runs the fixed command."""
    with_confirmation(proc, TIMEOUT)
@pytest.mark.functional
@pytest.mark.once_without_docker
def test_select_command_with_arrows(proc, TIMEOUT):
    """Arrow keys cycle through the available corrections."""
    select_command_with_arrows(proc, TIMEOUT)
@pytest.mark.functional
@pytest.mark.once_without_docker
def test_refuse_with_confirmation(proc, TIMEOUT):
    """Declining the suggestion leaves the command unexecuted."""
    refuse_with_confirmation(proc, TIMEOUT)
@pytest.mark.functional
@pytest.mark.once_without_docker
def test_without_confirmation(proc, TIMEOUT):
    """With require_confirmation off, the correction runs immediately."""
    without_confirmation(proc, TIMEOUT)
# TODO: ensure that history changes.
| mit |
jrkarki/scai | src/scai/testscai.py | 1 | 1169 | """
Created on 22/02/2017
@author: jrkarki
"""
class Reanimator:
    """This class represents lifeless bodies that can be reanimated"""
    def __init__(self, corpse, alive=False):
        """
        :param corpse: The name of the corpse
        :type corpse: str
        :param alive: Is the corpse alive? Defaults to False.
        :type alive: bool
        """
        # NOTE(review): the attribute name 'corps' (sic) is kept as-is for
        # compatibility with existing callers.
        self.corps = corpse
        self.alive = alive
    def reanimate(self):
        """Reanimates a corpse, or raises a ValueError if it's already alive"""
        if self.alive:
            raise ValueError("Can't reanimate something that's alive")
        self.alive = True
    def __str__(self):
        if not self.alive:
            return "The lifeless body of {}".format(self.corps)
        return "{}... it's alive!!!".format(self.corps)
    def __repr__(self):
        # Reuse the human-readable form for the debug representation.
        return self.__str__()
def build_bodies(list_of_str):
    """Makes a corpse object for each name in the list
    :param list_of_str: list of strings of names for corpses
    :type list_of_str: iter of str
    :return: A list of corpses
    :rtype: list of Reanimator
    """
    # Reanimator takes a single name argument, so map() applies directly.
    return list(map(Reanimator, list_of_str))
Abhinav117/pymtl | pisa/pisa_inst_and_test.py | 4 | 5495 | #=========================================================================
# pisa_and_test.py
#=========================================================================
import pytest
import random
import pisa_encoding
from pymtl import Bits
from PisaSim import PisaSim
from pisa_inst_test_utils import *
#-------------------------------------------------------------------------
# gen_basic_test
#-------------------------------------------------------------------------
def gen_basic_test():
  """Return assembly for a basic directed 'and' test, with manual nop
  padding to avoid relying on bypassing between the instructions."""
  return """
    mfc0 r1, mngr2proc < 0x0f0f0f0f
    mfc0 r2, mngr2proc < 0x00ff00ff
    nop
    nop
    nop
    nop
    nop
    nop
    nop
    nop
    and r3, r1, r2
    nop
    nop
    nop
    nop
    nop
    nop
    nop
    nop
    mtc0 r3, proc2mngr > 0x000f000f
    nop
    nop
    nop
    nop
    nop
    nop
    nop
    nop
  """
#-------------------------------------------------------------------------
# gen_dest_byp_test
#-------------------------------------------------------------------------
def gen_dest_byp_test():
  """Destination-bypass tests: vary the nop count between 'and' and the
  use of its destination register."""
  return [
    gen_rr_dest_byp_test( 5, "and", 0x00000f0f, 0x000000ff, 0x0000000f ),
    gen_rr_dest_byp_test( 4, "and", 0x0000f0f0, 0x00000ff0, 0x000000f0 ),
    gen_rr_dest_byp_test( 3, "and", 0x000f0f00, 0x0000ff00, 0x00000f00 ),
    gen_rr_dest_byp_test( 2, "and", 0x00f0f000, 0x000ff000, 0x0000f000 ),
    gen_rr_dest_byp_test( 1, "and", 0x0f0f0000, 0x00ff0000, 0x000f0000 ),
    gen_rr_dest_byp_test( 0, "and", 0xf0f00000, 0x0ff00000, 0x00f00000 ),
  ]
#-------------------------------------------------------------------------
# gen_src0_byp_test
#-------------------------------------------------------------------------
def gen_src0_byp_test():
  """Source-0 bypass tests: vary the nop count before src0 is consumed."""
  return [
    gen_rr_src0_byp_test( 5, "and", 0x0f00000f, 0xff000000, 0x0f000000 ),
    gen_rr_src0_byp_test( 4, "and", 0xf00000f0, 0xf000000f, 0xf0000000 ),
    gen_rr_src0_byp_test( 3, "and", 0x00000f0f, 0x000000ff, 0x0000000f ),
    gen_rr_src0_byp_test( 2, "and", 0x0000f0f0, 0x00000ff0, 0x000000f0 ),
    gen_rr_src0_byp_test( 1, "and", 0x000f0f00, 0x0000ff00, 0x00000f00 ),
    gen_rr_src0_byp_test( 0, "and", 0x00f0f000, 0x000ff000, 0x0000f000 ),
  ]
#-------------------------------------------------------------------------
# gen_src1_byp_test
#-------------------------------------------------------------------------
def gen_src1_byp_test():
  """Source-1 bypass tests: vary the nop count before src1 is consumed."""
  return [
    gen_rr_src1_byp_test( 5, "and", 0x0f0f0000, 0x00ff0000, 0x000f0000 ),
    gen_rr_src1_byp_test( 4, "and", 0xf0f00000, 0x0ff00000, 0x00f00000 ),
    gen_rr_src1_byp_test( 3, "and", 0x0f00000f, 0xff000000, 0x0f000000 ),
    gen_rr_src1_byp_test( 2, "and", 0xf00000f0, 0xf000000f, 0xf0000000 ),
    gen_rr_src1_byp_test( 1, "and", 0x00000f0f, 0x000000ff, 0x0000000f ),
    gen_rr_src1_byp_test( 0, "and", 0x0000f0f0, 0x00000ff0, 0x000000f0 ),
  ]
#-------------------------------------------------------------------------
# gen_srcs_byp_test
#-------------------------------------------------------------------------
def gen_srcs_byp_test():
  """Both-sources bypass tests: vary the nop count before both sources."""
  return [
    gen_rr_srcs_byp_test( 5, "and", 0x000f0f00, 0x0000ff00, 0x00000f00 ),
    gen_rr_srcs_byp_test( 4, "and", 0x00f0f000, 0x000ff000, 0x0000f000 ),
    gen_rr_srcs_byp_test( 3, "and", 0x0f0f0000, 0x00ff0000, 0x000f0000 ),
    gen_rr_srcs_byp_test( 2, "and", 0xf0f00000, 0x0ff00000, 0x00f00000 ),
    gen_rr_srcs_byp_test( 1, "and", 0x0f00000f, 0xff000000, 0x0f000000 ),
    gen_rr_srcs_byp_test( 0, "and", 0xf00000f0, 0xf000000f, 0xf0000000 ),
  ]
#-------------------------------------------------------------------------
# gen_srcs_dest_test
#-------------------------------------------------------------------------
def gen_srcs_dest_test():
  """Register-aliasing tests: src0/src1 equal to the destination and to
  each other."""
  return [
    gen_rr_src0_eq_dest_test( "and", 0x00000f0f, 0x000000ff, 0x0000000f ),
    gen_rr_src1_eq_dest_test( "and", 0x0000f0f0, 0x00000ff0, 0x000000f0 ),
    gen_rr_src0_eq_src1_test( "and", 0x000f0f00, 0x000f0f00 ),
    gen_rr_srcs_eq_dest_test( "and", 0x000f0f00, 0x000f0f00 ),
  ]
#-------------------------------------------------------------------------
# gen_value_test
#-------------------------------------------------------------------------
def gen_value_test():
  """Directed value tests covering complementary bit patterns."""
  return [
    gen_rr_value_test( "and", 0xff00ff00, 0x0f0f0f0f, 0x0f000f00 ),
    gen_rr_value_test( "and", 0x0ff00ff0, 0xf0f0f0f0, 0x00f000f0 ),
    gen_rr_value_test( "and", 0x00ff00ff, 0x0f0f0f0f, 0x000f000f ),
    gen_rr_value_test( "and", 0xf00ff00f, 0xf0f0f0f0, 0xf000f000 ),
  ]
#-------------------------------------------------------------------------
# gen_random_test
#-------------------------------------------------------------------------
def gen_random_test():
  """100 random operand pairs; the expected value is computed with the
  Bits '&' operator as a reference model."""
  # NOTE(review): xrange is Python 2 only.
  asm_code = []
  for i in xrange(100):
    src0 = Bits( 32, random.randint(0,0xffffffff) )
    src1 = Bits( 32, random.randint(0,0xffffffff) )
    dest = src0 & src1
    asm_code.append( gen_rr_value_test( "and", src0.uint(), src1.uint(), dest.uint() ) )
  return asm_code
#-------------------------------------------------------------------------
# test_basic
#-------------------------------------------------------------------------
# Each generator above contributes one parametrized case via asm_test.
@pytest.mark.parametrize( "name,test", [
  asm_test( gen_basic_test ),
  asm_test( gen_dest_byp_test ),
  asm_test( gen_src0_byp_test ),
  asm_test( gen_src1_byp_test ),
  asm_test( gen_srcs_byp_test ),
  asm_test( gen_srcs_dest_test ),
  asm_test( gen_value_test ),
  asm_test( gen_random_test ),
])
def test( name, test ):
  """Assemble the generated program and run it on the PISA simulator;
  the mtc0 '>' annotations inside the asm check the expected values."""
  sim = PisaSim( trace_en=True )
  sim.load( pisa_encoding.assemble( test() ) )
  sim.run()
| bsd-3-clause |
nightjean/Deep-Learning | tensorflow/contrib/distributions/python/ops/vector_student_t.py | 22 | 12708 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Vector Student's t distribution classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.distributions import student_t
from tensorflow.python.ops.distributions import transformed_distribution
from tensorflow.python.ops.distributions import util as distribution_util
# TODO(jvdillon): Add unittests for this once we know where will put this code
# and how it will generally be used. In the interim this code is tested via the
# _VectorStudentT tests.
def _infer_shapes(scale_oppd, shift):
  """Helper which returns batch_shape, event_shape from `Affine` properties.
  The `Affine` `Bijector` (roughly) computes `Y = scale @ X.T + shift`. This
  function infers the `batch_shape` and `event_shape` from the `scale` and
  `shift` terms.
  Args:
    scale_oppd: Instance of OperatorPDBase subclass representing the `Affine`
      `Bijector` scale matrix.
    shift: `Tensor` representing the `shift` vector.
  Returns:
    batch_shape: 1D, integer `Tensor` representing the shape of batch
      dimensions.
    event_shape: 1D, integer `Tensor` representing the shape of event
      dimensions.
  Raises:
    ValueError: if we are not able to infer batch/event shapes from the args.
  """
  # Collect known static shape.
  def _has_static_ndims(x):
    # True when x exists and its static rank is known.
    return x is not None and x.get_shape().ndims is not None
  if _has_static_ndims(scale_oppd) and _has_static_ndims(shift):
    # Both available: merge the two static shapes for extra validation.
    batch_shape = scale_oppd.get_batch_shape().merge_with(
        shift.get_shape()[:-1])
    event_shape = scale_oppd.get_shape()[-1:].merge_with(
        shift.get_shape()[-1:])
  elif _has_static_ndims(scale_oppd):
    batch_shape = scale_oppd.get_batch_shape()
    event_shape = scale_oppd.get_shape()[-1:]
  elif _has_static_ndims(shift):
    batch_shape = shift.get_shape()[:-1]
    event_shape = shift.get_shape()[-1:]
  else:
    batch_shape = tensor_shape.TensorShape(None)
    event_shape = tensor_shape.TensorShape(None)
  # Convert TensorShape to Tensors and see if we're done.
  if batch_shape.is_fully_defined():
    batch_shape = constant_op.constant(batch_shape.as_list(),
                                       dtype=dtypes.int32)
  else:
    batch_shape = None
  if event_shape.is_fully_defined():
    event_shape = constant_op.constant(event_shape.as_list(),
                                       dtype=dtypes.int32)
  else:
    event_shape = None
  if batch_shape is not None and event_shape is not None:
    return batch_shape, event_shape
  # Collect known dynamic shape.
  if scale_oppd is not None:
    shape = scale_oppd.shape()
  elif shift is not None:
    shape = array_ops.shape(shift)
  else:
    raise ValueError("unable to infer batch_shape, event_shape")
  # Fill in what we don't know.
  if batch_shape is None:
    batch_shape = array_ops.identity(shape[:-1], name="batch_shape")
  if event_shape is None:
    event_shape = array_ops.identity(shape[-1:], name="event_shape")
  return batch_shape, event_shape
class _VectorStudentT(transformed_distribution.TransformedDistribution):
  """A vector version of Student's t-distribution on `R^k`.
  #### Mathematical details
  The probability density function (pdf) is,
  ```none
  pdf(x; df, mu, Sigma) = (1 + ||y||**2 / df)**(-0.5 (df + 1)) / Z
  where,
  y = inv(Sigma) (x - mu)
  Z = abs(det(Sigma)) ( sqrt(df pi) Gamma(0.5 df) / Gamma(0.5 (df + 1)) )**k
  ```
  where:
  * `loc = mu`; a vector in `R^k`,
  * `scale = Sigma`; a lower-triangular matrix in `R^{k x k}`,
  * `Z` denotes the normalization constant, and,
  * `Gamma` is the [gamma function](
    https://en.wikipedia.org/wiki/Gamma_function), and,
  * `||y||**2` denotes the [squared Euclidean norm](
    https://en.wikipedia.org/wiki/Norm_(mathematics)#Euclidean_norm) of `y`.
  The VectorStudentT distribution is a member of the [location-scale family](
  https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
  constructed as,
  ```none
  X ~ StudentT(df, loc=0, scale=1)
  Y = loc + scale * X
  ```
  Notice that the `scale` matrix has semantics closer to std. deviation than
  covariance (but it is not std. deviation).
  This distribution is an Affine transformation of iid
  [Student's t-distributions](
  https://en.wikipedia.org/wiki/Student%27s_t-distribution)
  and should not be confused with the [Multivariate Student's t-distribution](
  https://en.wikipedia.org/wiki/Multivariate_t-distribution). The
  traditional Multivariate Student's t-distribution is a type of
  [elliptical distribution](
  https://en.wikipedia.org/wiki/Elliptical_distribution); it has PDF:
  ```none
  pdf(x; df, mu, Sigma) = (1 + ||y||**2 / df)**(-0.5 (df + k)) / Z
  where,
  y = inv(Sigma) (x - mu)
  Z = abs(det(Sigma)) sqrt(df pi)**k Gamma(0.5 df) / Gamma(0.5 (df + k))
  ```
  Notice that the Multivariate Student's t-distribution uses `k` where the
  Vector Student's t-distribution has a `1`. Conversely the Vector version has a
  broader application of the power-`k` in the normalization constant.
  #### Examples
  A single instance of a "Vector Student's t-distribution" is defined by a mean
  vector of length `k` and a scale matrix of shape `k x k`.
  Extra leading dimensions, if provided, allow for batches.
  ```python
  ds = tf.contrib.distributions
  # Initialize a single 3-variate vector Student's t-distribution.
  mu = [1., 2, 3]
  chol = [[1., 0, 0.],
          [1, 3, 0],
          [1, 2, 3]]
  vt = ds.VectorStudentT(df=2, loc=mu, scale_tril=chol)
  # Evaluate this on an observation in R^3, returning a scalar.
  vt.prob([-1., 0, 1])
  # Initialize a batch of two 3-variate vector Student's t-distributions.
  mu = [[1., 2, 3],
        [11, 22, 33]]
  chol = ...  # shape 2 x 3 x 3, lower triangular, positive diagonal.
  vt = ds.VectorStudentT(loc=mu, scale_tril=chol)
  # Evaluate this on two observations, each in R^3, returning a length two
  # tensor.
  x = [[-1, 0, 1],
       [-11, 0, 11]]
  vt.prob(x)
  ```
  For more examples of how to construct the `scale` matrix, see the
  `tf.contrib.distributions.bijectors.Affine` docstring.
  """
  def __init__(self,
               df,
               loc=None,
               scale_identity_multiplier=None,
               scale_diag=None,
               scale_tril=None,
               scale_perturb_factor=None,
               scale_perturb_diag=None,
               validate_args=False,
               allow_nan_stats=True,
               name="VectorStudentT"):
    """Instantiates the vector Student's t-distributions on `R^k`.
    The `batch_shape` is the broadcast between `df.batch_shape` and
    `Affine.batch_shape` where `Affine` is constructed from `loc` and
    `scale_*` arguments.
    The `event_shape` is the event shape of `Affine.event_shape`.
    Args:
      df: Floating-point `Tensor`. The degrees of freedom of the
        distribution(s). `df` must contain only positive values. Must be
        scalar if `loc`, `scale_*` imply non-scalar batch_shape or must have
        the same `batch_shape` implied by `loc`, `scale_*`.
      loc: Floating-point `Tensor`. If this is set to `None`, no `loc` is
        applied.
      scale_identity_multiplier: floating point rank 0 `Tensor` representing a
        scaling done to the identity matrix. When `scale_identity_multiplier =
        scale_diag=scale_tril = None` then `scale += IdentityMatrix`. Otherwise
        no scaled-identity-matrix is added to `scale`.
      scale_diag: Floating-point `Tensor` representing the diagonal matrix.
        `scale_diag` has shape [N1, N2, ..., k], which represents a k x k
        diagonal matrix. When `None` no diagonal term is added to `scale`.
      scale_tril: Floating-point `Tensor` representing the lower-triangular
        matrix. `scale_tril` has shape [N1, N2, ..., k, k], which represents a
        k x k lower triangular matrix. When `None` no `scale_tril` term is
        added to `scale`. The upper triangular elements above the diagonal are
        ignored.
      scale_perturb_factor: Floating-point `Tensor` representing factor matrix
        with last two dimensions of shape `(k, r)`. When `None`, no rank-r
        update is added to `scale`.
      scale_perturb_diag: Floating-point `Tensor` representing the diagonal
        matrix. `scale_perturb_diag` has shape [N1, N2, ..., r], which
        represents an r x r Diagonal matrix. When `None` low rank updates will
        take the form `scale_perturb_factor * scale_perturb_factor.T`.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`,
        statistics (e.g., mean, mode, variance) use the value "`NaN`" to
        indicate the result is undefined. When `False`, an exception is raised
        if one or more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.
    """
    # Capture the constructor arguments before any other locals are bound;
    # stored in self._parameters after construction succeeds.
    parameters = locals()
    graph_parents = [df, loc, scale_identity_multiplier, scale_diag,
                     scale_tril, scale_perturb_factor, scale_perturb_diag]
    with ops.name_scope(name):
      with ops.name_scope("init", values=graph_parents):
        # The shape of the _VectorStudentT distribution is governed by the
        # relationship between df.batch_shape and affine.batch_shape. In
        # pseudocode the basic procedure is:
        #   if df.batch_shape is scalar:
        #     if affine.batch_shape is not scalar:
        #       # broadcast distribution.sample so
        #       # it has affine.batch_shape.
        #       self.batch_shape = affine.batch_shape
        #   else:
        #     if affine.batch_shape is scalar:
        #       # let affine broadcasting do its thing.
        #     self.batch_shape = df.batch_shape
        # All of the above magic is actually handled by TransformedDistribution.
        # Here we really only need to collect the affine.batch_shape and decide
        # what we're going to pass in to TransformedDistribution's
        # (override) batch_shape arg.
        affine = bijectors.Affine(
            shift=loc,
            scale_identity_multiplier=scale_identity_multiplier,
            scale_diag=scale_diag,
            scale_tril=scale_tril,
            scale_perturb_factor=scale_perturb_factor,
            scale_perturb_diag=scale_perturb_diag,
            validate_args=validate_args)
        # Base distribution: scalar standard Student's t; location/scale are
        # applied by the Affine bijector.
        distribution = student_t.StudentT(
            df=df,
            loc=array_ops.zeros([], dtype=affine.dtype),
            scale=array_ops.ones([], dtype=affine.dtype))
        batch_shape, override_event_shape = _infer_shapes(
            affine.scale, affine.shift)
        # Only override the batch shape when the base distribution's batch
        # is scalar; otherwise let Affine broadcasting handle it.
        override_batch_shape = distribution_util.pick_vector(
            distribution.is_scalar_batch(),
            batch_shape,
            constant_op.constant([], dtype=dtypes.int32))
        super(_VectorStudentT, self).__init__(
            distribution=distribution,
            bijector=affine,
            batch_shape=override_batch_shape,
            event_shape=override_event_shape,
            validate_args=validate_args,
            name=name)
        self._parameters = parameters
  @property
  def df(self):
    """Degrees of freedom in these Student's t distribution(s)."""
    return self.distribution.df
  @property
  def loc(self):
    """Locations of these Student's t distribution(s)."""
    return self.bijector.shift
  @property
  def scale(self):
    """Dense (batch) covariance matrix, if available."""
    return self.bijector.scale
| apache-2.0 |
retomerz/intellij-community | python/lib/Lib/site-packages/django/contrib/messages/tests/urls.py | 78 | 1913 | from django.conf.urls.defaults import *
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render_to_response, redirect
from django.template import RequestContext, Template
from django.template.response import TemplateResponse
TEMPLATE = """{% if messages %}
<ul class="messages">
{% for message in messages %}
<li{% if message.tags %} class="{{ message.tags }}"{% endif %}>
{{ message }}
</li>
{% endfor %}
</ul>
{% endif %}
"""
def add(request, message_type):
    """Add each POSTed message at the given level, then redirect to ``show``.

    ``fail_silently`` is forwarded only when it was explicitly POSTed; leaving
    it out exercises the messages API's own default (which must be False).
    """
    add_message = getattr(messages, message_type)
    fail_silently = request.POST.get('fail_silently', None)
    extra = {} if fail_silently is None else {'fail_silently': fail_silently}
    for msg in request.POST.getlist('messages'):
        add_message(request, msg, **extra)
    return HttpResponseRedirect(
        reverse('django.contrib.messages.tests.urls.show'))
def add_template_response(request, message_type):
    """Add each POSTed message, then redirect to the TemplateResponse view."""
    add_message = getattr(messages, message_type)
    for msg in request.POST.getlist('messages'):
        add_message(request, msg)
    target = reverse('django.contrib.messages.tests.urls.show_template_response')
    return HttpResponseRedirect(target)
def show(request):
    """Render the queued messages eagerly into a plain HttpResponse."""
    rendered = Template(TEMPLATE).render(RequestContext(request))
    return HttpResponse(rendered)
def show_template_response(request):
    # Same output as show(), but exercises the lazy TemplateResponse rendering
    # path (messages are consumed at render time, not at view time).
    return TemplateResponse(request, Template(TEMPLATE))
# Old-style (pre-Django 1.8) URLconf using patterns() with bare
# (regex, view) tuples; the captured group is the message level name.
urlpatterns = patterns('',
    ('^add/(debug|info|success|warning|error)/$', add),
    ('^show/$', show),
    ('^template_response/add/(debug|info|success|warning|error)/$', add_template_response),
    ('^template_response/show/$', show_template_response),
)
| apache-2.0 |
ltilve/chromium | third_party/protobuf/python/google/protobuf/internal/reflection_cpp_generated_test.py | 215 | 4054 | #! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unittest for reflection.py, which tests the generated C++ implementation."""
__author__ = 'jasonh@google.com (Jason Hsueh)'
import os
os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION'] = 'cpp'
import unittest
from google.protobuf.internal import api_implementation
from google.protobuf.internal import more_extensions_dynamic_pb2
from google.protobuf.internal import more_extensions_pb2
from google.protobuf.internal.reflection_test import *
class ReflectionCppTest(unittest.TestCase):
  """C++-backend-specific checks, run on top of the shared reflection tests.

  The wildcard import above pulls in every test case from reflection_test so
  the whole suite re-runs against the C++ implementation selected via the
  PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION environment variable.
  """

  def testImplementationSetting(self):
    # Sanity check: the environment variable set at module import time must
    # actually have selected the C++ backend, otherwise this file silently
    # re-tests the pure-Python implementation.
    self.assertEqual('cpp', api_implementation.Type())

  def testExtensionOfGeneratedTypeInDynamicFile(self):
    """Tests that a file built dynamically can extend a generated C++ type.

    The C++ implementation uses a DescriptorPool that has the generated
    DescriptorPool as an underlay. Typically, a type can only find
    extensions in its own pool. With the python C-extension, the generated C++
    extendee may be available, but not the extension. This tests that the
    C-extension implements the correct special handling to make such extensions
    available.
    """
    pb1 = more_extensions_pb2.ExtendedMessage()

    # Test that basic accessors work.
    self.assertFalse(
        pb1.HasExtension(more_extensions_dynamic_pb2.dynamic_int32_extension))
    self.assertFalse(
        pb1.HasExtension(more_extensions_dynamic_pb2.dynamic_message_extension))
    pb1.Extensions[more_extensions_dynamic_pb2.dynamic_int32_extension] = 17
    pb1.Extensions[more_extensions_dynamic_pb2.dynamic_message_extension].a = 24
    self.assertTrue(
        pb1.HasExtension(more_extensions_dynamic_pb2.dynamic_int32_extension))
    self.assertTrue(
        pb1.HasExtension(more_extensions_dynamic_pb2.dynamic_message_extension))

    # Now serialize the data and parse to a new message.
    pb2 = more_extensions_pb2.ExtendedMessage()
    pb2.MergeFromString(pb1.SerializeToString())

    # The round-tripped message must still see both dynamic extensions and
    # preserve their values.
    self.assertTrue(
        pb2.HasExtension(more_extensions_dynamic_pb2.dynamic_int32_extension))
    self.assertTrue(
        pb2.HasExtension(more_extensions_dynamic_pb2.dynamic_message_extension))
    self.assertEqual(
        17, pb2.Extensions[more_extensions_dynamic_pb2.dynamic_int32_extension])
    self.assertEqual(
        24,
        pb2.Extensions[more_extensions_dynamic_pb2.dynamic_message_extension].a)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
dturner-tw/pants | src/python/pants/backend/jvm/targets/managed_jar_dependencies.py | 1 | 1689 | # coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.backend.jvm.targets.jar_dependency import JarDependency
from pants.base.payload import Payload
from pants.base.payload_field import JarsField
from pants.build_graph.target import Target
class ManagedJarDependencies(Target):
  """A set of pinned external artifact versions to apply transitively."""

  def __init__(self, payload=None, artifacts=None, **kwargs):
    """
    :param artifacts: List of `jar <#jar>`_ objects or specs to jar_library targets with
      pinned versions. Versions are pinned per (org, name, classifier, ext) artifact
      coordinate (excludes, etc are ignored for the purposes of pinning).
    """
    jar_objects, library_specs = self._split_jars_and_specs(artifacts)
    self._library_specs = library_specs
    payload = payload or Payload()
    payload.add_fields({
        'artifacts': JarsField(jar_objects),
    })
    super(ManagedJarDependencies, self).__init__(payload=payload, **kwargs)

  @property
  def traversable_specs(self):
    # Addresses the build graph must walk to resolve this target.
    return iter(self.library_specs)

  @property
  def library_specs(self):
    """Lists of specs to resolve to jar_libraries containing more jars."""
    return self._library_specs

  def _split_jars_and_specs(self, jars):
    """Partition ``jars`` into concrete JarDependency objects and address specs."""
    jar_objects = []
    library_specs = []
    for item in jars:
      bucket = jar_objects if isinstance(item, JarDependency) else library_specs
      bucket.append(item)
    return jar_objects, library_specs
| apache-2.0 |
hieukypc/ERP | openerp/addons/stock_calendar/resource.py | 29 | 6503 | import datetime
from openerp.osv import fields, osv
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT
class resource_calendar_leaves(osv.osv):
    # Extends calendar leaves so a leave period can be tied to a specific
    # procurement group (used by the group-aware interval filtering below).
    _inherit = "resource.calendar.leaves"

    _columns = {
        'group_id': fields.many2one('procurement.group', string="Procurement Group"),
    }
class resource_calendar(osv.osv):
    _inherit = "resource.calendar"

    # Could remove this as it does not help a lot.
    # NOTE(review): the `fields` parameter shadows the imported openerp
    # `fields` module inside this method (harmless here, but confusing).
    def _calculate_next_day(self, cr, uid, ids, fields, names, context=None):
        """Function field: next scheduled working datetime for each calendar."""
        res = {}
        for calend in self.browse(cr, uid, ids, context=context):
            # date1 = self.get_next_day(cr, uid, calend.id, datetime.utcnow() + relativedelta(days = 1))
            _format = '%Y-%m-%d %H:%M:%S'
            sched_date = self.schedule_days_get_date(
                cr, uid, calend.id, 1, day_date=datetime.datetime.utcnow(), compute_leaves=True)
            # schedule_days_get_date may return a falsy value; store False then.
            res[calend.id] = sched_date and sched_date.strftime(_format) or False
        return res

    _columns = {
        'next_day': fields.function(_calculate_next_day, string='Next day it should trigger', type='datetime'),
    }

    # Keep as it takes into account times
    def get_leave_intervals(self, cr, uid, id, resource_id=None,
                            start_datetime=None, end_datetime=None,
                            context=None):
        """Get the leaves of the calendar. Leaves can be filtered on the resource,
        the start datetime or the end datetime.

        :param int resource_id: the id of the resource to take into account when
                                computing the leaves. If not set, only general
                                leaves are computed. If set, generic and
                                specific leaves are computed.
        :param datetime start_datetime: if provided, do not take into account leaves
                                        ending before this date.
        :param datetime end_datetime: if provided, do not take into account leaves
                                      beginning after this date.
        :return list leaves: list of tuples (start_datetime, end_datetime, group_id)
                             of leave intervals (the third element is this
                             module's procurement-group extension).
        """
        resource_calendar = self.browse(cr, uid, id, context=context)
        leaves = []
        for leave in resource_calendar.leave_ids:
            # Resource-specific leaves only apply to the requested resource.
            if leave.resource_id and not resource_id == leave.resource_id.id:
                continue
            date_from = datetime.datetime.strptime(leave.date_from, DEFAULT_SERVER_DATETIME_FORMAT)
            if end_datetime and date_from > end_datetime:
                continue
            date_to = datetime.datetime.strptime(leave.date_to, DEFAULT_SERVER_DATETIME_FORMAT)
            if start_datetime and date_to < start_datetime:
                continue
            leaves.append((date_from, date_to, leave.group_id.id))
        return leaves

    # --------------------------------------------------
    # Utility methods
    # --------------------------------------------------

    def interval_remove_leaves(self, cr, uid, interval, leave_intervals, context=None):
        """ Utility method that remove leave intervals from a base interval:

        - clean the leave intervals, to have an ordered list of not-overlapping
          intervals
        - initiate the current interval to be the base interval
        - for each leave interval:
          - finishing before the current interval: skip, go to next
          - beginning after the current interval: skip and get out of the loop
            because we are outside range (leaves are ordered)
          - beginning within the current interval: close the current interval
            and begin a new current interval that begins at the end of the leave
            interval
          - ending within the current interval: update the current interval begin
            to match the leave interval ending
        - take into account the procurement group when needed

        :param tuple interval: a tuple (beginning datetime, ending datetime) that
                               is the base interval from which the leave intervals
                               will be removed
        :param list leave_intervals: a list of tuples (beginning datetime, ending datetime)
                                     that are intervals to remove from the base interval
        :return list intervals: a list of tuples (begin datetime, end datetime)
                                that are the remaining valid intervals """
        if not interval:
            return interval
        if leave_intervals is None:
            leave_intervals = []
        intervals = []
        #leave_intervals = self.interval_clean(leave_intervals) NOT NECESSARY TO CLEAN HERE AS IT WOULD REMOVE GROUP INFO
        current_interval = list(interval)
        for leave in leave_intervals:
            # Group-aware leaves (3-tuples) only remove time from intervals
            # belonging to the same procurement group.
            if len(leave) > 2:
                current_group = False
                att_obj = self.pool.get("resource.calendar.attendance")
                if leave[2]:
                    if len(current_interval) > 2:
                        current_group = current_interval[2] and att_obj.browse(cr, uid, current_interval[2], context=context).group_id.id or False
                    if leave[2] != current_group:
                        continue
            # Leave entirely before the current interval: irrelevant.
            if leave[1] <= current_interval[0]:
                continue
            # Leave entirely after: later leaves are too (ordered), stop.
            if leave[0] >= current_interval[1]:
                break
            # Leave starts inside the interval: emit the part before the leave
            # and continue with the part after it.
            if current_interval[0] < leave[0] < current_interval[1]:
                current_interval[1] = leave[0]
                intervals.append((current_interval[0], current_interval[1]))
                current_interval = [leave[1], interval[1]]
            # if current_interval[0] <= leave[1] <= current_interval[1]:
            if current_interval[0] <= leave[1]:
                current_interval[0] = leave[1]
        if current_interval and current_interval[0] < interval[1]:  # remove intervals moved outside base interval due to leaves
            if len(interval) > 2:
                # Propagate the attendance id carried by the base interval.
                intervals.append((current_interval[0], current_interval[1], interval[2]))
            else:
                intervals.append((current_interval[0], current_interval[1],))
        return intervals
class resource_calendar_attendance(osv.osv):
    # Extends attendance (working-time) lines with an optional procurement
    # group, matched against group-aware leaves in interval_remove_leaves.
    _inherit = "resource.calendar.attendance"

    _columns = {
        'group_id': fields.many2one('procurement.group', 'Procurement Group'),
    }
| gpl-3.0 |
XiaosongWei/chromium-crosswalk | tools/telemetry/telemetry/internal/platform/cros_platform_backend_unittest.py | 25 | 1199 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.internal.platform import cros_platform_backend
class CrosPlatformBackendTest(unittest.TestCase):
  # Raw per-cpu contents of /sys/devices/system/cpu/cpuN/cpuidle state files
  # as a single newline-joined sample string (names, then usage counters,
  # then latencies and a trailing sample timestamp).
  initial_cstate = {
      'cpu0': 'POLL\nC1\nC2\nC3\n0\n138356189\n102416540\n'
              '17158209182\n0\n1\n500\n1000\n1403211341',
      'cpu1': 'POLL\nC1\nC2\nC3\n0\n107318149\n81786238\n'
              '17348563431\n0\n1\n500\n1000\n1403211341'
  }
  # Expected parsed per-state residency values; C0 is derived (active time =
  # elapsed time minus time spent in the idle states).
  expected_cstate = {
      'cpu0': {
          'C0': 1403193942018089,
          'C1': 138356189,
          'C2': 102416540,
          'C3': 17158209182
      },
      'cpu1': {
          'C0': 1403193803332182,
          'C1': 107318149,
          'C2': 81786238,
          'C3': 17348563431
      }
  }

  def testCrosParseCpuStates(self):
    # Use mock start and end times to allow for the test to calculate C0.
    results = cros_platform_backend.CrosPlatformBackend.ParseCStateSample(
        self.initial_cstate)
    for cpu in results:
      for state in results[cpu]:
        # assertAlmostEqual because C0 is computed from float arithmetic.
        self.assertAlmostEqual(results[cpu][state],
                               self.expected_cstate[cpu][state])
| bsd-3-clause |
mxOBS/deb-pkg_trusty_chromium-browser | mojo/public/python/mojo/bindings/reflection.py | 9 | 16958 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The metaclasses used by the mojo python bindings."""
import itertools
import logging
import sys
# pylint: disable=F0401
import mojo.bindings.messaging as messaging
import mojo.bindings.promise as promise
import mojo.bindings.serialization as serialization
class MojoEnumType(type):
  """Meta class for enumerations.

  Usage:
    class MyEnum(object):
      __metaclass__ = MojoEnumType
      VALUES = [
        ('A', 0),
        'B',
        ('C', 5),
      ]

  This will define a enum with 3 values, 'A' = 0, 'B' = 1 and 'C' = 5.
  """

  def __new__(mcs, name, bases, dictionary):
    dictionary['__slots__'] = ()
    dictionary['__new__'] = None
    # VALUES entries are either ('NAME', value) pairs or bare 'NAME' strings.
    # A bare name gets the value following the previously defined one
    # (starting at 0), as documented in the class docstring above; the
    # previous implementation rejected bare strings with ValueError.
    next_value = 0
    for value in dictionary.pop('VALUES', []):
      if isinstance(value, str):
        key, enum_value = value, next_value
      elif isinstance(value, tuple) and len(value) == 2:
        key, enum_value = value
      else:
        raise ValueError('incorrect value: %r' % value)
      if isinstance(key, str) and isinstance(enum_value, int):
        dictionary[key] = enum_value
        next_value = enum_value + 1
      else:
        raise ValueError('incorrect value: %r' % value)
    return type.__new__(mcs, name, bases, dictionary)

  def __setattr__(cls, key, value):
    # Enum classes are immutable: adding or rebinding values is an error.
    raise AttributeError('can\'t set attribute')

  def __delattr__(cls, key):
    # Enum values cannot be removed either.
    raise AttributeError('can\'t delete attribute')
class MojoStructType(type):
  """Meta class for structs.

  Usage:
    class MyStruct(object):
      __metaclass__ = MojoStructType
      DESCRIPTOR = {
        'constants': {
          'C1': 1,
          'C2': 2,
        },
        'enums': {
          'ENUM1': [
            ('V1', 1),
            'V2',
          ],
          'ENUM2': [
            ('V1', 1),
            'V2',
          ],
        },
        'fields': [
           SingleFieldGroup('x', _descriptor.TYPE_INT32, 0, 0),
        ],
      }

  This will define an struct, with:
  - 2 constants 'C1' and 'C2';
  - 2 enums 'ENUM1' and 'ENUM2', each of those having 2 values, 'V1' and
    'V2';
  - 1 int32 field named 'x'.
  """

  def __new__(mcs, name, bases, dictionary):
    # NOTE(review): ('_fields') is a plain string, not a 1-tuple; __slots__
    # accepts a single string, so this works but reads like a typo.
    dictionary['__slots__'] = ('_fields')
    descriptor = dictionary.pop('DESCRIPTOR', {})

    # Add constants
    dictionary.update(descriptor.get('constants', {}))

    # Add enums (each becomes a nested class built by MojoEnumType).
    enums = descriptor.get('enums', {})
    for key in enums:
      dictionary[key] = MojoEnumType(key, (object,), { 'VALUES': enums[key] })

    # Add fields: flatten the field groups, order by wire index, and install
    # a property per field backed by the instance's _fields dict.
    groups = descriptor.get('fields', [])
    fields = list(
        itertools.chain.from_iterable([group.descriptors for group in groups]))
    fields.sort(key=lambda f: f.index)
    for field in fields:
      dictionary[field.name] = _BuildProperty(field)

    # Add init
    dictionary['__init__'] = _StructInit(fields)

    # Add serialization method; the Serialization object is shared by the
    # Serialize/Deserialize closures below.
    serialization_object = serialization.Serialization(groups)
    def Serialize(self, handle_offset=0):
      return serialization_object.Serialize(self, handle_offset)
    dictionary['Serialize'] = Serialize

    # pylint: disable=W0212
    def AsDict(self):
      return self._fields
    dictionary['AsDict'] = AsDict

    def Deserialize(cls, context):
      # __new__ bypasses __init__ so no defaults are materialized before the
      # deserialized values are installed.
      result = cls.__new__(cls)
      fields = {}
      serialization_object.Deserialize(fields, context)
      result._fields = fields
      return result
    dictionary['Deserialize'] = classmethod(Deserialize)

    dictionary['__eq__'] = _StructEq(fields)
    dictionary['__ne__'] = _StructNe

    return type.__new__(mcs, name, bases, dictionary)

  # Prevent adding new attributes, or mutating constants.
  def __setattr__(cls, key, value):
    raise AttributeError('can\'t set attribute')

  # Prevent deleting constants.
  def __delattr__(cls, key):
    raise AttributeError('can\'t delete attribute')
class MojoInterfaceType(type):
  """Meta class for interfaces.

  Usage:
    class MyInterface(object):
      __metaclass__ = MojoInterfaceType
      DESCRIPTOR = {
        'client': MyInterfaceClient,
        'methods': [
          {
            'name': 'FireAndForget',
            'ordinal': 0,
            'parameters': [
              SingleFieldGroup('x', _descriptor.TYPE_INT32, 0, 0),
            ]
          },
          {
            'name': 'Ping',
            'ordinal': 1,
            'parameters': [
              SingleFieldGroup('x', _descriptor.TYPE_INT32, 0, 0),
            ],
            'responses': [
              SingleFieldGroup('x', _descriptor.TYPE_INT32, 0, 0),
            ],
          },
        ],
      }
  """

  def __new__(mcs, name, bases, dictionary):
    # If one of the base class is already an interface type, do not edit the
    # class (generated Proxy subclasses reuse this metaclass indirectly).
    for base in bases:
      if isinstance(base, mcs):
        return type.__new__(mcs, name, bases, dictionary)

    descriptor = dictionary.pop('DESCRIPTOR', {})

    # Every declared method starts as a stub that raises NotImplementedError;
    # concrete behavior comes from proxies or user implementations.
    methods = [_MethodDescriptor(x) for x in descriptor.get('methods', [])]
    for method in methods:
      dictionary[method.name] = _NotImplemented
    client_class_getter = descriptor.get('client', None)

    interface_manager = InterfaceManager(name, methods, client_class_getter)
    dictionary.update({
        'client': None,
        'manager': None,
        '_interface_manager': interface_manager,
    })

    interface_class = type.__new__(mcs, name, bases, dictionary)
    # The manager needs the concrete class to build Proxy/Stub subclasses.
    interface_manager.interface_class = interface_class
    return interface_class

  @property
  def manager(cls):
    # Class-level accessor for the shared InterfaceManager.
    return cls._interface_manager

  # Prevent adding new attributes, or mutating constants.
  def __setattr__(cls, key, value):
    raise AttributeError('can\'t set attribute')

  # Prevent deleting constants.
  def __delattr__(cls, key):
    raise AttributeError('can\'t delete attribute')
class InterfaceProxy(object):
  """
  A proxy allows to access a remote interface through a message pipe.
  """
  # Marker base class mixed into generated proxy types by
  # InterfaceManager._InternalProxy; it carries no behavior of its own.
  pass
class InterfaceRequest(object):
  """
  An interface request allows to send a request for an interface to a remote
  object and start using it immediately.
  """

  def __init__(self, handle):
    self._handle = handle

  def IsPending(self):
    """Whether this request still owns a valid message pipe handle."""
    return self._handle.IsValid()

  def PassMessagePipe(self):
    """Relinquish ownership of the underlying handle and return it."""
    handle, self._handle = self._handle, None
    return handle
class InterfaceManager(object):
  """
  Manager for an interface class. The manager contains the operation that allows
  to bind an implementation to a pipe, or to generate a proxy for an interface
  over a pipe.
  """

  def __init__(self, name, methods, client_class_getter):
    self.name = name
    self.methods = methods
    # Filled in by MojoInterfaceType.__new__ once the class exists.
    self.interface_class = None
    # The client class is fetched lazily (via a getter) to break the mutual
    # dependency between an interface and its client interface.
    self._client_class_getter = client_class_getter
    self._client_manager = None
    self._client_manager_computed = False
    # Proxy/Stub classes are generated on first use and then cached.
    self._proxy_class = None
    self._stub_class = None

  @property
  def client_manager(self):
    # Lazily resolve (at most once) the manager of the client interface.
    if not self._client_manager_computed:
      self._client_manager_computed = True
      if self._client_class_getter:
        self._client_manager = self._client_class_getter().manager
    return self._client_manager

  def Proxy(self, handle):
    """Return a proxy for this interface over the given message pipe handle."""
    router = messaging.Router(handle)
    error_handler = _ProxyErrorHandler()
    router.SetErrorHandler(error_handler)
    router.Start()
    return self._InternalProxy(router, error_handler)

  # pylint: disable=W0212
  def Bind(self, impl, handle):
    """Bind an implementation of this interface to a message pipe handle."""
    router = messaging.Router(handle)
    router.SetIncomingMessageReceiver(self._Stub(impl))
    error_handler = _ProxyErrorHandler()
    router.SetErrorHandler(error_handler)

    # Retain the router, until an error happen.
    retainer = _Retainer(router)
    def Cleanup(_):
      retainer.release()
    error_handler.AddCallback(Cleanup)

    if self.client_manager:
      impl.client = self.client_manager._InternalProxy(router, error_handler)

    # Give an instance manager to the implementation to allow it to close
    # the connection.
    impl.manager = InstanceManager(router)

    router.Start()

  def _InternalProxy(self, router, error_handler):
    # Build (once) a concrete Proxy class deriving from the interface class,
    # with one generated method per interface method.
    if not self._proxy_class:
      dictionary = {
          '__module__': __name__,
          '__init__': _ProxyInit,
      }
      if self.client_manager:
        dictionary['client'] = property(_ProxyGetClient, _ProxySetClient)
        dictionary['manager'] = None
        dictionary['_client_manager'] = self.client_manager
      for method in self.methods:
        dictionary[method.name] = _ProxyMethodCall(method)
      self._proxy_class = type('%sProxy' % self.name,
                               (self.interface_class, InterfaceProxy),
                               dictionary)

    proxy = self._proxy_class(router, error_handler)
    # Give an instance manager to the proxy to allow to close the connection.
    proxy.manager = InstanceManager(router)
    return proxy

  def _Stub(self, impl):
    # Build (once) a Stub class that dispatches incoming messages to an
    # implementation object; Accept and AcceptWithResponder share one method.
    if not self._stub_class:
      accept_method = _StubAccept(self.methods)
      dictionary = {
          '__module__': __name__,
          '__init__': _StubInit,
          'Accept': accept_method,
          'AcceptWithResponder': accept_method,
      }
      self._stub_class = type('%sStub' % self.name,
                              (messaging.MessageReceiverWithResponder,),
                              dictionary)
    return self._stub_class(impl)
class InstanceManager(object):
  """
  Manager for the implementation of an interface or a proxy. The manager allows
  to control the connection over the pipe.
  """

  def __init__(self, router):
    self.router = router

  def Close(self):
    """Shut down the underlying router and its message pipe."""
    self.router.Close()

  def PassMessagePipe(self):
    """Detach the message pipe handle from the router and return it."""
    return self.router.PassMessagePipe()
class _MethodDescriptor(object):
  """Parsed view of one method entry of an interface DESCRIPTOR."""

  def __init__(self, descriptor):
    self.name = descriptor['name']
    self.ordinal = descriptor['ordinal']
    # 'parameters' is mandatory; 'responses' is only present for methods that
    # expect a reply (response_struct is None otherwise).
    self.parameters_struct = _ConstructParameterStruct(
        descriptor['parameters'], self.name, "Parameters")
    self.response_struct = _ConstructParameterStruct(
        descriptor.get('responses'), self.name, "Responses")
def _ConstructParameterStruct(descriptor, name, suffix):
  # Build a struct type named e.g. 'PingParameters'/'PingResponses' wrapping
  # a method's parameter or response field list; None when absent.
  if descriptor is None:
    return None
  parameter_dictionary = {
      '__metaclass__': MojoStructType,
      '__module__': __name__,
      'DESCRIPTOR': descriptor,
  }
  return MojoStructType(
      '%s%s' % (name, suffix),
      (object,),
      parameter_dictionary)
class _ProxyErrorHandler(messaging.ConnectionErrorHandler):
  """Fans a connection error out to a set of registered callbacks."""

  def __init__(self):
    messaging.ConnectionErrorHandler.__init__(self)
    self._callbacks = set()

  def OnError(self, result):
    exception = messaging.MessagingException('Mojo error: %d' % result)
    # Iterate over a copy: callbacks may unregister themselves while running.
    for callback in list(self._callbacks):
      callback(exception)
    # None marks the handler as fired; later Add/Remove calls become no-ops.
    self._callbacks = None

  def AddCallback(self, callback):
    if self._callbacks is not None:
      self._callbacks.add(callback)

  def RemoveCallback(self, callback):
    if self._callbacks:
      self._callbacks.remove(callback)
class _Retainer(object):
  """Anchors an object in a class-level registry so it stays alive until
  release() is called."""

  # Set to force instances to be retained.
  _RETAINED = set()

  def __init__(self, retained):
    self._retained = retained
    _Retainer._RETAINED.add(self)

  def release(self):
    """Drop the held reference and unregister this retainer."""
    self._retained = None
    _Retainer._RETAINED.remove(self)
def _StructInit(fields):
  """Build the generated __init__ for a struct with the given field list.

  Positional arguments map onto fields in declaration order; keyword
  arguments may only name fields not already bound positionally.
  """
  def _Init(self, *args, **kwargs):
    total = len(args) + len(kwargs)
    if total > len(fields):
      raise TypeError('__init__() takes %d argument (%d given)' %
                      (len(fields), total))
    self._fields = {}
    positional_names = set()
    for field, value in zip(fields, args):
      positional_names.add(field.name)
      setattr(self, field.name, value)
    keyword_names = set(field.name for field in fields) - positional_names
    for name in kwargs:
      if name in keyword_names:
        setattr(self, name, kwargs[name])
      elif name in positional_names:
        raise TypeError(
            '__init__() got multiple values for keyword argument %r' % name)
      else:
        raise TypeError('__init__() got an unexpected keyword argument %r' %
                        name)
  return _Init
def _BuildProperty(field):
  """Build the property for the given field."""

  # pylint: disable=W0212
  def Get(self):
    # Defaults are materialized lazily: an unread, unset field costs nothing.
    if field.name not in self._fields:
      self._fields[field.name] = field.GetDefaultValue()
    return self._fields[field.name]

  # pylint: disable=W0212
  def Set(self, value):
    # Values are normalized through the field's type converter on assignment.
    self._fields[field.name] = field.field_type.Convert(value)

  return property(Get, Set)
def _StructEq(fields):
  """Build an __eq__ that compares two structs field by field."""
  def _Eq(self, other):
    # Structs of different concrete types never compare equal.
    if type(self) is not type(other):
      return False
    return all(getattr(self, field.name) == getattr(other, field.name)
               for field in fields)
  return _Eq
def _StructNe(self, other):
  # __ne__ installed on generated structs; defined as the negation of the
  # generated __eq__ (Python 2 does not derive __ne__ automatically).
  return not self.__eq__(other)
def _ProxyInit(self, router, error_handler):
  # __init__ installed on generated proxy classes by
  # InterfaceManager._InternalProxy.
  self._router = router
  self._error_handler = error_handler
  self._client = None
# pylint: disable=W0212
def _ProxyGetClient(self):
  # Getter half of the generated proxy's 'client' property.
  return self._client
# pylint: disable=W0212
def _ProxySetClient(self, client):
  # Setter half of the generated proxy's 'client' property: wraps the client
  # implementation in a stub and routes incoming messages from the pipe to it.
  self._client = client
  stub = self._client_manager._Stub(client)
  self._router.SetIncomingMessageReceiver(stub)
# pylint: disable=W0212
def _ProxyMethodCall(method):
  """Build the proxy-side implementation of one interface method.

  The generated method serializes its arguments into a message, sends it
  over the router, and returns a Promise that resolves with the response
  (for methods with responses) or with None (fire-and-forget methods).
  """
  flags = messaging.NO_FLAG
  if method.response_struct:
    flags = messaging.MESSAGE_EXPECTS_RESPONSE_FLAG
  def _Call(self, *args, **kwargs):
    def GenerationMethod(resolve, reject):
      message = _GetMessage(method, flags, *args, **kwargs)
      if method.response_struct:
        def Accept(message):
          try:
            assert message.header.message_type == method.ordinal
            payload = message.payload
            response = method.response_struct.Deserialize(
                serialization.RootDeserializationContext(payload.data,
                                                         payload.handles))
            # Single-value responses are unwrapped to the bare value.
            as_dict = response.AsDict()
            if len(as_dict) == 1:
              value = as_dict.values()[0]
              if not isinstance(value, dict):
                response = value
            resolve(response)
            return True
          except Exception as e:
            # Adding traceback similarly to python 3.0 (pep-3134)
            e.__traceback__ = sys.exc_info()[2]
            reject(e)
            return False
          finally:
            # The connection-error path must no longer reject this promise.
            self._error_handler.RemoveCallback(reject)

        # Reject the pending promise if the connection errors out first.
        self._error_handler.AddCallback(reject)
        if not self._router.AcceptWithResponder(
            message, messaging.ForwardingMessageReceiver(Accept)):
          self._error_handler.RemoveCallback(reject)
          reject(messaging.MessagingException("Unable to send message."))
      else:
        if (self._router.Accept(message)):
          resolve(None)
        else:
          reject(messaging.MessagingException("Unable to send message."))
    return promise.Promise(GenerationMethod)
  return _Call
def _GetMessage(method, flags, *args, **kwargs):
  # Serialize a request (or response, depending on flags) for the given
  # method into a single Message: header bytes followed by the struct payload.
  if flags == messaging.MESSAGE_IS_RESPONSE_FLAG:
    struct = method.response_struct(*args, **kwargs)
  else:
    struct = method.parameters_struct(*args, **kwargs)
  header = messaging.MessageHeader(method.ordinal, flags)
  data = header.Serialize()
  (payload, handles) = struct.Serialize()
  data.extend(payload)
  return messaging.Message(data, handles, header)
def _StubInit(self, impl):
  # __init__ installed on generated stub classes (see InterfaceManager._Stub).
  self.impl = impl
def _StubAccept(methods):
  """Build the stub-side dispatcher shared by Accept and AcceptWithResponder.

  The returned function decodes an incoming message, invokes the matching
  implementation method, and (when a response is expected) sends the result
  back through the responder. Returns True on success, False on any failure.
  """
  methods_by_ordinal = dict((m.ordinal, m) for m in methods)
  def Accept(self, message, responder=None):
    try:
      header = message.header
      assert header.expects_response == bool(responder)
      assert header.message_type in methods_by_ordinal
      method = methods_by_ordinal[header.message_type]
      payload = message.payload
      parameters = method.parameters_struct.Deserialize(
          serialization.RootDeserializationContext(
              payload.data, payload.handles)).AsDict()
      response = getattr(self.impl, method.name)(**parameters)
      if header.expects_response:
        def SendResponse(response):
          # A dict response maps onto the response struct's fields; any other
          # value is treated as a single-field response.
          if isinstance(response, dict):
            response_message = _GetMessage(method,
                                           messaging.MESSAGE_IS_RESPONSE_FLAG,
                                           **response)
          else:
            response_message = _GetMessage(method,
                                           messaging.MESSAGE_IS_RESPONSE_FLAG,
                                           response)
          # Echo the request id so the caller can correlate the response.
          response_message.header.request_id = header.request_id
          responder.Accept(response_message)
        p = promise.Promise.Resolve(response).Then(SendResponse)
        if self.impl.manager:
          # Close the connection in case of error.
          p.Catch(lambda _: self.impl.manager.Close())
      return True
    # pylint: disable=W0702
    except:
      # Close the connection in case of error.
      # NOTE(review): bare except deliberately swallows everything (including
      # the asserts above) so a bad message can never kill the process.
      logging.warning(
          'Error occured in accept method. Connection will be closed.')
      if self.impl.manager:
        self.impl.manager.Close()
      return False
  return Accept
def _NotImplemented(*_1, **_2):
  # Placeholder installed for every interface method on the abstract
  # interface class; real behavior comes from a proxy or an implementation.
  raise NotImplementedError()
| bsd-3-clause |
miguelparaiso/PracticaOdoo | addons/procurement/wizard/schedulers_all.py | 306 | 3456 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import threading
from openerp import SUPERUSER_ID
from openerp import tools
from openerp.osv import osv
from openerp.api import Environment
_logger = logging.getLogger(__name__)
class procurement_compute_all(osv.osv_memory):
    _name = 'procurement.order.compute.all'
    _description = 'Compute all schedulers'

    def _procure_calculation_all(self, cr, uid, ids, context=None):
        """Run the procurement scheduler for every company of the current user.

        Executed in a background thread (see procure_calculation), hence the
        fresh cursor and the advisory lock on the scheduler cron row.

        @param self: The object pointer.
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param ids: List of IDs selected
        @param context: A standard dictionary
        """
        with Environment.manage():
            proc_obj = self.pool.get('procurement.order')
            #As this function is in a new thread, i need to open a new cursor, because the old one may be closed
            new_cr = self.pool.cursor()
            scheduler_cron_id = self.pool['ir.model.data'].get_object_reference(new_cr, SUPERUSER_ID, 'procurement', 'ir_cron_scheduler_action')[1]
            # Avoid to run the scheduler multiple times in the same time:
            # FOR UPDATE NOWAIT fails immediately if the cron row is locked.
            try:
                with tools.mute_logger('openerp.sql_db'):
                    new_cr.execute("SELECT id FROM ir_cron WHERE id = %s FOR UPDATE NOWAIT", (scheduler_cron_id,))
            except Exception:
                _logger.info('Attempt to run procurement scheduler aborted, as already running')
                new_cr.rollback()
                new_cr.close()
                return {}

            user = self.pool.get('res.users').browse(new_cr, uid, uid, context=context)
            comps = [x.id for x in user.company_ids]
            for comp in comps:
                proc_obj.run_scheduler(new_cr, uid, use_new_cursor=new_cr.dbname, company_id = comp, context=context)
            #close the new cursor
            new_cr.close()
            return {}

    def procure_calculation(self, cr, uid, ids, context=None):
        """Launch the scheduler computation in a background thread and close
        the wizard window immediately.

        @param self: The object pointer.
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param ids: List of IDs selected
        @param context: A standard dictionary
        """
        threaded_calculation = threading.Thread(target=self._procure_calculation_all, args=(cr, uid, ids, context))
        threaded_calculation.start()
        return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
fxfitz/ansible | lib/ansible/modules/monitoring/sensu_handler.py | 23 | 9200 | #!/usr/bin/python
# (c) 2017, Red Hat Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata: schema version, maturity status and the
# channel that supports this module.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: sensu_handler
author: "David Moreau Simard (@dmsimard)"
short_description: Manages Sensu handler configuration
version_added: 2.4
description:
- Manages Sensu handler configuration
- 'For more information, refer to the Sensu documentation: U(https://sensuapp.org/docs/latest/reference/handlers.html)'
options:
state:
description:
- Whether the handler should be present or not
choices: [ 'present', 'absent' ]
default: present
name:
description:
- A unique name for the handler. The name cannot contain special characters or spaces.
required: True
type:
description:
- The handler type
choices: [ 'pipe', 'tcp', 'udp', 'transport', 'set' ]
required: True
filter:
description:
- The Sensu event filter (name) to use when filtering events for the handler.
filters:
description:
- An array of Sensu event filters (names) to use when filtering events for the handler.
- Each array item must be a string.
severities:
description:
- An array of check result severities the handler will handle.
- 'NOTE: event resolution bypasses this filtering.'
choices: [ 'warning', 'critical', 'unknown' ]
mutator:
description:
- The Sensu event mutator (name) to use to mutate event data for the handler.
timeout:
description:
- The handler execution duration timeout in seconds (hard stop).
- Only used by pipe and tcp handler types.
default: 10
handle_silenced:
description:
- If events matching one or more silence entries should be handled.
type: bool
default: 'no'
handle_flapping:
description:
- If events in the flapping state should be handled.
type: bool
default: 'no'
command:
description:
- The handler command to be executed.
- The event data is passed to the process via STDIN.
- 'NOTE: the command attribute is only required for Pipe handlers (i.e. handlers configured with "type": "pipe").'
socket:
description:
- The socket definition scope, used to configure the TCP/UDP handler socket.
- 'NOTE: the socket attribute is only required for TCP/UDP handlers (i.e. handlers configured with "type": "tcp" or "type": "udp").'
pipe:
description:
- The pipe definition scope, used to configure the Sensu transport pipe.
- 'NOTE: the pipe attribute is only required for Transport handlers (i.e. handlers configured with "type": "transport").'
handlers:
description:
- An array of Sensu event handlers (names) to use for events using the handler set.
- Each array item must be a string.
- 'NOTE: the handlers attribute is only required for handler sets (i.e. handlers configured with "type": "set").'
notes:
- Check mode is supported
'''
EXAMPLES = '''
# Configure a handler that sends event data as STDIN (pipe)
- name: Configure IRC Sensu handler
sensu_handler:
name: "irc_handler"
type: "pipe"
command: "/usr/local/bin/notify-irc.sh"
severities:
- "ok"
- "critical"
- "warning"
- "unknown"
timeout: 15
notify:
- Restart sensu-client
- Restart sensu-server
# Delete a handler
- name: Delete IRC Sensu handler
sensu_handler:
name: "irc_handler"
state: "absent"
# Example of a TCP handler
- name: Configure TCP Sensu handler
sensu_handler:
name: "tcp_handler"
type: "tcp"
timeout: 30
socket:
host: "10.0.1.99"
port: 4444
register: handler
notify:
- Restart sensu-client
- Restart sensu-server
- name: Secure Sensu handler configuration file
file:
path: "{{ handler['file'] }}"
owner: "sensu"
group: "sensu"
mode: "0600"
'''
RETURN = '''
config:
description: Effective handler configuration, when state is present
returned: success
type: dict
sample: {'name': 'irc', 'type': 'pipe', 'command': '/usr/local/bin/notify-irc.sh'}
file:
description: Path to the handler configuration file
returned: success
type: string
sample: "/etc/sensu/conf.d/handlers/irc.json"
name:
description: Name of the handler
returned: success
type: string
sample: "irc"
'''
import json
import os
from ansible.module_utils.basic import AnsibleModule
def main():
    """Manage a Sensu handler definition file.

    Creates, updates or deletes ``/etc/sensu/conf.d/handlers/<name>.json``
    based on the module arguments. Idempotent: exits without change when the
    on-disk configuration already matches. Check mode is supported.
    """
    module = AnsibleModule(
        supports_check_mode=True,
        argument_spec=dict(
            state=dict(type='str', required=False, choices=['present', 'absent'], default='present'),
            name=dict(type='str', required=True),
            type=dict(type='str', required=False, choices=['pipe', 'tcp', 'udp', 'transport', 'set']),
            filter=dict(type='str', required=False),
            filters=dict(type='list', required=False),
            severities=dict(type='list', required=False),
            mutator=dict(type='str', required=False),
            timeout=dict(type='int', required=False, default=10),
            handle_silenced=dict(type='bool', required=False, default=False),
            handle_flapping=dict(type='bool', required=False, default=False),
            command=dict(type='str', required=False),
            socket=dict(type='dict', required=False),
            pipe=dict(type='dict', required=False),
            handlers=dict(type='list', required=False),
        ),
        # Each handler type has a mandatory companion option.
        required_if=[
            ['state', 'present', ['type']],
            ['type', 'pipe', ['command']],
            ['type', 'tcp', ['socket']],
            ['type', 'udp', ['socket']],
            ['type', 'transport', ['pipe']],
            ['type', 'set', ['handlers']]
        ]
    )

    state = module.params['state']
    name = module.params['name']
    path = '/etc/sensu/conf.d/handlers/{0}.json'.format(name)

    if state == 'absent':
        if os.path.exists(path):
            if module.check_mode:
                msg = '{path} would have been deleted'.format(path=path)
                module.exit_json(msg=msg, changed=True)
            else:
                try:
                    os.remove(path)
                    msg = '{path} deleted successfully'.format(path=path)
                    module.exit_json(msg=msg, changed=True)
                except OSError as e:
                    msg = 'Exception when trying to delete {path}: {exception}'
                    module.fail_json(
                        msg=msg.format(path=path, exception=str(e)))
        else:
            # Idempotency: it's okay if the file doesn't exist
            msg = '{path} already does not exist'.format(path=path)
            module.exit_json(msg=msg)

    # Build handler configuration from module arguments. Every key in args is
    # declared in argument_spec, so module.params always contains it; only the
    # None (unset) values are filtered out.
    config = {'handlers': {name: {}}}
    args = ['type', 'filter', 'filters', 'severities', 'mutator', 'timeout',
            'handle_silenced', 'handle_flapping', 'command', 'socket',
            'pipe', 'handlers']
    for arg in args:
        if module.params[arg] is not None:
            config['handlers'][name][arg] = module.params[arg]

    # Load the current config, if there is one, so we can compare.
    current_config = None
    try:
        # Context manager so the file handle is closed promptly (the original
        # json.load(open(path)) leaked the handle).
        with open(path, 'r') as current_file:
            current_config = json.load(current_file)
    except (IOError, ValueError):
        # File either doesn't exist or it's invalid JSON
        pass

    if current_config is not None and current_config == config:
        # Config is the same, let's not change anything
        module.exit_json(msg='Handler configuration is already up to date',
                         config=config['handlers'][name],
                         file=path,
                         name=name)

    # Validate that directory exists before trying to write to it
    if not module.check_mode and not os.path.exists(os.path.dirname(path)):
        try:
            os.makedirs(os.path.dirname(path))
        except OSError as e:
            module.fail_json(msg='Unable to create {0}: {1}'.format(os.path.dirname(path),
                                                                    str(e)))

    if module.check_mode:
        module.exit_json(msg='Handler configuration would have been updated',
                         changed=True,
                         config=config['handlers'][name],
                         file=path,
                         name=name)

    try:
        with open(path, 'w') as handler:
            handler.write(json.dumps(config, indent=4))
            module.exit_json(msg='Handler configuration updated',
                             changed=True,
                             config=config['handlers'][name],
                             file=path,
                             name=name)
    except (OSError, IOError) as e:
        module.fail_json(msg='Unable to write file {0}: {1}'.format(path,
                                                                    str(e)))
if __name__ == '__main__':
main()
| gpl-3.0 |
snooptheone/LegendasTV.bundle | Contents/Libraries/Shared/requests/packages/chardet/mbcssm.py | 982 | 19608 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .constants import eStart, eError, eItsMe
# BIG5
BIG5_cls = (
1,1,1,1,1,1,1,1, # 00 - 07 #allow 0x00 as legal value
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,1, # 78 - 7f
4,4,4,4,4,4,4,4, # 80 - 87
4,4,4,4,4,4,4,4, # 88 - 8f
4,4,4,4,4,4,4,4, # 90 - 97
4,4,4,4,4,4,4,4, # 98 - 9f
4,3,3,3,3,3,3,3, # a0 - a7
3,3,3,3,3,3,3,3, # a8 - af
3,3,3,3,3,3,3,3, # b0 - b7
3,3,3,3,3,3,3,3, # b8 - bf
3,3,3,3,3,3,3,3, # c0 - c7
3,3,3,3,3,3,3,3, # c8 - cf
3,3,3,3,3,3,3,3, # d0 - d7
3,3,3,3,3,3,3,3, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,3,3,3, # e8 - ef
3,3,3,3,3,3,3,3, # f0 - f7
3,3,3,3,3,3,3,0 # f8 - ff
)
BIG5_st = (
eError,eStart,eStart, 3,eError,eError,eError,eError,#00-07
eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,#08-0f
eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart#10-17
)
# Number of bytes in a character, indexed by byte class (0 = none/invalid).
Big5CharLenTable = (0, 1, 1, 2, 0)

# Big5 detection model: byte-class table, transition table, per-class char
# lengths and charset name; classFactor is the state-table row width.
Big5SMModel = {'classTable': BIG5_cls,
               'classFactor': 5,
               'stateTable': BIG5_st,
               'charLenTable': Big5CharLenTable,
               'name': 'Big5'}
# CP949
CP949_cls = (
1,1,1,1,1,1,1,1, 1,1,1,1,1,1,0,0, # 00 - 0f
1,1,1,1,1,1,1,1, 1,1,1,0,1,1,1,1, # 10 - 1f
1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, # 20 - 2f
1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, # 30 - 3f
1,4,4,4,4,4,4,4, 4,4,4,4,4,4,4,4, # 40 - 4f
4,4,5,5,5,5,5,5, 5,5,5,1,1,1,1,1, # 50 - 5f
1,5,5,5,5,5,5,5, 5,5,5,5,5,5,5,5, # 60 - 6f
5,5,5,5,5,5,5,5, 5,5,5,1,1,1,1,1, # 70 - 7f
0,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6, # 80 - 8f
6,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6, # 90 - 9f
6,7,7,7,7,7,7,7, 7,7,7,7,7,8,8,8, # a0 - af
7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7, # b0 - bf
7,7,7,7,7,7,9,2, 2,3,2,2,2,2,2,2, # c0 - cf
2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, # d0 - df
2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, # e0 - ef
2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,0, # f0 - ff
)
CP949_st = (
#cls= 0 1 2 3 4 5 6 7 8 9 # previous state =
eError,eStart, 3,eError,eStart,eStart, 4, 5,eError, 6, # eStart
eError,eError,eError,eError,eError,eError,eError,eError,eError,eError, # eError
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe, # eItsMe
eError,eError,eStart,eStart,eError,eError,eError,eStart,eStart,eStart, # 3
eError,eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart,eStart, # 4
eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart,eStart,eStart, # 5
eError,eStart,eStart,eStart,eStart,eError,eError,eStart,eStart,eStart, # 6
)
# Number of bytes in a character, indexed by byte class (0 = none/invalid).
CP949CharLenTable = (0, 1, 2, 0, 1, 1, 2, 2, 0, 2)

# CP949 detection model; classFactor is the state-table row width.
CP949SMModel = {'classTable': CP949_cls,
                'classFactor': 10,
                'stateTable': CP949_st,
                'charLenTable': CP949CharLenTable,
                'name': 'CP949'}
# EUC-JP
EUCJP_cls = (
4,4,4,4,4,4,4,4, # 00 - 07
4,4,4,4,4,4,5,5, # 08 - 0f
4,4,4,4,4,4,4,4, # 10 - 17
4,4,4,5,4,4,4,4, # 18 - 1f
4,4,4,4,4,4,4,4, # 20 - 27
4,4,4,4,4,4,4,4, # 28 - 2f
4,4,4,4,4,4,4,4, # 30 - 37
4,4,4,4,4,4,4,4, # 38 - 3f
4,4,4,4,4,4,4,4, # 40 - 47
4,4,4,4,4,4,4,4, # 48 - 4f
4,4,4,4,4,4,4,4, # 50 - 57
4,4,4,4,4,4,4,4, # 58 - 5f
4,4,4,4,4,4,4,4, # 60 - 67
4,4,4,4,4,4,4,4, # 68 - 6f
4,4,4,4,4,4,4,4, # 70 - 77
4,4,4,4,4,4,4,4, # 78 - 7f
5,5,5,5,5,5,5,5, # 80 - 87
5,5,5,5,5,5,1,3, # 88 - 8f
5,5,5,5,5,5,5,5, # 90 - 97
5,5,5,5,5,5,5,5, # 98 - 9f
5,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,0,5 # f8 - ff
)
EUCJP_st = (
3, 4, 3, 5,eStart,eError,eError,eError,#00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe,eStart,eError,eStart,eError,eError,eError,#10-17
eError,eError,eStart,eError,eError,eError, 3,eError,#18-1f
3,eError,eError,eError,eStart,eStart,eStart,eStart#20-27
)
# Number of bytes in a character, indexed by byte class (0 = none/invalid).
EUCJPCharLenTable = (2, 2, 2, 3, 1, 0)

# EUC-JP detection model; classFactor is the state-table row width.
EUCJPSMModel = {'classTable': EUCJP_cls,
                'classFactor': 6,
                'stateTable': EUCJP_st,
                'charLenTable': EUCJPCharLenTable,
                'name': 'EUC-JP'}
# EUC-KR
EUCKR_cls = (
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
1,1,1,1,1,1,1,1, # 40 - 47
1,1,1,1,1,1,1,1, # 48 - 4f
1,1,1,1,1,1,1,1, # 50 - 57
1,1,1,1,1,1,1,1, # 58 - 5f
1,1,1,1,1,1,1,1, # 60 - 67
1,1,1,1,1,1,1,1, # 68 - 6f
1,1,1,1,1,1,1,1, # 70 - 77
1,1,1,1,1,1,1,1, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,3,3,3, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,3,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
2,2,2,2,2,2,2,2, # e0 - e7
2,2,2,2,2,2,2,2, # e8 - ef
2,2,2,2,2,2,2,2, # f0 - f7
2,2,2,2,2,2,2,0 # f8 - ff
)
EUCKR_st = (
eError,eStart, 3,eError,eError,eError,eError,eError,#00-07
eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,eStart,eStart #08-0f
)
# Number of bytes in a character, indexed by byte class (0 = none/invalid).
EUCKRCharLenTable = (0, 1, 2, 0)

# EUC-KR detection model; classFactor is the state-table row width.
EUCKRSMModel = {'classTable': EUCKR_cls,
                'classFactor': 4,
                'stateTable': EUCKR_st,
                'charLenTable': EUCKRCharLenTable,
                'name': 'EUC-KR'}
# EUC-TW
EUCTW_cls = (
2,2,2,2,2,2,2,2, # 00 - 07
2,2,2,2,2,2,0,0, # 08 - 0f
2,2,2,2,2,2,2,2, # 10 - 17
2,2,2,0,2,2,2,2, # 18 - 1f
2,2,2,2,2,2,2,2, # 20 - 27
2,2,2,2,2,2,2,2, # 28 - 2f
2,2,2,2,2,2,2,2, # 30 - 37
2,2,2,2,2,2,2,2, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,2, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,6,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,3,4,4,4,4,4,4, # a0 - a7
5,5,1,1,1,1,1,1, # a8 - af
1,1,1,1,1,1,1,1, # b0 - b7
1,1,1,1,1,1,1,1, # b8 - bf
1,1,3,1,3,3,3,3, # c0 - c7
3,3,3,3,3,3,3,3, # c8 - cf
3,3,3,3,3,3,3,3, # d0 - d7
3,3,3,3,3,3,3,3, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,3,3,3, # e8 - ef
3,3,3,3,3,3,3,3, # f0 - f7
3,3,3,3,3,3,3,0 # f8 - ff
)
EUCTW_st = (
eError,eError,eStart, 3, 3, 3, 4,eError,#00-07
eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eStart,eError,#10-17
eStart,eStart,eStart,eError,eError,eError,eError,eError,#18-1f
5,eError,eError,eError,eStart,eError,eStart,eStart,#20-27
eStart,eError,eStart,eStart,eStart,eStart,eStart,eStart #28-2f
)
# Number of bytes in a character, indexed by byte class (0 = none/invalid).
EUCTWCharLenTable = (0, 0, 1, 2, 2, 2, 3)

# EUC-TW detection model; classFactor is the state-table row width.
EUCTWSMModel = {'classTable': EUCTW_cls,
                'classFactor': 7,
                'stateTable': EUCTW_st,
                'charLenTable': EUCTWCharLenTable,
                'name': 'x-euc-tw'}
# GB2312
GB2312_cls = (
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
3,3,3,3,3,3,3,3, # 30 - 37
3,3,1,1,1,1,1,1, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,4, # 78 - 7f
5,6,6,6,6,6,6,6, # 80 - 87
6,6,6,6,6,6,6,6, # 88 - 8f
6,6,6,6,6,6,6,6, # 90 - 97
6,6,6,6,6,6,6,6, # 98 - 9f
6,6,6,6,6,6,6,6, # a0 - a7
6,6,6,6,6,6,6,6, # a8 - af
6,6,6,6,6,6,6,6, # b0 - b7
6,6,6,6,6,6,6,6, # b8 - bf
6,6,6,6,6,6,6,6, # c0 - c7
6,6,6,6,6,6,6,6, # c8 - cf
6,6,6,6,6,6,6,6, # d0 - d7
6,6,6,6,6,6,6,6, # d8 - df
6,6,6,6,6,6,6,6, # e0 - e7
6,6,6,6,6,6,6,6, # e8 - ef
6,6,6,6,6,6,6,6, # f0 - f7
6,6,6,6,6,6,6,0 # f8 - ff
)
GB2312_st = (
eError,eStart,eStart,eStart,eStart,eStart, 3,eError,#00-07
eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,eStart,#10-17
4,eError,eStart,eStart,eError,eError,eError,eError,#18-1f
eError,eError, 5,eError,eError,eError,eItsMe,eError,#20-27
eError,eError,eStart,eStart,eStart,eStart,eStart,eStart #28-2f
)
# To be accurate, the length of class 6 can be either 2 or 4.
# But it is not necessary to discriminate between the two since
# it is used for frequency analysis only, and we are validating
# each code range there as well. So it is safe to set it to
# 2 here.
# Number of bytes in a character, indexed by byte class; class 6 is
# approximated as 2 (see the note above on frequency analysis).
GB2312CharLenTable = (0, 1, 1, 1, 1, 1, 2)

# GB2312 detection model; classFactor is the state-table row width.
GB2312SMModel = {'classTable': GB2312_cls,
                 'classFactor': 7,
                 'stateTable': GB2312_st,
                 'charLenTable': GB2312CharLenTable,
                 'name': 'GB2312'}
# Shift_JIS
SJIS_cls = (
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,1, # 78 - 7f
3,3,3,3,3,3,3,3, # 80 - 87
3,3,3,3,3,3,3,3, # 88 - 8f
3,3,3,3,3,3,3,3, # 90 - 97
3,3,3,3,3,3,3,3, # 98 - 9f
    #0xa0 is illegal in sjis encoding, but some pages do
    #contain such bytes. We need to be more forgiving of errors.
2,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,4,4,4, # e8 - ef
4,4,4,4,4,4,4,4, # f0 - f7
4,4,4,4,4,0,0,0 # f8 - ff
)
SJIS_st = (
eError,eStart,eStart, 3,eError,eError,eError,eError,#00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe,eError,eError,eStart,eStart,eStart,eStart #10-17
)
# Number of bytes in a character, indexed by byte class (0 = none/invalid).
SJISCharLenTable = (0, 1, 1, 2, 0, 0)

# Shift_JIS detection model; classFactor is the state-table row width.
SJISSMModel = {'classTable': SJIS_cls,
               'classFactor': 6,
               'stateTable': SJIS_st,
               'charLenTable': SJISCharLenTable,
               'name': 'Shift_JIS'}
# UCS2-BE
UCS2BE_cls = (
0,0,0,0,0,0,0,0, # 00 - 07
0,0,1,0,0,2,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,3,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,3,3,3,3,3,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,0,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,0,0,0,0,0,0,0, # a0 - a7
0,0,0,0,0,0,0,0, # a8 - af
0,0,0,0,0,0,0,0, # b0 - b7
0,0,0,0,0,0,0,0, # b8 - bf
0,0,0,0,0,0,0,0, # c0 - c7
0,0,0,0,0,0,0,0, # c8 - cf
0,0,0,0,0,0,0,0, # d0 - d7
0,0,0,0,0,0,0,0, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,4,5 # f8 - ff
)
UCS2BE_st = (
5, 7, 7,eError, 4, 3,eError,eError,#00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe, 6, 6, 6, 6,eError,eError,#10-17
6, 6, 6, 6, 6,eItsMe, 6, 6,#18-1f
6, 6, 6, 6, 5, 7, 7,eError,#20-27
5, 8, 6, 6,eError, 6, 6, 6,#28-2f
6, 6, 6, 6,eError,eError,eStart,eStart #30-37
)
# Number of bytes in a character, indexed by byte class.
UCS2BECharLenTable = (2, 2, 2, 0, 2, 2)

# UTF-16BE detection model; classFactor is the state-table row width.
UCS2BESMModel = {'classTable': UCS2BE_cls,
                 'classFactor': 6,
                 'stateTable': UCS2BE_st,
                 'charLenTable': UCS2BECharLenTable,
                 'name': 'UTF-16BE'}
# UCS2-LE
UCS2LE_cls = (
0,0,0,0,0,0,0,0, # 00 - 07
0,0,1,0,0,2,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,3,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,3,3,3,3,3,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,0,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,0,0,0,0,0,0,0, # a0 - a7
0,0,0,0,0,0,0,0, # a8 - af
0,0,0,0,0,0,0,0, # b0 - b7
0,0,0,0,0,0,0,0, # b8 - bf
0,0,0,0,0,0,0,0, # c0 - c7
0,0,0,0,0,0,0,0, # c8 - cf
0,0,0,0,0,0,0,0, # d0 - d7
0,0,0,0,0,0,0,0, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,4,5 # f8 - ff
)
UCS2LE_st = (
6, 6, 7, 6, 4, 3,eError,eError,#00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe, 5, 5, 5,eError,eItsMe,eError,#10-17
5, 5, 5,eError, 5,eError, 6, 6,#18-1f
7, 6, 8, 8, 5, 5, 5,eError,#20-27
5, 5, 5,eError,eError,eError, 5, 5,#28-2f
5, 5, 5,eError, 5,eError,eStart,eStart #30-37
)
# Number of bytes in a character, indexed by byte class.
UCS2LECharLenTable = (2, 2, 2, 2, 2, 2)

# UTF-16LE detection model; classFactor is the state-table row width.
UCS2LESMModel = {'classTable': UCS2LE_cls,
                 'classFactor': 6,
                 'stateTable': UCS2LE_st,
                 'charLenTable': UCS2LECharLenTable,
                 'name': 'UTF-16LE'}
# UTF-8
UTF8_cls = (
1,1,1,1,1,1,1,1, # 00 - 07 #allow 0x00 as a legal value
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
1,1,1,1,1,1,1,1, # 40 - 47
1,1,1,1,1,1,1,1, # 48 - 4f
1,1,1,1,1,1,1,1, # 50 - 57
1,1,1,1,1,1,1,1, # 58 - 5f
1,1,1,1,1,1,1,1, # 60 - 67
1,1,1,1,1,1,1,1, # 68 - 6f
1,1,1,1,1,1,1,1, # 70 - 77
1,1,1,1,1,1,1,1, # 78 - 7f
2,2,2,2,3,3,3,3, # 80 - 87
4,4,4,4,4,4,4,4, # 88 - 8f
4,4,4,4,4,4,4,4, # 90 - 97
4,4,4,4,4,4,4,4, # 98 - 9f
5,5,5,5,5,5,5,5, # a0 - a7
5,5,5,5,5,5,5,5, # a8 - af
5,5,5,5,5,5,5,5, # b0 - b7
5,5,5,5,5,5,5,5, # b8 - bf
0,0,6,6,6,6,6,6, # c0 - c7
6,6,6,6,6,6,6,6, # c8 - cf
6,6,6,6,6,6,6,6, # d0 - d7
6,6,6,6,6,6,6,6, # d8 - df
7,8,8,8,8,8,8,8, # e0 - e7
8,8,8,8,8,9,8,8, # e8 - ef
10,11,11,11,11,11,11,11, # f0 - f7
12,13,13,13,14,15,0,0 # f8 - ff
)
UTF8_st = (
eError,eStart,eError,eError,eError,eError, 12, 10,#00-07
9, 11, 8, 7, 6, 5, 4, 3,#08-0f
eError,eError,eError,eError,eError,eError,eError,eError,#10-17
eError,eError,eError,eError,eError,eError,eError,eError,#18-1f
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,#20-27
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,#28-2f
eError,eError, 5, 5, 5, 5,eError,eError,#30-37
eError,eError,eError,eError,eError,eError,eError,eError,#38-3f
eError,eError,eError, 5, 5, 5,eError,eError,#40-47
eError,eError,eError,eError,eError,eError,eError,eError,#48-4f
eError,eError, 7, 7, 7, 7,eError,eError,#50-57
eError,eError,eError,eError,eError,eError,eError,eError,#58-5f
eError,eError,eError,eError, 7, 7,eError,eError,#60-67
eError,eError,eError,eError,eError,eError,eError,eError,#68-6f
eError,eError, 9, 9, 9, 9,eError,eError,#70-77
eError,eError,eError,eError,eError,eError,eError,eError,#78-7f
eError,eError,eError,eError,eError, 9,eError,eError,#80-87
eError,eError,eError,eError,eError,eError,eError,eError,#88-8f
eError,eError, 12, 12, 12, 12,eError,eError,#90-97
eError,eError,eError,eError,eError,eError,eError,eError,#98-9f
eError,eError,eError,eError,eError, 12,eError,eError,#a0-a7
eError,eError,eError,eError,eError,eError,eError,eError,#a8-af
eError,eError, 12, 12, 12,eError,eError,eError,#b0-b7
eError,eError,eError,eError,eError,eError,eError,eError,#b8-bf
eError,eError,eStart,eStart,eStart,eStart,eError,eError,#c0-c7
eError,eError,eError,eError,eError,eError,eError,eError #c8-cf
)
# Number of bytes in a character, indexed by byte class (0 = none/invalid).
UTF8CharLenTable = (0, 1, 0, 0, 0, 0, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6)

# UTF-8 detection model; classFactor is the state-table row width.
UTF8SMModel = {'classTable': UTF8_cls,
               'classFactor': 16,
               'stateTable': UTF8_st,
               'charLenTable': UTF8CharLenTable,
               'name': 'UTF-8'}
# flake8: noqa
| gpl-2.0 |
RRCKI/panda-jedi | pandajedi/jediorder/ContentsFeeder.py | 1 | 38521 | import os
import sys
import time
import uuid
import math
import socket
import datetime
import traceback
from pandajedi.jedicore.ThreadUtils import ListWithLock,ThreadPool,WorkerThread
from pandajedi.jedicore import Interaction
from pandajedi.jedicore.MsgWrapper import MsgWrapper
from pandajedi.jedirefine import RefinerUtils
from JediKnight import JediKnight
from TaskGenerator import TaskGenerator
from pandajedi.jedicore.JediDatasetSpec import JediDatasetSpec
from pandajedi.jediconfig import jedi_config
# logger
from pandacommon.pandalogger.PandaLogger import PandaLogger
logger = PandaLogger().getLogger(__name__.split('.')[-1])
# worker class to take care of DatasetContents table
class ContentsFeeder (JediKnight):
    """Daemon that feeds dataset contents into the JEDI DatasetContents table.

    Periodically asks the task buffer for datasets that need their contents
    fed, for every (vo, prodSourceLabel) pair, and fans the work out to a
    pool of ContentsFeederThread workers.
    """

    # constructor
    def __init__(self,commuChannel,taskBufferIF,ddmIF,vos,prodSourceLabels):
        self.vos = self.parseInit(vos)
        self.prodSourceLabels = self.parseInit(prodSourceLabels)
        # unique identifier of this process: <host>-<pid>_<pgrp>-con
        self.pid = '{0}-{1}_{2}-con'.format(socket.getfqdn().split('.')[0],os.getpid(),os.getpgrp())
        JediKnight.__init__(self,commuChannel,taskBufferIF,ddmIF,logger)

    # main
    def start(self):
        """Run the feeding loop forever, sleeping between cycles."""
        # start base class
        JediKnight.start(self)
        # go into main loop
        while True:
            startTime = datetime.datetime.utcnow()
            try:
                # loop over all vos and all sourceLabels
                for vo in self.vos:
                    for prodSourceLabel in self.prodSourceLabels:
                        # get the list of datasets to feed contents to DB
                        tmpList = self.taskBufferIF.getDatasetsToFeedContents_JEDI(vo, prodSourceLabel)
                        if tmpList is None:
                            # failed
                            logger.error('failed to get the list of datasets to feed contents')
                        else:
                            logger.debug('got %s datasets' % len(tmpList))
                            # put to a locked list shared by the worker threads
                            dsList = ListWithLock(tmpList)
                            # make thread pool and workers
                            threadPool = ThreadPool()
                            nWorker = jedi_config.confeeder.nWorkers
                            for _ in range(nWorker):
                                thr = ContentsFeederThread(dsList, threadPool,
                                                           self.taskBufferIF, self.ddmIF,
                                                           self.pid)
                                thr.start()
                            # wait for all workers to finish
                            threadPool.join()
            except Exception:
                # narrowed from a bare except so that SystemExit and
                # KeyboardInterrupt can still terminate the daemon
                errtype, errvalue = sys.exc_info()[:2]
                logger.error('failed in %s.start() with %s %s' % (self.__class__.__name__, errtype.__name__, errvalue))
            # sleep for the remainder of the cycle, if any
            loopCycle = jedi_config.confeeder.loopCycle
            timeDelta = datetime.datetime.utcnow() - startTime
            sleepPeriod = loopCycle - timeDelta.seconds
            if sleepPeriod > 0:
                time.sleep(sleepPeriod)
            # randomize cycle
            self.randomSleep()
# thread for real worker
class ContentsFeederThread (WorkerThread):
    # constructor
    def __init__(self,taskDsList,threadPool,taskbufferIF,ddmIF,pid):
        """Store the shared work list and service interfaces for this worker.

        :param taskDsList: locked list of (jediTaskID, dataset list) items
                           shared with the other workers
        :param threadPool: pool this worker registers itself with
        :param taskbufferIF: task buffer interface (DB access)
        :param ddmIF: distributed data management interface
        :param pid: unique identifier of the parent process, used for locking
        """
        # initialize worker with no semaphore
        WorkerThread.__init__(self,None,threadPool,logger)
        # attributes
        self.taskDsList = taskDsList
        self.taskBufferIF = taskbufferIF
        self.ddmIF = ddmIF
        # message type tag used for logging
        self.msgType = 'contentsfeeder'
        self.pid = pid
    # main
    def runImpl(self):
        """Worker main loop.

        Repeatedly pulls chunks of (jediTaskID, datasetSpec list) pairs from
        the shared list and, for each task: loads the task spec and its JSON
        parameters, queries dataset metadata and file lists from DDM (or
        fabricates dummy file records for pseudo / sequence-number / PFN-list
        datasets), inserts the file records into the JEDI contents table, and
        finally updates the task status (broken / on-hold / ready) or just
        unlocks it.  Returns when the shared list is exhausted.
        """
        # NOTE(review): dict.has_key() and iteritems() make this Python-2 only
        while True:
            try:
                # get a part of list
                nTasks = 10
                taskDsList = self.taskDsList.get(nTasks)
                # no more datasets
                if len(taskDsList) == 0:
                    self.logger.debug('%s terminating since no more items' % self.__class__.__name__)
                    return
                # loop over all tasks
                for jediTaskID,dsList in taskDsList:
                    allUpdated = True
                    taskBroken = False
                    taskOnHold = False
                    runningTask = False
                    missingMap = {}
                    # make logger
                    tmpLog = MsgWrapper(self.logger,'< jediTaskID={0} >'.format(jediTaskID))
                    # get task
                    tmpStat,taskSpec = self.taskBufferIF.getTaskWithID_JEDI(jediTaskID,False,True,self.pid,10)
                    if not tmpStat or taskSpec == None:
                        tmpLog.error('failed to get taskSpec for jediTaskID={0}'.format(jediTaskID))
                        continue
                    try:
                        # get task parameters
                        taskParam = self.taskBufferIF.getTaskParamsWithID_JEDI(jediTaskID)
                        taskParamMap = RefinerUtils.decodeJSON(taskParam)
                    except:
                        errtype,errvalue = sys.exc_info()[:2]
                        tmpLog.error('task param conversion from json failed with {0}:{1}'.format(errtype.__name__,errvalue))
                        taskBroken = True
                        # NOTE(review): if the fetch/decode above failed, taskParamMap is
                        # unbound and the has_key() below raises NameError, which is then
                        # swallowed by the outer bare except -- confirm this is intended
                    # renaming of parameters
                    if taskParamMap.has_key('nEventsPerInputFile'):
                        taskParamMap['nEventsPerFile'] = taskParamMap['nEventsPerInputFile']
                    # the number of files per job
                    nFilesPerJob = None
                    if taskParamMap.has_key('nFilesPerJob'):
                        nFilesPerJob = taskParamMap['nFilesPerJob']
                    # the number of chunks used by scout
                    nChunksForScout = 10
                    # load XML
                    if taskSpec.useLoadXML():
                        xmlConfig = taskParamMap['loadXML']
                    else:
                        xmlConfig = None
                    # skip files used by another task
                    if 'skipFilesUsedBy' in taskParamMap:
                        skipFilesUsedBy = taskParamMap['skipFilesUsedBy']
                    else:
                        skipFilesUsedBy = None
                    # check no wait
                    noWaitParent = False
                    parentOutDatasets = set()
                    if taskSpec.noWaitParent() and not taskSpec.parent_tid in [None,taskSpec.jediTaskID]:
                        tmpStat = self.taskBufferIF.checkParentTask_JEDI(taskSpec.parent_tid)
                        if tmpStat == 'running':
                            noWaitParent = True
                            # get output datasets from parent task
                            tmpParentStat,tmpParentOutDatasets = self.taskBufferIF.getDatasetsWithJediTaskID_JEDI(taskSpec.parent_tid,
                                                                                                                 ['output','log'])
                            # collect dataset names
                            for tmpParentOutDataset in tmpParentOutDatasets:
                                parentOutDatasets.add(tmpParentOutDataset.datasetName)
                    # loop over all datasets
                    nFilesMaster = 0
                    checkedMaster = False
                    setFrozenTime = True
                    if not taskBroken:
                        ddmIF = self.ddmIF.getInterface(taskSpec.vo)
                        origNumFiles = None
                        if taskParamMap.has_key('nFiles'):
                            origNumFiles = taskParamMap['nFiles']
                        for datasetSpec in dsList:
                            tmpLog.debug('start loop for {0}(id={1})'.format(datasetSpec.datasetName,datasetSpec.datasetID))
                            # get dataset metadata
                            tmpLog.debug('get metadata')
                            gotMetadata = False
                            stateUpdateTime = datetime.datetime.utcnow()
                            try:
                                if not datasetSpec.isPseudo():
                                    tmpMetadata = ddmIF.getDatasetMetaData(datasetSpec.datasetName)
                                else:
                                    # dummy metadata for pseudo dataset
                                    tmpMetadata = {'state':'closed'}
                                # set mutable when and the dataset is open and parent is running or task is configured to run until the dataset is closed
                                if (noWaitParent or taskSpec.runUntilClosed()) and \
                                        (tmpMetadata['state'] == 'open' \
                                             or datasetSpec.datasetName in parentOutDatasets \
                                             or datasetSpec.datasetName.split(':')[-1] in parentOutDatasets):
                                    # dummy metadata when parent is running
                                    tmpMetadata = {'state':'mutable'}
                                gotMetadata = True
                            except:
                                errtype,errvalue = sys.exc_info()[:2]
                                tmpLog.error('{0} failed to get metadata to {1}:{2}'.format(self.__class__.__name__,
                                                                                            errtype.__name__,errvalue))
                                if errtype == Interaction.JEDIFatalError:
                                    # fatal error
                                    datasetStatus = 'broken'
                                    taskBroken = True
                                    # update dataset status
                                    self.updateDatasetStatus(datasetSpec,datasetStatus,tmpLog)
                                else:
                                    if not taskSpec.ignoreMissingInDS():
                                        # temporary error
                                        taskOnHold = True
                                    else:
                                        # ignore missing
                                        datasetStatus = 'failed'
                                        # update dataset status
                                        self.updateDatasetStatus(datasetSpec,datasetStatus,tmpLog)
                                taskSpec.setErrDiag('failed to get metadata for {0}'.format(datasetSpec.datasetName))
                                if not taskSpec.ignoreMissingInDS():
                                    allUpdated = False
                            else:
                                # get file list specified in task parameters
                                fileList,includePatt,excludePatt = RefinerUtils.extractFileList(taskParamMap,datasetSpec.datasetName)
                                # get the number of events in metadata
                                if taskParamMap.has_key('getNumEventsInMetadata'):
                                    getNumEvents = True
                                else:
                                    getNumEvents = False
                                # get file list from DDM
                                tmpLog.debug('get files')
                                try:
                                    useInFilesWithNewAttemptNr = False
                                    skipDuplicate = not datasetSpec.useDuplicatedFiles()
                                    if not datasetSpec.isPseudo():
                                        if fileList != [] and taskParamMap.has_key('useInFilesInContainer') and \
                                                not datasetSpec.containerName in ['',None]:
                                            # read files from container if file list is specified in task parameters
                                            tmpDatasetName = datasetSpec.containerName
                                        else:
                                            tmpDatasetName = datasetSpec.datasetName
                                        # use long format for LB
                                        longFormat = False
                                        if taskSpec.respectLumiblock():
                                            longFormat = True
                                        tmpRet = ddmIF.getFilesInDataset(tmpDatasetName,
                                                                         getNumEvents=getNumEvents,
                                                                         skipDuplicate=skipDuplicate,
                                                                         longFormat=longFormat
                                                                         )
                                        tmpLog.debug('got {0} files in {1}'.format(len(tmpRet),tmpDatasetName))
                                        # remove lost files
                                        tmpLostFiles = ddmIF.findLostFiles(tmpDatasetName,tmpRet)
                                        if tmpLostFiles != {}:
                                            tmpLog.debug('found {0} lost files in {1}'.format(len(tmpLostFiles),tmpDatasetName))
                                            for tmpListGUID,tmpLostLFN in tmpLostFiles.iteritems():
                                                tmpLog.debug('removed {0}'.format(tmpLostLFN))
                                                del tmpRet[tmpListGUID]
                                    else:
                                        if datasetSpec.isSeqNumber():
                                            # make dummy files for seq_number
                                            if datasetSpec.getNumRecords() != None:
                                                nPFN = datasetSpec.getNumRecords()
                                            elif origNumFiles != None:
                                                nPFN = origNumFiles
                                                if taskParamMap.has_key('nEventsPerJob') and taskParamMap.has_key('nEventsPerFile') \
                                                        and taskParamMap['nEventsPerFile'] > taskParamMap['nEventsPerJob']:
                                                    nPFN = nPFN * taskParamMap['nEventsPerFile'] / taskParamMap['nEventsPerJob']
                                                elif taskParamMap.has_key('nEventsPerFile') and taskParamMap.has_key('nEventsPerRange'):
                                                    nPFN = nPFN * taskParamMap['nEventsPerFile'] / taskParamMap['nEventsPerRange']
                                            elif 'nEvents' in taskParamMap and 'nEventsPerJob' in taskParamMap:
                                                nPFN = taskParamMap['nEvents'] / taskParamMap['nEventsPerJob']
                                            elif 'nEvents' in taskParamMap and 'nEventsPerFile' in taskParamMap \
                                                    and 'nFilesPerJob' in taskParamMap:
                                                nPFN = taskParamMap['nEvents'] / taskParamMap['nEventsPerFile'] / taskParamMap['nFilesPerJob']
                                            else:
                                                # the default number of records for seq_number
                                                seqDefNumRecords = 10000
                                                # get nFiles of the master
                                                tmpMasterAtt = self.taskBufferIF.getDatasetAttributes_JEDI(datasetSpec.jediTaskID,
                                                                                                           datasetSpec.masterID,
                                                                                                           ['nFiles'])
                                                # use nFiles of the master as the number of records if it is larger than the default
                                                if 'nFiles' in tmpMasterAtt and tmpMasterAtt['nFiles'] > seqDefNumRecords:
                                                    nPFN = tmpMasterAtt['nFiles']
                                                else:
                                                    nPFN = seqDefNumRecords
                                            # check usedBy
                                            if skipFilesUsedBy != None:
                                                for tmpJediTaskID in str(skipFilesUsedBy).split(','):
                                                    tmpParentAtt = self.taskBufferIF.getDatasetAttributesWithMap_JEDI(tmpJediTaskID,
                                                                                                                     {'datasetName':datasetSpec.datasetName},
                                                                                                                     ['nFiles'])
                                                    if 'nFiles' in tmpParentAtt and tmpParentAtt['nFiles']:
                                                        nPFN += tmpParentAtt['nFiles']
                                            tmpRet = {}
                                            # get offset
                                            tmpOffset = datasetSpec.getOffset()
                                            tmpOffset += 1
                                            for iPFN in range(nPFN):
                                                tmpRet[str(uuid.uuid4())] = {'lfn':iPFN+tmpOffset,
                                                                             'scope':None,
                                                                             'filesize':0,
                                                                             'checksum':None,
                                                                             }
                                        elif not taskSpec.useListPFN():
                                            # dummy file list for pseudo dataset
                                            tmpRet = {str(uuid.uuid4()):{'lfn':'pseudo_lfn',
                                                                         'scope':None,
                                                                         'filesize':0,
                                                                         'checksum':None,
                                                                         }
                                                      }
                                        else:
                                            # make dummy file list for PFN list
                                            if taskParamMap.has_key('nFiles'):
                                                nPFN = taskParamMap['nFiles']
                                            else:
                                                nPFN = 1
                                            tmpRet = {}
                                            for iPFN in range(nPFN):
                                                tmpRet[str(uuid.uuid4())] = {'lfn':'{0:06d}:{1}'.format(iPFN,taskParamMap['pfnList'][iPFN].split('/')[-1]),
                                                                             'scope':None,
                                                                             'filesize':0,
                                                                             'checksum':None,
                                                                             }
                                except:
                                    errtype,errvalue = sys.exc_info()[:2]
                                    tmpLog.error('failed to get files due to {0}:{1} {2}'.format(self.__class__.__name__,
                                                                                                 errtype.__name__,errvalue))
                                    if errtype == Interaction.JEDIFatalError:
                                        # fatal error
                                        datasetStatus = 'broken'
                                        taskBroken = True
                                        # update dataset status
                                        self.updateDatasetStatus(datasetSpec,datasetStatus,tmpLog)
                                    else:
                                        # temporary error
                                        taskOnHold = True
                                    taskSpec.setErrDiag('failed to get files for {0}'.format(datasetSpec.datasetName))
                                    allUpdated = False
                                else:
                                    # parameters for master input
                                    respectLB = False
                                    useRealNumEvents = False
                                    if datasetSpec.isMaster():
                                        # respect LB boundaries
                                        respectLB = taskSpec.respectLumiblock()
                                        # use real number of events
                                        useRealNumEvents = taskSpec.useRealNumEvents()
                                    # the number of events per file
                                    nEventsPerFile = None
                                    nEventsPerJob = None
                                    nEventsPerRange = None
                                    tgtNumEventsPerJob = None
                                    if (datasetSpec.isMaster() and (taskParamMap.has_key('nEventsPerFile') or useRealNumEvents)) or \
                                            (datasetSpec.isPseudo() and taskParamMap.has_key('nEvents') and not datasetSpec.isSeqNumber()):
                                        if taskParamMap.has_key('nEventsPerFile'):
                                            nEventsPerFile = taskParamMap['nEventsPerFile']
                                        elif datasetSpec.isMaster() and datasetSpec.isPseudo() and taskParamMap.has_key('nEvents'):
                                            # use nEvents as nEventsPerFile for pseudo input
                                            nEventsPerFile = taskParamMap['nEvents']
                                        if taskParamMap.has_key('nEventsPerJob'):
                                            nEventsPerJob = taskParamMap['nEventsPerJob']
                                        elif taskParamMap.has_key('nEventsPerRange'):
                                            nEventsPerRange = taskParamMap['nEventsPerRange']
                                        if 'tgtNumEventsPerJob' in taskParamMap:
                                            tgtNumEventsPerJob = taskParamMap['tgtNumEventsPerJob']
                                            # reset nEventsPerJob
                                            nEventsPerJob = None
                                    # max attempts
                                    maxAttempt = None
                                    maxFailure = None
                                    if datasetSpec.isMaster() or datasetSpec.toKeepTrack():
                                        # max attempts
                                        if taskSpec.disableAutoRetry():
                                            # disable auto retry
                                            maxAttempt = 1
                                        elif taskParamMap.has_key('maxAttempt'):
                                            maxAttempt = taskParamMap['maxAttempt']
                                        else:
                                            # use default value
                                            maxAttempt = 3
                                        # max failure
                                        if 'maxFailure' in taskParamMap:
                                            maxFailure = taskParamMap['maxFailure']
                                    # first event number
                                    firstEventNumber = None
                                    if datasetSpec.isMaster():
                                        # first event number
                                        firstEventNumber = 1 + taskSpec.getFirstEventOffset()
                                    # nMaxEvents
                                    nMaxEvents = None
                                    if datasetSpec.isMaster() and taskParamMap.has_key('nEvents'):
                                        nMaxEvents = taskParamMap['nEvents']
                                    # nMaxFiles
                                    nMaxFiles = None
                                    if taskParamMap.has_key('nFiles'):
                                        if datasetSpec.isMaster():
                                            nMaxFiles = taskParamMap['nFiles']
                                        else:
                                            # calculate for secondary
                                            nMaxFiles = datasetSpec.getNumMultByRatio(origNumFiles)
                                            # multiplied by the number of jobs per file for event-level splitting
                                            if nMaxFiles != None and taskParamMap.has_key('nEventsPerFile'):
                                                if taskParamMap.has_key('nEventsPerJob'):
                                                    if taskParamMap['nEventsPerFile'] > taskParamMap['nEventsPerJob']:
                                                        nMaxFiles *= float(taskParamMap['nEventsPerFile'])/float(taskParamMap['nEventsPerJob'])
                                                        nMaxFiles = int(math.ceil(nMaxFiles))
                                                elif taskParamMap.has_key('nEventsPerRange'):
                                                    if taskParamMap['nEventsPerFile'] > taskParamMap['nEventsPerRange']:
                                                        nMaxFiles *= float(taskParamMap['nEventsPerFile'])/float(taskParamMap['nEventsPerRange'])
                                                        nMaxFiles = int(math.ceil(nMaxFiles))
                                    # use scout
                                    useScout = False
                                    if datasetSpec.isMaster() and taskSpec.useScout() and (datasetSpec.status != 'toupdate' or not taskSpec.isPostScout()):
                                        useScout = True
                                    # use files with new attempt numbers
                                    useFilesWithNewAttemptNr = False
                                    if not datasetSpec.isPseudo() and fileList != [] and taskParamMap.has_key('useInFilesWithNewAttemptNr'):
                                        useFilesWithNewAttemptNr = True
                                    #ramCount
                                    ramCount = 0
                                    # feed files to the contents table
                                    tmpLog.debug('update contents')
                                    retDB,missingFileList,nFilesUnique,diagMap = self.taskBufferIF.insertFilesForDataset_JEDI(datasetSpec,tmpRet,
                                                                                                                              tmpMetadata['state'],
                                                                                                                              stateUpdateTime,
                                                                                                                              nEventsPerFile,
                                                                                                                              nEventsPerJob,
                                                                                                                              maxAttempt,
                                                                                                                              firstEventNumber,
                                                                                                                              nMaxFiles,
                                                                                                                              nMaxEvents,
                                                                                                                              useScout,
                                                                                                                              fileList,
                                                                                                                              useFilesWithNewAttemptNr,
                                                                                                                              nFilesPerJob,
                                                                                                                              nEventsPerRange,
                                                                                                                              nChunksForScout,
                                                                                                                              includePatt,
                                                                                                                              excludePatt,
                                                                                                                              xmlConfig,
                                                                                                                              noWaitParent,
                                                                                                                              taskSpec.parent_tid,
                                                                                                                              self.pid,
                                                                                                                              maxFailure,
                                                                                                                              useRealNumEvents,
                                                                                                                              respectLB,
                                                                                                                              tgtNumEventsPerJob,
                                                                                                                              skipFilesUsedBy,
                                                                                                                              ramCount)
                                    if retDB == False:
                                        taskSpec.setErrDiag('failed to insert files for {0}. {1}'.format(datasetSpec.datasetName,
                                                                                                         diagMap['errMsg']))
                                        allUpdated = False
                                        taskBroken = True
                                        break
                                    elif retDB == None:
                                        # the dataset is locked by another or status is not applicable
                                        allUpdated = False
                                        tmpLog.debug('escape since task or dataset is locked')
                                        break
                                    elif missingFileList != []:
                                        # files are missing
                                        tmpErrStr = '{0} files missing in {1}'.format(len(missingFileList),datasetSpec.datasetName)
                                        tmpLog.debug(tmpErrStr)
                                        taskSpec.setErrDiag(tmpErrStr)
                                        allUpdated = False
                                        taskOnHold = True
                                        missingMap[datasetSpec.datasetName] = {'datasetSpec':datasetSpec,
                                                                               'missingFiles':missingFileList}
                                    else:
                                        # reduce the number of files to be read
                                        if taskParamMap.has_key('nFiles'):
                                            if datasetSpec.isMaster():
                                                taskParamMap['nFiles'] -= nFilesUnique
                                        # reduce the number of files for scout
                                        if useScout:
                                            nChunksForScout = diagMap['nChunksForScout']
                                        # number of master input files
                                        if datasetSpec.isMaster():
                                            checkedMaster = True
                                            nFilesMaster += nFilesUnique
                                        # running task
                                        if diagMap['isRunningTask']:
                                            runningTask = True
                                        # no activated pending input for noWait
                                        if noWaitParent and diagMap['nActivatedPending'] == 0 and not (useScout and nChunksForScout == 0) \
                                                and tmpMetadata['state'] != 'closed' and datasetSpec.isMaster():
                                            tmpErrStr = 'insufficient inputs are ready. '
                                            tmpErrStr += diagMap['errMsg']
                                            tmpLog.debug(tmpErrStr)
                                            taskSpec.setErrDiag(tmpErrStr)
                                            taskOnHold = True
                                            setFrozenTime = False
                                            break
                            tmpLog.debug('end loop')
                    # no master input
                    if not taskOnHold and not taskBroken and allUpdated and nFilesMaster == 0 and checkedMaster:
                        tmpErrStr = 'no master input files. input dataset is empty'
                        tmpLog.error(tmpErrStr)
                        taskSpec.setErrDiag(tmpErrStr,None)
                        if taskSpec.allowEmptyInput() or noWaitParent:
                            taskOnHold = True
                        else:
                            taskBroken = True
                    # update task status
                    if taskBroken:
                        # task is broken
                        taskSpec.status = 'tobroken'
                        tmpMsg = 'set task.status={0}'.format(taskSpec.status)
                        tmpLog.info(tmpMsg)
                        tmpLog.sendMsg(tmpMsg,self.msgType)
                        allRet = self.taskBufferIF.updateTaskStatusByContFeeder_JEDI(jediTaskID,taskSpec,pid=self.pid)
                    # change task status unless the task is running
                    if not runningTask:
                        if taskOnHold:
                            # go to pending state
                            if not taskSpec.status in ['broken','tobroken']:
                                taskSpec.setOnHold()
                            tmpMsg = 'set task.status={0}'.format(taskSpec.status)
                            tmpLog.info(tmpMsg)
                            tmpLog.sendMsg(tmpMsg,self.msgType)
                            allRet = self.taskBufferIF.updateTaskStatusByContFeeder_JEDI(jediTaskID,taskSpec,pid=self.pid,setFrozenTime=setFrozenTime)
                        elif allUpdated:
                            # all OK
                            allRet,newTaskStatus = self.taskBufferIF.updateTaskStatusByContFeeder_JEDI(jediTaskID,getTaskStatus=True,pid=self.pid,
                                                                                                       useWorldCloud=taskSpec.useWorldCloud())
                            tmpMsg = 'set task.status={0}'.format(newTaskStatus)
                            tmpLog.info(tmpMsg)
                            tmpLog.sendMsg(tmpMsg,self.msgType)
                        # just unlock
                        retUnlock = self.taskBufferIF.unlockSingleTask_JEDI(jediTaskID,self.pid)
                        tmpLog.debug('unlock not-running task with {0}'.format(retUnlock))
                    else:
                        # just unlock
                        retUnlock = self.taskBufferIF.unlockSingleTask_JEDI(jediTaskID,self.pid)
                        tmpLog.debug('unlock task with {0}'.format(retUnlock))
                    tmpLog.debug('done')
            except:
                errtype,errvalue = sys.exc_info()[:2]
                logger.error('{0} failed in runImpl() with {1}:{2}'.format(self.__class__.__name__,errtype.__name__,errvalue))
# update dataset
def updateDatasetStatus(self,datasetSpec,datasetStatus,tmpLog):
# update dataset status
datasetSpec.status = datasetStatus
datasetSpec.lockedBy = None
tmpLog.info('update dataset status with {0}'.format(datasetSpec.status))
self.taskBufferIF.updateDataset_JEDI(datasetSpec,
{'datasetID':datasetSpec.datasetID,
'jediTaskID':datasetSpec.jediTaskID},
lockTask=True)
########## launch

def launcher(commuChannel,taskBufferIF,ddmIF,vos=None,prodSourceLabels=None):
    """Entry point: instantiate a ContentsFeeder worker and run its main loop."""
    p = ContentsFeeder(commuChannel,taskBufferIF,ddmIF,vos,prodSourceLabels)
    p.start()
| apache-2.0 |
MridulS/BinPy | BinPy/examples/source/ic/Series_4000/IC4069.py | 5 | 1254 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>

# <headingcell level=2>

# Usage of IC 4069

# <codecell>

from __future__ import print_function
from BinPy import *

# <codecell>

# Usage of IC 4069:

ic = IC_4069()

print(ic.__doc__)

# <codecell>

# The Pin configuration is:

inp = {2: 0, 3: 1, 4: 0, 5: 1, 7: 0, 9: 1, 10: 1, 11: 1, 12: 1, 14: 1}

# Pin initialization

# Powering up the IC - using -- ic.setIC({14: 1, 7: 0}) --

ic.setIC({14: 1, 7: 0})

# Setting the inputs of the ic

ic.setIC(inp)

# Draw the IC with the current configuration

ic.drawIC()

# <codecell>

# Run the IC with the current configuration using -- print ic.run() --

# Note that the ic.run() returns a dict of pin configuration similar to inp

print (ic.run())

# <codecell>

# Setting the outputs to the current IC configuration using --
# ic.setIC(ic.run()) --

ic.setIC(ic.run())

# Draw the final configuration

ic.drawIC()

# <codecell>

# Setting the outputs to the current IC configuration using --
# ic.setIC(ic.run()) --

ic.setIC(ic.run())

# Draw the final configuration

ic.drawIC()

# Run the IC

print (ic.run())

# <codecell>

# Connector Outputs

c = Connector()

# Set the output connector to a particular pin of the ic

ic.setOutput(2, c)

print(c)
| bsd-3-clause |
HyperloopTeam/FullOpenMDAO | lib/python2.7/site-packages/openmdao.main-0.13.0-py2.7.egg/openmdao/main/test/test_workflow.py | 1 | 10935 | """
Test run/stop aspects of a simple workflow.
"""
import unittest
from openmdao.main.api import Assembly, Component, set_as_top, Driver
from openmdao.main.case import Case
from openmdao.main.exceptions import RunStopped
from openmdao.main.datatypes.api import Int, Bool, Float
from openmdao.main.hasparameters import HasParameters
from openmdao.main.hasobjective import HasObjective
from openmdao.main.test.test_assembly import Simple
from openmdao.main.interfaces import implements, ICaseRecorder
from openmdao.util.decorators import add_delegate
from openmdao.main.case import CaseTreeNode
# pylint: disable=E1101,E1103
# "Instance of <class> has no <attr> member"
dummyval = 1  # NOTE(review): appears unused in this module -- candidate for removal
class TestComponent(Component):
    """
    Component which tracks its total executions
    and can request that the run be stopped.
    """

    dummy_input = Int(0, iotype='in')        # exists only so data connections can be made
    set_stop = Bool(False, iotype='in')      # when True, execute() asks the top driver to stop
    total_executions = Int(0, iotype='out')  # incremented on every execute()

    def execute(self):
        """Count this execution and, if requested, stop the run."""
        self.total_executions += 1
        if self.set_stop:
            # signal the containing assembly's driver to halt the run
            self.parent.driver.stop()
class Model(Assembly):
    """ Just a simple three-component workflow. """

    def configure(self):
        """Build comp_a -> comp_b -> comp_c, chained via execution counts."""
        self.add('comp_a', TestComponent())
        self.add('comp_b', TestComponent())
        self.add('comp_c', TestComponent())
        self.driver.workflow.add(['comp_a', 'comp_b', 'comp_c'])
        # connecting execution counters downstream forces sequential execution
        self.connect('comp_a.total_executions', 'comp_b.dummy_input')
        self.connect('comp_b.total_executions', 'comp_c.dummy_input')
class LazyModel(Assembly):
    """Chain of Simple components (C1 -> C2 -> C3 -> C4) with two NTimes
    drivers and no pre-populated workflows; used by the lazy-evaluation tests.
    """

    def configure(self):
        self.add('driver', NTimes(1))
        self.add('D2', NTimes(1))
        self.add('C1', Simple())
        self.add('C2', Simple())
        self.add('C3', Simple())
        self.add('C4', Simple())
        # C1 --> C2 --> C3 --> C4
        self.connect('C1.c', 'C2.a')
        self.connect('C2.c', 'C3.a')
        self.connect('C3.c', 'C4.a')
@add_delegate(HasParameters, HasObjective)
class NTimes(Driver):
    """Driver that simply executes its workflow a fixed number of times."""

    def __init__(self, max_iterations):
        super(NTimes, self).__init__()
        # number of workflow executions performed per call to execute()
        self.max_iterations = max_iterations

    def execute(self):
        for i in range(self.max_iterations):
            super(NTimes, self).execute()
@add_delegate(HasParameters, HasObjective)
class CaseDriver(Driver):
    """Driver that sweeps its parameter over 0 .. max_iterations-1, running
    the workflow and evaluating the objective at each value.
    """

    def __init__(self, max_iterations):
        super(CaseDriver, self).__init__()
        self.max_iterations = max_iterations

    def execute(self):
        for i in range(self.max_iterations):
            # assumes exactly one parameter has been added -- TODO confirm
            self.set_parameters([float(i)])
            super(CaseDriver, self).execute()
            self.eval_objective()
class CaseComponent(Component):
    """Trivial pass-through component: copies input x to output y."""

    x = Float(iotype='in')
    y = Float(iotype='out')

    def execute(self):
        # identity mapping; enough for driver/recorder bookkeeping tests
        self.y = self.x
class DumbRecorder(object):
    """Minimal ICaseRecorder implementation that keeps cases in memory."""
    implements(ICaseRecorder)

    def __init__(self):
        # recorded Case objects, in arrival order
        self.cases = []
        # per-source (input names, output names) registered up front
        self._registered = {}

    def startup(self):
        # nothing to initialize for an in-memory store
        pass

    def register(self, src, inputs, outputs):
        # remember the variable names so record() can label raw values
        self._registered[src] = (inputs, outputs)

    def record_constants(self, constants):
        # constants are not interesting to these tests
        pass

    def record(self, src, inputs, outputs, err, case_uuid, parent_uuid):
        names_in, names_out = self._registered[src]
        labeled_in = zip(names_in, inputs)
        labeled_out = zip(names_out, outputs)
        self.cases.append(Case(labeled_in, labeled_out,
                               case_uuid=case_uuid, parent_uuid=parent_uuid))

    def close(self):
        return

    def get_iterator(self):
        return iter(self.cases)
class TestCase(unittest.TestCase):
    """ Test run/stop aspects of a simple workflow. """

    def setUp(self):
        """ Called before each test. """
        self.model = set_as_top(Model())

    def tearDown(self):
        """ Called after each test. """
        pass

    def test_bad_workflow_reference(self):
        # a non-existent component name is only detected at run time
        self.model.driver.workflow.add('foobar')
        try:
            self.model.run()
        except Exception as err:
            self.assertEqual(str(err),
                             "'Model' object has no attribute 'foobar'")

    def test_simple(self):
        # each run() should execute every component exactly once
        self.assertEqual(self.model.comp_a.total_executions, 0)
        self.assertEqual(self.model.comp_b.total_executions, 0)
        self.assertEqual(self.model.comp_c.total_executions, 0)

        self.model.run()

        self.assertEqual(self.model.comp_a.total_executions, 1)
        self.assertEqual(self.model.comp_b.total_executions, 1)
        self.assertEqual(self.model.comp_c.total_executions, 1)

        self.model.run()

        self.assertEqual(self.model.comp_a.total_executions, 2)
        self.assertEqual(self.model.comp_b.total_executions, 2)
        self.assertEqual(self.model.comp_c.total_executions, 2)

    def test_run_stop_run(self):
        # stopping at comp_b leaves comp_c unexecuted; a subsequent run resumes
        self.model.comp_b.set_stop = True
        try:
            self.model.run()
        except RunStopped as exc:
            self.assertTrue('Stop requested' in str(exc))
        else:
            self.fail('Expected RunStopped')
        self.assertEqual(self.model.comp_a.total_executions, 1)
        self.assertEqual(self.model.comp_b.total_executions, 1)
        self.assertEqual(self.model.comp_c.total_executions, 0)
        self.model.comp_b.set_stop = False
        self.model.run()
        self.assertEqual(self.model.comp_a.total_executions, 2)
        self.assertEqual(self.model.comp_b.total_executions, 2)
        self.assertEqual(self.model.comp_c.total_executions, 1)

    def test_checks(self):
        # Tests out the validity checks.

        # Test 1, add a driver to its own workflow
        try:
            self.model.driver.workflow.add('driver', check=True)
        except AttributeError, err:
            msg = 'You cannot add a driver to its own workflow'
            self.assertEqual(str(err), msg)
        else:
            self.fail('Expected AttributeError')

        # Test 2, add a comp that is out of scope.
        self.model.add('sub', Assembly())
        self.model.sub.add('comp', Component())
        try:
            self.model.driver.workflow.add('sub.comp', check=True)
        except AttributeError, err:
            msg = "Component 'sub.comp' is not in the scope of the top assembly."
            self.assertEqual(str(err), msg)
        else:
            self.fail('Expected AttributeError')

        # Test 3, add a comp that does not exist
        try:
            self.model.driver.workflow.add('stuff', check=True)
        except AttributeError, err:
            msg = "Component 'stuff' does not exist in the top assembly."
            self.assertEqual(str(err), msg)
        else:
            self.fail('Expected AttributeError')

        ### TODO: this test needs to move to setup time
        ## Test 4, create a driver recursion loop
        #self.model.add('driver2', Driver())
        #self.model.driver.workflow.add('driver2', check=True)
        #try:
            #self.model.driver2.workflow.add('driver', check=True)
        #except AttributeError, err:
            #msg = "Driver recursion loop detected"
            #self.assertEqual(str(err), msg)
        #else:
            #self.fail('Expected AttributeError')

    def test_casetree(self):
        # Record tree of cases via workflow.
        top = Assembly()
        top.recorders = [DumbRecorder()]

        # inner driver sweeping comp2 three times
        top.add('driver2', CaseDriver(3))
        top.add('comp2', CaseComponent())
        top.driver2.workflow.add('comp2')
        top.driver2.add_parameter('comp2.x', low=0, high=10)
        top.driver2.add_objective('comp2.y')

        # outer driver sweeping comp1 (and the inner driver) twice
        top.add('driver1', CaseDriver(2))
        top.add('comp1', CaseComponent())
        top.driver1.add_parameter('comp1.x', low=0, high=10)
        top.driver1.add_objective('comp1.y')
        top.driver1.workflow.add(['comp1', 'driver2'])

        top.driver.workflow.add('driver1')
        top.run()

        print
        print 'Forest:'
        roots = CaseTreeNode.sort(top.recorders[0].get_iterator())
        for root in roots:
            root.dump(1)

        print
        print 'Iternames:'
        for root in roots:
            for name in root.iternames():
                print ' ', name

        # 2 outer iterations, each containing 3 inner iterations
        expected = [
            '1',
            '1-driver1.1',
            '1-driver1.1-driver2.1',
            '1-driver1.1-driver2.2',
            '1-driver1.1-driver2.3',
            '1-driver1.2',
            '1-driver1.2-driver2.1',
            '1-driver1.2-driver2.2',
            '1-driver1.2-driver2.3'
        ]
        for i, name in enumerate(roots[0].iternames()):
            self.assertEqual(name, expected[i])

    def test_lazy_auto_top(self):
        # lazy evaluation with auto determination of top level workflow
        top = set_as_top(LazyModel())
        top.driver.add_parameter('C2.b', low=-99, high=99)
        top.driver.add_objective('C3.d')
        top.run()
        self.assertEqual(top.C2.exec_count, 1)
        self.assertEqual(top.C3.exec_count, 1)
        self.assertEqual(top.C1.exec_count, 1)

        # now test strict mode
        try:
            top.check_config(strict=True)
        except Exception as err:
            self.assertEqual(str(err), ": The following components are not in any workflow but are needed by other workflows: ['C1']")
        else:
            self.fail("Exception expected")

    def test_lazy_auto_nested(self):
        # lazy evaluation with auto determination of D2 workflow
        top = set_as_top(LazyModel())
        top.driver.workflow.add(['D2', 'C1'])
        top.D2.add_parameter('C2.b', low=-99, high=99)
        top.D2.add_objective('C3.d')
        top.run()
        self.assertEqual(top.C2.exec_count, 1)
        self.assertEqual(top.C3.exec_count, 1)
        self.assertEqual(top.C1.exec_count, 1)

        # now test strict mode
        try:
            top.check_config(strict=True)
        except Exception as err:
            self.assertEqual(str(err), ": The following components are not in any workflow and WILL NOT EXECUTE: ['C4']")
        else:
            self.fail("Exception expected")

    def test_lazy_manual_top(self):
        # manual top level workflow
        top = set_as_top(LazyModel())
        top.driver.add_parameter('C2.b', low=-99, high=99)
        top.driver.add_objective('C3.d')
        top.driver.workflow.add(['C2', 'C3'])
        top.run()
        self.assertEqual(top.C2.exec_count, 1)
        self.assertEqual(top.C3.exec_count, 1)
        self.assertEqual(top.C1.exec_count, 1)

        # now test strict mode
        try:
            top.check_config(strict=True)
        except Exception as err:
            self.assertEqual(str(err), ": The following components are not in any workflow but are needed by other workflows: ['C1']")
        else:
            self.fail("Exception expected")
if __name__ == '__main__':
    import nose
    import sys
    # run this module under nose with coverage of openmdao.main enabled
    sys.argv.append('--cover-package=openmdao.main')
    sys.argv.append('--cover-erase')
    nose.runmodule()
| gpl-2.0 |
class FileProxyMixin(object):
    """
    A mixin class used to forward file methods to an underlaying file
    object. The internal file object has to be called "file"::

        class FileProxy(FileProxyMixin):
            def __init__(self, file):
                self.file = file
    """

    def _forward(attrname):
        # Build a read-only property that resolves the attribute on
        # self.file at access time; binding attrname as a default
        # argument freezes the name for each generated property.
        return property(lambda self, _name=attrname: getattr(self.file, _name))

    encoding = _forward('encoding')
    fileno = _forward('fileno')
    flush = _forward('flush')
    isatty = _forward('isatty')
    newlines = _forward('newlines')
    read = _forward('read')
    readinto = _forward('readinto')
    readline = _forward('readline')
    readlines = _forward('readlines')
    seek = _forward('seek')
    softspace = _forward('softspace')
    tell = _forward('tell')
    truncate = _forward('truncate')
    write = _forward('write')
    writelines = _forward('writelines')
    xreadlines = _forward('xreadlines')

    # remove the factory so it does not leak into the public API
    del _forward

    def __iter__(self):
        return iter(self.file)
| mit |
Fox-McCloud/-tg-station | tools/expand_filedir_paths.py | 166 | 3839 | #!/usr/bin/env python
import re, os, sys, fnmatch
# Regex pattern to extract the directory path in a #define FILE_DIR
filedir_pattern = re.compile(r'^#define\s*FILE_DIR\s*"(.*?)"')

# Regex pattern to extract any single quoted piece of text. This can also
# match single quoted strings inside of double quotes, which is part of a
# regular text string and should not be replaced. The replacement function
# however will skip any match that doesn't appear to be a filename so these
# extra matches should not be a problem.
rename_pattern = re.compile(r"'(.+?)'")

# Only filenames matching this pattern will have their resources renamed
source_pattern = re.compile(r"^.*?\.(dm|dmm)$")
# Open the .dme file and return a list of all FILE_DIR paths in it
def read_filedirs(filename):
    """Return the list of FILE_DIR directory paths declared in *filename*.

    filename -- path to a .dme file containing '#define FILE_DIR "..."' lines
    """
    # compiled locally (hoisted out of the loop) so the function is
    # self-contained; equivalent to the module-level filedir_pattern
    pattern = re.compile(r'^#define\s*FILE_DIR\s*"(.*?)"')
    result = []
    # open() with a context manager instead of the deprecated file()
    # builtin, so the handle is closed even if reading raises
    with open(filename, "rt") as dme_file:
        # Read each line from the file and check for regex pattern match
        for row in dme_file:
            match = pattern.match(row)
            if match:
                result.append(match.group(1))
    return result
# Search through a list of directories, and build a dictionary which
# maps every file to its full pathname (relative to the .dme file)
# If the same filename appears in more than one directory, the earlier
# directory in the list takes preference.
def index_files(file_dirs):
    """Map each lowercase file name to its full path, with earlier
    directories in *file_dirs* winning on name collisions."""
    mapping = {}
    # Later writes win, so walk the directory list from lowest to highest
    # precedence (i.e. reversed) and let earlier directories overwrite.
    for directory in reversed(file_dirs):
        # normalize Windows backslash separators to forward slashes
        prefix = directory.replace('\\', '/') + '/'
        for entry in os.listdir(directory):
            # entries without an extension (e.g. subdirectories) are skipped;
            # keys are lowercased because BYOND resource names are
            # case insensitive, even on Linux
            if '.' not in entry:
                continue
            mapping[entry.lower()] = prefix + entry
    return mapping
# Recursively search for every .dm/.dmm file in the .dme file directory. For
# each file, search it for any resource names in single quotes, and replace
# them with the full path previously found by index_files()
def rewrite_sources(resources):
    """Rewrite all .dm/.dmm files under the current directory, expanding
    single-quoted resource names to their full paths.

    resources -- dict mapping lowercase file names to full relative paths,
                 as produced by index_files()
    """
    # Create a closure for the regex replacement function to capture the
    # resources dictionary which can't be passed directly to this function
    def replace_func(name):
        key = name.group(1).lower()
        if key in resources:
            replacement = resources[key]
        else:
            # not a known resource name; keep the quoted text unchanged
            replacement = name.group(1)
        return "'" + replacement + "'"

    # Search recursively for all .dm and .dmm files
    for (dirpath, dirs, files) in os.walk("."):
        for name in files:
            if source_pattern.match(name):
                path = dirpath + '/' + name
                # Use context managers (and open() instead of the removed
                # file() builtin) so both handles are closed even on error.
                # Read file one line at a time and perform replacement of all
                # single quoted resource names with the fullpath to that
                # resource file, writing the result to a temporary file.
                with open(path, "rt") as source_file:
                    with open(path + ".tmp", "wt") as output_file:
                        for row in source_file:
                            row = rename_pattern.sub(replace_func, row)
                            output_file.write(row)
                # Delete original source file and replace with the temporary
                # output. On Windows, an atomic rename() operation is not
                # possible like it is under POSIX.
                os.remove(path)
                os.rename(path + ".tmp", path)
# Script entry: expand all FILE_DIR resource references in the tgstation tree.
# Assumes the working directory contains tgstation.dme -- run from repo root.
dirs = read_filedirs("tgstation.dme");
resources = index_files(dirs)
rewrite_sources(resources)
| agpl-3.0 |
MrTheodor/espressopp | src/integrator/MinimizeEnergy.py | 7 | 4313 | # Copyright (C) 2016
# Max Planck Institute for Polymer Research
# Copyright (C) 2016
# Jakub Krajniak (jkrajniak at gmail.com)
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
************************************
espressopp.integrator.MinimizeEnergy
************************************
This is a very simple approach to perform energy minimization of the system. The module uses
a `steepest descent method <https://en.wikipedia.org/wiki/Gradient_descent>`_. The position of particles is updated following the equation:
.. math::
p_{i+1} = p_i + min(\gamma F_i, d_{max})
where :math:`p_{i+}` is a new position, :math:`p_i` is a position at current step with corresponding
force :math:`F_i`. The parameters :math:`\gamma` and :math:`d_{max}` are set by user and control the relaxation
of the energy and the maximum update of the coordinates per step.
Additionally, a variable :math:`\gamma` step is also implemented. In this case, the position of particles is updated following the equation:
.. math::
p_{i+1} = p_i + d_{max}/f_{max} F_i
where :math:`f_{max}` is a maximum force in a single step of steepest descent method. :math:`\gamma=d_{max}/f_{max}` is automatically adjusted to a force magnitude.
In both cases, the routine runs until the maximum force falls below the tolerance :math:`f_{tol}`, or for at most *n* steps.
**Please note**
This module does not support any integrator extensions.
Example
>>> em = espressopp.integrator.MinimizeEnergy(system, gamma=0.001, ftol=0.01, max_displacement=0.0001)
>>> em.run(10000)
Example
>>> em = espressopp.integrator.MinimizeEnergy(system, gamma=0.01, ftol=0.01, max_displacement=0.01, variable_step_flag=True)
>>> em.run(10000)
**API**
.. function:: espressopp.integrator.MinimizeEnergy(system, gamma, ftol, max_displacement, variable_step_flag)
:param system: The espressopp system object.
:type system: espressopp.System
:param gamma: The gamma value.
:type gamma: float
:param ftol: The force tolerance
:type ftol: float
:param max_displacement: The maximum displacement.
:type max_displacement: float
:param variable_step_flag: The flag of adjusting gamma to the force strength.
:type variable_step_flag: bool
.. function:: espressopp.integrator.MinimizeEnergy.run(max_steps, verbose)
:param max_steps: The maximum number of steps to run.
:type max_steps: int
:param verbose: If set to True then display information about maximum force during the iterations.
:type verbose: bool
:return: The true if the maximum force in the system is lower than ftol otherwise false.
:rtype: bool
.. py:data:: f_max
The maximum force in the system.
.. py:data:: displacement
The maximum displacement used during the run of MinimizeEnergy
.. py:data:: step
The current iteration step.
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from _espressopp import integrator_MinimizeEnergy
class MinimizeEnergyLocal(integrator_MinimizeEnergy):
    """Worker-side wrapper around the C++ steepest-descent minimizer.

    All operations are no-ops on MPI ranks where PMI says the worker is
    inactive; only active workers touch the underlying C++ object.
    """

    def __init__(self, system, gamma, ftol, max_displacement, variable_step_flag=False):
        if not pmi.workerIsActive():
            return
        # NOTE(review): ftol is squared before being handed to C++ —
        # presumably the C++ side compares squared force norms; confirm.
        cxxinit(self, integrator_MinimizeEnergy, system, gamma,
                ftol * ftol, max_displacement, variable_step_flag)

    def run(self, niter, verbose=False):
        if not pmi.workerIsActive():
            return None
        return self.cxxclass.run(self, niter, verbose)
if pmi.isController:
    class MinimizeEnergy:
        # PMI proxy: on the controller rank this class forwards property
        # access (f_max, displacement, step) and the run() call to the
        # MinimizeEnergyLocal instances living on the worker ranks.
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espressopp.integrator.MinimizeEnergyLocal',
            pmiproperty = ('f_max', 'displacement', 'step'),
            pmicall = ('run', )
        )
| gpl-3.0 |
openstack/ceilometer | ceilometer/publisher/http.py | 1 | 7834 | #
# Copyright 2016 IBM
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from oslo_log import log
from oslo_utils import strutils
import requests
from requests import adapters
from urllib import parse as urlparse
from ceilometer import publisher
LOG = log.getLogger(__name__)
class HttpPublisher(publisher.ConfigPublisherBase):
    """Publish metering data to a http endpoint

    This publisher pushes metering data to a specified http endpoint. The
    endpoint should be configured in ceilometer pipeline configuration file.
    If the `timeout` and/or `max_retries` are not specified, the default
    `timeout` and `max_retries` will be set to 5 and 2 respectively. Additional
    parameters are:

    - ssl certificate verification can be disabled by setting `verify_ssl`
      to False
    - batching can be configured by `batch`
    - Basic authentication can be configured using the URL authentication
      scheme: http://username:password@example.com
    - For certificate authentication, `clientcert` and `clientkey` are the
      paths to the certificate and key files respectively. `clientkey` is
      only required if the clientcert file doesn't already contain the key.

    All of the parameters mentioned above get removed during processing,
    with the remaining portion of the URL being used as the actual endpoint.
    e.g. https://username:password@example.com/path?verify_ssl=False&q=foo
    will result in a call to https://example.com/path?q=foo

    To use this publisher for samples, add the following section to the
    /etc/ceilometer/pipeline.yaml file or simply add it to an existing
    pipeline::

        - name: meter_file
          meters:
              - "*"
          publishers:
              - http://host:80/path?timeout=1&max_retries=2&batch=False

    In the event_pipeline.yaml file, you can use the publisher in one of
    the sinks like the following::

        - name: event_sink
          publishers:
              - http://host:80/path?timeout=1&max_retries=2
    """

    HEADERS = {'Content-type': 'application/json'}

    def __init__(self, conf, parsed_url):
        """Parse the endpoint URL and build the configured requests session.

        :param conf: oslo.config configuration object.
        :param parsed_url: urllib.parse.SplitResult of the publisher URL;
                           its query string carries the publisher options.
        :raises ValueError: if the URL has no hostname or a non-numeric port.
        """
        super(HttpPublisher, self).__init__(conf, parsed_url)

        if not parsed_url.hostname:
            raise ValueError('The hostname of an endpoint for '
                             'HttpPublisher is required')

        # non-numeric port from the url string will cause a ValueError
        # exception when the port is read. Do a read to make sure the port
        # is valid, if not, ValueError will be thrown.
        parsed_url.port

        # Handling other configuration options in the query string
        params = urlparse.parse_qs(parsed_url.query)
        self.timeout = self._get_param(params, 'timeout', 5, int)
        self.max_retries = self._get_param(params, 'max_retries', 2, int)
        # batch=True (default) posts the whole sample list in one request;
        # batch=False posts one request per sample.
        self.poster = (
            self._batch_post if strutils.bool_from_string(self._get_param(
                params, 'batch', True)) else self._individual_post)

        verify_ssl = self._get_param(params, 'verify_ssl', True)
        try:
            self.verify_ssl = strutils.bool_from_string(verify_ssl,
                                                        strict=True)
        except ValueError:
            # a non-boolean value is treated as a CA bundle path
            self.verify_ssl = (verify_ssl or True)

        # Basic auth credentials come from the URL userinfo and are stripped
        # from the netloc so they never reach the request line.
        username = parsed_url.username
        password = parsed_url.password
        if username:
            self.client_auth = (username, password)
            netloc = parsed_url.netloc.replace(username + ':' + password + '@',
                                               '')
        else:
            self.client_auth = None
            netloc = parsed_url.netloc

        clientcert = self._get_param(params, 'clientcert', None)
        clientkey = self._get_param(params, 'clientkey', None)
        if clientcert:
            if clientkey:
                self.client_cert = (clientcert, clientkey)
            else:
                self.client_cert = clientcert
        else:
            self.client_cert = None

        # raw_only=True publishes only the raw payload of events.
        self.raw_only = strutils.bool_from_string(
            self._get_param(params, 'raw_only', False))

        kwargs = {'max_retries': self.max_retries,
                  'pool_connections': conf.max_parallel_requests,
                  'pool_maxsize': conf.max_parallel_requests}
        self.session = requests.Session()

        if parsed_url.scheme in ["http", "https"]:
            scheme = parsed_url.scheme
        else:
            ssl = self._get_param(params, 'ssl', False)
            try:
                ssl = strutils.bool_from_string(ssl, strict=True)
            except ValueError:
                ssl = (ssl or False)
            scheme = "https" if ssl else "http"

        # authentication & config params have been removed, so use URL with
        # updated query string
        self.target = urlparse.urlunsplit([
            scheme,
            netloc,
            parsed_url.path,
            urlparse.urlencode(params, doseq=True),
            parsed_url.fragment])

        self.session.mount(self.target, adapters.HTTPAdapter(**kwargs))

        LOG.debug('HttpPublisher for endpoint %s is initialized!' %
                  self.target)

    @staticmethod
    def _get_param(params, name, default_value, cast=None):
        """Pop option *name* from *params*, casting it; fall back to default.

        parse_qs maps each key to a list; the last value wins.
        """
        try:
            return cast(params.pop(name)[-1]) if cast else params.pop(name)[-1]
        except (ValueError, TypeError, KeyError):
            LOG.debug('Default value %(value)s is used for %(name)s' %
                      {'value': default_value, 'name': name})
            return default_value

    def _individual_post(self, data):
        # BUG FIX: previously this serialized the *whole* list once per item
        # (json.dumps(data) inside the loop), re-posting the complete batch
        # len(data) times. Each item must be posted on its own.
        for d in data:
            self._do_post(json.dumps(d))

    def _batch_post(self, data):
        if not data:
            LOG.debug('Data set is empty!')
            return
        self._do_post(json.dumps(data))

    def _do_post(self, data):
        """POST the JSON payload to the target; log (don't raise) on 4xx/5xx."""
        LOG.trace('Message: %s', data)
        try:
            res = self.session.post(self.target, data=data,
                                    headers=self.HEADERS, timeout=self.timeout,
                                    auth=self.client_auth,
                                    cert=self.client_cert,
                                    verify=self.verify_ssl)
            res.raise_for_status()
            LOG.debug('Message posting to %s: status code %d.',
                      self.target, res.status_code)
        except requests.exceptions.HTTPError:
            # res is guaranteed bound here: only raise_for_status() raises
            # HTTPError, and it runs after the post() assignment.
            LOG.exception('Status Code: %(code)s. '
                          'Failed to dispatch message: %(data)s' %
                          {'code': res.status_code, 'data': data})

    def publish_samples(self, samples):
        """Send a metering message for publishing

        :param samples: Samples from pipeline after transformation
        """
        self.poster([sample.as_dict() for sample in samples])

    def publish_events(self, events):
        """Send an event message for publishing

        :param events: events from pipeline after transformation
        """
        if self.raw_only:
            data = [evt.as_dict()['raw']['payload'] for evt in events
                    if evt.as_dict().get('raw', {}).get('payload')]
        else:
            data = [event.serialize() for event in events]
        self.poster(data)
| apache-2.0 |
ehealthafrica-ci/formhub | odk_logger/models/instance.py | 2 | 6664 | import re
from django.db import models
from django.db.models.signals import post_save
from django.db.models.signals import post_delete
from django.contrib.auth.models import User
from django.utils import timezone
from .xform import XForm
from .survey_type import SurveyType
from odk_logger.xform_instance_parser import XFormInstanceParser, \
clean_and_parse_xml, get_uuid_from_xml
from utils.model_tools import set_uuid
from django.utils.translation import ugettext as _
from taggit.managers import TaggableManager
class FormInactiveError(Exception):
    """Raised when a submission targets an XForm that is not active."""

    def __str__(self):
        # Python 2: encode the translated unicode message for byte-string use.
        return unicode(self).encode('utf-8')

    def __unicode__(self):
        return _("Form is inactive")
# The id_string must be known before get_dict() runs, because parsing the
# XML relies on the form's data dictionary.
def get_id_string_from_xml_str(xml_str):
    """Return the ``id`` attribute of the submission XML's root element."""
    document = clean_and_parse_xml(xml_str)
    return document.documentElement.getAttribute(u"id")
class Instance(models.Model):
    """A single submitted survey (one XML submission to an XForm)."""
    # I should rename this model, maybe Survey
    xml = models.TextField()
    user = models.ForeignKey(User, related_name='surveys', null=True)

    # using instances instead of surveys breaks django
    xform = models.ForeignKey(XForm, null=True, related_name='surveys')
    start_time = models.DateTimeField(null=True)
    date = models.DateField(null=True)
    survey_type = models.ForeignKey(SurveyType)

    # shows when we first received this instance
    date_created = models.DateTimeField(auto_now_add=True)
    # this will end up representing "date last parsed"
    date_modified = models.DateTimeField(auto_now=True)
    # this will end up representing "date instance was deleted"
    deleted_at = models.DateTimeField(null=True, default=None)
    is_deleted = models.BooleanField(null=False, default=False)

    # ODK keeps track of three statuses for an instance:
    # incomplete, submitted, complete
    # we will add a fourth status: submitted_via_web
    status = models.CharField(max_length=20,
                              default=u'submitted_via_web')
    uuid = models.CharField(max_length=249, default=u'')

    tags = TaggableManager()

    class Meta:
        app_label = 'odk_logger'

    def _set_xform(self, id_string):
        # Resolve the owning XForm by (id_string, submitting user).
        self.xform = XForm.objects.get(
            id_string=id_string, user=self.user)

    def get_root_node_name(self):
        self._set_parser()
        return self._parser.get_root_node_name()

    def get_root_node(self):
        self._set_parser()
        return self._parser.get_root_node()

    def get(self, abbreviated_xpath):
        """Return the parsed value at *abbreviated_xpath*."""
        self._set_parser()
        return self._parser.get(abbreviated_xpath)

    def _set_survey_type(self, doc):
        self.survey_type, created = \
            SurveyType.objects.get_or_create(slug=self.get_root_node_name())

    # todo: get rid of these fields
    def _set_start_time(self, doc):
        self.start_time = None

    def _set_date(self, doc):
        self.date = None

    def _set_uuid(self):
        # Prefer a uuid embedded in the XML; otherwise set_uuid() assigns one.
        if self.xml and not self.uuid:
            uuid = get_uuid_from_xml(self.xml)
            if uuid is not None:
                self.uuid = uuid
        set_uuid(self)

    def save(self, *args, **kwargs):
        """Derive metadata from the XML, reject inactive forms, then save."""
        self._set_xform(get_id_string_from_xml_str(self.xml))
        doc = self.get_dict()
        if self.xform and not self.xform.form_active:
            raise FormInactiveError()
        self._set_start_time(doc)
        self._set_date(doc)
        self._set_survey_type(doc)
        self._set_uuid()
        super(Instance, self).save(*args, **kwargs)

    def _set_parser(self):
        # Lazily build and cache the XML parser for this instance.
        if not hasattr(self, "_parser"):
            self._parser = XFormInstanceParser(
                self.xml, self.xform.data_dictionary())

    def get_dict(self, force_new=False, flat=True):
        """Return a python object representation of this instance's XML."""
        self._set_parser()
        if flat:
            return self._parser.get_flat_dict_with_attributes()
        else:
            return self._parser.to_dict()

    def set_deleted(self, deleted_at=None):
        """Soft-delete this instance, stamping *deleted_at* (default: now).

        BUG FIX: the default used to be ``deleted_at=timezone.now()``, which
        Python evaluates once at class-definition (import) time, so every
        default deletion shared the same stale timestamp. The timestamp is
        now computed at call time via the ``None`` sentinel.
        """
        if deleted_at is None:
            deleted_at = timezone.now()
        self.deleted_at = deleted_at
        self.is_deleted = True
        self.save()
        self.parsed_instance.save()

    @classmethod
    def set_deleted_at(cls, instance_id, deleted_at=None):
        """Soft-delete the instance with ``instance_id`` if it exists.

        Same import-time default-argument fix as :meth:`set_deleted`.
        """
        if deleted_at is None:
            deleted_at = timezone.now()
        try:
            instance = cls.objects.get(id=instance_id)
        except cls.DoesNotExist:
            pass
        else:
            instance.set_deleted(deleted_at)
def update_xform_submission_count(sender, instance, created, **kwargs):
    # post_save handler: keep the denormalized submission counters on the
    # XForm and on the owner's profile in sync with newly created Instances.
    if created:
        # select_for_update() row-locks the XForm so concurrent submissions
        # cannot lose increments.
        xform = XForm.objects.select_related().select_for_update()\
            .get(pk=instance.xform.pk)
        if xform.num_of_submissions == -1:
            # -1 is the "not yet counted" sentinel; normalize before adding.
            xform.num_of_submissions = 0
        xform.num_of_submissions += 1
        xform.last_submission_time = instance.date_created
        xform.save()
        profile_qs = User.profile.get_query_set()
        try:
            profile = profile_qs.select_for_update()\
                .get(pk=xform.user.profile.pk)
        except profile_qs.model.DoesNotExist:
            # owner has no profile: silently skip the profile counter
            pass
        else:
            profile.num_of_submissions += 1
            profile.save()

post_save.connect(update_xform_submission_count, sender=Instance,
                  dispatch_uid='update_xform_submission_count')
def update_xform_submission_count_delete(sender, instance, **kwargs):
    # post_delete handler: mirror of update_xform_submission_count,
    # decrementing the counters and clamping them at zero.
    try:
        xform = XForm.objects.select_for_update().get(pk=instance.xform.pk)
    except XForm.DoesNotExist:
        # form already gone (e.g. cascading delete): nothing to update
        pass
    else:
        xform.num_of_submissions -= 1
        if xform.num_of_submissions < 0:
            xform.num_of_submissions = 0
        xform.save()
        profile_qs = User.profile.get_query_set()
        try:
            profile = profile_qs.select_for_update()\
                .get(pk=xform.user.profile.pk)
        except profile_qs.model.DoesNotExist:
            pass
        else:
            profile.num_of_submissions -= 1
            if profile.num_of_submissions < 0:
                profile.num_of_submissions = 0
            profile.save()

post_delete.connect(update_xform_submission_count_delete, sender=Instance,
                    dispatch_uid='update_xform_submission_count_delete')
class InstanceHistory(models.Model):
    # Snapshot of a previous version of an Instance's XML, recorded when a
    # submission is edited/resubmitted.
    class Meta:
        app_label = 'odk_logger'

    xform_instance = models.ForeignKey(
        Instance, related_name='submission_history')
    xml = models.TextField()
    # old instance id
    uuid = models.CharField(max_length=249, default=u'')

    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
| bsd-2-clause |
ppiotr/Invenio | modules/webmessage/lib/webmessage_templates.py | 33 | 28064 | # -*- coding: utf-8 -*-
##
## handles rendering of webmessage module
##
## This file is part of Invenio.
## Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
""" templates for webmessage module """
__revision__ = "$Id$"
from invenio.webmessage_mailutils import email_quoted_txt2html, email_quote_txt
from invenio.webmessage_config import CFG_WEBMESSAGE_STATUS_CODE, \
CFG_WEBMESSAGE_SEPARATOR, \
CFG_WEBMESSAGE_RESULTS_FIELD
from invenio.config import CFG_WEBMESSAGE_MAX_NB_OF_MESSAGES
from invenio.dateutils import convert_datetext_to_dategui, \
datetext_default, \
create_day_selectbox, \
create_month_selectbox, \
create_year_selectbox
from invenio.urlutils import create_html_link, create_url
from invenio.htmlutils import escape_html
from invenio.config import CFG_SITE_URL, CFG_SITE_LANG
from invenio.messages import gettext_set_language
from invenio.webuser import get_user_info
class Template:
"""Templates for WebMessage module"""
def tmpl_display_inbox(self, messages, infos=[], warnings=[], nb_messages=0, no_quota=0, ln=CFG_SITE_LANG):
    """
    Displays a list of messages, with the appropriate links and buttons
    @param messages: a list of tuples:
        [(message_id,
          user_from_id,
          user_from_nickname,
          subject,
          sent_date,
          status)]
    @param infos: a list of informations to print on top of page
    @param warnings: a list of warnings to display
    @param nb_messages: number of messages user has
    @param no_quota: 1 if user has no quota (admin) or 0 else.
    @param ln: language of the page.
    @return: the list in HTML format
    """
    _ = gettext_set_language(ln)
    dummy = 0
    # page layout: warnings, infos, quota bar (unless exempt), message table
    inbox = self.tmpl_warning(warnings, ln)
    inbox += self.tmpl_infobox(infos, ln)
    if not(no_quota):
        inbox += self.tmpl_quota(nb_messages, ln)
    inbox += """
<table class="mailbox">
  <thead class="mailboxheader">
    <tr class="inboxheader">
      <td>%s</td>
      <td>%s</td>
      <td>%s</td>
      <td>%s</td>
    </tr>
  </thead>
  <tfoot>
    <tr style="height:0px;">
      <td></td>
      <td></td>
      <td></td>
      <td></td>
    </tr>
  </tfoot>
  <tbody class="mailboxbody">""" % (_("Subject"),
                                    _("Sender"),
                                    _("Date"),
                                    _("Action"))
    if len(messages) == 0:
        inbox += """
    <tr class="mailboxrecord" style="height: 100px;">
      <td colspan="4" style="text-align: center;">
        <b>%s</b>
      </td>
    </tr>""" %(_("No messages"),)
    for (msgid, id_user_from, user_from_nick,
         subject, sent_date, status) in messages:
        if not(subject):
            subject = _("No subject")
        subject_link = create_html_link(
            CFG_SITE_URL + '/yourmessages/display_msg',
            {'msgid': msgid, 'ln': ln},
            escape_html(subject))
        if user_from_nick:
            from_link = '%s'% (user_from_nick)
        else:
            # no nickname stored: fall back to the display name
            from_link = get_user_info(id_user_from, ln)[2]
        action_link = create_html_link(CFG_SITE_URL + '/yourmessages/write',
                                       {'msg_reply_id': msgid, 'ln': ln},
                                       _("Reply"))
        action_link += ' '
        action_link += create_html_link(CFG_SITE_URL + '/yourmessages/delete',
                                        {'msgid': msgid, 'ln': ln},
                                        _("Delete"))
        s_date = convert_datetext_to_dategui(sent_date, ln)
        stat_style = ''
        if (status == CFG_WEBMESSAGE_STATUS_CODE['NEW']):
            # unread messages are shown bold
            stat_style = ' style="font-weight:bold"'
        inbox += """
    <tr class="mailboxrecord">
      <td%s>%s</td>
      <td>%s</td>
      <td>%s</td>
      <td>%s</td>
    </tr>""" %(stat_style, subject_link, from_link, s_date, action_link)
    # NOTE(review): the "Write new message" submit button is named del_all,
    # same as the delete-all button — looks like a copy/paste slip; confirm.
    inbox += """
    <tr class="mailboxfooter">
      <td colspan="2">
        <form name="newMessage" action="%(url_new)s" method="post">
          <input type="submit" name="del_all" value="%(write_label)s" class="formbutton" />
        </form>
      </td>
      <td> </td>
      <td>
        <form name="deleteAll" action="%(url_delete_all)s" method="post">
          <input type="submit" name="del_all" value="%(delete_all_label)s" class="formbutton" />
        </form>
      </td>
    </tr>
  </tbody>
</table>""" % {'url_new': create_url(CFG_SITE_URL + '/yourmessages/write',
                                     {'ln': ln}),
               'url_delete_all': create_url(CFG_SITE_URL + '/yourmessages/delete_all',
                                            {'ln': ln}),
               'write_label': _("Write new message"),
               'delete_all_label': _("Delete All")}
    return inbox
def tmpl_write(self,
               msg_to="", msg_to_group="",
               msg_id=0,
               msg_subject="", msg_body="",
               msg_send_year=0, msg_send_month=0, msg_send_day=0,
               warnings=[],
               search_results_list=[],
               search_pattern="",
               results_field=CFG_WEBMESSAGE_RESULTS_FIELD['NONE'],
               ln=CFG_SITE_LANG):
    """
    Displays a writing message form with optional prefilled fields
    @param msg_to: nick of the user (prefills the To: field)
    @param msg_subject: subject of the message (prefills the Subject: field)
    @param msg_body: body of the message (prefills the Message: field)
    @param msg_send_year: prefills the year field
    @param msg_send_month: prefills the month field
    @param msg_send_day: prefills the day field
    @param warnings: display warnings on top of page
    @param search_results_list: list of tuples. (user/groupname, is_selected)
    @param search_pattern: pattern used for searching
    @param results_field: 'none', 'user' or 'group', see CFG_WEBMESSAGE_RESULTS_FIELD
    @param ln: language of the form
    @return: the form in HTML format
    """
    _ = gettext_set_language(ln)
    write_box = self.tmpl_warning(warnings)

    # escape forbidden character
    msg_to = escape_html(msg_to)
    msg_to_group = escape_html(msg_to_group)
    msg_subject = escape_html(msg_subject)
    search_pattern = escape_html(search_pattern)

    to_select = self.tmpl_user_or_group_search(search_results_list,
                                               search_pattern,
                                               results_field,
                                               ln)
    if msg_id:
        # replying to an existing message: prefix subject, quote body
        msg_subject = _("Re:") + " " + msg_subject
        msg_body = email_quote_txt(msg_body)
    # form part 1: recipients + subject header, up to the opening <textarea>
    write_box += """
<form name="write_message" action="%(url_form)s" method="post">
  <div style="float: left; vertical-align:text-top; margin-right: 10px;">
    <table class="mailbox">
      <thead class="mailboxheader">
        <tr>
          <td class="inboxheader" colspan="2">
            <table class="messageheader">
              <tr>
                <td class="mailboxlabel">%(to_label)s</td>
                <td class="mailboxlabel">%(users_label)s</td>
                <td style="width:100%%;">
                  <input class="mailboxinput" type="text" name="msg_to_user" value="%(to_users)s" />
                </td>
              </tr>
              <tr>
                <td class="mailboxlabel"> </td>
                <td class="mailboxlabel">%(groups_label)s</td>
                <td style="width:100%%;">
                  <input class="mailboxinput" type="text" name="msg_to_group" value="%(to_groups)s" />
                </td>
              </tr>
              <tr>
                <td class="mailboxlabel"> </td>
                <td> </td>
                <td> </td>
              </tr>
              <tr>
                <td class="mailboxlabel">%(subject_label)s</td>
                <td colspan="2">
                  <input class="mailboxinput" type="text" name="msg_subject" value="%(subject)s" />
                </td>
              </tr>
            </table>
          </td>
        </tr>
      </thead>
      <tfoot>
        <tr>
          <td style="height:0px" colspan="2"></td>
        </tr>
      </tfoot>
      <tbody class="mailboxbody">
        <tr>
          <td class="mailboxlabel">%(message_label)s</td>
          <td>
            <textarea name="msg_body" rows="10" cols="50">"""
    # form part 2: everything after the closing </textarea>
    write_box_part2 = """
          </td>
        </tr>
        <tr>
          <td class="mailboxlabel">%(send_later_label)s</td>
          <td>
            %(day_field)s
            %(month_field)s
            %(year_field)s
          </td>
        </tr>
        <tr class="mailboxfooter">
          <td colspan="2" class="mailboxfoot">
            <input type="submit" name="send_button" value="%(send_label)s" class="formbutton"/>
          </td>
        </tr>
      </tbody>
    </table>
  </div>
  <div style="vertical-align:top; margin-left: 5px; float: left;">
    %(to_select)s
  </div>
</form>
"""
    # the body placeholder sits directly between the textarea tags so no
    # extra whitespace is injected into the prefilled message
    write_box += "%(body)s</textarea>" + write_box_part2
    day_field = create_day_selectbox('msg_send_day',
                                     msg_send_day, ln)
    month_field = create_month_selectbox('msg_send_month',
                                         msg_send_month, ln)
    year_field = create_year_selectbox('msg_send_year', -1, 10,
                                       msg_send_year, ln)
    write_box = write_box % {'url_form': create_url(
                                 CFG_SITE_URL + '/yourmessages/send',
                                 {'ln': ln}),
                             'to_users' : msg_to,
                             'to_groups': msg_to_group,
                             'subject' : msg_subject,
                             'body' : msg_body,
                             'ln': ln,
                             'day_field': day_field,
                             'month_field': month_field,
                             'year_field': year_field,
                             'to_select': to_select,
                             'send_later_label': _("Send later?"),
                             'to_label': _("To:"),
                             'users_label': _("Users"),
                             'groups_label': _("Groups"),
                             'subject_label': _("Subject:"),
                             'message_label': _("Message:"),
                             'send_label': _("SEND")}
    return write_box
def tmpl_display_msg(self,
                     msg_id="",
                     msg_from_id="",
                     msg_from_nickname="",
                     msg_sent_to="",
                     msg_sent_to_group="",
                     msg_subject="",
                     msg_body="",
                     msg_sent_date="",
                     msg_received_date=datetext_default,
                     ln=CFG_SITE_LANG):
    """
    Displays a given message
    @param msg_id: id of the message
    @param msg_from_id: id of user who sent the message
    @param msg_from_nickname: nickname of the user who sent the message
    @param msg_sent_to: list of users who received the message
                        (comma separated string)
    @param msg_sent_to_group: list of groups who received the message
                              (comma separated string)
    @param msg_subject: subject of the message
    @param msg_body: body of the message
    @param msg_sent_date: date at which the message was sent
    @param msg_received_date: date at which the message had to be received
                              (if this argument != 0000-00-00 => reminder)
    @param ln: language of the page
    @return: the message in HTML format
    """
    # load the right message language
    _ = gettext_set_language(ln)

    # Build "sent to" links: recipients may be stored either as user ids
    # (digits, resolved via get_user_info) or directly as nicknames.
    sent_to_link = ''
    tos = msg_sent_to.split(CFG_WEBMESSAGE_SEPARATOR)
    if (tos):
        for to in tos[0:-1]:
            to_display = to
            if to.isdigit():
                (dummy, to, to_display) = get_user_info(int(to), ln)
            sent_to_link += create_html_link(CFG_SITE_URL + '/yourmessages/write',
                                            {'msg_to': to, 'ln': ln},
                                            escape_html(to_display))
            sent_to_link += CFG_WEBMESSAGE_SEPARATOR
        # last recipient handled outside the loop to omit the separator
        to_display = tos[-1]
        to = tos[-1]
        if to.isdigit():
            (dummy, to, to_display) = get_user_info(int(to), ln)
        sent_to_link += create_html_link(CFG_SITE_URL + '/yourmessages/write',
                                        {'msg_to': to, 'ln': ln},
                                        escape_html(to_display))
    group_to_link = ""
    groups = msg_sent_to_group.split(CFG_WEBMESSAGE_SEPARATOR)
    if (groups):
        for group in groups[0:-1]:
            group_to_link += create_html_link(
                CFG_SITE_URL + '/yourmessages/write',
                {'msg_to_group': group, 'ln': ln},
                escape_html(group))
            group_to_link += CFG_WEBMESSAGE_SEPARATOR
        group_to_link += create_html_link(
            CFG_SITE_URL + '/yourmessages/write',
            {'msg_to_group': groups[-1], 'ln': ln},
            escape_html(groups[-1]))
    # format the msg so that the '>>' chars give vertical lines
    final_body = email_quoted_txt2html(msg_body)

    out = """
<table class="mailbox" style="width: 70%%;">
  <thead class="mailboxheader">
    <tr>
      <td class="inboxheader" colspan="2">
        <table class="messageheader">
          <tr>
            <td class="mailboxlabel">%(from_label)s</td>
            <td>%(from_link)s</td>
          </tr>
          <tr>
            <td class="mailboxlabel">%(subject_label)s</td>
            <td style="width: 100%%;">%(subject)s</td>
          </tr>
          <tr>
            <td class="mailboxlabel">%(sent_label)s</td>
            <td>%(sent_date)s</td>
          </tr>"""
    # "received on" row only shown for reminder messages
    if (msg_received_date != datetext_default):
        out += """
          <tr>
            <td class="mailboxlabel">%(received_label)s</td>
            <td>%(received_date)s</td>
          </tr>"""
    out += """
          <tr>
            <td class="mailboxlabel">%(sent_to_label)s</td>
            <td>%(sent_to)s</td>
          </tr>"""
    if (msg_sent_to_group != ""):
        out += """
          <tr>
            <td class="mailboxlabel">%(groups_label)s</td>
            <td>%(sent_to_group)s</td>
          </tr>"""
    out += """
        </table>
      </td>
    </tr>
  </thead>
  <tfoot>
    <tr>
      <td></td>
      <td></td>
    </tr>
  </tfoot>
  <tbody class="mailboxbody">
    <tr class="mailboxrecord">
      <td colspan="2">%(body)s</td>
    </tr>
    <tr class="mailboxfooter">
      <td>
        <form name="reply" action="%(reply_url)s" method="post">
          <input class="formbutton" name="reply" value="%(reply_but_label)s" type="submit" />
        </form>
      </td>
      <td>
        <form name="deletemsg" action="%(delete_url)s" method="post">
          <input class="formbutton" name="delete" value="%(delete_but_label)s" type="submit" />
        </form>
      </td>
    </tr>
  </tbody>
</table>
"""
    if msg_from_nickname:
        msg_from_display = msg_from_nickname
    else:
        # no nickname: display the full name and link by user id instead
        msg_from_display = get_user_info(msg_from_id, ln)[2]
        msg_from_nickname = msg_from_id

    return out % {'from_link': create_html_link(
                                   CFG_SITE_URL + '/yourmessages/write',
                                   {'msg_to': msg_from_nickname,
                                    'ln': ln},
                                   msg_from_display),
                  'reply_url': create_url(CFG_SITE_URL + '/yourmessages/write',
                                          {'msg_reply_id': msg_id,
                                           'ln': ln}),
                  'delete_url': create_url(CFG_SITE_URL + '/yourmessages/delete',
                                           {'msgid': msg_id,
                                            'ln': ln}),
                  'sent_date' : convert_datetext_to_dategui(msg_sent_date, ln),
                  'received_date': convert_datetext_to_dategui(msg_received_date, ln),
                  'sent_to': sent_to_link,
                  'sent_to_group': group_to_link,
                  'subject' : msg_subject,
                  'body' : final_body,
                  'reply_to': msg_from_id,
                  'ln': ln,
                  'from_label':_("From:"),
                  'subject_label':_("Subject:"),
                  'sent_label': _("Sent on:"),
                  'received_label':_("Received on:"),
                  'sent_to_label': _("Sent to:"),
                  'groups_label': _("Sent to groups:"),
                  'reply_but_label':_("REPLY"),
                  'delete_but_label': _("DELETE")}
def tmpl_navtrail(self, ln=CFG_SITE_LANG, title=""):
    """
    display the navtrail, e.g.:
    Your account > Your messages > title
    @param title: the last part of the navtrail. Is not a link
    @param ln: language
    return html formatted navtrail
    """
    _ = gettext_set_language(ln)
    nav_h1 = create_html_link(CFG_SITE_URL + '/youraccount/display',
                              {'ln': ln},
                              _("Your Account"),
                              {'class': 'navtrail'})
    nav_h2 = ""
    if (title != ""):
        # with a non-empty title, "Your Messages" becomes an intermediate link
        nav_h2 += create_html_link(CFG_SITE_URL + '/yourmessages/display',
                                   {'ln': ln},
                                   _("Your Messages"),
                                   {'class': 'navtrail'})
        return nav_h1 + ' > ' + nav_h2
    return nav_h1
def tmpl_confirm_delete(self, ln=CFG_SITE_LANG):
    """
    display a confirm message (yes/no form) before emptying the mailbox
    @param ln: language
    @return: html output
    """
    _ = gettext_set_language(ln)
    # "Yes" posts confirmed=1 back to delete_all; "No" returns to display
    out = """
<table class="confirmoperation">
  <tr>
    <td colspan="2" class="confirmmessage">
      %(message)s
    </td>
  </tr>
  <tr>
    <td>
      <form name="validate" action="delete_all" method="post">
        <input type="hidden" name="confirmed" value="1" />
        <input type="hidden" name="ln" value="%(ln)s" />
        <input type="submit" value="%(yes_label)s" class="formbutton" />
      </form>
    </td>
    <td>
      <form name="cancel" action="display" method="post">
        <input type="hidden" name="ln" value="%(ln)s" />
        <input type="submit" value="%(no_label)s" class="formbutton" />
      </form>
    </td>
  </tr>
</table>"""% {'message': _("Are you sure you want to empty your whole mailbox?"),
              'ln':ln,
              'yes_label': _("Yes"),
              'no_label': _("No")}
    return out
def tmpl_infobox(self, infos, ln=CFG_SITE_LANG):
    """Render each string in *infos* inside its own info box.

    Accepts a bare string as well as a list/tuple of strings; newlines in
    an info become <br /> tags.
    @return: html output
    """
    _ = gettext_set_language(ln)
    if type(infos) not in (list, tuple):
        infos = [infos]
    boxes = []
    for info in infos:
        body = "<br />\n".join(info.split("\n"))
        boxes.append("<div class=\"infobox\">" + body + "</div><br />\n")
    return "".join(boxes)
def tmpl_warning(self, warnings, ln=CFG_SITE_LANG):
    """Render all *warnings* inside one shared warning box.

    Accepts a bare string as well as a list/tuple of strings; returns ""
    when there is nothing to display.
    @return: html output
    """
    _ = gettext_set_language(ln)
    if type(warnings) not in (list, tuple):
        warnings = [warnings]
    if warnings == []:
        return ""
    chunks = ["<div class=\"warningbox\">\n <b>Warning:</b>\n"]
    for warning in warnings:
        chunks.append(" <p>")
        chunks.append(" <br />\n".join(warning.split("\n")))
        chunks.append(" </p>")
    chunks.append("</div><br />\n")
    return "".join(chunks)
def tmpl_error(self, error, ln=CFG_SITE_LANG):
    """Render *error* inside an error box; returns "" for an empty message.

    @param error: string
    @return: html output
    """
    _ = gettext_set_language(ln)
    if error == "":
        return ""
    return ("<div class=\"errorbox\">\n <b>Error:</b>\n"
            " <p>" + error + " </p>"
            "</div><br />\n")
def tmpl_quota(self, nb_messages=0, ln=CFG_SITE_LANG):
    """Render the "quota used" label plus a 200px-wide usage bar.

    @param nb_messages: number of messages currently in the inbox
    @return: html output
    """
    _ = gettext_set_language(ln)
    max_messages = float(CFG_WEBMESSAGE_MAX_NB_OF_MESSAGES)
    used_fraction = float(nb_messages) / max_messages
    quota_label = _("Quota used: %(x_nb_used)i messages out of max. %(x_nb_total)i") % \
        {'x_nb_used': nb_messages,
         'x_nb_total': CFG_WEBMESSAGE_MAX_NB_OF_MESSAGES}
    return """
%(quota_label)s<br />
<div class="quotabox">
  <div class="quotabar" style="width:%(width)ipx"></div>
</div>""" % {'quota_label': quota_label,
             # bar width scales linearly with usage, full quota = 200px
             'width': int(used_fraction * 200)}
def tmpl_multiple_select(self, select_name, tuples_list, ln=CFG_SITE_LANG):
    """Render a multiple-select list box.

    @param select_name: name attribute of the <select> element
    @param tuples_list: a list of (value, is_selected) tuples (a single
                        tuple is also accepted)
    @return: HTML output
    """
    _ = gettext_set_language(ln)
    if type(tuples_list) not in (list, tuple):
        tuples_list = [tuples_list]
    parts = ["""
%s
<select name="%s" multiple="multiple" style="width:100%%">""" % (_("Please select one or more:"), select_name)]
    for (option_value, selected) in tuples_list:
        selected_attr = " selected=\"selected\"" if selected else ""
        parts.append(' <option value="%s"%s>%s</option>\n'
                     % (option_value, selected_attr, option_value))
    parts.append("</select>\n")
    return "".join(parts)
def tmpl_user_or_group_search(self,
                              tuples_list=[],
                              search_pattern="",
                              results_field=CFG_WEBMESSAGE_RESULTS_FIELD['NONE'],
                              ln=CFG_SITE_LANG):
    """
    Display a box for user searching
    @param tuples_list: list of (value, is_selected) tuples
    @param search_pattern: text to display in this field
    @param results_field: either 'none', 'user', 'group', look at CFG_WEBMESSAGE_RESULTS_FIELD
    @param ln: language
    @return: html output
    """
    _ = gettext_set_language(ln)
    multiple_select = ''
    add_button = ''
    # Once a search has been performed (results_field != NONE), either show
    # the matches plus an "add" button, or a "no match" message.
    if results_field != CFG_WEBMESSAGE_RESULTS_FIELD['NONE'] and results_field in CFG_WEBMESSAGE_RESULTS_FIELD.values():
        if len(tuples_list):
            multiple_select = self.tmpl_multiple_select('names_selected', tuples_list)
            add_button = '<input type="submit" name="%s" value="%s" class="nonsubmitbutton" />'
            if results_field == CFG_WEBMESSAGE_RESULTS_FIELD['USER']:
                add_button = add_button % ('add_user', _("Add to users"))
            else:
                add_button = add_button % ('add_group', _("Add to groups"))
        else:
            if results_field == CFG_WEBMESSAGE_RESULTS_FIELD['USER']:
                multiple_select = _("No matching user")
            else:
                multiple_select = _("No matching group")
    out = """
<table class="mailbox">
  <thead class="mailboxheader">
    <tr class ="inboxheader">
      <td colspan="3">
        %(title_label)s
        <input type="hidden" name="results_field" value="%(results_field)s" />
      </td>
    </tr>
  </thead>
  <tfoot>
    <tr><td colspan="3"></td></tr>
  </tfoot>
  <tbody class="mailboxbody">
    <tr class="mailboxsearch">
      <td>
        <input type="text" name="search_pattern" value="%(search_pattern)s" />
      </td>
      <td>
        <input type="submit" name="search_user" value="%(search_user_label)s" class="nonsubmitbutton" />
      </td>
      <td>
        <input type="submit" name="search_group" value="%(search_group_label)s" class="nonsubmitbutton" />
      </td>
    </tr>
    <tr class="mailboxresults">
      <td colspan="2">
        %(multiple_select)s
      </td>
      <td>
        %(add_button)s
      </td>
    </tr>
  </tbody>
</table>
"""
    out = out % {'title_label' : _("Find users or groups:"),
                 'search_user_label' : _("Find a user"),
                 'search_group_label' : _("Find a group"),
                 'results_field' : results_field,
                 'search_pattern' : search_pattern,
                 'multiple_select' : multiple_select,
                 'add_button' : add_button}
    return out
def tmpl_account_new_mail(self, nb_new_mail=0, total_mail=0, ln=CFG_SITE_LANG):
    """Summary line for the "Your Account" page: new vs. total messages.

    @param nb_new_mail: number of new mails
    @param total_mail: total number of mails (rendered as an inbox link)
    @param ln: language
    @return: html output.
    """
    _ = gettext_set_language(ln)
    new_count_html = '<b>' + str(nb_new_mail) + '</b>'
    total_count_link = create_html_link(CFG_SITE_URL + '/yourmessages/',
                                        {'ln': ln},
                                        str(total_mail),
                                        {},
                                        False, False)
    summary = _("You have %(x_nb_new)s new messages out of %(x_nb_total)s messages") % \
        {'x_nb_new': new_count_html,
         'x_nb_total': total_count_link}
    return summary + '.'
| gpl-2.0 |
browseinfo/odoo_saas3_nicolas | win32/setup.py | 105 | 2541 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import os
import glob
from distutils.core import setup
import py2exe
# Populate ``meta`` with the release metadata (version, license, url, ...)
# by executing openerp/release.py with this dict as its namespace.
meta = {}
execfile(os.path.join(os.path.dirname(__file__), '..', 'openerp', 'release.py'), meta)
def generate_files():
    """Build the ``data_files`` list for the Windows py2exe installer.

    Side effects: writes ``start.bat``, ``stop.bat`` and ``meta.py`` into the
    current directory before returning their names.

    :return: list usable as distutils ``data_files``; it mixes plain
        filenames with one ``(target_dir, [files])`` tuple for the VC runtime.
    """
    # For each generated .bat file, the list of ``net`` subcommands to run
    # against the OpenERP Windows service.
    actions = {
        'start': ['stop', 'start'],
        'stop': ['stop'],
    }
    files = []
    if os.name == 'nt':
        # Bundle the Visual C++ 2008 runtime required by py2exe builds.
        # Raw string so the Windows backslashes are explicit rather than
        # relying on \M and \* not being recognized escape sequences.
        files.append(("Microsoft.VC90.CRT", glob.glob(r'C:\Microsoft.VC90.CRT\*.*')))
    for action, steps in actions.items():
        fname = action + '.bat'
        files.append(fname)
        with open(fname, 'w') as fp:
            # The trailing '\n' is a real newline; \s, \S and \W are not
            # escape sequences and pass through to the batch file verbatim.
            fp.write('@PATH=%WINDIR%\system32;%WINDIR%;%WINDIR%\System32\Wbem;.\n')
            for step in steps:
                fp.write('@net %s %s\n' % (step, meta['nt_service_name']))
    # Ship a tiny metadata module consumed by the Windows service wrapper.
    files.append('meta.py')
    with open('meta.py', 'w') as fp:
        for m in 'description serie nt_service_name'.split():
            fp.write("%s = %r\n" % (m, meta[m],))
    return files
# Modules excluded from the py2exe bundle: Tk/PIL GUI pieces are not needed
# by the headless Windows service build.
excludes = "Tkconstants Tkinter tcl _imagingtk PIL._imagingtk ImageTk PIL.ImageTk FixTk".split()
# py2exe build of the OpenERP server Windows service; metadata comes from
# the ``meta`` dict populated from openerp/release.py above.
setup(service = ["OpenERPServerService"],
      version = meta['version'],
      license = meta['license'],
      url = meta['url'],
      author = meta['author'],
      author_email = meta['author_email'],
      data_files = generate_files(),
      options = {"py2exe": {
          "excludes": excludes,
          "skip_archive": 1,
          "optimize": 2,
      }},
      )
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mluscon/dnf | tests/test_package.py | 4 | 4694 | # Copyright (C) 2012-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
from dnf.pycomp import long
from tests import support
from tests.support import mock
import binascii
import hawkey
import rpm
# Known digests and size of the "tour" test package fixture.
TOUR_MD5 = binascii.unhexlify("68e9ded8ea25137c964a638f12e9987c")
TOUR_SHA256 = binascii.unhexlify("ce77c1e5694b037b6687cf0ab812ca60431ec0b65116abbb7b82684f0b092d62")
# Same as TOUR_MD5 except for the first byte; used to exercise checksum
# verification failure.
TOUR_WRONG_MD5 = binascii.unhexlify("ffe9ded8ea25137c964a638f12e9987c")
TOUR_SIZE = 2317
class PackageTest(support.TestCase):
    """Tests for dnf.package.Package attributes and local-package checks."""
    def setUp(self):
        # A mock base backed by the "main" test repo; self.pkg is an
        # available (not installed) pepper package.
        base = support.MockBase("main")
        self.sack = base.sack
        self.pkg = self.sack.query().available().filter(name="pepper")[1]
    def test_from_cmdline(self):
        """_from_cmdline is True only for packages added from local files."""
        self.sack.create_cmdline_repo()
        local_pkg = self.sack.add_cmdline_package(support.TOUR_44_PKG_PATH)
        self.assertTrue(local_pkg._from_cmdline)
        self.assertFalse(self.pkg._from_cmdline)
    def test_from_system(self):
        """_from_system is True only for packages from the installed repo."""
        pkg = self.sack.query().installed().filter(name="pepper")[0]
        self.assertTrue(pkg._from_system)
        self.assertFalse(self.pkg._from_system)
    def test_header(self):
        """_header returns an rpm.hdr; a missing file raises IOError."""
        self.sack.create_cmdline_repo()
        pkg = self.sack.add_cmdline_package(support.TOUR_44_PKG_PATH)
        header = pkg._header
        self.assertIsInstance(header, rpm.hdr)
        # Point localPkg at a nonexistent path to force the IOError branch.
        fn_getter = lambda: support.NONEXISTENT_FILE
        with mock.patch.object(pkg, 'localPkg', fn_getter):
            with self.assertRaises(IOError):
                pkg._header
    @mock.patch("dnf.package.Package.rpmdbid", long(3))
    def test_idx(self):
        """ pkg.idx is an int. """
        pkg = self.sack.query().installed().filter(name="pepper")[0]
        self.assertEqual(type(pkg.idx), int)
    def test_pkgtup(self):
        # (name, arch, epoch, version, release)
        self.assertEqual(self.pkg.pkgtup, ('pepper', 'x86_64', '0', '20', '0'))
    @mock.patch("dnf.package.Package.location", 'f/foo.rpm')
    def test_localPkg(self):
        """localPkg() maps file:// repos to the mount point, remote repos to the cache."""
        self.pkg.repo.basecachedir = '/cachedir'
        self.pkg.repo.baseurl = ['file:///mnt/cd']
        self.assertTrue(self.pkg.repo._local)
        self.assertEqual(self.pkg.localPkg(), '/mnt/cd/f/foo.rpm')
        self.pkg.repo.baseurl = ['http://remote']
        self.assertFalse(self.pkg.repo._local)
        self.assertEqual(self.pkg.localPkg(),
                         self.pkg.repo._cachedir + '/packages/foo.rpm')
    def test_verify(self):
        """verifyLocalPkg() passes with the right checksum, fails with a wrong one."""
        with mock.patch.object(self.pkg, 'localPkg',
                               return_value=support.TOUR_44_PKG_PATH):
            self.pkg._chksum = (hawkey.CHKSUM_MD5, TOUR_MD5)
            self.pkg._size = TOUR_SIZE
            self.assertTrue(self.pkg.verifyLocalPkg())
            self.pkg._chksum = (hawkey.CHKSUM_MD5, TOUR_WRONG_MD5)
            self.assertFalse(self.pkg.verifyLocalPkg())
    def test_return_id_sum(self):
        """returnIdSum() yields (checksum-name, hex digest)."""
        self.pkg._chksum = (hawkey.CHKSUM_MD5, TOUR_MD5)
        self.assertEqual(self.pkg.returnIdSum(),
                         ('md5', '68e9ded8ea25137c964a638f12e9987c'))
    def test_verify_local(self):
        """Cmdline packages report the cmdline repo and verify against themselves."""
        self.sack.create_cmdline_repo()
        local_pkg = self.sack.add_cmdline_package(support.TOUR_44_PKG_PATH)
        self.assertEqual(local_pkg.reponame, hawkey.CMDLINE_REPO_NAME)
        self.assertTrue(local_pkg.verifyLocalPkg())
    def test_chksum_local(self):
        """Cmdline packages get a SHA256 checksum computed from the file."""
        self.sack.create_cmdline_repo()
        local_pkg = self.sack.add_cmdline_package(support.TOUR_44_PKG_PATH)
        chksum = local_pkg._chksum
        self.assertEqual(chksum[0], hawkey.CHKSUM_SHA256)
        self.assertEqual(chksum[1], TOUR_SHA256)
    def test_verify_installed(self):
        """Installed packages have no local file to verify; expect ValueError."""
        pkg = self.sack.query().installed().filter(name="pepper")[0]
        self.assertRaises(ValueError, pkg.verifyLocalPkg)
| gpl-2.0 |
krafczyk/root | interpreter/llvm/src/bindings/python/llvm/tests/test_object.py | 95 | 2143 | from .base import TestBase
from ..object import ObjectFile
from ..object import Relocation
from ..object import Section
from ..object import Symbol
class TestObjectFile(TestBase):
    """Tests for the llvm.object.ObjectFile Python bindings."""
    def get_object_file(self):
        """Return an ObjectFile built from the suite's test binary."""
        return ObjectFile(filename=self.get_test_binary())
    def test_create_from_file(self):
        """Constructing from a filename must not raise."""
        self.get_object_file()
    def test_get_sections(self):
        """Each section exposes typed name/size/contents/address fields."""
        obj = self.get_object_file()
        seen = 0
        for sec in obj.get_sections():
            seen += 1
            assert isinstance(sec, Section)
            assert isinstance(sec.name, str)
            assert isinstance(sec.size, long)
            assert isinstance(sec.contents, str)
            assert isinstance(sec.address, long)
            assert len(sec.contents) == sec.size
        self.assertGreater(seen, 0)
        # A second pass exercises the cache() code path.
        for sec in obj.get_sections():
            sec.cache()
    def test_get_symbols(self):
        """Each symbol exposes typed name/address/size fields."""
        obj = self.get_object_file()
        seen = 0
        for sym in obj.get_symbols():
            seen += 1
            assert isinstance(sym, Symbol)
            assert isinstance(sym.name, str)
            assert isinstance(sym.address, long)
            assert isinstance(sym.size, long)
        self.assertGreater(seen, 0)
        for sym in obj.get_symbols():
            sym.cache()
    def test_symbol_section_accessor(self):
        """The first symbol's section accessor yields a Section."""
        obj = self.get_object_file()
        sym = next(iter(obj.get_symbols()), None)
        if sym is not None:
            assert isinstance(sym.section, Section)
    def test_get_relocations(self):
        """Relocations expose typed address/offset/type/value fields."""
        obj = self.get_object_file()
        for sec in obj.get_sections():
            for reloc in sec.get_relocations():
                assert isinstance(reloc, Relocation)
                assert isinstance(reloc.address, long)
                assert isinstance(reloc.offset, long)
                assert isinstance(reloc.type_number, long)
                assert isinstance(reloc.type_name, str)
                assert isinstance(reloc.value_string, str)
| lgpl-2.1 |
infoxchange/lettuce | tests/integration/lib/Django-1.2.5/django/contrib/admin/sites.py | 44 | 21836 | import re
from django import http, template
from django.contrib.admin import ModelAdmin
from django.contrib.admin import actions
from django.contrib.auth import authenticate, login
from django.views.decorators.csrf import csrf_protect
from django.db.models.base import ModelBase
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.shortcuts import render_to_response
from django.utils.functional import update_wrapper
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ugettext_lazy, ugettext as _
from django.views.decorators.cache import never_cache
from django.conf import settings
# Lazy so the message is translated at render time, per active locale.
ERROR_MESSAGE = ugettext_lazy("Please enter a correct username and password. Note that both fields are case-sensitive.")
# POST key whose presence marks a submission of the admin login form.
LOGIN_FORM_KEY = 'this_is_the_login_form'
class AlreadyRegistered(Exception):
    """Raised by AdminSite.register() when a model is registered twice."""
    pass
class NotRegistered(Exception):
    """Raised by AdminSite when an unregistered model is looked up/unregistered."""
    pass
class AdminSite(object):
    """
    An AdminSite object encapsulates an instance of the Django admin application, ready
    to be hooked in to your URLconf. Models are registered with the AdminSite using the
    register() method, and the root() method can then be used as a Django view function
    that presents a full admin interface for the collection of registered models.
    """
    # Template overrides; None means "use the bundled admin templates".
    index_template = None
    app_index_template = None
    login_template = None
    logout_template = None
    password_change_template = None
    password_change_done_template = None
    def __init__(self, name=None, app_name='admin'):
        self._registry = {} # model_class class -> admin_class instance
        # Set lazily by the deprecated root() dispatcher; None under get_urls().
        self.root_path = None
        if name is None:
            self.name = 'admin'
        else:
            self.name = name
        self.app_name = app_name
        # _global_actions keeps a pristine copy so disabled actions can still
        # be retrieved via get_action().
        self._actions = {'delete_selected': actions.delete_selected}
        self._global_actions = self._actions.copy()
    def register(self, model_or_iterable, admin_class=None, **options):
        """
        Registers the given model(s) with the given admin class.
        The model(s) should be Model classes, not instances.
        If an admin class isn't given, it will use ModelAdmin (the default
        admin options). If keyword arguments are given -- e.g., list_display --
        they'll be applied as options to the admin class.
        If a model is already registered, this will raise AlreadyRegistered.
        """
        if not admin_class:
            admin_class = ModelAdmin
        # Don't import the humongous validation code unless required
        if admin_class and settings.DEBUG:
            from django.contrib.admin.validation import validate
        else:
            validate = lambda model, adminclass: None
        if isinstance(model_or_iterable, ModelBase):
            model_or_iterable = [model_or_iterable]
        for model in model_or_iterable:
            if model in self._registry:
                raise AlreadyRegistered('The model %s is already registered' % model.__name__)
            # If we got **options then dynamically construct a subclass of
            # admin_class with those **options.
            if options:
                # For reasons I don't quite understand, without a __module__
                # the created class appears to "live" in the wrong place,
                # which causes issues later on.
                options['__module__'] = __name__
                admin_class = type("%sAdmin" % model.__name__, (admin_class,), options)
            # Validate (which might be a no-op)
            validate(admin_class, model)
            # Instantiate the admin class to save in the registry
            self._registry[model] = admin_class(model, self)
    def unregister(self, model_or_iterable):
        """
        Unregisters the given model(s).
        If a model isn't already registered, this will raise NotRegistered.
        """
        if isinstance(model_or_iterable, ModelBase):
            model_or_iterable = [model_or_iterable]
        for model in model_or_iterable:
            if model not in self._registry:
                raise NotRegistered('The model %s is not registered' % model.__name__)
            del self._registry[model]
    def add_action(self, action, name=None):
        """
        Register an action to be available globally.
        """
        name = name or action.__name__
        self._actions[name] = action
        self._global_actions[name] = action
    def disable_action(self, name):
        """
        Disable a globally-registered action. Raises KeyError for invalid names.
        """
        del self._actions[name]
    def get_action(self, name):
        """
        Explicitly get a registered global action whether it's enabled or
        not. Raises KeyError for invalid names.
        """
        return self._global_actions[name]
    def actions(self):
        """
        Get all the enabled actions as an iterable of (name, func).
        """
        return self._actions.iteritems()
    # Exposed as a read-only property (Python 2 idiom predating @property use here).
    actions = property(actions)
    def has_permission(self, request):
        """
        Returns True if the given HttpRequest has permission to view
        *at least one* page in the admin site.
        """
        return request.user.is_active and request.user.is_staff
    def check_dependencies(self):
        """
        Check that all things needed to run the admin have been correctly installed.
        The default implementation checks that LogEntry, ContentType and the
        auth context processor are installed.
        """
        from django.contrib.admin.models import LogEntry
        from django.contrib.contenttypes.models import ContentType
        if not LogEntry._meta.installed:
            raise ImproperlyConfigured("Put 'django.contrib.admin' in your "
                "INSTALLED_APPS setting in order to use the admin application.")
        if not ContentType._meta.installed:
            raise ImproperlyConfigured("Put 'django.contrib.contenttypes' in "
                "your INSTALLED_APPS setting in order to use the admin application.")
        if not ('django.contrib.auth.context_processors.auth' in settings.TEMPLATE_CONTEXT_PROCESSORS or
                'django.core.context_processors.auth' in settings.TEMPLATE_CONTEXT_PROCESSORS):
            raise ImproperlyConfigured("Put 'django.contrib.auth.context_processors.auth' "
                "in your TEMPLATE_CONTEXT_PROCESSORS setting in order to use the admin application.")
    def admin_view(self, view, cacheable=False):
        """
        Decorator to create an admin view attached to this ``AdminSite``. This
        wraps the view and provides permission checking by calling
        ``self.has_permission``.
        You'll want to use this from within ``AdminSite.get_urls()``:
            class MyAdminSite(AdminSite):
                def get_urls(self):
                    from django.conf.urls.defaults import patterns, url
                    urls = super(MyAdminSite, self).get_urls()
                    urls += patterns('',
                        url(r'^my_view/$', self.admin_view(some_view))
                    )
                    return urls
        By default, admin_views are marked non-cacheable using the
        ``never_cache`` decorator. If the view can be safely cached, set
        cacheable=True.
        """
        def inner(request, *args, **kwargs):
            if not self.has_permission(request):
                return self.login(request)
            return view(request, *args, **kwargs)
        if not cacheable:
            inner = never_cache(inner)
        # We add csrf_protect here so this function can be used as a utility
        # function for any view, without having to repeat 'csrf_protect'.
        if not getattr(view, 'csrf_exempt', False):
            inner = csrf_protect(inner)
        return update_wrapper(inner, view)
    def get_urls(self):
        """Return the URL patterns for the whole admin site (site-wide views
        first, then one include per registered model)."""
        from django.conf.urls.defaults import patterns, url, include
        if settings.DEBUG:
            self.check_dependencies()
        def wrap(view, cacheable=False):
            def wrapper(*args, **kwargs):
                return self.admin_view(view, cacheable)(*args, **kwargs)
            return update_wrapper(wrapper, view)
        # Admin-site-wide views.
        urlpatterns = patterns('',
            url(r'^$',
                wrap(self.index),
                name='index'),
            url(r'^logout/$',
                wrap(self.logout),
                name='logout'),
            url(r'^password_change/$',
                wrap(self.password_change, cacheable=True),
                name='password_change'),
            url(r'^password_change/done/$',
                wrap(self.password_change_done, cacheable=True),
                name='password_change_done'),
            url(r'^jsi18n/$',
                wrap(self.i18n_javascript, cacheable=True),
                name='jsi18n'),
            url(r'^r/(?P<content_type_id>\d+)/(?P<object_id>.+)/$',
                'django.views.defaults.shortcut'),
            url(r'^(?P<app_label>\w+)/$',
                wrap(self.app_index),
                name='app_list')
        )
        # Add in each model's views.
        for model, model_admin in self._registry.iteritems():
            urlpatterns += patterns('',
                url(r'^%s/%s/' % (model._meta.app_label, model._meta.module_name),
                    include(model_admin.urls))
            )
        return urlpatterns
    def urls(self):
        # 3-tuple form expected by include(): (patterns, app_namespace, instance_namespace)
        return self.get_urls(), self.app_name, self.name
    urls = property(urls)
    def password_change(self, request):
        """
        Handles the "change password" task -- both form display and validation.
        """
        from django.contrib.auth.views import password_change
        if self.root_path is not None:
            url = '%spassword_change/done/' % self.root_path
        else:
            url = reverse('admin:password_change_done', current_app=self.name)
        defaults = {
            'post_change_redirect': url
        }
        if self.password_change_template is not None:
            defaults['template_name'] = self.password_change_template
        return password_change(request, **defaults)
    def password_change_done(self, request):
        """
        Displays the "success" page after a password change.
        """
        from django.contrib.auth.views import password_change_done
        defaults = {}
        if self.password_change_done_template is not None:
            defaults['template_name'] = self.password_change_done_template
        return password_change_done(request, **defaults)
    def i18n_javascript(self, request):
        """
        Displays the i18n JavaScript that the Django admin requires.
        This takes into account the USE_I18N setting. If it's set to False, the
        generated JavaScript will be leaner and faster.
        """
        if settings.USE_I18N:
            from django.views.i18n import javascript_catalog
        else:
            from django.views.i18n import null_javascript_catalog as javascript_catalog
        return javascript_catalog(request, packages='django.conf')
    def logout(self, request):
        """
        Logs out the user for the given HttpRequest.
        This should *not* assume the user is already logged in.
        """
        from django.contrib.auth.views import logout
        defaults = {}
        if self.logout_template is not None:
            defaults['template_name'] = self.logout_template
        return logout(request, **defaults)
    logout = never_cache(logout)
    def login(self, request):
        """
        Displays the login form for the given HttpRequest.
        """
        from django.contrib.auth.models import User
        # If this isn't already the login page, display it.
        # NOTE(review): dict.has_key is a Python 2 idiom (removed in Python 3);
        # fine for this Django 1.2-era code.
        if not request.POST.has_key(LOGIN_FORM_KEY):
            if request.POST:
                message = _("Please log in again, because your session has expired.")
            else:
                message = ""
            return self.display_login_form(request, message)
        # Check that the user accepts cookies.
        if not request.session.test_cookie_worked():
            message = _("Looks like your browser isn't configured to accept cookies. Please enable cookies, reload this page, and try again.")
            return self.display_login_form(request, message)
        else:
            request.session.delete_test_cookie()
        # Check the password.
        username = request.POST.get('username', None)
        password = request.POST.get('password', None)
        user = authenticate(username=username, password=password)
        if user is None:
            message = ERROR_MESSAGE
            if username is not None and u'@' in username:
                # Mistakenly entered e-mail address instead of username? Look it up.
                try:
                    user = User.objects.get(email=username)
                except (User.DoesNotExist, User.MultipleObjectsReturned):
                    pass
                else:
                    if user.check_password(password):
                        message = _("Your e-mail address is not your username."
                                    " Try '%s' instead.") % user.username
            return self.display_login_form(request, message)
        # The user data is correct; log in the user in and continue.
        else:
            if user.is_active and user.is_staff:
                login(request, user)
                return http.HttpResponseRedirect(request.get_full_path())
            else:
                return self.display_login_form(request, ERROR_MESSAGE)
    login = never_cache(login)
    def index(self, request, extra_context=None):
        """
        Displays the main admin index page, which lists all of the installed
        apps that have been registered in this site.
        """
        app_dict = {}
        user = request.user
        for model, model_admin in self._registry.items():
            app_label = model._meta.app_label
            has_module_perms = user.has_module_perms(app_label)
            if has_module_perms:
                perms = model_admin.get_model_perms(request)
                # Check whether user has any perm for this module.
                # If so, add the module to the model_list.
                if True in perms.values():
                    model_dict = {
                        'name': capfirst(model._meta.verbose_name_plural),
                        'admin_url': mark_safe('%s/%s/' % (app_label, model.__name__.lower())),
                        'perms': perms,
                    }
                    if app_label in app_dict:
                        app_dict[app_label]['models'].append(model_dict)
                    else:
                        app_dict[app_label] = {
                            'name': app_label.title(),
                            'app_url': app_label + '/',
                            'has_module_perms': has_module_perms,
                            'models': [model_dict],
                        }
        # Sort the apps alphabetically.
        # (Python 2 cmp-style in-place sort; dict.values() returns a list here.)
        app_list = app_dict.values()
        app_list.sort(lambda x, y: cmp(x['name'], y['name']))
        # Sort the models alphabetically within each app.
        for app in app_list:
            app['models'].sort(lambda x, y: cmp(x['name'], y['name']))
        context = {
            'title': _('Site administration'),
            'app_list': app_list,
            'root_path': self.root_path,
        }
        context.update(extra_context or {})
        context_instance = template.RequestContext(request, current_app=self.name)
        return render_to_response(self.index_template or 'admin/index.html', context,
            context_instance=context_instance
        )
    index = never_cache(index)
    def display_login_form(self, request, error_message='', extra_context=None):
        """Render the admin login template, setting the session test cookie."""
        request.session.set_test_cookie()
        context = {
            'title': _('Log in'),
            'app_path': request.get_full_path(),
            'error_message': error_message,
            'root_path': self.root_path,
        }
        context.update(extra_context or {})
        context_instance = template.RequestContext(request, current_app=self.name)
        return render_to_response(self.login_template or 'admin/login.html', context,
            context_instance=context_instance
        )
    def app_index(self, request, app_label, extra_context=None):
        """Display the index page for a single app's registered models."""
        user = request.user
        has_module_perms = user.has_module_perms(app_label)
        app_dict = {}
        for model, model_admin in self._registry.items():
            if app_label == model._meta.app_label:
                if has_module_perms:
                    perms = model_admin.get_model_perms(request)
                    # Check whether user has any perm for this module.
                    # If so, add the module to the model_list.
                    if True in perms.values():
                        model_dict = {
                            'name': capfirst(model._meta.verbose_name_plural),
                            'admin_url': '%s/' % model.__name__.lower(),
                            'perms': perms,
                        }
                        if app_dict:
                            # NOTE(review): the trailing comma makes this
                            # statement a discarded 1-tuple; harmless but
                            # almost certainly unintended.
                            app_dict['models'].append(model_dict),
                        else:
                            # First time around, now that we know there's
                            # something to display, add in the necessary meta
                            # information.
                            app_dict = {
                                'name': app_label.title(),
                                'app_url': '',
                                'has_module_perms': has_module_perms,
                                'models': [model_dict],
                            }
        if not app_dict:
            raise http.Http404('The requested admin page does not exist.')
        # Sort the models alphabetically within each app.
        app_dict['models'].sort(lambda x, y: cmp(x['name'], y['name']))
        context = {
            'title': _('%s administration') % capfirst(app_label),
            'app_list': [app_dict],
            'root_path': self.root_path,
        }
        context.update(extra_context or {})
        context_instance = template.RequestContext(request, current_app=self.name)
        return render_to_response(self.app_index_template or ('admin/%s/app_index.html' % app_label,
            'admin/app_index.html'), context,
            context_instance=context_instance
        )
    def root(self, request, url):
        """
        DEPRECATED. This function is the old way of handling URL resolution, and
        is deprecated in favor of real URL resolution -- see ``get_urls()``.
        This function still exists for backwards-compatibility; it will be
        removed in Django 1.3.
        """
        import warnings
        warnings.warn(
            "AdminSite.root() is deprecated; use include(admin.site.urls) instead.",
            DeprecationWarning
        )
        #
        # Again, remember that the following only exists for
        # backwards-compatibility. Any new URLs, changes to existing URLs, or
        # whatever need to be done up in get_urls(), above!
        #
        if request.method == 'GET' and not request.path.endswith('/'):
            return http.HttpResponseRedirect(request.path + '/')
        if settings.DEBUG:
            self.check_dependencies()
        # Figure out the admin base URL path and stash it for later use
        self.root_path = re.sub(re.escape(url) + '$', '', request.path)
        url = url.rstrip('/') # Trim trailing slash, if it exists.
        # The 'logout' view doesn't require that the person is logged in.
        if url == 'logout':
            return self.logout(request)
        # Check permission to continue or display login form.
        if not self.has_permission(request):
            return self.login(request)
        if url == '':
            return self.index(request)
        elif url == 'password_change':
            return self.password_change(request)
        elif url == 'password_change/done':
            return self.password_change_done(request)
        elif url == 'jsi18n':
            return self.i18n_javascript(request)
        # URLs starting with 'r/' are for the "View on site" links.
        elif url.startswith('r/'):
            from django.contrib.contenttypes.views import shortcut
            return shortcut(request, *url.split('/')[1:])
        else:
            if '/' in url:
                return self.model_page(request, *url.split('/', 2))
            else:
                return self.app_index(request, url)
        raise http.Http404('The requested admin page does not exist.')
    def model_page(self, request, app_label, model_name, rest_of_url=None):
        """
        DEPRECATED. This is the old way of handling a model view on the admin
        site; the new views should use get_urls(), above.
        """
        from django.db import models
        model = models.get_model(app_label, model_name)
        if model is None:
            raise http.Http404("App %r, model %r, not found." % (app_label, model_name))
        try:
            admin_obj = self._registry[model]
        except KeyError:
            raise http.Http404("This model exists but has not been registered with the admin site.")
        return admin_obj(request, rest_of_url)
    model_page = never_cache(model_page)
# This global object represents the default admin site, for the common case.
# You can instantiate AdminSite in your own code to create a custom admin site.
# Hook it up with include(admin.site.urls) in a URLconf.
site = AdminSite()
| gpl-3.0 |
ville-k/tensorflow | tensorflow/contrib/layers/python/ops/sparse_feature_cross_op.py | 127 | 5046 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrappers for sparse cross operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework import deprecated_arg_values
from tensorflow.contrib.layers.ops import gen_sparse_feature_cross_op
from tensorflow.contrib.util import loader
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import resource_loader
# Load the native shared library backing the sparse feature cross kernels.
_sparse_feature_cross_op = loader.load_op_library(
    resource_loader.get_path_to_datafile("_sparse_feature_cross_op.so"))
# Default hash key for the FingerprintCat64.
SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY = 0xDECAFCAFFE
@deprecated_arg_values(
    "2016-11-20",
    "The default behavior of sparse_feature_cross is changing, the default\n"
    "value for hash_key will change to SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY.\n"
    "From that point on sparse_feature_cross will always use FingerprintCat64\n"
    "to concatenate the feature fingerprints. And the underlying\n"
    "_sparse_feature_cross_op.sparse_feature_cross operation will be marked\n"
    "as deprecated.",
    hash_key=None)
def sparse_feature_cross(inputs, hashed_output=False, num_buckets=0,
                         name=None, hash_key=None):
  """Crosses a list of Tensor or SparseTensor objects.

  See sparse_feature_cross_kernel.cc for more details.

  Args:
    inputs: List of `SparseTensor` or `Tensor` to be crossed.
    hashed_output: If true, returns the hash of the cross instead of the string.
      This will allow us avoiding string manipulations.
    num_buckets: It is used if hashed_output is true.
      output = hashed_value%num_buckets if num_buckets > 0 else hashed_value.
    name: A name prefix for the returned tensors (optional).
    hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
      function to combine the crosses fingerprints on SparseFeatureCrossOp.
      The default value is None, but will become
      SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY after 2016-11-20 (optional).

  Returns:
    A `SparseTensor` with the crossed features.
    Return type is string if hashed_output=False, int64 otherwise.

  Raises:
    TypeError: If the inputs aren't either SparseTensor or Tensor.
  """
  if not isinstance(inputs, list):
    raise TypeError("Inputs must be a list")
  if not all(isinstance(i, sparse_tensor.SparseTensor) or
             isinstance(i, ops.Tensor) for i in inputs):
    # Fixed message: dense Tensors are accepted too, as checked above.
    raise TypeError("All inputs must be SparseTensors or Tensors")

  # Split the inputs: the kernel takes sparse components and dense tensors
  # through separate arguments.
  sparse_inputs = [i for i in inputs
                   if isinstance(i, sparse_tensor.SparseTensor)]
  dense_inputs = [i for i in inputs
                  if not isinstance(i, sparse_tensor.SparseTensor)]

  indices = [sp_input.indices for sp_input in sparse_inputs]
  values = [sp_input.values for sp_input in sparse_inputs]
  shapes = [sp_input.dense_shape for sp_input in sparse_inputs]
  out_type = dtypes.int64 if hashed_output else dtypes.string

  # Non-string values are coerced to int64 so the kernel has a single
  # internal representation; internal_type tracks which one is in use.
  internal_type = dtypes.string
  for i in range(len(values)):
    if values[i].dtype != dtypes.string:
      values[i] = math_ops.to_int64(values[i])
      internal_type = dtypes.int64
  for i in range(len(dense_inputs)):
    if dense_inputs[i].dtype != dtypes.string:
      dense_inputs[i] = math_ops.to_int64(dense_inputs[i])
      internal_type = dtypes.int64

  # A set hash_key selects the v2 (FingerprintCat64) kernel; otherwise the
  # legacy kernel is used. NOTE(review): `if hash_key:` also treats an
  # explicit hash_key=0 as unset — kept for backward compatibility.
  if hash_key:
    indices_out, values_out, shape_out = (
        gen_sparse_feature_cross_op.sparse_feature_cross_v2(
            indices,
            values,
            shapes,
            dense_inputs,
            hashed_output,
            num_buckets,
            hash_key=hash_key,
            out_type=out_type,
            internal_type=internal_type,
            name=name))
  else:
    indices_out, values_out, shape_out = (
        gen_sparse_feature_cross_op.sparse_feature_cross(
            indices,
            values,
            shapes,
            dense_inputs,
            hashed_output,
            num_buckets,
            out_type=out_type,
            internal_type=internal_type,
            name=name))

  return sparse_tensor.SparseTensor(indices_out, values_out, shape_out)
# Feature crossing is a discrete operation; no gradients are defined.
ops.NotDifferentiable("SparseFeatureCross")
ops.NotDifferentiable("SparseFeatureCrossV2")
| apache-2.0 |
thomashaw/SecGen | modules/utilities/unix/audit_tools/ghidra/files/release/Ghidra/Features/Python/data/jython-2.7.1/Lib/pwd.py | 17 | 2724 | """
This module provides access to the Unix password database.
Password database entries are reported as 7-tuples containing the
following items from the password database (see `<pwd.h>'), in order:
pw_name, pw_passwd, pw_uid, pw_gid, pw_gecos, pw_dir, pw_shell. The
uid and gid items are integers, all others are strings. An exception
is raised if the entry asked for cannot be found.
"""
__all__ = ['getpwuid', 'getpwnam', 'getpwall']
try:
    # Jython-specific internals: _posix_impl wraps the native POSIX passwd
    # calls, newStringOrUnicode converts Java strings to Python strings.
    from os import _name, _posix_impl
    from org.python.core.Py import newStringOrUnicode
except:
    # Any failure here means the platform can't support pwd; surface it as
    # the conventional ImportError rather than the underlying exception.
    raise ImportError
import sys
if _name == 'nt':
    raise ImportError, 'pwd module not supported on Windows'
class struct_passwd(tuple):
    """
    pwd.struct_passwd: Results from getpw*() routines.

    Behaves as the 7-tuple
    (pw_name,pw_passwd,pw_uid,pw_gid,pw_gecos,pw_dir,pw_shell)
    while also exposing each item as a named attribute.
    """
    attrs = ['pw_name', 'pw_passwd', 'pw_uid', 'pw_gid', 'pw_gecos',
             'pw_dir', 'pw_shell']

    def __new__(cls, pwd):
        # Convert the Java-side passwd record field by field: strings go
        # through newStringOrUnicode, the ids become plain ints.
        fields = (newStringOrUnicode(pwd.loginName),
                  newStringOrUnicode(pwd.password),
                  int(pwd.UID),
                  int(pwd.GID),
                  newStringOrUnicode(pwd.GECOS),
                  newStringOrUnicode(pwd.home),
                  newStringOrUnicode(pwd.shell))
        return tuple.__new__(cls, fields)

    def __getattr__(self, attr):
        # Map the pw_* attribute names onto tuple positions; anything else
        # is a genuine missing attribute.
        if attr not in self.attrs:
            raise AttributeError
        return self[self.attrs.index(attr)]
def getpwuid(uid):
    """
    getpwuid(uid) -> (pw_name,pw_passwd,pw_uid,
                      pw_gid,pw_gecos,pw_dir,pw_shell)
    Return the password database entry for the given numeric user ID.
    See pwd.__doc__ for more on password database entries.
    """
    # Reject ids the native layer cannot represent.
    if not 0 <= uid <= sys.maxint:
        raise KeyError(uid)
    entry = _posix_impl.getpwuid(uid)
    if entry:
        return struct_passwd(entry)
    raise KeyError(uid)
def getpwnam(name):
    """
    getpwnam(name) -> (pw_name,pw_passwd,pw_uid,
                       pw_gid,pw_gecos,pw_dir,pw_shell)
    Return the password database entry for the given user name.
    See pwd.__doc__ for more on password database entries.
    """
    entry = _posix_impl.getpwnam(name)
    if entry:
        return struct_passwd(entry)
    # No such user in the password database.
    raise KeyError(name)
def getpwall():
    """
    getpwall() -> list_of_entries
    Return a list of all available password database entries,
    in arbitrary order.
    See pwd.__doc__ for more on password database entries.
    """
    entries = []
    # getpwent() walks the database and returns a falsy value when exhausted.
    entry = _posix_impl.getpwent()
    while entry:
        entries.append(struct_passwd(entry))
        entry = _posix_impl.getpwent()
    return entries
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.