code
stringlengths 3
1.05M
| repo_name
stringlengths 5
104
| path
stringlengths 4
251
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 3
1.05M
|
|---|---|---|---|---|---|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt, cint, nowdate
from frappe import throw, _
import frappe.defaults
from frappe.utils import getdate
from erpnext.controllers.buying_controller import BuyingController
from erpnext.accounts.utils import get_account_currency
from frappe.desk.notifications import clear_doctype_notifications
from frappe.model.mapper import get_mapped_doc
from erpnext.buying.utils import check_on_hold_or_closed_status
from erpnext.assets.doctype.asset.asset import get_asset_account, is_cwip_accounting_disabled
from erpnext.assets.doctype.asset_category.asset_category import get_asset_category_account
from six import iteritems
# Jinja templates used by the desk UI to render the child-table grids.
form_grid_templates = {
    "items": "templates/form_grid/item_grid.html"
}
class PurchaseReceipt(BuyingController):
    """Stock-receiving document created against Purchase Orders and
    Material Requests."""

    def __init__(self, *args, **kwargs):
        super(PurchaseReceipt, self).__init__(*args, **kwargs)
        # status_updater drives the framework's generic qty roll-up: each
        # entry maps rows of this document (source) onto rows of a previous
        # document (target), updating target_field and the parent's
        # percentage field.  The second_source_* keys also count Purchase
        # Invoices that update stock directly (no intermediate receipt).
        self.status_updater = [{
            'target_dt': 'Purchase Order Item',
            'join_field': 'purchase_order_item',
            'target_field': 'received_qty',
            'target_parent_dt': 'Purchase Order',
            'target_parent_field': 'per_received',
            'target_ref_field': 'qty',
            'source_dt': 'Purchase Receipt Item',
            'source_field': 'received_qty',
            'second_source_dt': 'Purchase Invoice Item',
            'second_source_field': 'received_qty',
            'second_join_field': 'po_detail',
            'percent_join_field': 'purchase_order',
            'overflow_type': 'receipt',
            'second_source_extra_cond': """ and exists(select name from `tabPurchase Invoice`
        where name=`tabPurchase Invoice Item`.parent and update_stock = 1)"""
        },
        {
            'source_dt': 'Purchase Receipt Item',
            'target_dt': 'Material Request Item',
            'join_field': 'material_request_item',
            'target_field': 'received_qty',
            'target_parent_dt': 'Material Request',
            'target_parent_field': 'per_received',
            'target_ref_field': 'qty',
            'source_field': 'qty',
            'percent_join_field': 'material_request'
        }]
        if cint(self.is_return):
            # For return receipts, also roll negative quantities back onto
            # the original PO row's returned_qty.
            self.status_updater.append({
                'source_dt': 'Purchase Receipt Item',
                'target_dt': 'Purchase Order Item',
                'join_field': 'purchase_order_item',
                'target_field': 'returned_qty',
                'source_field': '-1 * qty',
                'second_source_dt': 'Purchase Invoice Item',
                'second_source_field': '-1 * qty',
                'second_join_field': 'po_detail',
                'extra_cond': """ and exists (select name from `tabPurchase Receipt`
            where name=`tabPurchase Receipt Item`.parent and is_return=1)""",
                'second_source_extra_cond': """ and exists (select name from `tabPurchase Invoice`
            where name=`tabPurchase Invoice Item`.parent and is_return=1 and update_stock=1)"""
            })
def validate(self):
    """Run all Purchase Receipt validations before save/submit."""
    self.validate_posting_time()
    super(PurchaseReceipt, self).validate()
    if self._action=="submit":
        # Batches are only materialised at submit time.
        self.make_batches('warehouse')
    else:
        self.set_status()
    self.po_required()
    self.validate_with_previous_doc()
    self.validate_uom_is_integer("uom", ["qty", "received_qty"])
    self.validate_uom_is_integer("stock_uom", "stock_qty")
    self.check_on_hold_or_closed_status()
    # Receipts cannot be post-dated into the future.
    if getdate(self.posting_date) > getdate(nowdate()):
        throw(_("Posting Date cannot be future date"))
def validate_with_previous_doc(self):
    """Cross-check this receipt against its linked Purchase Order(s):
    supplier/company/currency must match at parent level, and
    project/uom/item_code at row level.  Optionally enforce identical
    rates when Buying Settings 'maintain_same_rate' is on."""
    super(PurchaseReceipt, self).validate_with_previous_doc({
        "Purchase Order": {
            "ref_dn_field": "purchase_order",
            "compare_fields": [["supplier", "="], ["company", "="], ["currency", "="]],
        },
        "Purchase Order Item": {
            "ref_dn_field": "purchase_order_item",
            "compare_fields": [["project", "="], ["uom", "="], ["item_code", "="]],
            "is_child_table": True,
            "allow_duplicate_prev_row_id": True
        }
    })

    # Rate check is skipped for returns (return rates mirror the original).
    if cint(frappe.db.get_single_value('Buying Settings', 'maintain_same_rate')) and not self.is_return:
        self.validate_rate_with_reference_doc([["Purchase Order", "purchase_order", "purchase_order_item"]])
def po_required(self):
    """Block items without a linked Purchase Order when Buying Settings
    makes a PO mandatory."""
    if frappe.db.get_value("Buying Settings", None, "po_required") != 'Yes':
        return
    for item in self.get('items'):
        if not item.purchase_order:
            frappe.throw(_("Purchase Order number required for Item {0}").format(item.item_code))
def get_already_received_qty(self, po, po_detail):
    """Return the qty already received against a PO item through other
    submitted receipts (excluding this document)."""
    received = frappe.db.sql("""select sum(qty) from `tabPurchase Receipt Item`
        where purchase_order_item = %s and docstatus = 1
        and purchase_order=%s
        and parent != %s""", (po_detail, po, self.name))
    if received and flt(received[0][0]):
        return flt(received[0][0])
    return 0.0
def get_po_qty_and_warehouse(self, po_detail):
    """Return (ordered qty, target warehouse) from the Purchase Order Item row."""
    ordered_qty, target_warehouse = frappe.db.get_value(
        "Purchase Order Item", po_detail, ["qty", "warehouse"])
    return ordered_qty, target_warehouse
# Check for Closed status
def check_on_hold_or_closed_status(self):
    """Raise if any linked Purchase Order is On Hold or Closed.

    Each distinct PO is checked only once.
    """
    seen_orders = []
    for item in self.get('items'):
        if not (item.meta.get_field('purchase_order') and item.purchase_order):
            continue
        if item.purchase_order in seen_orders:
            continue
        seen_orders.append(item.purchase_order)
        check_on_hold_or_closed_status('Purchase Order', item.purchase_order)
# on submit
def on_submit(self):
    """Submit hook: authority check, status roll-ups, stock ledger and GL
    postings."""
    super(PurchaseReceipt, self).on_submit()

    # Check for Approving Authority
    frappe.get_doc('Authorization Control').validate_approving_authority(self.doctype,
        self.company, self.base_grand_total)

    self.update_prevdoc_status()
    if flt(self.per_billed) < 100:
        self.update_billing_status()
    else:
        self.status = "Completed"

    # Updating stock ledger should always be called after updating prevdoc status,
    # because updating ordered qty, reserved_qty_for_subcontract in bin
    # depends upon updated ordered qty in PO
    self.update_stock_ledger()

    from erpnext.stock.doctype.serial_no.serial_no import update_serial_nos_after_submit
    update_serial_nos_after_submit(self, "items")

    self.make_gl_entries()
def check_next_docstatus(self):
    """Raise if a submitted Purchase Invoice already references this receipt.

    Bug fix: the original formatted the message with ``self.submit_rv`` — an
    attribute that does not exist — so the guard raised ``AttributeError``
    instead of the intended validation message.  Use the local query result.
    """
    submit_rv = frappe.db.sql("""select t1.name
        from `tabPurchase Invoice` t1,`tabPurchase Invoice Item` t2
        where t1.name = t2.parent and t2.purchase_receipt = %s and t1.docstatus = 1""",
        (self.name))
    if submit_rv:
        frappe.throw(_("Purchase Invoice {0} is already submitted").format(submit_rv[0][0]))
def on_cancel(self):
    """Cancel hook: blocked when a submitted Purchase Invoice references
    this receipt; otherwise reverses status roll-ups, stock ledger and GL."""
    super(PurchaseReceipt, self).on_cancel()

    self.check_on_hold_or_closed_status()
    # Check if Purchase Invoice has been submitted against current Purchase Receipt
    submitted = frappe.db.sql("""select t1.name
        from `tabPurchase Invoice` t1,`tabPurchase Invoice Item` t2
        where t1.name = t2.parent and t2.purchase_receipt = %s and t1.docstatus = 1""",
        self.name)
    if submitted:
        frappe.throw(_("Purchase Invoice {0} is already submitted").format(submitted[0][0]))

    self.update_prevdoc_status()
    self.update_billing_status()

    # Updating stock ledger should always be called after updating prevdoc status,
    # because updating ordered qty in bin depends upon updated ordered qty in PO
    self.update_stock_ledger()
    self.make_gl_entries_on_cancel()
def get_current_stock(self):
    """Refresh current_stock on each supplied item from the supplier
    warehouse's Bin (raw-material stock held at the subcontractor)."""
    if not self.supplier_warehouse:
        return
    for supplied in self.get('supplied_items'):
        bin_rows = frappe.db.sql(
            "select actual_qty from `tabBin` where item_code = %s and warehouse = %s",
            (supplied.rm_item_code, self.supplier_warehouse), as_dict=1)
        qty = flt(bin_rows[0]['actual_qty']) if bin_rows else 0
        supplied.current_stock = qty or 0
def get_gl_entries(self, warehouse_account=None):
    """Build the GL entries for this receipt.

    Per stock item row: debit the warehouse's stock account with the actual
    stock value change (read back from the Stock Ledger Entry) and credit
    Stock Received But Not Billed (SRBNB); additional entries cover landed
    cost, subcontracted raw-material transfer, and any rounding/divisional
    loss.  Valuation-type taxes are then credited cost-center-wise, and
    fixed-asset entries appended via get_asset_gl_entry.

    warehouse_account: map of warehouse -> {"account", "account_currency"}.
    Returns the list of GL dicts after process_gl_map (merging/rounding).
    """
    from erpnext.accounts.general_ledger import process_gl_map
    stock_rbnb = self.get_company_default("stock_received_but_not_billed")
    expenses_included_in_valuation = self.get_company_default("expenses_included_in_valuation")

    gl_entries = []
    warehouse_with_no_account = []
    negative_expense_to_be_booked = 0.0
    stock_items = self.get_stock_items()
    for d in self.get("items"):
        if d.item_code in stock_items and flt(d.valuation_rate) and flt(d.qty):
            if warehouse_account.get(d.warehouse):
                # The true stock value change comes from the SLE, not the
                # row amount (valuation may include taxes/landed cost).
                stock_value_diff = frappe.db.get_value("Stock Ledger Entry",
                    {"voucher_type": "Purchase Receipt", "voucher_no": self.name,
                    "voucher_detail_no": d.name, "warehouse": d.warehouse}, "stock_value_difference")
                if not stock_value_diff:
                    continue

                # Debit stock-in-hand for the warehouse.
                gl_entries.append(self.get_gl_dict({
                    "account": warehouse_account[d.warehouse]["account"],
                    "against": stock_rbnb,
                    "cost_center": d.cost_center,
                    "remarks": self.get("remarks") or _("Accounting Entry for Stock"),
                    "debit": stock_value_diff
                }, warehouse_account[d.warehouse]["account_currency"], item=d))

                # stock received but not billed
                stock_rbnb_currency = get_account_currency(stock_rbnb)
                gl_entries.append(self.get_gl_dict({
                    "account": stock_rbnb,
                    "against": warehouse_account[d.warehouse]["account"],
                    "cost_center": d.cost_center,
                    "remarks": self.get("remarks") or _("Accounting Entry for Stock"),
                    "credit": flt(d.base_net_amount, d.precision("base_net_amount")),
                    "credit_in_account_currency": flt(d.base_net_amount, d.precision("base_net_amount")) \
                        if stock_rbnb_currency==self.company_currency else flt(d.net_amount, d.precision("net_amount"))
                }, stock_rbnb_currency, item=d))

                # Valuation taxes on this row are booked later, cost-center-wise.
                negative_expense_to_be_booked += flt(d.item_tax_amount)

                # Amount added through landed-cost-voucher
                if flt(d.landed_cost_voucher_amount):
                    gl_entries.append(self.get_gl_dict({
                        "account": expenses_included_in_valuation,
                        "against": warehouse_account[d.warehouse]["account"],
                        "cost_center": d.cost_center,
                        "remarks": self.get("remarks") or _("Accounting Entry for Stock"),
                        "credit": flt(d.landed_cost_voucher_amount),
                        "project": d.project
                    }, item=d))

                # sub-contracting warehouse: credit raw material moved out
                if flt(d.rm_supp_cost) and warehouse_account.get(self.supplier_warehouse):
                    gl_entries.append(self.get_gl_dict({
                        "account": warehouse_account[self.supplier_warehouse]["account"],
                        "against": warehouse_account[d.warehouse]["account"],
                        "cost_center": d.cost_center,
                        "remarks": self.get("remarks") or _("Accounting Entry for Stock"),
                        "credit": flt(d.rm_supp_cost)
                    }, warehouse_account[self.supplier_warehouse]["account_currency"], item=d))

                # divisional loss adjustment: difference between the document's
                # computed valuation and the SLE's stock value change.
                valuation_amount_as_per_doc = flt(d.base_net_amount, d.precision("base_net_amount")) + \
                    flt(d.landed_cost_voucher_amount) + flt(d.rm_supp_cost) + flt(d.item_tax_amount)

                divisional_loss = flt(valuation_amount_as_per_doc - stock_value_diff,
                    d.precision("base_net_amount"))

                if divisional_loss:
                    if self.is_return or flt(d.item_tax_amount):
                        loss_account = expenses_included_in_valuation
                    else:
                        loss_account = stock_rbnb

                    gl_entries.append(self.get_gl_dict({
                        "account": loss_account,
                        "against": warehouse_account[d.warehouse]["account"],
                        "cost_center": d.cost_center,
                        "remarks": self.get("remarks") or _("Accounting Entry for Stock"),
                        "debit": divisional_loss,
                        "project": d.project
                    }, stock_rbnb_currency, item=d))

            # NOTE(review): this condition is almost always true (an `or` of
            # two not-in tests) and only d.warehouse is appended — looks like
            # it should also consider appending d.rejected_warehouse; confirm.
            elif d.warehouse not in warehouse_with_no_account or \
                d.rejected_warehouse not in warehouse_with_no_account:
                    warehouse_with_no_account.append(d.warehouse)

    self.get_asset_gl_entry(gl_entries, expenses_included_in_valuation)
    # Cost center-wise amount breakup for other charges included for valuation
    valuation_tax = {}
    for tax in self.get("taxes"):
        if tax.category in ("Valuation", "Valuation and Total") and flt(tax.base_tax_amount_after_discount_amount):
            if not tax.cost_center:
                frappe.throw(_("Cost Center is required in row {0} in Taxes table for type {1}").format(tax.idx, _(tax.category)))
            valuation_tax.setdefault(tax.cost_center, 0)
            valuation_tax[tax.cost_center] += \
                (tax.add_deduct_tax == "Add" and 1 or -1) * flt(tax.base_tax_amount_after_discount_amount)

    if negative_expense_to_be_booked and valuation_tax:
        # Backward compatibility:
        # If expenses_included_in_valuation account has been credited in against PI
        # and charges added via Landed Cost Voucher,
        # post valuation related charges on "Stock Received But Not Billed"
        negative_expense_booked_in_pi = frappe.db.sql("""select name from `tabPurchase Invoice Item` pi
            where docstatus = 1 and purchase_receipt=%s
            and exists(select name from `tabGL Entry` where voucher_type='Purchase Invoice'
                and voucher_no=pi.parent and account=%s)""", (self.name, expenses_included_in_valuation))

        if negative_expense_booked_in_pi:
            expenses_included_in_valuation = stock_rbnb

        against_account = ", ".join([d.account for d in gl_entries if flt(d.debit) > 0])
        total_valuation_amount = sum(valuation_tax.values())
        amount_including_divisional_loss = negative_expense_to_be_booked
        i = 1
        # Apportion the total valuation tax to each cost center pro rata;
        # the last one takes the remainder so the total balances exactly.
        for cost_center, amount in iteritems(valuation_tax):
            if i == len(valuation_tax):
                applicable_amount = amount_including_divisional_loss
            else:
                applicable_amount = negative_expense_to_be_booked * (amount / total_valuation_amount)
                amount_including_divisional_loss -= applicable_amount

            gl_entries.append(
                self.get_gl_dict({
                    "account": expenses_included_in_valuation,
                    "cost_center": cost_center,
                    "credit": applicable_amount,
                    "remarks": self.remarks or _("Accounting Entry for Stock"),
                    "against": against_account
                })
            )

            i += 1

    if warehouse_with_no_account:
        frappe.msgprint(_("No accounting entries for the following warehouses") + ": \n" +
            "\n".join(warehouse_with_no_account))

    return process_gl_map(gl_entries)
def get_asset_gl_entry(self, gl_entries, expenses_included_in_valuation=None):
    """Append GL entries for fixed-asset item rows.

    Unless CWIP accounting is disabled: debit Capital Work In Progress and
    credit Asset Received But Not Billed for the asset amount (net amount
    plus valuation taxes).  Landed-cost amounts are additionally moved from
    Expenses Included In Valuation into the asset (or CWIP) account, and the
    linked Asset's purchase amounts are bumped accordingly.
    """
    arbnb_account, cwip_account = None, None

    cwip_disabled = is_cwip_accounting_disabled()

    if not expenses_included_in_valuation:
        expenses_included_in_valuation = self.get_company_default("expenses_included_in_valuation")

    for d in self.get("items"):
        # NOTE(review): both accounts are resolved once, from the FIRST fixed
        # asset row — if later rows belong to a different asset category the
        # cached cwip_account may be wrong for them; confirm intent.
        if d.is_fixed_asset and not (arbnb_account and cwip_account):
            arbnb_account = self.get_company_default("asset_received_but_not_billed")

            # CWIP entry
            cwip_account = get_asset_account("capital_work_in_progress_account", d.asset,
                company = self.company)

        if d.is_fixed_asset and not cwip_disabled:
            # Asset amount includes valuation taxes (converted for the
            # document currency figure).
            asset_amount = flt(d.net_amount) + flt(d.item_tax_amount/self.conversion_rate)
            base_asset_amount = flt(d.base_net_amount + d.item_tax_amount)

            cwip_account_currency = get_account_currency(cwip_account)
            gl_entries.append(self.get_gl_dict({
                "account": cwip_account,
                "against": arbnb_account,
                "cost_center": d.cost_center,
                "remarks": self.get("remarks") or _("Accounting Entry for Asset"),
                "debit": base_asset_amount,
                "debit_in_account_currency": (base_asset_amount
                    if cwip_account_currency == self.company_currency else asset_amount)
            }, item=d))

            # Asset received but not billed
            asset_rbnb_currency = get_account_currency(arbnb_account)
            gl_entries.append(self.get_gl_dict({
                "account": arbnb_account,
                "against": cwip_account,
                "cost_center": d.cost_center,
                "remarks": self.get("remarks") or _("Accounting Entry for Asset"),
                "credit": base_asset_amount,
                "credit_in_account_currency": (base_asset_amount
                    if asset_rbnb_currency == self.company_currency else asset_amount)
            }, item=d))

        if d.is_fixed_asset and flt(d.landed_cost_voucher_amount):
            # With CWIP disabled the landed cost lands directly on the fixed
            # asset account, otherwise on CWIP.
            asset_account = (get_asset_category_account(d.asset, 'fixed_asset_account',
                company = self.company) if cwip_disabled else cwip_account)

            gl_entries.append(self.get_gl_dict({
                "account": expenses_included_in_valuation,
                "against": asset_account,
                "cost_center": d.cost_center,
                "remarks": self.get("remarks") or _("Accounting Entry for Stock"),
                "credit": flt(d.landed_cost_voucher_amount),
                "project": d.project
            }, item=d))

            gl_entries.append(self.get_gl_dict({
                "account": asset_account,
                "against": expenses_included_in_valuation,
                "cost_center": d.cost_center,
                "remarks": self.get("remarks") or _("Accounting Entry for Stock"),
                "debit": flt(d.landed_cost_voucher_amount),
                "project": d.project
            }, item=d))

            if d.asset:
                # Keep the Asset's purchase figures in sync with landed cost.
                doc = frappe.get_doc("Asset", d.asset)
                frappe.db.set_value("Asset", d.asset, "gross_purchase_amount",
                    doc.gross_purchase_amount + flt(d.landed_cost_voucher_amount))

                frappe.db.set_value("Asset", d.asset, "purchase_receipt_amount",
                    doc.purchase_receipt_amount + flt(d.landed_cost_voucher_amount))

    return gl_entries
def update_status(self, status):
    """Force-set the document status, push a realtime update to open forms,
    and refresh the desk's open-count notifications."""
    self.set_status(update=True, status = status)
    self.notify_update()
    clear_doctype_notifications(self)
def update_billing_status(self, update_modified=True):
    """Recompute billed amounts for this receipt and any sibling receipts
    affected by FIFO distribution of invoices billed directly against the
    shared Purchase Order, then refresh each receipt's billing percentage."""
    updated_pr = [self.name]
    for d in self.get("items"):
        if d.purchase_order_item:
            updated_pr += update_billed_amount_based_on_po(d.purchase_order_item, update_modified)

    for pr in set(updated_pr):
        pr_doc = self if (pr == self.name) else frappe.get_doc("Purchase Receipt", pr)
        pr_doc.update_billing_percentage(update_modified=update_modified)

    # Re-read this document so in-memory state reflects the db writes above.
    self.load_from_db()
def update_billed_amount_based_on_po(po_detail, update_modified=True):
    """Distribute invoice amounts billed directly against a Purchase Order
    row onto the Purchase Receipt rows made from it (FIFO by posting date),
    writing billed_amt on each PR row.  Returns the names of the affected
    Purchase Receipts."""
    # Amount billed directly against the Purchase Order (no PR reference)
    billed_against_po = frappe.db.sql("""select sum(amount) from `tabPurchase Invoice Item`
        where po_detail=%s and (pr_detail is null or pr_detail = '') and docstatus=1""", po_detail)
    billed_against_po = billed_against_po and billed_against_po[0][0] or 0

    # Get all Purchase Receipt Item rows made against the Purchase Order Item row
    pr_details = frappe.db.sql("""select pr_item.name, pr_item.amount, pr_item.parent
        from `tabPurchase Receipt Item` pr_item, `tabPurchase Receipt` pr
        where pr.name=pr_item.parent and pr_item.purchase_order_item=%s
            and pr.docstatus=1 and pr.is_return = 0
        order by pr.posting_date asc, pr.posting_time asc, pr.name asc""", po_detail, as_dict=1)

    updated_pr = []
    for pr_item in pr_details:
        # Get billed amount directly against Purchase Receipt
        billed_amt_agianst_pr = frappe.db.sql("""select sum(amount) from `tabPurchase Invoice Item`
            where pr_detail=%s and docstatus=1""", pr_item.name)
        billed_amt_agianst_pr = billed_amt_agianst_pr and billed_amt_agianst_pr[0][0] or 0

        # Distribute billed amount directly against PO between PRs based on FIFO
        if billed_against_po and billed_amt_agianst_pr < pr_item.amount:
            pending_to_bill = flt(pr_item.amount) - billed_amt_agianst_pr
            if pending_to_bill <= billed_against_po:
                billed_amt_agianst_pr += pending_to_bill
                billed_against_po -= pending_to_bill
            else:
                billed_amt_agianst_pr += billed_against_po
                billed_against_po = 0

        frappe.db.set_value("Purchase Receipt Item", pr_item.name, "billed_amt", billed_amt_agianst_pr, update_modified=update_modified)

        updated_pr.append(pr_item.parent)

    return updated_pr
@frappe.whitelist()
def make_purchase_invoice(source_name, target_doc=None):
    """Map a submitted Purchase Receipt to a draft Purchase Invoice.

    Only the still-uninvoiced portion of each row is carried over, further
    reduced by quantities already returned against this receipt.

    Bug fix: in ``get_pending_qty`` the original zeroed ``pending_qty``
    BEFORE subtracting it from ``returned_qty`` (so it subtracted 0 and the
    returned pool was never consumed across rows).  The subtraction now
    happens first.
    """
    from frappe.model.mapper import get_mapped_doc
    doc = frappe.get_doc('Purchase Receipt', source_name)
    returned_qty_map = get_returned_qty_map(source_name)
    invoiced_qty_map = get_invoiced_qty_map(source_name)

    def set_missing_values(source, target):
        if len(target.get("items")) == 0:
            frappe.throw(_("All items have already been invoiced"))

        doc = frappe.get_doc(target)
        doc.ignore_pricing_rule = 1
        doc.run_method("onload")
        doc.run_method("set_missing_values")
        doc.run_method("calculate_taxes_and_totals")

    def update_item(source_doc, target_doc, source_parent):
        target_doc.qty, returned_qty = get_pending_qty(source_doc)
        # carry the remaining returned pool forward for later rows of the item
        returned_qty_map[source_doc.item_code] = returned_qty

    def get_pending_qty(item_row):
        # qty not yet invoiced against this PR row
        pending_qty = item_row.qty - invoiced_qty_map.get(item_row.name, 0)
        returned_qty = flt(returned_qty_map.get(item_row.item_code, 0))
        if returned_qty:
            if returned_qty >= pending_qty:
                # consume pending from the returned pool before zeroing it
                returned_qty -= pending_qty
                pending_qty = 0
            else:
                pending_qty -= returned_qty
                returned_qty = 0
        return pending_qty, returned_qty

    doclist = get_mapped_doc("Purchase Receipt", source_name, {
        "Purchase Receipt": {
            "doctype": "Purchase Invoice",
            "field_map": {
                "supplier_warehouse": "supplier_warehouse",
                "is_return": "is_return"
            },
            "validation": {
                "docstatus": ["=", 1],
            },
        },
        "Purchase Receipt Item": {
            "doctype": "Purchase Invoice Item",
            "field_map": {
                "name": "pr_detail",
                "parent": "purchase_receipt",
                "purchase_order_item": "po_detail",
                "purchase_order": "purchase_order",
                "is_fixed_asset": "is_fixed_asset",
                "asset": "asset",
            },
            "postprocess": update_item,
            # exclude fully-invoiced rows (returns invert the test)
            "filter": lambda d: get_pending_qty(d)[0] <= 0 if not doc.get("is_return") else get_pending_qty(d)[0] > 0
        },
        "Purchase Taxes and Charges": {
            "doctype": "Purchase Taxes and Charges",
            "add_if_empty": True
        }
    }, target_doc, set_missing_values)

    return doclist
def get_invoiced_qty_map(purchase_receipt):
    """returns a map: {pr_detail: invoiced_qty}"""
    rows = frappe.db.sql("""select pr_detail, qty from `tabPurchase Invoice Item`
        where purchase_receipt=%s and docstatus=1""", purchase_receipt)

    invoiced_qty_map = {}
    for pr_detail, qty in rows:
        invoiced_qty_map[pr_detail] = invoiced_qty_map.get(pr_detail) or 0
        invoiced_qty_map[pr_detail] += qty

    return invoiced_qty_map
def get_returned_qty_map(purchase_receipt):
    """returns a map: {item_code: returned_qty} — total absolute qty returned
    per item across all submitted return receipts made against
    *purchase_receipt*."""
    returned_qty_map = frappe._dict(frappe.db.sql("""select pr_item.item_code, sum(abs(pr_item.qty)) as qty
        from `tabPurchase Receipt Item` pr_item, `tabPurchase Receipt` pr
        where pr.name = pr_item.parent
            and pr.docstatus = 1
            and pr.is_return = 1
            and pr.return_against = %s
        group by pr_item.item_code
    """, purchase_receipt))

    return returned_qty_map
@frappe.whitelist()
def make_purchase_return(source_name, target_doc=None):
    """Create a return Purchase Receipt (negative quantities) against
    *source_name*."""
    from erpnext.controllers.sales_and_purchase_return import make_return_doc
    return_doc = make_return_doc("Purchase Receipt", source_name, target_doc)
    return return_doc
@frappe.whitelist()
def update_purchase_receipt_status(docname, status):
    """Whitelisted endpoint: set the status of a Purchase Receipt."""
    receipt = frappe.get_doc("Purchase Receipt", docname)
    receipt.update_status(status)
@frappe.whitelist()
def make_stock_entry(source_name, target_doc=None):
    """Map a Purchase Receipt to a Material Transfer Stock Entry, with the
    receipt warehouse becoming the source warehouse."""
    def set_missing_values(source, target):
        target.stock_entry_type = "Material Transfer"
        target.purpose = "Material Transfer"

    mapping = {
        "Purchase Receipt": {
            "doctype": "Stock Entry",
        },
        "Purchase Receipt Item": {
            "doctype": "Stock Entry Detail",
            "field_map": {
                "warehouse": "s_warehouse",
                "parent": "reference_purchase_receipt"
            },
        },
    }
    return get_mapped_doc("Purchase Receipt", source_name, mapping,
        target_doc, set_missing_values)
|
libracore/erpnext
|
erpnext/stock/doctype/purchase_receipt/purchase_receipt.py
|
Python
|
gpl-3.0
| 22,560
|
# -*- coding: utf-8 -*-
"""
Created on Thu May 29 12:28:08 2014
@author: david
"""
import numpy as np
import sys
import os
from osgeo import gdal
from osgeo import ogr
from osgeo import osr
from osgeo import gdal_array
from osgeo import gdalconst
from scipy.ndimage import filters
class Empty_Grid():
    """Allocate an empty raster of pix-sized cells covering the given extent,
    plus matching x/y coordinate axes, trimming/padding the axes so their
    lengths match the array dimensions (Python 2 code)."""
    def __init__(self, minx, maxx, miny, maxy, pix):
        self.rows = int((maxy-miny)/pix)
        print maxx,minx
        print 'rows', self.rows
        self.cols = int((maxx-minx)/pix)
        print 'cols',self.cols
        self.empty = np.zeros((self.rows,self.cols))
        print 'Array dimensions', self.empty.shape
        # cell-edge coordinates; arange lengths can disagree with the int()
        # row/col counts by one, hence the reconciliation below
        self.x_vals = np.arange(minx,maxx,pix)
        self.y_vals = np.arange(miny,maxy,pix)
        print self.y_vals.shape, self.x_vals.shape
        if not self.empty[1].shape == self.x_vals.shape:
            if self.empty[1].shape < self.x_vals.shape:
                print 'x empty < xvals'
                # trim surplus x coordinates
                diff = self.empty[1].shape[0]-self.x_vals.shape[0]
                self.x_vals = self.x_vals[0:-diff]
            # NOTE(review): compares an int to a tuple — under Python 2 this
            # is always False for positive ints vs tuples?  Confirm; likely
            # meant self.empty.shape[1] > self.x_vals.shape[0].
            if self.empty.shape[1] > self.x_vals.shape:
                print 'x empty > xvals'
                diff = self.empty[1].shape[0]-self.x_vals.shape[0]
                newmax = self.x_vals[-1]+(diff*pix)
                # NOTE(review): np.append takes (arr, values) as two args —
                # passing one tuple raises TypeError if this branch runs;
                # compare the np.hstack call in the y branch below.
                self.x_vals = np.append((self.x_vals,np.arange(self.x_vals[-1],newmax,pix)))
        if not self.empty[0].shape[0] == self.y_vals.shape[0]:
            if self.empty[0].shape[0] < self.y_vals.shape[0]:
                print 'y empty < yvals'
                print self.empty[0].shape, self.y_vals.shape
                diff = self.empty.shape[0]-self.y_vals.shape[0]
                self.y_vals = self.y_vals[0:-diff]
            if self.empty[0].shape > self.y_vals.shape:
                print 'y empty > yvals'
                diff = self.empty[0].shape[0] - self.y_vals.shape[0]
                print self.y_vals.shape[0], self.empty[0].shape[0]
                print diff
                newmax = self.y_vals[-1]+(diff*pix)
                # NOTE(review): `y_vals` is undefined here (missing `self.`)
                # — NameError if this branch runs; confirm.
                print y_vals[-1],newmax
                self.y_vals = np.hstack((self.y_vals,np.arange(self.y_vals[-1],newmax,pix)))
class Grid_Data():
    """Rasterise scattered (x, y, z) points onto a regular grid.

    Cells containing points take the single point's z (or the mean of
    several); empty cells are filled by inverse-distance weighting over a
    search radius of ``rad`` cells, or -999 where no neighbours exist.
    Result is stored in ``self.gridded`` (Python 2 code).
    """
    def __init__(self, points, pix, rad):
        # grid extent from the data bounding box
        minx = np.min(points[:,0])
        maxx = np.max(points[:,0])
        miny = np.min(points[:,1])
        maxy = np.max(points[:,1])
        grid = Empty_Grid(minx,maxx,miny,maxy,pix)
        print 'shapes:',grid.empty.shape, grid.x_vals.shape, grid.y_vals.shape
        #instantiate counters
        direct = 0
        null = 0
        interpolated = 0
        row_ref = 0
        self.gridded = grid.empty
        for row in grid.y_vals:
            col_ref = 0
            #define the minimum & maximum coordinates of the row
            cellymin = row
            cellymax = row+pix
            #define the centre of the cell
            cell_centy = cellymin+(pix/2)
            #use this to define search radius for interpolation later
            rad_miny = cell_centy-(pix*rad)
            rad_maxy = cell_centy+(pix*rad)
            # use this to find points along the row within the radius
            # this will constrain the search space for later
            rad_row_idx = np.where((points[:,1]>=rad_miny)&
                                   (points[:,1]<=rad_maxy))
            #slice
            rad_row_pts = points[rad_row_idx]
            # find points coincident with the cells in the row
            #doing this with the search-radius subset to keep everything efficient
            row_idx = np.where((rad_row_pts[:,1]>=cellymin)&
                               (rad_row_pts[:,1]<=cellymax))
            #slice
            row_pts = rad_row_pts[row_idx]
            # iterate through the columns at each y value
            for column in grid.x_vals:
                # define the boundaries of the cell
                cellxmin = column
                cellxmax = column+pix
                #find points coincident with cell
                col_idx = np.where((row_pts[:,0]>=cellxmin)&
                                   (row_pts[:,0]<=cellxmax))
                # create an array of z values in each cell
                col_pts = row_pts[col_idx]
                cell_zvals = col_pts[:,2]
                # get the shape of this
                cell_znum = cell_zvals.shape[0]
                # if there's only one point that becomes the z value of the cell
                if cell_znum == 1:
                    cell_z = cell_zvals[0]
                    direct = direct+1
                    #method = 1
                # if there's more than one point z = the mean of the points
                elif cell_znum >1:
                    cell_z = np.mean(cell_zvals)
                    direct = direct+1
                    #method = 2
                # if there's no points...
                else:
                    # find the centre of the cell
                    cell_centx = cellxmin+(pix/2)
                    # define a search radius
                    rad_minx = cell_centx-(pix*rad)
                    rad_maxx = cell_centx+(pix*rad)
                    # find the lidar points within the search radius
                    rad_points = np.where((rad_row_pts[:,0]>=rad_minx)&
                                          (rad_row_pts[:,0]<=rad_maxx))
                    rad_zs = rad_row_pts[rad_points]
                    # work out how many points fall within this
                    rad_num = rad_zs.shape[0]
                    #if no points within the radius cell value = no data
                    if rad_num == 0:
                        cell_z = -999
                        null = null+1
                        #method = 0
                    #otherwise pass the points to be interpolated using idw
                    else:
                        cell_z = self.interpolate_idw(rad_points,
                                                      cellxmin,
                                                      cellymin,
                                                      rad_row_pts)
                        interpolated = interpolated+1
                        #method = 3
                #print row_ref,col_ref
                self.gridded[row_ref,col_ref] = cell_z
                #sys.stdout.write("\r\Direct %s Interpolated %s Null %s" %(direct, interpolated, null))
                #sys.stdout.flush()
                col_ref = col_ref+1
            row_ref = row_ref+1
            #stack z value into columns along row

    def distance_matrix(self,rad_points, column, row, flightline):
        """Distances from each candidate point to the cell corner (column, row)."""
        #slice the input points using the indices of points in the search radius
        points = flightline[rad_points]
        #make a matrix
        cell = np.vstack((column,row)).T
        #unfuncs....
        d0 = np.subtract.outer(points[:,0], cell[:,0])
        d1 = np.subtract.outer(points[:,1], cell[:,1])
        #return distance between points and points
        return np.hypot(d0,d1)

    def interpolate_idw(self,rad_points, column, row, flightline):
        """Inverse-distance-weighted z estimate for an empty cell.

        NOTE(review): a candidate point exactly at the cell corner gives a
        zero distance and hence a division-by-zero weight — confirm inputs
        exclude that case (cells with coincident points never reach here).
        """
        #make the distance matrix
        d = self.distance_matrix(rad_points, column, row, flightline)
        #slice the points
        points = flightline[rad_points]
        #define distance weights using the distance matrix
        weights = 1.0/d
        #divide and resassign weights using average
        weights /= weights.sum(axis=0)
        #matrix multiplication
        cell_z = np.dot(weights.T, points[:,2])
        return cell_z
class WriteImage():
    """Write a 2-D (single band) or 3-D (multi-band) array to a georeferenced
    GeoTIFF via GDAL, flipping rows so the array's first row becomes the
    image's top (maxy).  Nodata is -999 (Python 2 code)."""
    def __init__(self,
                 outpath,
                 outname,
                 xsize,
                 ysize,
                 array,
                 minx,
                 maxy,
                 pix):
        print 'call to WriteImage'
        # raster origin is the top-left corner, so flip row order
        data_out = np.flipud(array)
        print 'ROWS,COLS,BANDS',array.shape
        print 'Call to write image'
        os.chdir(outpath)
        print 'OUTPATH',outpath
        print 'OUTNAME',outname
        os.chdir(outpath)
        #load the driver for the format of choice
        driver = gdal.GetDriverByName("Gtiff")
        #create an empty output file
        #get the number of bands we'll need:
        if len(array.shape) == 2:
            bands = 1
        elif len(array.shape)==3:
            bands = array.shape[2]
        else:
            print ('ERROR: Input array dimensions are crazy!')
            raise Exception()
        print 'BANDS OUT', bands
        #file name, x columns, y columns, bands, dtype
        out = driver.Create(outname, array.shape[1], array.shape[0], bands, gdal.GDT_Float32)
        #define the location using coords of top-left corner
        # minimum x, e-w pixel size, rotation, maximum y, n-s pixel size, rotation
        out.SetGeoTransform((minx,pix,0,maxy,0,0-pix))
        srs = osr.SpatialReference()
        #get the coordinate system using the EPSG code
        srs.SetWellKnownGeogCS("EPSG:4277")
        #set projection of output file
        out.SetProjection(srs.ExportToWkt())
        band = 1
        if bands == 1:
            out.GetRasterBand(band).WriteArray(data_out)
            #set the no data value
            out.GetRasterBand(band).SetNoDataValue(-999)
            #apend the statistics to dataset
            out.GetRasterBand(band).GetStatistics(0,1)
            print 'Saving %s/%s' % (band,bands)
        else:
            while (band<=bands):
                data = data_out[:,:,band-1]
                #write values to empty array
                out.GetRasterBand(band).WriteArray( data )
                #set the no data value
                out.GetRasterBand(band).SetNoDataValue(-999)
                #apend the statistics to dataset
                out.GetRasterBand(band).GetStatistics(0,1)
                print 'Saving %s/%s' % (band,bands)
                band = band+1
        # closing the dataset flushes it to disk
        out = None
        print 'Processing of %s complete' % (outname)
if __name__ == "__main__":
    # Batch driver: grid every CSV in the working directory and write one
    # multi-band GeoTIFF per input into ./output.
    # NOTE(review): os.path.abspath('...') resolves like '.', so dir_path is
    # simply the current working directory — confirm that is intended.
    dir_path = os.path.dirname(os.path.abspath('...'))
    output_path = os.path.join(dir_path, 'output')
    if not os.path.exists(output_path):
        os.mkdir(output_path)
    pix = 0.5   # output cell size (map units)
    rad = 2     # IDW search radius, in cells
    for file in os.listdir(dir_path):
        if not os.path.isdir(file):
            points = np.genfromtxt(file, delimiter=',')
            print 'DATA DIMENSIONS', points.shape
            name = file[:-4]
            print name
            if points.shape[1]>=3:
                x_vals = points[:,0]
                y_vals = points[:,1]
                # NOTE(review): starting at column 3 skips column 2, the
                # first z column — and a 3-column file produces no bands at
                # all (so `array` below would be unbound).  Likely meant
                # range(2, points.shape[1]); confirm.
                for col in range(3, points.shape[1]):
                    data = np.column_stack((x_vals,y_vals,points[:,col]))
                    print data.shape
                    interpolate = Grid_Data(data,pix,rad)
                    # light gaussian smoothing of each gridded band
                    filtered = filters.gaussian_filter(interpolate.gridded,3)
                    print filtered.shape
                    if col == 3:
                        array = filtered
                    elif col>3:
                        array = np.dstack((array,filtered))
                print 'array',array.shape
                image = WriteImage(output_path,
                                   name,
                                   array.shape[1],
                                   array.shape[0],
                                   array,
                                   np.min(x_vals),
                                   np.max(y_vals),
                                   pix)
            # WriteImage chdirs into the output folder; restore so the next
            # relative genfromtxt(file) still resolves.
            os.chdir(dir_path)
|
dav-stott/phd-thesis
|
fw_gridding_idw.py
|
Python
|
mit
| 12,343
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, import-self, len-as-condition, unused-argument, too-many-lines
"""ONNX: Open Neural Network Exchange frontend for Relay."""
from __future__ import absolute_import as _abs
import logging
import numpy as np
import tvm
from ... import nd as _nd
from .. import analysis
from .. import transform as _transform
from .. import expr as _expr
from .. import module as _module
from .. import op as _op
from .common import AttrCvt, Renamer
from .common import get_relay_op, new_var, infer_shape, infer_channels, get_name
__all__ = ['from_onnx']
def dimension_picker(prefix, surfix=''):
    """Return a resolver that maps a kernel rank to a concrete op name.

    The returned callable inspects ``attr['kernel_shape']`` and produces
    ``prefix + '2d' + surfix`` for 2-D kernels; any other rank raises
    ``tvm.error.OpAttributeInvalid``.
    """
    def _impl(attr):
        if len(attr['kernel_shape']) == 2:
            return '{}2d{}'.format(prefix, surfix)
        msg = 'Only 2D kernels are supported for operator {}.'
        raise tvm.error.OpAttributeInvalid(msg.format(prefix + '2d'))

    return _impl
def revert_caffe2_pad(pads):
    """Caffe2 requires two times the normal padding; keep only the first
    half of a 4-element pad list, pass 2-element pads through unchanged."""
    count = len(pads)
    if count == 4:
        return pads[:2]
    if count == 2:
        return pads
    raise tvm.error.OpAttributeInvalid(
        'Number of pads must be either 2 or 4.')
def onnx_storage_order2layout(storage_order):
    """converter of onnx storage order parameter to tvm storage order format

    Bug fix: the original returned via the misspelled name ``sotrage_order``,
    raising NameError for every valid (0/1) input.
    """
    if storage_order not in (0, 1):
        raise tvm.error.OpAttributeInvalid('Mode of storage_order must be either 0 or 1')

    return 'NCHW' if storage_order == 0 else 'NHWC'
def dimension_constraint():
    """Return a (checker, message) pair enforcing 2-D kernel_shape attrs."""
    def _dim_check(attrs):
        return len(attrs['kernel_shape']) == 2

    return _dim_check, "Only 2d kernel supported."
class OnnxOpConverter(object):
    """ A helper class for holding onnx op converters.
    """

    @classmethod
    def get_converter(cls, opset):
        """ Get converter matches given opset.

        Parameters
        ----------
        opset: int
            opset from model.

        Returns
        -------
        converter, which should be `_impl_vx`. Number x is the biggest
        number smaller than or equal to opset belongs to all support versions.
        """
        # collect every opset version this class implements (_impl_vN methods)
        versions = [
            int(d.replace('_impl_v', '')) for d in dir(cls) if '_impl_v' in d
        ]
        # insert the requested opset into the sorted version list, then step
        # back one slot from its (last) position — that lands on the greatest
        # implemented version <= opset (or on opset itself when implemented).
        versions = sorted(versions + [opset])
        version = versions[
            max([i for i, v in enumerate(versions) if v == opset]) - 1]
        if hasattr(cls, '_impl_v{}'.format(version)):
            return getattr(cls, '_impl_v{}'.format(version))
        raise NotImplementedError(
            'opset version {} of {} not implemented'.format(
                version, cls.__name__))
class Elemwise(OnnxOpConverter):
    """ A helper class for elemwise op converters.

    Subclasses set ``name`` to the relay op to invoke (e.g. 'add').
    """
    name = ''
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        assert len(inputs) == 2, "Math op take 2 inputs, {} given".format(
            len(inputs))
        op_name = cls.name
        conv_ops = ["conv2d", "conv2d_transpose"]
        if attr.get('broadcast', 0) and any(x in str(inputs[0]) for x in conv_ops):
            # TODO(zhreshold): remove hard coded infershape
            # Legacy ONNX 'broadcast'+'axis': insert two new axes into the
            # RHS at `axis` so it lines up with the conv output.
            # NOTE(review): detecting a conv LHS via substring match on
            # str(inputs[0]) is fragile — presumably works because the
            # textual form of the expression names the op; confirm.
            axis = int(attr.get('axis', 0))
            inputs[1] = _op.expand_dims(inputs[1], axis=axis, num_newaxis=2)
        return get_relay_op(op_name)(*inputs)
class Pool(OnnxOpConverter):
    """ A helper class for pool op converters.

    Subclasses set ``name`` to the relay pool prefix (e.g. 'max_pool');
    dimension_picker appends '2d'.
    """
    name = ''
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        return AttrCvt(
            op_name=dimension_picker(cls.name),
            transforms={
                'kernel_shape': 'pool_size',
                'pads': ('padding', (0, 0), revert_caffe2_pad)
            },
            # very weird attributes here in onnx, force check
            ignores=['dilations', 'auto_pad'],
            # TODO(zhreshold): make sure ceil_mode in onnx, and layout?
            extras={'ceil_mode': False},
            custom_check=dimension_constraint())(inputs, attr, params)
class Absolute(OnnxOpConverter):
    """ Operator converter for Absolute.
    """
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # |x| == relu(x) + relu(-x), avoiding a dedicated abs op.
        data = inputs[0]
        return _op.nn.relu(data) + _op.nn.relu(_op.negative(data))
class Add(Elemwise):
    """ Operator converter for Add (elementwise, via Elemwise).
    """
    name = 'add'
class AveragePool(Pool):
    """ Operator converter for AveragePool (2-D only, via Pool).
    """
    name = 'avg_pool'
class BatchNorm(OnnxOpConverter):
    """ Operator converter for BatchNorm.
    """
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # TODO(zhreshold): 'spatial' is not properly handled here.
        # relay batch_norm returns a tuple; only the normalized output
        # (index 0) is forwarded, the running stats are dropped.
        out = AttrCvt(
            op_name='batch_norm',
            ignores=['spatial', 'is_test', 'consumed_inputs', 'momentum'])(inputs, attr,
                                                                          params)
        return out[0]
class Conv(OnnxOpConverter):
    """ Operator converter for Conv.
    """
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # inputs: [data, weight] or [data, weight, bias]; only the first two
        # go through AttrCvt, the optional bias is added afterwards.
        out = AttrCvt(op_name=dimension_picker('conv'),
                      transforms={
                          'kernel_shape': 'kernel_size',
                          'dilations': ('dilation', (0, 0)),
                          'pads': ('padding', (0, 0), revert_caffe2_pad),
                          'group': ('groups', 1)},
                      ignores=['auto_pad'],
                      custom_check=dimension_constraint())(inputs[:2], attr, params)
        use_bias = len(inputs) == 3
        if use_bias:
            out = _op.nn.bias_add(out, inputs[2])
        return out
class ConvTranspose(OnnxOpConverter):
    """ Operator converter for ConvTranspose.
    """
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # get number of channels
        channels = infer_channels(inputs[1], True)
        attr['channels'] = channels
        groups = attr.pop('group')
        attr['groups'] = groups
        out = AttrCvt(
            op_name=dimension_picker('conv', '_transpose'),
            transforms={
                'kernel_shape': 'kernel_size',
                'dilations': ('dilation', (0, 0)),
                'pads': ('padding', (0, 0), revert_caffe2_pad)
            },
            disables=['output_shape'],
            custom_check=dimension_constraint())(inputs[:2], attr, params)
        # Optional third input is the bias, applied after the transpose conv.
        use_bias = len(inputs) == 3
        if use_bias:
            out = _op.nn.bias_add(out, inputs[2])
        return out
class Div(Elemwise):
    """ Operator converter for Div (elementwise divide, via Elemwise)."""
    name = 'divide'
class Elu(OnnxOpConverter):
    """ Operator converter for Elu.
    """
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # elu(x) = x for x > 0, alpha*(exp(x)-1) otherwise, expressed with
        # relu so no dedicated elu op is needed:
        #   -alpha * relu(1 - exp(x)) + relu(x)
        alpha = float(attr.get('alpha', 1.0))
        negative_part = _expr.const(-alpha) * _op.nn.relu(
            _expr.const(1.) - _op.exp(inputs[0]))
        return negative_part + _op.nn.relu(inputs[0])
class Gemm(OnnxOpConverter):
    """ Operator converter for Gemm.
    """
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        assert len(inputs) == 3, "Gemm op take 3 inputs, {} given".format(
            len(inputs))
        # Y = alpha * A * B + beta * C
        alpha = float(attr.get('alpha', 1.0))
        beta = float(attr.get('beta', 1.0))
        transA = int(attr.get('transA', 0))
        transB = int(attr.get('transB', 0))
        # get number of channels
        channels = infer_channels(inputs[1], not transB)
        if transA:
            inputs[0] = _op.transpose(inputs[0], axes=(1, 0))
        # dense expects the weight transposed, so B is transposed exactly
        # when the ONNX attribute says it is NOT.
        if not transB:
            inputs[1] = _op.transpose(inputs[1], axes=(1, 0))
        inputs[0] = _op.nn.batch_flatten(inputs[0])
        # NOTE(review): bias_add with beta*C assumes C is broadcastable as a
        # 1-D bias; ONNX permits more general broadcast shapes — confirm.
        out = _op.nn.dense(_expr.const(alpha) * inputs[0],
                           inputs[1], units=channels)
        return _op.nn.bias_add(out, _expr.const(beta) * inputs[2])
class MatMul(OnnxOpConverter):
    """ Operator converter for MatMul.
    """
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        assert len(inputs) == 2, "MatMul op take 2 inputs, {} given".format(len(inputs))
        # dense expects its weight transposed, hence the explicit transpose
        # of the second operand. NOTE(review): the (1, 0) axes imply 2-D
        # operands only — assumes no batched matmul reaches here; confirm.
        weight_t = _op.transpose(inputs[1], axes=(1, 0))
        return _op.nn.dense(inputs[0], weight_t)
class MaxPool(Pool):
    """ Operator converter for MaxPool.

    opset 1 comes from Pool; opset 8 adds storage_order -> layout mapping,
    opset 10 additionally forwards ceil_mode.
    """
    name = 'max_pool'
    @classmethod
    def _impl_v8(cls, inputs, attr, params):
        return AttrCvt(
            op_name=dimension_picker(cls.name),
            transforms={
                'kernel_shape': 'pool_size',
                'pads': ('padding', (0, 0), revert_caffe2_pad),
                'storage_order': ('layout', 'NCHW', onnx_storage_order2layout),
            },
            # very weird attributes here in onnx, force check
            ignores=['dilations', 'auto_pad'],
            # TODO(higumachan): make sure ceil_mode in onnx, and layout?
            extras={'ceil_mode': False},
            custom_check=dimension_constraint())(inputs, attr, params)
    @classmethod
    def _impl_v10(cls, inputs, attr, params):
        return AttrCvt(
            op_name=dimension_picker(cls.name),
            transforms={
                'kernel_shape': 'pool_size',
                'pads': ('padding', (0, 0), revert_caffe2_pad),
                'storage_order': ('layout', 'NCHW', onnx_storage_order2layout),
                'ceil_mode': 'ceil_mode'
            },
            # very weird attributes here in onnx, force check
            ignores=['dilations', 'auto_pad'],
            custom_check=dimension_constraint())(inputs, attr, params)
class Mul(Elemwise):
    """ Operator converter for Mul (elementwise multiply, via Elemwise)."""
    name = 'multiply'
class Pad(OnnxOpConverter):
    """ Operator converter for Pad.

    ONNX stores pads flat as [before_0, ..., before_n, after_0, ..., after_n];
    relay's pad takes per-dimension (before, after) pairs. opset 1 names the
    attribute 'paddings', opset 2 renamed it to 'pads' — the conversion logic
    is otherwise identical, so it is shared via _pads_to_pad_width.
    """
    @staticmethod
    def _pads_to_pad_width(pads):
        """Convert a flat ONNX pads sequence into relay pad_width pairs."""
        dims = int(len(pads) / 2)
        return [(pads[i], pads[i + dims]) for i in range(dims)]

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # opset 1 spells the attribute 'paddings'.
        attr['pad_width'] = cls._pads_to_pad_width(attr.pop('paddings'))
        return AttrCvt(
            _op.nn.pad,
            transforms={
                'value': 'pad_value',
            },
            ignores=['mode'],
            # Only constant-mode padding is supported.
            custom_check=(lambda attrs: attrs.get('mode', 'constant').decode("utf-8") == 'constant',
                          'split mode != constant'))(inputs, attr, params)

    @classmethod
    def _impl_v2(cls, inputs, attr, params):
        # opset 2 renamed the attribute to 'pads'.
        attr['pad_width'] = cls._pads_to_pad_width(attr.pop('pads'))
        return AttrCvt(
            'pad',
            transforms={
                'value': 'pad_value',
            },
            ignores=['mode'],
            # Only constant-mode padding is supported.
            custom_check=(lambda attrs: attrs.get('mode', 'constant').decode("utf-8") == 'constant',
                          'split mode != constant'))(inputs, attr, params)
class ParametricSoftPlus(OnnxOpConverter):
    """ Operator converter for ParametricSoftPlus.
    """
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # y = alpha * log(exp(beta * x) + 1)
        alpha = _expr.const(float(attr.get('alpha', 1.0)))
        beta = _expr.const(float(attr.get('beta', 1.0)))
        softplus = _op.log(_op.exp(beta * inputs[0]) + _expr.const(1.))
        return softplus * alpha
class Prelu(OnnxOpConverter):
    """ Operator converter for Prelu.

    inputs: [data, slope]; forwarded directly to relay prelu.
    """
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        assert len(inputs) == 2, "Prelu need 2 inputs, {} given".format(len(inputs))
        return _op.nn.prelu(inputs[0], inputs[1])
class Reciprocal(OnnxOpConverter):
    """ Operator converter for Reciprocal.

    reciprocal(x) = 1 / x, elementwise.
    """
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        return _expr.const(1.0) / inputs[0]
class Flatten(OnnxOpConverter):
    """ Operator converter for Flatten.
    """
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        axis = attr.get('axis', 1)
        if axis == 1:
            # Common case: collapse everything but the batch dimension.
            return _op.nn.batch_flatten(inputs[0])
        # General case: keep dims [0, axis) unchanged (0 == "copy input
        # dim" in relay reshape) and fold the rest into one -1 dimension.
        newshape = [0] * axis + [-1]
        return _op.reshape(inputs[0], newshape)
class Reshape(OnnxOpConverter):
    """ Operator converter for Reshape.
    """
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Three sources for the target shape, in priority order:
        # 1) a static 'shape' attribute, 2) a shape tensor found in params,
        # 3) constant-evaluating the shape subgraph as a last resort.
        if 'shape' in attr:
            return _op.reshape(inputs[0], attr['shape'])
        if get_name(inputs[1]) in params:
            shape = tuple(params[inputs[1].name_hint].asnumpy())
            out = _op.reshape(inputs[0], shape)
        else:
            data, shape = inputs
            logging.warning("Constant evaluating Reshape's shape argument, may reduce performance")
            # Wrap the shape expression in its own function/module and fold
            # it to a constant with the debug executor.
            shape_params = analysis.free_vars(shape)
            func = _expr.Function(shape_params, shape)
            mod = _module.Module.from_expr(func)
            seq = _transform.Sequential([_transform.InferType(),
                                         _transform.FoldConstant(),
                                         _transform.FuseOps(0),
                                         _transform.InferType()])
            with tvm.relay.PassContext(opt_level=2):
                mod = seq(mod)
            with tvm.relay.build_config(opt_level=0):
                ex = tvm.relay.create_executor("debug", mod=mod)
            # Free vars not bound by params are fed random float32 data;
            # presumably they cannot influence the shape value — confirm.
            inputs = []
            for sp in shape_params:
                if not sp.name_hint in params:
                    sh = [int(i) for i in sp.type_annotation.shape]
                    inputs.append(
                        tvm.nd.array(np.random.rand(*sh).astype('float32')))
            static_shape = ex.evaluate()(*inputs, **params)
            out = _op.reshape(data, newshape=tuple(static_shape.asnumpy()))
        return out
class Concat(OnnxOpConverter):
    """ Operator converter for Concat.
    """
    @classmethod
    def _impl_v1(cls, inputs, args, params):
        # relay concatenate takes the tensors as one tuple argument.
        return AttrCvt(op_name='concatenate')((inputs,), args)
class Scale(OnnxOpConverter):
    """ Operator converter for Scale.
    """
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Multiply the input by the scalar 'scale' attribute (default 1.0).
        factor = float(attr.get('scale', 1.0))
        return inputs[0] * _expr.const(factor)
class Selu(OnnxOpConverter):
    """ Operator converter for Selu.
    """
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # selu(x) = gamma * (-alpha * relu(1 - exp(x)) + relu(x))
        alpha = float(attr.get('alpha', 1.6732))
        gamma = float(attr.get('gamma', 1.0507))
        negative_part = _expr.const(-alpha) * _op.nn.relu(
            _expr.const(1.) - _op.exp(inputs[0]))
        return _expr.const(gamma) * (negative_part + _op.nn.relu(inputs[0]))
class ScaledTanh(OnnxOpConverter):
    """ Operator converter for ScaledTanh.
    """
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # y = alpha * tanh(beta * x)
        alpha = float(attr.get('alpha', 1.0))
        beta = float(attr.get('beta', 1.0))
        scaled = _op.tanh(_expr.const(beta) * inputs[0])
        return scaled * _expr.const(alpha)
class SoftPlus(OnnxOpConverter):
    """ Operator converter for SoftPlus.
    """
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # softplus(x) = log(exp(x) + 1)
        one = _expr.const(1.)
        return _op.log(_op.exp(inputs[0]) + one)
class Softsign(OnnxOpConverter):
    """ Operator converter for Softsign.

    softsign(x) = x / (1 + |x|); |x| reuses the Absolute converter.
    """
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        return inputs[0] / (_expr.const(1.) + Absolute.get_converter(1)(inputs, attr, params))
class Sub(Elemwise):
    """ Operator converter for Sub (elementwise subtract, via Elemwise)."""
    name = 'subtract'
class Sum(OnnxOpConverter):
    """ Operator converter for Sum.
    """
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        """Elementwise sum of a variadic number of inputs.

        Folds with pairwise `_op.add`. Unlike the previous implementation,
        this does not overwrite entries of the caller's `inputs` list while
        accumulating.
        """
        total = inputs[0]
        for operand in inputs[1:]:
            total = _op.add(total, operand)
        return total
class ThresholdedRelu(OnnxOpConverter):
    """ Operator converter for ThresholdedRelu.
    """
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Zero out everything <= alpha: x * float32(x > alpha).
        alpha = float(attr.get('alpha', 0.0))
        threshold = _op.full_like(inputs[0], fill_value=_expr.const(alpha))
        mask = _op.greater(inputs[0], threshold).astype("float32")
        return inputs[0] * mask
def _broadcast_constraint():
def _broadcast_check(attrs):
if attrs.get('axis', None):
return False
return True
return _broadcast_check, "Specifying broadcast axis not allowed."
def _fully_connected(opset):
    """Return a converter mapping a fully-connected op to relay dense."""
    def _impl(inputs, attr, params):
        # get number of channels
        # NOTE(review): other call sites pass a bool as infer_channels'
        # second argument; passing `params` here looks inconsistent — confirm.
        channels = infer_channels(inputs[1], params)
        attr['units'] = channels
        return AttrCvt('dense', ignores=['axis', 'axis_w'])(inputs, attr)
    return _impl
class Upsample(OnnxOpConverter):
    """ Operator converter for Upsample (nearest mode).
    """
    @classmethod
    def _impl_v9(cls, inputs, attr, params):
        scales = attr.get('scales')
        if not scales:
            #Here we are going to higher OPSET version.
            # opset 9 moved scales from an attribute to a second input,
            # which must be a constant found in params.
            assert len(inputs) == 2, "Upsample op take 2 inputs, {} given".format(len(inputs))
            scales = params[inputs[1].name_hint].asnumpy()
            inputs = inputs[:1]
        # Only NCHW with no batch/channel scaling and equal H/W scale factors
        # is supported (enforced below).
        assert len(scales) == 4 and scales[0] == 1.0 and scales[1] == 1.0 and scales[2] == scales[3]
        mode = attr.get('mode')
        if mode == b'nearest':
            method = "NEAREST_NEIGHBOR"
        elif mode == b'linear':
            method = "BILINEAR"
        else:
            raise tvm.error.OpAttributeInvalid(
                'Value {} in attribute "mode" of operator Upsample is not valid.'.format(mode))
        attr = {'scale':int(scales[-1]), 'method':method, 'layout':'NCHW'}
        return AttrCvt('upsampling')(inputs, attr)
class Shape(OnnxOpConverter):
    """ Operator converter for Shape.
    """
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # TODO(@jroesch): use shape_of once it has been fixed)
        return _op.shape_of(inputs[0])
class Cast(OnnxOpConverter):
    """ Operator converter for Cast.
    """
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # opset 1: 'to' is already a dtype string.
        return AttrCvt(op_name='cast', transforms={'to': 'dtype'})(inputs, attr)
    @classmethod
    def _impl_v5(cls, inputs, attr, params):
        # opset 5+: 'to' is a TensorProto enum; translate it to a numpy
        # dtype name via onnx.mapping before handing off to relay cast.
        try:
            from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
            attr['to'] = str(TENSOR_TYPE_TO_NP_TYPE[attr['to']])
        except ImportError as e:
            raise ImportError(
                "Unable to import onnx.mapping which is required {}".format(e))
        return AttrCvt(op_name='cast', transforms={'to': 'dtype'})(inputs, attr)
class Unsqueeze(OnnxOpConverter):
    """ Operator converter for Unsqueeze.
    """
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Insert a size-1 dimension for every requested axis, one at a
        # time, in the order given by the attribute.
        result = inputs[0]
        for axis in attr['axes']:
            result = _op.expand_dims(result, axis=axis, num_newaxis=1)
        return result
class Split(OnnxOpConverter):
    """ Operator converter for Split.
    """
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Relay's split wants cumulative cut positions, while ONNX provides
        # per-piece sizes; accumulate all but the last size.
        cut_points = []
        running = 0
        for size in attr['split'][:-1]:
            running += size
            cut_points.append(running)
        attr['indices_or_sections'] = cut_points
        return AttrCvt(
            'split',
            ignores=['split'])(inputs, attr, params)
class Slice(OnnxOpConverter):
    """ Operator converter for Slice.
    """
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Normalize scalar attributes to 1-tuples.
        if isinstance(attr['starts'], int):
            attr['starts'] = (attr['starts'],)
            attr['ends'] = (attr['ends'],)
        try:
            # Update the starts and ends according to axes if required.
            if isinstance(attr['axes'], int):
                attr['axes'] = (attr['axes'],)
            # When 'axes' is sparse (not every leading axis is listed),
            # densify starts/ends so every axis up to max(axes) has an
            # entry; unlisted axes get the full range [0, INT32_MAX).
            if (max(attr['axes']) + 1) != len(attr['axes']):
                new_axes = []
                new_starts = []
                new_ends = []
                pop_index = 0
                for i in range(max(attr['axes']) + 1):
                    if i in attr['axes']:
                        new_axes.append(i)
                        new_starts.append(attr['starts'][pop_index])
                        new_ends.append(attr['ends'][pop_index])
                        pop_index += 1
                    else:
                        new_axes.append(i)
                        new_starts.append(0)
                        new_ends.append(np.iinfo(np.int32).max)
                attr['axes'] = new_axes
                attr['starts'] = new_starts
                attr['ends'] = new_ends
        except KeyError:
            # 'axes' is optional; without it starts/ends apply positionally.
            pass
        return AttrCvt('strided_slice',
                       transforms={'starts': 'begin',
                                   'ends': 'end'},
                       ignores=['axes'])(inputs, attr)
class Gather(OnnxOpConverter):
    """ Operator converter for Gather.

    Maps to relay 'take' along the given axis (default 0).
    """
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        axis = attr.get('axis', 0)
        return AttrCvt('take',
                       extras={'axis':axis})(inputs, {})
    #return _op.take(inputs[0], inputs[1], axis)
class Greater(OnnxOpConverter):
    """ Operator logical greater (elementwise, opset 7+).
    """
    @classmethod
    def _impl_v7(cls, inputs, attr, params):
        return _op.greater(inputs[0], inputs[1])
class Less(OnnxOpConverter):
    """ Operator logical less than (elementwise, opset 7+).
    """
    @classmethod
    def _impl_v7(cls, inputs, attr, params):
        return _op.less(inputs[0], inputs[1])
class LRN(OnnxOpConverter):
    """ Operator converter for Local Response Normalization.
    """
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        """LRN support only NCHW format
        https://github.com/onnx/onnx/blob/master/docs/Operators.md#LRN
        """
        new_attr = {
            'size': attr.get('size'),
            'axis': 1,  # channel axis in NCHW
            'alpha': attr.get('alpha', 0.0001),
            'beta': attr.get('beta', 0.75),
            'bias': attr.get('bias', 1.0),
        }
        return AttrCvt('lrn')(inputs, new_attr)
class Maximum(OnnxOpConverter):
    """ Operator converter for Maximum.
    """
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        if not isinstance(inputs, list) or len(inputs) < 2:
            raise ValueError("Expect minimum 2 inputs")
        # Fold the variadic input list with pairwise maximum.
        result = inputs[0]
        for operand in inputs[1:]:
            result = AttrCvt('maximum')([result, operand], {})
        return result
class Minimum(OnnxOpConverter):
    """ Operator converter for Minimum.
    """
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        if not isinstance(inputs, list) or len(inputs) < 2:
            raise ValueError("Expect minimum 2 inputs")
        # Fold the variadic input list with pairwise minimum.
        result = inputs[0]
        for operand in inputs[1:]:
            result = AttrCvt('minimum')([result, operand], {})
        return result
class Mean(OnnxOpConverter):
    """ Operator converter for Mean.
    """
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        if not isinstance(inputs, list) or len(inputs) < 2:
            raise ValueError("Expect minimum 2 inputs")
        # avoid overflow
        # Stack all inputs along a fresh leading axis and reduce over it,
        # rather than summing first and dividing.
        concat = _op.concatenate([_op.expand_dims(x, axis=0) for x in inputs], axis=0)
        return _op.mean(concat, axis=0, keepdims=False)
class HardSigmoid(OnnxOpConverter):
    """ Operator converter for HardSigmoid.
    """
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # y = clip(alpha * x + beta, 0, 1)
        alpha = attr.get('alpha', 0.2)
        beta = attr.get('beta', 0.5)
        scaled = (inputs[0] * _expr.const(alpha)) + _expr.const(beta)
        return AttrCvt('clip')([scaled], {'a_min': 0, 'a_max': 1})
class Reduce(OnnxOpConverter):
    """ A helper class for reduce op converters.

    Subclasses set ``name`` to the relay reduction op ('max', 'sum', ...).
    """
    name = ''
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        if 'axes' in attr:
            axis = attr.get('axes', 0)
        else:
            # No 'axes' attribute means reduce over all dimensions.
            axis_len = len(infer_shape(inputs[0]))
            axis = list(range(axis_len))
        attr = {'axis':axis, 'keepdims':attr.get('keepdims', True)}
        return AttrCvt(cls.name)(inputs, attr)
class ReduceMax(Reduce):
    """ Operator converter for ReduceMax.
    """
    name = 'max'
class ReduceMin(Reduce):
    """ Operator converter for ReduceMin.
    """
    name = 'min'
class ReduceSum(Reduce):
    """ Operator converter for ReduceSum.
    """
    name = 'sum'
class ReduceMean(Reduce):
    """ Operator converter for ReduceMean.
    """
    name = 'mean'
class ReduceProd(Reduce):
    """ Operator converter for ReduceProd.
    """
    name = 'prod'
class ArgMax(OnnxOpConverter):
    """ Operator converter for ArgMax.
    """
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Forward only axis/keepdims, applying ONNX defaults.
        new_attr = {'axis': attr.get('axis', 0),
                    'keepdims': attr.get('keepdims', True)}
        return AttrCvt('argmax')(inputs, new_attr)
class ArgMin(OnnxOpConverter):
    """ Operator converter for ArgMin.
    """
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Forward only axis/keepdims, applying ONNX defaults.
        new_attr = {'axis': attr.get('axis', 0),
                    'keepdims': attr.get('keepdims', True)}
        return AttrCvt('argmin')(inputs, new_attr)
class Softmax(OnnxOpConverter):
    """ Operator converter for Softmax.
    """
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # set default value when axis is not set in the model
        # (ONNX softmax defaults to axis=1, unlike relay's -1).
        if 'axis' not in attr:
            attr['axis'] = 1
        return AttrCvt('softmax', transforms={'axis': ('axis', 1)})(inputs, attr, params)
class ConstantFill(OnnxOpConverter):
    """ Operator converter for ConstantFill.
    """
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        num_inputs = len(inputs)
        # The target shape comes from exactly one of: the 'shape' attribute,
        # the first input interpreted as a shape ('input_as_shape'), or the
        # first input's own shape (full_like).
        if 'shape' in attr:
            if num_inputs > 1:
                raise ImportError(
                    "Can't set shape and input tensor at a time")
            shape = attr.pop('shape')
        else:
            if num_inputs == 1:
                raise ImportError(
                    "Either shape attribute or input should be set")
            if 'input_as_shape' in attr and attr['input_as_shape']:
                shape = params[get_name(inputs[0])].asnumpy()
            else:
                if 'extra_shape' in attr:
                    raise tvm.error.OpAttributeInvalid('Attribute "extra_shape" not '
                                                       'supported with "fill_like" for '
                                                       'operator ConstantFill.')
                return _op.full_like(inputs[0], inputs[1])
        if 'extra_shape' in attr:
            shape = shape + attr.pop('extra_shape')
        # NOTE(review): full() is called with inputs[0] as the fill value,
        # which raises IndexError when there are no inputs and ignores any
        # 'value' attribute — looks suspicious; confirm against the spec.
        return _op.full(inputs[0], shape)
# compatible operators that do NOT require any conversion.
# Ops listed here are forwarded verbatim to the relay op of the same name
# (see GraphProto._convert_operator); currently none.
_identity_list = []
# _convert_map defines maps of name to converter functor(callable)
# for 1 to 1 mapping, use Renamer if nothing but name is different
# use AttrCvt if attributes need to be converted
# for 1 to N mapping(composed), use custom callable functions
# for N to 1 mapping, currently not supported(?)
def _get_convert_map(opset):
    """Build the ONNX-op-name -> converter mapping for the given opset.

    Commented-out entries are known ONNX ops that are not yet supported.
    """
    return {
        # defs/experimental
        'Identity': Renamer('copy'),
        # 'Affine'
        'ThresholdedRelu': ThresholdedRelu.get_converter(opset),
        'ScaledTanh': ScaledTanh.get_converter(opset),
        'ParametricSoftplus': ParametricSoftPlus.get_converter(opset),
        'ConstantFill': ConstantFill.get_converter(opset),
        # 'GivenTensorFill'
        'FC': AttrCvt('dense', ignores=['axis', 'axis_w']),
        'Scale': Scale.get_converter(opset),
        # 'GRUUnit'
        # 'ATen'
        # 'ImageScaler'
        # 'MeanVarianceNormalization'
        # 'Crop'
        # 'Embedding'
        'Upsample' : Upsample.get_converter(opset),
        'SpatialBN': BatchNorm.get_converter(opset),
        # defs/generator
        # 'Constant' # Implemented
        # 'RandomUniform'
        # 'RandomNormal'
        # 'RandomUniformLike'
        # 'RandomNormalLike'
        # defs/logical
        # defs/math
        'Add': Add.get_converter(opset),
        'Sub': Sub.get_converter(opset),
        'Mul': Mul.get_converter(opset),
        'Div': Div.get_converter(opset),
        'Neg': Renamer('negative'),
        'Abs': Absolute.get_converter(opset),
        'Reciprocal': Reciprocal.get_converter(opset),
        'Floor': Renamer('floor'),
        'Ceil': Renamer('ceil'),
        'Sqrt': Renamer('sqrt'),
        'Relu': Renamer('relu'),
        'LeakyRelu': Renamer('leaky_relu'),
        'Selu': Selu.get_converter(opset),
        'Elu': Elu.get_converter(opset),
        'Exp': Renamer('exp'),
        'Greater': Greater.get_converter(opset),
        'Less': Less.get_converter(opset),
        'Log': Renamer('log'),
        'Tanh': Renamer('tanh'),
        'Pow': Renamer('power'),
        'PRelu': Prelu.get_converter(opset),
        'Sigmoid': Renamer('sigmoid'),
        'HardSigmoid': HardSigmoid.get_converter(opset),
        'Max': Maximum.get_converter(opset),
        'Min': Minimum.get_converter(opset),
        'Sum': Sum.get_converter(opset),
        'Mean': Mean.get_converter(opset),
        'Clip': AttrCvt('clip', transforms={'min': 'a_min', 'max': 'a_max'}),
        # softmax default axis is different in onnx
        'Softmax': Softmax.get_converter(opset),
        'LogSoftmax': AttrCvt('log_softmax', {'axis': ('axis', 1)}),
        # 'Hardmax'
        'Softsign': Softsign.get_converter(opset),
        'SoftPlus': SoftPlus.get_converter(opset),
        'Gemm': Gemm.get_converter(opset),
        'MatMul': MatMul.get_converter(opset),
        # defs/nn
        'AveragePool': AveragePool.get_converter(opset),
        'MaxPool': MaxPool.get_converter(opset),
        'Conv': Conv.get_converter(opset),
        'ConvTranspose': ConvTranspose.get_converter(opset),
        'GlobalAveragePool': Renamer('global_avg_pool2d'),
        'GlobalMaxPool': Renamer('global_max_pool2d'),
        'BatchNormalization': BatchNorm.get_converter(opset),
        # 'InstanceNormalization'
        # 'LpNormalization'
        'Dropout': AttrCvt('dropout', {'ratio': 'rate'}, ignores=['is_test']),
        'Flatten': Flatten.get_converter(opset),
        'LRN': LRN.get_converter(opset),
        # defs/reduction
        'ReduceMax': ReduceMax.get_converter(opset),
        'ReduceMin': ReduceMin.get_converter(opset),
        'ReduceSum': ReduceSum.get_converter(opset),
        'ReduceMean': ReduceMean.get_converter(opset),
        'ReduceProd': ReduceProd.get_converter(opset),
        # 'ReduceProd'
        # 'ReduceLogSumExp'
        'ArgMax': ArgMax.get_converter(opset),
        'ArgMin': ArgMin.get_converter(opset),
        # defs/tensor
        'Cast': Cast.get_converter(opset),
        'Reshape': Reshape.get_converter(opset),
        'Concat': Concat.get_converter(opset),
        'Split': Split.get_converter(opset),
        'Slice': Slice.get_converter(opset),
        'Transpose': AttrCvt('transpose', {'perm': 'axes'}),
        'Gather': Gather.get_converter(opset),
        'Squeeze': AttrCvt('squeeze', {'axes': 'axis'}),
        'Unsqueeze': Unsqueeze.get_converter(opset),
        'Pad': Pad.get_converter(opset),
        'Shape': Shape.get_converter(opset),
    }
class GraphProto(object):
    """A helper class for handling Relay expression copying from pb2.GraphProto.
    Definition: https://github.com/onnx/onnx/blob/master/onnx/onnx.proto
    Parameters
    ----------
    shape : dict of str to tuple, optional
        The input shape to the graph
    dtype : str or dict of str to str
        The input types to the graph
    """
    def __init__(self, shape, dtype):
        # onnx node/tensor name -> relay expression built so far
        self._nodes = {}
        # parameter name -> tvm.nd.array of pretrained weights
        self._params = {}
        # onnx input name -> renamed name (consulted when wiring node inputs)
        self._renames = {}
        self._num_input = 0
        self._num_param = 0
        self._shape = shape if shape else {}
        self._dtype = dtype
    def from_onnx(self, graph, opset):
        """Construct Relay expression from ONNX graph.
        Onnx graph is a python protobuf object.
        The companion parameters will be handled automatically.
        However, the input names from onnx graph is vague, mixing inputs and
        network weights/bias such as "1", "2"...
        For convenience, we rename the `real` input names to "input_0",
        "input_1"... And renaming parameters to "param_0", "param_1"...
        Parameters
        ----------
        graph : onnx protobuf object
            The loaded onnx graph
        opset : opset version
        Returns
        -------
        mod : tvm.relay.Module
            The returned relay module
        params : dict
            A dict of name: tvm.nd.array pairs, used as pretrained weights
        """
        # parse network inputs to relay, aka parameters
        for init_tensor in graph.initializer:
            if not init_tensor.name.strip():
                raise ValueError("Tensor's name is required.")
            self._params[init_tensor.name] = self._parse_array(init_tensor)
            self._nodes[init_tensor.name] = new_var(init_tensor.name,
                                                    shape=self._params[init_tensor.name].shape,
                                                    dtype=self._params[init_tensor.name].dtype)
        for i in graph.input:
            # from onnx v0.2, GraphProto.input has type ValueInfoProto,
            # and the name is 'i.name'
            i_name = self._parse_value_proto(i)
            d_type = self._parse_dtype(i, 'float32')
            if i_name in self._params:
                # i is a param instead of input
                self._num_param += 1
                self._params[i_name] = self._params.pop(i_name)
                self._nodes[i_name] = new_var(i_name,
                                              shape=self._params[i_name].shape,
                                              dtype=self._params[i_name].dtype)
            else:
                # A genuine graph input: its shape must come from the
                # user-supplied `shape` dict.
                self._num_input += 1
                if i_name in self._shape:
                    tshape = self._shape[i_name]
                else:
                    raise ValueError("Must provide an input shape for `{0}`.".format(i_name))
                if isinstance(self._dtype, dict):
                    dtype = self._dtype[i_name] if i_name in self._dtype else d_type
                else:
                    dtype = d_type
                self._nodes[i_name] = new_var(i_name, shape=tshape, dtype=dtype)
        # get list of unsupported ops
        convert_map = _get_convert_map(opset)
        unsupported_ops = set()
        for node in graph.node:
            op_name = node.op_type
            if op_name not in convert_map and \
               op_name != 'Constant' and \
               op_name not in _identity_list:
                unsupported_ops.add(op_name)
        if unsupported_ops:
            msg = 'The following operators are not supported for frontend ONNX: '
            msg += ', '.join(unsupported_ops)
            raise tvm.error.OpNotImplemented(msg)
        # construct nodes, nodes are stored as directed acyclic graph
        for node in graph.node:
            op_name = node.op_type
            attr = self._parse_attr(node.attribute)
            inputs = [self._nodes[self._renames.get(i, i)] for i in node.input]
            if op_name == "Constant":
                # Constants become params/vars directly, not converter calls.
                t_proto = self._parse_attr(node.attribute)["value"]
                self._num_param += 1
                # We should convert scalar integers to int32, to normalize.
                array = self._parse_array(t_proto)
                if len(array.shape) == 0 and array.dtype == 'int64':
                    array = _nd.array(array.asnumpy().astype('int32'))
                self._params[node.output[0]] = array
                self._nodes[node.output[0]] = new_var(
                    node.output[0],
                    shape=list(t_proto.dims),
                    dtype=array.dtype)
            else:
                if op_name == "ConstantFill":
                    # Materialize the fill value as a scalar param and feed
                    # it to the converter as an extra input.
                    fill_value = attr.get('value', 0.0)
                    dtype = attr.get('dtype', b'int32').decode("utf-8")
                    i_name = node.output[0]
                    self._params[i_name] = fill_value
                    self._nodes[i_name] = new_var(node.output[0], shape=(), dtype=dtype)
                    inputs.append(self._nodes[i_name])
                i_name = self._parse_value_proto(node)
                attr['tvm_custom'] = {}
                attr['tvm_custom']['name'] = i_name
                op = self._convert_operator(op_name, inputs, attr, opset)
                node_output = self._fix_outputs(op_name, node.output)
                if not isinstance(op, _expr.TupleWrapper):
                    outputs_num = 1
                else:
                    outputs_num = len(op)
                assert len(node_output) == outputs_num, (
                    "Number of output mismatch {} vs {} in {}.".format(
                        len(node_output), outputs_num, op_name))
                if outputs_num == 1:
                    self._nodes[node_output[0]] = op
                else:
                    for k, i in zip(list(node_output), range(len(node_output))):
                        self._nodes[k] = op[i]
        # now return the outputs
        outputs = [self._nodes[self._parse_value_proto(i)] for i in graph.output]
        outputs = outputs[0] if len(outputs) == 1 else _expr.Tuple(outputs)
        func = _expr.Function(analysis.free_vars(outputs), outputs)
        return _module.Module.from_expr(func), self._params
    def _parse_value_proto(self, value_proto):
        """Parse ValueProto or raw str."""
        try:
            name = value_proto.name
        except AttributeError:
            name = value_proto
        return name
    def _parse_dtype(self, value_proto, dtype):
        """Parse dtype, falling back to `dtype` when onnx is unavailable
        or the proto has no tensor type info."""
        try:
            from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
            return TENSOR_TYPE_TO_NP_TYPE[value_proto.type.tensor_type.elem_type].name
        except AttributeError:
            return dtype
    def _parse_array(self, tensor_proto):
        """Grab data in TensorProto and convert to numpy array."""
        try:
            from onnx.numpy_helper import to_array
        except ImportError as e:
            raise ImportError(
                "Unable to import onnx which is required {}".format(e))
        np_array = to_array(tensor_proto).reshape(tuple(tensor_proto.dims))
        return _nd.array(np_array)
    def _parse_attr(self, attr_proto):
        """Convert a list of AttributeProto to a dict, with names as keys."""
        attrs = {}
        for a in attr_proto:
            # Scalar fields.
            for f in ['f', 'i', 's']:
                if a.HasField(f):
                    attrs[a.name] = getattr(a, f)
            # Repeated scalar fields.
            for f in ['floats', 'ints', 'strings']:
                if list(getattr(a, f)):
                    assert a.name not in attrs, "Only one type of attr is allowed"
                    attrs[a.name] = tuple(getattr(a, f))
            # Tensor-valued fields.
            for f in ['t']:
                if a.HasField(f):
                    attrs[a.name] = getattr(a, f)
            for f in ['tensors']:
                if list(getattr(a, f)):
                    assert a.name not in attrs, "Only one type of attr is allowed"
                    attrs[a.name] = tuple(getattr(a, f))
            # Subgraph attributes are not supported.
            # NOTE(review): message typo "Filed" (should be "Field") left
            # untouched to preserve runtime strings.
            for f in ['g']:
                if a.HasField(f):
                    raise NotImplementedError(
                        "Filed {} is not supported in relay.".format(f))
            for f in ['graphs']:
                if list(getattr(a, f)):
                    raise NotImplementedError(
                        "Filed {} is not supported in relay.".format(f))
            if a.name not in attrs:
                raise ValueError("Cannot parse attribute: \n{}\n.".format(a))
        return attrs
    def _convert_operator(self,
                          op_name,
                          inputs,
                          attrs,
                          opset):
        """Convert ONNX operator into a Relay operator.
        The converter must specify conversions explicitly for incompatible name, and
        apply handlers to operator attributes.
        Parameters
        ----------
        op_name : str
            Operator name, such as Convolution, FullyConnected
        inputs : list of tvm.relay.expr.Function
            List of inputs.
        attrs : dict
            Dict of operator attributes
        opset : int
            Opset version
        Returns
        -------
        sym : tvm.relay.expr.Function
            Converted relay function
        """
        convert_map = _get_convert_map(opset)
        if op_name in _identity_list:
            sym = get_relay_op(op_name)(*inputs, **attrs)
        elif op_name in convert_map:
            sym = convert_map[op_name](inputs, attrs, self._params)
        else:
            raise NotImplementedError(
                "Operator {} not implemented.".format(op_name))
        return sym
    def _fix_outputs(self, op_name, outputs):
        """A hack to handle dropout or similar operator that have more than one out
        in ONNX.
        """
        if op_name == 'Dropout':
            if len(outputs) == 1:
                return outputs
            # TODO(zhreshold): support dropout mask?
            # Drop the mask output so the count matches relay's single output.
            outputs = outputs[:-1]
        return outputs
def from_onnx(model,
              shape=None,
              dtype="float32"):
    """Convert a ONNX model into an equivalent Relay Function.
    ONNX graphs are represented as Python Protobuf objects.
    The companion parameters will be handled automatically.
    However, the input names from onnx graph is vague, mixing inputs and
    network weights/bias such as "1", "2"...
    For convenience, we rename the `real` input names to "input_0",
    "input_1"... And renaming parameters to "param_0", "param_1"...
    Parameters
    ----------
    model : protobuf object
        ONNX ModelProto after ONNX v1.1.0
    shape : dict of str to tuple, optional
        The input shape to the graph
    dtype : str or dict of str to str
        The input types to the graph
    Returns
    -------
    mod : tvm.relay.Module
        The relay module for compilation
    params : dict of str to tvm.NDArray
        The parameter dict to be used by relay
    """
    # Run ONNX's own model checker first when the package is available;
    # its failures are demoted to warnings because the checker can be
    # overly strict about otherwise-convertible models.
    try:
        import onnx
        if hasattr(onnx.checker, 'check_model'):
            try:
                onnx.checker.check_model(model)
            except onnx.onnx_cpp2py_export.checker.ValidationError as e:
                import warnings
                warnings.warn(str(e))
    except ImportError:
        pass
    converter = GraphProto(shape, dtype)
    onnx_graph = model.graph
    # Older protos may lack opset_import entirely; default to opset 1.
    try:
        opset = model.opset_import[0].version if model.opset_import else 1
    except AttributeError:
        opset = 1
    mod, params = converter.from_onnx(onnx_graph, opset)
    return mod, params
|
mlperf/training_results_v0.7
|
Fujitsu/benchmarks/resnet/implementations/implementation_open/mxnet/3rdparty/tvm/python/tvm/relay/frontend/onnx.py
|
Python
|
apache-2.0
| 43,835
|
from django.shortcuts import render
from django.contrib.auth.models import User
from .forms import RegistrationForm
def regform(request):
    """Handle the user registration form.

    POST: validate the submitted RegistrationForm, reject duplicate
    usernames, otherwise create the user. GET: render an empty form.
    Always renders login.html; the "register" key tells the template to
    show the registration variant and "error" flags a failed attempt.
    """
    if request.method == 'POST':
        # Bind the form to the submitted data for validation.
        form = RegistrationForm(request.POST)
        if form.is_valid():
            # Check uniqueness with an explicit exists() query. The old
            # try/except around User.objects.get used a bare except, which
            # silently swallowed database errors as well as DoesNotExist.
            if User.objects.filter(username=form.cleaned_data['username']).exists():
                message = "Username already exists !!!"
                return render(request, "login.html",
                              {"message": message, "register": "register", "error": 1})
            # Register the new user (create_user hashes the password).
            User.objects.create_user(
                username=form.cleaned_data['username'],
                password=form.cleaned_data['password'],
                email=form.cleaned_data['email']
            )
            message = "Registration successful, try logging now..."
            return render(request, "login.html", {"message": message})
        # Invalid form: send back the error flag so the template re-shows it.
        message = "Invalid entries, try again !!!"
        return render(request, "login.html",
                      {"message": message, "register": "register", "error": 1})
    # GET request: render an empty registration form. The "register" value
    # makes the template show the registration variant of the page.
    form = RegistrationForm()
    return render(request, "login.html", {"form": form, "register": "register"})
|
SherSingh07/favorite_places
|
reg/views.py
|
Python
|
gpl-3.0
| 2,333
|
__author__ = 'horacioibrahim'
# python-iugu package modules
import base, config, errors
class IuguPlan(object):
    """CRUD-style wrapper for the plans resource of the Iugu API.

    The ``data`` property is a descriptor whose setter validates the
    required fields and serializes everything into the list of tuples
    expected by urlencode => http://iugu.com/referencias/api#criar-um-plano
    """
    __conn = base.IuguRequests()

    def __init__(self, **kwargs):
        self.id = kwargs.get("id")
        self.name = kwargs.get("name")
        self.identifier = kwargs.get("identifier")
        self.interval = kwargs.get("interval")
        self.interval_type = kwargs.get("interval_type")
        self.created_at = kwargs.get("created_at")
        self.updated_at = kwargs.get("updated_at")
        self.currency = kwargs.get("currency")  # API moved it to prices scope
        self.value_cents = kwargs.get("value_cents")  # API moved it to prices scope
        self._data = None
        self._prices = kwargs.get("prices")
        self.prices = []
        self._features = kwargs.get("features")
        self.features = []
        # Hydrate nested dicts returned by the API into typed objects.
        if isinstance(self._prices, list):
            for price in self._prices:
                obj_price = Price(**price)
                self.prices.append(obj_price)
        if isinstance(self._features, list):
            for feature in self._features:
                obj_feature = Feature(**feature)
                self.features.append(obj_feature)

    def is_valid(self):
        """Checks required fields to send to API.

        IMPORTANT: only use before sending a request to the API. The fields
        currency and value_cents are saved in the prices scope by the API,
        so do not validate data returned by the API with this method.
        """
        if self.name and self.identifier and self.interval and \
                self.interval_type and self.currency and self.value_cents:
            return True
        else:
            return False

    @property
    def data(self):
        # Serialized form (list of tuples) produced by the setter below.
        return self._data

    @data.setter
    def data(self, kwargs):
        """Defines data and validates required fields to send to API.

        Stores the result as a list of (key, value) tuples for urlencode.
        """
        data = []
        # required fields
        self.name = kwargs.get("name")
        self.identifier = kwargs.get("identifier")
        self.interval = kwargs.get("interval")
        self.interval_type = kwargs.get("interval_type")
        self.currency = kwargs.get("currency")
        self.value_cents = kwargs.get("value_cents")
        # optional fields
        self.prices = kwargs.get("prices")
        self.features = kwargs.get("features")
        # required fields. if not passed the API returns an exception
        if self.name:
            data.append(("name", self.name))
        if self.identifier:
            data.append(("identifier", self.identifier))
        if self.interval:
            data.append(("interval", self.interval))
        if self.interval_type:
            data.append(("interval_type", self.interval_type))
        if self.currency:
            if self.currency == "BRL":
                data.append(("currency", self.currency))
            else:
                raise errors.IuguPlansException(value="Only BRL supported")
        if self.value_cents:
            data.append(("value_cents", self.value_cents))
        # optional fields
        if self.prices:
            if isinstance(self.prices, list):
                # each prices item must be an instance of the Price class
                for price in self.prices:
                    data.extend(price.to_data())
            else:
                raise errors.IuguPlansException(value="The fields prices must "
                                                      "be a list of obj Price")
        if self.features:
            if isinstance(self.features, list):
                for feature in self.features:
                    data.extend(feature.to_data())
            else:
                raise errors.IuguPlansException(value="The fields features "
                                                      "must be a list of obj Feature")
        self._data = data

    @data.deleter
    def data(self):
        del self._data

    def create(self, name=None, identifier=None, interval=None,
               interval_type=None, currency=None, value_cents=None,
               features=None, prices=None):
        """
        Creates a new plan in the API and returns an IuguPlan instance. The
        required fields are name, identifier, interval, interval_type and
        value_cents.

        :param name: name of a plan
        :param identifier: unique name identifier in API plan context
        :param interval: an integer that defines duration (e.g. 12 = one year)
        :param interval_type: a string with "weeks" or "months"
        :param currency: only BRL is supported; anything else raises
        :param value_cents: an integer price in cents (e.g. 1000 => 10.00)
        :param prices: a list of prices. The definition in API is obscure
        :param features: a list of Feature instances

        NOTE: locals() is captured below and fed to the ``data`` setter, so
        the local variable names here must match the field names it reads.
        """
        urn = "/v1/plans"
        if not name:
            if self.name:
                name = self.name
            else:
                raise errors.IuguPlansException(value="Name is required")
        if not identifier:
            if self.identifier:
                identifier = self.identifier
            else:
                raise errors.IuguPlansException(value="identifier is required")
        if not interval:
            if self.interval:
                interval = self.interval
            else:
                raise errors.IuguPlansException(value="interval is required")
        if not interval_type:
            if self.interval_type:
                interval_type = self.interval_type
            else:
                raise errors.IuguPlansException(value="interval_type is required")
        if not features:
            if self.features:
                features = self.features
        if not prices:
            if self.prices:
                prices = self.prices
        if not value_cents:
            if self.value_cents:
                value_cents = self.value_cents
            else:
                raise errors.IuguPlansException(value="value_cents is required")
        if not currency:
            if self.currency:
                currency = self.currency
        kwargs_local = locals().copy()
        kwargs_local.pop('self')  # prevent error of multiple value for args
        self.data = kwargs_local
        response = self.__conn.post(urn, self.data)
        return IuguPlan(**response)

    def set(self, plan_id, name=None, identifier=None, interval=None,
            interval_type=None, currency=None, value_cents=None,
            features=None, prices=None):
        """
        Edits/changes an existing plan and returns an IuguPlan instance.

        :param plan_id: ID number of an existing plan
        """
        urn = "/v1/plans/{plan_id}".format(plan_id=plan_id)
        kwargs_local = locals().copy()
        kwargs_local.pop('self')
        self.data = kwargs_local
        response = self.__conn.put(urn, self.data)
        return IuguPlan(**response)

    def save(self):
        """Saves this instance's current state and returns a new IuguPlan
        built from the API response."""
        urn = "/v1/plans/{plan_id}".format(plan_id=self.id)
        self.data = self.__dict__
        response = self.__conn.put(urn, self.data)
        return IuguPlan(**response)

    @classmethod
    def get(self, plan_id):
        """Gets one plan based on its ID and returns an instance."""
        data = []
        urn = "/v1/plans/{plan_id}".format(plan_id=plan_id)
        response = self.__conn.get(urn, data)
        return IuguPlan(**response)

    @classmethod
    def get_by_identifier(self, identifier):
        """Gets one plan based on its identifier and returns an instance.

        :param identifier: the unique plan identifier in the API
        """
        data = []
        urn = "/v1/plans/identifier/{identifier}".format(identifier=identifier)
        response = self.__conn.get(urn, data)
        return IuguPlan(**response)

    @classmethod
    def getitems(self, limit=None, skip=None, query=None, updated_since=None,
                 sort=None):
        """
        Gets plans, limited to 100 by API default.

        :param limit: limits the number of plans returned by the API
            (default and immutable API maximum is 100)
        :param skip: skips a number of plans in most-recent-first order;
            useful for pagination.
        :param query: filters based on value (case insensitive)
        :param sort: sorts based on a field. Use a minus sign to select the
            direction, DESC or ASC (e.g. sort="-email").
            IMPORTANT: not honoured by the API at present.
        :return: list of IuguPlan instances
        """
        data = []
        urn = "/v1/plans/"
        # Set options
        if limit:
            data.append(("limit", limit))
        if skip:
            data.append(("start", skip))
        if updated_since:
            data.append(("updated_since", updated_since))
        if query:
            data.append(("query", query))
        # TODO: sort does not work fine. Waiting support of API providers
        if sort:
            # Bug fix: the previous check was `assert sort is not str`,
            # an identity test against the *type object* that always passed.
            assert isinstance(sort, str), "sort must be string as -name or name"
            if sort.startswith("-"):
                sort = sort[1:]
                key = "sortBy[{field}]".format(field=sort)
                data.append((key, "desc"))
            else:
                key = "sortBy[{field}]".format(field=sort)
                data.append((key, "asc"))
        plans = self.__conn.get(urn, data)
        plans_objects = []
        for plan_item in plans["items"]:
            obj_plan = IuguPlan(**plan_item)
            plans_objects.append(obj_plan)
        return plans_objects

    def remove(self, plan_id=None):
        """
        Removes the plan given by *plan_id*, or this instance's plan.
        On success all attributes of this instance are reset to None.
        """
        if plan_id:
            to_remove = plan_id
        else:
            to_remove = self.id
        if not to_remove:
            raise errors.IuguPlansException(value="Instance or plan id is required")
        urn = "/v1/plans/{plan_id}".format(plan_id=to_remove)
        response = self.__conn.delete(urn, [])
        # check if the result can generate an instance of IuguPlan
        obj = IuguPlan(**response)
        if obj:
            for k, v in self.__dict__.items():
                self.__dict__[k] = None
class Price(object):
    """Represents one entry of a plan's ``prices`` list in the Iugu API.

    Some of the fields (id, plan_id, timestamps) are only ever filled in
    from API responses.

    :method is_valid: check if the required fields are present
    :method to_data: serialize to a list of tuples for urlencode
    """

    def __init__(self, **kwargs):
        # Attribute order matters: to_data() iterates __dict__ in
        # insertion order when building the request payload.
        for field in ("id", "plan_id", "created_at", "updated_at",
                      "value_cents", "currency"):
            setattr(self, field, kwargs.get(field))

    def is_valid(self):
        """Required fields to send to API."""
        return bool(self.value_cents and self.currency)

    def to_data(self):
        """
        Returns a list of tuples with ("prices[field]", value). Use it to
        return a data that will extend the data params in request.
        """
        if not self.is_valid():
            blanks = [name for name, value in self.__dict__.items()
                      if value is None]
            raise TypeError("All fields are required to %s. Blanks fields given %s" %
                            (self.__class__, blanks))
        return [("prices[][{key_name}]".format(key_name=name), value)
                for name, value in self.__dict__.items()
                if value is not None]
class Feature(object):
    """Abstracts the features of the Plan context in the Iugu API.

    :method is_valid: check if the required fields are present
    :method to_data: serialize to a list of tuples for urlencode
    """

    def __init__(self, **kwargs):
        # Attribute order matters: to_data() iterates __dict__ in
        # insertion order when building the request payload.
        for field in ("id", "identifier", "important", "name", "plan_id",
                      "position", "created_at", "updated_at", "value"):
            setattr(self, field, kwargs.get(field))

    def is_valid(self):
        """
        Required to send to API
        """
        return bool(self.name and self.identifier and self.value > 0)

    def to_data(self):
        """
        Returns a list of tuples with ("features[field]", value). Use it to
        return a data that will extend the data params in request.
        """
        if not self.is_valid():
            blanks = [name for name, value in self.__dict__.items()
                      if value is None]
            raise TypeError("All fields are required to class %s. Blanks fields given %s" %
                            (self.__class__, blanks))
        return [("features[][{key_name}]".format(key_name=name), value)
                for name, value in self.__dict__.items()
                if value is not None]
|
horacioibrahim/iugu-python
|
lib/iugu/plans.py
|
Python
|
apache-2.0
| 13,466
|
# Copyright (C) 2010-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
# This case corresponds to: /visu/Vectors/E9 case
# Create Vectors for all data of the given MED file
import sys
from paravistest import datadir, pictureext, get_picture_dir
from presentations import CreatePrsForFile, PrsTypeEnum
import pvserver as paravis
# NOTE: this is a Python 2 test script (print statements below).
# Create presentations
myParavis = paravis.myParavis
# Directory for saving snapshots
picturedir = get_picture_dir("Vectors/E9")
# Input MED data file for this test case.
file = datadir + "test_hydro.med"
print " --------------------------------- "
print "file ", file
print " --------------------------------- "
print "CreatePrsForFile..."
# Build VECTORS presentations for all data of the file and dump snapshots.
CreatePrsForFile(myParavis, file, [PrsTypeEnum.VECTORS], picturedir, pictureext)
|
FedoraScientific/salome-paravis
|
test/VisuPrs/Vectors/E9.py
|
Python
|
lgpl-2.1
| 1,498
|
"""
OOB configuration.
This module should be included in (or replace) the
default module set in settings.OOB_PLUGIN_MODULES
All functions defined in this module are made available
to be called by the OOB handler.
See src/server/oob_msdp.py for more information.
function execution - the oob protocol can execute a function directly on
the server. The available functions must be defined
as global functions via settings.OOB_PLUGIN_MODULES.
repeat func execution - the oob protocol can request a given function be
executed repeatedly at a regular interval. This
uses an internal script pool.
tracking - the oob protocol can request Evennia to track changes to
fields on objects, as well as changes in Attributes. This is
done by dynamically adding tracker-objects on entities. The
behaviour of those objects can be customized via
settings.OOB_PLUGIN_MODULES.
oob functions have the following call signature:
function(caller, session, *args, **kwargs)
oob trackers should inherit from the OOBTracker class in src/server.oob_msdp.py
and implement a minimum of the same functionality.
a global function oob_error will be used as optional error management.
"""
# import the contents of the default msdp module
from src.server.oob_cmds import *
|
Pathel/deuterium
|
game/gamesrc/conf/examples/oobfuncs.py
|
Python
|
bsd-3-clause
| 1,435
|
import rospy
from std_msgs.msg import Float32
from Interpreter import Interpreter
import numpy as np
class Interpreter_position_speed(Interpreter):
    """Interpreter that integrates a speed command into a position command
    and publishes it as a std_msgs/Float32 on the configured topic.
    """

    def __init__(self, interpreter_info):
        super(Interpreter_position_speed, self).__init__(interpreter_info)
        self.cmd.val = 0.0  # position
        self.speed = 0.0  # normalized speed command, kept in [-1, 1]
        self.pub = rospy.Publisher(self._config['topic'], Float32, queue_size=1)
        print('created publisher on', self._config['topic'])

    # Override
    def process_input(self, val, cmd_type):
        """Update the internal speed from a slider or button event.

        Sliders set the speed directly; buttons either reverse (BACK),
        stop (STOP), or nudge the speed by the configured key step.
        """
        if cmd_type == self.SLIDER:
            self.speed = val
        if cmd_type == self.BUTTON:
            # BACK keyword: flip direction to full speed the other way
            # (the min/max clamp keeps the result in [-1, 1]).
            if val == self.BACK and self.speed != 0.0:
                self.speed = max(min(-self.speed / abs(self.speed), 1.0), -1.0)
            # STOP keyword
            elif val == self.STOP:
                self.speed = 0.0
            # Default case: step the speed by the configured key precision.
            else:
                self.speed += val * self._config['key_precision']
            # Saturate the speed to [-1, 1].
            self.speed = np.clip(self.speed, -1.0, 1.0)

    def send_msg(self):
        """Integrate speed into the position command and publish it,
        rescaled and clipped to the configured [min, max] range.
        Publishes only when the position actually changed."""
        cmd_val_tmp = self.cmd.val + self.speed * self._config['gain_speed']
        if cmd_val_tmp != self.cmd.val:
            self.cmd.val = cmd_val_tmp
            msg = Float32()
            min_cmd = float(self._config['min'])
            max_cmd = float(self._config['max'])
            range_cmd = (max_cmd - min_cmd)/2.0  # half range because commands span -1..1
            offset = range_cmd + min_cmd
            msg.data = np.clip(self.cmd.val * range_cmd + offset, min_cmd, max_cmd)
            self.pub.publish(msg)
|
simchanu29/ros_teleop
|
src/message_handler/Interpreter_position_speed.py
|
Python
|
apache-2.0
| 1,631
|
# -*- coding: utf-8 -*-
# This file is part of the Horus Project
__author__ = 'Jesús Arroyo Torrens <jesus.arroyo@bq.com>'
__copyright__ = 'Copyright (C) 2014-2015 Mundo Reader S.L.'
__license__ = 'GNU General Public License v2 http://www.gnu.org/licenses/gpl2.html'
import wx._core
class Page(wx.Panel):
    """Generic workbench page: a title, an optional subtitle, a content
    panel, an optional progress gauge, and left/right navigation buttons
    wired to caller-supplied callbacks.
    """

    def __init__(self, parent, title="Title", subTitle="", left="Left", right="Right",
                 buttonLeftCallback=None, buttonRightCallback=None,
                 panelOrientation=wx.VERTICAL, viewProgress=False):
        wx.Panel.__init__(self, parent)
        # Navigation callbacks; either may be None (button press is a no-op).
        self.buttonLeftCallback = buttonLeftCallback
        self.buttonRightCallback = buttonRightCallback
        vbox = wx.BoxSizer(wx.VERTICAL)
        hbox = wx.BoxSizer(wx.HORIZONTAL)
        # panelBox holds the widgets added later through addToPanel().
        self.panelBox = wx.BoxSizer(panelOrientation)
        self._panel = wx.Panel(self)
        self._downPanel = wx.Panel(self)
        titleText = wx.StaticText(self, label=title)
        titleText.SetFont((wx.Font(12, wx.FONTFAMILY_DEFAULT, wx.NORMAL, wx.FONTWEIGHT_BOLD)))
        if subTitle != "":
            self.subTitleText = wx.StaticText(self, label=subTitle)
        self.gauge = wx.Gauge(self, range=100, size=(-1, 30))
        self._leftButton = wx.Button(self._downPanel, -1, left)
        self._rightButton = wx.Button(self._downPanel, -1, right)
        # Layout
        vbox.Add(titleText, 0, wx.ALL | wx.EXPAND, 10)
        if subTitle != "":
            vbox.Add(self.subTitleText, 0, wx.ALL | wx.EXPAND, 10)
        vbox.Add(self._panel, 1, wx.ALL | wx.EXPAND, 8)
        vbox.Add(self.gauge, 0, wx.ALL | wx.EXPAND, 8)
        self._panel.SetSizer(self.panelBox)
        vbox.Add(self._downPanel, 0, wx.ALL | wx.EXPAND, 1)
        hbox.Add(self._leftButton, 0, wx.ALL | wx.EXPAND |
                 wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT, 7)
        # Stretch spacer pushes the right button to the opposite edge.
        hbox.Add((0, 0), 1, wx.EXPAND)
        hbox.Add(self._rightButton, 0, wx.ALL | wx.EXPAND |
                 wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT, 7)
        self._downPanel.SetSizer(hbox)
        # The gauge is only shown on pages that report progress.
        if not viewProgress:
            self.gauge.Hide()
        self.SetSizer(vbox)
        # Events
        self._leftButton.Bind(wx.EVT_BUTTON, self._onLeftButtonPressed)
        self._rightButton.Bind(wx.EVT_BUTTON, self._onRightButtonPressed)
        self.Layout()

    def addToPanel(self, _object, _size):
        # Append a widget to the content area; _size is the sizer proportion.
        if _object is not None:
            self.panelBox.Add(_object, _size, wx.ALL | wx.EXPAND, 3)

    def _onLeftButtonPressed(self, event):
        # Forward the click to the caller-supplied callback, if any.
        if self.buttonLeftCallback is not None:
            self.buttonLeftCallback()

    def _onRightButtonPressed(self, event):
        # Forward the click to the caller-supplied callback, if any.
        if self.buttonRightCallback is not None:
            self.buttonRightCallback()
|
3cky/horus
|
src/horus/gui/workbench/calibration/page.py
|
Python
|
gpl-2.0
| 2,722
|
import rebound
import unittest
import warnings
class TestPlotting(unittest.TestCase):
    """Smoke tests: OrbitPlot must return a matplotlib Figure for both the
    plain and the slices layout, with bound and hyperbolic orbits present.
    """

    def setUp(self):
        # One star plus a bound (e<1) and a hyperbolic (a<0, e>1) companion.
        self.sim = rebound.Simulation()
        self.sim.add(m=1)
        self.sim.add(m=1e-3, a=1, e=0.1, omega=0.1, M=0.1, inc=0.1, Omega=0.1)
        self.sim.add(m=1e-3, a=-2, e=1.1, omega=0.1, M=0.1, inc=0.1, Omega=0.1)

    def tearDown(self):
        self.sim = None

    def test_orbitplot(self):
        # record=True captures warnings so they don't leak to stderr.
        # (Removed an unused numpy import and a dead `t = np.array(1.)`.)
        with warnings.catch_warnings(record=True):
            warnings.simplefilter("always")
            import matplotlib; matplotlib.use("pdf")  # headless backend
            plot = rebound.OrbitPlot(self.sim, periastron=True)
            self.assertIsInstance(plot, matplotlib.figure.Figure)
            plot = rebound.OrbitPlot(self.sim, periastron=True, color=True, trails=True, unitlabel="AU")
            self.assertIsInstance(plot, matplotlib.figure.Figure)

    def test_orbitplot_slices(self):
        with warnings.catch_warnings(record=True):
            warnings.simplefilter("always")
            import matplotlib; matplotlib.use("pdf")  # headless backend
            plot = rebound.OrbitPlot(self.sim, periastron=True, slices=True)
            self.assertIsInstance(plot, matplotlib.figure.Figure)
            plot = rebound.OrbitPlot(self.sim, periastron=True, color=True, trails=True, unitlabel="AU", slices=True, limz=1.)
            self.assertIsInstance(plot, matplotlib.figure.Figure)


if __name__ == "__main__":
    unittest.main()
|
dtamayo/rebound
|
rebound/tests/test_plotting.py
|
Python
|
gpl-3.0
| 1,551
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.15 (https://github.com/warner/python-versioneer)
import errno
import os
import re
import subprocess
import sys
def get_keywords():
    """Return the keyword strings that git-archive substitutes at export.

    setup.py/versioneer.py greps for the variable names, so each one must
    sit on its own line; _version.py simply calls get_keywords().
    """
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    return {"refnames": git_refnames, "full": git_full}
class VersioneerConfig:
    """Plain attribute container for versioneer configuration."""


def get_config():
    """Build the static configuration normally filled in by
    'setup.py versioneer' when it creates _version.py."""
    cfg = VersioneerConfig()
    cfg.VCS = "git"
    cfg.style = ""
    cfg.tag_prefix = "v"
    cfg.parentdir_prefix = "runipy-"
    cfg.versionfile_source = "runipy/_version.py"
    cfg.verbose = False
    return cfg
class NotThisMethod(Exception):
    """Raised when a particular version-discovery strategy does not apply."""
# Registry of embedded long-form _version.py templates, keyed by VCS name.
LONG_VERSION_PY = {}
# Nested registry of VCS handler functions: HANDLERS[vcs][method] -> callable.
HANDLERS = {}
def register_vcs_handler(vcs, method):  # decorator
    """Decorator factory: register the decorated function under
    HANDLERS[vcs][method] and return it unchanged."""
    def decorate(f):
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
    """Try each candidate executable in *commands* until one launches, run
    it with *args*, and return its stripped stdout (decoded on Python 3).

    Returns None when no candidate exists, when launching fails for a
    reason other than ENOENT, or when the process exits non-zero.
    """
    assert isinstance(commands, list)
    process = None
    for candidate in commands:
        dispcmd = str([candidate] + args)
        try:
            # remember shell=False, so use git.cmd on windows, not just git
            process = subprocess.Popen([candidate] + args, cwd=cwd,
                                       stdout=subprocess.PIPE,
                                       stderr=(subprocess.PIPE if hide_stderr
                                               else None))
            break
        except EnvironmentError:
            err = sys.exc_info()[1]
            if err.errno == errno.ENOENT:
                # this candidate doesn't exist; try the next one
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(err)
            return None
    else:
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None
    stdout = process.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if process.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
        return None
    return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Derive the version from the source tree's directory name.

    Source tarballs conventionally unpack into '<prefix><version>'
    directories; raise NotThisMethod when the name doesn't match.
    """
    dirname = os.path.basename(root)
    if dirname.startswith(parentdir_prefix):
        return {"version": dirname[len(parentdir_prefix):],
                "full-revisionid": None,
                "dirty": False, "error": None}
    if verbose:
        print("guessing rootdir is '%s', but '%s' doesn't start with "
              "prefix '%s'" % (root, dirname, parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract the git_refnames/git_full keyword values from _version.py.

    The code embedded in _version.py can just fetch these values directly;
    setup.py must not import _version.py, so the values are grepped out
    with regexps instead. This function is not used from _version.py.
    Returns a possibly-empty dict; a missing/unreadable file yields {}.
    """
    keywords = {}
    try:
        # 'with' guarantees the handle is closed even if a non-IO error is
        # raised mid-parse (the old plain open() leaked it in that case).
        with open(versionfile_abs, "r") as f:
            for line in f.readlines():
                if line.strip().startswith("git_refnames ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["refnames"] = mo.group(1)
                if line.strip().startswith("git_full ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["full"] = mo.group(1)
    except EnvironmentError:
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Compute a version dict from git-archive-expanded keywords.

    Raises NotThisMethod when the keywords are missing or unexpanded,
    i.e. we are not running from a git-archive tarball.
    """
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = set([r.strip() for r in refnames.strip("()").split(",")])
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = set([r for r in refs if re.search(r'\d', r)])
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs-tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix):]
            if verbose:
                print("picking %s" % r)
            return {"version": r,
                    "full-revisionid": keywords["full"].strip(),
                    "dirty": False, "error": None
                    }
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags"}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Collect version 'pieces' (closest tag, distance, short/long hash,
    dirty flag) by running git inside *root*.

    Raises NotThisMethod when *root* is not a git checkout or the git
    commands fail; a parse failure is reported via pieces["error"] instead.
    """
    # this runs 'git' from the root of the source tree. This only gets called
    # if the git-archive 'subst' keywords were *not* expanded, and
    # _version.py hasn't already been rewritten with a short version string,
    # meaning we're inside a checked out source tree.
    if not os.path.exists(os.path.join(root, ".git")):
        if verbose:
            print("no .git in %s" % root)
        raise NotThisMethod("no .git directory")
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    # if there is a tag, this yields TAG-NUM-gHEX[-dirty]
    # if there are no tags, this yields HEX[-dirty] (no NUM)
    describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
                                      "--always", "--long"],
                               cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                               % describe_out)
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                               % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    return pieces
def plus_or_dot(pieces):
    """Return the separator that starts a PEP 440 local-version segment:
    '.' when the closest tag already contains a '+', otherwise '+'."""
    tag = pieces.get("closest-tag", "")
    return "." if "+" in tag else "+"
def render_pep440(pieces):
    """Render TAG[+DISTANCE.gHEX[.dirty]] (PEP 440 with a post-release
    local-version identifier).

    A tagged build that is then dirtied yields TAG+0.gHEX.dirty. With no
    tags at all (git-describe was just HEX) the result is
    0+untagged.DISTANCE.gHEX[.dirty].
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: no tags at all
        rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            rendered += ".dirty"
        return rendered
    rendered = tag
    if pieces["distance"] or pieces["dirty"]:
        # '.' if the tag already carries a '+' local segment, else '+'
        sep = "." if "+" in tag else "+"
        rendered += "%s%d.g%s" % (sep, pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            rendered += ".dirty"
    return rendered
def render_pep440_pre(pieces):
    """Render TAG[.post.devDISTANCE] -- no -dirty marker in this style.
    With no tags at all the result is 0.post.devDISTANCE."""
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        return "0.post.dev%d" % pieces["distance"]
    if pieces["distance"]:
        return "%s.post.dev%d" % (tag, pieces["distance"])
    return tag
def render_pep440_post(pieces):
    """Render TAG[.postDISTANCE[.dev0]+gHEX]; '.dev0' marks a dirty tree.

    Note that .dev0 sorts backwards (a dirty tree appears "older" than the
    corresponding clean one), but you shouldn't be releasing software with
    -dirty anyway. With no tags: 0.postDISTANCE[.dev0]+gHEX.
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        rendered = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
        return rendered + "+g%s" % pieces["short"]
    rendered = tag
    if pieces["distance"] or pieces["dirty"]:
        rendered += ".post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
        sep = "." if "+" in tag else "+"
        rendered += "%sg%s" % (sep, pieces["short"])
    return rendered
def render_pep440_old(pieces):
    """Render TAG[.postDISTANCE[.dev0]]; '.dev0' marks a dirty tree.
    With no tags at all: 0.postDISTANCE[.dev0]."""
    tag = pieces["closest-tag"]
    if tag:
        rendered = tag
        if pieces["distance"] or pieces["dirty"]:
            rendered += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                rendered += ".dev0"
        return rendered
    # exception #1
    rendered = "0.post%d" % pieces["distance"]
    if pieces["dirty"]:
        rendered += ".dev0"
    return rendered
def render_git_describe(pieces):
    """Mimic 'git describe --tags --dirty --always':
    TAG[-DISTANCE-gHEX][-dirty], or bare HEX[-dirty] with no tags
    (note: no 'g' prefix on the bare hash)."""
    tag = pieces["closest-tag"]
    if tag:
        rendered = tag
        if pieces["distance"]:
            rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1: no tags
        rendered = pieces["short"]
    return rendered + ("-dirty" if pieces["dirty"] else "")
def render_git_describe_long(pieces):
    """Mimic 'git describe --tags --dirty --always --long':
    TAG-DISTANCE-gHEX[-dirty] -- the distance/hash part is unconditional.
    With no tags: bare HEX[-dirty] (note: no 'g' prefix)."""
    tag = pieces["closest-tag"]
    if tag:
        rendered = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # exception #1: no tags
        rendered = pieces["short"]
    return rendered + ("-dirty" if pieces["dirty"] else "")
def render(pieces, style):
    """Dispatch *pieces* to the renderer selected by *style* and wrap the
    result in the standard version dict. An error recorded in pieces wins
    over any rendering; an unknown style raises ValueError."""
    if pieces["error"]:
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"]}
    if not style or style == "default":
        style = "pep440"  # the default
    renderers = {
        "pep440": render_pep440,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    if style not in renderers:
        raise ValueError("unknown style '%s'" % style)
    rendered = renderers[style](pieces)
    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None}
def get_versions():
    """Return version information as a dict.

    Tries three strategies in order, each signalling failure by raising
    NotThisMethod: expanded VCS keywords, querying git directly, and
    finally parsing the parent directory name.
    """
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    cfg = get_config()
    verbose = cfg.verbose
    # Strategy 1: version info substituted into this file by `git archive`.
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass
    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for i in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        # Frozen interpreters (py2exe/bbfreeze) may not define __file__.
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree"}
    # Strategy 2: ask git itself about the working tree.
    try:
        pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
        return render(pieces, cfg.style)
    except NotThisMethod:
        pass
    # Strategy 3: infer the version from an unpacked sdist directory name.
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass
    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version"}
|
nanshe-org/runipy
|
runipy/_version.py
|
Python
|
bsd-2-clause
| 15,758
|
# -*- coding: utf-8 -*-
#
# This file is part of NINJA-IDE (http://ninja-ide.org).
#
# NINJA-IDE is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
#
# NINJA-IDE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NINJA-IDE; If not, see <http://www.gnu.org/licenses/>.
from PyQt5.QtWidgets import (
QWidget,
QVBoxLayout,
QHBoxLayout,
QSizePolicy,
QSpacerItem,
QFileDialog,
QGroupBox,
QLabel,
QLineEdit,
QCompleter,
QRadioButton,
QButtonGroup,
QPushButton,
QDirModel
)
from PyQt5.QtCore import (
QDir,
pyqtSlot
)
from ninja_ide.gui.ide import IDE
from ninja_ide.gui.dialogs.preferences import preferences
from ninja_ide import translations
from ninja_ide.core import settings
from ninja_ide.tools import ui_tools
class GeneralExecution(QWidget):
    """Preferences page for choosing the Python interpreter used for execution.

    Lets the user pick between the interpreter running NINJA-IDE and a
    custom one, and persists the choice when the parent preferences
    dialog emits ``savePreferences``.
    """

    def __init__(self, parent):
        super().__init__()
        self._preferences = parent
        box = QVBoxLayout(self)
        group_python_path = QGroupBox(translations.TR_WORKSPACE_PROJECTS)
        grid = QVBoxLayout(group_python_path)
        # Python Path
        python_path_bgroup = QButtonGroup(group_python_path)
        box_path = QVBoxLayout()
        # Line edit for the interpreter path, with a trailing "browse" action.
        self._txt_python_path = QLineEdit()
        act = self._txt_python_path.addAction(
            ui_tools.get_icon('open-project'), QLineEdit.TrailingPosition)
        act.triggered.connect(self._load_python_path)
        import sys
        self._txt_python_path.setText(sys.executable)
        self._txt_python_path.textChanged.connect(self._python_exec_changed)
        box_path.addWidget(QLabel("Select the Python interpreter"))
        # "Default" uses the interpreter running the IDE and disables the edit.
        default_interpreter_radio = QRadioButton("Default")
        default_interpreter_radio.toggled.connect(
            self._txt_python_path.setDisabled)
        python_path_bgroup.addButton(default_interpreter_radio)
        # "Custom" re-enables the line edit so the user can type/browse a path.
        self._custom_interpreter_radio = QRadioButton(
            "Use this Python interpreter:")
        self._custom_interpreter_radio.toggled.connect(
            self._txt_python_path.setEnabled)
        python_path_bgroup.addButton(self._custom_interpreter_radio)
        box_path.addWidget(default_interpreter_radio)
        box_path.addWidget(self._custom_interpreter_radio)
        box_path.addWidget(self._txt_python_path)
        grid.addLayout(box_path)
        box.addWidget(group_python_path)
        box.addItem(QSpacerItem(0, 0,
                    QSizePolicy.Expanding, QSizePolicy.Expanding))
        # Connections
        self._preferences.savePreferences.connect(self.save)

    @pyqtSlot('QString')
    def _python_exec_changed(self, python_exec):
        print(python_exec)

    @pyqtSlot()
    def _load_python_path(self):
        """Ask the user for a Python interpreter path."""
        # PyQt5's getOpenFileName returns a (path, selected_filter) tuple;
        # the original code treated the tuple itself as the path, so the
        # truth check was always true and a tuple was passed to setText().
        path, _selected_filter = QFileDialog.getOpenFileName(
            self, translations.TR_SELECT_SELECT_PYTHON_EXEC)
        if path:
            self._txt_python_path.setText(path)

    def save(self):
        """Save all Execution Preferences to QSettings and runtime settings."""
        qsettings = IDE.ninja_settings()
        qsettings.beginGroup("preferences")
        qsettings.beginGroup("execution")
        qsettings.setValue("python_path", self._txt_python_path.text())
        settings.PYTHON_EXEC = self._txt_python_path.text()
        qsettings.endGroup()
        qsettings.endGroup()
        print(settings.PYTHON_EXEC)
# Register this page under Preferences > GENERAL > EXECUTION.
preferences.Preferences.register_configuration(
    'GENERAL',
    GeneralExecution,
    translations.TR_PREFERENCES_EXECUTION,
    weight=1,
    subsection='EXECUTION'
)
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from PyQt4.QtGui import QWidget
from PyQt4.QtGui import QVBoxLayout
from PyQt4.QtGui import QGroupBox
from PyQt4.QtGui import QCheckBox
from PyQt4.QtGui import QHBoxLayout
from PyQt4.QtGui import QFileDialog
from PyQt4.QtGui import QLineEdit
from PyQt4.QtGui import QPushButton
from PyQt4.QtGui import QLabel
from PyQt4.QtGui import QComboBox
from PyQt4.QtGui import QIcon
from PyQt4.QtGui import QCompleter
from PyQt4.QtGui import QDirModel
from PyQt4.QtCore import SIGNAL
from PyQt4.QtCore import QDir
from ninja_ide import translations
from ninja_ide.core import settings
from ninja_ide.gui.ide import IDE
from ninja_ide.gui.dialogs.preferences import preferences
class GeneralExecution(QWidget):
# General Execution widget class
def __init__(self, parent):
super(GeneralExecution, self).__init__()
self._preferences = parent
vbox = QVBoxLayout(self)
groupExecution = QGroupBox(translations.TR_WORKSPACE_PROJECTS)
grid = QVBoxLayout(groupExecution)
#Python Path
hPath = QHBoxLayout()
self._txtPythonPath = QLineEdit()
self._btnPythonPath = QPushButton(QIcon(':img/open'), '')
self.completer, self.dirs = QCompleter(self), QDirModel(self)
self.dirs.setFilter(QDir.AllEntries | QDir.NoDotAndDotDot)
self.completer.setModel(self.dirs)
self._txtPythonPath.setCompleter(self.completer)
hPath.addWidget(QLabel(translations.TR_SELECT_PYTHON_EXEC))
hPath.addWidget(self._txtPythonPath)
hPath.addWidget(self._btnPythonPath)
grid.addLayout(hPath)
#Python Miscellaneous Execution options
self.check_B = QCheckBox(translations.TR_SELECT_EXEC_OPTION_B)
self.check_d = QCheckBox(translations.TR_SELECT_EXEC_OPTION_D)
self.check_E = QCheckBox(translations.TR_SELECT_EXEC_OPTION_E)
self.check_O = QCheckBox(translations.TR_SELECT_EXEC_OPTION_O)
self.check_OO = QCheckBox(translations.TR_SELECT_EXEC_OPTION_OO)
self.check_Q = QCheckBox(translations.TR_SELECT_EXEC_OPTION_Q)
self.comboDivision = QComboBox()
self.comboDivision.addItems(['old', 'new', 'warn', 'warnall'])
self.check_s = QCheckBox(translations.TR_SELECT_EXEC_OPTION_s)
self.check_S = QCheckBox(translations.TR_SELECT_EXEC_OPTION_S)
self.check_t = QCheckBox(translations.TR_SELECT_EXEC_OPTION_T)
self.check_tt = QCheckBox(translations.TR_SELECT_EXEC_OPTION_TT)
self.check_v = QCheckBox(translations.TR_SELECT_EXEC_OPTION_V)
self.check_W = QCheckBox(translations.TR_SELECT_EXEC_OPTION_W)
self.comboWarning = QComboBox()
self.comboWarning.addItems(
['default', 'ignore', 'all', 'module', 'once', 'error'])
self.check_x = QCheckBox(translations.TR_SELECT_EXEC_OPTION_X)
self.check_3 = QCheckBox(translations.TR_SELECT_EXEC_OPTION_3)
grid.addWidget(self.check_B)
grid.addWidget(self.check_d)
grid.addWidget(self.check_E)
grid.addWidget(self.check_O)
grid.addWidget(self.check_OO)
hDiv = QHBoxLayout()
hDiv.addWidget(self.check_Q)
hDiv.addWidget(self.comboDivision)
grid.addLayout(hDiv)
grid.addWidget(self.check_s)
grid.addWidget(self.check_S)
grid.addWidget(self.check_t)
grid.addWidget(self.check_tt)
grid.addWidget(self.check_v)
hWarn = QHBoxLayout()
hWarn.addWidget(self.check_W)
hWarn.addWidget(self.comboWarning)
grid.addLayout(hWarn)
grid.addWidget(self.check_x)
grid.addWidget(self.check_3)
#Settings
self._txtPythonPath.setText(settings.PYTHON_EXEC)
options = settings.EXECUTION_OPTIONS.split()
if '-B' in options:
self.check_B.setChecked(True)
if '-d' in options:
self.check_d.setChecked(True)
if '-E' in options:
self.check_E.setChecked(True)
if '-O' in options:
self.check_O.setChecked(True)
if '-OO' in options:
self.check_OO.setChecked(True)
if settings.EXECUTION_OPTIONS.find('-Q') > -1:
self.check_Q.setChecked(True)
index = settings.EXECUTION_OPTIONS.find('-Q')
opt = settings.EXECUTION_OPTIONS[index + 2:].split(' ', 1)[0]
index = self.comboDivision.findText(opt)
self.comboDivision.setCurrentIndex(index)
if '-s' in options:
self.check_s.setChecked(True)
if '-S' in options:
self.check_S.setChecked(True)
if '-t' in options:
self.check_t.setChecked(True)
if '-tt' in options:
self.check_tt.setChecked(True)
if '-v' in options:
self.check_v.setChecked(True)
if settings.EXECUTION_OPTIONS.find('-W') > -1:
self.check_W.setChecked(True)
index = settings.EXECUTION_OPTIONS.find('-W')
opt = settings.EXECUTION_OPTIONS[index + 2:].split(' ', 1)[0]
index = self.comboWarning.findText(opt)
self.comboWarning.setCurrentIndex(index)
if '-x' in options:
self.check_x.setChecked(True)
if '-3' in options:
self.check_3.setChecked(True)
vbox.addWidget(groupExecution)
#Signals
self.connect(self._btnPythonPath,
SIGNAL("clicked()"), self._load_python_path)
self.connect(self._preferences, SIGNAL("savePreferences()"), self.save)
def _load_python_path(self):
# Ask the user for a Python Path
path = QFileDialog.getOpenFileName(self,
translations.TR_SELECT_SELECT_PYTHON_EXEC)
if path:
self._txtPythonPath.setText(path)
def save(self):
# Save all the Execution Preferences
qsettings = IDE.ninja_settings()
qsettings.beginGroup('preferences')
qsettings.beginGroup('execution')
qsettings.setValue('pythonPath', self._txtPythonPath.text())
settings.PYTHON_PATH = self._txtPythonPath.text()
options = ''
if self.check_B.isChecked():
options += ' -B'
if self.check_d.isChecked():
options += ' -d'
if self.check_E.isChecked():
options += ' -E'
if self.check_O.isChecked():
options += ' -O'
if self.check_OO.isChecked():
options += ' -OO'
if self.check_Q.isChecked():
options += ' -Q' + self.comboDivision.currentText()
if self.check_s.isChecked():
options += ' -s'
if self.check_S.isChecked():
options += ' -S'
if self.check_t.isChecked():
options += ' -t'
if self.check_tt.isChecked():
options += ' -tt'
if self.check_v.isChecked():
options += ' -v'
if self.check_W.isChecked():
options += ' -W' + self.comboWarning.currentText()
if self.check_x.isChecked():
options += ' -x'
if self.check_3.isChecked():
options += ' -3'
settings.EXECUTION_OPTIONS = options
qsettings.setValue('executionOptions', options)
qsettings.endGroup()
qsettings.endGroup()
preferences.Preferences.register_configuration('GENERAL', GeneralExecution,
translations.TR_PREFERENCES_EXECUTION,
weight=1, subsection='EXECUTION')
"""
|
centaurialpha/ninja-ide
|
ninja_ide/gui/dialogs/preferences/preferences_execution.py
|
Python
|
gpl-3.0
| 12,033
|
# -*- coding: utf-8 -*-
# Copyright (c) 2001-2005 Twisted Matrix Laboratories.
# TWISTED LIBRARY
# See LICENSE for details.
#
# Copyright (c) 2001-2006
# Allen Short
# Andrew Bennetts
# Apple Computer, Inc.
# Benjamin Bruheim
# Bob Ippolito
# Canonical Limited
# Christopher Armstrong
# David Reid
# Donovan Preston
# Eric Mangold
# Itamar Shtull-Trauring
# James Knight
# Jason A. Mobarak
# Jonathan Lange
# Jonathan D. Simms
# Jp Calderone
# Jürgen Hermann
# Kevin Turner
# Mary Gardiner
# Matthew Lefkowitz
# Massachusetts Institute of Technology
# Moshe Zadka
# Paul Swartz
# Pavel Pergamenshchik
# Ralph Meijer
# Sean Riley
# Travis B. Hartwell
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
=======================
OSCAR Utility functions
=======================
This file includes functions for dealing with OSCAR datatypes and passwords.
This is the second of two utility modules. OscarUtil is the other. Most of
the AIM components require both OscarUtil and OscarUtil2. All the code in this
module was originally written for Twisted or is derived from Twisted code.
Original Maintainer: U{Paul Swartz<mailto:z3p@twistedmatrix.com>} for Twisted
Modified 12 Jul 2007 by Jinna Lei for Kamaelia.
"""
from __future__ import nested_scopes
import struct
import md5
from Kamaelia.Support.OscarUtil import *
def SNAC(fam, sub, data, id=1, flags=[0, 0]):
    """Construct a SNAC packet from the given family, subtype and payload."""
    # The request id mostly doesn't matter, unless this is a
    # query-response situation.
    header = Double(fam) + Double(sub) + Single(flags[0]) + Single(flags[1])
    return header + Quad(id) + data
def readSNAC(data):
    """Put a SNAC off the wire into a slightly more useable form.

    Returns ``[[family, subtype, flag1, flag2, requestid], payload]``,
    or ``None`` when *data* is too short to contain a 10-byte SNAC
    header.
    """
    header = "!HHBBL"
    try:
        head = [list(struct.unpack(header, data[:10]))]
        return head + [data[10:]]
    except struct.error:
        # Fewer than 10 bytes: not a parseable SNAC.  The original code
        # returned the undefined name `error` here, which raised
        # NameError instead of signalling the bad packet.
        return None
def TLV(type, value):
    """Construct a TLV (type-length-value) record from the given data.

    NOTE(review): this is Python 2 code -- concatenating ``str(value)``
    onto the packed header relies on str being bytes; under Python 3
    this would raise TypeError.  Confirm before porting.
    """
    header = "!HH"
    head = struct.pack(header, type, len(value))
    return head + str(value)
def readTLVs(data, count=None):
    """
    Unpack a string of TLVs into a dictionary {TLV type: TLV value}.

    Optional keywords:
      - count -- how many TLVs to unpack at a time.  If count is less
        than the number of TLVs in the string, the dictionary is
        returned together with the remaining TLV string.
    """
    header = "!HH"
    parsed = {}
    # Stop once the input is exhausted or `count` records were decoded.
    while data and len(parsed) != count:
        tlv_type, tlv_len = struct.unpack(header, data[:4])
        parsed[tlv_type] = data[4:4 + tlv_len]
        data = data[4 + tlv_len:]
    if not count:
        return parsed
    return parsed, data
def encryptPasswordMD5(password, key):
    """Return the MD5 challenge-response hash used by OSCAR authentication.

    Computes md5(key + md5(password) + magic string), where the magic
    string is mandated by the AIM protocol.
    """
    # hashlib replaces the legacy `md5` module (removed in modern
    # Python); the magic string is a bytes literal so the digest also
    # works on Python 3 (byte-identical behaviour on Python 2, where
    # str is bytes).
    import hashlib
    m = hashlib.md5()
    m.update(key)
    m.update(hashlib.md5(password).digest())
    m.update(b"AOL Instant Messenger (SM)")
    return m.digest()
def encryptPasswordICQ(password):
    """
    Encrypt a password the old, relatively insecure ICQ way.  Not used
    very often.
    """
    # Fixed XOR key mandated by the legacy ICQ login protocol.
    key = [0xF3, 0x26, 0x81, 0xC4, 0x39, 0x86, 0xDB, 0x92,
           0x71, 0xA3, 0xB9, 0xE6, 0x53, 0x7A, 0x95, 0x7C]
    return "".join(
        chr(ord(ch) ^ key[i % len(key)])
        for i, ch in enumerate(password))
|
sparkslabs/kamaelia_
|
Code/Python/Kamaelia/Kamaelia/Support/OscarUtil2.py
|
Python
|
apache-2.0
| 4,178
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Exception handling statements: assert, etc."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.util import tf_inspect
def assert_stmt(expression1, expression2):
  """Functional form of an assert statement.

  Mirrors the semantics of the Python assert statement; the concrete
  implementations may deviate from it (see the respective overloads).
  The assert statement should not be used for control flow, and the
  assertion expressions should be free of side effects.

  Args:
    expression1: Any. The value being asserted.
    expression2: Callable[[], Any], returns the expression to include in
      the error message when expression1 evaluates to False. When
      expression1 is True, the result of expression2 will not be
      evaluated; however, expression2 itself may be evaluated in some
      implementations.

  Returns:
    Any, implementation-dependent.

  Raises:
    ValueError: if any arguments are illegal.
  """
  if not callable(expression2):
    raise ValueError('{} must be a callable'.format(expression2))
  argspec = tf_inspect.getargspec(expression2)
  if argspec.args or argspec.keywords:
    raise ValueError('{} may not have any arguments'.format(expression2))
  # Dispatch on whether the asserted value is a TF tensor or plain Python.
  if tensor_util.is_tf_type(expression1):
    return _tf_assert_stmt(expression1, expression2)
  return _py_assert_stmt(expression1, expression2)
def _tf_assert_stmt(expression1, expression2):
  """Overload of assert_stmt that stages a TF Assert.

  This implementation deviates from Python semantics as follows:
  (1) the assertion is verified regardless of the state of __debug__
  (2) on assertion failure, the graph execution will fail with
      tensorflow.errors.ValueError, rather than AssertionError.

  Args:
    expression1: tensorflow.Tensor, must evaluate to a tf.bool scalar
    expression2: Callable[[], Union[tensorflow.Tensor, List[tensorflow.Tensor]]]

  Returns:
    tensorflow.Operation
  """
  message_tensors = expression2()
  # Assert expects a list of data tensors; wrap a bare tensor.
  if isinstance(message_tensors, list):
    data = message_tensors
  else:
    data = [message_tensors]
  return control_flow_ops.Assert(expression1, data)
def _py_assert_stmt(expression1, expression2):
  """Overload of assert_stmt that executes a Python assert statement."""
  # expression2 is only evaluated when expression1 is falsy; the whole
  # assert is skipped entirely under `python -O` (standard assert
  # semantics, which this overload intentionally preserves).
  assert expression1, expression2()
  return None
|
frreiss/tensorflow-fred
|
tensorflow/python/autograph/operators/exceptions.py
|
Python
|
apache-2.0
| 3,224
|
#!/usr/bin/env python
# -*- coding: utf-8; tab-width: 4; indent-tabs-mode: t -*-
#
# NetProfile: Core module - Modules
# © Copyright 2013-2015 Alex 'Unik' Unigovsky
#
# This file is part of NetProfile.
# NetProfile is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later
# version.
#
# NetProfile is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General
# Public License along with NetProfile. If not, see
# <http://www.gnu.org/licenses/>.
from __future__ import (
unicode_literals,
print_function,
absolute_import,
division
)
__all__ = [
'NPModule',
'AddressType',
'PhoneType',
'ContactInfoType',
'UserState',
'User',
'Group',
'Privilege',
'Capability',
'UserCapability',
'GroupCapability',
'ACL',
'UserACL',
'GroupACL',
'UserGroup',
'SecurityPolicyOnExpire',
'SecurityPolicy',
'FileFolderAccessRule',
'FileFolder',
'File',
'FileChunk',
'Tag',
'LogType',
'LogAction',
'LogData',
'NPSession',
'PasswordHistory',
'GlobalSettingSection',
'UserSettingSection',
'GlobalSetting',
'UserSettingType',
'UserSetting',
'DataCache',
'DAVLock',
'Calendar',
'CalendarImport',
'Event',
'CommunicationType',
'UserCommunicationChannel',
'UserPhone',
'UserEmail',
'HWAddrHexIEEEFunction',
'HWAddrHexLinuxFunction',
'HWAddrHexWindowsFunction',
'HWAddrUnhexFunction',
'global_setting'
]
import io
import errno
import string
import random
import re
import hashlib
import datetime as dt
import urllib
import itertools
import base64
import icalendar
from collections import defaultdict
from sqlalchemy import (
BINARY,
Column,
FetchedValue,
ForeignKey,
Index,
LargeBinary,
PickleType,
Sequence,
TIMESTAMP,
Unicode,
UnicodeText,
event,
func,
text,
or_,
and_
)
from sqlalchemy.orm import (
backref,
deferred,
joinedload,
relationship,
validates
)
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.ext.mutable import Mutable
from sqlalchemy.orm.collections import attribute_mapped_collection
from sqlalchemy.orm.exc import NoResultFound
from netprofile import (
PY3,
inst_id
)
from netprofile.common import (
ipaddr,
cal
)
from netprofile.common.phps import HybridPickler
from netprofile.common.threadlocal import magic
from netprofile.common.cache import cache
from netprofile.db.connection import (
Base,
DBSession
)
from netprofile.db.fields import (
ASCIIFixedString,
ASCIIString,
DeclEnum,
ExactUnicode,
Int8,
IPv4Address,
IPv6Address,
LargeBLOB,
NPBoolean,
UInt8,
UInt16,
UInt32,
npbool
)
from netprofile.ext.wizards import (
ExtJSWizardField,
SimpleWizard,
Step,
Wizard
)
from netprofile.ext.columns import MarkupColumn
from netprofile.dav import (
IDAVFile,
IDAVCollection,
IDAVPrincipal,
DAVAllPropsSet,
DAVACEValue,
DAVACLValue,
DAVPrincipalValue,
DAVResourceTypeValue,
dprops
)
from netprofile.ext.filters import (
SelectFilter
)
from netprofile.db.ddl import (
Comment,
CurrentTimestampDefault,
SQLFunction,
SQLFunctionArgument,
Trigger
)
from pyramid.response import (
FileIter,
Response
)
from pyramid.threadlocal import get_current_request
from pyramid.i18n import (
TranslationStringFactory,
get_localizer
)
from pyramid.security import (
Allow, Deny,
Everyone, Authenticated,
DENY_ALL,
has_permission
)
from zope.interface import implementer
_ = TranslationStringFactory('netprofile_core')
_DEFAULT_DICT = 'netprofile_core:dicts/np_cmb_rus'
# Unix-style permission bits for VFS objects
# (owner/group/other x read/write/execute), mirroring chmod semantics.
F_OWNER_READ = 0x0100
F_OWNER_WRITE = 0x0080
F_OWNER_EXEC = 0x0040
F_GROUP_READ = 0x0020
F_GROUP_WRITE = 0x0010
F_GROUP_EXEC = 0x0008
F_OTHER_READ = 0x0004
F_OTHER_WRITE = 0x0002
F_OTHER_EXEC = 0x0001
# Convenience masks combining the per-class bits above.
F_OWNER_ALL = 0x01c0
F_GROUP_ALL = 0x0038
F_OTHER_ALL = 0x0007
F_RIGHTS_ALL = 0x01ff
# Defaults: rw-rw-r-- for files, rwxrwxr-x for directories.
F_DEFAULT_FILES = F_OWNER_READ | F_OWNER_WRITE | F_GROUP_READ | F_GROUP_WRITE | F_OTHER_READ
F_DEFAULT_DIRS = F_OWNER_ALL | F_GROUP_ALL | F_OTHER_READ | F_OTHER_EXEC
# Internal VFS open-mode flags.
_VFS_READ = 0x01
_VFS_WRITE = 0x02
_VFS_APPEND = 0x04
_VFS_TRUNCATE = 0x10
def _gen_xcap(cls, k, v):
    """
    Creator for privilege-related attribute-mapped collections.

    Looks up the Privilege with code *k* and wraps it together with
    value *v* in a new *cls* instance.  Raises KeyError when no such
    privilege exists.
    """
    try:
        priv = DBSession.query(Privilege).filter(Privilege.code == k).one()
    except NoResultFound:
        # Query.one() raises instead of returning None, so the original
        # `if priv is None` check could never fire; translate the ORM
        # exception into the intended KeyError.
        raise KeyError('Unknown privilege %s' % k)
    return cls(privilege=priv, value=v)
def _gen_xacl(cls, k, v):
    """
    Creator for ACL-related attribute-mapped collections.

    *k* is a (privilege code, resource) tuple.  Raises KeyError when
    the privilege code is unknown.
    """
    try:
        priv = DBSession.query(Privilege).filter(Privilege.code == k[0]).one()
    except NoResultFound:
        # Query.one() raises instead of returning None, so the original
        # `if priv is None` check was unreachable dead code.
        raise KeyError('Unknown privilege %s' % k[0])
    return cls(privilege=priv, resource=k[1], value=v)
def _gen_user_setting(k, v):
    """
    Creator for user-setting-related attribute-mapped collections.

    Looks up the UserSettingType named *k* and converts *v* to its DB
    representation.  Raises KeyError when the setting type is unknown,
    matching the behaviour of the sibling _gen_xcap/_gen_xacl creators
    (previously an unknown name leaked a raw NoResultFound).
    """
    try:
        ust = DBSession.query(UserSettingType).filter(UserSettingType.name == k).one()
    except NoResultFound:
        raise KeyError('Unknown user setting type %s' % k)
    return UserSetting(type=ust, value=ust.param_to_db(v))
class NPModule(Base):
    """
    NetProfile module registry.

    One row per installed NetProfile module, tracking its name, the
    currently installed version and whether it is enabled.
    """
    __tablename__ = 'np_modules'
    __table_args__ = (
        Comment('NetProfile modules'),
        Index('np_modules_u_name', 'name', unique=True),
        Index('np_modules_i_enabled', 'enabled'),
        {
            'mysql_engine' : 'InnoDB',
            'mysql_charset' : 'utf8',
            'info' : {
                'cap_menu' : 'BASE_ADMIN',
                'cap_read' : 'BASE_ADMIN',
                'cap_create' : 'BASE_ADMIN',
                'cap_edit' : 'BASE_ADMIN',
                'cap_delete' : 'BASE_ADMIN',
                'show_in_menu' : 'admin',
                'menu_name' : _('Modules'),
                'default_sort' : ({ 'property': 'name' ,'direction': 'ASC' },),
                'grid_view' : ('npmodid', 'name', 'curversion', 'enabled'),
                'grid_hidden' : ('npmodid',),
                'easy_search' : ('name',)
            }
        }
    )
    # Surrogate primary key, assigned by the sequence.
    id = Column(
        'npmodid',
        UInt32(),
        Sequence('np_modules_npmodid_seq'),
        Comment('NetProfile module ID'),
        primary_key=True,
        nullable=False,
        info={
            'header_string' : _('ID')
        }
    )
    name = Column(
        ASCIIString(255),
        Comment('NetProfile module name'),
        nullable=False,
        default=None,
        info={
            'header_string' : _('Name'),
            'column_flex' : 1
        }
    )
    current_version = Column(
        'curversion',
        ASCIIString(32),
        Comment('NetProfile module current version'),
        nullable=False,
        default='0.0.1',
        server_default='0.0.1',
        info={
            'header_string' : _('Version'),
            'column_flex' : 1
        }
    )
    enabled = Column(
        NPBoolean(),
        Comment('Is module enabled?'),
        nullable=False,
        default=False,
        server_default=npbool(False),
        info={
            'header_string' : _('Enabled')
        }
    )
    privileges = relationship(
        'Privilege',
        backref=backref('module', innerjoin=True),
        cascade='all, delete-orphan',
        passive_deletes=True
    )
    global_sections = relationship(
        'GlobalSettingSection',
        backref=backref('module', innerjoin=True),
        cascade='all, delete-orphan',
        passive_deletes=True
    )
    user_sections = relationship(
        'UserSettingSection',
        backref=backref('module', innerjoin=True),
        cascade='all, delete-orphan',
        passive_deletes=True
    )
    global_settings = relationship(
        'GlobalSetting',
        backref=backref('module', innerjoin=True),
        cascade='all, delete-orphan',
        passive_deletes=True
    )
    user_setting_types = relationship(
        'UserSettingType',
        backref=backref('module', innerjoin=True),
        cascade='all, delete-orphan',
        passive_deletes=True
    )

    def __init__(self, id=None, name=None, current_version='1.0.0', enabled=False):
        # The original signature used `id=id`, which captured the
        # class-level Column object as the default value, so NPModule()
        # assigned a Column instance to self.id.  None lets the database
        # sequence assign the key instead.
        self.id = id
        self.name = name
        self.current_version = current_version
        self.enabled = enabled

    def __repr__(self):
        return 'NPModule(%s,%s,%s,%s)' % (
            repr(self.id),
            repr(self.name),
            repr(self.current_version),
            repr(self.enabled)
        )

    def __str__(self):
        return '%s' % str(self.name)

    def get_tree_node(self, req, mod):
        """Return an ExtJS tree-node dict describing this module."""
        loc = get_localizer(req)
        return {
            'id' : self.name,
            'text' : loc.translate(mod.name),
            'leaf' : False,
            'expanded' : True,
            'iconCls' : 'ico-module'
        }
class AddressType(DeclEnum):
    """
    Address type ENUM.
    """
    home = 'home', _('Home Address'), 10
    work = 'work', _('Work Address'), 20
    postal = 'post', _('Postal Address'), 30
    parcel = 'parc', _('Parcel Address'), 40
    billing = 'bill', _('Billing Address'), 50

    @classmethod
    def ldap_address_attrs(cls, data):
        """Return the LDAP attribute names matching an address type."""
        for member, attrs in (
                (cls.home, ('homePostalAddress',)),
                (cls.work, ('street',)),
                (cls.postal, ('postalAddress',))):
            if data == member:
                return attrs
        # Parcel and billing addresses have no LDAP counterpart.
        return ()
class PhoneType(DeclEnum):
    """
    Phone type ENUM.
    """
    home = 'home', _('Home Phone'), 10
    cell = 'cell', _('Cell Phone'), 20
    work = 'work', _('Work Phone'), 30
    pager = 'pager', _('Pager Number'), 40
    fax = 'fax', _('Fax Number'), 50
    rec = 'rec', _('Receptionist'), 60

    @classmethod
    def icon(cls, data):
        """Return the icon name for a phone type (mobile vs. landline)."""
        if data == cls.cell:
            return 'mobile_small'
        return 'phone_small'

    @classmethod
    def prefix(cls, data):
        """Return the short translatable label shown before a number."""
        for member, label in (
                (cls.home, _('home')),
                (cls.cell, _('cell')),
                (cls.work, _('work')),
                (cls.pager, _('pg.')),
                (cls.fax, _('fax')),
                (cls.rec, _('rec.'))):
            if data == member:
                return label
        return _('tel.')

    @classmethod
    def ldap_attrs(cls, data):
        """Return the LDAP attribute names matching a phone type."""
        for member, attrs in (
                (cls.home, ('homePhone',)),
                (cls.cell, ('mobile',)),
                (cls.work, ('telephoneNumber',)),
                (cls.pager, ('pager',)),
                (cls.fax, ('facsimileTelephoneNumber',)),
                (cls.rec, ('companyPhone',))):
            if data == member:
                return attrs
        return ('otherPhone',)
class ContactInfoType(DeclEnum):
    """
    Scope of contact information ENUM.
    """
    # Member values: (DB code, translatable label, sort order).
    home = 'home', _('home'), 10
    work = 'work', _('work'), 20
class UserState(DeclEnum):
    """
    Current user state ENUM.
    """
    # Member values: (single-char DB code, translatable label, sort order).
    pending = 'P', _('Pending'), 10
    active = 'A', _('Active'), 20
    deleted = 'D', _('Deleted'), 30
def _validate_user_password(model, colname, values, req):
    """Validate a new password in *values* against the user's security policy.

    Returns a list of localized error strings on failure, or None when
    validation passes or does not apply (no password supplied, unknown
    user, no effective policy).
    """
    if colname not in values:
        return None
    try:
        user_id = int(values['uid'])
    except (KeyError, TypeError, ValueError):
        return None
    db = DBSession()
    user = db.query(User).get(user_id)
    if user is None:
        return None
    new_password = values[colname]
    if new_password is None:
        return None
    policy = user.effective_policy
    if policy is None:
        return None
    now = dt.datetime.now()
    result = policy.check_new_password(req, user, new_password, now)
    if result is True:
        return None
    # The policy returned a collection of failure codes; localize them.
    return secpol_errors(result, get_localizer(req))
def secpol_errors(checkpw, loc):
    """Translate security-policy failure codes into localized messages."""
    # (failure code, translatable message) pairs, in reporting order.
    catalog = (
        ('pw_length_min', _('Password is too short.')),
        ('pw_length_max', _('Password is too long.')),
        ('pw_ctype_min', _('Password has not enough character types.')),
        ('pw_ctype_max', _('Password has too many character types.')),
        ('pw_dict_check', _('Password was found in a dictionary.')),
        ('pw_hist_check', _('You used this password not too long ago.')),
        ('pw_age_min', _('You\'ve just changed your password.')),
    )
    return [loc.translate(message)
            for code, message in catalog
            if code in checkpw]
@implementer(IDAVFile, IDAVPrincipal)
class User(Base):
"""
NetProfile operator user.
"""
__tablename__ = 'users'
__table_args__ = (
Comment('Users'),
Index('users_u_login', 'login', unique=True),
Index('users_i_gid', 'gid'),
Index('users_i_secpolid', 'secpolid'),
Index('users_i_state', 'state'),
Index('users_i_enabled', 'enabled'),
Index('users_i_managerid', 'managerid'),
Index('users_i_phfileid', 'phfileid'),
Trigger('after', 'insert', 't_users_ai'),
Trigger('after', 'update', 't_users_au'),
Trigger('after', 'delete', 't_users_ad'),
{
'mysql_engine' : 'InnoDB',
'mysql_charset' : 'utf8',
'info' : {
'cap_menu' : 'BASE_USERS',
'cap_read' : 'USERS_LIST',
'cap_create' : 'USERS_CREATE',
'cap_edit' : 'USERS_EDIT',
'cap_delete' : 'USERS_DELETE',
'show_in_menu' : 'admin',
'menu_name' : _('Users'),
'default_sort' : ({ 'property': 'login' ,'direction': 'ASC' },),
'grid_view' : ('uid', 'login', 'name_family', 'name_given', 'name_middle', 'manager', 'group', 'enabled', 'state', 'security_policy'),
'grid_hidden' : ('uid', 'name_middle', 'manager', 'security_policy'),
'form_view' : (
'login', 'name_family', 'name_given', 'name_middle',
'org', 'orgunit', 'title',
'group', 'secondary_groups', 'enabled',
'pass', 'security_policy', 'state',
'manager', 'photo', 'descr'
),
'easy_search' : ('login', 'name_family'),
'create_wizard' :
Wizard(
Step('login', 'pass', 'group', title=_('New user')),
Step('name_family', 'name_given', 'name_middle', 'enabled', 'state', title=_('New user details')),
title=_('Add new user')
),
'detail_pane' : ('netprofile_core.views', 'dpane_simple'),
'ldap_classes' : ('npUser', 'posixAccount', 'shadowAccount'),
'ldap_rdn' : 'login'
}
}
)
id = Column(
'uid',
UInt32(),
Sequence('users_uid_seq'),
Comment('User ID'),
primary_key=True,
nullable=False,
info={
'header_string' : _('ID'),
'ldap_attr' : 'uidNumber'
}
)
group_id = Column(
'gid',
UInt32(),
ForeignKey('groups.gid', name='users_fk_gid', onupdate='CASCADE'),
Comment('Group ID'),
nullable=False,
info={
'header_string' : _('Group'),
'filter_type' : 'list',
'ldap_attr' : 'gidNumber',
'column_flex' : 2
}
)
security_policy_id = Column(
'secpolid',
UInt32(),
ForeignKey('secpol_def.secpolid', name='users_fk_secpolid', onupdate='CASCADE'),
Comment('Security policy ID'),
nullable=True,
default=None,
server_default=text('NULL'),
info={
'header_string' : _('Security Policy'),
'column_flex' : 2
}
)
state = Column(
UserState.db_type(),
Comment('User state'),
nullable=False,
default=UserState.pending,
server_default=UserState.pending,
info={
'header_string' : _('State'),
'ldap_attr' : 'npAccountStatus',
'ldap_value' : 'ldap_status'
}
)
login = Column(
ExactUnicode(48),
Comment('Login string'),
nullable=False,
info={
'header_string' : _('Username'),
'writer' : 'change_login',
'pass_request' : True,
'ldap_attr' : ('uid', 'xmozillanickname', 'gecos', 'displayName'),
'column_flex' : 2
}
)
password = Column(
'pass',
ASCIIString(255),
Comment('Some form of password'),
nullable=False,
info={
'header_string' : _('Password'),
'secret_value' : True,
'editor_xtype' : 'passwordfield',
'writer' : 'change_password',
'validator' : _validate_user_password,
'pass_request' : True,
'ldap_attr' : 'userPassword', # FIXME!
'ldap_value' : 'ldap_password'
}
)
a1_hash = Column(
'a1hash',
ASCIIFixedString(32),
Comment('DIGEST-MD5 A1 hash'),
nullable=True,
default=None,
server_default=text('NULL'),
info={
'header_string' : _('A1 Hash'),
'secret_value' : True,
'editor_xtype' : None,
'ldap_attr' : 'npDigestHA1'
}
)
enabled = Column(
NPBoolean(),
Comment('Is logging in enabled?'),
nullable=False,
default=False,
server_default=npbool(False),
info={
'header_string' : _('Enabled')
}
)
name_family = Column(
Unicode(255),
Comment('Family name'),
nullable=True,
default=None,
server_default=text('NULL'),
info={
'header_string' : _('Family Name'),
'ldap_attr' : ('sn', 'cn'), # FIXME: move 'cn' to dynamic attr
'column_flex' : 3
}
)
name_given = Column(
Unicode(255),
Comment('Given name'),
nullable=True,
default=None,
server_default=text('NULL'),
info={
'header_string' : _('Given Name'),
'ldap_attr' : 'givenName',
'column_flex' : 3
}
)
name_middle = Column(
Unicode(255),
Comment('Middle name'),
nullable=True,
default=None,
server_default=text('NULL'),
info={
'header_string' : _('Middle Name'),
'ldap_attr' : 'initials',
'column_flex' : 3
}
)
organization = Column(
'org',
Unicode(255),
Comment('Organization name'),
nullable=True,
default=None,
server_default=text('NULL'),
info={
'header_string' : _('Organization'),
'ldap_attr' : 'o'
}
)
organizational_unit = Column(
'orgunit',
Unicode(255),
Comment('Organizational unit name'),
nullable=True,
default=None,
server_default=text('NULL'),
info={
'header_string' : _('Organizational Unit'),
'ldap_attr' : 'ou'
}
)
title = Column(
Unicode(255),
Comment('Title'),
nullable=True,
default=None,
server_default=text('NULL'),
info={
'header_string' : _('Position'),
'ldap_attr' : 'title'
}
)
manager_id = Column(
'managerid',
UInt32(),
ForeignKey('users.uid', name='users_fk_managerid', ondelete='SET NULL', onupdate='CASCADE'),
Comment('Manager user ID'),
nullable=True,
default=None,
server_default=text('NULL'),
info={
'header_string' : _('Manager'),
'column_flex' : 2
}
)
ip_address = Column(
'ipaddr',
IPv4Address(),
Comment('Lock-in IP address'),
nullable=True,
default=None,
server_default=text('NULL'),
info={
'header_string' : _('IP Address')
}
)
random_key = Column(
'randomkey',
ASCIIString(64),
Comment('Activation random key'),
nullable=True,
default=None,
server_default=text('NULL'),
info={
'header_string' : _('Random Key')
}
)
photo_id = Column(
'phfileid',
UInt32(),
ForeignKey('files_def.fileid', name='users_fk_phfileid', ondelete='SET NULL', onupdate='CASCADE', use_alter=True),
Comment('Photo file ID'),
nullable=True,
default=None,
server_default=text('NULL'),
info={
'header_string' : _('Photo'),
'ldap_attr' : 'jpegPhoto',
'ldap_value' : 'ldap_photo',
'editor_xtype' : 'fileselect'
}
)
description = Column(
'descr',
UnicodeText(),
Comment('User description'),
nullable=True,
default=None,
server_default=text('NULL'),
info={
'header_string' : _('Description'),
'ldap_attr' : ('comment', 'description')
}
)
photo = relationship(
'File',
backref='photo_of',
foreign_keys=(photo_id,)
)
secondary_groupmap = relationship(
'UserGroup',
backref=backref('user', innerjoin=True),
cascade='all, delete-orphan',
passive_deletes=True
)
dav_locks = relationship(
'DAVLock',
backref='user',
cascade='all, delete-orphan',
passive_deletes=True
)
subordinates = relationship(
'User',
backref=backref('manager', remote_side=[id])
)
caps = relationship(
'UserCapability',
collection_class=attribute_mapped_collection('code'),
backref=backref('user', innerjoin=True),
cascade='all, delete-orphan',
passive_deletes=True
)
aclmap = relationship(
'UserACL',
collection_class=attribute_mapped_collection('code_res'),
backref=backref('user', innerjoin=True),
cascade='all, delete-orphan',
passive_deletes=True
)
files = relationship(
'File',
backref='user',
primaryjoin='File.user_id == User.id'
)
folders = relationship(
'FileFolder',
backref='user',
primaryjoin='FileFolder.user_id == User.id'
)
password_history = relationship(
'PasswordHistory',
backref=backref('user', innerjoin=True),
cascade='all, delete-orphan',
passive_deletes=True
)
setting_map = relationship(
'UserSetting',
collection_class=attribute_mapped_collection('name'),
backref=backref('user', innerjoin=True),
cascade='all, delete-orphan',
passive_deletes=True
)
data_cache_map = relationship(
'DataCache',
collection_class=attribute_mapped_collection('name'),
backref=backref('user', innerjoin=True),
cascade='all, delete-orphan',
passive_deletes=True
)
sessions = relationship(
'NPSession',
backref='user',
passive_deletes=True
)
calendars = relationship(
'Calendar',
backref=backref('user', innerjoin=True),
cascade='all, delete-orphan',
passive_deletes=True
)
calendar_imports = relationship(
'CalendarImport',
backref=backref('user', innerjoin=True),
cascade='all, delete-orphan',
passive_deletes=True
)
events = relationship(
'Event',
backref=backref('user', innerjoin=True),
cascade='all, delete-orphan',
passive_deletes=True
)
comm_channels = relationship(
'UserCommunicationChannel',
backref=backref('user', innerjoin=True),
cascade='all, delete-orphan',
passive_deletes=True
)
phones = relationship(
'UserPhone',
backref=backref('user', innerjoin=True),
cascade='all, delete-orphan',
passive_deletes=True
)
email_addresses = relationship(
'UserEmail',
backref=backref('user', innerjoin=True),
cascade='all, delete-orphan',
passive_deletes=True
)
secondary_groups = association_proxy(
'secondary_groupmap',
'group',
creator=lambda v: UserGroup(group=v)
)
privileges = association_proxy(
'caps',
'value',
creator=lambda k,v: _gen_xcap(UserCapability, k, v)
)
acls = association_proxy(
'aclmap',
'value',
creator=lambda k,v: _gen_xacl(UserACL, k, v)
)
settings = association_proxy(
'setting_map',
'python_value',
creator=_gen_user_setting
)
data_cache = association_proxy(
'data_cache_map',
'value',
creator=lambda k,v: DataCache(name=k, value=v)
)
def __init__(self, **kwargs):
super(User, self).__init__(**kwargs)
self.vcard = None
self.mod_pw = False
def __str__(self):
return '%s' % str(self.login)
	@hybrid_property
	def name_full(self):
		# Hybrid: evaluates in Python on instances and as an SQL
		# concatenation in queries.
		# NOTE(review): both name columns are nullable; on the Python
		# side this raises TypeError when either is None — confirm
		# callers guarantee both are set.
		return self.name_family + ' ' + self.name_given
def generate_salt(self, salt_len=4, system_rng=True, chars=(string.ascii_lowercase + string.ascii_uppercase + string.digits)):
if system_rng:
try:
rng = random.SystemRandom()
except NotImplementedError:
rng = random
else:
rng = random
return ''.join(rng.choice(chars) for i in range(salt_len))
def ldap_status(self, settings):
if self.state == UserState.pending:
return 'noaccess'
elif self.state == UserState.active:
if self.enabled:
return 'active'
else:
return 'disabled'
else:
return 'deleted'
def ldap_password(self, settings):
pw = getattr(self, 'mod_pw', False)
if not pw:
raise ValueError('Temporary plaintext password was not found')
salt = self.generate_salt(4).encode()
ctx = hashlib.sha1()
ctx.update(pw.encode())
ctx.update(salt)
return '{SSHA}' + base64.b64encode(ctx.digest() + salt).decode()
def ldap_photo(self, settings):
if not self.photo:
return
ph = self.photo
if not ph.mime_type:
return
if ph.plain_mime_type != 'image/jpeg':
return
return ph.get_data(sess=DBSession())
def generate_a1hash(self, realm):
ctx = hashlib.md5()
ctx.update(('%s:%s:%s' % (self.login, realm, self.mod_pw)).encode())
return ctx.hexdigest()
def check_password(self, pwd, hash_con='sha1', salt_len=4):
if isinstance(pwd, str):
pwd = pwd.encode()
salt = self.password[:salt_len].encode()
orig = self.password[salt_len:]
ctx = hashlib.new(hash_con)
ctx.update(salt)
ctx.update(pwd)
return ctx.hexdigest() == orig
def change_login(self, newlogin, opts, request):
reg = request.registry
self.login = newlogin
if getattr(self, 'mod_pw', False):
realm = reg.settings.get('netprofile.auth.digest_realm', 'NetProfile UI')
self.a1_hash = self.generate_a1hash(realm)
	def change_password(self, newpwd, opts, request):
		# Column writer for the 'pass' field: validates the new password
		# against the effective security policy, then stores the salted
		# hash and refreshes derived credentials.
		# Cache the plaintext for this request so LDAP / digest values
		# can be derived from it (see ldap_password()).
		self.mod_pw = newpwd
		ts = dt.datetime.now()
		secpol = self.effective_policy
		if secpol:
			checkpw = secpol.check_new_password(request, self, newpwd, ts)
			if checkpw is not True:
				# FIXME: error reporting
				raise ValueError(checkpw)
		reg = request.registry
		# Hash algorithm and salt length are deployment-configurable.
		hash_con = reg.settings.get('netprofile.auth.hash', 'sha1')
		salt_len = int(reg.settings.get('netprofile.auth.salt_length', 4))
		salt = self.generate_salt(salt_len)
		ctx = hashlib.new(hash_con)
		ctx.update(salt.encode())
		ctx.update(newpwd.encode())
		newhash = ctx.hexdigest()
		# Stored format: plaintext salt prefix + hex digest
		# (mirrored by check_password()).
		self.password = salt + newhash
		if self.login:
			realm = reg.settings.get('netprofile.auth.digest_realm', 'NetProfile UI')
			self.a1_hash = self.generate_a1hash(realm)
		if secpol:
			# Let the policy do post-change bookkeeping (e.g. history).
			secpol.after_new_password(request, self, newpwd, ts)
		if request.user == self:
			# Reset password-age tracking for the active session.
			request.session['sess.nextcheck'] = ts
			request.session['sess.pwage'] = 'ok'
			if 'sess.pwdays' in request.session:
				del request.session['sess.pwdays']
@property
def last_password_change(self):
return DBSession().query(PasswordHistory)\
.filter(PasswordHistory.user == self)\
.order_by(PasswordHistory.timestamp.desc())\
.first()
@property
def sess_timeout(self):
secpol = self.effective_policy
if not secpol:
return None
sto = secpol.sess_timeout
if (not sto) or (sto < 30):
return None
return sto
@property
def flat_privileges(self):
gpriv = self.group.flat_privileges
for sg in self.secondary_groups:
if sg == self.group:
continue
gpriv.update(sg.flat_privileges)
gpriv.update(self.privileges)
return gpriv
@property
def group_names(self):
names = []
if self.group:
names.append(self.group.name)
for sg in self.secondary_groups:
if sg == self.group:
continue
names.append(sg.name)
return names
@property
def effective_policy(self):
if self.security_policy:
return self.security_policy
grp = self.group
secpol = None
while grp and (secpol is None):
secpol = grp.security_policy
grp = grp.parent
return secpol
def client_settings(self, req):
sess = DBSession()
ret = {}
for ust in sess.query(UserSettingType):
if not ust.client_ok:
continue
if ust.name in self.settings:
ret[ust.name] = self.settings[ust.name]
else:
ret[ust.name] = ust.parse_param(ust.default)
return ret
def client_acls(self, req):
ret = {}
for priv, res in self.acls:
if priv not in ret:
ret[priv] = {}
ret[priv][res] = self.acls[(priv, res)]
return ret
def generate_session(self, req, sname, now=None):
if now is None:
now = dt.datetime.now()
npsess = NPSession(
user=self,
login=self.login,
session_name=sname,
start_time=now,
last_time=now
)
if req.remote_addr is not None:
try:
ip = ipaddr.IPAddress(req.remote_addr)
if isinstance(ip, ipaddr.IPv4Address):
npsess.ip_address = ip
elif isinstance(ip, ipaddr.IPv6Address):
npsess.ipv6_address = ip
except ValueError:
pass
secpol = self.effective_policy
if secpol and (not secpol.check_new_session(req, self, npsess, now)):
return None
return npsess
def group_vector(self):
vec = [ self.group_id ]
for sg in self.secondary_groups:
vec.append(sg.id)
return vec
def is_member_of(self, grp):
if self == grp:
return True
if not isinstance(grp, Group):
return False
xgrp = self.group
while xgrp:
if xgrp == grp:
return True
xgrp = xgrp.parent
for xgrp in self.secondary_groups:
if xgrp == grp:
return True
return False
def get_root_folder(self):
if self.group is None:
return None
ff = self.group.effective_root_folder
if ff is None:
root_uid = global_setting('vfs_root_uid')
root_gid = global_setting('vfs_root_gid')
root_rights = global_setting('vfs_root_rights')
allow_read = False
allow_write = False
allow_traverse = False
if self.id == root_uid:
allow_read = bool(root_rights & F_OWNER_READ)
allow_write = bool(root_rights & F_OWNER_WRITE)
allow_traverse = bool(root_rights & F_OWNER_EXEC)
elif root_gid in self.group_vector():
allow_read = bool(root_rights & F_GROUP_READ)
allow_write = bool(root_rights & F_GROUP_WRITE)
allow_traverse = bool(root_rights & F_GROUP_EXEC)
else:
allow_read = bool(root_rights & F_OTHER_READ)
allow_write = bool(root_rights & F_OTHER_WRITE)
allow_traverse = bool(root_rights & F_OTHER_EXEC)
return {
'id' : 'root',
'name' : 'root',
'allow_read' : allow_read,
'allow_write' : allow_write,
'allow_traverse' : allow_traverse,
'parent_write' : (allow_traverse and allow_write)
}
p_wr = False
if ff.parent:
p_wr = ff.parent.can_write(self)
else:
root_uid = global_setting('vfs_root_uid')
root_gid = global_setting('vfs_root_gid')
root_rights = global_setting('vfs_root_rights')
if self.id == root_uid:
p_wr = bool(root_rights & F_OWNER_WRITE)
elif root_gid in self.group_vector():
p_wr = bool(root_rights & F_GROUP_WRITE)
else:
p_wr = bool(root_rights & F_OTHER_WRITE)
return {
'id' : ff.id,
'name' : ff.name,
'allow_read' : ff.can_read(self),
'allow_write' : ff.can_write(self),
'allow_traverse' : ff.can_traverse_path(self),
'parent_write' : p_wr
}
@property
def root_readable(self):
ff = self.group.effective_root_folder
if ff is not None:
return ff.can_read(self)
root_uid = global_setting('vfs_root_uid')
root_gid = global_setting('vfs_root_gid')
root_rights = global_setting('vfs_root_rights')
if self.id == root_uid:
return bool(root_rights & F_OWNER_READ)
if root_gid in self.group_vector():
return bool(root_rights & F_GROUP_READ)
return bool(root_rights & F_OTHER_READ)
@property
def root_writable(self):
ff = self.group.effective_root_folder
if ff is not None:
return ff.can_write(self)
root_uid = global_setting('vfs_root_uid')
root_gid = global_setting('vfs_root_gid')
root_rights = global_setting('vfs_root_rights')
if self.id == root_uid:
return bool(root_rights & F_OWNER_WRITE)
if root_gid in self.group_vector():
return bool(root_rights & F_GROUP_WRITE)
return bool(root_rights & F_OTHER_WRITE)
	@property
	def __name__(self):
		# Traversal / DAV resource name: users are addressed by login.
		return self.login
def ldap_attrs(self, settings):
from netprofile_ldap.ldap import get_dn
groupset = set()
if self.group:
groupset.add(self.group)
for g in self.secondary_groups:
groupset.add(g)
dnlist = []
for g in groupset:
dnlist.append(get_dn(g, settings))
ret = {}
if self.login:
ret['homeDirectory'] = '/home/%s' % (self.login,)
if 'netprofile.ldap.orm.User.default_shell' in settings:
ret['loginShell'] = settings['netprofile.ldap.orm.User.default_shell']
if len(dnlist) > 0:
ret['memberOf'] = dnlist
if len(self.email_addresses) > 0:
ret['mail'] = [str(ea) for ea in self.email_addresses]
phones = defaultdict(list)
for ph in self.phones:
for attr in PhoneType.ldap_attrs(ph.type):
# FIXME: format phone as intl. (probably using phonenumbers lib)
phones[attr].append(ph.number)
if len(phones) > 0:
ret.update(phones)
return ret
	def get_uri(self):
		# DAV URI path components: /users/<login>.
		return [ '', 'users', self.login ]
def dav_props(self, pset):
vcard = getattr(self, 'vcard', None)
if vcard is None:
self.vcard = self._get_vcard()
ret = {}
if dprops.RESOURCE_TYPE in pset:
ret[dprops.RESOURCE_TYPE] = DAVResourceTypeValue(dprops.PRINCIPAL)
if dprops.CONTENT_LENGTH in pset:
ret[dprops.CONTENT_LENGTH] = self.vcard.content_length
if dprops.CONTENT_TYPE in pset:
ret[dprops.CONTENT_TYPE] = 'text/x-vcard'
if dprops.DISPLAY_NAME in pset:
ret[dprops.DISPLAY_NAME] = self.login
if dprops.ETAG in pset:
ret[dprops.ETAG] = '"%s"' % self.vcard.etag
return ret
	def dav_group_members(self, req):
		# Users are leaf principals: they contain no members.
		return set()
def dav_memberships(self, req):
gmset = set()
if self.group:
gmset.add(self.group)
gmset.update(self.secondary_groups)
return gmset
def dav_alt_uri(self, req):
uris = []
for email in self.email_addresses:
uris.append('mailto:' + str(email))
return uris
	def _get_vcard(self):
		# Build a vCard 3.0 HTTP response for this user.  Cached on the
		# instance; also the source of DAV ETag / content-length props.
		card = cal.Card()
		card.add('VERSION', '3.0')
		fname = []
		if self.name_family:
			fname.append(self.name_family)
		if self.name_given:
			fname.append(self.name_given)
		if self.name_middle:
			fname.append(self.name_middle)
		if len(fname) == 0:
			# No real name on file: fall back to the login.
			fname = (self.login,)
		if self.id:
			# Stable URN so CardDAV clients can track the entry.
			card.add('UID', icalendar.vUri('urn:npobj:user:%s:%u' % (
				inst_id,
				self.id
			)))
		card.add('N', cal.vStructuredUnicode(*fname))
		card.add('FN', cal.vUnicode(' '.join(fname)))
		card.add('NICKNAME', cal.vUnicode(self.login))
		for email in self.email_addresses:
			card.add('EMAIL', cal.vEMail(str(email)))
		ical = card.to_ical()
		resp = Response(ical, content_type='text/x-vcard', charset='utf-8')
		# RFC 5987 encoded filename; URL quoting API differs on Py2/Py3.
		if PY3:
			resp.content_disposition = \
				'attachment; filename*=UTF-8\'\'%s.vcf' % (
					urllib.parse.quote(self.login, '')
				)
		else:
			resp.content_disposition = \
				'attachment; filename*=UTF-8\'\'%s.vcf' % (
					urllib.quote(self.login.encode(), '')
				)
		# ETag is the MD5 of the serialized card body.
		ctx = hashlib.md5()
		ctx.update(ical)
		resp.etag = ctx.hexdigest()
		return resp
def dav_get(self, req):
vcard = getattr(self, 'vcard', None)
if vcard is None:
self.vcard = self._get_vcard()
return self.vcard
@validates('name_family', 'name_given', 'name_middle', 'login')
def _reset_vcard(self, k, v):
self.vcard = None
return v
@classmethod
def get_acls(cls):
sess = DBSession()
res = {}
for u in sess.query(User):
res[u.id] = str(u)
return res
def _del_user(mapper, conn, tgt):
	"""
	ORM after-delete hook: purge UserACL rows that referenced the
	deleted user as an ACL resource (resource class 'NPUser').
	"""
	sess = DBSession()
	priv_ids = sess.query(Privilege.id).filter(Privilege.resource_class == 'NPUser')
	sess.query(UserACL).filter(
		UserACL.privilege_id.in_(priv_ids),
		UserACL.resource == tgt.id
	).delete(synchronize_session=False)

event.listen(User, 'after_delete', _del_user)
@implementer(IDAVFile, IDAVPrincipal)
class Group(Base):
"""
Defines a group of NetProfile users.
"""
__tablename__ = 'groups'
__table_args__ = (
Comment('Groups'),
Index('groups_u_name', 'name', unique=True),
Index('groups_i_parentid', 'parentid'),
Index('groups_i_secpolid', 'secpolid'),
Index('groups_i_rootffid', 'rootffid'),
Trigger('after', 'insert', 't_groups_ai'),
Trigger('after', 'update', 't_groups_au'),
Trigger('after', 'delete', 't_groups_ad'),
{
'mysql_engine' : 'InnoDB',
'mysql_charset' : 'utf8',
'info' : {
'cap_menu' : 'BASE_GROUPS',
'cap_read' : 'GROUPS_LIST',
'cap_create' : 'GROUPS_CREATE',
'cap_edit' : 'GROUPS_EDIT',
'cap_delete' : 'GROUPS_DELETE',
'show_in_menu' : 'admin',
'menu_name' : _('Groups'),
'default_sort' : ({ 'property': 'name' ,'direction': 'ASC' },),
'grid_view' : ('gid', 'name', 'parent', 'security_policy', 'root_folder'),
'grid_hidden' : ('gid',),
'form_view' : ('name', 'parent', 'security_policy', 'visible', 'assignable', 'root_folder'),
'easy_search' : ('name',),
'detail_pane' : ('netprofile_core.views', 'dpane_simple'),
'create_wizard' :
Wizard(
Step('name', 'parent', 'security_policy', title=_('New group data')),
Step('visible', 'assignable', 'root_folder', title=_('New group details')),
title=_('Add new group')
),
'ldap_classes' : ('npGroup',),
'ldap_rdn' : 'name'
}
}
)
id = Column(
'gid',
UInt32(),
Sequence('groups_gid_seq'),
Comment('Group ID'),
primary_key=True,
nullable=False,
info={
'header_string' : _('ID'),
'ldap_attr' : 'gidNumber'
}
)
parent_id = Column(
'parentid',
UInt32(),
ForeignKey('groups.gid', name='groups_fk_parentid', ondelete='SET NULL', onupdate='CASCADE'),
Comment('Parent group ID'),
nullable=True,
default=None,
server_default=text('NULL'),
info={
'header_string' : _('Parent'),
'filter_type' : 'list',
'column_flex' : 3
}
)
security_policy_id = Column(
'secpolid',
UInt32(),
ForeignKey('secpol_def.secpolid', name='groups_fk_secpolid', onupdate='CASCADE'),
Comment('Security policy ID'),
nullable=True,
default=None,
server_default=text('NULL'),
info={
'header_string' : _('Security Policy'),
'filter_type' : 'list',
'column_flex' : 2
}
)
name = Column(
Unicode(255),
Comment('Group name'),
nullable=False,
info={
'header_string' : _('Name'),
'ldap_attr' : 'cn',
'column_flex' : 3
}
)
visible = Column(
NPBoolean(),
Comment('Is visible in UI?'),
nullable=False,
default=True,
server_default=npbool(True),
info={
'header_string' : _('Visible')
}
)
assignable = Column(
NPBoolean(),
Comment('Can be assigned tasks?'),
nullable=False,
default=True,
server_default=npbool(True),
info={
'header_string' : _('Assignable')
}
)
root_folder_id = Column(
'rootffid',
UInt32(),
ForeignKey('files_folders.ffid', name='groups_fk_rootffid', ondelete='SET NULL', onupdate='CASCADE', use_alter=True),
Comment('Root file folder ID'),
nullable=True,
default=None,
server_default=text('NULL'),
info={
'header_string' : _('Root Folder'),
'filter_type' : 'none',
'column_flex' : 2
}
)
secondary_usermap = relationship(
'UserGroup',
backref=backref('group', innerjoin=True),
cascade='all, delete-orphan',
passive_deletes=True
)
users = relationship(
'User',
backref=backref('group', innerjoin=True)
)
children = relationship(
'Group',
backref=backref('parent', remote_side=[id])
)
caps = relationship(
'GroupCapability',
collection_class=attribute_mapped_collection('privilege.code'),
backref=backref('group', innerjoin=True),
cascade='all, delete-orphan',
passive_deletes=True
)
aclmap = relationship(
'GroupACL',
collection_class=attribute_mapped_collection('code_res'),
backref=backref('group', innerjoin=True),
cascade='all, delete-orphan',
passive_deletes=True
)
files = relationship(
'File',
backref='group',
primaryjoin='File.group_id == Group.id'
)
folders = relationship(
'FileFolder',
backref='group',
primaryjoin='FileFolder.group_id == Group.id'
)
calendars = relationship(
'Calendar',
backref=backref('group', innerjoin=True),
cascade='all, delete-orphan',
passive_deletes=True
)
secondary_users = association_proxy(
'secondary_usermap',
'user'
)
privileges = association_proxy(
'caps',
'value',
creator=lambda k,v: _gen_xcap(GroupCapability, k, v)
)
acls = association_proxy(
'aclmap',
'value',
creator=lambda k,v: _gen_xacl(GroupACL, k, v)
)
def __str__(self):
return '%s' % str(self.name)
@property
def flat_privileges(self):
ppriv = {}
if self.parent is None:
return self.privileges.copy()
ppriv = self.parent.flat_privileges
ppriv.update(self.privileges)
return ppriv
@property
def effective_policy(self):
if self.security_policy:
return self.security_policy
grp = self.parent
secpol = None
while grp and (secpol is None):
secpol = grp.security_policy
grp = grp.parent
return secpol
@property
def effective_root_folder(self):
if self.root_folder:
return self.root_folder
grp = self.parent
ff = None
while grp and (ff is None):
ff = grp.root_folder
grp = grp.parent
return ff
	@property
	def __name__(self):
		# Traversal / DAV resource name: groups are addressed by name.
		return self.name
def ldap_attrs(self, settings):
from netprofile_ldap.ldap import get_dn
userset = set()
for u in self.users:
userset.add(u)
for u in self.secondary_users:
userset.add(u)
dnlist = []
for u in userset:
dnlist.append(get_dn(u, settings))
ret = {}
if len(dnlist) > 0:
ret['uniqueMember'] = dnlist
return ret
	def get_uri(self):
		# DAV URI path components: /groups/<name>.
		return [ '', 'groups', self.name ]
def dav_props(self, pset):
ret = {}
if dprops.RESOURCE_TYPE in pset:
ret[dprops.RESOURCE_TYPE] = DAVResourceTypeValue(dprops.PRINCIPAL)
if dprops.CONTENT_TYPE in pset:
ret[dprops.CONTENT_TYPE] = 'text/x-vcard'
if dprops.DISPLAY_NAME in pset:
ret[dprops.DISPLAY_NAME] = self.name
return ret
def dav_group_members(self, req):
gmset = set()
gmset.update(self.children)
gmset.update(self.users)
gmset.update(self.secondary_users)
return gmset
def dav_memberships(self, req):
gmset = set()
if self.parent:
gmset.add(self.parent)
return gmset
def is_member_of(self, grp):
if not isinstance(grp, Group):
return False
xgrp = self
while xgrp:
if xgrp == grp:
return True
xgrp = xgrp.parent
return False
@classmethod
def get_acls(cls):
sess = DBSession()
res = {}
for g in sess.query(Group):
res[g.id] = str(g)
return res
def _del_group(mapper, conn, tgt):
	"""
	ORM after-delete hook: purge GroupACL rows that referenced the
	deleted group as an ACL resource (resource class 'NPGroup').
	"""
	sess = DBSession()
	priv_ids = sess.query(Privilege.id).filter(Privilege.resource_class == 'NPGroup')
	sess.query(GroupACL).filter(
		GroupACL.privilege_id.in_(priv_ids),
		GroupACL.resource == tgt.id
	).delete(synchronize_session=False)

event.listen(Group, 'after_delete', _del_group)
class Privilege(Base):
"""
Generic privilege code, to be assigned to users or groups.
"""
__tablename__ = 'privileges'
__table_args__ = (
Comment('Privilege definitions'),
Index('privileges_u_code', 'code', unique=True),
Index('privileges_u_name', 'name', unique=True),
Index('privileges_i_canbeset', 'canbeset'),
Index('privileges_i_npmodid', 'npmodid'),
{
'mysql_engine' : 'InnoDB',
'mysql_charset' : 'utf8',
'info' : {
'cap_menu' : 'BASE_ADMIN',
'cap_read' : 'PRIVILEGES_LIST',
'cap_create' : 'PRIVILEGES_CREATE',
'cap_edit' : 'PRIVILEGES_EDIT',
'cap_delete' : 'PRIVILEGES_DELETE',
'show_in_menu' : 'admin',
'menu_name' : _('Privileges'),
'default_sort' : ({ 'property': 'code' ,'direction': 'ASC' },),
'grid_view' : ('privid', 'module', 'code', 'name', 'guestvalue', 'hasacls', 'canbeset'),
'grid_hidden' : ('privid', 'canbeset'),
'form_view' : ('module', 'code', 'name', 'guestvalue', 'hasacls', 'resclass'),
'easy_search' : ('code', 'name'),
'detail_pane' : ('netprofile_core.views', 'dpane_simple'),
# FIXME: temporary wizard
'create_wizard' : SimpleWizard(title=_('Add new privilege'))
}
}
)
id = Column(
'privid',
UInt32(),
Sequence('privileges_privid_seq'),
Comment('Privilege ID'),
primary_key=True,
nullable=False,
info={
'header_string' : _('ID')
}
)
module_id = Column(
'npmodid',
UInt32(),
ForeignKey('np_modules.npmodid', name='privileges_fk_npmodid', ondelete='CASCADE', onupdate='CASCADE'),
Comment('NetProfile module ID'),
nullable=False,
default=1,
server_default=text('1'),
info={
'header_string' : _('Module'),
'filter_type' : 'list',
'column_flex' : 2
}
)
can_be_set = Column(
'canbeset',
NPBoolean(),
Comment('Can be set from UI?'),
nullable=False,
default=True,
server_default=npbool(True),
info={
'header_string' : _('Can be Set')
}
)
code = Column(
ASCIIString(48),
Comment('Privilege code'),
nullable=False,
info={
'header_string' : _('Code'),
'column_flex' : 2
}
)
name = Column(
Unicode(255),
Comment('Privilege name'),
nullable=False,
info={
'header_string' : _('Name'),
'column_flex' : 3
}
)
guest_value = Column(
'guestvalue',
NPBoolean(),
Comment('Value for users not logged in'),
nullable=False,
default=False,
server_default=npbool(False),
info={
'header_string' : _('Guest Value')
}
)
has_acls = Column(
'hasacls',
NPBoolean(),
Comment('Can have ACLs?'),
nullable=False,
default=False,
server_default=npbool(False),
info={
'header_string' : _('Has ACLs')
}
)
resource_class = Column(
'resclass',
ASCIIString(255),
Comment('Resource provider class'),
nullable=True,
default=None,
server_default=text('NULL'),
info={
'header_string' : _('Resource Class')
}
)
group_caps = relationship(
'GroupCapability',
backref=backref('privilege', lazy='subquery', innerjoin=True),
cascade='all, delete-orphan',
passive_deletes=True
)
user_caps = relationship(
'UserCapability',
backref=backref('privilege', lazy='subquery', innerjoin=True),
cascade='all, delete-orphan',
passive_deletes=True
)
group_acls = relationship(
'GroupACL',
backref=backref('privilege', lazy='subquery', innerjoin=True),
cascade='all, delete-orphan',
passive_deletes=True
)
user_acls = relationship(
'UserACL',
backref=backref('privilege', lazy='subquery', innerjoin=True),
cascade='all, delete-orphan',
passive_deletes=True
)
def __str__(self):
return '%s' % str(self.code)
def get_acls(self):
if (not self.has_acls) or (not self.resource_class):
return None
cls = self.resource_class
if cls[:2] == 'NP':
cls = cls[2:]
if cls not in Base._decl_class_registry:
return None
cls = Base._decl_class_registry[cls]
getter = getattr(cls, 'get_acls', None)
if callable(getter):
return getter()
class Capability(object):
	"""
	Abstract prototype for privilege assignment object.

	Mixed into concrete per-group / per-user capability tables; the
	declared attributes below are generated once per subclass.
	"""
	@declared_attr
	def id(cls):
		# Surrogate primary key.
		return Column(
			'capid',
			UInt32(),
			Sequence('capid_seq'), # FIXME: needs different names
			Comment('Capability ID'),
			primary_key=True,
			nullable=False,
			info={
				'header_string' : _('ID')
			}
		)
	@declared_attr
	def privilege_id(cls):
		# FK to the assigned privilege; FK name derived per subclass table.
		return Column(
			'privid',
			UInt32(),
			ForeignKey('privileges.privid', name=(cls.__tablename__ + '_fk_privid'), ondelete='CASCADE', onupdate='CASCADE'),
			Comment('Privilege ID'),
			nullable=False,
			info={
				'header_string' : _('Privilege')
			}
		)
	@declared_attr
	def value(cls):
		# Grant (True) or deny (False); defaults to grant.
		return Column(
			NPBoolean(),
			Comment('Capability value'),
			nullable=False,
			default=True,
			server_default=npbool(True),
			info={
				'header_string' : _('Value')
			}
		)
	def __str__(self):
		return '<%s(%s) = %s>' % (
			str(self.__class__.__name__),
			str(self.code),
			str(self.value)
		)
	@property
	def code(self):
		# Privilege code string, resolved via the relationship.
		return self.privilege.code
class GroupCapability(Capability,Base):
	"""
	Group privilege assignment object.
	"""
	# Inherits id / privilege_id / value declared attrs from Capability.
	__tablename__ = 'capabilities_groups'
	__table_args__ = (
		Comment('Group capabilities'),
		# One capability row per (group, privilege) pair.
		Index('capabilities_groups_u_cap', 'gid', 'privid', unique=True),
		Index('capabilities_groups_i_priv', 'privid'),
		{
			'mysql_engine'  : 'InnoDB',
			'mysql_charset' : 'utf8',
			'info'          : {
				'cap_read'      : 'GROUPS_GETCAP',
				'cap_create'    : 'GROUPS_SETCAP',
				'cap_edit'      : 'GROUPS_SETCAP',
				'cap_delete'    : 'GROUPS_SETCAP',

#				'show_in_menu'  : 'admin',
				'menu_name'     : _('Group Capabilities'),
				'default_sort'  : (),
#				'grid_view'     : ('code', 'name', 'guestvalue', 'hasacls')
			}
		}
	)
	group_id = Column(
		'gid',
		UInt32(),
		ForeignKey('groups.gid', name='capabilities_groups_fk_gid', ondelete='CASCADE', onupdate='CASCADE'),
		Comment('Group ID'),
		nullable=False,
		info={
			'header_string' : _('Group')
		}
	)
class UserCapability(Capability,Base):
	"""
	User privilege assignment object.

	Concrete Capability bound to a single user; a (uid, privid) pair
	is unique per the table's unique index.
	"""
	__tablename__ = 'capabilities_users'
	__table_args__ = (
		Comment('User capabilities'),
		Index('capabilities_users_u_cap', 'uid', 'privid', unique=True),
		Index('capabilities_users_i_priv', 'privid'),
		{
			'mysql_engine'  : 'InnoDB',
			'mysql_charset' : 'utf8',
			'info'          : {
				'cap_read'      : 'USERS_GETCAP',
				'cap_create'    : 'USERS_SETCAP',
				'cap_edit'      : 'USERS_SETCAP',
				'cap_delete'    : 'USERS_SETCAP',

#				'show_in_menu'  : 'admin',
				'menu_name'     : _('User Capabilities'),
				'default_sort'  : (),
#				'grid_view'     : ('code', 'name', 'guestvalue', 'hasacls')
			}
		}
	)
	# Owning user (completes the mixin's columns).
	user_id = Column(
		'uid',
		UInt32(),
		ForeignKey('users.uid', name='capabilities_users_fk_uid', ondelete='CASCADE', onupdate='CASCADE'),
		Comment('User ID'),
		nullable=False,
		info={
			'header_string' : _('User')
		}
	)
class ACL(object):
	"""
	Abstract prototype for resource-specific privilege assignment object.

	Like Capability, but scoped to a single resource ID; mixed into
	GroupACL and UserACL, which add the owner column.
	"""
	@declared_attr
	def id(cls):
		# Surrogate primary key for the ACL row.
		return Column(
			'aclid',
			UInt32(),
			Sequence('aclid_seq'), # FIXME: needs different names
			Comment('ACL ID'),
			primary_key=True,
			nullable=False,
			info={
				'header_string' : _('ID')
			}
		)
	@declared_attr
	def privilege_id(cls):
		# Privilege this ACL entry refers to; FK name derived from the
		# concrete subclass' table name.
		return Column(
			'privid',
			UInt32(),
			ForeignKey('privileges.privid', name=(cls.__tablename__ + '_fk_privid'), ondelete='CASCADE', onupdate='CASCADE'),
			Comment('Privilege ID'),
			nullable=False,
			info={
				'header_string' : _('Privilege')
			}
		)
	@declared_attr
	def resource(cls):
		# Opaque ID of the resource the privilege applies to.
		return Column(
			UInt32(),
			Comment('Resource ID'),
			nullable=False,
			info={
				'header_string' : _('Resource')
			}
		)
	@declared_attr
	def value(cls):
		# True grants access to the resource, False denies it.
		return Column(
			NPBoolean(),
			Comment('Access value'),
			nullable=False,
			default=True,
			server_default=npbool(True),
			info={
				'header_string' : _('Value')
			}
		)
	def __str__(self):
		# Debug representation, e.g. '<UserACL(SOME_PRIV,42) = True>'.
		# BUGFIX: was '%u', which only accepts numbers and raised
		# TypeError because the argument is passed through str().
		return '<%s(%s,%s) = %s>' % (
			str(self.__class__.__name__),
			str(self.code),
			str(self.resource),
			str(self.value)
		)
	@property
	def code(self):
		# Shortcut to the privilege's symbolic code; the 'privilege'
		# relationship is presumably configured elsewhere -- TODO confirm.
		return self.privilege.code
	@property
	def code_res(self):
		# (code, resource) pair used as a composite lookup key.
		return self.code, self.resource
class GroupACL(ACL,Base):
	"""
	Group resource-specific privilege assignment object.

	Concrete ACL bound to a group; (gid, privid, resource) is unique.
	"""
	__tablename__ = 'acls_groups'
	__table_args__ = (
		Comment('Group access control lists'),
		Index('acls_groups_u_cap', 'gid', 'privid', 'resource', unique=True),
		Index('acls_groups_i_priv', 'privid'),
		{
			'mysql_engine'  : 'InnoDB',
			'mysql_charset' : 'utf8',
			'info'          : {
			}
		}
	)
	# Owning group (completes the mixin's columns).
	group_id = Column(
		'gid',
		UInt32(),
		ForeignKey('groups.gid', name='acls_groups_fk_gid', ondelete='CASCADE', onupdate='CASCADE'),
		Comment('Group ID'),
		nullable=False,
		info={
			'header_string' : _('Group')
		}
	)
class UserACL(ACL,Base):
	"""
	User resource-specific privilege assignment object.

	Concrete ACL bound to a user; (uid, privid, resource) is unique.
	"""
	__tablename__ = 'acls_users'
	__table_args__ = (
		Comment('User access control lists'),
		Index('acls_users_u_cap', 'uid', 'privid', 'resource', unique=True),
		Index('acls_users_i_priv', 'privid'),
		{
			'mysql_engine'  : 'InnoDB',
			'mysql_charset' : 'utf8',
			'info'          : {
			}
		}
	)
	# Owning user (completes the mixin's columns).
	user_id = Column(
		'uid',
		UInt32(),
		ForeignKey('users.uid', name='acls_users_fk_uid', ondelete='CASCADE', onupdate='CASCADE'),
		Comment('User ID'),
		nullable=False,
		info={
			'header_string' : _('User')
		}
	)
class UserGroup(Base):
	"""
	Secondary group membership association object.

	Many-to-many link row between a user and one of its additional
	(non-primary) groups; (uid, gid) pairs are unique.
	"""
	__tablename__ = 'users_groups'
	__table_args__ = (
		Comment('Secondary user groups'),
		Index('users_groups_u_mapping', 'uid', 'gid', unique=True),
		Index('users_groups_i_gid', 'gid'),
		{
			'mysql_engine'  : 'InnoDB',
			'mysql_charset' : 'utf8',
			'info'          : {
			}
		}
	)
	# Surrogate primary key of the mapping row.
	id = Column(
		'ugid',
		UInt32(),
		Sequence('users_groups_ugid_seq'),
		Comment('User group mapping ID'),
		primary_key=True,
		nullable=False,
		info={
			'header_string' : _('ID')
		}
	)
	# Member user.
	user_id = Column(
		'uid',
		UInt32(),
		ForeignKey('users.uid', name='users_groups_fk_uid', ondelete='CASCADE', onupdate='CASCADE'),
		Comment('User ID'),
		nullable=False,
		info={
			'header_string' : _('User')
		}
	)
	# Secondary group the user belongs to.
	group_id = Column(
		'gid',
		UInt32(),
		ForeignKey('groups.gid', name='users_groups_fk_gid', ondelete='CASCADE', onupdate='CASCADE'),
		Comment('Group ID'),
		nullable=False,
		info={
			'header_string' : _('Group')
		}
	)
	def __str__(self):
		# Display as the group's name; the 'group' relationship is
		# presumably configured on the Group side -- TODO confirm.
		return '%s' % str(self.group)
class SecurityPolicyOnExpire(DeclEnum):
	"""
	On-password-expire security policy action.

	Each member is (db value, human-readable label, sort order), per
	the project's DeclEnum convention.
	"""
	none  = 'none',  _('No action'),          10
	force = 'force', _('Force new password'), 20
	drop  = 'drop',  _('Drop connection'),    30
class SecurityPolicy(Base):
	"""
	Assignable security policy for users and groups.

	Bundles password-quality rules (length, character classes,
	dictionary and history checks), password-age rules, a network
	whitelist and session-window/timeout rules.  Nullable columns mean
	"rule disabled".
	"""
	__tablename__ = 'secpol_def'
	__table_args__ = (
		Comment('Security policies'),
		Index('secpol_def_u_name', 'name', unique=True),
		{
			'mysql_engine'  : 'InnoDB',
			'mysql_charset' : 'utf8',
			'info'          : {
				'cap_menu'      : 'BASE_ADMIN',
				'cap_read'      : 'SECPOL_LIST',
				'cap_create'    : 'SECPOL_CREATE',
				'cap_edit'      : 'SECPOL_EDIT',
				'cap_delete'    : 'SECPOL_DELETE',

				'show_in_menu'  : 'admin',
				'menu_name'     : _('Security Policies'),
				'default_sort'  : ({ 'property': 'name' ,'direction': 'ASC' },),
				'grid_view'     : ('secpolid', 'name', 'pw_length_min', 'pw_length_max', 'pw_ctype_min', 'pw_ctype_max', 'pw_dict_check', 'pw_hist_check', 'pw_hist_size', 'sess_timeout'),
				'grid_hidden'   : ('secpolid', 'sess_timeout'),
				'form_view'     : (
					'name', 'descr',
					'pw_length_min', 'pw_length_max',
					'pw_ctype_min', 'pw_ctype_max',
					'pw_dict_check', 'pw_dict_name',
					'pw_hist_check', 'pw_hist_size',
					'pw_age_min', 'pw_age_max', 'pw_age_warndays', 'pw_age_warnmail', 'pw_age_action',
					'net_whitelist', 'sess_timeout',
					'sess_window_ipv4', 'sess_window_ipv6'
				),
				'easy_search'   : ('name',),
				'detail_pane'   : ('netprofile_core.views', 'dpane_simple')
			}
		}
	)
	id = Column(
		'secpolid',
		UInt32(),
		Sequence('secpol_def_secpolid_seq'),
		Comment('Security policy ID'),
		primary_key=True,
		nullable=False,
		info={
			'header_string' : _('ID')
		}
	)
	name = Column(
		Unicode(255),
		Comment('Security policy name'),
		nullable=False,
		info={
			'header_string' : _('Name'),
			'column_flex'   : 1
		}
	)
	pw_length_min = Column(
		UInt16(),
		Comment('Minimum password length'),
		nullable=True,
		default=None,
		server_default=text('NULL'),
		info={
			'header_string' : _('Min. Password Len.')
		}
	)
	pw_length_max = Column(
		UInt16(),
		Comment('Maximum password length'),
		nullable=True,
		default=None,
		server_default=text('NULL'),
		info={
			'header_string' : _('Max. Password Len.')
		}
	)
	pw_ctype_min = Column(
		UInt8(),
		Comment('Minimum number of character types in password'),
		nullable=True,
		default=None,
		server_default=text('NULL'),
		info={
			'header_string' : _('Min. Char Types')
		}
	)
	pw_ctype_max = Column(
		UInt8(),
		Comment('Maximum number of character types in password'),
		nullable=True,
		default=None,
		server_default=text('NULL'),
		info={
			'header_string' : _('Max. Char Types')
		}
	)
	pw_dict_check = Column(
		NPBoolean(),
		Comment('Check password against a dictionary?'),
		nullable=False,
		default=False,
		server_default=npbool(False),
		info={
			'header_string' : _('Dictionary Check')
		}
	)
	pw_dict_name = Column(
		ASCIIString(255),
		Comment('Name of a custom dictionary'),
		nullable=True,
		default=None,
		server_default=text('NULL'),
		info={
			'header_string' : _('Custom Dictionary')
		}
	)
	pw_hist_check = Column(
		NPBoolean(),
		Comment('Keep a history of old passwords?'),
		nullable=False,
		default=False,
		server_default=npbool(False),
		info={
			'header_string' : _('Keep History')
		}
	)
	pw_hist_size = Column(
		UInt16(),
		Comment('Old password history size'),
		nullable=True,
		default=None,
		server_default=text('NULL'),
		info={
			'header_string' : _('History Size')
		}
	)
	pw_age_min = Column(
		UInt16(),
		Comment('Minimum password age in days'),
		nullable=True,
		default=None,
		server_default=text('NULL'),
		info={
			'header_string' : _('Min. Password Age')
		}
	)
	pw_age_max = Column(
		UInt16(),
		Comment('Maximum password age in days'),
		nullable=True,
		default=None,
		server_default=text('NULL'),
		info={
			'header_string' : _('Max. Password Age')
		}
	)
	pw_age_warndays = Column(
		UInt16(),
		Comment('Notify to change password (in days before expiration)'),
		nullable=True,
		default=None,
		server_default=text('NULL'),
		info={
			'header_string' : _('Notify Days')
		}
	)
	pw_age_warnmail = Column(
		NPBoolean(),
		Comment('Warn about password expiry by e-mail'),
		nullable=False,
		default=False,
		server_default=npbool(False),
		info={
			'header_string' : _('Warn by E-mail')
		}
	)
	pw_age_action = Column(
		SecurityPolicyOnExpire.db_type(),
		Comment('Action on expired password'),
		nullable=False,
		default=SecurityPolicyOnExpire.none,
		server_default=SecurityPolicyOnExpire.none,
		info={
			'header_string' : _('On Expire')
		}
	)
	net_whitelist = Column(
		ASCIIString(255),
		Comment('Whitelist of allowed login addresses'),
		nullable=True,
		default=None,
		server_default=text('NULL'),
		info={
			'header_string' : _('Address Whitelist')
		}
	)
	sess_timeout = Column(
		UInt32(),
		Comment('Session timeout (in seconds)'),
		nullable=True,
		default=None,
		server_default=text('NULL'),
		info={
			'header_string' : _('Session Timeout')
		}
	)
	sess_window_ipv4 = Column(
		UInt8(),
		Comment('Allow IPv4 source addresses to migrate within this mask'),
		nullable=True,
		default=32,
		server_default=text('32'),
		info={
			'header_string' : _('IPv4 Session Window')
		}
	)
	sess_window_ipv6 = Column(
		UInt8(),
		Comment('Allow IPv6 source addresses to migrate within this mask'),
		nullable=True,
		default=128,
		server_default=text('128'),
		info={
			'header_string' : _('IPv6 Session Window')
		}
	)
	description = Column(
		'descr',
		UnicodeText(),
		Comment('Security policy description'),
		nullable=True,
		default=None,
		server_default=text('NULL'),
		info={
			'header_string' : _('Description')
		}
	)
	users = relationship(
		'User',
		backref='security_policy'
	)
	groups = relationship(
		'Group',
		backref='security_policy'
	)
	@property
	def net_whitelist_acl(self):
		"""
		Parse the semicolon-separated whitelist column into a list of
		network objects; unparsable entries are silently skipped.
		Returns None when no whitelist is configured.
		"""
		if not self.net_whitelist:
			return None
		nets = []
		for ace in self.net_whitelist.split(';'):
			try:
				nets.append(ipaddr.IPNetwork(ace.strip()))
			except ValueError:
				pass
		return nets
	def check_new_password(self, req, user, pwd, ts):
		"""
		Validate a proposed new password against this policy.

		Returns True when the password passes, otherwise a list of
		failed rule names (e.g. 'pw_length_min', 'pw_hist_check').
		"""
		err = []
		if self.pw_length_min and (len(pwd) < self.pw_length_min):
			err.append('pw_length_min')
		if self.pw_length_max and (len(pwd) > self.pw_length_max):
			err.append('pw_length_max')
		if self.pw_ctype_min or self.pw_ctype_max:
			# Count distinct character classes present in the password.
			has_lower = False
			has_upper = False
			has_digit = False
			has_space = False
			has_sym = False
			for char in pwd:
				if char.islower():
					has_lower = True
				elif char.isupper():
					has_upper = True
				elif char.isdigit():
					has_digit = True
				elif char.isspace():
					has_space = True
				elif char.isprintable():
					has_sym = True
			ct_count = 0
			for ctype in (has_lower, has_upper, has_digit, has_space, has_sym):
				if ctype:
					ct_count += 1
			if self.pw_ctype_min and (ct_count < self.pw_ctype_min):
				err.append('pw_ctype_min')
			if self.pw_ctype_max and (ct_count > self.pw_ctype_max):
				err.append('pw_ctype_max')
		if self.pw_dict_check:
			# Dictionary name format is 'package:relative/path';
			# requires the optional cracklib module at call time.
			if self.pw_dict_name:
				dname = self.pw_dict_name
			else:
				dname = _DEFAULT_DICT
			dname = dname.split(':')
			if len(dname) == 2:
				from cracklib import FascistCheck
				from pkg_resources import resource_filename
				dfile = resource_filename(dname[0], dname[1])
				try:
					FascistCheck(pwd, dfile)
				except ValueError:
					err.append('pw_dict_check')
		if user and user.id:
			if req and self.pw_hist_check:
				# Compare the salted SHA1 of the candidate against the
				# stored password history.
				hist_salt = req.registry.settings.get('netprofile.pwhistory_salt', 'nppwdhist_')
				ctx = hashlib.sha1()
				ctx.update(hist_salt.encode())
				ctx.update(pwd.encode())
				hist_hash = ctx.hexdigest()
				for pwh in user.password_history:
					if pwh.password == hist_hash:
						err.append('pw_hist_check')
			if self.pw_age_min:
				# Reject changes made before the minimum age elapsed.
				delta = dt.timedelta(self.pw_age_min)
				minage_fail = False
				for pwh in user.password_history:
					if (pwh.timestamp + delta) > ts:
						minage_fail = True
				if minage_fail:
					err.append('pw_age_min')
		if len(err) == 0:
			return True
		return err
	def after_new_password(self, req, user, pwd, ts):
		"""
		Record a successful password change in the user's password
		history, evicting the oldest entries so the history stays
		within the configured size (default 3).
		"""
		if self.pw_hist_check:
			hist_salt = req.registry.settings.get('netprofile.pwhistory_salt', 'nppwdhist_')
			ctx = hashlib.sha1()
			ctx.update(hist_salt.encode())
			ctx.update(pwd.encode())
			hist_hash = ctx.hexdigest()
			hist_sz = self.pw_hist_size
			if not hist_sz:
				hist_sz = 3
			# BUGFIX: prune while at or over the limit instead of only
			# when exactly equal, so a lowered pw_hist_size can never
			# leave the history permanently oversized.
			while len(user.password_history) >= hist_sz:
				oldest_time = None
				oldest_idx = None
				for i in range(len(user.password_history)):
					pwh = user.password_history[i]
					if (oldest_time is None) or (oldest_time > pwh.timestamp):
						oldest_time = pwh.timestamp
						oldest_idx = i
				if oldest_idx is None:
					break
				del user.password_history[oldest_idx]
			user.password_history.append(PasswordHistory(
				password=hist_hash,
				timestamp=ts
			))
	def check_new_session(self, req, user, npsess, ts=None):
		"""
		Validate a freshly created session: source address must match
		the whitelist (when set) and the password-age rules must pass.
		Returns False to reject the session.
		"""
		if ts is None:
			ts = dt.datetime.now()
		addr = npsess.ip_address or npsess.ipv6_address
		acl = self.net_whitelist_acl
		if addr and acl:
			for net in acl:
				if addr in net:
					break
			else:
				return False
		# Expensive checks
		if not self.check_password_age(req, user, npsess, ts):
			return False
		req.session['sess.nextcheck'] = _sess_nextcheck(req, ts)
		return True
	def check_old_session(self, req, user, npsess, ts=None):
		"""
		Re-validate an existing session on subsequent requests:
		inactivity timeout, source-address stability within the
		configured IPv4/IPv6 windows, whitelist membership, and
		(periodically) the password-age rules.
		"""
		if ts is None:
			ts = dt.datetime.now()
		# Timeouts under 30 seconds are ignored as misconfiguration.
		if self.sess_timeout and npsess.last_time and (self.sess_timeout >= 30):
			delta = ts - npsess.last_time
			if delta.total_seconds() > self.sess_timeout:
				return False
		addr = npsess.ip_address or npsess.ipv6_address
		if req.remote_addr is not None:
			try:
				remote_addr = ipaddr.IPAddress(req.remote_addr)
			except ValueError:
				return False
			# The current remote address must stay inside the network
			# window anchored at the session's original address.
			if isinstance(remote_addr, ipaddr.IPv4Address) and self.sess_window_ipv4:
				if not isinstance(addr, ipaddr.IPv4Address):
					return False
				try:
					window = ipaddr.IPv4Network('%s/%d' % (str(addr), self.sess_window_ipv4))
				except ValueError:
					return False
				if remote_addr not in window:
					return False
			elif isinstance(remote_addr, ipaddr.IPv6Address) and self.sess_window_ipv6:
				if not isinstance(addr, ipaddr.IPv6Address):
					return False
				try:
					window = ipaddr.IPv6Network('%s/%d' % (str(addr), self.sess_window_ipv6))
				except ValueError:
					return False
				if remote_addr not in window:
					return False
		acl = self.net_whitelist_acl
		if addr and acl:
			for net in acl:
				if addr in net:
					break
			else:
				return False
		if 'sess.nextcheck' in req.session:
			nextcheck = req.session['sess.nextcheck']
		else:
			nextcheck = req.session['sess.nextcheck'] = _sess_nextcheck(req, ts)
		if nextcheck < ts:
			# Expensive checks
			if not self.check_password_age(req, user, npsess, ts):
				return False
			req.session['sess.nextcheck'] = _sess_nextcheck(req, ts)
		return True
	def check_password_age(self, req, user, npsess, ts):
		"""
		Evaluate the password-age rules and stash the verdict in the
		session as 'sess.pwage' ('ok'/'warn'/'force').  Returns False
		only when the password expired and the policy action is to
		drop the connection.
		"""
		last_pwh = user.last_password_change
		# BUGFIX: skip the whole check when pw_age_max is unset; the
		# column is nullable and 'days > None' raises TypeError on
		# Python 3 (as would 'None - days' in the warn branch).
		if last_pwh and self.pw_age_max:
			days = (ts - last_pwh.timestamp).days
			if days > self.pw_age_max:
				if self.pw_age_action == SecurityPolicyOnExpire.drop:
					return False
				req.session['sess.pwage'] = 'force'
			elif self.pw_age_warndays:
				days_left = self.pw_age_max - days
				if days_left < self.pw_age_warndays:
					req.session['sess.pwage'] = 'warn'
					req.session['sess.pwdays'] = days_left
				else:
					req.session['sess.pwage'] = 'ok'
			else:
				req.session['sess.pwage'] = 'ok'
		else:
			req.session['sess.pwage'] = 'ok'
		return True
	def __str__(self):
		return '%s' % str(self.name)
def _sess_nextcheck(req, ts):
cfg = req.registry.settings
try:
secs = int(cfg.get('netprofile.auth.session_check_period', 1800))
except (TypeError, ValueError):
secs = 1800
return ts + dt.timedelta(seconds=secs)
class CommunicationType(Base):
	"""
	Defines IM, social media and other communication channel links.

	Each row describes one channel type (protocol prefix, icon and a
	URI template) which UserCommunicationChannel rows then reference.
	"""
	__tablename__ = 'comms_types'
	__table_args__ = (
		Comment('Communication channel types'),
		Index('comms_types_u_name', 'name', unique=True),
		Index('comms_types_i_impp', 'impp'),
		{
			'mysql_engine'  : 'InnoDB',
			'mysql_charset' : 'utf8',
			'info'          : {
				# FIXME
				'cap_menu'      : 'BASE_ADMIN',
				# no read cap
				'cap_create'    : 'BASE_ADMIN',
				'cap_edit'      : 'BASE_ADMIN',
				'cap_delete'    : 'BASE_ADMIN',

				'show_in_menu'  : 'admin',
				'menu_name'     : _('Communication Types'),
				'default_sort'  : ({ 'property': 'name' ,'direction': 'ASC' },),
				'grid_view'     : (
					'commtid',
					MarkupColumn(
						name='icon',
						header_string='&nbsp;',
						column_width=22,
						column_name=_('Icon'),
						column_resizable=False,
						cell_class='np-nopad',
						template='<img class="np-block-img" src="{grid_icon}" />'
					),
					'name', 'impp'
				),
				'grid_hidden'   : ('commtid',),
				'form_view'     : ('name', 'icon', 'impp', 'urifmt', 'descr'),
				'easy_search'   : ('name',),
				'extra_data'    : ('grid_icon',),
				'detail_pane'   : ('netprofile_core.views', 'dpane_simple'),
				'create_wizard' : SimpleWizard(title=_('Add new communication type'))
			}
		}
	)
	id = Column(
		'commtid',
		UInt32(),
		Sequence('comms_types_commtid_seq'),
		Comment('Communication channel type ID'),
		primary_key=True,
		nullable=False,
		info={
			'header_string' : _('ID')
		}
	)
	name = Column(
		Unicode(255),
		Comment('Communication channel name'),
		nullable=False,
		info={
			'header_string' : _('Name'),
			'column_flex'   : 1
		}
	)
	# Base name of the icon file under static/img/comms/ (no extension).
	icon = Column(
		ASCIIString(32),
		Comment('Icon name'),
		nullable=True,
		default=None,
		server_default=text('NULL'),
		info={
			'header_string' : _('Icon')
		}
	)
	uri_protocol = Column(
		'impp',
		ASCIIString(32),
		Comment('vCard IMPP URI prefix'),
		nullable=True,
		default=None,
		server_default=text('NULL'),
		info={
			'header_string' : _('Protocol')
		}
	)
	# Template consumed by format_uri(); receives {proto} and {address}.
	uri_format = Column(
		'urifmt',
		Unicode(255),
		Comment('URI format string'),
		nullable=False,
		default='{proto}:{address}',
		server_default='{proto}:{address}',
		info={
			'header_string' : _('URI Format')
		}
	)
	description = Column(
		'descr',
		UnicodeText(),
		Comment('Communication channel type description'),
		nullable=True,
		default=None,
		server_default=text('NULL'),
		info={
			'header_string' : _('Description')
		}
	)
	user_channels = relationship(
		'UserCommunicationChannel',
		backref=backref('type', innerjoin=True),
		cascade='all, delete-orphan',
		passive_deletes=True
	)
	def __str__(self):
		return '%s' % str(self.name)
	def grid_icon(self, req):
		# URL of this type's icon for grid rendering; falls back to the
		# generic icon when none is configured.
		icn = self.icon or 'generic'
		return req.static_url('netprofile_core:static/img/comms/' + icn + '.png')
	def format_uri(self, addr):
		# Build a clickable URI for an address, percent-encoding it with
		# the Python-2/3-appropriate urllib API.
		if PY3:
			addr = urllib.parse.quote(addr, '')
		else:
			addr = urllib.quote(addr.encode(), '')
		return self.uri_format.format(proto=self.uri_protocol, address=addr)
class UserPhone(Base):
	"""
	Users' phone contacts.

	One row per phone number attached to a user; changes are mirrored
	into LDAP by the _mod_phone event listeners below.
	"""
	__tablename__ = 'users_phones'
	__table_args__ = (
		Comment('User phone numbers'),
		Index('users_phones_i_uid', 'uid'),
		Index('users_phones_i_num', 'num'),
		{
			'mysql_engine'  : 'InnoDB',
			'mysql_charset' : 'utf8',
			'info'          : {
				'cap_read'      : 'USERS_LIST',
				'cap_create'    : 'USERS_EDIT',
				'cap_edit'      : 'USERS_EDIT',
				'cap_delete'    : 'USERS_EDIT',

				'menu_name'     : _('Phones'),
				'default_sort'  : (
					{ 'property': 'ptype' ,'direction': 'ASC' },
					{ 'property': 'num' ,'direction': 'ASC' }
				),
				'grid_view'     : ('uphoneid', 'user', 'primary', 'ptype', 'num', 'descr'),
				'grid_hidden'   : ('uphoneid',),
				'detail_pane'   : ('netprofile_core.views', 'dpane_simple'),
				'create_wizard' : SimpleWizard(title=_('Add new phone'))
			}
		}
	)
	id = Column(
		'uphoneid',
		UInt32(),
		Sequence('users_phones_uphoneid_seq'),
		Comment('User phone ID'),
		primary_key=True,
		nullable=False,
		info={
			'header_string' : _('ID')
		}
	)
	# Owning user.
	user_id = Column(
		'uid',
		UInt32(),
		ForeignKey('users.uid', name='users_phones_fk_uid', ondelete='CASCADE', onupdate='CASCADE'),
		Comment('User ID'),
		nullable=False,
		info={
			'header_string' : _('User'),
			'column_flex'   : 2,
			'filter_type'   : 'none'
		}
	)
	# Marks the user's preferred number.
	primary = Column(
		NPBoolean(),
		Comment('Primary flag'),
		nullable=False,
		default=False,
		server_default=npbool(False),
		info={
			'header_string' : _('Primary')
		}
	)
	type = Column(
		'ptype',
		PhoneType.db_type(),
		Comment('Phone type'),
		nullable=False,
		default=PhoneType.work,
		server_default=PhoneType.work,
		info={
			'header_string' : _('Type'),
			'column_flex'   : 1
		}
	)
	number = Column(
		'num',
		ASCIIString(255),
		Comment('Phone number'),
		nullable=False,
		info={
			'header_string' : _('Number'),
			'column_flex'   : 1
		}
	)
	description = Column(
		'descr',
		UnicodeText(),
		Comment('Phone description'),
		nullable=True,
		default=None,
		server_default=text('NULL'),
		info={
			'header_string' : _('Description'),
			'column_flex'   : 2
		}
	)
	def __str__(self):
		# Localized 'type-prefix: number', e.g. 'work: +1555...'.
		req = get_current_request()
		loc = get_localizer(req)
		return '%s: %s' % (
			loc.translate(PhoneType.prefix(self.type)),
			self.number
		)
def _mod_phone(mapper, conn, tgt):
try:
from netprofile_ldap.ldap import store
except ImportError:
return
user = tgt.user
user_id = tgt.user_id
if (not user) and user_id:
user = DBSession().query(User).get(user_id)
if user:
store(user)
# Mirror every phone change (insert/update/delete) into LDAP.
event.listen(UserPhone, 'after_delete', _mod_phone)
event.listen(UserPhone, 'after_insert', _mod_phone)
event.listen(UserPhone, 'after_update', _mod_phone)
class UserEmail(Base):
	"""
	Users' email addresses.

	Supports alias chains: a row with original_id set is an alias for
	another UserEmail row.  Changes are mirrored into LDAP by the
	_mod_mail event listeners below.
	"""
	__tablename__ = 'users_email'
	__table_args__ = (
		Comment('User e-mail addresses'),
		Index('users_email_u_addr', 'addr', unique=True),
		Index('users_email_i_uid', 'uid'),
		Index('users_email_i_aliasid', 'aliasid'),
		{
			'mysql_engine'  : 'InnoDB',
			'mysql_charset' : 'utf8',
			'info'          : {
				'cap_read'      : 'USERS_LIST',
				'cap_create'    : 'USERS_EDIT',
				'cap_edit'      : 'USERS_EDIT',
				'cap_delete'    : 'USERS_EDIT',

				'menu_name'     : _('E-mail'),
				'default_sort'  : (
					{ 'property': 'scope' ,'direction': 'ASC' },
					{ 'property': 'addr' ,'direction': 'ASC' },
				),
				'grid_view'     : ('uemailid', 'user', 'primary', 'scope', 'addr', 'original'),
				'grid_hidden'   : ('uemailid',),
				'form_view'     : ('user', 'primary', 'scope', 'addr', 'original', 'descr'),
				'detail_pane'   : ('netprofile_core.views', 'dpane_simple'),
				'create_wizard' : SimpleWizard(title=_('Add new e-mail address'))
			}
		}
	)
	id = Column(
		'uemailid',
		UInt32(),
		Sequence('users_email_uemailid_seq'),
		Comment('User e-mail ID'),
		primary_key=True,
		nullable=False,
		info={
			'header_string' : _('ID')
		}
	)
	# Owning user.
	user_id = Column(
		'uid',
		UInt32(),
		ForeignKey('users.uid', name='users_email_fk_uid', ondelete='CASCADE', onupdate='CASCADE'),
		Comment('User ID'),
		nullable=False,
		info={
			'header_string' : _('User'),
			'column_flex'   : 2,
			'filter_type'   : 'none'
		}
	)
	# Self-referential FK: when set, this row is an alias of another
	# e-mail row (see the 'aliases'/'original' relationship below).
	original_id = Column(
		'aliasid',
		UInt32(),
		ForeignKey('users_email.uemailid', name='users_email_fk_aliasid', ondelete='CASCADE', onupdate='CASCADE'),
		Comment('Aliased e-mail ID'),
		nullable=True,
		default=None,
		server_default=text('NULL'),
		info={
			'header_string' : _('Original'),
			'column_flex'   : 3,
			'filter_type'   : 'none'
		}
	)
	primary = Column(
		NPBoolean(),
		Comment('Primary flag'),
		nullable=False,
		default=False,
		server_default=npbool(False),
		info={
			'header_string' : _('Primary')
		}
	)
	scope = Column(
		ContactInfoType.db_type(),
		Comment('Address scope'),
		nullable=True,
		default=None,
		server_default=text('NULL'),
		info={
			'header_string' : _('Type')
		}
	)
	# NOTE(review): unlike the sibling columns this one carries no
	# Comment() DDL annotation -- possibly an oversight; confirm before
	# adding one (it would alter emitted DDL).
	address = Column(
		'addr',
		Unicode(255),
		nullable=False,
		info={
			'header_string' : _('Address'),
			'column_flex'   : 3
		}
	)
	description = Column(
		'descr',
		UnicodeText(),
		Comment('Address description'),
		nullable=True,
		default=None,
		server_default=text('NULL'),
		info={
			'header_string' : _('Description')
		}
	)
	aliases = relationship(
		'UserEmail',
		backref=backref('original', remote_side=(id,)),
		cascade='all, delete-orphan',
		passive_deletes=True
	)
	def __str__(self):
		return '%s' % (self.address,)
def _mod_mail(mapper, conn, tgt):
try:
from netprofile_ldap.ldap import store
except ImportError:
return
user = tgt.user
user_id = tgt.user_id
if (not user) and user_id:
user = DBSession().query(User).get(user_id)
if user:
store(user)
# Mirror every e-mail change (insert/update/delete) into LDAP.
event.listen(UserEmail, 'after_delete', _mod_mail)
event.listen(UserEmail, 'after_insert', _mod_mail)
event.listen(UserEmail, 'after_update', _mod_mail)
class UserCommunicationChannel(Base):
	"""
	Users' communication channel links.

	Joins a user to a CommunicationType with a concrete address
	(IM handle, social profile, etc.).
	"""
	__tablename__ = 'users_comms'
	__table_args__ = (
		Comment('User communication channels'),
		Index('users_comms_i_commtid', 'commtid'),
		Index('users_comms_i_uid', 'uid'),
		{
			'mysql_engine'  : 'InnoDB',
			'mysql_charset' : 'utf8',
			'info'          : {
				'cap_read'      : 'USERS_LIST',
				'cap_create'    : 'USERS_EDIT',
				'cap_edit'      : 'USERS_EDIT',
				'cap_delete'    : 'USERS_EDIT',

				'menu_name'     : _('User Communications'),
				'default_sort'  : ({ 'property': 'commtid' ,'direction': 'ASC' },),
				'grid_view'     : (
					'ucommid',
					MarkupColumn(
						header_string='&nbsp;',
						column_width=22,
						column_name=_('Icon'),
						column_resizable=False,
						cell_class='np-nopad',
						template='<img class="np-block-img" src="{grid_icon}" />'
					),
					'type', 'user', 'primary', 'scope',
					MarkupColumn(
						name='value',
						header_string=_('Address'),
						column_flex=3,
						template='<a href="{uri}">{value}</a>'
					)
				),
				'grid_hidden'   : ('ucommid',),
				'form_view'     : ('type', 'user', 'primary', 'scope', 'value', 'descr'),
				'extra_data'    : ('grid_icon', 'uri'),
				'detail_pane'   : ('netprofile_core.views', 'dpane_simple'),
				'create_wizard' : SimpleWizard(title=_('Add new communication channel'))
			}
		}
	)
	id = Column(
		'ucommid',
		UInt32(),
		Sequence('users_comms_ucommid_seq'),
		Comment('User communication channel ID'),
		primary_key=True,
		nullable=False,
		info={
			'header_string' : _('ID')
		}
	)
	# Channel type; backref 'type' is created by CommunicationType.
	type_id = Column(
		'commtid',
		UInt32(),
		ForeignKey('comms_types.commtid', name='users_comms_fk_commtid', ondelete='CASCADE', onupdate='CASCADE'),
		Comment('Communication channel type ID'),
		nullable=False,
		info={
			'header_string' : _('Type'),
			'column_flex'   : 2,
			'filter_type'   : 'list',
			'editor_xtype'  : 'simplemodelselect'
		}
	)
	# Owning user.
	user_id = Column(
		'uid',
		UInt32(),
		ForeignKey('users.uid', name='users_comms_fk_uid', ondelete='CASCADE', onupdate='CASCADE'),
		Comment('User ID'),
		nullable=False,
		info={
			'header_string' : _('User'),
			'column_flex'   : 2,
			'filter_type'   : 'none'
		}
	)
	primary = Column(
		NPBoolean(),
		Comment('Primary flag'),
		nullable=False,
		default=False,
		server_default=npbool(False),
		info={
			'header_string' : _('Primary')
		}
	)
	scope = Column(
		ContactInfoType.db_type(),
		Comment('Channel scope'),
		nullable=True,
		default=None,
		server_default=text('NULL'),
		info={
			'header_string' : _('Type')
		}
	)
	value = Column(
		Unicode(255),
		Comment('Channel address value'),
		nullable=False,
		info={
			'header_string' : _('Address'),
			'column_flex'   : 3
		}
	)
	description = Column(
		'descr',
		UnicodeText(),
		Comment('Communication channel description'),
		nullable=True,
		default=None,
		server_default=text('NULL'),
		info={
			'header_string' : _('Description')
		}
	)
	def __str__(self):
		return '%s: %s' % (
			str(self.type),
			str(self.user)
		)
	def uri(self, req):
		# Clickable URI for the grid; implicitly None when the type or
		# address is missing.
		if self.type and self.value:
			return self.type.format_uri(self.value)
	def grid_icon(self, req):
		# Icon URL delegated to the channel type; implicitly None when
		# no type is set.
		if self.type:
			return self.type.grid_icon(req)
class DAVLock(Base):
	"""
	Persistent locking primitive used in DAV access.

	Rows represent WebDAV locks on URIs (optionally linked to a File
	row); expired locks are those whose 'timeout' is in the past.
	"""
	# Lock scope values stored in the 'scope' column.
	SCOPE_SHARED = 0
	SCOPE_EXCLUSIVE = 1

	__tablename__ = 'dav_locks'
	__table_args__ = (
		Comment('DAV locks'),
		Index('dav_locks_i_uid', 'uid'),
		Index('dav_locks_i_token', 'token'),
		Index('dav_locks_i_timeout', 'timeout'),
		Index('dav_locks_i_fileid', 'fileid'),
		Index('dav_locks_i_uri', 'uri', mysql_length=255),
		{
			'mysql_engine'  : 'InnoDB',
			'mysql_charset' : 'utf8'
		}
	)
	id = Column(
		'dlid',
		UInt32(),
		Sequence('dav_locks_dlid_seq'),
		Comment('DAV lock ID'),
		primary_key=True,
		nullable=False,
		info={
			'header_string' : _('ID')
		}
	)
	user_id = Column(
		'uid',
		UInt32(),
		ForeignKey('users.uid', name='dav_locks_fk_uid', ondelete='CASCADE', onupdate='CASCADE'),
		Comment('Owner\'s user ID'),
		nullable=True,
		default=None,
		server_default=text('NULL'),
		info={
			'header_string' : _('User')
		}
	)
	file_id = Column(
		'fileid',
		UInt32(),
		ForeignKey('files_def.fileid', name='dav_locks_fk_fileid', ondelete='CASCADE', onupdate='CASCADE'),
		Comment('Linked file ID'),
		nullable=True,
		default=None,
		server_default=text('NULL'),
		info={
			'header_string' : _('File')
		}
	)
	# NULL timeout means the lock never expires (see find()).
	timeout = Column(
		TIMESTAMP(),
		Comment('Lock timeout'),
		nullable=True,
		default=None,
#		server_default=text('NULL'),
		info={
			'header_string' : _('Timeout')
		}
	)
	creation_time = Column(
		'ctime',
		TIMESTAMP(),
		Comment('Creation timestamp'),
		nullable=True,
		default=None,
		server_default=FetchedValue(),
		info={
			'header_string' : _('Created')
		}
	)
	# Opaque token; matched against 'opaquelocktoken:' URIs in
	# test_token().
	token = Column(
		Unicode(100),
		Comment('Lock token'),
		nullable=False,
		info={
			'header_string' : _('Token')
		}
	)
	owner = Column(
		Unicode(100),
		Comment('Lock owner'),
		nullable=True,
		default=None,
		server_default=text('NULL'),
		info={
			'header_string' : _('Owner')
		}
	)
	# One of SCOPE_SHARED / SCOPE_EXCLUSIVE.
	scope = Column(
		Int8(),
		Comment('Lock scope'),
		nullable=False,
		default=0,
		server_default=text('0'),
		info={
			'header_string' : _('Scope')
		}
	)
	# 0 = this resource only; non-zero locks apply to descendants too.
	depth = Column(
		Int8(),
		Comment('Lock depth'),
		nullable=False,
		default=0,
		server_default=text('0'),
		info={
			'header_string' : _('Depth')
		}
	)
	uri = Column(
		Unicode(1000),
		Comment('Lock URI'),
		nullable=False,
		info={
			'header_string' : _('URI')
		}
	)
	@classmethod
	def find(cls, path, children=False):
		"""
		Return a query for all unexpired locks affecting *path* (a
		sequence of URI segments): locks on the path itself, depth
		locks on any ancestor, and optionally locks on any descendant.
		"""
		sess = DBSession()
		full_path = '/'.join(path)
		# Unexpired: no timeout at all, or timeout in the future.
		q = sess.query(DAVLock).filter(or_(
			DAVLock.timeout == None,
			DAVLock.timeout > func.now()
		))
		alter = [DAVLock.uri == full_path]
		# Depth locks held on any ancestor prefix of the path.
		for i in range(len(path) - 1):
			alter.append(and_(
				DAVLock.depth != 0,
				DAVLock.uri == '/'.join(path[:i + 1])
			))
		if children:
			alter.append(DAVLock.uri.startswith(full_path + '/'))
		return q.filter(or_(*alter))
	def get_dav_scope(self):
		# Map the stored integer scope to the DAV property constant.
		if self.scope == self.SCOPE_SHARED:
			return dprops.SHARED
		if self.scope == self.SCOPE_EXCLUSIVE:
			return dprops.EXCLUSIVE
		raise ValueError('Invalid lock scope: %r' % self.scope)
	def test_token(self, value):
		# Check a client-supplied 'opaquelocktoken:' URI against ours.
		if ('opaquelocktoken:%s' % self.token) == value:
			return True
		return False
	def refresh(self, new_td=None):
		"""
		Refresh the lock: reset creation time to now and extend the
		timeout by *new_td* seconds, or by the previous duration, or
		by 1800s as a last resort.  Returns the previous duration
		(a timedelta) or None.
		"""
		old_td = None
		if self.creation_time and self.timeout:
			old_td = self.timeout - self.creation_time
		self.creation_time = dt.datetime.now()
		if new_td:
			self.timeout = self.creation_time + dt.timedelta(seconds=new_td)
		elif old_td:
			self.timeout = self.creation_time + old_td
		else:
			self.timeout = self.creation_time + dt.timedelta(seconds=1800)
		return old_td
class FileMeta(Mutable, dict):
	"""
	Change-tracked dictionary for serialized file/folder metadata.

	Subclasses SQLAlchemy's Mutable so in-place edits mark the owning
	column dirty; DAV properties are kept under the 'p' sub-dict.
	"""
	@classmethod
	def coerce(cls, key, value):
		"""Wrap plain dicts in FileMeta; pass FileMeta through."""
		if isinstance(value, FileMeta):
			return value
		if isinstance(value, dict):
			return FileMeta(value)
		return Mutable.coerce(key, value)
	def __setitem__(self, key, value):
		dict.__setitem__(self, key, value)
		self.changed()
	def __delitem__(self, key):
		dict.__delitem__(self, key)
		self.changed()
	def __getstate__(self):
		# Pickle as a plain dict.
		return dict(self)
	def __setstate__(self, st):
		self.update(st)
	def get_prop(self, name):
		"""Return a single DAV property; KeyError when absent."""
		return self['p'][name]
	def get_props(self):
		"""Return the DAV property dict (empty dict when unset)."""
		return self.get('p', {})
	def set_prop(self, name, value):
		"""Set one DAV property and flag the object as changed."""
		if 'p' not in self:
			self['p'] = {}
		self['p'][name] = value
		self.changed()
	def del_prop(self, name):
		"""Delete one DAV property (no-op when absent); drops the 'p'
		sub-dict entirely once it becomes empty."""
		props = dict.get(self, 'p')
		if (props is None) or (name not in props):
			return
		del props[name]
		if not props:
			del self['p']
		self.changed()
class FileFolderAccessRule(DeclEnum):
	# Who may access a VFS folder; members are (db value, label, sort
	# order) per the project's DeclEnum convention.
	private = 'private', _('Owner-only access'), 10
	group   = 'group',   _('Group-only access'), 20
	public  = 'public',  _('Public access'),     30
@implementer(IDAVCollection)
class FileFolder(Base):
"""
NetProfile VFS folder definition.
"""
__tablename__ = 'files_folders'
__table_args__ = (
Comment('File folders'),
Index('files_folders_u_folder', 'parentid', 'name', unique=True),
Index('files_folders_i_uid', 'uid'),
Index('files_folders_i_gid', 'gid'),
Trigger('before', 'insert', 't_files_folders_bi'),
Trigger('before', 'update', 't_files_folders_bu'),
Trigger('after', 'insert', 't_files_folders_ai'),
Trigger('after', 'update', 't_files_folders_au'),
Trigger('after', 'delete', 't_files_folders_ad'),
{
'mysql_engine' : 'InnoDB',
'mysql_charset' : 'utf8',
'info' : {
'cap_read' : 'FILES_LIST',
'cap_create' : 'FILES_UPLOAD',
'cap_edit' : 'FILES_EDIT',
'cap_delete' : 'FILES_DELETE',
'menu_name' : _('Folders'),
'default_sort' : ({ 'property': 'name' ,'direction': 'ASC' },),
'grid_view' : ('ffid', 'name', 'parent', 'ctime', 'mtime'),
'grid_hidden' : ('ffid', 'parent'),
'form_view' : ('name', 'user', 'group', 'rights', 'ctime', 'mtime', 'descr'),
'easy_search' : ('name',),
'detail_pane' : ('netprofile_core.views', 'dpane_simple'),
'extra_data' : ('allow_read', 'allow_write', 'allow_traverse')
}
}
)
id = Column(
'ffid',
UInt32(),
Sequence('files_folders_ffid_seq'),
Comment('File folder ID'),
primary_key=True,
nullable=False,
info={
'header_string' : _('ID')
}
)
parent_id = Column(
'parentid',
UInt32(),
ForeignKey('files_folders.ffid', name='files_folders_fk_parentid', ondelete='CASCADE', onupdate='CASCADE'),
Comment('Parent folder ID'),
nullable=True,
default=None,
server_default=text('NULL'),
info={
'header_string' : _('Parent')
}
)
user_id = Column(
'uid',
UInt32(),
ForeignKey('users.uid', name='files_folders_fk_uid', ondelete='SET NULL', onupdate='CASCADE'),
Comment('Owner\'s user ID'),
nullable=True,
default=None,
server_default=text('NULL'),
info={
'header_string' : _('User'),
'editor_config' : { 'allowBlank' : False }
}
)
group_id = Column(
'gid',
UInt32(),
ForeignKey('groups.gid', name='files_folders_fk_gid', ondelete='SET NULL', onupdate='CASCADE'),
Comment('Owner\'s group ID'),
nullable=True,
default=None,
server_default=text('NULL'),
info={
'header_string' : _('Group'),
'editor_config' : { 'allowBlank' : False }
}
)
rights = Column(
UInt32(),
Comment('Rights bitmask'),
nullable=False,
default=F_DEFAULT_DIRS,
server_default=text(str(F_DEFAULT_DIRS)),
info={
'header_string' : _('Rights'),
'editor_xtype' : 'filerights',
'editor_config' : { 'isDirectory' : True }
}
)
access = Column(
FileFolderAccessRule.db_type(),
Comment('Folder access rule'),
nullable=False,
default=FileFolderAccessRule.public,
server_default=FileFolderAccessRule.public,
info={
'header_string' : _('Access Rule')
}
)
creation_time = Column(
'ctime',
TIMESTAMP(),
Comment('Creation timestamp'),
nullable=True,
default=None,
server_default=FetchedValue(),
info={
'header_string' : _('Created')
}
)
modification_time = Column(
'mtime',
TIMESTAMP(),
Comment('Last modification timestamp'),
CurrentTimestampDefault(on_update=True),
nullable=False,
# default=zzz,
info={
'header_string' : _('Modified')
}
)
name = Column(
ExactUnicode(255),
Comment('Folder name'),
nullable=False,
info={
'header_string' : _('Name')
}
)
description = Column(
'descr',
UnicodeText(),
Comment('Folder description'),
nullable=True,
default=None,
server_default=text('NULL'),
info={
'header_string' : _('Description')
}
)
meta = Column(
FileMeta.as_mutable(PickleType),
Comment('Serialized meta-data'),
nullable=True,
default=None,
server_default=text('NULL'),
info={
'header_string' : _('Metadata')
}
)
files = relationship(
'File',
backref='folder',
cascade='all, delete-orphan',
passive_deletes=True
)
subfolders = relationship(
'FileFolder',
backref=backref('parent', remote_side=[id])
)
root_groups = relationship(
'Group',
backref='root_folder',
primaryjoin='FileFolder.id == Group.root_folder_id'
)
@classmethod
def __augment_create__(cls, sess, obj, values, req):
u = req.user
root_ff = u.group.effective_root_folder
if 'parentid' in values:
pid = values['parentid']
if pid is None:
if not u.root_writable:
return False
else:
try:
pid = int(pid)
except (TypeError, ValueError):
return False
parent = sess.query(FileFolder).get(pid)
if parent is None:
return False
if (not parent.can_write(u)) or (not parent.can_traverse_path(u)):
return False
if root_ff and (not parent.is_inside(root_ff)):
return False
elif root_ff or not u.root_writable:
return False
return True
@classmethod
def __augment_update__(cls, sess, obj, values, req):
u = req.user
if not obj.can_write(u):
return False
parent = obj.parent
if parent:
if (not parent.can_write(u)) or (not parent.can_traverse_path(u)):
return False
root_ff = u.group.effective_root_folder
if root_ff and (not obj.is_inside(root_ff)):
return False
if (not root_ff) and (not u.root_writable):
return False
if 'parentid' in values:
pid = values['parentid']
if pid is None:
if not u.root_writable:
return False
else:
try:
pid = int(pid)
except (TypeError, ValueError):
return False
new_parent = sess.query(FileFolder).get(pid)
if new_parent is None:
return False
if (not new_parent.can_write(u)) or (not new_parent.can_traverse_path(u)):
return False
if root_ff and (not new_parent.is_inside(root_ff)):
return False
return True
@classmethod
def __augment_delete__(cls, sess, obj, values, req):
u = req.user
if not obj.can_write(u):
return False
parent = obj.parent
if parent:
if (not parent.can_write(u)) or (not parent.can_traverse_path(u)):
return False
root_ff = u.group.effective_root_folder
if root_ff and (not obj.is_inside(root_ff)):
return False
if (not parent) and (not u.root_writable):
return False
# Extra precaution
if obj.user != u:
return False
return True
	@property
	def __name__(self):
		# Resource-traversal name of this node.
		return self.name
	def __iter__(self):
		"""Yield the names of all subfolders, then all files."""
		for t in self.subfolders:
			yield t.name
		for t in self.files:
			yield t.filename
	def __getitem__(self, name):
		"""Look up a child by name (subfolders take precedence over files).

		Propagates the request/plugin context and parent link onto the
		returned child.  Raises KeyError when no such child exists.
		"""
		sess = DBSession()
		try:
			f = sess.query(FileFolder).filter(FileFolder.parent == self, FileFolder.name == name).one()
		except NoResultFound:
			try:
				f = sess.query(File).filter(File.folder == self, File.filename == name).one()
			except NoResultFound:
				raise KeyError('No such file or directory')
		f.__req__ = getattr(self, '__req__', None)
		f.__plugin__ = getattr(self, '__plugin__', None)
		f.__parent__ = self
		return f
	@property
	def __acl__(self):
		"""Pyramid-style ACL for this folder derived from the rights bitmask.

		Parent 'access' ACEs gate every permission here: a bit only grants
		a permission when the corresponding principal can also access the
		parent.  Note that for folders the EXEC bit drives 'access'.
		Assumes ``self.__parent__`` has been set by traversal — TODO confirm.
		"""
		rights = self.rights
		if self.user:
			ff_user = 'u:%s' % self.user.login
		else:
			ff_user = 'u:'
		if self.group:
			ff_group = 'g:%s' % self.group.name
		else:
			ff_group = 'g:'
		can_access_u = None
		can_access_g = None
		can_access_o = None
		# Collect the parent's 'access' verdicts per principal class.
		for pacl in self.__parent__.__acl__:
			if pacl[2] == 'access':
				if pacl[1] == ff_user:
					can_access_u = (True if (pacl[0] == Allow) else False)
				elif pacl[1] == ff_group:
					can_access_g = (True if (pacl[0] == Allow) else False)
				elif pacl[1] == Everyone:
					can_access_o = (True if (pacl[0] == Allow) else False)
		# Fall back to the 'other' verdict where no specific ACE matched.
		if can_access_g is None:
			can_access_g = can_access_o
		if can_access_u is None:
			can_access_u = can_access_o
		return (
			(Allow if ((rights & F_OWNER_EXEC) and can_access_u) else Deny, ff_user, 'access'),
			(Allow if ((rights & F_OWNER_READ) and can_access_u) else Deny, ff_user, 'read'),
			(Allow if ((rights & F_OWNER_WRITE) and can_access_u) else Deny, ff_user, 'write'),
			(Allow if ((rights & F_OWNER_EXEC) and can_access_u) else Deny, ff_user, 'execute'),
			(Allow if ((rights & F_OWNER_WRITE) and can_access_u) else Deny, ff_user, 'create'),
			(Allow if ((rights & F_OWNER_WRITE) and can_access_u) else Deny, ff_user, 'delete'),
			(Allow if ((rights & F_GROUP_EXEC) and can_access_g) else Deny, ff_group, 'access'),
			(Allow if ((rights & F_GROUP_READ) and can_access_g) else Deny, ff_group, 'read'),
			(Allow if ((rights & F_GROUP_WRITE) and can_access_g) else Deny, ff_group, 'write'),
			(Allow if ((rights & F_GROUP_EXEC) and can_access_g) else Deny, ff_group, 'execute'),
			(Allow if ((rights & F_GROUP_WRITE) and can_access_g) else Deny, ff_group, 'create'),
			(Allow if ((rights & F_GROUP_WRITE) and can_access_g) else Deny, ff_group, 'delete'),
			(Allow if ((rights & F_OTHER_EXEC) and can_access_o) else Deny, Everyone, 'access'),
			(Allow if ((rights & F_OTHER_READ) and can_access_o) else Deny, Everyone, 'read'),
			(Allow if ((rights & F_OTHER_WRITE) and can_access_o) else Deny, Everyone, 'write'),
			(Allow if ((rights & F_OTHER_EXEC) and can_access_o) else Deny, Everyone, 'execute'),
			(Allow if ((rights & F_OTHER_WRITE) and can_access_o) else Deny, Everyone, 'create'),
			(Allow if ((rights & F_OTHER_WRITE) and can_access_o) else Deny, Everyone, 'delete'),
			DENY_ALL
		)
	@property
	def dav_owner(self):
		# DAV owner principal: the owning user object.
		return self.user
	@property
	def dav_group(self):
		# DAV group principal: the owning group object.
		return self.group
	def dav_acl(self, req):
		"""Translate the Pyramid ACL into a WebDAV ACL value.

		Only Allow ACEs are mapped; 'read'/'write'/'create'/'delete' become
		the corresponding DAV privileges, 'access'/'execute' are TODO.
		"""
		if self.user:
			ff_user = 'u:%s' % self.user.login
		else:
			ff_user = 'u:'
		if self.group:
			ff_group = 'g:%s' % self.group.name
		else:
			ff_group = 'g:'
		owner_y = []
		group_y = []
		other_y = []
		for ace in self.__acl__:
			if ace[0] != Allow:
				continue
			bucket = None
			if ace[1] == ff_user:
				bucket = owner_y
			elif ace[1] == ff_group:
				bucket = group_y
			elif ace[1] == Everyone:
				bucket = other_y
			if bucket is None:
				continue
			if ace[2] == 'read':
				bucket.append(dprops.ACL_READ)
			elif ace[2] == 'write':
				bucket.extend((
					dprops.ACL_WRITE,
					dprops.ACL_WRITE_CONTENT,
					dprops.ACL_WRITE_PROPERTIES
				))
			elif ace[2] == 'create':
				bucket.append(dprops.ACL_BIND)
			elif ace[2] == 'delete':
				bucket.append(dprops.ACL_UNBIND)
			# TODO: access, execute
		aces = []
		if len(owner_y):
			aces.append(DAVACEValue(
				DAVPrincipalValue(DAVPrincipalValue.PROPERTY, prop=dprops.OWNER),
				grant=owner_y,
				protected=True
			))
		if len(group_y):
			aces.append(DAVACEValue(
				DAVPrincipalValue(DAVPrincipalValue.PROPERTY, prop=dprops.GROUP),
				grant=group_y,
				protected=True
			))
		if len(other_y):
			aces.append(DAVACEValue(
				DAVPrincipalValue(DAVPrincipalValue.ALL),
				grant=other_y,
				protected=True
			))
		return DAVACLValue(aces)
def get_uri(self):
p = getattr(self, '__parent__', None)
if p is None:
p = self.parent
if p is None:
return [ '', 'fs', self.name ]
uri = p.get_uri()
uri.append(self.name)
return uri
	def dav_props(self, pset):
		"""Return values for the requested DAV properties of this folder.

		Live (read-only) properties are computed from the row; custom
		properties are served from the pickled metadata container.
		"""
		ret = {}
		if dprops.RESOURCE_TYPE in pset:
			ret[dprops.RESOURCE_TYPE] = DAVResourceTypeValue(dprops.COLLECTION)
		if dprops.CREATION_DATE in pset:
			ret[dprops.CREATION_DATE] = self.creation_time
		if dprops.DISPLAY_NAME in pset:
			ret[dprops.DISPLAY_NAME] = self.name
		if dprops.LAST_MODIFIED in pset:
			ret[dprops.LAST_MODIFIED] = self.modification_time
		if dprops.IS_COLLECTION in pset:
			ret[dprops.IS_COLLECTION] = '1'
		if dprops.IS_FOLDER in pset:
			ret[dprops.IS_FOLDER] = 't'
		if dprops.IS_HIDDEN in pset:
			ret[dprops.IS_HIDDEN] = '0'
		if isinstance(pset, DAVAllPropsSet):
			ret.update(self.get_props())
		else:
			# Anything not live is looked up in stored metadata.
			custom = pset.difference(dprops.RO_PROPS)
			for cprop in custom:
				try:
					ret[cprop] = self.get_prop(cprop)
				except KeyError:
					pass
		return ret
	def dav_props_set(self, pdict):
		"""Store (or delete, for None values) custom DAV properties."""
		pset = set(pdict)
		custom = pset.difference(dprops.RO_PROPS)
		for cprop in custom:
			if pdict[cprop] is None:
				self.del_prop(cprop)
			else:
				self.set_prop(cprop, pdict[cprop])
		return True
def get_prop(self, name):
if not self.meta:
self.meta = FileMeta()
return self.meta.get_prop(name)
def get_props(self):
if not self.meta:
self.meta = FileMeta()
return self.meta.get_props()
def set_prop(self, name, value):
if not self.meta:
self.meta = FileMeta()
return self.meta.set_prop(name, value)
def del_prop(self, name):
if not self.meta:
self.meta = FileMeta()
return self.meta.del_prop(name)
	def dav_create(self, req, name, rtype=None, props=None, data=None):
		"""Create a child node (folder for COLLECTION rtype, file otherwise).

		Ownership defaults to the requesting user; optional *props* may
		override MIME type and timestamps, *data* supplies file contents.
		"""
		# TODO: externalize type resolution
		user = req.user
		sess = DBSession()
		if rtype and (dprops.COLLECTION in rtype):
			obj = FileFolder(
				user_id=user.id,
				group_id=user.group_id,
				name=name,
				parent=self,
				rights=F_DEFAULT_DIRS
			)
			sess.add(obj)
		else:
			obj = File(
				user_id=user.id,
				group_id=user.group_id,
				filename=name,
				name=name,
				folder=self,
				rights=F_DEFAULT_FILES
			)
			sess.add(obj)
			if props and (dprops.CONTENT_TYPE in props):
				obj.mime_type = props[dprops.CONTENT_TYPE]
			if data is not None:
				# TODO: detect type of data (fd / buffer)
				obj.set_from_file(data, user, sess)
		if props:
			if dprops.CREATION_DATE in props:
				obj.creation_time = props[dprops.CREATION_DATE]
			if dprops.LAST_MODIFIED in props:
				obj.modification_time = props[dprops.LAST_MODIFIED]
		return obj
def dav_append(self, req, ctx, name):
if isinstance(ctx, File):
ctx.folder = self
ctx.filename = name
elif isinstance(ctx, FileFolder):
if self.is_inside(ctx):
raise ValueError('Infinite folder loop detected.')
ctx.parent = self
ctx.name = name
	def dav_clone(self, req):
		"""Return a detached copy of this folder (parent unset, meta not cloned)."""
		# TODO: clone meta
		obj = FileFolder(
			parent_id=None,
			name=self.name,
			user_id=self.user_id,
			group_id=self.group_id,
			rights=self.rights,
			access=self.access,
			description=self.description
		)
		return obj
	@property
	def dav_children(self):
		# Yield subfolders then files, propagating traversal context.
		for t in itertools.chain(self.subfolders, self.files):
			t.__req__ = getattr(self, '__req__', None)
			t.__plugin__ = getattr(self, '__plugin__', None)
			t.__parent__ = self
			yield t
	def allow_read(self, req):
		# Per-request read check (listed in table info 'extra_data').
		return self.can_read(req.user)
	def allow_write(self, req):
		# Per-request write check.
		return self.can_write(req.user)
	def allow_traverse(self, req):
		# Per-request full-path traversal check.
		return self.can_traverse_path(req.user)
def can_read(self, user):
if self.user_id == user.id:
return bool(self.rights & F_OWNER_READ)
if self.group_id in user.group_vector():
return bool(self.rights & F_GROUP_READ)
return bool(self.rights & F_OTHER_READ)
def can_write(self, user):
if self.user_id == user.id:
return bool(self.rights & F_OWNER_WRITE)
if self.group_id in user.group_vector():
return bool(self.rights & F_GROUP_WRITE)
return bool(self.rights & F_OTHER_WRITE)
def can_traverse(self, user):
if self.user_id == user.id:
return bool(self.rights & F_OWNER_EXEC)
if self.group_id in user.group_vector():
return bool(self.rights & F_GROUP_EXEC)
return bool(self.rights & F_OTHER_EXEC)
	def can_traverse_path(self, user):
		"""True when *user* may traverse this folder and every ancestor.

		The walk stops early (successfully) at the user's effective root
		folder, or at the top of the tree.
		"""
		if not self.can_traverse(user):
			return False
		if user.group and (self == user.group.effective_root_folder):
			return True
		if self.parent:
			return self.parent.can_traverse_path(user)
		return True
	def is_inside(self, cont):
		"""True when this folder is *cont* itself or lies anywhere beneath it."""
		par = self
		while par:
			if par.id == cont.id:
				return True
			par = par.parent
		return False
def __str__(self):
return '%s' % str(self.name)
_BLOCK_SIZE = 4096 * 64 # 256 KiB: unit of buffered VFS reads/writes
_CHUNK_SIZE = 1024 * 1024 * 2 # 2 MiB: capacity of one FileChunk row
class WindowFileIter(FileIter):
	"""FileIter variant that yields at most *window* bytes in total.

	With ``window=None`` it behaves exactly like a plain FileIter; used
	to serve HTTP range responses from an already-positioned file.
	"""
	def __init__(self, f, block_size=_BLOCK_SIZE, window=None):
		super(WindowFileIter, self).__init__(f, block_size)
		# Remaining number of bytes to serve; None means unbounded.
		self.window = window
	def next(self):
		remaining = self.window
		if remaining is None:
			return super(WindowFileIter, self).next()
		if remaining <= 0:
			raise StopIteration
		chunk = self.file.read(min(self.block_size, remaining))
		if not chunk:
			raise StopIteration
		self.window = remaining - len(chunk)
		return chunk
	__next__ = next
class FileResponse(Response):
	"""HTTP response that serves a VFS File object.

	Handles conditional requests, single-range requests (multi-range
	``Range`` headers are ignored) and streams either the inline BLOB
	or the chunked storage.

	Fix: *request* defaults to None but was dereferenced unconditionally;
	a missing request now behaves like a plain full-body GET instead of
	raising AttributeError.
	"""
	def __init__(self, obj, request=None, cache_max_age=None, content_encoding=None):
		super(FileResponse, self).__init__(conditional_response=True)
		self.last_modified = obj.modification_time
		self.content_type = obj.plain_mime_type
		self.charset = obj.mime_charset
		self.allow = ('GET', 'HEAD')
		self.vary = ('Cookie',)
		# TODO: self.cache_control
		self.accept_ranges = 'bytes'
		self.headerlist.append(('X-Frame-Options', 'SAMEORIGIN'))
		if PY3:
			self.content_disposition = \
				'attachment; filename*=UTF-8\'\'%s' % (
					urllib.parse.quote(obj.filename, '')
				)
		else:
			self.content_disposition = \
				'attachment; filename*=UTF-8\'\'%s' % (
					urllib.quote(obj.filename.encode(), '')
				)
		self.etag = obj.etag
		self.content_encoding = content_encoding

		def _wrap(bio):
			# Prefer the WSGI server's zero-copy file wrapper when offered.
			if request is not None:
				environ = request.environ
				if 'wsgi.file_wrapper' in environ:
					return environ['wsgi.file_wrapper'](bio, _BLOCK_SIZE)
			return FileIter(bio, _BLOCK_SIZE)

		cr = None
		if (request is not None) and request.range and (self in request.if_range) \
		and (',' not in request.headers.get('Range')):
			# Single-range request whose validator still matches.
			cr = request.range.content_range(length=obj.size)
		if cr:
			self.status = 206
			self.content_range = cr
		elif obj.size:
			self.content_range = (0, obj.size, obj.size)
			if (request is not None) and request.range and ('If-Range' not in request.headers):
				# An unconditional range was requested but is unsatisfiable.
				self.status = 416
				self.content_range = 'bytes */%d' % obj.size
		if (request is None) or (request.method != 'HEAD'):
			data = obj.data
			if data is None:
				# Contents live in the chunk store.
				bio = obj.open('r')
				if cr:
					bio.seek(cr.start)
					self.app_iter = WindowFileIter(bio, _BLOCK_SIZE, cr.stop - cr.start)
				else:
					self.app_iter = _wrap(bio)
			else:
				# Contents live in the inline BLOB column.
				bio = io.BytesIO(data[cr.start:cr.stop] if cr else data)
				self.app_iter = _wrap(bio)
		if cr:
			self.content_length = (cr.stop - cr.start)
		else:
			self.content_length = obj.size
		if cache_max_age is not None:
			self.cache_expires = cache_max_age
_re_charset = re.compile(r'charset=([\w\d_-]+)')
@implementer(IDAVFile)
class File(Base):
	"""
	NetProfile VFS file definition.

	Contents live either inline (the deferred ``data`` BLOB) or split
	across FileChunk rows; ``size``, ``etag`` and ``mime_type`` are kept
	up to date by the content-writing helpers below.
	"""
	__tablename__ = 'files_def'
	__table_args__ = (
		Comment('Stored files'),
		Index('files_def_u_file', 'ffid', 'fname', unique=True),
		Index('files_def_i_uid', 'uid'),
		Index('files_def_i_gid', 'gid'),
		Index('files_def_i_ffid', 'ffid'),
		Trigger('before', 'insert', 't_files_def_bi'),
		Trigger('before', 'update', 't_files_def_bu'),
		Trigger('after', 'insert', 't_files_def_ai'),
		Trigger('after', 'update', 't_files_def_au'),
		Trigger('after', 'delete', 't_files_def_ad'),
		{
			'mysql_engine' : 'InnoDB',
			'mysql_charset' : 'utf8',
			'info' : {
				'cap_read' : 'FILES_LIST',
				'cap_create' : 'FILES_UPLOAD',
				'cap_edit' : 'FILES_EDIT',
				'cap_delete' : 'FILES_DELETE',
				'menu_name' : _('Files'),
				'default_sort' : ({ 'property': 'fname' ,'direction': 'ASC' },),
				'grid_view' : ('fileid', 'folder', 'fname', 'size', 'ctime', 'mtime'),
				'grid_hidden' : ('fileid',),
				'form_view' : ('fname', 'folder', 'size', 'user', 'group', 'rights', 'ctime', 'mtime', 'name', 'descr'),
				'easy_search' : ('fname', 'name'),
				'detail_pane' : ('netprofile_core.views', 'dpane_simple'),
				'extra_data' : ('allow_access', 'allow_read', 'allow_write', 'allow_execute')
			}
		}
	)
	id = Column(
		'fileid',
		UInt32(),
		Sequence('files_def_fileid_seq'),
		Comment('File ID'),
		primary_key=True,
		nullable=False,
		info={
			'header_string' : _('ID')
		}
	)
	# Containing folder; NULL means the file lives in the VFS root.
	folder_id = Column(
		'ffid',
		UInt32(),
		ForeignKey('files_folders.ffid', name='files_def_fk_ffid', ondelete='CASCADE', onupdate='CASCADE'),
		Comment('Parent folder ID'),
		nullable=True,
		default=None,
		server_default=text('NULL'),
		info={
			'header_string' : _('Folder'),
			'filter_type' : 'list',
			'column_flex' : 1
		}
	)
	filename = Column(
		'fname',
		ExactUnicode(255),
		Comment('File name'),
		nullable=False,
		info={
			'header_string' : _('Filename'),
			'column_flex' : 2
		}
	)
	name = Column(
		Unicode(255),
		Comment('Human-readable file name'),
		nullable=False,
		info={
			'header_string' : _('Name'),
			'column_flex' : 2
		}
	)
	user_id = Column(
		'uid',
		UInt32(),
		ForeignKey('users.uid', name='files_def_fk_uid', ondelete='SET NULL', onupdate='CASCADE'),
		Comment('Owner\'s user ID'),
		nullable=True,
		default=None,
		server_default=text('NULL'),
		info={
			'header_string' : _('User'),
			'editor_config' : { 'allowBlank' : False }
		}
	)
	group_id = Column(
		'gid',
		UInt32(),
		ForeignKey('groups.gid', name='files_def_fk_gid', ondelete='SET NULL', onupdate='CASCADE'),
		Comment('Owner\'s group ID'),
		nullable=True,
		default=None,
		server_default=text('NULL'),
		info={
			'header_string' : _('Group'),
			'editor_config' : { 'allowBlank' : False }
		}
	)
	# UNIX-style permission bitmask (see the F_* flag constants).
	rights = Column(
		UInt32(),
		Comment('Rights bitmask'),
		nullable=False,
		default=F_DEFAULT_FILES,
		server_default=text(str(F_DEFAULT_FILES)),
		info={
			'header_string' : _('Rights'),
			'editor_xtype' : 'filerights'
		}
	)
	mime_type = Column(
		'mime',
		ASCIIString(255),
		Comment('MIME type of the file'),
		nullable=False,
		default='application/octet-stream',
		server_default='application/octet-stream',
		info={
			'header_string' : _('Type')
		}
	)
	size = Column(
		UInt32(),
		Comment('File size (in bytes)'),
		nullable=False,
		info={
			'header_string' : _('Size'),
			'read_only' : True
		}
	)
	creation_time = Column(
		'ctime',
		TIMESTAMP(),
		Comment('Creation timestamp'),
		nullable=True,
		default=None,
		server_default=FetchedValue(),
		info={
			'header_string' : _('Created'),
			'read_only' : True
		}
	)
	modification_time = Column(
		'mtime',
		TIMESTAMP(),
		Comment('Last modification timestamp'),
		CurrentTimestampDefault(on_update=True),
		nullable=False,
		# default supplied by CurrentTimestampDefault above
		info={
			'header_string' : _('Modified'),
			'read_only' : True
		}
	)
	# MD5-based validator maintained by the content-writing helpers.
	etag = Column(
		ASCIIString(255),
		Comment('Generated file ETag'),
		nullable=True,
		default=None,
		server_default=text('NULL'),
		info={
			'header_string' : _('E-Tag'),
			'read_only' : True
		}
	)
	read_count = Column(
		'rcount',
		UInt32(),
		Comment('Current read count'),
		nullable=False,
		default=0,
		server_default=text('0'),
		info={
			'header_string' : _('Read Count')
		}
	)
	description = Column(
		'descr',
		UnicodeText(),
		Comment('File description'),
		nullable=True,
		default=None,
		server_default=text('NULL'),
		info={
			'header_string' : _('Description')
		}
	)
	# Pickled FileMeta container for custom DAV properties.
	meta = Column(
		FileMeta.as_mutable(PickleType),
		Comment('Serialized meta-data'),
		nullable=True,
		default=None,
		server_default=text('NULL'),
		info={
			'header_string' : _('Metadata')
		}
	)
	# Inline contents; NULL when the data lives in FileChunk rows.
	data = deferred(Column(
		LargeBLOB(),
		Comment('Actual file data'),
		nullable=True,
		default=None,
		server_default=text('NULL'),
		info={
			'header_string' : _('Data')
		}
	))
	chunks = relationship(
		'FileChunk',
		backref=backref('file', innerjoin=True),
		cascade='all, delete-orphan',
		passive_deletes=True
	)
	locks = relationship(
		'DAVLock',
		backref='file',
		cascade='all, delete-orphan',
		passive_deletes=True
	)
@classmethod
def __augment_create__(cls, sess, obj, values, req):
u = req.user
root_ff = u.group.effective_root_folder
if 'ffid' in values:
ffid = values['ffid']
if ffid is None:
if not u.root_writable:
return False
else:
try:
ffid = int(ffid)
except (TypeError, ValueError):
return False
parent = sess.query(FileFolder).get(ffid)
if parent is None:
return False
if (not parent.can_write(u)) or (not parent.can_traverse_path(u)):
return False
if root_ff and (not parent.is_inside(root_ff)):
return False
elif root_ff or not u.root_writable:
return False
return True
@classmethod
def __augment_update__(cls, sess, obj, values, req):
u = req.user
if not obj.can_write(u):
return False
parent = obj.folder
if parent:
if (not parent.can_write(u)) or (not parent.can_traverse_path(u)):
return False
root_ff = u.group.effective_root_folder
if root_ff and (not obj.is_inside(root_ff)):
return False
if (not root_ff) and (not u.root_writable):
return False
if 'ffid' in values:
ffid = values['ffid']
if ffid is None:
if not u.root_writable:
return False
else:
try:
ffid = int(ffid)
except (TypeError, ValueError):
return False
new_parent = sess.query(FileFolder).get(ffid)
if new_parent is None:
return False
if (not new_parent.can_write(u)) or (not new_parent.can_traverse_path(u)):
return False
if root_ff and (not new_parent.is_inside(root_ff)):
return False
return True
@classmethod
def __augment_delete__(cls, sess, obj, values, req):
u = req.user
if not obj.can_write(u):
return False
parent = obj.folder
if parent:
if (not parent.can_write(u)) or (not parent.can_traverse_path(u)):
return False
root_ff = u.group.effective_root_folder
if root_ff and (not obj.is_inside(root_ff)):
return False
if (not parent) and (not u.root_writable):
return False
return True
@property
def plain_mime_type(self):
return self.mime_type.split(';')[0]
@property
def mime_class(self):
return self.mime_type.split('/')[0]
@property
def mime_charset(self):
if not self.mime_type:
return None
csm = _re_charset.search(self.mime_type)
if csm:
cset = csm.group(1)
if cset in {'binary', 'unknown-8bit'}:
return None
return cset
@property
def __name__(self):
return self.filename
	@property
	def __acl__(self):
		"""Pyramid-style ACL for this file derived from the rights bitmask.

		Parent 'access' ACEs gate every permission; for files the READ bit
		drives 'access' (folders use EXEC).  Assumes ``self.__parent__``
		has been set by traversal — TODO confirm.
		"""
		rights = self.rights
		if self.user:
			ff_user = 'u:%s' % self.user.login
		else:
			ff_user = 'u:'
		if self.group:
			ff_group = 'g:%s' % self.group.name
		else:
			ff_group = 'g:'
		can_access_u = None
		can_access_g = None
		can_access_o = None
		# Collect the parent's 'access' verdicts per principal class.
		for pacl in self.__parent__.__acl__:
			if pacl[2] == 'access':
				if pacl[1] == ff_user:
					can_access_u = (True if (pacl[0] == Allow) else False)
				elif pacl[1] == ff_group:
					can_access_g = (True if (pacl[0] == Allow) else False)
				elif pacl[1] == Everyone:
					can_access_o = (True if (pacl[0] == Allow) else False)
		# Fall back to the 'other' verdict where no specific ACE matched.
		if can_access_g is None:
			can_access_g = can_access_o
		if can_access_u is None:
			can_access_u = can_access_o
		return (
			(Allow if ((rights & F_OWNER_READ) and can_access_u) else Deny, ff_user, 'access'),
			(Allow if ((rights & F_OWNER_READ) and can_access_u) else Deny, ff_user, 'read'),
			(Allow if ((rights & F_OWNER_WRITE) and can_access_u) else Deny, ff_user, 'write'),
			(Allow if ((rights & F_OWNER_EXEC) and can_access_u) else Deny, ff_user, 'execute'),
			(Allow if ((rights & F_GROUP_READ) and can_access_g) else Deny, ff_group, 'access'),
			(Allow if ((rights & F_GROUP_READ) and can_access_g) else Deny, ff_group, 'read'),
			(Allow if ((rights & F_GROUP_WRITE) and can_access_g) else Deny, ff_group, 'write'),
			(Allow if ((rights & F_GROUP_EXEC) and can_access_g) else Deny, ff_group, 'execute'),
			(Allow if ((rights & F_OTHER_READ) and can_access_o) else Deny, Everyone, 'access'),
			(Allow if ((rights & F_OTHER_READ) and can_access_o) else Deny, Everyone, 'read'),
			(Allow if ((rights & F_OTHER_WRITE) and can_access_o) else Deny, Everyone, 'write'),
			(Allow if ((rights & F_OTHER_EXEC) and can_access_o) else Deny, Everyone, 'execute'),
			DENY_ALL
		)
def get_uri(self):
p = getattr(self, '__parent__', None)
if p is None:
p = self.folder
if p is None:
return [ self.filename ]
uri = p.get_uri()
uri.append(self.filename)
return uri
	@property
	def dav_owner(self):
		# DAV owner principal: the owning user object.
		return self.user
	@property
	def dav_group(self):
		# DAV group principal: the owning group object.
		return self.group
	def dav_acl(self, req):
		"""Translate the Pyramid ACL into a WebDAV ACL value.

		Only Allow ACEs are mapped; files expose read/write privileges
		('access'/'execute' are TODO, bind/unbind apply to folders only).
		"""
		if self.user:
			ff_user = 'u:%s' % self.user.login
		else:
			ff_user = 'u:'
		if self.group:
			ff_group = 'g:%s' % self.group.name
		else:
			ff_group = 'g:'
		owner_y = []
		group_y = []
		other_y = []
		for ace in self.__acl__:
			if ace[0] != Allow:
				continue
			bucket = None
			if ace[1] == ff_user:
				bucket = owner_y
			elif ace[1] == ff_group:
				bucket = group_y
			elif ace[1] == Everyone:
				bucket = other_y
			if bucket is None:
				continue
			if ace[2] == 'read':
				bucket.append(dprops.ACL_READ)
			elif ace[2] == 'write':
				bucket.extend((
					dprops.ACL_WRITE,
					dprops.ACL_WRITE_CONTENT,
					dprops.ACL_WRITE_PROPERTIES
				))
			# TODO: access, execute
		aces = []
		if len(owner_y):
			aces.append(DAVACEValue(
				DAVPrincipalValue(DAVPrincipalValue.PROPERTY, prop=dprops.OWNER),
				grant=owner_y,
				protected=True
			))
		if len(group_y):
			aces.append(DAVACEValue(
				DAVPrincipalValue(DAVPrincipalValue.PROPERTY, prop=dprops.GROUP),
				grant=group_y,
				protected=True
			))
		if len(other_y):
			aces.append(DAVACEValue(
				DAVPrincipalValue(DAVPrincipalValue.ALL),
				grant=other_y,
				protected=True
			))
		return DAVACLValue(aces)
	def dav_props(self, pset):
		"""Return values for the requested DAV properties of this file.

		Live (read-only) properties come from the row; custom properties
		from stored metadata.  EXECUTABLE needs the traversal request
		context (``__req__``) to evaluate permissions.
		"""
		ret = {}
		if dprops.RESOURCE_TYPE in pset:
			# Empty resource type marks a non-collection node.
			ret[dprops.RESOURCE_TYPE] = DAVResourceTypeValue()
		if dprops.CONTENT_LENGTH in pset:
			ret[dprops.CONTENT_LENGTH] = self.size
		if dprops.CONTENT_TYPE in pset:
			ret[dprops.CONTENT_TYPE] = self.plain_mime_type
		if dprops.CREATION_DATE in pset:
			ret[dprops.CREATION_DATE] = self.creation_time
		if dprops.DISPLAY_NAME in pset:
			ret[dprops.DISPLAY_NAME] = self.filename
		if dprops.ETAG in pset:
			etag = None
			if self.etag:
				etag = '"%s"' % self.etag
			ret[dprops.ETAG] = etag
		if hasattr(self, '__req__'):
			req = self.__req__
			if dprops.EXECUTABLE in pset:
				ret[dprops.EXECUTABLE] = 'T' if self.can_execute(req.user) else 'F'
		if dprops.LAST_MODIFIED in pset:
			ret[dprops.LAST_MODIFIED] = self.modification_time
		if isinstance(pset, DAVAllPropsSet):
			ret.update(self.get_props())
		else:
			# Anything not live is looked up in stored metadata.
			custom = pset.difference(dprops.RO_PROPS)
			for cprop in custom:
				try:
					ret[cprop] = self.get_prop(cprop)
				except KeyError:
					pass
		return ret
	def dav_props_set(self, pdict):
		"""Store (or delete, for None values) custom DAV properties."""
		pset = set(pdict)
		custom = pset.difference(dprops.RO_PROPS)
		for cprop in custom:
			if pdict[cprop] is None:
				self.del_prop(cprop)
			else:
				self.set_prop(cprop, pdict[cprop])
		return True
def get_prop(self, name):
if not self.meta:
self.meta = FileMeta()
return self.meta.get_prop(name)
def get_props(self):
if not self.meta:
self.meta = FileMeta()
return self.meta.get_props()
def set_prop(self, name, value):
if not self.meta:
self.meta = FileMeta()
return self.meta.set_prop(name, value)
def del_prop(self, name):
if not self.meta:
self.meta = FileMeta()
return self.meta.del_prop(name)
	def dav_get(self, req):
		"""Serve the file contents for a DAV GET."""
		return self.get_response(req)
	def dav_put(self, req, data, start=None, length=None):
		"""Replace file contents (or a byte region) from a DAV PUT body."""
		# Invalidate the ETag up front; it is recomputed from written data.
		self.etag = None
		if isinstance(start, int) and isinstance(length, int):
			self.set_region_from_file(data, start, length, req.user)
		else:
			self.set_from_file(data, req.user)
	def dav_clone(self, req):
		"""Return a detached copy of this file, with contents duplicated.

		The clone starts with size 0 / no ETag; set_from_object() then
		streams the contents over and copies ETag and MIME type.
		"""
		# TODO: clone meta
		obj = File(
			folder_id=self.folder_id,
			filename=self.filename,
			name=self.name,
			user_id=self.user_id,
			group_id=self.group_id,
			rights=self.rights,
			mime_type=self.mime_type,
			size=0,
			etag=None,
			description=self.description
		)
		obj.set_from_object(self, req.user)
		return obj
def allow_access(self, req):
return self.can_access(req.user)
def allow_read(self, req):
return self.can_read(req.user)
def allow_write(self, req):
return self.can_write(req.user)
def allow_execute(self, req):
return self.can_execute(req.user)
def can_access(self, user):
if self.folder:
return self.folder.can_traverse_path(user)
return True
def can_read(self, user):
if self.user_id == user.id:
return bool(self.rights & F_OWNER_READ)
if self.group_id in user.group_vector():
return bool(self.rights & F_GROUP_READ)
return bool(self.rights & F_OTHER_READ)
def can_write(self, user):
if self.user_id == user.id:
return bool(self.rights & F_OWNER_WRITE)
if self.group_id in user.group_vector():
return bool(self.rights & F_GROUP_WRITE)
return bool(self.rights & F_OTHER_WRITE)
def can_execute(self, user):
if self.user_id == user.id:
return bool(self.rights & F_OWNER_EXEC)
if self.group_id in user.group_vector():
return bool(self.rights & F_GROUP_EXEC)
return bool(self.rights & F_OTHER_EXEC)
def is_inside(self, cont):
if (self.folder_id is None) and (cont is None):
return True
par = self.folder
while par:
if par.id == cont.id:
return True
par = par.parent
return False
	def get_response(self, req):
		"""Build a streaming HTTP response for this file."""
		return FileResponse(self, req)
	def open(self, mode='r', user_perm=None, sess=None):
		"""Open the chunked contents, returning a VFSFileIO handle.

		*mode* follows the usual 'r'/'w'/'a' (plus '+') conventions.
		When *user_perm* is given, read/write permission is enforced and
		IOError(EACCES) raised on violation.
		"""
		xm = 0
		# Order matters: the '+' variants must be tested before the plain ones.
		if 'r+' in mode:
			xm |= _VFS_READ|_VFS_WRITE
		elif 'w+' in mode:
			xm |= _VFS_READ|_VFS_WRITE|_VFS_TRUNCATE
		elif 'a+' in mode:
			xm |= _VFS_READ|_VFS_WRITE|_VFS_APPEND
		elif 'r' in mode:
			xm |= _VFS_READ
		elif 'w' in mode:
			xm |= _VFS_WRITE
		elif 'a' in mode:
			xm |= _VFS_WRITE|_VFS_APPEND
		if user_perm:
			if (xm & _VFS_READ) and not self.can_read(user_perm):
				raise IOError(errno.EACCES, 'Read access denied', self)
			if (xm & _VFS_WRITE) and not self.can_write(user_perm):
				raise IOError(errno.EACCES, 'Write access denied', self)
		return VFSFileIO(self, xm, sess)
	@validates('data')
	def _set_data(self, k, v):
		"""Validator for the inline BLOB: refresh ETag, size and MIME type."""
		if v is None:
			return None
		ctx = hashlib.md5()
		ctx.update(v)
		self.etag = ctx.hexdigest()
		self.size = len(v)
		# Sniff the MIME type from the new contents.
		m = magic.get()
		guessed_mime = m.buffer(v)
		if guessed_mime:
			self.mime_type = guessed_mime
		return v
def set_from_file(self, infile, user=None, sess=None):
if sess is None:
sess = DBSession()
m = magic.get()
self.size = 0
fd = -1
buf = bytearray(_BLOCK_SIZE)
mv = memoryview(buf)
ctx = hashlib.md5()
with self.open('w+', user, sess) as fd:
while 1:
rsz = infile.readinto(buf)
if not rsz:
break
ctx.update(mv[:rsz])
fd.write(mv[:rsz])
self.etag = ctx.hexdigest()
self.data = None
infile.seek(0)
try:
fd = infile.fileno()
guessed_mime = m.descriptor(fd)
except:
guessed_mime = m.buffer(infile.read())
if guessed_mime:
self.mime_type = guessed_mime
def set_region_from_file(self, infile, start, length, user=None, sess=None):
if sess is None:
sess = DBSession()
fd = -1
buf = bytearray(_BLOCK_SIZE)
mv = memoryview(buf)
ctx = hashlib.md5()
with self.open('w+', user, sess) as fd:
fd.seek(start)
while 1:
rsz = infile.readinto(buf)
if not rsz:
break
if rsz > length:
ctx.update(mv[:length])
fd.write(mv[:length])
break
ctx.update(mv[:rsz])
fd.write(mv[:rsz])
length -= rsz
self.etag = ctx.hexdigest()
self.data = None
	def set_from_object(self, infile, user=None, sess=None):
		"""Copy contents, ETag and MIME type from another File object."""
		if sess is None:
			sess = DBSession()
		self.size = 0
		self.etag = None
		buf = bytearray(_BLOCK_SIZE)
		mv = memoryview(buf)
		with self.open('w+', user, sess) as fd:
			with infile.open('r', user, sess) as infd:
				while 1:
					rsz = infd.readinto(buf)
					if not rsz:
						break
					fd.write(mv[:rsz])
		# Identical contents -> identical validator and type.
		self.etag = infile.etag
		self.data = None
		self.mime_type = infile.mime_type
def get_data(self, sess=None):
if self.data:
return self.data
with self.open('r', sess=sess) as fd:
return fd.read()
def __str__(self):
return '%s' % str(self.filename)
class FileChunk(Base):
	"""
	Single chunk of a VFS file. Contains _CHUNK_SIZE bytes.

	Keyed by (file ID, chunk index); VFSFileIO addresses chunks by index
	(``c_offset``), not by byte offset.
	"""
	__tablename__ = 'files_chunks'
	__table_args__ = (
		Comment('Stored file chunks'),
		{
			'mysql_engine' : 'InnoDB',
			'mysql_charset' : 'utf8',
			'info' : {
				'cap_menu' : 'BASE_ADMIN',
				'cap_read' : 'BASE_ADMIN',
				'cap_create' : '__NOPRIV__',
				'cap_edit' : '__NOPRIV__',
				'cap_delete' : '__NOPRIV__'
			}
		}
	)
	file_id = Column(
		'fileid',
		UInt32(),
		ForeignKey('files_def.fileid', name='files_chunks_fk_fileid', ondelete='CASCADE', onupdate='CASCADE'),
		Comment('File ID'),
		primary_key=True,
		nullable=False,
		info={
			'header_string' : _('ID')
		}
	)
	# Chunk index within the file (part of the composite primary key).
	offset = Column(
		UInt32(),
		Comment('File chunk offset'),
		primary_key=True,
		nullable=False,
		default=0,
		server_default=text('0'),
		info={
			'header_string' : _('Offset')
		}
	)
	# NOTE(review): could be deferred(); currently loaded with the row.
	data = Column(
		LargeBLOB(),
		Comment('File chunk data'),
		nullable=False,
		info={
			'header_string' : _('Data')
		}
	)
	def get_buffer(self):
		"""Return (creating on first use) a mutable bytearray copy of the data."""
		try:
			return self._buf
		except AttributeError:
			pass
		if self.data:
			self._buf = bytearray(self.data)
		else:
			self._buf = bytearray()
		return self._buf
	def sync_buffer(self):
		"""Write the in-memory buffer back into the ``data`` column.

		Also folds the buffer into the owning file's running ETag.
		Raises ValueError if no buffer was materialized via get_buffer().
		"""
		if not hasattr(self, '_buf'):
			raise ValueError()
		if self.file and self.file.etag:
			ctx = hashlib.md5()
			ctx.update(self.file.etag.encode())
			ctx.update(self._buf)
			self.file.etag = ctx.hexdigest()
		self.data = bytes(self._buf)
class VFSFileIO(io.BufferedIOBase):
"""
VFS file handle.
"""
	def __init__(self, fo, mode=_VFS_READ, sess=None):
		"""Open a handle on File *fo* with internal mode bitmask *mode*.

		When no session is supplied the handle owns (commits/rolls back)
		its own lazily-created one.
		"""
		if sess is None:
			# NOTE(review): a throwaway session is created here solely to
			# expunge *fo*; the working session comes later from the `sess`
			# property — confirm this detachment is intentional.
			xsess = DBSession()
			xsess.expunge(fo)
			self.own_sess = True
		else:
			self.own_sess = False
		# File mode internal bitmask
		self._mode = mode
		# DB session
		self._sess = sess
		# File object
		self.f = fo
		# Current chunk
		self.c = None
		# Last chunk number that we tried to load
		self.last_c = None
		# Set to true on chunk modification, to false on chunk load
		self.mod_c = False
		# Current memoryview (if it exists)
		self.buf = None
		# Offset in chunks
		self.c_offset = 0
		# Offset from chunk start
		self.b_offset = 0
		if self._mode & _VFS_TRUNCATE:
			self.truncate(0)
@property
def sess(self):
if self._sess is None:
self._sess = DBSession()
return self._sess
@property
def name(self):
return self.f.filename
@property
def mode(self):
if self._mode & _VFS_APPEND:
if self._mode & _VFS_READ:
return 'a+'
return 'a'
if self._mode & _VFS_TRUNCATE:
return 'w+'
if self._mode & _VFS_WRITE:
if self._mode & _VFS_READ:
return 'r+'
return 'w'
if self._mode & _VFS_READ:
return 'r'
raise ValueError('Invalid file mode')
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, trace):
self.close()
	def _update_chunk(self):
		"""Make the loaded chunk match ``c_offset``, flushing the old one.

		A dirty outgoing chunk is synced and flushed first; ``last_c``
		prevents re-querying a chunk index already known to be missing.
		``buf`` ends up as a mutable buffer (write mode), raw bytes
		(read mode), or None when the chunk does not exist yet.
		"""
		if self.c and (self.c.offset != self.c_offset):
			# Leaving the current chunk: persist it if modified.
			if (self._mode & _VFS_WRITE) and self.mod_c:
				self.c.sync_buffer()
				self.sess.flush(objects=(self.c,))
			self.sess.expunge(self.c)
			self.c = None
			self.last_c = None
			self.mod_c = False
		if (self.c is None) and (self.last_c != self.c_offset):
			self.c = self.sess.query(FileChunk).get((self.f.id, self.c_offset))
			self.last_c = self.c_offset
		if self.c:
			if self._mode & _VFS_WRITE:
				self.buf = self.c.get_buffer()
			else:
				self.buf = self.c.data
		else:
			self.buf = None
	def closed(self):
		# NOTE(review): io.IOBase defines `closed` as a property; defining
		# it as a method shadows that contract — callers must invoke closed().
		return (self.f is None)
	def close(self):
		"""Flush pending writes (or roll back an owned read session) and detach."""
		if self._mode & _VFS_WRITE:
			self.flush()
		elif self.own_sess:
			self.sess.rollback()
		self.buf = None
		self.c = None
		self.f = None
		self.c_offset = 0
		self.b_offset = 0
	def fileno(self):
		"""VFS handles have no OS-level descriptor."""
		raise IOError(errno.EBADF, 'VFS objects don\'t have file descriptors', self.f)
	def flush(self):
		"""Persist the current dirty chunk; commit when we own the session."""
		if (self._mode & _VFS_WRITE) and self.c and self.mod_c:
			self.c.sync_buffer()
			self.sess.flush(objects=(self.c,))
		if self.own_sess:
			self.sess.commit()
		self.mod_c = False
	def isatty(self):
		return False
	def seekable(self):
		return True
	def seek(self, off=0, whence=0):
		"""Move the cursor and return the new absolute position.

		whence: 0 = from start, 1 = from current position, 2 = from EOF.
		Unlike io.IOBase.seek, an unknown *whence* silently keeps the old
		position instead of raising.
		"""
		old = self.tell()
		if whence == 0:
			new = off
		elif whence == 1:
			new = old + off
		elif whence == 2:
			new = self.f.size + off
		else:
			new = old
		# Read-only files cannot position past EOF; clamp.
		if (new > self.f.size) and not (self._mode & _VFS_WRITE):
			new = self.f.size
		# NOTE(review): a negative resulting position is not rejected here,
		# which would produce a negative chunk offset — confirm callers never
		# seek before the start of the file.
		self.c_offset, self.b_offset = divmod(new, _CHUNK_SIZE)
		return new
def tell(self):
return self.c_offset * _CHUNK_SIZE + self.b_offset
def truncate(self, sz=None):
if sz == self.f.size:
return sz
if sz < 0:
raise IOError(errno.EINVAL, 'New file size can\'t be negative', self.f)
if not (self._mode & _VFS_WRITE):
raise IOError(errno.EBADF, 'File is not open for writing', self.f)
if sz is None:
sz = self.c_offset * _CHUNK_SIZE + self.b_offset
end_c, end_b = self.c_offset, self.b_offset
else:
end_c, end_b = divmod(sz, _CHUNK_SIZE)
cur_c, cur_b = self.c_offset, self.b_offset
if sz > self.f.size:
self.f.size = sz
return sz
if self.mod_c:
self.flush()
if end_b == 0:
self.sess.query(FileChunk) \
.filter(FileChunk.file_id == self.f.id, FileChunk.offset >= end_c) \
.delete()
else:
self.sess.query(FileChunk) \
.filter(FileChunk.file_id == self.f.id, FileChunk.offset > end_c) \
.delete()
self.c_offset = end_c
self.b_offset = end_b
self._update_chunk()
if self.c and (len(self.buf) > self.b_offset):
del self.buf[self.b_offset:]
self.mod_c = True
self.c_offset = cur_c
self.b_offset = cur_b
self.f.size = sz
return sz
	def detach(self):
		"""Detaching the raw buffer is meaningless for chunked DB files; always raises."""
		raise io.UnsupportedOperation(errno.EBADF, 'Can\'t detach chunk. Data will not be complete.', self.f)
def readable(self):
return (self._mode & _VFS_READ)
	def read(self, maxb=-1):
		"""Read up to *maxb* bytes (all remaining when negative or None) from
		the cursor and return them as bytes.

		Missing chunk rows are sparse holes and read as zero bytes (retbuf
		is pre-zeroed by the bytearray constructor).
		"""
		if not (self._mode & _VFS_READ):
			raise IOError(errno.EBADF, 'File is not open for reading', self.f)
		cur_pos = self.c_offset * _CHUNK_SIZE + self.b_offset
		read_sz = self.f.size - cur_pos
		if maxb is None:
			maxb = -1
		if (maxb == 0) or (read_sz <= 0):
			return b''
		if (maxb > 0) and (read_sz > maxb):
			read_sz = maxb
		retbuf = bytearray(read_sz)
		cursor = 0
		while read_sz > 0:
			self._update_chunk()
			if self.c is None:
				# Sparse hole: skip over it, leaving zeros in retbuf.
				to_read = min(read_sz, _CHUNK_SIZE - self.b_offset)
				read_sz -= to_read
				cursor += to_read
				self.b_offset += to_read
				if self.b_offset >= _CHUNK_SIZE:
					self.c_offset += 1
					self.b_offset = 0
				continue
			chunk_len = len(self.buf)
			if self.b_offset >= chunk_len:
				# Cursor is past the stored tail of a short chunk: the rest
				# of the chunk is implicitly zeros, skip it the same way.
				to_read = min(read_sz, _CHUNK_SIZE - self.b_offset)
				read_sz -= to_read
				cursor += to_read
				self.b_offset += to_read
				if self.b_offset >= _CHUNK_SIZE:
					self.c_offset += 1
					self.b_offset = 0
				continue
			# Copy the available stored bytes from the current chunk.
			to_read = min(chunk_len - self.b_offset, read_sz)
			mv = memoryview(self.buf)
			retbuf[cursor:cursor + to_read] = mv[self.b_offset:self.b_offset + to_read]
			read_sz -= to_read
			cursor += to_read
			self.b_offset += to_read
			if self.b_offset >= _CHUNK_SIZE:
				self.c_offset += 1
				self.b_offset = 0
		return bytes(retbuf)
	def read1(self, maxb=-1):
		"""Single-system-call read has no analogue for chunked DB storage;
		validates access mode, then always raises NotImplementedError."""
		if not (self._mode & _VFS_READ):
			raise IOError(errno.EBADF, 'File is not open for reading', self.f)
		raise NotImplementedError
def readinto(self, retbuf):
if not (self._mode & _VFS_READ):
raise IOError(errno.EBADF, 'File is not open for reading', self.f)
cur_pos = self.c_offset * _CHUNK_SIZE + self.b_offset
read_sz = len(retbuf)
file_sz = self.f.size - cur_pos
if file_sz < read_sz:
read_sz = file_sz
cursor = 0
orig_read_sz = read_sz
while read_sz > 0:
self._update_chunk()
if self.c is None:
to_read = min(read_sz, _CHUNK_SIZE - self.b_offset)
retbuf[cursor:cursor + to_read] = (0 for x in range(to_read))
read_sz -= to_read
cursor += to_read
self.b_offset += to_read
if self.b_offset >= _CHUNK_SIZE:
self.c_offset += 1
self.b_offset = 0
continue
chunk_len = len(self.buf)
if self.b_offset >= chunk_len:
to_read = min(read_sz, _CHUNK_SIZE - self.b_offset)
retbuf[cursor:cursor + to_read] = (0 for x in range(to_read))
read_sz -= to_read
cursor += to_read
self.b_offset += to_read
if self.b_offset >= _CHUNK_SIZE:
self.c_offset += 1
self.b_offset = 0
continue
to_read = min(chunk_len - self.b_offset, read_sz)
mv = memoryview(self.buf)
retbuf[cursor:cursor + to_read] = mv[self.b_offset:self.b_offset + to_read]
read_sz -= to_read
cursor += to_read
self.b_offset += to_read
if self.b_offset >= _CHUNK_SIZE:
self.c_offset += 1
self.b_offset = 0
return orig_read_sz
	def readline(self, limit=-1):
		"""Line-oriented reading is not implemented; validates access mode,
		then always raises NotImplementedError."""
		if not (self._mode & _VFS_READ):
			raise IOError(errno.EBADF, 'File is not open for reading', self.f)
		raise NotImplementedError
	def readlines(self, hint=-1):
		"""Line-oriented reading is not implemented; validates access mode,
		then always raises NotImplementedError."""
		if not (self._mode & _VFS_READ):
			raise IOError(errno.EBADF, 'File is not open for reading', self.f)
		raise NotImplementedError
	# io.RawIOBase compatibility: readall() reads to EOF, which read() with
	# the default maxb=-1 already does.
	readall = read
def writable(self):
return (self._mode & _VFS_WRITE)
	def write(self, b):
		"""Write bytes-like *b* at the cursor; return the number of bytes written.

		Creates missing chunks on demand and zero-pads short ones; extends
		the recorded file size when writing past the old end.
		"""
		if not (self._mode & _VFS_WRITE):
			raise IOError(errno.EBADF, 'File is not open for writing', self.f)
		write_sz = len(b)
		if write_sz == 0:
			return 0
		srcmv = memoryview(b)
		orig_write_sz = write_sz
		# NOTE(review): cur_pos is computed but never used below.
		cur_pos = self.c_offset * _CHUNK_SIZE + self.b_offset
		cursor = 0
		while write_sz > 0:
			self._update_chunk()
			to_write = min(write_sz, _CHUNK_SIZE - self.b_offset)
			if self.c is None:
				# No chunk row at this offset yet: create one sized to cover
				# exactly the region being filled (leading bytes are zeros).
				self.c = FileChunk(
					file=self.f,
					offset=self.c_offset
				)
				self.buf = self.c._buf = bytearray(self.b_offset + to_write)
				self.last_c = self.c_offset
				self.mod_c = True
			chunk_len = len(self.buf)
			if chunk_len < (self.b_offset + to_write):
				# Existing chunk is shorter than needed: grow with zero padding.
				extend_sz = self.b_offset + to_write - chunk_len
				self.buf[chunk_len:chunk_len + extend_sz] = (0 for x in range(extend_sz))
			mv = memoryview(self.buf)
			mv[self.b_offset:self.b_offset + to_write] = srcmv[cursor:cursor + to_write]
			self.mod_c = True
			write_sz -= to_write
			cursor += to_write
			self.b_offset += to_write
			if self.b_offset >= _CHUNK_SIZE:
				self.c_offset += 1
				self.b_offset = 0
		# Extend the recorded size if the write went past the old end.
		after_pos = self.c_offset * _CHUNK_SIZE + self.b_offset
		if after_pos > self.f.size:
			self.f.size = after_pos
		# write_sz is always 0 when the loop exits, so both returns below
		# yield orig_write_sz; the second branch is effectively dead.
		if write_sz <= 0:
			return orig_write_sz
		return (orig_write_sz - write_sz)
	def writelines(self, lines):
		"""Line-oriented writing is not implemented; validates access mode,
		then always raises NotImplementedError."""
		if not (self._mode & _VFS_WRITE):
			raise IOError(errno.EBADF, 'File is not open for writing', self.f)
		raise NotImplementedError
class Tag(Base):
	"""
	Generic object tag.

	ORM mapping for the ``tags_def`` table: uniquely named tags with an
	optional free-form description, administered via the BASE_ADMIN
	capability in the UI.
	"""
	__tablename__ = 'tags_def'
	__table_args__ = (
		Comment('Generic tags'),
		Index('tags_def_u_name', 'name', unique=True),
		{
			'mysql_engine' : 'InnoDB',
			'mysql_charset' : 'utf8',
			'info' : {
				'cap_menu' : 'BASE_ADMIN',
				# no read cap
				'cap_create' : 'BASE_ADMIN',
				'cap_edit' : 'BASE_ADMIN',
				'cap_delete' : 'BASE_ADMIN',
				'show_in_menu' : 'admin',
				'menu_name' : _('Tags'),
				'default_sort' : ({ 'property': 'name' ,'direction': 'ASC' },),
				'grid_view' : ('tagid', 'name', 'descr'),
				'grid_hidden' : ('tagid',),
				'easy_search' : ('name', 'descr'),
				'detail_pane' : ('netprofile_core.views', 'dpane_simple'),
				'create_wizard' :
					Wizard(
						Step('name', 'descr', title=_('Tag info')),
						title=_('Add new tag')
					)
			}
		}
	)
	# Surrogate primary key (DB column 'tagid').
	id = Column(
		'tagid',
		UInt32(),
		Sequence('tags_def_tagid_seq'),
		Comment('Tag ID'),
		primary_key=True,
		nullable=False,
		info={
			'header_string' : _('ID')
		}
	)
	# Unique, human-readable tag name.
	name = Column(
		Unicode(255),
		Comment('Tag name'),
		nullable=False,
		info={
			'header_string' : _('Name'),
			'column_flex' : 2
		}
	)
	# Optional free-form description (DB column 'descr').
	description = Column(
		'descr',
		UnicodeText(),
		Comment('Optional tag description'),
		nullable=True,
		default=None,
		server_default=text('NULL'),
		info={
			'header_string' : _('Description'),
			'column_flex' : 3
		}
	)
	def __str__(self):
		return '%s' % str(self.name)
class LogType(Base):
	"""
	Audit log entry type.

	ORM mapping for the ``logs_types`` lookup table referenced by
	:class:`LogData`.  Rows are effectively append-only (no delete
	privilege in the UI).
	"""
	__tablename__ = 'logs_types'
	__table_args__ = (
		Comment('Log entry types'),
		Index('logs_types_u_name', 'name', unique=True),
		{
			'mysql_engine' : 'InnoDB', # or leave MyISAM?
			'mysql_charset' : 'utf8',
			'info' : {
				'cap_menu' : 'BASE_ADMIN',
				'cap_read' : 'BASE_ADMIN',
				'cap_create' : 'BASE_ADMIN',
				'cap_edit' : 'BASE_ADMIN',
				'cap_delete' : '__NOPRIV__',
				'show_in_menu' : 'admin',
				'menu_section' : _('Logging'),
				'menu_name' : _('Log Types'),
				'default_sort' : ({ 'property': 'name' ,'direction': 'ASC' },),
				'grid_view' : ('ltid', 'name'),
				'grid_hidden' : ('ltid',),
				'easy_search' : ('name',),
				'detail_pane' : ('netprofile_core.views', 'dpane_simple')
			}
		}
	)
	# Primary key; IDs below 101 are reserved for built-in types
	# (the sequence starts at 101).
	id = Column(
		'ltid',
		UInt32(),
		Sequence('logs_types_ltid_seq', start=101, increment=1),
		Comment('Log entry type ID'),
		primary_key=True,
		nullable=False,
		info={
			'header_string' : _('ID')
		}
	)
	# Unique display name of the entry type.
	name = Column(
		Unicode(255),
		Comment('Log entry type name'),
		nullable=False,
		info={
			'header_string' : _('Name'),
			'column_flex' : 1
		}
	)
	def __str__(self):
		return '%s' % str(self.name)
class LogAction(Base):
	"""
	Audit log action type.

	ORM mapping for the ``logs_actions`` lookup table referenced by
	:class:`LogData`.  Rows are effectively append-only (no delete
	privilege in the UI).
	"""
	__tablename__ = 'logs_actions'
	__table_args__ = (
		Comment('Log actions'),
		Index('logs_actions_u_name', 'name', unique=True),
		{
			'mysql_engine' : 'InnoDB', # or leave MyISAM?
			'mysql_charset' : 'utf8',
			'info' : {
				'cap_menu' : 'BASE_ADMIN',
				'cap_read' : 'BASE_ADMIN',
				'cap_create' : 'BASE_ADMIN',
				'cap_edit' : 'BASE_ADMIN',
				'cap_delete' : '__NOPRIV__',
				'show_in_menu' : 'admin',
				'menu_section' : _('Logging'),
				'menu_name' : _('Log Actions'),
				'default_sort' : ({ 'property': 'name' ,'direction': 'ASC' },),
				'grid_view' : ('laid', 'name'),
				'grid_hidden' : ('laid',),
				'easy_search' : ('name',),
				'detail_pane' : ('netprofile_core.views', 'dpane_simple')
			}
		}
	)
	# Primary key; IDs below 101 are reserved for built-in actions
	# (the sequence starts at 101).
	id = Column(
		'laid',
		UInt32(),
		Sequence('logs_actions_laid_seq', start=101, increment=1),
		Comment('Log action ID'),
		primary_key=True,
		nullable=False,
		info={
			'header_string' : _('ID')
		}
	)
	# Unique display name of the action.
	name = Column(
		Unicode(255),
		Comment('Log action name'),
		nullable=False,
		info={
			'header_string' : _('Name'),
			'column_flex' : 1
		}
	)
	def __str__(self):
		return '%s' % str(self.name)
class LogData(Base):
	"""
	Audit log entry.

	ORM mapping for the ``logs_data`` table: one row per audited event,
	classified by :class:`LogType` and :class:`LogAction`.  Entries are
	write-once in the UI (no edit/delete privileges).
	"""
	__tablename__ = 'logs_data'
	__table_args__ = (
		Comment('Actual system log'),
		{
			'mysql_engine' : 'InnoDB', # or leave MyISAM?
			'mysql_charset' : 'utf8',
			'info' : {
				'cap_menu' : 'BASE_ADMIN',
				'cap_read' : 'BASE_ADMIN',
				'cap_create' : 'BASE_ADMIN',
				'cap_edit' : '__NOPRIV__',
				'cap_delete' : '__NOPRIV__',
				'show_in_menu' : 'admin',
				'menu_section' : _('Logging'),
				'menu_name' : _('Log Data'),
				'default_sort' : ({ 'property': 'ts' ,'direction': 'DESC' },),
				'grid_view' : ('logid', 'ts', 'login', 'xtype', 'xaction', 'data'),
				'grid_hidden' : ('logid',),
				'easy_search' : ('login', 'data'),
				'detail_pane' : ('netprofile_core.views', 'dpane_simple')
			}
		}
	)
	id = Column(
		'logid',
		UInt32(),
		Sequence('logs_data_logid_seq'),
		Comment('Log entry ID'),
		primary_key=True,
		nullable=False,
		info={
			'header_string' : _('ID')
		}
	)
	# Set by the database at insert time (CURRENT_TIMESTAMP default).
	timestamp = Column(
		'ts',
		TIMESTAMP(),
		Comment('Log entry timestamp'),
		CurrentTimestampDefault(),
		nullable=False,
		# default=zzz,
		info={
			'header_string' : _('Time')
		}
	)
	# Stored as a plain string (not an FK) so entries survive user deletion.
	login = Column(
		Unicode(48),
		Comment('Owner\'s login string'),
		nullable=False,
		info={
			'header_string' : _('Username')
		}
	)
	type_id = Column(
		'type',
		UInt32(),
		ForeignKey('logs_types.ltid', name='logs_data_fk_type', onupdate='CASCADE'),
		Comment('Log entry type'),
		nullable=False,
		info={
			'header_string' : _('Type'),
			'filter_type' : 'list'
		}
	)
	action_id = Column(
		'action',
		UInt32(),
		ForeignKey('logs_actions.laid', name='logs_data_fk_action', onupdate='CASCADE'),
		Comment('Log entry action'),
		nullable=False,
		info={
			'header_string' : _('Action'),
			'filter_type' : 'list'
		}
	)
	# Optional free-form payload describing the event.
	data = Column(
		UnicodeText(),
		Comment('Additional data'),
		nullable=True,
		default=None,
		server_default=text('NULL'),
		info={
			'header_string' : _('Data'),
			'column_flex' : 1
		}
	)
	# Relationships to the lookup tables (inner joins: FKs are NOT NULL).
	xtype = relationship(
		'LogType',
		innerjoin=True,
		backref='messages'
	)
	xaction = relationship(
		'LogAction',
		innerjoin=True,
		backref='messages'
	)
	def __str__(self):
		return '%s: [%s.%s] %s' % (
			str(self.timestamp),
			str(self.xtype),
			str(self.xaction),
			str(self.data)
		)
class NPSession(Base):
	"""
	NetProfile administrative session.

	ORM mapping for the ``np_sessions`` table, tracking logged-in UI
	sessions: owning user, client addresses and activity timestamps.
	The ``user`` relationship is presumably a backref declared on the
	User model (not visible in this file section).
	"""
	__tablename__ = 'np_sessions'
	__table_args__ = (
		Comment('NetProfile UI sessions'),
		Index('np_sessions_i_uid', 'uid'),
		Index('np_sessions_i_sname', 'sname'),
		Index('np_sessions_i_lastts', 'lastts'),
		{
			'mysql_engine' : 'InnoDB',
			'mysql_charset' : 'utf8',
			'info' : {
				'cap_menu' : 'BASE_ADMIN',
				'cap_read' : 'BASE_ADMIN',
				'cap_create' : 'BASE_ADMIN',
				'cap_edit' : 'BASE_ADMIN',
				'cap_delete' : 'BASE_ADMIN',
				'show_in_menu' : 'admin',
				'menu_name' : _('UI Sessions'),
				'default_sort' : ({ 'property': 'lastts' ,'direction': 'DESC' },),
				'grid_view' : ('npsid', 'sname', 'user', 'login', 'startts', 'lastts', 'ipaddr', 'ip6addr'),
				'grid_hidden' : ('npsid', 'sname'),
				'easy_search' : ('sname', 'login'),
				'detail_pane' : ('netprofile_core.views', 'dpane_simple')
			}
		}
	)
	id = Column(
		'npsid',
		UInt32(),
		Sequence('np_sessions_npsid_seq'),
		Comment('NP session ID'),
		primary_key=True,
		nullable=False,
		info={
			'header_string' : _('ID')
		}
	)
	# Opaque session hash identifying the browser session.
	session_name = Column(
		'sname',
		ASCIIString(255),
		Comment('NP session hash'),
		nullable=False,
		info={
			'header_string' : _('Name'),
			'column_flex' : 3
		}
	)
	# Nullable FK: sessions survive user deletion (SET NULL).
	user_id = Column(
		'uid',
		UInt32(),
		ForeignKey('users.uid', name='np_sessions_fk_uid', ondelete='SET NULL', onupdate='CASCADE'),
		Comment('User ID'),
		nullable=True,
		default=None,
		server_default=text('NULL'),
		info={
			'header_string' : _('User'),
			'filter_type' : 'none',
			'column_flex' : 1
		}
	)
	# Login kept as a string so the session row stays meaningful after
	# the user row is removed.
	login = Column(
		Unicode(48),
		Comment('User login as string'),
		nullable=True,
		default=None,
		server_default=text('NULL'),
		info={
			'header_string' : _('Username'),
			'column_flex' : 1
		}
	)
	start_time = Column(
		'startts',
		TIMESTAMP(),
		Comment('Start time'),
		nullable=True,
		default=None,
		info={
			'header_string' : _('Start')
		}
		# server_default=text('NULL')
	)
	# Auto-updated by the DB on every row update (ON UPDATE CURRENT_TIMESTAMP).
	last_time = Column(
		'lastts',
		TIMESTAMP(),
		Comment('Last seen time'),
		CurrentTimestampDefault(on_update=True),
		nullable=True,
		# default=None,
		info={
			'header_string' : _('Last Update')
		}
	)
	ip_address = Column(
		'ipaddr',
		IPv4Address(),
		Comment('Client IPv4 address'),
		nullable=True,
		default=None,
		server_default=text('NULL'),
		info={
			'header_string' : _('IPv4 Address')
		}
	)
	ipv6_address = Column(
		'ip6addr',
		IPv6Address(),
		Comment('Client IPv6 address'),
		nullable=True,
		default=None,
		server_default=text('NULL'),
		info={
			'header_string' : _('IPv6 Address'),
			'column_flex' : 1
		}
	)
	@property
	def end_time(self):
		"""Projected expiry time (last activity + policy session timeout).

		Returns None when it cannot be computed: no last-seen time, no
		user, no effective security policy, or a timeout that is unset
		or shorter than 30 seconds.
		"""
		lastts = self.last_time
		if not lastts:
			return None
		user = self.user
		if not user:
			return None
		secpol = user.effective_policy
		if not secpol:
			return None
		sto = secpol.sess_timeout
		if (not sto) or (sto < 30):
			return None
		et = lastts + dt.timedelta(seconds=sto)
		return et.replace(microsecond=0)
	@classmethod
	def __augment_pg_query__(cls, sess, query, params, req):
		# Eager-load the user only for small result pages to avoid the
		# join cost on large grids.  NOTE(review): query._limit is a
		# private SQLAlchemy attribute — confirm on upgrade.
		lim = query._limit
		if lim and (lim < 50):
			return query.options(
				joinedload(NPSession.user)
			)
		return query
	def __str__(self):
		return '%s' % str(self.session_name)
	def update_time(self, upt=None):
		"""Record activity: set last_time to *upt* (default: now)."""
		if upt is None:
			upt = dt.datetime.now()
		self.last_time = upt
	def check_request(self, req, ts=None):
		"""Return True when *req* may continue using this session.

		Verifies the request's user matches the session owner, is enabled
		and active, and passes the security policy's old-session check.
		"""
		user = req.user
		if user != self.user:
			return False
		if not user.enabled:
			return False
		if user.state != UserState.active:
			return False
		secpol = user.effective_policy
		if secpol and (not secpol.check_old_session(req, user, self, ts)):
			return False
		return True
class PasswordHistory(Base):
	"""
	Users' password history entry.

	ORM mapping for the ``users_pwhistory`` table; used by password
	policies to reject reuse of previous passwords.  Rows cascade away
	with their user.
	"""
	__tablename__ = 'users_pwhistory'
	__table_args__ = (
		Comment('Users\' old password history'),
		Index('users_pwhistory_i_uid', 'uid'),
		Index('users_pwhistory_i_ts', 'ts'),
		{
			'mysql_engine' : 'InnoDB',
			'mysql_charset' : 'utf8',
			'info' : {
			}
		}
	)
	id = Column(
		'pwhid',
		UInt32(),
		Sequence('users_pwhistory_pwhid_seq'),
		Comment('Password history entry ID'),
		primary_key=True,
		nullable=False,
		info={
			'header_string' : _('ID')
		}
	)
	user_id = Column(
		'uid',
		UInt32(),
		ForeignKey('users.uid', name='users_pwhistory_fk_uid', ondelete='CASCADE', onupdate='CASCADE'),
		Comment('User ID'),
		nullable=False,
		info={
			'header_string' : _('User')
		}
	)
	# Set by the database at insert time.
	timestamp = Column(
		'ts',
		TIMESTAMP(),
		Comment('Time of change'),
		CurrentTimestampDefault(),
		nullable=False,
		# default=zzz,
		info={
			'header_string' : _('Time')
		}
	)
	# NOTE(review): presumably a password *hash*, not plaintext — the
	# column only stores an opaque string here; confirm at the writer.
	password = Column(
		'pass',
		ASCIIString(255),
		Comment('Old password'),
		nullable=False,
		info={
			'header_string' : _('Password')
		}
	)
class GlobalSettingSection(Base):
	"""
	Categories for global settings.

	ORM mapping for ``np_globalsettings_sections``: each section belongs
	to a NetProfile module and groups :class:`GlobalSetting` rows.
	"""
	__tablename__ = 'np_globalsettings_sections'
	__table_args__ = (
		Comment('NetProfile UI global setting sections'),
		Index('np_globalsettings_sections_u_section', 'npmodid', 'name', unique=True),
		{
			'mysql_engine' : 'InnoDB',
			'mysql_charset' : 'utf8',
			'info' : {
				'cap_menu' : 'BASE_ADMIN',
				'cap_read' : 'BASE_ADMIN',
				'cap_create' : 'BASE_ADMIN',
				'cap_edit' : 'BASE_ADMIN',
				'cap_delete' : 'BASE_ADMIN',
				'show_in_menu' : 'admin',
				'menu_section' : _('Settings'),
				'menu_name' : _('Global Setting Sections'),
				'default_sort' : ({ 'property': 'name' ,'direction': 'ASC' },),
				'grid_view' : ('npgssid', 'module', 'name', 'descr'),
				'grid_hidden' : ('npgssid',),
				'easy_search' : ('name', 'descr'),
				'detail_pane' : ('netprofile_core.views', 'dpane_simple'),
				'create_wizard' : SimpleWizard(title=_('Add new section'))
			}
		}
	)
	id = Column(
		'npgssid',
		UInt32(),
		Sequence('np_globalsettings_sections_npgssid_seq'),
		Comment('Global parameter section ID'),
		primary_key=True,
		nullable=False,
		info={
			'header_string' : _('ID')
		}
	)
	module_id = Column(
		'npmodid',
		UInt32(),
		ForeignKey('np_modules.npmodid', name='np_globalsettings_sections_fk_npmodid', ondelete='CASCADE', onupdate='CASCADE'),
		Comment('NetProfile module ID'),
		nullable=False,
		info={
			'header_string' : _('Module'),
			'filter_type' : 'list',
			'column_flex' : 1
		}
	)
	# Unique per module (see the composite unique index above).
	name = Column(
		Unicode(255),
		Comment('Global parameter section name'),
		nullable=False,
		info={
			'header_string' : _('Name'),
			'column_flex' : 1
		}
	)
	description = Column(
		'descr',
		UnicodeText(),
		Comment('Global parameter section description'),
		nullable=True,
		default=None,
		server_default=text('NULL'),
		info={
			'header_string' : _('Description'),
			'column_flex' : 2
		}
	)
	# Settings in this section; deleting a section deletes its settings.
	settings = relationship(
		'GlobalSetting',
		backref=backref('section', innerjoin=True),
		cascade='all, delete-orphan',
		passive_deletes=True
	)
	def __str__(self):
		return '%s' % str(self.name)
class UserSettingSection(Base):
	"""
	Categories for per-user settings.

	ORM mapping for ``np_usersettings_sections``: each section belongs
	to a NetProfile module and groups :class:`UserSettingType` rows.
	"""
	__tablename__ = 'np_usersettings_sections'
	__table_args__ = (
		Comment('NetProfile UI user setting sections'),
		Index('np_usersettings_sections_u_section', 'npmodid', 'name', unique=True),
		{
			'mysql_engine' : 'InnoDB',
			'mysql_charset' : 'utf8',
			'info' : {
				'cap_menu' : 'BASE_ADMIN',
				'cap_read' : 'BASE_ADMIN',
				'cap_create' : 'BASE_ADMIN',
				'cap_edit' : 'BASE_ADMIN',
				'cap_delete' : 'BASE_ADMIN',
				'show_in_menu' : 'admin',
				'menu_section' : _('Settings'),
				'menu_name' : _('User Setting Sections'),
				'default_sort' : ({ 'property': 'name' ,'direction': 'ASC' },),
				'grid_view' : ('npussid', 'module', 'name', 'descr'),
				'grid_hidden' : ('npussid',),
				'easy_search' : ('name', 'descr'),
				'detail_pane' : ('netprofile_core.views', 'dpane_simple'),
				'create_wizard' : SimpleWizard(title=_('Add new section'))
			}
		}
	)
	id = Column(
		'npussid',
		UInt32(),
		Sequence('np_usersettings_sections_npussid_seq'),
		Comment('User parameter section ID'),
		primary_key=True,
		nullable=False,
		info={
			'header_string' : _('ID')
		}
	)
	module_id = Column(
		'npmodid',
		UInt32(),
		ForeignKey('np_modules.npmodid', name='np_usersettings_sections_fk_npmodid', ondelete='CASCADE', onupdate='CASCADE'),
		Comment('NetProfile module ID'),
		nullable=False,
		info={
			'header_string' : _('Module'),
			'filter_type' : 'list',
			'column_flex' : 1
		}
	)
	# Unique per module (see the composite unique index above).
	name = Column(
		Unicode(255),
		Comment('User parameter section name'),
		nullable=False,
		info={
			'header_string' : _('Name'),
			'column_flex' : 1
		}
	)
	description = Column(
		'descr',
		UnicodeText(),
		Comment('User parameter section description'),
		nullable=True,
		default=None,
		server_default=text('NULL'),
		info={
			'header_string' : _('Description'),
			'column_flex' : 2
		}
	)
	# Setting types in this section; cascade-deleted with the section.
	setting_types = relationship(
		'UserSettingType',
		backref=backref('section', innerjoin=True),
		cascade='all, delete-orphan',
		passive_deletes=True
	)
	def __str__(self):
		return '%s' % str(self.name)
	def get_tree_node(self, req):
		"""Build the ExtJS tree-node dict representing this section."""
		loc = get_localizer(req)
		return {
			'id' : 'ss' + str(self.id),
			'text' : loc.translate(_(self.name)),
			'leaf' : True,
			'iconCls' : 'ico-cog'
		}
class DynamicSetting(object):
	"""
	Mixin implementing dynamically-typed setting values.

	Concrete classes (the ORM setting models below) provide ``type``,
	``name``, ``title``, ``description``, ``constraints`` and ``options``
	attributes.  The mixin converts between stored string values and
	Python values and builds ExtJS form-field configs.
	"""
	def has_constraint(self, name):
		"""Return True when constraint *name* is defined."""
		if self.constraints and (name in self.constraints):
			return True
		return False
	def get_constraint(self, name, default=None):
		"""Return constraint *name*, or *default* when unset."""
		if self.constraints and (name in self.constraints):
			return self.constraints[name]
		return default
	def has_option(self, name):
		"""Return True when option *name* is defined."""
		if self.options and (name in self.options):
			return True
		return False
	def get_option(self, name, default=None):
		"""Return option *name*, or *default* when unset."""
		if self.options and (name in self.options):
			return self.options[name]
		return default
	def parse_param(self, param):
		"""Convert a raw (usually string) value to its Python type.

		Checkbox settings become bools; otherwise the optional 'cast'
		constraint ('int'/'float') is applied; anything else is returned
		unchanged.
		"""
		if self.type == 'checkbox':
			if isinstance(param, bool):
				return param
			if param.lower() in {'true', '1', 'on', 'yes'}:
				return True
			return False
		cast = self.get_constraint('cast')
		if cast == 'int':
			return int(param)
		if cast == 'float':
			return float(param)
		return param
	def param_to_db(self, param):
		"""Serialize *param* to the string form stored in the database."""
		param = self.parse_param(param)
		if self.type == 'checkbox':
			if param:
				return 'true'
			return 'false'
		return str(param)
	def get_field_cfg(self, req):
		"""Build the ExtJS form-field config dict for this setting.

		*req* is unused here but kept for interface symmetry with other
		config builders.
		"""
		cfg = {
			'xtype' : 'textfield',
			'allowBlank' : self.get_constraint('nullok', False),
			'name' : self.name,
			'fieldLabel' : self.title,
			'description' : self.description
		}
		if self.type == 'text':
			if self.get_constraint('cast') == 'int':
				cfg['xtype'] = 'numberfield'
				cfg['allowDecimals'] = False
				if self.has_constraint('minval'):
					cfg['minValue'] = int(self.get_constraint('minval'))
				if self.has_constraint('maxval'):
					cfg['maxValue'] = int(self.get_constraint('maxval'))
			else:
				if self.has_constraint('minlen'):
					cfg['minLength'] = int(self.get_constraint('minlen'))
				if self.has_constraint('maxlen'):
					cfg['maxLength'] = int(self.get_constraint('maxlen'))
				if self.has_constraint('regex'):
					# BUGFIX: the regex constraint is a pattern string; the
					# original int(...) cast raised ValueError for any real
					# pattern (copy-paste from the minlen/maxlen handling).
					cfg['regex'] = self.get_constraint('regex')
		if self.type == 'checkbox':
			cfg.update({
				'xtype' : 'checkbox',
				'inputValue' : 'true',
				'uncheckedValue' : 'false'
			})
		if self.type == 'select':
			chx = []
			if self.get_constraint('nullok', False):
				# Allow clearing the value via an empty choice.
				chx.append({
					'id' : '',
					'value' : ''
				})
			opts = self.get_option('options')
			if opts:
				for k, v in opts.items():
					chx.append({
						'id' : k,
						'value' : v
					})
			cfg.update({
				'xtype' : 'combobox',
				'format' : 'string',
				'displayField' : 'value',
				'hiddenName' : self.name,
				'valueField' : 'id',
				'queryMode' : 'local',
				'editable' : False,
				'forceSelection' : True,
				'store' : {
					'xtype' : 'simplestore',
					'fields' : ('id', 'value'),
					'data' : chx
				}
			})
		if self.type == 'password':
			cfg['xtype'] = 'passwordfield'
		if self.type == 'textarea':
			cfg['xtype'] = 'textareafield'
		return cfg
class GlobalSetting(Base, DynamicSetting):
	"""
	Global application settings.

	ORM mapping for ``np_globalsettings_def``: one row per named setting,
	holding its current value, default, type and validation constraints.
	Value parsing/serialization comes from the :class:`DynamicSetting`
	mixin; the cached accessor is the module-level ``global_setting()``.
	"""
	__tablename__ = 'np_globalsettings_def'
	__table_args__ = (
		Comment('NetProfile UI global settings'),
		Index('np_globalsettings_def_u_name', 'name', unique=True),
		Index('np_globalsettings_def_i_npmodid', 'npmodid'),
		Index('np_globalsettings_def_i_npgssid', 'npgssid'),
		{
			'mysql_engine' : 'InnoDB',
			'mysql_charset' : 'utf8',
			'info' : {
				'cap_menu' : 'BASE_ADMIN',
				'cap_read' : 'BASE_ADMIN',
				'cap_create' : 'BASE_ADMIN',
				'cap_edit' : 'BASE_ADMIN',
				'cap_delete' : 'BASE_ADMIN',
				'show_in_menu' : 'admin',
				'menu_section' : _('Settings'),
				'menu_name' : _('Global Settings'),
				'default_sort' : ({ 'property': 'name' ,'direction': 'ASC' },),
				'grid_view' : ('npglobid', 'module', 'section', 'name', 'title', 'type', 'value', 'default'),
				'grid_hidden' : ('npglobid',),
				'easy_search' : ('name', 'title'),
				'detail_pane' : ('netprofile_core.views', 'dpane_simple')
			}
		}
	)
	id = Column(
		'npglobid',
		UInt32(),
		Sequence('np_globalsettings_def_npglobid_seq'),
		Comment('Global parameter ID'),
		primary_key=True,
		nullable=False,
		info={
			'header_string' : _('ID')
		}
	)
	section_id = Column(
		'npgssid',
		UInt32(),
		ForeignKey('np_globalsettings_sections.npgssid', name='np_globalsettings_def_fk_npgssid', ondelete='CASCADE', onupdate='CASCADE'),
		Comment('Global parameter section ID'),
		nullable=False,
		info={
			'header_string' : _('Section'),
			'filter_type' : 'list',
			'column_flex' : 2
		}
	)
	module_id = Column(
		'npmodid',
		UInt32(),
		ForeignKey('np_modules.npmodid', name='np_globalsettings_def_fk_npmodid', ondelete='CASCADE', onupdate='CASCADE'),
		Comment('NetProfile module ID'),
		nullable=False,
		info={
			'header_string' : _('Module'),
			'filter_type' : 'list',
			'column_flex' : 2
		}
	)
	# Globally unique machine name used by global_setting() lookups.
	name = Column(
		ASCIIString(255),
		Comment('Global parameter name'),
		nullable=False,
		info={
			'header_string' : _('Name'),
			'column_flex' : 2
		}
	)
	title = Column(
		Unicode(255),
		Comment('Global parameter title'),
		nullable=False,
		info={
			'header_string' : _('Title'),
			'column_flex' : 3
		}
	)
	# Widget/value type interpreted by DynamicSetting ('text', 'checkbox',
	# 'select', 'password', 'textarea').
	type = Column(
		ASCIIString(64),
		Comment('Global parameter type'),
		nullable=False,
		default='text',
		server_default='text',
		info={
			'header_string' : _('Type'),
			'column_flex' : 1
		}
	)
	# Stored in string form; see python_value for the typed view.
	value = Column(
		ASCIIString(255),
		Comment('Global parameter current value'),
		nullable=False,
		info={
			'header_string' : _('Value'),
			'column_flex' : 3
		}
	)
	default = Column(
		ASCIIString(255),
		Comment('Global parameter default value'),
		nullable=True,
		server_default=text('NULL'),
		info={
			'header_string' : _('Default'),
			'column_flex' : 3
		}
	)
	options = Column(
		'opt',
		PickleType(pickler=HybridPickler),
		Comment('Serialized options array'),
		nullable=True,
		info={
			'header_string' : _('Options')
		}
	)
	dynamic_options = Column(
		'dynopt',
		PickleType(pickler=HybridPickler),
		Comment('Serialized dynamic options array'),
		nullable=True,
		info={
			'header_string' : _('Dynamic Options')
		}
	)
	constraints = Column(
		'constr',
		PickleType(pickler=HybridPickler),
		Comment('Serialized constraints array'),
		nullable=True,
		info={
			'header_string' : _('Constraints')
		}
	)
	# Whether the value may be exposed to the browser-side UI.
	client_ok = Column(
		'clientok',
		NPBoolean(),
		Comment('OK to pass to clientside?'),
		nullable=False,
		default=True,
		server_default=npbool(True),
		info={
			'header_string' : _('Client-side')
		}
	)
	description = Column(
		'descr',
		UnicodeText(),
		Comment('Global parameter description'),
		nullable=True,
		default=None,
		server_default=text('NULL'),
		info={
			'header_string' : _('Description')
		}
	)
	@hybrid_property
	def python_value(self):
		# Typed view over the stored string (DynamicSetting.parse_param).
		return self.parse_param(self.value)
	@python_value.setter
	def python_value_set(self, value):
		# Serialize through DynamicSetting.param_to_db on assignment.
		self.value = self.param_to_db(value)
	def __str__(self):
		return '%s' % str(self.name)
@cache.cache_on_arguments()
def global_setting(name):
	"""Return the parsed value of global setting *name* (cached).

	Returns None when the setting does not exist or its value is NULL.
	The cache is kept coherent by the ORM event hooks registered below.
	"""
	sess = DBSession()
	try:
		gs = sess.query(GlobalSetting).filter(GlobalSetting.name == name).one()
	except NoResultFound:
		gs = None
	if (gs is None) or (gs.value is None):
		return None
	return gs.python_value
def _set_gs(mapper, conn, tgt):
	"""ORM after-insert/after-update hook: push the fresh value into the
	global_setting() cache region."""
	if not tgt.name:
		return
	global_setting.set(tgt.python_value, tgt.name)
def _del_gs(mapper, conn, tgt):
	"""ORM after-delete hook: drop the cached value for the removed setting."""
	if not tgt.name:
		return
	global_setting.invalidate(tgt.name)
# Keep the global_setting() cache coherent with database changes.
event.listen(GlobalSetting, 'after_delete', _del_gs)
event.listen(GlobalSetting, 'after_insert', _set_gs)
event.listen(GlobalSetting, 'after_update', _set_gs)
class UserSettingType(Base, DynamicSetting):
	"""
	Per-user application setting types.

	ORM mapping for ``np_usersettings_types``: the *definitions* of
	per-user settings (name, type, default, constraints).  Actual values
	live in :class:`UserSetting`.  Value handling comes from the
	:class:`DynamicSetting` mixin.
	"""
	__tablename__ = 'np_usersettings_types'
	__table_args__ = (
		Comment('NetProfile UI user setting types'),
		Index('np_usersettings_types_u_name', 'name', unique=True),
		Index('np_usersettings_types_i_npmodid', 'npmodid'),
		Index('np_usersettings_types_i_npussid', 'npussid'),
		{
			'mysql_engine' : 'InnoDB',
			'mysql_charset' : 'utf8',
			'info' : {
				'cap_menu' : 'BASE_ADMIN',
				'cap_read' : 'BASE_ADMIN',
				'cap_create' : 'BASE_ADMIN',
				'cap_edit' : 'BASE_ADMIN',
				'cap_delete' : 'BASE_ADMIN',
				'show_in_menu' : 'admin',
				'menu_section' : _('Settings'),
				'menu_name' : _('User Setting Types'),
				'default_sort' : ({ 'property': 'name' ,'direction': 'ASC' },),
				'grid_view' : ('npustid', 'module', 'section', 'name', 'title', 'type', 'default', 'clientok'),
				'grid_hidden' : ('npustid', 'clientok'),
				'easy_search' : ('name', 'title'),
				'detail_pane' : ('netprofile_core.views', 'dpane_simple')
			}
		}
	)
	id = Column(
		'npustid',
		UInt32(),
		Sequence('np_usersettings_types_npustid_seq'),
		Comment('User parameter type ID'),
		primary_key=True,
		nullable=False,
		info={
			'header_string' : _('ID')
		}
	)
	section_id = Column(
		'npussid',
		UInt32(),
		ForeignKey('np_usersettings_sections.npussid', name='np_usersettings_types_fk_npussid', ondelete='CASCADE', onupdate='CASCADE'),
		Comment('User parameter section ID'),
		nullable=False,
		info={
			'header_string' : _('Section'),
			'filter_type' : 'list',
			'column_flex' : 2
		}
	)
	module_id = Column(
		'npmodid',
		UInt32(),
		ForeignKey('np_modules.npmodid', name='np_usersettings_types_fk_npmodid', ondelete='CASCADE', onupdate='CASCADE'),
		Comment('NetProfile module ID'),
		nullable=False,
		info={
			'header_string' : _('Module'),
			'filter_type' : 'list',
			'column_flex' : 2
		}
	)
	# Globally unique machine name of the setting type.
	name = Column(
		ASCIIString(255),
		Comment('User parameter name'),
		nullable=False,
		info={
			'header_string' : _('Name'),
			'column_flex' : 2
		}
	)
	title = Column(
		Unicode(255),
		Comment('User parameter title'),
		nullable=False,
		info={
			'header_string' : _('Title'),
			'column_flex' : 3
		}
	)
	# Widget/value type interpreted by DynamicSetting ('text', 'checkbox',
	# 'select', 'password', 'textarea').
	type = Column(
		ASCIIString(64),
		Comment('User parameter type'),
		nullable=False,
		default='text',
		server_default='text',
		info={
			'header_string' : _('Type'),
			'column_flex' : 1
		}
	)
	default = Column(
		ASCIIString(255),
		Comment('User parameter default value'),
		nullable=True,
		server_default=text('NULL'),
		info={
			'header_string' : _('Default'),
			'column_flex' : 3
		}
	)
	options = Column(
		'opt',
		PickleType(pickler=HybridPickler),
		Comment('Serialized options array'),
		nullable=True,
		info={
			'header_string' : _('Options')
		}
	)
	dynamic_options = Column(
		'dynopt',
		PickleType(pickler=HybridPickler),
		Comment('Serialized dynamic options array'),
		nullable=True,
		info={
			'header_string' : _('Dynamic Options')
		}
	)
	constraints = Column(
		'constr',
		PickleType(pickler=HybridPickler),
		Comment('Serialized constraints array'),
		nullable=True,
		info={
			'header_string' : _('Constraints')
		}
	)
	# Whether values of this type may be exposed to the browser-side UI.
	client_ok = Column(
		'clientok',
		NPBoolean(),
		Comment('OK to pass to clientside?'),
		nullable=False,
		default=True,
		server_default=npbool(True),
		info={
			'header_string' : _('Client-side')
		}
	)
	description = Column(
		'descr',
		UnicodeText(),
		Comment('Global parameter description'),
		nullable=True,
		default=None,
		server_default=text('NULL'),
		info={
			'header_string' : _('Description')
		}
	)
	# Per-user values of this type; cascade-deleted with the type.
	settings = relationship(
		'UserSetting',
		backref=backref('type', innerjoin=True),
		cascade='all, delete-orphan',
		passive_deletes=True
	)
	@classmethod
	def __augment_pg_query__(cls, sess, query, params, req):
		# Eager-load module/section only for small result pages.
		# NOTE(review): query._limit is a private SQLAlchemy attribute —
		# confirm on upgrade.
		lim = query._limit
		if lim and (lim < 50):
			return query.options(
				joinedload(UserSettingType.module),
				joinedload(UserSettingType.section)
			)
		return query
	def __str__(self):
		return '%s' % str(self.name)
class UserSetting(Base):
"""
Per-user application settings.
"""
@classmethod
def _filter_section(cls, query, value):
if isinstance(value, int):
return query.join(UserSettingType, UserSettingSection).filter(UserSettingSection.id == value)
return query
__tablename__ = 'np_usersettings_def'
__table_args__ = (
Comment('NetProfile UI user settings'),
Index('np_usersettings_def_u_us', 'uid', 'npustid', unique=True),
Index('np_usersettings_def_i_npustid', 'npustid'),
{
'mysql_engine' : 'InnoDB',
'mysql_charset' : 'utf8',
'info' : {
'cap_menu' : 'BASE_ADMIN',
'cap_read' : 'BASE_ADMIN',
'cap_create' : 'BASE_ADMIN',
'cap_edit' : 'BASE_ADMIN',
'cap_delete' : 'BASE_ADMIN',
'show_in_menu' : 'admin',
'menu_section' : _('Settings'),
'menu_name' : _('User Settings'),
'default_sort' : (),
'grid_view' : ('npusid', 'user', 'type', 'value'),
'grid_hidden' : ('npusid',),
'detail_pane' : ('netprofile_core.views', 'dpane_simple'),
'extra_search' : (
SelectFilter('section', _filter_section,
title=_('Section'),
data='NetProfile.store.core.UserSettingSection',
value_field='npussid',
display_field='name'
),
)
}
}
)
id = Column(
'npusid',
UInt32(),
Sequence('np_usersettings_def_npusid_seq'),
Comment('User parameter ID'),
primary_key=True,
nullable=False,
info={
'header_string' : _('ID')
}
)
user_id = Column(
'uid',
UInt32(),
ForeignKey('users.uid', name='np_usersettings_def_fk_uid', ondelete='CASCADE', onupdate='CASCADE'),
Comment('User ID'),
nullable=False,
info={
'header_string' : _('User'),
'filter_type' : 'none',
'column_flex' : 1
}
)
type_id = Column(
'npustid',
UInt32(),
ForeignKey('np_usersettings_types.npustid', name='np_usersettings_def_fk_npustid', ondelete='CASCADE', onupdate='CASCADE'),
Comment('User parameter type ID'),
nullable=False,
info={
'header_string' : _('Type'),
'filter_type' : 'list',
'column_flex' : 1
}
)
value = Column(
ASCIIString(255),
Comment('User parameter current value'),
nullable=False,
info={
'header_string' : _('Value'),
'column_flex' : 2
}
)
@property
def name(self):
return self.type.name
@hybrid_property
def python_value(self):
    """Raw DB string value parsed into a native Python value by the type."""
    return self.type.parse_param(self.value)

@python_value.setter
def python_value_set(self, value):
    # Serialize a native Python value back into its DB string form.
    self.value = self.type.param_to_db(value)
def __str__(self):
    # "username.settingtype" — used in admin UI listings.
    return '%s.%s' % (
        str(self.user),
        str(self.type)
    )
class DataCache(Base):
    """
    General purpose per-user keyed data storage.

    Each row is a named, pickled blob owned by exactly one user.  The
    (uid, dcname) pair is unique, so a user holds at most one value per
    cache name.
    """
    __tablename__ = 'datacache'
    __table_args__ = (
        Comment('Data cache'),
        Index('datacache_u_dc', 'uid', 'dcname', unique=True),
        {
            'mysql_engine' : 'InnoDB',
            'mysql_charset' : 'utf8',
            # UI metadata: admin-only CRUD, listed in the admin menu.
            'info' : {
                'cap_menu' : 'BASE_ADMIN',
                'cap_read' : 'BASE_ADMIN',
                'cap_create' : 'BASE_ADMIN',
                'cap_edit' : 'BASE_ADMIN',
                'cap_delete' : 'BASE_ADMIN',
                'show_in_menu' : 'admin',
                'menu_section' : _('Settings'),
                'menu_name' : _('Data Cache'),
                'default_sort' : (),
                'grid_view' : ('dcid', 'user', 'dcname'),
                'grid_hidden' : ('dcid',),
                'form_view' : ('user', 'dcname', 'dcvalue'),
                'easy_search' : ('dcname',),
                'detail_pane' : ('netprofile_core.views', 'dpane_simple')
            }
        }
    )
    # Surrogate primary key.
    id = Column(
        'dcid',
        UInt32(),
        Sequence('datacache_dcid_seq'),
        Comment('Data cache ID'),
        primary_key=True,
        nullable=False,
        info={
            'header_string' : _('ID')
        }
    )
    # Owning user; rows are removed together with the user (CASCADE).
    user_id = Column(
        'uid',
        UInt32(),
        ForeignKey('users.uid', name='datacache_fk_uid', ondelete='CASCADE', onupdate='CASCADE'),
        Comment('Data cache owner'),
        nullable=False,
        info={
            'header_string' : _('User'),
            'column_flex' : 1
        }
    )
    # Cache key, unique per user (see datacache_u_dc index).
    name = Column(
        'dcname',
        ASCIIString(32),
        Comment('Data cache name'),
        nullable=False,
        info={
            'header_string' : _('Name'),
            'column_flex' : 1
        }
    )
    # NOTE(review): PickleType — values are unpickled on load, so this column
    # must never be populated from untrusted sources.
    value = Column(
        'dcvalue',
        PickleType(),
        Comment('Data cache value'),
        nullable=True,
        default=None,
        server_default=text('NULL'),
        info={
            'header_string' : _('Value')
        }
    )

    def __str__(self):
        return '%s' % str(self.name)
# Palette of predefined calendar colors.  Keys are the 1-based style codes
# stored in Calendar.style / CalendarImport.style; values are hex RGB colors
# rendered by the 'calendarcolor' editor widget.
_calendar_styles = {
    1 : '#fa7166',
    2 : '#cf2424',
    3 : '#a01a1a',
    4 : '#7e3838',
    5 : '#ca7609',
    6 : '#f88015',
    7 : '#eda12a',
    8 : '#d5b816',
    9 : '#e281ca',
    10 : '#bf53a4',
    11 : '#9d3283',
    12 : '#7a0f60',
    13 : '#542382',
    14 : '#7742a9',
    15 : '#8763ca',
    16 : '#b586e2',
    17 : '#7399f9',
    18 : '#4e79e6',
    19 : '#2951b9',
    20 : '#133897',
    21 : '#1a5173',
    22 : '#1a699c',
    23 : '#3694b7',
    24 : '#64b9d9',
    25 : '#a8c67b',
    26 : '#83ad47',
    27 : '#2e8f0c',
    28 : '#176413',
    29 : '#0f4c30',
    30 : '#386651',
    31 : '#3ea987',
    32 : '#7bc3b5'
}
class CalendarAccess(DeclEnum):
    """
    Calendar access ENUM.

    Ordered from most to least restrictive; members are
    (db value, display name, sort order) triples.
    """
    none = 'N', _('None'), 10
    read_only = 'RO', _('Read-only'), 20
    read_write = 'RW', _('Read-write'), 30
class Calendar(Base):
    """
    Event calendar owned by a user.

    Sharing model: the owner always has full access; members of the
    owner's group get ``group_access``; everyone else gets
    ``global_access`` (see :meth:`can_read` / :meth:`can_write`).
    The ``__augment_*`` hooks restrict the admin UI so users only see
    and manage their own calendars.
    """
    __tablename__ = 'calendars_def'
    __table_args__ = (
        Comment('User calendars'),
        Index('calendars_def_u_cal', 'uid', 'name', unique=True),
        Index('calendars_def_i_gid', 'gid'),
        {
            'mysql_engine' : 'InnoDB',
            'mysql_charset' : 'utf8',
            'info' : {
                'menu_name' : _('My Calendars'),
                'default_sort' : ({ 'property': 'name' ,'direction': 'ASC' },),
                'grid_view' : ('calid', 'name', 'user', 'group', 'group_access', 'global_access'),
                'grid_hidden' : ('calid', 'user'),
                'form_view' : ('name', 'group', 'group_access', 'global_access', 'style', 'descr'),
                'easy_search' : ('name', 'descr'),
                'detail_pane' : ('netprofile_core.views', 'dpane_simple'),
                'create_wizard' : SimpleWizard(title=_('Add new calendar'))
            }
        }
    )
    id = Column(
        'calid',
        UInt32(),
        Sequence('calendars_def_calid_seq', start=101, increment=1),
        Comment('Calendar ID'),
        primary_key=True,
        nullable=False,
        info={
            'header_string' : _('ID')
        }
    )
    # Owner; filled automatically on creation (see __augment_create__).
    user_id = Column(
        'uid',
        UInt32(),
        ForeignKey('users.uid', name='calendars_def_fk_uid', ondelete='CASCADE', onupdate='CASCADE'),
        Comment('User ID'),
        nullable=False,
        info={
            'header_string' : _('User'),
            'read_only' : True,
            'filter_type' : 'none'
        }
    )
    # Optional group the calendar is shared with via group_access.
    group_id = Column(
        'gid',
        UInt32(),
        ForeignKey('groups.gid', name='calendars_def_fk_gid', ondelete='CASCADE', onupdate='CASCADE'),
        Comment('Group ID'),
        nullable=True,
        default=None,
        server_default=text('NULL'),
        info={
            'header_string' : _('Group'),
            'filter_type' : 'none',
            'column_flex' : 2
        }
    )
    name = Column(
        Unicode(255),
        Comment('Calendar name'),
        nullable=False,
        info={
            'header_string' : _('Name'),
            'column_flex' : 3
        }
    )
    group_access = Column(
        CalendarAccess.db_type(),
        Comment('Calendar access rule for owner group'),
        nullable=False,
        default=CalendarAccess.none,
        server_default=CalendarAccess.none,
        info={
            'header_string' : _('Group Access'),
            'column_flex' : 2
        }
    )
    global_access = Column(
        CalendarAccess.db_type(),
        Comment('Calendar access rule for everyone not in group'),
        nullable=False,
        default=CalendarAccess.none,
        server_default=CalendarAccess.none,
        info={
            'header_string' : _('Global Access'),
            'column_flex' : 2
        }
    )
    # Index into the _calendar_styles color palette (0 = default).
    style = Column(
        UInt32(),
        Comment('Calendar style code'),
        nullable=False,
        default=0,
        server_default=text('0'),
        info={
            'header_string' : _('Style'),
            'min_value' : 0,
            'max_value' : len(_calendar_styles),
            'editor_xtype' : 'calendarcolor'
        }
    )
    description = Column(
        'descr',
        UnicodeText(),
        Comment('Calendar description'),
        nullable=True,
        default=None,
        server_default=text('NULL'),
        info={
            'header_string' : _('Description')
        }
    )
    # Events and imports die with their calendar (delete-orphan + CASCADE).
    events = relationship(
        'Event',
        backref=backref('calendar', innerjoin=True, lazy='joined'),
        cascade='all, delete-orphan',
        passive_deletes=True
    )
    imports = relationship(
        'CalendarImport',
        backref=backref('calendar', innerjoin=True, lazy='joined'),
        cascade='all, delete-orphan',
        passive_deletes=True
    )

    def can_read(self, user):
        """Return True if *user* may view this calendar's events."""
        if self.user_id == user.id:
            return True
        if (self.group_id is not None) and (self.group_id == user.group.id):
            return (self.group_access != CalendarAccess.none)
        return (self.global_access != CalendarAccess.none)

    def can_write(self, user):
        """Return True if *user* may add/modify events in this calendar."""
        if self.user_id == user.id:
            return True
        if (self.group_id is not None) and (self.group_id == user.group.id):
            return (self.group_access == CalendarAccess.read_write)
        return (self.global_access == CalendarAccess.read_write)

    def __str__(self):
        return '%s' % str(self.name)

    @classmethod
    def __augment_query__(cls, sess, query, params, req):
        # UI listings only ever show the requesting user's own calendars.
        query = query.filter(Calendar.user_id == req.user.id)
        return query

    @classmethod
    def __augment_create__(cls, sess, obj, values, req):
        # Force ownership to the requesting user regardless of posted values.
        obj.user_id = req.user.id
        return True

    @classmethod
    def __augment_update__(cls, sess, obj, values, req):
        # Only the owner may edit calendar settings.
        if obj.user_id == req.user.id:
            return True
        return False

    @classmethod
    def __augment_delete__(cls, sess, obj, values, req):
        # Only the owner may delete the calendar.
        if obj.user_id == req.user.id:
            return True
        return False
def _wizfld_import_cal(fld, model, req, **kwargs):
    """Build the ExtJS combobox config for the 'Import a calendar' wizard.

    The store is fed by the Calendar.cal_avail direct API call; the combobox
    submits the chosen calendar definition under the 'caldef' field, which
    _wizcb_import_cal_submit() then parses.
    """
    return {
        'xtype' : 'combobox',
        'allowBlank' : False,
        'name' : 'caldef',
        'format' : 'string',
        'displayField' : 'Title',
        'valueField' : 'CalendarId',
        'hiddenName' : 'caldef',
        'editable' : False,
        'forceSelection' : True,
        'store' : {
            'type' : 'direct',
            'model' : 'Extensible.calendar.data.CalendarModel',
            'directFn' : 'NetProfile.api.Calendar.cal_avail',
            'totalProperty' : 'total',
            'rootProperty' : 'calendars'
        },
        'fieldLabel' : _('Calendar'),
        'tpl' : '<tpl for="."><div class="x-boundlist-item">{Owner}: {Title}</div></tpl>'
    }
def _wizcb_import_cal_submit(wiz, em, step, act, val, req):
    """Wizard submit handler: import a shared calendar for the current user.

    Expects val['caldef'] in the form 'user-<calid>'.  Raises ValueError on a
    malformed value or when the calendar is missing / not readable by the
    requesting user.  Returns the wizard action dict (close + reload).
    """
    if ('caldef' not in val) or (val['caldef'][:5] != 'user-'):
        raise ValueError
    # Strip the 'user-' prefix to get the numeric calendar ID.
    cal_id = int(val['caldef'][5:])
    sess = DBSession()
    cal = sess.query(Calendar).get(cal_id)
    if (not cal) or (not cal.can_read(req.user)):
        raise ValueError
    imp = CalendarImport()
    imp.user = req.user
    imp.calendar = cal
    # Optional per-import display name override.
    name = val.get('name')
    if name:
        imp.name = name
    # Optional per-import style override; silently ignore bad values.
    try:
        style = int(val.get('style'))
        if 0 < style <= len(_calendar_styles):
            imp.style = style
    except (TypeError, ValueError):
        pass
    sess.add(imp)
    return {
        'do' : 'close',
        'reload' : True
    }
class CalendarImport(Base):
    """
    Represents a shared calendar which is imported to other user's namespace.

    A user can import a given calendar at most once (uid+calid unique) and
    may optionally override the displayed name and color style.
    """
    __tablename__ = 'calendars_imports'
    __table_args__ = (
        Comment('User calendar imports'),
        Index('calendars_imports_u_import', 'uid', 'calid', unique=True),
        Index('calendars_imports_i_calid', 'calid'),
        {
            'mysql_engine' : 'InnoDB',
            'mysql_charset' : 'utf8',
            'info' : {
                'menu_name' : _('Other Calendars'),
                'default_sort' : ({ 'property': 'calid' ,'direction': 'ASC' },),
                'grid_view' : (
                    'calimpid',
                    'calendar',
                    MarkupColumn(
                        name='real_name',
                        header_string=_('Name'),
                        column_flex=3,
                        template='{real_name}'
                    )
                ),
                'grid_hidden' : ('calimpid',),
                'form_view' : ('calendar', 'name', 'style'),
                'easy_search' : ('name',),
                'extra_data' : ('real_name',),
                'detail_pane' : ('netprofile_core.views', 'dpane_simple'),
                # Custom wizard: pick a shared calendar via combobox, then
                # optionally override name/style (see _wizfld_import_cal and
                # _wizcb_import_cal_submit above).
                'create_wizard' : Wizard(
                    Step(
                        ExtJSWizardField(_wizfld_import_cal),
                        'name', 'style',
                        id='generic',
                        on_submit=_wizcb_import_cal_submit
                    ),
                    title=_('Import a calendar'),
                    validator='ImportCalendar'
                )
            }
        }
    )
    id = Column(
        'calimpid',
        UInt32(),
        Sequence('calendars_imports_calimpid_seq'),
        Comment('Calendar import ID'),
        primary_key=True,
        nullable=False,
        info={
            'header_string' : _('ID')
        }
    )
    # Importing user (not the calendar's owner).
    user_id = Column(
        'uid',
        UInt32(),
        ForeignKey('users.uid', name='calendars_imports_fk_uid', ondelete='CASCADE', onupdate='CASCADE'),
        Comment('User ID'),
        nullable=False,
        info={
            'header_string' : _('User'),
            'read_only' : True,
            'filter_type' : 'none'
        }
    )
    calendar_id = Column(
        'calid',
        UInt32(),
        ForeignKey('calendars_def.calid', name='calendars_imports_fk_calid', ondelete='CASCADE', onupdate='CASCADE'),
        Comment('Calendar ID'),
        nullable=False,
        info={
            'header_string' : _('Calendar'),
            'read_only' : True,
            'filter_type' : 'list',
            'column_flex' : 2
        }
    )
    # Optional name override; falls back to the calendar's own name.
    name = Column(
        Unicode(255),
        Comment('Calendar name'),
        nullable=True,
        default=None,
        server_default=text('NULL'),
        info={
            'header_string' : _('Name'),
            'column_flex' : 3
        }
    )
    # Optional style override (index into _calendar_styles).
    style = Column(
        UInt32(),
        Comment('Calendar style code'),
        nullable=True,
        default=None,
        server_default=text('NULL'),
        info={
            'header_string' : _('Style'),
            'min_value' : 0,
            'max_value' : len(_calendar_styles),
            'editor_xtype' : 'calendarcolor'
        }
    )

    @property
    def real_name(self):
        """Effective display name: local override or the source calendar's."""
        if self.name:
            return self.name
        return self.calendar.name

    def __str__(self):
        return '%s' % self.real_name

    @classmethod
    def __augment_query__(cls, sess, query, params, req):
        # UI listings only show the requesting user's own imports.
        query = query.filter(CalendarImport.user_id == req.user.id)
        return query

    @classmethod
    def __augment_create__(cls, sess, obj, values, req):
        # Force ownership to the requesting user.
        obj.user_id = req.user.id
        return True

    @classmethod
    def __augment_update__(cls, sess, obj, values, req):
        if obj.user_id == req.user.id:
            return True
        return False

    @classmethod
    def __augment_delete__(cls, sess, obj, values, req):
        if obj.user_id == req.user.id:
            return True
        return False
class Event(Base):
    """
    User-defined event. Stored in user calendar.

    Write access for non-creators is resolved through the parent calendar's
    ACL (Calendar.can_write) in the ``__augment_*`` hooks.  Events may carry
    their original iCalendar payload for round-tripping.
    """
    __tablename__ = 'calendars_events'
    __table_args__ = (
        Comment('User calendar events'),
        Index('calendars_events_i_calid', 'calid'),
        Index('calendars_events_i_uid', 'uid'), # XXX: add gid?
        Index('calendars_events_i_icaluid', 'icaluid'),
        Index('calendars_events_i_dtstart', 'dtstart'),
        # DB-side triggers maintain bits not handled in Python (definitions
        # live in the SQL schema, not in this file).
        Trigger('before', 'insert', 't_calendars_events_bi'),
        Trigger('before', 'update', 't_calendars_events_bu'),
        {
            'mysql_engine' : 'InnoDB',
            'mysql_charset' : 'utf8',
            'info' : {
                'menu_name' : _('Events'),
                'default_sort' : ({ 'property': 'dtstart' ,'direction': 'DESC' },),
                'grid_view' : ('evid', 'user', 'calendar', 'summary', 'ctime', 'mtime', 'dtstart', 'dtend'),
                'grid_hidden' : ('evid', 'ctime', 'mtime'),
                'form_view' : (
                    'user', 'calendar', 'summary',
                    'dtstart', 'dtend', 'allday',
                    'loc', 'url', 'descr',
                    'ctime', 'mtime'
                ),
                'easy_search' : ('summary',),
                'detail_pane' : ('netprofile_core.views', 'dpane_simple')
            }
        }
    )
    id = Column(
        'evid',
        UInt32(),
        Sequence('calendars_events_evid_seq'),
        Comment('Event ID'),
        primary_key=True,
        nullable=False,
        info={
            'header_string' : _('ID')
        }
    )
    # Parent calendar; events are removed with it (CASCADE).
    calendar_id = Column(
        'calid',
        UInt32(),
        ForeignKey('calendars_def.calid', name='calendars_events_fk_calid', ondelete='CASCADE', onupdate='CASCADE'),
        Comment('Calendar ID'),
        nullable=False,
        info={
            'header_string' : _('Calendar'),
            'read_only' : True,
            'filter_type' : 'list'
        }
    )
    # Creator of the event (may differ from the calendar owner).
    user_id = Column(
        'uid',
        UInt32(),
        ForeignKey('users.uid', name='calendars_events_fk_uid', ondelete='CASCADE', onupdate='CASCADE'),
        Comment('User ID'),
        nullable=False,
        info={
            'header_string' : _('Creator'),
            'read_only' : True,
            'filter_type' : 'none'
        }
    )
    summary = Column(
        Unicode(255),
        Comment('Event summary'),
        nullable=False,
        info={
            'header_string' : _('Summary')
        }
    )
    # Set by the database (FetchedValue), not by Python code.
    creation_time = Column(
        'ctime',
        TIMESTAMP(),
        Comment('Creation timestamp'),
        nullable=True,
        default=None,
        server_default=FetchedValue(),
        info={
            'header_string' : _('Created')
        }
    )
    # Auto-updated to the current timestamp on every modification.
    modification_time = Column(
        'mtime',
        TIMESTAMP(),
        Comment('Last modification timestamp'),
        CurrentTimestampDefault(on_update=True),
        nullable=False,
        info={
            'header_string' : _('Modified')
        }
    )
    event_start = Column(
        'dtstart',
        TIMESTAMP(),
        Comment('Event start timestamp'),
        nullable=True,
        default=None,
        info={
            'header_string' : _('Start')
        }
    )
    event_end = Column(
        'dtend',
        TIMESTAMP(),
        Comment('Event end timestamp'),
        nullable=True,
        default=None,
        info={
            'header_string' : _('End')
        }
    )
    all_day = Column(
        'allday',
        NPBoolean(),
        Comment('Is event all-day?'),
        nullable=False,
        default=False,
        server_default=npbool(False),
        info={
            'header_string' : _('All Day')
        }
    )
    # Stable UID from the source iCalendar feed, used for deduplication
    # (indexed via calendars_events_i_icaluid).
    icalendar_uid = Column(
        'icaluid',
        Unicode(255),
        Comment('iCalendar UID'),
        nullable=True,
        default=None,
        server_default=text('NULL'),
        info={
            'header_string' : _('iCal UID')
        }
    )
    location = Column(
        'loc',
        Unicode(255),
        Comment('Event location'),
        nullable=True,
        default=None,
        server_default=text('NULL'),
        info={
            'header_string' : _('Location')
        }
    )
    url = Column(
        Unicode(255),
        Comment('Event-related URL'),
        nullable=True,
        default=None,
        server_default=text('NULL'),
        info={
            'header_string' : _('URL')
        }
    )
    # Verbatim copy of the imported iCalendar entry, if any.
    icalendar_data = Column(
        'icaldata',
        LargeBinary(),
        Comment('Original iCalendar data'),
        nullable=True,
        default=None,
        server_default=text('NULL'),
        info={
            'header_string' : _('iCal Data')
        }
    )
    description = Column(
        'descr',
        UnicodeText(),
        Comment('Event description'),
        nullable=True,
        default=None,
        server_default=text('NULL'),
        info={
            'header_string' : _('Description')
        }
    )

    @hybrid_property
    def duration(self):
        # NOTE(review): dtstart/dtend are nullable; this raises TypeError if
        # either is None — callers appear to guarantee both are set. Confirm.
        return self.event_end - self.event_start

    def __str__(self):
        return '%s' % str(self.summary)

    @classmethod
    def __augment_query__(cls, sess, query, params, req):
        # UI listings only show events created by the requesting user.
        query = query.filter(Event.user_id == req.user.id)
        return query

    @classmethod
    def __augment_create__(cls, sess, obj, values, req):
        # Force creator, then require write access on the target calendar.
        obj.user_id = req.user.id
        cal = sess.query(Calendar).get(obj.calendar_id)
        if (not cal) or (not cal.can_write(req.user)):
            return False
        return True

    @classmethod
    def __augment_update__(cls, sess, obj, values, req):
        # Creator may always edit; others need calendar write access.
        if obj.user_id == req.user.id:
            return True
        cal = sess.query(Calendar).get(obj.calendar_id)
        if cal and cal.can_write(req.user):
            return True
        return False

    @classmethod
    def __augment_delete__(cls, sess, obj, values, req):
        # Same rule as updates: creator or calendar write access.
        if obj.user_id == req.user.id:
            return True
        cal = sess.query(Calendar).get(obj.calendar_id)
        if cal and cal.can_write(req.user):
            return True
        return False
# Declarations of server-side stored functions for converting hardware (MAC)
# addresses between a 6-byte binary column and the common textual notations.
# The function bodies live in the SQL schema; these objects only describe
# their signatures for schema generation.
HWAddrHexIEEEFunction = SQLFunction(
    'hwaddr_hex_i',
    args=(SQLFunctionArgument('hwbin', BINARY(6)),),
    returns=Unicode(15),
    comment='Convert binary hardware address to IEEE-style string',
    reads_sql=False,
    writes_sql=False
)
HWAddrHexLinuxFunction = SQLFunction(
    'hwaddr_hex_l',
    args=(SQLFunctionArgument('hwbin', BINARY(6)),),
    returns=Unicode(18),
    comment='Convert binary hardware address to Linux-style string',
    reads_sql=False,
    writes_sql=False
)
HWAddrHexWindowsFunction = SQLFunction(
    'hwaddr_hex_w',
    args=(SQLFunctionArgument('hwbin', BINARY(6)),),
    returns=Unicode(18),
    comment='Convert binary hardware address to Windows-style string',
    reads_sql=False,
    writes_sql=False
)
HWAddrUnhexFunction = SQLFunction(
    'hwaddr_unhex',
    args=(SQLFunctionArgument('hwstr', Unicode(255)),),
    returns=BINARY(6),
    comment='Convert various hardware address formats to binary',
    reads_sql=False,
    writes_sql=False
)
|
nikitos/npui
|
netprofile_core/netprofile_core/models.py
|
Python
|
agpl-3.0
| 172,823
|
#!/usr/bin/env python
# This file should be compatible with both Python 2 and 3.
# If it is not, please file a bug report.
# pylint: disable=unused-argument
"""
In order to make our test suit work, we must use a MockDockerDaemon rather than communicating with a real Docker instance.
"""
#external imports
import json,os
#internal imports
import subuserlib.classes.userOwnedObject
class MockDockerDaemon(subuserlib.classes.userOwnedObject.UserOwnedObject):
  """In-memory stand-in for a Docker daemon, backed by a JSON image registry
  on disk, used by the test suite instead of a real Docker instance."""
  images = {}
  nextImageId = 1

  def __init__(self, user):
    subuserlib.classes.userOwnedObject.UserOwnedObject.__init__(self, user)
    # Prefer the local test registry; fall back to the Travis CI path.
    self.imagesPath = "/root/subuser/test/docker/images.json"
    if not os.path.exists(self.imagesPath):
      self.imagesPath = "/home/travis/build/subuser-security/subuser/test/docker/images.json"
    self.__load()

  def __load(self):
    # Replace the image registry with the on-disk state.
    with open(self.imagesPath, "r") as registryFile:
      self.images = json.load(registryFile)

  def __save(self):
    # Persist the current image registry to disk.
    with open(self.imagesPath, "w") as registryFile:
      json.dump(self.images, registryFile)

  def getImageProperties(self, imageTagOrId):
    """
    Returns a dictionary of image properties, or None if the image does not exist.
    """
    return self.images.get(imageTagOrId)

  def build(self, directoryWithDockerfile=None, useCache=True, rm=True, forceRm=True, quiet=False, quietClient=False, tag=None, dockerfile=None):
    """
    Build a Docker image. If a the dockerfile argument is set to a string, use that string as the Dockerfile. Return the newly created images Id or raises an exception if the build fails.
    """
    # Find the first unused numeric ID.
    while str(self.nextImageId) in self.images:
      self.nextImageId += 1
    newId = str(self.nextImageId)
    # The parent image is the second token of the FROM line, except that
    # debian-based builds are treated as having no parent.
    firstLine = dockerfile.split("\n")[0]
    parent = firstLine.split(" ")[1].rstrip()
    if "debian" in dockerfile:
      parent = ""
    self.images[newId] = {"Id": newId, "Parent": parent}
    self.__save()
    return newId

  def removeImage(self, imageId):
    # Drop the image and persist the change.
    del self.images[imageId]
    self.__save()

  def execute(self, args, cwd=None):
    # The mock daemon ignores arbitrary docker CLI invocations.
    pass
|
Robobench/rapman-subuser
|
logic/subuserlib/classes/mockDockerDaemon.py
|
Python
|
lgpl-3.0
| 2,083
|
#!/usr/bin/env python
__author__ = 'AliHamdan'
__URL__ = 'github.com/kindredbay'
__version__ = '1.1'
from urllib2 import urlopen, URLError
import re, sys, time, os, optparse, subprocess
##########################################################################################################
class progressBar:
    """Console download progress bar.

    print_progress() is used as a pytube on_progress callback; print_end()
    as the on_finish callback, which wipes the bar from the terminal line.
    """
    def __init__(self, barlength=25):  # constructor with default value for barlength
        self.barlength = barlength     # total character width of the bar
        self.position = 0
        self.longest = 0               # longest line drawn so far, for clean overwrites

    def print_progress(self, cur, total, start):
        """Redraw the bar: *cur* of *total* bytes, *start* from time.clock()."""
        # BUG FIX: under Python 2, cur/total and elapsed/cur were integer
        # divisions, so the percentage and ETA were stuck at 0.  Force float.
        currentper = float(cur) / total                  # current percentage
        elapsed = int(time.clock() - start) + 1          # +1 avoids division by zero
        curbar = int(currentper * self.barlength)
        # BUG FIX: ''.join over n empty strings produced n-1 characters;
        # simple string repetition draws the intended width.
        bar = '\r[' + '=' * curbar                       # Draws Progress
        bar += '>'
        bar += ' ' * (self.barlength - curbar) + '] '    # Pads remaining space
        bar += bytestostr(float(cur) / elapsed) + '/s '  # Calculates Rate
        bar += getHumanTime((total - cur) * (float(elapsed) / cur)) + ' left'  # Remaining time
        if len(bar) > self.longest:                      # Keeps track of space to overwrite
            self.longest = len(bar)
        bar += ' ' * (self.longest - len(bar))
        sys.stdout.write(bar)

    def print_end(self, *args):
        """Clear the progress bar from the current terminal line."""
        # BUG FIX: the original formatted a generator object
        # ('(' ' for _ in range(...))') instead of actual spaces, so the bar
        # was never erased.
        sys.stdout.write('\r{0}\r'.format(' ' * self.longest))
def getHumanTime(sec):
    """Format a duration in seconds as a coarse human-readable string
    (hours, minutes, or seconds — whichever unit fits first)."""
    if sec >= 3600:
        # Converts to Hours
        return '{0:d} hour(s)'.format(int(sec / 3600))
    if sec >= 60:
        # Converts to Minutes
        return '{0:d} minute(s)'.format(int(sec / 60))
    # No Conversion
    return '{0:d} second(s)'.format(int(sec))
def bytestostr(bts):
    """Format a byte count with its largest fitting binary-unit suffix
    (b, Kb, Mb, Gb, or Tb), always with two decimal places."""
    amount = float(bts)
    if amount >= 1024 ** 4:      # Converts to Terabytes
        return '%.2fTb' % (amount / 1024 ** 4)
    if amount >= 1024 ** 3:      # Converts to Gigabytes
        return '%.2fGb' % (amount / 1024 ** 3)
    if amount >= 1024 ** 2:      # Converts to Megabytes
        return '%.2fMb' % (amount / 1024 ** 2)
    if amount >= 1024:           # Converts to Kilobytes
        return '%.2fKb' % (amount / 1024)
    return '%.2fb' % amount      # No Conversion
###########################################################
#Prints logo and sleeps for a breif moment
# Prints the ASCII-art logo and sleeps for a brief moment so it is visible.
# NOTE: Python 2 print statement — this file is Python 2 only.
def PrintLogo():
    print '''
     _/     _/                      _/_/_/           _/
      _/ _/     _/_/    _/    _/ _/          _/    _/ _/  _/_/_/
       _/    _/    _/  _/    _/ _/  _/_/    _/    _/ _/  _/    _/
      _/    _/    _/  _/    _/ _/    _/    _/    _/ _/  _/    _/
     _/      _/_/      _/_/_/   _/_/_/      _/_/   _/  _/_/_/
                                                   _/
                                                  _/  v1.1
    \n\n'''
    time.sleep(2)
# Timestamped, ANSI-colored console logger.
#   msType      one of 'warning', 'success', 'failure', 'prompt', 'critical'
#   delayAfter  seconds to sleep after printing (for readability)
#   delayBefore seconds to sleep before printing
# Any error while printing (e.g. unknown msType -> KeyError) terminates the
# program with exit code 1.
def cPrint(msType, message,delayAfter=0,delayBefore=0):
    time.sleep(delayBefore)
    try:
        # Map message type -> (header label, ANSI color code).
        mType = {'warning':('WARNING','\033[33m'),'success':('SUCCESS','\033[36m'),'failure':('FAILED','\033[36m'), \
                 'prompt':('PROMPT','\033[35m'),'critical':('CRITICAL','\033[31m')}
        color, header = mType[msType][1], mType[msType][0]
        currentTime = time.strftime('%X')
        colorReset = '\033[0m'
        print color + '[{0}] [{1}] {2} {3}\n'.format(currentTime, header, message, colorReset)
        time.sleep(delayAfter) #for readablity print slower to screen
        #COLOUR DEBUG# ********Some colours appear different depending on OS*******
        #cPrint('warning','This is a warning message')
        #cPrint('prompt','This is a prompt message')
        #cPrint('failure','This is a failure message')
        #cPrint('success','This is a success message')
        #cPrint('critical','This is a critial message')
    except Exception as e:
        print e
        exit(1)
#returns the playlist ID
def getPlaylistID(url):
    """Extract the playlist ID from a YouTube URL.

    Works for both plain playlist URLs (…?list=ID) and watch URLs that also
    carry a video parameter (…watch?v=X&list=ID&…).  Terminates the program
    if *url* has no 'list=' parameter.
    """
    if 'list=' in url:
        # BUG FIX: the original sliced from the FIRST '=' in the URL, which
        # returned garbage such as 'xyz&list=PL…' for watch?v=xyz&list=PL…
        # URLs.  Slice from the 'list=' parameter itself and stop at the
        # next '&', if any.
        startSlice = url.index('list=') + len('list=')
        return url[startSlice:].split('&')[0]
    else:
        cPrint('critical','{0} is not a youtube playlist'.format(url))
        exit(1)
#constructs the urls with starting url
def constructUrl(plUrls):
    """Prefix each relative watch path with the YouTube host, discarding any
    extra query parameters after the first '&'."""
    def _strip_extra(u):
        # Keep everything up to (but excluding) the first '&', if present.
        return u[:u.index('&')] if '&' in u else u
    return ['http://www.youtube.com/' + _strip_extra(u) for u in plUrls]
#gets the playlist songs url's
def getPlaylistUrls(pageHtml, url):
    """Scrape *pageHtml* for the watch URLs belonging to the playlist in
    *url* and return them as fully-qualified, de-duplicated links.
    Terminates the program if the playlist yields no items."""
    playlist_ID = getPlaylistID(url)
    # Match every watch link that carries this playlist's ID.
    pattern = re.compile(r'watch\?v=\S+?list=' + playlist_ID)
    matches = list(set(pattern.findall(pageHtml)))  # drop duplicates
    if not matches:
        cPrint('critical','No items found in playlist')
        exit(1)
    readyUrlList = constructUrl(matches)
    cPrint('success','Found {0} items in playlist'.format(len(readyUrlList)))
    return readyUrlList
#gets page HTML and returns it as string
def getPageHtml(url):
    """Fetch *url* and return the page HTML as a string; terminates the
    program with the failure reason on URL errors."""
    try:
        return str(urlopen(url).read())
    except URLError as e:  # invalid / unreachable URL
        cPrint('critical','{0}'.format(e.reason))
        exit(1)
#sets up directory
def CreateDir(dirc=None):
    """Create (or reuse) the 'yougulps' output directory and return its path.

    If *dirc* is None the directory is created relative to the working
    directory; otherwise under the supplied path.  When the directory already
    exists the user is asked interactively whether to reuse it; declining
    terminates the program.
    """
    # if directory is not supplied we create default directory
    dirPath = os.path.abspath('yougulps') if dirc is None else dirc + '/yougulps'
    try:
        if not os.path.exists(dirPath):  # creates dir if it does not already exist
            os.makedirs(dirPath)
            cPrint('prompt','Creating directory...')
            time.sleep(1)
            cPrint('success','Directory created at {0}'.format(dirPath))
            return dirPath  # returns path string
        else:
            # asks the user if he wants to use the existing directory
            cPrint('warning','{0} already exists!'.format(dirPath))
            cPrint('prompt','Would you like to continue with existing directory? (y/n)')
            use = raw_input()
            if use.lower() == 'y':
                cPrint('success','Using directory {0}'.format(dirPath))
                return dirPath
            else:
                cPrint('critical','No directory to save to. Terminating...')
                exit(0)
    except OSError as e:
        # BUG FIX: OSError has no '.reason' attribute (that belongs to
        # URLError), so the original error handler itself raised
        # AttributeError and hid the real problem.  Format the exception.
        cPrint('critical','{0}'.format(e))
        exit(1)
def DownloadList(path, urlList):
    """Download every video in *urlList* into directory *path*.

    Prefers 720p mp4, falling back to the highest available mp4 resolution.
    Items that fail to resolve are skipped; items already present on disk
    are reported and skipped.
    """
    for url in urlList:
        try:
            yt = YouTube(url)
        except Exception as e:
            cPrint('failure','Skipping item with url "{0}" because {1}'.format(url,str(e)))
            continue
        try:  # tries to find the video in 720p
            video = yt.get('mp4','720p')
        except Exception:
            # BUG FIX: was a bare 'except:' which also swallowed
            # KeyboardInterrupt/SystemExit; narrowed to Exception.
            # Sorts videos by resolution and picks the highest available.
            video = sorted(yt.filter('mp4'), key=lambda video: int(video.resolution[:-1]), reverse=True)[0]
        cPrint('prompt','Downloading "{0}"'.format(yt.filename.encode("utf-8")))
        try:
            bar = progressBar()
            video.download(path, on_progress=bar.print_progress, on_finish=bar.print_end)
            cPrint('success','Downloaded "{0}" successfully'.format(yt.filename.encode("utf-8")))
        except OSError:
            # pytube raises OSError when the target file already exists.
            cPrint('warning','"{0}" already exists in this directory! Skipping...'.format(yt.filename.encode("utf-8")))
#Handles user input
# Handles user input: parses -U (playlist URL, required) and -D (output
# directory, optional), then drives the scrape-and-download pipeline.
# NOTE: Python 2 print statement — this file is Python 2 only.
def Main():
    parser = optparse.OptionParser('usage &prog '+' -U <playlist URL>') #usage
    parser.add_option('-U', dest='url', type='string', help='Specify playlist URL')
    parser.add_option('-D', dest='dest', type='string', help='Specify output destination')
    (options, args) = parser.parse_args() #parser object with ops and args
    if (options.url == None):
        print parser.usage #print usage if no args passed
        exit(0) #exit program
    else:
        PrintLogo()
        #adds protocol if not found
        playListUrl = 'https://' + options.url if not options.url.startswith('http') \
            else options.url
        #use users directory if he passes it in otherwise we use default
        destination = CreateDir(options.dest) if options.dest != None else CreateDir()
        playlistPageContent = getPageHtml(playListUrl)
        playlistUrls = getPlaylistUrls(playlistPageContent, playListUrl)
        DownloadList(destination, playlistUrls) #downloads urls in the complete list
# Ensures the pytube dependency is importable, attempting a pip2 install
# from requirements.txt if it is missing.  Returns True on success;
# terminates the program when installation fails (suggesting sudo).
# NOTE: Python 2 print statement — this file is Python 2 only.
def install():
    try:
        import pytube
    except ImportError:
        cPrint('warning','Some dependencies were not found',1)
        cPrint('prompt','Attempting to install dependencies...',1)
        try:
            reqFile = 'requirements.txt'
            command = 'pip2 install -r ' + reqFile
            process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
            process.wait()
            #print process.communicate()
            if process.returncode == 0:
                cPrint('success','Dependencies successfully installed',1,1)
                return True
            else:
                cPrint('failure','Install failed.')
                cPrint('warning','Please use sudo on your next run',1)
                exit(1)
        except Exception as e:
            print e
            exit(1)
    return True
# Entry point: install/verify pytube first, then import it and run Main().
if __name__ == '__main__':
    if install():
        from pytube import YouTube
        Main()
|
KindredBay/YouGulp
|
yougulp.py
|
Python
|
mit
| 9,483
|
# Copyright 2022 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from connector import channel
from google3.cloud.graphite.mmv2.services.google.compute import (
instance_group_manager_pb2,
)
from google3.cloud.graphite.mmv2.services.google.compute import (
instance_group_manager_pb2_grpc,
)
from typing import List
class InstanceGroupManager(object):
def __init__(
    self,
    id: int = None,
    creation_timestamp: str = None,
    name: str = None,
    description: str = None,
    zone: str = None,
    region: str = None,
    distribution_policy: dict = None,
    instance_template: str = None,
    versions: list = None,
    instance_group: str = None,
    target_pools: list = None,
    base_instance_name: str = None,
    fingerprint: str = None,
    current_actions: dict = None,
    status: dict = None,
    target_size: int = None,
    self_link: str = None,
    auto_healing_policies: list = None,
    update_policy: dict = None,
    named_ports: list = None,
    stateful_policy: dict = None,
    project: str = None,
    location: str = None,
    service_account_file: str = "",
):
    """Initialize the resource wrapper and the gRPC channel.

    Generated code: only the user-settable fields are stored here.
    Output-only fields accepted by the signature (id, creation_timestamp,
    zone, region, instance_group, fingerprint, current_actions, status,
    self_link) are intentionally NOT assigned — they are populated from the
    server response in apply().
    """
    channel.initialize()
    self.name = name
    self.description = description
    self.distribution_policy = distribution_policy
    self.instance_template = instance_template
    self.versions = versions
    self.target_pools = target_pools
    self.base_instance_name = base_instance_name
    self.target_size = target_size
    self.auto_healing_policies = auto_healing_policies
    self.update_policy = update_policy
    self.named_ports = named_ports
    self.stateful_policy = stateful_policy
    self.project = project
    self.location = location
    self.service_account_file = service_account_file
def apply(self):
    """Create or update the instance group manager to match this object.

    Generated code: marshals the set fields into an Apply request (clearing
    message fields whose local value is falsy), issues the RPC, then copies
    every field of the server's response — including output-only fields —
    back onto this object.
    """
    stub = instance_group_manager_pb2_grpc.ComputeInstanceGroupManagerServiceStub(
        channel.Channel()
    )
    request = instance_group_manager_pb2.ApplyComputeInstanceGroupManagerRequest()
    if Primitive.to_proto(self.name):
        request.resource.name = Primitive.to_proto(self.name)
    if Primitive.to_proto(self.description):
        request.resource.description = Primitive.to_proto(self.description)
    if InstanceGroupManagerDistributionPolicy.to_proto(self.distribution_policy):
        request.resource.distribution_policy.CopyFrom(
            InstanceGroupManagerDistributionPolicy.to_proto(
                self.distribution_policy
            )
        )
    else:
        request.resource.ClearField("distribution_policy")
    if Primitive.to_proto(self.instance_template):
        request.resource.instance_template = Primitive.to_proto(
            self.instance_template
        )
    if InstanceGroupManagerVersionsArray.to_proto(self.versions):
        request.resource.versions.extend(
            InstanceGroupManagerVersionsArray.to_proto(self.versions)
        )
    if Primitive.to_proto(self.target_pools):
        request.resource.target_pools.extend(Primitive.to_proto(self.target_pools))
    if Primitive.to_proto(self.base_instance_name):
        request.resource.base_instance_name = Primitive.to_proto(
            self.base_instance_name
        )
    if Primitive.to_proto(self.target_size):
        request.resource.target_size = Primitive.to_proto(self.target_size)
    if InstanceGroupManagerAutoHealingPoliciesArray.to_proto(
        self.auto_healing_policies
    ):
        request.resource.auto_healing_policies.extend(
            InstanceGroupManagerAutoHealingPoliciesArray.to_proto(
                self.auto_healing_policies
            )
        )
    if InstanceGroupManagerUpdatePolicy.to_proto(self.update_policy):
        request.resource.update_policy.CopyFrom(
            InstanceGroupManagerUpdatePolicy.to_proto(self.update_policy)
        )
    else:
        request.resource.ClearField("update_policy")
    if InstanceGroupManagerNamedPortsArray.to_proto(self.named_ports):
        request.resource.named_ports.extend(
            InstanceGroupManagerNamedPortsArray.to_proto(self.named_ports)
        )
    if InstanceGroupManagerStatefulPolicy.to_proto(self.stateful_policy):
        request.resource.stateful_policy.CopyFrom(
            InstanceGroupManagerStatefulPolicy.to_proto(self.stateful_policy)
        )
    else:
        request.resource.ClearField("stateful_policy")
    if Primitive.to_proto(self.project):
        request.resource.project = Primitive.to_proto(self.project)
    if Primitive.to_proto(self.location):
        request.resource.location = Primitive.to_proto(self.location)
    request.service_account_file = self.service_account_file
    response = stub.ApplyComputeInstanceGroupManager(request)
    # Mirror the authoritative server state back onto this wrapper.
    self.id = Primitive.from_proto(response.id)
    self.creation_timestamp = Primitive.from_proto(response.creation_timestamp)
    self.name = Primitive.from_proto(response.name)
    self.description = Primitive.from_proto(response.description)
    self.zone = Primitive.from_proto(response.zone)
    self.region = Primitive.from_proto(response.region)
    self.distribution_policy = InstanceGroupManagerDistributionPolicy.from_proto(
        response.distribution_policy
    )
    self.instance_template = Primitive.from_proto(response.instance_template)
    self.versions = InstanceGroupManagerVersionsArray.from_proto(response.versions)
    self.instance_group = Primitive.from_proto(response.instance_group)
    self.target_pools = Primitive.from_proto(response.target_pools)
    self.base_instance_name = Primitive.from_proto(response.base_instance_name)
    self.fingerprint = Primitive.from_proto(response.fingerprint)
    self.current_actions = InstanceGroupManagerCurrentActions.from_proto(
        response.current_actions
    )
    self.status = InstanceGroupManagerStatus.from_proto(response.status)
    self.target_size = Primitive.from_proto(response.target_size)
    self.self_link = Primitive.from_proto(response.self_link)
    self.auto_healing_policies = InstanceGroupManagerAutoHealingPoliciesArray.from_proto(
        response.auto_healing_policies
    )
    self.update_policy = InstanceGroupManagerUpdatePolicy.from_proto(
        response.update_policy
    )
    self.named_ports = InstanceGroupManagerNamedPortsArray.from_proto(
        response.named_ports
    )
    self.stateful_policy = InstanceGroupManagerStatefulPolicy.from_proto(
        response.stateful_policy
    )
    self.project = Primitive.from_proto(response.project)
    self.location = Primitive.from_proto(response.location)
def delete(self):
    """Issue a Delete RPC for this instance group manager.

    Serializes the mutable fields of ``self`` into ``request.resource``
    the same way apply()/to_proto() do: scalar fields are copied only when
    they serialize to a non-empty value, message fields are CopyFrom'd or
    explicitly cleared, and repeated fields are extended.  Output-only
    fields (id, status, current_actions, ...) are not sent.
    """
    stub = instance_group_manager_pb2_grpc.ComputeInstanceGroupManagerServiceStub(
        channel.Channel()
    )
    request = instance_group_manager_pb2.DeleteComputeInstanceGroupManagerRequest()
    request.service_account_file = self.service_account_file
    if Primitive.to_proto(self.name):
        request.resource.name = Primitive.to_proto(self.name)
    if Primitive.to_proto(self.description):
        request.resource.description = Primitive.to_proto(self.description)
    # Message-typed field: ClearField when unset so the server sees "absent"
    # rather than an empty message.
    if InstanceGroupManagerDistributionPolicy.to_proto(self.distribution_policy):
        request.resource.distribution_policy.CopyFrom(
            InstanceGroupManagerDistributionPolicy.to_proto(
                self.distribution_policy
            )
        )
    else:
        request.resource.ClearField("distribution_policy")
    if Primitive.to_proto(self.instance_template):
        request.resource.instance_template = Primitive.to_proto(
            self.instance_template
        )
    if InstanceGroupManagerVersionsArray.to_proto(self.versions):
        request.resource.versions.extend(
            InstanceGroupManagerVersionsArray.to_proto(self.versions)
        )
    if Primitive.to_proto(self.target_pools):
        request.resource.target_pools.extend(Primitive.to_proto(self.target_pools))
    if Primitive.to_proto(self.base_instance_name):
        request.resource.base_instance_name = Primitive.to_proto(
            self.base_instance_name
        )
    if Primitive.to_proto(self.target_size):
        request.resource.target_size = Primitive.to_proto(self.target_size)
    if InstanceGroupManagerAutoHealingPoliciesArray.to_proto(
        self.auto_healing_policies
    ):
        request.resource.auto_healing_policies.extend(
            InstanceGroupManagerAutoHealingPoliciesArray.to_proto(
                self.auto_healing_policies
            )
        )
    if InstanceGroupManagerUpdatePolicy.to_proto(self.update_policy):
        request.resource.update_policy.CopyFrom(
            InstanceGroupManagerUpdatePolicy.to_proto(self.update_policy)
        )
    else:
        request.resource.ClearField("update_policy")
    if InstanceGroupManagerNamedPortsArray.to_proto(self.named_ports):
        request.resource.named_ports.extend(
            InstanceGroupManagerNamedPortsArray.to_proto(self.named_ports)
        )
    if InstanceGroupManagerStatefulPolicy.to_proto(self.stateful_policy):
        request.resource.stateful_policy.CopyFrom(
            InstanceGroupManagerStatefulPolicy.to_proto(self.stateful_policy)
        )
    else:
        request.resource.ClearField("stateful_policy")
    if Primitive.to_proto(self.project):
        request.resource.project = Primitive.to_proto(self.project)
    if Primitive.to_proto(self.location):
        request.resource.location = Primitive.to_proto(self.location)
    response = stub.DeleteComputeInstanceGroupManager(request)
@classmethod
def list(self, project, location, service_account_file=""):
    """Return all instance group managers under *project*/*location*.

    NOTE(review): declared ``@classmethod`` but binds the class as ``self``
    (generated-code convention).  ``request.Project`` / ``request.Location``
    match the capitalized field names of the generated List request proto.
    """
    stub = instance_group_manager_pb2_grpc.ComputeInstanceGroupManagerServiceStub(
        channel.Channel()
    )
    request = instance_group_manager_pb2.ListComputeInstanceGroupManagerRequest()
    request.service_account_file = service_account_file
    request.Project = project
    request.Location = location
    return stub.ListComputeInstanceGroupManager(request).items
def to_proto(self):
    """Serialize this object into a ComputeInstanceGroupManager proto.

    Scalar fields are set only when they serialize to a non-empty value;
    message fields are CopyFrom'd or explicitly cleared when unset; repeated
    fields are extended.  Output-only fields (id, zone, region, status, ...)
    are intentionally not serialized.
    """
    resource = instance_group_manager_pb2.ComputeInstanceGroupManager()
    if Primitive.to_proto(self.name):
        resource.name = Primitive.to_proto(self.name)
    if Primitive.to_proto(self.description):
        resource.description = Primitive.to_proto(self.description)
    if InstanceGroupManagerDistributionPolicy.to_proto(self.distribution_policy):
        resource.distribution_policy.CopyFrom(
            InstanceGroupManagerDistributionPolicy.to_proto(
                self.distribution_policy
            )
        )
    else:
        resource.ClearField("distribution_policy")
    if Primitive.to_proto(self.instance_template):
        resource.instance_template = Primitive.to_proto(self.instance_template)
    if InstanceGroupManagerVersionsArray.to_proto(self.versions):
        resource.versions.extend(
            InstanceGroupManagerVersionsArray.to_proto(self.versions)
        )
    if Primitive.to_proto(self.target_pools):
        resource.target_pools.extend(Primitive.to_proto(self.target_pools))
    if Primitive.to_proto(self.base_instance_name):
        resource.base_instance_name = Primitive.to_proto(self.base_instance_name)
    if Primitive.to_proto(self.target_size):
        resource.target_size = Primitive.to_proto(self.target_size)
    if InstanceGroupManagerAutoHealingPoliciesArray.to_proto(
        self.auto_healing_policies
    ):
        resource.auto_healing_policies.extend(
            InstanceGroupManagerAutoHealingPoliciesArray.to_proto(
                self.auto_healing_policies
            )
        )
    if InstanceGroupManagerUpdatePolicy.to_proto(self.update_policy):
        resource.update_policy.CopyFrom(
            InstanceGroupManagerUpdatePolicy.to_proto(self.update_policy)
        )
    else:
        resource.ClearField("update_policy")
    if InstanceGroupManagerNamedPortsArray.to_proto(self.named_ports):
        resource.named_ports.extend(
            InstanceGroupManagerNamedPortsArray.to_proto(self.named_ports)
        )
    if InstanceGroupManagerStatefulPolicy.to_proto(self.stateful_policy):
        resource.stateful_policy.CopyFrom(
            InstanceGroupManagerStatefulPolicy.to_proto(self.stateful_policy)
        )
    else:
        resource.ClearField("stateful_policy")
    if Primitive.to_proto(self.project):
        resource.project = Primitive.to_proto(self.project)
    if Primitive.to_proto(self.location):
        resource.location = Primitive.to_proto(self.location)
    return resource
class InstanceGroupManagerDistributionPolicy(object):
    """Converter for the IGM distribution-policy block (zones + target shape)."""

    def __init__(self, zones: list = None, target_shape: str = None):
        # zones: list of InstanceGroupManagerDistributionPolicyZones objects
        self.zones = zones
        # target_shape: enum *suffix* string used by the TargetShapeEnum
        # converter — exact valid values defined by the proto, not visible here
        self.target_shape = target_shape

    @classmethod
    def to_proto(self, resource):
        """Convert *resource* to the generated proto message; None for falsy."""
        if not resource:
            return None
        res = instance_group_manager_pb2.ComputeInstanceGroupManagerDistributionPolicy()
        if InstanceGroupManagerDistributionPolicyZonesArray.to_proto(resource.zones):
            res.zones.extend(
                InstanceGroupManagerDistributionPolicyZonesArray.to_proto(
                    resource.zones
                )
            )
        if InstanceGroupManagerDistributionPolicyTargetShapeEnum.to_proto(
            resource.target_shape
        ):
            res.target_shape = InstanceGroupManagerDistributionPolicyTargetShapeEnum.to_proto(
                resource.target_shape
            )
        return res

    @classmethod
    def from_proto(self, resource):
        """Inverse of to_proto(); returns None for a falsy proto."""
        if not resource:
            return None
        return InstanceGroupManagerDistributionPolicy(
            zones=InstanceGroupManagerDistributionPolicyZonesArray.from_proto(
                resource.zones
            ),
            target_shape=InstanceGroupManagerDistributionPolicyTargetShapeEnum.from_proto(
                resource.target_shape
            ),
        )
class InstanceGroupManagerDistributionPolicyArray(object):
    """List &lt;-> proto adapter for InstanceGroupManagerDistributionPolicy."""

    @classmethod
    def to_proto(self, resources):
        # Falsy inputs (None / empty list) pass through unchanged.
        if not resources:
            return resources
        return list(map(InstanceGroupManagerDistributionPolicy.to_proto, resources))

    @classmethod
    def from_proto(self, resources):
        return list(map(InstanceGroupManagerDistributionPolicy.from_proto, resources))
class InstanceGroupManagerDistributionPolicyZones(object):
def __init__(self, zone: str = None):
self.zone = zone
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = (
instance_group_manager_pb2.ComputeInstanceGroupManagerDistributionPolicyZones()
)
if Primitive.to_proto(resource.zone):
res.zone = Primitive.to_proto(resource.zone)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return InstanceGroupManagerDistributionPolicyZones(
zone=Primitive.from_proto(resource.zone),
)
class InstanceGroupManagerDistributionPolicyZonesArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
InstanceGroupManagerDistributionPolicyZones.to_proto(i) for i in resources
]
@classmethod
def from_proto(self, resources):
return [
InstanceGroupManagerDistributionPolicyZones.from_proto(i) for i in resources
]
class InstanceGroupManagerVersions(object):
def __init__(
self, name: str = None, instance_template: str = None, target_size: dict = None
):
self.name = name
self.instance_template = instance_template
self.target_size = target_size
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = instance_group_manager_pb2.ComputeInstanceGroupManagerVersions()
if Primitive.to_proto(resource.name):
res.name = Primitive.to_proto(resource.name)
if Primitive.to_proto(resource.instance_template):
res.instance_template = Primitive.to_proto(resource.instance_template)
if InstanceGroupManagerFixedOrPercent.to_proto(resource.target_size):
res.target_size.CopyFrom(
InstanceGroupManagerFixedOrPercent.to_proto(resource.target_size)
)
else:
res.ClearField("target_size")
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return InstanceGroupManagerVersions(
name=Primitive.from_proto(resource.name),
instance_template=Primitive.from_proto(resource.instance_template),
target_size=InstanceGroupManagerFixedOrPercent.from_proto(
resource.target_size
),
)
class InstanceGroupManagerVersionsArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [InstanceGroupManagerVersions.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [InstanceGroupManagerVersions.from_proto(i) for i in resources]
class InstanceGroupManagerFixedOrPercent(object):
    """Converter for the fixedOrPercent message (fixed count / percent and
    the effective 'calculated' value)."""

    def __init__(self, fixed: int = None, percent: int = None, calculated: int = None):
        self.fixed = fixed
        self.percent = percent
        # calculated: presumably the server-computed effective value — TODO confirm
        self.calculated = calculated

    @classmethod
    def to_proto(self, resource):
        """Convert *resource* to the generated proto message; None for falsy.

        NOTE(review): a value of 0 is falsy and therefore never serialized —
        confirm that 0 is not a meaningful fixed/percent value here.
        """
        if not resource:
            return None
        res = instance_group_manager_pb2.ComputeInstanceGroupManagerFixedOrPercent()
        if Primitive.to_proto(resource.fixed):
            res.fixed = Primitive.to_proto(resource.fixed)
        if Primitive.to_proto(resource.percent):
            res.percent = Primitive.to_proto(resource.percent)
        if Primitive.to_proto(resource.calculated):
            res.calculated = Primitive.to_proto(resource.calculated)
        return res

    @classmethod
    def from_proto(self, resource):
        """Inverse of to_proto(); returns None for a falsy proto."""
        if not resource:
            return None
        return InstanceGroupManagerFixedOrPercent(
            fixed=Primitive.from_proto(resource.fixed),
            percent=Primitive.from_proto(resource.percent),
            calculated=Primitive.from_proto(resource.calculated),
        )
class InstanceGroupManagerFixedOrPercentArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [InstanceGroupManagerFixedOrPercent.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [InstanceGroupManagerFixedOrPercent.from_proto(i) for i in resources]
class InstanceGroupManagerCurrentActions(object):
def __init__(
self,
none: int = None,
creating: int = None,
creating_without_retries: int = None,
verifying: int = None,
recreating: int = None,
deleting: int = None,
abandoning: int = None,
restarting: int = None,
refreshing: int = None,
):
self.none = none
self.creating = creating
self.creating_without_retries = creating_without_retries
self.verifying = verifying
self.recreating = recreating
self.deleting = deleting
self.abandoning = abandoning
self.restarting = restarting
self.refreshing = refreshing
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = instance_group_manager_pb2.ComputeInstanceGroupManagerCurrentActions()
if Primitive.to_proto(resource.none):
res.none = Primitive.to_proto(resource.none)
if Primitive.to_proto(resource.creating):
res.creating = Primitive.to_proto(resource.creating)
if Primitive.to_proto(resource.creating_without_retries):
res.creating_without_retries = Primitive.to_proto(
resource.creating_without_retries
)
if Primitive.to_proto(resource.verifying):
res.verifying = Primitive.to_proto(resource.verifying)
if Primitive.to_proto(resource.recreating):
res.recreating = Primitive.to_proto(resource.recreating)
if Primitive.to_proto(resource.deleting):
res.deleting = Primitive.to_proto(resource.deleting)
if Primitive.to_proto(resource.abandoning):
res.abandoning = Primitive.to_proto(resource.abandoning)
if Primitive.to_proto(resource.restarting):
res.restarting = Primitive.to_proto(resource.restarting)
if Primitive.to_proto(resource.refreshing):
res.refreshing = Primitive.to_proto(resource.refreshing)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return InstanceGroupManagerCurrentActions(
none=Primitive.from_proto(resource.none),
creating=Primitive.from_proto(resource.creating),
creating_without_retries=Primitive.from_proto(
resource.creating_without_retries
),
verifying=Primitive.from_proto(resource.verifying),
recreating=Primitive.from_proto(resource.recreating),
deleting=Primitive.from_proto(resource.deleting),
abandoning=Primitive.from_proto(resource.abandoning),
restarting=Primitive.from_proto(resource.restarting),
refreshing=Primitive.from_proto(resource.refreshing),
)
class InstanceGroupManagerCurrentActionsArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [InstanceGroupManagerCurrentActions.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [InstanceGroupManagerCurrentActions.from_proto(i) for i in resources]
class InstanceGroupManagerStatus(object):
def __init__(
self,
is_stable: bool = None,
version_target: dict = None,
stateful: dict = None,
autoscaler: str = None,
):
self.is_stable = is_stable
self.version_target = version_target
self.stateful = stateful
self.autoscaler = autoscaler
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = instance_group_manager_pb2.ComputeInstanceGroupManagerStatus()
if Primitive.to_proto(resource.is_stable):
res.is_stable = Primitive.to_proto(resource.is_stable)
if InstanceGroupManagerStatusVersionTarget.to_proto(resource.version_target):
res.version_target.CopyFrom(
InstanceGroupManagerStatusVersionTarget.to_proto(
resource.version_target
)
)
else:
res.ClearField("version_target")
if InstanceGroupManagerStatusStateful.to_proto(resource.stateful):
res.stateful.CopyFrom(
InstanceGroupManagerStatusStateful.to_proto(resource.stateful)
)
else:
res.ClearField("stateful")
if Primitive.to_proto(resource.autoscaler):
res.autoscaler = Primitive.to_proto(resource.autoscaler)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return InstanceGroupManagerStatus(
is_stable=Primitive.from_proto(resource.is_stable),
version_target=InstanceGroupManagerStatusVersionTarget.from_proto(
resource.version_target
),
stateful=InstanceGroupManagerStatusStateful.from_proto(resource.stateful),
autoscaler=Primitive.from_proto(resource.autoscaler),
)
class InstanceGroupManagerStatusArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [InstanceGroupManagerStatus.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [InstanceGroupManagerStatus.from_proto(i) for i in resources]
class InstanceGroupManagerStatusVersionTarget(object):
def __init__(self, is_reached: bool = None):
self.is_reached = is_reached
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = (
instance_group_manager_pb2.ComputeInstanceGroupManagerStatusVersionTarget()
)
if Primitive.to_proto(resource.is_reached):
res.is_reached = Primitive.to_proto(resource.is_reached)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return InstanceGroupManagerStatusVersionTarget(
is_reached=Primitive.from_proto(resource.is_reached),
)
class InstanceGroupManagerStatusVersionTargetArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [InstanceGroupManagerStatusVersionTarget.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [
InstanceGroupManagerStatusVersionTarget.from_proto(i) for i in resources
]
class InstanceGroupManagerStatusStateful(object):
def __init__(
self, has_stateful_config: bool = None, per_instance_configs: dict = None
):
self.has_stateful_config = has_stateful_config
self.per_instance_configs = per_instance_configs
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = instance_group_manager_pb2.ComputeInstanceGroupManagerStatusStateful()
if Primitive.to_proto(resource.has_stateful_config):
res.has_stateful_config = Primitive.to_proto(resource.has_stateful_config)
if InstanceGroupManagerStatusStatefulPerInstanceConfigs.to_proto(
resource.per_instance_configs
):
res.per_instance_configs.CopyFrom(
InstanceGroupManagerStatusStatefulPerInstanceConfigs.to_proto(
resource.per_instance_configs
)
)
else:
res.ClearField("per_instance_configs")
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return InstanceGroupManagerStatusStateful(
has_stateful_config=Primitive.from_proto(resource.has_stateful_config),
per_instance_configs=InstanceGroupManagerStatusStatefulPerInstanceConfigs.from_proto(
resource.per_instance_configs
),
)
class InstanceGroupManagerStatusStatefulArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [InstanceGroupManagerStatusStateful.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [InstanceGroupManagerStatusStateful.from_proto(i) for i in resources]
class InstanceGroupManagerStatusStatefulPerInstanceConfigs(object):
def __init__(self, all_effective: bool = None):
self.all_effective = all_effective
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = (
instance_group_manager_pb2.ComputeInstanceGroupManagerStatusStatefulPerInstanceConfigs()
)
if Primitive.to_proto(resource.all_effective):
res.all_effective = Primitive.to_proto(resource.all_effective)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return InstanceGroupManagerStatusStatefulPerInstanceConfigs(
all_effective=Primitive.from_proto(resource.all_effective),
)
class InstanceGroupManagerStatusStatefulPerInstanceConfigsArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
InstanceGroupManagerStatusStatefulPerInstanceConfigs.to_proto(i)
for i in resources
]
@classmethod
def from_proto(self, resources):
return [
InstanceGroupManagerStatusStatefulPerInstanceConfigs.from_proto(i)
for i in resources
]
class InstanceGroupManagerAutoHealingPolicies(object):
def __init__(self, health_check: str = None, initial_delay_sec: int = None):
self.health_check = health_check
self.initial_delay_sec = initial_delay_sec
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = (
instance_group_manager_pb2.ComputeInstanceGroupManagerAutoHealingPolicies()
)
if Primitive.to_proto(resource.health_check):
res.health_check = Primitive.to_proto(resource.health_check)
if Primitive.to_proto(resource.initial_delay_sec):
res.initial_delay_sec = Primitive.to_proto(resource.initial_delay_sec)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return InstanceGroupManagerAutoHealingPolicies(
health_check=Primitive.from_proto(resource.health_check),
initial_delay_sec=Primitive.from_proto(resource.initial_delay_sec),
)
class InstanceGroupManagerAutoHealingPoliciesArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [InstanceGroupManagerAutoHealingPolicies.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [
InstanceGroupManagerAutoHealingPolicies.from_proto(i) for i in resources
]
class InstanceGroupManagerUpdatePolicy(object):
def __init__(
self,
type: str = None,
instance_redistribution_type: str = None,
minimal_action: str = None,
max_surge: dict = None,
max_unavailable: dict = None,
replacement_method: str = None,
):
self.type = type
self.instance_redistribution_type = instance_redistribution_type
self.minimal_action = minimal_action
self.max_surge = max_surge
self.max_unavailable = max_unavailable
self.replacement_method = replacement_method
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = instance_group_manager_pb2.ComputeInstanceGroupManagerUpdatePolicy()
if InstanceGroupManagerUpdatePolicyTypeEnum.to_proto(resource.type):
res.type = InstanceGroupManagerUpdatePolicyTypeEnum.to_proto(resource.type)
if InstanceGroupManagerUpdatePolicyInstanceRedistributionTypeEnum.to_proto(
resource.instance_redistribution_type
):
res.instance_redistribution_type = InstanceGroupManagerUpdatePolicyInstanceRedistributionTypeEnum.to_proto(
resource.instance_redistribution_type
)
if InstanceGroupManagerUpdatePolicyMinimalActionEnum.to_proto(
resource.minimal_action
):
res.minimal_action = InstanceGroupManagerUpdatePolicyMinimalActionEnum.to_proto(
resource.minimal_action
)
if InstanceGroupManagerFixedOrPercent.to_proto(resource.max_surge):
res.max_surge.CopyFrom(
InstanceGroupManagerFixedOrPercent.to_proto(resource.max_surge)
)
else:
res.ClearField("max_surge")
if InstanceGroupManagerFixedOrPercent.to_proto(resource.max_unavailable):
res.max_unavailable.CopyFrom(
InstanceGroupManagerFixedOrPercent.to_proto(resource.max_unavailable)
)
else:
res.ClearField("max_unavailable")
if InstanceGroupManagerUpdatePolicyReplacementMethodEnum.to_proto(
resource.replacement_method
):
res.replacement_method = InstanceGroupManagerUpdatePolicyReplacementMethodEnum.to_proto(
resource.replacement_method
)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return InstanceGroupManagerUpdatePolicy(
type=InstanceGroupManagerUpdatePolicyTypeEnum.from_proto(resource.type),
instance_redistribution_type=InstanceGroupManagerUpdatePolicyInstanceRedistributionTypeEnum.from_proto(
resource.instance_redistribution_type
),
minimal_action=InstanceGroupManagerUpdatePolicyMinimalActionEnum.from_proto(
resource.minimal_action
),
max_surge=InstanceGroupManagerFixedOrPercent.from_proto(resource.max_surge),
max_unavailable=InstanceGroupManagerFixedOrPercent.from_proto(
resource.max_unavailable
),
replacement_method=InstanceGroupManagerUpdatePolicyReplacementMethodEnum.from_proto(
resource.replacement_method
),
)
class InstanceGroupManagerUpdatePolicyArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [InstanceGroupManagerUpdatePolicy.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [InstanceGroupManagerUpdatePolicy.from_proto(i) for i in resources]
class InstanceGroupManagerNamedPorts(object):
def __init__(self, name: str = None, port: int = None):
self.name = name
self.port = port
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = instance_group_manager_pb2.ComputeInstanceGroupManagerNamedPorts()
if Primitive.to_proto(resource.name):
res.name = Primitive.to_proto(resource.name)
if Primitive.to_proto(resource.port):
res.port = Primitive.to_proto(resource.port)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return InstanceGroupManagerNamedPorts(
name=Primitive.from_proto(resource.name),
port=Primitive.from_proto(resource.port),
)
class InstanceGroupManagerNamedPortsArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [InstanceGroupManagerNamedPorts.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [InstanceGroupManagerNamedPorts.from_proto(i) for i in resources]
class InstanceGroupManagerStatefulPolicy(object):
def __init__(self, preserved_state: dict = None):
self.preserved_state = preserved_state
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = instance_group_manager_pb2.ComputeInstanceGroupManagerStatefulPolicy()
if InstanceGroupManagerStatefulPolicyPreservedState.to_proto(
resource.preserved_state
):
res.preserved_state.CopyFrom(
InstanceGroupManagerStatefulPolicyPreservedState.to_proto(
resource.preserved_state
)
)
else:
res.ClearField("preserved_state")
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return InstanceGroupManagerStatefulPolicy(
preserved_state=InstanceGroupManagerStatefulPolicyPreservedState.from_proto(
resource.preserved_state
),
)
class InstanceGroupManagerStatefulPolicyArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [InstanceGroupManagerStatefulPolicy.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [InstanceGroupManagerStatefulPolicy.from_proto(i) for i in resources]
class InstanceGroupManagerStatefulPolicyPreservedState(object):
def __init__(self, disks: dict = None):
self.disks = disks
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = (
instance_group_manager_pb2.ComputeInstanceGroupManagerStatefulPolicyPreservedState()
)
if Primitive.to_proto(resource.disks):
res.disks = Primitive.to_proto(resource.disks)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return InstanceGroupManagerStatefulPolicyPreservedState(
disks=Primitive.from_proto(resource.disks),
)
class InstanceGroupManagerStatefulPolicyPreservedStateArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
InstanceGroupManagerStatefulPolicyPreservedState.to_proto(i)
for i in resources
]
@classmethod
def from_proto(self, resources):
return [
InstanceGroupManagerStatefulPolicyPreservedState.from_proto(i)
for i in resources
]
class InstanceGroupManagerStatefulPolicyPreservedStateDisks(object):
def __init__(self, auto_delete: str = None):
self.auto_delete = auto_delete
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = (
instance_group_manager_pb2.ComputeInstanceGroupManagerStatefulPolicyPreservedStateDisks()
)
if InstanceGroupManagerStatefulPolicyPreservedStateDisksAutoDeleteEnum.to_proto(
resource.auto_delete
):
res.auto_delete = InstanceGroupManagerStatefulPolicyPreservedStateDisksAutoDeleteEnum.to_proto(
resource.auto_delete
)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return InstanceGroupManagerStatefulPolicyPreservedStateDisks(
auto_delete=InstanceGroupManagerStatefulPolicyPreservedStateDisksAutoDeleteEnum.from_proto(
resource.auto_delete
),
)
class InstanceGroupManagerStatefulPolicyPreservedStateDisksArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
InstanceGroupManagerStatefulPolicyPreservedStateDisks.to_proto(i)
for i in resources
]
@classmethod
def from_proto(self, resources):
return [
InstanceGroupManagerStatefulPolicyPreservedStateDisks.from_proto(i)
for i in resources
]
class InstanceGroupManagerDistributionPolicyTargetShapeEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return instance_group_manager_pb2.ComputeInstanceGroupManagerDistributionPolicyTargetShapeEnum.Value(
"ComputeInstanceGroupManagerDistributionPolicyTargetShapeEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return instance_group_manager_pb2.ComputeInstanceGroupManagerDistributionPolicyTargetShapeEnum.Name(
resource
)[
len("ComputeInstanceGroupManagerDistributionPolicyTargetShapeEnum") :
]
class InstanceGroupManagerUpdatePolicyTypeEnum(object):
    """Converts the short enum-suffix string to/from the proto enum integer."""

    @classmethod
    def to_proto(self, resource):
        # Falsy (None / "") passes through unchanged.
        if not resource:
            return resource
        enum_type = (
            instance_group_manager_pb2.ComputeInstanceGroupManagerUpdatePolicyTypeEnum
        )
        return enum_type.Value(
            "ComputeInstanceGroupManagerUpdatePolicyTypeEnum%s" % resource
        )

    @classmethod
    def from_proto(self, resource):
        if not resource:
            return resource
        enum_type = (
            instance_group_manager_pb2.ComputeInstanceGroupManagerUpdatePolicyTypeEnum
        )
        prefix = "ComputeInstanceGroupManagerUpdatePolicyTypeEnum"
        # Strip the generated prefix, leaving just the short enum name.
        return enum_type.Name(resource)[len(prefix):]
class InstanceGroupManagerUpdatePolicyInstanceRedistributionTypeEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return instance_group_manager_pb2.ComputeInstanceGroupManagerUpdatePolicyInstanceRedistributionTypeEnum.Value(
"ComputeInstanceGroupManagerUpdatePolicyInstanceRedistributionTypeEnum%s"
% resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return instance_group_manager_pb2.ComputeInstanceGroupManagerUpdatePolicyInstanceRedistributionTypeEnum.Name(
resource
)[
len(
"ComputeInstanceGroupManagerUpdatePolicyInstanceRedistributionTypeEnum"
) :
]
class InstanceGroupManagerUpdatePolicyMinimalActionEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return instance_group_manager_pb2.ComputeInstanceGroupManagerUpdatePolicyMinimalActionEnum.Value(
"ComputeInstanceGroupManagerUpdatePolicyMinimalActionEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return instance_group_manager_pb2.ComputeInstanceGroupManagerUpdatePolicyMinimalActionEnum.Name(
resource
)[
len("ComputeInstanceGroupManagerUpdatePolicyMinimalActionEnum") :
]
class InstanceGroupManagerUpdatePolicyReplacementMethodEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return instance_group_manager_pb2.ComputeInstanceGroupManagerUpdatePolicyReplacementMethodEnum.Value(
"ComputeInstanceGroupManagerUpdatePolicyReplacementMethodEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return instance_group_manager_pb2.ComputeInstanceGroupManagerUpdatePolicyReplacementMethodEnum.Name(
resource
)[
len("ComputeInstanceGroupManagerUpdatePolicyReplacementMethodEnum") :
]
class InstanceGroupManagerStatefulPolicyPreservedStateDisksAutoDeleteEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return instance_group_manager_pb2.ComputeInstanceGroupManagerStatefulPolicyPreservedStateDisksAutoDeleteEnum.Value(
"ComputeInstanceGroupManagerStatefulPolicyPreservedStateDisksAutoDeleteEnum%s"
% resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return instance_group_manager_pb2.ComputeInstanceGroupManagerStatefulPolicyPreservedStateDisksAutoDeleteEnum.Name(
resource
)[
len(
"ComputeInstanceGroupManagerStatefulPolicyPreservedStateDisksAutoDeleteEnum"
) :
]
class Primitive(object):
    """Identity codec for scalar proto fields.

    ``to_proto`` maps any falsy value (None, "", 0, False) to the proto
    string default ``""``; ``from_proto`` is a pure pass-through.
    """

    @classmethod
    def to_proto(self, s):
        return s if s else ""

    @classmethod
    def from_proto(self, s):
        return s
|
GoogleCloudPlatform/declarative-resource-client-library
|
python/services/compute/instance_group_manager.py
|
Python
|
apache-2.0
| 45,199
|
""" Contains unit tests of NetworkAgent module
"""
import DIRAC.AccountingSystem.Agent.NetworkAgent as module
import unittest
from mock.mock import MagicMock
__RCSID__ = "$Id$"

# Message-queue URIs the agent is configured to consume from: one Topic and
# one Queue endpoint carrying perfSONAR summary measurements.
MQURI1 = 'mq.dirac.net::Topic::perfsonar.summary.packet-loss-rate'
MQURI2 = 'mq.dirac.net::Queue::perfsonar.summary.histogram-owdelay'

# Root CS (Configuration Service) path under which site definitions live.
ROOT_PATH = '/Resources/Sites'

# Fixture sites/hosts: SITE1 appears in both configs (one host disabled in
# the update), SITE2 disappears in the update, SITE3 is newly added.
SITE1 = 'LCG.Dirac.net'
SITE2 = 'LCG.DiracToRemove.net'
SITE3 = 'VAC.DiracToAdd.org'

SITE1_HOST1 = 'perfsonar.diracold.net'
SITE1_HOST2 = 'perfsonar-to-disable.diracold.net'
SITE2_HOST1 = 'perfsonar.diractoremove.net'
SITE3_HOST1 = 'perfsonar.diractoadd.org'

# Configuration snapshot returned by the mocked gConfig on the first call.
INITIAL_CONFIG = \
  {
    '%s/LCG/%s/Network/%s/Enabled' % ( ROOT_PATH, SITE1, SITE1_HOST1 ): 'True',
    '%s/LCG/%s/Network/%s/Enabled' % ( ROOT_PATH, SITE1, SITE1_HOST2 ): 'True',
    '%s/LCG/%s/Network/%s/Enabled' % ( ROOT_PATH, SITE2, SITE2_HOST1 ): 'True'
  }

# Snapshot returned on the second call: SITE1_HOST2 disabled, SITE2 removed,
# SITE3 added.
UPDATED_CONFIG = \
  {
    '%s/LCG/%s/Network/%s/Enabled' % ( ROOT_PATH, SITE1, SITE1_HOST1 ): 'True',
    '%s/LCG/%s/Network/%s/Enabled' % ( ROOT_PATH, SITE1, SITE1_HOST2 ): 'False',
    '%s/LCG/%s/Network/%s/Enabled' % ( ROOT_PATH, SITE3, SITE3_HOST1 ): 'True'
  }
class NetworkAgentSuccessTestCase( unittest.TestCase ):
  """ Test class to check success scenarios.
  """

  def setUp( self ):
    """Patch every NetworkAgent collaborator with a mock and build the agent
    so it can be instantiated without a running DIRAC framework."""
    # external dependencies
    module.datetime = MagicMock()

    # internal dependencies
    module.S_ERROR = MagicMock()
    module.S_OK = MagicMock()
    module.gLogger = MagicMock()
    module.AgentModule = MagicMock()
    module.Network = MagicMock()
    module.gConfig = MagicMock()
    module.CSAPI = MagicMock()
    module.createConsumer = MagicMock()

    # prepare test object
    # __init__ is stubbed out entirely; am_getOption returns 100, which the
    # agent reads as the buffer timeout option.
    module.NetworkAgent.__init__ = MagicMock( return_value = None )
    module.NetworkAgent.am_getOption = MagicMock( return_value = 100 ) # buffer timeout
    self.agent = module.NetworkAgent()
    self.agent.initialize()

  def test_updateNameDictionary( self ):
    """updateNameDictionary() must add new hosts and drop removed ones."""
    # First call sees INITIAL_CONFIG, second call sees UPDATED_CONFIG.
    module.gConfig.getConfigurationTree.side_effect = [
        {'OK': True, 'Value': INITIAL_CONFIG },
        {'OK': True, 'Value': UPDATED_CONFIG },
    ]

    # check if name dictionary is empty
    self.assertFalse( self.agent.nameDictionary )

    self.agent.updateNameDictionary()
    self.assertEqual( self.agent.nameDictionary[SITE1_HOST1], SITE1 )
    self.assertEqual( self.agent.nameDictionary[SITE1_HOST2], SITE1 )
    self.assertEqual( self.agent.nameDictionary[SITE2_HOST1], SITE2 )

    self.agent.updateNameDictionary()
    self.assertEqual( self.agent.nameDictionary[SITE1_HOST1], SITE1 )
    self.assertEqual( self.agent.nameDictionary[SITE3_HOST1], SITE3 )
    # check if hosts were removed from dictionary
    self.assertRaises( KeyError, lambda: self.agent.nameDictionary[SITE1_HOST2] )
    self.assertRaises( KeyError, lambda: self.agent.nameDictionary[SITE2_HOST1] )

  def test_agentExecute( self ):
    """execute() must succeed on fresh, active and idle (restart) cycles."""
    module.NetworkAgent.am_getOption.return_value = '%s, %s' % ( MQURI1, MQURI2 )
    module.gConfig.getConfigurationTree.return_value = {'OK': True, 'Value': INITIAL_CONFIG }

    # first run
    result = self.agent.execute()
    self.assertTrue( result['OK'] )

    # second run (simulate new messages)
    self.agent.messagesCount += 10
    result = self.agent.execute()
    self.assertTrue( result['OK'] )

    # third run (no new messages - restart consumers)
    result = self.agent.execute()
    self.assertTrue( result['OK'] )
if __name__ == '__main__':
  # Assemble the suite from the success test case and run it verbosely.
  runner = unittest.TextTestRunner( verbosity = 2 )
  suite = unittest.defaultTestLoader.loadTestsFromTestCase( NetworkAgentSuccessTestCase )
  testResult = runner.run( suite )
|
Andrew-McNab-UK/DIRAC
|
AccountingSystem/Agent/test/Test_NetworkAgent.py
|
Python
|
gpl-3.0
| 3,707
|
import sys
class TreeNode:
    """Binary tree node: a value plus optional left/right children."""
    def __init__(self, x):
        self.val = x
        self.left = self.right = None
class Solution:
    """Minimum absolute difference between any two node values in a BST.

    Uses an in-order traversal: in a BST the closest pair of values is
    always adjacent in the sorted (in-order) sequence, so comparing each
    node with its in-order predecessor suffices.
    """
    # Traversal state. NOTE: these were shared class attributes in the
    # original code, so a second call (even on a fresh instance) reused
    # stale values; getMinimumDifference now resets them on every call.
    minDiff = sys.maxsize
    prev = None

    def getMinimumDifference(self, root: "TreeNode") -> int:
        """Return the smallest |a - b| over all pairs of node values.

        Returns sys.maxsize for an empty or single-node tree (no pair).
        """
        # Reset state so repeated calls are independent.
        self.minDiff = sys.maxsize
        self.prev = None
        self.getMinimumDifferenceHelper(root)
        return self.minDiff

    def getMinimumDifferenceHelper(self, root):
        """In-order walk comparing each node with its in-order predecessor."""
        if not root:
            return
        self.getMinimumDifferenceHelper(root.left)
        # 'is not None' (not truthiness) so a falsy node type could not hide a predecessor.
        if self.prev is not None:
            self.minDiff = min(self.minDiff, abs(root.val - self.prev.val))
        self.prev = root
        self.getMinimumDifferenceHelper(root.right)
# Demo: build a sample BST and print its minimum absolute difference.
root = TreeNode(10)
left = TreeNode(3)
right = TreeNode(15)
root.left, root.right = left, right
left.left = TreeNode(2)
left.right = TreeNode(8)
left.right.left = TreeNode(7)
left.right.right = TreeNode(9)
right.left = TreeNode(13)
right.right = TreeNode(17)
right.right.right = TreeNode(19)
ob = Solution()
print(ob.getMinimumDifference(root))
|
shobhitmishra/CodingProblems
|
LeetCode/Session3/minAbsDifference.py
|
Python
|
mit
| 1,010
|
import maybe
class Valid:
    """Successful validation result wrapping a single value ``a``."""
    a = None
    def __init__(self, a):
        self.a = a
    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.a == other.a
    def __ne__(self, other):
        return not self.__eq__(other)
    def __str__(self):
        return 'Valid(%s)' % self.a
    def is_valid(self):
        """A Valid is always valid."""
        return True
    def map(self, f):
        """Apply ``f`` to the wrapped value and rewrap the result."""
        return Valid(f(self.a))
    def flat_map(self, f):
        """Apply ``f`` (which itself returns a validation) to the value."""
        return f(self.a)
    def ap(self, v):
        """Applicative apply: ``self`` wraps a function, ``v`` its argument."""
        if not v.is_valid():
            return v
        return Valid(self.a(v.a))
    def to_maybe(self):
        """Convert to ``maybe.Just`` holding the wrapped value."""
        return maybe.Just(self.a)
class Invalid:
    """Failed validation result carrying a list of error messages ``es``."""
    es = None
    def __init__(self, es):
        self.es = es
    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.es == other.es
    def __ne__(self, other):
        return not self.__eq__(other)
    def __str__(self):
        return 'Invalid(%s)' % self.es
    def is_valid(self):
        """An Invalid is never valid."""
        return False
    def map(self, f):
        """Mapping over a failure is a no-op."""
        return self
    def flat_map(self, f):
        """Chaining off a failure is a no-op."""
        return self
    def ap(self, v):
        """Applicative apply: accumulate both error lists when ``v`` failed too."""
        if not v.is_valid():
            return Invalid(self.es + v.es)
        return self
    def to_maybe(self):
        """Convert to ``maybe.Nothing`` (the errors are discarded)."""
        return maybe.Nothing()
def get_required_env(name):
    """Look up the environment variable ``name``.

    Returns Valid(value) when the variable is set, otherwise an Invalid
    carrying a single error message naming the missing variable.
    """
    from os import getenv
    value = getenv(name)
    if value is not None:
        return Valid(value)
    return Invalid(['env var ' + name + ' required'])
def lift_aN(arity, f):
    """Lift an ``arity``-argument function ``f`` into the validation world.

    Returns Valid(g) where ``g`` is a curried form of ``f``: each of the
    ``arity`` successive applications consumes one argument, and the last
    one calls ``f`` with all collected arguments. Returns Invalid when
    ``arity`` is not positive.
    """
    def curry(arity, f, acc=None):
        # Accumulated arguments so far. Use a None default instead of the
        # original mutable ``acc=[]`` default so the accumulator can never
        # be shared between independent call chains.
        if acc is None:
            acc = []
        if arity == 1:
            def g(x):
                # Final argument: invoke f with everything collected.
                args = list(acc)
                args.append(x)
                return f(*args)
            return g
        else:
            def g(x):
                # Collect one more argument and keep currying.
                args = list(acc)
                args.append(x)
                return curry(arity - 1, f, args)
            return g
    if arity >= 1:
        return Valid(curry(arity, f))
    else:
        return Invalid(["n must be positive in lift_aN(n, f)"])
def lift_a(f):
    """Lift a unary function into the validation applicative."""
    return lift_aN(1, f)
def lift_a2(f):
    """Lift a binary function into the validation applicative."""
    return lift_aN(2, f)
def lift_a3(f):
    """Lift a ternary function into the validation applicative."""
    return lift_aN(3, f)
|
udacity/pygow
|
pygow/validation.py
|
Python
|
bsd-3-clause
| 2,190
|
#!/usr/bin/python3
# -*- coding: utf-8
"""Unit tests for code in modeline.py."""
from modeline import MODELINE_SUPPORTED_EDITORS, get_modeline, get_modelines
import unittest
class TestModelineGeneration(unittest.TestCase):
    """Unit tests for get_modeline() and get_modelines()."""
    # 'invalid_option' is included on purpose: unknown settings must be
    # handled gracefully (presumably ignored - verify against modeline.py).
    test_settings = {
        'indentation_level': 4,
        'tab_width': 8,
        'expand_tab': True,
        'invalid_option': 10
    }
    def test_modelinegeneration(self):
        """Just checking that we do not throw at the moment."""
        # 'invalid-editor' is deliberately added to the supported set to
        # check that unsupported editors do not raise either.
        for editor in MODELINE_SUPPORTED_EDITORS | {'invalid-editor'}:
            get_modeline(editor, self.test_settings)
    def test_multiplemodelinegeneration(self):
        """Just checking that we do not throw at the moment."""
        get_modelines(
            MODELINE_SUPPORTED_EDITORS | {'invalid-editor'},
            self.test_settings)
# vim: filetype=python tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
SylvainDe/letscode
|
modeline_tests.py
|
Python
|
mit
| 933
|
#!/usr/bin/env python
import hal
class HandlerClass:
    """GladeVCP handler driving a manual tool-change prompt.

    Watches the gladevcp "change" LED pin: when it goes high, shows which
    tool to insert (or "Remove tool" when the requested number is <= 0);
    when it goes low, clears the label.
    """
    def on_led_change(self, hal_led, data=None):
        '''
        the gladevcp.change led had a transition
        '''
        if hal_led.hal_pin.get():
            if self.halcomp["number"] > 0.0:
                # Fixed user-facing label typo: "too" -> "tool".
                self.change_text.set_label("Insert tool number %d" % (int(self.halcomp["number"])))
            else:
                self.change_text.set_label("Remove tool")
        else:
            # LED off: clear the prompt.
            self.change_text.set_label("")
    def __init__(self, halcomp, builder, useropts):
        self.halcomp = halcomp
        self.change_text = builder.get_object("change-text")
        # Requested tool number, exported as a HAL float input pin.
        self.halcomp.newpin("number", hal.HAL_FLOAT, hal.HAL_IN)
def get_handlers(halcomp, builder, useropts):
    """Factory called by gladevcp to obtain the handler instances."""
    handler = HandlerClass(halcomp, builder, useropts)
    return [handler]
|
CalvinHsu1223/LinuxCNC-EtherCAT-HAL-Driver
|
configs/sim/remap/manual-toolchange-with-tool-length-switch/python/gladevcp-handler.py
|
Python
|
gpl-2.0
| 801
|
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import cv2
import os, pdb
from sklearn.preprocessing import StandardScaler
import scipy.misc as misc
import time
import pickle, glob
from moviepy.editor import VideoFileClip
from feature import extract_features, single_img_features, get_hog_features, color_convert_from_RGB, sample_hog_vis
from utils import prepare_data, draw_boxes
from classification import get_classifer
from fast_multiscale_search import multiscale_search
from false_pos_filter import false_pos_filter
########################################################################
############################# Training #################################
################# Prepare data
print('################# Prepare data ###################')
data_dir = '../data'
# Split the car / not-car image lists into train and test sets (90% train).
cars_train, cars_test, notcars_train, notcars_test = prepare_data(data_dir, train_ratio=0.9)
################ Extract feature
print('################ Extract feature #################')
## Parameters
param = {
'color_space' : 'YCrCb', # Can be RGB, HSV, (LUV, HLS, YUV, YCrCb leads to Nan for PNG image)
'orient' : 11, # HOG orientations
'pix_per_cell' : 8, # HOG pixels per cell
'cell_per_block' : 2, # HOG cells per block
'hog_channel' : "ALL", # Can be 0, 1, 2, or "ALL"
'spatial_size' : (16, 16), # Spatial binning dimensions
'hist_bins' : 32, # Number of histogram bins
'spatial_feat' : False, # Spatial features on or off
'hist_feat' : False, # Histogram features on or off
'hog_feat' : True, # HOG features on or off
}
## Hog visualization
sample_hog_vis(cars_train[0], notcars_train[0], param)
## Extract features
cars_train_fea = extract_features(cars_train, param, data_aug=True)
cars_test_fea = extract_features(cars_test, param, data_aug=True)
notcars_train_fea = extract_features(notcars_train, param, data_aug=True)
notcars_test_fea = extract_features(notcars_test, param, data_aug=True)
#
x_train = np.vstack((cars_train_fea, notcars_train_fea)).astype(np.float64)
x_test = np.vstack((cars_test_fea, notcars_test_fea)).astype(np.float64)
# Fit a per-column scaler
# NOTE: the scaler is fitted on the training data only and then reused for
# the test set, which avoids train/test leakage.
x_scaler = StandardScaler().fit(x_train)
scaled_x_train = x_scaler.transform(x_train)
scaled_x_test = x_scaler.transform(x_test)
# Define the labels vector
# Cars are labelled 1, non-cars 0, matching the vstack order above.
y_train = np.hstack((np.ones(len(cars_train_fea)), np.zeros(len(notcars_train_fea))))
y_test = np.hstack((np.ones(len(cars_test_fea)), np.zeros(len(notcars_test_fea))))
################# Classification
print('################ Classification #################')
svc = get_classifer(scaled_x_train, scaled_x_test, y_train, y_test)
################# Save
# Persist the trained classifier, the fitted scaler and the feature
# parameters so the testing stage below can reload them.
model_param_pickle = {'svc':svc, 'x_scaler':x_scaler, 'param':param}
pickle.dump( model_param_pickle, open( "model_param_pickle.p", "wb" ))
########################################################################
############################# Testing ##################################
# Reload the classifier, scaler and feature parameters saved by training.
model_param_pickle = pickle.load( open( "model_param_pickle.p", "rb" ) )
svc = model_param_pickle["svc"]
x_scaler = model_param_pickle["x_scaler"]
param = model_param_pickle["param"]
## For writeup
img_path = sorted(glob.glob('../test_images/*.jpg'))[0]
img_RGB = mpimg.imread(img_path)
# if img_path.endswith('png'):
# img_RGB = img_RGB.astype(np.float32)*255
# img_RGB = cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB)
bbox_list = multiscale_search(img_RGB, svc, x_scaler, param)
false_pos_filter(img_RGB, bbox_list, save=True)
# pdb.set_trace()
# Heatmap history shared across video frames for temporal smoothing.
heat_list = []
def pipeline_video(img):
    """Per-frame pipeline: detect candidate boxes, then suppress false
    positives via a heatmap smoothed over the last frames (global heat_list).
    """
    global heat_list
    bbox_list = multiscale_search(img, svc, x_scaler, param)
    after_img, _, heat_list =false_pos_filter(img, bbox_list, threshold=1.5, heat_list=heat_list, smooth=6, save=False)
    return after_img
# white_output = 'result.mp4'
# clip1 = VideoFileClip('../test_video.mp4')
# white_clip = clip1.fl_image(pipeline_video) #NOTE: this function expects color images!!
# white_clip.write_videofile(white_output, audio=False)
white_output = 'project_video_result.mp4'
clip1 = VideoFileClip('../project_video.mp4')
white_clip = clip1.fl_image(pipeline_video) #NOTE: this function expects color images!!
white_clip.write_videofile(white_output, audio=False)
|
charliememory/AutonomousDriving
|
CarND-Vehicle-Detection/src/main.py
|
Python
|
gpl-3.0
| 4,240
|
#
# Advene: Annotate Digital Videos, Exchange on the NEt
# Copyright (C) 2018 Olivier Aubert <contact@olivieraubert.net>
#
# Advene is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Advene is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Advene; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# AdA RDF Exporter, based on WebAnnotation exporter
name="AdA RDF exporter"
import logging
logger = logging.getLogger(__name__)
from gettext import gettext as _
from collections import namedtuple
from urllib.parse import quote
from gi.repository import Gtk
import advene.core.config as config
import advene.util.helper as helper
from advene.plugins.webannotation_export import WebAnnotationExporter
from advene.gui.views.table import COLUMN_TYPE
from advene.gui.views.checker import FeatureChecker, register_checker, AnnotationTable
def register(controller=None):
    """Advene plugin entry point: register the exporter with the controller."""
    controller.register_exporter(AdARDFExporter)
    # We also register a checker component that checks the keyword
    # syntax, in GUI mode
    # FIXME: we depend on gui.views.checker which is loaded as a
    # plugin, so it may not be available at plugin load time
    return True
# Evolving/ContrastingAnnotationType markers
# Sentinels used inside keyword lists: "[TO]" links the values of an
# evolving sequence, "[VS]" the values of a contrasting sequence.
TO_KW = '[TO]'
VS_KW = '[VS]'
def keywords_to_struct(keywords, on_error=None):
    """Generator that outputs typed values from keyword lists.
    on_error allows to get error messages as callbacks
    type is either predefined, contrasting or evolving.
    """
    def report_error(msg):
        # Log the syntax error and forward it to the optional callback.
        logger.error(msg)
        if on_error is not None:
            on_error(msg)
    if not keywords:
        return
    TypedValues = namedtuple('TypedValue', ['type', 'values'])
    # prev accumulates the values of the current run; need_value is True
    # right after a [TO]/[VS] marker, when a plain value must follow.
    prev = None
    need_value = False
    while keywords:
        current = keywords.pop(0)
        if current in (TO_KW, VS_KW):
            # A marker must not follow another marker...
            if need_value:
                report_error("Syntax error: expecting a value, not %s keyword." % current)
                prev = None
                need_value = False
            # ...and needs a value on both sides.
            if prev is None:
                report_error("Syntax error: %s keyword should have a value before." % current)
                prev = None
                need_value = False
            elif not keywords:
                report_error("Syntax error: %s keyword should have a value after." % current)
                prev = None
                need_value = False
            else:
                # prev is guaranteed non-None in this branch.
                need_value = True
                if current == TO_KW:
                    if prev.type == "predefined":
                        # We may have accumulated predefined
                        # values. Keep the last one, but yield the
                        # other.
                        if len(prev.values) > 1:
                            yield TypedValues(type="predefined", values=prev.values[:-1])
                        prev = TypedValues(type="evolving", values=prev.values[-1:])
                    elif prev.type != "evolving":
                        report_error("Syntax error: mixed contrasting/evolving values in %s" % current)
                        prev = None
                        need_value = False
                elif current == VS_KW:
                    if prev.type == "predefined":
                        # We may have accumulated predefined
                        # values. Keep the last one, but yield the
                        # other.
                        if len(prev.values) > 1:
                            yield TypedValues(type="predefined", values=prev.values[:-1])
                        prev = TypedValues(type="contrasting", values=prev.values[-1:])
                    elif prev.type != "contrasting":
                        report_error("Syntax error: mixed contrasting/evolving values in %s" % current)
                        prev = None
                        need_value = False
                else:
                    report_error("This should never happen.")
        else:
            # Plain value: either extend the current run or start a new one.
            if prev:
                if need_value or prev.type == "predefined":
                    prev = TypedValues(type=prev.type, values=prev.values + [current])
                else:
                    # Change of sequence type.
                    yield prev
                    prev = TypedValues(type="predefined", values=[ current ])
            else:
                prev = TypedValues(type="predefined", values=[ current ])
            need_value = False
    # NOTE(review): prev may still be None here when the list ended on a
    # syntax error; consumers are expected to cope with a final None item.
    yield prev
class AdARDFExporter(WebAnnotationExporter):
    """WebAnnotation exporter specialised for the AdA ontology: enriches
    the standard jsonld serialization with ao:* properties and typed
    keyword bodies.
    """
    name = _("AdA RDF exporter")
    extension = 'ada.jsonld'
    def __init__(self, controller=None, source=None, callback=None):
        super().__init__(controller=controller, source=source, callback=callback)
        # Annotation types already reported as missing an ontology URI,
        # so the warning below is only emitted once per type.
        self.not_part_of_ontology = set()
    def annotation_uri(self, a, media_uri):
        """Build the URI of annotation ``a`` below the media URI."""
        return "%s/%s" % (media_uri, a.id)
    def annotation_jsonld(self, a, media_uri):
        """Serialize annotation ``a`` as jsonld, or return None when its
        type carries no ontology URI (such annotations are skipped).
        """
        # First check if it is part of the ontology schema
        type_uri = a.type.getMetaData(config.data.namespace, "ontology_uri")
        if not type_uri:
            # Report only once by type
            if a.type not in self.not_part_of_ontology:
                logger.warning(_("Cannot determine ontology URI for type %s"), self.controller.get_title(a.type))
                self.not_part_of_ontology.add(a.type)
            # Just ignore this annotation
            return None
        # Get standard WebAnnotation jsonld serialization
        data = super().annotation_jsonld(a, media_uri)
        # Enrich with AdA-specific properties
        value_type_mapping = {
            "evolving": "ao:EvolvingValuesAnnotationType",
            "contrasting": "ao:ContrastingValuesAnnotationType",
            "predefined": "ao:PredefinedValuesAnnotationType"
        }
        # Build body according to content type
        def new_body(btype=None):
            """Create a new body node
            """
            body = {
                "ao:annotationType": type_uri
            }
            if btype is not None:
                body['@type'] = btype
            return body
        if a.type.mimetype == 'text/x-advene-keyword-list':
            # Align a plain-text content mimetype with the type mimetype
            # before parsing it as a keyword list.
            if (a.content.mimetype != a.type.mimetype
                and a.content.mimetype == 'text/plain'):
                a.content.mimetype = a.type.mimetype
            keywords = a.content.parsed()
            def get_keyword_uri(kw):
                # NOTE(review): keywords.get(kw, 'ontology_uri') looks like a
                # (keyword, attribute) lookup on the parsed structure, not a
                # dict.get with a default - confirm against the parser API.
                uri = keywords.get(kw, 'ontology_uri')
                if uri is None:
                    # Generate a dummy URI
                    return f"http://www.advene.org/ns/_local/keyword/{quote(kw.replace(' ', '_'))}"
                return uri
            keyword_struct = list(keywords_to_struct(list(keywords)))
            bodies = []
            for typedvalues in keyword_struct:
                # keywords_to_struct may yield a trailing None on syntax errors.
                if typedvalues is not None:
                    body = new_body(value_type_mapping[typedvalues.type])
                    if typedvalues.type == "predefined":
                        if len(typedvalues.values) == 1:
                            # Single value. Let's try to get its numeric value
                            kw = typedvalues.values[0]
                            body['ao:annotationValue'] = get_keyword_uri(kw)
                            num_value = keywords.get(kw, 'numeric_value')
                            if num_value is not None:
                                body['ao:annotationNumericValue'] = num_value
                        else:
                            # Multiple values
                            body['ao:annotationValue'] = [ get_keyword_uri(kw) for kw in typedvalues.values ]
                    else:
                        # Generate a sequence for contrasting/evolving values.
                        body['ao:annotationValueSequence'] = { "@list": [
                            get_keyword_uri(kw) for kw in typedvalues.values
                        ] }
                        body['ao:annotationNumericValueSequence'] = { "@list": [
                            keywords.get(kw, 'numeric_value', -1) for kw in typedvalues.values
                        ] }
                    bodies.append(body)
            error = None
            if not bodies:
                # Could not parse correctly.
                error = "Could not parse keywords %s for annotation %s" % (keywords, a.uri)
                logger.warning(error)
                bodies = [ ]
            # Attach comment to the last body
            if keywords.get_comment() and bodies:
                bodies[-1]['rdfs:comment'] = keywords.get_comment()
            # Add textual body
            body = new_body(btype="oa:TextualBody")
            body['value'] = self.controller.get_title(a)
            if error:
                body['advene:ERROR'] = error
            bodies.append(body)
        else:
            body = new_body(btype="oa:TextualBody")
            # Default: use raw content data
            body['value'] = a.content.data
            # Special cases
            if a.type.id == 'ShotDuration':
                # Hardcoded case: duration in ms
                body['value'] = a.fragment.duration
            else:
                # If a representation is specified, then use it.
                rep = a.type.getMetaData(config.data.namespace, "representation")
                if rep:
                    body['value'] = self.controller.get_title(a)
            bodies = [ body ]
        # Single body stays an object; several bodies become a list.
        if len(bodies) == 1:
            data['body'] = bodies[0]
        else:
            data['body'] = bodies
        return data
    def export(self, filename=None):
        """Export the source; returns an error message string when the
        package carries no ontology URI metadata.
        """
        # Works whether source is a package or a type
        package = self.source.ownerPackage
        data = super().export(None)
        # Get the namespace from the package metadata
        ontology = package.getMetaData(config.data.namespace, "ontology_uri")
        if not ontology:
            return _("Cannot find the ontology URI. It should be defined as package metadata.")
        data['@context'].append({
            "ao": ontology,
            "ao:annotationType": { "@type": "@id" },
            "ao:annotationValue": { "@type": "@id" },
            "ao:annotationValueSequence": { "@type": "@id" }
        })
        return self.output(data, filename)
@register_checker
class AdAChecker(FeatureChecker):
    """Checker view listing annotations whose keyword content uses
    unspecified keywords or invalid [TO]/[VS] syntax."""
    name = "AdA syntax"
    description = _("For every annotation type that has predefined keywords, this table displays the annotations that contain unspecified keywords or invalid syntax. Update is not real-time, you need to manually update the view with the button below.")
    def build_widget(self):
        """Build the annotation table plus a manual "Update" button."""
        self.table = AnnotationTable(controller=self.controller, custom_data=lambda a: (str, ))
        column = self.table.columns['custom0']
        column.props.title = _("Error")
        self.widget = Gtk.VBox()
        b = Gtk.Button("Update")
        b.connect('clicked', lambda i: self.update_view())
        self.widget.pack_start(b, False, False, 0)
        self.widget.pack_start(self.table.widget, True, True, 0)
        return self.widget
    def update_model(self, package=None):
        # Do not update information live, it is too costly.
        pass
    def update_view(self):
        """Re-scan all annotation types and refresh the error table."""
        # Dict of errors indexed by annotation
        errors = {}
        def custom_data(a):
            if a is None:
                return (str, )
            else:
                return ("\n".join(errors.get(a, [])), )
        for at in self.controller.package.annotationTypes:
            completions = set(helper.get_type_predefined_completions(at))
            if completions:
                # There are completions. Check for every annotation if
                # they use a keyword not predefined.
                for a in at.annotations:
                    def add_error(msg):
                        errors.setdefault(a, []).append(msg)
                    keywords = a.content.parsed()
                    if len(keywords) == 0 and keywords.get_comment() == "" and len(a.content.data) > 0:
                        # There seems to be a content, but we could find no keyword and no comment.
                        add_error("Unparsable content")
                        continue
                    # Parse keywords to detect syntax errors
                    # (the generator reports errors through add_error).
                    for s in keywords_to_struct(list(keywords), add_error):
                        pass
        self.table.set_elements(list(errors.keys()), custom_data)
        self.table.model.set_sort_column_id(COLUMN_TYPE, Gtk.SortType.ASCENDING)
if __name__ == "__main__":
    # Let's do some tests. This will be moved to unit tests later on.
    samples = {
        "a1": 1,
        "a1,a2": 1,
        "a1,[TO],a2": 1,
        "a1,[VS],a2": 1,
        "a1,[TO],a2,[TO],a3": 1,
        "a1,[TO],a2,[TO],a3,a4": 2,
        "a1,[TO],a2,[VS],a3": 1, # Expecting syntax error
        "a1,a2,[TO],a3,a4,[VS],a5": 3
    }
    for sample in samples:
        parsed = list(keywords_to_struct(sample.split(",")))
        print(sample, "\n", parsed, "\n\n")
|
oaubert/advene
|
lib/advene/plugins/ada_rdf_export.py
|
Python
|
gpl-2.0
| 13,473
|
# -*- coding: utf-8 -*-
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Make the delete behaviour of Applicant.user and
    PaperApplication.applicant explicit (CASCADE on delete)."""
    dependencies = [
        ('cfp', '0004_paperapplication_duration'),
    ]
    operations = [
        migrations.AlterField(
            model_name='applicant',
            name='user',
            field=models.OneToOneField(related_name='applicant', to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='paperapplication',
            name='applicant',
            field=models.ForeignKey(related_name='applications', to='cfp.Applicant', on_delete=models.CASCADE),
            preserve_default=True,
        ),
    ]
|
WebCampZg/conference-web
|
cfp/migrations/0005_auto_20150319_0019.py
|
Python
|
bsd-3-clause
| 767
|
import abc
import json
from collections import defaultdict
from dataclasses import dataclass
from typing import (
Any,
Callable,
DefaultDict,
Generic,
Iterable,
List,
Optional,
overload,
Set,
Type,
TypeVar,
)
from google.appengine.ext import deferred
from google.appengine.ext import ndb
from backend.common.cache_clearing.get_affected_queries import TCacheKeyAndQuery
from backend.common.helpers.listify import delistify, listify
from backend.common.models.cached_model import CachedModel, TAffectedReferences
from backend.common.queries.database_query import CachedDatabaseQuery
TModel = TypeVar("TModel", bound=CachedModel)
@dataclass(frozen=True)
class TUpdatedModel(Generic[TModel]):
    """Immutable record describing one model write: the model itself,
    the set of attribute names that changed, and whether the model
    was newly created."""
    model: TModel
    updated_attrs: Set[str]
    is_new: bool
class ManipulatorBase(abc.ABC, Generic[TModel]):
    """Abstract base for datastore manipulators.

    Subclasses supply the merge logic (updateMerge) and cache-key
    computation (getCacheKeysAndQueries); this base provides the
    create/update/delete entry points, hook registration/dispatch and
    deferred cache clearing.
    """
    _post_delete_hooks: List[Callable[[List[TModel]], None]] = None # pyre-ignore[8]
    _post_update_hooks: List[ # pyre-ignore[8]
        Callable[[List[TUpdatedModel[TModel]]], None]
    ] = None
    def __init_subclass__(cls, *args, **kwargs):
        """
        This is a bit of python magic - we can't just initialize the variables to [] in their
        definitions above, because they simply get evaluated once at module import time. This
        has the effect that all manipulators end up -sharing- their callbacks! Not what we want!
        Instead, since we use python >= 3.6, we can use this __init_subclass__ hook, which
        makes specifying this sort of thing easier without needing to implement a full metaclass
        See: https://docs.python.org/3/reference/datamodel.html#object.__init_subclass__
        """
        super().__init_subclass__(*args, **kwargs)
        cls._post_delete_hooks = []
        cls._post_update_hooks = []
    @classmethod
    def register_post_delete_hook(
        cls, func: Callable[[List[TModel]], None]
    ) -> Callable[[List[TModel]], None]:
        # Decorator-style registration: returns func unchanged.
        cls._post_delete_hooks.append(func)
        return func
    @classmethod
    def register_post_update_hook(
        cls, func: Callable[[List[TUpdatedModel[TModel]]], None]
    ) -> Callable[[List[TUpdatedModel[TModel]]], None]:
        # Decorator-style registration: returns func unchanged.
        cls._post_update_hooks.append(func)
        return func
    @classmethod
    @abc.abstractmethod
    def updateMerge(
        cls, new_model: TModel, old_model: TModel, auto_union: bool
    ) -> TModel:
        """
        Child classes should implement this method with specific merging logic
        """
        ...
    """
    createOrUpdate is the main interface to a manipulator - given a singular/list of models from a caller
    it will either create it in the ndb or do read-modify-write on the existing version
    """
    @overload
    @classmethod
    def createOrUpdate(
        cls,
        new_models: TModel,
        auto_union: bool = True,
        run_post_update_hook: bool = True,
    ) -> TModel:
        ...
    @overload
    @classmethod
    def createOrUpdate(
        cls,
        new_models: List[TModel],
        auto_union: bool = True,
        run_post_update_hook: bool = True,
    ) -> List[TModel]:
        ...
    @classmethod
    def createOrUpdate(
        cls, new_models, auto_union=True, run_post_update_hook=True
    ) -> Any:
        # Merge with any existing versions, then write only the models whose
        # merge actually changed something (the _dirty flag).
        existing_or_new = listify(cls.findOrSpawn(new_models, auto_union))
        models_to_put = [model for model in existing_or_new if model._dirty]
        ndb.put_multi(models_to_put)
        cls._clearCache(existing_or_new)
        if run_post_update_hook:
            cls._run_post_update_hook(models_to_put)
        for model in existing_or_new:
            model._dirty = False
        return delistify(existing_or_new)
    """
    delete_keys / delete are the main interfaces to delete models + clear associated cache
    """
    @classmethod
    def delete_keys(cls, model_keys: Iterable[ndb.Key]) -> None:
        models = [model_key.get() for model_key in model_keys]
        cls.delete(models)
    @overload
    @classmethod
    def delete(self, models: TModel, run_post_delete_hook=True) -> None:
        ...
    @overload
    @classmethod
    def delete(self, models: List[TModel], run_post_delete_hook=True) -> None:
        ...
    @classmethod
    def delete(self, models, run_post_delete_hook=True) -> None:
        # NOTE(review): this is a classmethod, so `self` is actually the
        # class here; consider renaming the parameter to `cls` for clarity.
        models = list(filter(None, listify(models)))
        keys = [model.key for model in models]
        ndb.delete_multi(keys)
        for model in models:
            model._dirty = True
            self._computeAndSaveAffectedReferences(model)
        if run_post_delete_hook:
            self._run_post_delete_hook(models)
        self._clearCache(models)
    """
    findOrSpawn will take either a singular model or a list of models and merge them
    with the (optionally present) existing versions
    """
    @overload
    @classmethod
    def findOrSpawn(cls, new_models: TModel, auto_union: bool = True) -> TModel:
        ...
    @overload
    @classmethod
    def findOrSpawn(
        cls, new_models: List[TModel], auto_union: bool = True
    ) -> List[TModel]:
        ...
    @classmethod
    def findOrSpawn(cls, new_models, auto_union=True) -> Any:
        new_models = listify(new_models)
        # Bypass ndb caches so we merge against the authoritative datastore copy.
        old_models = ndb.get_multi(
            [model.key for model in new_models], use_cache=False, use_memcache=False
        )
        updated_models = [
            cls.updateMergeBase(new_model, old_model, auto_union)
            for (new_model, old_model) in zip(new_models, old_models)
        ]
        return delistify(updated_models)
    @classmethod
    def updateMergeBase(
        cls, new_model: TModel, old_model: Optional[TModel], auto_union
    ) -> TModel:
        """
        Given an "old" and a "new" model object, replace the fields in the
        "old" one that are present in the "new" one, but keep fields from
        the "old" one that are null or the empty list in the "new" one.
        """
        if old_model is None:
            # No existing version: the new model will be created as-is.
            new_model._dirty = True
            new_model._is_new = True
            cls._computeAndSaveAffectedReferences(new_model)
            return new_model
        cls._computeAndSaveAffectedReferences(old_model, new_model)
        return cls.updateMerge(new_model, old_model, auto_union)
    @classmethod
    def mergeModels(
        self,
        new_models: List[TModel],
        old_models: List[TModel],
        auto_union: bool = True,
    ) -> List[TModel]:
        """
        Returns a list of models containing the union of new_models and old_models.
        If a model with the same key is in both input lists, the new_model is merged with the old_model.
        """
        # NOTE(review): classmethod with `self` naming again - `self` is the class.
        old_models_by_key = {}
        untouched_old_keys = set()
        for model in old_models:
            model_key = model.key.id()
            old_models_by_key[model_key] = model
            untouched_old_keys.add(model_key)
        merged_models: List[TModel] = []
        for model in new_models:
            model_key = model.key.id()
            if model_key in old_models_by_key:
                merged_models.append(
                    self.updateMergeBase(
                        model, old_models_by_key[model_key], auto_union=auto_union
                    )
                )
                untouched_old_keys.remove(model_key)
            else:
                merged_models.append(model)
        # Old models never mentioned in new_models are carried over unchanged.
        for untouched_key in untouched_old_keys:
            merged_models.append(old_models_by_key[untouched_key])
        return merged_models
    @classmethod
    def _computeAndSaveAffectedReferences(
        cls, old_model: TModel, new_model: Optional[TModel] = None
    ) -> None:
        """
        This method is called whenever a model may potentially be created or updated.
        Stores the affected references in the original instance of the model.
        """
        for attr in old_model._affected_references.keys():
            for a in [old_model, new_model] if new_model is not None else [old_model]:
                val = listify(getattr(a, attr))
                old_model._affected_references[attr] = old_model._affected_references[
                    attr
                ].union(val)
    @classmethod
    def _run_post_delete_hook(cls, models: List[TModel]) -> None:
        """
        Asynchronously runs the manipulator's post delete hooks if available.
        """
        if not models:
            return
        for hook in cls._post_delete_hooks:
            deferred.defer(
                hook,
                models,
                _queue="post-update-hooks",
                _target="py3-tasks-io",
                _url=f"/_ah/queue/deferred_{cls.__name__}_runPostDeleteHook",
            )
    @classmethod
    def _run_post_update_hook(cls, models: List[TModel]) -> None:
        """
        Asynchronously runs the manipulator's post update hooks if available.
        """
        if not models:
            return
        updated_models = [
            TUpdatedModel(
                model=model,
                updated_attrs=model._updated_attrs or set(),
                is_new=model._is_new,
            )
            for model in models
        ]
        for hook in cls._post_update_hooks:
            deferred.defer(
                hook,
                updated_models,
                _queue="post-update-hooks",
                _target="py3-tasks-io",
                _url=f"/_ah/queue/deferred_{cls.__name__}_runPostUpdateHook",
            )
    """
    Helpers for subclasses
    """
    @staticmethod
    def _update_attrs(new_model: TModel, old_model: TModel, auto_union: bool) -> None:
        """
        Given an "old" and a "new" model, replace the fields in the
        "old" that are present in the "new", but keep fields from
        the "old" that are null in the "new".
        """
        updated_attrs: Set[str] = set()
        for attr in old_model._mutable_attrs:
            if (
                getattr(new_model, attr, None) is not None
                or attr in old_model._allow_none_attrs
            ):
                if getattr(new_model, attr) != getattr(old_model, attr):
                    setattr(old_model, attr, getattr(new_model, attr))
                    updated_attrs.add(attr)
                    old_model._dirty = True
            # The literal string "None" acts as an explicit "clear this field" sentinel.
            if getattr(new_model, attr, None) == "None":
                if getattr(old_model, attr, None) is not None:
                    setattr(old_model, attr, None)
                    updated_attrs.add(attr)
                    old_model._dirty = True
        for attr in old_model._json_attrs:
            if getattr(new_model, attr) is not None:
                # Compare parsed JSON so formatting-only differences don't count.
                if (getattr(old_model, attr) is None) or (
                    json.loads(getattr(new_model, attr))
                    != json.loads(getattr(old_model, attr))
                ):
                    setattr(old_model, attr, getattr(new_model, attr))
                    # changinging 'attr_json' doesn't clear lazy-loaded '_attr'
                    setattr(old_model, "_{}".format(attr.replace("_json", "")), None)
                    updated_attrs.add(attr)
                    old_model._dirty = True
        list_attrs = old_model._list_attrs
        if not auto_union:
            list_attrs = list_attrs.union(old_model._auto_union_attrs)
        for attr in list_attrs:
            if len(getattr(new_model, attr)) > 0 or not auto_union:
                if getattr(new_model, attr) != getattr(old_model, attr):
                    setattr(old_model, attr, getattr(new_model, attr))
                    updated_attrs.add(attr)
                    old_model._dirty = True
        for attr in old_model._auto_union_attrs if auto_union else {}:
            old_set = set(getattr(old_model, attr))
            new_set = set(getattr(new_model, attr))
            unioned = old_set.union(new_set)
            if unioned != old_set:
                setattr(old_model, attr, list(unioned))
                updated_attrs.add(attr)
                old_model._dirty = True
        old_model._updated_attrs = updated_attrs
    """
    cache clearing hook
    """
    @classmethod
    def _clearCache(cls, models: Iterable[TModel]) -> None:
        """
        Make deferred calls to clear caches
        Needs to save _affected_references and the dirty flag
        """
        all_affected_references: List[TAffectedReferences] = []
        for model in models:
            if model._dirty and model._affected_references:
                all_affected_references.append(model._affected_references)
        if all_affected_references:
            deferred.defer(
                cls._clearCacheDeferred,
                all_affected_references,
                _queue="cache-clearing",
                # this does not exist in Cloud Tasks
                # _transactional=ndb.in_transaction(),
                _target="py3-tasks-io",
                _url=f"/_ah/queue/deferred_{cls.__name__}_clearCache",
            )
    @classmethod
    def _clearCacheDeferred(
        cls, all_affected_references: List[TAffectedReferences]
    ) -> None:
        # Group cache keys by query type so each query clears its keys in one call.
        to_clear: DefaultDict[Type[CachedDatabaseQuery], Set[str]] = defaultdict(set)
        for affected_references in all_affected_references:
            for cache_key, query in cls.getCacheKeysAndQueries(affected_references):
                to_clear[query].add(cache_key)
        for query, cache_keys in to_clear.items():
            query.delete_cache_multi(cache_keys)
    @classmethod
    @abc.abstractmethod
    def getCacheKeysAndQueries(
        cls, affected_refs: TAffectedReferences
    ) -> List[TCacheKeyAndQuery]:
        """
        Child classes should replace method with appropriate call to CacheClearer.
        """
        ...
|
the-blue-alliance/the-blue-alliance
|
src/backend/common/manipulators/manipulator_base.py
|
Python
|
mit
| 13,772
|
#!/usr/bin/env python3
import sys
import os
import shutil
import unittest
import git_test_fixture
import git_wrapper
import path_utils
import git_pull
import git_push
import git_fetch
import git_remote
import mvtools_test_fixture
class GitVisitorBackendsTest(unittest.TestCase):
    """Integration tests for the git_pull/git_push/git_fetch/git_remote helpers.

    Each test drives real git repositories created under a temporary folder
    (see delegate_setUp): 'second' is a bare repo acting as the shared remote
    of clones 'first' and 'third'.
    """

    def makeFilename(self):
        # Unique filename per call, driven by a per-test counter.
        self.internal_counter += 1
        filename = "testfile_%s.txt" % self.internal_counter
        return filename

    def makeContent(self):
        # Unique file content per call, driven by the same counter.
        self.internal_counter += 1
        content = "rubbish_content_%s" % self.internal_counter
        return content

    def setUp(self):
        # Delegate so that a failed setup still tears down the temp folder
        # before failing the test.
        v, r = self.delegate_setUp()
        if not v:
            self.tearDown()
            self.fail(r)

    def delegate_setUp(self):
        """Create the test folder and the three linked repos; returns (ok, msg)."""
        self.internal_counter = 0
        v, r = mvtools_test_fixture.makeAndGetTestFolder("git_visitor_backends_test")
        if not v:
            return v, r
        self.test_base_dir = r[0] # base test folder. shared amongst other test cases
        self.test_dir = r[1] # test folder, specific for each test case (i.e. one level above self.test_base_dir)
        # test repos paths
        self.first_repo = path_utils.concat_path(self.test_dir, "first")
        self.second_repo = path_utils.concat_path(self.test_dir, "second")
        self.third_repo = path_utils.concat_path(self.test_dir, "third")
        # creates test repos; "second" is bare and serves as the remote
        v, r = git_wrapper.init(self.test_dir, "second", True)
        if not v:
            return v, r
        v, r = git_wrapper.clone(self.second_repo, self.first_repo, "origin")
        if not v:
            return v, r
        v, r = git_wrapper.clone(self.second_repo, self.third_repo, "origin")
        if not v:
            return v, r
        # create a file with rubbish on first, and push it to its remote (second)
        self.first_repo_first_file = self.makeFilename()
        v, r = git_test_fixture.git_createAndCommit(self.first_repo, self.first_repo_first_file, self.makeContent(), "commit_msg")
        if not v:
            return v, r
        v, r = git_wrapper.push(self.first_repo, "origin", "master")
        if not v:
            return v, r
        # pull changes from first into third, through second
        v, r = git_wrapper.pull(self.third_repo, "origin", "master")
        if not v:
            return v, r
        return True, ""

    def tearDown(self):
        shutil.rmtree(self.test_base_dir)

    def testPull(self):
        """do_pull must bring a file pushed from 'third' into 'first'."""
        # setup
        newfile = self.makeFilename()
        newfile_r1 = path_utils.concat_path(self.first_repo, newfile)
        v, r = git_test_fixture.git_createAndCommit(self.third_repo, newfile, self.makeContent(), "commit_msg")
        if not v:
            self.fail(r)
        v, r = git_wrapper.push(self.third_repo, "origin", "master")
        if not v:
            self.fail(r)
        # test
        remotes = {}
        remotes["origin"] = { "push": self.second_repo, "fetch": self.second_repo }
        branches = ["master"]
        # file must not pre-exist
        self.assertFalse(os.path.exists( newfile_r1 ))
        v, r = git_pull.do_pull(self.first_repo, remotes, branches)
        # must exist now because it was just pulled
        self.assertTrue(os.path.exists( newfile_r1 ))
        # operation must have succeded without any failures
        # NOTE(review): unlike the git_wrapper (ok, msg) convention, do_pull
        # apparently returns a falsy value on success - confirm in git_pull.
        self.assertFalse(v)

    def testPush(self):
        """do_push from 'third' must make the file pullable into 'first'."""
        # setup
        newfile = self.makeFilename()
        newfile_r1 = path_utils.concat_path(self.first_repo, newfile)
        v, r = git_test_fixture.git_createAndCommit(self.third_repo, newfile, self.makeContent(), "commit_msg")
        if not v:
            self.fail(r)
        # test
        remotes = {}
        remotes["origin"] = { "push": self.second_repo, "fetch": self.second_repo }
        branches = ["master"]
        # file must not pre-exist
        self.assertFalse(os.path.exists( newfile_r1 ))
        v, r = git_push.do_push(self.third_repo, remotes, branches)
        # operation must have succeded without any failures
        self.assertFalse(v)
        # pull the file into first we just pushed from third
        v, r = git_wrapper.pull(self.first_repo, "origin", "master")
        if not v:
            self.fail(r)
        # must exist now because it was just pulled
        self.assertTrue(os.path.exists( newfile_r1 ))

    def testFetch(self):
        """do_fetch plus a manual merge must materialize the remote file."""
        # setup
        newfile = self.makeFilename()
        newfile_r1 = path_utils.concat_path(self.first_repo, newfile)
        v, r = git_test_fixture.git_createAndCommit(self.third_repo, newfile, self.makeContent(), "commit_msg")
        if not v:
            self.fail(r)
        v, r = git_wrapper.push(self.third_repo, "origin", "master")
        if not v:
            self.fail(r)
        # test
        remotes = {}
        remotes["origin"] = { "push": self.second_repo, "fetch": self.second_repo }
        branches = ["master"]
        # file must not pre-exist
        self.assertFalse(os.path.exists( newfile_r1 ))
        v, r = git_fetch.do_fetch(self.first_repo, remotes)
        # operation must have succeded without any failures
        self.assertFalse(v)
        # merges after the fetch
        v, r = git_wrapper.merge(self.first_repo, "origin", "master")
        if not v:
            self.fail(r)
        # must exist now because it was just pulled
        self.assertTrue(os.path.exists( newfile_r1 ))

    def testRemote(self):
        """remote_change_url must repoint 'first' at a new bare remote."""
        # setup
        remote = "origin"
        operation = ""
        fourth_repo = path_utils.concat_path(self.test_dir, "fourth")
        fifth_repo = path_utils.concat_path(self.test_dir, "fifth")
        v, r = git_wrapper.init(self.test_dir, "fourth", True)
        if not v:
            self.fail(r)
        # fifth repo must not pre-exist
        self.assertFalse(os.path.exists(fifth_repo))
        v, r = git_remote.remote_change_url(self.first_repo, remote, operation, fourth_repo)
        # operation must have succeded without any failures
        self.assertFalse(v)
        # push to new remote
        v, r = git_wrapper.push(self.first_repo, "origin", "master")
        if not v:
            self.fail(r)
        # clone fourth into fifth to check for repo1's contents
        v, r = git_wrapper.clone(fourth_repo, fifth_repo, "origin")
        if not v:
            self.fail(r)
        # file from repo1 must exist inside fifth repo
        self.assertTrue(os.path.exists( path_utils.concat_path(fifth_repo, self.first_repo_first_file) ) )
if __name__ == '__main__':
unittest.main()
|
mvendra/mvtools
|
tests/git_visitor_backends_test.py
|
Python
|
mit
| 6,542
|
import setuptools

# Package metadata for django-markup.
_SETUP_KWARGS = dict(
    name='django-markup',
    version='1.5.8',
    packages=setuptools.find_packages(),
)

setuptools.setup(**_SETUP_KWARGS)
|
ixc/django-markup
|
setup.py
|
Python
|
bsd-3-clause
| 126
|
# -*- coding: utf-8 -*-
from gluon import current
def config(settings):
    """
    Template settings for Syria
    - designed to be used in a Cascade with an application template
    """
    #T = current.T

    # Pre-Populate the Syria locations template
    settings.base.prepopulate.append("locations/SY")
    # Restrict GIS to Syria
    settings.gis.countries.append("SY")

    # -------------------------------------------------------------------------
    # L10n (Localization) settings
    l10n = settings.L10n
    l10n.languages["ar"] = "Arabic"
    # Default Language (put this in custom template if-required)
    #settings.L10n.default_language = "ar"
    # Default timezone for users
    l10n.timezone = "Asia/Damascus"
    # Default Country Code for telephone numbers
    l10n.default_country_code = 963

    # Finance settings
    fin = settings.fin
    fin.currencies["SYP"] = "Syrian Pound"
    fin.currency_default = "SYP"
# END =========================================================================
|
flavour/eden
|
modules/templates/locations/SY/config.py
|
Python
|
mit
| 1,007
|
"""Support for Generic Modbus Thermostats."""
from __future__ import annotations
from datetime import timedelta
import logging
import struct
from typing import Any
from pymodbus.exceptions import ConnectionException, ModbusException
from pymodbus.pdu import ExceptionResponse
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
HVAC_MODE_AUTO,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import (
CONF_NAME,
CONF_OFFSET,
CONF_SCAN_INTERVAL,
CONF_SLAVE,
CONF_STRUCTURE,
CONF_TEMPERATURE_UNIT,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.helpers.typing import (
ConfigType,
DiscoveryInfoType,
HomeAssistantType,
)
from .const import (
ATTR_TEMPERATURE,
CALL_TYPE_REGISTER_HOLDING,
CALL_TYPE_REGISTER_INPUT,
CONF_CLIMATES,
CONF_CURRENT_TEMP,
CONF_CURRENT_TEMP_REGISTER_TYPE,
CONF_DATA_COUNT,
CONF_DATA_TYPE,
CONF_MAX_TEMP,
CONF_MIN_TEMP,
CONF_PRECISION,
CONF_SCALE,
CONF_STEP,
CONF_TARGET_TEMP,
DATA_TYPE_CUSTOM,
DEFAULT_STRUCT_FORMAT,
MODBUS_DOMAIN,
)
from .modbus import ModbusHub
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(
    hass: HomeAssistantType,
    config: ConfigType,
    async_add_entities,
    discovery_info: DiscoveryInfoType | None = None,
):
    """Read configuration and create Modbus climate.

    Entities are created only via discovery from the modbus integration;
    entries with an invalid struct configuration are logged and skipped.
    """
    if discovery_info is None:
        return
    entities = []
    for entity in discovery_info[CONF_CLIMATES]:
        hub: ModbusHub = hass.data[MODBUS_DOMAIN][discovery_info[CONF_NAME]]
        count = entity[CONF_DATA_COUNT]
        data_type = entity[CONF_DATA_TYPE]
        name = entity[CONF_NAME]
        structure = entity[CONF_STRUCTURE]
        # For non-custom data types, derive the (big-endian) struct format
        # from the data type / register count lookup table.
        if data_type != DATA_TYPE_CUSTOM:
            try:
                structure = f">{DEFAULT_STRUCT_FORMAT[data_type][count]}"
            except KeyError:
                _LOGGER.error(
                    "Climate %s: Unable to find a data type matching count value %s, try a custom type",
                    name,
                    count,
                )
                continue
        # Validate the format string itself (also covers custom formats).
        try:
            size = struct.calcsize(structure)
        except struct.error as err:
            _LOGGER.error("Error in sensor %s structure: %s", name, err)
            continue
        # Each Modbus register is one 16-bit word, i.e. 2 bytes.
        if count * 2 != size:
            _LOGGER.error(
                "Structure size (%d bytes) mismatch registers count (%d words)",
                size,
                count,
            )
            continue
        # Store the resolved format so the entity can decode register reads.
        entity[CONF_STRUCTURE] = structure
        entities.append(ModbusThermostat(hub, entity))
    async_add_entities(entities)
class ModbusThermostat(ClimateEntity):
    """Representation of a Modbus Thermostat.

    Reads the current temperature and the setpoint from Modbus registers on
    a fixed scan interval and writes the setpoint back through the hub.
    """

    def __init__(
        self,
        hub: ModbusHub,
        config: dict[str, Any],
    ):
        """Initialize the modbus thermostat."""
        self._hub: ModbusHub = hub
        self._name = config[CONF_NAME]
        self._slave = config.get(CONF_SLAVE)
        # Holding register holding the setpoint.
        self._target_temperature_register = config[CONF_TARGET_TEMP]
        # Register with the measured temperature; its type (holding/input)
        # is configured separately below.
        self._current_temperature_register = config[CONF_CURRENT_TEMP]
        self._current_temperature_register_type = config[
            CONF_CURRENT_TEMP_REGISTER_TYPE
        ]
        self._target_temperature = None
        self._current_temperature = None
        self._data_type = config[CONF_DATA_TYPE]
        # struct format used to decode register words; validated during
        # platform setup.
        self._structure = config[CONF_STRUCTURE]
        self._count = config[CONF_DATA_COUNT]
        self._precision = config[CONF_PRECISION]
        self._scale = config[CONF_SCALE]
        self._scan_interval = timedelta(seconds=config[CONF_SCAN_INTERVAL])
        self._offset = config[CONF_OFFSET]
        self._unit = config[CONF_TEMPERATURE_UNIT]
        self._max_temp = config[CONF_MAX_TEMP]
        self._min_temp = config[CONF_MIN_TEMP]
        self._temp_step = config[CONF_STEP]
        self._available = True

    async def async_added_to_hass(self):
        """Handle entity which will be added."""
        # Poll the device on our own timer instead of HA's polling.
        async_track_time_interval(
            self.hass, lambda arg: self._update(), self._scan_interval
        )

    @property
    def should_poll(self):
        """Return True if entity has to be polled for state.

        False if entity pushes its state to HA.
        """
        # Handle polling directly in this entity
        return False

    @property
    def supported_features(self):
        """Return the list of supported features."""
        return SUPPORT_TARGET_TEMPERATURE

    @property
    def hvac_mode(self):
        """Return the current HVAC mode."""
        return HVAC_MODE_AUTO

    @property
    def hvac_modes(self):
        """Return the possible HVAC modes."""
        return [HVAC_MODE_AUTO]

    def set_hvac_mode(self, hvac_mode: str) -> None:
        """Set new target hvac mode."""
        # Home Assistant expects this method.
        # We'll keep it here to avoid getting exceptions.

    @property
    def name(self):
        """Return the name of the climate device."""
        return self._name

    @property
    def current_temperature(self):
        """Return the current temperature."""
        return self._current_temperature

    @property
    def target_temperature(self):
        """Return the target temperature."""
        return self._target_temperature

    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        return TEMP_FAHRENHEIT if self._unit == "F" else TEMP_CELSIUS

    @property
    def min_temp(self):
        """Return the minimum temperature."""
        return self._min_temp

    @property
    def max_temp(self):
        """Return the maximum temperature."""
        return self._max_temp

    @property
    def target_temperature_step(self):
        """Return the supported step of target temperature."""
        return self._temp_step

    def set_temperature(self, **kwargs):
        """Set new target temperature."""
        if ATTR_TEMPERATURE not in kwargs:
            return
        # Invert scale/offset to convert the display value back to a raw
        # register value.
        target_temperature = int(
            (kwargs.get(ATTR_TEMPERATURE) - self._offset) / self._scale
        )
        byte_string = struct.pack(self._structure, target_temperature)
        # NOTE(review): only the first 16-bit word of the packed value is
        # written, even when the configured structure spans more than one
        # register - confirm this is intended for multi-register data types.
        register_value = struct.unpack(">h", byte_string[0:2])[0]
        self._write_register(self._target_temperature_register, register_value)
        self._update()

    @property
    def available(self) -> bool:
        """Return True if entity is available."""
        return self._available

    def _update(self):
        """Update Target & Current Temperature."""
        self._target_temperature = self._read_register(
            CALL_TYPE_REGISTER_HOLDING, self._target_temperature_register
        )
        self._current_temperature = self._read_register(
            self._current_temperature_register_type, self._current_temperature_register
        )
        self.schedule_update_ha_state()

    def _read_register(self, register_type, register) -> float | None:
        """Read register using the Modbus hub slave.

        Returns the scaled/offset value rounded to the configured precision,
        -1 if the decoded result is not a single number, or None (and marks
        the entity unavailable) on communication errors.
        """
        try:
            if register_type == CALL_TYPE_REGISTER_INPUT:
                result = self._hub.read_input_registers(
                    self._slave, register, self._count
                )
            else:
                result = self._hub.read_holding_registers(
                    self._slave, register, self._count
                )
        except ConnectionException:
            self._available = False
            return
        if isinstance(result, (ModbusException, ExceptionResponse)):
            self._available = False
            return
        # Registers are 16-bit big-endian words; concatenate then decode
        # with the configured struct format.
        byte_string = b"".join(
            [x.to_bytes(2, byteorder="big") for x in result.registers]
        )
        val = struct.unpack(self._structure, byte_string)
        if len(val) != 1 or not isinstance(val[0], (float, int)):
            _LOGGER.error(
                "Unable to parse result as a single int or float value; adjust your configuration. Result: %s",
                str(val),
            )
            return -1
        val = val[0]
        register_value = format(
            (self._scale * val) + self._offset, f".{self._precision}f"
        )
        register_value = float(register_value)
        self._available = True
        return register_value

    def _write_register(self, register, value):
        """Write holding register using the Modbus hub slave."""
        try:
            self._hub.write_registers(self._slave, register, value)
        except ConnectionException:
            self._available = False
            return
        self._available = True
|
w1ll1am23/home-assistant
|
homeassistant/components/modbus/climate.py
|
Python
|
apache-2.0
| 8,738
|
from Queue.Queue import Queue

if __name__ == '__main__':
    # Exercise the queue: after each insertion, show the front and rear
    # elements (front stays "first"; rear tracks the newest item).
    q = Queue()
    for item in ("first", "second", "third"):
        q.en_queue(item)
        print(q.queue_front())
        print(q.queue_rear())
|
martindavid/code-sandbox
|
algorithm/COMP90038/queue_test.py
|
Python
|
mit
| 305
|
import six
from unittest import TestCase
from dark.reads import Read
from dark.local_align import LocalAlignment
class TestLocalAlign(TestCase):
    """
    Test the LocalAlignment class.
    With match +1, mismatch -1, gap open -1, gap extend -1 and
    gap extend decay 0.0.
    """
    def testPositiveMismatch(self):
        """
        If the mismatch value passed is positive, an exception
        must be raised.
        """
        seq1 = Read('seq1', 'a')
        seq2 = Read('seq2', 'a')
        six.assertRaisesRegex(self, ValueError, 'Mismatch must be negative',
                              LocalAlignment, seq1, seq2, mismatch=3)

    def testZeroMismatch(self):
        """
        If the mismatch value passed is zero, an exception
        must be raised.
        """
        seq1 = Read('seq1', 'a')
        seq2 = Read('seq2', 'a')
        six.assertRaisesRegex(self, ValueError, 'Mismatch must be negative',
                              LocalAlignment, seq1, seq2, mismatch=0)

    def testPositiveGap(self):
        """
        If the gap value passed is positive, an exception
        must be raised.
        """
        seq1 = Read('seq1', 'a')
        seq2 = Read('seq2', 'a')
        six.assertRaisesRegex(self, ValueError, 'Gap must be negative',
                              LocalAlignment, seq1, seq2, gap=3)

    def testZeroGap(self):
        """
        If the gap value passed is zero, an exception
        must be raised.
        """
        seq1 = Read('seq1', 'a')
        seq2 = Read('seq2', 'a')
        six.assertRaisesRegex(self, ValueError, 'Gap must be negative',
                              LocalAlignment, seq1, seq2, gap=0)

    def testPositiveGapExtend(self):
        """
        If the gap extend value passed is positive, an exception
        must be raised.
        """
        seq1 = Read('seq1', 'a')
        seq2 = Read('seq2', 'a')
        six.assertRaisesRegex(self, ValueError,
                              'Gap extension penalty cannot be positive',
                              LocalAlignment, seq1, seq2, gapExtend=3)

    def testFirstSequenceEmpty(self):
        """
        If the first sequence passed is empty, an exception must be raised.
        """
        seq1 = Read('seq1', '')
        seq2 = Read('seq2', 'agtcagtcagtc')
        six.assertRaisesRegex(self, ValueError, 'Empty sequence: seq1',
                              LocalAlignment, seq1, seq2)

    def testSecondSequenceEmpty(self):
        """
        If the second sequence passed is empty, an exception must be raised.
        """
        seq1 = Read('seq1', 'agtcagtcagtc')
        seq2 = Read('seq2', '')
        six.assertRaisesRegex(self, ValueError, 'Empty sequence: seq2',
                              LocalAlignment, seq1, seq2)

    def testBothSequencesEmpty(self):
        """
        If two empty sequences are passed, an exception must be raised.
        """
        seq1 = Read('seq1', '')
        seq2 = Read('seq2', '')
        six.assertRaisesRegex(self, ValueError, 'Empty sequence: seq1',
                              LocalAlignment, seq1, seq2)

    def testGapAtStartOfSeq1(self):
        """
        An unmatched leading character in seq2 must be excluded from the
        aligned region.
        """
        seq1 = Read('seq1', 'gaatcg')
        seq2 = Read('seq2', 'cgaatcg')
        align = LocalAlignment(seq1, seq2)
        result = align.createAlignment(resultFormat=str)
        alignment = ('\nCigar string of aligned region: 6=\n'
                     'seq1 Match start: 1 Match end: 6\n'
                     'seq2 Match start: 2 Match end: 7\n'
                     'seq1 1 GAATCG 6\n'
                     '       ||||||\n'
                     'seq2 2 GAATCG 7')
        self.assertEqual(result, alignment)

    def testGapAtStartOfSeq2(self):
        """
        An unmatched leading character in seq1 must be excluded from the
        aligned region.
        """
        seq1 = Read('seq1', 'cgaatcg')
        seq2 = Read('seq2', 'gaatcg')
        align = LocalAlignment(seq1, seq2)
        result = align.createAlignment(resultFormat=str)
        alignment = ('\nCigar string of aligned region: 6=\n'
                     'seq1 Match start: 2 Match end: 7\n'
                     'seq2 Match start: 1 Match end: 6\n'
                     'seq1 2 GAATCG 7\n'
                     '       ||||||\n'
                     'seq2 1 GAATCG 6')
        self.assertEqual(result, alignment)

    def testGapAtEndOfSeq1(self):
        """
        An unmatched trailing character in seq2 must be excluded from the
        aligned region.
        """
        seq1 = Read('seq1', 'cgaatc')
        seq2 = Read('seq2', 'cgaatcg')
        align = LocalAlignment(seq1, seq2)
        result = align.createAlignment(resultFormat=str)
        alignment = ('\nCigar string of aligned region: 6=\n'
                     'seq1 Match start: 1 Match end: 6\n'
                     'seq2 Match start: 1 Match end: 6\n'
                     'seq1 1 CGAATC 6\n'
                     '       ||||||\n'
                     'seq2 1 CGAATC 6')
        self.assertEqual(result, alignment)

    def testGapAtEndOfSeq2(self):
        """
        An unmatched trailing character in seq1 must be excluded from the
        aligned region.
        """
        seq1 = Read('seq1', 'cgaatcg')
        seq2 = Read('seq2', 'cgaatc')
        align = LocalAlignment(seq1, seq2)
        result = align.createAlignment(resultFormat=str)
        alignment = ('\nCigar string of aligned region: 6=\n'
                     'seq1 Match start: 1 Match end: 6\n'
                     'seq2 Match start: 1 Match end: 6\n'
                     'seq1 1 CGAATC 6\n'
                     '       ||||||\n'
                     'seq2 1 CGAATC 6')
        self.assertEqual(result, alignment)

    def testGapAtBothEndsOfSeq1(self):
        """
        Unmatched characters at both ends of seq2 must be excluded from the
        aligned region.
        """
        seq1 = Read('seq1', 'gaatc')
        seq2 = Read('seq2', 'cgaatcg')
        align = LocalAlignment(seq1, seq2)
        result = align.createAlignment(resultFormat=str)
        alignment = ('\nCigar string of aligned region: 5=\n'
                     'seq1 Match start: 1 Match end: 5\n'
                     'seq2 Match start: 2 Match end: 6\n'
                     'seq1 1 GAATC 5\n'
                     '       |||||\n'
                     'seq2 2 GAATC 6')
        self.assertEqual(result, alignment)

    def testGapAtBothEndsOfSeq2(self):
        """
        Unmatched characters at both ends of seq1 must be excluded from the
        aligned region.
        """
        seq1 = Read('seq1', 'cgaatcg')
        seq2 = Read('seq2', 'gaatc')
        # Fixed: the alignment was previously (redundantly) created and
        # formatted twice here; once is sufficient.
        align = LocalAlignment(seq1, seq2)
        result = align.createAlignment(resultFormat=str)
        alignment = ('\nCigar string of aligned region: 5=\n'
                     'seq1 Match start: 2 Match end: 6\n'
                     'seq2 Match start: 1 Match end: 5\n'
                     'seq1 2 GAATC 6\n'
                     '       |||||\n'
                     'seq2 1 GAATC 5')
        self.assertEqual(result, alignment)

    def testAlignmentWithGapInMiddle(self):
        """
        A deletion inside the aligned region must appear as a gap in seq2
        and in the CIGAR string.
        """
        seq1 = Read('seq1', 'agtcagtcagtc')
        seq2 = Read('seq2', 'cgaatcg')
        align = LocalAlignment(seq1, seq2)
        result = align.createAlignment(resultFormat=str)
        alignment = ('\nCigar string of aligned region: 2=1D1=\n'
                     'seq1 Match start: 7 Match end: 10\n'
                     'seq2 Match start: 5 Match end: 7\n'
                     'seq1 7 TCAG 10\n'
                     '       || |\n'
                     'seq2 5 TC-G 7')
        self.assertEqual(result, alignment)

    def testTwoEqualSequences(self):
        """
        When two identical sequences are given, the result should
        show that the sequences completely match.
        """
        seq1 = Read('seq1', 'cgaatcg')
        seq2 = Read('seq2', 'cgaatcg')
        align = LocalAlignment(seq1, seq2)
        result = align.createAlignment(resultFormat=str)
        alignment = ('\nCigar string of aligned region: 7=\n'
                     'seq1 Match start: 1 Match end: 7\n'
                     'seq2 Match start: 1 Match end: 7\n'
                     'seq1 1 CGAATCG 7\n'
                     '       |||||||\n'
                     'seq2 1 CGAATCG 7')
        self.assertEqual(result, alignment)

    def testTwoCompletelyDifferentSequences(self):
        """
        When two completely different sequences are given, the result
        should be the two sequences with an empty alignment.
        """
        seq1 = Read('seq1', 'aaaaaa')
        seq2 = Read('seq2', 'gggggg')
        align = LocalAlignment(seq1, seq2)
        result = align.createAlignment(resultFormat=str)
        alignment = ('\nNo alignment between seq1 and seq2\n')
        self.assertEqual(result, alignment)

    def testWikiAnswer(self):
        """
        Test the example given in Wikipedia:
        http://en.wikipedia.org/wiki/Smith%E2%80%93Waterman_algorithm
        """
        seq1 = Read('seq1', 'ACACACTA')
        seq2 = Read('seq2', 'AGCACACA')
        align = LocalAlignment(seq1, seq2, match=2)
        result = align.createAlignment(resultFormat=str)
        alignment = ('\nCigar string of aligned region: 1=1I5=1D1=\n'
                     'seq1 Match start: 1 Match end: 8\n'
                     'seq2 Match start: 1 Match end: 8\n'
                     'seq1 1 A-CACACTA 8\n'
                     '       | ||||| |\n'
                     'seq2 1 AGCACAC-A 8')
        self.assertEqual(result, alignment)

    def testWikiAnswerWithMatchOne(self):
        """
        Test the example given in Wikipedia
        http://en.wikipedia.org/wiki/Smith%E2%80%93Waterman_algorithm
        Wikipedia uses a match score of two, here we use a score of one.
        """
        seq1 = Read('seq1', 'ACACACTA')
        seq2 = Read('seq2', 'AGCACACA')
        align = LocalAlignment(seq1, seq2, match=1)
        result = align.createAlignment(resultFormat=str)
        alignment = ('\nCigar string of aligned region: 5=1D1=\n'
                     'seq1 Match start: 2 Match end: 8\n'
                     'seq2 Match start: 3 Match end: 8\n'
                     'seq1 2 CACACTA 8\n'
                     '       ||||| |\n'
                     'seq2 3 CACAC-A 8')
        self.assertEqual(result, alignment)

    def testWikiAnswerAsDict(self):
        """
        Test the example given in Wikipedia:
        http://en.wikipedia.org/wiki/Smith%E2%80%93Waterman_algorithm
        with the return result being a dict.
        """
        seq1 = Read('seq1', 'ACACACTA')
        seq2 = Read('seq2', 'AGCACACA')
        align = LocalAlignment(seq1, seq2, match=2)
        result = align.createAlignment()
        self.assertEqual(
            {
                'cigar': '1=1I5=1D1=',
                'sequence1Start': 1,
                'sequence1End': 8,
                'sequence2Start': 1,
                'sequence2End': 8,
                'text': [
                    'seq1 1 A-CACACTA 8',
                    '       | ||||| |',
                    'seq2 1 AGCACAC-A 8',
                ]
            },
            result
        )

    def testWikiAnswerWithMatchOneAsDict(self):
        """
        Test the example given in Wikipedia
        http://en.wikipedia.org/wiki/Smith%E2%80%93Waterman_algorithm
        Wikipedia uses a match score of two, here we use a score of one.
        Get the result as a dict.
        """
        seq1 = Read('seq1', 'ACACACTA')
        seq2 = Read('seq2', 'AGCACACA')
        align = LocalAlignment(seq1, seq2, match=1)
        result = align.createAlignment()
        self.assertEqual(
            {
                'cigar': '5=1D1=',
                'sequence1Start': 2,
                'sequence1End': 8,
                'sequence2Start': 3,
                'sequence2End': 8,
                'text': [
                    'seq1 2 CACACTA 8',
                    '       ||||| |',
                    'seq2 3 CACAC-A 8',
                ]
            },
            result
        )
|
terrycojones/dark-matter
|
test/test_local_align.py
|
Python
|
mit
| 11,532
|
#!/usr/bin/env python
"""Base test classes for API handlers tests."""
import functools
from typing import Type
from grr_response_core.lib.rdfvalues import structs as rdf_structs
from grr_response_proto import tests_pb2
from grr_response_server.gui import api_call_context
# This import guarantees that all API-related RDF types will get imported
# (as they're all references by api_call_router).
# pylint: disable=unused-import
from grr_response_server.gui import api_call_router
# pylint: enable=unused-import
from grr_response_server.gui import api_call_router_registry
from grr.test_lib import acl_test_lib
from grr.test_lib import test_lib
class ApiCallHandlerTest(test_lib.GRRBaseTest):
    """Base class for API handler tests: provides a test user and context."""

    def setUp(self):
        super().setUp()
        # The user we use for API tests.
        self.context = api_call_context.ApiCallContext("api_test_user")
        self.test_username = self.context.username
        acl_test_lib.CreateUser(self.context.username)
class SampleGetHandlerArgs(rdf_structs.RDFProtoStruct):
    """RDF wrapper for the SampleGetHandlerArgs test proto."""
    protobuf = tests_pb2.SampleGetHandlerArgs
def WithApiCallRouter(name,
                      api_call_router_cls: Type[api_call_router.ApiCallRouter]):
    """Makes given function execute with specified router registered.

    Args:
      name: A name of the api call router.
      api_call_router_cls: An ApiCallRouter class object.

    Returns:
      A decorator function that registers and unregisters the ApiCallRouter.
    """

    def _Decorate(func):

        @functools.wraps(func)
        def _Wrapped(*args, **kwargs):
            # The router is registered only for the duration of the call.
            with _ApiCallRouterContext(name, api_call_router_cls):
                func(*args, **kwargs)

        return _Wrapped

    return _Decorate
class _ApiCallRouterContext(object):
    """A context manager for execution with certain ApiCallRouter registered."""

    def __init__(self, name, api_call_router_cls):
        self._name = name
        self._router_cls = api_call_router_cls

    def __enter__(self):
        # Register on entry; the matching unregister happens in __exit__.
        api_call_router_registry.RegisterApiCallRouter(self._name,
                                                       self._router_cls)

    def __exit__(self, *unused_exc_info):
        # Always unregister, whether or not an exception occurred.
        api_call_router_registry.UnregisterApiCallRouter(self._name)
|
google/grr
|
grr/server/grr_response_server/gui/api_test_lib.py
|
Python
|
apache-2.0
| 2,205
|
# Virtual memory analysis scripts.
# Developed 2012-2014 by Peter Hornyack, pjh@cs.washington.edu
# Copyright (c) 2012-2014 Peter Hornyack and University of Washington
# This file contains helper methods for dealing with Ubuntu services.
from trace.traceinfo_class import traceinfo
from util.pjh_utils import *
import trace.run_common as run
import signal
import subprocess
import time
# List of services that this script knows how to handle:
KNOWN_SERVICES = ['apache2', 'memcached', 'mysql']

# Path of each service's pid file; written by the service when it starts
# and removed when it stops (see get_service_pid()).
SERVICE_PIDFILES = {
    'apache2' : '/var/run/apache2.pid',
    'memcached' : '/var/run/memcached.pid',
    'mysql' : '/var/run/mysqld/mysqld.pid'
}

# Timeout (in seconds) for service(8) start/stop invocations.
SERVICE_CMD_TIMEOUT = 30
# This method knows how to read the pid file or how to parse ps(1) output
# for all of the services in the global KNOWN_SERVICES list.
# Returns: the pid of the service's top-level process, or -1 on error.
def get_service_pid(service):
    """Return the pid of the service's top-level process, or -1 on error.

    Only services listed in KNOWN_SERVICES are supported; the pid is read
    from the service's pid file (SERVICE_PIDFILES).
    """
    tag = 'get_service_pid'
    if service not in KNOWN_SERVICES:
        print_error(tag, ("unexpected service {}").format(service))
        return -1
    pidfile = SERVICE_PIDFILES.get(service)
    if pidfile is None:
        print_error(tag, ("couldn't get pidfile for {}").format(service))
        return -1
    return run.read_pidfile(pidfile)
# Uses the Ubuntu service(8) command to run the specified command
# (e.g. start, stop) for the specified service. service(8) is
# convenient and avoids the problem of having to correctly set up
# environment variables and command-line parameters to start these
# services ourself. One drawback of service(8), however, is that it
# doesn't tell us the pid of the service when it it started, so we
# call app-specific methods here to get the pid of the service
# (usually this can be done easily by reading a "pid file" that each
# service establishes, but in future cases it may be necessary to
# parse ps(1) output, like is done for the chrome+chromedriver script).
#
# WARNING: this method issues sudo shell commands - this is a security
# hazard if uncontrolled input is passed to this method!
#
# Returns a tuple:
# (True if the service command returned successfull, False if not;
# the pid of the started service if the command was 'start', or -1
# if the pid could not be found for some reason.)
def service_cmd(service, command, outputdir, service_stdout, service_stderr):
    """Run `service <service> <start|stop>` via service(8) with sudo.

    Returns (success, pid): pid is the started service's top-level pid for
    'start' (-1 if it can't be determined), 0 for 'stop' (callers should
    ignore it), and -1 on any error.
    WARNING: issues sudo shell commands - never pass uncontrolled input!
    """
    tag = 'service_cmd'
    if service not in KNOWN_SERVICES:
        print_error(tag, ("unexpected service {}").format(service))
        return (False, -1)
    if command not in ['start', 'stop']:
        # be a little bit safe anyway...
        print_error(tag, ("command {} not expected - not executing "
            "service command to avoid a security hazard!").format(
            command))
        return (False, -1)
    cmdline = "sudo bash -c 'service {} {}'".format(service, command)
    print_debug(tag, ("cmdline: {}").format(cmdline))
    # Expected return code is 0 in pretty much every case: stopping a
    # service that is running or is already stopped, starting a service
    # that is stopped or is already running (we might want to try to
    # detect this last case here, but whatever).
    # Actually, it turns out that the value returned when starting
    # an already-started service is service-dependent - mysql returns
    # 1, apache2 and memcached return 0. Whatever.
    # I haven't tried any other commands besides start and stop.
    try:
        retcode = subprocess.call(cmdline, shell=True,
            stdout=service_stdout, stderr=service_stderr,
            timeout=SERVICE_CMD_TIMEOUT)
        time.sleep(1)
        # seems like a good idea to pause a tiny bit after starting
        # or stopping service, so that whatever we do next (perhaps
        # re-starting the service, examining its pid file, etc.)
        # knows "for sure" that the service call is complete.
    except subprocess.TimeoutExpired:
        print_error(tag, ("command \"{}\" timed out! ({} "
            "seconds)").format(cmdline, SERVICE_CMD_TIMEOUT))
        return (False, -1)
    if retcode != 0:
        if command == 'stop' and service == 'mysql' and retcode == 1:
            # Ignore: 1 is returned if stopped when mysql not running
            pass
        else:
            # I've hit errors here for apache2 start, with this in the
            # stderr:
            #   (98)Address already in use: make_sock: could not bind
            #     to address [::]:80
            #   (98)Address already in use: make_sock: could not bind
            #     to address 0.0.0.0:80
            #   no listening sockets available, shutting down
            #   Unable to open logs
            # Todo: retry some number of times in this method when this
            # particular error is hit...
            print_error(tag, ("command \"{}\" returned non-0 code "
                "{}, returning now").format(cmdline, retcode))
            return (False, -1)
    if command == 'start':
        # Is there any race condition between the start of a service
        # and the presence of the pid file? Not in my experience, but
        # add a little tiny wait just in case. Also, the services
        # remove their pid files when they are stopped, so there should
        # be no chance of getting a stale pid.
        # Actually, maybe there is a race condition - when trying to
        # read the apache2 pidfile once, I got an error where the
        # line was read from the file, but it didn't match my simple
        # pid regex. So I bumped up the tiny wait a little more here,
        # and added a retry loop in get_service_pid() -> read_pidfile().
        time.sleep(1)
        service_pid = get_service_pid(service)
        print_debug(tag, ("get_service_pid({}) returned {}").format(
            service, service_pid))
    else:
        service_pid = 0  # caller should ignore
    return (True, service_pid)
#############################################################################
# This method is designed to be used as the execfn for an app_to_run
# object for any arbitrary ubuntu service ("service --status-all") where
# we want to start + trace the service, run its client manually, and
# then stop the service. The service must be in the global KNOWN_SERVICES
# list elsewhere in this file.
#
# Returns: a target_pids list containing the top-level pid of the apache
# process, or an empty list on error.
def runservice_manualclient(outputdir, service):
    """Start + trace a service, wait for a manual client run, then stop it.

    Sequence: stop any running instance, enable tracing, start the service,
    pause until Ctrl-C, stop the service, disable tracing.
    Returns a target_pids list containing the top-level pid of the service
    process, or an empty list on error.
    """
    tag = 'runservice_manualclient'
    if service not in KNOWN_SERVICES:
        print_error(tag, ("service {} not in KNOWN_SERVICES {}, "
            "returning empty target_pids now").format(service,
            KNOWN_SERVICES))
        return []
    target_pids = []
    tracer = traceinfo(service)
    (service_stdout, service_stderr) = run.stdout_stderr_init(
        outputdir, service)
    # Make sure the service is not already running before tracing starts.
    (success, meh) = service_cmd(service, 'stop',
        outputdir, service_stdout, service_stderr)
    if not success:
        print_error(tag, ("initial {} stop failed, returning [] "
            "now").format(service))
        for f in [service_stdout, service_stderr]:
            f.close()
        return []
    success = tracer.trace_on(outputdir,
        descr="starting {}".format(service))
    if not success:
        print_error(tag, ("trace_on failed, returning [] now"))
        for f in [service_stdout, service_stderr]:
            f.close()
        return []
    (success, service_pid) = service_cmd(service, 'start',
        outputdir, service_stdout, service_stderr)
    # pids 0 and 1 can never belong to the service; treat them as failure.
    if service_pid < 2:
        success = False
    # If service start didn't succeed, we'll skip the client execution,
    # but we'll still try to stop the service and then turn tracing
    # off.
    if success:
        # Pause until Ctrl-C (SIGINT) is received. Call a nop signal
        # handler when signal is received, then reset signal behavior
        # back to default.
        # http://docs.python.org/3/library/signal.html
        signal.signal(signal.SIGINT, run.signal_handler_nop)
        print(("Tracing is on and {} service is started").format(service))
        print(("Run your client, then press Ctrl-C to stop the "
            "service and disable tracing"))
        signal.pause()  # Note: Linux-only
        signal.signal(signal.SIGINT, signal.SIG_DFL)
        success = True
    service_cmd(service, 'stop', outputdir, service_stdout, service_stderr)
    # Stop trace *after* stopping service.
    (tracesuccess, buffer_full) = tracer.trace_off(
        "stopping {}".format(service), service_pid)
    if not tracesuccess:
        print_error(tag, ("trace_off failed"))
        success = False
    elif buffer_full:
        print_error(tag, ("trace buffer filled up before "
            "tracing turned off - considering this an error "
            "here").format())
        success = False
    for f in [service_stdout, service_stderr]:
        f.close()
    if success:
        print_debug(tag, ("everything ran successfully, appending "
            "service_pid {} to target_pids and returning").format(
            service_pid))
        target_pids.append(service_pid)
    else:
        print_error(tag, ("something failed, so not appending "
            "service_pid {} to target_pids").format(service_pid))
        target_pids = []
    return target_pids
if __name__ == '__main__':
    # This module only provides helper functions; it is not meant to be run.
    print_error_exit("not an executable module")
|
pjh/vm-analyze
|
app_scripts/ubuntu_services.py
|
Python
|
bsd-3-clause
| 8,613
|
import fileinput
import sys
import re
import shlex
import subprocess
import asyncio
import selectors
import os
import codecs
import queue
def run(command):
    """
    Build a pipeline stage that pipes its input lines through an external
    command and yields the command's output lines.

    The command string is parsed with shlex. The returned generator feeds
    the subprocess's stdin from its own input iterable while concurrently
    reading stdout, multiplexing both pipes with a selector so neither
    side can deadlock on a full pipe buffer.
    Note: only works under unix because Pipes are not selectable under
    windows.
    """
    command = shlex.split(command)
    # NOTE(review): the selector is created once per run() call and shared
    # by every invocation of _run; calling the returned stage twice would
    # re-register fresh pipes on the same selector — confirm single use.
    selector = selectors.DefaultSelector()
    def _run(input):
        p=subprocess.Popen(command,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE)
        selector.register(p.stdin, selectors.EVENT_WRITE)
        selector.register(p.stdout, selectors.EVENT_READ)
        # `more` holds the still-unwritten tail of the current input line;
        # input_offset is how much of it has been written so far.
        more = memoryview(b"")
        input_offset=0
        # Incremental decoder: stdout chunks may split multi-byte UTF-8
        # sequences at arbitrary boundaries.
        stdout_decoder = codecs.getincrementaldecoder("utf8")()
        # Accumulates fragments of the current (not yet newline-terminated)
        # output line.
        output = []
        # Loop until both pipes have been unregistered (closed).
        while selector.get_map():
            ready = selector.select()
            for key,events in ready:
                if key.fileobj is p.stdin:
                    if input_offset==len(more):
                        # Previous line fully written: fetch the next one.
                        try:
                            line = next(input)+"\n"
                        except StopIteration:
                            # Input exhausted: close stdin so the child
                            # sees EOF.
                            selector.unregister(key.fileobj)
                            key.fileobj.close()
                        else:
                            more= memoryview(line.encode("utf8"))
                            try:
                                input_offset = os.write(key.fd, more)
                            except BrokenPipeError :
                                # Child stopped reading; give up on stdin.
                                selector.unregister(key.fileobj)
                                key.fileobj.close()
                    else:
                        # Continue writing the remainder of the current line.
                        try:
                            input_offset += os.write(key.fd, more[input_offset:])
                        except BrokenPipeError :
                            selector.unregister(key.fileobj)
                            key.fileobj.close()
                elif key.fileobj == p.stdout:
                    data = os.read(key.fd,32768)
                    if not data:
                        # EOF on stdout: flush the decoder and emit any
                        # trailing (unterminated) output.
                        data = stdout_decoder.decode(b"",True)
                        selector.unregister(key.fileobj)
                        key.fileobj.close()
                        if data:
                            data = data.split("\n")
                            if data[0]:
                                output.append(data[0])
                                yield "".join(output)
                            for line in data[1:]:
                                yield line
                    else:
                        # Split the decoded chunk into complete lines,
                        # buffering the trailing partial line in `output`.
                        data = stdout_decoder.decode(data)
                        while data != "":
                            try:
                                endofline = data.index("\n")
                            except ValueError:
                                output.append(data)
                                data =""
                            else:
                                output.append(data[:endofline])
                                yield "".join(output)
                                output = []
                                data =data[endofline+1:]
                else:
                    raise Exception("unexpected event {}".format(key))
    return _run
def grep(pattern, flags=0):
    """Build a pipeline stage that keeps only the lines matching *pattern*.

    The pattern is compiled once, up front; the returned stage lazily
    filters its input iterable using re.search semantics (match anywhere
    in the line), like the grep utility.
    """
    compiled = re.compile(pattern, flags)
    def _grep(input):
        for line in input:
            if compiled.search(line):
                yield line
    return _grep
def sub(pattern, replacement, *, count=0, flags=0):
    """Build a pipeline stage that rewrites every line with re.sub.

    *replacement* may be a string or a function, and *count* limits the
    number of substitutions per line (0 = unlimited), exactly as in
    re.sub.
    """
    compiled = re.compile(pattern, flags)
    def _sub(input):
        for line in input:
            yield compiled.sub(repl=replacement, string=line, count=count)
    return _sub
def cat(*file_names):
    """Build a source stage yielding lines with trailing newlines removed.

    Reads the named files in order via fileinput; with no names it falls
    back to standard input. The *input* argument of the stage is ignored:
    cat is a source and belongs at the front of a chain.
    """
    def _cat(input):
        if file_names:
            stream = fileinput.FileInput(file_names)
        else:
            stream = sys.stdin
        for raw_line in stream:
            yield raw_line.rstrip("\n")
    return _cat
def output(end=None):
    """Build a terminal stage that prints each element to stdout.

    *end* is forwarded to print() (None means the default newline).
    The stage is a plain function, not a generator, so it eagerly
    drains the whole pipeline when called.
    """
    def _output(input):
        for item in input:
            print(item, end=end)
    return _output
def split(separator=None):
    """Build a stage that splits every line into a list of fields.

    With separator=None, str.split's whitespace-run splitting applies.
    """
    def _split(input):
        for line in input:
            yield line.split(separator)
    return _split
def sort(key=None, reverse=False):
    """Build a stage that emits its input in sorted order.

    Note: sorting must buffer the entire input before the first element
    can be yielded.
    """
    def _sort(input):
        for item in sorted(input, key=key, reverse=reverse):
            yield item
    return _sort
def join(separator=""):
    """Build a stage that joins each element (an iterable of strings)
    into a single string with *separator* between parts.
    """
    def _join(input):
        for parts in input:
            yield separator.join(parts)
    return _join
def filter(func):
    """Build a stage keeping only the elements for which *func* is truthy.

    NOTE: deliberately shadows the builtin filter at module scope; that
    is part of this module's pipeline API.
    """
    def _filter(input):
        for elt in input:
            if func(elt):
                yield elt
    return _filter
def map(func):
    """Build a stage applying *func* to every element of the input.

    NOTE: deliberately shadows the builtin map at module scope; that is
    part of this module's pipeline API.
    """
    def _map(input):
        for elt in input:
            yield func(elt)
    return _map
def head(n=10):
    """Build a stage passing through at most the first *n* elements.

    Bug fixed: in the original, the `i == n` break could never trigger
    for n <= 0 (enumerate starts at 1), so head(0) leaked the ENTIRE
    stream. Now n <= 0 yields nothing, matching `head -n 0`.
    """
    def _head(input):
        if n <= 0:
            return
        for count, elt in enumerate(input, 1):
            yield elt
            if count >= n:
                # Stop without consuming the rest of the input.
                break
    return _head
def tail(n=10):
    """Build a stage that yields only the last *n* elements of its input.

    Bugs fixed relative to the original queue.Queue version:
    - the buffer was created once in the factory, so a stage object kept
      state between runs (stale elements could leak into a later run if
      a previous generator was abandoned mid-iteration);
    - queue.Queue(maxsize=0) means UNBOUNDED, so tail(0) emitted the
      whole stream instead of nothing;
    - queue.Queue is a thread-synchronization primitive; a plain
      deque(maxlen=n) is the right single-threaded sliding window.
    """
    from collections import deque  # local import: module does not import collections
    def _tail(input):
        if n <= 0:
            return
        window = deque(maxlen=n)
        for elt in input:
            window.append(elt)
        yield from window
    return _tail
def null():
    """Build a stage that ignores its input entirely and yields nothing.

    Note: the input iterable is not consumed — the stage simply returns
    an empty iterator.
    """
    def _null(input):
        return iter(())
    return _null
def chain(*generators, input=None):
    """Wire the given stages into a pipeline.

    Each stage is called with the previous stage's result (the optional
    *input* iterable seeds the first one); the final stage's result is
    returned. Raises ValueError for a None stage — the usual cause is a
    factory that forgot to return its inner function.
    """
    if input is not None:
        input = iter(input)
    stream = input
    for i, generator in enumerate(generators, 1):
        if generator is None:
            raise ValueError("generator n°{} is None, probably forget to return function from generator factory".format(i))
        stream = generator(stream)
    return stream
if __name__ == "__main__":
    # Tiny command-line demo: the first argument selects a pipeline.
    mode = sys.argv[1]
    if mode == "run":
        chain(cat(sys.argv[2]), run(sys.argv[3]), output())
    if mode == "grep":
        chain(cat(*sys.argv[3:]), grep(sys.argv[2]), output())
    if mode == "tail":
        chain(cat(*sys.argv[2:]), tail(), output())
    if mode == "head":
        # The demo deliberately uses n=2, not head()'s default of 10.
        chain(cat(*sys.argv[2:]), head(n=2), output())
    elif mode == "1":
        # stupid example
        # NOTE(review): cat(sys.argv[2:]) passes the argument LIST as a
        # single positional (unlike the other branches, which unpack) —
        # looks unintended; preserved as-is.
        chain(cat(sys.argv[2:]),
              split(","),
              sort(key=lambda k: int(k[1]), reverse=True),
              join("\t"),
              output())
    elif mode == "2":
        # reddit example
        # https://www.reddit.com/r/Python/comments/33qzzf/what_features_should_python_steal_from_other/cqnof0t
        chain(cat(),
              filter(lambda s: s and s[0] != '#'),
              map(float),
              sort(),
              head(10),
              output())
|
xcombelle/chaintools
|
chaintools.py
|
Python
|
mit
| 7,879
|
# 005_cleaner.py
#####################################################################
##################################
# Imports; the apis/ directory is prepended to sys.path so the
# project-local `voca` helpers can be imported by name.
import sys
sys.path.insert(0 , 'C:/Users/WILLROS/Perso/Shade/scripts/LocalWC-Shade-App/apis/')
from voca import AddLog , StringFormatter , OutFileCreate , OdditiesFinder
##################################
# Mission identifiers and working paths.
missionName = '005'
AddLog('title' , '{} : Début du nettoyage du fichier'.format(missionName))
work_dir = 'C:/Users/WILLROS/Perso/Shade/scripts/LocalWC-Shade-App/apis/raw/{}_raw/'.format(missionName)
# Name of the raw source file.
raw_file = 'src'
##################################
# Read the raw text. Fix: use a context manager so the file handle is
# closed deterministically (the original leaked an open file object).
with open(work_dir + raw_file , 'r') as src_handle:
    raw_string_with_tabs = src_handle.read()
# Tabs separate records in the raw dump: turn them into line breaks.
raw_string_with_cr = raw_string_with_tabs.replace( '\t', '\n' )
# Turn the string into a list of lines.
raw_list = raw_string_with_cr.splitlines()
# Remove encoding/layout oddities.
AddLog('subtitle' , 'Début de la fonction OdditiesFinder')
list_without_oddities = OdditiesFinder( raw_list )
# Normalize each remaining line.
ref_list = []
AddLog('subtitle' , 'Début de la fonction StringFormatter')
for line in list_without_oddities:
    ref_list.append( StringFormatter( line ) )
##################################
# Write the cleaned output files.
AddLog('subtitle' , 'Début de la fonction OutFileCreate')
OutFileCreate('C:/Users/WILLROS/Perso/Shade/scripts/LocalWC-Shade-App/apis/out/','{}_src'.format(missionName),ref_list,'prenoms masculins italiens')
|
sighill/shade_app
|
apis/raw/005_raw/005_cleaner.py
|
Python
|
mit
| 1,625
|
# ... client initialization left out
# Documentation snippet: `client` is assumed to be an already-initialized
# Citrination client; its `data` attribute is the data-access sub-client.
data_client = client.data
dataset_id = 1
# Gets a single file named exactly my_file.json
dataset_file = data_client.get_dataset_file(dataset_id, "my_file.json")
dataset_file.url # url that can be used to download the file
dataset_file.path # the filepath as it appears in Citrination
# Gets all the files in a dataset, organized by version,
# represented as a list of DatasetFile objects
dataset_files = data_client.get_dataset_files(dataset_id)
|
CitrineInformatics/python-citrination-client
|
docs/source/code_samples/data/file_urls.py
|
Python
|
apache-2.0
| 489
|
from direct.showbase.ShowBaseGlobal import *
from toontown.toonbase.ToontownGlobals import *
from direct.gui.DirectGui import *
from toontown.toon import LaffMeter
from toontown.toonbase import TTLocalizer
class TreasureScorePanel(DirectFrame):
    """Small dialog-style panel that displays a treasure-retrieval count.

    Used by the diving minigame: shows a caption plus a running score
    that incrScore() bumps one at a time.
    """
    def __init__(self):
        DirectFrame.__init__(self, relief=None, image_color=GlobalDialogColor, image_scale=(0.24, 1.0, 0.24), image_pos=(0.0, 0.1, 0.0))
        # Current treasure count shown on the panel.
        self.score = 0
        # Large number in the middle of the panel.
        self.scoreText = DirectLabel(self, relief=None, text=str(self.score), text_scale=0.08, pos=(0.0, 0.0, -0.09))
        # Caption displayed above the number.
        self.nameText = DirectLabel(self, relief=None, text=TTLocalizer.DivingGameTreasuresRetrieved, text_scale=0.05, text_pos=(0.0, 0.06), text_wordwrap=7.5, text_shadow=(1, 1, 1, 1))
        self.show()
        return
    def cleanup(self):
        # Drop label references, then tear down the underlying frame.
        del self.scoreText
        del self.nameText
        self.destroy()
    def incrScore(self):
        # Bump the score and refresh the on-screen number.
        self.score += 1
        self.scoreText['text'] = str(self.score)
    def makeTransparent(self, alpha):
        # Enable alpha blending and scale the panel's opacity.
        self.setTransparency(1)
        self.setColorScale(1, 1, 1, alpha)
|
ksmit799/Toontown-Source
|
toontown/minigame/TreasureScorePanel.py
|
Python
|
mit
| 1,083
|
from . operators import operators
def Expr(node):
    """A container for an expression: identity executor over the wrapped
    value node."""
    def _identity(x):
        return x
    return _identity, [node.value]
def BinOp(node):
    """a + b — executor looked up from the op table; the two operands
    become the child nodes."""
    executor = operators(node.op)
    return executor, [node.left, node.right]
def BoolOp(node):
    """a and b and c — executor from the op table; every value in the
    chain is a child node."""
    executor = operators(node.op)
    return executor, node.values
def UnaryOp(node):
    """-a, not a, +a, ~a — executor from the op table with the single
    operand as the only child."""
    executor = operators(node.op)
    return executor, [node.operand]
def Compare(node):
    """a < b < c > d — chained comparison with Python's short-circuit
    semantics: each adjacent pair is tested left to right, and the first
    failing link makes the whole chain False."""
    executors = [operators(o) for o in node.ops]
    def _compare(left, *values):
        assert len(executors) == len(values)
        previous = left
        for execute, value in zip(executors, values):
            if not execute(previous, value):
                return False
            previous = value
        return True
    return _compare, [node.left] + node.comparators
def Call(node):
    """f(a, ..., k=v, ...) — call executor.

    Without keywords: the children are [func] + args and the executor
    simply applies the evaluated callee to the evaluated positionals.
    With keywords: keyword value nodes are appended after the
    positionals, and the executor splits the evaluated values back out
    to rebuild the keyword dict.
    """
    if not node.keywords:
        def _call(callee, *positional):
            return callee(*positional)
        return _call, [node.func] + node.args
    n_positional = len(node.args)
    keys = [k.arg for k in node.keywords]
    value_nodes = [k.value for k in node.keywords]
    def _call(callee, *evaluated):
        positional = evaluated[:n_positional]
        keyword_values = evaluated[n_positional:]
        return callee(*positional, **dict(zip(keys, keyword_values)))
    return _call, [node.func] + node.args + value_nodes
def IfExp(node):
    """a if b else c — ternary executor; children are evaluated in the
    order (body, test, orelse)."""
    def _if_exp(body, test, orelse):
        if test:
            return body
        return orelse
    return _if_exp, [node.body, node.test, node.orelse]
|
timedata-org/expressy
|
expressy/ast_handlers/expressions.py
|
Python
|
mit
| 1,421
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds a monetary `subtotal` column
    # to Cart. Avoid editing by hand once it has been applied anywhere.
    dependencies = [
        ('carts', '0006_cartitem_line_item_total'),
    ]
    operations = [
        migrations.AddField(
            model_name='cart',
            name='subtotal',
            # default=0.0 only backfills existing rows during migration;
            # preserve_default=False drops it from the model afterwards.
            field=models.DecimalField(default=0.0, max_digits=50, decimal_places=2),
            preserve_default=False,
        ),
    ]
|
insta-code1/ecommerce
|
src/carts/migrations/0007_cart_subtotal.py
|
Python
|
mit
| 474
|
# Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
# Sample pointcloud with shape (1568, 3).
LIDAR_CLOUD_PATH = 'ops/testdata/pointcloud.npy'
def get_transformation_matrix(transform):
  """Converts [tx, ty, tz, rx, ry, rz] to a 4x4 homogeneous transform matrix.

  Rotation is composed as Rx @ Ry @ Rz from the Euler angles (each first
  clipped to [-pi, pi]); the translation [tx, ty, tz] fills the last
  column, and a [0, 0, 0, 1] row completes the matrix.
  """
  rx = transform[3]
  ry = transform[4]
  rz = transform[5]
  # Clamp the Euler angles before building the rotation matrices.
  rz = tf.clip_by_value(rz, -np.pi, np.pi)
  ry = tf.clip_by_value(ry, -np.pi, np.pi)
  rx = tf.clip_by_value(rx, -np.pi, np.pi)
  # Rotation about the x axis.
  cos_rx = tf.cos(rx)
  sin_rx = tf.sin(rx)
  rotx_1 = tf.stack([1.0, 0.0, 0.0])
  rotx_2 = tf.stack([0.0, cos_rx, -sin_rx])
  rotx_3 = tf.stack([0.0, sin_rx, cos_rx])
  xmat = tf.stack([rotx_1, rotx_2, rotx_3])
  # Rotation about the y axis.
  cos_ry = tf.cos(ry)
  sin_ry = tf.sin(ry)
  roty_1 = tf.stack([cos_ry, 0.0, sin_ry])
  roty_2 = tf.stack([0.0, 1.0, 0.0])
  roty_3 = tf.stack([-sin_ry, 0.0, cos_ry])
  ymat = tf.stack([roty_1, roty_2, roty_3])
  # Rotation about the z axis.
  cos_rz = tf.cos(rz)
  sin_rz = tf.sin(rz)
  rotz_1 = tf.stack([cos_rz, -sin_rz, 0.0])
  rotz_2 = tf.stack([sin_rz, cos_rz, 0.0])
  rotz_3 = tf.stack([0.0, 0.0, 1.0])
  zmat = tf.stack([rotz_1, rotz_2, rotz_3])
  # Compose rotation, append translation column, then the filler row.
  rotate = tf.matmul(tf.matmul(xmat, ymat), zmat)
  translate = transform[:3]
  mat = tf.concat([rotate, tf.expand_dims(translate, 1)], axis=1)
  hom_filler = tf.constant([0.0, 0.0, 0.0, 1.0], shape=[1, 4], dtype=tf.float32)
  mat = tf.concat([mat, hom_filler], axis=0)
  return mat
def np_get_transformation_matrix(transform):
  """Converts [tx, ty, tz, rx, ry, rz] to a 4x4 homogeneous transform matrix.

  NumPy twin of get_transformation_matrix: rotation composed as
  Rx @ Ry @ Rz with each Euler angle clipped to [-pi, pi]; translation
  in the last column; [0, 0, 0, 1] filler row at the bottom.
  """
  # Clamp the Euler angles first.
  rx = np.clip(transform[3], -np.pi, np.pi)
  ry = np.clip(transform[4], -np.pi, np.pi)
  rz = np.clip(transform[5], -np.pi, np.pi)
  cx, sx = np.cos(rx), np.sin(rx)
  cy, sy = np.cos(ry), np.sin(ry)
  cz, sz = np.cos(rz), np.sin(rz)
  xmat = np.array([[1.0, 0.0, 0.0],
                   [0.0, cx, -sx],
                   [0.0, sx, cx]])
  ymat = np.array([[cy, 0.0, sy],
                   [0.0, 1.0, 0.0],
                   [-sy, 0.0, cy]])
  zmat = np.array([[cz, -sz, 0.0],
                   [sz, cz, 0.0],
                   [0.0, 0.0, 1.0]])
  rotation = np.dot(np.dot(xmat, ymat), zmat)
  translation = np.expand_dims(transform[:3], 1)
  upper = np.concatenate((rotation, translation), axis=1)
  hom_filler = np.array([[0.0, 0.0, 0.0, 1.0]], dtype=np.float32)
  return np.concatenate((upper, hom_filler), axis=0)
def transform_cloud_xyz(cloud, transform):
  """Applies a [tx, ty, tz, rx, ry, rz] transform to an (N, 3) cloud."""
  num_points = cloud.shape.as_list()[0]
  # Promote to homogeneous coordinates: (N, 4).
  ones_column = tf.ones(shape=[num_points, 1], dtype=tf.float32)
  hom_cloud = tf.concat([cloud, ones_column], axis=1)
  mat = get_transformation_matrix(transform)
  moved = tf.transpose(tf.matmul(mat, tf.transpose(hom_cloud)))
  # Drop the homogeneous coordinate again.
  return moved[:, :3]
def np_transform_cloud_xyz(cloud, transform):
  """NumPy twin of transform_cloud_xyz for an (N, 3) cloud."""
  ones_column = np.ones(shape=[cloud.shape[0], 1], dtype=np.float32)
  # Homogeneous coordinates, transform, then back to (N, 3).
  hom_cloud = np.concatenate((cloud, ones_column), axis=1)
  mat = np_get_transformation_matrix(transform)
  moved = np.transpose(np.dot(mat, np.transpose(hom_cloud)))
  return moved[:, :3]
def batch_transform_cloud_xyz(cloud, transform):
  """Transforms a batch of clouds, one transform per item or broadcast.

  A rank-2 `transform` supplies one [tx, ty, tz, rx, ry, rz] row per
  batch item; a rank-1 one is reused for every item.
  """
  cloud_items = tf.unstack(cloud)
  if len(transform.shape.as_list()) == 2:
    transform_items = tf.unstack(transform)
  else:
    # Single transform: broadcast it across the batch.
    transform_items = [transform] * len(cloud_items)
  transformed = [transform_cloud_xyz(c, t)
                 for c, t in zip(cloud_items, transform_items)]
  return tf.stack(transformed)
|
cshallue/models
|
research/vid2depth/ops/icp_util.py
|
Python
|
apache-2.0
| 4,710
|
'''
LICENSING
-------------------------------------------------
golix: A python library for Golix protocol object manipulation.
Copyright (C) 2016 Muterra, Inc.
Contributors
------------
Nick Badger
badg@muterra.io | badg@nickbadger.com | nickbadger.com
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the
Free Software Foundation, Inc.,
51 Franklin Street,
Fifth Floor,
Boston, MA 02110-1301 USA
------------------------------------------------------
A NOTE ON RANDOM NUMBERS...
PyCryptoDome sources randomness from os.urandom(). This should be secure
for most applications. HOWEVER, if your system is low on entropy (can
be an issue in high-demand applications like servers), urandom *will not
block to wait for entropy*, and will revert (ish?) to potentially
insufficiently secure pseudorandom generation. In that case, it might be
better to source from elsewhere (like a hardware RNG).
Some initial temporary thoughts:
1. Need to refactor signing, etc into identities.
2. Identity base class should declare supported cipher suites as a set
3. Each identity class should += the set with their support, allowing
for easy multi-inheritance for multiple identity support
4. Identities then insert the author into the file
5. How does this interact with asymmetric objects with symmetric sigs?
Should just look for an instance of the object? It would be nice
to totally factor crypto awareness out of the objects entirely,
except (of course) for address algorithms.
    6. From within python, should the identities be forced to ONLY support
a single ciphersuite? That would certainly make life easier. A
LOT easier. Yeah, let's do that then. Multi-CS identities can
multi-subclass, and will need to add some kind of glue code for
key reuse. Deal with that later, but it'll probably entail
backwards-incompatible changes.
7. Then, the identities should also generate secrets. That will also
remove people from screwing up and using ex. random.random().
But what to do with the API for that? Should identity.finalize(obj)
return (key, obj) pair or something? That's not going to be useful
for all objects though, because not all objects use secrets. Really,
the question is, how to handle GEOCs in a way that makes sense?
Maybe add an Identity.secrets(ghid) attribute or summat? Though
returning just the bytes would be really unfortunate for app
development, because you'd have to unpack the generated bytes to
figure out the ghid. What about returning a namedtuple, and adding
a field for secrets in the GEOC? that might be something to add
to the actual objects (ex GEOC) instead of the identity. That would
also reduce the burden on identities for state management of
generated objects, which should really be handled at a higher level
than this library.
8. Algorithm precedence order should be defined globally, but capable
of being overwritten
'''
# Global dependencies
import abc
import os
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import hmac
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives import ciphers
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives.kdf import hkdf
from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.backends import default_backend
from donna25519 import PrivateKey as ECDHPrivate
from donna25519 import PublicKey as ECDHPublic
from smartyparse import ParseError
# Interpackage dependencies
from .exceptions import SecurityError
from .utils import Ghid
from .utils import _dummy_ghid
from .crypto_utils import ADDRESS_ALGOS
from .crypto_utils import Secret
from .crypto_utils import AsymHandshake
from .crypto_utils import AsymAck
from .crypto_utils import AsymNak
from .crypto_utils import _dummy_asym
from .crypto_utils import _dummy_mac
from .crypto_utils import _dummy_signature
from .crypto_utils import _dummy_address
from .crypto_utils import _dummy_pubkey
from .crypto_utils import _dummy_pubkey_exchange
from ._getlow import GIDC
from ._getlow import GEOC
from ._getlow import GOBS
from ._getlow import GOBD
from ._getlow import GDXX
from ._getlow import GARQ
from ._getlow import GARQHandshake
from ._getlow import GARQAck
from ._getlow import GARQNak
# Some globals
# Single shared backend instance for all `cryptography` primitives.
CRYPTO_BACKEND = default_backend()
# Numeric identifiers for the default address algorithm and ciphersuite.
DEFAULT_ADDRESSER = 1
DEFAULT_CIPHER = 1
# Control * imports
__all__ = [
    'FirstParty1',
    'SecondParty1',
    'ThirdParty1'
]
# Some utilities
class _NoopSHA512(hashes.SHA512):
    ''' Digest replayer: looks like a SHA-512 hash context, but
    finalize() returns a preset value.
    Lets an already-computed digest be fed to `cryptography` APIs that
    insist on hashing the data themselves: update() discards its input.
    '''
    def __init__(self, noopdata, *args, **kwargs):
        # Digest bytes that finalize() will return verbatim.
        self.__data = noopdata
        super().__init__(*args, **kwargs)
        # Consumers read ctx.algorithm; pointing it at self lets this
        # object serve as both hash context and algorithm descriptor.
        self.algorithm = self
    def copy(self):
        ''' Total NOOP, because self cannot change.
        '''
        return self
    def update(self, data):
        # Noop noop noop
        pass
    def finalize(self):
        # Yay we get to do something!
        return self.__data
class _IdentityBase(metaclass=abc.ABCMeta):
def __init__(self, keys, ghid):
self._ghid = ghid
try:
self._signature_key = keys['signature']
self._encryption_key = keys['encryption']
self._exchange_key = keys['exchange']
except (KeyError, TypeError) as e:
raise RuntimeError(
'Generating ID from existing keys requires dict-like obj '
'with "signature", "encryption", and "exchange" keys.'
) from e
@property
def ghid(self):
return self._ghid
@property
def ciphersuite(self):
return self._ciphersuite
@classmethod
def _dispatch_address(cls, address_algo):
if address_algo == 'default':
address_algo = cls.DEFAULT_ADDRESS_ALGO
elif address_algo not in ADDRESS_ALGOS:
raise ValueError(
'Address algorithm unavailable for use: ' + str(address_algo)
)
return address_algo
@classmethod
def _typecheck_secret(cls, secret):
# Awkward but gets the job done
if not isinstance(secret, Secret):
return False
if secret.cipher != cls._ciphersuite:
return False
return True
class _ObjectHandlerBase(metaclass=abc.ABCMeta):
    ''' Base class for anything that needs to unpack Golix objects.
    Each unpack_* method parses raw bytes as one specific object type;
    unpack_any tries them all in turn.
    '''
    @staticmethod
    def unpack_identity(packed):
        ''' Parse raw bytes as a GIDC identity container. '''
        return GIDC.unpack(packed)
    @staticmethod
    def unpack_container(packed):
        ''' Parse raw bytes as a GEOC object container. '''
        return GEOC.unpack(packed)
    @staticmethod
    def unpack_bind_static(packed):
        ''' Parse raw bytes as a GOBS static binding. '''
        return GOBS.unpack(packed)
    @staticmethod
    def unpack_bind_dynamic(packed):
        ''' Parse raw bytes as a GOBD dynamic binding. '''
        return GOBD.unpack(packed)
    @staticmethod
    def unpack_debind(packed):
        ''' Parse raw bytes as a GDXX debinding. '''
        return GDXX.unpack(packed)
    @staticmethod
    @abc.abstractmethod
    def unpack_request(packed):
        ''' Unpacks requests. Different for firstparties and
        thirdparties, but used by both in unpack_any.
        '''
        pass
    def unpack_any(self, packed):
        ''' Try to unpack using any available parser.
        Raises TypeError if no parser is found.
        '''
        parsers = (
            self.unpack_identity,
            self.unpack_container,
            self.unpack_bind_static,
            self.unpack_bind_dynamic,
            self.unpack_debind,
            self.unpack_request,
        )
        for parser in parsers:
            # Hm, don't really like this.
            try:
                return parser(packed)
            except (ParseError, TypeError):
                continue
        raise ParseError(
            'Packed data does not appear to be a Golix object.'
        )
class _SecondPartyBase(metaclass=abc.ABCMeta):
    ''' Public half of an identity: the key material needed to verify
    and encrypt *to* another party, built from keys, an unpacked GIDC,
    or packed bytes.
    '''
    @classmethod
    def from_keys(cls, keys, address_algo):
        ''' Creates a secondparty from unpacked keys -- DON'T use this
        if you have an existing MIDC.
        '''
        try:
            # Turn them into bytes first.
            packed_keys = cls._pack_keys(keys)
        except (KeyError, TypeError) as e:
            raise RuntimeError(
                'Generating ID from existing keys requires dict-like obj '
                'with "signature", "encryption", and "exchange" keys.'
            ) from e
        # Build and pack the identity container to derive its ghid.
        gidc = GIDC(
            signature_key=packed_keys['signature'],
            encryption_key=packed_keys['encryption'],
            exchange_key=packed_keys['exchange']
        )
        gidc.pack(cipher=cls._ciphersuite, address_algo=address_algo)
        ghid = gidc.ghid
        self = cls(keys=keys, ghid=ghid)
        self.packed = gidc.packed
        return self
    @classmethod
    def from_identity(cls, gidc):
        ''' Loads an unpacked gidc into a SecondParty. Note that this
        does not select the correct SecondParty for any given gidc's
        ciphersuite.
        '''
        ghid = gidc.ghid
        # Convert the container's packed key bytes into usable key objects.
        keys = cls._unpack_keys({
            'signature': gidc.signature_key,
            'encryption': gidc.encryption_key,
            'exchange': gidc.exchange_key
        })
        self = cls(keys=keys, ghid=ghid)
        return self
    @classmethod
    def from_packed(cls, packed):
        ''' Loads a packed gidc into a SecondParty. Also does not select
        the correct SecondParty for the packed gidc's ciphersuite.
        '''
        gidc = _ObjectHandlerBase.unpack_identity(packed)
        self = cls.from_identity(gidc)
        self.packed = packed
        return self
    @classmethod
    @abc.abstractmethod
    def _pack_keys(cls, keys):
        ''' Convert self.keys from objects used for crypto operations
        into bytes-like objects suitable for output into a GIDC.
        '''
        pass
    @classmethod
    @abc.abstractmethod
    def _unpack_keys(cls, keys):
        ''' Convert keys dic into objects used for crypto operations
        from bytes-like objects used in GIDC.
        '''
        pass
class _FirstPartyBase(_ObjectHandlerBase, metaclass=abc.ABCMeta):
DEFAULT_ADDRESS_ALGO = DEFAULT_ADDRESSER
def __init__(self, keys=None, ghid=None, address_algo='default', *args,
**kwargs):
self.address_algo = self._dispatch_address(address_algo)
# Load an existing identity
if keys is not None and ghid is not None:
self._second_party = self._generate_second_party(
keys,
self.address_algo
)
# Catch any improper declaration
elif keys is not None or ghid is not None:
raise TypeError(
'Generating an ID manually from existing keys requires '
'both keys and ghid.'
)
# Generate a new identity
else:
keys = self._generate_keys()
self._second_party = self._generate_second_party(
keys,
self.address_algo
)
ghid = self._second_party.ghid
# Now dispatch super() with the adjusted keys, ghid
super().__init__(keys=keys, ghid=ghid, *args, **kwargs)
@classmethod
def _typecheck_2ndparty(cls, obj):
# Type check the partner. Must be SecondPartyX or similar.
if not isinstance(obj, cls._2PID):
raise TypeError(
'Object must be a SecondParty of compatible type '
'with the FirstParty initiating the request/ack/nak.'
)
else:
return True
@property
def second_party(self):
# Note: this is going to error out if we're loading an identity, since
# we're not currently passing in the packed identity.
return self._second_party
def make_container(self, secret, plaintext):
if not self._typecheck_secret(secret):
raise TypeError(
'Secret must be a properly-formatted Secret compatible with '
'the current identity\'s declared ciphersuite.'
)
geoc = GEOC(author=self.ghid)
geoc.payload = self._encrypt(secret, plaintext)
geoc.pack(cipher=self.ciphersuite, address_algo=self.address_algo)
signature = self._sign(geoc.ghid.address)
geoc.pack_signature(signature)
return geoc
def make_bind_static(self, target):
gobs = GOBS(
binder = self.ghid,
target = target
)
gobs.pack(cipher=self.ciphersuite, address_algo=self.address_algo)
signature = self._sign(gobs.ghid.address)
gobs.pack_signature(signature)
return gobs
def make_bind_dynamic(self, counter, target_vector, ghid_dynamic=None):
gobd = GOBD(
binder = self.ghid,
counter = counter,
target_vector = target_vector,
ghid_dynamic = ghid_dynamic
)
gobd.pack(cipher=self.ciphersuite, address_algo=self.address_algo)
signature = self._sign(gobd.ghid.address)
gobd.pack_signature(signature)
return gobd
def make_debind(self, target):
gdxx = GDXX(
debinder = self.ghid,
target = target
)
gdxx.pack(cipher=self.ciphersuite, address_algo=self.address_algo)
signature = self._sign(gdxx.ghid.address)
gdxx.pack_signature(signature)
return gdxx
def make_handshake(self, secret, target):
return AsymHandshake(
author = self.ghid,
target = target,
secret = secret
)
def make_ack(self, target, status=0):
return AsymAck(
author = self.ghid,
target = target,
status = status
)
def make_nak(self, target, status=0):
return AsymNak(
author = self.ghid,
target = target,
status = status
)
def make_request(self, recipient, request):
self._typecheck_2ndparty(recipient)
# I'm actually okay with this performance hit, since it forces some
# level of type checking here. Which is, I think, in this case, good.
if isinstance(request, AsymHandshake):
request = GARQHandshake(
author = request.author,
target = request.target,
secret = request.secret
)
elif isinstance(request, AsymAck):
request = GARQAck(
author = request.author,
target = request.target,
status = request.status
)
elif isinstance(request, AsymNak):
request = GARQNak(
author = request.author,
target = request.target,
status = request.status
)
else:
raise TypeError(
'Request must be an AsymHandshake, AsymAck, or AsymNak '
'(or subclass thereof).'
)
request.pack()
plaintext = request.packed
# Convert the plaintext to a proper payload and create a garq from it
payload = self._encrypt_asym(recipient, plaintext)
del plaintext
garq = GARQ(
recipient = recipient.ghid,
payload = payload
)
# Pack 'er up and generate a MAC for it
garq.pack(cipher=self.ciphersuite, address_algo=self.address_algo)
garq.pack_signature(
self._mac(
key = self._derive_shared(recipient),
data = garq.ghid.address
)
)
return garq
@classmethod
def receive_container(cls, author, secret, container):
if not isinstance(container, GEOC):
raise TypeError(
'Container must be an unpacked GEOC, for example, as returned '
'from unpack_container.'
)
cls._typecheck_2ndparty(author)
signature = container.signature
cls._verify(author, signature, container.ghid.address)
plaintext = cls._decrypt(secret, container.payload)
# This will need to be converted into a namedtuple or something
return plaintext
@classmethod
def receive_bind_static(cls, binder, binding):
if not isinstance(binding, GOBS):
raise TypeError(
'Binding must be an unpacked GOBS, for example, as returned '
'from unpack_bind_static.'
)
cls._typecheck_2ndparty(binder)
signature = binding.signature
cls._verify(binder, signature, binding.ghid.address)
# This will need to be converted into a namedtuple or something
return binding.target
@classmethod
def receive_bind_dynamic(cls, binder, binding):
if not isinstance(binding, GOBD):
raise TypeError(
'Binding must be an unpacked GOBD, for example, as returned '
'from unpack_bind_dynamic.'
)
cls._typecheck_2ndparty(binder)
signature = binding.signature
cls._verify(binder, signature, binding.ghid.address)
# This will need to be converted into a namedtuple or something
return binding.target
@classmethod
def receive_debind(cls, debinder, debinding):
if not isinstance(debinding, GDXX):
raise TypeError(
'Debinding must be an unpacked GDXX, for example, as returned '
'from unpack_debind.'
)
cls._typecheck_2ndparty(debinder)
signature = debinding.signature
cls._verify(debinder, signature, debinding.ghid.address)
# This will need to be converted into a namedtuple or something
return debinding.target
def unpack_request(self, packed):
garq = GARQ.unpack(packed)
plaintext = self._decrypt_asym(garq.payload)
# Could do this with a loop, but it gets awkward when trying to
# assign stuff to the resulting object.
try:
unpacked = GARQHandshake.unpack(plaintext)
request = AsymHandshake(
author = unpacked.author,
target = unpacked.target,
secret = unpacked.secret
)
except ParseError:
try:
unpacked = GARQAck.unpack(plaintext)
request = AsymAck(
author = unpacked.author,
target = unpacked.target,
status = unpacked.status
)
except ParseError:
try:
unpacked = GARQNak.unpack(plaintext)
request = AsymNak(
author = unpacked.author,
target = unpacked.target,
status = unpacked.status
)
except ParseError:
raise SecurityError('Could not securely unpack request.')
garq._plaintext = request
garq._author = request.author
return garq
def receive_request(self, requestor, request):
''' Verifies the request and exposes its contents.
'''
# Typecheck all the things
self._typecheck_2ndparty(requestor)
# Also make sure the request is something we've already unpacked
if not isinstance(request, GARQ):
raise TypeError(
'Request must be an unpacked GARQ, as returned from '
'unpack_request.'
)
try:
plaintext = request._plaintext
except AttributeError as e:
raise TypeError(
'Request must be an unpacked GARQ, as returned from '
'unpack_request.'
) from e
self._verify_mac(
key = self._derive_shared(requestor),
data = request.ghid.address,
mac = request.signature
)
return plaintext
@classmethod
@abc.abstractmethod
def _generate_second_party(cls, keys, address_algo):
''' MUST ONLY be called when generating one from scratch, not
when loading one. Loading must always be done directly through
loading a SecondParty.
'''
pass
@abc.abstractmethod
def _generate_keys(self):
''' Create a set of keys for use in the identity.
Must return a mapping of keys with the following values:
{
'signature': <signature key>,
'encryption': <encryption key>,
'exchange': <exchange key>
}
In a form that is usable by the rest of the FirstParty
crypto functions (this is dependent on the individual class'
implementation, ex its crypto library).
'''
pass
@classmethod
@abc.abstractmethod
def new_secret(cls, *args, **kwargs):
    ''' Placeholder method to create new symmetric secret. Returns
    a Secret(). Subclasses call this via super() after choosing
    key material; the ciphersuite is filled in automatically.
    '''
    return Secret(cipher=cls._ciphersuite, *args, **kwargs)
@abc.abstractmethod
def _sign(self, data):
    ''' Placeholder signing method. Returns a signature over data
    using this identity's private signature key.
    '''
    pass
@abc.abstractmethod
def _verify(self, public, signature, data):
    ''' Verifies signature against data using SecondParty public.
    raises SecurityError if verification fails.
    returns True on success.
    '''
    pass
@abc.abstractmethod
def _encrypt_asym(self, public, data):
    ''' Placeholder asymmetric encryptor: encrypt data to the
    SecondParty's public encryption key.
    '''
    pass
@abc.abstractmethod
def _decrypt_asym(self, data):
    ''' Placeholder asymmetric decryptor: decrypt data with this
    identity's private encryption key.
    '''
    pass
@classmethod
@abc.abstractmethod
def _decrypt(cls, secret, data):
    ''' Placeholder symmetric decryptor: decrypt data using the
    given Secret.
    '''
    pass
@classmethod
@abc.abstractmethod
def _encrypt(cls, secret, data):
    ''' Placeholder symmetric encryptor: encrypt data using the
    given Secret.
    '''
    pass
@abc.abstractmethod
def _derive_shared(self, partner):
    ''' Derive a shared secret (not necessarily a Secret!) with the
    partner. Used as the MAC key for asymmetric requests.
    '''
    pass
@classmethod
@abc.abstractmethod
def _mac(cls, key, data):
    ''' Generate a MAC for data using key.
    '''
    pass
@classmethod
@abc.abstractmethod
def _verify_mac(cls, key, mac, data):
    ''' Verify an existing MAC for data using key. Raises
    SecurityError on mismatch; returns True on success.
    '''
    pass
@abc.abstractmethod
def _serialize(self):
    ''' Convert private keys into a standardized format. Don't save,
    just return a dictionary with bytes objects:
    {
        'ghid': self.ghid,
        'signature': self._signature_key,
        'encryption': self._encryption_key,
        'exchange': self._exchange_key
    }
    (etc)
    '''
    pass
@classmethod
@abc.abstractmethod
def _from_serialized(cls, serialization):
    ''' Create an instance of the class from a dictionary as created
    by cls._serialize.
    '''
    pass
class _ThirdPartyBase(_ObjectHandlerBase, metaclass=abc.ABCMeta):
    ''' Subclass this (on a per-ciphersuite basis) for servers, and
    other parties that have no access to privileged information.
    They can only verify.
    '''

    @property
    def ciphersuite(self):
        ''' Integer ciphersuite identifier for this class. '''
        return self._ciphersuite

    @classmethod
    def _dispatch_address(cls, address_algo):
        ''' Resolve 'default' to the class default address algorithm
        and reject unknown algorithms with ValueError.
        '''
        if address_algo == 'default':
            address_algo = cls.DEFAULT_ADDRESS_ALGO
        elif address_algo not in ADDRESS_ALGOS:
            raise ValueError(
                'Address algorithm unavailable for use: ' + str(address_algo)
            )
        return address_algo

    @staticmethod
    def unpack_object(packed):
        ''' Unpacks any Golix object.

        Tries each known Golix format in turn and returns the first
        one that parses. Raises ParseError if nothing matches.
        '''
        for golix_format in (GIDC, GEOC, GOBS, GOBD, GDXX, GARQ):
            try:
                obj = golix_format.unpack(packed)
            except (ParseError, TypeError):
                continue
            else:
                # Fix: stop at the first successful parse instead of
                # continuing and letting a later format clobber obj.
                break
        else:
            raise ParseError(
                'Packed data does not appear to be a Golix object.'
            )
        return obj

    @classmethod
    def unpack_request(cls, packed):
        ''' Unpack public everything from a request.
        (Cannot verify, at least for the existing ciphersuites, as of
        2016-03).
        '''
        garq = GARQ.unpack(packed)
        return garq

    @classmethod
    def verify_object(cls, second_party, obj):
        ''' Verifies the signature of any symmetric object (aka
        everything except GARQ) against data.

        raises ValueError if obj is asymmetric (GARQ) or inherently
        unverifiable (GIDC).
        raises TypeError if obj is not a Golix object at all.
        raises SecurityError if verification fails.
        returns True on success.
        '''
        if isinstance(obj, (GEOC, GOBS, GOBD, GDXX)):
            return cls._verify(
                public = second_party,
                signature = obj.signature,
                data = obj.ghid.address
            )
        elif isinstance(obj, GARQ):
            raise ValueError(
                'Asymmetric objects cannot be verified by third parties. '
                'They can only be verified by their recipients.'
            )
        elif isinstance(obj, GIDC):
            raise ValueError(
                'Identity containers are inherently un-verified.'
            )
        else:
            raise TypeError('Obj must be a Golix object: GIDC, GEOC, etc.')

    @classmethod
    @abc.abstractmethod
    def _verify(cls, public, signature, data):
        ''' Verifies signature against data using SecondParty public.
        raises SecurityError if verification fails.
        returns True on success.
        '''
        pass
class SecondParty0(_SecondPartyBase, _IdentityBase):
    # Ciphersuite 0 is the inert, testing-only suite: keys are opaque
    # symbolic objects, so packing/unpacking is an identity passthrough.
    _ciphersuite = 0

    @classmethod
    def _pack_keys(cls, keys):
        # No serialization needed for symbolic ciphersuite-0 keys.
        return keys

    @classmethod
    def _unpack_keys(cls, keys):
        # Inverse of _pack_keys; also a passthrough.
        return keys
class FirstParty0(_FirstPartyBase, _IdentityBase):
    ''' FOR TESTING PURPOSES ONLY.
    Entirely inoperative. Correct API, but ignores all input, creating
    only a symbolic output.
    NOTE THAT INHERITANCE ORDER MATTERS! Must be first a FirstParty,
    and second an Identity.
    '''
    _ciphersuite = 0
    _2PID = SecondParty0

    # Well it's not exactly repeating yourself, though it does mean there
    # are sorta two ways to perform decryption. Best practice = always decrypt
    # using the author's SecondParty

    @classmethod
    def _generate_second_party(cls, keys, address_algo):
        # Input keys are deliberately ignored; every ciphersuite-0
        # identity exposes the same symbolic public keys.
        keys = {
            'signature': _dummy_pubkey,
            'encryption': _dummy_pubkey,
            'exchange': _dummy_pubkey_exchange,
        }
        return cls._2PID.from_keys(keys, address_algo)

    def _generate_keys(self):
        # Symbolic "private" keys, identical to the public halves.
        return {
            'signature': _dummy_pubkey,
            'encryption': _dummy_pubkey,
            'exchange': _dummy_pubkey_exchange,
        }

    def _serialize(self):
        ''' Dump the identity into a plain dict, suitable for
        _from_serialized.
        '''
        return {
            'ghid': bytes(self.ghid),
            'signature': self._signature_key,
            'encryption': self._encryption_key,
            'exchange': self._exchange_key,
        }

    @classmethod
    def _from_serialized(cls, serialization):
        ''' Rebuild an identity from the dict produced by _serialize.
        '''
        try:
            ghid = Ghid.from_bytes(serialization['ghid'])
            keys = {
                name: serialization[name]
                for name in ('signature', 'encryption', 'exchange')
            }
        except (TypeError, KeyError) as e:
            raise TypeError(
                'serialization must be compatible with _serialize.'
            ) from e
        return cls(keys=keys, ghid=ghid)

    @classmethod
    def new_secret(cls):
        ''' Placeholder method to create new symmetric secret
        (all-zero key, no seed).
        '''
        return super().new_secret(key=bytes(32), seed=None)

    def _sign(self, data):
        ''' Placeholder signing method: always returns the canned
        dummy signature, regardless of data.
        '''
        return _dummy_signature

    @classmethod
    def _verify(cls, public, signature, data):
        ''' Placeholder verification: typecheck the party, then
        unconditionally succeed.
        '''
        cls._typecheck_2ndparty(public)
        return True

    def _encrypt_asym(self, public, data):
        ''' Placeholder asymmetric encryptor: canned dummy blob.
        '''
        self._typecheck_2ndparty(public)
        return _dummy_asym

    def _decrypt_asym(self, data):
        ''' Placeholder asymmetric decryptor.
        Note that downstream parsing of the result will error out,
        since the dummy blob is not a valid declaration of internal
        content -- but it is a usable starting point.
        '''
        return _dummy_asym

    @classmethod
    def _decrypt(cls, secret, data):
        ''' Placeholder symmetric decryptor: identity transform.
        '''
        return data

    @classmethod
    def _encrypt(cls, secret, data):
        ''' Placeholder symmetric encryptor: identity transform.
        '''
        return data

    def _derive_shared(self, partner):
        ''' Placeholder key exchange: a fixed byte string.
        '''
        self._typecheck_2ndparty(partner)
        return b'[[ Placeholder shared secret ]]'

    @classmethod
    def _mac(cls, key, data):
        ''' Placeholder MAC: always the canned dummy MAC.
        '''
        return _dummy_mac

    @classmethod
    def _verify_mac(cls, key, mac, data):
        ''' Placeholder MAC verification: always succeeds.
        '''
        return True
class ThirdParty0(_ThirdPartyBase):
    # Testing-only ciphersuite; verification is borrowed wholesale.
    _ciphersuite = 0
    # Note that, since this classmethod is from a different class, the
    # cls passed internally will be FirstParty0, NOT ThirdParty0.
    _verify = FirstParty0._verify
class SecondParty1(_SecondPartyBase, _IdentityBase):
    ''' Public half of a ciphersuite-1 identity: 4096-bit RSA
    signature and encryption keys plus an X25519 exchange key.
    '''
    _ciphersuite = 1

    @classmethod
    def _pack_keys(cls, keys):
        ''' Serialize public keys into fixed-width byte fields. '''
        # An RSA public key is represented by its 4096-bit modulus;
        # the exponent is fixed at 65537 across the ciphersuite.
        def modulus_bytes(key):
            return int.to_bytes(
                key.public_numbers().n,
                length = 512,
                byteorder = 'big'
            )
        return {
            'signature': modulus_bytes(keys['signature']),
            'encryption': modulus_bytes(keys['encryption']),
            'exchange': keys['exchange'].public,
        }

    @classmethod
    def _unpack_keys(cls, keys):
        ''' Inverse of _pack_keys: rebuild key objects from bytes. '''
        def modulus_key(raw):
            n = int.from_bytes(raw, byteorder='big')
            return rsa.RSAPublicNumbers(n=n, e=65537).public_key(
                CRYPTO_BACKEND
            )
        return {
            'signature': modulus_key(keys['signature']),
            'encryption': modulus_key(keys['encryption']),
            'exchange': ECDHPublic(bytes(keys['exchange'])),
        }
# RSA-PSS Signature salt length.
# Put these here because explicit is better than implicit!
# (SHA-512 digest size: 64 bytes.)
_PSS_SALT_LENGTH = hashes.SHA512.digest_size
class FirstParty1(_FirstPartyBase, _IdentityBase):
    ''' Ciphersuite-1 first party: RSA-4096 with SHA-512 (PSS
    signatures, OAEP encryption), X25519 exchange with HKDF-SHA512
    derivation, HMAC-SHA512 MACs, and AES-256-CTR symmetric crypto.
    '''
    _ciphersuite = 1
    _2PID = SecondParty1

    # Well it's not exactly repeating yourself, though it does mean there
    # are sorta two ways to perform decryption. Best practice = always decrypt
    # using the author's SecondParty

    @classmethod
    def _generate_second_party(cls, keys, address_algo):
        # Extract only the public halves, then drop our reference to
        # the private keys immediately.
        pubkeys = {
            'signature': keys['signature'].public_key(),
            'encryption': keys['encryption'].public_key(),
            'exchange': keys['exchange'].get_public()
        }
        del keys
        return cls._2PID.from_keys(keys=pubkeys, address_algo=address_algo)

    @classmethod
    def _generate_keys(cls):
        ''' Generate fresh RSA-4096 signature/encryption keys and an
        X25519 exchange key.
        '''
        keys = {}
        keys['signature'] = rsa.generate_private_key(
            public_exponent = 65537,
            key_size = 4096,
            backend = CRYPTO_BACKEND
        )
        keys['encryption'] = rsa.generate_private_key(
            public_exponent = 65537,
            key_size = 4096,
            backend = CRYPTO_BACKEND
        )
        keys['exchange'] = ECDHPrivate()
        return keys

    def _serialize(self):
        ''' Dump private keys as unencrypted PKCS8/DER (raw bytes for
        the exchange key), per the _serialize contract.
        '''
        return {
            'ghid': bytes(self.ghid),
            'signature': self._signature_key.private_bytes(
                encoding = serialization.Encoding.DER,
                format = serialization.PrivateFormat.PKCS8,
                encryption_algorithm = serialization.NoEncryption()
            ),
            'encryption': self._encryption_key.private_bytes(
                encoding = serialization.Encoding.DER,
                format = serialization.PrivateFormat.PKCS8,
                encryption_algorithm = serialization.NoEncryption()
            ),
            'exchange': bytes(self._exchange_key.private)
        }

    @classmethod
    def _from_serialized(cls, condensed):
        ''' Rebuild an identity from the dict produced by _serialize.
        '''
        try:
            ghid = Ghid.from_bytes(condensed['ghid'])
            keys = {
                'signature': serialization.load_der_private_key(
                    data = condensed['signature'],
                    password = None,
                    backend = CRYPTO_BACKEND
                ),
                'encryption': serialization.load_der_private_key(
                    data = condensed['encryption'],
                    password = None,
                    backend = CRYPTO_BACKEND
                ),
                'exchange': ECDHPrivate.load(condensed['exchange'])
            }
        except (TypeError, KeyError) as e:
            raise TypeError(
                'serialization must be compatible with _serialize.'
            ) from e
        return cls(keys=keys, ghid=ghid)

    @classmethod
    def new_secret(cls):
        ''' Returns a new secure Secret(): 256-bit AES key plus
        128-bit CTR nonce.
        '''
        key = os.urandom(32)
        nonce = os.urandom(16)
        return super().new_secret(key=key, seed=nonce)

    @classmethod
    def _encrypt(cls, secret, data):
        ''' Symmetric encryptor (AES-256-CTR).
        '''
        # Could we do eg memoryview instead?
        if not isinstance(data, bytes):
            data = bytes(data)
        instance = ciphers.Cipher(
            ciphers.algorithms.AES(secret.key),
            ciphers.modes.CTR(secret.seed),
            backend = CRYPTO_BACKEND
        )
        worker = instance.encryptor()
        return worker.update(data) + worker.finalize()

    @classmethod
    def _decrypt(cls, secret, data):
        ''' Symmetric decryptor (AES-256-CTR).
        Handle multiple ciphersuites by having a SecondParty for
        whichever author created it, and calling their decrypt instead.
        '''
        # Could we do eg memoryview instead?
        if not isinstance(data, bytes):
            data = bytes(data)
        instance = ciphers.Cipher(
            ciphers.algorithms.AES(secret.key),
            ciphers.modes.CTR(secret.seed),
            backend = CRYPTO_BACKEND
        )
        worker = instance.decryptor()
        return worker.update(data) + worker.finalize()

    def _sign(self, data):
        ''' RSA-PSS/SHA-512 signing.
        data is expected to be a pre-computed digest, so the signer's
        hash context is swapped for a no-op passthrough.
        '''
        signer = self._signature_key.signer(
            padding.PSS(
                mgf = padding.MGF1(hashes.SHA512()),
                salt_length = _PSS_SALT_LENGTH
            ),
            hashes.SHA512()
        )
        # HACK: inject a no-op hash context so the already-hashed data
        # is signed as-is.
        signer._hash_ctx = _NoopSHA512(data)
        return signer.finalize()
        # IT WOULD BE NICE TO BE ABLE TO USE THIS GRRRRRRRRRRRRRRRRRR
        # (Previously these statements sat, unreachable, after the
        # return above; kept as a comment to match _verify below.)
        # signature = self._signature_key.sign(
        #     bytes(data),
        #     padding.PSS(
        #         mgf = padding.MGF1(hashes.SHA512()),
        #         salt_length = _PSS_SALT_LENGTH
        #     ),
        #     _NoopSHA512(data)
        # )
        # return signature

    @classmethod
    def _verify(cls, public, signature, data):
        ''' Verifies an author's signature against bites. Errors out if
        unsuccessful. Returns True if successful.
        Data must be bytes-like (a pre-computed digest). Signature
        must be bytes-like.
        '''
        cls._typecheck_2ndparty(public)
        try:
            verifier = public._signature_key.verifier(
                bytes(signature),
                padding.PSS(
                    mgf = padding.MGF1(hashes.SHA512()),
                    salt_length = _PSS_SALT_LENGTH
                ),
                hashes.SHA512()
            )
            # Same no-op hash-context hack as _sign above.
            verifier._hash_ctx = _NoopSHA512(data)
            verifier.verify()
            # IT WOULD BE NICE TO BE ABLE TO USE THIS TOO!!!! grumble grumble
            # public._signature_key.verify(
            #     bytes(signature),
            #     bytes(data),
            #     padding.PSS(
            #         mgf = padding.MGF1(hashes.SHA512()),
            #         salt_length = _PSS_SALT_LENGTH
            #     ),
            #     _NoopSHA512(data)
            # )
        except InvalidSignature as exc:
            raise SecurityError('Failed to verify signature.') from exc
        return True

    def _encrypt_asym(self, public, data):
        ''' Asymmetric encryptor: RSA-OAEP with SHA-512 to the
        partner's public encryption key.
        '''
        self._typecheck_2ndparty(public)
        ciphertext = public._encryption_key.encrypt(
            bytes(data),
            padding.OAEP(
                mgf = padding.MGF1(algorithm=hashes.SHA512()),
                algorithm = hashes.SHA512(),
                label = b''
            )
        )
        return ciphertext

    def _decrypt_asym(self, data):
        ''' Asymmetric decryptor: RSA-OAEP with SHA-512 using our
        private encryption key.
        '''
        plaintext = self._encryption_key.decrypt(
            bytes(data),
            padding.OAEP(
                mgf = padding.MGF1(algorithm=hashes.SHA512()),
                algorithm = hashes.SHA512(),
                label = b''
            )
        )
        return plaintext

    def _derive_shared(self, partner):
        ''' Derive a shared secret with the partner via X25519 + HKDF.
        '''
        # Call the donna25519 exchange method and return bytes
        ecdh = self._exchange_key.do_exchange(partner._exchange_key)
        # Get both of our addresses and then the bitwise XOR of them both
        # (XOR is symmetric, so both parties derive the same salt).
        my_hash = self.ghid.address
        their_hash = partner.ghid.address
        salt = bytes([a ^ b for a, b in zip(my_hash, their_hash)])
        instance = hkdf.HKDF(
            algorithm = hashes.SHA512(),
            length = hashes.SHA512.digest_size,
            salt = salt,
            info = b'',
            backend = CRYPTO_BACKEND
        )
        key = instance.derive(ecdh)
        # Might as well do this immediately, not that it really adds anything
        del ecdh, my_hash, their_hash, salt
        return key

    @classmethod
    def _mac(cls, key, data):
        ''' Generate an HMAC-SHA512 for data using key.
        '''
        h = hmac.HMAC(
            key,
            hashes.SHA512(),
            backend = CRYPTO_BACKEND
        )
        h.update(data)
        return h.finalize()

    @classmethod
    def _verify_mac(cls, key, mac, data):
        ''' Verify an existing MAC. Raises SecurityError on mismatch;
        returns True on success.
        '''
        if not isinstance(mac, bytes):
            mac = bytes(mac)
        if not isinstance(data, bytes):
            data = bytes(data)
        h = hmac.HMAC(
            key,
            hashes.SHA512(),
            backend = CRYPTO_BACKEND
        )
        h.update(data)
        try:
            h.verify(mac)
        except InvalidSignature as exc:
            raise SecurityError('Failed to verify MAC.') from exc
        return True
class ThirdParty1(_ThirdPartyBase):
    _ciphersuite = 1
    # Note that, since this classmethod is from a different class, the
    # cls passed internally will be FirstParty1, NOT ThirdParty1.
    _verify = FirstParty1._verify
|
Muterra/py_golix
|
golix/cipher.py
|
Python
|
unlicense
| 43,384
|
from django.contrib import admin
from django import forms
from django.contrib.auth.models import User
from centennial.models import UserProfile, BibliocommonsLink
# This module registers and customizes the admin for user-related models.
class UserProfileAdmin(admin.ModelAdmin):
    # Show the owning user and their point total in the changelist.
    list_display = ['user', 'points']
# Expose the profile and Bibliocommons-link models in the admin site.
admin.site.register(UserProfile, UserProfileAdmin)
admin.site.register(BibliocommonsLink)
class UserForm(forms.ModelForm):
    ''' ModelForm for the built-in User model, used by UserAdmin. '''

    class Meta:
        model = User
        # Required on Django >= 1.8: a ModelForm must declare its
        # fields or raise ImproperlyConfigured. '__all__' preserves
        # the old implicit all-fields behavior.
        fields = '__all__'

    def __init__(self, *args, **kwargs):
        super(UserForm, self).__init__(*args, **kwargs)
        # Deliberately disabled; re-enable to make these mandatory.
        #self.fields['email'].required = True
        #self.fields['first_name'].required = True
        #self.fields['last_name'].required = True
class UserAdmin(admin.ModelAdmin):
    # Use the customized UserForm for the add/change pages.
    form = UserForm
# Replace the stock User admin with our customized one.
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
|
Edmonton-Public-Library/centennial
|
centennial/admin.py
|
Python
|
mit
| 823
|
import os, shutil
import glob, zipfile
import re
import pprint
from xml.etree.ElementTree import ElementTree
from collections import OrderedDict
from operator import itemgetter
import logging
import datetime
# Timestamped log filename so repeated runs don't clobber each other.
dateTimeInfo = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
loggingName = "aids_export" + dateTimeInfo + ".log"
logging.basicConfig(filename=loggingName, level=logging.INFO)
# Source and output locations (mapped Windows drives).
itemDirectory = "J:\\"
zipOutDir = "I:\\aids-meta-zip-out\\"
xmlOutputDir = "G:\\aids-drupal-xml\\"
# Only files named like AP<digits>.tif are considered.
fileMatch = "AP[0-9]+\.tif"
# pull list of SORTED item IDs from folder where XML files are stored
# (assumes Jeff will create one XML file / item and place them in a directory)
def getFileList(itemDirectory, extension, fileMatch=r"AP[0-9]+\.tif"):
    ''' Walk itemDirectory and return an OrderedDict mapping item ID
    (file name without extension) -> file size in bytes, ordered from
    smallest to largest.

    Only files whose name matches the fileMatch pattern AND end with
    the given extension are included; everything else is logged and
    skipped. fileMatch defaults to the module-wide pattern, so
    existing two-argument callers behave exactly as before.
    '''
    pattern = re.compile(fileMatch)
    suffix = "." + extension
    itemIdList = {}
    for root, sub, files in os.walk(itemDirectory):
        for item in files:
            if not pattern.match(item):
                logging.info("Skipping file " + item + " name pattern did not match")
                continue
            print("adding item " + item)
            # Fix: check the file actually ENDS with the extension;
            # the old substring test matched "tif" anywhere in the name.
            if item.endswith(suffix):
                fileSize = os.path.getsize(os.path.join(root, item))
                print("Adding found file " + item + " of size " + str(fileSize))
                # Key on the bare item ID (name without extension).
                itemIdList[item.split('.')[0]] = fileSize
            else:
                logging.info("Could not find file name key " + item + " with extension " + extension + " name pattern did not match")
    # sorted smallest to largest
    return OrderedDict(sorted(itemIdList.items(), key=itemgetter(1)))
# walk through file tree, find all items matching an item ID, return list of files with paths
def findMatchingItems(idStr, itemDirectoryStr):
    ''' Walk the directory tree rooted at itemDirectoryStr and return
    the full paths of every file whose name starts with idStr
    followed by a dot (i.e. any extension of that item ID).
    '''
    return [
        os.path.join(root, name)
        for root, dirs, names in os.walk(itemDirectoryStr)
        for name in names
        if re.match(idStr + "\.", name)
    ]
#get the list of files that can be added to the zip
#if the data cannot be found it is logged
def getFileSet(idList):
    ''' For each item ID, pair its single matching data file with its
    Drupal XML metadata file and collect both paths.

    IDs with a missing XML file, or with zero / multiple data files,
    are logged and skipped.
    '''
    print("create zipable file set called")
    filesToAdd = []
    # 'item_id' instead of the builtin-shadowing 'id'.
    for item_id in idList:
        myFiles = findMatchingItems(item_id, itemDirectory)
        print("adding id " + item_id + " to current set")
        if len(myFiles) != 1:
            logging.info("Bad files had len of " + str(len(myFiles)) + " for id " + item_id)
            continue
        xmlFile = os.path.join(xmlOutputDir, item_id + ".xml")
        if os.path.isfile(xmlFile):
            filesToAdd.append(xmlFile)
            filesToAdd.append(myFiles[0])
        else:
            logging.info("skipping file " + item_id + " no xml file found")
    print("done processing zip set")
    return filesToAdd
#zip up the list of files into a zip archive
def createZipSet(files, zipFileName):
    ''' Bundle the given files (flattened to their base names) into a
    single zip archive at zipFileName. Uses Zip64 so archives may
    exceed the classic 2GB/65535-entry limits.
    '''
    print("create zip set called " + zipFileName)
    with zipfile.ZipFile(zipFileName, 'w', allowZip64=True) as archive:
        for path in files:
            base = os.path.basename(path)
            print("adding file " + base)
            archive.write(path, base)
def createFolderSet(files, folderName):
    ''' Folder-output counterpart of createZipSet: copy the given
    files (flattened to their base names) into folderName, creating
    the directory if needed.

    Fix: the original was an empty stub that only printed a message,
    so the "folder output" mode produced nothing on disk.
    '''
    print("Create folder set called " + folderName)
    if not os.path.isdir(folderName):
        os.makedirs(folderName)
    for path in files:
        shutil.copy(path, os.path.join(folderName, os.path.basename(path)))
def processSets(offset, maxFilesToProcess, zipOutput=""):
    ''' Partition the matching .tif items into ~2GB sets and emit each
    set as either a zip archive or a folder of copies.

    offset: 1-based counter position (inclusive) to start from; falsy
        means start at the beginning.
    maxFilesToProcess: highest counter value to process; falsy means
        process everything.
    zipOutput: "yes" (case-insensitive) for zip output; anything else
        produces folder output. Defaults to "" (folder output) so the
        interactive entry point -- which previously omitted this
        argument and crashed with a TypeError -- works either way.
    '''
    fileIdList = getFileList(itemDirectory, "tif")
    setSize = len(fileIdList)
    isZipOutput = False
    if not maxFilesToProcess:
        maxFilesToProcess = setSize + 1
    if not offset:
        offset = 0
    if zipOutput.lower() == "yes":
        isZipOutput = True
    offset = int(offset)
    maxFilesToProcess = int(maxFilesToProcess)
    setSize = int(setSize)
    print("Max files to process = " + str(maxFilesToProcess))
    print("Offset = " + str(offset))
    counter = 1
    totalBytes = 0
    fileSet = []
    startCount = 1
    for fileName, fileSize in fileIdList.items():
        if (counter >= offset) and (counter <= maxFilesToProcess):
            print("counter = " + str(counter) + " processing file " + fileName + " with size " + str(fileSize))
            if (totalBytes + fileSize) < 2000000000:
                # Keep adding files until the 2GB set limit is reached.
                print("file size " + str(totalBytes + fileSize) + " less than 2Gb")
                totalBytes = totalBytes + fileSize
                fileSet.append(fileName)
                counter = counter + 1
            else:
                # 2GB limit hit: flush the current set, start a new one
                # containing this file.
                print("file size " + str(totalBytes + fileSize) + " Larger than 2Gb adding file " + fileName + " to next set")
                zipFileSet = getFileSet(fileSet)
                if isZipOutput:
                    createZipSet(zipFileSet, zipOutDir + "aep_" + str(startCount) + "_to_" + str(counter) + ".zip")
                    print("creating zip file set " + zipOutDir + "aep_" + str(startCount) + "_to_" + str(counter) + ".zip size = " + str(totalBytes))
                else:
                    createFolderSet(zipFileSet, zipOutDir + "aep_" + str(startCount) + "_to_" + str(counter))
                    print("creating folder file set " + zipOutDir + "aep_" + str(startCount) + "_to_" + str(counter) + ".zip size = " + str(totalBytes))
                totalBytes = fileSize
                fileSet = []
                fileSet.append(fileName)
                counter = counter + 1
                startCount = counter
                print("resetting startCount " + str(startCount) + "offset = " + str(offset) + "")
    if len(fileSet) > 0:
        # Flush the remaining partial set.
        zipFileSet = getFileSet(fileSet)
        if isZipOutput:
            createZipSet(zipFileSet, zipOutDir + "aep_" + str(startCount) + "_to_" + str(counter - 1) + ".zip")
            print("creating zip set " + zipOutDir + "aep_" + str(startCount) + "_to_" + str(counter - 1) + ".zip size = " + str(totalBytes))
        else:
            createFolderSet(zipFileSet, zipOutDir + "aep_" + str(startCount) + "_to_" + str(counter - 1))
            print("creating folder set " + zipOutDir + "aep_" + str(startCount) + "_to_" + str(counter - 1) + " size = " + str(totalBytes))
# maxFilesPerZip = input("Please enter maximum number of files per zip file: ")
maxFilesToProcess = input("Please enter maximum number of files to process enter to process all: ")
offset = input("Please enter the offset position (inclusive) press enter to start from the beginning: ")
zipOutput = input("Zip output(yes/no) - enter and default is folder output: ")
# Fix: zipOutput was collected but never passed, so processSets (which
# requires three arguments) crashed with a TypeError on every run.
processSets(offset, maxFilesToProcess, zipOutput)
|
rochester-rcl/islandora-import-scripts
|
aids_by_size_pkg.py
|
Python
|
mit
| 6,819
|
# Copyright 2021 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Defines argument types for use with argparse."""
import argparse
import logging
from pathlib import Path
def directory(arg: str) -> Path:
    """argparse type: resolve an existing directory, or reject."""
    candidate = Path(arg)
    if not candidate.is_dir():
        raise argparse.ArgumentTypeError(f'"{candidate}" is not a directory')
    return candidate.resolve()
def log_level(arg: str) -> int:
    """argparse type: convert a level name (e.g. "debug") to its numeric
    logging level.

    Raises argparse.ArgumentTypeError if the (uppercased) name is not a
    logging attribute, or names something other than an integer level
    (e.g. "BASIC_FORMAT", which the original implementation returned
    verbatim as a string).
    """
    try:
        value = getattr(logging, arg.upper())
    except AttributeError as err:
        # Chain the original error for easier debugging (B904).
        raise argparse.ArgumentTypeError(
            f'"{arg.upper()}" is not a valid log level') from err
    if not isinstance(value, int):
        raise argparse.ArgumentTypeError(
            f'"{arg.upper()}" is not a valid log level')
    return value
|
google/pigweed
|
pw_cli/py/pw_cli/argument_types.py
|
Python
|
apache-2.0
| 1,085
|
# -*- coding: utf-8 -*-

# Sample sigal configuration used by the test gallery.
author = 'John Doe'
title = u"Sigal test gallery ☺"
# Directory containing the source pictures.
source = 'pictures'
# Thumbnail naming suffix and geometry.
thumb_suffix = '.tn'
thumb_size = (200, 150)
keep_orig = True
# Extra links rendered in the gallery navigation.
links = [('Example link', 'http://example.org'),
         ('Another link', 'http://example.org')]
plugins = ['sigal.plugins.adjust', 'sigal.plugins.copyright']
copyright = u"© An example copyright message"
# Options consumed by the 'adjust' plugin above.
adjust_options = {'color': 0.0, 'brightness': 1.0,
                  'contrast': 1.0, 'sharpness': 0.0}
# theme = 'galleria'
# thumb_size = (280, 210)
|
muggenhor/sigal
|
tests/sample/sigal.conf.py
|
Python
|
mit
| 522
|
from flask import Flask, request
import unittest
from beaker.middleware import SessionMiddleware
import re
from datetime import datetime
import pytz
from dateutil import parser
class TestFlaskApp(unittest.TestCase):
    # End-to-end test of beaker session handling behind a small Flask
    # app: redis-backed sessions with Secure/HttpOnly cookies scoped to
    # example.com.

    def setUp(self):
        # Build a throwaway Flask app wrapped in beaker's middleware.
        app = Flask(__name__)
        session_options = {
            'session.cookie_domain': 'example.com',
            'session.cookie_expires': True,
            'session.cookie_path': '/',
            'session.httponly': True,
            'session.key': 'session_id',
            'session.secure': True,
            'session.serializer': 'json',
            'session.timeout': 600,
            'session.type': 'redis',
            # NOTE(review): assumes a local redis is listening here.
            'session.url': '127.0.0.1:6379'
        }
        app.wsgi_app = SessionMiddleware(app.wsgi_app, session_options)
        app.config['TESTING'] = True

        @app.route('/', methods=['GET'])
        def index():
            return 'OK'

        @app.route('/session', methods=['PUT'])
        def put_session():
            # Mark the session authenticated and persist it.
            session = request.environ['beaker.session']
            session['authenticated'] = True
            session.save()
            return 'OK'

        @app.route('/session', methods=['DELETE'])
        def delete_session():
            # Only an authenticated session may delete itself.
            session = request.environ['beaker.session']
            if session.get('authenticated', False):
                session.delete()
            return 'OK'

        @app.route('/private', methods=['GET'])
        def access_private():
            # Gate on the 'authenticated' flag set by put_session.
            session = request.environ['beaker.session']
            if session.get('authenticated', False):
                return 'OK'
            return 'Forbidden', 403

        self.client = app.test_client()

    def create_session(self):
        # PUT /session must set a 32-hex-char session cookie with the
        # expected Domain / HttpOnly / Path / Secure attributes.
        r = self.client.put('/session')
        assert r.status_code == 200
        regexp = r'^\s+session_id\=(?P<id>[a-f0-9]{32}); Domain\=example.com; (h|H)ttp(o|O)nly; Path\=/; (s|S)ecure$'
        match = re.match(regexp, r.headers['set-cookie'])
        assert match is not None
        assert len(match.group('id')) == 32
        return match.group('id')

    def drop_session(self, session_id):
        # DELETE /session must re-issue the cookie with an expiry in
        # the past so the browser discards it.
        headers = {'Cookie': 'session_id={0}'.format(session_id)}
        r = self.client.delete('/session', headers=headers)
        assert r.status_code == 200
        regexp = r'^\s+session_id\={0}; Domain\=example.com; expires\=(?P<expires>.+); (h|H)ttp(o|O)nly; Path\=/; (s|S)ecure'.format(session_id)
        match = re.match(regexp, r.headers['set-cookie'])
        assert match is not None
        now = datetime.now(tz=pytz.timezone('GMT'))
        expires = parser.parse(match.group('expires'))
        assert expires < now

    def test_app(self):
        # Full lifecycle: create session, access private page, delete
        # session, then verify access is forbidden again.
        session_id = self.create_session()
        print(session_id)
        headers = {'Cookie': 'session_id={0}'.format(session_id)}
        r2 = self.client.get('/private', headers=headers)
        assert r2.status_code == 200
        self.drop_session(session_id)
        r4 = self.client.get('/private')
        assert r4.status_code == 403
|
aspyatkin/beakeredis
|
tests/test_app.py
|
Python
|
mit
| 3,020
|
def genPrimes():
    ''' Infinite generator of primes (2, 3, 5, 7, 11, ...) by trial
    division against all previously yielded primes.

    Fixes over the original: drops the always-true
    `if len(generatedPrimes) == 0` guard and the redundant special
    case for 2, and short-circuits the divisor scan via all().
    '''
    generatedPrimes = []
    candidate = 2
    while True:
        # Prime iff no previously found prime divides the candidate.
        if all(candidate % p for p in generatedPrimes):
            yield candidate
            generatedPrimes.append(candidate)
        candidate += 1
|
kirillmorozov/MIT6.00.1x
|
l12p5.py
|
Python
|
gpl-2.0
| 363
|
from prettytable import PrettyTable
from halonctl.modapi import Formatter
class TableFormatter(Formatter):
    ''' Renders result rows as a borderless, left-aligned text table.
    The first row of `data` supplies the column headers.
    '''

    def format(self, data, args):
        headers, rows = data[0], data[1:]
        table = PrettyTable(headers)
        # Compact output: left-aligned, no border, tight padding.
        table.align = "l"
        table.border = False
        table.left_padding_width = 0
        table.right_padding_width = 2
        for row in rows:
            table.add_row(row)
        return table.get_string()
# Module-level instance picked up by halonctl's formatter discovery.
formatter = TableFormatter()
|
halonsecurity/halonctl
|
halonctl/formatters/table.py
|
Python
|
bsd-3-clause
| 385
|
from __future__ import unicode_literals
import datetime
from decimal import Decimal
from django.test import TestCase
from django.test.client import RequestFactory
from django.utils import unittest
from rest_framework import generics, status, filters
from rest_framework.compat import django_filters
from rest_framework.tests.models import FilterableItem, BasicModel
factory = RequestFactory()
if django_filters:
    # Basic filter on a list view.
    class FilterFieldsRootView(generics.ListCreateAPIView):
        model = FilterableItem
        filter_fields = ['decimal', 'date']
        filter_backend = filters.DjangoFilterBackend

    # These classes are used to test a filter class.
    class SeveralFieldsFilter(django_filters.FilterSet):
        text = django_filters.CharFilter(lookup_type='icontains')
        decimal = django_filters.NumberFilter(lookup_type='lt')
        date = django_filters.DateFilter(lookup_type='gt')

        class Meta:
            model = FilterableItem
            fields = ['text', 'decimal', 'date']

    class FilterClassRootView(generics.ListCreateAPIView):
        model = FilterableItem
        filter_class = SeveralFieldsFilter
        filter_backend = filters.DjangoFilterBackend

    # These classes are used to test a misconfigured filter class:
    # the filter targets BasicModel while the view serves
    # FilterableItem, which should trigger an assertion error.
    class MisconfiguredFilter(django_filters.FilterSet):
        text = django_filters.CharFilter(lookup_type='icontains')

        class Meta:
            model = BasicModel
            fields = ['text']

    class IncorrectlyConfiguredRootView(generics.ListCreateAPIView):
        model = FilterableItem
        filter_class = MisconfiguredFilter
        filter_backend = filters.DjangoFilterBackend
class IntegrationTestFiltering(TestCase):
"""
Integration tests for filtered list views.
"""
def setUp(self):
"""
Create 10 FilterableItem instances.
"""
base_data = ('a', Decimal('0.25'), datetime.date(2012, 10, 8))
for i in range(10):
text = chr(i + ord(base_data[0])) * 3 # Produces string 'aaa', 'bbb', etc.
decimal = base_data[1] + i
date = base_data[2] - datetime.timedelta(days=i * 2)
FilterableItem(text=text, decimal=decimal, date=date).save()
self.objects = FilterableItem.objects
self.data = [
{'id': obj.id, 'text': obj.text, 'decimal': obj.decimal, 'date': obj.date.isoformat()}
for obj in self.objects.all()
]
@unittest.skipUnless(django_filters, 'django-filters not installed')
def test_get_filtered_fields_root_view(self):
"""
GET requests to paginated ListCreateAPIView should return paginated results.
"""
view = FilterFieldsRootView.as_view()
# Basic test with no filter.
request = factory.get('/')
response = view(request).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, self.data)
# Tests that the decimal filter works.
search_decimal = Decimal('2.25')
request = factory.get('/?decimal=%s' % search_decimal)
response = view(request).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
expected_data = [f for f in self.data if f['decimal'] == search_decimal]
self.assertEqual(response.data, expected_data)
# Tests that the date filter works.
search_date = datetime.date(2012, 9, 22)
request = factory.get('/?date=%s' % search_date) # search_date str: '2012-09-22'
response = view(request).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
expected_data = [f for f in self.data if datetime.datetime.strptime(f['date'], '%Y-%m-%d').date() == search_date]
self.assertEqual(response.data, expected_data)
@unittest.skipUnless(django_filters, 'django-filters not installed')
def test_get_filtered_class_root_view(self):
"""
GET requests to filtered ListCreateAPIView that have a filter_class set
should return filtered results.
"""
view = FilterClassRootView.as_view()
# Basic test with no filter.
request = factory.get('/')
response = view(request).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, self.data)
# Tests that the decimal filter set with 'lt' in the filter class works.
search_decimal = Decimal('4.25')
request = factory.get('/?decimal=%s' % search_decimal)
response = view(request).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
expected_data = [f for f in self.data if f['decimal'] < search_decimal]
self.assertEqual(response.data, expected_data)
# Tests that the date filter set with 'gt' in the filter class works.
search_date = datetime.date(2012, 10, 2)
request = factory.get('/?date=%s' % search_date) # search_date str: '2012-10-02'
response = view(request).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
expected_data = [f for f in self.data if datetime.datetime.strptime(f['date'], '%Y-%m-%d').date() > search_date]
self.assertEqual(response.data, expected_data)
# Tests that the text filter set with 'icontains' in the filter class works.
search_text = 'ff'
request = factory.get('/?text=%s' % search_text)
response = view(request).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
expected_data = [f for f in self.data if search_text in f['text'].lower()]
self.assertEqual(response.data, expected_data)
# Tests that multiple filters works.
search_decimal = Decimal('5.25')
search_date = datetime.date(2012, 10, 2)
request = factory.get('/?decimal=%s&date=%s' % (search_decimal, search_date))
response = view(request).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
expected_data = [f for f in self.data if
datetime.datetime.strptime(f['date'], '%Y-%m-%d').date() > search_date and
f['decimal'] < search_decimal]
self.assertEqual(response.data, expected_data)
@unittest.skipUnless(django_filters, 'django-filters not installed')
def test_incorrectly_configured_filter(self):
    """
    An error should be displayed when the filter class is misconfigured.
    """
    view = IncorrectlyConfiguredRootView.as_view()
    # Rendering a list view whose filter configuration is broken must raise
    # an AssertionError rather than silently returning results.
    self.assertRaises(AssertionError, view, factory.get('/'))
@unittest.skipUnless(django_filters, 'django-filters not installed')
def test_unknown_filter(self):
    """
    GET requests with filters that aren't configured should return 200.
    """
    view = FilterFieldsRootView.as_view()
    # 'integer' is not among the configured filter fields, so the backend
    # must simply ignore it instead of failing.
    response = view(factory.get('/?integer=%s' % 10)).render()
    self.assertEqual(response.status_code, status.HTTP_200_OK)
|
BryceBrown/LinkstrDjango
|
rest_framework/tests/filterset.py
|
Python
|
apache-2.0
| 7,173
|
# Interactive-shell bootstrap: prepare an application context so project
# code (models, queries) can be used from a REPL session.
from tools import db_utils
from util import patches
# `patches` is imported for its side effects (monkey patches); the assert
# silences "unused import" linters and confirms the module actually loaded.
assert patches
# Push a request context so code that expects one works outside of an
# actual HTTP request.
db_utils.request_context().push()
|
jlgoldman/writetogov
|
tools/interactive.py
|
Python
|
bsd-3-clause
| 102
|
from django import forms
from populous.utils.validators import RelaxNGValidator
from populous.inlines.forms.widgets import InlineTextareaWidget
from populous.inlines.utils import get_absolute_schema_path, html_to_unicode
class InlineField(forms.CharField):
    """A form field whose submitted HTML is validated as XML against a
    Relax NG schema before being accepted."""

    def __init__(self, schema_path, additional_root_element=None, *args, **kwargs):
        """Remember the schema location and force the inline textarea widget."""
        kwargs.update({'widget': InlineTextareaWidget})
        super(InlineField, self).__init__(*args, **kwargs)
        self.schema_path = schema_path
        self.additional_root_element = additional_root_element

    def clean(self, value):
        """Wrap the submitted value in a root element and schema-validate it."""
        validator = RelaxNGValidator(
            get_absolute_schema_path(self.schema_path),
            self.additional_root_element)
        # TODO: This shouldn't be hard-coded to `content`
        wrapped = u"<content>%s</content>" % html_to_unicode(value)
        return validator.forms_validate(wrapped)
|
caiges/populous
|
populous/inlines/forms/fields.py
|
Python
|
bsd-3-clause
| 924
|
from ereuse_devicehub.resources.account.settings import unregistered_user, unregistered_user_doc
from ereuse_devicehub.resources.event.device.settings import EventWithDevices, \
EventSubSettingsMultipleDevices, materialized_components
class Allocate(EventWithDevices):
    """Event schema: assigns ('allocates') one or more devices to a user."""

    # Recipient of the devices; resolved against the 'accounts' resource.
    to = {
        'type': ['objectid', 'dict', 'string'],  # We should not add string but it does not work otherwise...
        'data_relation': {
            'resource': 'accounts',
            'field': '_id',
            'embeddable': True,
        },
        'schema': unregistered_user,
        'doc': 'The user the devices are allocated to. ' + unregistered_user_doc,
        # Presumably matches an existing account by e-mail or creates one on
        # the fly — confirm against the framework's hook implementation.
        'get_from_data_relation_or_create': 'email',
        'required': True,
        'sink': 2
    }
    # Read-only field filled in ('materialized') by the server, not the client.
    toOrganization = {
        'type': 'string',
        'readonly': True,
        'materialized': True,
        'doc': 'Materialization of the organization that, by the time of the allocation, the user worked in.'
    }
    # Shared materialized-components definition from the common settings module.
    components = materialized_components
class AllocateSettings(EventSubSettingsMultipleDevices):
    """Resource settings for the Allocate event (schema, UI hints, responses)."""
    _schema = Allocate
    # UI hints: Font Awesome icon and ordering weight.
    fa = 'fa-hand-o-right'
    sink = -5
    extra_response_fields = EventSubSettingsMultipleDevices.extra_response_fields + ['to']
    short_description = 'Assign the devices to someone, so that person \'owns\' the device'
# Receiver OR ReceiverEmail. We need to hook this in a required field so it is always executed
# And @type is an always required field so we can happily hook on it
|
eReuse/DeviceHub
|
ereuse_devicehub/resources/event/device/allocate/settings.py
|
Python
|
agpl-3.0
| 1,485
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-08 17:40
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Drop the stored average and track raw counters instead.

    Removes AtentionQueue.average_attention_time and adds atention_count and
    atention_time_total (both defaulting to 0) — presumably so the average
    can be recomputed exactly from the totals; confirm against the
    application code that reads these fields.
    """

    dependencies = [
        ('backend', '0021_auto_20161204_2145'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='atentionqueue',
            name='average_attention_time',
        ),
        migrations.AddField(
            model_name='atentionqueue',
            name='atention_count',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='atentionqueue',
            name='atention_time_total',
            field=models.IntegerField(default=0),
        ),
    ]
|
awainer/7539
|
aplicaciones_informaticas/backend/migrations/0022_auto_20161208_1740.py
|
Python
|
unlicense
| 759
|
# Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
import unittest
class TestCashFlowMappingTemplateDetails(unittest.TestCase):
    """Placeholder test case for the Cash Flow Mapping Template Details
    doctype; no behavior is exercised yet."""
    pass
|
mhbu50/erpnext
|
erpnext/accounts/doctype/cash_flow_mapping_template_details/test_cash_flow_mapping_template_details.py
|
Python
|
gpl-3.0
| 173
|
"""Viessmann ViCare climate device."""
import logging
from homeassistant.components.climate import ClimateDevice
from homeassistant.components.climate.const import (
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
PRESET_ECO,
PRESET_COMFORT,
HVAC_MODE_OFF,
HVAC_MODE_HEAT,
HVAC_MODE_AUTO,
)
from homeassistant.const import TEMP_CELSIUS, ATTR_TEMPERATURE, PRECISION_WHOLE
from . import DOMAIN as VICARE_DOMAIN
from . import VICARE_API
from . import VICARE_NAME
_LOGGER = logging.getLogger(__name__)
VICARE_MODE_DHW = "dhw"
VICARE_MODE_DHWANDHEATING = "dhwAndHeating"
VICARE_MODE_FORCEDREDUCED = "forcedReduced"
VICARE_MODE_FORCEDNORMAL = "forcedNormal"
VICARE_MODE_OFF = "standby"
VICARE_PROGRAM_ACTIVE = "active"
VICARE_PROGRAM_COMFORT = "comfort"
VICARE_PROGRAM_ECO = "eco"
VICARE_PROGRAM_EXTERNAL = "external"
VICARE_PROGRAM_HOLIDAY = "holiday"
VICARE_PROGRAM_NORMAL = "normal"
VICARE_PROGRAM_REDUCED = "reduced"
VICARE_PROGRAM_STANDBY = "standby"
VICARE_HOLD_MODE_AWAY = "away"
VICARE_HOLD_MODE_HOME = "home"
VICARE_HOLD_MODE_OFF = "off"
VICARE_TEMP_HEATING_MIN = 3
VICARE_TEMP_HEATING_MAX = 37
SUPPORT_FLAGS_HEATING = SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE
VICARE_TO_HA_HVAC_HEATING = {
VICARE_MODE_DHW: HVAC_MODE_OFF,
VICARE_MODE_DHWANDHEATING: HVAC_MODE_AUTO,
VICARE_MODE_FORCEDREDUCED: HVAC_MODE_OFF,
VICARE_MODE_FORCEDNORMAL: HVAC_MODE_HEAT,
VICARE_MODE_OFF: HVAC_MODE_OFF,
}
HA_TO_VICARE_HVAC_HEATING = {
HVAC_MODE_HEAT: VICARE_MODE_FORCEDNORMAL,
HVAC_MODE_OFF: VICARE_MODE_FORCEDREDUCED,
HVAC_MODE_AUTO: VICARE_MODE_DHWANDHEATING,
}
VICARE_TO_HA_PRESET_HEATING = {
VICARE_PROGRAM_COMFORT: PRESET_COMFORT,
VICARE_PROGRAM_ECO: PRESET_ECO,
}
HA_TO_VICARE_PRESET_HEATING = {
PRESET_COMFORT: VICARE_PROGRAM_COMFORT,
PRESET_ECO: VICARE_PROGRAM_ECO,
}
PYVICARE_ERROR = "error"
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Create the ViCare climate devices."""
    # Only set up through discovery, never via manual YAML platform config.
    if discovery_info is None:
        return
    api = hass.data[VICARE_DOMAIN][VICARE_API]
    entity_name = f"{hass.data[VICARE_DOMAIN][VICARE_NAME]} Heating"
    add_entities([ViCareClimate(entity_name, api)])
class ViCareClimate(ClimateDevice):
    """Representation of the ViCare heating climate device."""

    def __init__(self, name, api):
        """Initialize the climate device.

        Args:
            name: friendly name for the entity.
            api: PyViCare device handle used for all heater communication.
        """
        self._name = name
        self._state = None
        self._api = api
        self._attributes = {}
        self._target_temperature = None
        self._current_mode = None
        self._current_temperature = None
        self._current_program = None

    def update(self):
        """Let HA know there has been an update from the ViCare API."""
        _room_temperature = self._api.getRoomTemperature()
        _supply_temperature = self._api.getSupplyTemperature()
        # Prefer the room sensor; fall back to the supply temperature when
        # the room reading is missing or reported as the API error string.
        if _room_temperature is not None and _room_temperature != PYVICARE_ERROR:
            self._current_temperature = _room_temperature
        elif _supply_temperature != PYVICARE_ERROR:
            self._current_temperature = _supply_temperature
        else:
            self._current_temperature = None
        self._current_program = self._api.getActiveProgram()

        # The getCurrentDesiredTemperature call can yield 'error' (str) when
        # the system is in standby.
        desired_temperature = self._api.getCurrentDesiredTemperature()
        if desired_temperature == PYVICARE_ERROR:
            desired_temperature = None
        self._target_temperature = desired_temperature

        self._current_mode = self._api.getActiveMode()

        # Update the device attributes exposed via device_state_attributes.
        self._attributes = {}
        self._attributes["room_temperature"] = _room_temperature
        self._attributes["supply_temperature"] = _supply_temperature
        self._attributes["outside_temperature"] = self._api.getOutsideTemperature()
        self._attributes["active_vicare_program"] = self._current_program
        self._attributes["active_vicare_mode"] = self._current_mode
        self._attributes["heating_curve_slope"] = self._api.getHeatingCurveSlope()
        self._attributes["heating_curve_shift"] = self._api.getHeatingCurveShift()
        self._attributes[
            "month_since_last_service"
        ] = self._api.getMonthSinceLastService()
        self._attributes["date_last_service"] = self._api.getLastServiceDate()
        self._attributes["error_history"] = self._api.getErrorHistory()
        self._attributes["active_error"] = self._api.getActiveError()
        self._attributes[
            "circulationpump_active"
        ] = self._api.getCirculationPumpActive()

    @property
    def supported_features(self):
        """Return the list of supported features."""
        return SUPPORT_FLAGS_HEATING

    @property
    def name(self):
        """Return the name of the climate device."""
        return self._name

    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        return TEMP_CELSIUS

    @property
    def current_temperature(self):
        """Return the current temperature."""
        return self._current_temperature

    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        return self._target_temperature

    @property
    def hvac_mode(self):
        """Return current hvac mode."""
        return VICARE_TO_HA_HVAC_HEATING.get(self._current_mode)

    def set_hvac_mode(self, hvac_mode):
        """Set a new hvac mode on the ViCare API."""
        vicare_mode = HA_TO_VICARE_HVAC_HEATING.get(hvac_mode)
        if vicare_mode is None:
            _LOGGER.error(
                "Cannot set invalid vicare mode: %s / %s", hvac_mode, vicare_mode
            )
            return

        _LOGGER.debug("Setting hvac mode to %s / %s", hvac_mode, vicare_mode)
        self._api.setMode(vicare_mode)

    @property
    def hvac_modes(self):
        """Return the list of available hvac modes."""
        return list(HA_TO_VICARE_HVAC_HEATING)

    @property
    def min_temp(self):
        """Return the minimum temperature."""
        return VICARE_TEMP_HEATING_MIN

    @property
    def max_temp(self):
        """Return the maximum temperature."""
        return VICARE_TEMP_HEATING_MAX

    @property
    def precision(self):
        """Return the precision of the system."""
        return PRECISION_WHOLE

    def set_temperature(self, **kwargs):
        """Set a new target temperature for the active program.

        Bug fix: this previously passed the stale cached value
        (self._target_temperature) to setProgramTemperature instead of the
        requested temperature, so temperature changes made in Home Assistant
        were silently ignored.
        """
        temp = kwargs.get(ATTR_TEMPERATURE)
        if temp is not None:
            self._api.setProgramTemperature(self._current_program, temp)
            # Reflect the new target immediately rather than waiting for the
            # next update() poll cycle.
            self._target_temperature = temp

    @property
    def preset_mode(self):
        """Return the current preset mode, e.g., home, away, temp."""
        return VICARE_TO_HA_PRESET_HEATING.get(self._current_program)

    @property
    def preset_modes(self):
        """Return the available preset mode."""
        return list(VICARE_TO_HA_PRESET_HEATING)

    def set_preset_mode(self, preset_mode):
        """Set new preset mode and deactivate any existing programs."""
        vicare_program = HA_TO_VICARE_PRESET_HEATING.get(preset_mode)
        if vicare_program is None:
            _LOGGER.error(
                "Cannot set invalid vicare program: %s / %s",
                preset_mode,
                vicare_program,
            )
            return

        _LOGGER.debug("Setting preset to %s / %s", preset_mode, vicare_program)
        self._api.deactivateProgram(self._current_program)
        self._api.activateProgram(vicare_program)

    @property
    def device_state_attributes(self):
        """Show Device Attributes."""
        return self._attributes
|
Cinntax/home-assistant
|
homeassistant/components/vicare/climate.py
|
Python
|
apache-2.0
| 7,739
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from optparse import make_option
import random
import math
from django.contrib.gis.geos import Point
from treemap.models import Plot, Tree, Species
from treemap.management.util import InstanceDataCommand
class Command(InstanceDataCommand):
    """Management command that seeds an instance with randomly placed plots
    and, with configurable probability, trees (for test/demo data)."""

    # Command-line options controlling the volume and randomness of the data.
    option_list = InstanceDataCommand.option_list + (
        make_option('-r', '--radius',
                    action='store',
                    type='int',
                    dest='radius',
                    default=5000,
                    help='Number of meters from the center'),
        make_option('-n', '--number-of-trees',
                    action='store',
                    type='int',
                    dest='n',
                    default=100000,
                    help='Number of trees to create'),
        make_option('-p', '--prob-of-tree',
                    action='store',
                    type='int',
                    dest='ptree',
                    default=50,
                    help=('Probability that a given plot will '
                          'have a tree (0-100)')),
        make_option('-s', '--prob-of-species',
                    action='store',
                    type='int',
                    dest='pspecies',
                    default=50,
                    help=('Probability that a given tree will '
                          'have a species (0-100)')),
        make_option('-D', '--prob-of-diameter',
                    action='store',
                    type='int',
                    dest='pdiameter',
                    default=10,
                    help=('Probability that a given tree will '
                          'have a diameter (0-100)')))

    def handle(self, *args, **options):
        """ Create some seed data """
        instance, user = self.setup_env(*args, **options)
        species_qs = instance.scope_model(Species)
        n = options['n']
        self.stdout.write("Will create %s plots" % n)
        # Clamp a 0-100 integer option into a probability in [0.0, 1.0].
        get_prob = lambda option: float(min(100, max(0, option))) / 100.0
        tree_prob = get_prob(options['ptree'])
        species_prob = get_prob(options['pspecies'])
        diameter_prob = get_prob(options['pdiameter'])
        max_radius = options['radius']
        center_x = instance.center.x
        center_y = instance.center.y
        ct = 0  # trees created
        cp = 0  # plots created
        for i in xrange(0, n):
            mktree = random.random() < tree_prob
            # Random polar placement around the instance center.
            # NOTE(review): gauss(0, max_radius) is a standard deviation, not
            # a bound — points can land beyond max_radius (and radius can be
            # negative, which just mirrors the angle).
            radius = random.gauss(0.0, max_radius)
            theta = random.random() * 2.0 * math.pi
            x = math.cos(theta) * radius + center_x
            y = math.sin(theta) * radius + center_y
            plot = Plot(instance=instance,
                        geom=Point(x, y))
            plot.save_with_user(user)
            cp += 1
            if mktree:
                add_species = random.random() < species_prob
                if add_species:
                    species = random.choice(species_qs)
                else:
                    species = None
                add_diameter = random.random() < diameter_prob
                if add_diameter:
                    # Diameter drawn uniformly from (2, 20).
                    diameter = 2 + random.random() * 18
                else:
                    diameter = None
                tree = Tree(plot=plot,
                            species=species,
                            diameter=diameter,
                            instance=instance)
                tree.save_with_user(user)
                ct += 1
        self.stdout.write("Created %s trees and %s plots" % (ct, cp))
|
johnsonc/OTM2
|
opentreemap/treemap/management/commands/random_trees.py
|
Python
|
gpl-3.0
| 3,642
|
#! /usr/bin/python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""An integration test for tracing.
This is not run as part of unittests and is executed directly. In normal
operation it can be run with no arguments (or perhaps --no_sandbox depending on
how you have chrome set up). When debugging or adding tests, setting
--failed_trace_dir could be useful.
The integration test spawns a local http server to serve web pages. The trace
generated by each file in tests/*.html will be compared with the corresponding
results/*.result. Each test should have a detailed comment explaining its
organization and what the important part of the test result is.
By default this will use a release version of chrome built in this same
code tree (out/Release/chrome), see --local_binary to override.
See InitiatorSequence for what the integration tests verify. The idea is to
capture a sketch of the initiator and call stack relationship. The output is
human-readable. To create a new test, first run test_server.py locally with
--source_dir pointing to tests/, and verify that the test page works as expected
by pointing a browser to localhost:XXX/your_new_test.html (with XXX the port
reported in the console output of test_server.py). Then run this
webserver_test.py with --failed_trace_dir set. Verify that the actual output is
what you expect it to be, then copy it to results/. If your test is 7.html, you
should copy to results/7.result.
"""
import argparse
import contextlib
import json
import os
import shutil
import subprocess
import sys
import tempfile
import urlparse
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
from device_setup import DeviceConnection
import loading_trace
import options
import trace_recorder
OPTIONS = options.OPTIONS
WEBSERVER = os.path.join(os.path.dirname(__file__), 'test_server.py')
TESTDIR = os.path.join(os.path.dirname(__file__), 'tests')
RESULTDIR = os.path.join(os.path.dirname(__file__), 'results')
@contextlib.contextmanager
def TemporaryDirectory():
  """Yield a freshly-created scratch directory, removing it and all of its
  contents when the with-block exits (even on error).
  """
  path = tempfile.mkdtemp()
  try:
    yield path
  finally:
    shutil.rmtree(path)
class WebServer(object):
    """Wrap the webserver."""

    def __init__(self, source_dir, communication_dir):
        """Initialize the server but does not start it.

        Args:
          source_dir: the directory where source data (html, js, etc) will be found.
          communication_dir: a directory to use for IPC (eg, discovering the
            port, which is dynamically allocated). This should probably be a
            temporary directory.
        """
        self._source_dir = source_dir
        self._communication_dir = communication_dir
        self._fifo = None
        self._server_process = None
        self._port = None  # Assigned in Start() once the child reports it.

    @classmethod
    @contextlib.contextmanager
    def Context(cls, *args, **kwargs):
        """Creates a webserver as a context manager.

        Args:
          As in __init__.

        Returns:
          A context manager for an instance of a WebServer.
        """
        try:
            server = cls(*args, **kwargs)
            server.Start()
            yield server
        finally:
            server.Stop()

    def Start(self):
        """Start the server by spawning a process."""
        # The child process writes its dynamically chosen port to this FIFO.
        fifo_name = os.path.join(self._communication_dir, 'from_server')
        os.mkfifo(fifo_name)
        # Silence server output unless --local_noisy was given.
        server_out = None if OPTIONS.local_noisy else file('/dev/null', 'w')
        self._server_process = subprocess.Popen(
            [WEBSERVER,
             '--source_dir=%s' % self._source_dir,
             '--fifo=%s' % fifo_name],
            shell=False, stdout=server_out, stderr=server_out)
        # Blocks until the server writes its port.
        fifo = file(fifo_name)
        # TODO(mattcary): timeout?
        self._port = int(fifo.readline())
        fifo.close()

    def Stop(self):
        """Stops the server, waiting for it to complete.

        Returns:
          True if the server stopped correctly.
        """
        if self._server_process is None:
            return False
        self._server_process.kill()
        # TODO(mattcary): timeout & error?
        self._server_process.wait()
        return True

    def Address(self):
        """Returns a host:port string suitable for an http request."""
        assert self._port is not None, \
            "No port exists until the server is started."
        return 'localhost:%s' % self._port
class InitiatorSequence(object):
    """The interesting parts of the initiator dependancies that are tested."""

    def __init__(self, trace):
        """Create.

        Args:
          trace: a LoadingTrace.
        """
        self._seq = []
        # ReadFromFile will initialize without a trace.
        if trace is None:
            return
        # Build one human-readable line per parser/script-initiated request:
        # the initiator type, a '/'-joined summary of the initiator call stack
        # (including parent stacks), and the shortened request URL.
        for rq in trace.request_track.GetEvents():
            if rq.initiator['type'] in ('parser', 'script'):
                stack_string = ''
                stack = rq.initiator.get('stack')
                # Iteratively walk the stack and its parents.
                while stack:
                    current_string = '/'.join(
                        ['%s:%s' % (self._ShortUrl(frame['url']), frame['lineNumber'])
                         for frame in stack['callFrames']])
                    # Separate successive stack levels with '/'.
                    if len(current_string) and len(stack_string):
                        stack_string += '/'
                    stack_string += current_string
                    stack = stack.get('parent')
                if stack_string == '':
                    stack_string = 'no stack'
                self._seq.append('%s (%s) %s' % (
                    rq.initiator['type'],
                    stack_string,
                    self._ShortUrl(rq.url)))
        # Sorted so comparison is order-independent.
        self._seq.sort()

    @classmethod
    def ReadFromFile(cls, input_file):
        """Read a file from DumpToFile.

        Args:
          input_file: a file-like object.

        Returns:
          An InitiatorSequence instance.
        """
        seq = cls(None)
        seq._seq = sorted([l.strip() for l in input_file.readlines() if l])
        return seq

    def DumpToFile(self, output):
        """Write to a file.

        Args:
          output: a writeable file-like object.
        """
        output.write('\n'.join(self._seq) + '\n')

    def __eq__(self, other):
        # Comparing against None is allowed (used when no expected result
        # exists yet); any other type is a programming error.
        if other is None:
            return False
        assert type(other) is InitiatorSequence
        if len(self._seq) != len(other._seq):
            return False
        # Both sequences are sorted at construction, so element-wise
        # comparison is sufficient.
        for a, b in zip(self._seq, other._seq):
            if a != b:
                return False
        return True

    def _ShortUrl(self, url):
        # Strip scheme/host and leading slashes; elide the middle of long
        # paths so output lines stay readable.
        short = urlparse.urlparse(url).path
        while short.startswith('/'):
            short = short[1:]
        if len(short) > 40:
            short = '...'.join((short[:20], short[-10:]))
        return short
def RunTest(webserver, connection, test_page, expected):
    """Run an webserver test.

    The expected result can be None, in which case --failed_trace_dir can be set
    to output the observed trace.

    Args:
      webserver [WebServer]: the webserver to use for the test. It must be
        started.
      connection [DevToolsConnection]: the connection to trace against.
      test_page: the name of the page to load.
      expected [InitiatorSequence]: expected initiator sequence.

    Returns:
      True if the test passed and false otherwise. Status is printed to stdout.
    """
    url = 'http://%s/%s' % (webserver.Address(), test_page)
    sys.stdout.write('Testing %s...' % url)
    # Trace the page load with a cold cache and reduce the trace to its
    # initiator sequence for comparison.
    observed_seq = InitiatorSequence(trace_recorder.MonitorUrl(
        connection, url, clear_cache=True))
    if observed_seq == expected:
        sys.stdout.write(' ok\n')
        return True
    else:
        sys.stdout.write(' FAILED!\n')
        # Optionally persist the observed sequence so it can be inspected or
        # promoted to a new expected result (see module docstring).
        if OPTIONS.failed_trace_dir:
            outname = os.path.join(OPTIONS.failed_trace_dir,
                                   test_page + '.observed_result')
            with file(outname, 'w') as output:
                observed_seq.DumpToFile(output)
            sys.stdout.write('Wrote observed result to %s\n' % outname)
        return False
def RunAllTests():
    """Run all tests in TESTDIR.

    All tests must have a corresponding result in RESULTDIR unless
    --failed_trace_dir is set.
    """
    with TemporaryDirectory() as temp_dir, \
         WebServer.Context(TESTDIR, temp_dir) as webserver, \
         DeviceConnection(None) as connection:
        failure = False
        for test in sorted(os.listdir(TESTDIR)):
            if test.endswith('.html'):
                # tests/foo.html is checked against results/foo.result.
                result = os.path.join(RESULTDIR, test[:test.rfind('.')] + '.result')
                assert OPTIONS.failed_trace_dir or os.path.exists(result), \
                    'No result found for test'
                expected = None
                if os.path.exists(result):
                    with file(result) as result_file:
                        expected = InitiatorSequence.ReadFromFile(result_file)
                if not RunTest(webserver, connection, test, expected):
                    failure = True
        if failure:
            print 'FAILED!'
        else:
            print 'all tests passed'
# Script entry point: parse the test-specific flags, then run every test page.
if __name__ == '__main__':
    OPTIONS.ParseArgs(sys.argv[1:],
                      description='Run webserver integration test',
                      extra=[('--failed_trace_dir', ''),
                             ('--noisy', False)])
    RunAllTests()
|
ds-hwang/chromium-crosswalk
|
tools/android/loading/trace_test/webserver_test.py
|
Python
|
bsd-3-clause
| 8,796
|
config = {
# environment this app is running on: localhost, testing, production
'environment': "localhost",
# webapp2 sessions
'webapp2_extras.sessions' : {'secret_key': 'Rgs5tJE$5n6jsfrg'},
# webapp2 authentication
'webapp2_extras.auth' : {'user_model': 'boilerplate.models.User',
'cookie_name': 'session_name'},
# jinja2 templates
'webapp2_extras.jinja2' : {'template_path': ['templates','boilerplate/templates', 'admin/templates'],
'environment_args': {'extensions': ['jinja2.ext.i18n']}},
# application name
'app_name' : "Moolatoo",
# the default language code for the application.
# should match whatever language the site uses when i18n is disabled
'app_lang' : 'en_US',
# Locale code = <language>_<territory> (ie 'en_US')
# to pick locale codes see http://cldr.unicode.org/index/cldr-spec/picking-the-right-language-code
# also see http://www.sil.org/iso639-3/codes.asp
# Language codes defined under iso 639-1 http://en.wikipedia.org/wiki/List_of_ISO_639-1_codes
# Territory codes defined under iso 3166-1 alpha-2 http://en.wikipedia.org/wiki/ISO_3166-1
# disable i18n if locales array is empty or None
'locales' : ['en_US'],
# contact page email settings
'contact_sender' : "PUT_SENDER_EMAIL_HERE",
'contact_recipient' : "PUT_RECIPIENT_EMAIL_HERE",
# Password AES Encryption Parameters
'aes_key' : "7JiH8IGhtRH65gy6",
'salt' : "4gTd6",
# get your own consumer key and consumer secret by registering at https://dev.twitter.com/apps
# callback url must be: http://[YOUR DOMAIN]/login/twitter/complete
'twitter_consumer_key' : 'PUT_YOUR_TWITTER_CONSUMER_KEY_HERE',
'twitter_consumer_secret' : 'PUT_YOUR_TWITTER_CONSUMER_SECRET_HERE',
#Facebook Login
# get your own consumer key and consumer secret by registering at https://developers.facebook.com/apps
#Very Important: set the site_url= your domain in the application settings in the facebook app settings page
# callback url must be: http://[YOUR DOMAIN]/login/facebook/complete
'fb_api_key' : 'PUT_YOUR_FACEBOOK_PUBLIC_KEY_HERE',
'fb_secret' : 'PUT_YOUR_FACEBOOK_PUBLIC_KEY_HERE',
#Linkedin Login
#Get you own api key and secret from https://www.linkedin.com/secure/developer
'linkedin_api' : 'PUT_YOUR_LINKEDIN_PUBLIC_KEY_HERE',
'linkedin_secret' : 'PUT_YOUR_LINKEDIN_PUBLIC_KEY_HERE',
# Github login
# Register apps here: https://github.com/settings/applications/new
'github_server' : 'github.com',
'github_redirect_uri' : 'http://www.example.com/social_login/github/complete',
'github_client_id' : 'PUT_YOUR_GITHUB_CLIENT_ID_HERE',
'github_client_secret' : 'PUT_YOUR_GITHUB_CLIENT_SECRET_HERE',
# get your own recaptcha keys by registering at http://www.google.com/recaptcha/
'captcha_public_key' : "6LfXYNwSAAAAAEoJDglhwZKYEgygSLa5qYQeJGfL",
'captcha_private_key' : "6LfXYNwSAAAAAAQFjA66WcyLUOnK0ei3hJXRJjih",
# Leave blank "google_analytics_domain" if you only want Analytics code
'google_analytics_domain' : "moolatoo.com",
'google_analytics_code' : "UA-XXXXX-X",
# add status codes and templates used to catch and display errors
# if a status code is not listed here it will use the default app engine
# stacktrace error page or browser error page
'error_templates' : {
403: 'errors/default_error.html',
404: 'errors/default_error.html',
500: 'errors/default_error.html',
},
# Enable Federated login (OpenID and OAuth)
# Google App Engine Settings must be set to Authentication Options: Federated Login
'enable_federated_login' : True,
# jinja2 base layout template
'base_layout' : 'base.html',
# send error emails to developers
'send_mail_developer' : False,
# developers who receive error e-mails (used when 'send_mail_developer' is True)
'developers' : (
('Richard Haber', 'rchaber@gmail.com'),
),
# If true, it will write in datastore a log of every email sent
'log_email' : True,
# If true, it will write in datastore a log of every visit
'log_visit' : True,
# ----> ADD MORE CONFIGURATION OPTIONS HERE <----
} # end config
|
rchaber/moolatoo
|
config/localhost.py
|
Python
|
lgpl-3.0
| 3,921
|
import os
import unittest
import synapse
import synapse.lib.datfile as s_datfile
from synapse.tests.common import *
syndir = os.path.dirname(synapse.__file__)
class DatFileTest(SynTest):
    """Exercise synapse.lib.datfile packaged-data access."""

    def test_datfile_basic(self):
        """openDatFile returns a readable handle onto the packaged test data."""
        with s_datfile.openDatFile('synapse.tests/test.dat') as datfd:
            self.nn(datfd)
            self.eq(datfd.read(), b'woot\n')
|
vivisect/synapse
|
synapse/tests/test_lib_datfile.py
|
Python
|
apache-2.0
| 360
|
# -*- coding: utf-8 -*-
import re
from werkzeug.exceptions import BadRequest
import numpy as np
from .rendering import render_task
@render_task
def solve(input_data):
    """Solve task 2 in accordance to the ZON code ninja program task sheet.

    Counts clusters of 8-connected 1-cells in each test-case matrix and
    returns one count per line.

    Bug fix: the cluster-merge line used '==' (a no-op comparison) instead of
    '=' (assignment), so adjacent clusters were never actually relabeled even
    though their id was removed from cluster_ids. A stray no-op expression
    statement ('cluster_ids') was also removed.
    """
    # Clean input data and split by linebreaks.
    input_split = input_data.split('\n')
    lines = [re.sub('[^0-9]+', '', i) for i in input_split if len(i)]
    # Determine number of test cases and init solution list.
    cases = int(0 if not lines else lines.pop(0))
    counts = [1] * cases
    # Enforce test case limit constraint.
    if cases <= 0 or cases >= 6:
        raise BadRequest('You need to enter 0 < T < 6 test cases.')
    for case in xrange(cases):
        if not len(lines):
            raise BadRequest('Specified %s cases, but only provided %s.' %
                             (cases, case))
        dimension = int(lines.pop(0))
        # Enforce dimensional constraint.
        if dimension <= 0 or dimension >= 1009:
            raise BadRequest('You need to enter 0 < N < 1009 dim. matrices.')
        # The line array is consumed according to the dimension spec.
        matrix = np.array([[int(n) for n in m] for m in lines[:dimension]])
        lines = lines[dimension:]
        cluster_ids = []
        if not matrix.shape == (dimension,) * 2:
            raise BadRequest('Expected uniform %s-D matrix for case %s.' %
                             (dimension, case + 1))

        def neighbours(idx, cluster_id):
            # Return all 1s in the up to 8 fields adjacent to idx.
            def along_axis(axis):
                # Discover candidate fields based on matrix dimensionality.
                for offset in (-1, 0, 1):
                    candidate = idx[axis] + offset
                    if candidate >= 0 and candidate < dimension:
                        yield candidate
            hood = []
            for x in along_axis(0):
                for y in along_axis(1):
                    if (x, y) != idx and matrix[x, y] == 1:
                        hood.append((x, y))
                    elif matrix[x, y] > 1 and matrix[x, y] != cluster_id:
                        # Claim fields of neighbouring cluster.
                        if matrix[x, y] in cluster_ids:
                            del cluster_ids[cluster_ids.index(matrix[x, y])]
                        # FIX: assignment, not comparison — relabel the whole
                        # neighbouring cluster to the current cluster id.
                        matrix[matrix == matrix[x, y]] = cluster_id
            return hood

        # Generate initial list of fields with value 1.
        untouched = zip(*[idx.tolist() for idx in np.where(matrix == 1)])
        while untouched:
            # Every cluster gets an ID, which is stored in the matrix instead
            # of the value 1. Therefore, we loop until there are only zeroes
            # and cluster IDs left.
            def expand(resident, cluster_id):
                matrix[resident] = cluster_id
                hood = neighbours(resident, cluster_id)
                if hood:
                    for neighbour in hood:
                        try:
                            expand(neighbour, cluster_id)
                        except RuntimeError:
                            # Incase we run into recursion depth issues, let
                            # the next iteration handle this neighbourhood.
                            return
            # Increase cluster ID and recursivly explore neighbours.
            cluster_ids.append(max(cluster_ids or [1]) + 1)
            expand(untouched[0], cluster_ids[-1])
            # Recalculate list of fields with value 1.
            untouched = zip(*[idx.tolist() for idx in np.where(matrix == 1)])
        counts[case] = len(cluster_ids)
    return '\n'.join([str(c) for c in counts])
|
cutoffthetop/hireme
|
src/hireme/task2.py
|
Python
|
bsd-2-clause
| 3,756
|
# Copyright 2018 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for PerfKitBenchmarker' scratchdisks."""
import abc
import unittest
from absl import flags
from absl.testing import flagsaver
import mock
from perfkitbenchmarker import benchmark_spec
from perfkitbenchmarker import context
from perfkitbenchmarker import disk
from perfkitbenchmarker import errors
from perfkitbenchmarker import linux_virtual_machine
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.configs import benchmark_config_spec
from perfkitbenchmarker.providers.aws import aws_disk
from perfkitbenchmarker.providers.aws import aws_virtual_machine
from perfkitbenchmarker.providers.aws import util as aws_util
from perfkitbenchmarker.providers.azure import azure_disk
from perfkitbenchmarker.providers.azure import azure_virtual_machine
from perfkitbenchmarker.providers.gcp import gce_disk
from perfkitbenchmarker.providers.gcp import gce_virtual_machine
from perfkitbenchmarker.providers.gcp import util
from tests import pkb_common_test_case # pylint:disable=unused-import
FLAGS = flags.FLAGS
_BENCHMARK_NAME = 'name'
_BENCHMARK_UID = 'uid'
_COMPONENT = 'test_component'
class ScratchDiskTestMixin(object):
  """Sets up and tears down some of the mocks needed to test scratch disks."""

  @abc.abstractmethod
  def _PatchCloudSpecific(self):
    """Adds any cloud specific patches to self.patches."""
    pass

  @abc.abstractmethod
  def _CreateVm(self):
    """Creates and returns a VM object of the correct type for the cloud."""
    pass

  @abc.abstractmethod
  def _GetDiskClass(self):
    """Returns the disk class for the given cloud."""
    pass

  def setUp(self):
    """Patch out disk formatting/mounting and cloud APIs before each test."""
    self.saved_flag_values = flagsaver.save_flag_values()
    self.patches = []

    vm_prefix = linux_virtual_machine.__name__ + '.BaseLinuxMixin'
    self.patches.append(
        mock.patch(vm_prefix + '.FormatDisk'))
    self.patches.append(
        mock.patch(vm_prefix + '.MountDisk'))
    self.patches.append(
        mock.patch(
            util.__name__ + '.GetDefaultProject', side_effect='test_project'))

    # Patch subprocess.Popen to make sure we don't issue any commands to spin up
    # resources.
    self.patches.append(mock.patch('subprocess.Popen'))
    self.patches.append(
        mock.patch(vm_util.__name__ + '.GetTempDir', return_value='/tmp/dir'))

    self._PatchCloudSpecific()

    for p in self.patches:
      p.start()
      self.addCleanup(p.stop)

    # We need the disk class mocks to return new mocks each time they are
    # called. Otherwise all "disks" instantiated will be the same object.
    self._GetDiskClass().side_effect = (
        lambda *args, **kwargs: mock.MagicMock(is_striped=False))

    # VM Creation depends on there being a BenchmarkSpec.
    config_spec = benchmark_config_spec.BenchmarkConfigSpec(
        _BENCHMARK_NAME, flag_values=FLAGS, vm_groups={})
    self.spec = benchmark_spec.BenchmarkSpec(mock.MagicMock(), config_spec,
                                             _BENCHMARK_UID)
    self.addCleanup(context.SetThreadBenchmarkSpec, None)
    self.addCleanup(flagsaver.restore_flag_values, self.saved_flag_values)

  def testScratchDisks(self):
    """Test for creating and deleting scratch disks.

    This test creates two scratch disks on a vm and deletes them, ensuring
    that the proper calls to create, format, mount, and delete are made.
    """
    vm = self._CreateVm()

    # First disk: created, formatted, and mounted at /mountpoint0.
    disk_spec = disk.BaseDiskSpec(_COMPONENT, mount_point='/mountpoint0')
    vm.CreateScratchDisk(disk_spec)

    assert len(vm.scratch_disks) == 1, 'Disk not added to scratch disks.'

    scratch_disk = vm.scratch_disks[0]
    scratch_disk.Create.assert_called_once_with()
    vm.FormatDisk.assert_called_once_with(scratch_disk.GetDevicePath(), None)
    vm.MountDisk.assert_called_once_with(
        scratch_disk.GetDevicePath(), '/mountpoint0',
        None, scratch_disk.mount_options, scratch_disk.fstab_options)

    # Second disk on the same VM, mounted at /mountpoint1.
    disk_spec = disk.BaseDiskSpec(_COMPONENT, mount_point='/mountpoint1')
    vm.CreateScratchDisk(disk_spec)

    assert len(vm.scratch_disks) == 2, 'Disk not added to scratch disks.'

    # Check that these execute without exceptions. The return value
    # is a MagicMock, not a string, so we can't compare to expected results.
    vm.GetScratchDir()
    vm.GetScratchDir(0)
    vm.GetScratchDir(1)
    # Only two scratch disks exist, so index 2 must raise.
    with self.assertRaises(errors.Error):
      vm.GetScratchDir(2)

    scratch_disk = vm.scratch_disks[1]
    scratch_disk.Create.assert_called_once_with()
    vm.FormatDisk.assert_called_with(scratch_disk.GetDevicePath(), None)
    vm.MountDisk.assert_called_with(
        scratch_disk.GetDevicePath(), '/mountpoint1',
        None, scratch_disk.mount_options, scratch_disk.fstab_options)

    # Deleting scratch disks must delete every disk that was created.
    vm.DeleteScratchDisks()

    vm.scratch_disks[0].Delete.assert_called_once_with()
    vm.scratch_disks[1].Delete.assert_called_once_with()
class AzureScratchDiskTest(ScratchDiskTestMixin, unittest.TestCase):
  """Scratch disk create/format/mount/delete tests for Azure VMs."""

  def _PatchCloudSpecific(self):
    """Patches the Azure disk class so no real disks are created."""
    disk_patch = mock.patch(azure_disk.__name__ + '.AzureDisk')
    self.patches.append(disk_patch)

  def _CreateVm(self):
    """Builds an Azure Ubuntu VM from a minimal test spec."""
    spec = azure_virtual_machine.AzureVmSpec(
        'test_vm_spec.Azure', zone='eastus2', machine_type='test_machine_type')
    return azure_virtual_machine.Ubuntu1604BasedAzureVirtualMachine(spec)

  def _GetDiskClass(self):
    """Returns the (mocked) Azure disk class."""
    return azure_disk.AzureDisk
class GceScratchDiskTest(ScratchDiskTestMixin, unittest.TestCase):
  """Scratch disk create/format/mount/delete tests for GCE VMs."""

  def _PatchCloudSpecific(self):
    """Patches the GCE disk class so no real disks are created."""
    disk_patch = mock.patch(gce_disk.__name__ + '.GceDisk')
    self.patches.append(disk_patch)

  def _CreateVm(self):
    """Builds a GCE Ubuntu VM from a minimal test spec."""
    spec = gce_virtual_machine.GceVmSpec('test_vm_spec.GCP',
                                         machine_type='test_machine_type')
    return gce_virtual_machine.Ubuntu1804BasedGceVirtualMachine(spec)

  def _GetDiskClass(self):
    """Returns the (mocked) GCE disk class."""
    return gce_disk.GceDisk
class AwsScratchDiskTest(ScratchDiskTestMixin, unittest.TestCase):
  """Scratch disk create/format/mount/delete tests for AWS VMs."""

  def _PatchCloudSpecific(self):
    """Patches AWS disk helpers so no real cloud resources are touched."""
    self.patches.append(mock.patch(aws_disk.__name__ + '.AwsDisk'))
    self.patches.append(mock.patch(aws_util.__name__ + '.AddDefaultTags'))
    # In Python3 the mocking of subprocess.Popen in setup() is problematic for
    # platform.system(). It is called by RemoteCommand() in
    # _GetNvmeBootIndex() so we'll mock that instead.
    nvme_patch = mock.patch(
        aws_virtual_machine.__name__ + '.AwsVirtualMachine._GetNvmeBootIndex')
    self.patches.append(nvme_patch)

  def _CreateVm(self):
    """Builds an AWS Ubuntu VM from a minimal test spec."""
    spec = aws_virtual_machine.AwsVmSpec('test_vm_spec.AWS',
                                         zone='us-east-1a',
                                         machine_type='test_machine_type')
    return aws_virtual_machine.Ubuntu1604BasedAwsVirtualMachine(spec)

  def _GetDiskClass(self):
    """Returns the (mocked) AWS disk class."""
    return aws_disk.AwsDisk
class GceDeviceIdTest(unittest.TestCase):
  """Checks Windows device-id construction for GCE disks."""

  def testDeviceId(self):
    """GetDeviceId should map disk_number N to \\\\.\\PHYSICALDRIVEN."""
    with mock.patch(disk.__name__ + '.FLAGS') as disk_flags:
      disk_flags.os_type = 'windows'
      spec = disk.BaseDiskSpec(_COMPONENT, disk_number=1, disk_size=2,
                               disk_type=gce_disk.PD_STANDARD)
      gce_disk_obj = gce_disk.GceDisk(spec, 'name', 'zone', 'project')
      self.assertEqual(gce_disk_obj.GetDeviceId(), r'\\.\PHYSICALDRIVE1')
if __name__ == '__main__':
unittest.main()
|
GoogleCloudPlatform/PerfKitBenchmarker
|
tests/scratch_disk_test.py
|
Python
|
apache-2.0
| 7,787
|
import pytest
import os
import time
import unittest
from tml import get_current_context
from tml.api.client import Client
from tml.web_tools.translator import BaseTranslation
from tml.context import SourceContext
from tml.config import Config
@pytest.mark.usefixtures("init_app")
class TranslatorTest(unittest.TestCase):
    """Tests BaseTranslation wiring against the current TML context."""

    def setUp(self):
        # The init_app fixture attaches self.init_app; build a fresh
        # application (and language/client handles) for every test.
        self.app = self.init_app()
        self.en = self.app.language('en')
        self.ru = self.app.language('ru')
        self.client = self.app.client

    def test_translation(self):
        """BaseTranslation exposes context/application/client helpers."""
        self.translation = BaseTranslation({})
        self.config = Config()
        self.assertIsInstance(self.translation, BaseTranslation)
        self.assertIsInstance(BaseTranslation.instance(), BaseTranslation)
        self.assertIsInstance(self.translation.build_context(), SourceContext)
        # assertEquals is a deprecated alias (removed in Python 3.12);
        # use assertEqual instead.
        self.assertEqual(self.translation.context, get_current_context())
        self.assertEqual(self.translation.application,
                         get_current_context().application)
        self.assertEqual(self.translation.application_key,
                         self.config.application_key())
        self.assertEqual(self.translation.languages,
                         get_current_context().application.languages)
        self.assertIsInstance(self.translation.build_client(), Client)
        self.assertEqual(self.translation.client, get_current_context().client)
if __name__ == '__main__':
unittest.main()
|
translationexchange/tml-python
|
tests/unit/web_tools/translator.py
|
Python
|
mit
| 1,391
|
from sklearn.metrics.pairwise import pairwise_distances
import numpy as np
# X shoudl be a numpy matrix, very likely sparse matrix: http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.sparse.csr_matrix.html#scipy.sparse.csr_matrix
# T1 > T2 for overlapping clusters
# T1 = Distance to centroid point to not include in other clusters
# T2 = Distance to centroid point to include in cluster
# T1 > T2 for overlapping clusters
# T1 < T2 will have points which reside in no clusters
# T1 == T2 will cause all points to reside in mutually exclusive clusters
# Distance metric can be any from here: http://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise.pairwise_distances.html
# filemap may be a list of point names in their order in X. If included, row numbers from X will be replaced with names from filemap.
def canopy(X, T1, T2, distance_metric='euclidean', filemap=None):
    """Compute canopy clusters over the rows of X.

    Args:
      X: 2-D matrix of points, one point per row (dense ndarray or a scipy
        sparse matrix accepted by sklearn's pairwise_distances).
      T1: loose threshold; points within T1 of a canopy center are removed
        from the pool of candidate centers.
      T2: tight threshold; points within T2 of a center become members of
        that canopy.  T1 > T2 gives overlapping canopies, T1 == T2 gives
        mutually exclusive ones, T1 < T2 can leave points in no canopy.
      distance_metric: any metric name accepted by
        sklearn.metrics.pairwise.pairwise_distances.
      filemap: optional list mapping row indices of X to names; when given,
        row numbers in the result are replaced with the corresponding names.

    Returns:
      Dict mapping canopy id -> {"c": center, "points": member list}.
    """
    canopies = dict()
    # Full pairwise distance matrix; rows index candidate centers.
    X1_dist = pairwise_distances(X, metric=distance_metric)
    canopy_points = set(range(X.shape[0]))
    while canopy_points:
        point = canopy_points.pop()
        i = len(canopies)
        canopies[i] = {"c": point,
                       "points": list(np.where(X1_dist[point] < T2)[0])}
        # Everything inside the loose threshold can no longer seed a canopy.
        canopy_points = canopy_points.difference(
            set(np.where(X1_dist[point] < T1)[0]))
    if filemap:
        # Build a fresh dict instead of popping/re-inserting entries while
        # iterating canopies.keys(): on Python 3 mutating a dict during
        # iteration raises RuntimeError, and a re-inserted key could be
        # visited again (double-applying filemap).
        canopies = {
            canopy_id: {"c": filemap[c["c"]],
                        "points": [filemap[p] for p in c["points"]]}
            for canopy_id, c in canopies.items()
        }
    return canopies
|
pbarbero/TFM
|
demo/algorithms/canopy.py
|
Python
|
gpl-3.0
| 1,623
|
import copy
import textwrap
import re
import pytest
import numpy as np
import pandas as pd
from pandas import DataFrame
import pandas.util.testing as tm
jinja2 = pytest.importorskip('jinja2')
from pandas.io.formats.style import Styler, _get_level_lengths # noqa
class TestStyler(object):
    """Unit tests for the HTML ``Styler`` in pandas.io.formats.style."""
    def setup_method(self, method):
        # Deterministic fixtures: small frames, identity style funcs, and a
        # parameterized style func (h) used by the subset tests.
        np.random.seed(24)
        self.s = DataFrame({'A': np.random.permutation(range(6))})
        self.df = DataFrame({'A': [0, 1], 'B': np.random.randn(2)})
        self.f = lambda x: x
        self.g = lambda x: x
        def h(x, foo='bar'):
            return pd.Series(['color: %s' % foo], index=x.index, name=x.name)
        self.h = h
        self.styler = Styler(self.df)
        self.attrs = pd.DataFrame({'A': ['color: red', 'color: blue']})
        self.dataframes = [
            self.df,
            pd.DataFrame({'f': [1., 2.], 'o': ['a', 'b'],
                          'c': pd.Categorical(['a', 'b'])})
        ]
    def test_init_non_pandas(self):
        with pytest.raises(TypeError):
            Styler([1, 2, 3])
    def test_init_series(self):
        result = Styler(pd.Series([1, 2]))
        assert result.data.ndim == 2
    def test_repr_html_ok(self):
        self.styler._repr_html_()
    # ---- ctx construction / copying ----
    def test_update_ctx(self):
        self.styler._update_ctx(self.attrs)
        expected = {(0, 0): ['color: red'],
                    (1, 0): ['color: blue']}
        assert self.styler.ctx == expected
    def test_update_ctx_flatten_multi(self):
        attrs = DataFrame({"A": ['color: red; foo: bar',
                                 'color: blue; foo: baz']})
        self.styler._update_ctx(attrs)
        expected = {(0, 0): ['color: red', ' foo: bar'],
                    (1, 0): ['color: blue', ' foo: baz']}
        assert self.styler.ctx == expected
    def test_update_ctx_flatten_multi_traliing_semi(self):
        attrs = DataFrame({"A": ['color: red; foo: bar;',
                                 'color: blue; foo: baz;']})
        self.styler._update_ctx(attrs)
        expected = {(0, 0): ['color: red', ' foo: bar'],
                    (1, 0): ['color: blue', ' foo: baz']}
        assert self.styler.ctx == expected
    def test_copy(self):
        s2 = copy.copy(self.styler)
        assert self.styler is not s2
        assert self.styler.ctx is s2.ctx  # shallow
        assert self.styler._todo is s2._todo
        self.styler._update_ctx(self.attrs)
        self.styler.highlight_max()
        assert self.styler.ctx == s2.ctx
        assert self.styler._todo == s2._todo
    def test_deepcopy(self):
        s2 = copy.deepcopy(self.styler)
        assert self.styler is not s2
        assert self.styler.ctx is not s2.ctx
        assert self.styler._todo is not s2._todo
        self.styler._update_ctx(self.attrs)
        self.styler.highlight_max()
        assert self.styler.ctx != s2.ctx
        assert s2._todo == []
        assert self.styler._todo != s2._todo
    def test_clear(self):
        s = self.df.style.highlight_max()._compute()
        assert len(s.ctx) > 0
        assert len(s._todo) > 0
        s.clear()
        assert len(s.ctx) == 0
        assert len(s._todo) == 0
    # ---- rendering smoke tests ----
    def test_render(self):
        df = pd.DataFrame({"A": [0, 1]})
        style = lambda x: pd.Series(["color: red", "color: blue"], name=x.name)
        s = Styler(df, uuid='AB').apply(style)
        s.render()
        # it worked?
    def test_render_empty_dfs(self):
        empty_df = DataFrame()
        es = Styler(empty_df)
        es.render()
        # An index but no columns
        DataFrame(columns=['a']).style.render()
        # A column but no index
        DataFrame(index=['a']).style.render()
        # No IndexError raised?
    def test_render_double(self):
        df = pd.DataFrame({"A": [0, 1]})
        style = lambda x: pd.Series(["color: red; border: 1px",
                                     "color: blue; border: 2px"], name=x.name)
        s = Styler(df, uuid='AB').apply(style)
        s.render()
        # it worked?
    def test_set_properties(self):
        df = pd.DataFrame({"A": [0, 1]})
        result = df.style.set_properties(color='white',
                                         size='10px')._compute().ctx
        # order is deterministic
        v = ["color: white", "size: 10px"]
        expected = {(0, 0): v, (1, 0): v}
        assert result.keys() == expected.keys()
        for v1, v2 in zip(result.values(), expected.values()):
            assert sorted(v1) == sorted(v2)
    def test_set_properties_subset(self):
        df = pd.DataFrame({'A': [0, 1]})
        result = df.style.set_properties(subset=pd.IndexSlice[0, 'A'],
                                         color='white')._compute().ctx
        expected = {(0, 0): ['color: white']}
        assert result == expected
    # ---- header translation ----
    def test_empty_index_name_doesnt_display(self):
        # https://github.com/pandas-dev/pandas/pull/12090#issuecomment-180695902
        df = pd.DataFrame({'A': [1, 2], 'B': [3, 4], 'C': [5, 6]})
        result = df.style._translate()
        expected = [[{'class': 'blank level0', 'type': 'th', 'value': '',
                      'is_visible': True, 'display_value': ''},
                     {'class': 'col_heading level0 col0',
                      'display_value': 'A',
                      'type': 'th',
                      'value': 'A',
                      'is_visible': True,
                      },
                     {'class': 'col_heading level0 col1',
                      'display_value': 'B',
                      'type': 'th',
                      'value': 'B',
                      'is_visible': True,
                      },
                     {'class': 'col_heading level0 col2',
                      'display_value': 'C',
                      'type': 'th',
                      'value': 'C',
                      'is_visible': True,
                      }]]
        assert result['head'] == expected
    def test_index_name(self):
        # https://github.com/pandas-dev/pandas/issues/11655
        df = pd.DataFrame({'A': [1, 2], 'B': [3, 4], 'C': [5, 6]})
        result = df.set_index('A').style._translate()
        expected = [[{'class': 'blank level0', 'type': 'th', 'value': '',
                      'display_value': '', 'is_visible': True},
                     {'class': 'col_heading level0 col0', 'type': 'th',
                      'value': 'B', 'display_value': 'B', 'is_visible': True},
                     {'class': 'col_heading level0 col1', 'type': 'th',
                      'value': 'C', 'display_value': 'C', 'is_visible': True}],
                    [{'class': 'index_name level0', 'type': 'th',
                      'value': 'A'},
                     {'class': 'blank', 'type': 'th', 'value': ''},
                     {'class': 'blank', 'type': 'th', 'value': ''}]]
        assert result['head'] == expected
    def test_multiindex_name(self):
        # https://github.com/pandas-dev/pandas/issues/11655
        df = pd.DataFrame({'A': [1, 2], 'B': [3, 4], 'C': [5, 6]})
        result = df.set_index(['A', 'B']).style._translate()
        expected = [[
            {'class': 'blank', 'type': 'th', 'value': '',
             'display_value': '', 'is_visible': True},
            {'class': 'blank level0', 'type': 'th', 'value': '',
             'display_value': '', 'is_visible': True},
            {'class': 'col_heading level0 col0', 'type': 'th',
             'value': 'C', 'display_value': 'C', 'is_visible': True}],
            [{'class': 'index_name level0', 'type': 'th',
              'value': 'A'},
             {'class': 'index_name level1', 'type': 'th',
              'value': 'B'},
             {'class': 'blank', 'type': 'th', 'value': ''}]]
        assert result['head'] == expected
    def test_numeric_columns(self):
        # https://github.com/pandas-dev/pandas/issues/12125
        # smoke test for _translate
        df = pd.DataFrame({0: [1, 2, 3]})
        df.style._translate()
    # ---- apply / applymap ----
    def test_apply_axis(self):
        df = pd.DataFrame({'A': [0, 0], 'B': [1, 1]})
        f = lambda x: ['val: %s' % x.max() for v in x]
        result = df.style.apply(f, axis=1)
        assert len(result._todo) == 1
        assert len(result.ctx) == 0
        result._compute()
        expected = {(0, 0): ['val: 1'], (0, 1): ['val: 1'],
                    (1, 0): ['val: 1'], (1, 1): ['val: 1']}
        assert result.ctx == expected
        result = df.style.apply(f, axis=0)
        expected = {(0, 0): ['val: 0'], (0, 1): ['val: 1'],
                    (1, 0): ['val: 0'], (1, 1): ['val: 1']}
        result._compute()
        assert result.ctx == expected
        result = df.style.apply(f)  # default
        result._compute()
        assert result.ctx == expected
    def test_apply_subset(self):
        axes = [0, 1]
        slices = [pd.IndexSlice[:], pd.IndexSlice[:, ['A']],
                  pd.IndexSlice[[1], :], pd.IndexSlice[[1], ['A']],
                  pd.IndexSlice[:2, ['A', 'B']]]
        for ax in axes:
            for slice_ in slices:
                result = self.df.style.apply(self.h, axis=ax, subset=slice_,
                                             foo='baz')._compute().ctx
                expected = dict(((r, c), ['color: baz'])
                                for r, row in enumerate(self.df.index)
                                for c, col in enumerate(self.df.columns)
                                if row in self.df.loc[slice_].index and
                                col in self.df.loc[slice_].columns)
                assert result == expected
    def test_applymap_subset(self):
        def f(x):
            return 'foo: bar'
        slices = [pd.IndexSlice[:], pd.IndexSlice[:, ['A']],
                  pd.IndexSlice[[1], :], pd.IndexSlice[[1], ['A']],
                  pd.IndexSlice[:2, ['A', 'B']]]
        for slice_ in slices:
            result = self.df.style.applymap(f, subset=slice_)._compute().ctx
            expected = dict(((r, c), ['foo: bar'])
                            for r, row in enumerate(self.df.index)
                            for c, col in enumerate(self.df.columns)
                            if row in self.df.loc[slice_].index and
                            col in self.df.loc[slice_].columns)
            assert result == expected
    def test_empty(self):
        df = pd.DataFrame({'A': [1, 0]})
        s = df.style
        s.ctx = {(0, 0): ['color: red'],
                 (1, 0): ['']}
        result = s._translate()['cellstyle']
        expected = [{'props': [['color', ' red']], 'selector': 'row0_col0'},
                    {'props': [['', '']], 'selector': 'row1_col0'}]
        assert result == expected
    # ---- bar chart styling ----
    def test_bar_align_left(self):
        df = pd.DataFrame({'A': [0, 1, 2]})
        result = df.style.bar()._compute().ctx
        expected = {
            (0, 0): ['width: 10em', ' height: 80%'],
            (1, 0): ['width: 10em', ' height: 80%',
                     'background: linear-gradient('
                     '90deg,#d65f5f 50.0%, transparent 0%)'],
            (2, 0): ['width: 10em', ' height: 80%',
                     'background: linear-gradient('
                     '90deg,#d65f5f 100.0%, transparent 0%)']
        }
        assert result == expected
        result = df.style.bar(color='red', width=50)._compute().ctx
        expected = {
            (0, 0): ['width: 10em', ' height: 80%'],
            (1, 0): ['width: 10em', ' height: 80%',
                     'background: linear-gradient('
                     '90deg,red 25.0%, transparent 0%)'],
            (2, 0): ['width: 10em', ' height: 80%',
                     'background: linear-gradient('
                     '90deg,red 50.0%, transparent 0%)']
        }
        assert result == expected
        df['C'] = ['a'] * len(df)
        result = df.style.bar(color='red', width=50)._compute().ctx
        assert result == expected
        df['C'] = df['C'].astype('category')
        result = df.style.bar(color='red', width=50)._compute().ctx
        assert result == expected
    def test_bar_align_left_0points(self):
        df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
        result = df.style.bar()._compute().ctx
        expected = {(0, 0): ['width: 10em', ' height: 80%'],
                    (0, 1): ['width: 10em', ' height: 80%'],
                    (0, 2): ['width: 10em', ' height: 80%'],
                    (1, 0): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg,#d65f5f 50.0%,'
                             ' transparent 0%)'],
                    (1, 1): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg,#d65f5f 50.0%,'
                             ' transparent 0%)'],
                    (1, 2): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg,#d65f5f 50.0%,'
                             ' transparent 0%)'],
                    (2, 0): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg,#d65f5f 100.0%'
                             ', transparent 0%)'],
                    (2, 1): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg,#d65f5f 100.0%'
                             ', transparent 0%)'],
                    (2, 2): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg,#d65f5f 100.0%'
                             ', transparent 0%)']}
        assert result == expected
        result = df.style.bar(axis=1)._compute().ctx
        expected = {(0, 0): ['width: 10em', ' height: 80%'],
                    (0, 1): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg,#d65f5f 50.0%,'
                             ' transparent 0%)'],
                    (0, 2): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg,#d65f5f 100.0%'
                             ', transparent 0%)'],
                    (1, 0): ['width: 10em', ' height: 80%'],
                    (1, 1): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg,#d65f5f 50.0%'
                             ', transparent 0%)'],
                    (1, 2): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg,#d65f5f 100.0%'
                             ', transparent 0%)'],
                    (2, 0): ['width: 10em', ' height: 80%'],
                    (2, 1): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg,#d65f5f 50.0%'
                             ', transparent 0%)'],
                    (2, 2): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg,#d65f5f 100.0%'
                             ', transparent 0%)']}
        assert result == expected
    def test_bar_align_mid_pos_and_neg(self):
        df = pd.DataFrame({'A': [-10, 0, 20, 90]})
        result = df.style.bar(align='mid', color=[
            '#d65f5f', '#5fba7d'])._compute().ctx
        expected = {(0, 0): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg, '
                             'transparent 0%, transparent 0.0%, #d65f5f 0.0%, '
                             '#d65f5f 10.0%, transparent 10.0%)'],
                    (1, 0): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg, '
                             'transparent 0%, transparent 10.0%, '
                             '#d65f5f 10.0%, #d65f5f 10.0%, '
                             'transparent 10.0%)'],
                    (2, 0): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg, '
                             'transparent 0%, transparent 10.0%, #5fba7d 10.0%'
                             ', #5fba7d 30.0%, transparent 30.0%)'],
                    (3, 0): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg, '
                             'transparent 0%, transparent 10.0%, '
                             '#5fba7d 10.0%, #5fba7d 100.0%, '
                             'transparent 100.0%)']}
        assert result == expected
    def test_bar_align_mid_all_pos(self):
        df = pd.DataFrame({'A': [10, 20, 50, 100]})
        result = df.style.bar(align='mid', color=[
            '#d65f5f', '#5fba7d'])._compute().ctx
        expected = {(0, 0): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg, '
                             'transparent 0%, transparent 0.0%, #5fba7d 0.0%, '
                             '#5fba7d 10.0%, transparent 10.0%)'],
                    (1, 0): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg, '
                             'transparent 0%, transparent 0.0%, #5fba7d 0.0%, '
                             '#5fba7d 20.0%, transparent 20.0%)'],
                    (2, 0): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg, '
                             'transparent 0%, transparent 0.0%, #5fba7d 0.0%, '
                             '#5fba7d 50.0%, transparent 50.0%)'],
                    (3, 0): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg, '
                             'transparent 0%, transparent 0.0%, #5fba7d 0.0%, '
                             '#5fba7d 100.0%, transparent 100.0%)']}
        assert result == expected
    def test_bar_align_mid_all_neg(self):
        df = pd.DataFrame({'A': [-100, -60, -30, -20]})
        result = df.style.bar(align='mid', color=[
            '#d65f5f', '#5fba7d'])._compute().ctx
        expected = {(0, 0): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg, '
                             'transparent 0%, transparent 0.0%, '
                             '#d65f5f 0.0%, #d65f5f 100.0%, '
                             'transparent 100.0%)'],
                    (1, 0): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg, '
                             'transparent 0%, transparent 40.0%, '
                             '#d65f5f 40.0%, #d65f5f 100.0%, '
                             'transparent 100.0%)'],
                    (2, 0): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg, '
                             'transparent 0%, transparent 70.0%, '
                             '#d65f5f 70.0%, #d65f5f 100.0%, '
                             'transparent 100.0%)'],
                    (3, 0): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg, '
                             'transparent 0%, transparent 80.0%, '
                             '#d65f5f 80.0%, #d65f5f 100.0%, '
                             'transparent 100.0%)']}
        assert result == expected
    def test_bar_align_zero_pos_and_neg(self):
        # See https://github.com/pandas-dev/pandas/pull/14757
        df = pd.DataFrame({'A': [-10, 0, 20, 90]})
        result = df.style.bar(align='zero', color=[
            '#d65f5f', '#5fba7d'], width=90)._compute().ctx
        expected = {(0, 0): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg, '
                             'transparent 0%, transparent 45.0%, '
                             '#d65f5f 45.0%, #d65f5f 50%, '
                             'transparent 50%)'],
                    (1, 0): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg, '
                             'transparent 0%, transparent 50%, '
                             '#5fba7d 50%, #5fba7d 50.0%, '
                             'transparent 50.0%)'],
                    (2, 0): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg, '
                             'transparent 0%, transparent 50%, #5fba7d 50%, '
                             '#5fba7d 60.0%, transparent 60.0%)'],
                    (3, 0): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg, '
                             'transparent 0%, transparent 50%, #5fba7d 50%, '
                             '#5fba7d 95.0%, transparent 95.0%)']}
        assert result == expected
    def test_bar_bad_align_raises(self):
        df = pd.DataFrame({'A': [-100, -60, -30, -20]})
        with pytest.raises(ValueError):
            df.style.bar(align='poorly', color=['#d65f5f', '#5fba7d'])
    # ---- highlighting ----
    def test_highlight_null(self, null_color='red'):
        df = pd.DataFrame({'A': [0, np.nan]})
        result = df.style.highlight_null()._compute().ctx
        expected = {(0, 0): [''],
                    (1, 0): ['background-color: red']}
        assert result == expected
    def test_nonunique_raises(self):
        df = pd.DataFrame([[1, 2]], columns=['A', 'A'])
        with pytest.raises(ValueError):
            df.style
        with pytest.raises(ValueError):
            Styler(df)
    def test_caption(self):
        styler = Styler(self.df, caption='foo')
        result = styler.render()
        assert all(['caption' in result, 'foo' in result])
        styler = self.df.style
        result = styler.set_caption('baz')
        assert styler is result
        assert styler.caption == 'baz'
    def test_uuid(self):
        styler = Styler(self.df, uuid='abc123')
        result = styler.render()
        assert 'abc123' in result
        styler = self.df.style
        result = styler.set_uuid('aaa')
        assert result is styler
        assert result.uuid == 'aaa'
    def test_unique_id(self):
        # See https://github.com/pandas-dev/pandas/issues/16780
        df = pd.DataFrame({'a': [1, 3, 5, 6], 'b': [2, 4, 12, 21]})
        result = df.style.render(uuid='test')
        assert 'test' in result
        ids = re.findall('id="(.*?)"', result)
        assert np.unique(ids).size == len(ids)
    def test_table_styles(self):
        style = [{'selector': 'th', 'props': [('foo', 'bar')]}]
        styler = Styler(self.df, table_styles=style)
        result = ' '.join(styler.render().split())
        assert 'th { foo: bar; }' in result
        styler = self.df.style
        result = styler.set_table_styles(style)
        assert styler is result
        assert styler.table_styles == style
    def test_table_attributes(self):
        attributes = 'class="foo" data-bar'
        styler = Styler(self.df, table_attributes=attributes)
        result = styler.render()
        assert 'class="foo" data-bar' in result
        result = self.df.style.set_table_attributes(attributes).render()
        assert 'class="foo" data-bar' in result
    def test_precision(self):
        with pd.option_context('display.precision', 10):
            s = Styler(self.df)
        assert s.precision == 10
        s = Styler(self.df, precision=2)
        assert s.precision == 2
        s2 = s.set_precision(4)
        assert s is s2
        assert s.precision == 4
    def test_apply_none(self):
        def f(x):
            return pd.DataFrame(np.where(x == x.max(), 'color: red', ''),
                                index=x.index, columns=x.columns)
        result = (pd.DataFrame([[1, 2], [3, 4]])
                  .style.apply(f, axis=None)._compute().ctx)
        assert result[(1, 1)] == ['color: red']
    def test_trim(self):
        result = self.df.style.render()  # trim=True
        assert result.count('#') == 0
        result = self.df.style.highlight_max().render()
        assert result.count('#') == len(self.df.columns)
    def test_highlight_max(self):
        df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
        # max(df) = min(-df)
        for max_ in [True, False]:
            if max_:
                attr = 'highlight_max'
            else:
                df = -df
                attr = 'highlight_min'
            result = getattr(df.style, attr)()._compute().ctx
            assert result[(1, 1)] == ['background-color: yellow']
            result = getattr(df.style, attr)(color='green')._compute().ctx
            assert result[(1, 1)] == ['background-color: green']
            result = getattr(df.style, attr)(subset='A')._compute().ctx
            assert result[(1, 0)] == ['background-color: yellow']
            result = getattr(df.style, attr)(axis=0)._compute().ctx
            expected = {(1, 0): ['background-color: yellow'],
                        (1, 1): ['background-color: yellow'],
                        (0, 1): [''], (0, 0): ['']}
            assert result == expected
            result = getattr(df.style, attr)(axis=1)._compute().ctx
            expected = {(0, 1): ['background-color: yellow'],
                        (1, 1): ['background-color: yellow'],
                        (0, 0): [''], (1, 0): ['']}
            assert result == expected
        # separate since we cant negate the strs
        df['C'] = ['a', 'b']
        result = df.style.highlight_max()._compute().ctx
        expected = {(1, 1): ['background-color: yellow']}
        # NOTE(review): the two 'expected' values below/above are never
        # asserted against 'result' - these look like missing assertions.
        result = df.style.highlight_min()._compute().ctx
        expected = {(0, 0): ['background-color: yellow']}
    def test_export(self):
        f = lambda x: 'color: red' if x > 0 else 'color: blue'
        # NOTE(review): the true branch of g returns the literal 'color: %s'
        # unformatted (only the else branch applies % z) - confirm intent.
        g = lambda x, y, z: 'color: %s' if x > 0 else 'color: %s' % z
        style1 = self.styler
        style1.applymap(f)\
            .applymap(g, y='a', z='b')\
            .highlight_max()
        result = style1.export()
        style2 = self.df.style
        style2.use(result)
        assert style1._todo == style2._todo
        style2.render()
    # ---- display formatting ----
    def test_display_format(self):
        df = pd.DataFrame(np.random.random(size=(2, 2)))
        ctx = df.style.format("{:0.1f}")._translate()
        assert all(['display_value' in c for c in row] for row in ctx['body'])
        assert (all([len(c['display_value']) <= 3 for c in row[1:]]
                    for row in ctx['body']))
        assert len(ctx['body'][0][1]['display_value'].lstrip('-')) <= 3
    def test_display_format_raises(self):
        df = pd.DataFrame(np.random.randn(2, 2))
        with pytest.raises(TypeError):
            df.style.format(5)
        with pytest.raises(TypeError):
            df.style.format(True)
    def test_display_subset(self):
        df = pd.DataFrame([[.1234, .1234], [1.1234, 1.1234]],
                          columns=['a', 'b'])
        ctx = df.style.format({"a": "{:0.1f}", "b": "{0:.2%}"},
                              subset=pd.IndexSlice[0, :])._translate()
        expected = '0.1'
        assert ctx['body'][0][1]['display_value'] == expected
        assert ctx['body'][1][1]['display_value'] == '1.1234'
        assert ctx['body'][0][2]['display_value'] == '12.34%'
        raw_11 = '1.1234'
        ctx = df.style.format("{:0.1f}",
                              subset=pd.IndexSlice[0, :])._translate()
        assert ctx['body'][0][1]['display_value'] == expected
        assert ctx['body'][1][1]['display_value'] == raw_11
        ctx = df.style.format("{:0.1f}",
                              subset=pd.IndexSlice[0, :])._translate()
        assert ctx['body'][0][1]['display_value'] == expected
        assert ctx['body'][1][1]['display_value'] == raw_11
        ctx = df.style.format("{:0.1f}",
                              subset=pd.IndexSlice['a'])._translate()
        assert ctx['body'][0][1]['display_value'] == expected
        assert ctx['body'][0][2]['display_value'] == '0.1234'
        ctx = df.style.format("{:0.1f}",
                              subset=pd.IndexSlice[0, 'a'])._translate()
        assert ctx['body'][0][1]['display_value'] == expected
        assert ctx['body'][1][1]['display_value'] == raw_11
        ctx = df.style.format("{:0.1f}",
                              subset=pd.IndexSlice[[0, 1], ['a']])._translate()
        assert ctx['body'][0][1]['display_value'] == expected
        assert ctx['body'][1][1]['display_value'] == '1.1'
        assert ctx['body'][0][2]['display_value'] == '0.1234'
        assert ctx['body'][1][2]['display_value'] == '1.1234'
    def test_display_dict(self):
        df = pd.DataFrame([[.1234, .1234], [1.1234, 1.1234]],
                          columns=['a', 'b'])
        ctx = df.style.format({"a": "{:0.1f}", "b": "{0:.2%}"})._translate()
        assert ctx['body'][0][1]['display_value'] == '0.1'
        assert ctx['body'][0][2]['display_value'] == '12.34%'
        df['c'] = ['aaa', 'bbb']
        ctx = df.style.format({"a": "{:0.1f}", "c": str.upper})._translate()
        assert ctx['body'][0][1]['display_value'] == '0.1'
        assert ctx['body'][0][3]['display_value'] == 'AAA'
    def test_bad_apply_shape(self):
        df = pd.DataFrame([[1, 2], [3, 4]])
        with pytest.raises(ValueError):
            df.style._apply(lambda x: 'x', subset=pd.IndexSlice[[0, 1], :])
        with pytest.raises(ValueError):
            df.style._apply(lambda x: [''], subset=pd.IndexSlice[[0, 1], :])
        with pytest.raises(ValueError):
            df.style._apply(lambda x: ['', '', '', ''])
        with pytest.raises(ValueError):
            df.style._apply(lambda x: ['', '', ''], subset=1)
        with pytest.raises(ValueError):
            df.style._apply(lambda x: ['', '', ''], axis=1)
    def test_apply_bad_return(self):
        def f(x):
            return ''
        df = pd.DataFrame([[1, 2], [3, 4]])
        with pytest.raises(TypeError):
            df.style._apply(f, axis=None)
    def test_apply_bad_labels(self):
        def f(x):
            return pd.DataFrame(index=[1, 2], columns=['a', 'b'])
        df = pd.DataFrame([[1, 2], [3, 4]])
        with pytest.raises(ValueError):
            df.style._apply(f, axis=None)
    # ---- MultiIndex translation ----
    def test_get_level_lengths(self):
        index = pd.MultiIndex.from_product([['a', 'b'], [0, 1, 2]])
        expected = {(0, 0): 3, (0, 3): 3, (1, 0): 1, (1, 1): 1, (1, 2): 1,
                    (1, 3): 1, (1, 4): 1, (1, 5): 1}
        result = _get_level_lengths(index)
        tm.assert_dict_equal(result, expected)
    def test_get_level_lengths_un_sorted(self):
        index = pd.MultiIndex.from_arrays([
            [1, 1, 2, 1],
            ['a', 'b', 'b', 'd']
        ])
        expected = {(0, 0): 2, (0, 2): 1, (0, 3): 1,
                    (1, 0): 1, (1, 1): 1, (1, 2): 1, (1, 3): 1}
        result = _get_level_lengths(index)
        tm.assert_dict_equal(result, expected)
    def test_mi_sparse(self):
        df = pd.DataFrame({'A': [1, 2]},
                          index=pd.MultiIndex.from_arrays([['a', 'a'],
                                                           [0, 1]]))
        result = df.style._translate()
        body_0 = result['body'][0][0]
        expected_0 = {
            "value": "a", "display_value": "a", "is_visible": True,
            "type": "th", "attributes": ["rowspan=2"],
            "class": "row_heading level0 row0", "id": "level0_row0"
        }
        tm.assert_dict_equal(body_0, expected_0)
        body_1 = result['body'][0][1]
        expected_1 = {
            "value": 0, "display_value": 0, "is_visible": True,
            "type": "th", "class": "row_heading level1 row0",
            "id": "level1_row0"
        }
        tm.assert_dict_equal(body_1, expected_1)
        body_10 = result['body'][1][0]
        expected_10 = {
            "value": 'a', "display_value": 'a', "is_visible": False,
            "type": "th", "class": "row_heading level0 row1",
            "id": "level0_row1"
        }
        tm.assert_dict_equal(body_10, expected_10)
        head = result['head'][0]
        expected = [
            {'type': 'th', 'class': 'blank', 'value': '',
             'is_visible': True, "display_value": ''},
            {'type': 'th', 'class': 'blank level0', 'value': '',
             'is_visible': True, 'display_value': ''},
            {'type': 'th', 'class': 'col_heading level0 col0', 'value': 'A',
             'is_visible': True, 'display_value': 'A'}]
        assert head == expected
    def test_mi_sparse_disabled(self):
        with pd.option_context('display.multi_sparse', False):
            df = pd.DataFrame({'A': [1, 2]},
                              index=pd.MultiIndex.from_arrays([['a', 'a'],
                                                               [0, 1]]))
            result = df.style._translate()
        body = result['body']
        for row in body:
            assert 'attributes' not in row[0]
    def test_mi_sparse_index_names(self):
        df = pd.DataFrame({'A': [1, 2]}, index=pd.MultiIndex.from_arrays(
            [['a', 'a'], [0, 1]],
            names=['idx_level_0', 'idx_level_1'])
        )
        result = df.style._translate()
        head = result['head'][1]
        expected = [{
            'class': 'index_name level0', 'value': 'idx_level_0',
            'type': 'th'},
            {'class': 'index_name level1', 'value': 'idx_level_1',
             'type': 'th'},
            {'class': 'blank', 'value': '', 'type': 'th'}]
        assert head == expected
    def test_mi_sparse_column_names(self):
        df = pd.DataFrame(
            np.arange(16).reshape(4, 4),
            index=pd.MultiIndex.from_arrays(
                [['a', 'a', 'b', 'a'], [0, 1, 1, 2]],
                names=['idx_level_0', 'idx_level_1']),
            columns=pd.MultiIndex.from_arrays(
                [['C1', 'C1', 'C2', 'C2'], [1, 0, 1, 0]],
                names=['col_0', 'col_1']
            )
        )
        result = df.style._translate()
        head = result['head'][1]
        expected = [
            {'class': 'blank', 'value': '', 'display_value': '',
             'type': 'th', 'is_visible': True},
            {'class': 'index_name level1', 'value': 'col_1',
             'display_value': 'col_1', 'is_visible': True, 'type': 'th'},
            {'class': 'col_heading level1 col0',
             'display_value': 1,
             'is_visible': True,
             'type': 'th',
             'value': 1},
            {'class': 'col_heading level1 col1',
             'display_value': 0,
             'is_visible': True,
             'type': 'th',
             'value': 0},
            {'class': 'col_heading level1 col2',
             'display_value': 1,
             'is_visible': True,
             'type': 'th',
             'value': 1},
            {'class': 'col_heading level1 col3',
             'display_value': 0,
             'is_visible': True,
             'type': 'th',
             'value': 0},
        ]
        assert head == expected
class TestStylerMatplotlibDep(object):
    """Styler tests that require matplotlib to be importable."""

    def test_background_gradient(self):
        """Smoke-test background_gradient with default and named colormaps."""
        tm._skip_if_no_mpl()
        df = pd.DataFrame([[1, 2], [2, 4]], columns=['A', 'B'])
        for cmap_name in (None, 'YlOrRd'):
            ctx = df.style.background_gradient(cmap=cmap_name)._compute().ctx
            # Every cell should carry a hex background color.
            assert all("#" in cell[0] for cell in ctx.values())
            assert ctx[(0, 0)] == ctx[(0, 1)]
            assert ctx[(1, 0)] == ctx[(1, 1)]
        ctx = df.style.background_gradient(
            subset=pd.IndexSlice[1, 'A'])._compute().ctx
        assert ctx[(1, 0)] == ['background-color: #fff7fb']
def test_block_names():
    # catch accidental removal of a block: the set of blocks defined by
    # the Jinja2 template must match this list exactly.
    block_names = set(Styler.template.blocks)
    expected_blocks = {
        'before_style', 'style', 'table_styles', 'before_cellstyle',
        'cellstyle', 'before_table', 'table', 'caption', 'thead', 'tbody',
        'after_table', 'before_head_rows', 'head_tr', 'after_head_rows',
        'before_rows', 'tr', 'after_rows',
    }
    assert block_names == expected_blocks
def test_from_custom_template(tmpdir):
    # Write a template that extends the default one and overrides its
    # ``table`` block, then verify from_custom_template() builds a Styler
    # subclass wired to it.
    template_dir = tmpdir.mkdir("templates")
    template_dir.join("myhtml.tpl").write(textwrap.dedent("""\
        {% extends "html.tpl" %}
        {% block table %}
        <h1>{{ table_title|default("My Table") }}</h1>
        {{ super() }}
        {% endblock table %}"""))
    custom = Styler.from_custom_template(str(tmpdir.join('templates')),
                                         'myhtml.tpl')
    assert issubclass(custom, Styler)
    # The subclass carries its own environment and template rather than
    # sharing the class-level ones.
    assert custom.env is not Styler.env
    assert custom.template is not Styler.template
    styler = custom(pd.DataFrame({"A": [1, 2]}))
    assert styler.render()
def test_shim():
    # https://github.com/pandas-dev/pandas/pull/16059
    # Remove in 0.21
    # The deprecated ``pandas.formats.style`` import location must still
    # work, but must emit a FutureWarning pointing at the new module path.
    with tm.assert_produces_warning(FutureWarning,
                                    check_stacklevel=False):
        from pandas.formats.style import Styler as _styler # noqa
|
mbayon/TFG-MachineLearning
|
venv/lib/python3.6/site-packages/pandas/tests/io/formats/test_style.py
|
Python
|
mit
| 36,923
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for for_loops module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.py2tf.converters import converter_test_base
from tensorflow.contrib.py2tf.converters import for_loops
from tensorflow.python.platform import test
class ControlFlowTest(converter_test_base.TestCase):
  """Tests the for_loops converter on plain Python for-loops."""

  def test_basic_for(self):
    # Sample function under conversion: an ordinary for-loop accumulating
    # a sum over a list.
    def test_fn(l):
      s = 0
      for e in l:
        s += e
      return s

    node = self.parse_and_analyze(test_fn, {})
    node = for_loops.transform(node, self.ctx)

    with self.compiled(node) as result:
      l = [1, 2, 3]
      # The converted function must agree with the original...
      self.assertEqual(test_fn(l), result.test_fn(l))
      l = []
      # ...including on the zero-iteration (empty list) edge case.
      self.assertEqual(test_fn(l), result.test_fn(l))
if __name__ == '__main__':
test.main()
|
zasdfgbnm/tensorflow
|
tensorflow/contrib/py2tf/converters/for_loops_test.py
|
Python
|
apache-2.0
| 1,489
|
import logging
import random
from pajbot.models.command import Command
from pajbot.models.command import CommandExample
from pajbot.modules.base import BaseModule
log = logging.getLogger(__name__)
class PointLotteryModule(BaseModule):
    """Chat lottery game: users buy tickets with channel points and one
    ticket-weighted winner takes the whole pot when the lottery ends."""

    ID = __name__.split(".")[-1]
    NAME = "Point Lottery"
    DESCRIPTION = "Lets players participate in lottery for points"
    CATEGORY = "Game"
    SETTINGS = []

    def __init__(self, bot):
        super().__init__(bot)
        # Whether a lottery is currently accepting entries.
        self.lottery_running = False
        # One (user, tickets) tuple per entrant of the current lottery.
        self.lottery_users = []
        # Total points (tickets) in the pot.
        self.lottery_points = 0

    def load_commands(self, **options):
        """Register the !pointlottery chat command with usage examples."""
        self.commands["pointlottery"] = Command.raw_command(
            self.lottery,
            delay_all=0,
            delay_user=5,
            description="Lottery for points",
            examples=[
                CommandExample(
                    None,
                    "Lottery start",
                    chat="user:!pointlottery start\n"
                    "bot:A Lottery has begun. Type !pointlottery join {points} to join the lottery!",
                    description="Start lottery",
                ).parse(),
                CommandExample(
                    None,
                    "Lottery join",
                    chat="user:!pointlottery join {}",
                    description="You don't get confirmation whether you joined the lottery or not.",
                ).parse(),
                CommandExample(
                    None,
                    "Lottery stop",
                    chat="user:!pointlottery stop\n" "bot:The lottery has finished! {} won {} points",
                    description="Finish lottery",
                ).parse(),
                CommandExample(
                    None,
                    "Lottery join",
                    chat="user:!pointlottery {}",
                    description="You don't get confirmation whether you joined the lottery or not.",
                ).parse(),
            ],
        )

    def lottery(self, **options):
        """Dispatch ``!pointlottery <subcommand>`` to the matching handler,
        enforcing each subcommand's minimum user level.

        Returns False (silently) on unknown/malformed input or missing
        privileges; otherwise delegates to the handler.
        """
        message = options["message"]
        source = options["source"]

        # Subcommand -> (handler, minimum user level).
        commands = {
            "start": (self.process_start, 500),
            "begin": (self.process_start, 500),
            "join": (self.process_join, 100),
            "": (self.process_join, 100),
            "end": (self.process_end, 500),
            "stop": (self.process_end, 500),
            "status": (self.process_status, 100),
        }

        try:
            # "!pointlottery 50" (bare number) is shorthand for "join 50".
            if message.split(" ")[0].isdigit():
                command = ""
            else:
                command = str(message.split(" ")[0])
            cb, level = commands[command]
            if source.level < level:
                # User does not have access to run this command
                return False
            cb(**options)
        except (KeyError, ValueError, TypeError, AttributeError):
            # Unknown subcommand, empty message (message is None), etc.
            # Deliberately best-effort: chat commands fail silently.
            return False

    def process_start(self, bot, source, **rest):
        """Open a new lottery, resetting the entrant list and the pot."""
        if self.lottery_running:
            bot.say(f"{source}, a lottery is already running OMGScoots")
            return False

        self.lottery_users = []
        self.lottery_running = True
        self.lottery_points = 0

        bot.websocket_manager.emit("notification", {"message": "A lottery has been started!"})
        # Second overlay notification slightly delayed so both are visible.
        bot.execute_delayed(
            0.75, bot.websocket_manager.emit, "notification", {"message": "Type !pointlottery join to enter!"}
        )
        bot.me(
            "A lottery has begun. Type !pointlottery join {tickets} or !pointlottery {tickets} to join the lottery! "
            "The more tickets you buy, the more chances to win you have! "
            "1 ticket costs 1 point"
        )

    def process_join(self, bot, source, message, **rest):
        """Buy tickets for the running lottery; each user may enter once."""
        if not self.lottery_running:
            log.debug("No lottery running")
            return False

        # Reject duplicate entries.
        if any(entrant == source for entrant, _ in self.lottery_users):
            return False

        try:
            # Accept both "join <n>" and the bare "<n>" shorthand.
            if len(message.split(" ")) == 1:
                tickets = int(message.split(" ")[0])
            else:
                tickets = int(message.split(" ")[1])
            if not source.can_afford(tickets):
                bot.me(f"Sorry, {source}, you don't have enough points! FeelsBadMan")
                return False
            if tickets <= 0:
                bot.me(f"Sorry, {source}, you have to buy at least 1 ticket! FeelsBadMan")
                return False
            # Charge the user up front; the pot grows by the same amount.
            source.points -= tickets
            self.lottery_points += tickets
            log.info(f"Lottery points is now at {self.lottery_points}")
        except (ValueError, TypeError, AttributeError):
            bot.me(f"Sorry, {source}, I didn't recognize your command! FeelsBadMan")
            return False

        # Added user to the lottery
        self.lottery_users.append((source, tickets))

    def process_end(self, bot, **rest):
        """Close the lottery, draw a ticket-weighted winner, pay the pot."""
        if not self.lottery_running:
            return False

        self.lottery_running = False

        if not self.lottery_users:
            bot.me("Wow, no one joined the lottery DansGame")
            return False

        winner = self.weighted_choice(self.lottery_users)
        log.info(f"at end, lottery points is now at {self.lottery_points}")
        bot.websocket_manager.emit(
            "notification", {"message": f"{winner} won {self.lottery_points} points in the lottery!"}
        )
        bot.me(f"The lottery has finished! {winner} won {self.lottery_points} points! PogChamp")
        winner.points += self.lottery_points
        self.lottery_users = []

    def process_status(self, bot, **rest):
        """Announce the current entrant count and pot size in chat."""
        if not self.lottery_running:
            return False

        bot.me(
            f"{len(self.lottery_users)} people have joined the lottery so far, for a total of {self.lottery_points} points"
        )

    @staticmethod
    def weighted_choice(choices):
        """Pick one element of ``choices`` (a list of (item, weight) pairs)
        with probability proportional to its weight."""
        total = sum(w for c, w in choices)
        r = random.uniform(0, total)
        upto = 0
        for c, w in choices:
            if upto + w >= r:
                return c
            upto += w
        # Unreachable for r <= total, but raise explicitly instead of
        # relying on `assert`, which is stripped under `python -O`.
        raise AssertionError("Shouldn't get here")
|
pajlada/tyggbot
|
pajbot/modules/pointlottery.py
|
Python
|
mit
| 6,152
|
# -*- coding: utf-8 -*-
"""Test lib.file_io."""
from __future__ import (
absolute_import, division, print_function, unicode_literals)
import os
import re
import unittest
from shutil import rmtree
from tempfile import mkdtemp
from pdb.lib.data_paths import ProjectFolders
from pdb.uni_composite import (
_create_composite_file_names, _create_composite_file_paths,
_compile_uni_composite_regex, _get_local_file_names,
_uni_composite_file_exists)
class TestJsonIO(unittest.TestCase):
    """Tests for the uni_composite file-name helpers, run against a
    temporary directory populated with empty timestamped files."""

    def _create_project_folders(self):
        """Build the ProjectFolders layout rooted inside the temp dir."""
        project_dp = os.path.join(self.temp_dir, 'pdb')
        uni_dp = os.path.join(project_dp, 'uni_data')
        tsv_dp = os.path.join(project_dp, 'tsv_data')
        working_dp = os.path.join(project_dp, 'working')
        self.dirs = ProjectFolders(
            user_home=None,
            project_home=project_dp,
            uni_data=uni_dp,
            tsv_data=tsv_dp,
            working=working_dp
        )
        return None

    def _create_test_files(self):
        """Create the set of empty, timestamped composite files."""
        self.test_files = {
            'uni_composite.20151202T062045Z.tsv',
            'uni_composite.20151202T062047Z.tsv',
            'uni_composite.20151202T062050Z.tsv',
            'uni_composite.20151202T062052Z.tsv',
            'uni_composite.20151202T062054Z.tsv'
        }
        for test_fn in self.test_files:
            # `with` guarantees the handle is closed; the files are
            # intentionally empty (only their names matter).
            with open(os.path.join(self.temp_dir, test_fn), 'w'):
                pass
        return None

    def _create_valid_composite_regex(self):
        """Compile the reference pattern used to validate generated names.

        FIX: the pattern is now a raw string. The original relied on
        invalid escape sequences (``\\.``, ``\\d``) inside a normal string
        literal, which raises DeprecationWarning on modern Python and is
        slated to become a SyntaxError. The compiled regex is unchanged.
        """
        valid_composite_pat = r"""
            # Group 1
            (uni_composite) # Base name
            \. # Literal period.
            ( # Group 2
            \d{8,} # Date
            T # "T" (indicate time)
            \d{6,} # Time
            Z # "Z" (indicate GMT)
            )
            \. # Literal period.
            # Group 3
            ( # Valid file extentions.
            (?:json)|(?:yaml)|(?:tsv)
            )
            """
        self.valid_composite_fn = re.compile(valid_composite_pat, re.VERBOSE)
        return None

    def setUp(self):
        self.temp_dir = mkdtemp(prefix='pdb-tests_')
        self._create_project_folders()
        self._create_test_files()
        self._create_valid_composite_regex()
        return None

    def test_create_composite_file_names_pass(self):
        """Every generated name must match the reference pattern."""
        result = _create_composite_file_names()
        for name in result:
            this_name = result[name]
            self.assertTrue(
                self.valid_composite_fn.search(this_name)
            )
        return None

    def test_create_composite_file_paths_pass(self):
        """Generated paths live in an existing dir and have valid names."""
        names = _create_composite_file_names()
        result = _create_composite_file_paths(self.temp_dir, names)
        for name in result:
            this_path = result[name]
            dir_name = os.path.dirname(this_path)
            self.assertTrue(os.path.isdir(dir_name))
            file_name = os.path.basename(this_path)
            self.assertTrue(self.valid_composite_fn.search(file_name))
        return None

    def test_match_time_stamped_composite_fn_pass(self):
        """The module regex accepts a timestamped composite file name."""
        uniprot_composite_pat = _compile_uni_composite_regex()
        time_stamped_name = 'uni_composite.20151202T051543Z.tsv'
        self.assertTrue(uniprot_composite_pat.search(time_stamped_name))
        return None

    def test_match_composite_fn_pass(self):
        """The module regex also accepts the bare (untimestamped) name."""
        uniprot_composite_pat = _compile_uni_composite_regex()
        composite_name = 'uni_composite.tsv'
        self.assertTrue(uniprot_composite_pat.search(composite_name))
        return None

    def test_get_local_file_names(self):
        """Directory listing must return exactly the files created."""
        local_files = set(_get_local_file_names(self.temp_dir))
        self.assertEqual(local_files, self.test_files)
        return None

    def test_find_valid_file_pass(self):
        """At least one valid composite file exists in the temp dir."""
        self.assertTrue(_uni_composite_file_exists(self.temp_dir))
        return None

    def tearDown(self):
        rmtree(self.temp_dir)
        return None
if __name__ == '__main__':
unittest.main()
|
shellydeforte/PDB
|
pdb/tests/test_uni_composite.py
|
Python
|
mit
| 4,243
|
#!/usr/bin/env python
#
# Copyright 2007, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Negative compilation test for Google Test."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import sys
import unittest
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
if not IS_LINUX:
sys.exit(0) # Negative compilation tests are not supported on Windows & Mac.
class GTestNCTest(unittest.TestCase):
  """Negative compilation test for Google Test."""

  def testCompilerError(self):
    """Verifies that erroneous code leads to expected compiler
    messages."""

    # Defines a list of test specs, where each element is a tuple
    # (test name, list of regexes for matching the compiler errors).
    # A None regex list (see SANITY) means the spec is expected to compile.
    test_specs = [
        ('CANNOT_IGNORE_RUN_ALL_TESTS_RESULT',
         [r'ignoring return value']),
        ('USER_CANNOT_INCLUDE_GTEST_INTERNAL_INL_H',
         [r'must not be included except by Google Test itself']),
        ('CATCHES_DECLARING_SETUP_IN_TEST_FIXTURE_WITH_TYPO',
         [r'Setup_should_be_spelled_SetUp']),
        ('CATCHES_CALLING_SETUP_IN_TEST_WITH_TYPO',
         [r'Setup_should_be_spelled_SetUp']),
        ('CATCHES_DECLARING_SETUP_IN_ENVIRONMENT_WITH_TYPO',
         [r'Setup_should_be_spelled_SetUp']),
        ('CATCHES_CALLING_SETUP_IN_ENVIRONMENT_WITH_TYPO',
         [r'Setup_should_be_spelled_SetUp']),
        ('CATCHES_WRONG_CASE_IN_TYPED_TEST_P',
         [r'BarTest.*was not declared']),
        ('CATCHES_WRONG_CASE_IN_REGISTER_TYPED_TEST_CASE_P',
         [r'BarTest.*was not declared']),
        ('CATCHES_WRONG_CASE_IN_INSTANTIATE_TYPED_TEST_CASE_P',
         [r'BarTest.*not declared']),
        ('CATCHES_INSTANTIATE_TYPED_TESET_CASE_P_WITH_SAME_NAME_PREFIX',
         [r'redefinition of.*My.*FooTest']),
        ('STATIC_ASSERT_TYPE_EQ_IS_NOT_A_TYPE',
         [r'StaticAssertTypeEq.* does not name a type']),
        ('STATIC_ASSERT_TYPE_EQ_WORKS_IN_NAMESPACE',
         [r'StaticAssertTypeEq.*int.*const int']),
        ('STATIC_ASSERT_TYPE_EQ_WORKS_IN_CLASS',
         [r'StaticAssertTypeEq.*int.*bool']),
        ('STATIC_ASSERT_TYPE_EQ_WORKS_IN_FUNCTION',
         [r'StaticAssertTypeEq.*const int.*int']),
        ('SANITY',
         None)
        ]

    # NOTE(review): test_specs is built but never consumed -- the
    # verification step below is still pending (see TODO).
    # TODO(wan@google.com): verify that the test specs are satisfied.
if __name__ == '__main__':
unittest.main()
|
nawawi/wkhtmltopdf
|
webkit/Source/ThirdParty/gtest/test/gtest_nc_test.py
|
Python
|
lgpl-3.0
| 3,758
|
# -*- coding: utf-8 -*-
from unittest import TestCase
from scrapy.settings import Settings
from scrapy_tracker.storage.memory import MemoryStorage
from scrapy_tracker.storage.redis import RedisStorage
from scrapy_tracker.storage.sqlalchemy import SqlAlchemyStorage
from tests import TEST_KEY, TEST_CHECKSUM, mock
class TestMemoryStorage(TestCase):
    """getset() semantics of the in-memory backend: store the new checksum
    and return whatever was stored before (None on first sight)."""

    def setUp(self):
        self.storage = MemoryStorage(None)

    def test_getset(self):
        # The first write for a key returns no previous value.
        self.assertIsNone(self.storage.getset(TEST_KEY, TEST_CHECKSUM))
        # Each later write returns the checksum stored just before it.
        self.assertEqual(TEST_CHECKSUM,
                         self.storage.getset(TEST_KEY, 'new_checksum'))
        self.assertEqual('new_checksum',
                         self.storage.getset(TEST_KEY, TEST_CHECKSUM))
        # An unseen key behaves like a fresh one.
        self.assertIsNone(self.storage.getset('new_key', TEST_CHECKSUM))
class TestSqlAlchemyStorage(TestCase):
    """Same getset() contract as the memory backend, but persisted through
    SQLAlchemy into an in-memory SQLite database."""

    def setUp(self):
        self.storage = SqlAlchemyStorage(Settings({
            'TRACKER_SQLALCHEMY_ENGINE': 'sqlite:///:memory:',
            'TRACKER_SQLALCHEMY_FLUSH_DB': True
        }))

    def test_getset(self):
        # A fresh key has no previous checksum.
        self.assertIsNone(self.storage.getset(TEST_KEY, TEST_CHECKSUM))
        # Every subsequent call returns the value stored by the call
        # immediately before it.
        self.assertEqual(TEST_CHECKSUM,
                         self.storage.getset(TEST_KEY, 'new_checksum'))
        self.assertEqual('new_checksum',
                         self.storage.getset(TEST_KEY, TEST_CHECKSUM))
        # Unknown keys start from scratch.
        self.assertIsNone(self.storage.getset('new_key', TEST_CHECKSUM))
class TestRedisStorage(TestCase):
    """getset() contract of the Redis backend, with StrictRedis mocked so
    no server is required."""

    def setUp(self):
        # Replace StrictRedis with a mock whose getset() mimics the Redis
        # GETSET command against a plain dict.
        with mock.patch("scrapy_tracker.storage.redis.StrictRedis") as mock_redis:
            data = {}

            def getset(key, val):
                # GETSET contract: return the previous value (None if
                # absent) and store the new one.
                old_val = data.get(key)
                data[key] = val
                return old_val

            mock_getset = mock.MagicMock()
            mock_getset.getset.side_effect = getset
            mock_redis.return_value = mock_getset

            # NOTE(review): 'TRACKER_RADIS_FLUSH_DB' looks like a typo for
            # 'TRACKER_REDIS_FLUSH_DB' -- confirm which key RedisStorage
            # actually reads before renaming it here.
            self.storage = RedisStorage(Settings({
                'TRACKER_RADIS_FLUSH_DB': True
            }))

    def test_getset(self):
        # First write returns None; each later write returns the value
        # stored by the previous call; unseen keys start fresh.
        result = self.storage.getset(TEST_KEY, TEST_CHECKSUM)
        self.assertIsNone(result)
        found = self.storage.getset(TEST_KEY, 'new_checksum')
        self.assertEqual(TEST_CHECKSUM, found)
        found = self.storage.getset(TEST_KEY, TEST_CHECKSUM)
        self.assertEqual('new_checksum', found)
        result = self.storage.getset('new_key', TEST_CHECKSUM)
        self.assertIsNone(result)
|
vkastyniuk/scrapy-tracker
|
tests/test_storage.py
|
Python
|
bsd-3-clause
| 2,541
|
#Python Cleaning Script
import os
import Tkinter, tkMessageBox
from shutil import rmtree
root = Tkinter.Tk()
root.withdraw()
HOME_VAR = "HOME"
if os.name == 'nt':
HOME_VAR = "UserProfile"
def main():
if tkMessageBox.askokcancel("Automated Clean Up", "System Requests Running Automated Clean Up Script. Continue?"):
print "Beginning Cleanup..."
folder = os.getenv("HOME") + "/Downloads"
print "Clearing Downloads..."
for the_file in os.listdir(folder):
file_path = os.path.join(folder, the_file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
rmtree(file_path)
except Exception, e:
print e
if __name__ == "__main__":
main()
|
samueljackson92/scripts
|
python/misc/cleanup_unix.py
|
Python
|
mit
| 723
|
"""
Fourier Reconstruction of RR-Lyrae Templates
--------------------------------------------
Figure 10.1
An example of a truncated Fourier representation of an RR Lyrae light curve.
The thick dashed line shows the true curve; the gray lines show the
approximation based on 1, 3, and 8 Fourier modes (sinusoids).
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from astroML.datasets import fetch_rrlyrae_templates
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Load the RR Lyrae template
templates = fetch_rrlyrae_templates()
x, y = templates['115r'].T
#------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 5))
fig.subplots_adjust(hspace=0)
kvals = [1, 3, 8]
subplots = [311, 312, 313]
for (k, subplot) in zip(kvals, subplots):
    ax = fig.add_subplot(subplot)

    # Use FFT to fit a truncated Fourier series: zeroing the bins from
    # k+1 up to -k keeps the DC term plus the k lowest-frequency modes
    # (and their conjugates), so the inverse FFT is the k-mode fit.
    y_fft = np.fft.fft(y)
    y_fft[k + 1:-k] = 0
    y_fit = np.fft.ifft(y_fft).real

    # plot the true value and the k-term reconstruction; the curves are
    # tiled over [0, 2] to show two full phases.
    ax.plot(np.concatenate([x, 1 + x]),
            np.concatenate([y, y]), '--k', lw=2)
    ax.plot(np.concatenate([x, 1 + x]),
            np.concatenate([y_fit, y_fit]), color='gray')

    # Panel label: "1 mode", "3 modes", "8 modes".
    label = "%i mode" % k
    if k > 1:
        label += 's'
    ax.text(0.02, 0.1, label, ha='left', va='bottom',
            transform=ax.transAxes)

    # Only the bottom panel gets an x-label; only the middle one a y-label.
    if subplot == subplots[-1]:
        ax.set_xlabel('phase')
    else:
        ax.xaxis.set_major_formatter(plt.NullFormatter())
    if subplot == subplots[1]:
        ax.set_ylabel('amplitude')
    ax.yaxis.set_major_formatter(plt.NullFormatter())

    ax.set_xlim(0, 2)
    # Inverted y-limits (1.1 down to -0.1) so smaller values plot higher --
    # presumably because the light-curve values are magnitudes; confirm.
    ax.set_ylim(1.1, -0.1)
plt.show()
|
nhuntwalker/astroML
|
book_figures/chapter10/fig_rrlyrae_reconstruct.py
|
Python
|
bsd-2-clause
| 2,472
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Libpfm4(MakefilePackage):
    """libpfm4 is a userspace library to help
    setup performance events for use with
    the perf_events Linux kernel interface."""

    homepage = "http://perfmon2.sourceforge.net"
    url = "https://downloads.sourceforge.net/project/perfmon2/libpfm4/libpfm-4.8.0.tar.gz"

    # Second argument is the md5 checksum of the release tarball.
    version('4.8.0', '730383896db92e12fb2cc10f2d41dd43')

    # Fails to build libpfm4 with intel compiler version 16 and 17
    conflicts('%intel@16:17')

    @property
    def install_targets(self):
        # make(1) arguments for the install phase: install under the Spack
        # prefix with the standard lib/include/man layout, and use a no-op
        # ldconfig since Spack installs are not system-wide.
        return ['DESTDIR={0}'.format(self.prefix),
                'LIBDIR=/lib',
                'INCDIR=/include',
                'MANDIR=/man',
                'LDCONFIG=true',
                'install']
|
wscullin/spack
|
var/spack/repos/builtin/packages/libpfm4/package.py
|
Python
|
lgpl-2.1
| 1,981
|
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing classes related to AWS VM networking.
The Firewall class provides a way of opening VM ports. The Network class allows
VMs to communicate via internal ips and isolates PerfKitBenchmarker VMs from
others in
the same project. See https://aws.amazon.com/documentation/vpc/
for more information about AWS Virtual Private Clouds.
"""
import json
import logging
import threading
import uuid
from perfkitbenchmarker import flags
from perfkitbenchmarker import network
from perfkitbenchmarker import resource
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.aws import util
FLAGS = flags.FLAGS
class AwsFirewall(network.BaseFirewall):
  """An object representing the AWS Firewall."""

  def __init__(self, project):
    # (port, group_id) pairs already opened, so repeated AllowPort calls
    # do not re-issue AWS commands. `project` is accepted but unused here.
    self.firewall_set = set()
    self._lock = threading.Lock()

  def AllowPort(self, vm, port):
    """Opens a port on the firewall.

    Args:
      vm: The BaseVirtualMachine object to open the port for.
      port: The local port to open.
    """
    # Static VMs are not managed by PerfKit; leave their firewall alone.
    if vm.is_static:
      return
    entry = (port, vm.group_id)
    # Lock-free fast path, then re-check under the lock so only one
    # thread issues the authorize commands for a given (port, group).
    if entry in self.firewall_set:
      return
    with self._lock:
      if entry in self.firewall_set:
        return
      authorize_cmd = util.AWS_PREFIX + [
          'ec2',
          'authorize-security-group-ingress',
          '--region=%s' % vm.region,
          '--group-id=%s' % vm.group_id,
          '--port=%s' % port,
          '--cidr=0.0.0.0/0']
      # Open the port for both TCP and UDP, from any source address.
      util.IssueRetryableCommand(
          authorize_cmd + ['--protocol=tcp'])
      util.IssueRetryableCommand(
          authorize_cmd + ['--protocol=udp'])
      self.firewall_set.add(entry)

  def DisallowAllPorts(self):
    """Closes all ports on the firewall."""
    pass
class AwsVpc(resource.BaseResource):
"""An object representing an Aws VPC."""
def __init__(self, region):
super(AwsVpc, self).__init__()
self.region = region
self.id = None
def _Create(self):
"""Creates the VPC."""
create_cmd = util.AWS_PREFIX + [
'ec2',
'create-vpc',
'--region=%s' % self.region,
'--cidr-block=10.0.0.0/16']
stdout, _, _ = vm_util.IssueCommand(create_cmd)
response = json.loads(stdout)
self.id = response['Vpc']['VpcId']
self._EnableDnsHostnames()
util.AddDefaultTags(self.id, self.region)
def _Exists(self):
"""Returns true if the VPC exists."""
describe_cmd = util.AWS_PREFIX + [
'ec2',
'describe-vpcs',
'--region=%s' % self.region,
'--filter=Name=vpc-id,Values=%s' % self.id]
stdout, _ = util.IssueRetryableCommand(describe_cmd)
response = json.loads(stdout)
vpcs = response['Vpcs']
assert len(vpcs) < 2, 'Too many VPCs.'
return len(vpcs) > 0
def _EnableDnsHostnames(self):
"""Sets the enableDnsHostnames attribute of this VPC to True.
By default, instances launched in non-default VPCs are assigned an
unresolvable hostname. This breaks the hadoop benchmark. Setting the
enableDnsHostnames attribute to 'true' on the VPC resolves this. See:
http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_DHCP_Options.html
"""
enable_hostnames_command = util.AWS_PREFIX + [
'ec2',
'modify-vpc-attribute',
'--region=%s' % self.region,
'--vpc-id', self.id,
'--enable-dns-hostnames',
'{ "Value": true }']
util.IssueRetryableCommand(enable_hostnames_command)
def _Delete(self):
"""Delete's the VPC."""
delete_cmd = util.AWS_PREFIX + [
'ec2',
'delete-vpc',
'--region=%s' % self.region,
'--vpc-id=%s' % self.id]
vm_util.IssueCommand(delete_cmd)
class AwsSubnet(resource.BaseResource):
"""An object representing an Aws subnet."""
def __init__(self, zone, vpc_id, cidr_block='10.0.0.0/24'):
super(AwsSubnet, self).__init__()
self.zone = zone
self.region = zone[:-1]
self.vpc_id = vpc_id
self.id = None
self.cidr_block = cidr_block
def _Create(self):
"""Creates the subnet."""
create_cmd = util.AWS_PREFIX + [
'ec2',
'create-subnet',
'--region=%s' % self.region,
'--vpc-id=%s' % self.vpc_id,
'--cidr-block=%s' % self.cidr_block,
'--availability-zone=%s' % self.zone]
stdout, _, _ = vm_util.IssueCommand(create_cmd)
response = json.loads(stdout)
self.id = response['Subnet']['SubnetId']
util.AddDefaultTags(self.id, self.region)
def _Delete(self):
"""Deletes the subnet."""
logging.info('Deleting subnet %s. This may fail if all instances in the '
'subnet have not completed termination, but will be retried.',
self.id)
delete_cmd = util.AWS_PREFIX + [
'ec2',
'delete-subnet',
'--region=%s' % self.region,
'--subnet-id=%s' % self.id]
vm_util.IssueCommand(delete_cmd)
def _Exists(self):
"""Returns true if the subnet exists."""
describe_cmd = util.AWS_PREFIX + [
'ec2',
'describe-subnets',
'--region=%s' % self.region,
'--filter=Name=subnet-id,Values=%s' % self.id]
stdout, _ = util.IssueRetryableCommand(describe_cmd)
response = json.loads(stdout)
subnets = response['Subnets']
assert len(subnets) < 2, 'Too many subnets.'
return len(subnets) > 0
class AwsInternetGateway(resource.BaseResource):
"""An object representing an Aws Internet Gateway."""
def __init__(self, region):
super(AwsInternetGateway, self).__init__()
self.region = region
self.vpc_id = None
self.id = None
self.attached = False
def _Create(self):
"""Creates the internet gateway."""
create_cmd = util.AWS_PREFIX + [
'ec2',
'create-internet-gateway',
'--region=%s' % self.region]
stdout, _, _ = vm_util.IssueCommand(create_cmd)
response = json.loads(stdout)
self.id = response['InternetGateway']['InternetGatewayId']
util.AddDefaultTags(self.id, self.region)
def _Delete(self):
"""Deletes the internet gateway."""
delete_cmd = util.AWS_PREFIX + [
'ec2',
'delete-internet-gateway',
'--region=%s' % self.region,
'--internet-gateway-id=%s' % self.id]
vm_util.IssueCommand(delete_cmd)
def _Exists(self):
"""Returns true if the internet gateway exists."""
describe_cmd = util.AWS_PREFIX + [
'ec2',
'describe-internet-gateways',
'--region=%s' % self.region,
'--filter=Name=internet-gateway-id,Values=%s' % self.id]
stdout, _ = util.IssueRetryableCommand(describe_cmd)
response = json.loads(stdout)
internet_gateways = response['InternetGateways']
assert len(internet_gateways) < 2, 'Too many internet gateways.'
return len(internet_gateways) > 0
def Attach(self, vpc_id):
"""Attaches the internetgateway to the VPC."""
if not self.attached:
self.vpc_id = vpc_id
attach_cmd = util.AWS_PREFIX + [
'ec2',
'attach-internet-gateway',
'--region=%s' % self.region,
'--internet-gateway-id=%s' % self.id,
'--vpc-id=%s' % self.vpc_id]
util.IssueRetryableCommand(attach_cmd)
self.attached = True
def Detach(self):
"""Detaches the internetgateway from the VPC."""
if self.attached:
detach_cmd = util.AWS_PREFIX + [
'ec2',
'detach-internet-gateway',
'--region=%s' % self.region,
'--internet-gateway-id=%s' % self.id,
'--vpc-id=%s' % self.vpc_id]
util.IssueRetryableCommand(detach_cmd)
self.attached = False
class AwsRouteTable(resource.BaseResource):
  """An object representing a route table.

  Attributes:
    region: The AWS region the route table is in.
    vpc_id: The id of the VPC whose default route table this represents.
    id: The route table id; None until _PostCreate runs.
  """

  def __init__(self, region, vpc_id):
    super(AwsRouteTable, self).__init__()
    self.region = region
    self.vpc_id = vpc_id
    # FIX: initialize `id` so the attribute exists before _PostCreate
    # populates it, matching the other resource classes in this module.
    self.id = None

  def _Create(self):
    """Creates the route table.

    This is a no-op since every VPC has a default route table.
    """
    pass

  def _Delete(self):
    """Deletes the route table.

    This is a no-op since the default route table gets deleted with the VPC.
    """
    pass

  @vm_util.Retry()
  def _PostCreate(self):
    """Gets data about the route table."""
    describe_cmd = util.AWS_PREFIX + [
        'ec2',
        'describe-route-tables',
        '--region=%s' % self.region,
        '--filters=Name=vpc-id,Values=%s' % self.vpc_id]
    stdout, _ = util.IssueRetryableCommand(describe_cmd)
    response = json.loads(stdout)
    # Take the first (default) route table returned for this VPC.
    self.id = response['RouteTables'][0]['RouteTableId']

  def CreateRoute(self, internet_gateway_id):
    """Adds a default route (0.0.0.0/0) via the internet gateway."""
    create_cmd = util.AWS_PREFIX + [
        'ec2',
        'create-route',
        '--region=%s' % self.region,
        '--route-table-id=%s' % self.id,
        '--gateway-id=%s' % internet_gateway_id,
        '--destination-cidr-block=0.0.0.0/0']
    util.IssueRetryableCommand(create_cmd)
class AwsPlacementGroup(resource.BaseResource):
"""Object representing an AWS Placement Group.
Attributes:
region: The AWS region the Placement Group is in.
name: The name of the Placement Group.
"""
def __init__(self, region):
"""Init method for AwsPlacementGroup.
Args:
region: A string containing the AWS region of the Placement Group.
"""
super(AwsPlacementGroup, self).__init__()
self.name = (
'perfkit-%s-%s' % (FLAGS.run_uri, str(uuid.uuid4())[-12:]))
self.region = region
def _Create(self):
"""Creates the Placement Group."""
create_cmd = util.AWS_PREFIX + [
'ec2',
'create-placement-group',
'--region=%s' % self.region,
'--group-name=%s' % self.name,
'--strategy=cluster']
vm_util.IssueCommand(create_cmd)
def _Delete(self):
"""Deletes the Placement Group."""
delete_cmd = util.AWS_PREFIX + [
'ec2',
'delete-placement-group',
'--region=%s' % self.region,
'--group-name=%s' % self.name]
vm_util.IssueCommand(delete_cmd)
def _Exists(self):
"""Returns true if the Placement Group exists."""
describe_cmd = util.AWS_PREFIX + [
'ec2',
'describe-placement-groups',
'--region=%s' % self.region,
'--filter=Name=group-name,Values=%s' % self.name]
stdout, _ = util.IssueRetryableCommand(describe_cmd)
response = json.loads(stdout)
placement_groups = response['PlacementGroups']
assert len(placement_groups) < 2, 'Too many placement groups.'
return len(placement_groups) > 0
class AwsNetwork(network.BaseNetwork):
  """Object representing an AWS Network.

  Attributes:
    region: The AWS region the Network is in.
    vpc_id: The id of the Network's Virtual Private Cloud (VPC).
    subnet_id: The id of the Subnet of the Network's VPC.
    internet_gateway_id: The id of the Network's Internet Gateway.
    route_table_id: The id of the Route Table of the Networks's VPC.
  """

  def __init__(self, zone):
    """Initializes AwsNetwork instances.

    Args:
      zone: The Availability Zone that the Network corresponds to.
    """
    super(AwsNetwork, self).__init__(zone)
    # The region name is the zone name minus its trailing letter
    # (e.g. 'us-east-1a' -> 'us-east-1').
    self.region = zone[:-1]
    self.vpc = AwsVpc(self.region)
    self.internet_gateway = AwsInternetGateway(self.region)
    # Subnet and route table are constructed lazily in Create() because
    # both need the VPC id, which only exists after vpc.Create().
    self.subnet = None
    self.route_table = None
    self.placement_group = AwsPlacementGroup(self.region)

  def Create(self):
    """Creates the network."""
    self.vpc.Create()
    self.internet_gateway.Create()
    self.internet_gateway.Attach(self.vpc.id)
    if self.route_table is None:
      self.route_table = AwsRouteTable(self.region, self.vpc.id)
    self.route_table.Create()
    # Route all outbound traffic (0.0.0.0/0) through the gateway.
    self.route_table.CreateRoute(self.internet_gateway.id)
    if self.subnet is None:
      self.subnet = AwsSubnet(self.zone, self.vpc.id)
    self.subnet.Create()
    self.placement_group.Create()

  def Delete(self):
    """Deletes the network."""
    # Tear down in reverse dependency order; the gateway must be detached
    # from the VPC before either can be deleted.
    self.placement_group.Delete()
    if self.subnet:
      self.subnet.Delete()
    self.internet_gateway.Detach()
    self.internet_gateway.Delete()
    self.vpc.Delete()
|
ksasi/PerfKitBenchmarker
|
perfkitbenchmarker/aws/aws_network.py
|
Python
|
apache-2.0
| 12,614
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# Copyright 2012-2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handles all requests relating to compute resources (e.g. guest VMs,
networking and storage of VMs, and compute hosts on which they run)."""
import base64
import copy
import functools
import re
import string
import uuid
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import excutils
from oslo_utils import strutils
from oslo_utils import timeutils
from oslo_utils import units
from oslo_utils import uuidutils
import six
from six.moves import range
from nova import availability_zones
from nova import block_device
from nova.cells import opts as cells_opts
from nova.compute import flavors
from nova.compute import instance_actions
from nova.compute import power_state
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova.consoleauth import rpcapi as consoleauth_rpcapi
from nova import crypto
from nova.db import base
from nova import exception
from nova import hooks
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LI
from nova.i18n import _LW
from nova import image
from nova import keymgr
from nova import network
from nova.network import model as network_model
from nova.network.security_group import openstack_driver
from nova.network.security_group import security_group_base
from nova import notifications
from nova import objects
from nova.objects import base as obj_base
from nova.objects import block_device as block_device_obj
from nova.objects import keypair as keypair_obj
from nova.objects import quotas as quotas_obj
from nova.objects import security_group as security_group_obj
from nova.pci import request as pci_request
import nova.policy
from nova import rpc
from nova.scheduler import client as scheduler_client
from nova import servicegroup
from nova import utils
from nova.virt import hardware
from nova import volume
# Module-level logger and notification helpers.
LOG = logging.getLogger(__name__)

# Partial constructors so decorated methods emit 'compute'-service
# notifications without repeating the service name at each call site.
get_notifier = functools.partial(rpc.get_notifier, service='compute')
wrap_exception = functools.partial(exception.wrap_exception,
                                   get_notifier=get_notifier)

# Configuration options registered by this module (resize, scheduling
# defaults, snapshot property handling, kernel handling, naming template,
# and local block device limits).
compute_opts = [
    cfg.BoolOpt('allow_resize_to_same_host',
                default=False,
                help='Allow destination machine to match source for resize. '
                     'Useful when testing in single-host environments.'),
    cfg.StrOpt('default_schedule_zone',
               help='Availability zone to use when user doesn\'t specify one'),
    cfg.ListOpt('non_inheritable_image_properties',
                default=['cache_in_nova',
                         'bittorrent'],
                help='These are image properties which a snapshot should not'
                     ' inherit from an instance'),
    cfg.StrOpt('null_kernel',
               default='nokernel',
               help='Kernel image that indicates not to use a kernel, but to '
                    'use a raw disk image instead'),
    cfg.StrOpt('multi_instance_display_name_template',
               default='%(name)s-%(count)d',
               help='When creating multiple instances with a single request '
                    'using the os-multiple-create API extension, this '
                    'template will be used to build the display name for '
                    'each instance. The benefit is that the instances '
                    'end up with different hostnames. To restore legacy '
                    'behavior of every instance having the same name, set '
                    'this option to "%(name)s". Valid keys for the '
                    'template are: name, uuid, count.'),
    cfg.IntOpt('max_local_block_devices',
               default=3,
               help='Maximum number of devices that will result '
                    'in a local image being created on the hypervisor node. '
                    'Setting this to 0 means nova will allow only '
                    'boot from volume. A negative number means unlimited.'),
]

# Options controlling encryption of ephemeral disks, grouped under
# [ephemeral_storage_encryption] in nova.conf.
ephemeral_storage_encryption_group = cfg.OptGroup(
    name='ephemeral_storage_encryption',
    title='Ephemeral storage encryption options')

ephemeral_storage_encryption_opts = [
    cfg.BoolOpt('enabled',
                default=False,
                help='Whether to encrypt ephemeral storage'),
    cfg.StrOpt('cipher',
               default='aes-xts-plain64',
               help='The cipher and mode to be used to encrypt ephemeral '
                    'storage. Which ciphers are available ciphers depends '
                    'on kernel support. See /proc/crypto for the list of '
                    'available options.'),
    cfg.IntOpt('key_size',
               default=512,
               help='The bit length of the encryption key to be used to '
                    'encrypt ephemeral storage (in XTS mode only half of '
                    'the bits are used for encryption key)')
]

# Register everything with the global config object and pull in options
# owned by other modules that this module reads.
CONF = cfg.CONF
CONF.register_opts(compute_opts)
CONF.register_group(ephemeral_storage_encryption_group)
CONF.register_opts(ephemeral_storage_encryption_opts,
                   group='ephemeral_storage_encryption')

CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
CONF.import_opt('enable', 'nova.cells.opts', group='cells')
CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')

# user_data is stored in a text column; larger payloads would be truncated.
MAX_USERDATA_SIZE = 65535
# Security groups that must not be deleted or renamed.
RO_SECURITY_GROUPS = ['default']
# Flavor extra_spec key for video RAM, counted against the RAM quota.
VIDEO_RAM = 'hw_video:ram_max_mb'

# Action names used when notifying about aggregate changes.
AGGREGATE_ACTION_UPDATE = 'Update'
AGGREGATE_ACTION_UPDATE_META = 'UpdateMeta'
AGGREGATE_ACTION_DELETE = 'Delete'
AGGREGATE_ACTION_ADD = 'Add'
def check_instance_state(vm_state=None, task_state=(None,),
                         must_have_launched=True):
    """Decorator to check VM and/or task state before entry to API functions.

    If the instance is in the wrong state, or has not been successfully
    started at least once the wrapper will raise an exception.
    """
    def _as_state_set(states):
        # Normalize an allowed-states argument: None means "don't check",
        # anything else is coerced to a set for membership testing.
        if states is None or isinstance(states, set):
            return states
        return set(states)

    vm_state = _as_state_set(vm_state)
    task_state = _as_state_set(task_state)

    def outer(f):
        @functools.wraps(f)
        def inner(self, context, instance, *args, **kw):
            # vm_state is validated before task_state.
            for attr, allowed in (('vm_state', vm_state),
                                  ('task_state', task_state)):
                current = getattr(instance, attr)
                if allowed is not None and current not in allowed:
                    raise exception.InstanceInvalidState(
                        attr=attr,
                        instance_uuid=instance.uuid,
                        state=current,
                        method=f.__name__)
            if must_have_launched and not instance.launched_at:
                raise exception.InstanceInvalidState(
                    attr='launched_at',
                    instance_uuid=instance.uuid,
                    state=instance.launched_at,
                    method=f.__name__)

            return f(self, context, instance, *args, **kw)
        return inner
    return outer
def check_instance_host(function):
    """Reject the call if the instance has not been assigned a host yet."""
    @functools.wraps(function)
    def wrapped(self, context, instance, *args, **kwargs):
        if instance.host:
            return function(self, context, instance, *args, **kwargs)
        raise exception.InstanceNotReady(instance_id=instance.uuid)
    return wrapped
def check_instance_lock(function):
    """Refuse to act on a locked instance unless the caller is an admin."""
    @functools.wraps(function)
    def inner(self, context, instance, *args, **kwargs):
        blocked = instance.locked and not context.is_admin
        if blocked:
            raise exception.InstanceIsLocked(instance_uuid=instance.uuid)
        return function(self, context, instance, *args, **kwargs)
    return inner
def policy_decorator(scope):
    """Check corresponding policy prior of wrapped method to execution."""
    def outer(func):
        @functools.wraps(func)
        def wrapped(self, context, target, *args, **kwargs):
            if self.skip_policy_check:
                # Policy enforcement explicitly disabled on this API object.
                return func(self, context, target, *args, **kwargs)
            check_policy(context, func.__name__, target, scope)
            return func(self, context, target, *args, **kwargs)
        return wrapped
    return outer
# Ready-made decorators for the two policy scopes used in this module.
wrap_check_policy = policy_decorator(scope='compute')
wrap_check_security_groups_policy = policy_decorator(
    scope='compute:security_groups')
def check_policy(context, action, target, scope='compute'):
    """Enforce the policy rule named ``<scope>:<action>`` against target."""
    rule = ':'.join((scope, action))
    nova.policy.enforce(context, rule, target)
def check_instance_cell(fn):
    """Decorator that validates the instance's cell before calling *fn*.

    Delegates to self._validate_cell(instance, method_name), which raises
    if the cell is unknown or read-only when running as an API cell.

    Fix: use functools.wraps instead of manually copying only __name__,
    so __doc__, __module__ and __wrapped__ are preserved as well.
    """
    @functools.wraps(fn)
    def _wrapped(self, context, instance, *args, **kwargs):
        self._validate_cell(instance, fn.__name__)
        return fn(self, context, instance, *args, **kwargs)
    return _wrapped
def _diff_dict(orig, new):
"""Return a dict describing how to change orig to new. The keys
correspond to values that have changed; the value will be a list
of one or two elements. The first element of the list will be
either '+' or '-', indicating whether the key was updated or
deleted; if the key was updated, the list will contain a second
element, giving the updated value.
"""
# Figure out what keys went away
result = {k: ['-'] for k in set(orig.keys()) - set(new.keys())}
# Compute the updates
for key, value in new.items():
if key not in orig or value != orig[key]:
result[key] = ['+', value]
return result
class API(base.Base):
"""API for interacting with the compute manager."""
    def __init__(self, image_api=None, network_api=None, volume_api=None,
                 security_group_api=None, skip_policy_check=False, **kwargs):
        """Wire up the sub-APIs this compute API delegates to.

        Each *_api argument may be injected (e.g. for testing); when None,
        the default implementation is constructed.  skip_policy_check is
        propagated to the sub-APIs that support it and consulted by the
        policy_decorator wrappers.
        """
        self.skip_policy_check = skip_policy_check
        self.image_api = image_api or image.API()
        self.network_api = network_api or network.API(
            skip_policy_check=skip_policy_check)
        self.volume_api = volume_api or volume.API()
        # The security group driver is chosen by configuration
        # (nova-network vs. neutron backed).
        self.security_group_api = (security_group_api or
            openstack_driver.get_openstack_security_group_driver(
                skip_policy_check=skip_policy_check))
        self.consoleauth_rpcapi = consoleauth_rpcapi.ConsoleAuthAPI()
        self.compute_rpcapi = compute_rpcapi.ComputeAPI()
        # Lazily created by the compute_task_api property.
        self._compute_task_api = None
        self.servicegroup_api = servicegroup.API()
        self.notifier = rpc.get_notifier('compute', CONF.host)
        # Key manager is only needed when ephemeral disks are encrypted.
        if CONF.ephemeral_storage_encryption.enabled:
            self.key_manager = keymgr.API()

        super(API, self).__init__(**kwargs)
    @property
    def compute_task_api(self):
        """Lazily constructed handle to the conductor's ComputeTaskAPI.

        The import is deferred to call time to avoid a circular import
        between this module and nova.conductor.
        """
        if self._compute_task_api is None:
            # TODO(alaski): Remove calls into here from conductor manager so
            # that this isn't necessary. #1180540
            from nova import conductor
            self._compute_task_api = conductor.ComputeTaskAPI()
        return self._compute_task_api
@property
def cell_type(self):
try:
return getattr(self, '_cell_type')
except AttributeError:
self._cell_type = cells_opts.get_cell_type()
return self._cell_type
    def _cell_read_only(self, cell_name):
        """Is the target cell in a read-only mode?"""
        # FIXME(comstud): Add support for this.
        # Stubbed: always reports writable until cell read-only support lands.
        return False
    def _validate_cell(self, instance, method):
        """Reject *method* if the instance's cell cannot be acted upon.

        Only enforced when running as an API cell.  Raises
        InstanceUnknownCell when the instance has no cell assigned, and
        InstanceInvalidState when its cell is read-only.
        """
        if self.cell_type != 'api':
            # Non-API cells act on instances directly; nothing to validate.
            return
        cell_name = instance.cell_name
        if not cell_name:
            raise exception.InstanceUnknownCell(
                instance_uuid=instance.uuid)
        if self._cell_read_only(cell_name):
            raise exception.InstanceInvalidState(
                attr="vm_state",
                instance_uuid=instance.uuid,
                state="temporary_readonly",
                method=method)
    def _record_action_start(self, context, instance, action):
        """Record the start of *action* against the instance in the DB.

        Fire-and-forget: want_result=False skips fetching the created row.
        """
        objects.InstanceAction.action_start(context, instance.uuid,
                                            action, want_result=False)
def _check_injected_file_quota(self, context, injected_files):
"""Enforce quota limits on injected files.
Raises a QuotaError if any limit is exceeded.
"""
if injected_files is None:
return
# Check number of files first
try:
objects.Quotas.limit_check(context,
injected_files=len(injected_files))
except exception.OverQuota:
raise exception.OnsetFileLimitExceeded()
# OK, now count path and content lengths; we're looking for
# the max...
max_path = 0
max_content = 0
for path, content in injected_files:
max_path = max(max_path, len(path))
max_content = max(max_content, len(content))
try:
objects.Quotas.limit_check(context,
injected_file_path_bytes=max_path,
injected_file_content_bytes=max_content)
except exception.OverQuota as exc:
# Favor path limit over content limit for reporting
# purposes
if 'injected_file_path_bytes' in exc.kwargs['overs']:
raise exception.OnsetFilePathLimitExceeded()
else:
raise exception.OnsetFileContentLimitExceeded()
def _get_headroom(self, quotas, usages, deltas):
headroom = {res: quotas[res] -
(usages[res]['in_use'] + usages[res]['reserved'])
for res in quotas.keys()}
# If quota_cores is unlimited [-1]:
# - set cores headroom based on instances headroom:
if quotas.get('cores') == -1:
if deltas.get('cores'):
hc = headroom['instances'] * deltas['cores']
headroom['cores'] = hc / deltas.get('instances', 1)
else:
headroom['cores'] = headroom['instances']
# If quota_ram is unlimited [-1]:
# - set ram headroom based on instances headroom:
if quotas.get('ram') == -1:
if deltas.get('ram'):
hr = headroom['instances'] * deltas['ram']
headroom['ram'] = hr / deltas.get('instances', 1)
else:
headroom['ram'] = headroom['instances']
return headroom
    def _check_num_instances_quota(self, context, instance_type, min_count,
                                   max_count):
        """Enforce quota limits on number of instances created.

        Reserves instances/cores/ram quota for max_count instances.  If
        that fails but at least min_count instances fit, retries with the
        reduced count.  Returns (num_instances, quotas_reservation);
        raises TooManyInstances when even min_count does not fit.
        """
        # Determine requested cores and ram.  Video RAM from the flavor's
        # extra specs counts against the ram quota as well.
        req_cores = max_count * instance_type['vcpus']
        vram_mb = int(instance_type.get('extra_specs', {}).get(VIDEO_RAM, 0))
        req_ram = max_count * (instance_type['memory_mb'] + vram_mb)

        # Check the quota
        try:
            quotas = objects.Quotas(context)
            quotas.reserve(instances=max_count,
                           cores=req_cores, ram=req_ram)
        except exception.OverQuota as exc:
            # OK, we exceeded quota; let's figure out why...
            quotas = exc.kwargs['quotas']
            overs = exc.kwargs['overs']
            usages = exc.kwargs['usages']
            deltas = {'instances': max_count,
                      'cores': req_cores, 'ram': req_ram}
            headroom = self._get_headroom(quotas, usages, deltas)

            allowed = headroom['instances']
            # Reduce 'allowed' instances in line with the cores & ram headroom
            if instance_type['vcpus']:
                allowed = min(allowed,
                              headroom['cores'] // instance_type['vcpus'])
            if instance_type['memory_mb']:
                allowed = min(allowed,
                              headroom['ram'] // (instance_type['memory_mb'] +
                                                  vram_mb))

            # Convert to the appropriate exception message
            if allowed <= 0:
                msg = _("Cannot run any more instances of this type.")
            elif min_count <= allowed <= max_count:
                # We're actually OK, but still need reservations
                return self._check_num_instances_quota(context, instance_type,
                                                       min_count, allowed)
            else:
                msg = (_("Can only run %s more instances of this type.") %
                       allowed)

            # Report against the first over-limit resource.
            resource = overs[0]
            used = quotas[resource] - headroom[resource]
            total_allowed = quotas[resource]
            overs = ','.join(overs)
            params = {'overs': overs, 'pid': context.project_id,
                      'min_count': min_count, 'max_count': max_count,
                      'msg': msg}

            if min_count == max_count:
                LOG.debug(("%(overs)s quota exceeded for %(pid)s,"
                           " tried to run %(min_count)d instances. "
                           "%(msg)s"), params)
            else:
                LOG.debug(("%(overs)s quota exceeded for %(pid)s,"
                           " tried to run between %(min_count)d and"
                           " %(max_count)d instances. %(msg)s"),
                          params)

            num_instances = (str(min_count) if min_count == max_count else
                             "%s-%s" % (min_count, max_count))
            requested = dict(instances=num_instances, cores=req_cores,
                             ram=req_ram)
            raise exception.TooManyInstances(overs=overs,
                                             req=requested[resource],
                                             used=used, allowed=total_allowed,
                                             resource=resource)

        return max_count, quotas
def _check_metadata_properties_quota(self, context, metadata=None):
"""Enforce quota limits on metadata properties."""
if not metadata:
metadata = {}
if not isinstance(metadata, dict):
msg = (_("Metadata type should be dict."))
raise exception.InvalidMetadata(reason=msg)
num_metadata = len(metadata)
try:
objects.Quotas.limit_check(context, metadata_items=num_metadata)
except exception.OverQuota as exc:
quota_metadata = exc.kwargs['quotas']['metadata_items']
raise exception.MetadataLimitExceeded(allowed=quota_metadata)
# Because metadata is stored in the DB, we hard-code the size limits
# In future, we may support more variable length strings, so we act
# as if this is quota-controlled for forwards compatibility
for k, v in six.iteritems(metadata):
try:
utils.check_string_length(v)
utils.check_string_length(k, min_length=1)
except exception.InvalidInput as e:
raise exception.InvalidMetadata(reason=e.format_message())
# For backward compatible we need raise HTTPRequestEntityTooLarge
# so we need to keep InvalidMetadataSize exception here
if len(k) > 255:
msg = _("Metadata property key greater than 255 characters")
raise exception.InvalidMetadataSize(reason=msg)
if len(v) > 255:
msg = _("Metadata property value greater than 255 characters")
raise exception.InvalidMetadataSize(reason=msg)
def _check_requested_secgroups(self, context, secgroups):
"""Check if the security group requested exists and belongs to
the project.
"""
for secgroup in secgroups:
# NOTE(sdague): default is handled special
if secgroup == "default":
continue
if not self.security_group_api.get(context, secgroup):
raise exception.SecurityGroupNotFoundForProject(
project_id=context.project_id, security_group_id=secgroup)
    def _check_requested_networks(self, context, requested_networks,
                                  max_count):
        """Check if the networks requested belongs to the project
        and the fixed IP address for each network provided is within
        same the network block

        Returns the maximum number of instances allowed by the network
        quotas, per network_api.validate_networks.
        """
        if requested_networks is not None:
            # NOTE(danms): Temporary transition
            # Convert the NetworkRequestList object to the legacy tuple form
            # expected by validate_networks.
            requested_networks = requested_networks.as_tuples()

        return self.network_api.validate_networks(context, requested_networks,
                                                  max_count)
def _handle_kernel_and_ramdisk(self, context, kernel_id, ramdisk_id,
image):
"""Choose kernel and ramdisk appropriate for the instance.
The kernel and ramdisk can be chosen in one of three ways:
1. Passed in with create-instance request.
2. Inherited from image.
3. Forced to None by using `null_kernel` FLAG.
"""
# Inherit from image if not specified
image_properties = image.get('properties', {})
if kernel_id is None:
kernel_id = image_properties.get('kernel_id')
if ramdisk_id is None:
ramdisk_id = image_properties.get('ramdisk_id')
# Force to None if using null_kernel
if kernel_id == str(CONF.null_kernel):
kernel_id = None
ramdisk_id = None
# Verify kernel and ramdisk exist (fail-fast)
if kernel_id is not None:
kernel_image = self.image_api.get(context, kernel_id)
# kernel_id could have been a URI, not a UUID, so to keep behaviour
# from before, which leaked that implementation detail out to the
# caller, we return the image UUID of the kernel image and ramdisk
# image (below) and not any image URIs that might have been
# supplied.
# TODO(jaypipes): Get rid of this silliness once we move to a real
# Image object and hide all of that stuff within nova.image.api.
kernel_id = kernel_image['id']
if ramdisk_id is not None:
ramdisk_image = self.image_api.get(context, ramdisk_id)
ramdisk_id = ramdisk_image['id']
return kernel_id, ramdisk_id
@staticmethod
def _handle_availability_zone(context, availability_zone):
# NOTE(vish): We have a legacy hack to allow admins to specify hosts
# via az using az:host:node. It might be nice to expose an
# api to specify specific hosts to force onto, but for
# now it just supports this legacy hack.
# NOTE(deva): It is also possible to specify az::node, in which case
# the host manager will determine the correct host.
forced_host = None
forced_node = None
if availability_zone and ':' in availability_zone:
c = availability_zone.count(':')
if c == 1:
availability_zone, forced_host = availability_zone.split(':')
elif c == 2:
if '::' in availability_zone:
availability_zone, forced_node = \
availability_zone.split('::')
else:
availability_zone, forced_host, forced_node = \
availability_zone.split(':')
else:
raise exception.InvalidInput(
reason="Unable to parse availability_zone")
if not availability_zone:
availability_zone = CONF.default_schedule_zone
return availability_zone, forced_host, forced_node
def _ensure_auto_disk_config_is_valid(self, auto_disk_config_img,
auto_disk_config, image):
auto_disk_config_disabled = \
utils.is_auto_disk_config_disabled(auto_disk_config_img)
if auto_disk_config_disabled and auto_disk_config:
raise exception.AutoDiskConfigDisabledByImage(image=image)
def _inherit_properties_from_image(self, image, auto_disk_config):
image_properties = image.get('properties', {})
auto_disk_config_img = \
utils.get_auto_disk_config_from_image_props(image_properties)
self._ensure_auto_disk_config_is_valid(auto_disk_config_img,
auto_disk_config,
image.get("id"))
if auto_disk_config is None:
auto_disk_config = strutils.bool_from_string(auto_disk_config_img)
return {
'os_type': image_properties.get('os_type'),
'architecture': image_properties.get('architecture'),
'vm_mode': image_properties.get('vm_mode'),
'auto_disk_config': auto_disk_config
}
def _apply_instance_name_template(self, context, instance, index):
params = {
'uuid': instance.uuid,
'name': instance.display_name,
'count': index + 1,
}
try:
new_name = (CONF.multi_instance_display_name_template %
params)
except (KeyError, TypeError):
LOG.exception(_LE('Failed to set instance name using '
'multi_instance_display_name_template.'))
new_name = instance.display_name
instance.display_name = new_name
if not instance.get('hostname', None):
instance.hostname = utils.sanitize_hostname(new_name)
instance.save()
return instance
def _check_config_drive(self, config_drive):
if config_drive:
try:
bool_val = strutils.bool_from_string(config_drive,
strict=True)
except ValueError:
raise exception.ConfigDriveInvalidValue(option=config_drive)
else:
bool_val = False
# FIXME(comstud): Bug ID 1193438 filed for this. This looks silly,
# but this is because the config drive column is a String. False
# is represented by using an empty string. And for whatever
# reason, we rely on the DB to cast True to a String.
return True if bool_val else ''
def _check_requested_image(self, context, image_id, image, instance_type):
if not image:
return
if image['status'] != 'active':
raise exception.ImageNotActive(image_id=image_id)
image_properties = image.get('properties', {})
config_drive_option = image_properties.get(
'img_config_drive', 'optional')
if config_drive_option not in ['optional', 'mandatory']:
raise exception.InvalidImageConfigDrive(
config_drive=config_drive_option)
if instance_type['memory_mb'] < int(image.get('min_ram') or 0):
raise exception.FlavorMemoryTooSmall()
# NOTE(johannes): root_gb is allowed to be 0 for legacy reasons
# since libvirt interpreted the value differently than other
# drivers. A value of 0 means don't check size.
root_gb = instance_type['root_gb']
if root_gb:
if int(image.get('size') or 0) > root_gb * (1024 ** 3):
raise exception.FlavorDiskTooSmall()
if int(image.get('min_disk') or 0) > root_gb:
raise exception.FlavorDiskTooSmall()
def _get_image_defined_bdms(self, base_options, instance_type, image_meta,
root_device_name):
image_properties = image_meta.get('properties', {})
# Get the block device mappings defined by the image.
image_defined_bdms = image_properties.get('block_device_mapping', [])
legacy_image_defined = not image_properties.get('bdm_v2', False)
image_mapping = image_properties.get('mappings', [])
if legacy_image_defined:
image_defined_bdms = block_device.from_legacy_mapping(
image_defined_bdms, None, root_device_name)
else:
image_defined_bdms = map(block_device.BlockDeviceDict,
image_defined_bdms)
if image_mapping:
image_defined_bdms += self._prepare_image_mapping(
instance_type, image_mapping)
return image_defined_bdms
    def _check_and_transform_bdm(self, context, base_options, instance_type,
                                 image_meta, min_count, max_count,
                                 block_device_mapping, legacy_bdm):
        """Validate and normalize block device mappings for a boot request.

        Merges user-supplied mappings with image-defined ones, converts
        legacy mappings to the v2 format, and returns a BlockDeviceMapping
        object list.  Raises InvalidRequest on device-name collisions with
        the root device or on multi-instance boot with attached volumes.
        """
        # NOTE (ndipanov): Assume root dev name is 'vda' if not supplied.
        #                  It's needed for legacy conversion to work.
        root_device_name = (base_options.get('root_device_name') or 'vda')
        image_ref = base_options.get('image_ref', '')
        # If the instance is booted by image and has a volume attached,
        # the volume cannot have the same device name as root_device_name
        if image_ref:
            for bdm in block_device_mapping:
                if (bdm.get('source_type') == 'volume' and
                    block_device.strip_dev(bdm.get(
                    'device_name')) == root_device_name):
                    msg = _('The volume cannot be assigned the same device'
                            ' name as the root device %s') % root_device_name
                    raise exception.InvalidRequest(msg)

        image_defined_bdms = self._get_image_defined_bdms(
            base_options, instance_type, image_meta, root_device_name)
        root_in_image_bdms = (
            block_device.get_root_bdm(image_defined_bdms) is not None)

        if legacy_bdm:
            block_device_mapping = block_device.from_legacy_mapping(
                block_device_mapping, image_ref, root_device_name,
                no_root=root_in_image_bdms)
        elif root_in_image_bdms:
            # NOTE (ndipanov): client will insert an image mapping into the v2
            # block_device_mapping, but if there is a bootable device in image
            # mappings - we need to get rid of the inserted image
            # NOTE (gibi): another case is when a server is booted with an
            # image to bdm mapping where the image only contains a bdm to a
            # snapshot. In this case the other image to bdm mapping
            # contains an unnecessary device with boot_index == 0.
            # Also in this case the image_ref is None as we are booting from
            # an image to volume bdm.
            def not_image_and_root_bdm(bdm):
                return not (bdm.get('boot_index') == 0 and
                            bdm.get('source_type') == 'image')

            # NOTE(review): filter() returns a list on Python 2 only; the
            # += below assumes a list — confirm before any Python 3 port.
            block_device_mapping = (
                filter(not_image_and_root_bdm, block_device_mapping))

        block_device_mapping += image_defined_bdms

        if min_count > 1 or max_count > 1:
            # A volume can only be attached to one instance at a time.
            if any(map(lambda bdm: bdm['source_type'] == 'volume',
                       block_device_mapping)):
                msg = _('Cannot attach one or more volumes to multiple'
                        ' instances')
                raise exception.InvalidRequest(msg)

        return block_device_obj.block_device_make_list_from_dicts(
                context, block_device_mapping)
    def _get_image(self, context, image_href):
        """Resolve an image href to (image_id, image_metadata).

        Returns (None, {}) when no href is given (e.g. boot-from-volume).
        """
        if not image_href:
            return None, {}

        image = self.image_api.get(context, image_href)
        return image['id'], image
    def _checks_for_create_and_rebuild(self, context, image_id, image,
                                       instance_type, metadata,
                                       files_to_inject):
        """Run validations shared by instance create and rebuild:
        metadata quota, injected-file quota, and image/flavor fit.
        """
        self._check_metadata_properties_quota(context, metadata)
        self._check_injected_file_quota(context, files_to_inject)
        self._check_requested_image(context, image_id,
                                    image, instance_type)
    def _validate_and_build_base_options(self, context, instance_type,
                                         boot_meta, image_href, image_id,
                                         kernel_id, ramdisk_id, display_name,
                                         display_description, key_name,
                                         key_data, security_groups,
                                         availability_zone, forced_host,
                                         user_data, metadata, injected_files,
                                         access_ip_v4, access_ip_v6,
                                         requested_networks, config_drive,
                                         auto_disk_config, reservation_id,
                                         max_count):
        """Verify all the input parameters regardless of the provisioning
        strategy being performed.

        Returns (base_options, max_network_count): the validated field dict
        used to build Instance objects, and the maximum number of instances
        allowed by the network quotas.
        """
        if availability_zone:
            available_zones = availability_zones.\
                get_availability_zones(context.elevated(), True)
            if forced_host is None and availability_zone not in \
                    available_zones:
                msg = _('The requested availability zone is not available')
                raise exception.InvalidRequest(msg)

        if instance_type['disabled']:
            raise exception.FlavorNotFound(flavor_id=instance_type['id'])

        if user_data:
            l = len(user_data)
            if l > MAX_USERDATA_SIZE:
                # NOTE(mikal): user_data is stored in a text column, and
                # the database might silently truncate if its over length.
                raise exception.InstanceUserDataTooLarge(
                    length=l, maxsize=MAX_USERDATA_SIZE)

            # Reject user_data that is not valid base64.
            # NOTE(review): base64.decodestring is Python 2 only (removed in
            # Python 3.9); would need decodebytes on a Python 3 port.
            try:
                base64.decodestring(user_data)
            except base64.binascii.Error:
                raise exception.InstanceUserDataMalformed()

        self._checks_for_create_and_rebuild(context, image_id, boot_meta,
                instance_type, metadata, injected_files)

        self._check_requested_secgroups(context, security_groups)

        # Note:  max_count is the number of instances requested by the user,
        # max_network_count is the maximum number of instances taking into
        # account any network quotas
        max_network_count = self._check_requested_networks(context,
                requested_networks, max_count)

        kernel_id, ramdisk_id = self._handle_kernel_and_ramdisk(
                context, kernel_id, ramdisk_id, boot_meta)

        config_drive = self._check_config_drive(config_drive)

        # Fill in key_data from the named keypair when only a name was given.
        if key_data is None and key_name is not None:
            key_pair = objects.KeyPair.get_by_name(context,
                                                   context.user_id,
                                                   key_name)
            key_data = key_pair.public_key

        root_device_name = block_device.prepend_dev(
                block_device.properties_root_device_name(
                    boot_meta.get('properties', {})))

        numa_topology = hardware.numa_get_constraints(
                instance_type, boot_meta)

        system_metadata = {}

        # PCI requests come from two sources: instance flavor and
        # requested_networks. The first call in below returns an
        # InstancePCIRequests object which is a list of InstancePCIRequest
        # objects. The second call in below creates an InstancePCIRequest
        # object for each SR-IOV port, and append it to the list in the
        # InstancePCIRequests object
        pci_request_info = pci_request.get_pci_requests_from_flavor(
            instance_type)
        self.network_api.create_pci_requests_for_sriov_ports(context,
            pci_request_info, requested_networks)

        # Field values for the Instance objects about to be created.
        base_options = {
            'reservation_id': reservation_id,
            'image_ref': image_href,
            'kernel_id': kernel_id or '',
            'ramdisk_id': ramdisk_id or '',
            'power_state': power_state.NOSTATE,
            'vm_state': vm_states.BUILDING,
            'config_drive': config_drive,
            'user_id': context.user_id,
            'project_id': context.project_id,
            'instance_type_id': instance_type['id'],
            'memory_mb': instance_type['memory_mb'],
            'vcpus': instance_type['vcpus'],
            'root_gb': instance_type['root_gb'],
            'ephemeral_gb': instance_type['ephemeral_gb'],
            'display_name': display_name,
            'display_description': display_description or '',
            'user_data': user_data,
            'key_name': key_name,
            'key_data': key_data,
            'locked': False,
            'metadata': metadata or {},
            'access_ip_v4': access_ip_v4,
            'access_ip_v6': access_ip_v6,
            'availability_zone': availability_zone,
            'root_device_name': root_device_name,
            'progress': 0,
            'pci_requests': pci_request_info,
            'numa_topology': numa_topology,
            'system_metadata': system_metadata}

        options_from_image = self._inherit_properties_from_image(
                boot_meta, auto_disk_config)

        base_options.update(options_from_image)

        # return the validated options and maximum number of instances allowed
        # by the network quotas
        return base_options, max_network_count
def _build_filter_properties(self, context, scheduler_hints, forced_host,
forced_node, instance_type, pci_request_info):
filter_properties = dict(scheduler_hints=scheduler_hints)
filter_properties['instance_type'] = instance_type
if forced_host:
filter_properties['force_hosts'] = [forced_host]
if forced_node:
filter_properties['force_nodes'] = [forced_node]
if pci_request_info and pci_request_info.requests:
filter_properties['pci_requests'] = pci_request_info
return filter_properties
    def _provision_instances(self, context, instance_type, min_count,
            max_count, base_options, boot_meta, security_groups,
            block_device_mapping, shutdown_terminate,
            instance_group, check_server_group_quota):
        """Reserve quota and create the DB records for the new instances.

        On any failure the created instance rows are destroyed and the
        quota reservation is rolled back; on success it is committed.
        Returns the list of created Instance objects.
        """
        # Reserve quotas
        num_instances, quotas = self._check_num_instances_quota(
                context, instance_type, min_count, max_count)
        LOG.debug("Going to run %s instances..." % num_instances)
        instances = []
        try:
            for i in range(num_instances):
                instance = objects.Instance(context=context)
                instance.update(base_options)
                instance = self.create_db_entry_for_new_instance(
                        context, instance_type, boot_meta, instance,
                        security_groups, block_device_mapping,
                        num_instances, i, shutdown_terminate)

                instances.append(instance)

                if instance_group:
                    if check_server_group_quota:
                        # Re-count per instance so each member is checked
                        # against the server_group_members quota.
                        count = objects.Quotas.count(context,
                                             'server_group_members',
                                             instance_group,
                                             context.user_id)
                        try:
                            objects.Quotas.limit_check(context,
                                               server_group_members=count + 1)
                        except exception.OverQuota:
                            msg = _("Quota exceeded, too many servers in "
                                    "group")
                            raise exception.QuotaError(msg)

                    objects.InstanceGroup.add_members(context,
                                                      instance_group.uuid,
                                                      [instance.uuid])

                # send a state update notification for the initial create to
                # show it going from non-existent to BUILDING
                notifications.send_update_with_states(context, instance, None,
                        vm_states.BUILDING, None, None, service="api")

        # In the case of any exceptions, attempt DB cleanup and rollback the
        # quota reservations.
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    for instance in instances:
                        try:
                            instance.destroy()
                        except exception.ObjectActionError:
                            # Row already gone; nothing to clean up.
                            pass
                finally:
                    quotas.rollback()

        # Commit the reservations
        quotas.commit()
        return instances
def _get_bdm_image_metadata(self, context, block_device_mapping,
                            legacy_bdm=True):
    """If we are booting from a volume, we need to get the
    volume details from Cinder and make sure we pass the
    metadata back accordingly.

    Returns a glance-style image-metadata dict, or {} when no boot
    device with usable metadata is found.
    """
    if not block_device_mapping:
        return {}
    for bdm in block_device_mapping:
        # Only the boot device matters: device letter 'a' in the legacy
        # format, boot_index 0 in the new-style format.
        if (legacy_bdm and
                block_device.get_device_letter(
                    bdm.get('device_name', '')) != 'a'):
            continue
        elif not legacy_bdm and bdm.get('boot_index') != 0:
            continue
        volume_id = bdm.get('volume_id')
        snapshot_id = bdm.get('snapshot_id')
        if snapshot_id:
            # NOTE(alaski): A volume snapshot inherits metadata from the
            # originating volume, but the API does not expose metadata
            # on the snapshot itself. So we query the volume for it below.
            snapshot = self.volume_api.get_snapshot(context, snapshot_id)
            volume_id = snapshot['volume_id']
        if bdm.get('image_id'):
            try:
                image_id = bdm['image_id']
                image_meta = self.image_api.get(context, image_id)
                return image_meta
            except Exception:
                raise exception.InvalidBDMImage(id=image_id)
        elif volume_id:
            try:
                volume = self.volume_api.get(context, volume_id)
            except exception.CinderConnectionFailed:
                raise
            except Exception:
                raise exception.InvalidBDMVolume(id=volume_id)
            if not volume.get('bootable', True):
                raise exception.InvalidBDMVolumeNotBootable(id=volume_id)
            properties = volume.get('volume_image_metadata', {})
            image_meta = {'properties': properties}
            # NOTE(yjiang5): restore the basic attributes
            # NOTE(mdbooth): These values come from volume_glance_metadata
            # in cinder. This is a simple key/value table, and all values
            # are strings. We need to convert them to ints to avoid
            # unexpected type errors.
            image_meta['min_ram'] = int(properties.get('min_ram', 0))
            image_meta['min_disk'] = int(properties.get('min_disk', 0))
            # Volume size is no longer related to the original image size,
            # so we take it from the volume directly. Cinder creates
            # volumes in Gb increments, and stores size in Gb, whereas
            # glance reports size in bytes. As we're returning glance
            # metadata here, we need to convert it.
            image_meta['size'] = volume.get('size', 0) * units.Gi
            # NOTE(yjiang5): Always set the image status as 'active'
            # and depends on followed volume_api.check_attach() to
            # verify it. This hack should be harmless with that check.
            image_meta['status'] = 'active'
            return image_meta
    return {}
@staticmethod
def _get_requested_instance_group(context, scheduler_hints,
check_quota):
if not scheduler_hints:
return
group_hint = scheduler_hints.get('group')
if not group_hint:
return
if not uuidutils.is_uuid_like(group_hint):
msg = _('Server group scheduler hint must be a UUID.')
raise exception.InvalidInput(reason=msg)
return objects.InstanceGroup.get_by_uuid(context, group_hint)
def _create_instance(self, context, instance_type,
           image_href, kernel_id, ramdisk_id,
           min_count, max_count,
           display_name, display_description,
           key_name, key_data, security_groups,
           availability_zone, user_data, metadata,
           injected_files, admin_password,
           access_ip_v4, access_ip_v6,
           requested_networks, config_drive,
           block_device_mapping, auto_disk_config,
           reservation_id=None, scheduler_hints=None,
           legacy_bdm=True, shutdown_terminate=False,
           check_server_group_quota=False):
    """Verify all the input parameters regardless of the provisioning
    strategy being performed and schedule the instance(s) for
    creation.

    Returns a tuple of (instances, reservation_id) after handing the
    build off to the conductor via compute_task_api.build_instances.
    """
    # Normalize and setup some parameters
    if reservation_id is None:
        reservation_id = utils.generate_uid('r')
    security_groups = security_groups or ['default']
    min_count = min_count or 1
    max_count = max_count or min_count
    block_device_mapping = block_device_mapping or []
    if not instance_type:
        instance_type = flavors.get_default_flavor()
    if image_href:
        image_id, boot_meta = self._get_image(context, image_href)
    else:
        # Boot from volume: derive image metadata from the boot BDM.
        image_id = None
        boot_meta = self._get_bdm_image_metadata(
            context, block_device_mapping, legacy_bdm)
    self._check_auto_disk_config(image=boot_meta,
                                 auto_disk_config=auto_disk_config)
    handle_az = self._handle_availability_zone
    availability_zone, forced_host, forced_node = handle_az(context,
                                                        availability_zone)
    # Forcing a specific host/node bypasses the scheduler, so it needs
    # its own policy check.
    if not self.skip_policy_check and (forced_host or forced_node):
        check_policy(context, 'create:forced_host', {})
    base_options, max_net_count = self._validate_and_build_base_options(
            context,
            instance_type, boot_meta, image_href, image_id, kernel_id,
            ramdisk_id, display_name, display_description,
            key_name, key_data, security_groups, availability_zone,
            forced_host, user_data, metadata, injected_files, access_ip_v4,
            access_ip_v6, requested_networks, config_drive,
            auto_disk_config, reservation_id, max_count)
    # max_net_count is the maximum number of instances requested by the
    # user adjusted for any network quota constraints, including
    # considertaion of connections to each requested network
    if max_net_count == 0:
        raise exception.PortLimitExceeded()
    elif max_net_count < max_count:
        LOG.debug("max count reduced from %(max_count)d to "
                  "%(max_net_count)d due to network port quota",
                  {'max_count': max_count,
                   'max_net_count': max_net_count})
        max_count = max_net_count
    block_device_mapping = self._check_and_transform_bdm(context,
        base_options, instance_type, boot_meta, min_count, max_count,
        block_device_mapping, legacy_bdm)
    instance_group = self._get_requested_instance_group(context,
                               scheduler_hints, check_server_group_quota)
    instances = self._provision_instances(context, instance_type,
            min_count, max_count, base_options, boot_meta, security_groups,
            block_device_mapping, shutdown_terminate,
            instance_group, check_server_group_quota)
    filter_properties = self._build_filter_properties(context,
            scheduler_hints, forced_host,
            forced_node, instance_type,
            base_options.get('pci_requests'))
    for instance in instances:
        self._record_action_start(context, instance,
                                  instance_actions.CREATE)
    # BDMs were transformed to the new format above, so the conductor
    # must not treat them as legacy.
    self.compute_task_api.build_instances(context,
            instances=instances, image=boot_meta,
            filter_properties=filter_properties,
            admin_password=admin_password,
            injected_files=injected_files,
            requested_networks=requested_networks,
            security_groups=security_groups,
            block_device_mapping=block_device_mapping,
            legacy_bdm=False)
    return (instances, reservation_id)
@staticmethod
def _volume_size(instance_type, bdm):
size = bdm.get('volume_size')
if size is None and bdm.get('source_type') == 'blank':
if bdm.get('guest_format') == 'swap':
size = instance_type.get('swap', 0)
else:
size = instance_type.get('ephemeral_gb', 0)
return size
def _prepare_image_mapping(self, instance_type, mappings):
    """Extract and format blank devices from image mappings.

    Converts legacy 'virtual' mappings (swap/ephemeralN) into new-style
    blank BlockDeviceDict entries sized from the flavor; root/ami and
    zero-sized devices are skipped.
    """
    prepared_mappings = []
    for bdm in block_device.mappings_prepend_dev(mappings):
        LOG.debug("Image bdm %s", bdm)
        virtual_name = bdm['virtual']
        if virtual_name == 'ami' or virtual_name == 'root':
            continue
        # Only swap and ephemeralN entries become local blank devices.
        if not block_device.is_swap_or_ephemeral(virtual_name):
            continue
        guest_format = bdm.get('guest_format')
        if virtual_name == 'swap':
            guest_format = 'swap'
        if not guest_format:
            guest_format = CONF.default_ephemeral_format
        values = block_device.BlockDeviceDict({
            'device_name': bdm['device'],
            'source_type': 'blank',
            'destination_type': 'local',
            'device_type': 'disk',
            'guest_format': guest_format,
            'delete_on_termination': True,
            'boot_index': -1})
        values['volume_size'] = self._volume_size(
            instance_type, values)
        if values['volume_size'] == 0:
            continue
        prepared_mappings.append(values)
    return prepared_mappings
def _create_block_device_mapping(self, instance_type, instance_uuid,
                                 block_device_mapping):
    """Create the BlockDeviceMapping objects in the db.
    This method makes a copy of the list in order to avoid using the same
    id field in case this is called for multiple instances.
    """
    LOG.debug("block_device_mapping %s", block_device_mapping,
              instance_uuid=instance_uuid)
    instance_block_device_mapping = copy.deepcopy(block_device_mapping)
    for bdm in instance_block_device_mapping:
        bdm.volume_size = self._volume_size(instance_type, bdm)
        if bdm.volume_size == 0:
            # Zero-sized (e.g. flavor has no swap/ephemeral) devices
            # are not persisted at all.
            continue
        bdm.instance_uuid = instance_uuid
        bdm.update_or_create()
def _validate_bdm(self, context, instance, instance_type, all_mappings):
    """Sanity-check the block device mappings for an instance.

    Validates the boot sequence, image/volume/snapshot accessibility,
    and the ephemeral/swap/local-device limits imposed by the flavor
    and configuration.  Raises an InvalidBDM* (or propagates a Cinder
    connection error) on failure.
    """
    def _subsequent_list(l):
        # True when the sorted list is strictly consecutive (no gaps).
        return all(el + 1 == l[i + 1] for i, el in enumerate(l[:-1]))
    # Make sure that the boot indexes make sense
    boot_indexes = sorted([bdm.boot_index
                           for bdm in all_mappings
                           if bdm.boot_index is not None
                           and bdm.boot_index >= 0])
    if 0 not in boot_indexes or not _subsequent_list(boot_indexes):
        raise exception.InvalidBDMBootSequence()
    for bdm in all_mappings:
        # NOTE(vish): For now, just make sure the volumes are accessible.
        # Additionally, check that the volume can be attached to this
        # instance.
        snapshot_id = bdm.snapshot_id
        volume_id = bdm.volume_id
        image_id = bdm.image_id
        if (image_id is not None and
                image_id != instance.get('image_ref')):
            try:
                self._get_image(context, image_id)
            except Exception:
                raise exception.InvalidBDMImage(id=image_id)
            if (bdm.source_type == 'image' and
                    bdm.destination_type == 'volume' and
                    not bdm.volume_size):
                raise exception.InvalidBDM(message=_("Images with "
                    "destination_type 'volume' need to have a non-zero "
                    "size specified"))
        elif volume_id is not None:
            try:
                volume = self.volume_api.get(context, volume_id)
                self.volume_api.check_attach(context,
                                             volume,
                                             instance=instance)
            except (exception.CinderConnectionFailed,
                    exception.InvalidVolume):
                # These are meaningful to the caller; let them through.
                raise
            except Exception:
                raise exception.InvalidBDMVolume(id=volume_id)
        elif snapshot_id is not None:
            try:
                self.volume_api.get_snapshot(context, snapshot_id)
            except exception.CinderConnectionFailed:
                raise
            except Exception:
                raise exception.InvalidBDMSnapshot(id=snapshot_id)
    ephemeral_size = sum(bdm.volume_size or 0
                         for bdm in all_mappings
                         if block_device.new_format_is_ephemeral(bdm))
    if ephemeral_size > instance_type['ephemeral_gb']:
        raise exception.InvalidBDMEphemeralSize()
    # There should be only one swap
    swap_list = [bdm for bdm in all_mappings
                 if block_device.new_format_is_swap(bdm)]
    if len(swap_list) > 1:
        msg = _("More than one swap drive requested.")
        raise exception.InvalidBDMFormat(details=msg)
    if swap_list:
        swap_size = swap_list[0].volume_size or 0
        if swap_size > instance_type['swap']:
            raise exception.InvalidBDMSwapSize()
    max_local = CONF.max_local_block_devices
    if max_local >= 0:
        # A negative config value disables the local-device limit.
        num_local = len([bdm for bdm in all_mappings
                         if bdm.destination_type == 'local'])
        if num_local > max_local:
            raise exception.InvalidBDMLocalsLimit()
def _populate_instance_names(self, instance, num_instances):
    """Populate instance display_name and hostname."""
    display_name = instance.get('display_name')
    if instance.obj_attr_is_set('hostname'):
        hostname = instance.get('hostname')
    else:
        hostname = None
    if display_name is None:
        # No user-supplied name: synthesize one from the uuid.
        display_name = self._default_display_name(instance.uuid)
        instance.display_name = display_name
    if hostname is None and num_instances == 1:
        # NOTE(russellb) In the multi-instance case, we're going to
        # overwrite the display_name using the
        # multi_instance_display_name_template. We need the default
        # display_name set so that it can be used in the template, though.
        # Only set the hostname here if we're only creating one instance.
        # Otherwise, it will be built after the template based
        # display_name.
        hostname = display_name
    instance.hostname = utils.sanitize_hostname(hostname)
def _default_display_name(self, instance_uuid):
return "Server %s" % instance_uuid
def _populate_instance_for_create(self, context, instance, image,
                                  index, security_groups, instance_type):
    """Build the beginning of a new instance.

    Fills in the scheduling state, info cache, flavor, optional
    ephemeral-storage encryption key, image system metadata and
    security groups, and returns the (not yet persisted) instance.
    """
    if not instance.obj_attr_is_set('uuid'):
        # Generate the instance_uuid here so we can use it
        # for additional setup before creating the DB entry.
        instance.uuid = str(uuid.uuid4())
    instance.launch_index = index
    instance.vm_state = vm_states.BUILDING
    instance.task_state = task_states.SCHEDULING
    info_cache = objects.InstanceInfoCache()
    info_cache.instance_uuid = instance.uuid
    info_cache.network_info = network_model.NetworkInfo()
    instance.info_cache = info_cache
    instance.flavor = instance_type
    instance.old_flavor = None
    instance.new_flavor = None
    if CONF.ephemeral_storage_encryption.enabled:
        instance.ephemeral_key_uuid = self.key_manager.create_key(
            context,
            length=CONF.ephemeral_storage_encryption.key_size)
    else:
        instance.ephemeral_key_uuid = None
    # Store image properties so we can use them later
    # (for notifications, etc). Only store what we can.
    if not instance.obj_attr_is_set('system_metadata'):
        instance.system_metadata = {}
    # Make sure we have the dict form that we need for instance_update.
    instance.system_metadata = utils.instance_sys_meta(instance)
    system_meta = utils.get_system_metadata_from_image(
        image, instance_type)
    # In case we couldn't find any suitable base_image
    system_meta.setdefault('image_base_image_ref', instance.image_ref)
    instance.system_metadata.update(system_meta)
    self.security_group_api.populate_security_groups(instance,
                                                     security_groups)
    return instance
# NOTE(bcwaldon): No policy check since this is only used by scheduler and
# the compute api. That should probably be cleaned up, though.
def create_db_entry_for_new_instance(self, context, instance_type, image,
        instance, security_group, block_device_mapping, num_instances,
        index, shutdown_terminate=False):
    """Create an entry in the DB for this new instance,
    including any related table updates (such as security group,
    etc).
    This is called by the scheduler after a location for the
    instance has been determined.

    Returns the persisted Instance object.
    """
    self._populate_instance_for_create(context, instance, image, index,
                                       security_group, instance_type)
    self._populate_instance_names(instance, num_instances)
    instance.shutdown_terminate = shutdown_terminate
    self.security_group_api.ensure_default(context)
    instance.create()
    if num_instances > 1:
        # NOTE(russellb) We wait until this spot to handle
        # multi_instance_display_name_template, because we need
        # the UUID from the instance.
        instance = self._apply_instance_name_template(context, instance,
                                                      index)
    # NOTE (ndipanov): This can now raise exceptions but the instance
    #                  has been created, so delete it and re-raise so
    #                  that other cleanup can happen.
    try:
        self._validate_bdm(
            context, instance, instance_type, block_device_mapping)
    except (exception.CinderConnectionFailed, exception.InvalidBDM,
            exception.InvalidVolume):
        with excutils.save_and_reraise_exception():
            instance.destroy()
    self._create_block_device_mapping(
        instance_type, instance.uuid, block_device_mapping)
    return instance
def _check_create_policies(self, context, availability_zone,
requested_networks, block_device_mapping):
"""Check policies for create()."""
target = {'project_id': context.project_id,
'user_id': context.user_id,
'availability_zone': availability_zone}
if not self.skip_policy_check:
check_policy(context, 'create', target)
if requested_networks and len(requested_networks):
check_policy(context, 'create:attach_network', target)
if block_device_mapping:
check_policy(context, 'create:attach_volume', target)
def _check_multiple_instances_neutron_ports(self, requested_networks):
"""Check whether multiple instances are created from port id(s)."""
for requested_net in requested_networks:
if requested_net.port_id:
msg = _("Unable to launch multiple instances with"
" a single configured port ID. Please launch your"
" instance one by one with different ports.")
raise exception.MultiplePortsNotApplicable(reason=msg)
def _check_multiple_instances_and_specified_ip(self, requested_networks):
"""Check whether multiple instances are created with specified ip."""
for requested_net in requested_networks:
if requested_net.network_id and requested_net.address:
msg = _("max_count cannot be greater than 1 if an fixed_ip "
"is specified.")
raise exception.InvalidFixedIpAndMaxCountRequest(reason=msg)
@hooks.add_hook("create_instance")
def create(self, context, instance_type,
           image_href, kernel_id=None, ramdisk_id=None,
           min_count=None, max_count=None,
           display_name=None, display_description=None,
           key_name=None, key_data=None, security_group=None,
           availability_zone=None, user_data=None, metadata=None,
           injected_files=None, admin_password=None,
           block_device_mapping=None, access_ip_v4=None,
           access_ip_v6=None, requested_networks=None, config_drive=None,
           auto_disk_config=None, scheduler_hints=None, legacy_bdm=True,
           shutdown_terminate=False, check_server_group_quota=False):
    """Provision instances, sending instance information to the
    scheduler. The scheduler will determine where the instance(s)
    go and will handle creating the DB entries.
    Returns a tuple of (instances, reservation_id)
    """
    self._check_create_policies(context, availability_zone,
                                requested_networks, block_device_mapping)
    # NOTE(review): max_count can still be None here (it is normalized
    # inside _create_instance); presumably callers requesting multiple
    # instances always pass an int -- confirm.
    if requested_networks and max_count > 1:
        self._check_multiple_instances_and_specified_ip(requested_networks)
        if utils.is_neutron():
            self._check_multiple_instances_neutron_ports(
                requested_networks)
    return self._create_instance(
                   context, instance_type,
                   image_href, kernel_id, ramdisk_id,
                   min_count, max_count,
                   display_name, display_description,
                   key_name, key_data, security_group,
                   availability_zone, user_data, metadata,
                   injected_files, admin_password,
                   access_ip_v4, access_ip_v6,
                   requested_networks, config_drive,
                   block_device_mapping, auto_disk_config,
                   scheduler_hints=scheduler_hints,
                   legacy_bdm=legacy_bdm,
                   shutdown_terminate=shutdown_terminate,
                   check_server_group_quota=check_server_group_quota)
def _check_auto_disk_config(self, instance=None, image=None,
**extra_instance_updates):
auto_disk_config = extra_instance_updates.get("auto_disk_config")
if auto_disk_config is None:
return
if not image and not instance:
return
if image:
image_props = image.get("properties", {})
auto_disk_config_img = \
utils.get_auto_disk_config_from_image_props(image_props)
image_ref = image.get("id")
else:
sys_meta = utils.instance_sys_meta(instance)
image_ref = sys_meta.get('image_base_image_ref')
auto_disk_config_img = \
utils.get_auto_disk_config_from_instance(sys_meta=sys_meta)
self._ensure_auto_disk_config_is_valid(auto_disk_config_img,
auto_disk_config,
image_ref)
def _delete(self, context, instance, delete_type, cb, **instance_attrs):
    """Common deletion path for delete/soft_delete/force_delete.

    Reserves a negative quota delta, decides between an RPC-driven
    delete (compute host up) and a local DB-only delete (host down or
    instance shelved-offloaded), and commits or rolls back the quota
    reservation accordingly.  `cb` is the per-delete-type callback
    (e.g. _do_delete / _do_soft_delete); `instance_attrs` are applied
    to the instance before saving (task_state, deleted_at, ...).
    """
    if instance.disable_terminate:
        LOG.info(_LI('instance termination disabled'),
                 instance=instance)
        return
    bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
        context, instance.uuid)
    project_id, user_id = quotas_obj.ids_from_instance(context, instance)
    # At these states an instance has a snapshot associate.
    if instance.vm_state in (vm_states.SHELVED,
                             vm_states.SHELVED_OFFLOADED):
        snapshot_id = instance.system_metadata.get('shelved_image_id')
        LOG.info(_LI("Working on deleting snapshot %s "
                     "from shelved instance..."),
                 snapshot_id, instance=instance)
        try:
            self.image_api.delete(context, snapshot_id)
        except (exception.ImageNotFound,
                exception.ImageNotAuthorized) as exc:
            LOG.warning(_LW("Failed to delete snapshot "
                            "from shelved instance (%s)."),
                        exc.format_message(), instance=instance)
        except Exception:
            # Best-effort: snapshot cleanup failure must not block the
            # delete itself.
            LOG.exception(_LE("Something wrong happened when trying to "
                              "delete snapshot from shelved instance."),
                          instance=instance)
    original_task_state = instance.task_state
    quotas = None
    try:
        # NOTE(maoy): no expected_task_state needs to be set
        instance.update(instance_attrs)
        instance.progress = 0
        instance.save()
        # NOTE(comstud): If we delete the instance locally, we'll
        # commit the reservations here.  Otherwise, the manager side
        # will commit or rollback the reservations based on success.
        quotas = self._create_reservations(context,
                                           instance,
                                           original_task_state,
                                           project_id, user_id)
        if self.cell_type == 'api':
            # NOTE(comstud): If we're in the API cell, we need to
            # skip all remaining logic and just call the callback,
            # which will cause a cast to the child cell.  Also,
            # commit reservations here early until we have a better
            # way to deal with quotas with cells.
            cb(context, instance, bdms, reservations=None)
            quotas.commit()
            return
        shelved_offloaded = (instance.vm_state
                             == vm_states.SHELVED_OFFLOADED)
        if not instance.host and not shelved_offloaded:
            # Never scheduled onto a host: pure DB delete.
            try:
                compute_utils.notify_about_instance_usage(
                    self.notifier, context, instance,
                    "%s.start" % delete_type)
                instance.destroy()
                compute_utils.notify_about_instance_usage(
                    self.notifier, context, instance,
                    "%s.end" % delete_type,
                    system_metadata=instance.system_metadata)
                quotas.commit()
                return
            except exception.ObjectActionError:
                # Destroy raced with something else; refresh and fall
                # through to the normal path.
                instance.refresh()
        if instance.vm_state == vm_states.RESIZED:
            self._confirm_resize_on_deleting(context, instance)
        is_local_delete = True
        try:
            if not shelved_offloaded:
                service = objects.Service.get_by_compute_host(
                    context.elevated(), instance.host)
                is_local_delete = not self.servicegroup_api.service_is_up(
                    service)
            if not is_local_delete:
                if original_task_state in (task_states.DELETING,
                                           task_states.SOFT_DELETING):
                    LOG.info(_LI('Instance is already in deleting state, '
                                 'ignoring this request'),
                             instance=instance)
                    quotas.rollback()
                    return
                self._record_action_start(context, instance,
                                          instance_actions.DELETE)
                # NOTE(snikitin): If instance's vm_state is 'soft-delete',
                # we should not count reservations here, because instance
                # in soft-delete vm_state have already had quotas
                # decremented. More details:
                # https://bugs.launchpad.net/nova/+bug/1333145
                if instance.vm_state == vm_states.SOFT_DELETED:
                    quotas.rollback()
                cb(context, instance, bdms,
                   reservations=quotas.reservations)
        except exception.ComputeHostNotFound:
            pass
        if is_local_delete:
            # If instance is in shelved_offloaded state or compute node
            # isn't up, delete instance from db and clean bdms info and
            # network info
            self._local_delete(context, instance, bdms, delete_type, cb)
            quotas.commit()
    except exception.InstanceNotFound:
        # NOTE(comstud): Race condition. Instance already gone.
        if quotas:
            quotas.rollback()
    except Exception:
        with excutils.save_and_reraise_exception():
            if quotas:
                quotas.rollback()
def _confirm_resize_on_deleting(self, context, instance):
    """Confirm a pending resize so deletion also cleans the source host."""
    # If in the middle of a resize, use confirm_resize to
    # ensure the original instance is cleaned up too
    migration = None
    for status in ('finished', 'confirming'):
        try:
            migration = objects.Migration.get_by_instance_and_status(
                context.elevated(), instance.uuid, status)
            LOG.info(_LI('Found an unconfirmed migration during delete, '
                         'id: %(id)s, status: %(status)s'),
                     {'id': migration.id,
                      'status': migration.status},
                     context=context, instance=instance)
            break
        except exception.MigrationNotFoundByStatus:
            pass
    if not migration:
        LOG.info(_LI('Instance may have been confirmed during delete'),
                 context=context, instance=instance)
        return
    src_host = migration.source_compute
    # Call since this can race with the terminate_instance.
    # The resize is done but awaiting confirmation/reversion,
    # so there are two cases:
    # 1. up-resize: here -instance['vcpus'/'memory_mb'] match
    #    the quota usages accounted for this instance,
    #    so no further quota adjustment is needed
    # 2. down-resize: here -instance['vcpus'/'memory_mb'] are
    #    shy by delta(old, new) from the quota usages accounted
    #    for this instance, so we must adjust
    try:
        deltas = self._downsize_quota_delta(context, instance)
    except KeyError:
        LOG.info(_LI('Migration %s may have been confirmed during '
                     'delete'),
                 migration.id, context=context, instance=instance)
        return
    quotas = self._reserve_quota_delta(context, deltas, instance)
    self._record_action_start(context, instance,
                              instance_actions.CONFIRM_RESIZE)
    # cast=False: confirm synchronously so the delete that follows sees
    # a consistent state.
    self.compute_rpcapi.confirm_resize(context,
                                       instance, migration,
                                       src_host, quotas.reservations,
                                       cast=False)
def _create_reservations(self, context, instance, original_task_state,
                         project_id, user_id):
    """Reserve the negative quota delta for deleting an instance.

    Returns an uncommitted Quotas reservation for -1 instance and the
    instance's cores/ram; the caller commits or rolls back.
    """
    instance_vcpus = instance.vcpus
    instance_memory_mb = instance.memory_mb
    # NOTE(wangpan): if the instance is resizing, and the resources
    #                are updated to new instance type, we should use
    #                the old instance type to create reservation.
    # see https://bugs.launchpad.net/nova/+bug/1099729 for more details
    if original_task_state in (task_states.RESIZE_MIGRATED,
                               task_states.RESIZE_FINISH):
        try:
            migration = objects.Migration.get_by_instance_and_status(
                context.elevated(), instance.uuid, 'post-migrating')
        except exception.MigrationNotFoundByStatus:
            migration = None
        if (migration and
                instance.instance_type_id ==
                migration.new_instance_type_id):
            old_inst_type_id = migration.old_instance_type_id
            try:
                old_inst_type = flavors.get_flavor(old_inst_type_id)
            except exception.FlavorNotFound:
                # Fall back to the instance's current (new) flavor sizes.
                LOG.warning(_LW("Flavor %d not found"), old_inst_type_id)
                pass
            else:
                instance_vcpus = old_inst_type['vcpus']
                # Video RAM is charged against the ram quota as well.
                vram_mb = int(old_inst_type.get('extra_specs',
                                                {}).get(VIDEO_RAM, 0))
                instance_memory_mb = (old_inst_type['memory_mb'] + vram_mb)
                LOG.debug("going to delete a resizing instance",
                          instance=instance)
    quotas = objects.Quotas(context)
    quotas.reserve(project_id=project_id,
                   user_id=user_id,
                   instances=-1,
                   cores=-instance_vcpus,
                   ram=-instance_memory_mb)
    return quotas
def _local_delete(self, context, instance, bdms, delete_type, cb):
    """Delete an instance purely from the API side.

    Used when the compute host is down or the instance is
    shelved-offloaded: deallocates networking, best-effort detaches and
    cleans up volumes, runs the delete callback with local=True, and
    destroys the DB record.
    """
    if instance.vm_state == vm_states.SHELVED_OFFLOADED:
        LOG.info(_LI("instance is in SHELVED_OFFLOADED state, cleanup"
                     " the instance's info from database."),
                 instance=instance)
    else:
        LOG.warning(_LW("instance's host %s is down, deleting from "
                        "database"), instance.host, instance=instance)
    instance.info_cache.delete()
    compute_utils.notify_about_instance_usage(
        self.notifier, context, instance, "%s.start" % delete_type)
    elevated = context.elevated()
    if self.cell_type != 'api':
        # NOTE(liusheng): In nova-network multi_host scenario,deleting
        # network info of the instance may need instance['host'] as
        # destination host of RPC call. If instance in SHELVED_OFFLOADED
        # state, instance['host'] is None, here, use shelved_host as host
        # to deallocate network info and reset instance['host'] after that.
        # Here we shouldn't use instance.save(), because this will mislead
        # user who may think the instance's host has been changed, and
        # actually, the instance.host is always None.
        orig_host = instance.host
        try:
            if instance.vm_state == vm_states.SHELVED_OFFLOADED:
                # NOTE(review): reads the private _system_metadata
                # attribute, presumably to avoid lazy-load side effects
                # of the public accessor -- confirm.
                instance.host = instance._system_metadata.get(
                    'shelved_host')
            self.network_api.deallocate_for_instance(elevated,
                                                     instance)
        finally:
            instance.host = orig_host
    # cleanup volumes
    for bdm in bdms:
        if bdm.is_volume:
            # NOTE(vish): We don't have access to correct volume
            #             connector info, so just pass a fake
            #             connector. This can be improved when we
            #             expose get_volume_connector to rpc.
            connector = {'ip': '127.0.0.1', 'initiator': 'iqn.fake'}
            try:
                self.volume_api.terminate_connection(context,
                                                     bdm.volume_id,
                                                     connector)
                self.volume_api.detach(elevated, bdm.volume_id)
                if bdm.delete_on_termination:
                    self.volume_api.delete(context, bdm.volume_id)
            except Exception as exc:
                # Volume cleanup is best-effort; the delete proceeds.
                err_str = _LW("Ignoring volume cleanup failure due to %s")
                LOG.warn(err_str % exc, instance=instance)
        bdm.destroy()
    cb(context, instance, bdms, local=True)
    # Capture sys_meta before destroy() so the .end notification can
    # still include it.
    sys_meta = instance.system_metadata
    instance.destroy()
    compute_utils.notify_about_instance_usage(
        self.notifier, context, instance, "%s.end" % delete_type,
        system_metadata=sys_meta)
def _do_delete(self, context, instance, bdms, reservations=None,
               local=False):
    """Hard-delete callback: finish locally or cast to the compute RPC."""
    if not local:
        self.compute_rpcapi.terminate_instance(context, instance, bdms,
                                               reservations=reservations)
        return
    instance.vm_state = vm_states.DELETED
    instance.task_state = None
    instance.terminated_at = timeutils.utcnow()
    instance.save()
def _do_soft_delete(self, context, instance, bdms, reservations=None,
                    local=False):
    """Soft-delete callback: finish locally or cast to the compute RPC."""
    if not local:
        self.compute_rpcapi.soft_delete_instance(context, instance,
                                                 reservations=reservations)
        return
    instance.vm_state = vm_states.SOFT_DELETED
    instance.task_state = None
    instance.terminated_at = timeutils.utcnow()
    instance.save()
# NOTE(maoy): we allow delete to be called no matter what vm_state says.
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=None, task_state=None,
                      must_have_launched=True)
def soft_delete(self, context, instance):
    """Terminate an instance.

    Soft-deletes: the instance moves to SOFT_DELETING and can later be
    restored or reclaimed.
    """
    LOG.debug('Going to try to soft delete instance',
              instance=instance)
    self._delete(context, instance, 'soft_delete', self._do_soft_delete,
                 task_state=task_states.SOFT_DELETING,
                 deleted_at=timeutils.utcnow())
def _delete_instance(self, context, instance):
    """Run the common delete flow for a hard delete."""
    self._delete(context, instance, 'delete', self._do_delete,
                 task_state=task_states.DELETING)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=None, task_state=None,
                      must_have_launched=False)
def delete(self, context, instance):
    """Terminate an instance."""
    LOG.debug("Going to try to terminate instance", instance=instance)
    self._delete_instance(context, instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SOFT_DELETED])
def restore(self, context, instance):
    """Restore a previously deleted (but not reclaimed) instance."""
    # Reserve quotas
    flavor = instance.get_flavor()
    num_instances, quotas = self._check_num_instances_quota(
        context, flavor, 1, 1)
    self._record_action_start(context, instance, instance_actions.RESTORE)
    try:
        if instance.host:
            # Instance still has a compute host: let it do the restore.
            instance.task_state = task_states.RESTORING
            instance.deleted_at = None
            instance.save(expected_task_state=[None])
            self.compute_rpcapi.restore_instance(context, instance)
        else:
            # No host: restore is purely a DB state change.
            instance.vm_state = vm_states.ACTIVE
            instance.task_state = None
            instance.deleted_at = None
            instance.save(expected_task_state=[None])
        quotas.commit()
    except Exception:
        with excutils.save_and_reraise_exception():
            quotas.rollback()
@wrap_check_policy
@check_instance_lock
@check_instance_state(must_have_launched=False)
def force_delete(self, context, instance):
    """Force delete an instance in any vm_state/task_state."""
    self._delete(context, instance, 'force_delete', self._do_delete,
                 task_state=task_states.DELETING)
def force_stop(self, context, instance, do_cast=True, clean_shutdown=True):
    """Stop an instance without the usual state/lock decorators.

    Sets POWERING_OFF and asks the compute host to stop the instance;
    do_cast=False makes the RPC synchronous.
    """
    LOG.debug("Going to try to stop instance", instance=instance)
    instance.task_state = task_states.POWERING_OFF
    instance.progress = 0
    instance.save(expected_task_state=[None])
    self._record_action_start(context, instance, instance_actions.STOP)
    self.compute_rpcapi.stop_instance(context, instance, do_cast=do_cast,
                                      clean_shutdown=clean_shutdown)
@check_instance_lock
@check_instance_host
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.ERROR])
def stop(self, context, instance, do_cast=True, clean_shutdown=True):
    """Stop an instance."""
    self.force_stop(context, instance, do_cast, clean_shutdown)
@check_instance_lock
@check_instance_host
@check_instance_cell
@check_instance_state(vm_state=[vm_states.STOPPED])
def start(self, context, instance):
    """Start an instance."""
    LOG.debug("Going to try to start instance", instance=instance)
    instance.task_state = task_states.POWERING_ON
    instance.save(expected_task_state=[None])
    self._record_action_start(context, instance, instance_actions.START)
    # TODO(yamahata): injected_files isn't supported right now.
    #                 It is used only for osapi. not for ec2 api.
    #                 availability_zone isn't used by run_instance.
    self.compute_rpcapi.start_instance(context, instance)
def get(self, context, instance_id, want_objects=False,
        expected_attrs=None):
    """Get a single instance with the given instance_id.

    Accepts either a UUID or (for ec2 compatibility) an integer id.
    Returns an Instance object, or a primitive dict unless
    want_objects is True.  Raises InstanceNotFound for unknown or
    malformed ids.
    """
    if not expected_attrs:
        expected_attrs = []
    expected_attrs.extend(['metadata', 'system_metadata',
                           'security_groups', 'info_cache'])
    # NOTE(ameade): we still need to support integer ids for ec2
    try:
        if uuidutils.is_uuid_like(instance_id):
            LOG.debug("Fetching instance by UUID",
                      instance_uuid=instance_id)
            instance = objects.Instance.get_by_uuid(
                context, instance_id, expected_attrs=expected_attrs)
        elif strutils.is_int_like(instance_id):
            LOG.debug("Fetching instance by numeric id %s", instance_id)
            instance = objects.Instance.get_by_id(
                context, instance_id, expected_attrs=expected_attrs)
        else:
            LOG.debug("Failed to fetch instance by id %s", instance_id)
            raise exception.InstanceNotFound(instance_id=instance_id)
    except exception.InvalidID:
        LOG.debug("Invalid instance id %s", instance_id)
        raise exception.InstanceNotFound(instance_id=instance_id)
    if not self.skip_policy_check:
        check_policy(context, 'get', instance)
    if not want_objects:
        instance = obj_base.obj_to_primitive(instance)
    return instance
def get_all(self, context, search_opts=None, limit=None, marker=None,
            want_objects=False, expected_attrs=None, sort_keys=None,
            sort_dirs=None):
    """Get all instances filtered by one of the given parameters.

    If there is no filter and the context is an admin, it will retrieve
    all instances in the system.

    Deleted instances will be returned by default, unless there is a
    search option that says otherwise.

    The results will be sorted based on the list of sort keys in the
    'sort_keys' parameter (first value is primary sort key, second value
    is secondary sort key, etc.). For each sort key, the associated sort
    direction is based on the list of sort directions in the 'sort_dirs'
    parameter.
    """
    # TODO(bcwaldon): determine the best argument for target here
    target = {
        'project_id': context.project_id,
        'user_id': context.user_id,
    }

    if not self.skip_policy_check:
        check_policy(context, "get_all", target)

    if search_opts is None:
        search_opts = {}

    LOG.debug("Searching by: %s" % str(search_opts))

    # Fixups for the DB call: user-facing search options are remapped
    # into the filter names/forms the DB layer understands.
    filters = {}

    # Each remap helper writes its translated filter into `filters`
    # (closure over the dict above).
    def _remap_flavor_filter(flavor_id):
        flavor = objects.Flavor.get_by_flavor_id(context, flavor_id)
        filters['instance_type_id'] = flavor.id

    def _remap_fixed_ip_filter(fixed_ip):
        # Turn fixed_ip into a regexp match. Since '.' matches
        # any character, we need to use regexp escaping for it.
        filters['ip'] = '^%s$' % fixed_ip.replace('.', '\\.')

    def _remap_metadata_filter(metadata):
        filters['metadata'] = jsonutils.loads(metadata)

    def _remap_system_metadata_filter(metadata):
        filters['system_metadata'] = jsonutils.loads(metadata)

    # search_option to filter_name mapping.
    filter_mapping = {
        'image': 'image_ref',
        'name': 'display_name',
        'tenant_id': 'project_id',
        'flavor': _remap_flavor_filter,
        'fixed_ip': _remap_fixed_ip_filter,
        'metadata': _remap_metadata_filter,
        'system_metadata': _remap_system_metadata_filter}

    # copy from search_opts, doing various remappings as necessary
    for opt, value in six.iteritems(search_opts):
        # Do remappings.
        # Values not in the filter_mapping table are copied as-is.
        # If remapping is None, option is not copied
        # If the remapping is a string, it is the filter_name to use
        try:
            remap_object = filter_mapping[opt]
        except KeyError:
            filters[opt] = value
        else:
            # Remaps are strings to translate to, or functions to call
            # to do the translating as defined by the table above.
            if isinstance(remap_object, six.string_types):
                filters[remap_object] = value
            else:
                try:
                    remap_object(value)
                    # We already know we can't match the filter, so
                    # return an empty list
                except ValueError:
                    return []

    # IP address filtering cannot be applied at the DB layer, remove any DB
    # limit so that it can be applied after the IP filter.
    filter_ip = 'ip6' in filters or 'ip' in filters
    orig_limit = limit
    if filter_ip and limit:
        LOG.debug('Removing limit for DB query due to IP filter')
        limit = None

    inst_models = self._get_instances_by_filters(context, filters,
            limit=limit, marker=marker, expected_attrs=expected_attrs,
            sort_keys=sort_keys, sort_dirs=sort_dirs)

    if filter_ip:
        # _ip_filter re-applies the original limit after the
        # Python-side IP match.
        inst_models = self._ip_filter(inst_models, filters, orig_limit)

    if want_objects:
        return inst_models

    # Convert the models to dictionaries
    instances = []
    for inst_model in inst_models:
        instances.append(obj_base.obj_to_primitive(inst_model))

    return instances
@staticmethod
def _ip_filter(inst_models, filters, limit):
    """Return the instances whose fixed IPs match the ip/ip6 regexes.

    IP filtering happens in Python because the addresses live in the
    network info cache rather than in directly queryable columns.
    """
    ipv4_re = re.compile(str(filters.get('ip')))
    ipv6_re = re.compile(str(filters.get('ip6')))

    def _matches(inst):
        # Scan every fixed IP on every VIF for a regex hit.
        for vif in compute_utils.get_nw_info_for_instance(inst):
            for fixed_ip in vif.fixed_ips():
                addr = fixed_ip.get('address')
                if not addr:
                    continue
                ver = fixed_ip.get('version')
                if ver == 4 and ipv4_re.match(addr):
                    return True
                if ver == 6 and ipv6_re.match(addr):
                    return True
        return False

    matched = []
    for inst in inst_models:
        if not _matches(inst):
            continue
        matched.append(inst)
        # Honor the caller's limit (a falsy limit means unlimited).
        if limit and len(matched) == limit:
            break
    return objects.InstanceList(objects=matched)
def _get_instances_by_filters(self, context, filters,
                              limit=None, marker=None, expected_attrs=None,
                              sort_keys=None, sort_dirs=None):
    """Fetch instances from the DB layer.

    Always joins the standard attribute set plus any caller-requested
    extras.
    """
    fields = ['metadata', 'system_metadata', 'info_cache',
              'security_groups'] + list(expected_attrs or [])
    return objects.InstanceList.get_by_filters(
        context, filters=filters, limit=limit, marker=marker,
        expected_attrs=fields, sort_keys=sort_keys, sort_dirs=sort_dirs)
# NOTE(melwitt): We don't check instance lock for backup because lock is
# intended to prevent accidental change/delete of instances
@wrap_check_policy
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
                                vm_states.PAUSED, vm_states.SUSPENDED])
def backup(self, context, instance, name, backup_type, rotation,
           extra_properties=None):
    """Backup the given instance

    :param instance: nova.objects.instance.Instance object
    :param name: name of the backup
    :param backup_type: 'daily' or 'weekly'
    :param rotation: int representing how many backups to keep around;
        None if rotation shouldn't be used (as in the case of snapshots)
    :param extra_properties: dict of extra image properties to include
                             when creating the image.
    :returns: A dict containing image metadata
    :raises: exception.InvalidRequest for volume-backed instances
    """
    # dict(None, ...) raises TypeError, so guard against the default
    # extra_properties=None before merging in backup_type.
    props_copy = dict(extra_properties or {}, backup_type=backup_type)

    if self.is_volume_backed_instance(context, instance):
        # TODO(flwang): The log level will be changed to INFO after
        # string freeze (Liberty).
        LOG.debug("It's not supported to backup volume backed instance.",
                  context=context, instance=instance)
        raise exception.InvalidRequest()
    else:
        # Reserve the image entry; the compute node uploads the data.
        image_meta = self._create_image(context, instance,
                                        name, 'backup',
                                        extra_properties=props_copy)

    # NOTE(comstud): Any changes to this method should also be made
    # to the backup_instance() method in nova/cells/messaging.py
    instance.task_state = task_states.IMAGE_BACKUP
    instance.save(expected_task_state=[None])

    self.compute_rpcapi.backup_instance(context, instance,
                                        image_meta['id'],
                                        backup_type,
                                        rotation)
    return image_meta
# NOTE(melwitt): We don't check instance lock for snapshot because lock is
# intended to prevent accidental change/delete of instances
@wrap_check_policy
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
                                vm_states.PAUSED, vm_states.SUSPENDED])
def snapshot(self, context, instance, name, extra_properties=None):
    """Snapshot the given instance.

    :param instance: nova.objects.instance.Instance object
    :param name: name of the snapshot
    :param extra_properties: dict of extra image properties to include
                             when creating the image.
    :returns: A dict containing image metadata
    """
    # Reserve the image entry up front; its id is handed to the compute
    # node, which uploads the actual snapshot data.
    image_meta = self._create_image(
        context, instance, name, 'snapshot',
        extra_properties=extra_properties)

    # NOTE(comstud): Any changes to this method should also be made
    # to the snapshot_instance() method in nova/cells/messaging.py
    instance.task_state = task_states.IMAGE_SNAPSHOT_PENDING
    instance.save(expected_task_state=[None])

    self.compute_rpcapi.snapshot_instance(
        context, instance, image_meta['id'])

    return image_meta
def _create_image(self, context, instance, name, image_type,
                  extra_properties=None):
    """Create new image entry in the image service. This new image
    will be reserved for the compute manager to upload a snapshot
    or backup.

    :param context: security context
    :param instance: nova.objects.instance.Instance object
    :param name: string for name of the snapshot
    :param image_type: snapshot | backup
    :param extra_properties: dict of extra image properties to include
    """
    # Fixed properties win over anything the caller supplied.
    props = {
        'instance_uuid': instance.uuid,
        'user_id': str(context.user_id),
        'image_type': image_type,
    }
    merged_props = dict(extra_properties or {})
    merged_props.update(props)

    sent_meta = compute_utils.get_image_metadata(
        context, self.image_api, instance.image_ref, instance)
    sent_meta['name'] = name
    sent_meta['is_public'] = False
    sent_meta['properties'].update(merged_props)

    return self.image_api.create(context, sent_meta)
# NOTE(melwitt): We don't check instance lock for snapshot because lock is
# intended to prevent accidental change/delete of instances
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
def snapshot_volume_backed(self, context, instance, image_meta, name,
                           extra_properties=None):
    """Snapshot the given volume-backed instance.

    :param instance: nova.objects.instance.Instance object
    :param image_meta: metadata for the new image
    :param name: name of the backup or snapshot
    :param extra_properties: dict of extra image properties to include
    :returns: the new image metadata
    """
    image_meta['name'] = name
    image_meta['is_public'] = False
    properties = image_meta['properties']
    if instance.root_device_name:
        properties['root_device_name'] = instance.root_device_name
    properties.update(extra_properties or {})

    # Try to quiesce the guest filesystem before snapshotting the
    # volumes; failure is fatal only when the image property
    # os_require_quiesce demands it.
    quiesced = False
    if instance.vm_state == vm_states.ACTIVE:
        try:
            self.compute_rpcapi.quiesce_instance(context, instance)
            quiesced = True
        except (exception.InstanceQuiesceNotSupported,
                exception.NovaException, NotImplementedError) as err:
            if strutils.bool_from_string(properties.get(
                    'os_require_quiesce')):
                raise
            else:
                LOG.info(_LI('Skipping quiescing instance: '
                             '%(reason)s.'), {'reason': err},
                         context=context, instance=instance)

    bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
        context, instance.uuid)

    # Build the image's block device mapping: volumes are snapshotted,
    # other mappings are carried over as-is.
    mapping = []
    for bdm in bdms:
        if bdm.no_device:
            continue

        if bdm.is_volume:
            # create snapshot based on volume_id
            volume = self.volume_api.get(context, bdm.volume_id)
            # NOTE(yamahata): Should we wait for snapshot creation?
            #                 Linux LVM snapshot creation completes in
            #                 short time, it doesn't matter for now.
            name = _('snapshot for %s') % image_meta['name']
            snapshot = self.volume_api.create_snapshot_force(
                context, volume['id'], name, volume['display_description'])
            mapping_dict = block_device.snapshot_from_bdm(snapshot['id'],
                                                          bdm)
            mapping_dict = mapping_dict.get_image_mapping()
        else:
            mapping_dict = bdm.get_image_mapping()

        mapping.append(mapping_dict)

    # Resume normal guest I/O once all volume snapshots are initiated.
    if quiesced:
        self.compute_rpcapi.unquiesce_instance(context, instance, mapping)

    # NOTE (ndipanov): Remove swap/ephemerals from mappings as they will be
    # in the block_device_mapping for the new image.
    image_mappings = properties.get('mappings')
    if image_mappings:
        properties['mappings'] = [m for m in image_mappings
                                  if not block_device.is_swap_or_ephemeral(
                                      m['virtual'])]

    if mapping:
        properties['block_device_mapping'] = mapping
        properties['bdm_v2'] = True

    # Strip fields the image service owns from the template metadata.
    for attr in ('status', 'location', 'id', 'owner'):
        image_meta.pop(attr, None)

    # the new image is simply a bucket of properties (particularly the
    # block device mapping, kernel and ramdisk IDs) with no image data,
    # hence the zero size
    image_meta['size'] = 0

    return self.image_api.create(context, image_meta)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=set(
                vm_states.ALLOW_SOFT_REBOOT + vm_states.ALLOW_HARD_REBOOT),
                      task_state=[None, task_states.REBOOTING,
                                  task_states.REBOOT_PENDING,
                                  task_states.REBOOT_STARTED,
                                  task_states.REBOOTING_HARD,
                                  task_states.RESUMING,
                                  task_states.UNPAUSING,
                                  task_states.PAUSING,
                                  task_states.SUSPENDING])
def reboot(self, context, instance, reboot_type):
    """Reboot the given instance.

    :param reboot_type: 'SOFT' or 'HARD'
    :raises: exception.InstanceInvalidState if a SOFT reboot is
        requested from a disallowed vm_state or while another task is
        already in flight
    """
    # A SOFT reboot is only allowed from the vm_states listed in
    # ALLOW_SOFT_REBOOT...
    if (reboot_type == 'SOFT' and
          (instance.vm_state not in vm_states.ALLOW_SOFT_REBOOT)):
        raise exception.InstanceInvalidState(
            attr='vm_state',
            instance_uuid=instance.uuid,
            state=instance.vm_state,
            method='soft reboot')
    # ...and only when no other task is currently running.
    if reboot_type == 'SOFT' and instance.task_state is not None:
        raise exception.InstanceInvalidState(
            attr='task_state',
            instance_uuid=instance.uuid,
            state=instance.task_state,
            method='reboot')
    # A HARD reboot may preempt several in-flight task states, so those
    # are acceptable values for the compare-and-swap in save() below.
    expected_task_state = [None]
    if reboot_type == 'HARD':
        expected_task_state.extend([task_states.REBOOTING,
                                    task_states.REBOOT_PENDING,
                                    task_states.REBOOT_STARTED,
                                    task_states.REBOOTING_HARD,
                                    task_states.RESUMING,
                                    task_states.UNPAUSING,
                                    task_states.SUSPENDING])
    state = {'SOFT': task_states.REBOOTING,
             'HARD': task_states.REBOOTING_HARD}[reboot_type]
    instance.task_state = state
    instance.save(expected_task_state=expected_task_state)

    self._record_action_start(context, instance, instance_actions.REBOOT)

    self.compute_rpcapi.reboot_instance(context, instance=instance,
                                        block_device_info=None,
                                        reboot_type=reboot_type)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
                                vm_states.ERROR])
def rebuild(self, context, instance, image_href, admin_password,
            files_to_inject=None, **kwargs):
    """Rebuild the given instance with the provided attributes.

    :param context: the request context
    :param instance: nova.objects.instance.Instance to rebuild
    :param image_href: ref of the image to rebuild the instance from
    :param admin_password: new admin password for the rebuilt instance
    :param files_to_inject: optional list of files to inject
    :param kwargs: extra instance field updates; 'metadata',
        'preserve_ephemeral' and 'auto_disk_config' are also consumed
        here
    """
    orig_image_ref = instance.image_ref or ''
    files_to_inject = files_to_inject or []
    metadata = kwargs.get('metadata', {})
    preserve_ephemeral = kwargs.get('preserve_ephemeral', False)
    auto_disk_config = kwargs.get('auto_disk_config')

    image_id, image = self._get_image(context, image_href)
    self._check_auto_disk_config(image=image, **kwargs)

    flavor = instance.get_flavor()
    self._checks_for_create_and_rebuild(context, image_id, image,
            flavor, metadata, files_to_inject)

    kernel_id, ramdisk_id = self._handle_kernel_and_ramdisk(
        context, None, None, image)

    def _reset_image_metadata():
        """Remove old image properties that we're storing as instance
        system metadata.  These properties start with 'image_'.
        Then add the properties for the new image.
        """
        # FIXME(comstud): There's a race condition here in that if
        # the system_metadata for this instance is updated after
        # we do the previous save() and before we update.. those
        # other updates will be lost. Since this problem exists in
        # a lot of other places, I think it should be addressed in
        # a DB layer overhaul.

        orig_sys_metadata = dict(instance.system_metadata)
        # Remove the old keys.  Iterate over a snapshot of the keys:
        # deleting entries while iterating the live dict view raises
        # RuntimeError on Python 3.
        for key in list(instance.system_metadata.keys()):
            if key.startswith(utils.SM_IMAGE_PROP_PREFIX):
                del instance.system_metadata[key]

        # Add the new ones
        new_sys_metadata = utils.get_system_metadata_from_image(
            image, flavor)

        instance.system_metadata.update(new_sys_metadata)
        instance.save()
        return orig_sys_metadata

    # Since image might have changed, we may have new values for
    # os_type, vm_mode, etc
    options_from_image = self._inherit_properties_from_image(
        image, auto_disk_config)
    instance.update(options_from_image)

    instance.task_state = task_states.REBUILDING
    instance.image_ref = image_href
    instance.kernel_id = kernel_id or ""
    instance.ramdisk_id = ramdisk_id or ""
    instance.progress = 0
    instance.update(kwargs)
    instance.save(expected_task_state=[None])

    # On a rebuild, since we're potentially changing images, we need to
    # wipe out the old image properties that we're storing as instance
    # system metadata... and copy in the properties for the new image.
    orig_sys_metadata = _reset_image_metadata()

    bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
        context, instance.uuid)

    self._record_action_start(context, instance, instance_actions.REBUILD)

    self.compute_task_api.rebuild_instance(context, instance=instance,
            new_pass=admin_password, injected_files=files_to_inject,
            image_ref=image_href, orig_image_ref=orig_image_ref,
            orig_sys_metadata=orig_sys_metadata, bdms=bdms,
            preserve_ephemeral=preserve_ephemeral, host=instance.host,
            kwargs=kwargs)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.RESIZED])
def revert_resize(self, context, instance):
    """Reverts a resize, deleting the 'new' instance in the process.

    Looks up the 'finished' migration for the instance, reverses the
    quota that was reserved for the upsize, and asks the destination
    compute to roll the instance back.
    """
    elevated = context.elevated()
    migration = objects.Migration.get_by_instance_and_status(
        elevated, instance.uuid, 'finished')

    # reverse quota reservation for increased resource usage
    deltas = self._reverse_upsize_quota_delta(context, migration)
    quotas = self._reserve_quota_delta(context, deltas, instance)

    instance.task_state = task_states.RESIZE_REVERTING
    try:
        instance.save(expected_task_state=[None])
    except Exception:
        # Could not claim the instance (e.g. another task got there
        # first): give the reservation back, then re-raise.
        with excutils.save_and_reraise_exception():
            quotas.rollback()

    migration.status = 'reverting'
    migration.save()
    # With cells, the best we can do right now is commit the reservations
    # immediately...
    if CONF.cells.enable:
        quotas.commit()

    self._record_action_start(context, instance,
                              instance_actions.REVERT_RESIZE)

    self.compute_rpcapi.revert_resize(context, instance,
                                      migration,
                                      migration.dest_compute,
                                      quotas.reservations or [])
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.RESIZED])
def confirm_resize(self, context, instance, migration=None):
    """Confirms a migration/resize and deletes the 'old' instance.

    :param migration: the migration to confirm; when not supplied it is
        looked up by instance uuid and 'finished' status
    """
    elevated = context.elevated()
    if migration is None:
        migration = objects.Migration.get_by_instance_and_status(
            elevated, instance.uuid, 'finished')

    # reserve quota only for any decrease in resource usage
    deltas = self._downsize_quota_delta(context, instance)
    quotas = self._reserve_quota_delta(context, deltas, instance)

    migration.status = 'confirming'
    migration.save()
    # With cells, the best we can do right now is commit the reservations
    # immediately...
    if CONF.cells.enable:
        quotas.commit()

    self._record_action_start(context, instance,
                              instance_actions.CONFIRM_RESIZE)

    self.compute_rpcapi.confirm_resize(context,
                                       instance,
                                       migration,
                                       migration.source_compute,
                                       quotas.reservations or [])
@staticmethod
def _resize_quota_delta(context, new_flavor,
                        old_flavor, sense, compare):
    """Calculate any quota adjustment required at a particular point
    in the resize cycle.

    :param context: the request context
    :param new_flavor: the target instance type
    :param old_flavor: the original instance type
    :param sense: the sense of the adjustment, 1 indicates a
                  forward adjustment, whereas -1 indicates a
                  reversal of a prior adjustment
    :param compare: the direction of the comparison, 1 indicates
                    we're checking for positive deltas, whereas
                    -1 indicates negative deltas
    """
    def _delta(resource):
        return sense * (new_flavor[resource] - old_flavor[resource])

    # Map each flavor resource onto its quota name, keeping only the
    # deltas whose sign matches the requested comparison direction.
    deltas = {}
    for resource, quota_name in (('vcpus', 'cores'),
                                 ('memory_mb', 'ram')):
        change = _delta(resource)
        if compare * change > 0:
            deltas[quota_name] = change
    return deltas
@staticmethod
def _upsize_quota_delta(context, new_flavor, old_flavor):
    """Calculate deltas required to adjust quota for an instance upsize."""
    # Forward adjustment (sense=1), counting only increases (compare=1).
    return API._resize_quota_delta(context, new_flavor, old_flavor, 1, 1)
@staticmethod
def _reverse_upsize_quota_delta(context, migration_ref):
    """Calculate deltas required to reverse a prior upsizing
    quota adjustment.
    """
    lookup = objects.Flavor.get_by_id
    old_flavor = lookup(context, migration_ref['old_instance_type_id'])
    new_flavor = lookup(context, migration_ref['new_instance_type_id'])
    # Reversal (sense=-1) of the original upsize, so only the negative
    # side of the comparison (compare=-1) is charged back.
    return API._resize_quota_delta(context, new_flavor, old_flavor, -1, -1)
@staticmethod
def _downsize_quota_delta(context, instance):
    """Calculate deltas required to adjust quota for an instance downsize."""
    # Forward adjustment (sense=1) but only the negative deltas
    # (compare=-1): a downsize releases quota, it never charges more.
    new_flavor = instance.get_flavor('new')
    old_flavor = instance.get_flavor('old')
    return API._resize_quota_delta(context, new_flavor, old_flavor, 1, -1)
@staticmethod
def _reserve_quota_delta(context, deltas, instance):
    """If there are deltas to reserve, construct a Quotas object and
    reserve the deltas for the given project.

    @param context: The nova request context.
    @param deltas: A dictionary of the proposed delta changes.
    @param instance: The instance we're operating on, so that
                     quotas can use the correct project_id/user_id.
    @return: nova.objects.quotas.Quotas
    """
    quotas = objects.Quotas(context=context)
    if not deltas:
        # Nothing to reserve; return an empty Quotas object.
        return quotas
    project_id, user_id = quotas_obj.ids_from_instance(context, instance)
    quotas.reserve(project_id=project_id, user_id=user_id, **deltas)
    return quotas
@staticmethod
def _resize_cells_support(context, quotas, instance,
                          current_instance_type, new_instance_type):
    """Special API cell logic for resize.

    Commits the pending quota reservations immediately and creates a
    local migration record so a later confirm/revert can adjust quotas.

    :param quotas: nova.objects.quotas.Quotas with pending reservations
    :param current_instance_type: the flavor before the resize
    :param new_instance_type: the target flavor
    """
    # With cells, the best we can do right now is commit the
    # reservations immediately...
    quotas.commit()
    # NOTE(johannes/comstud): The API cell needs a local migration
    # record for later resize_confirm and resize_reverts to deal
    # with quotas.  We don't need source and/or destination
    # information, just the old and new flavors. Status is set to
    # 'finished' since nothing else will update the status along
    # the way.
    mig = objects.Migration(context=context.elevated())
    mig.instance_uuid = instance.uuid
    mig.old_instance_type_id = current_instance_type['id']
    mig.new_instance_type_id = new_instance_type['id']
    mig.status = 'finished'
    # Conditional expression instead of the fragile `cond and a or b`
    # idiom (which silently misbehaves whenever `a` is falsy).
    mig.migration_type = (
        'resize' if mig.old_instance_type_id != mig.new_instance_type_id
        else 'migration')
    mig.create()
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
def resize(self, context, instance, flavor_id=None, clean_shutdown=True,
           **extra_instance_updates):
    """Resize (ie, migrate) a running instance.

    If flavor_id is None, the process is considered a migration, keeping
    the original flavor_id. If flavor_id is not None, the instance should
    be migrated to a new host and resized to the new flavor_id.

    :param clean_shutdown: whether to attempt a clean shutdown before
        powering off
    :param extra_instance_updates: extra instance fields to apply
    :raises: CannotResizeDisk, FlavorNotFound,
        CannotResizeToSameFlavor, TooManyInstances
    """
    self._check_auto_disk_config(instance, **extra_instance_updates)

    current_instance_type = instance.get_flavor()
    # If flavor_id is not provided, only migrate the instance.
    if not flavor_id:
        LOG.debug("flavor_id is None. Assuming migration.",
                  instance=instance)
        new_instance_type = current_instance_type
    else:
        new_instance_type = flavors.get_flavor_by_flavor_id(
                flavor_id, read_deleted="no")
        # Refuse to resize a non-zero root disk onto a zero-disk flavor.
        if (new_instance_type.get('root_gb') == 0 and
            current_instance_type.get('root_gb') != 0):
            reason = _('Resize to zero disk flavor is not allowed.')
            raise exception.CannotResizeDisk(reason=reason)

    if not new_instance_type:
        raise exception.FlavorNotFound(flavor_id=flavor_id)

    current_instance_type_name = current_instance_type['name']
    new_instance_type_name = new_instance_type['name']
    LOG.debug("Old instance type %(current_instance_type_name)s, "
              " new instance type %(new_instance_type_name)s",
              {'current_instance_type_name': current_instance_type_name,
               'new_instance_type_name': new_instance_type_name},
              instance=instance)

    same_instance_type = (current_instance_type['id'] ==
                          new_instance_type['id'])

    # NOTE(sirp): We don't want to force a customer to change their flavor
    # when Ops is migrating off of a failed host.
    if not same_instance_type and new_instance_type.get('disabled'):
        raise exception.FlavorNotFound(flavor_id=flavor_id)

    if same_instance_type and flavor_id and self.cell_type != 'compute':
        raise exception.CannotResizeToSameFlavor()

    # ensure there is sufficient headroom for upsizes
    if flavor_id:
        deltas = self._upsize_quota_delta(context, new_instance_type,
                                          current_instance_type)
        try:
            quotas = self._reserve_quota_delta(context, deltas, instance)
        except exception.OverQuota as exc:
            # Translate the raw quota failure into a user-facing
            # TooManyInstances error carrying usage details.
            quotas = exc.kwargs['quotas']
            overs = exc.kwargs['overs']
            usages = exc.kwargs['usages']
            headroom = self._get_headroom(quotas, usages, deltas)

            resource = overs[0]
            used = quotas[resource] - headroom[resource]
            total_allowed = used + headroom[resource]
            overs = ','.join(overs)
            LOG.warning(_LW("%(overs)s quota exceeded for %(pid)s,"
                            " tried to resize instance."),
                        {'overs': overs, 'pid': context.project_id})
            raise exception.TooManyInstances(overs=overs,
                                             req=deltas[resource],
                                             used=used,
                                             allowed=total_allowed,
                                             resource=resource)
    else:
        # Plain migration keeps the same flavor: nothing to reserve.
        quotas = objects.Quotas(context=context)

    instance.task_state = task_states.RESIZE_PREP
    instance.progress = 0
    instance.update(extra_instance_updates)
    instance.save(expected_task_state=[None])

    filter_properties = {'ignore_hosts': []}

    if not CONF.allow_resize_to_same_host:
        filter_properties['ignore_hosts'].append(instance.host)

    if self.cell_type == 'api':
        # Commit reservations early and create migration record.
        self._resize_cells_support(context, quotas, instance,
                                   current_instance_type,
                                   new_instance_type)

    if not flavor_id:
        self._record_action_start(context, instance,
                                  instance_actions.MIGRATE)
    else:
        self._record_action_start(context, instance,
                                  instance_actions.RESIZE)

    scheduler_hint = {'filter_properties': filter_properties}
    self.compute_task_api.resize_instance(context, instance,
            extra_instance_updates, scheduler_hint=scheduler_hint,
            flavor=new_instance_type,
            reservations=quotas.reservations or [],
            clean_shutdown=clean_shutdown)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
                                vm_states.PAUSED, vm_states.SUSPENDED])
def shelve(self, context, instance, clean_shutdown=True):
    """Shelve an instance.

    Shuts down an instance and frees it up to be removed from the
    hypervisor.
    """
    instance.task_state = task_states.SHELVING
    instance.save(expected_task_state=[None])

    self._record_action_start(context, instance, instance_actions.SHELVE)

    if self.is_volume_backed_instance(context, instance):
        # Volume-backed instances carry their disk in the volume, so no
        # snapshot image is needed; offload directly.
        self.compute_rpcapi.shelve_offload_instance(
            context, instance=instance, clean_shutdown=clean_shutdown)
    else:
        # Reserve a snapshot image the instance can be respawned from
        # on unshelve.
        snapshot_name = '%s-shelved' % instance.display_name
        image_meta = self._create_image(context, instance, snapshot_name,
                                        'snapshot')
        self.compute_rpcapi.shelve_instance(
            context, instance=instance, image_id=image_meta['id'],
            clean_shutdown=clean_shutdown)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SHELVED])
def shelve_offload(self, context, instance, clean_shutdown=True):
    """Remove a shelved instance from the hypervisor."""
    # Claim the instance before asking compute to drop it from the
    # hypervisor.
    instance.task_state = task_states.SHELVING_OFFLOADING
    instance.save(expected_task_state=[None])

    self.compute_rpcapi.shelve_offload_instance(
        context, instance=instance, clean_shutdown=clean_shutdown)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SHELVED,
                                vm_states.SHELVED_OFFLOADED])
def unshelve(self, context, instance):
    """Restore a shelved instance."""
    instance.task_state = task_states.UNSHELVING
    instance.save(expected_task_state=[None])

    self._record_action_start(context, instance, instance_actions.UNSHELVE)

    # Unshelve is dispatched through the conductor task API.
    self.compute_task_api.unshelve_instance(context, instance)
@wrap_check_policy
@check_instance_lock
def add_fixed_ip(self, context, instance, network_id):
    """Add fixed_ip from specified network to given instance."""
    # Pure pass-through; the compute node performs the change.
    self.compute_rpcapi.add_fixed_ip_to_instance(
        context, instance=instance, network_id=network_id)
@wrap_check_policy
@check_instance_lock
def remove_fixed_ip(self, context, instance, address):
    """Remove fixed_ip from specified network to given instance."""
    # Pure pass-through; the compute node performs the change.
    self.compute_rpcapi.remove_fixed_ip_from_instance(
        context, instance=instance, address=address)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE])
def pause(self, context, instance):
    """Pause the given instance."""
    # Mark the transition, log the action, then hand off to compute.
    instance.task_state = task_states.PAUSING
    instance.save(expected_task_state=[None])

    self._record_action_start(context, instance, instance_actions.PAUSE)

    self.compute_rpcapi.pause_instance(context, instance)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.PAUSED])
def unpause(self, context, instance):
    """Unpause the given instance."""
    # Mark the transition, log the action, then hand off to compute.
    instance.task_state = task_states.UNPAUSING
    instance.save(expected_task_state=[None])

    self._record_action_start(context, instance, instance_actions.UNPAUSE)

    self.compute_rpcapi.unpause_instance(context, instance)
@wrap_check_policy
def get_diagnostics(self, context, instance):
    """Retrieve diagnostics for the given instance."""
    # Straight pass-through to the compute node hosting the instance.
    diagnostics = self.compute_rpcapi.get_diagnostics(
        context, instance=instance)
    return diagnostics
@wrap_check_policy
def get_instance_diagnostics(self, context, instance):
    """Retrieve diagnostics for the given instance."""
    # Straight pass-through to the compute node hosting the instance.
    diagnostics = self.compute_rpcapi.get_instance_diagnostics(
        context, instance=instance)
    return diagnostics
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE])
def suspend(self, context, instance):
    """Suspend the given instance."""
    # Mark the transition, log the action, then hand off to compute.
    instance.task_state = task_states.SUSPENDING
    instance.save(expected_task_state=[None])

    self._record_action_start(context, instance, instance_actions.SUSPEND)

    self.compute_rpcapi.suspend_instance(context, instance)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.SUSPENDED])
def resume(self, context, instance):
    """Resume the given instance."""
    # Mark the transition, log the action, then hand off to compute.
    instance.task_state = task_states.RESUMING
    instance.save(expected_task_state=[None])

    self._record_action_start(context, instance, instance_actions.RESUME)

    self.compute_rpcapi.resume_instance(context, instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
                                vm_states.ERROR])
def rescue(self, context, instance, rescue_password=None,
           rescue_image_ref=None, clean_shutdown=True):
    """Rescue the given instance.

    :param rescue_password: optional admin password for the rescue
        environment
    :param rescue_image_ref: optional image to boot the rescue
        environment from
    :raises: exception.InstanceNotRescuable for volume-backed instances
    """
    bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
        context, instance.uuid)
    # Every attached volume must pass the attached-state check first.
    for bdm in bdms:
        if not bdm.volume_id:
            continue
        volume = self.volume_api.get(context, bdm.volume_id)
        self.volume_api.check_attached(context, volume)

    if self.is_volume_backed_instance(context, instance, bdms):
        reason = _("Cannot rescue a volume-backed instance")
        raise exception.InstanceNotRescuable(instance_id=instance.uuid,
                                             reason=reason)

    instance.task_state = task_states.RESCUING
    instance.save(expected_task_state=[None])

    self._record_action_start(context, instance, instance_actions.RESCUE)

    self.compute_rpcapi.rescue_instance(
        context, instance=instance, rescue_password=rescue_password,
        rescue_image_ref=rescue_image_ref, clean_shutdown=clean_shutdown)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.RESCUED])
def unrescue(self, context, instance):
    """Unrescue the given instance."""
    # Mark the transition, log the action, then hand off to compute.
    instance.task_state = task_states.UNRESCUING
    instance.save(expected_task_state=[None])

    self._record_action_start(context, instance, instance_actions.UNRESCUE)

    self.compute_rpcapi.unrescue_instance(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE])
def set_admin_password(self, context, instance, password=None):
    """Set the root/admin password for the given instance.

    @param context: Nova auth context.
    @param instance: Nova instance object.
    @param password: The admin password for the instance.
    """
    # Claim the instance for the password update before dispatching.
    instance.task_state = task_states.UPDATING_PASSWORD
    instance.save(expected_task_state=[None])

    self._record_action_start(context, instance,
                              instance_actions.CHANGE_PASSWORD)

    self.compute_rpcapi.set_admin_password(
        context, instance=instance, new_pass=password)
@wrap_check_policy
@check_instance_host
def get_vnc_console(self, context, instance, console_type):
    """Return the access URL for a VNC console on the instance."""
    connect_info = self.compute_rpcapi.get_vnc_console(
        context, instance=instance, console_type=console_type)

    # Register the token with consoleauth so the console proxy can
    # validate the client's connection.
    self.consoleauth_rpcapi.authorize_console(
        context, connect_info['token'], console_type,
        connect_info['host'], connect_info['port'],
        connect_info['internal_access_path'], instance.uuid,
        access_url=connect_info['access_url'])

    return {'url': connect_info['access_url']}
@check_instance_host
def get_vnc_connect_info(self, context, instance, console_type):
    """Used in a child cell to get console info."""
    return self.compute_rpcapi.get_vnc_console(
        context, instance=instance, console_type=console_type)
@wrap_check_policy
@check_instance_host
def get_spice_console(self, context, instance, console_type):
    """Return the access URL for a SPICE console on the instance."""
    connect_info = self.compute_rpcapi.get_spice_console(
        context, instance=instance, console_type=console_type)

    # Register the token with consoleauth so the console proxy can
    # validate the client's connection.
    self.consoleauth_rpcapi.authorize_console(
        context, connect_info['token'], console_type,
        connect_info['host'], connect_info['port'],
        connect_info['internal_access_path'], instance.uuid,
        access_url=connect_info['access_url'])

    return {'url': connect_info['access_url']}
@check_instance_host
def get_spice_connect_info(self, context, instance, console_type):
    """Used in a child cell to get console info."""
    return self.compute_rpcapi.get_spice_console(
        context, instance=instance, console_type=console_type)
@wrap_check_policy
@check_instance_host
def get_rdp_console(self, context, instance, console_type):
    """Return the access URL for an RDP console on the instance."""
    connect_info = self.compute_rpcapi.get_rdp_console(
        context, instance=instance, console_type=console_type)

    # Register the token with consoleauth so the console proxy can
    # validate the client's connection.
    self.consoleauth_rpcapi.authorize_console(
        context, connect_info['token'], console_type,
        connect_info['host'], connect_info['port'],
        connect_info['internal_access_path'], instance.uuid,
        access_url=connect_info['access_url'])

    return {'url': connect_info['access_url']}
@check_instance_host
def get_rdp_connect_info(self, context, instance, console_type):
"""Used in a child cell to get console info."""
connect_info = self.compute_rpcapi.get_rdp_console(context,
instance=instance, console_type=console_type)
return connect_info
@wrap_check_policy
@check_instance_host
def get_serial_console(self, context, instance, console_type):
"""Get a url to a serial console."""
connect_info = self.compute_rpcapi.get_serial_console(context,
instance=instance, console_type=console_type)
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type,
connect_info['host'], connect_info['port'],
connect_info['internal_access_path'], instance.uuid,
access_url=connect_info['access_url'])
return {'url': connect_info['access_url']}
@check_instance_host
def get_serial_console_connect_info(self, context, instance, console_type):
"""Used in a child cell to get serial console."""
connect_info = self.compute_rpcapi.get_serial_console(context,
instance=instance, console_type=console_type)
return connect_info
    @wrap_check_policy
    @check_instance_host
    def get_console_output(self, context, instance, tail_length=None):
        """Get console output for an instance.

        :param tail_length: if set, limit the output to that many
            trailing lines (passed through to the compute host).
        """
        return self.compute_rpcapi.get_console_output(context,
            instance=instance, tail_length=tail_length)
    @wrap_check_policy
    def lock(self, context, instance):
        """Lock the given instance."""
        # Only update the lock if we are an admin (non-owner)
        is_owner = instance.project_id == context.project_id
        if instance.locked and is_owner:
            # An owner re-locking an already-locked instance is a no-op;
            # an admin falls through so the lock holder can be escalated
            # from 'owner' to 'admin'.
            return
        context = context.elevated()
        LOG.debug('Locking', context=context, instance=instance)
        instance.locked = True
        # Record who holds the lock so unlock() can decide whether an
        # override policy check is needed.
        instance.locked_by = 'owner' if is_owner else 'admin'
        instance.save()
def is_expected_locked_by(self, context, instance):
is_owner = instance.project_id == context.project_id
expect_locked_by = 'owner' if is_owner else 'admin'
locked_by = instance.locked_by
if locked_by and locked_by != expect_locked_by:
return False
return True
    @wrap_check_policy
    def unlock(self, context, instance):
        """Unlock the given instance."""
        # If the instance was locked by someone else (e.g. an admin lock
        # being removed by the owner), check that we're allowed to
        # override the lock via the 'unlock_override' policy rule.
        if not self.skip_policy_check and not self.is_expected_locked_by(
                context, instance):
            check_policy(context, 'unlock_override', instance)
        context = context.elevated()
        LOG.debug('Unlocking', context=context, instance=instance)
        instance.locked = False
        instance.locked_by = None
        instance.save()
    @wrap_check_policy
    def get_lock(self, context, instance):
        """Return the boolean state of given instance's lock."""
        # Re-fetch the instance via self.get() rather than trusting the
        # passed-in object's (possibly stale) 'locked' attribute.
        return self.get(context, instance.uuid)['locked']
    @wrap_check_policy
    @check_instance_lock
    @check_instance_cell
    def reset_network(self, context, instance):
        """Reset networking on the instance."""
        # Delegated entirely to the instance's compute host.
        self.compute_rpcapi.reset_network(context, instance=instance)
    @wrap_check_policy
    @check_instance_lock
    @check_instance_cell
    def inject_network_info(self, context, instance):
        """Inject network info for the instance."""
        # Delegated entirely to the instance's compute host.
        self.compute_rpcapi.inject_network_info(context, instance=instance)
    def _attach_volume(self, context, instance, volume_id, device,
                       disk_bus, device_type):
        """Attach an existing volume to an existing instance.
        This method is separated to make it possible for cells version
        to override it.

        :returns: the device name assigned to the new attachment
        """
        # NOTE(vish): This is done on the compute host because we want
        #             to avoid a race where two devices are requested at
        #             the same time. When db access is removed from
        #             compute, the bdm will be created here and we will
        #             have to make sure that they are assigned atomically.
        volume_bdm = self.compute_rpcapi.reserve_block_device_name(
            context, instance, device, volume_id, disk_bus=disk_bus,
            device_type=device_type)
        try:
            volume = self.volume_api.get(context, volume_id)
            self.volume_api.check_attach(context, volume, instance=instance)
            self.volume_api.reserve_volume(context, volume_id)
            self.compute_rpcapi.attach_volume(context, instance, volume_bdm)
        except Exception:
            # Roll back the device-name reservation (the BDM) if any of
            # the check/reserve/attach steps fail, then re-raise.
            with excutils.save_and_reraise_exception():
                volume_bdm.destroy()
        return volume_bdm.device_name
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.STOPPED, vm_states.RESIZED,
vm_states.SOFT_DELETED])
def attach_volume(self, context, instance, volume_id, device=None,
disk_bus=None, device_type=None):
"""Attach an existing volume to an existing instance."""
# NOTE(vish): Fail fast if the device is not going to pass. This
# will need to be removed along with the test if we
# change the logic in the manager for what constitutes
# a valid device.
if device and not block_device.match_device(device):
raise exception.InvalidDevicePath(path=device)
return self._attach_volume(context, instance, volume_id, device,
disk_bus, device_type)
    def _detach_volume(self, context, instance, volume):
        """Detach volume from instance.
        This method is separated to make it easier for cells version
        to override.
        """
        # Validate, flip the volume to 'detaching' in cinder, then ask
        # the compute host to perform the actual detach.
        self.volume_api.check_detach(context, volume)
        self.volume_api.begin_detaching(context, volume['id'])
        self.compute_rpcapi.detach_volume(context, instance=instance,
            volume_id=volume['id'])
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.STOPPED, vm_states.RESIZED,
vm_states.SOFT_DELETED])
def detach_volume(self, context, instance, volume):
"""Detach a volume from an instance."""
if volume['attach_status'] == 'detached':
msg = _("Volume must be attached in order to detach.")
raise exception.InvalidVolume(reason=msg)
# The caller likely got the instance from volume['instance_uuid']
# in the first place, but let's sanity check.
if volume['instance_uuid'] != instance.uuid:
raise exception.VolumeUnattached(volume_id=volume['id'])
self._detach_volume(context, instance, volume)
    @wrap_check_policy
    @check_instance_lock
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
                                    vm_states.SUSPENDED, vm_states.STOPPED,
                                    vm_states.RESIZED, vm_states.SOFT_DELETED])
    def swap_volume(self, context, instance, old_volume, new_volume):
        """Swap volume attached to an instance.

        :raises: exception.VolumeUnattached if old_volume is not attached
        :raises: exception.InvalidVolume if any other precondition fails
        """
        if old_volume['attach_status'] == 'detached':
            raise exception.VolumeUnattached(volume_id=old_volume['id'])
        # The caller likely got the instance from volume['instance_uuid']
        # in the first place, but let's sanity check.
        if old_volume['instance_uuid'] != instance.uuid:
            msg = _("Old volume is attached to a different instance.")
            raise exception.InvalidVolume(reason=msg)
        if new_volume['attach_status'] == 'attached':
            msg = _("New volume must be detached in order to swap.")
            raise exception.InvalidVolume(reason=msg)
        if int(new_volume['size']) < int(old_volume['size']):
            msg = _("New volume must be the same size or larger.")
            raise exception.InvalidVolume(reason=msg)
        self.volume_api.check_detach(context, old_volume)
        self.volume_api.check_attach(context, new_volume, instance=instance)
        # Mark both volumes as transitioning in cinder before kicking off
        # the swap on the compute host.
        self.volume_api.begin_detaching(context, old_volume['id'])
        self.volume_api.reserve_volume(context, new_volume['id'])
        try:
            self.compute_rpcapi.swap_volume(
                context, instance=instance,
                old_volume_id=old_volume['id'],
                new_volume_id=new_volume['id'])
        except Exception:
            # Undo the cinder state transitions if dispatch fails, then
            # re-raise the original error.
            with excutils.save_and_reraise_exception():
                self.volume_api.roll_detaching(context, old_volume['id'])
                self.volume_api.unreserve_volume(context, new_volume['id'])
    @wrap_check_policy
    @check_instance_lock
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
                                    vm_states.STOPPED],
                          task_state=[None])
    def attach_interface(self, context, instance, network_id, port_id,
                         requested_ip):
        """Use hotplug to add a network adapter to an instance.

        :returns: the result of the compute host's attach_interface RPC
        """
        return self.compute_rpcapi.attach_interface(context,
            instance=instance, network_id=network_id, port_id=port_id,
            requested_ip=requested_ip)
    @wrap_check_policy
    @check_instance_lock
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
                                    vm_states.STOPPED],
                          task_state=[None])
    def detach_interface(self, context, instance, port_id):
        """Detach a network adapter from an instance."""
        self.compute_rpcapi.detach_interface(context, instance=instance,
            port_id=port_id)
    @wrap_check_policy
    def get_instance_metadata(self, context, instance):
        """Get all metadata associated with an instance.

        :returns: the instance's metadata as stored in the database
        """
        return self.db.instance_metadata_get(context, instance.uuid)
    def get_all_instance_metadata(self, context, search_filts):
        """Get user metadata across all instances, filtered and policy
        checked by _get_all_instance_metadata.
        """
        return self._get_all_instance_metadata(
            context, search_filts, metadata_type='metadata')
    def get_all_system_metadata(self, context, search_filts):
        """Get system metadata across all instances, filtered and policy
        checked by _get_all_instance_metadata.
        """
        return self._get_all_instance_metadata(
            context, search_filts, metadata_type='system_metadata')
def _get_all_instance_metadata(self, context, search_filts, metadata_type):
"""Get all metadata."""
instances = self._get_instances_by_filters(context, filters={},
sort_keys=['created_at'],
sort_dirs=['desc'])
for instance in instances:
try:
check_policy(context, 'get_all_instance_%s' % metadata_type,
instance)
except exception.PolicyNotAuthorized:
# failed policy check - not allowed to
# read this metadata
continue
return utils.filter_and_format_resource_metadata('instance', instances,
search_filts, metadata_type)
    @wrap_check_policy
    @check_instance_lock
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
                                    vm_states.SUSPENDED, vm_states.STOPPED],
                          task_state=None)
    def delete_instance_metadata(self, context, instance, key):
        """Delete the given metadata item from an instance."""
        instance.delete_metadata_key(key)
        # Tell the compute host about the removal; ['-'] marks a deletion
        # in the metadata diff format.
        self.compute_rpcapi.change_instance_metadata(context,
            instance=instance,
            diff={key: ['-']})
    @wrap_check_policy
    @check_instance_lock
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
                                    vm_states.SUSPENDED, vm_states.STOPPED],
                          task_state=None)
    def update_instance_metadata(self, context, instance,
                                 metadata, delete=False):
        """Updates or creates instance metadata.
        If delete is True, metadata items that are not specified in the
        `metadata` argument will be deleted.

        :returns: the complete new metadata dict
        """
        # Keep a copy of the original so we can compute the diff after
        # the save.
        orig = dict(instance.metadata)
        if delete:
            _metadata = metadata
        else:
            _metadata = dict(instance.metadata)
            _metadata.update(metadata)
        # Enforce quota limits before persisting anything.
        self._check_metadata_properties_quota(context, _metadata)
        instance.metadata = _metadata
        instance.save()
        # Push only the computed diff to the compute host.
        diff = _diff_dict(orig, instance.metadata)
        self.compute_rpcapi.change_instance_metadata(context,
            instance=instance,
            diff=diff)
        return _metadata
def get_instance_faults(self, context, instances):
"""Get all faults for a list of instance uuids."""
if not instances:
return {}
for instance in instances:
check_policy(context, 'get_instance_faults', instance)
uuids = [instance.uuid for instance in instances]
return self.db.instance_fault_get_by_instance_uuids(context, uuids)
def is_volume_backed_instance(self, context, instance, bdms=None):
if not instance.image_ref:
return True
if bdms is None:
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
root_bdm = bdms.root_bdm()
if not root_bdm:
return False
return root_bdm.is_volume
    @check_instance_lock
    @check_instance_cell
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED])
    def live_migrate(self, context, instance, block_migration,
                     disk_over_commit, host_name):
        """Migrate a server lively to a new host.

        :param block_migration: whether to perform a block migration
        :param disk_over_commit: whether to allow disk over-commit on the
            destination
        :param host_name: target host; may be None (another host is then
            chosen for the migration)
        """
        LOG.debug("Going to try to live migrate instance to %s",
                  host_name or "another host", instance=instance)
        instance.task_state = task_states.MIGRATING
        # expected_task_state=[None] guards against racing task-state
        # changes from other operations.
        instance.save(expected_task_state=[None])
        self._record_action_start(context, instance,
                                  instance_actions.LIVE_MIGRATION)
        self.compute_task_api.live_migrate_instance(context, instance,
            host_name, block_migration=block_migration,
            disk_over_commit=disk_over_commit)
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
                                    vm_states.ERROR])
    def evacuate(self, context, instance, host, on_shared_storage,
                 admin_password=None):
        """Running evacuate to target host.
        Checking vm compute host state, if the host not in expected_state,
        raising an exception.
        :param instance: The instance to evacuate
        :param host: Target host. if not set, the scheduler will pick up one
        :param on_shared_storage: True if instance files on shared storage
        :param admin_password: password to set on rebuilt instance
        :raises: exception.ComputeServiceInUse if the source host's
            compute service is still up
        """
        LOG.debug('vm evacuation scheduled', instance=instance)
        inst_host = instance.host
        service = objects.Service.get_by_compute_host(context, inst_host)
        # Evacuation only makes sense when the source host is down;
        # refuse if its compute service is still reporting in.
        if self.servicegroup_api.service_is_up(service):
            LOG.error(_LE('Instance compute service state on %s '
                          'expected to be down, but it was up.'), inst_host)
            raise exception.ComputeServiceInUse(host=inst_host)
        instance.task_state = task_states.REBUILDING
        instance.save(expected_task_state=[None])
        self._record_action_start(context, instance, instance_actions.EVACUATE)
        # Evacuate is implemented as a rebuild onto a new host
        # (recreate=True), reusing the existing image and metadata.
        return self.compute_task_api.rebuild_instance(context,
                       instance=instance,
                       new_pass=admin_password,
                       injected_files=None,
                       image_ref=None,
                       orig_image_ref=None,
                       orig_sys_metadata=None,
                       bdms=None,
                       recreate=True,
                       on_shared_storage=on_shared_storage,
                       host=host)
    def get_migrations(self, context, filters):
        """Get all migrations for the given filters."""
        return objects.MigrationList.get_by_filters(context, filters)
@wrap_check_policy
def volume_snapshot_create(self, context, volume_id, create_info):
bdm = objects.BlockDeviceMapping.get_by_volume_id(
context, volume_id, expected_attrs=['instance'])
self.compute_rpcapi.volume_snapshot_create(context, bdm.instance,
volume_id, create_info)
snapshot = {
'snapshot': {
'id': create_info.get('id'),
'volumeId': volume_id
}
}
return snapshot
    @wrap_check_policy
    def volume_snapshot_delete(self, context, volume_id, snapshot_id,
                               delete_info):
        """Forward an assisted volume snapshot delete to the compute host
        of the instance the volume is attached to.
        """
        bdm = objects.BlockDeviceMapping.get_by_volume_id(
            context, volume_id, expected_attrs=['instance'])
        self.compute_rpcapi.volume_snapshot_delete(context, bdm.instance,
            volume_id, snapshot_id, delete_info)
def external_instance_event(self, context, instances, events):
# NOTE(danms): The external API consumer just provides events,
# but doesn't know where they go. We need to collate lists
# by the host the affected instance is on and dispatch them
# according to host
instances_by_host = {}
events_by_host = {}
hosts_by_instance = {}
for instance in instances:
instances_on_host = instances_by_host.get(instance.host, [])
instances_on_host.append(instance)
instances_by_host[instance.host] = instances_on_host
hosts_by_instance[instance.uuid] = instance.host
for event in events:
host = hosts_by_instance[event.instance_uuid]
events_on_host = events_by_host.get(host, [])
events_on_host.append(event)
events_by_host[host] = events_on_host
for host in instances_by_host:
# TODO(salv-orlando): Handle exceptions raised by the rpc api layer
# in order to ensure that a failure in processing events on a host
# will not prevent processing events on other hosts
self.compute_rpcapi.external_instance_event(
context, instances_by_host[host], events_by_host[host])
class HostAPI(base.Base):
    """Sub-set of the Compute Manager API for managing host operations."""
    def __init__(self, rpcapi=None):
        # An alternate rpcapi may be injected (rpcapi parameter); default
        # to the standard compute RPC API.
        self.rpcapi = rpcapi or compute_rpcapi.ComputeAPI()
        self.servicegroup_api = servicegroup.API()
        super(HostAPI, self).__init__()
    def _assert_host_exists(self, context, host_name, must_be_up=False):
        """Raise HostNotFound if compute host doesn't exist.

        :param must_be_up: also require the host's compute service to be
            alive according to the servicegroup API
        :returns: the canonical host name from the service record
        """
        service = objects.Service.get_by_compute_host(context, host_name)
        if not service:
            raise exception.HostNotFound(host=host_name)
        if must_be_up and not self.servicegroup_api.service_is_up(service):
            raise exception.ComputeServiceUnavailable(host=host_name)
        return service['host']
    @wrap_exception()
    def set_host_enabled(self, context, host_name, enabled):
        """Sets the specified host's ability to accept new instances."""
        host_name = self._assert_host_exists(context, host_name)
        payload = {'host_name': host_name, 'enabled': enabled}
        # Paired .start/.end notifications bracket the RPC call.
        compute_utils.notify_about_host_update(context,
                                               'set_enabled.start',
                                               payload)
        result = self.rpcapi.set_host_enabled(context, enabled=enabled,
                                              host=host_name)
        compute_utils.notify_about_host_update(context,
                                               'set_enabled.end',
                                               payload)
        return result
    def get_host_uptime(self, context, host_name):
        """Returns the result of calling "uptime" on the target host."""
        host_name = self._assert_host_exists(context, host_name,
                                             must_be_up=True)
        return self.rpcapi.get_host_uptime(context, host=host_name)
    @wrap_exception()
    def host_power_action(self, context, host_name, action):
        """Reboots, shuts down or powers up the host."""
        host_name = self._assert_host_exists(context, host_name)
        payload = {'host_name': host_name, 'action': action}
        # Paired .start/.end notifications bracket the RPC call.
        compute_utils.notify_about_host_update(context,
                                               'power_action.start',
                                               payload)
        result = self.rpcapi.host_power_action(context, action=action,
                                               host=host_name)
        compute_utils.notify_about_host_update(context,
                                               'power_action.end',
                                               payload)
        return result
    @wrap_exception()
    def set_host_maintenance(self, context, host_name, mode):
        """Start/Stop host maintenance window. On start, it triggers
        guest VMs evacuation.
        """
        host_name = self._assert_host_exists(context, host_name)
        payload = {'host_name': host_name, 'mode': mode}
        # Paired .start/.end notifications bracket the RPC call.
        compute_utils.notify_about_host_update(context,
                                               'set_maintenance.start',
                                               payload)
        result = self.rpcapi.host_maintenance_mode(context,
            host_param=host_name, mode=mode, host=host_name)
        compute_utils.notify_about_host_update(context,
                                               'set_maintenance.end',
                                               payload)
        return result
    def service_get_all(self, context, filters=None, set_zones=False):
        """Returns a list of services, optionally filtering the results.
        If specified, 'filters' should be a dictionary containing services
        attributes and matching values. Ie, to get a list of services for
        the 'compute' topic, use filters={'topic': 'compute'}.
        """
        if filters is None:
            filters = {}
        # 'disabled' is handled by the object layer, not by the generic
        # attribute-match loop below, so pop it out of the filters.
        disabled = filters.pop('disabled', None)
        if 'availability_zone' in filters:
            set_zones = True
        services = objects.ServiceList.get_all(context, disabled,
                                               set_zones=set_zones)
        ret_services = []
        for service in services:
            for key, val in six.iteritems(filters):
                if service[key] != val:
                    break
            else:
                # All filters matched.
                ret_services.append(service)
        return ret_services
    def service_get_by_compute_host(self, context, host_name):
        """Get service entry for the given compute hostname."""
        return objects.Service.get_by_compute_host(context, host_name)
    def _service_update(self, context, host_name, binary, params_to_update):
        """Performs the actual service update operation."""
        service = objects.Service.get_by_args(context, host_name, binary)
        service.update(params_to_update)
        service.save()
        return service
    def service_update(self, context, host_name, binary, params_to_update):
        """Enable / Disable a service.
        For compute services, this stops new builds and migrations going to
        the host.
        """
        return self._service_update(context, host_name, binary,
                                    params_to_update)
    def _service_delete(self, context, service_id):
        """Performs the actual Service deletion operation."""
        objects.Service.get_by_id(context, service_id).destroy()
    def service_delete(self, context, service_id):
        """Deletes the specified service."""
        self._service_delete(context, service_id)
    def instance_get_all_by_host(self, context, host_name):
        """Return all instances on the given host."""
        return objects.InstanceList.get_by_host(context, host_name)
    def task_log_get_all(self, context, task_name, period_beginning,
                         period_ending, host=None, state=None):
        """Return the task logs within a given range, optionally
        filtering by host and/or state.
        """
        return self.db.task_log_get_all(context, task_name,
                                        period_beginning,
                                        period_ending,
                                        host=host,
                                        state=state)
    def compute_node_get(self, context, compute_id):
        """Return compute node entry for particular integer ID."""
        return objects.ComputeNode.get_by_id(context, int(compute_id))
    def compute_node_get_all(self, context):
        """Return all compute node records."""
        return objects.ComputeNodeList.get_all(context)
    def compute_node_search_by_hypervisor(self, context, hypervisor_match):
        """Return compute nodes matching the given hypervisor name."""
        return objects.ComputeNodeList.get_by_hypervisor(context,
                                                         hypervisor_match)
    def compute_node_statistics(self, context):
        """Return aggregate statistics over all compute nodes."""
        return self.db.compute_node_statistics(context)
class InstanceActionAPI(base.Base):
    """Sub-set of the Compute Manager API for managing instance actions."""
    def actions_get(self, context, instance):
        """Return every recorded action for the given instance."""
        return objects.InstanceActionList.get_by_instance_uuid(
            context, instance.uuid)
    def action_get_by_request_id(self, context, instance, request_id):
        """Return the action recorded for instance under request_id."""
        return objects.InstanceAction.get_by_request_id(
            context, instance.uuid, request_id)
    def action_events_get(self, context, instance, action_id):
        """Return the events belonging to one instance action."""
        return objects.InstanceActionEventList.get_by_action(
            context, action_id)
class AggregateAPI(base.Base):
    """Sub-set of the Compute Manager API for managing host aggregates."""
    def __init__(self, **kwargs):
        self.compute_rpcapi = compute_rpcapi.ComputeAPI()
        self.scheduler_client = scheduler_client.SchedulerClient()
        super(AggregateAPI, self).__init__(**kwargs)
    @wrap_exception()
    def create_aggregate(self, context, aggregate_name, availability_zone):
        """Creates the model for the aggregate."""
        aggregate = objects.Aggregate(context=context)
        aggregate.name = aggregate_name
        if availability_zone:
            aggregate.metadata = {'availability_zone': availability_zone}
        aggregate.create()
        # Keep the scheduler's view of aggregates in sync.
        self.scheduler_client.update_aggregates(context, [aggregate])
        return aggregate
    def get_aggregate(self, context, aggregate_id):
        """Get an aggregate by id."""
        return objects.Aggregate.get_by_id(context, aggregate_id)
    def get_aggregate_list(self, context):
        """Get all the aggregates."""
        return objects.AggregateList.get_all(context)
    @wrap_exception()
    def update_aggregate(self, context, aggregate_id, values):
        """Update the properties of an aggregate."""
        aggregate = objects.Aggregate.get_by_id(context, aggregate_id)
        if 'name' in values:
            aggregate.name = values.pop('name')
            aggregate.save()
        # NOTE(review): the rename above is saved before the
        # availability-zone safety check below, so a failed check does
        # not roll the rename back -- confirm this is intentional.
        self.is_safe_to_update_az(context, values, aggregate=aggregate,
                                  action_name=AGGREGATE_ACTION_UPDATE)
        if values:
            aggregate.update_metadata(values)
        self.scheduler_client.update_aggregates(context, [aggregate])
        # If updated values include availability_zones, then the cache
        # which stored availability_zones and host need to be reset
        if values.get('availability_zone'):
            availability_zones.reset_cache()
        return aggregate
    @wrap_exception()
    def update_aggregate_metadata(self, context, aggregate_id, metadata):
        """Updates the aggregate metadata."""
        aggregate = objects.Aggregate.get_by_id(context, aggregate_id)
        self.is_safe_to_update_az(context, metadata, aggregate=aggregate,
                                  action_name=AGGREGATE_ACTION_UPDATE_META)
        aggregate.update_metadata(metadata)
        self.scheduler_client.update_aggregates(context, [aggregate])
        # If updated metadata include availability_zones, then the cache
        # which stored availability_zones and host need to be reset
        if metadata and metadata.get('availability_zone'):
            availability_zones.reset_cache()
        return aggregate
    @wrap_exception()
    def delete_aggregate(self, context, aggregate_id):
        """Deletes the aggregate."""
        aggregate_payload = {'aggregate_id': aggregate_id}
        compute_utils.notify_about_aggregate_update(context,
                                                    "delete.start",
                                                    aggregate_payload)
        aggregate = objects.Aggregate.get_by_id(context, aggregate_id)
        # Refuse to delete an aggregate that still contains hosts.
        if len(aggregate.hosts) > 0:
            msg = _("Host aggregate is not empty")
            raise exception.InvalidAggregateActionDelete(
                aggregate_id=aggregate_id, reason=msg)
        aggregate.destroy()
        self.scheduler_client.delete_aggregate(context, aggregate)
        compute_utils.notify_about_aggregate_update(context,
                                                    "delete.end",
                                                    aggregate_payload)
    def is_safe_to_update_az(self, context, metadata, aggregate,
                             hosts=None,
                             action_name=AGGREGATE_ACTION_ADD):
        """Determine if updates alter an aggregate's availability zone.
        :param context: local context
        :param metadata: Target metadata for updating aggregate
        :param aggregate: Aggregate to update
        :param hosts: Hosts to check. If None, aggregate.hosts is used
        :type hosts: list
        :action_name: Calling method for logging purposes
        """
        if 'availability_zone' in metadata:
            _hosts = hosts or aggregate.hosts
            host_aggregates = objects.AggregateList.get_by_metadata_key(
                context, 'availability_zone', hosts=_hosts)
            # Any other aggregate on these hosts that declares a
            # different AZ is a conflict.
            conflicting_azs = [
                agg.availability_zone for agg in host_aggregates
                if agg.availability_zone != metadata['availability_zone']
                and agg.id != aggregate.id]
            if conflicting_azs:
                msg = _("One or more hosts already in availability zone(s) "
                        "%s") % conflicting_azs
                self._raise_invalid_aggregate_exc(action_name, aggregate.id,
                                                  msg)
    def _raise_invalid_aggregate_exc(self, action_name, aggregate_id, reason):
        # Map the calling action onto the matching exception type.
        if action_name == AGGREGATE_ACTION_ADD:
            raise exception.InvalidAggregateActionAdd(
                aggregate_id=aggregate_id, reason=reason)
        elif action_name == AGGREGATE_ACTION_UPDATE:
            raise exception.InvalidAggregateActionUpdate(
                aggregate_id=aggregate_id, reason=reason)
        elif action_name == AGGREGATE_ACTION_UPDATE_META:
            raise exception.InvalidAggregateActionUpdateMeta(
                aggregate_id=aggregate_id, reason=reason)
        elif action_name == AGGREGATE_ACTION_DELETE:
            raise exception.InvalidAggregateActionDelete(
                aggregate_id=aggregate_id, reason=reason)
        raise exception.NovaException(
            _("Unexpected aggregate action %s") % action_name)
    def _update_az_cache_for_host(self, context, host_name, aggregate_meta):
        # Update the availability_zone cache to avoid getting wrong
        # availability_zone in cache retention time when add/remove
        # host to/from aggregate.
        if aggregate_meta and aggregate_meta.get('availability_zone'):
            availability_zones.update_host_availability_zone_cache(context,
                                                                   host_name)
    @wrap_exception()
    def add_host_to_aggregate(self, context, aggregate_id, host_name):
        """Adds the host to an aggregate."""
        aggregate_payload = {'aggregate_id': aggregate_id,
                             'host_name': host_name}
        compute_utils.notify_about_aggregate_update(context,
                                                    "addhost.start",
                                                    aggregate_payload)
        # validates the host; ComputeHostNotFound is raised if invalid
        objects.Service.get_by_compute_host(context, host_name)
        aggregate = objects.Aggregate.get_by_id(context, aggregate_id)
        self.is_safe_to_update_az(context, aggregate.metadata,
                                  hosts=[host_name], aggregate=aggregate)
        aggregate.add_host(host_name)
        self.scheduler_client.update_aggregates(context, [aggregate])
        self._update_az_cache_for_host(context, host_name, aggregate.metadata)
        # NOTE(jogo): Send message to host to support resource pools
        self.compute_rpcapi.add_aggregate_host(context,
            aggregate=aggregate, host_param=host_name, host=host_name)
        aggregate_payload.update({'name': aggregate['name']})
        compute_utils.notify_about_aggregate_update(context,
                                                    "addhost.end",
                                                    aggregate_payload)
        return aggregate
    @wrap_exception()
    def remove_host_from_aggregate(self, context, aggregate_id, host_name):
        """Removes host from the aggregate."""
        aggregate_payload = {'aggregate_id': aggregate_id,
                             'host_name': host_name}
        compute_utils.notify_about_aggregate_update(context,
                                                    "removehost.start",
                                                    aggregate_payload)
        # validates the host; ComputeHostNotFound is raised if invalid
        objects.Service.get_by_compute_host(context, host_name)
        aggregate = objects.Aggregate.get_by_id(context, aggregate_id)
        aggregate.delete_host(host_name)
        self.scheduler_client.update_aggregates(context, [aggregate])
        self._update_az_cache_for_host(context, host_name, aggregate.metadata)
        self.compute_rpcapi.remove_aggregate_host(context,
            aggregate=aggregate, host_param=host_name, host=host_name)
        compute_utils.notify_about_aggregate_update(context,
                                                    "removehost.end",
                                                    aggregate_payload)
        return aggregate
class KeypairAPI(base.Base):
    """Subset of the Compute Manager API for managing key pairs."""
    get_notifier = functools.partial(rpc.get_notifier, service='api')
    wrap_exception = functools.partial(exception.wrap_exception,
                                       get_notifier=get_notifier)
    def _notify(self, context, event_suffix, keypair_name):
        # Emit a 'keypair.<event_suffix>' notification for auditing.
        payload = {
            'tenant_id': context.project_id,
            'user_id': context.user_id,
            'key_name': keypair_name,
        }
        notify = self.get_notifier()
        notify.info(context, 'keypair.%s' % event_suffix, payload)
    def _validate_new_key_pair(self, context, user_id, key_name, key_type):
        """Validate the name and quota headroom for a new keypair.

        NOTE(review): key_type is accepted but not validated here --
        confirm whether unknown types should be rejected at this point.
        """
        safe_chars = "_- " + string.digits + string.ascii_letters
        clean_value = "".join(x for x in key_name if x in safe_chars)
        if clean_value != key_name:
            raise exception.InvalidKeypair(
                reason=_("Keypair name contains unsafe characters"))
        try:
            utils.check_string_length(key_name, min_length=1, max_length=255)
        except exception.InvalidInput:
            raise exception.InvalidKeypair(
                reason=_('Keypair name must be string and between '
                         '1 and 255 characters long'))
        count = objects.Quotas.count(context, 'key_pairs', user_id)
        try:
            objects.Quotas.limit_check(context, key_pairs=count + 1)
        except exception.OverQuota:
            raise exception.KeypairLimitExceeded()
    @wrap_exception()
    def import_key_pair(self, context, user_id, key_name, public_key,
                        key_type=keypair_obj.KEYPAIR_TYPE_SSH):
        """Import a key pair using an existing public key."""
        self._validate_new_key_pair(context, user_id, key_name, key_type)
        self._notify(context, 'import.start', key_name)
        fingerprint = self._generate_fingerprint(public_key, key_type)
        keypair = objects.KeyPair(context)
        keypair.user_id = user_id
        keypair.name = key_name
        keypair.type = key_type
        keypair.fingerprint = fingerprint
        keypair.public_key = public_key
        keypair.create()
        self._notify(context, 'import.end', key_name)
        return keypair
    @wrap_exception()
    def create_key_pair(self, context, user_id, key_name,
                        key_type=keypair_obj.KEYPAIR_TYPE_SSH):
        """Create a new key pair.

        :returns: a (keypair, private_key) tuple; the private key is
            only returned here and is not stored on the keypair object.
        """
        self._validate_new_key_pair(context, user_id, key_name, key_type)
        self._notify(context, 'create.start', key_name)
        private_key, public_key, fingerprint = self._generate_key_pair(
            user_id, key_type)
        keypair = objects.KeyPair(context)
        keypair.user_id = user_id
        keypair.name = key_name
        keypair.type = key_type
        keypair.fingerprint = fingerprint
        keypair.public_key = public_key
        keypair.create()
        self._notify(context, 'create.end', key_name)
        return keypair, private_key
    def _generate_fingerprint(self, public_key, key_type):
        # NOTE(review): falls through and returns None for unrecognized
        # key types -- confirm callers only pass SSH or X509.
        if key_type == keypair_obj.KEYPAIR_TYPE_SSH:
            return crypto.generate_fingerprint(public_key)
        elif key_type == keypair_obj.KEYPAIR_TYPE_X509:
            return crypto.generate_x509_fingerprint(public_key)
    def _generate_key_pair(self, user_id, key_type):
        # NOTE(review): falls through and returns None for unrecognized
        # key types -- confirm callers only pass SSH or X509.
        if key_type == keypair_obj.KEYPAIR_TYPE_SSH:
            return crypto.generate_key_pair()
        elif key_type == keypair_obj.KEYPAIR_TYPE_X509:
            return crypto.generate_winrm_x509_cert(user_id)
    @wrap_exception()
    def delete_key_pair(self, context, user_id, key_name):
        """Delete a keypair by name."""
        self._notify(context, 'delete.start', key_name)
        objects.KeyPair.destroy_by_name(context, user_id, key_name)
        self._notify(context, 'delete.end', key_name)
    def get_key_pairs(self, context, user_id):
        """List key pairs."""
        return objects.KeyPairList.get_by_user(context, user_id)
    def get_key_pair(self, context, user_id, key_name):
        """Get a keypair by name."""
        return objects.KeyPair.get_by_name(context, user_id, key_name)
class SecurityGroupAPI(base.Base, security_group_base.SecurityGroupBase):
"""Sub-set of the Compute API related to managing security groups
and security group rules
"""
# The nova security group api does not use a uuid for the id.
id_is_uuid = False
    def __init__(self, skip_policy_check=False, **kwargs):
        # skip_policy_check is set by trusted internal callers that have
        # already performed their own authorization.
        super(SecurityGroupAPI, self).__init__(**kwargs)
        self.skip_policy_check = skip_policy_check
        self.compute_rpcapi = compute_rpcapi.ComputeAPI()
    def validate_property(self, value, property, allowed):
        """Validate given security group property.
        :param value: the value to validate, as a string or unicode
        :param property: the property, either 'name' or 'description'
        :param allowed: the range of characters allowed
        :raises: via raise_invalid_property when validation fails
        """
        try:
            val = value.strip()
        except AttributeError:
            msg = _("Security group %s is not a string or unicode") % property
            self.raise_invalid_property(msg)
        # NOTE(review): check_string_length may raise directly here (it
        # is not wrapped like the keypair validation is) -- confirm that
        # is the intended error surface.
        utils.check_string_length(val, name=property, min_length=1,
                                  max_length=255)
        if allowed and not re.match(allowed, val):
            # Some validation to ensure that values match API spec.
            # - Alphanumeric characters, spaces, dashes, and underscores.
            # TODO(Daviey): LP: #813685 extend beyond group_name checking, and
            # probably create a param validator that can be used elsewhere.
            msg = (_("Value (%(value)s) for parameter Group%(property)s is "
                     "invalid. Content limited to '%(allowed)s'.") %
                   {'value': value, 'allowed': allowed,
                    'property': property.capitalize()})
            self.raise_invalid_property(msg)
def ensure_default(self, context):
"""Ensure that a context has a security group.
Creates a security group for the security context if it does not
already exist.
:param context: the security context
"""
self.db.security_group_ensure_default(context)
def create_security_group(self, context, name, description):
quotas = objects.Quotas(context)
try:
quotas.reserve(security_groups=1)
except exception.OverQuota:
msg = _("Quota exceeded, too many security groups.")
self.raise_over_quota(msg)
LOG.info(_LI("Create Security Group %s"), name, context=context)
try:
self.ensure_default(context)
group = {'user_id': context.user_id,
'project_id': context.project_id,
'name': name,
'description': description}
try:
group_ref = self.db.security_group_create(context, group)
except exception.SecurityGroupExists:
msg = _('Security group %s already exists') % name
self.raise_group_already_exists(msg)
# Commit the reservation
quotas.commit()
except Exception:
with excutils.save_and_reraise_exception():
quotas.rollback()
return group_ref
def update_security_group(self, context, security_group,
name, description):
if security_group['name'] in RO_SECURITY_GROUPS:
msg = (_("Unable to update system group '%s'") %
security_group['name'])
self.raise_invalid_group(msg)
group = {'name': name,
'description': description}
columns_to_join = ['rules.grantee_group']
group_ref = self.db.security_group_update(context,
security_group['id'],
group,
columns_to_join=columns_to_join)
return group_ref
def get(self, context, name=None, id=None, map_exception=False):
self.ensure_default(context)
try:
if name:
return self.db.security_group_get_by_name(context,
context.project_id,
name)
elif id:
return self.db.security_group_get(context, id)
except exception.NotFound as exp:
if map_exception:
msg = exp.format_message()
self.raise_not_found(msg)
else:
raise
def list(self, context, names=None, ids=None, project=None,
search_opts=None):
self.ensure_default(context)
groups = []
if names or ids:
if names:
for name in names:
groups.append(self.db.security_group_get_by_name(context,
project,
name))
if ids:
for id in ids:
groups.append(self.db.security_group_get(context, id))
elif context.is_admin:
# TODO(eglynn): support a wider set of search options than just
# all_tenants, at least include the standard filters defined for
# the EC2 DescribeSecurityGroups API for the non-admin case also
if (search_opts and 'all_tenants' in search_opts):
groups = self.db.security_group_get_all(context)
else:
groups = self.db.security_group_get_by_project(context,
project)
elif project:
groups = self.db.security_group_get_by_project(context, project)
return groups
def destroy(self, context, security_group):
if security_group['name'] in RO_SECURITY_GROUPS:
msg = _("Unable to delete system group '%s'") % \
security_group['name']
self.raise_invalid_group(msg)
if self.db.security_group_in_use(context, security_group['id']):
msg = _("Security group is still in use")
self.raise_invalid_group(msg)
quotas = objects.Quotas(context=context)
quota_project, quota_user = quotas_obj.ids_from_security_group(
context, security_group)
try:
quotas.reserve(project_id=quota_project,
user_id=quota_user, security_groups=-1)
except Exception:
LOG.exception(_LE("Failed to update usages deallocating "
"security group"))
LOG.info(_LI("Delete security group %s"), security_group['name'],
context=context)
self.db.security_group_destroy(context, security_group['id'])
# Commit the reservations
quotas.commit()
def is_associated_with_server(self, security_group, instance_uuid):
"""Check if the security group is already associated
with the instance. If Yes, return True.
"""
if not security_group:
return False
instances = security_group.get('instances')
if not instances:
return False
for inst in instances:
if (instance_uuid == inst['uuid']):
return True
return False
@wrap_check_security_groups_policy
def add_to_instance(self, context, instance, security_group_name):
"""Add security group to the instance."""
security_group = self.db.security_group_get_by_name(context,
context.project_id,
security_group_name)
instance_uuid = instance.uuid
# check if the security group is associated with the server
if self.is_associated_with_server(security_group, instance_uuid):
raise exception.SecurityGroupExistsForInstance(
security_group_id=security_group['id'],
instance_id=instance_uuid)
self.db.instance_add_security_group(context.elevated(),
instance_uuid,
security_group['id'])
# NOTE(comstud): No instance_uuid argument to this compute manager
# call
self.compute_rpcapi.refresh_security_group_rules(context,
security_group['id'], host=instance.host)
@wrap_check_security_groups_policy
def remove_from_instance(self, context, instance, security_group_name):
"""Remove the security group associated with the instance."""
security_group = self.db.security_group_get_by_name(context,
context.project_id,
security_group_name)
instance_uuid = instance.uuid
# check if the security group is associated with the server
if not self.is_associated_with_server(security_group, instance_uuid):
raise exception.SecurityGroupNotExistsForInstance(
security_group_id=security_group['id'],
instance_id=instance_uuid)
self.db.instance_remove_security_group(context.elevated(),
instance_uuid,
security_group['id'])
# NOTE(comstud): No instance_uuid argument to this compute manager
# call
self.compute_rpcapi.refresh_security_group_rules(context,
security_group['id'], host=instance.host)
    def get_rule(self, context, id):
        """Return the security group rule with the given id.

        :param id: rule id (shadows the builtin; kept for API compatibility)

        Delegates missing rules to raise_not_found.
        """
        self.ensure_default(context)
        try:
            return self.db.security_group_rule_get(context, id)
        except exception.NotFound:
            msg = _("Rule (%s) not found") % id
            self.raise_not_found(msg)
def add_rules(self, context, id, name, vals):
"""Add security group rule(s) to security group.
Note: the Nova security group API doesn't support adding multiple
security group rules at once but the EC2 one does. Therefore,
this function is written to support both.
"""
count = objects.Quotas.count(context, 'security_group_rules', id)
try:
projected = count + len(vals)
objects.Quotas.limit_check(context, security_group_rules=projected)
except exception.OverQuota:
msg = _("Quota exceeded, too many security group rules.")
self.raise_over_quota(msg)
msg = _("Security group %(name)s added %(protocol)s ingress "
"(%(from_port)s:%(to_port)s)")
rules = []
for v in vals:
rule = self.db.security_group_rule_create(context, v)
rules.append(rule)
LOG.info(msg, {'name': name,
'protocol': rule.protocol,
'from_port': rule.from_port,
'to_port': rule.to_port})
self.trigger_rules_refresh(context, id=id)
return rules
def remove_rules(self, context, security_group, rule_ids):
msg = _("Security group %(name)s removed %(protocol)s ingress "
"(%(from_port)s:%(to_port)s)")
for rule_id in rule_ids:
rule = self.get_rule(context, rule_id)
LOG.info(msg, {'name': security_group['name'],
'protocol': rule.protocol,
'from_port': rule.from_port,
'to_port': rule.to_port})
self.db.security_group_rule_destroy(context, rule_id)
# NOTE(vish): we removed some rules, so refresh
self.trigger_rules_refresh(context, id=security_group['id'])
    def remove_default_rules(self, context, rule_ids):
        """Delete each of the given default security group rules by id."""
        for rule_id in rule_ids:
            self.db.security_group_default_rule_destroy(context, rule_id)
    def add_default_rules(self, context, vals):
        """Create one default security group rule per dict in *vals*.

        :returns: list of the created rule records
        """
        rules = [self.db.security_group_default_rule_create(context, v)
                 for v in vals]
        return rules
def default_rule_exists(self, context, values):
"""Indicates whether the specified rule values are already
defined in the default security group rules.
"""
for rule in self.db.security_group_default_rule_list(context):
keys = ('cidr', 'from_port', 'to_port', 'protocol')
for key in keys:
if rule.get(key) != values.get(key):
break
else:
return rule.get('id') or True
return False
    def get_all_default_rules(self, context):
        """Return every default security group rule.

        Any failure while listing is mapped to
        SecurityGroupDefaultRuleNotFound (broad except kept as-is).
        """
        try:
            rules = self.db.security_group_default_rule_list(context)
        except Exception:
            msg = 'cannot get default security group rules'
            raise exception.SecurityGroupDefaultRuleNotFound(msg)
        return rules
    def get_default_rule(self, context, id):
        """Return the default security group rule with the given id."""
        return self.db.security_group_default_rule_get(context, id)
    def validate_id(self, id):
        """Coerce a security group id to int.

        For non-integer input, delegates to raise_invalid_property (assumed
        to raise; if it ever returned normally this method would return
        None -- TODO confirm).
        """
        try:
            return int(id)
        except ValueError:
            msg = _("Security group id should be integer")
            self.raise_invalid_property(msg)
def trigger_rules_refresh(self, context, id):
"""Called when a rule is added to or removed from a security_group."""
security_group = self.db.security_group_get(
context, id, columns_to_join=['instances'])
for instance in security_group['instances']:
if instance.host is not None:
self.compute_rpcapi.refresh_instance_security_rules(
context, instance.host, instance)
def trigger_members_refresh(self, context, group_ids):
"""Called when a security group gains a new or loses a member.
Sends an update request to each compute node for each instance for
which this is relevant.
"""
# First, we get the security group rules that reference these groups as
# the grantee..
security_group_rules = set()
for group_id in group_ids:
security_group_rules.update(
self.db.security_group_rule_get_by_security_group_grantee(
context,
group_id))
# ..then we distill the rules into the groups to which they belong..
security_groups = set()
for rule in security_group_rules:
security_group = self.db.security_group_get(
context, rule['parent_group_id'],
columns_to_join=['instances'])
security_groups.add(security_group)
# ..then we find the instances that are members of these groups..
instances = {}
for security_group in security_groups:
for instance in security_group['instances']:
if instance.uuid not in instances:
instances[instance.uuid] = instance
# ..then we send a request to refresh the rules for each instance.
for instance in instances.values():
if instance.host:
self.compute_rpcapi.refresh_instance_security_rules(
context, instance.host, instance)
def get_instance_security_groups(self, context, instance_uuid,
detailed=False):
if detailed:
return self.db.security_group_get_by_instance(context,
instance_uuid)
instance = objects.Instance(uuid=instance_uuid)
groups = objects.SecurityGroupList.get_by_instance(context, instance)
return [{'name': group.name} for group in groups]
    def populate_security_groups(self, instance, security_groups):
        """Attach a secgroup list built from *security_groups* to
        instance.security_groups.

        :param instance: the instance object to mutate
        :param security_groups: iterable of group names, or None/empty
        """
        if not security_groups:
            # Make sure it's an empty list and not None
            security_groups = []
        instance.security_groups = security_group_obj.make_secgroup_list(
            security_groups)
|
JioCloud/nova
|
nova/compute/api.py
|
Python
|
apache-2.0
| 183,981
|
import sys

# Number of walkers; constraints presumably from the contest statement --
# anything outside (1, 50) exits immediately.
blind_num = int(input())
if blind_num <= 1 or blind_num >= 50:
    sys.exit(0)

# Second input line: space-separated signed integer positions.
s = input()
blind = s.split(" ")

# Each entry tracks a position and an infection flag (+1 sick / -1 healthy).
# The first person in the input starts out sick.
dict_blind = []; sick_num = 1
for item in range(0, len(blind)):
    blind_pro = {}
    if item == 0:
        blind_pro["issick"] = 1
    else:
        blind_pro["issick"] = -1
    blind_pro["pos"] = int(blind[item])
    dict_blind.append(blind_pro)

# Order by distance from the origin; the sign of 'pos' presumably encodes
# the walking direction -- TODO confirm against the problem statement.
dict_blind.sort(key=lambda e: abs(e.__getitem__("pos")))

# Time-step simulation: advance every walker by 0.5 per tick, remove walkers
# that reach either boundary (0 or 100), bounce adjacent walkers whose
# positions sum to zero, and spread the sickness on such meetings.
while(1):
    if len(dict_blind) <= 1:
        break
    for i in range(0, len(dict_blind)):
        dict_blind[i]['pos'] += 0.5 # change to 0.5
    j = 0
    while j < len(dict_blind)-1:
        if dict_blind[j]["pos"] == 0 or dict_blind[j]["pos"] == 100:
            # Walker left the segment; j is not advanced because the next
            # element shifted into slot j.
            dict_blind.pop(j)
            continue
        elif dict_blind[j]["pos"] + dict_blind[j+1]["pos"] == 0:
            # Meeting: both reverse (negate) their positions.
            dict_blind[j]["pos"] = -dict_blind[j]["pos"]
            dict_blind[j + 1]["pos"] = -dict_blind[j + 1]["pos"]
            # issick values are +1/-1, so a zero sum means exactly one of
            # the pair is sick: the other one catches it.
            if dict_blind[j]["issick"] + dict_blind[j+1]["issick"] == 0:
                sick_num += 1
                dict_blind[j]["issick"] = 1
                dict_blind[j+1]["issick"] = 1
        j += 1

print(sick_num)
|
IT-SeanWANG/CodeJam
|
2017_2nd/Q1_Refer2.py
|
Python
|
apache-2.0
| 1,186
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated schema migration for the 'anothercrm' app.

    Alters Person.relationships to set blank=True, i.e. the M2M field may
    be left empty during form validation.
    """

    dependencies = [
        ('anothercrm', '0002_auto_20150526_2247'),
    ]

    operations = [
        migrations.AlterField(
            model_name='person',
            name='relationships',
            field=models.ManyToManyField(to='anothercrm.Relationship', blank=True),
        ),
    ]
|
monuszko/django-anothercrm
|
anothercrm/migrations/0003_auto_20150526_2254.py
|
Python
|
agpl-3.0
| 445
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import optparse
import os
import subprocess
import sys
import xml.etree.ElementTree as ET
class VersionBuilder:
"""
Used to build a version definition file
"""
def __init__(self, filename):
self._check_xmllint()
self.filename = filename
if os.path.exists(filename):
tree = ET.ElementTree()
tree.parse(filename)
root = tree.getroot()
else:
attribs = {}
attribs['xmlns:xsi'] = "http://www.w3.org/2001/XMLSchema-instance"
attribs['xsi:noNamespaceSchemaLocation'] = "version_definition.xsd"
root = ET.Element("repository-version", attribs)
ET.SubElement(root, "release")
ET.SubElement(root, "manifest")
ET.SubElement(root, "available-services")
ET.SubElement(root, "repository-info")
self.root_element = root
def persist(self):
"""
Saves the XML file
"""
p = subprocess.Popen(['xmllint', '--format', '--output', self.filename, '-'], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate(input=ET.tostring(self.root_element))
def finalize(self, xsd_file):
"""
Validates the XML file against the XSD
"""
args = ['xmllint', '--noout', '--load-trace', '--schema', xsd_file, self.filename]
p = subprocess.Popen(args, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode != 0:
raise Exception(stderr)
if len(stdout) > 0:
print(stdout.decode("UTF-8"))
if len(stderr) > 0:
print(stderr.decode("UTF-8"))
def set_release(self, type=None, stack=None, version=None, build=None, notes=None, display=None,
compatible=None):
"""
Create elements of the 'release' parent
"""
release_element = self.root_element.find("./release")
if release_element is None:
raise Exception("Element 'release' is not found")
if type:
update_simple(release_element, "type", type)
if stack:
update_simple(release_element, "stack-id", stack)
if version:
update_simple(release_element, "version", version)
if build:
update_simple(release_element, "build", build)
if compatible:
update_simple(release_element, "compatible-with", compatible)
if notes:
update_simple(release_element, "release-notes", notes)
if display:
update_simple(release_element, "display", display)
def set_os(self, os_family, package_version=None):
repo_parent = self.root_element.find("./repository-info")
if repo_parent is None:
raise Exception("'repository-info' element is not found")
os_element = self.findByAttributeValue(repo_parent, "./os", "family", os_family)
if os_element is None:
os_element = ET.SubElement(repo_parent, 'os')
os_element.set('family', os_family)
if package_version:
pv_element = os_element.find("package-version")
if pv_element is None:
pv_element = ET.SubElement(os_element, "package-version")
pv_element.text = package_version
def add_manifest(self, id, service_name, version, version_id=None, release_version=None):
"""
Add a manifest service. A manifest lists all services in a repo, whether they are to be
upgraded or not.
"""
manifest_element = self.root_element.find("./manifest")
if manifest_element is None:
raise Exception("Element 'manifest' is not found")
service_element = self.findByAttributeValue(manifest_element, "./service", "id", id)
if service_element is None:
service_element = ET.SubElement(manifest_element, "service")
service_element.set('id', id)
service_element.set('name', service_name)
service_element.set('version', version)
if version_id:
service_element.set('version-id', version_id)
if release_version:
service_element.set('release-version', release_version)
def add_available(self, manifest_id, available_components=None):
"""
Adds services available to upgrade for patches
"""
manifest_element = self.root_element.find("./manifest")
if manifest_element is None:
raise Exception("'manifest' element is not found")
service_element = self.findByAttributeValue(manifest_element, "./service", "id", manifest_id)
if service_element is None:
raise Exception("Cannot add an available service for {0}; it's not on the manifest".format(manifest_id))
available_element = self.root_element.find("./available-services")
if available_element is None:
raise Exception("'available-services' is not found")
service_element = self.findByAttributeValue(available_element, "./service", "idref", manifest_id)
if service_element is not None:
available_element.remove(service_element)
service_element = ET.SubElement(available_element, "service")
service_element.set('idref', manifest_id)
if available_components:
components = available_components.split(',')
for component in components:
e = ET.SubElement(service_element, 'component')
e.text = component
def add_repo(self, os_family, repo_id, repo_name, base_url, unique, tags):
"""
Adds a repository
"""
repo_parent = self.root_element.find("./repository-info")
if repo_parent is None:
raise Exception("'repository-info' element is not found")
os_element = self.findByAttributeValue(repo_parent, "./os", "family", os_family)
if os_element is None:
os_element = ET.SubElement(repo_parent, 'os')
os_element.set('family', os_family)
if self.useNewSyntax():
repo_element = os_element.find("./repo/[reponame='{0}']".format(repo_name))
else:
repo_element = self.findByValue(os_element, "./repo/reponame", repo_name)
if repo_element is not None:
os_element.remove(repo_element)
repo_element = ET.SubElement(os_element, 'repo')
e = ET.SubElement(repo_element, 'baseurl')
e.text = base_url
e = ET.SubElement(repo_element, 'repoid')
e.text = repo_id
e = ET.SubElement(repo_element, 'reponame')
e.text = repo_name
if unique is not None:
e = ET.SubElement(repo_element, 'unique')
e.text = unique
if tags is not None:
e = ET.SubElement(repo_element, 'tags')
tag_names = tags.split(',')
for tag in tag_names:
t = ET.SubElement(e, 'tag')
t.text = tag
def _check_xmllint(self):
"""
Verifies utility xmllint is available
"""
try:
p = subprocess.Popen(['xmllint', '--version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False)
(stdout, stderr) = p.communicate()
if p.returncode != 0:
raise Exception("xmllint command does not appear to be available")
except:
raise Exception("xmllint command does not appear to be available")
def findByAttributeValue(self, root, element, attribute, value):
if self.useNewSyntax():
return root.find("./{0}[@{1}='{2}']".format(element, attribute, value))
else:
for node in root.findall("{0}".format(element)):
if node.attrib[attribute] == value:
return node
return None;
def findByValue(self, root, element, value):
for node in root.findall("{0}".format(element)):
if node.text == value:
return node
return None
def useNewSyntax(self):
#Python2.7 and newer shipps with ElementTree that supports a different syntax for XPath queries
major=sys.version_info[0]
minor=sys.version_info[1]
if major > 3 :
return True
elif major == 2:
return (minor > 6)
else:
return False;
def update_simple(parent, name, value):
    """
    Helper method to either update or create the element.

    Finds the direct child *name* of *parent*, creating it when missing,
    and sets its text to *value*.
    """
    element = parent.find('./' + name)
    if element is None:
        element = ET.SubElement(parent, name)
    # Both the freshly created and the pre-existing element receive the same
    # text; the original duplicated this assignment in both branches.
    element.text = value
def process_release(vb, options):
    """
    Create elements of the 'release' parent

    Forwards each --release-* command line option, when present, to
    vb.set_release as an individual update.
    """
    if options.release_type:
        vb.set_release(type=options.release_type)
    if options.release_stack:
        vb.set_release(stack=options.release_stack)
    if options.release_version:
        vb.set_release(version=options.release_version)
    if options.release_build:
        vb.set_release(build=options.release_build)
    if options.release_compatible:
        vb.set_release(compatible=options.release_compatible)
    if options.release_notes:
        vb.set_release(notes=options.release_notes)
    if options.release_display:
        vb.set_release(display=options.release_display)
    if options.release_package_version:
        # NOTE(review): set_release must accept a package_version keyword for
        # this call to succeed; as written it raises TypeError whenever
        # --release-package-version is supplied -- TODO confirm/fix.
        vb.set_release(package_version=options.release_package_version)
def process_manifest(vb, options):
"""
Creates the manifest element
"""
if not options.manifest:
return
vb.add_manifest(options.manifest_id, options.manifest_service, options.manifest_version, options.manifest_version_id,
options.manifest_release_version)
def process_available(vb, options):
"""
Processes available service elements
"""
if not options.available:
return
vb.add_available(options.manifest_id, options.available_components)
def process_os(vb, options):
if not options.os:
return
vb.set_os(options.os_family, options.os_package_version)
def process_repo(vb, options):
"""
Processes repository options. This method doesn't update or create individual elements, it
creates the entire repo structure
"""
if not options.repo:
return
vb.add_repo(options.repo_os, options.repo_id, options.repo_name, options.repo_url,
options.unique, options.repo_tags)
def validate_manifest(parser, options):
"""
Validates manifest options from the command line
"""
if not options.manifest:
return
template = "When specifying --manifest, {0} is also required"
if not options.manifest_id:
parser.error(template.format("--manifest-id"))
if not options.manifest_service:
parser.error(template.format("--manifest-service"))
if not options.manifest_version:
parser.error(template.format("--manifest-version"))
def validate_available(parser, options):
"""
Validates available service options from the command line
"""
if not options.available:
return
if not options.manifest_id:
parser.error("When specifying --available, --manifest-id is also required")
def validate_os(parser, options):
if not options.os:
return
if not options.os_family:
parser.error("When specifying --os, --os-family is also required")
def validate_repo(parser, options):
    """
    Validates repo options from the command line

    With --repo given, each of --repo-os/--repo-url/--repo-id/--repo-name
    is required; parser.error is invoked for any that is missing.
    """
    if not options.repo:
        return
    template = "When specifying --repo, {0} is also required"
    required = (("--repo-os", options.repo_os),
                ("--repo-url", options.repo_url),
                ("--repo-id", options.repo_id),
                ("--repo-name", options.repo_name))
    for flag, supplied in required:
        if not supplied:
            parser.error(template.format(flag))
def main(argv):
parser = optparse.OptionParser(
epilog="OS utility 'xmllint' is required for this tool to function. It handles pretty-printing and XSD validation.")
parser.add_option('--file', dest='filename',
help="The output XML file")
parser.add_option('--finalize', action='store_true', dest='finalize',
help="Finalize and validate the XML file")
parser.add_option('--xsd', dest='xsd_file',
help="The XSD location when finalizing")
parser.add_option('--release-type', type='choice', choices=['STANDARD', 'PATCH', 'MAINT'], dest='release_type' ,
help="Indicate the release type: i.e. STANDARD, PATCH, MAINT")
parser.add_option('--release-stack', dest='release_stack',
help="The stack id: e.g. HDP-2.4")
parser.add_option('--release-version', dest='release_version',
help="The release version without build number: e.g. 2.4.0.1")
parser.add_option('--release-build', dest='release_build',
help="The release build number: e.g. 1234")
parser.add_option('--release-compatible', dest='release_compatible',
help="Regular Expression string to identify version compatibility for patches: e.g. 2.4.1.[0-9]")
parser.add_option('--release-notes', dest='release_notes',
help="A http link to the documentation notes")
parser.add_option('--release-display', dest='release_display',
help="The display name for this release")
parser.add_option('--release-package-version', dest='release_package_version',
help="Identifier to use when installing packages, generally a part of the package name")
parser.add_option('--manifest', action='store_true', dest='manifest',
help="Add a manifest service with other options: --manifest-id, --manifest-service, --manifest-version, --manifest-version-id, --manifest-release-version")
parser.add_option('--manifest-id', dest='manifest_id',
help="Unique ID for a service in a manifest. Required when specifying --manifest and --available")
parser.add_option('--manifest-service', dest='manifest_service')
parser.add_option('--manifest-version', dest='manifest_version')
parser.add_option('--manifest-version-id', dest='manifest_version_id')
parser.add_option('--manifest-release-version', dest='manifest_release_version')
parser.add_option('--available', action='store_true', dest='available',
help="Add an available service with other options: --manifest-id, --available-components --service-release-version")
parser.add_option('--available-components', dest='available_components',
help="A CSV of service components that are intended to be upgraded via patch. \
Omitting this implies the entire service should be upgraded")
parser.add_option('--os', action='store_true', dest='os', help="Add OS data with options --os-family, --os-package-version")
parser.add_option('--os-family', dest='os_family', help="The operating system: i.e redhat7, debian7, ubuntu12, ubuntu14, suse11, suse12")
parser.add_option('--os-package-version', dest='os_package_version',
help="The package version to use for the OS")
parser.add_option('--repo', action='store_true', dest='repo',
help="Add repository data with options: --repo-os, --repo-url, --repo-id, --repo-name, --repo-unique")
parser.add_option('--repo-os', dest='repo_os',
help="The operating system type: i.e. redhat6, redhat7, debian7, debian9, ubuntu12, ubuntu14, ubuntu16, suse11, suse12")
parser.add_option('--repo-url', dest='repo_url',
help="The base url for the repository data")
parser.add_option('--repo-unique', dest='unique', type='choice', choices=['true', 'false'],
help="Indicates base url should be unique")
parser.add_option('--repo-id', dest='repo_id', help="The ID of the repo")
parser.add_option('--repo-name', dest='repo_name', help="The name of the repo")
parser.add_option('--repo-tags', dest='repo_tags', help="The CSV tags for the repo")
(options, args) = parser.parse_args()
# validate_filename
if not options.filename:
parser.error("--file option is required")
# validate_finalize
if options.finalize and not options.xsd_file:
parser.error("Must supply XSD (--xsd) when finalizing")
validate_manifest(parser, options)
validate_available(parser, options)
validate_os(parser, options)
validate_repo(parser, options)
vb = VersionBuilder(options.filename)
process_release(vb, options)
process_manifest(vb, options)
process_available(vb, options)
process_os(vb, options)
process_repo(vb, options)
# save file
vb.persist()
if options.finalize:
vb.finalize(options.xsd_file)
if __name__ == "__main__":
main(sys.argv)
|
sekikn/ambari
|
contrib/version-builder/version_builder.py
|
Python
|
apache-2.0
| 16,391
|
from setuptools import setup, find_packages
# Packaging configuration (setuptools) for the 'brooks' simulator.
setup(
    name='brooks',
    version='0.1',
    packages=find_packages(),

    # metadata for upload to PyPI
    author='Sixty North AS',
    author_email='rob@sixty-north.com',
    description="A Brooks' Law simulator",
    license='MIT',
    keywords='simulation',
    url='https://github.com/sixty-north/brooks',
    # download_url = '',
    long_description="Tools for simulating the effect of Brooks' Law in "
                     "software development.",
    zip_safe=True,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
    ],
    platforms='any',
    # setup_requires=[],
    # Runtime dependencies: numeric/plotting stack plus docopt for the CLI.
    install_requires=[
        'numpy',
        'docopt',
        'matplotlib',
        'pandas',
        'seaborn',
    ],
    # entry_points={
    #     'console_scripts': [
    #         'yapga = yapga.app.main:main',
    #     ],
    # },
)
|
sixty-north/brooks
|
setup.py
|
Python
|
agpl-3.0
| 1,066
|
#
# Copyright 2009-2011 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
import os
import threading
import logging
import signal
import select
import errno
import re
from StringIO import StringIO
import time
import functools
from collections import namedtuple
from contextlib import contextmanager
from operator import itemgetter
from vdsm.config import config
from vdsm import constants
from vdsm import utils
import vdsm.supervdsm as svdsm
import misc
import fileUtils
import sd
import lvm
import clusterlock
import blockVolume
import multipath
import resourceFactories
from resourceFactories import LVM_ACTIVATION_NAMESPACE
from persistentDict import PersistentDict, DictValidator
import iscsi
import storage_exception as se
from storage_mailbox import MAILBOX_SIZE
import resourceManager as rm
import mount
import volume
STORAGE_DOMAIN_TAG = "RHAT_storage_domain"
STORAGE_UNREADY_DOMAIN_TAG = STORAGE_DOMAIN_TAG + "_UNREADY"
MASTERLV = "master"
SPECIAL_LVS = (sd.METADATA, sd.LEASES, sd.IDS, sd.INBOX, sd.OUTBOX, MASTERLV)
MASTERLV_SIZE = "1024" # In MiB = 2 ** 20 = 1024 ** 2 => 1GiB
BlockSDVol = namedtuple("BlockSDVol", "name, image, parent")
log = logging.getLogger("Storage.BlockSD")
# FIXME: Make this calculated from something logical
RESERVED_METADATA_SIZE = 40 * (2 ** 20)
RESERVED_MAILBOX_SIZE = MAILBOX_SIZE * clusterlock.MAX_HOST_ID
METADATA_BASE_SIZE = 378
# VG's min metadata threshold is 20%
VG_MDA_MIN_THRESHOLD = 0.2
# VG's metadata size in MiB
VG_METADATASIZE = 128
MAX_PVS_LIMIT = 10 # BZ#648051
MAX_PVS = config.getint('irs', 'maximum_allowed_pvs')
if MAX_PVS > MAX_PVS_LIMIT:
log.warning("maximum_allowed_pvs = %d ignored. MAX_PVS = %d", MAX_PVS,
MAX_PVS_LIMIT)
MAX_PVS = MAX_PVS_LIMIT
PVS_METADATA_SIZE = MAX_PVS * 142
SD_METADATA_SIZE = 2048
DEFAULT_BLOCKSIZE = 512
DMDK_VGUUID = "VGUUID"
DMDK_PV_REGEX = re.compile(r"^PV\d+$")
DMDK_LOGBLKSIZE = "LOGBLKSIZE"
DMDK_PHYBLKSIZE = "PHYBLKSIZE"
VERS_METADATA_LV = (0,)
VERS_METADATA_TAG = (2, 3)
def encodePVInfo(pvInfo):
    """Serialize a PV info dict into the comma separated tag string format
    consumed by decodePVInfo ("pv:...,uuid:...,pestart:...,...").
    """
    return ("pv:{guid},uuid:{uuid},pestart:{pestart},"
            "pecount:{pecount},mapoffset:{mapoffset}".format(**pvInfo))
def decodePVInfo(value):
    """Parse a PV metadata tag string (see encodePVInfo) into a dict,
    renaming the 'pv' key to 'guid'.
    """
    pairs = (field.split(":") for field in value.split(","))
    pvInfo = {key: val for key, val in pairs}
    pvInfo["guid"] = pvInfo.pop("pv")
    return pvInfo
BLOCK_SD_MD_FIELDS = sd.SD_MD_FIELDS.copy()
# TBD: Do we really need this key?
BLOCK_SD_MD_FIELDS.update({
# Key dec, enc
DMDK_PV_REGEX: (decodePVInfo, encodePVInfo),
DMDK_VGUUID: (str, str),
DMDK_LOGBLKSIZE: (functools.partial(sd.intOrDefault, DEFAULT_BLOCKSIZE),
str),
DMDK_PHYBLKSIZE: (functools.partial(sd.intOrDefault, DEFAULT_BLOCKSIZE),
str),
})
INVALID_CHARS = re.compile(r"[^a-zA-Z0-9_+.\-/=!:#]")
LVM_ENC_ESCAPE = re.compile("&(\d+)&")
# Move to lvm
def lvmTagEncode(s):
return INVALID_CHARS.sub(lambda c: "&%s&" % ord(c.group()), s)
def lvmTagDecode(s):
return LVM_ENC_ESCAPE.sub(lambda c: unichr(int(c.groups()[0])), s)
def _tellEnd(devPath):
"""Size in bytes of a block device.
stat.st_size of block devices is identically 0.
"""
with open(devPath, "rb") as f:
f.seek(0, os.SEEK_END)
return f.tell()
def _getVolsTree(sdUUID):
    """Map LV name -> BlockSDVol(name, image, parent) for every volume LV in
    storage domain *sdUUID*.

    Volume LVs are recognized by carrying both an image tag and a parent
    tag; special (non-volume) LVs are skipped silently, any other untagged
    LV is logged and ignored.
    """
    lvs = lvm.getLV(sdUUID)
    vols = {}
    for lv in lvs:
        image = ""
        parent = ""
        for tag in lv.tags:
            if tag.startswith(blockVolume.TAG_PREFIX_IMAGE):
                image = tag[len(blockVolume.TAG_PREFIX_IMAGE):]
            elif tag.startswith(blockVolume.TAG_PREFIX_PARENT):
                parent = tag[len(blockVolume.TAG_PREFIX_PARENT):]
            if parent and image:
                vols[lv.name] = BlockSDVol(lv.name, image, parent)
                break
        else:
            if lv.name not in SPECIAL_LVS:
                # Fix: the original message lacked a separator between the
                # two literals ("...minimal tag settags %s") and interpolated
                # eagerly with '%' instead of using lazy logging arguments.
                log.warning("Ignoring Volume %s that lacks minimal tag set: "
                            "tags %s", lv.name, lv.tags)
    return vols
def getAllVolumes(sdUUID):
    """
    Return dict {volUUID: ((imgUUIDs,), parentUUID)} of the domain.

    imgUUIDs is a list of all images dependant on volUUID.
    For template based volumes, the first image is the template's image.
    For other volumes, there is just a single imageUUID.
    Template self image is the 1st term in template volume entry images.
    """
    vols = _getVolsTree(sdUUID)
    res = {}
    for volName in vols.iterkeys():
        res[volName] = {'imgs': [], 'parent': None}

    # Each value is a BlockSDVol(name, image, parentVolume) record.
    for volName, vImg, parentVol in vols.itervalues():
        res[volName]['parent'] = parentVol
        if vImg not in res[volName]['imgs']:
            # Keep the volume's own image first in its image list.
            res[volName]['imgs'].insert(0, vImg)
        if parentVol != sd.BLANK_UUID:
            try:
                imgIsUnknown = vImg not in res[parentVol]['imgs']
            except KeyError:
                # Parent volume missing from the domain: orphan volume.
                log.warning("Found broken image %s, orphan volume %s/%s, "
                            "parent %s", vImg, sdUUID, volName, parentVol)
            else:
                if imgIsUnknown:
                    # Record the child image as a dependency of the parent
                    # (template) volume.
                    res[parentVol]['imgs'].append(vImg)

    return dict((k, sd.ImgsPar(tuple(v['imgs']), v['parent']))
                for k, v in res.iteritems())
def deleteVolumes(sdUUID, vols):
    """Remove the given volume LVs from the domain's VG."""
    lvm.removeLVs(sdUUID, vols)
def _zeroVolume(sdUUID, volUUID):
    """Fill a block volume with zeroes using an asynchronous dd.

    This function requires an active LV.  Returns the running process;
    the caller is responsible for polling/waiting on it.
    """
    dm = lvm.lvDmDev(sdUUID, volUUID)
    size = multipath.getDeviceSize(dm)  # Bytes
    # TODO: Change for zero 128 M chuncks and log.
    # 128 M is the vdsm extent size default
    BS = constants.MEGAB  # 1024 ** 2 = 1 MiB
    # NOTE(review): integer division drops a trailing partial MiB;
    # presumably LV sizes are always extent (MiB) aligned - confirm.
    count = size / BS
    cmd = [constants.EXT_DD, "oflag=%s" % misc.DIRECTFLAG, "if=/dev/zero",
           "of=%s" % lvm.lvPath(sdUUID, volUUID), "bs=%s" % BS,
           "count=%s" % count]
    # Low CPU and idle IO priority: zeroing must not starve running VMs.
    p = misc.execCmd(cmd, sync=False, nice=utils.NICENESS.HIGH,
                     ioclass=utils.IOCLASS.IDLE, deathSignal=signal.SIGKILL)
    return p
def zeroImgVolumes(sdUUID, imgUUID, volUUIDs):
    """Zero the given image volumes and delete the successfully zeroed ones.

    Spawns one asynchronous dd per volume, polls them until all finish,
    and removes every volume that was zeroed successfully.  Volumes whose
    zeroing failed are left in place for manual cleanup (an error is
    logged for each).
    """
    ProcVol = namedtuple("ProcVol", "proc, vol")
    # Put a sensible value for dd zeroing a 128 M or 1 G chunk and lvremove
    # spent time.
    ZEROING_TIMEOUT = 60000  # [milliseconds]
    log.debug("sd: %s, LVs: %s, img: %s", sdUUID, volUUIDs, imgUUID)
    # Following call to changelv is separate since setting rw permission on an
    # LV fails if the LV is already set to the same value, hence we would not
    # be able to differentiate between a real failure of deltag/addtag and one
    # we would like to ignore (permission is the same)
    try:
        lvm.changelv(sdUUID, volUUIDs, ("--permission", "rw"))
    except se.StorageException as e:
        # Hope this only means that some volumes were already writable.
        log.debug("Ignoring failed permission change: %s", e)
    # blank the volumes.
    zerofds = {}
    poller = select.poll()
    for volUUID in volUUIDs:
        proc = _zeroVolume(sdUUID, volUUID)
        fd = proc.stdout.fileno()
        zerofds[fd] = ProcVol(proc, volUUID)
        poller.register(fd, select.EPOLLHUP)

    # Wait until all the asyncs procs return
    # Yes, this is a potentially infinite loop. Kill the vdsm task.
    while zerofds:
        fdevents = poller.poll(ZEROING_TIMEOUT)  # [(fd, event)]
        toDelete = []
        for fd, event in fdevents:
            proc, vol = zerofds[fd]
            if not proc.wait(0):
                continue
            else:
                poller.unregister(fd)
                zerofds.pop(fd)
                if proc.returncode != 0:
                    log.error("zeroing %s/%s failed. Zero and remove this "
                              "volume manually. rc=%s %s", sdUUID, vol,
                              proc.returncode, proc.stderr.read(1000))
                else:
                    # BUGFIX: this used to log the stale loop variable
                    # volUUID (last spawned volume) instead of the volume
                    # that actually finished.
                    log.debug("%s/%s was zeroed and will be deleted",
                              sdUUID, vol)
                    toDelete.append(vol)
        if toDelete:
            try:
                deleteVolumes(sdUUID, toDelete)
            except se.CannotRemoveLogicalVolume:
                # TODO: Add the list of removed fail volumes to the exception.
                log.error("Remove failed for some of VG: %s zeroed volumes: "
                          "%s", sdUUID, toDelete, exc_info=True)

    log.debug("finished with VG:%s LVs: %s, img: %s", sdUUID, volUUIDs,
              imgUUID)
    return
class VGTagMetadataRW(object):
    """Read/write storage domain metadata stored as VG tags.

    Each metadata line is kept as one VG tag, prefixed with MDT_ and
    lvm-tag escaped (see lvmTagEncode/lvmTagDecode).
    """
    log = logging.getLogger("Storage.Metadata.VGTagMetadataRW")
    METADATA_TAG_PREFIX = "MDT_"
    METADATA_TAG_PREFIX_LEN = len(METADATA_TAG_PREFIX)

    def __init__(self, vgName):
        self._vgName = vgName

    def readlines(self):
        """Return the decoded metadata lines found in the VG tags."""
        # Force a fresh read of the VG so we never act on cached tags.
        lvm.invalidateVG(self._vgName)
        vg = lvm.getVG(self._vgName)
        metadata = []
        for tag in vg.tags:
            if not tag.startswith(self.METADATA_TAG_PREFIX):
                continue

            metadata.append(lvmTagDecode(tag[self.METADATA_TAG_PREFIX_LEN:]))

        return metadata

    def writelines(self, lines):
        """Replace the stored metadata with `lines`.

        Computes the minimal tag delta so adds and removes are applied in
        a single lvm operation.
        """
        currentMetadata = set(self.readlines())
        newMetadata = set(lines)

        # Remove all items that do not exist in the new metadata
        toRemove = [self.METADATA_TAG_PREFIX + lvmTagEncode(item) for item in
                    currentMetadata.difference(newMetadata)]

        # Add all missing items that do no exist in the old metadata
        toAdd = [self.METADATA_TAG_PREFIX + lvmTagEncode(item) for item in
                 newMetadata.difference(currentMetadata)]

        if len(toAdd) == 0 and len(toRemove) == 0:
            return

        self.log.debug("Updating metadata adding=%s removing=%s",
                       ", ".join(toAdd), ", ".join(toRemove))
        lvm.changeVGTags(self._vgName, delTags=toRemove, addTags=toAdd)
class LvMetadataRW(object):
    """
    Block Storage Domain metadata implementation

    Reads and writes metadata lines stored at a fixed byte offset inside
    a dedicated metadata LV.
    """
    log = logging.getLogger("Storage.Metadata.LvMetadataRW")

    def __init__(self, vgName, lvName, offset, size):
        self._size = size      # size in bytes of the metadata area
        self._lvName = lvName
        self._vgName = vgName
        self._offset = offset  # byte offset of the area inside the LV
        self.metavol = lvm.lvPath(vgName, lvName)

    def readlines(self):
        """Return the metadata lines, stripped of trailing zero padding."""
        # Fetch the metadata from metadata volume
        lvm.activateLVs(self._vgName, self._lvName)

        m = misc.readblock(self.metavol, self._offset, self._size)
        # Read from metadata volume will bring a load of zeroes trailing
        # actual metadata. Strip it out.
        metadata = [i for i in m if len(i) > 0 and i[0] != '\x00' and "=" in i]

        return metadata

    def writelines(self, lines):
        """Write `lines` to the metadata area, zero padding up to its
        full size.

        Raises se.MetadataOverflowError if the lines do not fit.
        """
        lvm.activateLVs(self._vgName, self._lvName)

        # Write `metadata' to metadata volume
        metaStr = StringIO()

        for line in lines:
            metaStr.write(line)
            metaStr.write("\n")

        # NOTE(review): .pos is a py2 StringIO.StringIO attribute; an
        # io.StringIO would need tell() instead - confirm if porting.
        if metaStr.pos > self._size:
            raise se.MetadataOverflowError(metaStr.getvalue())

        # Clear out previous data - it is a volume, not a file
        metaStr.write('\0' * (self._size - metaStr.pos))

        data = metaStr.getvalue()
        with fileUtils.DirectFile(self.metavol, "r+d") as f:
            f.seek(self._offset)
            f.write(data)
def LvBasedSDMetadata(vg, lv):
    """Return a validated, persistent metadata dict backed by an LV.

    (Converted from an assigned lambda - PEP 8 E731; behavior and the
    public name are unchanged.)
    """
    return DictValidator(
        PersistentDict(LvMetadataRW(vg, lv, 0, SD_METADATA_SIZE)),
        BLOCK_SD_MD_FIELDS)


def TagBasedSDMetadata(vg):
    """Return a validated, persistent metadata dict backed by VG tags."""
    return DictValidator(
        PersistentDict(VGTagMetadataRW(vg)),
        BLOCK_SD_MD_FIELDS)
def selectMetadata(sdUUID):
    """Pick the metadata backend actually holding this domain's data.

    Prefer LV based metadata when the metadata LV contains entries;
    otherwise fall back to VG tag based metadata.
    """
    lvMetadata = LvBasedSDMetadata(sdUUID, sd.METADATA)
    if len(lvMetadata) > 0:
        return lvMetadata
    return TagBasedSDMetadata(sdUUID)
def metadataValidity(vg):
    """
    Return the metadata validity:
     mdathreshold - False if the VG's metadata exceeded its threshold,
                    else True
     mdavalid - False if the VG's metadata size too small, else True
    """
    mdaSize = int(vg.vg_mda_size)
    mdaFree = int(vg.vg_mda_free)

    return {
        'mdathreshold': mdaFree >= mdaSize * VG_MDA_MIN_THRESHOLD,
        'mdavalid': mdaSize >= VG_METADATASIZE * constants.MEGAB / 2,
    }
class BlockStorageDomainManifest(sd.StorageDomainManifest):
    """Manifest of a block (LVM based) storage domain.

    Holds the domain metadata and implements metadata / volume layout
    operations that do not require a full StorageDomain object.
    """
    mountpoint = os.path.join(sd.StorageDomain.storage_repository,
                              sd.DOMAIN_MNT_POINT, sd.BLOCKSD_DIR)

    def __init__(self, sdUUID, metadata=None):
        domaindir = os.path.join(self.mountpoint, sdUUID)

        if metadata is None:
            metadata = selectMetadata(sdUUID)
        sd.StorageDomainManifest.__init__(self, sdUUID, domaindir, metadata)

        # _extendlock is used to prevent race between
        # VG extend and LV extend.
        self._extendlock = threading.Lock()

        try:
            self.logBlkSize = self.getMetaParam(DMDK_LOGBLKSIZE)
            self.phyBlkSize = self.getMetaParam(DMDK_PHYBLKSIZE)
        except KeyError:
            # 512 by Saggi "Trust me (Smoch Alai (sic))"
            # *blkSize keys may be missing from metadata only for domains that
            # existed before the introduction of the keys.
            # Such domains supported only 512 sizes
            self.logBlkSize = 512
            self.phyBlkSize = 512

    def readMetadataMapping(self):
        """Return only the PV mapping entries (PV0..PVn) of the metadata."""
        meta = self.getMetadata()
        # py2: keys() returns a list, so deleting while iterating is safe.
        for key in meta.keys():
            if not DMDK_PV_REGEX.match(key):
                del meta[key]

        self.log.info("META MAPPING: %s" % meta)
        return meta

    def getReadDelay(self):
        """Return the time (seconds) to read 4K from the metadata LV."""
        stats = misc.readspeed(lvm.lvPath(self.sdUUID, sd.METADATA), 4096)
        return stats['seconds']

    def getVSize(self, imgUUUID, volUUID):
        """ Return the block volume size in bytes. """
        # NOTE: imgUUUID (sic) is unused; kept for interface compatibility
        # with file based domains.
        try:
            size = _tellEnd(lvm.lvPath(self.sdUUID, volUUID))
        except IOError as e:
            if e.errno == os.errno.ENOENT:
                # Inactive volume has no /dev entry. Fallback to lvm way.
                size = lvm.getLV(self.sdUUID, volUUID).size
            else:
                self.log.warn("Could not get size for vol %s/%s",
                              self.sdUUID, volUUID, exc_info=True)
                raise

        return int(size)

    # On block domains the allocated size equals the apparent size.
    getVAllocSize = getVSize

    def getLeasesFilePath(self):
        """Return the path to the leases LV, activating it first."""
        # TODO: Determine the path without activating the LV
        lvm.activateLVs(self.sdUUID, [sd.LEASES])
        return lvm.lvPath(self.sdUUID, sd.LEASES)

    def getIdsFilePath(self):
        """Return the path to the ids LV, activating it first."""
        # TODO: Determine the path without activating the LV
        lvm.activateLVs(self.sdUUID, [sd.IDS])
        return lvm.lvPath(self.sdUUID, sd.IDS)

    def extendVolume(self, volumeUUID, size, isShuttingDown=None):
        """Extend a volume LV to `size`, serialized against VG extension."""
        with self._extendlock:
            # FIXME: following line.
            lvm.extendLV(self.sdUUID, volumeUUID, size)  # , isShuttingDown)

    @classmethod
    def getMetaDataMapping(cls, vgName, oldMapping={}):
        """Build the PV0..PVn metadata mapping records for a VG.

        The PV holding the metadata LV must be first (PV0) so that volume
        metadata offsets derived from the mapping remain stable.  Known
        PVs from oldMapping keep their existing records.
        """
        firstDev, firstExtent = lvm.getFirstExt(vgName, sd.METADATA)
        firstExtent = int(firstExtent)
        if firstExtent != 0:
            cls.log.error("INTERNAL: metadata ext is not 0")
            raise se.MetaDataMappingError("vg %s: metadata extent is not the "
                                          "first extent" % vgName)

        pvlist = list(lvm.listPVNames(vgName))

        # Move the metadata PV to the front of the list.
        pvlist.remove(firstDev)
        pvlist.insert(0, firstDev)
        cls.log.info("Create: SORT MAPPING: %s" % pvlist)

        mapping = {}
        devNum = len(oldMapping)
        for dev in pvlist:
            knownDev = False
            for pvID, oldInfo in oldMapping.iteritems():
                if os.path.basename(dev) == oldInfo["guid"]:
                    mapping[pvID] = oldInfo
                    knownDev = True
                    break

            if knownDev:
                continue

            pv = lvm.getPV(dev)
            pvInfo = {}
            pvInfo["guid"] = os.path.basename(pv.name)
            pvInfo["uuid"] = pv.uuid
            # this is another trick, it's not the
            # the pestart value you expect, it's just
            # 0, always
            pvInfo["pestart"] = 0
            pvInfo["pecount"] = pv.pe_count
            if devNum == 0:
                mapOffset = 0
            else:
                prevDevNum = devNum - 1
                try:
                    prevInfo = mapping["PV%d" % (prevDevNum,)]
                except KeyError:
                    prevInfo = oldMapping["PV%d" % (prevDevNum,)]

                mapOffset = int(prevInfo["mapoffset"]) + \
                    int(prevInfo["pecount"])

            pvInfo["mapoffset"] = mapOffset
            mapping["PV%d" % devNum] = pvInfo
            devNum += 1

        return mapping

    def updateMapping(self):
        """Recompute the PV mapping and sync it into the domain metadata."""
        # First read existing mapping from metadata
        with self._metadata.transaction():
            mapping = self.getMetaDataMapping(self.sdUUID,
                                              self.readMetadataMapping())
            for key in set(self._metadata.keys() + mapping.keys()):
                if DMDK_PV_REGEX.match(key):
                    if key in mapping:
                        self._metadata[key] = mapping[key]
                    else:
                        del self._metadata[key]

    @classmethod
    def metaSize(cls, vgroup):
        ''' Calc the minimal meta volume size in MB'''
        # In any case the metadata volume cannot be less than 512MB for the
        # case of 512 bytes per volume metadata, 2K for domain metadata and
        # extent size of 128MB. In any case we compute the right size on line.
        # NOTE: floor (integer) division is intended throughout (py2).
        vg = lvm.getVG(vgroup)
        minmetasize = (SD_METADATA_SIZE / sd.METASIZE * int(vg.extent_size) +
                       (1024 * 1024 - 1)) / (1024 * 1024)
        metaratio = int(vg.extent_size) / sd.METASIZE
        metasize = (int(vg.extent_count) * sd.METASIZE +
                    (1024 * 1024 - 1)) / (1024 * 1024)
        metasize = max(minmetasize, metasize)
        if metasize > int(vg.free) / (1024 * 1024):
            raise se.VolumeGroupSizeError(
                "volume group has not enough extents %s (Minimum %s), VG may "
                "be too small" % (vg.extent_count,
                                  (1024 * 1024) / sd.METASIZE))
        cls.log.info("size %s MB (metaratio %s)" % (metasize, metaratio))
        return metasize

    def extend(self, devlist, force):
        """Add the given devices to the VG and grow the metadata LV."""
        with self._extendlock:
            if self.getVersion() in VERS_METADATA_LV:
                mapping = self.readMetadataMapping().values()
                if len(mapping) + len(devlist) > MAX_PVS:
                    raise se.StorageDomainIsMadeFromTooManyPVs()

            knowndevs = set(multipath.getMPDevNamesIter())
            unknowndevs = set(devlist) - knowndevs
            if unknowndevs:
                raise se.InaccessiblePhysDev(unknowndevs)

            lvm.extendVG(self.sdUUID, devlist, force)
            self.updateMapping()
            newsize = self.metaSize(self.sdUUID)
            lvm.extendLV(self.sdUUID, sd.METADATA, newsize)

    def resizePV(self, guid):
        """Resize a PV after its LUN grew, then refresh the mapping and
        metadata LV size."""
        with self._extendlock:
            lvm.resizePV(self.sdUUID, guid)
            self.updateMapping()
            newsize = self.metaSize(self.sdUUID)
            lvm.extendLV(self.sdUUID, sd.METADATA, newsize)

    def getVolumeClass(self):
        """
        Return a type specific volume generator object
        """
        return blockVolume.BlockVolume

    def _getImgExclusiveVols(self, imgUUID, volsImgs):
        """Filter vols belonging to imgUUID only."""
        exclusives = dict((vName, v) for vName, v in volsImgs.iteritems()
                          if v.imgs[0] == imgUUID)
        return exclusives

    def _markForDelVols(self, sdUUID, imgUUID, volUUIDs, opTag):
        """
        Mark volumes that will be zeroed or removed.

        Mark for delete just in case that lvremove [lvs] success partialy.
        Mark for zero just in case that zero process is interrupted.

        Tagging is preferable to rename since it can be done in a single lvm
        operation and is resilient to open LVs, etc.
        """
        try:
            lvm.changelv(sdUUID, volUUIDs,
                         (("-a", "y"),
                          ("--deltag", blockVolume.TAG_PREFIX_IMAGE + imgUUID),
                          ("--addtag", blockVolume.TAG_PREFIX_IMAGE +
                           opTag + imgUUID)))
        except se.StorageException as e:
            log.error("Can't activate or change LV tags in SD %s. "
                      "failing Image %s %s operation for vols: %s. %s",
                      sdUUID, imgUUID, opTag, volUUIDs, e)
            raise

    def _rmDCVolLinks(self, imgPath, volsImgs):
        """Unlink the data-center symlinks of the given volumes.

        Returns a tuple of the paths actually removed.
        """
        # BUGFIX: removedPaths used to be re-initialized inside the loop,
        # so the method reported/returned at most the last removed link
        # and raised NameError when volsImgs was empty.
        removedPaths = []
        for vol in volsImgs:
            lPath = os.path.join(imgPath, vol)
            try:
                os.unlink(lPath)
            except OSError as e:
                self.log.warning("Can't unlink %s. %s", lPath, e)
            else:
                removedPaths.append(lPath)

        self.log.debug("removed: %s", removedPaths)
        return tuple(removedPaths)

    def rmDCImgDir(self, imgUUID, volsImgs):
        """Remove an image's data-center directory and its volume links.

        Returns the image directory path.
        """
        imgPath = os.path.join(self.domaindir, sd.DOMAIN_IMAGES, imgUUID)
        self._rmDCVolLinks(imgPath, volsImgs)
        try:
            os.rmdir(imgPath)
        except OSError:
            self.log.warning("Can't rmdir %s", imgPath, exc_info=True)
        else:
            self.log.debug("removed image dir: %s", imgPath)
        return imgPath

    def deleteImage(self, sdUUID, imgUUID, volsImgs):
        """Delete the LVs owned exclusively by imgUUID and its DC links."""
        toDel = self._getImgExclusiveVols(imgUUID, volsImgs)
        self._markForDelVols(sdUUID, imgUUID, toDel, sd.REMOVED_IMAGE_PREFIX)
        deleteVolumes(sdUUID, toDel)
        self.rmDCImgDir(imgUUID, volsImgs)

    def getAllVolumesImages(self):
        """
        Return all the images that depend on a volume.

        Return dicts:
        vols = {volUUID: ([imgUUID1, imgUUID2], parentUUID)]}
        for complete images.
        remnants (same) for broken imgs, orphan volumes, etc.
        """
        vols = {}  # The "legal" volumes: not half deleted/removed volumes.
        remnants = {}  # Volumes which are part of failed image deletes.
        allVols = getAllVolumes(self.sdUUID)
        for volName, ip in allVols.iteritems():
            if (volName.startswith(sd.REMOVED_IMAGE_PREFIX) or
                    ip.imgs[0].startswith(sd.REMOVED_IMAGE_PREFIX)):
                remnants[volName] = ip
            else:
                # Deleted images are not dependencies of valid volumes.
                images = [img for img in ip.imgs
                          if not img.startswith(sd.REMOVED_IMAGE_PREFIX)]
                vols[volName] = sd.ImgsPar(images, ip.parent)
        return vols, remnants

    def getAllVolumes(self):
        """Return only the valid volumes (see getAllVolumesImages)."""
        vols, rems = self.getAllVolumesImages()
        return vols

    def getAllImages(self):
        """
        Get the set of all images uuids in the SD.
        """
        vols = self.getAllVolumes()  # {volName: ([imgs], parent)}
        images = set()
        for imgs, parent in vols.itervalues():
            images.update(imgs)
        return images

    def refreshDirTree(self):
        """(Re)create the domain directory tree and special LV symlinks."""
        # create domain images folder
        imagesPath = os.path.join(self.domaindir, sd.DOMAIN_IMAGES)
        fileUtils.createdir(imagesPath)

        # create domain special volumes folder
        domMD = os.path.join(self.domaindir, sd.DOMAIN_META_DATA)
        fileUtils.createdir(domMD)

        lvm.activateLVs(self.sdUUID, SPECIAL_LVS)
        for lvName in SPECIAL_LVS:
            dst = os.path.join(domMD, lvName)
            if not os.path.lexists(dst):
                src = lvm.lvPath(self.sdUUID, lvName)
                self.log.debug("Creating symlink from %s to %s", src, dst)
                os.symlink(src, dst)

    def refresh(self):
        """Rebuild the dir tree and reload the metadata from storage."""
        self.refreshDirTree()
        lvm.invalidateVG(self.sdUUID)
        self.replaceMetadata(selectMetadata(self.sdUUID))

    # Serializes volume metadata slot allocation across threads.
    _lvTagMetaSlotLock = threading.Lock()

    @contextmanager
    def acquireVolumeMetadataSlot(self, vol_name, slotSize):
        """Yield a free metadata slot offset for a new volume, holding the
        allocation lock for the duration of the context."""
        # TODO: Check if the lock is needed when using
        # getVolumeMetadataOffsetFromPvMapping()
        with self._lvTagMetaSlotLock:
            if self.getVersion() in VERS_METADATA_LV:
                yield self._getVolumeMetadataOffsetFromPvMapping(vol_name)
            else:
                yield self._getFreeMetadataSlot(slotSize)

    def _getVolumeMetadataOffsetFromPvMapping(self, vol_name):
        """Derive a volume's metadata offset from its first extent and the
        PV mapping (domain versions keeping metadata in an LV)."""
        dev, ext = lvm.getFirstExt(self.sdUUID, vol_name)
        self.log.debug("vol %s dev %s ext %s" % (vol_name, dev, ext))
        for pv in self.readMetadataMapping().values():
            self.log.debug("MAPOFFSET: pv %s -- dev %s ext %s" %
                           (pv, dev, ext))
            pestart = int(pv["pestart"])
            pecount = int(pv["pecount"])
            if (os.path.basename(dev) == pv["guid"] and
                    int(ext) in range(pestart, pestart + pecount)):

                offs = int(ext) + int(pv["mapoffset"])
                if offs < SD_METADATA_SIZE / sd.METASIZE:
                    raise se.MetaDataMappingError(
                        "domain %s: vol %s MD offset %s is bad - will "
                        "overwrite SD's MD" % (self.sdUUID, vol_name, offs))
                return offs
        raise se.MetaDataMappingError("domain %s: can't map PV %s ext %s" %
                                      (self.sdUUID, dev, ext))

    def _getFreeMetadataSlot(self, slotSize):
        """Return the first free gap of at least slotSize metadata slots."""
        occupiedSlots = self._getOccupiedMetadataSlots()

        # It might look weird skipping the sd metadata when it has been moved
        # to tags. But this is here because domain metadata and volume metadata
        # look the same. The domain might get confused and think it has lv
        # metadata if it finds something is written in that area.
        freeSlot = (SD_METADATA_SIZE + self.logBlkSize - 1) / self.logBlkSize

        for offset, size in occupiedSlots:
            if offset - freeSlot > slotSize:
                break

            freeSlot = offset + size

        self.log.debug("Found freeSlot %s in VG %s", freeSlot, self.sdUUID)
        return freeSlot

    def _getOccupiedMetadataSlots(self):
        """Return a sorted list of (offset, size) metadata slots currently
        claimed by volume LV tags."""
        stripPrefix = lambda s, pfx: s[len(pfx):]
        occupiedSlots = []
        for lv in lvm.getLV(self.sdUUID):
            if lv.name in SPECIAL_LVS:
                # Special LVs have no mapping
                continue

            offset = None
            size = blockVolume.VOLUME_MDNUMBLKS
            for tag in lv.tags:
                if tag.startswith(blockVolume.TAG_PREFIX_MD):
                    offset = int(stripPrefix(tag, blockVolume.TAG_PREFIX_MD))

                if tag.startswith(blockVolume.TAG_PREFIX_MDNUMBLKS):
                    size = int(stripPrefix(tag,
                                           blockVolume.TAG_PREFIX_MDNUMBLKS))

                if offset is not None and size != blockVolume.VOLUME_MDNUMBLKS:
                    # I've found everything I need
                    break

            if offset is None:
                self.log.warn("Could not find mapping for lv %s/%s",
                              self.sdUUID, lv.name)
                continue

            occupiedSlots.append((offset, size))

        occupiedSlots.sort(key=itemgetter(0))
        return occupiedSlots

    def validateCreateVolumeParams(self, volFormat, srcVolUUID,
                                   preallocate=None):
        """Validate creation parameters; block domains additionally reject
        sparse raw volumes."""
        super(BlockStorageDomainManifest, self).validateCreateVolumeParams(
            volFormat, srcVolUUID, preallocate=preallocate)
        # Sparse-Raw not supported for block volumes
        if preallocate == volume.SPARSE_VOL and volFormat == volume.RAW_FORMAT:
            raise se.IncorrectFormat(volume.type2name(volFormat))
class BlockStorageDomain(sd.StorageDomain):
manifestClass = BlockStorageDomainManifest
    def __init__(self, sdUUID):
        manifest = self.manifestClass(sdUUID)
        sd.StorageDomain.__init__(self, manifest)
        # Domain special LVs must be active before any metadata access.
        lvm.activateLVs(self.sdUUID, SPECIAL_LVS)
        self.metavol = lvm.lvPath(self.sdUUID, sd.METADATA)

        # Check that all devices in the VG have the same logical and physical
        # block sizes.
        lvm.checkVGBlockSizes(sdUUID, (self.logBlkSize, self.phyBlkSize))

        # Clean up leftovers of interrupted delete/zero operations.
        self.imageGarbageCollector()
        self._registerResourceNamespaces()
        self._lastUncachedSelftest = 0
    @property
    def logBlkSize(self):
        # Logical block size (bytes) as recorded in the domain metadata.
        return self._manifest.logBlkSize

    @property
    def phyBlkSize(self):
        # Physical block size (bytes) as recorded in the domain metadata.
        return self._manifest.phyBlkSize
    def _registerResourceNamespaces(self):
        """
        Register resources namespaces and create
        factories for it.
        """
        sd.StorageDomain._registerResourceNamespaces(self)

        rmanager = rm.ResourceManager.getInstance()
        # Register lvm activation resource namespace for the underlying VG
        lvmActivationFactory = resourceFactories.LvmActivationFactory(
            self.sdUUID)
        lvmActivationNamespace = sd.getNamespace(self.sdUUID,
                                                 LVM_ACTIVATION_NAMESPACE)
        try:
            rmanager.registerNamespace(lvmActivationNamespace,
                                       lvmActivationFactory)
        except KeyError:
            # Registration is idempotent across domain re-instantiation.
            self.log.info("Resource namespace %s already registered",
                          lvmActivationNamespace)
    @classmethod
    def metaSize(cls, vgroup):
        """Delegate metadata LV size calculation to the manifest class."""
        return cls.manifestClass.metaSize(vgroup)
    @classmethod
    def create(cls, sdUUID, domainName, domClass, vgUUID, storageType,
               version):
        """ Create new storage domain

        'sdUUID' - Storage Domain UUID
        'domainName' - storage domain name
        'domClass' - Data/Iso
        'vgUUID' - volume group UUID
        'storageType' - NFS_DOMAIN, LOCALFS_DOMAIN, &etc.
        'version' - DOMAIN_VERSIONS
        """
        cls.log.info("sdUUID=%s domainName=%s domClass=%s vgUUID=%s "
                     "storageType=%s version=%s", sdUUID, domainName, domClass,
                     vgUUID, storageType, version)

        if not misc.isAscii(domainName) and not sd.supportsUnicode(version):
            raise se.UnicodeArgumentException()

        if len(domainName) > sd.MAX_DOMAIN_DESCRIPTION_SIZE:
            raise se.StorageDomainDescriptionTooLongError()

        sd.validateDomainVersion(version)

        vg = lvm.getVGbyUUID(vgUUID)
        vgName = vg.name

        # The VG must carry exactly the "unready" tag left by VG creation.
        if set((STORAGE_UNREADY_DOMAIN_TAG,)) != set(vg.tags):
            raise se.VolumeGroupHasDomainTag(vgUUID)
        try:
            lvm.getLV(vgName)
            raise se.StorageDomainNotEmpty(vgUUID)
        except se.LogicalVolumeDoesNotExistError:
            pass

        numOfPVs = len(lvm.listPVNames(vgName))
        if version in VERS_METADATA_LV and numOfPVs > MAX_PVS:
            cls.log.debug("%d > %d", numOfPVs, MAX_PVS)
            raise se.StorageDomainIsMadeFromTooManyPVs()

        # Create metadata service volume
        metasize = cls.metaSize(vgName)
        lvm.createLV(vgName, sd.METADATA, "%s" % (metasize))
        # Create the mapping right now so the index 0 is guaranteed
        # to belong to the metadata volume. Since the metadata is at
        # least SDMETADATA/METASIZE units, we know we can use the first
        # SDMETADATA bytes of the metadata volume for the SD metadata.
        # pass metadata's dev to ensure it is the first mapping
        mapping = cls.getMetaDataMapping(vgName)

        # Create the rest of the BlockSD internal volumes
        for metaFile, metaSizeMb in sd.SPECIAL_VOLUME_SIZES_MIB.iteritems():
            lvm.createLV(vgName, metaFile, metaSizeMb)

        lvm.createLV(vgName, MASTERLV, MASTERLV_SIZE)

        # Create VMS file system
        _createVMSfs(os.path.join("/dev", vgName, MASTERLV))

        lvm.deactivateLVs(vgName, MASTERLV)

        path = lvm.lvPath(vgName, sd.METADATA)

        # Zero out the metadata and special volumes before use
        try:
            misc.ddCopy("/dev/zero", path, RESERVED_METADATA_SIZE)
            path = lvm.lvPath(vgName, sd.INBOX)
            misc.ddCopy("/dev/zero", path, RESERVED_MAILBOX_SIZE)
            path = lvm.lvPath(vgName, sd.OUTBOX)
            misc.ddCopy("/dev/zero", path, RESERVED_MAILBOX_SIZE)
        except utils.ActionStopped:
            raise
        except se.StorageException:
            raise se.VolumesZeroingError(path)

        # NOTE(review): if `version` is in neither tuple, `md` stays unbound
        # and md.update() below raises NameError; presumably
        # validateDomainVersion() already rejects such versions - confirm.
        if version in VERS_METADATA_LV:
            md = LvBasedSDMetadata(vgName, sd.METADATA)
        elif version in VERS_METADATA_TAG:
            md = TagBasedSDMetadata(vgName)

        logBlkSize, phyBlkSize = lvm.getVGBlockSizes(vgName)

        # create domain metadata
        # FIXME : This is 99% like the metadata in file SD
        #         Do we really need to keep the VGUUID?
        #         no one reads it from here anyway
        initialMetadata = {
            sd.DMDK_VERSION: version,
            sd.DMDK_SDUUID: sdUUID,
            sd.DMDK_TYPE: storageType,
            sd.DMDK_CLASS: domClass,
            sd.DMDK_DESCRIPTION: domainName,
            sd.DMDK_ROLE: sd.REGULAR_DOMAIN,
            sd.DMDK_POOLS: [],
            sd.DMDK_LOCK_POLICY: '',
            sd.DMDK_LOCK_RENEWAL_INTERVAL_SEC: sd.DEFAULT_LEASE_PARAMS[
                sd.DMDK_LOCK_RENEWAL_INTERVAL_SEC],
            sd.DMDK_LEASE_TIME_SEC: sd.DEFAULT_LEASE_PARAMS[
                sd.DMDK_LEASE_TIME_SEC],
            sd.DMDK_IO_OP_TIMEOUT_SEC: sd.DEFAULT_LEASE_PARAMS[
                sd.DMDK_IO_OP_TIMEOUT_SEC],
            sd.DMDK_LEASE_RETRIES: sd.DEFAULT_LEASE_PARAMS[
                sd.DMDK_LEASE_RETRIES],
            DMDK_VGUUID: vgUUID,
            DMDK_LOGBLKSIZE: logBlkSize,
            DMDK_PHYBLKSIZE: phyBlkSize,
        }

        initialMetadata.update(mapping)

        md.update(initialMetadata)

        # Mark VG with Storage Domain Tag
        try:
            lvm.replaceVGTag(vgName, STORAGE_UNREADY_DOMAIN_TAG,
                             STORAGE_DOMAIN_TAG)
        except se.StorageException:
            raise se.VolumeGroupUninitialized(vgName)

        bsd = BlockStorageDomain(sdUUID)
        bsd.initSPMlease()

        return bsd
    @classmethod
    def getMetaDataMapping(cls, vgName, oldMapping={}):
        """Delegate PV mapping calculation to the manifest class."""
        return cls.manifestClass.getMetaDataMapping(vgName, oldMapping)

    def extend(self, devlist, force):
        """Extend the domain VG with the given devices."""
        self._manifest.extend(devlist, force)

    def resizePV(self, guid):
        """Resize a PV after the underlying LUN grew."""
        self._manifest.resizePV(guid)
    # NOTE(review): this lock appears unused here; the manifest holds the
    # lock actually taken by acquireVolumeMetadataSlot - confirm before
    # removing.
    _lvTagMetaSlotLock = threading.Lock()

    @contextmanager
    def acquireVolumeMetadataSlot(self, vol_name, slotSize):
        """Delegate metadata slot acquisition to the manifest."""
        with self._manifest.acquireVolumeMetadataSlot(vol_name, slotSize) \
                as slot:
            yield slot

    def readMetadataMapping(self):
        """Delegate PV mapping read to the manifest."""
        return self._manifest.readMetadataMapping()
    def getLeasesFileSize(self):
        """Return the size in bytes of the leases LV."""
        lv = lvm.getLV(self.sdUUID, sd.LEASES)
        return int(lv.size)
    def selftest(self):
        """
        Run the underlying VG validation routine
        """
        # Throttle the expensive uncached VG check; between full checks,
        # rely on the (possibly cached) VG attributes.
        timeout = config.getint("irs", "repo_stats_cache_refresh_timeout")
        now = time.time()

        if now - self._lastUncachedSelftest > timeout:
            self._lastUncachedSelftest = now
            lvm.chkVG(self.sdUUID)
        elif lvm.getVG(self.sdUUID).partial != lvm.VG_OK:
            raise se.StorageDomainAccessError(self.sdUUID)
    def validate(self):
        """
        Validate that the storage domain metadata is accessible and
        non-empty, re-reading it from storage.
        """
        self.log.info("sdUUID=%s", self.sdUUID)
        lvm.chkVG(self.sdUUID)
        self.invalidateMetadata()
        if not len(self.getMetadata()):
            raise se.StorageDomainAccessError(self.sdUUID)
    def invalidate(self):
        """
        Make sure that storage domain is inaccessible.
        1. Make sure master LV is not mounted
        2. Deactivate all the volumes from the underlying VG
        3. Destroy any possible dangling maps left in device mapper
        """
        try:
            self.unmountMaster()
        except se.StorageDomainMasterUnmountError:
            self.log.warning("Unable to unmount master LV during invalidateSD")
        except se.CannotDeactivateLogicalVolume:
            # It could be that at this point there is no LV, so just ignore it
            pass
        except Exception:
            # log any other exception, but keep going
            self.log.error("Unexpected error", exc_info=True)

        # FIXME: remove this and make sure nothing breaks
        try:
            lvm.deactivateVG(self.sdUUID)
        except Exception:
            # log any other exception, but keep going
            self.log.error("Unexpected error", exc_info=True)

        # Remove leftover device nodes for the now-deactivated VG.
        fileUtils.cleanupdir(os.path.join("/dev", self.sdUUID))
    @classmethod
    def format(cls, sdUUID):
        """Format detached storage domain.
           This removes all data from the storage domain.

        Returns True on success; individual LV removal failures are
        logged and skipped.
        """
        # Remove the directory tree
        try:
            domaindir = cls.findDomainPath(sdUUID)
        except (se.StorageDomainDoesNotExist):
            pass
        else:
            fileUtils.cleanupdir(domaindir, ignoreErrors=True)
        # Remove special metadata and service volumes
        # Remove all volumes LV if exists
        _removeVMSfs(lvm.lvPath(sdUUID, MASTERLV))
        try:
            lvs = lvm.getLV(sdUUID)
        except se.LogicalVolumeDoesNotExistError:
            lvs = ()  # No LVs in this VG (domain)

        for lv in lvs:
            # Fix me: Should raise and get resource lock.
            try:
                lvm.removeLVs(sdUUID, lv.name)
            except se.CannotRemoveLogicalVolume as e:
                cls.log.warning("Remove logical volume failed %s/%s %s",
                                sdUUID, lv.name, str(e))

        lvm.removeVG(sdUUID)
        return True
def getInfo(self):
"""
Get storage domain info
"""
# self.log.info("sdUUID=%s", self.sdUUID)
# First call parent getInfo() - it fills in all the common details
info = sd.StorageDomain.getInfo(self)
# Now add blockSD specific data
vg = lvm.getVG(self.sdUUID) # vg.name = self.sdUUID
info['vguuid'] = vg.uuid
info['state'] = vg.partial
return info
def getStats(self):
"""
"""
vg = lvm.getVG(self.sdUUID)
vgMetadataStatus = metadataValidity(vg)
return dict(disktotal=vg.size, diskfree=vg.free,
mdasize=vg.vg_mda_size, mdafree=vg.vg_mda_free,
mdavalid=vgMetadataStatus['mdavalid'],
mdathreshold=vgMetadataStatus['mdathreshold'])
    def rmDCImgDir(self, imgUUID, volsImgs):
        """Delegate data-center image dir removal to the manifest."""
        return self._manifest.rmDCImgDir(imgUUID, volsImgs)

    def zeroImage(self, sdUUID, imgUUID, volsImgs):
        """Zero, then remove, the volumes owned exclusively by imgUUID."""
        toZero = self._manifest._getImgExclusiveVols(imgUUID, volsImgs)
        # Tag first so an interrupted zeroing can be garbage collected.
        self._manifest._markForDelVols(sdUUID, imgUUID, toZero,
                                       sd.ZEROED_IMAGE_PREFIX)
        zeroImgVolumes(sdUUID, imgUUID, toZero)
        self.rmDCImgDir(imgUUID, volsImgs)
    def deactivateImage(self, imgUUID):
        """
        Deactivate all the volumes belonging to the image.

        imgUUID: the image to be deactivated.

        If the image is based on a template image it should be expressly
        deactivated.
        """
        allVols = self.getAllVolumes()
        volUUIDs = self._manifest._getImgExclusiveVols(imgUUID, allVols)
        lvm.deactivateLVs(self.sdUUID, volUUIDs)
    def linkBCImage(self, imgPath, imgUUID):
        """Create a symlink to the image directory under the domain tree.

        An already existing link is tolerated; returns the link path.
        """
        dst = self.getLinkBCImagePath(imgUUID)
        self.log.debug("Creating symlink from %s to %s", imgPath, dst)
        try:
            os.symlink(imgPath, dst)
        except OSError as e:
            if e.errno == errno.EEXIST:
                self.log.debug("path to image directory already exists: %s",
                               dst)
            else:
                self.log.error("Failed to create path to image directory: %s",
                               dst)
                raise
        return dst
    def createImageLinks(self, srcImgPath, imgUUID, volUUIDs):
        """
        qcow chain is build by reading each qcow header and reading the path
        to the parent. When creating the qcow layer, we pass a relative path
        which allows us to build a directory with links to all volumes in the
        chain anywhere we want. This method creates a directory with the image
        uuid under /var/run/vdsm and creates sym links to all the volumes in
        the chain.

        srcImgPath: Dir where the image volumes are.

        Returns the run-directory path created for the image.
        """
        sdRunDir = os.path.join(constants.P_VDSM_STORAGE, self.sdUUID)
        imgRunDir = os.path.join(sdRunDir, imgUUID)
        fileUtils.createdir(imgRunDir)
        for volUUID in volUUIDs:
            srcVol = os.path.join(srcImgPath, volUUID)
            dstVol = os.path.join(imgRunDir, volUUID)
            self.log.debug("Creating symlink from %s to %s", srcVol, dstVol)
            try:
                os.symlink(srcVol, dstVol)
            except OSError as e:
                if e.errno == errno.EEXIST:
                    # Pre-existing links are fine (idempotent operation).
                    self.log.debug("img run vol already exists: %s", dstVol)
                else:
                    self.log.error("Failed to create img run vol: %s", dstVol)
                    raise
        return imgRunDir
    def activateVolumes(self, imgUUID, volUUIDs):
        """
        Activate all the volumes belonging to the image.

        imgUUID: the image to be activated.
        volUUIDs: the volumes to activate.

        If the image is based on a template image it will be activated.
        Returns the run-directory populated with links to the volumes.
        """
        lvm.activateLVs(self.sdUUID, volUUIDs)
        vgDir = os.path.join("/dev", self.sdUUID)
        return self.createImageLinks(vgDir, imgUUID, volUUIDs)
    def getVolumeLease(self, imgUUID, volUUID):
        """
        Return the volume lease (leasePath, leaseOffset)

        Returns (None, None) when the domain has no volume leases.
        """
        if self.hasVolumeLeases():
            # TODO: use the sanlock specific offset when present
            leaseSlot = self.produceVolume(imgUUID, volUUID).getMetaOffset()
            # Lease area starts after the reserved leases; one lease per
            # metadata slot.
            leaseOffset = ((leaseSlot + blockVolume.RESERVED_LEASES) *
                           self.logBlkSize * sd.LEASE_BLOCKS)
            return self.getLeasesFilePath(), leaseOffset
        return None, None
    def validateMasterMount(self):
        """Return True if the master file system is currently mounted."""
        return mount.isMounted(self.getMasterDir())

    def mountMaster(self):
        """
        Mount the master metadata file system. Should be called only by SPM.
        """
        lvm.activateLVs(self.sdUUID, MASTERLV)
        masterDir = os.path.join(self.domaindir, sd.MASTER_FS_DIR)
        fileUtils.createdir(masterDir)

        masterfsdev = lvm.lvPath(self.sdUUID, MASTERLV)
        cmd = [constants.EXT_FSCK, "-p", masterfsdev]
        (rc, out, err) = misc.execCmd(cmd, sudo=True,
                                      deathSignal=signal.SIGKILL)

        # fsck exit codes
        # 0    - No errors
        # 1    - File system errors corrected
        # 2    - File system errors corrected, system should
        #        be rebooted
        # 4    - File system errors left uncorrected
        # 8    - Operational error
        # 16   - Usage or syntax error
        # 32   - E2fsck canceled by user request
        # 128  - Shared library error
        if rc == 1 or rc == 2:
            # rc is a number
            self.log.info("fsck corrected fs errors (%s)", rc)
        if rc >= 4:
            raise se.BlockStorageDomainMasterFSCKError(masterfsdev, rc)

        # TODO: Remove when upgrade is only from a version which creates ext3
        # Try to add a journal - due to unfortunate circumstances we exposed
        # to the public the code that created ext2 file system instead of ext3.
        # In order to make up for it we are trying to add journal here, just
        # to be sure (and we have fixed the file system creation).
        # If there is a journal already tune2fs will do nothing, indicating
        # this condition only with exit code. However, we do not really care.
        cmd = [constants.EXT_TUNE2FS, "-j", masterfsdev]
        misc.execCmd(cmd, sudo=True, deathSignal=signal.SIGKILL)

        masterMount = mount.Mount(masterfsdev, masterDir)

        try:
            masterMount.mount(vfstype=mount.VFS_EXT3)
        except mount.MountError as ex:
            rc, out = ex
            raise se.BlockStorageDomainMasterMountError(masterfsdev, rc, out)

        cmd = [constants.EXT_CHOWN, "%s:%s" %
               (constants.METADATA_USER, constants.METADATA_GROUP), masterDir]
        (rc, out, err) = misc.execCmd(cmd, sudo=True)
        if rc != 0:
            # Non-fatal: log and continue with the mount in place.
            self.log.error("failed to chown %s", masterDir)
    @classmethod
    def __handleStuckUmount(cls, masterDir):
        """Detect a umount of masterDir stuck in D state and, if found,
        force-disconnect the underlying storage of the master LV."""
        umountPids = utils.pgrep("umount")
        try:
            masterMount = mount.getMountFromTarget(masterDir)
        except OSError as ex:
            if ex.errno == errno.ENOENT:
                # Not mounted at all - nothing to handle.
                return

            raise

        for umountPid in umountPids:
            try:
                state = utils.pidStat(umountPid).state
                mountPoint = utils.getCmdArgs(umountPid)[-1]
            except:
                # Process probably exited
                continue

            if mountPoint != masterDir:
                continue

            if state != "D":
                # If the umount is not in d state there
                # is a possibility that the world might
                # be in flux and umount will get stuck
                # in an unkillable state that is not D
                # which I don't know about, perhaps a
                # bug in umount will cause umount to
                # wait for something unrelated that is
                # not the syscall. Waiting on a process
                # which is not your child is race prone
                # I will just call for another umount
                # and wait for it to finish. That way I
                # know that a umount ended.
                try:
                    masterMount.umount()
                except mount.MountError:
                    # timeout! we are stuck again.
                    # if you are here spmprotect forgot to
                    # reboot the machine but in any case
                    # continue with the disconnection.
                    pass

            try:
                vgName = masterDir.rsplit("/", 2)[1]
                masterDev = os.path.join(
                    "/dev/mapper", vgName.replace("-", "--") + "-" + MASTERLV)
            except KeyError:
                # Umount succeeded after all
                return

            cls.log.warn("master mount resource is `%s`, trying to disconnect "
                         "underlying storage", masterDev)
            iscsi.disconnectFromUndelyingStorage(masterDev)
@classmethod
def doUnmountMaster(cls, masterdir):
"""
Unmount the master metadata file system. Should be called only by SPM.
"""
# fuser processes holding mount point and validate that the umount
# succeeded
cls.__handleStuckUmount(masterdir)
try:
masterMount = mount.getMountFromTarget(masterdir)
except OSError as ex:
if ex.errno == errno.ENOENT:
return
raise
if masterMount.isMounted():
# Try umount, take 1
try:
masterMount.umount()
except mount.MountError:
# umount failed, try to kill that processes holding mount point
svdsmp = svdsm.getProxy()
pids = svdsmp.fuser(masterMount.fs_file, mountPoint=True)
# It was unmounted while I was checking no need to do anything
if not masterMount.isMounted():
return
if len(pids) == 0:
cls.log.warn("Unmount failed because of errors that fuser "
"can't solve")
else:
for pid in pids:
try:
cls.log.debug("Trying to kill pid %d", pid)
os.kill(pid, signal.SIGKILL)
except OSError as e:
if e.errno == errno.ESRCH: # No such process
pass
elif e.errno == errno.EPERM: # Op. not permitted
cls.log.warn("Could not kill pid %d because "
"operation was not permitted",
pid)
else:
cls.log.warn("Could not kill pid %d because an"
" unexpected error",
exc_info=True)
except:
cls.log.warn("Could not kill pid %d because an "
"unexpected error", exc_info=True)
# Try umount, take 2
try:
masterMount.umount()
except mount.MountError:
pass
if masterMount.isMounted():
# We failed to umount masterFS
# Forcibly rebooting the SPM host would be safer. ???
raise se.StorageDomainMasterUnmountError(masterdir, 1)
    def unmountMaster(self):
        """
        Unmount the master metadata file system. Should be called only by SPM.

        Delegates the actual unmount to doUnmountMaster(), then
        deactivates the master LV.
        """
        masterdir = os.path.join(self.domaindir, sd.MASTER_FS_DIR)
        self.doUnmountMaster(masterdir)
        # It is time to deactivate the master LV now
        lvm.deactivateLVs(self.sdUUID, MASTERLV)
    def extendVolume(self, volumeUUID, size, isShuttingDown=None):
        """Extend volume *volumeUUID* to *size*; delegates to the manifest."""
        return self._manifest.extendVolume(volumeUUID, size, isShuttingDown)
@staticmethod
def findDomainPath(sdUUID):
try:
vg = lvm.getVG(sdUUID)
except se.VolumeGroupDoesNotExist:
raise se.StorageDomainDoesNotExist(sdUUID)
if _isSD(vg):
return vg.name
raise se.StorageDomainDoesNotExist(sdUUID)
def _createVMSfs(dev):
    """Create the special VM-data file system on block device *dev*.

    Runs mkfs (quiet, journalled, no discard) via sudo and raises
    se.MkfsError when the command exits non-zero.
    """
    mkfsCmd = [constants.EXT_MKFS, "-q", "-j", "-E", "nodiscard", dev]
    returnCode, _out, _err = misc.execCmd(mkfsCmd, sudo=True,
                                          deathSignal=signal.SIGKILL)
    if returnCode != 0:
        raise se.MkfsError(dev)
def _removeVMSfs(dev):
    """
    Destroy special VM data file system (currently a no-op).
    """
    # XXX Add at least minimal sanity check, i.e. fs not mounted
    pass
def _isSD(vg):
    # A VG is a storage domain iff it carries the storage-domain tag.
    return STORAGE_DOMAIN_TAG in vg.tags
def findDomain(sdUUID):
    # Resolve the domain's backing VG first (raises
    # StorageDomainDoesNotExist if missing/untagged), then construct
    # the domain object from it.
    return BlockStorageDomain(BlockStorageDomain.findDomainPath(sdUUID))
def getStorageDomainsList():
    """Return the names of all VGs tagged as block storage domains."""
    domainNames = []
    for volumeGroup in lvm.getAllVGs():
        if _isSD(volumeGroup):
            domainNames.append(volumeGroup.name)
    return domainNames
|
germanovm/vdsm
|
vdsm/storage/blockSD.py
|
Python
|
gpl-2.0
| 52,116
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from ._configuration import SpellCheckClientConfiguration
from ._spell_check_client import SpellCheckClient
__all__ = ['SpellCheckClient', 'SpellCheckClientConfiguration']
from .version import VERSION
__version__ = VERSION
|
Azure/azure-sdk-for-python
|
sdk/cognitiveservices/azure-cognitiveservices-language-spellcheck/azure/cognitiveservices/language/spellcheck/__init__.py
|
Python
|
mit
| 700
|
# This file is generated by /private/var/folders/my/m6ynh3bn6tq06h7xr3js0z7r0000gn/T/pip-uot8AB-build/-c
# It contains system_info results at the time of building this package.
__all__ = ["get_info","show"]
blas_mkl_info={}
lapack_opt_info={'extra_link_args': ['-Wl,-framework', '-Wl,Accelerate'], 'define_macros': [('NO_ATLAS_INFO', 3)], 'extra_compile_args': ['-msse3']}
blas_opt_info={'extra_link_args': ['-Wl,-framework', '-Wl,Accelerate'], 'define_macros': [('NO_ATLAS_INFO', 3)], 'extra_compile_args': ['-msse3', '-I/System/Library/Frameworks/vecLib.framework/Headers']}
atlas_blas_threads_info={}
openblas_info={}
atlas_info={}
lapack_mkl_info={}
mkl_info={}
atlas_blas_info={}
atlas_threads_info={}
def get_info(name):
    """Return the recorded system_info dict for *name*.

    Looks *name* up in this module's globals, falling back to
    ``<name>_info`` so that both "atlas" and "atlas_info" resolve to
    the same entry.  Returns an empty dict when nothing was recorded.
    """
    table = globals()
    fallback = table.get(name + "_info", {})
    return table.get(name, fallback)
def show():
    """Print every recorded ``*_info`` dictionary in this module.

    Only module-level names that hold plain dicts and do not start with
    an underscore are shown.  Empty dicts are reported as NOT AVAILABLE
    and overly long "sources" entries are abbreviated.
    """
    for name, info in globals().items():
        if name.startswith("_"):
            continue
        if type(info) is not dict:
            continue
        print(name + ":")
        if not info:
            print("  NOT AVAILABLE")
        for key, value in info.items():
            text = str(value)
            if key == "sources" and len(text) > 200:
                text = text[:60] + " ...\n... " + text[-60:]
            print("  %s = %s" % (key, text))
|
ammarkhann/FinalSeniorCode
|
lib/python2.7/site-packages/scipy/__config__.py
|
Python
|
mit
| 1,221
|
from django.conf.urls import patterns, url
import views
# URL routes for the index app.
# NOTE(review): ``patterns('', ...)`` was deprecated in Django 1.8 and
# removed in 1.10 -- this file targets an old Django; with a modern
# version ``urlpatterns`` should be a plain list of ``url()`` entries.
urlpatterns = patterns('',
    # Site root -> index view.
    url(r'^$', views.index),
)
|
hydai/HydaiNoWebsite
|
index/urls.py
|
Python
|
mit
| 115
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add ``nickname`` and ``sign_line`` columns
    to ``infocenter_centralstudentinfo``."""

    def forwards(self, orm):
        # Adding field 'CentralStudentInfo.nickname'
        db.add_column('infocenter_centralstudentinfo', 'nickname', self.gf('django.db.models.fields.CharField')(default=u'', max_length=32, blank=True), keep_default=False)

        # Adding field 'CentralStudentInfo.sign_line'
        db.add_column('infocenter_centralstudentinfo', 'sign_line', self.gf('django.db.models.fields.CharField')(default=u'', max_length=128, blank=True), keep_default=False)

    def backwards(self, orm):
        # Deleting field 'CentralStudentInfo.nickname'
        db.delete_column('infocenter_centralstudentinfo', 'nickname')

        # Deleting field 'CentralStudentInfo.sign_line'
        db.delete_column('infocenter_centralstudentinfo', 'sign_line')

    # Frozen model definitions auto-generated by South at migration time.
    # Do not edit by hand -- regenerate with ``schemamigration`` instead.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'classes.logicalclass': {
            'Meta': {'object_name': 'LogicalClass'},
            'date': ('django.db.models.fields.DateField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'major': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['classes.Major']"}),
            'seq': ('django.db.models.fields.IntegerField', [], {})
        },
        'classes.major': {
            'Meta': {'object_name': 'Major'},
            'code': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
            'shortname': ('django.db.models.fields.CharField', [], {'max_length': '4'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'infocenter.centralstudentinfo': {
            'Meta': {'object_name': 'CentralStudentInfo'},
            'awards': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'english_band_score': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
            'english_band_type': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
            'ethnic': ('django.db.models.fields.IntegerField', [], {}),
            'health': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '64', 'blank': 'True'}),
            'high_school': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '32', 'blank': 'True'}),
            'hobby': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '128', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'join_date': ('django.db.models.fields.DateField', [], {}),
            'klass': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['classes.LogicalClass']", 'null': 'True'}),
            'location': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'nickname': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '32', 'blank': 'True'}),
            'phone': ('django.db.models.fields.CharField', [], {'max_length': '24', 'blank': 'True'}),
            'political': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'sign_line': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '128', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'central_info'", 'unique': 'True', 'to': "orm['auth.User']"})
        }
    }

    complete_apps = ['infocenter']
|
team-xue/xue
|
xue/infocenter/migrations/0003_add_ident.py
|
Python
|
bsd-3-clause
| 6,887
|
# Train a GOGP_S (sparse online Gaussian process) regressor on the
# pre-shuffled abalone dataset and pickle the fitted learner.
# NOTE: Python 2 script (uses print statements).
from gogp_s import GOGP_S
from util import Util as U
import pickle
import sklearn.preprocessing as skpre
from sklearn.datasets import load_svmlight_file

datadir = '../datasets/'
dataset = 'abalone.shuffle_2'
filename = datadir+dataset+'.txt'

# Load libsvm-format data; densify and make the target a column vector.
xxTrain, yyTrain = load_svmlight_file(filename)
xxTrain = xxTrain.toarray()
yyTrain = yyTrain.reshape(yyTrain.shape[0], 1)
# print xxTrain.shape
# print yyTrain.shape

# min_max_scaler = skpre.MinMaxScaler(feature_range=(-1, 1))
# xxTrain = min_max_scaler.fit_transform(xxTrain)

# Hyper-parameters: theta/gamma/lbd are presumably kernel and
# regularisation settings -- TODO confirm against GOGP_S documentation.
learner = GOGP_S(theta=0.9095, gamma=0.02, lbd=0.02, percent_batch=0.1)
learner.fit_online_delay(xxTrain, yyTrain)
print 'RMSE (Online):', learner.final_rmse
print 'Training time:', learner.online_time

print 'save report ...'
# Drop the (large) training matrix before pickling the model object.
learner.X = None
pickle.dump(learner, open("log/" + dataset + U.get_string_time('.log.p'), "wb" ))
|
khanhndk/GoGP
|
gogp-py/test_abalone.py
|
Python
|
gpl-3.0
| 852
|
#!/usr/bin/env python
"""This module contains tests for output plugins-related API renderers."""
from grr.gui import api_test_lib
from grr.lib import flags
from grr.lib import output_plugin
from grr.lib import test_lib
from grr.lib import utils
from grr.lib.output_plugins import csv_plugin
from grr.lib.output_plugins import email_plugin
class ApiOutputPluginsListRendererRegressionTest(
    api_test_lib.ApiCallRendererRegressionTest):
  """Regression test for ApiOutputPluginsListRenderer."""

  renderer = "ApiOutputPluginsListRenderer"

  def Run(self):
    # Pin the output-plugin registry to a fixed pair of plugins so the
    # golden-data comparison below is deterministic across builds.
    with utils.Stubber(output_plugin.OutputPlugin, "classes", {
        "EmailOutputPlugin": email_plugin.EmailOutputPlugin,
        "CSVOutputPlugin": csv_plugin.CSVOutputPlugin
    }):
      self.Check("GET", "/api/output-plugins/all")
def main(argv):
  # Standard GRR test entry point; delegates to the shared test runner.
  test_lib.main(argv)


if __name__ == "__main__":
  flags.StartMain(main)
|
darrenbilby/grr
|
gui/api_plugins/output_plugin_test.py
|
Python
|
apache-2.0
| 899
|
#! /usr/bin/env python
# Sunset Clock for condo
# Copyleft 2014 (8/25/14 2200) Mark Fink -- mastermwf@gmail.com
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Reminder: The program is free. The images you use may be copyrighted.
# Ver. 0.1.0
import ephem
import datetime
import Tkinter as tk # Tkinter for Python 2.7
# import tkinter as tk # tkinter for Python 3.4
from Tkinter import *
# from tkinter import *
# Fonts: (family, size[, weight]) tuples for the Tk label.
hel65B = ('Helvetica', 65, 'bold') # substitute your favorite font here for insertion in line 64
hel80 = ('Helvetica', 80) # defines font
san65 = ('Sans', 65)

now = datetime.datetime.now() #get current time

# PyEphem observer describing where sunset is computed.
GOM=ephem.Observer()
GOM.horizon = 0
GOM.lat='29.1266667' # Insert your Latitude here. Positive numbers for North
GOM.lon='-90.2166667' # Insert your Longitude here. Negative numbers for West
GOM.elevation = 24 # meters
GOM.date = now
sun = ephem.Sun()

def countdown(label):
    """Attach a once-per-second countdown-to-sunset updater to *label*."""
    def clock():
        nextSet = ephem.localtime(GOM.next_setting(sun)) # This is the line that can be modified to get sunrise, moonset, meridian passage, etc
        remaining = nextSet-datetime.datetime.now()
        # NOTE(review): timedelta.seconds ignores the .days component --
        # assumes the next sunset is always less than a day away; confirm.
        hours = int(remaining.seconds) // 3600
        minutes = int(remaining.seconds % 3600) // 60
        seconds = int(remaining.seconds % 60)
        displayFormat = 'Sunset in {}:{}:{}'.format(hours, format(minutes, '02d'), format(seconds, '02d'))
        label.config(text= '\n \n \n \n \n {}'.format(displayFormat)) # adjust newlines '\n' to fit your display
        label.after(1000, clock) # re-schedule ourselves in 1000 ms
    clock()

root = tk.Tk() # creates the working window
root.title("Sunset Clock") # text in title bar
logo = PhotoImage(file="clock1.ppm") # Insert path to your favorite sunset photo. Resolution of photo determines window size.
label = tk.Label(root, compound=CENTER, font=hel65B, image=logo, fg="white") # centers text, adjusts font, inserts background photo, choice of text color
countdown(label) # runs the counting routine
label.pack() # displays the label with text on top of the image
root.mainloop()
|
mastermwf/SunsetClock.py
|
SunsetClock.py
|
Python
|
gpl-2.0
| 2,666
|
"""Unit tests for the module create-your-company."""
import datetime
import unittest
from unittest import mock
from bob_emploi.common.python import now
from bob_emploi.frontend.api import user_pb2
from bob_emploi.frontend.server.test import base_test
from bob_emploi.frontend.server.test import scoring_test
class AdviceCreateYourCompanyTestCase(scoring_test.ScoringModelTestBase):
    """Unit tests for the "Create your company" scoring model."""

    model_id = 'advice-create-your-company'

    def test_atypic_profile(self) -> None:
        """Test the scoring function before the events with an atypic profile."""
        persona = self._random_persona().clone()
        persona.user_profile.frustrations.append(user_pb2.ATYPIC_PROFILE)
        score = self._score_persona(persona)
        # Atypic profiles are expected to get a boosted score of 2.
        self.assertEqual(2, score, msg=f'Fail for "{persona.name}"')

    def test_not_really_needed_yet(self) -> None:
        """Test the scoring function for someone that has just started their search."""
        persona = self._random_persona().clone()
        self.now = datetime.datetime(2018, 1, 25)
        del persona.user_profile.frustrations[:]
        persona.project.job_search_has_not_started = False
        # NOTE(review): the search start date (2018-12-14) is *after*
        # self.now (2018-01-25) -- presumably intentional to model a
        # search that has not effectively begun; confirm.
        persona.project.job_search_started_at.FromDatetime(datetime.datetime(2018, 12, 14))
        score = self._score_persona(persona)
        self.assertEqual(1, score, msg=f'Fail for "{persona.name}"')
class EndpointTestCase(base_test.ServerTestCase):
    """Unit tests for the project/.../create-your-company endpoint."""

    def setUp(self) -> None:
        super().setUp()
        # Register the advice module so the endpoint is active in tests.
        self._db.advice_modules.insert_one({
            'adviceId': 'create-your-company',
            'triggerScoringModel': 'advice-create-your-company',
            'extraDataFieldName': 'create_your_company_data',
            'isReadyForProd': True,
        })

    def test_close_to_city_with_events(self) -> None:
        """Test close to a city with multiple events."""
        self._db.cities.insert_one({
            '_id': '69266',
            'latitude': 45.7667,
            'longitude': 4.88333,
        })
        self._db.adie_events.insert_many([
            {
                'title': 'Create your company',
                'cityName': 'Lyon',
                'latitude': 45.7589,
                'longitude': 4.84139,
            },
            {
                'title': 'Work as a freelance',
                'cityName': 'Lyon',
            },
            {
                'title': 'Entrepreneur in Paris',
                'cityName': 'Paris',
            },
        ])
        response = self.app.post(
            '/api/advice/create-your-company',
            data='{"projects": [{"city": {"cityId": "69266"}}]}',
            content_type='application/json')
        data = self.json_from_response(response)
        # Only the Lyon events should be returned, closest-match city first.
        self.assertEqual('Lyon', data.get('closeByEvents', {}).get('city'))
        self.assertEqual(
            ['Create your company', 'Work as a freelance'],
            [event.get('title') for event in data.get('closeByEvents', {}).get('events')])

    def test_related_testimonials(self) -> None:
        """Test when testimonials related to the user's project exist."""
        self._db.adie_testimonials.insert_many([
            {
                'author_name': 'Bob',
                'author_job_name': 'coach',
                'link': 'www.here.org',
                'image_link': 'www.image.org',
                'description': 'I will help you',
                'filters': [],
                'preferred_job_group_ids': ['A1', 'B2'],
            },
            {
                'author_name': 'Bill',
                'author_job_name': 'witch',
                'link': 'www.away.org',
                'image_link': 'www.no-image.org',
                'description': 'I will put a spell on you',
                'filters': [],
                'preferred_job_group_ids': ['A2', 'B1'],
            },
            {
                'author_name': 'Lola',
                'author_job_name': 'driver',
                'link': 'www.there.org',
                'image_link': 'www.this-image.org',
                'description': 'I will try to help you',
                'filters': [],
                'preferred_job_group_ids': ['A12', 'B3'],
            },
        ])
        response = self.app.post(
            '/api/advice/create-your-company',
            data='{"projects": [{"targetJob": {"jobGroup": {"romeId": "A1234"}}}]}',
            content_type='application/json')
        data = self.json_from_response(response)
        # "A1" and "A12" are both prefixes of the project's "A1234" job
        # group, so Bob and Lola match; Bill ("A2"/"B1") does not.
        self.assertEqual(2, len(data.get('relatedTestimonials', []).get('testimonials', [])))
        self.assertEqual(
            ['Bob', 'Lola'],
            [testimonial.get('authorName') for testimonial in data.get(
                'relatedTestimonials', []).get('testimonials', [])])

    def test_far_from_any_city_with_events(self) -> None:
        """Test far from any city with events."""
        self._db.cities.insert_one({
            '_id': '67462',
            # Sélestat: closer to Dijon than to Lyon.
            'latitude': 48.2667,
            'longitude': 7.45,
        })
        self._db.adie_events.insert_many([
            {
                'title': 'Create your company',
                'cityName': 'Lyon',
                'latitude': 45.7589,
                'longitude': 4.84139,
            },
            {
                'title': 'Entrepreneur in Dijon',
                'cityName': 'Dijon',
                'latitude': 47.322047,
                'longitude': 5.04148,
            },
        ])
        response = self.app.post(
            '/api/advice/create-your-company',
            data='{"projects": [{"city": {"cityId": "67462"}}]}',
            content_type='application/json')
        data = self.json_from_response(response)
        self.assertEqual({'closeByEvents'}, data.keys())
        # Events are ordered by distance: Dijon first, then Lyon.
        self.assertEqual(
            ['Entrepreneur in Dijon', 'Create your company'],
            [event.get('title') for event in data.get('closeByEvents', {}).get('events')])

    def test_no_location(self) -> None:
        """Test city without no coordinates."""
        self._db.adie_events.insert_many([
            {
                'title': 'Create your company',
                'cityName': 'Lyon',
                'latitude': 45.7589,
                'longitude': 4.84139,
            },
            {
                'title': 'Entrepreneur in Dijon',
                'cityName': 'Dijon',
                'latitude': 47.322047,
                'longitude': 5.04148,
            },
        ])
        response = self.app.post(
            '/api/advice/create-your-company',
            data='{"projects": [{"city": {"cityId": "69266"}}]}',
            content_type='application/json')
        data = self.json_from_response(response)
        self.assertEqual({'closeByEvents'}, data.keys())
        # With no coordinates for the city, order is undefined -- compare
        # as a set.
        self.assertEqual(
            {'Entrepreneur in Dijon', 'Create your company'},
            {event.get('title') for event in data.get('closeByEvents', {}).get('events')})

    def test_no_events(self) -> None:
        """Test without any events."""
        response = self.app.post(
            '/api/advice/create-your-company',
            data='{"projects": [{"city": {"cityId": "69266"}}]}',
            content_type='application/json')
        data = self.json_from_response(response)
        self.assertFalse(data.get('closeByEvents'))

    @mock.patch(now.__name__ + '.get', mock.MagicMock(
        return_value=datetime.datetime(2018, 5, 9)))
    def test_start_date(self) -> None:
        """Test events with start dates."""
        self._db.adie_events.insert_many([
            {
                'title': 'Past date',
                'cityName': 'Lyon',
                'latitude': 45.7589,
                'longitude': 4.84139,
                'startDate': '2018-05-02',
            },
            {
                'title': 'No date',
                'cityName': 'Dijon',
                'latitude': 47.322047,
                'longitude': 5.04148,
            },
            {
                'title': 'Today',
                'cityName': 'Dijon',
                'latitude': 47.322047,
                'longitude': 5.04148,
                'startDate': '2018-05-09',
            },
            {
                'title': 'Future date',
                'cityName': 'Dijon',
                'latitude': 47.322047,
                'longitude': 5.04148,
                'startDate': '2018-06-01',
            },
        ])
        response = self.app.post(
            '/api/advice/create-your-company',
            data='{"projects": [{"city": {"cityId": "69266"}}]}',
            content_type='application/json')
        data = self.json_from_response(response)
        self.assertEqual({'closeByEvents'}, data.keys())
        # Events strictly in the past (before the mocked "now") are dropped.
        self.assertEqual(
            {'No date', 'Today', 'Future date'},
            {event.get('title') for event in data.get('closeByEvents', {}).get('events')})
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
bayesimpact/bob-emploi
|
frontend/server/modules/test/create_your_company_test.py
|
Python
|
gpl-3.0
| 9,054
|
#!/usr/bin/python
__author__ = 'kalcho'
from burp import IBurpExtender
from burp import IContextMenuFactory
from javax.swing import JMenuItem
from java.util import List, ArrayList
from java.net import URL
import re
from datetime import datetime
from HTMLParser import HTMLParser
class TagStripper(HTMLParser):
    """HTMLParser subclass that discards tags, collecting only the text
    (and comment) content of a page. Python 2 (uses the old HTMLParser
    module name)."""

    def __init__(self):
        HTMLParser.__init__(self)
        self.page_text = []  # accumulated text fragments

    def handle_data(self, data):
        # Visible text between tags.
        self.page_text.append(data)

    def handle_comment(self, data):
        # HTML comments often leak useful words too; treat them as text.
        self.handle_data(data)

    def strip(self, html):
        self.feed(html)
        return " ".join(self.page_text)
class BurpExtender(IBurpExtender, IContextMenuFactory):
    """Burp Suite extension (Jython/Python 2) that builds a password
    wordlist from the text of the HTTP responses the user selects."""

    def registerExtenderCallbacks(self, callbacks):
        # Standard Burp bootstrap: keep the callback/helper objects and
        # register ourselves as a context-menu factory.
        self._callbacks = callbacks
        self._helpers = callbacks.getHelpers()
        self.context = None
        self.hosts = set()

        # Start with something we know is common
        self.wordlist = set(["password"])

        # we set up our extension
        callbacks.setExtensionName("BHP Wordlist")
        callbacks.registerContextMenuFactory(self)

        return

    def createMenuItems(self, context_menu):
        # Called by Burp to populate the right-click context menu.
        self.context = context_menu
        menu_list = ArrayList()
        menu_list.add(JMenuItem("Create Wordlist",
                                actionPerformed=self.wordlist_menu))
        return menu_list

    def wordlist_menu(self, event):
        # grab the details of what the user clicked
        http_traffic = self.context.getSelectedMessages()

        for traffic in http_traffic:
            http_service = traffic.getHttpService()
            host = http_service.getHost()

            self.hosts.add(host)

            http_response = traffic.getResponse()
            if http_response:
                self.get_words(http_response)

        self.display_wordlist()
        return

    def get_words(self, http_response):
        # Split headers from body at the first blank line.
        headers, body = http_response.tostring().split('\r\n\r\n', 1)

        # skip non-text responses
        if headers.lower().find("content-type: text") == -1:
            return

        tag_stripper = TagStripper()
        page_text = tag_stripper.strip(body)

        # Candidate words: a letter followed by 2+ word characters.
        words = re.findall("[a-zA-Z]\w{2,}", page_text)

        for word in words:
            # filter out long strings
            if len(word) <= 12:
                self.wordlist.add(word.lower())
        return

    def mangle(self, word):
        # Derive common password variants: bare and capitalized forms,
        # each with "", "1", "!" or the current year appended.
        year = datetime.now().year
        suffixes = ["", "1", "!", year]
        mangled = []

        for password in (word, word.capitalize()):
            for suffix in suffixes:
                mangled.append("%s%s" % (password, suffix))

        return mangled

    def display_wordlist(self):
        # John the Ripper style comment header, then one candidate per line.
        print "#!comment: BHP Wordlist for site(s) %s" % ", ".join(self.hosts)
        for word in sorted(self.wordlist):
            for password in self.mangle(word):
                print password
        return
|
kalcho83/black-hat-python
|
bhp_wordlist.py
|
Python
|
gpl-3.0
| 3,139
|
from django.shortcuts import render
from widgets.models import BackgroundImages, Widget
def home(request):
    """Render the static landing page."""
    context = {}
    return render(request, 'home.html', context)
def personalized(request):
    """Render the personalized dashboard with widgets and backgrounds."""
    # NOTE(review): top and featured currently use the identical
    # unfiltered queryset -- presumably placeholders for real ranking
    # logic; confirm.
    top_widgets = Widget.objects.all()
    featured_widgets = Widget.objects.all()
    backgrounds = BackgroundImages.objects.all()
    user = request.user
    context = {'top_widgets': top_widgets, 'featured_widgets': featured_widgets, 'backgrounds': backgrounds, 'user': user}
    return render(request, 'personalized.html', context)
|
malikshahzad228/widget-jack
|
widgets/views.py
|
Python
|
mit
| 543
|
import numpy as np
from keras.layers import containers
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, MaxoutDense, Activation
from keras.optimizers import SGD, RMSprop, Adagrad, Adam
from keras.regularizers import l2
from keras.callbacks import EarlyStopping
# import matplotlib.pyplot as plt
# import matplotlib.animation as animation
# from matplotlib.colors import LinearSegmentedColormap
# from matplotlib.colors import LogNorm
# class DrawWeights(keras.callbacks.Callback):
# def __init__(self, figsize, layer_id=0, param_id=0, weight_slice=(slice(None), 0)):
# self.layer_id = layer_id
# self.param_id = param_id
# self.weight_slice = weight_slice
# # Initialize the figure and axis
# self.fig = plt.figure(figsize=figsize)
# self.ax = self.fig.add_subplot(1, 1, 1)
# def on_train_begin(self):
# self.imgs = []
# def on_batch_end(self, batch, indices, loss, accuracy):
# # Get a snapshot of the weight matrix every 5 batches
# if batch % 5 == 0:
# # Access the full weight matrix
# weights = self.model.layers[self.layer_id].params[self.param_id].get_value()
# # Create the frame and add it to the animation
# img = self.ax.imshow(weights[self.weight_slice], interpolation='nearest')
# self.imgs.append(img)
# def on_train_end(self):
# # Once the training has ended, display the animation
# anim = animation.ArtistAnimation(self.fig, self.imgs, interval=10, blit=False)
# plt.show()
# Pre-split jet data: 'X' has 625 features per sample (the first Dense
# layer takes 625 inputs -- presumably flattened 25x25 jet images; TODO
# confirm against the npz files), 'y' is the binary signal label.
train = np.load('./wprime800_QCD200-600_train.npz')
test = np.load('./wprime800_QCD200-600_test.npz')
weights = np.load('./wprime800_QCD200-600_train_weights.npz')['weights']

# -- build the model
# NOTE(review): Dense(n_in, n_out) is the old Keras 0.x API.
# Funnel architecture 625 -> 500 -> 256 -> 128 -> 64 -> 25 -> 1, relu/tanh
# activations, light L2 weight decay and 10% dropout per layer.
dl = Sequential()
dl.add(Dense(625, 500, W_regularizer=l2(0.0001)))
dl.add(Activation('relu'))
dl.add(Dropout(0.1))
dl.add(Dense(500, 256, W_regularizer=l2(0.0001)))
dl.add(Activation('relu'))
dl.add(Dropout(0.1))
dl.add(Dense(256, 128, W_regularizer=l2(0.0001)))
dl.add(Activation('relu'))
dl.add(Dropout(0.1))
dl.add(Dense(128, 64, W_regularizer=l2(0.0001)))
dl.add(Activation('tanh'))
dl.add(Dropout(0.1))
dl.add(Dense(64, 25))
dl.add(Activation('tanh'))
dl.add(Dropout(0.1))
dl.add(Dense(25, 1))
dl.add(Activation('sigmoid'))

# Binary classification: cross-entropy loss, Adam optimizer.
dl.compile(loss='binary_crossentropy', optimizer=Adam(), class_mode='binary')

# -- train!
# Per-sample weights re-balance the classes; early stopping halts after
# 2 epochs without validation improvement.
dl.fit(train['X'], train['y'], validation_data=(test['X'], test['y']),
       batch_size=256,
       nb_epoch=100,
       callbacks=[
           EarlyStopping(verbose=True, patience=2)
       ],
       sample_weight=weights,
       show_accuracy=True,
       verbose=2)
# Persist the trained model: architecture as YAML, weights as HDF5.
# BUGFIX: the file must be opened for *writing* -- the original used the
# default read-only mode, so f.write() raised io.UnsupportedOperation
# and the architecture was never saved.
with open('deepjets.yaml', 'w') as f:
    f.write(dl.to_yaml())

dl.save_weights('deepjets.h5')
|
ml-slac/deep-jets
|
train.py
|
Python
|
mit
| 2,806
|
from pycp2k.inputsection import InputSection
class _move_type2(InputSection):
    """Auto-generated wrapper for the CP2K &MOVE_TYPE input section
    (variant 2): a Monte-Carlo move type with size, probability and
    initial acceptance probability keywords."""

    def __init__(self):
        InputSection.__init__(self)
        self.Section_parameters = None
        self.Size = None
        self.Prob = None
        self.Init_acc_prob = None
        # ATOMS may appear several times, hence a list.
        self.Atoms = []
        self._name = "MOVE_TYPE"
        # Maps Python attribute names to CP2K keyword names.
        self._keywords = {'Prob': 'PROB', 'Init_acc_prob': 'INIT_ACC_PROB', 'Size': 'SIZE'}
        self._repeated_keywords = {'Atoms': 'ATOMS'}
        self._attributes = ['Section_parameters']
|
SINGROUP/pycp2k
|
pycp2k/classes/_move_type2.py
|
Python
|
lgpl-3.0
| 516
|
from __future__ import absolute_import
from __future__ import unicode_literals
from datetime import timedelta
from django import forms
from django.db.models import Q
from django.db.models.sql.constants import QUERY_TERMS
from django.utils import six
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from .fields import (
RangeField, LookupTypeField, Lookup, DateRangeField, TimeRangeField)
__all__ = [
'Filter', 'CharFilter', 'BooleanFilter', 'ChoiceFilter',
'TypedChoiceFilter', 'MultipleChoiceFilter', 'DateFilter',
'DateTimeFilter', 'TimeFilter', 'ModelChoiceFilter',
'ModelMultipleChoiceFilter', 'NumberFilter', 'NumericRangeFilter', 'RangeFilter',
'DateRangeFilter', 'DateFromToRangeFilter', 'TimeRangeFilter',
'AllValuesFilter', 'MethodFilter'
]
LOOKUP_TYPES = sorted(QUERY_TERMS)
class Filter(object):
    """Base class for all django-filter filters.

    Translates a form field value into a queryset ``filter()`` /
    ``exclude()`` call on ``<name>__<lookup_type>``.
    """

    # Tracks declaration order of filters on a FilterSet.
    creation_counter = 0
    field_class = forms.Field

    def __init__(self, name=None, label=None, widget=None, action=None,
                 lookup_type='exact', required=False, distinct=False, exclude=False, **kwargs):
        self.name = name
        self.label = label
        if action:
            # Legacy hook: a callable that replaces the filter() method.
            self.filter = action
        self.lookup_type = lookup_type
        self.widget = widget
        self.required = required
        # Remaining kwargs are forwarded to the form field constructor.
        self.extra = kwargs
        self.distinct = distinct
        self.exclude = exclude

        self.creation_counter = Filter.creation_counter
        Filter.creation_counter += 1

    @property
    def field(self):
        # Build (and cache) the form field used to render this filter.
        if not hasattr(self, '_field'):
            help_text = self.extra.pop('help_text', None)
            if help_text is None:
                help_text = _('This is an exclusion filter') if self.exclude else _('Filter')
            if (self.lookup_type is None or
                    isinstance(self.lookup_type, (list, tuple))):
                # Multiple (or all) lookups allowed: expose the lookup
                # choice as a second sub-field via LookupTypeField.
                if self.lookup_type is None:
                    lookup = [(x, x) for x in LOOKUP_TYPES]
                else:
                    lookup = [
                        (x, x) for x in LOOKUP_TYPES if x in self.lookup_type]
                self._field = LookupTypeField(self.field_class(
                    required=self.required, widget=self.widget, **self.extra),
                    lookup, required=self.required, label=self.label, help_text=help_text)
            else:
                self._field = self.field_class(required=self.required,
                                               label=self.label, widget=self.widget,
                                               help_text=help_text, **self.extra)
        return self._field

    def filter(self, qs, value):
        if isinstance(value, Lookup):
            # Value from a LookupTypeField carries its own lookup type.
            lookup = six.text_type(value.lookup_type)
            value = value.value
        else:
            lookup = self.lookup_type
        if value in ([], (), {}, None, ''):
            # Empty values mean "do not filter".
            return qs
        method = qs.exclude if self.exclude else qs.filter
        qs = method(**{'%s__%s' % (self.name, lookup): value})
        if self.distinct:
            qs = qs.distinct()
        return qs
class CharFilter(Filter):
    """Filter rendered as a plain text input."""
    field_class = forms.CharField
class BooleanFilter(Filter):
    """Filter on a nullable boolean value.

    ``None`` (no choice made) leaves the queryset untouched.  Fix: this
    now honours ``exclude`` and ``distinct`` like every other Filter
    subclass (the previous implementation silently ignored both).
    """
    field_class = forms.NullBooleanField

    def filter(self, qs, value):
        if value is None:
            return qs
        method = qs.exclude if self.exclude else qs.filter
        qs = method(**{self.name: value})
        if self.distinct:
            qs = qs.distinct()
        return qs
class ChoiceFilter(Filter):
    """Filter against a fixed set of choices."""
    field_class = forms.ChoiceField
class TypedChoiceFilter(Filter):
    """Choice filter backed by ``forms.TypedChoiceField``."""
    field_class = forms.TypedChoiceField
class MultipleChoiceFilter(Filter):
    """
    This filter performs an OR (default) or AND (``conjoined=True``) query
    on the selected options.

    Advanced Use
    ------------
    Depending on your application logic, when all or no choices are selected, filtering may be a noop. In this case you may wish to avoid the filtering overhead, particularly of the `distinct` call.
    Set `always_filter` to False after instantiation to enable the default `is_noop` test.
    Override `is_noop` if you require a different test for your application.
    """
    field_class = forms.MultipleChoiceField
    always_filter = True

    def __init__(self, *args, **kwargs):
        self.conjoined = kwargs.pop('conjoined', False)
        super(MultipleChoiceFilter, self).__init__(*args, **kwargs)

    def is_noop(self, qs, value):
        """
        Return True to short-circuit unnecessary and potentially slow filtering.
        """
        if self.always_filter:
            return False
        # A reasonable default for being a noop: required field with
        # every choice selected.
        return bool(self.required and len(value) == len(self.field.choices))

    def filter(self, qs, value):
        selected = value or ()  # guarantee an iterable
        if self.is_noop(qs, selected):
            return qs
        # Even though not a noop, there is no point filtering if empty.
        if not selected:
            return qs
        if self.conjoined:
            # AND semantics: chain one filter() call per selected option.
            for option in selected:
                qs = qs.filter(**{self.name: option})
            return qs
        # OR semantics: one Q per option, OR-ed together, then de-duped.
        predicate = Q()
        for option in selected:
            predicate |= Q(**{self.name: option})
        return qs.filter(predicate).distinct()
class DateFilter(Filter):
    """Filter by a calendar date."""
    field_class = forms.DateField
class DateTimeFilter(Filter):
    """Filter by date and time."""
    field_class = forms.DateTimeField
class TimeFilter(Filter):
    """Filter by time of day."""
    field_class = forms.TimeField
class ModelChoiceFilter(Filter):
    """Filter by a single related model instance."""
    field_class = forms.ModelChoiceField
class ModelMultipleChoiceFilter(MultipleChoiceFilter):
    """Multiple-choice filter over related model instances."""
    field_class = forms.ModelMultipleChoiceField
class NumberFilter(Filter):
    """Filter by a numeric (decimal) value."""
    field_class = forms.DecimalField
class NumericRangeFilter(Filter):
    """Filter a numeric range from a two-part (start, stop) value.

    When both bounds are supplied the configured ``lookup_type`` is
    applied to the pair; a single bound falls back to the
    ``startswith``/``endswith`` lookups this project uses.
    """
    field_class = RangeField

    def filter(self, qs, value):
        if value:
            # Bug fix: compare against None explicitly -- a bound of 0 is
            # falsy but is still a real, user-supplied limit.
            if value.start is not None and value.stop is not None:
                lookup = '%s__%s' % (self.name, self.lookup_type)
                return qs.filter(**{lookup: (value.start, value.stop)})
            if value.start is not None:
                qs = qs.filter(**{'%s__startswith' % self.name: value.start})
            if value.stop is not None:
                qs = qs.filter(**{'%s__endswith' % self.name: value.stop})
        return qs
class RangeFilter(Filter):
    """Filter a value between ``start`` and ``stop`` (inclusive).

    Both bounds present -> a single ``__range`` lookup; one bound ->
    ``__gte`` / ``__lte`` respectively.
    """
    field_class = RangeField

    def filter(self, qs, value):
        if value:
            # Bug fix: use ``is not None`` so a legitimate 0 bound is not
            # silently dropped (0 is falsy).
            if value.start is not None and value.stop is not None:
                lookup = '%s__range' % self.name
                return qs.filter(**{lookup: (value.start, value.stop)})
            if value.start is not None:
                qs = qs.filter(**{'%s__gte' % self.name: value.start})
            if value.stop is not None:
                qs = qs.filter(**{'%s__lte' % self.name: value.stop})
        return qs
_truncate = lambda dt: dt.replace(hour=0, minute=0, second=0)
class DateRangeFilter(ChoiceFilter):
    """Choice filter offering a fixed menu of common date ranges."""
    # Maps the submitted choice value to (label, queryset transform).
    options = {
        '': (_('Any date'), lambda qs, name: qs.all()),
        1: (_('Today'), lambda qs, name: qs.filter(**{
            '%s__year' % name: now().year,
            '%s__month' % name: now().month,
            '%s__day' % name: now().day
        })),
        2: (_('Past 7 days'), lambda qs, name: qs.filter(**{
            '%s__gte' % name: _truncate(now() - timedelta(days=7)),
            '%s__lt' % name: _truncate(now() + timedelta(days=1)),
        })),
        3: (_('This month'), lambda qs, name: qs.filter(**{
            '%s__year' % name: now().year,
            '%s__month' % name: now().month
        })),
        4: (_('This year'), lambda qs, name: qs.filter(**{
            '%s__year' % name: now().year,
        })),
        5: (_('Yesterday'), lambda qs, name: qs.filter(**{
            '%s__year' % name: now().year,
            '%s__month' % name: now().month,
            '%s__day' % name: (now() - timedelta(days=1)).day,
        })),
    }

    def __init__(self, *args, **kwargs):
        kwargs['choices'] = [
            (key, value[0]) for key, value in six.iteritems(self.options)]
        super(DateRangeFilter, self).__init__(*args, **kwargs)

    def filter(self, qs, value):
        try:
            value = int(value)
        except (ValueError, TypeError):
            value = ''
        if value not in self.options:
            # Bug fix: an unknown integer used to raise KeyError; treat it
            # as "any date" instead.
            value = ''
        return self.options[value][1](qs, self.name)
class DateFromToRangeFilter(RangeFilter):
    """Range filter taking separate "from" and "to" dates."""
    field_class = DateRangeField
class TimeRangeFilter(RangeFilter):
    """Range filter taking separate "from" and "to" times."""
    field_class = TimeRangeField
class AllValuesFilter(ChoiceFilter):
    """Choice filter whose choices are every distinct value currently
    stored for this field.

    NOTE(review): relies on ``self.model`` being assigned externally
    (presumably by the owning FilterSet -- confirm); choices are
    recomputed from the database on every ``field`` access.
    """
    @property
    def field(self):
        qs = self.model._default_manager.distinct()
        qs = qs.order_by(self.name).values_list(self.name, flat=True)
        self.extra['choices'] = [(o, o) for o in qs]
        return super(AllValuesFilter, self).field
class MethodFilter(Filter):
    """
    This filter will allow you to run a method that exists on the filterset
    class.
    """
    def __init__(self, *args, **kwargs):
        # Get the action out of the kwargs
        action = kwargs.get('action', None)
        # If the action is a string, remember the method name and
        # substitute our own ``filter`` proxy, so the parent still treats
        # it as a filter with a callable action (backwards compatible).
        self.parent_action = ''
        # Bug fix: isinstance instead of ``type(...) in (...)`` so that
        # str subclasses (and lazy/translated strings) are recognised too.
        if isinstance(action, (str, six.text_type)):
            self.parent_action = str(action)
            kwargs.update({
                'action': self.filter
            })
        # Call the parent
        super(MethodFilter, self).__init__(*args, **kwargs)

    def filter(self, qs, value):
        """
        Proxy for the actual filtering method.

        Tries ``self.parent_action`` on the parent filterset first, then
        falls back to ``filter_<name>`` (the docstring previously named a
        non-existent ``field_<name>`` fallback).  If neither exists, the
        queryset is returned unchanged.
        """
        parent = getattr(self, 'parent', None)
        parent_filter_method = getattr(parent, self.parent_action, None)
        if not parent_filter_method:
            func_str = 'filter_{0}'.format(self.name)
            parent_filter_method = getattr(parent, func_str, None)
        if parent_filter_method is not None:
            return parent_filter_method(qs, value)
        return qs
|
koirikivi/django-filter
|
django_filters/filters.py
|
Python
|
bsd-3-clause
| 10,135
|
"""
Resource relationship class.
This file is part of the everest project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Created on Apr 12, 2013.
"""
from everest.relationship import RELATIONSHIP_DIRECTIONS
from everest.relationship import Relationship
# Docstrings in this module use reStructuredText markup.
__docformat__ = 'reStructuredText en'
# Public API of this module.
__all__ = ['ResourceRelationship',
           ]
class ResourceRelationship(Relationship):
    """
    Relationship between resources.

    Thin resource-level wrapper that delegates add/remove operations to
    the equivalent domain (entity-level) relationship, unwrapping
    resources to their entities on the way.
    """
    def __init__(self, relator, descriptor,
                 direction=RELATIONSHIP_DIRECTIONS.BIDIRECTIONAL):
        # relator: the resource owning the relationship; descriptor:
        # describes the related attribute (supplies make_relationship).
        Relationship.__init__(self, relator, descriptor, direction)
        # Lazily created domain-level counterpart (see property below).
        self.__domain_relationship = None

    @property
    def domain_relationship(self):
        """
        Returns a domain relationship equivalent with this resource
        relationship.
        """
        if self.__domain_relationship is None:
            ent = self.relator.get_entity()
            self.__domain_relationship = \
                self.descriptor.make_relationship(ent)
        return self.__domain_relationship

    def add(self, related, direction=None, safe=False):
        # Delegate to the domain relationship, unwrapping the resource.
        self.domain_relationship.add(related.get_entity(),
                                     direction=direction,
                                     safe=safe)

    def remove(self, related, direction=None, safe=False):
        # Delegate to the domain relationship, unwrapping the resource.
        self.domain_relationship.remove(related.get_entity(),
                                        direction=direction,
                                        safe=safe)

    def _get_specification_attributes(self):
        # Attribute pair describing both ends of the relationship.
        return self.descriptor.resource_attr, self.descriptor.resource_backref
|
helixyte/everest
|
everest/resources/relationship.py
|
Python
|
mit
| 1,672
|
"""Copyright 2011 The University of Michigan
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Authors - Jie Yu (jieyu@umich.edu)
"""
import os
from maple.core import logging
from maple.core import static_info
from maple.core import testing
from maple.race import testing as race_testing
from maple.systematic import program
from maple.systematic import search
class ChessTestCase(testing.DeathTestCase):
    """ Run a test under the CHESS scheduler.
    """
    def __init__(self, test, mode, threshold, controller):
        testing.DeathTestCase.__init__(self, test, mode, threshold)
        self.controller = controller

    def threshold_check(self):
        # Stop when either the systematic search is exhausted or the
        # generic death-test threshold is reached.
        if self.search_done():
            return True
        if testing.DeathTestCase.threshold_check(self):
            return True
        return False

    def search_done(self):
        """Reload the persisted search state and report whether the
        systematic search has explored every interleaving."""
        sinfo = static_info.StaticInfo()
        sinfo.load(self.controller.knobs['sinfo_out'])
        prog = program.Program(sinfo)
        prog.load(self.controller.knobs['program_out'])
        # Bug fix: pass the loaded Program instance, not the ``program``
        # *module* (the original passed the module and never used ``prog``).
        search_info = search.SearchInfo(sinfo, prog)
        search_info.load(self.controller.knobs['search_out'])
        return search_info.done()

    def after_each_test(self):
        # Log one line of progress after every CHESS iteration.
        iteration = len(self.test_history)
        used_time = self.test_history[-1].used_time()
        logging.msg('=== chess iteration %d done === (%f) (%s)\n' % (iteration, used_time, os.getcwd()))

    def after_all_tests(self):
        if self.is_fatal():
            logging.msg('chess fatal error detected\n')
        else:
            logging.msg('chess threshold reached\n')

    def log_stat(self):
        """Log total run count and wall time for this CHESS session."""
        runs = len(self.test_history)
        used_time = self.used_time()
        logging.msg('%-15s %d\n' % ('chess_runs', runs))
        logging.msg('%-15s %f\n' % ('chess_time', used_time))
class RaceTestCase(race_testing.TestCase):
    """ Run race detector to find all racy instructions.
    """
    def __init__(self, test, mode, threshold, profiler):
        # Thin wrapper: everything is delegated to the race-testing base.
        race_testing.TestCase.__init__(self, test, mode, threshold, profiler)
class ChessRaceTestCase(testing.TestCase):
    """ Run race detector to find all racy instructions first, and
    then run the chess scheduler with sched_race on.
    """
    def __init__(self, race_testcase, chess_testcase):
        testing.TestCase.__init__(self)
        self.race_testcase = race_testcase
        self.chess_testcase = chess_testcase

    def is_fatal(self):
        assert self.done
        # Fatal if either phase detected a fatal error (was a verbose
        # if/else returning True/False).
        return self.race_testcase.is_fatal() or self.chess_testcase.is_fatal()

    def body(self):
        self.race_testcase.run()
        # Only run the CHESS phase when the race phase was not fatal.
        run_chess = not self.race_testcase.is_fatal()
        if run_chess:
            self.chess_testcase.run()
        # Summary logging was duplicated across both branches; emit once.
        logging.msg('\n')
        logging.msg('---------------------------\n')
        self.race_testcase.log_stat()
        if run_chess:
            self.chess_testcase.log_stat()
|
jieyu/maple
|
script/maple/systematic/testing.py
|
Python
|
apache-2.0
| 3,528
|
def resolve_asset(request, path):
    """Resolve an ``identifier:path`` asset reference to a URL.

    ``cdn:...`` resolves against the configured CDN base URL;
    ``static:...`` goes through Pyramid's static path machinery.
    Unknown identifiers yield ``None``.
    """
    parts = path.split(":")
    identifier = parts.pop(0)
    # Bug fix: re-join the remaining segments with ":".  The original did
    # ``path.join(tmp)``, i.e. str.join using the *whole path string* as
    # the separator, producing garbage for multi-segment paths.
    path = ":".join(parts)
    if identifier == "cdn":
        return request.registry.settings["cdn_url"] + "/" + path
    if identifier == "static":
        return request.static_path(path)
def configure(config):
    """Pyramid configuration hook: register the static images view and
    expose ``request.resolve`` for asset URL resolution."""
    config.add_static_view('imgs', 'temporals_web:assets/imgs',
                           cache_max_age=3600)
    # Makes resolve_asset available as request.resolve(path).
    config.add_request_method(resolve_asset, "resolve")
|
404d/Temporals-Web
|
temporals_web/assets/__init__.py
|
Python
|
mit
| 474
|
# System imports
import random
# External imports
from unstuck import *
# Local imports
from .share import *
from .serialize import *
from .service import *
from .handshake import *
# Exports
__all__ = ["BusMasterInterface", "BusMaster", "BusMasterService",
"busMasterService", "busClientService"]
class BusMasterInterface(TransverseObjectInterface):
    """Transverse interface describing the operations a bus master
    exposes to bus clients: ID allocation, service offer/discovery and
    route/transport setup."""

    def getNeonateID(self) -> ConnectionID:
        # Allocate a connection ID for a newly attached client.
        pass

    def offer(self, offer:ServiceOfferingInterface, name:TransverseID):
        # Publish a service offering under `name`.
        pass

    def discover(self, name:TransverseID) -> SerialID:
        # Look up a published offering; returns an opaque connection token.
        pass

    def connect(self, localRoute:OpenRouteInterface, remoteToken:SerialID):
        # Join a local route to the remote route behind `remoteToken`.
        pass

    @notification
    def requestConnection(self, request:OpenTransportInterface,
                          remoteBusID:BusID):
        # One-way notification asking for a transport to another bus.
        pass

    def registerServer(self, server:TransportServer, outCode:TransverseID):
        # Register a transport server reachable via `outCode`.
        pass
#TODO discover and connect to use a "share remote object" type interface
@implements(BusMasterInterface)
class BusMaster:
    """Concrete bus master: tracks service offerings, connection tokens,
    transport servers and pending inter-bus connection requests.

    NOTE(review): ``await``/``async`` here are names from the ``unstuck``
    library (imported with *), not Python keywords.
    """

    def __init__(self, bus):
        self.bus = bus
        self.register = {}          # service name -> ServiceOffering
        self.tokenRegistry = {}     # public token -> offering-supplied token
        self.connectionCount = -1   # last issued connection serial
        self.servers = {}           # busID -> (server, outCode)
        self.waiting = {}           # (busID, busID) -> Future used for pairing

    def getNeonateID(self):
        # Issue the next connection ID, serialized to bytes.
        self.connectionCount +=1
        connectID = SerialID.integerToBytes(self.connectionCount)
        return connectID

    def offer(self, offer, name):
        # Publish (or replace) the offering registered under `name`.
        self.register[name] = offer

    def translateToken(self, token):
        # Wrap the offering's token behind a fresh random 32-bit public
        # token; loop until an unused token is found.
        while True:
            innerToken = random.getrandbits(32)
            innerToken = SerialID.integerToBytes(innerToken)
            if not innerToken in self.tokenRegistry:
                break
        self.tokenRegistry[innerToken] = token
        return innerToken

    def discover(self, name):
        # Ask the named offering for a token and publish a translation.
        serviceOffering = self.register[name]
        token = serviceOffering.request()
        return self.translateToken(token)

    def registerServer(self, server, outCode):
        # Key the server by the bus it lives on (remote proxy or local).
        if isinstance(server, ProxyObject):
            myBusID = server.destination.transport.remoteBusID
        else:
            myBusID = self.bus.busID
        self.servers[myBusID] = server, outCode

    def requestConnection(self, request, remoteBusID):
        if isinstance(request, ProxyObject):
            myBusID = request.destination.transport.remoteBusID
        else:
            myBusID = self.bus.busID
        #TODO make this a little more sensible
        # Block until the matching request from the peer bus arrives; both
        # sides then share the same shiboleth.
        shiboleth = await(self.awaitSecondConnection(myBusID, remoteBusID))
        if not myBusID in self.servers and not remoteBusID in self.servers:
            raise(Exception)
        # When both buses have servers, the one with the smaller busID
        # becomes the accepting side (deterministic tie-break).
        if (myBusID in self.servers
            and (not remoteBusID in self.servers or myBusID < remoteBusID)):
            server, _ = self.servers[myBusID]
            request.accept(server, shiboleth)
        else:
            _, code = self.servers[remoteBusID]
            request.connect(code, shiboleth)

    def generateShiboleth(self):
        # NOTE(review): placeholder value -- not random, not secure.
        return b"NOTRANDOM"

    @asynchronous
    def awaitSecondConnection(self, myBusID, remoteBusID):
        # Not thread safe
        # First arrival parks a Future under the reversed key; the second
        # arrival pops it and delivers the shared shiboleth to both sides.
        if not (myBusID, remoteBusID) in self.waiting:
            self.waiting[(remoteBusID, myBusID)] = fut = Future()
            return (yield from fut)
        else:
            shiboleth = self.generateShiboleth()
            fut = self.waiting.pop((myBusID, remoteBusID))
            fut.setResult(shiboleth)
            return shiboleth

    def connect(self, routeA, remoteToken):
        # Resolve and consume the one-shot public token.
        routeB = self.tokenRegistry[remoteToken]
        del self.tokenRegistry[remoteToken]
        # Work out the transport endpoint token for each route end: the
        # peer's remote bus ID for proxies, our own bus ID otherwise.
        if isinstance(routeA, ProxyObject):
            transportA = routeA.destination.transport
            transportTokenA = transportA.remoteBusID
        else:
            transportA = None
            transportTokenA = self.bus.busID
        connectionIDA = routeA.getConnectionID()
        if isinstance(routeB, ProxyObject):
            transportB = routeB.destination.transport
            transportTokenB = transportB.remoteBusID
        else:
            transportB = None
            transportTokenB = self.bus.busID
        connectionIDB = routeB.getConnectionID()
        # Exchange endpoint tokens; remote calls are issued concurrently
        # (.async) and awaited when both ends are proxies.
        if isinstance(routeA, ProxyObject) and isinstance(routeB, ProxyObject):
            localTokenA = routeA.supplyEndpointBus.async(transportTokenB)
            localTokenB = routeB.supplyEndpointBus.async(transportTokenA)
            localTokenA = await(localTokenA)
            localTokenB = await(localTokenB)
        elif isinstance(routeB, ProxyObject):
            localTokenB = routeB.supplyEndpointBus.async(transportTokenA)
            localTokenA = routeA.supplyEndpointBus(transportTokenB)
            localTokenB = await(localTokenB)
        elif isinstance(routeA, ProxyObject):
            localTokenA = routeA.supplyEndpointBus.async(transportTokenB)
            localTokenB = routeB.supplyEndpointBus(transportTokenA)
            localTokenA = await(localTokenA)
        else:
            localTokenA = routeA.supplyEndpointBus(transportTokenB)
            localTokenB = routeB.supplyEndpointBus(transportTokenA)
        # Complete both half-routes with the peer's token and connection ID.
        routeA.completeRoute(localTokenB, connectionIDB)
        routeB.completeRoute(localTokenA, connectionIDA)
@transverseDef
def getBusMasterInterface(connection: GetMyConnection) -> (BusMasterInterface):
    # Transverse declaration only: fetch the bus master for a connection.
    pass
def getBusMaster(connection):
    """Return the bus master object owned by this connection's bus."""
    return connection.bus.busMaster
class BusMasterService(Service):
    # Service interface exposed by the bus-master side.
    BusMaster = BusMasterInterface
    getBusMaster = getBusMasterInterface


# Concrete implementation binding for the master-side service.
busMasterService = BusMasterService.implementation(
    BusMaster = BusMaster,
    getBusMaster = getBusMaster)
class BusClientService(Service):
    # Service interface every bus client must provide.
    OpenRoute = OpenRouteInterface
    ServiceOffering = ServiceOfferingInterface
    OpenTransport = OpenTransportInterface


# Concrete implementation binding for the client-side service.
busClientService = BusClientService.implementation(
    OpenRoute = OpenRoute,
    ServiceOffering = ServiceOffering,
    OpenTransport = OpenTransport)
|
disnesquick/ripley
|
scratch/py/backups/backup-20150624a/bus_master.py
|
Python
|
gpl-2.0
| 5,251
|
#!/usr/bin/env python
# Use the sample code in example_01.py. Create three functions named
# func1, func2, and func3.
#
# Make func1 print:
# "Hello World"
#
# Make func2 print:
# "It's nice to meet you"
#
# Make func3 print:
# "Howdeeeee"
# Put your code here:
# Now, make a new function called `using_functions`.
# Make it take three arguments (name the arguments as you see fit)
# Then, execute each of the arguments that you received.
# For example, if I used arguments 'a', 'b', 'c' (don't use those in
# your answer), my code would look like this:
#
# def using_functions(a, b, c):
# a()
#
# You are left with the exercise of calling all three functions.
|
glenjarvis/decorator_training
|
src/answer01.py
|
Python
|
bsd-3-clause
| 668
|
import os
import sys
import json
import argparse
import logging
from auth import auth as Auth
from databasehandler import CollectionDatabaseWriter
from tweepy import OAuthHandler
from tweepy import Stream
from tweepy.streaming import StreamListener
from httplib import IncompleteRead
from time import sleep
# Module-level logger: warnings and above go to the stream handler.
log = logging.getLogger('pyckaxe')
log.setLevel(logging.WARNING)
handler = logging.StreamHandler()
log.addHandler(handler)

# Collected-tweet databases live under this directory next to the module.
DATABASE_PATH = os.path.join(os.path.dirname(__file__), 'database/')


class PyckaxeException(Exception):
    """Base exception for errors raised by Pyckaxe."""
    pass
class StdOutListener(StreamListener):
    # Std Out Listener meant for debugging/testing
    def on_data(self, data):
        # Echo the raw JSON payload (Python 2 print statement).
        print data
        # Returning True keeps the stream alive.
        return True

    def on_error(self, status):
        # Print the error status; the stream keeps running.
        print status
class CollectListener(StreamListener):
    """Stream listener that persists incoming tweets to a local database.

    Stores id, text, creation time and coordinates for every tweet and,
    when ``verbose``, reports progress on stdout.
    """
    def __init__(self, db, verbose=False):
        super(CollectListener, self).__init__()
        self.db_path = db
        self.db = CollectionDatabaseWriter(self.db_path)
        self.verbose = verbose

    def close(self):
        """Release the underlying database connection."""
        self.db.disconnect_db()

    def on_data(self, data):
        # Collecting id, text, creation time, and coordinates.
        try:
            data = json.loads(data.strip())
            id = data['id_str']
            text = data['text'].strip()
            created_at = data['created_at']
            coords = str(data['coordinates'])
            self.db.add([id, text, created_at, coords])
            if self.verbose:
                sys.stdout.write(
                    '\rTweets collected: %s -- database size: %s kb' %
                    (self.db.entry_count,
                     os.path.getsize(self.db_path) >> 10))
                sys.stdout.flush()
        # ``except X as e`` is valid on Python 2.6+ and 3, unlike the
        # original Python-2-only ``except X, e`` form.  The original
        # ``except Exception: raise`` clause was a no-op and is removed.
        except KeyError:
            # Non-tweet stream messages (limit notices etc.) lack these
            # keys; skip them silently.
            pass

    def on_error(self, error):
        # Errors are deliberately ignored so the stream keeps running.
        pass
class Pyckaxe(object):
def __init__(self, listener, terms, credentials, err_limit=None):
if not isinstance(listener, StreamListener):
raise TypeError('Custom listeners must derive from StreamListener.')
self.listener = listener
self.terms = terms
consumer_key = credentials['consumer_key']
consumer_secret = credentials['consumer_secret']
access_token = credentials['access_token']
access_token_secret = credentials['access_secret']
self.auth = OAuthHandler(consumer_key, consumer_secret)
self.auth.set_access_token(access_token, access_token_secret)
self.err_limit = err_limit
self.stream = None
def gather(self, async=False):
'''
Gather tweets from the Twitter public stream. The data is handled by the
listener provided by the user.
Setting async to True will collect tweets asynchronously and require the
stop method to be called to end collection.
'''
self.stream = Stream(self.auth, self.listener)
try:
self.stream.filter(track=self.terms, async=async)
except IncompleteRead, ir:
# Incomplete reads occur (as far as the community can tell) when our
# stream starts falling behind the live feed.
self.stream = Stream(self.auth, self.listener)
self.stream.filter(track=args.terms)
except KeyboardInterrupt:
self.stream.disconnect()
raise
except Exception, e:
raise PyckaxeException('Encountered an unexpected error - %s' %
str(e))
def stop(self):
'''
Stop the stream. Note that this will only be usable/relevant if we are
gathering tweets asynchronously.
This does NOT handle releasing of custom listeners.
'''
self.stream.disconnect()
if __name__ == '__main__':
    # Command-line entry point: collect tweets matching the given terms
    # into a local database under DATABASE_PATH.
    parser = argparse.ArgumentParser()
    parser.add_argument('--database', help='Provide a database name.')
    parser.add_argument('terms', nargs='+',
                        help='Collect tweets containing the provided term(s).')
    args = parser.parse_args()
    if not args.database:
        # Prompt interactively when no --database flag was given
        # (Python 2 raw_input).
        args.database = raw_input('Please provide a database name: ')
    db = os.path.join(DATABASE_PATH, args.database)
    listener = CollectListener(db, verbose=True)
    auth = Auth('credentials.csv')
    try:
        # Only the first positional argument is split into terms.
        pyck = Pyckaxe(listener, args.terms[0].split(), auth)
        pyck.gather()
    except KeyboardInterrupt:
        # Can't do this from Pyckaxe if we're going to allow custom listeners.
        pyck.listener.close()
        log.warning('\nExiting.')
|
dbernard/Pyckaxe
|
pyckaxe.py
|
Python
|
mit
| 4,606
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.