| text: string (12–1.05M chars) | repo_name: string (5–86 chars) | path: string (4–191 chars) | language: string (1 class) | license: string (15 classes) | size: int32 (12–1.05M) | keyword: list (1–23 items) | text_hash: string (64 chars) |
|---|---|---|---|---|---|---|---|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import json
import frappe.utils
from frappe.utils import cstr, flt, getdate, comma_and, cint
from frappe import _
from frappe.model.mapper import get_mapped_doc
from erpnext.stock.stock_balance import update_bin_qty, get_reserved_qty
from frappe.desk.notifications import clear_doctype_notifications
from erpnext.controllers.recurring_document import month_map, get_next_date
from erpnext.controllers.selling_controller import SellingController
form_grid_templates = {
"items": "templates/form_grid/item_grid.html"
}
class WarehouseRequired(frappe.ValidationError): pass
class SalesOrder(SellingController):
def __init__(self, arg1, arg2=None):
super(SalesOrder, self).__init__(arg1, arg2)
self.prev_link_mapper = {
"Quotation": {
"fieldname": "prevdoc_docname",
"doctype": "Sales Order Item",
"filters": [
["Sales Order Item", "parent", "=", self.name],
["Sales Order Item", "prevdoc_docname", "!=", ""]
]
}
}
def validate(self):
super(SalesOrder, self).validate()
self.validate_order_type()
self.validate_delivery_date()
self.validate_mandatory()
self.validate_proj_cust()
self.validate_po()
self.validate_uom_is_integer("stock_uom", "qty")
self.validate_for_items()
self.validate_warehouse()
self.validate_drop_ship()
from erpnext.stock.doctype.packed_item.packed_item import make_packing_list
make_packing_list(self)
self.validate_with_previous_doc()
self.set_status()
if not self.billing_status: self.billing_status = 'Not Billed'
if not self.delivery_status: self.delivery_status = 'Not Delivered'
def validate_mandatory(self):
# validate transaction date vs. delivery date
if self.delivery_date:
if getdate(self.transaction_date) > getdate(self.delivery_date):
frappe.throw(_("Expected Delivery Date cannot be before Sales Order Date"))
def validate_po(self):
# validate PO date vs. delivery date
if self.po_date and self.delivery_date and getdate(self.po_date) > getdate(self.delivery_date):
frappe.throw(_("Expected Delivery Date cannot be before Purchase Order Date"))
if self.po_no and self.customer:
so = frappe.db.sql("select name from `tabSales Order` \
where ifnull(po_no, '') = %s and name != %s and docstatus < 2\
and customer = %s", (self.po_no, self.name, self.customer))
if so and so[0][0] and not \
cint(frappe.db.get_single_value("Selling Settings", "allow_against_multiple_purchase_orders")):
frappe.msgprint(_("Warning: Sales Order {0} already exists against Customer's Purchase Order {1}").format(so[0][0], self.po_no))
def validate_for_items(self):
check_list = []
for d in self.get('items'):
check_list.append(cstr(d.item_code))
# used for production plan
d.transaction_date = self.transaction_date
tot_avail_qty = frappe.db.sql("select projected_qty from `tabBin` \
where item_code = %s and warehouse = %s", (d.item_code,d.warehouse))
d.projected_qty = tot_avail_qty and flt(tot_avail_qty[0][0]) or 0
# check for same entry multiple times
unique_chk_list = set(check_list)
if len(unique_chk_list) != len(check_list) and \
not cint(frappe.db.get_single_value("Selling Settings", "allow_multiple_items")):
frappe.msgprint(_("Warning: Same item has been entered multiple times."))
def product_bundle_has_stock_item(self, product_bundle):
"""Returns true if product bundle has stock item"""
ret = len(frappe.db.sql("""select i.name from tabItem i, `tabProduct Bundle Item` pbi
where pbi.parent = %s and pbi.item_code = i.name and i.is_stock_item = 1""", product_bundle))
return ret
def validate_sales_mntc_quotation(self):
for d in self.get('items'):
if d.prevdoc_docname:
res = frappe.db.sql("select name from `tabQuotation` where name=%s and order_type = %s", (d.prevdoc_docname, self.order_type))
if not res:
frappe.msgprint(_("Quotation {0} not of type {1}").format(d.prevdoc_docname, self.order_type))
def validate_order_type(self):
super(SalesOrder, self).validate_order_type()
def validate_delivery_date(self):
if self.order_type == 'Sales' and not self.delivery_date:
frappe.throw(_("Please enter 'Expected Delivery Date'"))
self.validate_sales_mntc_quotation()
def validate_proj_cust(self):
if self.project and self.customer_name:
res = frappe.db.sql("""select name from `tabProject` where name = %s
and (customer = %s or ifnull(customer,'')='')""",
(self.project, self.customer))
if not res:
frappe.throw(_("Customer {0} does not belong to project {1}").format(self.customer, self.project))
def validate_warehouse(self):
super(SalesOrder, self).validate_warehouse()
for d in self.get("items"):
if (frappe.db.get_value("Item", d.item_code, "is_stock_item")==1 or
(self.has_product_bundle(d.item_code) and self.product_bundle_has_stock_item(d.item_code))) \
and not d.warehouse and not cint(d.delivered_by_supplier):
frappe.throw(_("Delivery warehouse required for stock item {0}").format(d.item_code),
WarehouseRequired)
def validate_with_previous_doc(self):
super(SalesOrder, self).validate_with_previous_doc({
"Quotation": {
"ref_dn_field": "prevdoc_docname",
"compare_fields": [["company", "="], ["currency", "="]]
}
})
def update_enquiry_status(self, prevdoc, flag):
enq = frappe.db.sql("select t2.prevdoc_docname from `tabQuotation` t1, `tabQuotation Item` t2 where t2.parent = t1.name and t1.name=%s", prevdoc)
if enq:
frappe.db.sql("update `tabOpportunity` set status = %s where name=%s",(flag,enq[0][0]))
def update_prevdoc_status(self, flag):
for quotation in list(set([d.prevdoc_docname for d in self.get("items")])):
if quotation:
doc = frappe.get_doc("Quotation", quotation)
if doc.docstatus==2:
frappe.throw(_("Quotation {0} is cancelled").format(quotation))
doc.set_status(update=True)
doc.update_opportunity()
def validate_drop_ship(self):
for d in self.get('items'):
if d.delivered_by_supplier and not d.supplier:
frappe.throw(_("Row #{0}: Set Supplier for item {1}").format(d.idx, d.item_code))
def on_submit(self):
self.check_credit_limit()
self.update_reserved_qty()
frappe.get_doc('Authorization Control').validate_approving_authority(self.doctype, self.company, self.base_grand_total, self)
self.update_prevdoc_status('submit')
def on_cancel(self):
# Cannot cancel closed SO
if self.status == 'Closed':
frappe.throw(_("Closed order cannot be cancelled. Unclose to cancel."))
self.check_nextdoc_docstatus()
self.update_reserved_qty()
self.update_prevdoc_status('cancel')
frappe.db.set(self, 'status', 'Cancelled')
def check_credit_limit(self):
from erpnext.selling.doctype.customer.customer import check_credit_limit
check_credit_limit(self.customer, self.company)
def check_nextdoc_docstatus(self):
# Checks Delivery Note
submit_dn = frappe.db.sql_list("""select t1.name from `tabDelivery Note` t1,`tabDelivery Note Item` t2
where t1.name = t2.parent and t2.against_sales_order = %s and t1.docstatus = 1""", self.name)
if submit_dn:
frappe.throw(_("Delivery Notes {0} must be cancelled before cancelling this Sales Order").format(comma_and(submit_dn)))
# Checks Sales Invoice
submit_rv = frappe.db.sql_list("""select t1.name
from `tabSales Invoice` t1,`tabSales Invoice Item` t2
where t1.name = t2.parent and t2.sales_order = %s and t1.docstatus = 1""",
self.name)
if submit_rv:
frappe.throw(_("Sales Invoice {0} must be cancelled before cancelling this Sales Order").format(comma_and(submit_rv)))
# check maintenance schedule
submit_ms = frappe.db.sql_list("""select t1.name from `tabMaintenance Schedule` t1,
`tabMaintenance Schedule Item` t2
where t2.parent=t1.name and t2.sales_order = %s and t1.docstatus = 1""", self.name)
if submit_ms:
frappe.throw(_("Maintenance Schedule {0} must be cancelled before cancelling this Sales Order").format(comma_and(submit_ms)))
# check maintenance visit
submit_mv = frappe.db.sql_list("""select t1.name from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2
where t2.parent=t1.name and t2.prevdoc_docname = %s and t1.docstatus = 1""",self.name)
if submit_mv:
frappe.throw(_("Maintenance Visit {0} must be cancelled before cancelling this Sales Order").format(comma_and(submit_mv)))
# check production order
pro_order = frappe.db.sql_list("""select name from `tabProduction Order`
where sales_order = %s and docstatus = 1""", self.name)
if pro_order:
frappe.throw(_("Production Order {0} must be cancelled before cancelling this Sales Order").format(comma_and(pro_order)))
def check_modified_date(self):
mod_db = frappe.db.get_value("Sales Order", self.name, "modified")
date_diff = frappe.db.sql("select TIMEDIFF('%s', '%s')" %
( mod_db, cstr(self.modified)))
if date_diff and date_diff[0][0]:
frappe.throw(_("{0} {1} has been modified. Please refresh.").format(self.doctype, self.name))
def update_status(self, status):
self.check_modified_date()
self.set_status(update=True, status=status)
self.update_reserved_qty()
self.notify_update()
clear_doctype_notifications(self)
def update_reserved_qty(self, so_item_rows=None):
"""update requested qty (before ordered_qty is updated)"""
item_wh_list = []
def _valid_for_reserve(item_code, warehouse):
if item_code and warehouse and [item_code, warehouse] not in item_wh_list \
and frappe.db.get_value("Item", item_code, "is_stock_item"):
item_wh_list.append([item_code, warehouse])
for d in self.get("items"):
if (not so_item_rows or d.name in so_item_rows) and not d.delivered_by_supplier:
if self.has_product_bundle(d.item_code):
for p in self.get("packed_items"):
if p.parent_detail_docname == d.name and p.parent_item == d.item_code:
_valid_for_reserve(p.item_code, p.warehouse)
else:
_valid_for_reserve(d.item_code, d.warehouse)
for item_code, warehouse in item_wh_list:
update_bin_qty(item_code, warehouse, {
"reserved_qty": get_reserved_qty(item_code, warehouse)
})
def on_update(self):
pass
def before_update_after_submit(self):
self.validate_drop_ship()
self.validate_supplier_after_submit()
def validate_supplier_after_submit(self):
"""Check that supplier is the same after submit if PO is already made"""
exc_list = []
for item in self.items:
if item.supplier:
supplier = frappe.db.get_value("Sales Order Item", {"parent": self.name, "item_code": item.item_code},
"supplier")
if item.ordered_qty > 0.0 and item.supplier != supplier:
exc_list.append(_("Row #{0}: Not allowed to change Supplier as Purchase Order already exists").format(item.idx))
if exc_list:
frappe.throw('\n'.join(exc_list))
def update_delivery_status(self):
"""Update delivery status from Purchase Order for drop shipping"""
tot_qty, delivered_qty = 0.0, 0.0
for item in self.items:
if item.delivered_by_supplier:
item_delivered_qty = frappe.db.sql("""select sum(qty)
from `tabPurchase Order Item` poi, `tabPurchase Order` po
where poi.sales_order_item = %s
and poi.item_code = %s
and poi.parent = po.name
and po.docstatus = 1
and po.status = 'Delivered'""", (item.name, item.item_code))
item_delivered_qty = item_delivered_qty[0][0] if item_delivered_qty else 0
item.db_set("delivered_qty", flt(item_delivered_qty), update_modified=False)
delivered_qty += item.delivered_qty
tot_qty += item.qty
frappe.db.set_value("Sales Order", self.name, "per_delivered", flt(delivered_qty/tot_qty) * 100,
update_modified=False)
def set_indicator(self):
"""Set indicator for portal"""
if self.per_billed < 100 and self.per_delivered < 100:
self.indicator_color = "orange"
self.indicator_title = _("Not Paid and Not Delivered")
elif self.per_billed == 100 and self.per_delivered < 100:
self.indicator_color = "orange"
self.indicator_title = _("Paid and Not Delivered")
else:
self.indicator_color = "green"
self.indicator_title = _("Paid")
def on_recurring(self, reference_doc):
mcount = month_map[reference_doc.recurring_type]
self.set("delivery_date", get_next_date(reference_doc.delivery_date, mcount,
cint(reference_doc.repeat_on_day_of_month)))
def get_list_context(context=None):
from erpnext.controllers.website_list_for_contact import get_list_context
list_context = get_list_context(context)
list_context.update({
'show_sidebar': True,
'show_search': True,
'no_breadcrumbs': True,
'title': _('Orders'),
})
return list_context
@frappe.whitelist()
def close_or_unclose_sales_orders(names, status):
if not frappe.has_permission("Sales Order", "write"):
frappe.throw(_("Not permitted"), frappe.PermissionError)
names = json.loads(names)
for name in names:
so = frappe.get_doc("Sales Order", name)
if so.docstatus == 1:
if status == "Closed":
if so.status not in ("Cancelled", "Closed") and (so.per_delivered < 100 or so.per_billed < 100):
so.update_status(status)
else:
if so.status == "Closed":
so.update_status('Draft')
frappe.local.message_log = []
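# Usage sketch (hedged): `names` arrives as a JSON-encoded list because this
# whitelisted endpoint is called from the client, e.g.
# close_or_unclose_sales_orders('["SO-00001", "SO-00002"]', "Closed")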
@frappe.whitelist()
def make_material_request(source_name, target_doc=None):
def postprocess(source, doc):
doc.material_request_type = "Purchase"
def update_item(source, target, source_parent):
target.project = source_parent.project
doc = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Material Request",
"validation": {
"docstatus": ["=", 1]
}
},
"Packed Item": {
"doctype": "Material Request Item",
"field_map": {
"parent": "sales_order",
"stock_uom": "uom"
},
"postprocess": update_item
},
"Sales Order Item": {
"doctype": "Material Request Item",
"field_map": {
"parent": "sales_order",
"stock_uom": "uom"
},
"condition": lambda doc: not frappe.db.exists('Product Bundle', doc.item_code),
"postprocess": update_item
}
}, target_doc, postprocess)
return doc
@frappe.whitelist()
def make_delivery_note(source_name, target_doc=None):
def set_missing_values(source, target):
if source.po_no:
if target.po_no:
target_po_no = target.po_no.split(", ")
target_po_no.append(source.po_no)
target.po_no = ", ".join(list(set(target_po_no))) if len(target_po_no) > 1 else target_po_no[0]
else:
target.po_no = source.po_no
target.ignore_pricing_rule = 1
target.run_method("set_missing_values")
target.run_method("calculate_taxes_and_totals")
def update_item(source, target, source_parent):
target.base_amount = (flt(source.qty) - flt(source.delivered_qty)) * flt(source.base_rate)
target.amount = (flt(source.qty) - flt(source.delivered_qty)) * flt(source.rate)
target.qty = flt(source.qty) - flt(source.delivered_qty)
target_doc = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Delivery Note",
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Delivery Note Item",
"field_map": {
"rate": "rate",
"name": "so_detail",
"parent": "against_sales_order",
},
"postprocess": update_item,
"condition": lambda doc: abs(doc.delivered_qty) < abs(doc.qty) and doc.delivered_by_supplier!=1
},
"Sales Taxes and Charges": {
"doctype": "Sales Taxes and Charges",
"add_if_empty": True
},
"Sales Team": {
"doctype": "Sales Team",
"add_if_empty": True
}
}, target_doc, set_missing_values)
return target_doc
@frappe.whitelist()
def make_sales_invoice(source_name, target_doc=None, ignore_permissions=False):
def postprocess(source, target):
set_missing_values(source, target)
# Get the advance paid Journal Entries in Sales Invoice Advance
target.set_advances()
def set_missing_values(source, target):
target.is_pos = 0
target.ignore_pricing_rule = 1
target.flags.ignore_permissions = True
target.run_method("set_missing_values")
target.run_method("calculate_taxes_and_totals")
def update_item(source, target, source_parent):
target.amount = flt(source.amount) - flt(source.billed_amt)
target.base_amount = target.amount * flt(source_parent.conversion_rate)
target.qty = target.amount / flt(source.rate) if (source.rate and source.billed_amt) else source.qty
doclist = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Sales Invoice",
"field_map": {
"party_account_currency": "party_account_currency"
},
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Sales Invoice Item",
"field_map": {
"name": "so_detail",
"parent": "sales_order",
},
"postprocess": update_item,
"condition": lambda doc: doc.qty and (doc.base_amount==0 or abs(doc.billed_amt) < abs(doc.amount))
},
"Sales Taxes and Charges": {
"doctype": "Sales Taxes and Charges",
"add_if_empty": True
},
"Sales Team": {
"doctype": "Sales Team",
"add_if_empty": True
}
}, target_doc, postprocess, ignore_permissions=ignore_permissions)
return doclist
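# Usage sketch (hedged): get_mapped_doc returns an unsaved document, so the
# caller decides when to persist it. "SO-00001" is a hypothetical order name.
def _demo_make_sales_invoice():
    si = make_sales_invoice("SO-00001")
    si.insert()  # save as a draft Sales Invoice
    si.submit()  # submit it against the Sales Order
    return si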
@frappe.whitelist()
def make_maintenance_schedule(source_name, target_doc=None):
maint_schedule = frappe.db.sql("""select t1.name
from `tabMaintenance Schedule` t1, `tabMaintenance Schedule Item` t2
where t2.parent=t1.name and t2.sales_order=%s and t1.docstatus=1""", source_name)
doclist = None  # stays None if a submitted Maintenance Schedule already exists
if not maint_schedule:
doclist = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Maintenance Schedule",
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Maintenance Schedule Item",
"field_map": {
"parent": "sales_order"
},
"add_if_empty": True
}
}, target_doc)
return doclist
@frappe.whitelist()
def make_maintenance_visit(source_name, target_doc=None):
visit = frappe.db.sql("""select t1.name
from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2
where t2.parent=t1.name and t2.prevdoc_docname=%s
and t1.docstatus=1 and t1.completion_status='Fully Completed'""", source_name)
doclist = None  # stays None if a fully completed Maintenance Visit already exists
if not visit:
doclist = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Maintenance Visit",
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Maintenance Visit Purpose",
"field_map": {
"parent": "prevdoc_docname",
"parenttype": "prevdoc_doctype"
},
"add_if_empty": True
}
}, target_doc)
return doclist
@frappe.whitelist()
def get_events(start, end, filters=None):
"""Returns events for Gantt / Calendar view rendering.
:param start: Start date-time.
:param end: End date-time.
:param filters: Filters (JSON).
"""
from frappe.desk.calendar import get_event_conditions
conditions = get_event_conditions("Sales Order", filters)
data = frappe.db.sql("""select name, customer_name, delivery_status, billing_status, delivery_date
from `tabSales Order`
where (ifnull(delivery_date, '0000-00-00')!= '0000-00-00') \
and (delivery_date between %(start)s and %(end)s)
and docstatus < 2
{conditions}
""".format(conditions=conditions), {
"start": start,
"end": end
}, as_dict=True, update={"allDay": 0})
return data
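# Example call (hedged; the dates are made up):
# get_events("2016-01-01", "2016-01-31")
# returns one dict per order with name, customer_name, delivery/billing status
# and delivery_date.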
@frappe.whitelist()
def make_purchase_order_for_drop_shipment(source_name, for_supplier, target_doc=None):
def set_missing_values(source, target):
target.supplier = for_supplier
default_price_list = frappe.get_value("Supplier", for_supplier, "default_price_list")
if default_price_list:
target.buying_price_list = default_price_list
if any( item.delivered_by_supplier==1 for item in source.items):
if source.shipping_address_name:
target.shipping_address = source.shipping_address_name
target.shipping_address_display = source.shipping_address
else:
target.shipping_address = source.customer_address
target.shipping_address_display = source.address_display
target.customer_contact_person = source.contact_person
target.customer_contact_display = source.contact_display
target.customer_contact_mobile = source.contact_mobile
target.customer_contact_email = source.contact_email
else:
target.customer = ""
target.customer_name = ""
target.run_method("set_missing_values")
target.run_method("calculate_taxes_and_totals")
def update_item(source, target, source_parent):
target.schedule_date = source_parent.delivery_date
target.qty = flt(source.qty) - flt(source.ordered_qty)
doclist = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Purchase Order",
"field_no_map": [
"address_display",
"contact_display",
"contact_mobile",
"contact_email",
"contact_person"
],
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Purchase Order Item",
"field_map": [
["name", "sales_order_item"],
["parent", "sales_order"],
["uom", "stock_uom"],
["delivery_date", "schedule_date"]
],
"field_no_map": [
"rate",
"price_list_rate"
],
"postprocess": update_item,
"condition": lambda doc: doc.ordered_qty < doc.qty and doc.supplier == for_supplier
}
}, target_doc, set_missing_values)
return doclist
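# Usage sketch (hedged): both names below are hypothetical.
# po = make_purchase_order_for_drop_shipment("SO-00001", "_Test Supplier")
# po.insert()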
@frappe.whitelist()
def get_supplier(doctype, txt, searchfield, start, page_len, filters):
supp_master_name = frappe.defaults.get_user_default("supp_master_name")
if supp_master_name == "Supplier Name":
fields = ["name", "supplier_type"]
else:
fields = ["name", "supplier_name", "supplier_type"]
fields = ", ".join(fields)
return frappe.db.sql("""select {field} from `tabSupplier`
where docstatus < 2
and ({key} like %(txt)s
or supplier_name like %(txt)s)
and name in (select supplier from `tabSales Order Item` where parent = %(parent)s)
order by
if(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),
if(locate(%(_txt)s, supplier_name), locate(%(_txt)s, supplier_name), 99999),
name, supplier_name
limit %(start)s, %(page_len)s """.format(**{
'field': fields,
'key': frappe.db.escape(searchfield)
}), {
'txt': "%%%s%%" % txt,
'_txt': txt.replace("%", ""),
'start': start,
'page_len': page_len,
'parent': filters.get('parent')
})
@frappe.whitelist()
def update_status(status, name):
so = frappe.get_doc("Sales Order", name)
so.update_status(status)
| repo_name: anandpdoshi/erpnext | path: erpnext/selling/doctype/sales_order/sales_order.py | language: Python | license: agpl-3.0 | size: 22,495 | keyword: ["VisIt"] | text_hash: 4a46243070b9fdd73b7a07d779ed1e65732512816699a7a2d7bd969b46e35eb9 |
import numpy as np
from numpy.random import multivariate_normal as multi_norm
from scipy.spatial import cKDTree as ckdt
from collections import defaultdict
from scipy.stats import norm
import warnings
class GMM: # gaussian mixture model
def __init__(self, pis, params):
self.params = params # [[mu1, sig1], [mu2, sig2],...]
self.components = params.shape[0]
self.pis = pis
def __call__(self, x):
pis = self.pis
p = self.params
sz = self.components
return np.array([pis[i]*norm.pdf(x,*(p[i])) for i in range(sz)]).sum(axis=0)
def sample(self, n_samples, normalize=False):
mode_id = np.random.choice(self.components, size=n_samples, replace=True, p=self.pis)
return [np.array([norm.rvs(*(self.params[i])) for i in mode_id]), mode_id]
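# Usage sketch (hedged; the weights and (mu, sigma) pairs below are made up):
# draw from a two-component mixture and evaluate its density.
def _demo_gmm():
    gmm = GMM(pis=np.array([0.3, 0.7]), params=np.array([[0.0, 1.0], [5.0, 0.5]]))
    samples, mode_ids = gmm.sample(1000)
    density = gmm(samples)  # sum_i pi_i * N(x; mu_i, sigma_i) at each sample
    return samples, mode_ids, density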
class Neuron():
def __init__(self, shape, weights, bias, decay=0.025, pi=1):
self.weights = np.array(weights)
self.rows, self.cols = shape
self.dim = self.rows*self.cols
self.bias = bias
self.decay = decay
self.pi = pi
self.tot_exp = 0
self.avg_change = 0
self.calls = 0
self.neighbors = []
self.lr = 1.0 # Learning Rate
def __call__(self, x, feedback=1, update=True):
assert x.shape[1:] == self.weights.shape
z = x-self.weights
z_dot_z = (z*z).reshape(-1,self.rows*self.cols).sum(axis=1)
output = np.exp(-z_dot_z/(2*self.bias))
if update:
self.calls += x.shape[0]
# Update
if update: # Can only update batches of size 1 currently
q = np.power(output,1)
self.weights = self.weights + self.lr*q*z.sum(axis=0)
self.bias = self.bias + self.lr*(np.maximum(q*(z_dot_z-self.bias),-0.2*self.bias) + self.decay*np.sqrt(2*np.pi*self.bias)*self.bias)
self.lr = np.maximum(0,self.lr-0.005)
return output
def add_neighbors(self, neurons):
self.neighbors.append(neurons)
def get_weights(self):
return self.weights
def sample(self, num_samps):
return multi_norm(self.weights[0], np.diag([self.bias]*self.dim),num_samps)
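# Minimal sketch (shapes are made up): one online update step. Calling the
# neuron on a batch-of-one 3x3 patch returns its Gaussian activation and
# nudges the weights, bias and learning rate in place.
def _demo_neuron_step():
    neuron = Neuron(shape=(3, 3), weights=np.random.rand(3, 3), bias=1.0)
    patch = np.random.rand(1, 3, 3)  # batch of size 1, as the update expects
    return neuron(patch)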
class Net():
def __init__(self, rows, cols, num_neurons, bias, decay, kernels, locs, sleep_cycle):
""" rows - number of rows in the input
cols - number of columns in the input
num_neurons - number of neurons in the layers
bias - the bias every neuron in the layer should be initialized with
decay - the decay rate every neuron should be initialized with (could be list)
kernels - the kernel sizes for every neuron. If only one, it is
duplicated
locs - location on the input for the neuron to listen
"""
self.input_rows = rows
self.input_cols = cols
self.num_neurons = num_neurons
self.bias = bias
self.decay = decay if hasattr(decay, '__iter__') else [decay]*num_neurons
self.sleep_cycle = sleep_cycle
if len(kernels) != num_neurons:
self.kernels = kernels*num_neurons
else:
self.kernels = kernels
if len(locs) != num_neurons:
self.locs = locs*num_neurons
else:
self.locs = locs
self.num_calls = 0
self.total_activity = 0
self.neurons = defaultdict(list)
#if isinstance(learning_params, dict):
#self.learning_params = [learning_params]*num_neurons
#elif isinstance(learning_params, list):
#self.learning_params = learning_params
#else:
#sys.exit("Error: Learning params must be a dict or list")
self.__build_network()
def __build_network(self):
pis = np.random.rand(self.num_neurons)
pis /= pis.sum()
for n in range(self.num_neurons):
r,c = self.kernels[n]
locx,locy = self.locs[n]
# Create neuron
weights = np.random.rand(r,c)
self.neurons[(locx,locy)].append(Neuron([r,c], weights, self.bias,
self.decay[n], pis[n]))
# Calculate the nearest neighbors for the neurons
locs = np.array(list(self.neurons.keys()))
kdtree = ckdt(locs)
neighbors = kdtree.query_ball_point(locs,7)
# Give each neuron a pointer to its neighbors
for loc, nbhrs in zip(locs, neighbors):
neurons = self.neurons[tuple(loc)]
for neuron in neurons:
for nbhr_loc in locs[nbhrs[1:]]:
neuron.add_neighbors(self.neurons[tuple(nbhr_loc)])
def __call__(self, xp, feedback=1, update=1):
#print('xp = ', xp)
output = []
for loc, neurons in self.neurons.items():
for neuron in neurons:
x,y = loc
r = neuron.rows//2
c = neuron.cols//2
y0 = int(np.ceil(y-r))
y1 = int(np.floor(y+r+1))
x0 = int(np.ceil(x-c))
x1 = int(np.floor(x+c+1))
try:
val = neuron(xp[:,y0:y1,x0:x1], feedback, update)
if update:
# Multiply by the normalizing factor now because val only
# contains the exp term
self.total_activity += val*np.sqrt(2*np.pi*neuron.bias)
except ValueError:
print('loc = ', loc)
raise(ValueError)
output.append(neuron.pi*val)
if update:
self.num_calls += 1
if (self.num_calls+1) % self.sleep_cycle == 0:
self.__sleep()
self.num_calls = 0
return np.array(output)
def __sleep(self):
print("SLEEPING!")
for loc, neurons in self.neurons.items():
print('neurons = ', neurons)
for neuron in neurons:
neuron.pi = neuron.tot_exp/self.total_activity
print('pi = ', neuron.pi)
neuron.tot_exp = 0
neuron.calls = 0
neuron.k = 1
neuron.avg_output = 0
self.total_activity = 0
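# Minimal sketch (layout is made up): a two-neuron net on an 8x8 input. The
# single kernel size is duplicated for both neurons by the constructor, and
# each neuron reads a 3x3 window centered on its grid location.
def _demo_net():
    net = Net(rows=8, cols=8, num_neurons=2, bias=1.0, decay=0.025,
              kernels=[(3, 3)], locs=[(3, 3), (5, 5)], sleep_cycle=100)
    frame = np.random.rand(1, 8, 8)
    return net(frame)  # one activation per neuron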
| repo_name: mathnathan/notebooks | path: dissertation/OldBrain.py | language: Python | license: mit | size: 6,236 | keyword: ["Gaussian", "NEURON"] | text_hash: 2d1fb61808df0a8a72fbac68b027a8dfe05a0039b83fd6117eec603c47fd01e8 |
"""
Django settings for pyconuk project.
Generated by 'django-admin startproject' using Django 1.9.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# This does not need to be kept secret, since it is not used to protect
# anything in the static site.
SECRET_KEY = 'secret'
# SECURITY WARNING: This should be False when the site is built to ensure we
# don't accidentally leak information in error pages. This has the added
# effect of massively speeding up the build, since LESS compilation no longer
# happens on every request!
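# Note: os.getenv returns a string, so any non-empty DEBUG value (even "0" or
# "False") evaluates truthy here; leave the variable unset to keep DEBUG off.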
DEBUG = bool(os.getenv('DEBUG', False))
# This is fine.
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'pyconuk',
'django_amber',
'markdown_deux',
'compressor',
# These two apps are required for Django to work properly, even though we
# don't use them directly.
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
]
ROOT_URLCONF = 'pyconuk.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'pyconuk.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-gb'
TIME_ZONE = 'UTC'
USE_TZ = True
# Formatting
DATE_FORMAT = 'jS F Y' # eg 25th December 2016
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'media')
]
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
# Markdown
MARKDOWN_DEUX_STYLES = {
'default': {
'safe_mode': False, # This means we don't escape HTML tags in Markdown
}
}
# Django Amber
DJANGO_AMBER_CNAME = '2016.pyconuk.org'
# Django Compressor
COMPRESS_PRECOMPILERS = (
('text/less', 'lessc {infile} {outfile}'),
)
| repo_name: PyconUK/2016.pyconuk.org | path: pyconuk/settings.py | language: Python | license: mit | size: 3,166 | keyword: ["Amber"] | text_hash: 739b5bdedcd0de95c3894aa780c76b02d387306ae999ba5b9eecc9473761e831 |
import numpy as np
import os
from scipy import ndimage
import matplotlib.image as img
import matplotlib.pyplot as plt
import sys
import Queue
import dicom
# Constants.
PLATEAU = 0
INFINITE = 9e10
# Gets the neighbours of (j,i)
def neighbours(j,i):
return [(j-1,i-1),(j-1,i),(j-1,i+1),(j,i+1),
(j+1,i+1),(j+1,i),(j+1,i-1),(j,i-1)]
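# e.g. neighbours(1, 1) -> [(0, 0), (0, 1), (0, 2), (1, 2),
#                           (2, 2), (2, 1), (2, 0), (1, 0)]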
# Determines if tuple n is outside
# of the boundaries of the image I.
def outside(I,n):
j,i = n
h, w = I.shape
if j < 0 or j > h-1 or \
i < 0 or i > w-1: return True
else: return False
# Check if array contains unique element.
def unique(S):
return len(S) > 0 and all(p == S[0] for p in S)
# Convert tuple to index.
def t2i(tup,width):
return tup[0] * width + tup[1]
# Convert index to tuple.
def i2t(ind,width):
return (ind / width, ind % width)
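# Round-trip sanity check (Python 2 integer division assumed):
# t2i((2, 3), width=10) -> 23 and i2t(23, width=10) -> (2, 3)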
# Show edges in the final watershed.
def showEdges(L,I):
plt.imshow(getEdges(L,I),cmap='gray')
plt.show()
# Get edges from watershed image
# (i.e. watershed lines).
def getEdges(L,I):
E = np.zeros_like(L)
height,width = L.shape
for j in range(0,height):
for i in range(0,width):
p = (j,i); c = 0
for u in neighbours(j,i):
if outside(L,u): continue
if L[u] < L[p] and E[u] != 1:
E[p] = 125
return E+I
# Preprocess with a Gaussian filter.
def preprocess(I):
I = np.float32(I)
I = ndimage.gaussian_filter(I,1)
return I
# Show progress dots.
def show_progress():
sys.stdout.write(".")
sys.stdout.flush()
# Heavily inspired by a 2009 script by Daniel Nanz:
# http://code.google.com/p/pydicom/source/browse/
# source/dicom/contrib/pydicom_Tkinter.py?
# r=f2c30464fd3b7e553af910ee5a9f5bcf4b3f4ccf
def pgm_from_np(arr, window_center, window_width, lut_min=0, lut_max=255):
# Basic sanity checking.
if np.isreal(arr).sum() != arr.size: raise ValueError
if lut_max != 255: raise ValueError
if arr.dtype != np.float64: arr = arr.astype(np.float64)
# Get window information.
window_width = max(1, window_width)
wc, ww = np.float64(window_center), np.float64(window_width)
lut_range = np.float64(lut_max) - lut_min
# Transform the image.
minval = wc - 0.5 - (ww - 1.0) / 2.0
maxval = wc - 0.5 + (ww - 1.0) / 2.0
min_mask = (minval >= arr)
to_scale = (arr > minval) & (arr < maxval)
max_mask = (arr >= maxval)
if min_mask.any(): arr[min_mask] = lut_min
# Scale the image to the right proportions.
if to_scale.any(): arr[to_scale] = \
((arr[to_scale] - (wc - 0.5)) /
(ww - 1.0) + 0.5) * lut_range + lut_min
if max_mask.any(): arr[max_mask] = lut_max
arr = np.rint(arr).astype(np.uint8)
return arr
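# Hedged sketch: window a small synthetic array with a made-up window
# (center 40, width 400); values below the window clamp to lut_min and
# values above it clamp to lut_max.
def _demo_windowing():
    arr = np.linspace(-300.0, 400.0, 8)
    return pgm_from_np(arr, window_center=40, window_width=400)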
# Read in a DICOM file.
def read_dcm(file_name):
data = dicom.read_file(file_name)
arr = data.pixel_array.astype(np.float64)
# Rescale image.
if ('RescaleIntercept' in data) and ('RescaleSlope' in data):
intercept = int(data.RescaleIntercept)
slope = int(data.RescaleSlope)
arr = slope * arr + intercept
wc = (arr.max() + arr.min()) / 2.0
ww = arr.max() - arr.min() + 1.0
if ('WindowCenter' in data) and ('WindowWidth' in data):
wc = data.WindowCenter
ww = data.WindowWidth
try: wc = wc[0]
except: pass
try: ww = ww[0]
except: pass
return pgm_from_np(arr, wc, ww)
def strip_extension(path):
return os.path.splitext(path)[0]
| repo_name: yunfanz/ReionBub | path: ws_utils.py | language: Python | license: mit | size: 3,339 | keyword: ["Gaussian"] | text_hash: 97fb900d0830c367b138482e197c9e6b8d25125eba3bd15cfbf5ad5e4c5efebf |
# -*- coding: utf-8 -*-
from Visitor.CNodeVisitor import *
from CNode import *
class ToStringVisitor(CNodeVisitor):
def __init__(self, with_types=False):
self.str = ""
self.with_types = with_types
self.indent = 0
def visit_any(self, o, indent=0):
if self.with_types:
displayname = str(o.name) + ' ' + str(type(o.name))
else:
displayname = str(o.name)
self.str += ' ' * indent + '{'+displayname+'}' + '\n'
cont = o.content
# ugh and yuck: coerce content to a unicode string
if type(cont) != str and type(cont) != unicode:
cont = str(cont)
if type(cont) == str:
cont = unicode(cont,'utf8')
if self.with_types:
cont = cont + " (: %s)" % str(type(o.content))
self.str += u' ' * indent + u' '+cont+ u'\n'
for k,v in o.attr.iteritems():
if self.with_types:
k = "(%s : %s)" % (k, str(type(k)))
v = v + " (: %s)" % str(type(v))
self.str += ' ' * indent + " - %s: %s\n" %(k,v)
[self.visit(c,indent=indent+1) for c in o.children]
return self.str
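# Illustrative output for a one-child tree (hedged; the exact text depends on
# the CNode fields):
# {root}
#  root content
#  - key: value
#  {child}
#   child content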
| repo_name: orbekk/erebus | path: Visitor/ToStringVisitor.py | language: Python | license: gpl-2.0 | size: 1,164 | keyword: ["VisIt"] | text_hash: 3ff36f046a7f75fd41714a275ca6f296234701d73552a789a205a85d7b9133ce |
from bs4 import BeautifulSoup
import os
import pickle
import random
datadir = '../../Data/'
inputdir = datadir + 'Generator_Data/'
metadatadir = datadir + 'Metadata/'
inputfiles = os.listdir(inputdir)
inputfiles = [x for x in inputfiles if '.kml' in x]
def coordinate_string_to_latlon_pair(x):
return map(float, x.split(','))
def none_to_empty(x):
if x is None:
return '0'
else:
return x
def parse_description(nodedescription, plant_type='Coal'):
mignon = BeautifulSoup(str(nodedescription[0]))
# Nothing here we need
mignon = BeautifulSoup(str(nodedescription[1]))
cutlets = mignon.find_all('tr')
nodestatus = unicode(cutlets[0].select('td')[1].string)
# none_to_empty() guards against a NoneType return before the float conversion
nodecapacity = float(none_to_empty(cutlets[3].select('td')[1].string))
if plant_type == 'Coal' or plant_type == 'Gas' or plant_type == 'Oil':
nodeprimaryfuel = unicode(cutlets[8].select('td')[1].string.lstrip('Primary: '))
nodesecondaryfuel = unicode(cutlets[8].select('td')[2].string.lstrip('Secondary: '))
elif plant_type == 'Geothermal':
nodeprimaryfuel = 'Geothermal'
nodesecondaryfuel = None
elif plant_type == 'Hydro':
nodeprimaryfuel = 'Hydro'
nodesecondaryfuel = None
elif plant_type == 'Nuclear':
nodeprimaryfuel = 'Nuclear'
nodesecondaryfuel = None
elif plant_type == 'Waste':
nodeprimaryfuel = 'Waste'
nodesecondaryfuel = None
elif plant_type == 'Biomass':
nodeprimaryfuel = 'Biomass'
nodesecondaryfuel = None
else:
nodeprimaryfuel = 'Unknown'
nodesecondaryfuel = None
mignon = BeautifulSoup(str(nodedescription[2]))
cutlets = mignon.select('tr')
generators = {}
for line in cutlets[2:]:
chops = line.find_all('td')
if not chops[1].string is None:
if plant_type == 'Nuclear':
generators[int(chops[0].string)] = {'capacity': float(chops[2].string), 'Turbine Model': unicode(chops[9].string)}
else:
generators[int(chops[0].string)] = {'capacity': float(chops[1].string), 'Turbine Model': unicode(chops[7].string)}
if nodecapacity == 0.0:
nodecapacity = sum(generators[g]['capacity'] for g in generators)
return {'capacity': nodecapacity, 'primaryfuel': nodeprimaryfuel, 'secondaryfuel': nodesecondaryfuel, 'generators': generators, 'status': nodestatus}
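# Illustrative shape of the returned record (all values made up):
# {'capacity': 600.0, 'primaryfuel': u'Coal', 'secondaryfuel': u'Fuel Oil',
#  'generators': {1: {'capacity': 300.0, 'Turbine Model': u'Steam'}},
#  'status': u'Operating'}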
# # MAIN SCRIPT FOLLOWS
database = {}
i = 0
print ''
for inputfile in inputfiles:
with open(inputdir + inputfile, 'r') as f:
soup = BeautifulSoup(f, 'lxml')
plant_type = inputfile.split('_')[2]
country = inputfile.split('_')[3]
for node in soup.select('placemark'):
i += 1
print '\r', str(i)
nodeid = node.attrs['id'].lstrip('placemark')
nodename = unicode(node.select('name')[0].string)
nodelocation = coordinate_string_to_latlon_pair(node.select('coordinates')[0].string)
nodedescription = map(str, node.select('description')[0].contents)
if not nodedescription == []:
nodedescdict = parse_description(nodedescription, plant_type)
else:
nodedescdict = {'capacity': 0, 'primaryfuel': plant_type, 'secondaryfuel': None, 'generators': {}, 'status': 'No Data in GEO'}
database[nodeid] = {'name': nodename, 'location': nodelocation, 'country': country}
database[nodeid].update(nodedescdict)
# # # Cleanup of database
# # So many fuel types - let's simplify
translatefuel = {None: 'Unknown',
u'': 'Unknown',
u'Please Select': 'Unknown',
u'Anthracite coal': 'Coal',
u'Biomass': u'Biomass',
u'Bituminous Coal': 'Coal',
u'Blast Furnace Gas (Dowson Gas)': 'Natural Gas',
u'Blast furnace gas and coke oven gas': 'Natural Gas',
u'Brown Coal': 'Lignite',
u'Brown Coal and Lignite': 'Lignite',
u'Brown Coal (Lignite)': 'Lignite',
u'Coal': 'Coal',
u'Coal Anthracite': 'Coal',
u'Coal Anthracite and bituminous': 'Coal',
u'Coal Bituminous': 'Coal',
u'Coal Brown': 'Lignite',
u'Coal Brown Lignite': 'Lignite',
u'Coal Hard': 'Coal',
u'Coal Lignite': 'Lignite',
u'Coal Lignite and bituminous': 'Lignite',
u'Coal Sub-bituminous': 'Coal',
u'Coal Syngas': 'Natural Gas',
u'Coal bituminous': 'Coal',
u'Coal bituminous and lignite': 'Coal',
u'Coal lignite': 'Lignite',
u'Coal lignite and Brown Coal': 'Lignite',
u'Coal lignite and sub-bituminous': 'Lignite',
u'Coal lignite black': 'Lignite',
u'Coal, Heavy Fuel Oil': 'Coal',
u'Coal, slag, petroleum coke': 'Coal',
u'Diesel': 'Fuel Oil',
u'Diesel Oil': 'Fuel Oil',
u'Distillate Oil': 'Fuel Oil',
u'Fuel Oil': 'Fuel Oil',
u'Fuel Oil Heavy': 'Fuel Oil',
u'Fuel Oil Light': 'Fuel Oil',
u'Furnace Gas': 'Coal',
u'Gas': 'Natural Gas',
u'Gas from Steel Mills': 'Natural Gas',
u'Gas Oil': 'Natural Gas',
u'Geothermal': 'Geothermal',
u'Hard Coal': 'Coal',
u'Hard Coal, Heavy Fuel Oil': 'Coal',
u'Heavy Fuel Oil': 'Fuel Oil',
u'Heavy Oil': 'Fuel Oil',
u'Hydro': 'Hydro',
u'Lignite': 'Lignite',
u'Light Fuel Oil/Diesel': 'Fuel Oil',
u'Mixed Fuel (Coal NG, Blast Furnance Gas, Wood Pellets)': 'Coal',
u'Natual Gas': 'Natural Gas',
u'Natural Gas': 'Natural Gas',
u'Natural Gas (Recovery gas from steel mill)': 'Natural Gas',
u'Nuclear': 'Nuclear',
u'Oil': 'Fuel Oil',
u'Oil distillate': 'Fuel Oil',
u'Sub-bituminous': 'Coal',
u'Syn gas from Coal Gasification': 'Coal',
u'Unknown': 'Unknown',
u'Waste': 'Waste',
u'Waste Furnace Gas': 'Waste',
u'Wood Waste': u'Biomass',
u'blast furnace gas (BFG)': 'Natural Gas',
u'coal': 'Coal',
u'high-calorific coke-oven gas and blast furnace gas': 'Coal',
u'oal Bituminous': 'Coal',
u'syngas from refinery residual oil': 'Fuel Oil'}
for g in database.iterkeys():
database[g]['primaryfuel'] = translatefuel[database[g]['primaryfuel']]
database[g]['secondaryfuel'] = translatefuel[database[g]['secondaryfuel']]
# # Ramp rates set by fuel type (relative to max capacity)
ramprates = {u'Biomass': 1.0, 'Coal': 0.8, 'Fuel Oil': 1.0, 'Geothermal': 1.0, 'Hydro': 1.0, 'Lignite': 0.7,
'Natural Gas': 1.0, 'Nuclear': 0.5, 'Unknown': 1.0, 'Waste': 1.0}
for g in database.iterkeys():
database[g]['ramp'] = ramprates[database[g]['primaryfuel']]
# # Linear cost set by fuel type [$ / MWh] - Uniformly chosen from 90-110% of below.
lincosts = {
u'Biomass': 39.5,
'Coal': 38.6,
'Fuel Oil': 122.2,
'Geothermal': 0.0,
'Hydro': 6.4,
'Lignite': 23.8,
'Natural Gas': 55.6,
'Nuclear': 11.8,
'Unknown': 130.0,
'Waste': 39.5}
# Jitter generators by +-10%
for g in database.iterkeys():
random.seed(g)
database[g]['lincost'] = lincosts[database[g]['primaryfuel']]*(random.random()*0.2+0.9)
# # Minimal up- and downtimes by fuel type
# No source - look for one!
uptimes = {u'Biomass': 8, 'Coal': 8, 'Fuel Oil': 2, 'Geothermal': 0, 'Hydro': 0, 'Lignite': 8,
'Natural Gas': 2, 'Nuclear': 24, 'Unknown': 8, 'Waste': 8}
downtimes = {u'Biomass': 8, 'Coal': 8, 'Fuel Oil': 4, 'Geothermal': 0, 'Hydro': 0, 'Lignite': 8,
'Natural Gas': 4, 'Nuclear': 24, 'Unknown': 8, 'Waste': 4}
for g in database.iterkeys():
f = database[g]['primaryfuel']
database[g]['minuptime'] = uptimes[f]
database[g]['mindowntime'] = downtimes[f]
# # Set production of generators which are exported incorrectly from the database.
tosetlist = {
'2175': 2060.,
'2605': 270.,
'2609': 1412.,
'2947': 800.,
'3913': 730.,
'43676': 855.3,
'4396': 372.,
'45044': 355.,
'4938': 868.,
'5270': 466.,
'5682': 2026.,
'5910': 1087.
}
for g, p in tosetlist.iteritems():
database[g]['capacity'] = p
# # Minimal production set by fuel type
mincapacity = {u'Biomass': 0.20,
'Coal': 0.20, # http://ebooks.asmedigitalcollection.asme.org/content.aspx?bookid=240§ionid=38774800
'Fuel Oil': 0.40,
'Geothermal': 0.20, # http://egec.info/wp-content/uploads/2014/10/Flex-Factsheet-Web-Version.pdf
'Hydro': 0.10, # http://www.nzdl.org/gsdlmod?e=d-00000-00---off-0cdl--00-0----0-10-0---0---0direct-10---4-------0-1l--11-en-50---20-about---00-0-1-00-0-0-11-1-0utfZz-8-10&a=d&cl=CL2.12&d=HASH12e30488fe16525235d00f.8.2
'Lignite': 0.20, # Assumed same as coal
'Natural Gas': 0.40, # http://www.alstom.com/Global/Power/Resources/Documents/Brochures/gas-power-plants.pdf
'Nuclear': 0.20, # http://www.iaea.org/NuclearPower/Meetings/2013/2013-09-04-09-06-TM-NPE.html
'Unknown': 0.40,
'Waste': 0.20}
for g in database.iterkeys():
database[g]['minonlinecapacity'] = mincapacity[database[g]['primaryfuel']]*database[g]['capacity']
# # Warm start cycling costs [$/MW cap]
# # Based on data from NREL - "Power plant cycling costs" (April 2012) and
# # http://www.ipautah.com/data/upfiles/newsletters/CyclingArticles.pdf (coal)
# # http://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=00574921 (hydro)
cyclecost = {u'Biomass': 65, # Assumed equal to coal
'Coal': 65,
'Fuel Oil': 55, # Assumed equal to gas CCGT
'Geothermal': 4.3, # Assumed equal to hydro
'Hydro': 4.3, # Based on swedish data, in 2012 dollars
'Lignite': 65, # Assumed equal to coal
'Natural Gas': 55, # Assumes CCGT
'Nuclear': 300, # No source - set at large number to represent usual baseload operation
'Unknown': 65,
'Waste': 65} # assumed equal to coal
for g in database.iterkeys():
f = database[g]['primaryfuel']
database[g]['cyclecost'] = cyclecost[f]*database[g]['capacity']
# # # Removing generators that are not connected to mainland Europe or are known duplicates
toremovelist = [
'43804', # Canary Islands, spain
'43815', # Canary Islands, spain
'42778', # Crete, Greece
'42779', # Chios, Greece
'2402', # Duplicate of 39749
'2638' # Duplicate of 39746
]
database = {k: v for k, v in database.iteritems() if k not in toremovelist}
pickle.dump(database, open(metadatadir + 'generator_database_no_affiliation.pickle', 'w'))
| repo_name: DTU-ELMA/European_Dataset | path: Scripts/Generator_Handling/1-Extract_GEO_Files.py | language: Python | license: apache-2.0 | size: 11,254 | keyword: ["BLAST"] | text_hash: cb51100070a5ed40cc6944c017b9a8a567b027df25de43b93f77f58400b76df3 |
from django_extensions.management.commands.runserver_plus import (
Command as DjangoExtensionsRunserverCommand)
from .runserver_mixin import RunserverMixin
class Command(RunserverMixin, DjangoExtensionsRunserverCommand):
"""
Subclass the DjangoExtensionsRunserverCommand from django-extensions to set
up our gulp environment.
"""
pass
| repo_name: beaugunderson/django-gulp | path: django_gulp/management/commands/runserver_plus.py | language: Python | license: mit | size: 363 | keyword: ["GULP"] | text_hash: a67126464dc48186326028c560dc6c2b311905ddd9ab53ecefbdf158b624a380 |
# -*- coding: utf-8 -*-
""" Tests for student account views. """
import re
from nose.plugins.attrib import attr
from unittest import skipUnless
from urllib import urlencode
import mock
import ddt
from django.conf import settings
from django.core import mail
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.contrib.messages.middleware import MessageMiddleware
from django.test import TestCase
from django.test.utils import override_settings
from django.http import HttpRequest
from edx_rest_api_client import exceptions
from course_modes.models import CourseMode
from commerce.models import CommerceConfiguration
from commerce.tests import TEST_API_URL, TEST_API_SIGNING_KEY, factories
from commerce.tests.mocks import mock_get_orders
from openedx.core.djangoapps.programs.tests.mixins import ProgramsApiConfigMixin
from openedx.core.djangoapps.user_api.accounts.api import activate_account, create_account
from openedx.core.djangoapps.user_api.accounts import EMAIL_MAX_LENGTH
from openedx.core.djangolib.js_utils import dump_js_escaped_json
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase
from student.tests.factories import UserFactory
from student_account.views import account_settings_context, get_user_orders
from third_party_auth.tests.testutil import simulate_running_pipeline, ThirdPartyAuthTestMixin
from util.testing import UrlResetMixin
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from openedx.core.djangoapps.theming.tests.test_util import with_edx_domain_context
@ddt.ddt
class StudentAccountUpdateTest(CacheIsolationTestCase, UrlResetMixin):
""" Tests for the student account views that update the user's account information. """
USERNAME = u"heisenberg"
ALTERNATE_USERNAME = u"walt"
OLD_PASSWORD = u"ḅḷüëṡḳÿ"
NEW_PASSWORD = u"🄱🄸🄶🄱🄻🅄🄴"
OLD_EMAIL = u"walter@graymattertech.com"
NEW_EMAIL = u"walt@savewalterwhite.com"
INVALID_ATTEMPTS = 100
INVALID_EMAILS = [
None,
u"",
u"a",
"no_domain",
"no+domain",
"@",
"@domain.com",
"test@no_extension",
# Long email -- subtract the length of the @domain
# except for one character (so we exceed the max length limit)
u"{user}@example.com".format(
user=(u'e' * (EMAIL_MAX_LENGTH - 11))
)
]
INVALID_KEY = u"123abc"
URLCONF_MODULES = ['student_accounts.urls']
ENABLED_CACHES = ['default']
def setUp(self):
super(StudentAccountUpdateTest, self).setUp()
# Create/activate a new account
activation_key = create_account(self.USERNAME, self.OLD_PASSWORD, self.OLD_EMAIL)
activate_account(activation_key)
# Login
result = self.client.login(username=self.USERNAME, password=self.OLD_PASSWORD)
self.assertTrue(result)
@skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in LMS')
def test_password_change(self):
# Request a password change while logged in, simulating
# use of the password reset link from the account page
response = self._change_password()
self.assertEqual(response.status_code, 200)
# Check that an email was sent
self.assertEqual(len(mail.outbox), 1)
# Retrieve the activation link from the email body
email_body = mail.outbox[0].body
result = re.search('(?P<url>https?://[^\s]+)', email_body)
self.assertIsNot(result, None)
activation_link = result.group('url')
# Visit the activation link
response = self.client.get(activation_link)
self.assertEqual(response.status_code, 200)
# Submit a new password and follow the redirect to the success page
response = self.client.post(
activation_link,
# These keys are from the form on the current password reset confirmation page.
{'new_password1': self.NEW_PASSWORD, 'new_password2': self.NEW_PASSWORD},
follow=True
)
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Your password has been reset.")
# Log the user out to clear session data
self.client.logout()
# Verify that the new password can be used to log in
result = self.client.login(username=self.USERNAME, password=self.NEW_PASSWORD)
self.assertTrue(result)
# Try reusing the activation link to change the password again
response = self.client.post(
activation_link,
{'new_password1': self.OLD_PASSWORD, 'new_password2': self.OLD_PASSWORD},
follow=True
)
self.assertEqual(response.status_code, 200)
self.assertContains(response, "This password reset link is invalid. It may have been used already.")
self.client.logout()
# Verify that the old password cannot be used to log in
result = self.client.login(username=self.USERNAME, password=self.OLD_PASSWORD)
self.assertFalse(result)
# Verify that the new password continues to be valid
result = self.client.login(username=self.USERNAME, password=self.NEW_PASSWORD)
self.assertTrue(result)
@ddt.data(True, False)
def test_password_change_logged_out(self, send_email):
# Log the user out
self.client.logout()
# Request a password change while logged out, simulating
# use of the password reset link from the login page
if send_email:
response = self._change_password(email=self.OLD_EMAIL)
self.assertEqual(response.status_code, 200)
else:
# Don't send an email in the POST data, simulating
# its (potentially accidental) omission in the POST
# data sent from the login page
response = self._change_password()
self.assertEqual(response.status_code, 400)
def test_password_change_inactive_user(self):
# Log out the user created during test setup
self.client.logout()
# Create a second user, but do not activate it
create_account(self.ALTERNATE_USERNAME, self.OLD_PASSWORD, self.NEW_EMAIL)
# Send the view the email address tied to the inactive user
response = self._change_password(email=self.NEW_EMAIL)
# Expect that the activation email is still sent,
# since the user may have lost the original activation email.
self.assertEqual(response.status_code, 200)
self.assertEqual(len(mail.outbox), 1)
def test_password_change_no_user(self):
# Log out the user created during test setup
self.client.logout()
# Send the view an email address not tied to any user
response = self._change_password(email=self.NEW_EMAIL)
self.assertEqual(response.status_code, 400)
def test_password_change_rate_limited(self):
# Log out the user created during test setup, to prevent the view from
# selecting the logged-in user's email address over the email provided
# in the POST data
self.client.logout()
# Make many consecutive bad requests in an attempt to trigger the rate limiter
for attempt in xrange(self.INVALID_ATTEMPTS):
self._change_password(email=self.NEW_EMAIL)
response = self._change_password(email=self.NEW_EMAIL)
self.assertEqual(response.status_code, 403)
@ddt.data(
('post', 'password_change_request', []),
)
@ddt.unpack
def test_require_http_method(self, correct_method, url_name, args):
wrong_methods = {'get', 'put', 'post', 'head', 'options', 'delete'} - {correct_method}
url = reverse(url_name, args=args)
for method in wrong_methods:
response = getattr(self.client, method)(url)
self.assertEqual(response.status_code, 405)
def _change_password(self, email=None):
"""Request to change the user's password. """
data = {}
if email:
data['email'] = email
return self.client.post(path=reverse('password_change_request'), data=data)
@attr('shard_3')
@ddt.ddt
class StudentAccountLoginAndRegistrationTest(ThirdPartyAuthTestMixin, UrlResetMixin, ModuleStoreTestCase):
""" Tests for the student account views that update the user's account information. """
USERNAME = "bob"
EMAIL = "bob@example.com"
PASSWORD = "password"
URLCONF_MODULES = ['embargo']
@mock.patch.dict(settings.FEATURES, {'EMBARGO': True})
def setUp(self):
super(StudentAccountLoginAndRegistrationTest, self).setUp()
# For these tests, three third party auth providers are enabled by default:
self.configure_google_provider(enabled=True)
self.configure_facebook_provider(enabled=True)
self.configure_dummy_provider(
enabled=True,
icon_class='',
icon_image=SimpleUploadedFile('icon.svg', '<svg><rect width="50" height="100"/></svg>'),
)
@ddt.data(
("signin_user", "login"),
("register_user", "register"),
)
@ddt.unpack
def test_login_and_registration_form(self, url_name, initial_mode):
response = self.client.get(reverse(url_name))
expected_data = '"initial_mode": "{mode}"'.format(mode=initial_mode)
self.assertContains(response, expected_data)
@ddt.data("signin_user", "register_user")
def test_login_and_registration_form_already_authenticated(self, url_name):
# Create/activate a new account and log in
activation_key = create_account(self.USERNAME, self.PASSWORD, self.EMAIL)
activate_account(activation_key)
result = self.client.login(username=self.USERNAME, password=self.PASSWORD)
self.assertTrue(result)
# Verify that we're redirected to the dashboard
response = self.client.get(reverse(url_name))
self.assertRedirects(response, reverse("dashboard"))
@ddt.data(
(False, "signin_user"),
(False, "register_user"),
(True, "signin_user"),
(True, "register_user"),
)
@ddt.unpack
def test_login_and_registration_form_signin_preserves_params(self, is_edx_domain, url_name):
params = [
('course_id', 'edX/DemoX/Demo_Course'),
('enrollment_action', 'enroll'),
]
# The response should have a "Sign In" button with the URL
# that preserves the querystring params
with with_edx_domain_context(is_edx_domain):
response = self.client.get(reverse(url_name), params)
expected_url = '/login?{}'.format(self._finish_auth_url_param(params + [('next', '/dashboard')]))
self.assertContains(response, expected_url)
# Add additional parameters:
params = [
('course_id', 'edX/DemoX/Demo_Course'),
('enrollment_action', 'enroll'),
('course_mode', CourseMode.DEFAULT_MODE_SLUG),
('email_opt_in', 'true'),
('next', '/custom/final/destination')
]
# Verify that this parameter is also preserved
with with_edx_domain_context(is_edx_domain):
response = self.client.get(reverse(url_name), params)
expected_url = '/login?{}'.format(self._finish_auth_url_param(params))
self.assertContains(response, expected_url)
@mock.patch.dict(settings.FEATURES, {"ENABLE_THIRD_PARTY_AUTH": False})
@ddt.data("signin_user", "register_user")
def test_third_party_auth_disabled(self, url_name):
response = self.client.get(reverse(url_name))
self._assert_third_party_auth_data(response, None, None, [])
@ddt.data(
("signin_user", None, None),
("register_user", None, None),
("signin_user", "google-oauth2", "Google"),
("register_user", "google-oauth2", "Google"),
("signin_user", "facebook", "Facebook"),
("register_user", "facebook", "Facebook"),
("signin_user", "dummy", "Dummy"),
("register_user", "dummy", "Dummy"),
)
@ddt.unpack
def test_third_party_auth(self, url_name, current_backend, current_provider):
params = [
('course_id', 'course-v1:Org+Course+Run'),
('enrollment_action', 'enroll'),
('course_mode', CourseMode.DEFAULT_MODE_SLUG),
('email_opt_in', 'true'),
('next', '/custom/final/destination'),
]
# Simulate a running pipeline
if current_backend is not None:
pipeline_target = "student_account.views.third_party_auth.pipeline"
with simulate_running_pipeline(pipeline_target, current_backend):
response = self.client.get(reverse(url_name), params)
# Do NOT simulate a running pipeline
else:
response = self.client.get(reverse(url_name), params)
# This relies on the THIRD_PARTY_AUTH configuration in the test settings
expected_providers = [
{
"id": "oa2-dummy",
"name": "Dummy",
"iconClass": None,
"iconImage": settings.MEDIA_URL + "icon.svg",
"loginUrl": self._third_party_login_url("dummy", "login", params),
"registerUrl": self._third_party_login_url("dummy", "register", params)
},
{
"id": "oa2-facebook",
"name": "Facebook",
"iconClass": "fa-facebook",
"iconImage": None,
"loginUrl": self._third_party_login_url("facebook", "login", params),
"registerUrl": self._third_party_login_url("facebook", "register", params)
},
{
"id": "oa2-google-oauth2",
"name": "Google",
"iconClass": "fa-google-plus",
"iconImage": None,
"loginUrl": self._third_party_login_url("google-oauth2", "login", params),
"registerUrl": self._third_party_login_url("google-oauth2", "register", params)
},
]
self._assert_third_party_auth_data(response, current_backend, current_provider, expected_providers)
def test_hinted_login(self):
params = [("next", "/courses/something/?tpa_hint=oa2-google-oauth2")]
response = self.client.get(reverse('signin_user'), params)
self.assertContains(response, '"third_party_auth_hint": "oa2-google-oauth2"')
@override_settings(SITE_NAME=settings.MICROSITE_TEST_HOSTNAME)
def test_microsite_uses_old_login_page(self):
# Retrieve the login page from a microsite domain
# and verify that we're served the old page.
resp = self.client.get(
reverse("signin_user"),
HTTP_HOST=settings.MICROSITE_TEST_HOSTNAME
)
self.assertContains(resp, "Log into your Test Microsite Account")
self.assertContains(resp, "login-form")
def test_microsite_uses_old_register_page(self):
# Retrieve the register page from a microsite domain
# and verify that we're served the old page.
resp = self.client.get(
reverse("register_user"),
HTTP_HOST=settings.MICROSITE_TEST_HOSTNAME
)
self.assertContains(resp, "Register for Test Microsite")
self.assertContains(resp, "register-form")
def test_login_registration_xframe_protected(self):
resp = self.client.get(
reverse("register_user"),
{},
HTTP_REFERER="http://localhost/iframe"
)
self.assertEqual(resp['X-Frame-Options'], 'DENY')
self.configure_lti_provider(name='Test', lti_hostname='localhost', lti_consumer_key='test_key', enabled=True)
resp = self.client.get(
reverse("register_user"),
HTTP_REFERER="http://localhost/iframe"
)
self.assertEqual(resp['X-Frame-Options'], 'ALLOW')
def _assert_third_party_auth_data(self, response, current_backend, current_provider, providers):
"""Verify that third party auth info is rendered correctly in a DOM data attribute. """
finish_auth_url = None
if current_backend:
finish_auth_url = reverse("social:complete", kwargs={"backend": current_backend}) + "?"
auth_info = {
"currentProvider": current_provider,
"providers": providers,
"secondaryProviders": [],
"finishAuthUrl": finish_auth_url,
"errorMessage": None,
}
auth_info = dump_js_escaped_json(auth_info)
expected_data = '"third_party_auth": {auth_info}'.format(
auth_info=auth_info
)
self.assertContains(response, expected_data)
def _third_party_login_url(self, backend_name, auth_entry, login_params):
"""Construct the login URL to start third party authentication. """
return u"{url}?auth_entry={auth_entry}&{param_str}".format(
url=reverse("social:begin", kwargs={"backend": backend_name}),
auth_entry=auth_entry,
param_str=self._finish_auth_url_param(login_params),
)
def _finish_auth_url_param(self, params):
"""
Make the next=... URL parameter that indicates where the user should go next.
>>> _finish_auth_url_param([('next', '/dashboard')])
'/account/finish_auth?next=%2Fdashboard'
"""
return urlencode({
'next': '/account/finish_auth?{}'.format(urlencode(params))
})
@override_settings(ECOMMERCE_API_URL=TEST_API_URL, ECOMMERCE_API_SIGNING_KEY=TEST_API_SIGNING_KEY)
class AccountSettingsViewTest(ThirdPartyAuthTestMixin, TestCase, ProgramsApiConfigMixin):
""" Tests for the account settings view. """
USERNAME = 'student'
PASSWORD = 'password'
FIELDS = [
'country',
'gender',
'language',
'level_of_education',
'password',
'year_of_birth',
'preferred_language',
]
@mock.patch("django.conf.settings.MESSAGE_STORAGE", 'django.contrib.messages.storage.cookie.CookieStorage')
def setUp(self):
super(AccountSettingsViewTest, self).setUp()
self.user = UserFactory.create(username=self.USERNAME, password=self.PASSWORD)
CommerceConfiguration.objects.create(cache_ttl=10, enabled=True)
self.client.login(username=self.USERNAME, password=self.PASSWORD)
self.request = HttpRequest()
self.request.user = self.user
# For these tests, two third party auth providers are enabled by default:
self.configure_google_provider(enabled=True)
self.configure_facebook_provider(enabled=True)
        # Python-social saves auth failure notifications in Django messages.
# See pipeline.get_duplicate_provider() for details.
self.request.COOKIES = {}
MessageMiddleware().process_request(self.request)
messages.error(self.request, 'Facebook is already in use.', extra_tags='Auth facebook')
def test_context(self):
context = account_settings_context(self.request)
user_accounts_api_url = reverse("accounts_api", kwargs={'username': self.user.username})
self.assertEqual(context['user_accounts_api_url'], user_accounts_api_url)
user_preferences_api_url = reverse('preferences_api', kwargs={'username': self.user.username})
self.assertEqual(context['user_preferences_api_url'], user_preferences_api_url)
for attribute in self.FIELDS:
self.assertIn(attribute, context['fields'])
self.assertEqual(
context['user_accounts_api_url'], reverse("accounts_api", kwargs={'username': self.user.username})
)
self.assertEqual(
context['user_preferences_api_url'], reverse('preferences_api', kwargs={'username': self.user.username})
)
self.assertEqual(context['duplicate_provider'], 'facebook')
self.assertEqual(context['auth']['providers'][0]['name'], 'Facebook')
self.assertEqual(context['auth']['providers'][1]['name'], 'Google')
def test_view(self):
view_path = reverse('account_settings')
response = self.client.get(path=view_path)
for attribute in self.FIELDS:
self.assertIn(attribute, response.content)
def test_header_with_programs_listing_enabled(self):
"""
Verify that tabs header will be shown while program listing is enabled.
"""
self.create_programs_config(program_listing_enabled=True)
view_path = reverse('account_settings')
response = self.client.get(path=view_path)
self.assertContains(response, '<li class="tab-nav-item">')
def test_header_with_programs_listing_disabled(self):
"""
Verify that nav header will be shown while program listing is disabled.
"""
self.create_programs_config(program_listing_enabled=False)
view_path = reverse('account_settings')
response = self.client.get(path=view_path)
self.assertContains(response, '<li class="item nav-global-01">')
def test_commerce_order_detail(self):
with mock_get_orders():
order_detail = get_user_orders(self.user)
user_order = mock_get_orders.default_response['results'][0]
expected = [
{
'number': user_order['number'],
'price': user_order['total_excl_tax'],
'title': user_order['lines'][0]['title'],
'order_date': 'Jan 01, 2016',
'receipt_url': '/commerce/checkout/receipt/?orderNum=' + user_order['number']
}
]
self.assertEqual(order_detail, expected)
def test_commerce_order_detail_exception(self):
with mock_get_orders(exception=exceptions.HttpNotFoundError):
order_detail = get_user_orders(self.user)
self.assertEqual(order_detail, [])
def test_incomplete_order_detail(self):
response = {
'results': [
factories.OrderFactory(
status='Incomplete',
lines=[
factories.OrderLineFactory(
product=factories.ProductFactory(attribute_values=[factories.ProductAttributeFactory()])
)
]
)
]
}
with mock_get_orders(response=response):
order_detail = get_user_orders(self.user)
self.assertEqual(order_detail, [])
def test_honor_course_order_detail(self):
response = {
'results': [
factories.OrderFactory(
lines=[
factories.OrderLineFactory(
product=factories.ProductFactory(attribute_values=[factories.ProductAttributeFactory(
name='certificate_type',
value='honor'
)])
)
]
)
]
}
with mock_get_orders(response=response):
order_detail = get_user_orders(self.user)
self.assertEqual(order_detail, [])
def test_order_history_with_no_product(self):
response = {
'results': [
factories.OrderFactory(
lines=[
factories.OrderLineFactory(
product=None
),
factories.OrderLineFactory(
product=factories.ProductFactory(attribute_values=[factories.ProductAttributeFactory(
name='certificate_type',
value='verified'
)])
)
]
)
]
}
with mock_get_orders(response=response):
order_detail = get_user_orders(self.user)
self.assertEqual(len(order_detail), 1)
@override_settings(SITE_NAME=settings.MICROSITE_LOGISTRATION_HOSTNAME)
class MicrositeLogistrationTests(TestCase):
"""
Test to validate that microsites can display the logistration page
"""
def test_login_page(self):
"""
Make sure that we get the expected logistration page on our specialized
microsite
"""
resp = self.client.get(
reverse('signin_user'),
HTTP_HOST=settings.MICROSITE_LOGISTRATION_HOSTNAME
)
self.assertEqual(resp.status_code, 200)
self.assertIn('<div id="login-and-registration-container"', resp.content)
def test_registration_page(self):
"""
Make sure that we get the expected logistration page on our specialized
microsite
"""
resp = self.client.get(
reverse('register_user'),
HTTP_HOST=settings.MICROSITE_LOGISTRATION_HOSTNAME
)
self.assertEqual(resp.status_code, 200)
self.assertIn('<div id="login-and-registration-container"', resp.content)
@override_settings(SITE_NAME=settings.MICROSITE_TEST_HOSTNAME)
def test_no_override(self):
"""
Make sure we get the old style login/registration if we don't override
"""
resp = self.client.get(
reverse('signin_user'),
HTTP_HOST=settings.MICROSITE_TEST_HOSTNAME
)
self.assertEqual(resp.status_code, 200)
self.assertNotIn('<div id="login-and-registration-container"', resp.content)
resp = self.client.get(
reverse('register_user'),
HTTP_HOST=settings.MICROSITE_TEST_HOSTNAME
)
self.assertEqual(resp.status_code, 200)
self.assertNotIn('<div id="login-and-registration-container"', resp.content)
|
zhenzhai/edx-platform
|
lms/djangoapps/student_account/test/test_views.py
|
Python
|
agpl-3.0
| 26,045
|
[
"VisIt"
] |
5ad7dabc20f2488222734f1a622f303ede7ad1039ab3b6f1bd00c2825e4e3a47
|
import numpy as np
import pandas as pd
import sys,os,re,multiprocessing,netCDF4
from netCDF4 import Dataset
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
#netcdf file path
ncfile = sys.argv[1]
netCDF_data = Dataset(ncfile, mode='r')
for group in netCDF_data.groups:
print '\n\nReading', group
#data
spc = pd.DataFrame(netCDF_data.groups[group].variables['Spec'][:])
spc.columns = str(netCDF_data.groups[group].variables['Spec'].head).split(',')
#rte = pd.DataFrame(netCDF_data.groups[group].variables['Rate'][:])
#rte.columns = str(netCDF_data.groups[group].variables['Rate'].head).split(',')
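    #divide by mean M (presumably the total number density of air) so species columns become mixing ratios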
spc = spc/spc.M.mean()
spc.sort_index(axis=1,inplace=True)# arrange alphabetically
pp = PdfPages('%s.pdf'%group)
for i in xrange(0, len(spc.columns), 6):
        spc[spc.columns[i:i+6]].plot(subplots=True) # six columns per page, matching the xrange step
plt.tight_layout()
plt.ylabel('mix ratio')
#plt.locator_params(axis='y',nbins=2)
        print '%.1f' % (100.0 * i / len(spc.columns)), '% done'
plt.savefig(pp, format='pdf')
plt.close()
pp.close()
print 'PDF out'
|
wolfiex/ropacode
|
PDF_concentration.py
|
Python
|
cc0-1.0
| 1,202
|
[
"NetCDF"
] |
1a25b6b2e3884b877285aa25886b9706d8753999156dc0cd566666856f54e7a1
|
import sys
sys.path.insert(0, '../../')
import numpy as np
from jax.experimental import optimizers
import matplotlib.pyplot as plt
import time
from sde_gp import SDEGP
import approximate_inference as approx_inf
import priors
import likelihoods
from utils import plot
import pickle
import pandas as pd
pi = 3.141592653589793
plot_intermediate = False
print('loading data ...')
np.random.seed(99)
N = 52 * 10080 # 10080 = one week, 2049280 total points
electricity_data = pd.read_csv('electricity.csv', sep=' ', header=None, engine='python').values[:N, :]
x = electricity_data[:, 0][:, None]
y = electricity_data[:, 1][:, None]
print('N =', N)
ind_shuffled = np.random.permutation(N)
ind_split = np.stack(np.split(ind_shuffled, 10)) # 10 random batches of data indices
if len(sys.argv) > 1:
plot_final = False
method = int(sys.argv[1])
fold = int(sys.argv[2])
else:
plot_final = True
method = 0
fold = 0
print('method number', method)
print('batch number', fold)
# Get training and test indices
ind_test = ind_split[fold] # np.sort(ind_shuffled[:N//10])
ind_train = np.concatenate(ind_split[np.arange(10) != fold])
x_train = x[ind_train] # 90/10 train/test split
x_test = x[ind_test]
y_train = y[ind_train]
y_test = y[ind_test]
var_y = .1
var_f = 1. # GP variance
len_f = 1. # GP lengthscale
period = 1. # period of quasi-periodic component
len_p = 5. # lengthscale of quasi-periodic component
var_f_mat = 1.
len_f_mat = 1.
prior1 = priors.Matern32(variance=var_f_mat, lengthscale=len_f_mat)
prior2 = priors.QuasiPeriodicMatern12(variance=var_f, lengthscale_periodic=len_p,
period=period, lengthscale_matern=len_f)
prior = priors.Sum([prior1, prior2])
lik = likelihoods.Gaussian(variance=var_y)
if method == 0:
inf_method = approx_inf.EKS(damping=.1)
elif method == 1:
inf_method = approx_inf.UKS(damping=.1)
elif method == 2:
inf_method = approx_inf.GHKS(damping=.1)
elif method == 3:
inf_method = approx_inf.EP(power=1, intmethod='GH', damping=.1)
elif method == 4:
inf_method = approx_inf.EP(power=0.5, intmethod='GH', damping=.1)
elif method == 5:
inf_method = approx_inf.EP(power=0.01, intmethod='GH', damping=.1)
elif method == 6:
inf_method = approx_inf.VI(intmethod='GH', damping=.1)
model = SDEGP(prior=prior, likelihood=lik, t=x_train, y=y_train, approx_inf=inf_method)
opt_init, opt_update, get_params = optimizers.adam(step_size=1e-1)
# parameters should be a 2-element list [param_prior, param_likelihood]
opt_state = opt_init([model.prior.hyp, model.likelihood.hyp])
def gradient_step(i, state, mod):
params = get_params(state)
mod.prior.hyp = params[0]
mod.likelihood.hyp = params[1]
# grad(Filter) + Smoother:
neg_log_marg_lik, gradients = mod.run()
print('iter %2d: nlml=%2.2f' %
(i, neg_log_marg_lik))
if plot_intermediate:
plot(mod, i)
return opt_update(i, gradients, state)
print('optimising the hyperparameters ...')
t0 = time.time()
num_iters = 250
for j in range(num_iters):
opt_state = gradient_step(j, opt_state, model)
t1 = time.time()
print('optimisation time: %2.2f secs' % (t1-t0))
x_plot = np.linspace(np.min(x), np.max(x), N)
# calculate posterior predictive distribution via filtering and smoothing at train & test locations:
print('calculating the posterior predictive distribution ...')
t0 = time.time()
nlpd = model.negative_log_predictive_density(t=x_test, y=y_test)
posterior_mean, posterior_cov = model.predict(t=x_plot)
t1 = time.time()
print('prediction time: %2.2f secs' % (t1-t0))
print('test NLPD: %1.2f' % nlpd)
with open("output/" + str(method) + "_" + str(fold) + "_nlpd.txt", "wb") as fp:
pickle.dump(nlpd, fp)
# with open("output/" + str(method) + "_" + str(fold) + "_nlpd.txt", "rb") as fp:
# nlpd_show = pickle.load(fp)
# print(nlpd_show)
if plot_final:
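    # 95% credible interval: mean ± 1.96 standard deviations of the Gaussian predictive marginals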
lb = posterior_mean - 1.96 * posterior_cov**0.5
ub = posterior_mean + 1.96 * posterior_cov**0.5
print('plotting ...')
plt.figure(1, figsize=(12, 5))
plt.clf()
plt.plot(x, y, 'b.', label='training observations', markersize=4)
plt.plot(x_test, y_test, 'r.', alpha=0.5, label='test observations', markersize=4)
plt.plot(x_plot, posterior_mean, 'g', label='posterior mean')
plt.fill_between(x_plot, lb, ub, color='g', alpha=0.05, label='95% confidence')
plt.xlim(x_plot[0], x_plot[-1])
plt.legend()
plt.title('GP regression via Kalman smoothing. Test NLPD: %1.2f' % nlpd)
plt.xlabel('time, $t$')
plt.show()
|
AaltoML/kalman-jax
|
kalmanjax/experiments/electricity/electricity.py
|
Python
|
apache-2.0
| 4,548
|
[
"Gaussian"
] |
dbb9550363a9e2282937e9fc727495b11671642469a4662058d832a55d21b0c7
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
from petl.compat import PY2
if PY2:
from urllib import unquote_plus
else:
from urllib.parse import unquote_plus
import petl as etl
# activate tabix extension
import petlx.bio.tabix
def gff3_parse_attributes(attributes_string):
"""
Parse a string of GFF3 attributes ('key=value' pairs delimited by ';')
and return a dictionary.
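    For example (an illustrative input, not taken from the fixture files)::
        >>> gff3_parse_attributes('ID=gene1')
        {'ID': 'gene1'}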
"""
attributes = dict()
fields = attributes_string.split(';')
for f in fields:
if '=' in f:
key, value = f.split('=')
attributes[unquote_plus(key).strip()] = unquote_plus(value.strip())
elif len(f) > 0:
# not strictly kosher
attributes[unquote_plus(f).strip()] = True
return attributes
GFF3_HEADER = ('seqid', 'source', 'type', 'start', 'end', 'score', 'strand',
'phase', 'attributes')
def fromgff3(filename, region=None):
"""
Extract feature rows from a GFF3 file, e.g.::
>>> import petl as etl
>>> # activate bio extensions
... import petlx.bio
>>> table1 = etl.fromgff3('fixture/sample.gff')
>>> table1.look(truncate=30)
+--------------+---------+---------------+-------+---------+-------+--------+-------+--------------------------------+
| seqid | source | type | start | end | score | strand | phase | attributes |
+==============+=========+===============+=======+=========+=======+========+=======+================================+
| 'apidb|MAL1' | 'ApiDB' | 'supercontig' | 1 | 643292 | '.' | '+' | '.' | {'localization': 'nuclear', 'o |
+--------------+---------+---------------+-------+---------+-------+--------+-------+--------------------------------+
| 'apidb|MAL2' | 'ApiDB' | 'supercontig' | 1 | 947102 | '.' | '+' | '.' | {'localization': 'nuclear', 'o |
+--------------+---------+---------------+-------+---------+-------+--------+-------+--------------------------------+
| 'apidb|MAL3' | 'ApiDB' | 'supercontig' | 1 | 1060087 | '.' | '+' | '.' | {'localization': 'nuclear', 'o |
+--------------+---------+---------------+-------+---------+-------+--------+-------+--------------------------------+
| 'apidb|MAL4' | 'ApiDB' | 'supercontig' | 1 | 1204112 | '.' | '+' | '.' | {'localization': 'nuclear', 'o |
+--------------+---------+---------------+-------+---------+-------+--------+-------+--------------------------------+
| 'apidb|MAL5' | 'ApiDB' | 'supercontig' | 1 | 1343552 | '.' | '+' | '.' | {'localization': 'nuclear', 'o |
+--------------+---------+---------------+-------+---------+-------+--------+-------+--------------------------------+
...
A region query string of the form '[seqid]' or '[seqid]:[start]-[end]'
may be given for the `region` argument. If given, requires the GFF3
file to be position sorted, bgzipped and tabix indexed. Requires pysam to be
installed. E.g.::
>>> # extract from a specific genome region via tabix
... table2 = etl.fromgff3('fixture/sample.sorted.gff.gz',
... region='apidb|MAL5:1289593-1289595')
>>> table2.look(truncate=30)
+--------------+---------+---------------+---------+---------+-------+--------+-------+--------------------------------+
| seqid | source | type | start | end | score | strand | phase | attributes |
+==============+=========+===============+=========+=========+=======+========+=======+================================+
| 'apidb|MAL5' | 'ApiDB' | 'supercontig' | 1 | 1343552 | '.' | '+' | '.' | {'localization': 'nuclear', 'o |
+--------------+---------+---------------+---------+---------+-------+--------+-------+--------------------------------+
| 'apidb|MAL5' | 'ApiDB' | 'exon' | 1289594 | 1291685 | '.' | '+' | '.' | {'size': '2092', 'Parent': 'ap |
+--------------+---------+---------------+---------+---------+-------+--------+-------+--------------------------------+
| 'apidb|MAL5' | 'ApiDB' | 'gene' | 1289594 | 1291685 | '.' | '+' | '.' | {'ID': 'apidb|MAL5_18S', 'web_ |
+--------------+---------+---------------+---------+---------+-------+--------+-------+--------------------------------+
| 'apidb|MAL5' | 'ApiDB' | 'rRNA' | 1289594 | 1291685 | '.' | '+' | '.' | {'ID': 'apidb|rna_MAL5_18S-1', |
+--------------+---------+---------------+---------+---------+-------+--------+-------+--------------------------------+
"""
if region is None:
# parse file as tab-delimited
table = etl.fromtsv(filename)
else:
# extract via tabix
table = etl.fromtabix(filename, region=region)
return (
table
.pushheader(GFF3_HEADER)
.skipcomments('#')
# ignore any row not 9 values long (e.g., trailing fasta)
.rowlenselect(9)
# parse attributes into a dict
.convert('attributes', gff3_parse_attributes)
# parse coordinates
.convert(('start', 'end'), int)
)
etl.fromgff3 = fromgff3
|
alimanfoo/petlx
|
petlx/bio/gff3.py
|
Python
|
mit
| 5,386
|
[
"pysam"
] |
75bcafb9b631b3d4a5e028f253cb5887a64040ef2ef9df3f2ef251ae40d38714
|
""" This is a test of the creation of the json dump file
"""
import unittest
import os
from DIRAC.WorkloadManagementSystem.Utilities.PilotCStoJSONSynchronizer import PilotCStoJSONSynchronizer
from DIRAC.ConfigurationSystem.private.ConfigurationClient import ConfigurationClient
from DIRAC.ConfigurationSystem.Client.ConfigurationData import gConfigurationData
from DIRAC.Core.Utilities.CFG import CFG
class PilotCStoJSONSynchronizerTestCase(unittest.TestCase):
""" Base class for the PilotCStoJSONSynchronizer test cases
"""
def setUp(self):
# Creating test configuration file
self.testCfgFileName = 'test.cfg'
cfgContent = '''
DIRAC
{
Setup=TestSetup
Setups
{
TestSetup
{
WorkloadManagement=MyWM
}
}
}
Systems
{
WorkloadManagement
{
MyWM
{
URLs
{
Service1 = dips://server1:1234/WorkloadManagement/Service1
Service2 = dips://$MAINSERVERS$:5678/WorkloadManagement/Service2
}
FailoverURLs
{
Service2 = dips://failover1:5678/WorkloadManagement/Service2
}
}
}
}
Operations{
Defaults
{
Pilot
{
Project = LHCb
GenericPilotDN = /DC=ch/DC=cern/OU=Organic Units/OU=Users/CN=doe/CN=111213/CN=Joe Doe
GenericPilotGroup = xxx_pilot
}
MainServers = gw1, gw2
}
}
Registry
{
Users
{
ttester
{
DN = /DC=ch/DC=cern/OU=Organic Units/OU=Users/CN=ttester/CN=696969/CN=Thomas Tester
CA = /DC=ch/DC=cern/CN=CERN Grid Certification Authority
Email = thomas.tester@cern.ch
}
franekbolek
{
DN = /DC=ch/DC=voodo/OU=Organic Units/OU=Users/CN=franekbolek/CN=111122/CN=Franek Bolek
CA = /DC=ch/DC=voodo/CN=Voodo Grid Certification Authority
Email = franek.bolek@voodo.pl
}
}
Groups
{
lhcb_pilot
{
#@@-host - /DC=ch/DC=voodo/OU=computers/CN=brabra.voodo.pl
Users = franekbolek
Users += ttester
Properties = GenericPilot
Properties += LimitedDelegation
VOMSRole = /lhcb/Role=pilot
#@@-ggg@diracAdmin - 2015-07-07 13:40:55
VO = lhcb
}
}
}
'''
with open(self.testCfgFileName, 'w') as f:
f.write(cfgContent)
    gConfig = ConfigurationClient(fileToLoadList=[self.testCfgFileName])  # replace the configuration with our own
self.setup = gConfig.getValue('/DIRAC/Setup', '')
self.wm = gConfig.getValue('DIRAC/Setups/' + self.setup + '/WorkloadManagement', '')
def tearDown(self):
try:
os.remove(self.testCfgFileName)
except OSError:
pass
# SUPER UGLY: one must recreate the CFG objects of gConfigurationData
# not to conflict with other tests that might be using a local dirac.cfg
gConfigurationData.localCFG = CFG()
gConfigurationData.remoteCFG = CFG()
gConfigurationData.mergedCFG = CFG()
gConfigurationData.generateNewVersion()
class Test_PilotCStoJSONSynchronizer_sync(PilotCStoJSONSynchronizerTestCase):
def test_success(self):
synchroniser = PilotCStoJSONSynchronizer()
res = synchroniser._syncJSONFile()
self.assertTrue(res['OK'])
if __name__ == '__main__':
suite = unittest.defaultTestLoader.loadTestsFromTestCase(PilotCStoJSONSynchronizerTestCase)
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(Test_PilotCStoJSONSynchronizer_sync))
testResult = unittest.TextTestRunner(verbosity=2).run(suite)
|
chaen/DIRAC
|
WorkloadManagementSystem/Utilities/test/Test_PilotCStoJSONSynchronizer.py
|
Python
|
gpl-3.0
| 3,675
|
[
"DIRAC"
] |
0e571fc6f82ca0a856018ca1a610dde10009d6a55f249a86b00b339fb34839b4
|
# -*- coding: utf-8 -*-
"""OpenERP community orm.Model checker"""
import re
try:
import ast
from ast import NodeVisitor
except ImportError:
from flake8.util import ast
from ast import NodeVisitor
from common_checker.base_checker import BaseChecker
# To improve: manage spaces
INVALID_MODEL_CLASS = ['osv', 'osv.osv']
INVALID_TRANSIENT_CLASS = ['osv_memory']
MODEL_NAMES = ['Model', 'TransientModel', 'AbstractModel', 'BaseModel']
INVALID_CLASS_NAME = re.compile(r'([A-Z][a-z0-9]+)+')
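# matches CapWords-style names (e.g. "ResPartner"), which check_model_name flags with O703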
class OpenERPModelChecker(BaseChecker, ast.NodeVisitor):
"""ast.NodeVisitor subclass that check root ast.node.
It checks class validity
Please take look at ast.Node visitor for more information
about visit/visitor behavior
"""
invalid_name = INVALID_CLASS_NAME.match
O701 = 'osv.osv is deprecated, please use orm.Model'
O702 = 'osv.osv_memory is deprecated, please use orm.TransientModel'
O703 = 'orm.Model class name should NOT use CapWords convention'
def make_error_tuple(self, code, node):
"""Make an error tuple used by flake8
Uses input code to find corresponding property lookup
:param code: string of code number must be set as property
:param node: ast node source of error
:returns: (line number, col, text, type)
"""
code_text = '%s %s' % (code, getattr(self, code))
return (node.lineno, node.col_offset, code_text, type(self))
def check_model_type(self, node):
"""Check if deprecated osv.osv is used"""
faulty = [x for x in node.bases if getattr(x, 'attr', None)
in INVALID_MODEL_CLASS]
for fault in faulty:
self.errors.append(self.make_error_tuple('O701', node))
def check_model_transient_type(self, node):
"""Check if deprecated osv.osv_memory is used"""
faulty = [x for x in node.bases if getattr(x, 'attr', None)
in INVALID_TRANSIENT_CLASS]
for fault in faulty:
self.errors.append(self.make_error_tuple('O702', node))
def check_model_name(self, node):
"""Check is Model name follows conventions"""
if any(x for x in node.bases if getattr(x, 'attr', None) in MODEL_NAMES):
if self.invalid_name(node.name):
self.errors.append(self.make_error_tuple('O703', node))
def visit_ClassDef(self, node):
"""Visits and validate orm.Model definition"""
self.check_model_type(node)
self.check_model_transient_type(node)
self.check_model_name(node)
|
nbessi/openerp-conventions
|
v7/model_checker.py
|
Python
|
mit
| 2,572
|
[
"VisIt"
] |
d1a31fb5b5c9884a992983191866b1c6b289189bedf38cd8d9bdd696a14dbdeb
|
from copy import deepcopy
from scipy import fftpack
from lib2.Measurement import *
from lib2.IQPulseSequence import IQPulseBuilder
class DigitizerWithPowerSweepMeasurementBase(Measurement):
"""
Class for measurements with a Spectrum digitizer and power sweep
This one must do:
create Measurement object, set up all devices and take them from the class;
set up all the parameters
make measurements:
-- sweep power/if_freq of one/another/both of generators
and/or central if_freq of EXA and measure single trace / list sweep for certain frequencies
--
"""
def __init__(self, name, sample_name, measurement_result_class, **devs_aliases):
"""
Parameters
----------
name : str
name of bias measurement
sample_name : str
name of measured sample
measurement_result_class : MeasurementResult
            measurement result class for appropriate data handling and visualization for this measurement
devs_aliases : dict[str, Any]
same as for Measurement class
Notes
---------
vna and bias source is optional
list_devs_names: {exa_name: default_name, src_plus_name: default_name,
src_minus_name: default_name, vna_name: default_name, current_name: default_name}
"""
self._dig = devs_aliases.pop("dig", None)[0]
super().__init__(name, sample_name, devs_aliases)
self._devs_aliases = list(devs_aliases.keys())
self._measurement_result = measurement_result_class(name, sample_name)
# measurement class specific parameters section
self._cal = None
self._adc_parameters = None
self._lo_parameters = None
self._waveform_functions = {"CONTINUOUS TWO WAVES": self.get_two_continuous_waves,
"CONTINUOUS WAVE": self.get_continuous_wave,
"CONTINUOUS TWO WAVES FG": self.get_two_continuous_waves_fg}
self._chosen_waveform_function = self._waveform_functions["CONTINUOUS TWO WAVES"]
self._delta = 0
self._modulation_array = None
self._sweep_powers = None
self.pulse_builder = None
self._start_idx = None
self._end_idx = None
self._frequencies = None
def set_fixed_parameters(self, waveform_type, awg_parameters=[], adc_parameters=[], freq_limits=(), lo_parameters=[]):
"""
Parameters
----------
waveform_type : str
Choose the desired mode of operation
One of the following is possible:
"CONTINUOUS TWO WAVES"
"CONTINUOUS WAVE"
"CONTINUOUS TWO WAVES FG"
awg_parameters : list[dict[str,Any]]
            presumably the parameters of the IQ AWG (a "calibration" entry is expected)
adc_parameters : list[dict[str, Any]]
"channels" : [1], # a list of channels to measure
"ch_amplitude": 200, # amplitude for every channel
"dur_seg": 100e-6, # duration of a segment in us
"n_avg": 80000, # number of averages
"n_seg": 2, # number of segments
"oversampling_factor": 2, # sample_rate = max_sample_rate / oversampling_factor
"pretrigger": 32,
freq_limits : tuple[float]
fourier limits for visualization
lo_parameters : list[dict[str, Any]]
Returns
-------
None
Examples
________
.ipynb
name = "CWM_P";
sample_name = "QOP_2_probe";
wmBase = FourWaveMixingBase(name, sample_name, dig=[dig], lo=[exg], iqawg=[iqawg]);
dig.stop_card()
#awg.trigger_output_config("OFF", channel=channelI)
#awg.trigger_output_config("ON", channel=channelQ)
adc_pars = {"channels" : [1], # a list of channels to measure
"ch_amplitude": 200, # amplitude for every channel
"dur_seg": 50e-6, # duration of a segment in us
"n_avg": 20000, # number of averages
"n_seg": 8, # number of segments
"oversampling_factor": 4, # sample_rate = max_sample_rate / oversampling_factor
"pretrigger": 32,
}
lo_pars = { "power": lo_power,
"if_freq": lo_freq,
}
wmBase.set_fixed_parameters(delta = 20e3, awg_parameters=[{"calibration": ro_cal}],
adc_parameters=[adc_pars], freq_limits=(19.5, 20.5), lo_parameters=[lo_pars])
wmBase.set_swept_parameters(powers_limits=(-40, 0), n_powers=201)
#awg.trigger_output_config("ON", channel=channelQ)
"""
self._chosen_waveform_function = self._waveform_functions[waveform_type]
if len(awg_parameters) > 0:
self._cal = awg_parameters[0]["calibration"]
self._amplitudes = deepcopy(self._cal._if_amplitudes)
self.pulse_builder = WMPulseBuilder(self._cal)
if len(adc_parameters) > 0:
self._adc_parameters = adc_parameters[0]
self._dig.set_oversampling_factor(self._adc_parameters["oversampling_factor"])
self._segment_size_optimal = int(self._adc_parameters["dur_seg"] * self._dig.get_sample_rate())
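            # pad the segment size up to the next multiple of 32 samples (presumably a digitizer granularity requirement)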
self._segment_size = self._segment_size_optimal + 32 - self._segment_size_optimal % 32
self._bufsize = self._adc_parameters["n_seg"] * self._segment_size * 4 * len(self._adc_parameters["channels"])
self._dig.setup_averaging_mode(self._adc_parameters["channels"], self._adc_parameters["ch_amplitude"],
self._adc_parameters["n_seg"], self._segment_size,
self._adc_parameters["pretrigger"],
self._adc_parameters["n_avg"])
self._freq_limits = freq_limits
# optimal size calculation
self.nfft = fftpack.helper.next_fast_len(self._adc_parameters["n_seg"] * self._segment_size_optimal)
# obtaining frequencies (frequencies is duplicating)
xf = np.fft.fftfreq(self.nfft, 1 / self._dig.get_sample_rate()) / 1e6
self._start_idx = np.searchsorted(xf[:self.nfft // 2 - 1], self._freq_limits[0])
self._end_idx = np.searchsorted(xf[:self.nfft // 2 - 1], self._freq_limits[1])
self._frequencies = xf[self._start_idx:self._end_idx + 1]
self._measurement_result.get_context().update({"calibration_results": self._cal.get_optimization_results(), \
"radiation_parameters": self._cal.get_radiation_parameters()})
super().set_fixed_parameters(iqawg=awg_parameters, lo=lo_parameters)
if waveform_type == "CONTINUOUS TWO WAVES FG":
self._iqawg[0].output_continuous_two_freq_IQ_waves(self._delta)
def set_swept_parameters(self, powers_limits, n_powers):
self._sweep_powers = np.linspace(*powers_limits, n_powers)
swept_parameters = {"powers at $\\omega_{p}$": (self._set_power, self._sweep_powers)}
super().set_swept_parameters(**swept_parameters)
par_name = list(swept_parameters.keys())[0]
self._measurement_result.set_parameter_name(par_name)
# self._sources_on()
def close_devs(self, *devs_to_close):
if "spcm" in devs_to_close:
self._dig.close()
Measurement.close_devs(devs_to_close)
def _sources_on(self):
iq_sequence = self.pulse_builder.add_zero_pulse(10000).build()
self._iqawg[0].output_pulse_sequence(iq_sequence)
self._lo[0].set_output_state("ON")
def _sources_off(self):
iq_sequence = self.pulse_builder.add_zero_pulse(10000).build()
self._iqawg[0].output_pulse_sequence(iq_sequence)
self._lo[0].set_output_state("OFF")
def srcs_power_calibration(self):
"""
To define powers to set in setter (not implemented yet)
"""
pass
def _set_power(self, power):
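        # convert power in dB to a linear amplitude scaling factor (20*log10 amplitude convention)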
k = np.power(10, power / 20)
self._chosen_waveform_function(k)
# iq_sequence = self._chosen_waveform_function(k)
# self._iqawg[0].output_pulse_sequence(iq_sequence)
def get_two_continuous_waves(self, k_ampl):
duration = 2e9 * self._adc_parameters["dur_seg"]
return self.pulse_builder.add_simultaneous_pulses(duration, self._delta, amplitude=k_ampl).build()
def get_two_continuous_waves_fg(self, k_ampl):
self._iqawg[0].change_amplitudes_of_cont_IQ_waves(k_ampl)
self._iqawg[0].update_modulation_coefficient_of_IQ_waves(2.)
def get_continuous_wave(self, k_ampl):
duration = 1e9 * self._adc_parameters["dur_seg"]
return self.pulse_builder.add_sine_pulse(duration, amplitude_mult=k_ampl).build()
def _prepare_measurement_result_data(self, parameter_names, parameters_values):
measurement_data = super()._prepare_measurement_result_data(parameter_names, parameters_values)
measurement_data["if_freq"] = self._frequencies
return measurement_data
def _recording_iteration(self):
data = self._dig.measure(self._bufsize) # data in mV
# deleting extra samples from segments
a = np.arange(self._segment_size_optimal, len(data), self._segment_size)
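        # a: first padded sample of each segment; b: all padded samples introduced by the 32-sample alignment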
b = np.concatenate([a + i for i in range(0, self._segment_size - self._segment_size_optimal)])
data_cut = np.delete(data, b)
yf = np.abs(np.fft.fft(data_cut, self.nfft))[self._start_idx:self._end_idx + 1] * 2 / self.nfft
self._measurement_result._iter += 1
return yf
class WMPulseBuilder(IQPulseBuilder):
"""IQ Pulse builder for wave mixing and for other measurements for a single qubit in line """
def add_simultaneous_pulses(self, duration, delta_freq, phase=0, amplitude=1,
window="rectangular", hd_amplitude=0):
"""
Adds two simultaneous pulses with amplitudes defined by the iqmx_calibration at frequencies
(f_lo-f_if) ± delta_freq (or simpler w0 ± dw) and some phase to the sequence. All sine pulses will be parts
of the same continuous wave at if_freq of f_if
Parameters:
-----------
duration: float, ns
Duration of the pulse in nanoseconds. For pulses other than rectangular
will be interpreted as t_g (see F. Motzoi et al. PRL (2009))
delta_freq: int, Hz
The shift of two sidebands from the central if_freq. Ought to be > 0 Hz
phase: float, rad
Adds a relative phase to the outputted trace.
amplitude: float
Calibration if_amplitudes will be scaled by the
amplitude_value.
window: string
List containing the name and the description of the modulating
window of the pulse.
Implemented modulations:
"rectangular"
Rectangular window.
"gaussian"
Gaussian window, see F. Motzoi et al. PRL (2009).
"hahn"
Hahn sin^2 window
hd_amplitude: float
correction for the Half Derivative method, theoretically should be 1
"""
freq_m = self._iqmx_calibration._if_frequency - delta_freq
freq_p = self._iqmx_calibration._if_frequency + delta_freq
if_offsets = self._iqmx_calibration._if_offsets
if_amplitudes = self._iqmx_calibration._if_amplitudes
sequence_m = IQPulseBuilder(self._iqmx_calibration).add_sine_pulse(duration, phase, amplitude, window,
hd_amplitude,
freq_m, if_offsets/2, if_amplitudes/2).build()
sequence_p = IQPulseBuilder(self._iqmx_calibration).add_sine_pulse(duration, phase, amplitude, window,
hd_amplitude,
freq_p, if_offsets/2, if_amplitudes/2).build()
final_seq = sequence_m.direct_add(sequence_p)
self._pulse_seq_I += final_seq._i
self._pulse_seq_Q += final_seq._q
return self
|
vdrhtc/Measurement-automation
|
lib2/powerSweepMeasurementBase.py
|
Python
|
gpl-3.0
| 12,393
|
[
"Gaussian"
] |
26b8d21de225c8f6d3d67ae66c85c36fec6d24148f9e5cb12a92220f93a17374
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2009 Douglas S. Blank
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#
#
"""
Display references for any object
"""
from gramps.gen.simple import SimpleAccess, SimpleDoc
from gramps.gui.plug.quick import QuickTable
from gramps.gen.utils.alive import probably_alive
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from gramps.gen.datehandler import displayer
from gramps.gen.config import config
def run(database, document, date):
"""
Display people probably alive and their ages on a particular date.
"""
# setup the simple access functions
sdb = SimpleAccess(database)
sdoc = SimpleDoc(document)
stab = QuickTable(sdb)
if not date.get_valid():
sdoc.paragraph("Date is not a valid date.")
return
# display the title
if date.get_day_valid():
sdoc.title(_("People and their ages the %s") %
displayer.display(date))
else:
sdoc.title(_("People and their ages on %s") %
displayer.display(date))
stab.columns(_("Person"), _("Age"), _("Status")) # Actual Date makes column unicode
alive_matches = 0
dead_matches = 0
for person in sdb.all_people():
alive, birth, death, explain, relative = \
probably_alive(person, database, date, return_range=True)
# Doesn't show people probably alive but no way of figuring an age:
if alive:
if birth:
diff_span = (date - birth)
stab.row(person, str(diff_span), _("Alive: %s") % explain)
stab.row_sort_val(1, int(diff_span))
else:
stab.row(person, "", _("Alive: %s") % explain)
stab.row_sort_val(1, 0)
alive_matches += 1
else: # not alive
if birth:
diff_span = (date - birth)
stab.row(person, str(diff_span), _("Deceased: %s") % explain)
stab.row_sort_val(1, int(diff_span))
else:
stab.row(person, "", _("Deceased: %s") % explain)
stab.row_sort_val(1, 1)
dead_matches += 1
document.has_data = (alive_matches + dead_matches) > 0
sdoc.paragraph(_("\nLiving matches: %(alive)d, "
"Deceased matches: %(dead)d\n") %
{'alive' : alive_matches, 'dead' : dead_matches})
if document.has_data:
stab.write(sdoc)
sdoc.paragraph("")
def get_event_date_from_ref(database, ref):
date = None
if ref:
handle = ref.get_reference_handle()
if handle:
event = database.get_event_from_handle(handle)
if event:
date = event.get_date_object()
return date
|
SNoiraud/gramps
|
gramps/plugins/quickview/ageondate.py
|
Python
|
gpl-2.0
| 3,562
|
[
"Brian"
] |
d76b70c4e32dec6fa30f313288302a84f627880b061834f7175e7d8997c94f1c
|
from src.preprocess import DataPreprocessing
import numpy as np
from src.NaiveBayesClassifier import NaiveBayesClassifier
import pandas as pd
import time
def preprocess_data(filepath, rmv_missing_values=True):
"""
    This function loads the data from 'filepath' and handles missing values in the data.
"""
process = DataPreprocessing()
df = process.loadfile(filepath)
df.drop('fnlwgt', axis=1, inplace=True)
df.drop('education-num', axis=1, inplace=True)
if rmv_missing_values:
df_processed = process.append_y(process.handle_missing_values(df, process.remove_missing_values))
else:
df_processed = process.append_y(process.handle_missing_values(df, process.allocate_category_to_missing_values))
return df_processed
def evaluate_model(result):
# condition 1 and predicted 1
true_pos = len(result[(result['Y'] == 1) & (result['Y_posteriori_1'] > result['Y_posteriori_0'])])
# condition 0 and predicted 0
true_neg = len(result[(result['Y'] == 0) & (result['Y_posteriori_1'] < result['Y_posteriori_0'])])
# condition 1 and predicted 0
false_neg = len(result[(result['Y'] == 1) & (result['Y_posteriori_1'] < result['Y_posteriori_0'])])
# condition 0 and predicted 1
false_pos = len(result[(result['Y'] == 0) & (result['Y_posteriori_1'] > result['Y_posteriori_0'])])
print("\t\t\t\t\tResults")
print("\n\t\t\t\t\tConfusion Matrix")
print("===============================================================")
print("\t\t\t Predictions")
print("\tsalary>50k\t\t\tsalary<=50k")
print("\t\t" + str(true_pos) + "\t\t\t\t" + str(false_neg) + "\t\tsalary>50k")
print("\t\t" + str(false_pos) + "\t\t\t\t" + str(true_neg) + "\t\tsalary<=50k")
print("===============================================================")
print("\ntotal rows processed = " + str(true_neg + true_pos + false_neg + false_pos))
print("\ntotal rows processed accurately = " + str(true_neg + true_pos))
# computing performance measures
accuracy = (true_pos + true_neg) / (true_neg + true_pos + false_neg + false_pos)
precision = true_pos / (true_pos + false_pos)
recall = true_pos / (true_pos + false_neg)
f1_measure = (2 * precision * recall) / (precision + recall)
precision_0 = true_neg / (true_neg + false_neg)
recall_0 = true_neg / (true_neg + false_pos)
f1_measure_0 = (2 * precision_0 * recall_0) / (precision_0 + recall_0)
print('\nAccuracy = ' + str(accuracy))
print("\nClass = 'salary>50K'")
print('\nPrecision = ' + str(precision)
+ ',\tRecall = ' + str(recall) + ",\tF1 Measure = " + str(f1_measure))
print("\nClass = 'salary<=50K'")
print('\nPrecision = ' + str(precision_0)
+ ',\tRecall = ' + str(recall_0) + ",\tF1 Measure = " + str(f1_measure_0))
def main():
"""
main driver function
"""
binning = [True, False]
rmv_missing_val = [True, False]
# run for all possible combinations of {binning, gaussian} and
# {remove missing values, assign category to missing values}
for rmv_bool in rmv_missing_val:
for bin_bool in binning:
print("----------------------------------Start------------------------------------------")
print("\n\t\tBinning = " + str(bin_bool) + " and Removing missing value = " + str(rmv_bool))
# get start time
st_time = time.time()
# preprocess data
df = preprocess_data("../data/adult_censusdata.txt", rmv_bool)
# define categorical and numeric attributes
col_categorical = ['Workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race', 'sex',
'native-country']
col_numeric = {'Age': 5, 'capital-gains': 5075, 'capital-loss': 270, 'hours-per-week': 10}
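            # the numeric values are presumably the bin widths used when binning is enabled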
# define dependent variable
dependent_var = 'Y'
# split dataset for 10 fold cross validation
KFold = np.array_split(df, 10)
tempresult = []
print("Running 10 fold cross validation...")
# run 10 fold cross validation
for i in range(10):
train_data = pd.DataFrame()
                # train on all folds except the ith one
for k in range(10):
if k != i:
# accumulate train data
train_data = train_data.append(KFold[k])
# get test data
test_data = KFold[i]
# define NaiveBayes classifier
classifier = NaiveBayesClassifier(col_categorical, col_numeric, bin_bool)
# generate Naive Bayes model
model = classifier.train_model(train_data, dependent_var)
# test model on test data set.
tempresult.append(classifier.test_model(model, test_data))
# accumulate all 10 fold crossvalidation results
result = pd.concat(tempresult)
print("Evaluating model...")
# evaluate the model's results
evaluate_model(result)
print("-------------------------------Run time = {} secs-------------------".format(time.time() - st_time) + "\n")
if __name__ == '__main__':
main()
|
Prateek-Gupta1/NaiveBayesianPython
|
src/main.py
|
Python
|
mit
| 5,308
|
[
"Gaussian"
] |
81e734db7c01d0f2c2410d33bc2820a00455f7464ab60559c397e73e6579db73
|
from MakeData import full_path
from MakeData import write_synapses
from MakeData import write_neurons
from MakeData import make_neurons
from MakeData import make_synapses
from MakeData import make_centers
from MakeData import make_bounds
import unittest as ut
import logging as log
import numpy as np
import bfly
import sys
class TestDatabase(ut.TestCase):
""" set up tests for :class:`DatabaseLayer.Zodb`
"""
DB_PATH = None
DB_TYPE = 'Nodb'
RUNTIME = bfly.UtilityLayer.RUNTIME()
# Log to the command line
log_info = {
'stream': sys.stdout,
'level': log.INFO
}
def test_database(self):
""" test that :mod:`DatabaseLayer` can start \
and successfully deliver responses at a reasonable speed
"""
# Log to command line
log.basicConfig(**self.log_info)
# Neuron and synapse counts
neuron_n = 100
synapse_n = 1000
# Random seeds
neuron_seed = 8675309
synapse_seed = 525600
# Set the channel and dataset paths
channel_path = full_path('data/channel.h5')
data_path = full_path('data')
# shape, and type for temp file
zyx_shape = [250, 2500, 2500]
ids_info = np.iinfo(np.uint32)
zyx_info = np.iinfo(np.uint32)
# Make neurons
neuron_ids, other_ids = make_neurons(neuron_n, ids_info, neuron_seed)
neuron_centers = make_centers(neuron_n, zyx_shape, zyx_info, neuron_seed)
# Make synapses
synapse_ids = np.arange(synapse_n, dtype=ids_info.dtype)
synapse_pairs = make_synapses(synapse_n, neuron_ids, ids_info, synapse_seed)
synapse_centers = make_centers(synapse_n, zyx_shape, zyx_info, synapse_seed)
# Get constants for input files
k_files = self.RUNTIME.DB.FILE
# Save synapse-connections.json
write_synapses(k_files, data_path, synapse_pairs, synapse_centers)
# Save neuron-soma.json
write_neurons(k_files, data_path, neuron_ids, neuron_centers)
# Make a dummy database
db_class = getattr(bfly.DatabaseLayer, self.DB_TYPE)
db = db_class(self.DB_PATH, self.RUNTIME)
# Make a dummy config
temp_config = {
'experiments': [{
'name': 'a',
'samples': [{
'name': 'b',
'datasets': [{
'path': data_path,
'name': 'c',
'channels': [{
'path': channel_path,
'name': 'd'
}]
}]
}]
}]
}
# Load the configuraton json files
db.load_config(temp_config)
# Get constants for the database
k_tables = self.RUNTIME.DB.TABLE
s_table = k_tables.SYNAPSE.NAME
n_table = k_tables.NEURON.NAME
####
# S1 : is_synapse
####
msg = "is_synapse: ID {} returns {}"
# Should be synapses
for syn in synapse_ids:
res = db.is_synapse(s_table, channel_path, syn)
self.assertTrue(res, msg.format(syn, res))
# Should not be synapses
for syn in range(synapse_ids[-1]+1, 2*synapse_n):
res = db.is_synapse(s_table, channel_path, syn)
self.assertFalse(res, msg.format(syn, res))
####
# S5 : is_neuron
####
msg = "is_neuron: ID {} returns {}"
        # Should be neurons
for nrn in neuron_ids:
res = db.is_neuron(n_table, channel_path, nrn)
self.assertTrue(res, msg.format(nrn, res))
# Should not be neurons
for nrn in other_ids:
res = db.is_neuron(n_table, channel_path, nrn)
self.assertFalse(res, msg.format(nrn, res))
# Get the list of keys for coordinates
k_axes = k_tables.ALL.POINT_LIST
MAX_SCALE = 10
####
# S3 : synapse_keypoint
####
msg = """synapse_keypoint: ID {0} returns {1}={2}.
It should have {1}={3} at scale {4}.
"""
# Should match centers
for syn, cen in zip(synapse_ids, synapse_centers):
# Use an arbitrary scale
scale = 2 ** (syn % MAX_SCALE)
center = cen // [1, scale, scale]
result = db.synapse_keypoint(s_table, channel_path, syn, scale)
# Error checker
def asserter(i):
ideal = center[i]
axis = k_axes[i]
res = result[axis]
# Assert result has ideal value
error = msg.format(syn, axis, res, ideal, scale)
self.assertEqual(res, ideal, error)
# Assert all axes are expected
map(asserter, range(3))
####
# S7 : neuron_keypoint
####
msg = """neuron_keypoint: ID {0} returns {1}={2}.
It should have {1}={3} at scale {4}.
"""
# Should match centers
for nrn, cen in zip(neuron_ids, neuron_centers):
# Use an arbitrary scale
scale = 2 ** (nrn % MAX_SCALE)
center = cen // [1, scale, scale]
result = db.neuron_keypoint(n_table, channel_path, nrn, scale)
# Error checker
def asserter(i):
ideal = center[i]
axis = k_axes[i]
res = result[axis]
# Assert result has ideal value
error = msg.format(nrn, axis, res, ideal, scale)
self.assertEqual(res, ideal, error)
# Assert all axes are expected
map(asserter, range(3))
k_links = self.RUNTIME.FEATURES.LINKS
k_sides = [k_links.PRE.NAME, k_links.POST.NAME]
####
# S3 : synapse_parent
####
msg = """synapse_parent:
ID {0} shows a {1} of {2}, but
ID {0} should have {1} of {3}.
"""
for syn, pair in zip(synapse_ids, synapse_pairs):
result = db.synapse_parent(s_table, channel_path, syn)
# Error checker
def asserter(i):
ideal = pair[i]
side = k_sides[i]
res = result[side]
# Assert result has ideal value
error = msg.format(syn, side, res, ideal)
self.assertEqual(res, ideal, error)
# Assert all axes are expected
map(asserter, range(2))
####
# S8 : neuron_children
####
msg = """neuron_children:
In bounds from {4} to {5},
ID {0} has \033[91m{2}\033[0m part of synapse {1}, but
ID {0} should have \033[92m{3}\033[0m part of synapse {1}.
"""
# Keywords for logging
k_words = ['no','the 1st','the 2nd','each']
# Check for all neurons
for nrn in neuron_ids:
# Get all synapses with neuron
is_nrn = synapse_pairs == nrn
is_syn = is_nrn.any(1)
ideal_nrn = is_nrn[is_syn]*[1,2]
# Get synapse relations to neuron
ideal_ids = synapse_ids[is_syn]
ideal_kinds = np.sum(ideal_nrn, 1)
ideal_centers = synapse_centers[is_syn]
# Combine ids with kind of relation
ideal_syns = np.c_[ideal_ids, ideal_kinds]
# Error checker
def asserter(start, stop):
# Get ideal values within bounds
above = ideal_centers >= start
below = ideal_centers < stop
# Get synapse ids and kinds
bound = (above & below).all(1)
ideal = dict(ideal_syns[bound])
# Get results from the database
res = db.neuron_children(s_table, channel_path, nrn, start, stop)
# Test all keys in ideal or result
for syn in set(ideal.keys()) | set(res.keys()):
# Get both keywords
res_word = k_words[res.get(syn, 0)]
ideal_word = k_words[ideal.get(syn, 0)]
# Assert both labels are equal
error = msg.format(nrn, syn, res_word, ideal_word, start, stop)
self.assertEqual(res_word, ideal_word, error)
# Test whole image and random bounds
any_bounds = make_bounds(zyx_shape, zyx_info, nrn)
asserter([0,0,0], zyx_shape)
asserter(*any_bounds)
if __name__ == '__main__':
ut.main()
|
Rhoana/butterfly
|
tests/TestDatabase.py
|
Python
|
mit
| 8,555
|
[
"NEURON"
] |
48f87650f3e5e6298cf3cbb86ecf3dd90c74408381e652c727e5f1d0dff893a0
|
#
# The MIT License (MIT)
#
# Copyright (c) 2016 Robert Hammelrath
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Some parts of the software are a port of code provided by Rinky-Dink Electronics, Henning Karlsen,
# with the following copyright notice:
#
## Copyright (C)2015 Rinky-Dink Electronics, Henning Karlsen. All right reserved
## This library is free software; you can redistribute it and/or
## modify it under the terms of the CC BY-NC-SA 3.0 license.
## Please see the included documents for further information.
#
# Class supporting TFT LC-displays with a parallel Interface
# First example: Controller SSD1963 with a 4.3" or 7" display
#
# The minimal connection is:
# X1..X8 for data, Y9 for /Reset, Y10 for /RD, Y11 for /WR and Y12 for /RS
# Then LED must be hard tied to Vcc and /CS to GND.
#
import pyb, stm
from uctypes import addressof
from tft.driver import TFT_io
import gc
# define constants
#
RESET = const(1 << 10) ## Y9
RD = const(1 << 11) ## Y10
WR = const(0x01) ## Y11
D_C = const(0x02) ## Y12
LED = const(1 << 8) ## Y3
POWER = const(1 << 9) ## Y4
## CS is not used and must be hard tied to GND
PORTRAIT = const(1)
LANDSCAPE = const(0)
class TFT:
def __init__(self, controller = "SSD1963", lcd_type = "LB04301", orientation = LANDSCAPE,
v_flip = False, h_flip = False, power_control = True):
self.tft_init(controller, lcd_type, orientation, v_flip, h_flip)
def tft_init(self, controller = "SSD1963", lcd_type = "LB04301", orientation = LANDSCAPE,
v_flip = False, h_flip = False, power_control = True):
#
        # For convenience, define X1..X8 and Y9..Y12 as output ports using the pyb.Pin functions.
        # X1..X8 will be redefined on the fly as inputs by accessing the MODER control registers
        # when needed. Y9 is treated separately, since it is used for Reset, which is done at the Python
        # level anyhow, since it needs long delays: 5 and 15 ms vs. 10 µs.
#
# Set TFT general defaults
self.controller = controller
self.lcd_type = lcd_type
self.orientation = orientation
self.v_flip = v_flip # flip vertical
self.h_flip = h_flip # flip horizontal
self.c_flip = 0 # flip blue/red
self.rc_flip = 0 # flip row/column
self.setColor((255, 255, 255)) # set FG color to white as can be.
self.setBGColor((0, 0, 0)) # set BG to black
self.bg_buf = bytearray()
#
self.pin_led = None # deferred init Flag
self.power_control = power_control
if self.power_control:
# special treat for Power Pin
self.pin_power = pyb.Pin("Y4", pyb.Pin.OUT_PP)
self.power(True) ## switch Power on
#
pyb.delay(10)
# this may have to be moved to the controller specific section
if orientation == PORTRAIT:
self.setXY = TFT_io.setXY_P
self.drawPixel = TFT_io.drawPixel_P
else:
self.setXY = TFT_io.setXY_L
self.drawPixel = TFT_io.drawPixel_L
self.swapbytes = TFT_io.swapbytes
self.swapcolors = TFT_io.swapcolors
# ----------
for pin_name in ["X1", "X2", "X3", "X4", "X5", "X6", "X7", "X8",
"Y10", "Y11", "Y12"]:
pin = pyb.Pin(pin_name, pyb.Pin.OUT_PP) # set as output
pin.value(1) ## set high as default
# special treat for Reset
self.pin_reset = pyb.Pin("Y9", pyb.Pin.OUT_PP)
# Reset the device
self.pin_reset.value(1) ## do a hard reset
pyb.delay(10)
self.pin_reset.value(0) ## Low
pyb.delay(20)
self.pin_reset.value(1) ## set high again
pyb.delay(20)
#
        # Now initialize the LCD
# This is for the SSD1963 controller and two specific LCDs. More may follow.
# Data taken from the SSD1963 data sheet, SSD1963 Application Note and the LCD Data sheets
#
if controller == "SSD1963": # 1st approach for 480 x 272
TFT_io.tft_cmd_data(0xe2, bytearray(b'\x1d\x02\x54'), 3) # PLL multiplier, set PLL clock to 100M
# N=0x2D for 6.5MHz, 0x1D for 10MHz crystal
# PLLClock = Crystal * (Mult + 1) / (Div + 1)
# The intermediate value Crystal * (Mult + 1) must be between 250MHz and 750 MHz
TFT_io.tft_cmd_data(0xe0, bytearray(b'\x01'), 1) # PLL Enable
pyb.delay(10)
TFT_io.tft_cmd_data(0xe0, bytearray(b'\x03'), 1)
pyb.delay(10)
TFT_io.tft_cmd(0x01) # software reset
pyb.delay(10)
#
# Settings for the LCD
#
            # The LCDC_FPR depends on PLL clock and the recommended LCD Dot clock DCLK
#
# LCDC_FPR = (DCLK * 1048576 / PLLClock) - 1
#
# The other settings are less obvious, since the definitions of the SSD1963 data sheet and the
# LCD data sheets differ. So what' common, even if the names may differ:
            # HDP Horizontal Panel width (also called HDISP, Thd). The value stored in the register is HDP - 1
# VDP Vertical Panel Width (also called VDISP, Tvd). The value stored in the register is VDP - 1
# HT Total Horizontal Period, also called HP, th... The exact value does not matter
            # VT Total Vertical Period, also called tv, .. The exact value does not matter
# HPW Width of the Horizontal sync pulse, also called HS, thpw.
# VPW Width of the Vertical sync pulse, also called VS, tvpw
# Front Porch (HFP and VFP) Time between the end of display data and the sync pulse
# Back Porch (HBP and VBP Time between the start of the sync pulse and the start of display data.
# HT = FP + HDP + BP and VT = VFP + VDP + VBP (sometimes plus sync pulse width)
            # Unfortunately, the controller does not use these front/back porch times; instead it uses a starting time
            # in the front porch area and defines (see also figures in chapter 13.3 of the SSD1963 data sheet)
            # HPS Time from that horiz. starting point to the start of the horizontal display area
# LPS Time from that horiz. starting point to the horizontal sync pulse
# VPS Time from the vert. starting point to the first line
# FPS Time from the vert. starting point to the vertical sync pulse
#
# So the following relations must be held:
#
# HT > HDP + HPS
# HPS >= HPW + LPS
# HPS = Back Porch - LPS, or HPS = Horizontal back Porch
# VT > VDP + VPS
# VPS >= VPW + FPS
# VPS = Back Porch - FPS, or VPS = Vertical back Porch
#
            # LPS or FPS may have a value of zero, since the length of the front porch is determined by the
# other figures
#
            # The best approach is to start with the recommendations of the LCD data sheet for the back porch, pick a
            # sync pulse width and then determine the others, such that they meet the relations above. Typically, these
            # values allow for some ambiguity.
#
if lcd_type == "LB04301": # Size 480x272, 4.3", 24 Bit, 4.3"
#
# Value Min Typical Max
# DotClock 5 MHZ 9 MHz 12 MHz
# HT (Hor. Total 490 531 612
# HDP (Hor. Disp) 480
# HBP (back porch) 8 43
# HFP (Fr. porch) 2 8
# HPW (Hor. sync) 1
# VT (Vert. Total) 275 288 335
# VDP (Vert. Disp) 272
# VBP (back porch) 2 12
# VFP (fr. porch) 1 4
# VPW (vert. sync) 1 10
#
# This table in combination with the relation above leads to the settings:
# HPS = 43, HPW = 8, LPS = 0, HT = 531
# VPS = 14, VPW = 10, FPS = 0, VT = 288
#
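                # Quick check of the relations above:
                #   HT = 531 > HDP + HPS = 480 + 43 = 523
                #   HPS = 43 >= HPW + LPS = 8 + 0
                #   VT = 288 > VDP + VPS = 272 + 14 = 286
                #   VPS = 14 >= VPW + FPS = 10 + 0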
self.disp_x_size = 479
self.disp_y_size = 271
TFT_io.tft_cmd_data_AS(0xe6, bytearray(b'\x01\x70\xa3'), 3) # PLL setting for PCLK
# (9MHz * 1048576 / 100MHz) - 1 = 94371 = 0x170a3
TFT_io.tft_cmd_data_AS(0xb0, bytearray( # # LCD SPECIFICATION
[0x20, # 24 Color bits, HSync/VSync low, No Dithering
0x00, # TFT mode
self.disp_x_size >> 8, self.disp_x_size & 0xff, # physical Width of TFT
self.disp_y_size >> 8, self.disp_y_size & 0xff, # physical Height of TFT
0x00]), 7) # Last byte only required for a serial TFT
TFT_io.tft_cmd_data_AS(0xb4, bytearray(b'\x02\x13\x00\x2b\x08\x00\x00\x00'), 8)
# HSYNC, Set HT 531 HPS 43 HPW=Sync pulse 8 LPS 0
TFT_io.tft_cmd_data_AS(0xb6, bytearray(b'\x01\x20\x00\x0e\x0a\x00\x00'), 7)
# VSYNC, Set VT 288 VPS 14 VPW 10 FPS 0
TFT_io.tft_cmd_data_AS(0x36, bytearray([(orientation & 1) << 5 | (h_flip & 1) << 1 | (v_flip) & 1]), 1)
# rotation/ flip, etc., t.b.d.
elif lcd_type == "AT070TN92": # Size 800x480, 7", 18 Bit, lower color bits ignored
#
# Value Min Typical Max
# DotClock 26.4 MHz 33.3 MHz 46.8 MHz
# HT (Hor. Total 862 1056 1200
# HDP (Hor. Disp) 800
# HBP (back porch) 46 46 46
# HFP (Fr. porch) 16 210 254
# HPW (Hor. sync) 1 40
# VT (Vert. Total) 510 525 650
# VDP (Vert. Disp) 480
# VBP (back porch) 23 23 23
# VFP (fr. porch) 7 22 147
# VPW (vert. sync) 1 20
#
# This table in combination with the relation above leads to the settings:
# HPS = 46, HPW = 8, LPS = 0, HT = 1056
# VPS = 23, VPW = 8, FPS = 0, VT = 525
#
self.disp_x_size = 799
self.disp_y_size = 479
TFT_io.tft_cmd_data_AS(0xe6, bytearray(b'\x05\x53\xf6'), 3) # PLL setting for PCLK
# (33.3MHz * 1048576 / 100MHz) - 1 = 349174 = 0x553f6
TFT_io.tft_cmd_data_AS(0xb0, bytearray( # LCD SPECIFICATION
[0x00, # 18 Color bits, HSync/VSync low, No Dithering/FRC
0x00, # TFT mode
self.disp_x_size >> 8, self.disp_x_size & 0xff, # physical Width of TFT
self.disp_y_size >> 8, self.disp_y_size & 0xff, # physical Height of TFT
0x00]), 7) # Last byte only required for a serial TFT
TFT_io.tft_cmd_data_AS(0xb4, bytearray(b'\x04\x1f\x00\x2e\x08\x00\x00\x00'), 8)
# HSYNC, Set HT 1056 HPS 46 HPW 8 LPS 0
TFT_io.tft_cmd_data_AS(0xb6, bytearray(b'\x02\x0c\x00\x17\x08\x00\x00'), 7)
# VSYNC, Set VT 525 VPS 23 VPW 08 FPS 0
TFT_io.tft_cmd_data_AS(0x36, bytearray([(orientation & 1) << 5 | (h_flip & 1) << 1 | (v_flip) & 1]), 1)
# rotation/ flip, etc., t.b.d.
else:
print("Wrong Parameter lcd_type: ", lcd_type)
return
TFT_io.tft_cmd_data_AS(0xBA, bytearray(b'\x0f'), 1) # GPIO[3:0] out 1
TFT_io.tft_cmd_data_AS(0xB8, bytearray(b'\x07\x01'), 1) # GPIO3=input, GPIO[2:0]=output
TFT_io.tft_cmd_data_AS(0xf0, bytearray(b'\x00'), 1) # Pixel data Interface 8 Bit
TFT_io.tft_cmd(0x29) # Display on
TFT_io.tft_cmd_data_AS(0xbe, bytearray(b'\x06\xf0\x01\xf0\x00\x00'), 6)
# Set PWM for B/L
TFT_io.tft_cmd_data_AS(0xd0, bytearray(b'\x0d'), 1) # Set DBC: enable, aggressive
else:
print("Wrong Parameter controller: ", controller)
return
#
# Set character printing defaults
#
self.text_font = None
self.setTextStyle(self.color, self.BGcolor, 0, None, 0)
#
# Init done. clear Screen and switch BG LED on
#
self.text_x = self.text_y = self.text_yabs = 0
self.clrSCR() # clear the display
# self.backlight(100) ## switch BG LED on
#
# Return screen dimensions
#
def getScreensize(self):
if self.orientation == LANDSCAPE:
return (self.disp_x_size + 1, self.disp_y_size + 1)
else:
return (self.disp_y_size + 1, self.disp_x_size + 1)
#
# set backlight brightness
#
def backlight(self, percent):
# deferred init of LED PIN
if self.pin_led is None:
# special treat for BG LED
self.pin_led = pyb.Pin("Y3", pyb.Pin.OUT_PP)
self.led_tim = pyb.Timer(4, freq=500)
self.led_ch = self.led_tim.channel(3, pyb.Timer.PWM, pin=self.pin_led)
percent = max(0, min(percent, 100))
self.led_ch.pulse_width_percent(percent) # set LED
#
# switch power on/off
#
def power(self, onoff):
if self.power_control:
if onoff:
self.pin_power.value(True) ## switch power on or off
else:
self.pin_power.value(False)
#
# set the tft flip modes
#
def set_tft_mode(self, v_flip = False, h_flip = False, c_flip = False, orientation = LANDSCAPE):
self.v_flip = v_flip # flip vertical
self.h_flip = h_flip # flip horizontal
self.c_flip = c_flip # flip blue/red
self.orientation = orientation # LANDSCAPE/PORTRAIT
TFT_io.tft_cmd_data_AS(0x36,
bytearray([(self.orientation << 5) |(self.c_flip << 3) | (self.h_flip & 1) << 1 | (self.v_flip) & 1]), 1)
# rotation/ flip, etc., t.b.d.
#
# get the tft flip modes
#
def get_tft_mode(self):
return (self.v_flip, self.h_flip, self.c_flip, self.orientation) #
#
# set the color used for the draw commands
#
def setColor(self, fgcolor):
self.color = fgcolor
self.colorvect = bytearray(self.color) # prepare byte array
#
# Set BG color used for the draw commands
#
def setBGColor(self, bgcolor):
self.BGcolor = bgcolor
self.BGcolorvect = bytearray(self.BGcolor) # prepare byte array
self.BMPcolortable = bytearray([self.BGcolorvect[2], # create colortable
self.BGcolorvect[1], self.BGcolorvect[0],0,
self.colorvect[2], self.colorvect[1], self.colorvect[0],0])
#
# get the color used for the draw commands
#
def getColor(self):
return self.color
#
# get BG color used for the draw commands
#
def getBGColor(self):
return self.BGcolor
#
# Draw a single pixel at location x, y with color
# Rather slow at 40µs/Pixel
#
def drawPixel_py(self, x, y, color):
self.setXY(x, y, x, y)
TFT_io.displaySCR_AS(color, 1) #
#
# clear screen, set it to BG color.
#
def clrSCR(self, color = None):
colorvect = self.BGcolorvect if color is None else bytearray(color)
self.clrXY()
TFT_io.fillSCR_AS(colorvect, (self.disp_x_size + 1) * (self.disp_y_size + 1))
self.setScrollArea(0, self.disp_y_size + 1, 0)
self.setScrollStart(0)
self.setTextPos(0,0)
#
# reset the address range to fullscreen
#
def clrXY(self):
if self.orientation == LANDSCAPE:
self.setXY(0, 0, self.disp_x_size, self.disp_y_size)
else:
self.setXY(0, 0, self.disp_y_size, self.disp_x_size)
#
# Draw a line from x1, y1 to x2, y2 with the color set by setColor()
# Straight port from the UTFT Library at Rinky-Dink Electronics
#
def drawLine(self, x1, y1, x2, y2, color = None):
if y1 == y2:
self.drawHLine(x1, y1, x2 - x1 + 1, color)
elif x1 == x2:
self.drawVLine(x1, y1, y2 - y1 + 1, color)
else:
colorvect = self.colorvect if color is None else bytearray(color)
dx, xstep = (x2 - x1, 1) if x2 > x1 else (x1 - x2, -1)
dy, ystep = (y2 - y1, 1) if y2 > y1 else (y1 - y2, -1)
col, row = x1, y1
if dx < dy:
t = - (dy >> 1)
while True:
self.drawPixel(col, row, colorvect)
if row == y2:
return
row += ystep
t += dx
if t >= 0:
col += xstep
t -= dy
else:
t = - (dx >> 1)
while True:
self.drawPixel(col, row, colorvect)
if col == x2:
return
col += xstep
t += dy
if t >= 0:
row += ystep
t -= dx
#
# Draw a horizontal line with 1 Pixel width, from x,y to x + l - 1, y
# Straight port from the UTFT Library at Rinky-Dink Electronics
#
def drawHLine(self, x, y, l, color = None): # draw horizontal line
colorvect = self.colorvect if color is None else bytearray(color)
if l < 0: # negative length, swap parameters
l = -l
x -= l
self.setXY(x, y, x + l - 1, y) # set display window
TFT_io.fillSCR_AS(colorvect, l)
#
# Draw a vertical line with 1 Pixel width, from x,y to x, y + l - 1
# Straight port from the UTFT Library at Rinky-Dink Electronics
#
def drawVLine(self, x, y, l, color = None): # draw vertical line
colorvect = self.colorvect if color is None else bytearray(color)
if l < 0: # negative length, swap parameters
l = -l
y -= l
self.setXY(x, y, x, y + l - 1) # set display window
TFT_io.fillSCR_AS(colorvect, l)
#
# Draw rectangle from x1, y1, to x2, y2
# Straight port from the UTFT Library at Rinky-Dink Electronics
#
def drawRectangle(self, x1, y1, x2, y2, color = None):
if x1 > x2:
x1, x2 = x2, x1
if y1 > y2:
y1, y2 = y2, y1
self.drawHLine(x1, y1, x2 - x1 + 1, color)
self.drawHLine(x1, y2, x2 - x1 + 1, color)
self.drawVLine(x1, y1, y2 - y1 + 1, color)
self.drawVLine(x2, y1, y2 - y1 + 1, color)
#
# Fill rectangle
# Almost straight port from the UTFT Library at Rinky-Dink Electronics
#
def fillRectangle(self, x1, y1, x2, y2, color=None):
if x1 > x2:
x1, x2 = x2, x1
if y1 > y2:
y1, y2 = y2, y1
self.setXY(x1, y1, x2, y2) # set display window
if color:
TFT_io.fillSCR_AS(bytearray(color), (x2 - x1 + 1) * (y2 - y1 + 1))
else:
TFT_io.fillSCR_AS(self.colorvect, (x2 - x1 + 1) * (y2 - y1 + 1))
#
# Draw smooth rectangle from x1, y1, to x2, y2
# Straight port from the UTFT Library at Rinky-Dink Electronics
#
def drawClippedRectangle(self, x1, y1, x2, y2, color = None):
if x1 > x2:
x1, x2 = x2, x1
if y1 > y2:
y1, y2 = y2, y1
if (x2-x1) > 4 and (y2-y1) > 4:
colorvect = self.colorvect if color is None else bytearray(color)
self.drawPixel(x1 + 2,y1 + 1, colorvect)
self.drawPixel(x1 + 1,y1 + 2, colorvect)
self.drawPixel(x2 - 2,y1 + 1, colorvect)
self.drawPixel(x2 - 1,y1 + 2, colorvect)
self.drawPixel(x1 + 2,y2 - 1, colorvect)
self.drawPixel(x1 + 1,y2 - 2, colorvect)
self.drawPixel(x2 - 2,y2 - 1, colorvect)
self.drawPixel(x2 - 1,y2 - 2, colorvect)
self.drawHLine(x1 + 3, y1, x2 - x1 - 5, colorvect)
self.drawHLine(x1 + 3, y2, x2 - x1 - 5, colorvect)
self.drawVLine(x1, y1 + 3, y2 - y1 - 5, colorvect)
self.drawVLine(x2, y1 + 3, y2 - y1 - 5, colorvect)
#
# Fill smooth rectangle from x1, y1, to x2, y2
# Straight port from the UTFT Library at Rinky-Dink Electronics
#
def fillClippedRectangle(self, x1, y1, x2, y2, color = None):
if x1 > x2:
x1, x2 = x2, x1
if y1 > y2:
y1, y2 = y2, y1
if (x2-x1) > 4 and (y2-y1) > 4:
for i in range(((y2 - y1) // 2) + 1):
if i == 0:
self.drawHLine(x1 + 3, y1 + i, x2 - x1 - 5, color)
self.drawHLine(x1 + 3, y2 - i, x2 - x1 - 5, color)
elif i == 1:
self.drawHLine(x1 + 2, y1 + i, x2 - x1 - 3, color)
self.drawHLine(x1 + 2, y2 - i, x2 - x1 - 3, color)
elif i == 2:
self.drawHLine(x1 + 1, y1 + i, x2 - x1 - 1, color)
self.drawHLine(x1 + 1, y2 - i, x2 - x1 - 1, color)
else:
self.drawHLine(x1, y1 + i, x2 - x1 + 1, color)
self.drawHLine(x1, y2 - i, x2 - x1 + 1, color)
#
# draw a circle at x, y with radius
# Straight port from the UTFT Library at Rinky-Dink Electronics
#
def drawCircle(self, x, y, radius, color = None):
colorvect = self.colorvect if color is None else bytearray(color)
f = 1 - radius
ddF_x = 1
ddF_y = -2 * radius
x1 = 0
y1 = radius
self.drawPixel(x, y + radius, colorvect)
self.drawPixel(x, y - radius, colorvect)
self.drawPixel(x + radius, y, colorvect)
self.drawPixel(x - radius, y, colorvect)
while x1 < y1:
if f >= 0:
y1 -= 1
ddF_y += 2
f += ddF_y
x1 += 1
ddF_x += 2
f += ddF_x
self.drawPixel(x + x1, y + y1, colorvect)
self.drawPixel(x - x1, y + y1, colorvect)
self.drawPixel(x + x1, y - y1, colorvect)
self.drawPixel(x - x1, y - y1, colorvect)
self.drawPixel(x + y1, y + x1, colorvect)
self.drawPixel(x - y1, y + x1, colorvect)
self.drawPixel(x + y1, y - x1, colorvect)
self.drawPixel(x - y1, y - x1, colorvect)
#
# fill a circle at x, y with radius
# Straight port from the UTFT Library at Rinky-Dink Electronics
# Instead of calculating x = sqrt(r*r - y*y), it searches for the x
# where x*x + y*y <= r*r
#
def fillCircle(self, x, y, radius, color = None):
r_square = radius * radius * 4
for y1 in range (-(radius * 2), 1):
y_square = y1 * y1
for x1 in range (-(radius * 2), 1):
if x1*x1+y_square <= r_square:
x1i = x1 // 2
y1i = y1 // 2
self.drawHLine(x + x1i, y + y1i, 2 * (-x1i), color)
self.drawHLine(x + x1i, y - y1i, 2 * (-x1i), color)
break
#
# Draw a bitmap at x,y with size sx, sy
# mode determines the type of expected data
# mode = 1: The data contains 1 bit per pixel, mapped to fg/bg color
# unless a colortable is provided
# mode = 2: The data contains 2 bit per pixel; a colortable with 4 entries must be provided
# mode = 4: The data contains 4 bit per pixel;
# a colortable with 16 entries must be provided
# mode = 8: The data contains 8 bit per pixel;
# a colortable with 256 entries must be provided
# mode = 16: The data must contain 2 packed bytes/pixel red/green/blue in 565 format
# mode = 24: The data must contain 3 bytes/pixel red/green/blue
#
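# A minimal usage sketch (illustrative values only): a 2x2 red square in
# 24-bit mode needs sx * sy pixels of three bytes each:
#
#   tft.drawBitmap(10, 10, 2, 2, bytearray([255, 0, 0] * 4), mode=24)
#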
def drawBitmap(self, x, y, sx, sy, data, mode = 24, colortable = None):
self.setXY(x, y, x + sx - 1, y + sy - 1)
if mode == 24:
TFT_io.displaySCR_AS(data, sx * sy)
elif mode == 16:
TFT_io.displaySCR565_AS(data, sx * sy)
elif mode == 1:
if colortable is None:
colortable = self.BMPcolortable # create colortable
TFT_io.displaySCR_bmp(data, sx*sy, 1, colortable)
elif mode == 2:
if colortable is None:
return
TFT_io.displaySCR_bmp(data, sx*sy, 2, colortable)
elif mode == 4:
if colortable is None:
return
TFT_io.displaySCR_bmp(data, sx*sy, 4, colortable)
elif mode == 8:
if colortable is None:
return
TFT_io.displaySCR_bmp(data, sx*sy, 8, colortable)
#
# set scroll area to the region between the first and last line
#
def setScrollArea(self, tfa, vsa, bfa):
TFT_io.tft_cmd_data_AS(0x33, bytearray( #set scrolling range
[(tfa >> 8) & 0xff, tfa & 0xff,
(vsa >> 8) & 0xff, vsa & 0xff,
(bfa >> 8) & 0xff, bfa & 0xff]), 6)
self.scroll_tfa = tfa
self.scroll_vsa = vsa
self.scroll_bfa = bfa
self.setScrollStart(self.scroll_tfa)
x, y = self.getTextPos()
self.setTextPos(x, y) # realign pointers
#
# get scroll area of the region between the first and last line
#
def getScrollArea(self):
return self.scroll_tfa, self.scroll_vsa, self.scroll_bfa
#
# set the line which is displayed first
#
def setScrollStart(self, lline):
self.scroll_start = lline # store the logical first line
TFT_io.tft_cmd_data_AS(0x37, bytearray([(lline >> 8) & 0xff, lline & 0xff]), 2)
#
# get the line which is displayed first
#
def getScrollStart(self):
return self.scroll_start # get the logical first line
#
# Scroll vsa up/down by a number of pixels
#
def scroll(self, pixels):
line = ((self.scroll_start - self.scroll_tfa + pixels) % self.scroll_vsa
+ self.scroll_tfa)
self.setScrollStart(line) # set the new line
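#
# A worked example (illustrative figures): with scroll_tfa = 0, scroll_vsa = 272
# and scroll_start = 264, scroll(16) sets the start line to
# ((264 - 0 + 16) % 272) + 0 = 8, i.e. the start line wraps around inside the
# scroll area.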
#
# Set text position
#
def setTextPos(self, x, y, clip = False, scroll = True):
self.text_width, self.text_height = self.getScreensize() ## height possibly wrong
self.text_x = x
if self.scroll_tfa <= y < (self.scroll_tfa + self.scroll_vsa): # in scroll area ? check later for < or <=
# correct position relative to scroll start
self.text_y = (y + self.scroll_start - self.scroll_tfa)
if self.text_y >= (self.scroll_tfa + self.scroll_vsa):
self.text_y -= self.scroll_vsa
else: # absolute
self.text_y = y
self.text_yabs = y
# Hint: self.text_yabs = (self.text_y - self.scroll_start) % self.scroll_vsa + self.scroll_tfa
if clip and (self.text_x + clip) < self.text_width:
self.text_width = self.text_x + clip
self.text_scroll = scroll
#
# Get text position
#
def getTextPos(self, abs = True):
if abs:
return (self.text_x, self.text_yabs)
else:
return (self.text_x, self.text_y)
#
# Set Text Style
#
def setTextStyle(self, fgcolor=None, bgcolor=None, transparency=None, font=None, gap=None):
if font is not None:
self.text_font = font
self.text_rows = font.height()
self.text_cols = font.max_width()
if transparency is not None:
self.transparency = transparency
if gap is not None:
self.text_gap = gap
if bgcolor is not None:
self.text_bgcolor = bgcolor
if fgcolor is not None:
self.text_fgcolor = fgcolor
self.text_color = (bytearray(self.text_bgcolor)
+ bytearray(self.text_fgcolor)
+ bytearray([self.transparency]))
#
# Get Text Style: return (fgcolor, bgcolor, transparency, font, gap)
#
def getTextStyle(self):
return (self.text_color[3:6], self.text_color[0:3],
self.transparency, self.text_font, self.text_gap)
#
# Check, if a new line is to be opened
# if yes, advance, including scrolling, and clear line, if flags is set
# Obsolete?
#
def printNewline(self, clear = False):
if (self.text_yabs + self.text_rows) >= (self.scroll_tfa + self.scroll_vsa): # does the line fit?
self.scroll(self.text_rows) # no. scroll
else: # Yes, just advance pointers
self.text_yabs += self.text_rows
self.setTextPos(self.text_x, self.text_yabs)
if clear:
self.printClrLine(2) # clear actual line
#
# Carriage Return
#
def printCR(self): # return to the start of the line
self.text_x = 0
#
# clear line modes
#
def printClrLine(self, mode = 0): # clear to end of line/bol/line
if mode == 0:
self.setXY(self.text_x, self.text_y,
self.text_width - 1, self.text_y + self.text_rows - 1) # set display window
TFT_io.fillSCR_AS(self.text_color, (self.text_width - self.text_x + 1) * self.text_rows)
elif mode == 1 and self.text_x > 0:
self.setXY(0, self.text_y,
self.text_x - 1, self.text_y + self.text_rows - 1) # set display window
TFT_io.fillSCR_AS(self.text_color, (self.text_x - 1) * self.text_rows)
elif mode == 2:
self.setXY(0, self.text_y,
self.text_width - 1, self.text_y + self.text_rows - 1) # set display window
TFT_io.fillSCR_AS(self.text_color, self.text_width * self.text_rows)
#
# clear screen modes
#
def printClrSCR(self): # clear Area set by setScrollArea
self.setXY(0, self.scroll_tfa,
self.text_width - 1, self.scroll_tfa + self.scroll_vsa) # set display window
TFT_io.fillSCR_AS(self.text_color, self.text_width * self.scroll_vsa)
self.setScrollStart(self.scroll_tfa)
self.setTextPos(0, self.scroll_tfa)
#
# Print string s, returning the length of the printed string in pixels
#
def printString(self, s, bg_buf=None):
length = 0
for c in s:
cols = self.printChar(c, bg_buf)
if cols == 0: # could not print (any more)
break
length += cols
return length
#
# Print char c using its font bitmap, returning the width of the printed char in pixels
#
def printChar(self, c, bg_buf=None):
# get the character's pixel bitmap and dimensions
if self.text_font:
fmv, rows, cols = self.text_font.get_ch(c)
else:
raise AttributeError('No font selected')
cbytes, cbits = divmod(cols, 8) # Not in packed format
dcols = (cbytes + 1) * 8 if cbits else cbytes * 8 # cols for display
pix_count = dcols * rows # number of bits in the char
# test char fit
if self.text_x + cols > self.text_width: # does the char fit on the screen?
if self.text_scroll:
self.printCR() # No, then CR
self.printNewline(True) # NL: advance to the next line
else:
return 0
# Retrieve Background data if transparency is required
if self.transparency: # in case of transparency, the frame buffer content is needed
if bg_buf is None: # buffer allocation needed?
if len(self.bg_buf) < pix_count * 3:
del(self.bg_buf)
gc.collect()
self.bg_buf = bytearray(pix_count * 3) # Make it bigger
bg_buf = self.bg_buf
self.setXY(self.text_x, self.text_y, self.text_x + dcols - 1, self.text_y + rows - 1) # set area
TFT_io.tft_read_cmd_data_AS(0x2e, bg_buf, pix_count * 3) # read background data
else:
bg_buf = 0 # dummy assignment, since None is not accepted
# Set XY range & print char
self.setXY(self.text_x, self.text_y, self.text_x + dcols - 1, self.text_y + rows - 1) # set area
TFT_io.displaySCR_charbitmap(addressof(fmv), pix_count, self.text_color, bg_buf) # display char!
#advance pointer
self.text_x += (cols + self.text_gap)
return cols + self.text_gap
|
peterhinch/micropython-tft-gui
|
tft/driver/tft.py
|
Python
|
mit
| 32,591
|
[
"CRYSTAL"
] |
083ddf46420f374671208a27021762413c03daf956275e6ab4190eb2e2923be4
|
# test_ec3 - Test for module ``ec3``.
# Copyright (C) 2014 - GRyCAP - Universitat Politecnica de Valencia
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import unittest
import logging
from mock import patch, MagicMock, mock_open
from collections import namedtuple
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
sys.path.append("..")
sys.path.append(".")
from IM2.radl.radl import RADL, system, network
from IM2.radl.radl_parse import parse_radl
from ec3 import ClusterStore, CLI, CmdLaunch, CmdList, CmdTemplates, CmdDestroy, CmdReconfigure, CmdClone, CmdStop, CmdRestart, CmdSsh, CmdUpdate
cluster_data = """system front (
state = 'configured' and
__im_server = 'http://server.com:8800' and
__infrastructure_id = 'infid' and
__vm_id = '0' and
auth = '[{"type": "InfrastructureManager", "username": "user", "password": "pass"}]'
)"""
if sys.version_info > (3, 0):
open_name = 'builtins.open'
else:
open_name = '__builtin__.open'
class TestEC3(unittest.TestCase):
def __init__(self, *args):
unittest.TestCase.__init__(self, *args)
def gen_radl(self):
radl = RADL()
n = network("public")
n.setValue("outbound", "yes")
s = system("front")
s.setValue("ec3aas.username", "user")
s.setValue("state", "configured")
s.setValue("nodes", "1")
s.setValue("net_interface.0.connection", n)
s.setValue("net_interface.0.ip", "8.8.8.8")
s.setValue("disk.0.os.credentials.password", "pass")
s.setValue("disk.0.os.credentials.username", "user")
s.setValue("provider.type", "OpenStack")
radl.add(s)
return radl, s
def get_response(self, method, url, verify, headers, data=None):
resp = MagicMock()
resp.status_code = 400
parts = urlparse(url)
url = parts[2]
params = parts[4]
if method == "GET":
if url == "/infrastructures/infid" or url == "/infrastructures/newinfid":
resp.status_code = 200
resp.json.return_value = {"uri-list": [{ "uri": "http://server.com/infid/vms/0"},
{ "uri": "http://server.com/infid/vms/1"}]}
elif url == "/infrastructures/infid/state":
resp.status_code = 200
resp.json.return_value = {"state": {"state": "configured",
"vm_states": {"0": "configured",
"1": "configured"}}}
elif url == "/infrastructures/infid/vms/0":
resp.status_code = 200
resp.text = "network public (outbound='yes')\n"
resp.text += "system front (net_interface.0.connection = 'public' and net_interface.0.ip = '8.8.8.8')"
elif url == "/infrastructures/infid/data":
resp.status_code = 200
resp.json.return_value = {"data": "data"}
elif url == "/infrastructures/infid/contmsg":
resp.status_code = 200
resp.text = "contmsg"
elif url == "/infrastructures/infid/radl":
resp.status_code = 200
resp.text = "network public (outbound='yes')\n"
resp.text += "system front (net_interface.0.connection = 'public' and net_interface.0.ip = '8.8.8.8')"
elif method == "POST":
if url == "/infrastructures":
resp.status_code = 200
resp.text = 'http://server.com/infid'
elif url == "/infrastructures/infid":
resp.status_code = 200
resp.text = ''
elif method == "PUT":
if url == "/infrastructures":
resp.status_code = 200
resp.text = 'http://server.com/newinfid'
elif url == "/infrastructures/infid/reconfigure":
resp.status_code = 200
resp.text = ''
elif url == "/infrastructures/newinfid/stop":
resp.status_code = 200
resp.text = ''
elif url == "/infrastructures/infid/start":
resp.status_code = 200
resp.text = ''
elif method == "DELETE":
if url == "/infrastructures/infid":
resp.status_code = 200
return resp
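# The tests below install this dispatcher via requests.side_effect, so every
# mocked HTTP call is routed by method and URL to a canned IM REST response.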
@patch('ec3.ClusterStore')
def test_list(self, cluster_store):
cluster_store.list.return_value = ["name"]
radl, _ = self.gen_radl()
cluster_store.load.return_value = radl
Options = namedtuple('Options', ['json', 'refresh', 'username'])
options = Options(json=False, refresh=False, username=['user'])
old_stdout = sys.stdout
sys.stdout = StringIO()
CmdList.run(options)
res = sys.stdout.getvalue()
sys.stdout = old_stdout
self.assertEquals(res, " name state IP nodes provider \n---------------------------------------------\n name configured 8.8.8.8 1 OpenStack \n")
@patch('ec3.ClusterStore')
@patch('ec3.CLI.display')
@patch('requests.request')
def test_launch(self, requests, display, cluster_store):
Options = namedtuple('Options', ['quiet'])
cli_options = Options(quiet=False)
CLI.logger = logging.getLogger('ec3')
CLI.options = cli_options
cluster_store.list.return_value = ["name"]
Options = namedtuple('Options', ['not_store', 'clustername', 'auth_file', 'restapi', 'dry_run', 'templates',
'add', 'golden_image', 'print_radl', 'json', 'yes', 'destroy'])
auth_file = [MagicMock()]
auth_file[0].readlines.return_value = ["type = InfrastructureManager; username = user; password = pass"]
options = Options(not_store=False, clustername="name", auth_file=auth_file, restapi=['http://server.com:8800'],
dry_run=True, templates=['ubuntu-ec2','kubernetes'], add=False, golden_image=False,
print_radl=True, json=False, yes=True, destroy=False)
with self.assertRaises(SystemExit) as ex1:
CmdLaunch.run(options)
self.assertEquals("1" ,str(ex1.exception))
cluster_store.list.return_value = []
with self.assertRaises(SystemExit) as ex2:
CmdLaunch.run(options)
self.assertEquals("0" ,str(ex2.exception))
radl = """system front (
net_interface.1.dns_name = 'kubeserverpublic' and
disk.0.os.credentials.username = 'ubuntu' and
disk.0.applications contains (
name = 'ansible.modules.grycap.kubernetes'
) and
disk.0.applications contains (
name = 'ansible.modules.grycap.clues'
) and
disk.0.applications contains (
name = 'ansible.modules.grycap.im'
) and
cpu.count >= 2 and
net_interface.1.connection = 'public' and
queue_system = 'kubernetes' and
net_interface.0.dns_name = 'kubeserver' and
instance_type = 't1.micro' and
ec3_templates = 'im,clues2,kubernetes' and
disk.0.image.url = 'aws://us-east-1/ami-30519058' and
auth = 'username = user ; password = pass ; type = InfrastructureManager
' and
net_interface.0.connection = 'private' and
memory.size >= 2048m and
disk.0.os.name = 'linux' and
ec3_templates_cmd = 'ubuntu-ec2 kubernetes'
)
system wn (
disk.0.image.url = 'aws://us-east-1/ami-30519058' and
instance_type = 't1.micro' and
ec3_max_instances = 10 and
memory.size >= 2048m and
net_interface.0.connection = 'private' and
disk.0.os.name = 'linux' and
disk.0.os.credentials.username = 'ubuntu'
)
network public (
outbound = 'yes' and
outports = '8899/tcp,6443/tcp,80/tcp,8800/tcp'
)
network private (
)
configure front (
@begin
- tasks:
- iptables:
action: insert
chain: INPUT
destination_port: '{{item|dirname}}'
jump: ACCEPT
protocol: '{{item|basename}}'
when: ansible_os_family == "RedHat"
with_items: '{{OUTPORTS.split('','')}}'
- firewalld:
immediate: true
permanent: true
port: '{{item}}'
state: enabled
ignore_errors: true
when: ansible_os_family == "RedHat"
with_items: '{{OUTPORTS.split('','')}}'
vars:
OUTPORTS: 8899/tcp,6443/tcp,80/tcp,8800/tcp
- roles:
- kube_api_server: '{{ IM_NODE_PRIVATE_IP }}'
kube_apiserver_options:
- option: --insecure-port
value: '8080'
kube_apply_repos: []
kube_server: kubeserver
role: grycap.kubernetes
- roles:
- role: grycap.im
- roles:
- auth: '{{AUTH}}'
clues_queue_system: '{{QUEUE_SYSTEM}}'
max_number_of_nodes: '{{ NNODES }}'
role: grycap.clues
vnode_prefix: wn
vars:
AUTH: 'username = user ; password = pass ; type = InfrastructureManager
'
NNODES: '{{ SYSTEMS | selectattr("ec3_max_instances_max", "defined") | sum(attribute="ec3_max_instances_max")
}}'
QUEUE_SYSTEM: kubernetes
SYSTEMS:
- auth: 'username = user ; password = pass ; type = InfrastructureManager
'
class: system
cpu.count_max: inf
cpu.count_min: 2
disk.0.applications:
- name: ansible.modules.grycap.kubernetes
- name: ansible.modules.grycap.clues
- name: ansible.modules.grycap.im
disk.0.image.url: aws://us-east-1/ami-30519058
disk.0.os.credentials.username: ubuntu
disk.0.os.name: linux
ec3_templates:
- im
- clues2
- kubernetes
ec3_templates_cmd: ubuntu-ec2 kubernetes
id: front
instance_type: t1.micro
memory.size_max: inf
memory.size_min: 2147483648
net_interface.0.connection:
class: network
id: private
reference: true
net_interface.0.dns_name: kubeserver
net_interface.1.connection:
class: network
id: public
reference: true
net_interface.1.dns_name: kubeserverpublic
queue_system: kubernetes
- class: network
id: public
outbound: 'yes'
outports:
- 8899/tcp
- 6443/tcp
- 80/tcp
- 8800/tcp
- class: network
id: private
- class: system
disk.0.image.url: aws://us-east-1/ami-30519058
disk.0.os.credentials.username: ubuntu
disk.0.os.name: linux
ec3_max_instances_max: 10
ec3_max_instances_min: 10
id: wn
instance_type: t1.micro
memory.size_max: inf
memory.size_min: 2147483648
net_interface.0.connection:
class: network
id: private
reference: true
@end
)
configure wn (
@begin
- tasks:
- iptables:
action: insert
chain: INPUT
destination_port: '{{item|dirname}}'
jump: ACCEPT
protocol: '{{item|basename}}'
when: ansible_os_family == "RedHat"
with_items: '{{OUTPORTS.split('','')}}'
- firewalld:
immediate: true
permanent: true
port: '{{item}}'
state: enabled
ignore_errors: true
when: ansible_os_family == "RedHat"
with_items: '{{OUTPORTS.split('','')}}'
vars:
OUTPORTS: 8899/tcp,6443/tcp,80/tcp,8800/tcp
- roles:
- kube_server: kubeserver
kube_type_of_node: wn
role: grycap.kubernetes
@end
)
deploy front 1
"""
if sys.version_info < (3, 0):
self.assertEquals(display.call_args_list[1][0][0], radl)
requests.side_effect = self.get_response
options = Options(not_store=False, clustername="name", auth_file=auth_file, restapi=['http://server.com:8800'],
dry_run=False, templates=['ubuntu-ec2','kubernetes'], add=False, golden_image=False,
print_radl=False, json=False, yes=True, destroy=False)
with self.assertRaises(SystemExit) as ex2:
CmdLaunch.run(options)
self.assertEquals("0" ,str(ex2.exception))
self.assertEquals(display.call_args_list[4][0][0], "Infrastructure successfully created with ID: infid")
self.assertEquals(display.call_args_list[5][0][0], "Front-end configured with IP 8.8.8.8")
self.assertEquals(display.call_args_list[6][0][0], "Transferring infrastructure")
self.assertEquals(display.call_args_list[7][0][0], "Front-end ready!")
def test_templates(self):
Options = namedtuple('Options', ['search', 'name', 'json', 'full'])
options = Options(search=[None], name=[None], json=False, full=False)
old_stdout = sys.stdout
sys.stdout = StringIO()
CmdTemplates.run(options)
res = sys.stdout.getvalue()
sys.stdout = old_stdout
self.assertIn(" name kind summary \n", res)
self.assertIn("----------------------------------------------------------------------------------------------------------------------\n", res)
self.assertIn(" galaxy component Galaxy is an open, web-based platform for data intensive biomedical research. \n", res)
@patch('requests.request')
@patch('ec3.ClusterStore')
@patch('ec3.CLI.display')
def test_destroy(self, display, cluster_store, requests):
cluster_store.list.return_value = []
Options = namedtuple('Options', ['restapi', 'json', 'clustername', 'force', 'yes', 'auth_file'])
options = Options(restapi=['http://server.com:8800'], json=False, clustername='name', force=True, yes=True,
auth_file=[])
with self.assertRaises(SystemExit) as ex:
CmdDestroy.run(options)
self.assertEquals("1" ,str(ex.exception))
cluster_store.list.return_value = ["name"]
radl, _ = self.gen_radl()
cluster_store.load.return_value = radl
auth = [{"type": "InfrastructureManager", "username": "user", "password": "pass"}]
cluster_store.get_im_server_infrId_and_vmId_and_auth.return_value = "http://server.com", "infid", "", auth
requests.side_effect = self.get_response
with self.assertRaises(SystemExit) as ex:
CmdDestroy.run(options)
self.assertEquals("0" ,str(ex.exception))
@patch('requests.request')
@patch('os.listdir')
@patch('os.makedirs')
@patch(open_name, new_callable=mock_open, read_data=cluster_data)
def test_cluster_store(self, mo, makedirs, listdirs, requests):
listdirs.return_value = ["cluster1"]
res = ClusterStore.list()
self.assertEqual(["cluster1"], res)
requests.side_effect = self.get_response
res = ClusterStore.load("cluster1", True)
s = res.get(system("front"))
self.assertEqual(s.getValue("__infrastructure_id"), "infid")
self.assertIn(".ec3/clusters/cluster1", mo.call_args_list[-1][0][0])
if sys.version_info < (3, 0):
expected_res = """network public (\n outbound = \'yes\'\n)\n\nsystem front (\n net_interface.0.ip = \'8.8.8.8\' and\n __infrastructure_id = \'infid\' and\n auth = \'[{"type": "InfrastructureManager", "username": "user", "password": "pass"}]\' and\n __im_server = \'http://server.com:8800\' and\n net_interface.0.connection = \'public\' and\n nodes = 1 and\n contextualization_output = \'contmsg\'\n)"""
self.assertEqual(mo.mock_calls[-2][1][0], expected_res)
def test_cli(self):
testargs = ["ec3", "list"]
with patch.object(sys, 'argv', testargs):
old_stdout = sys.stdout
sys.stdout = StringIO()
res = CLI.run([CmdList])
res = sys.stdout.getvalue()
sys.stdout = old_stdout
self.assertIn(" name ", res)
self.assertIn(" state ", res)
self.assertIn(" IP ", res)
self.assertIn(" nodes ", res)
self.assertIn(" provider \n", res)
@patch('requests.request')
@patch('ec3.ClusterStore')
@patch('ec3.CLI.display')
def test_reconf(self, display, cluster_store, requests):
Options = namedtuple('Options', ['restapi', 'json', 'clustername', 'reload', 'yes',
'auth_file', 'add', 'new_template', 'force'])
options = Options(restapi=['http://server.com:8800'], json=False, clustername='name', reload=False, yes=True,
auth_file=[], add=[], new_template=None, force=False)
cluster_store.list.return_value = ["name"]
radl, _ = self.gen_radl()
cluster_store.load.return_value = radl
auth = [{"type": "InfrastructureManager", "username": "user", "password": "pass"}]
cluster_store.get_im_server_infrId_and_vmId_and_auth.return_value = "http://server.com", "infid", "0", auth
requests.side_effect = self.get_response
with self.assertRaises(SystemExit) as ex:
CmdReconfigure.run(options)
self.assertEquals("0" ,str(ex.exception))
@patch('requests.request')
@patch('ec3.ClusterStore')
@patch('ec3.CLI.display')
def test_clone(self, display, cluster_store, requests):
auth_file = [MagicMock()]
auth_file[0].readlines.return_value = ["type = InfrastructureManager; username = user; password = pass"]
Options = namedtuple('Options', ['restapi', 'json', 'clustername', 'destination', 'auth_file', 'eliminate'])
options = Options(restapi=['http://server.com:8800'], json=False, clustername='name',
destination=["http://server2.com:8800"], auth_file=auth_file, eliminate=True)
cluster_store.list.return_value = ["name"]
radl, _ = self.gen_radl()
cluster_store.load.return_value = radl
auth = [{"type": "InfrastructureManager", "username": "user", "password": "pass"}]
cluster_store.get_im_server_infrId_and_vmId_and_auth.return_value = "http://server.com", "infid", "0", auth
requests.side_effect = self.get_response
with self.assertRaises(SystemExit) as ex:
CmdClone.run(options)
self.assertEquals("0" ,str(ex.exception))
@patch('requests.request')
@patch('ec3.ClusterStore')
@patch('ec3.CLI.display')
def test_stop(self, display, cluster_store, requests):
auth_file = [MagicMock()]
auth_file[0].readlines.return_value = ["type = InfrastructureManager; username = user; password = pass"]
Options = namedtuple('Options', ['restapi', 'json', 'clustername', 'auth_file', 'yes'])
options = Options(restapi=['http://server.com:8800'], json=False, clustername='name', auth_file=auth_file, yes=True)
cluster_store.list.return_value = ["name"]
radl, _ = self.gen_radl()
cluster_store.load.return_value = radl
auth = [{"type": "InfrastructureManager", "username": "user", "password": "pass"}]
cluster_store.get_im_server_infrId_and_vmId_and_auth.return_value = "http://server.com", "infid", "0", auth
requests.side_effect = self.get_response
with self.assertRaises(SystemExit) as ex:
CmdStop.run(options)
self.assertEquals("0" ,str(ex.exception))
@patch('requests.request')
@patch('ec3.ClusterStore')
@patch('ec3.CLI.display')
def test_restart(self, display, cluster_store, requests):
auth_file = [MagicMock()]
auth_file[0].readlines.return_value = ["type = InfrastructureManager; username = user; password = pass"]
Options = namedtuple('Options', ['restapi', 'json', 'clustername', 'auth_file', 'yes'])
options = Options(restapi=['http://server.com:8800'], json=False, clustername='name', auth_file=auth_file, yes=True)
cluster_store.list.return_value = ["name"]
radl, _ = self.gen_radl()
cluster_store.load.return_value = radl
auth = [{"type": "InfrastructureManager", "username": "user", "password": "pass"}]
cluster_store.get_im_server_infrId_and_vmId_and_auth.return_value = "http://server.com", "infid", "0", auth
requests.side_effect = self.get_response
with self.assertRaises(SystemExit) as ex:
CmdRestart.run(options)
self.assertEquals("0" ,str(ex.exception))
@patch('ec3.ClusterStore')
@patch('ec3.CLI.display')
def test_ssh(self, display, cluster_store):
Options = namedtuple('Options', ['json', 'clustername', 'show_only', 'sshcommand'])
options = Options(json=False, clustername='name', show_only=True, sshcommand=['ls','-l','/tmp'])
cluster_store.list.return_value = ["name"]
radl, s = self.gen_radl()
cluster_store.load.return_value = radl
auth = [{"type": "InfrastructureManager", "username": "user", "password": "pass"}]
cluster_store.get_im_server_infrId_and_vmId_and_auth.return_value = "http://server.com", "infid", "0", auth
with self.assertRaises(SystemExit) as ex:
CmdSsh.run(options)
self.assertEquals("0" ,str(ex.exception))
self.assertEquals(display.call_args_list[0][0][0], "sshpass -ppass ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no user@8.8.8.8 -p 22 ls -l /tmp")
s.setValue("disk.0.os.credentials.private_key", "priv_key")
with self.assertRaises(SystemExit) as ex:
CmdSsh.run(options)
self.assertEquals("0" ,str(ex.exception))
self.assertIn("ssh -i /tmp/tmp", display.call_args_list[1][0][0])
self.assertIn(" -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no user@8.8.8.8 -p 22", display.call_args_list[1][0][0])
if sys.version_info > (3, 0):
priv_key_file = display.call_args_list[1][0][0][7:23]
else:
priv_key_file = display.call_args_list[1][0][0][7:21]
with open(priv_key_file, "r") as f:
self.assertEquals(f.read(), "priv_key")
@patch('requests.request')
@patch('ec3.ClusterStore')
@patch('ec3.CLI.display')
def test_update(self, display, cluster_store, requests):
Options = namedtuple('Options', ['restapi', 'clustername', 'auth_file', 'add'])
options = Options(restapi=['http://server.com:8800'], clustername='name',
auth_file=[], add=["system wn ( cpu.count = 4 )"])
cluster_store.list.return_value = ["name"]
radl, _ = self.gen_radl()
radl.get(system("front")).setValue("ec3_templates_cmd", "ubuntu-ec2 kubernetes")
cluster_store.load.return_value = radl
auth = [{"type": "InfrastructureManager", "username": "user", "password": "pass"}]
cluster_store.get_im_server_infrId_and_vmId_and_auth.return_value = "http://server.com", "infid", "0", auth
requests.side_effect = self.get_response
with self.assertRaises(SystemExit) as ex:
CmdUpdate.run(options)
self.assertEquals("0" ,str(ex.exception))
self.assertEquals(requests.call_args_list[0][0][0], "POST")
self.assertEquals(requests.call_args_list[0][0][1], "http://server.com/infrastructures/infid")
radlo = parse_radl(requests.call_args_list[0][1]['data'])
self.assertEquals(radlo.get(system("wn")).getValue("cpu.count"), 4)
if __name__ == "__main__":
unittest.main()
|
grycap/ec3
|
test/test_ec3.py
|
Python
|
apache-2.0
| 23,952
|
[
"Galaxy"
] |
5d1599a894c00e9a7bb2d789995c2aa436cdc68f962e385e9218197103b23948
|
from collections import Sequence
from distutils.version import LooseVersion
import logging
import warnings
import sys
import os
from os import path as op
import inspect
from functools import wraps
import mayavi
from mayavi import mlab
from mayavi.filters.api import Threshold
import numpy as np
import nibabel as nib
from scipy import sparse
from scipy.spatial.distance import cdist
import matplotlib as mpl
from matplotlib import cm as mpl_cm
from . import cm as surfer_cm
logger = logging.getLogger('surfer')
# Py3k compat
if sys.version[0] == '2':
string_types = basestring # noqa, analysis:ignore
else:
string_types = str
if LooseVersion(mayavi.__version__) == LooseVersion('4.5.0'):
# Monkey-patch Mayavi 4.5:
# In Mayavi 4.5, filters seem to be missing a .point_data attribute that
# Threshold accesses on initialization.
_orig_meth = Threshold._get_data_range
def _patch_func():
return []
def _patch_meth(self):
return []
class _MayaviThresholdPatch(object):
def __enter__(self):
Threshold._get_data_range = _patch_meth
def __exit__(self, exc_type, exc_val, exc_tb):
Threshold._get_data_range = _orig_meth
_mayavi_threshold_patch = _MayaviThresholdPatch()
def threshold_filter(*args, **kwargs):
with _mayavi_threshold_patch:
thresh = mlab.pipeline.threshold(*args, **kwargs)
thresh._get_data_range = _patch_func
return thresh
else:
threshold_filter = mlab.pipeline.threshold
class Surface(object):
"""Container for surface object
Attributes
----------
subject_id : string
Name of subject
hemi : {'lh', 'rh'}
Which hemisphere to load
surf : string
Name of the surface to load (eg. inflated, orig ...)
subjects_dir : str | None
If not None, this directory will be used as the subjects directory
instead of the value set using the SUBJECTS_DIR environment variable.
offset : float | None
If float, align inside edge of each hemisphere to center + offset.
If None, do not change coordinates (default).
units : str
Can be 'm' or 'mm' (default).
"""
def __init__(self, subject_id, hemi, surf, subjects_dir=None,
offset=None, units='mm'):
"""Surface
Parameters
----------
subject_id : string
Name of subject
hemi : {'lh', 'rh'}
Which hemisphere to load
surf : string
Name of the surface to load (eg. inflated, orig ...)
offset : float | None
If 0.0, the surface will be offset such that the medial
wall is aligned with the origin. If None, no offset will
be applied. If != 0.0, an additional offset will be used.
"""
if hemi not in ['lh', 'rh']:
raise ValueError('hemi must be "lh" or "rh"')
self.subject_id = subject_id
self.hemi = hemi
self.surf = surf
self.offset = offset
self.coords = None
self.faces = None
self.nn = None
self.units = _check_units(units)
subjects_dir = _get_subjects_dir(subjects_dir)
self.data_path = op.join(subjects_dir, subject_id)
def load_geometry(self):
surf_path = op.join(self.data_path, "surf",
"%s.%s" % (self.hemi, self.surf))
coords, faces = nib.freesurfer.read_geometry(surf_path)
if self.units == 'm':
coords /= 1000.
if self.offset is not None:
if self.hemi == 'lh':
coords[:, 0] -= (np.max(coords[:, 0]) + self.offset)
else:
coords[:, 0] -= (np.min(coords[:, 0]) + self.offset)
nn = _compute_normals(coords, faces)
if self.coords is None:
self.coords = coords
self.faces = faces
self.nn = nn
else:
self.coords[:] = coords
self.faces[:] = faces
self.nn[:] = nn
@property
def x(self):
return self.coords[:, 0]
@property
def y(self):
return self.coords[:, 1]
@property
def z(self):
return self.coords[:, 2]
def load_curvature(self):
"""Load in curvature values from the ?h.curv file."""
curv_path = op.join(self.data_path, "surf", "%s.curv" % self.hemi)
self.curv = nib.freesurfer.read_morph_data(curv_path)
self.bin_curv = np.array(self.curv > 0, np.int)
def load_label(self, name):
"""Load in a Freesurfer .label file.
Label files are just text files indicating the vertices included
in the label. Each Surface instance has a dictionary of labels, keyed
by the name (which is taken from the file name if not given as an
argument.
"""
label = nib.freesurfer.read_label(op.join(self.data_path, 'label',
'%s.%s.label' % (self.hemi, name)))
label_array = np.zeros(len(self.x), np.int)
label_array[label] = 1
try:
self.labels[name] = label_array
except AttributeError:
self.labels = {name: label_array}
def apply_xfm(self, mtx):
"""Apply an affine transformation matrix to the x,y,z vectors."""
self.coords = np.dot(np.c_[self.coords, np.ones(len(self.coords))],
mtx.T)[:, :3]
def _fast_cross_3d(x, y):
"""Compute cross product between list of 3D vectors
Much faster than np.cross() when the number of cross products
becomes large (>500). This is because np.cross() methods become
less memory efficient at this stage.
Parameters
----------
x : array
Input array 1.
y : array
Input array 2.
Returns
-------
z : array
Cross product of x and y.
Notes
-----
x and y must both be 2D row vectors. One must have length 1, or both
lengths must match.
"""
assert x.ndim == 2
assert y.ndim == 2
assert x.shape[1] == 3
assert y.shape[1] == 3
assert (x.shape[0] == 1 or y.shape[0] == 1) or x.shape[0] == y.shape[0]
if max([x.shape[0], y.shape[0]]) >= 500:
return np.c_[x[:, 1] * y[:, 2] - x[:, 2] * y[:, 1],
x[:, 2] * y[:, 0] - x[:, 0] * y[:, 2],
x[:, 0] * y[:, 1] - x[:, 1] * y[:, 0]]
else:
return np.cross(x, y)
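# A quick sanity check (illustrative): unit vectors along x and y yield z, and
# a length-1 input broadcasts against the other argument:
# _fast_cross_3d(np.array([[1., 0., 0.]]), np.array([[0., 1., 0.]]))
# -> array([[0., 0., 1.]])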
def _compute_normals(rr, tris):
"""Efficiently compute vertex normals for triangulated surface"""
# first, compute triangle normals
r1 = rr[tris[:, 0], :]
r2 = rr[tris[:, 1], :]
r3 = rr[tris[:, 2], :]
tri_nn = _fast_cross_3d((r2 - r1), (r3 - r1))
# Triangle normals and areas
size = np.sqrt(np.sum(tri_nn * tri_nn, axis=1))
zidx = np.where(size == 0)[0]
size[zidx] = 1.0 # prevent ugly divide-by-zero
tri_nn /= size[:, np.newaxis]
npts = len(rr)
# the following code replaces this, but is faster (vectorized):
#
# for p, verts in enumerate(tris):
# nn[verts, :] += tri_nn[p, :]
#
nn = np.zeros((npts, 3))
for verts in tris.T: # note this only loops 3x (number of verts per tri)
for idx in range(3): # x, y, z
nn[:, idx] += np.bincount(verts, tri_nn[:, idx], minlength=npts)
size = np.sqrt(np.sum(nn * nn, axis=1))
size[size == 0] = 1.0 # prevent ugly divide-by-zero
nn /= size[:, np.newaxis]
return nn
###############################################################################
# LOGGING (courtesy of mne-python)
def set_log_level(verbose=None, return_old_level=False):
"""Convenience function for setting the logging level
Parameters
----------
verbose : bool, str, int, or None
The verbosity of messages to print. If a str, it can be either DEBUG,
INFO, WARNING, ERROR, or CRITICAL. Note that these are for
convenience and are equivalent to passing in logging.DEBUG, etc.
For bool, True is the same as 'INFO', False is the same as 'WARNING'.
If None, the environment variable MNE_LOG_LEVEL is read, and if
it doesn't exist, defaults to INFO.
return_old_level : bool
If True, return the old verbosity level.
"""
if verbose is None:
verbose = "INFO"
elif isinstance(verbose, bool):
if verbose is True:
verbose = 'INFO'
else:
verbose = 'WARNING'
if isinstance(verbose, string_types):
verbose = verbose.upper()
logging_types = dict(DEBUG=logging.DEBUG, INFO=logging.INFO,
WARNING=logging.WARNING, ERROR=logging.ERROR,
CRITICAL=logging.CRITICAL)
if verbose not in logging_types:
raise ValueError('verbose must be of a valid type')
verbose = logging_types[verbose]
old_verbose = logger.level
logger.setLevel(verbose)
return (old_verbose if return_old_level else None)
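# For example: set_log_level('debug', return_old_level=True) accepts lower-case
# names (they are upper-cased above) and returns the previous level so callers
# can restore it later.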
class WrapStdOut(object):
"""Ridiculous class to work around how doctest captures stdout"""
def __getattr__(self, name):
# Even more ridiculous than this class, this must be sys.stdout (not
# just stdout) in order for this to work (tested on OSX and Linux)
return getattr(sys.stdout, name)
def set_log_file(fname=None, output_format='%(message)s', overwrite=None):
"""Convenience function for setting the log to print to a file
Parameters
----------
fname : str, or None
Filename of the log to print to. If None, stdout is used.
To suppress log outputs, use set_log_level('WARN').
output_format : str
Format of the output messages. See the following for examples:
http://docs.python.org/dev/howto/logging.html
e.g., "%(asctime)s - %(levelname)s - %(message)s".
overwrite : bool, or None
Overwrite the log file (if it exists). Otherwise, statements
will be appended to the log (default). None is the same as False,
but additionally raises a warning to notify the user that log
entries will be appended.
"""
handlers = logger.handlers
for h in handlers:
if isinstance(h, logging.FileHandler):
h.close()
logger.removeHandler(h)
if fname is not None:
if op.isfile(fname) and overwrite is None:
warnings.warn('Log entries will be appended to the file. Use '
'overwrite=False to avoid this message in the '
'future.')
mode = 'w' if overwrite is True else 'a'
lh = logging.FileHandler(fname, mode=mode)
else:
""" we should just be able to do:
lh = logging.StreamHandler(sys.stdout)
but because doctests uses some magic on stdout, we have to do this:
"""
lh = logging.StreamHandler(WrapStdOut())
lh.setFormatter(logging.Formatter(output_format))
# actually add the stream handler
logger.addHandler(lh)
if hasattr(inspect, 'signature'): # py35
def _get_args(function, varargs=False):
params = inspect.signature(function).parameters
args = [key for key, param in params.items()
if param.kind not in (param.VAR_POSITIONAL, param.VAR_KEYWORD)]
if varargs:
varargs = [param.name for param in params.values()
if param.kind == param.VAR_POSITIONAL]
if len(varargs) == 0:
varargs = None
return args, varargs
else:
return args
else:
def _get_args(function, varargs=False):
out = inspect.getargspec(function) # args, varargs, keywords, defaults
if varargs:
return out[:2]
else:
return out[0]
def verbose(function):
"""Decorator to allow functions to override default log level
Do not call this function directly to set the global verbosity level,
instead use set_log_level().
Parameters (to decorated function)
----------------------------------
verbose : bool, str, int, or None
The level of messages to print. If a str, it can be either DEBUG,
INFO, WARNING, ERROR, or CRITICAL. Note that these are for
convenience and are equivalent to passing in logging.DEBUG, etc.
For bool, True is the same as 'INFO', False is the same as 'WARNING'.
None defaults to using the current log level [e.g., set using
mne.set_log_level()].
"""
arg_names = _get_args(function)
# this wrap allows decorated functions to be pickled (e.g., for parallel)
@wraps(function)
def dec(*args, **kwargs):
# Check if the first arg is "self", if it has verbose, make it default
if len(arg_names) > 0 and arg_names[0] == 'self':
default_level = getattr(args[0], 'verbose', None)
else:
default_level = None
verbose_level = kwargs.get('verbose', default_level)
if verbose_level is not None:
old_level = set_log_level(verbose_level, True)
# set it back if we get an exception
try:
ret = function(*args, **kwargs)
except Exception:
set_log_level(old_level)
raise
set_log_level(old_level)
return ret
else:
return function(*args, **kwargs)
# set __wrapped__ attribute so ?? in IPython gets the right source
dec.__wrapped__ = function
return dec
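# Typical use of the decorator (illustrative sketch):
#
# @verbose
# def my_func(x, verbose=None):
#     logger.info('hello')
#
# my_func(1, verbose='debug')  # temporarily lowers the threshold to DEBUG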
###############################################################################
# USEFUL FUNCTIONS
def _check_units(units):
if units not in ('m', 'mm'):
raise ValueError('Units must be "m" or "mm", got %r' % (units,))
return units
def find_closest_vertices(surface_coords, point_coords):
"""Return the vertices on a surface mesh closest to some given coordinates.
The distance metric used is Euclidean distance.
Parameters
----------
surface_coords : numpy array
Array of coordinates on a surface mesh
point_coords : numpy array
Array of coordinates to map to vertices
Returns
-------
closest_vertices : numpy array
Array of mesh vertex ids
"""
point_coords = np.atleast_2d(point_coords)
return np.argmin(cdist(surface_coords, point_coords), axis=0)
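# For example (illustrative): with two mesh vertices, a point near the second
# maps to index 1:
# find_closest_vertices(np.array([[0., 0., 0.], [1., 1., 1.]]), [0.9, 0.9, 0.9])
# -> array([1])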
def tal_to_mni(coords, units='mm'):
"""Convert Talairach coords to MNI using the Lancaster transform.
Parameters
----------
coords : n x 3 numpy array
Array of Talairach coordinates
units : str
Can be 'm' or 'mm' (default).
Returns
-------
mni_coords : n x 3 numpy array
Array of coordinates converted to MNI space.
"""
coords = np.atleast_2d(coords)
xfm = np.array([[1.06860, -0.00396, 0.00826, 1.07816],
[0.00640, 1.05741, 0.08566, 1.16824],
[-0.01281, -0.08863, 1.10792, -4.17805],
[0.00000, 0.00000, 0.00000, 1.00000]])
units = _check_units(units)
if units == 'm':
xfm[:3, 3] /= 1000.
mni_coords = np.dot(np.c_[coords, np.ones(coords.shape[0])], xfm.T)[:, :3]
return mni_coords
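# A quick sanity check (illustrative): the Talairach origin maps to the
# translation column of the Lancaster transform:
# tal_to_mni([0, 0, 0])  # -> array([[ 1.07816, 1.16824, -4.17805]])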
def mesh_edges(faces):
"""Returns sparse matrix with edges as an adjacency matrix
Parameters
----------
faces : array of shape [n_triangles x 3]
The mesh faces
Returns
-------
edges : sparse matrix
The adjacency matrix
"""
npoints = np.max(faces) + 1
nfaces = len(faces)
a, b, c = faces.T
edges = sparse.coo_matrix((np.ones(nfaces), (a, b)),
shape=(npoints, npoints))
edges = edges + sparse.coo_matrix((np.ones(nfaces), (b, c)),
shape=(npoints, npoints))
edges = edges + sparse.coo_matrix((np.ones(nfaces), (c, a)),
shape=(npoints, npoints))
edges = edges + edges.T
edges = edges.tocoo()
return edges
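# A tiny illustration: a single triangle yields the symmetric 3 x 3 adjacency
# matrix connecting vertices (0, 1), (1, 2) and (2, 0):
# mesh_edges(np.array([[0, 1, 2]])).toarray()
# -> [[0., 1., 1.], [1., 0., 1.], [1., 1., 0.]]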
def create_color_lut(cmap, n_colors=256, center=None):
"""Return a colormap suitable for setting as a Mayavi LUT.
Parameters
----------
cmap : string, list of colors, n x 3 or n x 4 array
Input colormap definition. This can be the name of a matplotlib
colormap, a list of valid matplotlib colors, or a suitable
mayavi LUT (possibly missing the alpha channel).
if value is "auto", a default sequential or divergent colormap is
returned
n_colors : int, optional
Number of colors in the resulting LUT. This is ignored if cmap
is a 2d array.
center : double, optional
indicates whether desired colormap should be for divergent values,
currently only used to select default colormap for cmap='auto'
Returns
-------
lut : n_colors x 4 integer array
Color LUT suitable for passing to mayavi
"""
if isinstance(cmap, np.ndarray):
if np.ndim(cmap) == 2:
if cmap.shape[1] == 4:
# This looks like a LUT that's ready to go
lut = cmap.astype(np.int)
elif cmap.shape[1] == 3:
# This looks like a LUT, but it's missing the alpha channel
alpha = np.ones(len(cmap), np.int) * 255
lut = np.c_[cmap, alpha]
return lut
# choose default colormaps (REMEMBER to change doc, e.g., in
# Brain.add_data, when changing these defaults)
if isinstance(cmap, string_types) and cmap == "auto":
if center is None:
cmap = "rocket"
else:
cmap = "icefire"
surfer_cmaps = ["rocket", "mako", "icefire", "vlag"]
surfer_cmaps += [name + "_r" for name in surfer_cmaps]
if not isinstance(cmap, string_types) and isinstance(cmap, Sequence):
colors = list(map(mpl.colors.colorConverter.to_rgba, cmap))
cmap = mpl.colors.ListedColormap(colors)
elif cmap in surfer_cmaps:
cmap = getattr(surfer_cm, cmap)
else:
try:
# Try to get a named matplotlib colormap
# This will also pass Colormap object back out
cmap = mpl_cm.get_cmap(cmap)
except (TypeError, ValueError):
# If we get here, it's a bad input
# but don't raise the matplotlib error as it is less accurate
raise ValueError("Input %r was not valid for making a lut" % cmap)
# Convert from a matplotlib colormap to a lut array
lut = (cmap(np.linspace(0, 1, n_colors)) * 255).astype(np.int)
return lut
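# For instance (illustrative): create_color_lut("viridis", n_colors=4) samples
# the matplotlib colormap into a 4 x 4 integer array of RGBA values in 0..255.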
@verbose
def smoothing_matrix(vertices, adj_mat, smoothing_steps=20, verbose=None):
"""Create a smoothing matrix which can be used to interpolate data defined
for a subset of vertices onto a mesh with an adjacency matrix given by
adj_mat.
If smoothing_steps is None, as many smoothing steps are applied until
the whole mesh is filled with non-zeros. Only use this option if
the vertices correspond to a subsampled version of the mesh.
Parameters
----------
vertices : 1d array
vertex indices
adj_mat : sparse matrix
N x N adjacency matrix of the full mesh
smoothing_steps : int, 'nearest', or None
number of smoothing steps (Default: 20)
verbose : bool, str, int, or None
If not None, override default verbose level (see surfer.verbose).
Returns
-------
smooth_mat : sparse matrix
smoothing matrix with size N x len(vertices)
"""
if smoothing_steps == 'nearest':
mat = _nearest(vertices, adj_mat)
else:
mat = _smooth(vertices, adj_mat, smoothing_steps)
return mat
def _nearest(vertices, adj_mat):
import scipy
from scipy.sparse.csgraph import dijkstra
if LooseVersion(scipy.__version__) < LooseVersion('1.3'):
raise RuntimeError('smoothing_steps="nearest" requires SciPy >= 1.3')
# Vertices can be out of order, so sort them to start ...
order = np.argsort(vertices)
vertices = vertices[order]
_, _, sources = dijkstra(adj_mat, False, indices=vertices, min_only=True,
return_predecessors=True)
col = np.searchsorted(vertices, sources)
# ... then get things back to the correct configuration.
col = order[col]
row = np.arange(len(col))
data = np.ones(len(col))
mat = sparse.coo_matrix((data, (row, col)))
assert mat.shape == (adj_mat.shape[0], len(vertices)), mat.shape
return mat
def _smooth(vertices, adj_mat, smoothing_steps):
from scipy import sparse
logger.debug("Updating smoothing matrix, be patient..")
e = adj_mat.copy()
e.data[e.data == 2] = 1
n_vertices = e.shape[0]
e = e + sparse.eye(n_vertices, n_vertices)
idx_use = vertices
smooth_mat = 1.0
n_iter = smoothing_steps if smoothing_steps is not None else 1000
for k in range(n_iter):
e_use = e[:, idx_use]
data1 = e_use * np.ones(len(idx_use))
idx_use = np.where(data1)[0]
scale_mat = sparse.dia_matrix((1 / data1[idx_use], 0),
shape=(len(idx_use), len(idx_use)))
smooth_mat = scale_mat * e_use[idx_use, :] * smooth_mat
logger.debug("Smoothing matrix creation, step %d" % (k + 1))
if smoothing_steps is None and len(idx_use) >= n_vertices:
break
# Make sure the smoothing matrix has the right number of rows
# and is in COO format
smooth_mat = smooth_mat.tocoo()
smooth_mat = sparse.coo_matrix((smooth_mat.data,
(idx_use[smooth_mat.row],
smooth_mat.col)),
shape=(n_vertices,
len(vertices)))
return smooth_mat
@verbose
def coord_to_label(subject_id, coord, label, hemi='lh', n_steps=30,
map_surface='white', coord_as_vert=False, units='mm',
verbose=None):
"""Create label from MNI coordinate
Parameters
----------
subject_id : string
Use if file is in register with subject's orig.mgz
coord : numpy array of size 3 | int
One coordinate in MNI space or the vertex index.
label : str
Label name
hemi : [lh, rh]
Hemisphere target
n_steps : int
Number of dilation iterations
map_surface : str
The surface name used to find the closest point
coord_as_vert : bool
whether the coords parameter should be interpreted as vertex ids
units : str
Can be 'm' or 'mm' (default).
verbose : bool, str, int, or None
If not None, override default verbose level (see surfer.verbose).
"""
geo = Surface(subject_id, hemi, map_surface, units=units)
geo.load_geometry()
coords = geo.coords
# work in mm from here on
if geo.units == 'm':
coords = coords * 1000
if coord_as_vert:
coord = coords[coord]
n_vertices = len(coords)
adj_mat = mesh_edges(geo.faces)
foci_vtxs = find_closest_vertices(coords, [coord])
data = np.zeros(n_vertices)
data[foci_vtxs] = 1.
smooth_mat = smoothing_matrix(np.arange(n_vertices), adj_mat, 1)
for _ in range(n_steps):
data = smooth_mat * data
idx = np.where(data.ravel() > 0)[0]
# Write label
label_fname = label + '-' + hemi + '.label'
logger.debug("Saving label : %s" % label_fname)
with open(label_fname, 'w') as f:
    f.write('#label at %s from subject %s\n' % (coord, subject_id))
    f.write('%d\n' % len(idx))
    for i in idx:
        x, y, z = coords[i]
        f.write('%d %f %f %f 0.000000\n' % (i, x, y, z))
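# Hedged usage sketch (subject name and coordinate are placeholders only):
#
#     coord_to_label('fsaverage', [-42.0, -20.0, 50.0], label='my_roi',
#                    hemi='lh', n_steps=30, map_surface='white')
#     # writes 'my_roi-lh.label' in the current working directory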
def _get_subjects_dir(subjects_dir=None, raise_error=True):
"""Get the subjects directory from parameter or environment variable
Parameters
----------
subjects_dir : str | None
The subjects directory.
raise_error : bool
If True, raise a ValueError if no value for SUBJECTS_DIR can be found
or the corresponding directory does not exist.
Returns
-------
subjects_dir : str
The subjects directory. If the subjects_dir input parameter is not
None, its value will be returned, otherwise it will be obtained from
the SUBJECTS_DIR environment variable.
"""
if subjects_dir is None:
subjects_dir = os.environ.get("SUBJECTS_DIR", "")
if not subjects_dir and raise_error:
raise ValueError('The subjects directory has to be specified '
'using the subjects_dir parameter or the '
'SUBJECTS_DIR environment variable.')
if raise_error and not os.path.exists(subjects_dir):
raise ValueError('The subjects directory %s does not exist.'
% subjects_dir)
return subjects_dir
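# Hedged examples of the fallback behavior ('/data/subjects' is a placeholder):
#
#     subjects_dir = _get_subjects_dir()                   # env var, may raise
#     subjects_dir = _get_subjects_dir('/data/subjects')   # explicit arg wins
#     subjects_dir = _get_subjects_dir(raise_error=False)  # '' if unset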
def has_fsaverage(subjects_dir=None, raise_error=True, return_why=False):
"""Determine whether the user has a usable fsaverage"""
subjects_dir = _get_subjects_dir(subjects_dir, raise_error=raise_error)
out = ''
if not op.isdir(subjects_dir):
out = 'SUBJECTS_DIR not found: %s' % (subjects_dir,)
else:
fs_dir = op.join(_get_subjects_dir(subjects_dir, False), 'fsaverage')
surf_dir = op.join(fs_dir, 'surf')
if not op.isdir(fs_dir):
out = 'fsaverage not found in SUBJECTS_DIR: %s' % (fs_dir,)
elif not op.isdir(surf_dir):
out = 'fsaverage has no "surf" directory: %s' % (surf_dir,)
out = (out == '', out) if return_why else (out == '')
return out
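# Hedged example: probe for fsaverage without raising and report the reason.
#
#     has, why = has_fsaverage(raise_error=False, return_why=True)
#     if not has:
#         print('fsaverage unavailable: %s' % why)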
def requires_fsaverage():
import pytest
has, why = has_fsaverage(raise_error=False, return_why=True)
return pytest.mark.skipif(
not has, reason='Requires fsaverage subject data (%s)' % why)
def requires_imageio():
import pytest
try:
from imageio.plugins.ffmpeg import get_exe # noqa, analysis:ignore
except ImportError:
has = False
else:
has = True
return pytest.mark.skipif(not has, reason="Requires imageio with ffmpeg")
def requires_fs():
import pytest
has = ('FREESURFER_HOME' in os.environ)
return pytest.mark.skipif(
not has, reason='Requires FreeSurfer command line tools')
def _get_extra():
# Get extra label for newer freesurfer
subj_dir = _get_subjects_dir()
fname = op.join(subj_dir, 'fsaverage', 'label', 'lh.BA1.label')
return '_exvivo' if not op.isfile(fname) else '', subj_dir
|
nipy/PySurfer
|
surfer/utils.py
|
Python
|
bsd-3-clause
| 26,535
|
[
"Mayavi"
] |
f1ca8eee25ff7f6ad7acc320dbd96be0fe6a3ce96010b00bd5a6ccb71e75b396
|
# -*- coding: iso-8859-1 -*-
# Copyright (C) 2011 Daniele Simonetti
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
import sys
import os
import rules
import models
import widgets
import dialogs
import autoupdate
import sinks
import dal
import dal.query
import mimetypes
from PySide import QtGui, QtCore
#from models.chmodel import models.ATTRIBS
from l5rcmcore import *
def new_small_le(parent = None, ro = True):
le = QtGui.QLineEdit(parent)
le.setSizePolicy( QtGui.QSizePolicy.Maximum,
QtGui.QSizePolicy.Maximum )
le.setMaximumSize( QtCore.QSize(32, 24) )
le.setReadOnly(ro)
return le
def new_horiz_line(parent = None):
line = QtGui.QFrame(parent)
line.setObjectName("hline")
line.setGeometry(QtCore.QRect(3, 3, 3, 3))
line.setFrameShape(QtGui.QFrame.Shape.HLine)
line.setFrameShadow(QtGui.QFrame.Sunken)
line.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
return line
def new_vert_line(parent = None):
line = QtGui.QFrame(parent)
line.setObjectName("vline")
line.setGeometry(QtCore.QRect(320, 150, 118, 3))
line.setFrameShape(QtGui.QFrame.Shape.VLine)
line.setFrameShadow(QtGui.QFrame.Sunken)
return line
def new_item_groupbox(name, widget):
grp = QtGui.QGroupBox(name, widget.parent())
vbox = QtGui.QVBoxLayout(grp)
vbox.addWidget(widget)
return grp
def new_small_plus_bt(parent = None):
bt = QtGui.QToolButton(parent)
bt.setAutoRaise(True)
#bt.setText('+')
bt.setIcon( QtGui.QIcon.fromTheme('gtk-add', QtGui.QIcon( get_icon_path('add', (16,16)))) )
bt.setIconSize( QtCore.QSize(16,16) )
bt.setMaximumSize(24,24)
bt.setMinimumSize(16,16)
bt.setToolButtonStyle(QtCore.Qt.ToolButtonFollowStyle)
return bt
def pause_signals(wdgs):
for w in wdgs: w.blockSignals(True)
def resume_signals(wdgs):
for w in wdgs: w.blockSignals(False)
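# Hedged usage sketch: bracket programmatic widget updates so they do not
# re-trigger model updates (widget names below are placeholders).
#
#     widgets_ = [self.tx_pc_name, self.cb_pc_clan]
#     pause_signals(widgets_)
#     try:
#         pass  # refresh widget contents from the model here
#     finally:
#         resume_signals(widgets_)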
class ZoomableView(QtGui.QGraphicsView):
'''A QGraphicsView that zooms on CTRL+MouseWheel'''
def __init__(self, parent = None):
super(ZoomableView, self).__init__(parent)
self.wp = None
def wheelEvent(self, ev):
if ( ev.modifiers() & QtCore.Qt.ControlModifier ):
factor = pow(1.16, ev.delta() / 240.0)
self.scale(factor, factor)
else:
super(ZoomableView, self).wheelEvent(ev)
def keyPressEvent(self, ev):
super(ZoomableView, self).keyPressEvent(ev)
if ( ev.modifiers() & QtCore.Qt.ControlModifier ):
if ( ev.key() == QtCore.Qt.Key_0 ):
self.resetTransform()
elif ( ev.key() == QtCore.Qt.Key_Minus ):
self.scale(0.80, 0.80)
elif ( ev.key() == QtCore.Qt.Key_Plus ):
self.scale(1.20, 1.20)
def set_wallpaper(self, image):
self.wp = image
self.viewport().update()
def drawBackground(self, painter, rect):
super(ZoomableView, self).drawBackground(painter, rect)
def zoom_image():
sx, sy = 0, 0
tx, ty = rect.x(), rect.y()
sh, sw = self.wp.height(), self.wp.width()
if self.wp.width() > rect.width():
sx = (self.wp.width() - rect.width()) / 2
sw -= sx*2
else:
tx += (rect.width() - self.wp.width()) / 2
if self.wp.height() > rect.height():
sy = (self.wp.height() - rect.height()) / 2
sh -= sy*2
else:
ty += (rect.height() - self.wp.height()) / 2
return QtCore.QRectF(sx, sy, sw, sh), QtCore.QPointF(tx, ty)
if self.wp:
source_rect, target_point = zoom_image()
painter.drawImage( target_point, self.wp, source_rect )
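# Hedged usage sketch for ZoomableView ('bg.png' is a placeholder path):
#
#     view = ZoomableView()
#     view.setScene(QtGui.QGraphicsScene(view))
#     view.set_wallpaper(QtGui.QImage('bg.png'))
#     # CTRL+wheel zooms; CTRL+0 resets, CTRL+- / CTRL++ step the zoom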
class L5RMain(L5RCMCore):
default_size = QtCore.QSize(820, 720)
default_point_size = 8.25
num_tabs = 10
def __init__(self, locale = None, parent = None):
super(L5RMain, self).__init__(locale, parent)
# character file save path
self.save_path = ''
# slot sinks
self.sink1 = sinks.Sink1(self) # Menu Sink
self.sink2 = sinks.Sink2(self) # MeritFlaw Sink
self.sink3 = sinks.Sink3(self) # Weapons Sink
self.sink4 = sinks.Sink4(self) # Misc Sink
# Build interface and menus
self.build_ui()
self.build_menu()
# Build page 1
self.build_ui_page_1 ()
self.build_ui_page_2 ()
self.build_ui_page_3 ()
self.build_ui_page_4 ()
self.build_ui_page_5 ()
self.build_ui_page_6 ()
self.build_ui_page_7 ()
self.build_ui_page_8 ()
self.build_ui_page_9 ()
self.build_ui_page_10()
self.build_ui_page_about()
self.tabs.setIconSize(QtCore.QSize(24,24))
tabs_icons = ['samurai', 'music', 'burn', 'powers', 'userinfo', 'book', 'katana', 'disk', 'text', 'bag']
for i in xrange(0, self.num_tabs):
self.tabs.setTabIcon(i, QtGui.QIcon(get_tab_icon(tabs_icons[i])))
self.tabs.setTabText(i, '')
# about = app_icon
self.tabs.setTabIcon(self.num_tabs, QtGui.QIcon(get_app_icon_path()))
self.tabs.setTabText(self.num_tabs, '')
# donate button
self.setup_donate_button()
self.connect_signals()
def build_ui(self):
# Main interface widgets
self.view = ZoomableView(self)
settings = QtCore.QSettings()
self.widgets = QtGui.QFrame()
self.widgets.setFrameShape( QtGui.QFrame.StyledPanel )
self.widgets.setLineWidth ( 1 )
#self.widgets.setMaximumSize( QtCore.QSize(9999, 9999) )
self.tabs = QtGui.QTabWidget(self)
#self.setCentralWidget(self.widgets)
self.scene = QtGui.QGraphicsScene(self)
proxy_widget = self.scene.addWidget(self.widgets, QtCore.Qt.Widget)
proxy_widget.setOpacity(float(settings.value('opacity', 0.96)))
self.view.setScene(self.scene)
self.view.setInteractive(True)
self.setCentralWidget(self.view)
self.nicebar = None
mvbox = QtGui.QVBoxLayout(self.widgets)
logo = QtGui.QLabel(self)
#logo.setScaledContents(True)
#logo.setPixmap( QtGui.QPixmap( get_app_file('banner_ss.png') ) )
mvbox.addWidget(logo)
mvbox.addWidget(self.tabs)
self.mvbox = mvbox
# LOAD SETTINGS
geo = settings.value('geometry')
if geo is not None:
self.restoreGeometry(geo)
else:
self.setGeometry( QtCore.QRect(100, 100, 820, 720) )
self.ic_idx = int(settings.value('insight_calculation', 1))-1
ic_calcs = [rules.insight_calculation_1,
rules.insight_calculation_2,
rules.insight_calculation_3]
if self.ic_idx not in range(0, 3):
self.ic_idx = 0
self.ic_calc_method = ic_calcs[self.ic_idx]
self.update_background_image()
def update_background_image(self):
settings = QtCore.QSettings()
wallpaper_ = settings.value('background_image', '')
if os.path.exists( wallpaper_ ):
self.view.set_wallpaper( QtGui.QImage( wallpaper_ ) )
def build_ui_page_1(self):
mfr = QtGui.QFrame(self)
self.tabs.addTab(mfr, self.tr("Character"))
mvbox = QtGui.QVBoxLayout(mfr)
mvbox.setContentsMargins(0,0,0,0)
def add_pc_info(row, col):
fr_pc_info = QtGui.QFrame(self)
fr_pc_info.setSizePolicy(QtGui.QSizePolicy.Preferred,
QtGui.QSizePolicy.Maximum)
grid = QtGui.QGridLayout(fr_pc_info)
self.tx_pc_name = QtGui.QLineEdit(self)
self.tx_pc_rank = QtGui.QLineEdit(self)
self.cb_pc_clan = QtGui.QComboBox(self)
self.cb_pc_family = QtGui.QComboBox(self)
self.cb_pc_school = QtGui.QComboBox(self)
self.tx_pc_exp = QtGui.QLineEdit(self)
self.tx_pc_ins = QtGui.QLineEdit(self)
# 1st column
#fr_school = QtGui.QFrame(self)
#hb_school = QtGui.QHBoxLayout(fr_school)
#hb_school.setContentsMargins(0,0,0,0)
lb_school = QtGui.QLabel(self.tr("School"), self)
#bt_lock = QtGui.QToolButton( self )
#bt_lock.setCheckable(True)
#bt_lock.setToolTip(self.tr("Toggle show schools from all the clans"))
#bt_lock.setAutoRaise(True)
#bt_lock.setIcon( QtGui.QIcon(get_icon_path('lock_close',(16,16))) )
#hb_school.addWidget(lb_school)
#hb_school.addWidget(bt_lock)
# Place "generate random name" near the Name label
lb_name = QtGui.QLabel(self.tr("Name"), self)
bt_generate_male = QtGui.QToolButton( self )
bt_generate_male.setIcon( QtGui.QIcon(get_icon_path('male',(16,16))) )
bt_generate_female = QtGui.QToolButton( self )
bt_generate_female.setIcon( QtGui.QIcon(get_icon_path('female',(16,16))) )
bt_generate_male .setAutoRaise(True)
bt_generate_male .setToolTip (self.tr("Random male name"))
bt_generate_female.setAutoRaise(True)
bt_generate_female.setToolTip (self.tr("Random female name"))
hb_name = QtGui.QHBoxLayout()
hb_name.addWidget(lb_name)
hb_name.addWidget(bt_generate_male)
hb_name.addWidget(bt_generate_female)
# gender tag, connect signals
bt_generate_male .setProperty('gender', 'male')
bt_generate_female.setProperty('gender', 'female')
bt_generate_male .clicked.connect( self.sink1.generate_name )
bt_generate_female.clicked.connect( self.sink1.generate_name )
#grid.addWidget( QtGui.QLabel(self.tr("Name" ), self), 0, 0 )
grid.addLayout( hb_name, 0, 0 )
grid.addWidget( QtGui.QLabel(self.tr("Clan" ), self), 1, 0 )
grid.addWidget( QtGui.QLabel(self.tr("Family"), self), 2, 0 )
grid.addWidget( lb_school, 3, 0 )
#self.bt_school_lock = bt_lock
# 3rd column
fr_exp = QtGui.QFrame(self)
hb_exp = QtGui.QHBoxLayout(fr_exp)
hb_exp.setContentsMargins(0,0,0,0)
lb_exp = QtGui.QLabel(self.tr("Exp. Points"), self)
bt_exp = QtGui.QToolButton( self )
bt_exp.setToolTip(self.tr("Edit experience points"))
bt_exp.setAutoRaise(True)
bt_exp.setIcon( QtGui.QIcon(get_icon_path('edit',(16,16))) )
hb_exp.addWidget(lb_exp)
hb_exp.addWidget(bt_exp)
grid.addWidget( QtGui.QLabel(self.tr("Rank") , self), 0, 3 )
#grid.addWidget( QtGui.QLabel(self.tr("Exp. Points"), self), 1, 3 )
grid.addWidget( fr_exp, 1, 3 )
grid.addWidget( QtGui.QLabel(self.tr("Insight") , self), 2, 3 )
self.bt_set_exp_points = bt_exp
# 2nd column
grid.addWidget( self.tx_pc_name, 0, 1, 1, 2 )
grid.addWidget( self.cb_pc_clan, 1, 1 , 1, 2 )
grid.addWidget( self.cb_pc_family, 2, 1, 1, 2)
grid.addWidget( self.cb_pc_school, 3, 1, 1, 2)
# 4th column
grid.addWidget( self.tx_pc_rank, 0, 4, 1, 2)
grid.addWidget( self.tx_pc_exp, 1, 4, 1, 2 )
grid.addWidget( self.tx_pc_ins, 2, 4, 1, 2 )
self.tx_pc_rank.setReadOnly(True)
self.tx_pc_exp.setReadOnly(True)
self.tx_pc_ins.setReadOnly(True)
fr_pc_info.setLayout(grid)
mvbox.addWidget(fr_pc_info)
def build_trait_frame():
fr = QtGui.QFrame(self)
fr.setSizePolicy(QtGui.QSizePolicy.Preferred,
QtGui.QSizePolicy.Maximum)
hbox = QtGui.QHBoxLayout(fr)
grp = QtGui.QGroupBox(self.tr("Rings and Attributes"), self)
grid = QtGui.QGridLayout(grp)
grid.setSpacing(1)
# rings
rings = []
rings.append( ( self.tr("Earth"), new_small_le(self) ) )
rings.append( ( self.tr("Air" ), new_small_le(self) ) )
rings.append( ( self.tr("Water"), new_small_le(self) ) )
rings.append( ( self.tr("Fire" ), new_small_le(self) ) )
rings.append( ( self.tr("Void" ), new_small_le(self) ) )
# keep reference to the rings
self.rings = rings
for i in xrange(0, 4):
grid.addWidget( QtGui.QLabel( rings[i][0] ), i, 0 )
grid.addWidget( rings[i][1], i, 1 )
# void ring with plus button
void_fr = QtGui.QFrame(self)
void_hbox = QtGui.QHBoxLayout(void_fr)
void_hbox.setContentsMargins(0,0,0,0)
void_bt = new_small_plus_bt(self)
void_hbox.addWidget(rings[4][1])
void_hbox.addWidget(void_bt)
void_bt.clicked.connect(self.on_void_increase)
grid.addWidget( QtGui.QLabel( rings[4][0] ), 4, 0 )
grid.addWidget( void_fr, 4, 1 )
attribs = []
# Earth ring
attribs.append( (self.tr("Stamina" ), new_small_le(self)) )
attribs.append( (self.tr("Willpower"), new_small_le(self)) )
attribs[0][1].setProperty('attrib_id', models.ATTRIBS.STAMINA)
attribs[1][1].setProperty('attrib_id', models.ATTRIBS.WILLPOWER)
# Air ring
attribs.append( (self.tr("Reflexes" ), new_small_le(self)) )
attribs.append( (self.tr("Awareness"), new_small_le(self)) )
attribs[2][1].setProperty('attrib_id', models.ATTRIBS.REFLEXES)
attribs[3][1].setProperty('attrib_id', models.ATTRIBS.AWARENESS)
# Water ring
attribs.append( (self.tr("Strength" ), new_small_le(self)) )
attribs.append( (self.tr("Perception"), new_small_le(self)) )
attribs[4][1].setProperty('attrib_id', models.ATTRIBS.STRENGTH)
attribs[5][1].setProperty('attrib_id', models.ATTRIBS.PERCEPTION)
# Fire ring
attribs.append( (self.tr("Agility" ), new_small_le(self)) )
attribs.append( (self.tr("Intelligence"), new_small_le(self)) )
attribs[6][1].setProperty('attrib_id', models.ATTRIBS.AGILITY)
attribs[7][1].setProperty('attrib_id', models.ATTRIBS.INTELLIGENCE)
self.attribs = attribs
# map increase trait signals
self.trait_sig_mapper = QtCore.QSignalMapper(self)
def _attrib_frame(i):
fr = QtGui.QFrame(self)
hbox = QtGui.QHBoxLayout(fr)
hbox.setContentsMargins(3,0,9,0)
# small plus button
tag = str(attribs[i][1].property('attrib_id'))
bt = new_small_plus_bt(self)
hbox.addWidget( attribs[i][1] )
hbox.addWidget( bt )
self.trait_sig_mapper.setMapping(bt, tag)
bt.connect(QtCore.SIGNAL("clicked()"), self.trait_sig_mapper, QtCore.SLOT("map()"))
return fr
for i in xrange(0, 8, 2):
grid.addWidget( QtGui.QLabel( attribs[i][0] ),
(i//2) , 2, 1, 1, QtCore.Qt.AlignLeft )
grid.addWidget( _attrib_frame(i), (i//2), 3, 1, 1,
QtCore.Qt.AlignLeft )
grid.addWidget( QtGui.QLabel( attribs[i+1][0] ),
(i//2), 4, 1, 1, QtCore.Qt.AlignLeft )
grid.addWidget( _attrib_frame(i+1), (i//2), 5, 1, 1,
QtCore.Qt.AlignLeft )
grid.addWidget( QtGui.QLabel(self.tr("<b>Void Points</b>")),
4, 2, 1, 3,
QtCore.Qt.AlignHCenter )
self.void_points = widgets.CkNumWidget(count=10, parent=self)
grid.addWidget( self.void_points, 5, 2, 1, 3,
QtCore.Qt.AlignHCenter)
hbox.addWidget(grp)
return fr
def build_flags_frame():
tx_flags = [self.tr("Honor" ), self.tr("Glory" ),
self.tr("Status"), self.tr("Shadowland Taint"),
self.tr("Infamy" )]
ob_flags_p = []
ob_flags_r = []
fr = QtGui.QFrame(self)
#fr.setFrameShape(QtGui.QFrame.StyledPanel)
vbox = QtGui.QVBoxLayout(fr)
vbox.setContentsMargins(0,0,0,0)
vbox.setSpacing(0)
row = 1
for f in tx_flags:
fr_ = QtGui.QFrame(self)
lay = QtGui.QGridLayout(fr_)
lay.setContentsMargins(0,0,0,0)
lay.setSpacing(0)
lay.addWidget(QtGui.QLabel('<b>%s</b>' % f), row, 0)
l = new_small_le(self, False)
lay.addWidget(l, row, 1)
w = widgets.CkNumWidget(count=9, parent=self)
lay.addWidget(w, row+1, 0, 1, 2, QtCore.Qt.AlignHCenter)
ob_flags_p.append(w)
ob_flags_r.append(l)
vbox.addWidget(fr_)
self.pc_flags_points = ob_flags_p
self.pc_flags_rank = ob_flags_r
return fr
def add_traits_and_flags():
trait_frame = build_trait_frame()
flags_frame = build_flags_frame()
fr = QtGui.QFrame(self)
hbox = QtGui.QHBoxLayout(fr)
fr.setSizePolicy(QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Maximum)
hbox.addWidget(trait_frame)
hbox.addWidget(flags_frame)
mvbox.addWidget(fr)
def add_pc_quantities(row, col):
fr = QtGui.QFrame(self)
fr.setSizePolicy(QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Maximum)
hbox = QtGui.QHBoxLayout(fr)
monos_ = QtGui.QFont('Monospace')
monos_.setStyleHint( QtGui.QFont.Courier )
# fr.setFont(monos_)
# initiative
grp = QtGui.QGroupBox(self.tr("Initiative"), self)
#grp.setSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Maximum)
grd = QtGui.QFormLayout(grp)
self.tx_base_init = QtGui.QLineEdit(self)
self.tx_mod_init = QtGui.QLineEdit(self)
self.tx_cur_init = QtGui.QLineEdit(self)
self.tx_base_init.setReadOnly(True)
self.tx_mod_init .setReadOnly(True)
self.tx_cur_init .setReadOnly(True)
grd.addRow( self.tr("Base" ), self.tx_base_init)
grd.addRow( self.tr("Modifier"), self.tx_mod_init)
grd.addRow( self.tr("Current" ), self.tx_cur_init)
hbox.addWidget(grp, 1)
# Armor TN
grp = QtGui.QGroupBox(self.tr("Armor TN"), self)
#grp.setSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Maximum)
grd = QtGui.QFormLayout(grp)
self.tx_armor_nm = QtGui.QLineEdit(self)
self.tx_base_tn = QtGui.QLineEdit(self)
self.tx_armor_tn = QtGui.QLineEdit(self)
self.tx_armor_rd = QtGui.QLineEdit(self)
self.tx_cur_tn = QtGui.QLineEdit(self)
self.tx_armor_nm.setReadOnly(True)
self.tx_base_tn .setReadOnly(True)
self.tx_armor_tn.setReadOnly(True)
self.tx_armor_rd.setReadOnly(True)
self.tx_cur_tn .setReadOnly(True)
grd.addRow( self.tr("Name" ), self.tx_armor_nm)
grd.addRow( self.tr("Base" ), self.tx_base_tn)
grd.addRow( self.tr("Armor" ), self.tx_armor_tn)
grd.addRow( self.tr("Reduction"), self.tx_armor_rd)
grd.addRow( self.tr("Current" ), self.tx_cur_tn)
hbox.addWidget(grp, 1)
# Wounds
grp = QtGui.QGroupBox(self.tr("Wounds"), self)
#grp.setSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Maximum)
grd = QtGui.QGridLayout(grp)
wnd = []
wnd.append( (QtGui.QLabel(self), new_small_le(self), new_small_le(self)) )
wnd.append( (QtGui.QLabel(self), new_small_le(self), new_small_le(self)) )
wnd.append( (QtGui.QLabel(self), new_small_le(self), new_small_le(self)) )
wnd.append( (QtGui.QLabel(self), new_small_le(self), new_small_le(self)) )
wnd.append( (QtGui.QLabel(self), new_small_le(self), new_small_le(self)) )
wnd.append( (QtGui.QLabel(self), new_small_le(self), new_small_le(self)) )
wnd.append( (QtGui.QLabel(self), new_small_le(self), new_small_le(self)) )
wnd.append( (QtGui.QLabel(self.tr("Out"), self),
new_small_le(self), new_small_le(self)) )
self.wounds = wnd
self.wnd_lb = grp
row_ = 0
col_ = 0
for i in xrange(0, len(wnd)):
if i == 4:
col_ = 3
row_ = 0
grd.addWidget( wnd[i][0], row_, col_ )
grd.addWidget( wnd[i][1], row_, col_+1 )
grd.addWidget( wnd[i][2], row_, col_+2 )
row_ += 1
hbox.addWidget(grp, 2)
mvbox.addWidget(fr)
add_pc_info(0, 0)
mvbox.addWidget(new_horiz_line(self))
add_traits_and_flags()
mvbox.addWidget(new_horiz_line(self))
add_pc_quantities(4, 0)
def _build_generic_page(self, models_):
mfr = QtGui.QFrame(self)
vbox = QtGui.QVBoxLayout(mfr)
views_ = []
for k, t, m, d, tb in models_:
grp = QtGui.QGroupBox(k, self)
hbox = QtGui.QHBoxLayout(grp)
view = None
if t == 'table':
view = QtGui.QTableView(self)
view.setSortingEnabled(True)
view.horizontalHeader().setResizeMode(QtGui.QHeaderView.Interactive)
view.horizontalHeader().setStretchLastSection(True)
view.horizontalHeader().setCascadingSectionResizes(True)
if d is not None and len(d) == 2:
col_ = d[0]
obj_ = d[1]
view.setItemDelegateForColumn(col_, obj_)
elif t == 'list':
view = QtGui.QListView(self)
view.setModel(m)
if d is not None:
view.setItemDelegate(d)
if tb is not None:
hbox.addWidget(tb)
hbox.addWidget(view)
vbox.addWidget(grp)
views_.append(view)
return mfr, views_
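# Note (added comment): _build_generic_page consumes 5-tuples of
# (group title, 'table' or 'list', model, delegate or (column, delegate),
# toolbar) and returns the page frame plus the created views in the same
# order; the page builders below all rely on this contract.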
def _build_spell_frame(self, model, layout):
grp = QtGui.QGroupBox(self.tr("Spells"), self)
hbox = QtGui.QHBoxLayout(grp)
fr_ = QtGui.QFrame(self)
vbox = QtGui.QVBoxLayout(fr_)
vbox.setContentsMargins(3,3,3,3)
# advantages/disadvantage vertical toolbar
def _make_vertical_tb():
vtb = widgets.VerticalToolBar(self)
vtb.addStretch()
cb_buy = self.act_buy_spell
cb_remove = self.act_del_spell
cb_memo = self.act_memo_spell
self.add_spell_bt = vtb.addButton(
QtGui.QIcon(get_icon_path('buy',(16,16))),
self.tr("Add new spell"), cb_buy)
self.del_spell_bt = vtb.addButton(
QtGui.QIcon(get_icon_path('minus',(16,16))),
self.tr("Remove spell"), cb_remove)
self.memo_spell_bt = vtb.addButton(
QtGui.QIcon(get_icon_path('book',(16,16))),
self.tr("Memorize/Forget spell"), cb_memo)
self.del_spell_bt.setEnabled(False)
vtb.addStretch()
return vtb
# View
view = QtGui.QTableView(fr_)
view.setSizePolicy( QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Expanding )
view.setSortingEnabled(True)
view.horizontalHeader().setResizeMode(QtGui.QHeaderView.Interactive)
view.horizontalHeader().setStretchLastSection(True)
view.horizontalHeader().setCascadingSectionResizes(True)
view.setModel(model)
# FIXME: this line segfaults on PySide 1.1.2
#view.selectionModel().currentRowChanged.connect(self.on_spell_selected)
sm = view.selectionModel()
sm.currentRowChanged.connect(self.on_spell_selected)
self.spell_table_view = view
# Affinity/Deficiency
self.lb_affin = QtGui.QLabel(self.tr("None"), self)
self.lb_defic = QtGui.QLabel(self.tr("None"), self)
aff_fr = QtGui.QFrame(self)
aff_fr.setSizePolicy( QtGui.QSizePolicy.Preferred,
QtGui.QSizePolicy.Maximum )
fl = QtGui.QFormLayout(aff_fr)
fl.addRow(self.tr("<b><i>Affinity</i></b>" ), self.lb_affin)
fl.addRow(self.tr("<b><i>Deficiency</i></b>"), self.lb_defic)
fl.setHorizontalSpacing(60)
fl.setVerticalSpacing ( 5)
fl.setContentsMargins(0, 0, 0, 0)
vbox.addWidget(aff_fr)
vbox.addWidget(view)
hbox.addWidget(_make_vertical_tb())
hbox.addWidget(fr_)
layout.addWidget(grp)
view.doubleClicked.connect( self.sink4.on_spell_item_activate )
return view
def _build_tech_frame(self, model, layout):
grp = QtGui.QGroupBox(self.tr("Techs"), self)
hbox = QtGui.QHBoxLayout(grp)
fr_ = QtGui.QFrame(self)
vbox = QtGui.QVBoxLayout(fr_)
vbox.setContentsMargins(3,3,3,3)
# advantages/disadvantage vertical toolbar
def _make_vertical_tb():
vtb = widgets.VerticalToolBar(self)
vtb.addStretch()
cb_view = self.sink4.on_tech_item_activate
cb_replace = self.sink4.act_replace_tech
#cb_replace = self.sink4.on_tech_item_activate
self.view_tech_bt = vtb.addButton(
QtGui.QIcon(get_icon_path('view',(16,16))),
self.tr("View technique details"), cb_view)
self.replace_rank_bt = vtb.addButton(
QtGui.QIcon(get_icon_path('switch',(16,16))),
self.tr("Replace school rank"), cb_replace)
self.view_tech_bt .setEnabled(True)
self.replace_rank_bt.setEnabled(True)
vtb.addStretch()
return vtb
# View
view = QtGui.QListView(self)
view.setModel(model)
view.setItemDelegate(models.TechItemDelegate(self))
vbox.addWidget(view)
self.tech_view = view
hbox.addWidget(_make_vertical_tb())
hbox.addWidget(fr_)
layout.addWidget(grp)
view.doubleClicked.connect( self.sink4.on_tech_item_activate )
return view
def _build_kata_frame(self, model, layout):
grp = QtGui.QGroupBox(self.tr("Kata"), self)
hbox = QtGui.QHBoxLayout(grp)
fr_ = QtGui.QFrame(self)
vbox = QtGui.QVBoxLayout(fr_)
vbox.setContentsMargins(3,3,3,3)
# advantages/disadvantage vertical toolbar
def _make_vertical_tb():
vtb = widgets.VerticalToolBar(self)
vtb.addStretch()
cb_buy = self.sink2.act_buy_kata
cb_remove = self.sink2.act_del_kata
self.add_kata_bt = vtb.addButton(
QtGui.QIcon(get_icon_path('buy',(16,16))),
self.tr("Add new Kata"), cb_buy)
self.del_kata_bt = vtb.addButton(
QtGui.QIcon(get_icon_path('minus',(16,16))),
self.tr("Remove Kata"), cb_remove)
self.add_kata_bt.setEnabled(True)
self.del_kata_bt.setEnabled(True)
vtb.addStretch()
return vtb
# View
view = QtGui.QTableView(self)
view.setSizePolicy( QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Expanding )
view.setSortingEnabled(True)
view.horizontalHeader().setResizeMode(QtGui.QHeaderView.Interactive)
view.horizontalHeader().setStretchLastSection(True)
view.horizontalHeader().setCascadingSectionResizes(True)
view.setModel(model)
self.ka_table_view = view
vbox.addWidget(view)
hbox.addWidget(_make_vertical_tb())
hbox.addWidget(fr_)
layout.addWidget(grp)
return view
def _build_kiho_frame(self, model, layout):
grp = QtGui.QGroupBox(self.tr("Kiho"), self)
hbox = QtGui.QHBoxLayout(grp)
fr_ = QtGui.QFrame(self)
vbox = QtGui.QVBoxLayout(fr_)
vbox.setContentsMargins(3,3,3,3)
# advantages/disadvantage vertical toolbar
def _make_vertical_tb():
vtb = widgets.VerticalToolBar(self)
vtb.addStretch()
cb_buy = self.sink2.act_buy_kiho
cb_remove = self.sink2.act_del_kiho
cb_buy_tattoo = self.sink2.act_buy_tattoo
self.add_kiho_bt = vtb.addButton(
QtGui.QIcon(get_icon_path('buy',(16,16))),
self.tr("Add new Kiho"), cb_buy)
self.add_tattoo_bt = vtb.addButton(
QtGui.QIcon(get_icon_path('buy',(16,16))),
self.tr("Add new Tattoo"), cb_buy_tattoo)
self.del_kiho_bt = vtb.addButton(
QtGui.QIcon(get_icon_path('minus',(16,16))),
self.tr("Remove Kiho"), cb_remove)
self.add_kiho_bt.setEnabled(True)
self.del_kiho_bt.setEnabled(True)
vtb.addStretch()
return vtb
# View
view = QtGui.QTableView(self)
view.setSizePolicy( QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Expanding )
view.setSortingEnabled(True)
view.horizontalHeader().setResizeMode(QtGui.QHeaderView.Interactive)
view.horizontalHeader().setStretchLastSection(True)
view.horizontalHeader().setCascadingSectionResizes(True)
view.setModel(model)
self.ki_table_view = view
vbox.addWidget(view)
hbox.addWidget(_make_vertical_tb())
hbox.addWidget(fr_)
layout.addWidget(grp)
return view
def build_ui_page_2(self):
self.sk_view_model = models.SkillTableViewModel(self.dstore, self)
self.ma_view_model = models.MaViewModel (self.dstore, self)
# enable sorting through a proxy model
sk_sort_model = models.ColorFriendlySortProxyModel(self)
sk_sort_model.setDynamicSortFilter(True)
sk_sort_model.setSourceModel(self.sk_view_model)
# skills vertical toolbar
vtb = widgets.VerticalToolBar(self)
vtb.addStretch()
vtb.addButton(QtGui.QIcon(get_icon_path('add',(16,16))),
self.tr("Add skill rank"), self.on_buy_skill_rank)
vtb.addButton(QtGui.QIcon(get_icon_path('buy',(16,16))),
self.tr("Buy skill emphasys"), self.show_buy_emph_dlg)
vtb.addButton(QtGui.QIcon(get_icon_path('buy',(16,16))),
self.tr("Buy another skill"), self.show_buy_skill_dlg)
vtb.addStretch()
models_ = [ ("Skills", 'table', sk_sort_model, None, vtb),
(self.tr("Mastery Abilities"), 'list', self.ma_view_model,
models.MaItemDelegate(self), None) ]
frame_, views_ = self._build_generic_page(models_)
if len(views_) > 0:
self.skill_table_view = views_[0]
self.tabs.addTab(frame_, self.tr("Skills"))
def build_ui_page_3(self):
self.sp_view_model = models.SpellTableViewModel(self.dstore, self)
self.th_view_model = models.TechViewModel (self.dstore, self)
# enable sorting through a proxy model
sp_sort_model = models.ColorFriendlySortProxyModel(self)
sp_sort_model.setDynamicSortFilter(True)
sp_sort_model.setSourceModel(self.sp_view_model)
frame_ = QtGui.QFrame(self)
vbox = QtGui.QVBoxLayout(frame_)
self._build_spell_frame(sp_sort_model , vbox)
self._build_tech_frame (self.th_view_model, vbox)
self.tabs.addTab(frame_, self.tr("Techniques"))
def build_ui_page_4(self):
self.ka_view_model = models.KataTableViewModel(self.dstore, self)
self.ki_view_model = models.KihoTableViewModel(self.dstore, self)
# enable sorting through a proxy model
ka_sort_model = models.ColorFriendlySortProxyModel(self)
ka_sort_model.setDynamicSortFilter(True)
ka_sort_model.setSourceModel(self.ka_view_model)
ki_sort_model = models.ColorFriendlySortProxyModel(self)
ki_sort_model.setDynamicSortFilter(True)
ki_sort_model.setSourceModel(self.ki_view_model)
frame_ = QtGui.QFrame(self)
vbox = QtGui.QVBoxLayout(frame_)
self.kata_view = self._build_kata_frame(ka_sort_model , vbox)
self.kiho_view = self._build_kiho_frame(ki_sort_model , vbox)
self.tabs.addTab(frame_, self.tr("Powers"))
def build_ui_page_5(self):
mfr = QtGui.QFrame(self)
vbox = QtGui.QVBoxLayout(mfr)
# advantages/disadvantage vertical toolbar
def _make_vertical_tb(tag, has_edit, has_remove):
vtb = widgets.VerticalToolBar(self)
vtb.addStretch()
cb_buy = (self.sink2.act_buy_merit if tag == 'merit'
else self.sink2.act_buy_flaw)
cb_edit = (self.sink2.act_edit_merit if tag == 'merit'
else self.sink2.act_edit_flaw)
cb_remove = (self.sink2.act_del_merit if tag == 'merit'
else self.sink2.act_del_flaw)
vtb.addButton(QtGui.QIcon(get_icon_path('buy',(16,16))),
self.tr("Add Perk"), cb_buy)
if has_edit:
vtb.addButton(QtGui.QIcon(get_icon_path('edit',(16,16))),
self.tr("Edit Perk"), cb_edit)
if has_remove:
vtb.addButton(QtGui.QIcon(get_icon_path('minus',(16,16))),
self.tr("Remove Perk"), cb_remove)
vtb.addStretch()
return vtb
self.merits_view_model = models.PerkViewModel(self.dstore, 'merit')
self.flaws_view_model = models.PerkViewModel(self.dstore, 'flaws')
merit_view = QtGui.QListView(self)
merit_view.setModel(self.merits_view_model)
merit_view.setItemDelegate(models.PerkItemDelegate(self))
merit_vtb = _make_vertical_tb('merit', True, True)
fr_ = QtGui.QFrame(self)
hb_ = QtGui.QHBoxLayout(fr_)
hb_.setContentsMargins(3,3,3,3)
hb_.addWidget(merit_vtb)
hb_.addWidget(merit_view)
vbox.addWidget(new_item_groupbox(self.tr("Advantages"), fr_))
flaw_view = QtGui.QListView(self)
flaw_view.setModel(self.flaws_view_model)
flaw_view.setItemDelegate(models.PerkItemDelegate(self))
flaw_vtb = _make_vertical_tb('flaw', True, True)
fr_ = QtGui.QFrame(self)
hb_ = QtGui.QHBoxLayout(fr_)
hb_.setContentsMargins(3,3,3,3)
hb_.addWidget(flaw_vtb)
hb_.addWidget(flaw_view)
vbox.addWidget(new_item_groupbox(self.tr("Disadvantages"), fr_))
self.merit_view = merit_view
self.flaw_view = flaw_view
self.tabs.addTab(mfr, self.tr("Perks"))
def build_ui_page_6(self):
mfr = QtGui.QFrame(self)
vbox = QtGui.QVBoxLayout(mfr)
fr_ = QtGui.QFrame(self)
fr_h = QtGui.QHBoxLayout(fr_)
fr_h.setContentsMargins(0, 0, 0, 0)
fr_h.addWidget(QtGui.QLabel(self.tr("""<p><i>Select the advancement to refund and hit the button</i></p>"""), self))
bt_refund_adv = QtGui.QPushButton(self.tr("Refund"), self)
bt_refund_adv.setSizePolicy( QtGui.QSizePolicy.Maximum,
QtGui.QSizePolicy.Preferred )
bt_refund_adv.clicked.connect(self.sink1.refund_advancement)
fr_h.addWidget(bt_refund_adv)
vbox.addWidget(fr_)
self.adv_view_model = models.AdvancementViewModel(self)
lview = QtGui.QListView(self)
lview.setModel(self.adv_view_model)
lview.setItemDelegate(models.AdvancementItemDelegate(self))
vbox.addWidget(lview)
self.adv_view = lview
self.tabs.addTab(mfr, self.tr("Advancements"))
def build_ui_page_7(self):
self.melee_view_model = models.WeaponTableViewModel('melee' , self)
self.ranged_view_model = models.WeaponTableViewModel('ranged', self)
self.arrow_view_model = models.WeaponTableViewModel('arrow' , self)
def _make_sortable(model):
# enable sorting through a proxy model
sort_model_ = models.ColorFriendlySortProxyModel(self)
sort_model_.setDynamicSortFilter(True)
sort_model_.setSourceModel(model)
return sort_model_
# weapon vertical toolbar
def _make_vertical_tb(has_custom, has_edit, has_qty, filt):
vtb = widgets.VerticalToolBar(self)
vtb.setProperty('filter', filt)
vtb.addStretch()
vtb.addButton(QtGui.QIcon(get_icon_path('buy',(16,16))),
self.tr("Add weapon"), self.sink3.show_add_weapon)
if has_custom:
vtb.addButton(QtGui.QIcon(get_icon_path('custom',(16,16))),
self.tr("Add custom weapon"), self.sink3.show_add_cust_weapon)
if has_edit:
vtb.addButton(QtGui.QIcon(get_icon_path('edit',(16,16))),
self.tr("Edit weapon"), self.sink3.edit_selected_weapon)
vtb.addButton(QtGui.QIcon(get_icon_path('minus',(16,16))),
self.tr("Remove weapon"), self.sink3.remove_selected_weapon)
if has_qty:
vtb.addButton(QtGui.QIcon(get_icon_path('add',(16,16))),
self.tr("Increase Quantity"), self.sink3.on_increase_item_qty)
vtb.addButton(QtGui.QIcon(get_icon_path('minus',(16,16))),
self.tr("Decrease Quantity"), self.sink3.on_decrease_item_qty)
vtb.addStretch()
return vtb
melee_vtb = _make_vertical_tb(True, True, False, 'melee' )
ranged_vtb = _make_vertical_tb(True, True, False, 'ranged')
arrow_vtb = _make_vertical_tb(False, False, True,'arrow' )
models_ = [ (self.tr("Melee Weapons"), 'table', _make_sortable(self.melee_view_model),
None, melee_vtb),
(self.tr("Ranged Weapons"), 'table', _make_sortable(self.ranged_view_model),
None, ranged_vtb),
(self.tr("Arrows"), 'table', _make_sortable(self.arrow_view_model),
None, arrow_vtb)]
frame_, views_ = self._build_generic_page(models_)
melee_vtb .setProperty('source', views_[0])
ranged_vtb.setProperty('source', views_[1])
arrow_vtb .setProperty('source', views_[2])
self.tabs.addTab(frame_, self.tr("Weapons"))
def build_ui_page_8(self):
# modifiers
self.mods_view_model = models.ModifiersTableViewModel(self)
self.mods_view_model.user_change.connect(self.update_from_model)
def _make_sortable(model):
# enable sorting through a proxy model
sort_model_ = models.ColorFriendlySortProxyModel(self)
sort_model_.setDynamicSortFilter(True)
sort_model_.setSourceModel(model)
return sort_model_
# weapon vertical toolbar
def _make_vertical_tb():
vtb = widgets.VerticalToolBar(self)
vtb.addStretch()
vtb.addButton(QtGui.QIcon(get_icon_path('buy',(16,16))),
self.tr("Add modifier"), self.sink4.add_new_modifier)
vtb.addButton(QtGui.QIcon(get_icon_path('edit',(16,16))),
self.tr("Edit modifier"), self.sink4.edit_selected_modifier)
vtb.addButton(QtGui.QIcon(get_icon_path('minus',(16,16))),
self.tr("Remove modifier"), self.sink4.remove_selected_modifier)
vtb.addStretch()
return vtb
vtb = _make_vertical_tb()
models_ = [ (self.tr("Modifiers"), 'table', _make_sortable(self.mods_view_model),
None, vtb) ]
frame_, views_ = self._build_generic_page(models_)
self.mod_view = views_[0]
vtb .setProperty('source', self.mod_view)
self.tabs.addTab(frame_, self.tr("Modifiers"))
def build_ui_page_9(self):
mfr = QtGui.QFrame(self)
vbox = QtGui.QVBoxLayout(mfr)
#vbox.setAlignment(QtCore.Qt.AlignCenter)
#vbox.setSpacing (30)
self.tx_pc_notes = widgets.SimpleRichEditor(self)
vbox.addWidget(self.tx_pc_notes)
def build_pers_info():
grp = QtGui.QGroupBox(self.tr("Personal Informations"), self)
grp.setSizePolicy(QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Preferred)
hgrp = QtGui.QHBoxLayout(grp)
# anagraphic
afr = QtGui.QFrame(self)
afl = QtGui.QFormLayout(afr)
self.tx_pc_sex = QtGui.QLineEdit(self)
self.tx_pc_age = QtGui.QLineEdit(self)
self.tx_pc_height = QtGui.QLineEdit(self)
self.tx_pc_weight = QtGui.QLineEdit(self)
self.tx_pc_hair = QtGui.QLineEdit(self)
self.tx_pc_eyes = QtGui.QLineEdit(self)
afl.addRow( self.tr("Sex" ), self.tx_pc_sex )
afl.addRow( self.tr("Age" ), self.tx_pc_age )
afl.addRow( self.tr("Height" ), self.tx_pc_height)
afl.addRow( self.tr("Weight" ), self.tx_pc_weight)
afl.addRow( self.tr("Hair" ), self.tx_pc_hair )
afl.addRow( self.tr("Eyes" ), self.tx_pc_eyes )
hgrp.addWidget(afr)
# separator
hgrp.addWidget(new_vert_line())
# parents
bfr = QtGui.QFrame(self)
bfl = QtGui.QFormLayout(bfr)
self.tx_pc_father = QtGui.QLineEdit(self)
self.tx_pc_mother = QtGui.QLineEdit(self)
self.tx_pc_bro = QtGui.QLineEdit(self)
self.tx_pc_sis = QtGui.QLineEdit(self)
self.tx_pc_marsta = QtGui.QLineEdit(self)
self.tx_pc_spouse = QtGui.QLineEdit(self)
self.tx_pc_childr = QtGui.QLineEdit(self)
bfl.addRow( self.tr("Father" ), self.tx_pc_father)
bfl.addRow( self.tr("Mother" ), self.tx_pc_mother)
bfl.addRow( self.tr("Brothers" ), self.tx_pc_bro )
bfl.addRow( self.tr("Sisters" ), self.tx_pc_sis )
bfl.addRow( self.tr("Marital Status" ), self.tx_pc_marsta)
bfl.addRow( self.tr("Spouse" ), self.tx_pc_spouse)
bfl.addRow( self.tr("Children" ), self.tx_pc_childr)
hgrp.addWidget(bfr)
self.pers_info_widgets = [
self.tx_pc_sex, self.tx_pc_age,
self.tx_pc_height, self.tx_pc_weight,
self.tx_pc_hair, self.tx_pc_eyes,
self.tx_pc_father, self.tx_pc_mother,
self.tx_pc_bro, self.tx_pc_marsta,
self.tx_pc_sis, self.tx_pc_spouse, self.tx_pc_childr]
# link personal information widgets
self.tx_pc_sex.link = 'sex'
self.tx_pc_age.link = 'age'
self.tx_pc_height.link = 'height'
self.tx_pc_weight.link = 'weight'
self.tx_pc_hair.link = 'hair'
self.tx_pc_eyes.link = 'eyes'
self.tx_pc_father.link = 'father'
self.tx_pc_mother.link = 'mother'
self.tx_pc_bro.link = 'brothers'
self.tx_pc_sis.link = 'sisters'
self.tx_pc_marsta.link = 'marsta'
self.tx_pc_spouse.link = 'spouse'
self.tx_pc_childr.link = 'childr'
return grp
vbox.addWidget(build_pers_info())
self.tabs.addTab(mfr, self.tr("Notes"))
def build_ui_page_10(self):
self.equip_view_model = models.EquipmentListModel(self)
#self.equip_view_model.user_change.connect(self.update_from_model)
def _make_sortable(model):
# enable sorting through a proxy model
sort_model_ = models.ColorFriendlySortProxyModel(self)
sort_model_.setDynamicSortFilter(True)
sort_model_.setSourceModel(model)
return sort_model_
# weapon vertical toolbar
def _make_vertical_tb():
vtb = widgets.VerticalToolBar(self)
vtb.addStretch()
vtb.addButton(QtGui.QIcon(get_icon_path('buy',(16,16))),
self.tr("Add equipment"), self.sink4.add_equipment)
vtb.addButton(QtGui.QIcon(get_icon_path('minus',(16,16))),
self.tr("Remove equipment"), self.sink4.remove_selected_equipment)
vtb.addStretch()
return vtb
vtb = _make_vertical_tb()
models_ = [ (self.tr("Equipment"), 'list', _make_sortable(self.equip_view_model),
None, vtb) ]
frame_, views_ = self._build_generic_page(models_)
self.equip_view = views_[0]
font = self.equip_view.font()
font.setPointSizeF(11.5)
self.equip_view.setFont(font)
self.money_widget = widgets.MoneyWidget(self)
frame_.layout().setSpacing(12)
frame_.layout().addWidget(new_horiz_line(self))
frame_.layout().addWidget(self.money_widget)
self.money_widget.valueChanged.connect(self.sink4.on_money_value_changed)
#self.equip_view.setItemDelegate(models.EquipmentDelegate(self.dstore, self))
vtb .setProperty('source', self.equip_view)
self.tabs.addTab(frame_ , self.tr("Equipment"))
def build_ui_page_about(self):
mfr = QtGui.QFrame(self)
#bfr = QtGui.QFrame(self)
#hbox = QtGui.QHBoxLayout(mfr)
hbox = QtGui.QHBoxLayout()
hbox.setAlignment(QtCore.Qt.AlignCenter)
#hbox.setMargin (30)
hbox.setSpacing (30)
logo = QtGui.QLabel(self)
logo.setPixmap(QtGui.QPixmap(get_app_icon_path((64,64))))
hbox.addWidget(logo, 0, QtCore.Qt.AlignTop)
vbox = QtGui.QVBoxLayout(mfr)
vbox.setAlignment(QtCore.Qt.AlignCenter)
vbox.setSpacing (30)
info = """<html><style>a { color: palette(text); }</style><body><h1>%s</h1>
<p>Version %s</p>
<p><a href="%s">%s</a></p>
<p>Report bugs and send in your ideas <a href="%s">here</a></p>
<p>To learn more about Legend of the Five Rings, please visit
<a href="%s">L5R RPG Home Page</a>
</p>
<p>
All rights to the Legend of the Five Rings RPG are the property of
<p>
<a href="%s">Alderac Entertainment Group (AEG)</a>
</p>
</p>
<p style='color:palette(mid)'>© 2011 %s</p>
<p>Special Thanks:</p>
<p style="margin-left: 10;">
Paul Tar, Jr aka Geiko (Lots of cool stuff)</p>
<p style="margin-left: 10;">Derrick D. Cochran (OS X Distro)
</p>
</body></html>""" % ( APP_DESC,
QtGui.QApplication.applicationVersion(),
PROJECT_PAGE_LINK, PROJECT_PAGE_NAME,
BUGTRAQ_LINK, L5R_RPG_HOME_PAGE,
ALDERAC_HOME_PAGE, AUTHOR_NAME)
lb_info = QtGui.QLabel(info, self)
lb_info.setOpenExternalLinks(True)
lb_info.setWordWrap(True)
hbox.addWidget(lb_info)
def on_contact_link_activate():
url = QtCore.QUrl(L5RCM_GPLUS_PAGE)
QtGui.QDesktopServices.openUrl(url)
def on_community_link_activate():
url = QtCore.QUrl(L5RCM_GPLUS_COMM)
QtGui.QDesktopServices.openUrl(url)
bt_contact_gplus = QtGui.QCommandLinkButton("Contact me", "but bring good news", self)
bt_contact_gplus.setIcon(
QtGui.QIcon(get_icon_path('new-g-plus-icon',(16, 16))))
#bt_contact_gplus.setFlat(True)
bt_contact_gplus.clicked.connect( on_contact_link_activate )
bt_community_gplus = QtGui.QCommandLinkButton("Join the G+ Community", "for answers and support", self)
bt_community_gplus.setIcon(
QtGui.QIcon(get_icon_path('new-g-plus-icon',(16, 16))))
#bt_community_gplus.setFlat(True)
bt_community_gplus.clicked.connect( on_community_link_activate )
gplus_form = QtGui.QVBoxLayout()
gplus_form.addWidget(bt_contact_gplus )
gplus_form.addWidget(bt_community_gplus)
#gplus_form.setLabelAlignment(QtCore.Qt.AlignRight)
gplus_form.setSpacing(6)
gplus_hbox = QtGui.QHBoxLayout()
gplus_hbox.setContentsMargins(0,0,50,0)
gplus_hbox.addStretch()
gplus_hbox.addLayout(gplus_form)
vbox.addLayout(hbox)
vbox.addLayout(gplus_hbox)
self.tabs.addTab(mfr, self.tr("About"))
def build_menu(self):
settings = QtCore.QSettings()
self.app_menu_tb = QtGui.QToolButton(self.widgets)
self.app_menu = QtGui.QMenu("AppMenu", self.app_menu_tb)
# File Menu
# actions: new, open, save
new_act = QtGui.QAction(self.tr("&New Character"), self)
open_act = QtGui.QAction(self.tr("&Open Character..."), self)
save_act = QtGui.QAction(self.tr("&Save Character..."), self)
export_pdf_act = QtGui.QAction(self.tr("Ex&port as PDF..."), self)
exit_act = QtGui.QAction(self.tr("E&xit"), self)
new_act .setShortcut( QtGui.QKeySequence.New )
open_act.setShortcut( QtGui.QKeySequence.Open )
save_act.setShortcut( QtGui.QKeySequence.Save )
exit_act.setShortcut( QtGui.QKeySequence.Quit )
new_act .triggered.connect( self.sink1.new_character )
open_act.triggered.connect( self.sink1.load_character )
save_act.triggered.connect( self.sink1.save_character )
exit_act.triggered.connect( self.close )
export_pdf_act .triggered.connect( self.sink1.export_character_as_pdf )
# Advancement menu
# actions buy advancement, view advancements
resetadv_act = QtGui.QAction(self.tr("&Reset advancements" ), self)
refund_act = QtGui.QAction(self.tr("Refund last advancement"), self)
refund_act .setShortcut( QtGui.QKeySequence.Undo )
resetadv_act.triggered.connect( self.sink1.reset_adv )
refund_act .triggered.connect( self.sink1.refund_last_adv )
# Dice roller menu
dice_roll_act = QtGui.QAction(self.tr("Dice &Roller..."), self)
dice_roll_act .triggered.connect( self.sink1.show_dice_roller )
# Outfit menu
# actions, select armor, add weapon, add misc item
sel_armor_act = QtGui.QAction(self.tr("Wear Armor..." ), self)
sel_cust_armor_act = QtGui.QAction(self.tr("Wear Custom Armor..."), self)
add_weap_act = QtGui.QAction(self.tr("Add Weapon..." ), self)
add_cust_weap_act = QtGui.QAction(self.tr("Add Custom Weapon..."), self)
sel_armor_act .triggered.connect( self.sink1.show_wear_armor )
sel_cust_armor_act.triggered.connect( self.sink1.show_wear_cust_armor )
add_weap_act .triggered.connect( self.sink3.show_add_weapon )
add_cust_weap_act .triggered.connect( self.sink3.show_add_cust_weapon )
# Rules menu
set_wound_mult_act = QtGui.QAction(self.tr("Set Health Multiplier..."), self)
buy_for_free_act = QtGui.QAction(self.tr("Free Shopping" ), self)
damage_act = QtGui.QAction(self.tr("Cure/Inflict Damage...") , self)
# insight calculation submenu
m_insight_calc = self.app_menu.addMenu(self.tr("Insight Calculation"))
self.ic_act_grp = QtGui.QActionGroup(self)
ic_default_act = QtGui.QAction(self.tr("Default" ), self)
ic_no_rank1_1 = QtGui.QAction(self.tr("Ignore Rank 1 Skills" ), self)
ic_no_rank1_2 = QtGui.QAction(self.tr("Count Rank 1 School Skills"), self)
ic_default_act.setProperty('method', rules.insight_calculation_1)
ic_no_rank1_1 .setProperty('method', rules.insight_calculation_2)
ic_no_rank1_2 .setProperty('method', rules.insight_calculation_3)
ic_list = [ic_default_act, ic_no_rank1_1, ic_no_rank1_2]
for act in ic_list:
self.ic_act_grp.addAction(act)
act.setCheckable(True)
m_insight_calc.addAction (act)
ic_list[self.ic_idx].setChecked(True)
# health calculation submenu
m_health_calc = self.app_menu.addMenu(self.tr("Health Display"))
self.hm_act_grp = QtGui.QActionGroup(self)
hm_default_act = QtGui.QAction(self.tr("Default" ), self)
hm_cumulative_act = QtGui.QAction(self.tr("Health left" ), self)
hm_totwounds_act = QtGui.QAction(self.tr("Total wounds"), self)
hm_default_act .setProperty('method', 'default')
hm_cumulative_act.setProperty('method', 'stacked')
hm_totwounds_act .setProperty('method', 'wounds' )
hm_list = [hm_default_act, hm_cumulative_act, hm_totwounds_act]
hm_mode = settings.value('health_method', 'wounds')
for act in hm_list:
self.hm_act_grp.addAction(act)
act.setCheckable(True)
m_health_calc.addAction (act)
if act.property('method') == hm_mode:
act.setChecked(True)
buy_for_free_act .setCheckable(True)
buy_for_free_act .setChecked(False)
set_wound_mult_act.triggered.connect(self.sink1.on_set_wnd_mult )
damage_act .triggered.connect(self.sink1.on_damage_act )
buy_for_free_act .toggled .connect(self.sink1.on_toggle_buy_for_free)
# Data menu
import_data_act = QtGui.QAction(self.tr("Import Data pack..." ), self)
manage_data_act = QtGui.QAction(self.tr("Manage Data packs..."), self)
open_data_dir_act = QtGui.QAction(self.tr("Open Data Directory" ), self)
reload_data_act = QtGui.QAction(self.tr("Reload data" ), self)
# Background
set_background_act = QtGui.QAction(self.tr("Background image..."), self)
set_background_act.triggered.connect(self.sink4.on_set_background)
self.app_menu_tb.setAutoRaise(True)
self.app_menu_tb.setToolButtonStyle(QtCore.Qt.ToolButtonFollowStyle)
self.app_menu_tb.setPopupMode( QtGui.QToolButton.InstantPopup )
self.app_menu_tb.setIconSize( QtCore.QSize(32, 32) )
self.app_menu_tb.setIcon( QtGui.QIcon.fromTheme("application-menu", QtGui.QIcon(get_icon_path('gear', (32,32))) ))
self.app_menu_tb.setArrowType( QtCore.Qt.NoArrow )
# FILE MENU
self.app_menu.addAction(new_act )
self.app_menu.addAction(open_act)
self.app_menu.addAction(save_act)
self.app_menu.addAction(export_pdf_act)
self.app_menu.addSeparator()
# ADV
self.app_menu.addAction(resetadv_act)
self.app_menu.addAction(refund_act)
self.app_menu.addSeparator()
# TOOLS
self.app_menu.addAction(dice_roll_act)
self.app_menu.addAction(set_background_act)
self.app_menu.addSeparator()
# OUTFIT
self.app_menu.addAction(sel_armor_act)
self.app_menu.addAction(sel_cust_armor_act)
self.app_menu.addAction(add_weap_act)
self.app_menu.addAction(add_cust_weap_act)
self.app_menu.addSeparator()
# RULES
self.app_menu.addAction(set_wound_mult_act)
self.app_menu.addAction(buy_for_free_act)
self.app_menu.addSeparator()
# INSIGHT
self.app_menu.addMenu(m_insight_calc)
# HEALTH
self.app_menu.addMenu(m_health_calc)
self.app_menu.addAction(damage_act)
self.app_menu.addSeparator()
# DATA
self.app_menu.addAction(import_data_act)
self.app_menu.addAction(manage_data_act)
self.app_menu.addAction(open_data_dir_act)
self.app_menu.addAction(reload_data_act)
self.app_menu.addSeparator()
# EXIT
self.app_menu.addAction(exit_act)
self.app_menu_tb.setMenu(self.app_menu)
self.tabs.setCornerWidget(self.app_menu_tb, QtCore.Qt.TopLeftCorner)
import_data_act .triggered.connect(self.sink4.import_data_act )
manage_data_act .triggered.connect(self.sink4.manage_data_act )
open_data_dir_act.triggered.connect(self.sink4.open_data_dir_act)
reload_data_act .triggered.connect(self.sink4.reload_data_act )
def init(self):
''' second step initialization '''
pass
def setup_donate_button(self):
self.statusBar().showMessage(
self.tr("You can donate to the project by clicking on the button")
)
self.paypal_bt = QtGui.QPushButton(self)
self.paypal_bt.setIcon( QtGui.QIcon(get_icon_path('btn_donate_SM', None)) )
self.paypal_bt.setIconSize( QtCore.QSize(74, 21) )
self.paypal_bt.setFlat(True)
self.paypal_bt.clicked.connect( self.please_donate )
self.statusBar().addPermanentWidget(self.paypal_bt)
def connect_signals(self):
# only user change
self.cb_pc_clan .activated.connect( self.on_clan_change )
# user and programmatically change
self.cb_pc_family.currentIndexChanged.connect( self.on_family_change )
self.cb_pc_school.currentIndexChanged.connect( self.on_school_change )
# notify only user edit
self.tx_mod_init.editingFinished.connect( self.update_from_model )
# update model name
self.tx_pc_name.editingFinished.connect( self.on_pc_name_change )
# personal information
for widget in self.pers_info_widgets:
widget.editingFinished.connect( self.on_pers_info_change )
for widget in self.pc_flags_points:
widget.valueChanged.connect( self.on_flag_points_change )
for tx in self.pc_flags_rank:
tx.editingFinished.connect( self.on_flag_rank_change )
self.void_points.valueChanged.connect( self.on_void_points_change )
self.trait_sig_mapper.connect(QtCore.SIGNAL("mapped(const QString &)"),
self,
QtCore.SLOT("on_trait_increase(const QString &)"))
self.ic_act_grp.triggered.connect(self.on_change_insight_calculation )
self.hm_act_grp.triggered.connect(self.on_change_health_visualization)
#self.bt_school_lock.clicked.connect( self.sink1.on_unlock_school_act )
self.bt_set_exp_points.clicked.connect( self.sink1.on_set_exp_limit )
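# Note (added comment): the trait '+' buttons are multiplexed through the
# QSignalMapper created in _attrib_frame, so the single on_trait_increase(tag)
# slot below receives the 'attrib_id' string of whichever button was clicked.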
def show_nicebar(self, wdgs):
self.nicebar = QtGui.QFrame(self)
self.nicebar.setStyleSheet('''
QWidget { background: beige;}
QPushButton {
color: #333;
border: 2px solid rgb(200,200,200);
border-radius: 7px;
padding: 5px;
background: qradialgradient(cx: 0.3, cy: -0.4,
fx: 0.3, fy: -0.4, radius: 1.35, stop: 0 #fff,
stop: 1 rgb(255,170,0));
min-width: 80px;
}
QPushButton:hover {
background: qradialgradient(cx: 0.3, cy: -0.4,
fx: 0.3, fy: -0.4, radius: 1.35, stop: 0 #fff,
stop: 1 rgb(255,100,30));
}
QPushButton:pressed {
background: qradialgradient(cx: 0.4, cy: -0.1,
fx: 0.4, fy: -0.1, radius: 1.35, stop: 0 #fff,
stop: 1 rgb(255,200,50));
}
''')
self.nicebar.setMinimumSize(0, 32)
# nicebar layout
hbox = QtGui.QHBoxLayout(self.nicebar)
hbox.setContentsMargins(9,1,9,1)
for w in wdgs:
hbox.addWidget(w)
self.mvbox.insertWidget(1, self.nicebar)
self.nicebar.setVisible(True)
def hide_nicebar(self):
if not self.nicebar:
return
self.nicebar.setVisible(False)
del self.nicebar
self.nicebar = None
def on_trait_increase(self, tag):
'''Raised when the user clicks the small '+' button near traits'''
if ( self.increase_trait( int(tag) ) == CMErrors.NOT_ENOUGH_XP ):
self.not_enough_xp_advise(self)
def on_void_increase(self):
'''Raised when the user clicks the small '+' button near the void ring'''
if ( self.increase_void() == CMErrors.NOT_ENOUGH_XP ):
self.not_enough_xp_advise(self)
def do_buy_kata(self, kata):
'''attempt to buy a new kata'''
if ( self.buy_kata(kata) == CMErrors.NOT_ENOUGH_XP ):
self.not_enough_xp_advise(self)
def do_buy_kiho(self, kiho):
'''attempt to buy a new kiho'''
if ( self.buy_kiho(kiho) == CMErrors.NOT_ENOUGH_XP ):
self.not_enough_xp_advise(self)
def on_clan_change(self, text):
#self.cb_pc_family.clear()
index = self.cb_pc_clan.currentIndex()
if index < 0:
self.pc.clan = None
else:
clan_id = self.cb_pc_clan.itemData(index)
self.pc.clan = clan_id
self.load_families(self.pc.clan)
#if self.pc.unlock_schools:
# self.load_schools ()
#else:
# self.load_schools(self.pc.clan)
self.cb_pc_family.setCurrentIndex(0)
#self.cb_pc_school.setCurrentIndex(0)
def on_family_change(self, text):
index = self.cb_pc_family.currentIndex()
if index <= 0:
self.pc.set_family()
self.update_from_model()
return
uuid = self.cb_pc_family.itemData(index)
if uuid == self.pc.family:
return
# should modify step_1 character
# get family perk
family = dal.query.get_family(self.dstore, uuid)
clan = dal.query.get_clan (self.dstore, family.clanid)
if not family or not clan:
return
self.pc.set_family( family.id , family.trait, 1, [family.id, clan.id] )
self.update_from_model()
def on_school_change(self, text):
index = self.cb_pc_school.currentIndex()
if index <= 0:
self.pc.set_school()
self.update_from_model()
return
uuid = self.cb_pc_school.itemData(index)
if uuid == self.pc.current_school_id:
return
# should modify step_2 character
# get school perk
school = dal.query.get_school(self.dstore, uuid)
clan = dal.query.get_clan (self.dstore, school.clanid)
try:
self.pc.set_school(school.id, school.trait, 1, school.honor, school.tags + [school.id, clan.id])
except Exception:
self.pc.set_school(uuid, None, None, None)
for sk in school.skills:
self.pc.add_school_skill(sk.id, sk.rank, sk.emph)
# player choose ( aka wildcards )
for sk in school.skills_pc:
self.pc.add_pending_wc_skill(sk)
# get school tech rank 1
tech0 = dal.query.get_school_tech(school, 1)
# rule == techid ???
if tech0:
self.pc.set_free_school_tech(tech0.id, tech0.id)
# outfit
print('outfit', school.outfit)
self.pc.set_school_outfit( school.outfit, tuple(school.money) )
# if shugenja get universal spells
# also player should choose some spells from list
if 'shugenja' in school.tags:
count = 0
for spell in school.spells:
self.pc.add_free_spell(spell.id)
count += 1
for spell in school.spells_pc:
self.pc.add_pending_wc_spell((spell.element, spell.count, spell.tag))
count += spell.count
print('starting spells count are {0}'.format(count))
self.pc.set_school_spells_qty(count)
# affinity / deficiency
print('school: {0}, affinity: {1}, deficiency: {2}'.format(school, school.affinity, school.deficiency))
self.pc.set_affinity(school.affinity)
self.pc.set_deficiency(school.deficiency)
self.pc.get_school().affinity = school.affinity
self.pc.get_school().deficiency = school.deficiency
# free kihos ?
if school.kihos:
self.pc.free_kiho_count = school.kihos.count
self.update_from_model()
def on_pc_name_change(self):
self.pc.name = self.tx_pc_name.text()
def on_pers_info_change(self):
w = self.sender()
if hasattr(w, 'link'):
self.pc.set_property(w.link, w.text())
def on_flag_points_change(self):
fl = self.sender()
pt = fl.value
if fl == self.pc_flags_points[0]:
val = int(self.pc_flags_rank[0].text())
self.pc.set_honor( float(val + float(pt)/10 ) )
elif fl == self.pc_flags_points[1]:
val = int(self.pc_flags_rank[1].text())
self.pc.set_glory( float(val + float(pt)/10 ) )
elif fl == self.pc_flags_points[2]:
val = int(self.pc_flags_rank[2].text())
self.pc.set_status( float(val + float(pt)/10 ) )
elif fl == self.pc_flags_points[3]:
val = int(self.pc_flags_rank[3].text())
self.pc.taint = float(val + float(pt)/10 )
else:
val = int(self.pc_flags_rank[4].text())
self.pc.infamy = float(val + float(pt)/10 )
def on_flag_rank_change(self):
fl = self.sender()
val = int(fl.text())
if fl == self.pc_flags_rank[0]:
pt = self.pc_flags_points[0].value
self.pc.set_honor( float(val + float(pt)/10 ) )
elif fl == self.pc_flags_rank[1]:
pt = self.pc_flags_points[1].value
self.pc.set_glory( float(val + float(pt)/10 ) )
elif fl == self.pc_flags_rank[2]:
pt = self.pc_flags_points[2].value
self.pc.set_status( float(val + float(pt)/10 ) )
elif fl == self.pc_flags_rank[3]:
pt = self.pc_flags_points[3].value
self.pc.taint = float(val + float(pt)/10 )
else:
pt = self.pc_flags_points[4].value
self.pc.infamy = float(val + float(pt)/10 )
def on_void_points_change(self):
val = self.void_points.value
self.pc.void_points = val
def on_buy_skill_rank(self):
# get selected skill
sm_ = self.skill_table_view.selectionModel()
if sm_.hasSelection():
model_ = self.skill_table_view.model()
skill_id = model_.data(sm_.currentIndex(), QtCore.Qt.UserRole)
err_ = self.buy_next_skill_rank(skill_id)
if err_ != CMErrors.NO_ERROR:
if err_ == CMErrors.NOT_ENOUGH_XP:
self.not_enough_xp_advise(self)
return
idx = None
for i in xrange(0, self.skill_table_view.model().rowCount()):
idx = self.skill_table_view.model().index(i, 0)
if model_.data(idx, QtCore.Qt.UserRole) == skill_id:
break
if idx is not None and idx.isValid():
sm_.setCurrentIndex(idx, (QtGui.QItemSelectionModel.Select |
QtGui.QItemSelectionModel.Rows))
def act_choose_skills(self):
dlg = dialogs.SelWcSkills(self.pc, self.dstore, self)
if dlg.exec_() == QtGui.QDialog.DialogCode.Accepted:
self.pc.clear_pending_wc_skills()
self.pc.clear_pending_wc_emphs ()
self.update_from_model()
def act_memo_spell(self):
# get selected spell
sm_ = self.spell_table_view.selectionModel()
if sm_.hasSelection():
model_ = self.spell_table_view.model()
spell_itm = model_.data(sm_.currentIndex(), QtCore.Qt.UserRole)
err_ = CMErrors.NO_ERROR
if spell_itm.memo:
self.remove_advancement_item(spell_itm.adv)
else:
err_ = self.memo_spell(spell_itm.spell_id)
if err_ != CMErrors.NO_ERROR:
if err_ == CMErrors.NOT_ENOUGH_XP:
self.not_enough_xp_advise(self)
return
idx = None
for i in xrange(0, self.spell_table_view.model().rowCount()):
idx = self.spell_table_view.model().index(i, 0)
if (model_.data(idx, QtCore.Qt.UserRole).spell_id ==
spell_itm.spell_id):
break
if idx.isValid():
sm_.setCurrentIndex(idx, (QtGui.QItemSelectionModel.Select |
QtGui.QItemSelectionModel.Rows))
def act_buy_spell(self):
dlg = dialogs.SpellAdvDialog(self.pc, self.dstore, 'freeform', self)
dlg.setWindowTitle(self.tr('Add New Spell'))
dlg.set_header_text(self.tr("<center><h2>Select the spell to learn</h2></center>"))
if dlg.exec_() == QtGui.QDialog.DialogCode.Accepted:
self.update_from_model()
def act_del_spell(self):
# get selected spell
sm_ = self.spell_table_view.selectionModel()
if sm_.hasSelection():
model_ = self.spell_table_view.model()
spell_itm = model_.data(sm_.currentIndex(), QtCore.Qt.UserRole)
if spell_itm.memo: return
self.remove_spell(spell_itm.spell_id)
def on_spell_selected(self, current, previous):
# get selected spell
model_ = self.spell_table_view.model()
spell_itm = model_.data(current, QtCore.Qt.UserRole)
# toggle remove
self.del_spell_bt.setEnabled(not spell_itm.memo)
def learn_next_school_tech(self):
adv = self.pc.get_current_rank_advancement()
if not adv:
print('learn_next_school_tech, no rank advancement found')
return False
adv.school_id = self.pc.current_school_id
school_dal = dal.query.get_school(self.dstore, adv.school_id)
school_techs = sorted(school_dal.techs, key=lambda x: x.rank)
learned_tech = None
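# pick the lowest-rank technique of this school that the character
# has not learned yet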
for t in school_techs:
if not self.has_tech_rank( t.rank, school_dal.id ):
adv.tech_rank = t.rank
adv.tech_id = t.id
adv.school_rank = t.rank
learned_tech = t
break
print("learn next school tech of {}: {}, rank {}".format(adv.school_id, adv.tech_id, adv.tech_rank))
if not learned_tech:
print('I did not find any technique to learn')
return
try:
adv.desc = "{s1} {r1}, {t1}".format(
s1 = school_dal.name,
t1 = learned_tech.name,
r1 = adv.school_rank)
except Exception:
print('cannot update advancement description')
self.pc.set_dirty()
#self.pc.recalc_ranks()
self.update_from_model()
def check_rank_advancement(self):
if self.nicebar: return
if self.pc.can_advance_rank:
# HEY, NEW RANK DUDE!
# shugenja gain their per-rank spell allotment
if self.pc.has_tag('shugenja'):
self.pc.pending_spells_count = self.pc.spells_per_rank
elif self.pc.has_tag('brotherhood'):
# hey free kihos!
self.pc.free_kiho_count = 2
lb = QtGui.QLabel(self.tr("You reached the next rank, you have an opportunity"
" to decide your destiny."), self)
bt = QtGui.QPushButton(self.tr("Advance rank"), self)
bt.setSizePolicy( QtGui.QSizePolicy.Maximum,
QtGui.QSizePolicy.Preferred)
if self.pc.get_insight_rank() > 1:
bt.clicked.connect( self.show_advance_rank_dlg )
else:
bt.clicked.connect( self.sink4.show_first_school_dlg )
self.show_nicebar([lb, bt])
elif self.pc.get_current_rank_advancement() is not None:
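# an in-progress rank advancement ends once no techs, spells
# or free kihos are still pending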
rank_advancement_ended = (
not self.can_get_another_tech () and
not self.pc.can_get_other_spells() and
self.pc.free_kiho_count == 0 )
if rank_advancement_ended:
self.end_rank_advancement()
def check_school_tech_and_spells(self):
if self.nicebar: return
# Show nicebar if can get another school tech
if (self.can_get_another_tech() and
self.check_tech_school_requirements()):
self.learn_next_school_tech()
if self.pc.can_get_other_spells():
lb = QtGui.QLabel(self.tr("You now fit the requirements to learn other Spells"), self)
bt = QtGui.QPushButton(self.tr("Learn Spells"), self)
bt.setSizePolicy( QtGui.QSizePolicy.Maximum,
QtGui.QSizePolicy.Preferred)
bt.clicked.connect( self.learn_next_school_spells )
self.show_nicebar([lb, bt])
def check_free_kihos(self):
if self.nicebar: return
# Show nicebar if can get free kihos
if self.pc.free_kiho_count:
lb = QtGui.QLabel(self.tr("You can learn {0} kihos for free").format(self.pc.free_kiho_count), self)
bt = QtGui.QPushButton(self.tr("Learn Kihos"), self)
bt.setSizePolicy( QtGui.QSizePolicy.Maximum,
QtGui.QSizePolicy.Preferred)
bt.clicked.connect( self.learn_next_free_kiho )
self.show_nicebar([lb, bt])
def check_missing_requirements(self):
if self.nicebar: return
if not self.check_tech_school_requirements():
lb = QtGui.QLabel(self.tr("You need at least one rank in all school skills"
" to learn the next School Technique"), self)
bt = QtGui.QPushButton(self.tr("Buy Requirements"), self)
bt.setSizePolicy( QtGui.QSizePolicy.Maximum,
QtGui.QSizePolicy.Preferred)
bt.clicked.connect( self.buy_school_requirements )
self.show_nicebar([lb, bt])
def check_rules(self):
# HACK. fix old saves ???
for t in self.pc.get_techs():
school, tech = dal.query.get_tech(self.dstore, t)
if school is not None and tech is not None:
self.pc.add_tech(tech.id, tech.id)
else:
print('cannot load character technique')
for adv in self.pc.advans:
if adv.type == 'perk':
perk = dal.query.get_merit(self.dstore, adv.perk) or dal.query.get_flaw(self.dstore, adv.perk)
adv.rule = perk.rule
def check_affinity_wc(self):
if self.nicebar: return
# print('check affinity wc: {0}'.format(self.pc.get_affinity()))
if ( 'any' in self.pc.get_affinity() or
'nonvoid' in self.pc.get_affinity() ):
lb = QtGui.QLabel(self.tr("You school grant you to choose an elemental affinity."), self)
bt = QtGui.QPushButton(self.tr("Choose Affinity"), self)
bt.setSizePolicy( QtGui.QSizePolicy.Maximum,
QtGui.QSizePolicy.Preferred)
bt.clicked.connect( self.show_select_affinity )
self.show_nicebar([lb, bt])
elif ('any' in self.pc.get_deficiency() or
'nonvoid' in self.pc.get_deficiency() ):
lb = QtGui.QLabel(self.tr("You school grant you to choose an elemental deficiency."), self)
bt = QtGui.QPushButton(self.tr("Choose Deficiency"), self)
bt.setSizePolicy( QtGui.QSizePolicy.Maximum,
QtGui.QSizePolicy.Preferred)
bt.clicked.connect( self.show_select_deficiency )
self.show_nicebar([lb, bt])
def learn_next_school_spells(self):
#self.pc.recalc_ranks()
#dlg = dialogs.SelWcSpells(self.pc, self.dstore, self)
dlg = dialogs.SpellAdvDialog(self.pc, self.dstore, 'bounded', self)
dlg.setWindowTitle(self.tr('Choose School Spells'))
dlg.set_header_text(self.tr("<center><h2>Your school has granted you \
the right to choose some spells.</h2> \
<h3><i>Choose with care.</i></h3></center>"))
if dlg.exec_() == QtGui.QDialog.DialogCode.Accepted:
self.pc.clear_pending_wc_spells()
self.pc.pending_spells_count = 0
self.update_from_model()
def learn_next_free_kiho(self):
dlg = dialogs.KihoDialog( self.pc, self.dstore, self )
if dlg.exec_() == QtGui.QDialog.DialogCode.Accepted:
self.update_from_model()
def show_advance_rank_dlg(self):
dlg = dialogs.NextRankDlg(self.pc, self.dstore, self)
if dlg.exec_() == QtGui.QDialog.DialogCode.Accepted:
self.start_rank_advancement(dlg.new_school)
self.update_from_model()
def show_buy_skill_dlg(self):
dlg = dialogs.BuyAdvDialog(self.pc, 'skill',
self.dstore, self)
dlg.exec_()
self.update_from_model()
def show_buy_emph_dlg(self):
# get selected skill
sm_ = self.skill_table_view.selectionModel()
if sm_.hasSelection():
model_ = self.skill_table_view.model()
skill_id = model_.data(sm_.currentIndex(), QtCore.Qt.UserRole)
dlg = dialogs.BuyAdvDialog(self.pc, 'emph',
self.dstore, self)
dlg.fix_skill_id(skill_id)
dlg.exec_()
self.update_from_model()
def show_select_affinity(self):
chooses = None
if 'nonvoid' in self.pc.get_affinity():
chooses = [ models.ring_name_from_id(x).capitalize() for x in xrange(0,4) ]
else:
chooses = [ models.ring_name_from_id(x).capitalize() for x in xrange(0,5) ]
affinity, is_ok = QtGui.QInputDialog.getItem(self,
"L5R: CM",
self.tr("Select your elemental affinity"),
chooses, 0, False)
# print affinity, is_ok
if is_ok:
self.set_pc_affinity(affinity)
def show_select_deficiency(self):
chooses = None
if 'nonvoid' in self.pc.get_deficiency():
chooses = [ models.ring_name_from_id(x).capitalize() for x in xrange(0,4) ]
else:
chooses = [ models.ring_name_from_id(x).capitalize() for x in xrange(0,5) ]
deficiency, is_ok = QtGui.QInputDialog.getItem(self,
"L5R: CM",
self.tr("Select your elemental deficiency"),
chooses, 0, False)
if is_ok:
self.set_pc_deficiency(deficiency)
def load_character_from(self, path):
pause_signals( [self.tx_pc_name, self.cb_pc_clan, self.cb_pc_family,
self.cb_pc_school] )
pause_signals( self.pers_info_widgets )
if not self.pc:
self.create_new_character()
from models.chmodel import CharacterLoader
cl = CharacterLoader()
if cl.load_from_file(path):
self.pc = cl.model()
self.save_path = path
if self.debug:
self.set_debug_observer()
print('successfully loaded character from {}, insight rank: {}'.format(self.save_path, self.pc.get_insight_rank()))
#TODO: checks for books / data extensions
self.load_families(self.pc.clan)
#if self.pc.unlock_schools:
# self.load_schools ()
#else:
# self.load_schools (self.pc.clan)
self.tx_pc_notes.set_content(self.pc.extra_notes)
self.pc.insight_calculation = self.ic_calc_method
#self.check_rules()
self.update_from_model()
else:
print('character load failure')
resume_signals( [self.tx_pc_name, self.cb_pc_clan, self.cb_pc_family,
self.cb_pc_school] )
resume_signals( self.pers_info_widgets )
def load_clans(self):
# clans
self.cb_pc_clan.clear()
self.cb_pc_clan.addItem( self.tr("No Clan"), None )
for c in self.dstore.clans:
self.cb_pc_clan.addItem( c.name, c.id )
def load_schools(self, clan_id = None):
print('load schools for clan_id {0}'.format(clan_id))
self.cb_pc_school.clear()
schools = []
# TODO: Sort
schools = dal.query.get_base_schools(self.dstore)
if clan_id is not None:
schools = [x for x in schools if x.clanid == clan_id]
self.cb_pc_school.addItem( self.tr("No School"), None )
for s in schools:
self.cb_pc_school.addItem( s.name, s.id )
def load_families(self, clan_id):
print('load families for clan_id {0}'.format(clan_id))
families = []
self.cb_pc_family.clear()
if clan_id:
families = [ x for x in self.dstore.families if x.clanid == clan_id ]
self.cb_pc_family.addItem( self.tr("No Family"), None )
for f in families:
self.cb_pc_family.addItem( f.name, f.id )
def set_clan(self, clan_id):
idx = self.cb_pc_clan.currentIndex()
c_uuid = self.cb_pc_clan.itemData(idx)
if c_uuid == clan_id:
return
for i in xrange(0, self.cb_pc_clan.count()):
if self.cb_pc_clan.itemData(i) == clan_id:
self.cb_pc_clan.setCurrentIndex(i)
return
def set_family(self, family_id):
idx = self.cb_pc_family.currentIndex()
f_uuid = self.cb_pc_family.itemData(idx)
if f_uuid == family_id:
return
for i in xrange(0, self.cb_pc_family.count()):
if self.cb_pc_family.itemData(i) == family_id:
self.cb_pc_family.setCurrentIndex(i)
return
def set_school(self, school_id):
idx = self.cb_pc_school.currentIndex()
s_uuid = self.cb_pc_school.itemData(idx)
if s_uuid == school_id:
return
print('set school to {0}, current school is {1}'.format(school_id, s_uuid))
found = False
self.cb_pc_school.blockSignals(True)
for i in xrange(0, self.cb_pc_school.count()):
if self.cb_pc_school.itemData(i) == school_id:
self.cb_pc_school.setCurrentIndex(i)
found = True
break
if not found:
school = dal.query.get_school(self.dstore, school_id)
if school:
self.cb_pc_school.addItem( school.name, school.id )
self.cb_pc_school.setCurrentIndex( self.cb_pc_school.count() - 1 )
self.cb_pc_school.blockSignals(False)
def set_void_points(self, value):
if self.void_points.value == value:
return
self.void_points.set_value(value)
def set_flag(self, flag, value):
rank, points = rules.split_decimal(value)
# set rank
self.pc_flags_rank[flag].setText( str(rank) )
# set points
self.pc_flags_points[flag].set_value( int(points*10) )
def set_honor (self, value): self.set_flag(0, value)
def set_glory (self, value): self.set_flag(1, value)
def set_status (self, value): self.set_flag(2, value)
def set_taint (self, value): self.set_flag(3, value)
def set_infamy (self, value): self.set_flag(4, value)
def update_from_model(self):
pause_signals( [self.tx_pc_name, self.cb_pc_clan, self.cb_pc_family,
self.cb_pc_school] )
pause_signals( self.pers_info_widgets )
self.tx_pc_name.setText( self.pc.name )
self.set_clan ( self.pc.clan )
self.set_family ( self.pc.family )
self.set_school ( self.pc.current_school_id )
for w in self.pers_info_widgets:
if hasattr(w, 'link'):
w.setText(self.pc.get_property(w.link))
resume_signals( [self.tx_pc_name, self.cb_pc_clan, self.cb_pc_family,
self.cb_pc_school] )
resume_signals( self.pers_info_widgets )
pc_xp = self.pc.get_px()
self.tx_pc_exp.setText( '{0} / {1}'.format( pc_xp, self.pc.exp_limit ) )
# rings
for i in xrange(0, 5):
self.rings[i][1].setText( str(self.pc.get_ring_rank(i)) )
# attributes
for i in xrange(0, 8):
self.attribs[i][1].setText( str(self.pc.get_mod_attrib_rank(i)) )
# pc rank
self.tx_pc_rank.setText( str(self.pc.get_insight_rank()) )
self.tx_pc_ins .setText( str(self.pc.get_insight()) )
# pc flags
pause_signals( self.pc_flags_points )
pause_signals( self.pc_flags_rank )
pause_signals( [self.void_points] )
self.set_honor ( self.pc.get_honor () )
self.set_glory ( self.pc.get_glory () )
self.set_status ( self.pc.get_status() )
self.set_infamy ( self.pc.infamy )
self.set_taint ( self.pc.taint )
self.set_void_points( self.pc.void_points )
resume_signals( [self.void_points] )
resume_signals( self.pc_flags_points )
resume_signals( self.pc_flags_rank )
# armor
self.tx_armor_nm .setText( str(self.pc.get_armor_name()) )
self.tx_base_tn .setText( str(self.pc.get_base_tn ()) )
self.tx_armor_tn .setText( str(self.pc.get_armor_tn ()) )
self.tx_armor_rd .setText( str(self.pc.get_full_rd ()) )
self.tx_cur_tn .setText( str(self.pc.get_cur_tn ()) )
# armor description
self.tx_armor_nm.setToolTip( str(self.pc.get_armor_desc()) )
self.display_health()
self.update_wound_penalties()
self.wnd_lb.setTitle(self.tr("Health / Wounds (x%d)") % self.pc.health_multiplier)
# initiative
self.tx_base_init.setText( rules.format_rtk_t(self.pc.get_base_initiative()) )
self.tx_mod_init.setText( rules.format_rtk_t(self.pc.get_init_modifiers()) )
self.tx_cur_init.setText( rules.format_rtk_t(self.pc.get_tot_initiative()) )
# affinity / deficiency
self.lb_affin.setText(', '.join( [x.capitalize() for x in self.pc.get_affinity ()] ) )
self.lb_defic.setText(', '.join( [x.capitalize() for x in self.pc.get_deficiency()] ) )
# money
pause_signals( [self.money_widget] )
self.money_widget.set_value( self.pc.get_property('money', (0,0,0)) )
resume_signals( [self.money_widget] )
self.hide_nicebar()
# Show nicebar if pending wildcard skills
wcs = self.pc.get_pending_wc_skills()
wce = self.pc.get_pending_wc_emphs ()
if len(wcs) > 0 or len(wce) > 0:
lb = QtGui.QLabel(self.tr("Your school gives you the choice of certain skills"), self)
bt = QtGui.QPushButton(self.tr("Choose Skills"), self)
bt.setSizePolicy( QtGui.QSizePolicy.Maximum,
QtGui.QSizePolicy.Preferred)
bt.clicked.connect( self.act_choose_skills )
self.show_nicebar([lb, bt])
self.check_affinity_wc ()
self.check_rank_advancement ()
self.check_missing_requirements ()
self.check_school_tech_and_spells()
self.check_free_kihos ()
# disable step 0-1-2 if any xp are spent
has_adv = len(self.pc.advans) > 0
#self.cb_pc_clan .setEnabled( not has_adv )
#self.cb_pc_school.setEnabled( not has_adv )
#self.cb_pc_family.setEnabled( not has_adv )
# FIXME, this is temporary
self.cb_pc_school.setEnabled( False )
# Update view-models
self.sk_view_model .update_from_model(self.pc)
self.ma_view_model .update_from_model(self.pc)
self.adv_view_model .update_from_model(self.pc)
self.th_view_model .update_from_model(self.pc)
self.merits_view_model.update_from_model(self.pc)
self.flaws_view_model .update_from_model(self.pc)
self.sp_view_model .update_from_model(self.pc)
self.melee_view_model .update_from_model(self.pc)
self.ranged_view_model.update_from_model(self.pc)
self.arrow_view_model .update_from_model(self.pc)
self.mods_view_model .update_from_model(self.pc)
self.ka_view_model .update_from_model(self.pc)
self.ki_view_model .update_from_model(self.pc)
self.equip_view_model .update_from_model(self.pc)
def update_wound_penalties(self):
penalties = [0, 3, 5, 10, 15, 20, 40]
wounds = [self.tr("Healthy"), self.tr("Nicked"), self.tr("Grazed"),
self.tr("Hurt"), self.tr("Injured"), self.tr("Crippled"),
self.tr("Down")]
if self.pc.has_rule('strength_of_earth'):
# penalties are reduced by 3
penalties = [ max(0,x-3) for x in penalties]
for i in xrange(0, len(penalties)):
self.wounds[i][0].setText(
unicode.format(u'{0} (+{1})', wounds[i], penalties[i]))
# TODO toku bushi school removes some penalties
def display_health (self):
settings = QtCore.QSettings()
method = settings.value('health_method', 'wounds')
if method == 'default':
self.display_health_default()
elif method == 'wounds':
self.display_total_wounds ()
else:
self.display_health_stacked()
def display_health_default(self):
# health
for i in xrange(0, 8):
h = self.pc.get_health_rank(i)
self.wounds[i][1].setText( str(h) )
self.wounds[i][2].setText( '' )
# wounds
pc_wounds = self.pc.wounds
hr = 0
while pc_wounds and hr < 8:
w = min(pc_wounds, self.pc.get_health_rank(hr))
self.wounds[hr][2].setText( str(w) )
pc_wounds -= w
hr += 1
def display_health_stacked(self):
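# first column: cumulative health from each level down to 'Down';
# second column: health still left at that level after subtracting wounds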
# fill health level list
hl = [0]*8
for i in reversed( range(0, 8) ):
if i == 7: hl[i] = self.pc.get_health_rank(i)
else: hl[i] = self.pc.get_health_rank(i) + hl[i+1]
self.wounds[i][1].setText( str(hl[i]) )
wounds = self.pc.wounds
# fill the health left for each wound level
for i in range(0, 8):
h = self.pc.get_health_rank(i)
if h > wounds: self.wounds[i][2].setText( str(h-wounds) )
else: self.wounds[i][2].setText("")
wounds -= h
if wounds < 0: wounds = 0
def display_total_wounds(self):
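# first column: cumulative health from 'Healthy' down to each level;
# second column: total wounds absorbed up to that level, capped by it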
# fill health level list
hl = [0]*8
for i in range(0, 8):
if i == 0: hl[i] = self.pc.get_health_rank(i)
else: hl[i] = self.pc.get_health_rank(i) + hl[i-1]
self.wounds[i][1].setText( str(hl[i]) )
wounds = self.pc.wounds
h = 0
# fill the health left for each wound level
for i in range(0, 8):
h += self.pc.get_health_rank(i)
wound_rank = min(h, wounds)
if wound_rank > 0:
self.wounds[i][2].setText( str(wound_rank) )
if wounds <= h: break
def advise_successfull_import(self):
settings = QtCore.QSettings()
if settings.value('advise_successfull_import', 'true') == 'false':
return
msgBox = QtGui.QMessageBox(self)
msgBox.setWindowTitle('L5R: CM')
msgBox.setText(self.tr("Data pack imported succesfully."))
do_not_prompt_again = QtGui.QCheckBox(self.tr("Do not prompt again"), msgBox)
do_not_prompt_again.blockSignals(True) # prevent the message box from closing on click
msgBox.addButton(QtGui.QMessageBox.Ok)
msgBox.addButton(do_not_prompt_again, QtGui.QMessageBox.ActionRole)
msgBox.setDefaultButton(QtGui.QMessageBox.Ok)
msgBox.setIcon(QtGui.QMessageBox.Information)
msgBox.exec_()
if do_not_prompt_again.checkState() == QtCore.Qt.Checked:
settings.setValue('advise_successfull_import', 'false')
def advise_error(self, message, dtl = None):
msgBox = QtGui.QMessageBox(self)
msgBox.setWindowTitle('L5R: CM')
msgBox.setTextFormat(QtCore.Qt.RichText)
msgBox.setText(message)
if dtl:
msgBox.setInformativeText(dtl)
msgBox.setIcon(QtGui.QMessageBox.Critical)
msgBox.setDefaultButton(QtGui.QMessageBox.Ok)
msgBox.exec_()
def advise_warning(self, message, dtl = None):
msgBox = QtGui.QMessageBox(self)
msgBox.setTextFormat(QtCore.Qt.RichText)
msgBox.setWindowTitle('L5R: CM')
msgBox.setText(message)
if dtl:
msgBox.setInformativeText(dtl)
msgBox.setIcon(QtGui.QMessageBox.Warning)
msgBox.setDefaultButton(QtGui.QMessageBox.Ok)
msgBox.exec_()
def ask_warning(self, message, dtl = None):
msgBox = QtGui.QMessageBox(self)
msgBox.setTextFormat(QtCore.Qt.RichText)
msgBox.setWindowTitle('L5R: CM')
msgBox.setText(message)
if dtl:
msgBox.setInformativeText(dtl)
msgBox.setIcon(QtGui.QMessageBox.Warning)
msgBox.addButton(QtGui.QMessageBox.Ok)
msgBox.addButton(QtGui.QMessageBox.Cancel)
msgBox.setDefaultButton(QtGui.QMessageBox.Cancel)
return msgBox.exec_() == QtGui.QMessageBox.Ok
def ask_to_save(self):
msgBox = QtGui.QMessageBox(self)
msgBox.setWindowTitle('L5R: CM')
msgBox.setText(self.tr("The character has been modified."))
msgBox.setInformativeText(self.tr("Do you want to save your changes?"))
msgBox.addButton( QtGui.QMessageBox.Save )
msgBox.addButton( QtGui.QMessageBox.Discard )
msgBox.addButton( QtGui.QMessageBox.Cancel )
msgBox.setDefaultButton(QtGui.QMessageBox.Save)
return msgBox.exec_()
def ask_to_upgrade(self, target_version):
msgBox = QtGui.QMessageBox(self)
msgBox.setWindowTitle('L5R: CM')
msgBox.setText(self.tr("L5R: CM v%s is available for download.") % target_version)
msgBox.setInformativeText(self.tr("Do you want to open the download page?"))
msgBox.addButton( QtGui.QMessageBox.Yes )
msgBox.addButton( QtGui.QMessageBox.No )
msgBox.setDefaultButton(QtGui.QMessageBox.No)
return msgBox.exec_()
def not_enough_xp_advise(self, parent = None):
if parent is None: parent = self
QtGui.QMessageBox.warning(parent, self.tr("Not enough XP"),
self.tr("Cannot purchase.\nYou've reached the XP Limit."))
return
def closeEvent(self, ev):
# update interface last time, to set unsaved states
self.update_from_model()
# SAVE GEOMETRY
settings = QtCore.QSettings()
settings.setValue('geometry', self.saveGeometry())
if self.pc.insight_calculation == rules.insight_calculation_2:
settings.setValue('insight_calculation', 2)
elif self.pc.insight_calculation == rules.insight_calculation_3:
settings.setValue('insight_calculation', 3)
else:
settings.setValue('insight_calculation', 1)
#print('is model dirty? {0}'.format(self.pc.is_dirty()))
if self.pc.is_dirty():
resp = self.ask_to_save()
if resp == QtGui.QMessageBox.Save:
self.sink1.save_character()
elif resp == QtGui.QMessageBox.Cancel:
ev.ignore()
else:
super(L5RMain, self).closeEvent(ev)
else:
super(L5RMain, self).closeEvent(ev)
def select_save_path(self):
settings = QtCore.QSettings()
last_dir = settings.value('last_open_dir', QtCore.QDir.homePath())
fileName = QtGui.QFileDialog.getSaveFileName(
self,
self.tr("Save Character"),
last_dir,
self.tr("L5R Character files (*.l5r)"))
if len(fileName) != 2 or fileName[0] == u'':
return ''
last_dir = os.path.dirname(fileName[0])
if last_dir != '':
#print 'save last_dir: %s' % last_dir
settings.setValue('last_open_dir', last_dir)
if fileName[0].endswith('.l5r'):
return fileName[0]
return fileName[0] + '.l5r'
def select_load_path(self):
settings = QtCore.QSettings()
last_dir = settings.value('last_open_dir', QtCore.QDir.homePath())
fileName = QtGui.QFileDialog.getOpenFileName(
self,
self.tr("Load Character"),
last_dir,
self.tr("L5R Character files (*.l5r)"))
if len(fileName) != 2:
return ''
last_dir = os.path.dirname(fileName[0])
if last_dir != '':
#print 'save last_dir: %s' % last_dir
settings.setValue('last_open_dir', last_dir)
return fileName[0]
def select_export_file(self, file_ext = '.txt'):
char_name = self.pc.name
supported_ext = ['.pdf']
supported_filters = [self.tr("PDF Files(*.pdf)")]
settings = QtCore.QSettings()
last_dir = settings.value('last_open_dir', QtCore.QDir.homePath())
fileName = QtGui.QFileDialog.getSaveFileName(
self,
self.tr("Export Character"),
os.path.join(last_dir,char_name),
";;".join(supported_filters))
if len(fileName) != 2:
return ''
last_dir = os.path.dirname(fileName[0])
if last_dir != '':
settings.setValue('last_open_dir', last_dir)
if fileName[0].endswith(file_ext):
return fileName[0]
return fileName[0] + file_ext
def select_import_data_pack(self):
supported_ext = ['.zip', '.l5rcmpack']
supported_filters = [self.tr("L5R:CM Data Pack(*.l5rcmpack *.zip)"),
self.tr("Zip Archive(*.zip)")]
settings = QtCore.QSettings()
last_data_dir = settings.value('last_open_data_dir', QtCore.QDir.homePath())
fileName = QtGui.QFileDialog.getOpenFileName(
self,
self.tr("Load data pack"),
last_data_dir,
";;".join(supported_filters))
if len(fileName) != 2:
return None
last_data_dir = os.path.dirname(fileName[0])
if last_data_dir != '':
#print 'save last_dir: %s' % last_dir
settings.setValue('last_open_data_dir', last_data_dir)
return fileName[0]
def check_updates(self):
update_info = autoupdate.get_last_version()
need_update = False
if update_info is None:
return
version_str = ''
# check extended module version
if 'versionex' in update_info:
need_update = autoupdate.need_update(APP_VERSION, update_info['versionex'])
version_str = update_info['versionex']
else:
need_update = autoupdate.need_update(APP_VERSION, update_info['version'])
version_str = update_info['version']
if need_update and self.ask_to_upgrade(version_str) == QtGui.QMessageBox.Yes:
import osutil
osutil.portable_open(PROJECT_DOWNLOADS_LINK)
def on_change_insight_calculation(self):
method = self.sender().checkedAction().property('method')
self.pc.insight_calculation = method
self.update_from_model()
def on_change_health_visualization(self):
method = self.sender().checkedAction().property('method')
settings = QtCore.QSettings()
settings.setValue('health_method', method)
self.update_from_model()
def create_new_character(self):
self.sink1.new_character()
def get_health_rank(self, idx):
return self.wounds[idx][1].text()
### MAIN ###
def dump_slots(obj, out_file):
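# debug helper: write the signature and tag of every Qt slot of obj to a file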
with open(out_file, 'wt') as fobj:
mobj = obj.metaObject()
for i in xrange( mobj.methodOffset(), mobj.methodCount() ):
if mobj.method(i).methodType() == QtCore.QMetaMethod.Slot:
fobj.write(mobj.method(i).signature() + ' ' + mobj.method(i).tag() + '\n')
OPEN_CMD_SWITCH = '--open'
IMPORT_CMD_SWITCH = '--import'
DATA_CHECK_SWITCH = '--datacheck'
DATA_REPT_SWITCH = '--datareport'
DEBUG_SWITCH = '--debug'
MIME_L5R_CHAR = "applications/x-l5r-character"
MIME_L5R_PACK = "applications/x-l5r-pack"
def main():
#try:
app = QtGui.QApplication(sys.argv)
# setup mimetypes
mimetypes.add_type(MIME_L5R_CHAR, ".l5r")
mimetypes.add_type(MIME_L5R_PACK, ".l5rcmpack")
if DATA_CHECK_SWITCH in sys.argv:
import dal_check
dc = dal_check.DataCheck()
dc.check()
return
if DATA_REPT_SWITCH in sys.argv:
import dal.report
dr = dal.report.ReportBuilder('./data_packs', './data_report')
dr.build()
return
QtCore.QCoreApplication.setApplicationName(APP_NAME)
QtCore.QCoreApplication.setApplicationVersion(APP_VERSION)
QtCore.QCoreApplication.setOrganizationName(APP_ORG)
app.setWindowIcon( QtGui.QIcon( get_app_icon_path() ) )
# Setup translation
settings = QtCore.QSettings()
use_machine_locale = settings.value('use_machine_locale', 1)
app_translator = QtCore.QTranslator()
qt_translator = QtCore.QTranslator()
print('use_machine_locale', use_machine_locale, QtCore.QLocale.system().name())
if use_machine_locale == 1:
use_locale = QtCore.QLocale.system().name()
else:
use_locale = settings.value('use_locale')
print('current locale is {0}'.format(use_locale))
qt_loc = 'qt_{0}'.format(use_locale[:2])
print(qt_loc)
app_loc = get_app_file('i18n/{0}'.format(use_locale))
print(QtCore.QLibraryInfo.location(QtCore.QLibraryInfo.TranslationsPath))
qt_translator .load(qt_loc, QtCore.QLibraryInfo.location(QtCore.QLibraryInfo.TranslationsPath))
app.installTranslator(qt_translator )
app_translator.load(app_loc)
app.installTranslator(app_translator)
# start main form
print("create main form")
l5rcm = L5RMain(use_locale)
l5rcm.setWindowTitle(APP_DESC + ' v' + APP_VERSION)
l5rcm.show()
l5rcm.init()
if len(sys.argv) > 1 and DEBUG_SWITCH in sys.argv:
l5rcm.debug = True
# initialize new character
l5rcm.create_new_character()
if len(sys.argv) > 1:
if OPEN_CMD_SWITCH in sys.argv:
of = sys.argv.index(OPEN_CMD_SWITCH)
l5rcm.load_character_from(sys.argv[of+1])
elif IMPORT_CMD_SWITCH in sys.argv:
imf = sys.argv.index(IMPORT_CMD_SWITCH)
l5rcm.import_data_pack(sys.argv[imf+1])
else:
# check mimetype
mime = mimetypes.guess_type(sys.argv[1])
if mime[0] == MIME_L5R_CHAR:
l5rcm.load_character_from(sys.argv[1])
elif mime[0] == MIME_L5R_PACK:
l5rcm.import_data_pack(sys.argv[1])
# alert if no datapacks are installed
l5rcm.check_datapacks()
# check for updates
#if sys.platform != 'linux2':
l5rcm.check_updates()
sys.exit(app.exec_())
#except Exception as e:
# print("HOLYMOLY!", e)
#finally:
# print("KTHXBYE")
if __name__ == '__main__':
main()
|
KukojinOyama/l5rcm
|
l5rcm.py
|
Python
|
gpl-3.0
| 106,819
|
[
"VisIt"
] |
32c0a9db7671aae9325fea7280e1b3dd25ff371204652ba0ec2459c70b7efec0
|
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.utils import (Sortable, elapsed_time_to_string, html_escape,
is_string, normalize, py2to3, PY3)
from .tags import TagPattern
if PY3:
unicode = str
@py2to3
class Stat(Sortable):
"""Generic statistic object used for storing all the statistic values."""
def __init__(self, name):
#: Human readable identifier of the object these statistics
#: belong to. Either `All Tests` or `Critical Tests` for
#: :class:`~robot.model.totalstatistics.TotalStatistics`,
#: long name of the suite for
#: :class:`~robot.model.suitestatistics.SuiteStatistics`
#: or name of the tag for
#: :class:`~robot.model.tagstatistics.TagStatistics`
self.name = name
#: Number of passed tests.
self.passed = 0
#: Number of failed tests.
self.failed = 0
#: Number of milliseconds it took to execute.
self.elapsed = 0
self._norm_name = normalize(name, ignore='_')
def get_attributes(self, include_label=False, include_elapsed=False,
exclude_empty=True, values_as_strings=False,
html_escape=False):
attrs = {'pass': self.passed, 'fail': self.failed}
attrs.update(self._get_custom_attrs())
if include_label:
attrs['label'] = self.name
if include_elapsed:
attrs['elapsed'] = elapsed_time_to_string(self.elapsed,
include_millis=False)
if exclude_empty:
attrs = dict((k, v) for k, v in attrs.items() if v not in ('', None))
if values_as_strings:
attrs = dict((k, unicode(v if v is not None else ''))
for k, v in attrs.items())
if html_escape:
attrs = dict((k, self._html_escape(v)) for k, v in attrs.items())
return attrs
def _get_custom_attrs(self):
return {}
def _html_escape(self, item):
return html_escape(item) if is_string(item) else item
@property
def total(self):
return self.passed + self.failed
def add_test(self, test):
self._update_stats(test)
self._update_elapsed(test)
def _update_stats(self, test):
if test.passed:
self.passed += 1
else:
self.failed += 1
def _update_elapsed(self, test):
self.elapsed += test.elapsedtime
@property
def _sort_key(self):
return self._norm_name
def __nonzero__(self):
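# a statistic is truthy when none of its tests failed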
return not self.failed
def visit(self, visitor):
visitor.visit_stat(self)
class TotalStat(Stat):
"""Stores statistic values for a test run."""
type = 'total'
class SuiteStat(Stat):
"""Stores statistics values for a single suite."""
type = 'suite'
def __init__(self, suite):
Stat.__init__(self, suite.longname)
#: Identifier of the suite, e.g. `s1-s2`.
self.id = suite.id
#: Number of milliseconds it took to execute this suite,
#: including sub-suites.
self.elapsed = suite.elapsedtime
self._name = suite.name
def _get_custom_attrs(self):
return {'id': self.id, 'name': self._name}
def _update_elapsed(self, test):
pass
def add_stat(self, other):
self.passed += other.passed
self.failed += other.failed
class TagStat(Stat):
"""Stores statistic values for a single tag."""
type = 'tag'
def __init__(self, name, doc='', links=None, critical=False,
non_critical=False, combined=None):
Stat.__init__(self, name)
#: Documentation of tag as a string.
self.doc = doc
#: List of tuples in which the first value is the link URL and
#: the second is the link title. An empty list by default.
self.links = links or []
#: ``True`` if tag is considered critical, ``False`` otherwise.
self.critical = critical
#: ``True`` if tag is considered non-critical, ``False`` otherwise.
self.non_critical = non_critical
#: Pattern as a string if the tag is combined, ``None`` otherwise.
self.combined = combined
@property
def info(self):
"""Returns additional information of the tag statistics
are about. Either `critical`, `non-critical`, `combined` or an
empty string.
"""
if self.critical:
return 'critical'
if self.non_critical:
return 'non-critical'
if self.combined:
return 'combined'
return ''
def _get_custom_attrs(self):
return {'doc': self.doc, 'links': self._get_links_as_string(),
'info': self.info, 'combined': self.combined}
def _get_links_as_string(self):
return ':::'.join('%s:%s' % (title, url) for url, title in self.links)
@property
def _sort_key(self):
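# sort critical tags first, then non-critical, then combined,
# then the rest by normalized name (False sorts before True)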
return (not self.critical,
not self.non_critical,
not self.combined,
self._norm_name)
class CombinedTagStat(TagStat):
def __init__(self, pattern, name=None, doc='', links=None):
TagStat.__init__(self, name or pattern, doc, links, combined=pattern)
self.pattern = TagPattern(pattern)
def match(self, tags):
return self.pattern.match(tags)
class CriticalTagStat(TagStat):
def __init__(self, tag_pattern, name=None, critical=True, doc='',
links=None):
TagStat.__init__(self, name or unicode(tag_pattern), doc, links,
critical=critical, non_critical=not critical)
self.pattern = tag_pattern
def match(self, tags):
return self.pattern.match(tags)
|
henriqueguchi/SikuliServer
|
new/Lib/robot/model/stats.py
|
Python
|
mit
| 6,359
|
[
"VisIt"
] |
989e136326445eca54405dcef76a55a68ebb536163eb76509b613d724961197d
|
from fontbakery.checkrunner import Section
from fontbakery.fonts_profile import profile_factory
def check_filter(item_type, item_id, item):
# Filter out external tool checks for testing purposes.
if item_type == "check" and item_id in (
"com.google.fonts/check/ftxvalidator",
"com.google.fonts/check/ots",
"com.google.fonts/check/fontvalidator",
):
return False
return True
def test_external_profile():
"""Test the creation of external profiles."""
profile = profile_factory(default_section=Section("Dalton Maag OpenType"))
profile.auto_register(
globals(),
profile_imports=["fontbakery.profiles.opentype"],
filter_func=check_filter)
# Probe some tests
expected_tests = ["com.google.fonts/check/family/panose_proportion",
"com.google.fonts/check/varfont/regular_opsz_coord"]
profile.test_expected_checks(expected_tests)
# Probe tests we don't want
assert "com.google.fonts/check/ftxvalidator" not in profile._check_registry.keys()
assert len(profile.sections) > 1
def test_profile_imports():
"""
When a names array in profile_imports contained submodule names, the import
would fail.
https://github.com/googlefonts/fontbakery/issues/1886
"""
def _test(profile_imports, expected_tests, expected_conditions=tuple()):
profile = profile_factory(default_section=Section("Testing"))
profile.auto_register({}, profile_imports=profile_imports)
profile.test_expected_checks(expected_tests)
if expected_conditions:
registered_conditions = profile.conditions.keys()
for name in expected_conditions:
assert name in registered_conditions, \
f'"{name}" is expected to be registered as a condition.'
# this is in docs/writing profiles
profile_imports = [
['fontbakery.profiles', ['cmap', 'head']]
]
# Probe some tests
expected_tests = [
"com.google.fonts/check/all_glyphs_have_codepoints", # in cmap
"com.google.fonts/check/unitsperem" # in head
]
_test(profile_imports, expected_tests)
# the example from issue #1886
profile_imports = (
(
"fontbakery.profiles",
(
"cmap",
"head",
"os2",
"post",
"name",
"hhea",
"dsig",
"hmtx",
"gpos",
"kern",
"glyf",
"fvar",
"shared_conditions",
),
),
)
# Probe some tests
expected_tests = [
"com.google.fonts/check/all_glyphs_have_codepoints", # in cmap
"com.google.fonts/check/unitsperem" # in head
]
_test(profile_imports, expected_tests)
# make sure the suggested workaround still works:
# https://github.com/googlefonts/fontbakery/issues/1886#issuecomment-392535435
profile_imports = (
"fontbakery.profiles.cmap",
"fontbakery.profiles.head",
"fontbakery.profiles.os2",
"fontbakery.profiles.post",
"fontbakery.profiles.name",
"fontbakery.profiles.hhea",
"fontbakery.profiles.dsig",
"fontbakery.profiles.hmtx",
"fontbakery.profiles.gpos",
"fontbakery.profiles.kern",
"fontbakery.profiles.glyf",
"fontbakery.profiles.fvar",
"fontbakery.profiles.shared_conditions"
)
# Probe some tests
expected_tests = [
"com.google.fonts/check/all_glyphs_have_codepoints", # in cmap
"com.google.fonts/check/unitsperem" # in head
]
_test(profile_imports, expected_tests)
# cherry pick attributes from a module (instead of getting submodules)
# also from this is in docs/writing profiles
# Import just certain attributes from modules.
# Also, using absolute import module names:
profile_imports = [
# like we do in fontbakery.profiles.fvar
('fontbakery.profiles.shared_conditions', ('is_variable_font',
'regular_wght_coord', 'regular_wdth_coord', 'regular_slnt_coord',
'regular_ital_coord', 'regular_opsz_coord', 'bold_wght_coord')),
# just as an example: import a check and a dependency/condition of
# that check from the googlefonts specific profile:
('fontbakery.profiles.googlefonts', (
# "License URL matches License text on name table?"
'com_google_fonts_check_name_license_url',
# This condition is a dependency of the check above:
'familyname',
))
]
# Probe some tests
expected_tests = [
"com.google.fonts/check/name/license_url" # in googlefonts
]
expected_conditions = ('is_variable_font', 'regular_wght_coord',
'regular_wdth_coord', 'regular_slnt_coord', 'regular_ital_coord',
'regular_opsz_coord', 'bold_wght_coord', 'familyname')
_test(profile_imports, expected_tests, expected_conditions)
def test_opentype_checks_load():
profile_imports = ("fontbakery.profiles.opentype", )
profile = profile_factory(default_section=Section("OpenType Testing"))
profile.auto_register({}, profile_imports=profile_imports)
profile.test_dependencies()
def test_googlefonts_checks_load():
profile_imports = ("fontbakery.profiles.googlefonts", )
profile = profile_factory(default_section=Section("Google Fonts Testing"))
profile.auto_register({}, profile_imports=profile_imports)
profile.test_dependencies()
def test_in_and_exclude_checks():
profile_imports = ("fontbakery.profiles.opentype", )
profile = profile_factory(default_section=Section("OpenType Testing"))
profile.auto_register({}, profile_imports=profile_imports)
profile.test_dependencies()
explicit_checks = ["06", "07"] # "06" or "07" in check ID
exclude_checks = ["065", "079"] # "065" or "079" in check ID
iterargs = {"font": 1}
check_names = {
c[1].id for c in \
profile.execution_order(iterargs,
explicit_checks=explicit_checks,
exclude_checks=exclude_checks)
}
check_names_expected = set()
for section in profile.sections:
for check in section.checks:
if any(i in check.id
for i in explicit_checks) and \
not any(x in check.id
for x in exclude_checks):
check_names_expected.add(check.id)
assert check_names == check_names_expected
def test_in_and_exclude_checks_default():
profile_imports = ("fontbakery.profiles.opentype",)
profile = profile_factory(default_section=Section("OpenType Testing"))
profile.auto_register({}, profile_imports=profile_imports)
profile.test_dependencies()
explicit_checks = None # "All checks aboard"
exclude_checks = None # "No checks left behind"
iterargs = {"font": 1}
check_names = {
c[1].id for c in \
profile.execution_order(iterargs,
explicit_checks=explicit_checks,
exclude_checks=exclude_checks)
}
check_names_expected = set()
for section in profile.sections:
for check in section.checks:
check_names_expected.add(check.id)
assert check_names == check_names_expected
|
graphicore/fontbakery
|
tests/profiles/external_profile_test.py
|
Python
|
apache-2.0
| 7,478
|
[
"Dalton"
] |
ffe0e0b27b5a7577bf146d5c51754a4a865849b1fddf2d144ea6d6e771b0e96c
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2012 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
from stoqlib.domain.sale import Delivery
from stoqlib.gui.search.deliverysearch import DeliverySearch
from stoqlib.gui.test.uitestutils import GUITest
from stoqlib.lib.dateutils import localdate
class TestDeliverySearch(GUITest):
def _show_search(self):
search = DeliverySearch(self.store)
search.search.refresh()
search.results.select(search.results[0])
return search
def _create_domain(self):
address = self.create_address()
service_item = self.create_sale_item()
service_item.sale.identifier = 10
transporter = self.create_transporter(name=u'Hall')
delivery = Delivery(transporter=transporter,
address=address,
service_item=service_item,
open_date=localdate(2012, 1, 1).date(),
store=self.store)
delivery.tracking_code = u'45'
service_item = self.create_sale_item()
service_item.sale.identifier = 20
transporter = self.create_transporter(name=u'Torvalds')
delivery = Delivery(transporter=transporter,
address=address,
service_item=service_item,
open_date=localdate(2012, 2, 2).date(),
deliver_date=localdate(2012, 3, 3).date(),
receive_date=localdate(2012, 4, 4).date(),
store=self.store)
delivery.tracking_code = u'78'
delivery.status = Delivery.STATUS_RECEIVED
def test_search(self):
self._create_domain()
search = self._show_search()
self.check_search(search, 'delivery-no-filter')
search.set_searchbar_search_string('45')
search.search.refresh()
self.check_search(search, 'delivery-string-filter')
search.set_searchbar_search_string('')
search.status_filter.set_state(Delivery.STATUS_RECEIVED)
search.search.refresh()
self.check_search(search, 'delivery-status-filter')
|
tiagocardosos/stoq
|
stoqlib/gui/test/test_deliverysearch.py
|
Python
|
gpl-2.0
| 2,977
|
[
"VisIt"
] |
21c51b7a7b1205ee2e3c680717eefada28ad2a83cb6b99c163f5cae132ec822a
|
"""Convolutional/Variational autoencoder, including demonstration of
training such a network on MNIST, CelebNet and the film, "Sita Sings The Blues"
using an image pipeline.
Copyright Parag K. Mital, January 2016
"""
import tensorflow as tf
import numpy as np
import os
from libs.dataset_utils import create_input_pipeline
from libs.datasets import CELEB, MNIST
from libs.batch_norm import batch_norm
from libs import utils
def VAE(input_shape=[None, 784],
n_filters=[64, 64, 64],
filter_sizes=[4, 4, 4],
n_hidden=32,
n_code=2,
activation=tf.nn.tanh,
dropout=False,
denoising=False,
convolutional=False,
variational=False):
"""(Variational) (Convolutional) (Denoising) Autoencoder.
Uses tied weights.
Parameters
----------
input_shape : list, optional
Shape of the input to the network. e.g. for MNIST: [None, 784].
n_filters : list, optional
Number of filters for each layer.
If convolutional=True, this refers to the total number of output
filters to create for each layer, with each layer's number of output
filters as a list.
If convolutional=False, then this refers to the total number of neurons
for each layer in a fully connected network.
filter_sizes : list, optional
Only applied when convolutional=True. This refers to the ksize (height
and width) of each convolutional layer.
n_hidden : int, optional
Only applied when variational=True. This refers to the first fully
connected layer prior to the variational embedding, directly after
the encoding. After the variational embedding, another fully connected
layer is created with the same size prior to decoding. Set to 0 to
not use an additional hidden layer.
n_code : int, optional
Only applied when variational=True. This refers to the number of
latent Gaussians to sample for creating the inner most encoding.
activation : function, optional
Activation function to apply to each layer, e.g. tf.nn.relu
dropout : bool, optional
Whether or not to apply dropout. If using dropout, you must feed a
value for 'keep_prob', as returned in the dictionary. 1.0 means no
dropout is used. 0.0 means every connection is dropped. Sensible
values are between 0.5-0.8.
denoising : bool, optional
Whether or not to apply denoising. If using denoising, you must feed a
value for 'corrupt_prob', as returned in the dictionary. 1.0 means no
corruption is used. 0.0 means every feature is corrupted. Sensible
values are between 0.5-0.8.
convolutional : bool, optional
Whether or not to use a convolutional network or else a fully connected
network will be created. This effects the n_filters parameter's
meaning.
variational : bool, optional
Whether or not to create a variational embedding layer. This will
create a fully connected layer after the encoding, if `n_hidden` is
greater than 0, then will create a multivariate gaussian sampling
layer, then another fully connected layer. The size of the fully
connected layers are determined by `n_hidden`, and the size of the
sampling layer is determined by `n_code`.
Returns
-------
model : dict
{
'cost': Tensor to optimize.
'Ws': All weights of the encoder.
'x': Input Placeholder
'z': Inner most encoding Tensor (latent features)
'y': Reconstruction of the Decoder
'keep_prob': Amount to keep when using Dropout
'corrupt_prob': Amount to corrupt when using Denoising
'train': Set to True when training/Applies to Batch Normalization.
}
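Examples
--------
Minimal sketch of building the model (mirrors `test_mnist` below;
the parameter values are illustrative only):
>>> ae = VAE(input_shape=[None, 784], n_filters=[512, 256],
... n_hidden=64, n_code=2, activation=tf.nn.sigmoid,
... convolutional=False, variational=True)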
"""
# network input / placeholders for train (bn) and dropout
x = tf.placeholder(tf.float32, input_shape, 'x')
phase_train = tf.placeholder(tf.bool, name='phase_train')
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
corrupt_prob = tf.placeholder(tf.float32, [1])
# apply noise if denoising
x_ = (utils.corrupt(x) * corrupt_prob + x * (1 - corrupt_prob)) if denoising else x
# 2d -> 4d if convolution
x_tensor = utils.to_tensor(x_) if convolutional else x_
current_input = x_tensor
Ws = []
shapes = []
# Build the encoder
for layer_i, n_output in enumerate(n_filters):
with tf.variable_scope('encoder/{}'.format(layer_i)):
shapes.append(current_input.get_shape().as_list())
if convolutional:
h, W = utils.conv2d(x=current_input,
n_output=n_output,
k_h=filter_sizes[layer_i],
k_w=filter_sizes[layer_i])
else:
h, W = utils.linear(x=current_input,
n_output=n_output)
h = activation(batch_norm(h, phase_train, 'bn' + str(layer_i)))
if dropout:
h = tf.nn.dropout(h, keep_prob)
Ws.append(W)
current_input = h
shapes.append(current_input.get_shape().as_list())
with tf.variable_scope('variational'):
if variational:
dims = current_input.get_shape().as_list()
flattened = utils.flatten(current_input)
if n_hidden:
h = utils.linear(flattened, n_hidden, name='W_fc')[0]
h = activation(batch_norm(h, phase_train, 'fc/bn'))
if dropout:
h = tf.nn.dropout(h, keep_prob)
else:
h = flattened
z_mu = utils.linear(h, n_code, name='mu')[0]
z_log_sigma = 0.5 * utils.linear(h, n_code, name='log_sigma')[0]
# Sample from noise distribution p(eps) ~ N(0, 1)
epsilon = tf.random_normal(
tf.stack([tf.shape(x)[0], n_code]))
# Sample from posterior
z = z_mu + tf.multiply(epsilon, tf.exp(z_log_sigma))
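# reparameterization trick: z is a deterministic function of
# (z_mu, z_log_sigma) plus external noise, so gradients can
# flow back through the encoder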
if n_hidden:
h = utils.linear(z, n_hidden, name='fc_t')[0]
h = activation(batch_norm(h, phase_train, 'fc_t/bn'))
if dropout:
h = tf.nn.dropout(h, keep_prob)
else:
h = z
size = dims[1] * dims[2] * dims[3] if convolutional else dims[1]
h = utils.linear(h, size, name='fc_t2')[0]
current_input = activation(batch_norm(h, phase_train, 'fc_t2/bn'))
if dropout:
current_input = tf.nn.dropout(current_input, keep_prob)
if convolutional:
current_input = tf.reshape(
current_input, tf.stack([
tf.shape(current_input)[0],
dims[1],
dims[2],
dims[3]]))
else:
z = current_input
shapes.reverse()
n_filters.reverse()
Ws.reverse()
n_filters += [input_shape[-1]]
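# mirror the encoder: the recorded shapes/filters are reversed so the
# decoder rebuilds the input resolution, with the input channel count
# appended as the final number of outputs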
# %%
# Decoding layers
for layer_i, n_output in enumerate(n_filters[1:]):
with tf.variable_scope('decoder/{}'.format(layer_i)):
shape = shapes[layer_i + 1]
if convolutional:
h, W = utils.deconv2d(x=current_input,
n_output_h=shape[1],
n_output_w=shape[2],
n_output_ch=shape[3],
n_input_ch=shapes[layer_i][3],
k_h=filter_sizes[layer_i],
k_w=filter_sizes[layer_i])
else:
h, W = utils.linear(x=current_input,
n_output=n_output)
h = activation(batch_norm(h, phase_train, 'dec/bn' + str(layer_i)))
if dropout:
h = tf.nn.dropout(h, keep_prob)
current_input = h
y = current_input
x_flat = utils.flatten(x)
y_flat = utils.flatten(y)
# l2 loss
loss_x = tf.reduce_sum(tf.squared_difference(x_flat, y_flat), 1)
if variational:
# variational lower bound, kl-divergence
loss_z = -0.5 * tf.reduce_sum(
1.0 + 2.0 * z_log_sigma -
tf.square(z_mu) - tf.exp(2.0 * z_log_sigma), 1)
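# closed form of KL(N(mu, sigma^2) || N(0, 1)) summed over code
# dimensions: -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2),
# with 2 * z_log_sigma = log(sigma^2)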
# add l2 loss
cost = tf.reduce_mean(loss_x + loss_z)
else:
# just optimize l2 loss
cost = tf.reduce_mean(loss_x)
return {'cost': cost, 'Ws': Ws,
'x': x, 'z': z, 'y': y,
'keep_prob': keep_prob,
'corrupt_prob': corrupt_prob,
'train': phase_train}
def train_vae(files,
input_shape,
learning_rate=0.0001,
batch_size=100,
n_epochs=50,
n_examples=10,
crop_shape=[64, 64, 3],
crop_factor=0.8,
n_filters=[100, 100, 100, 100],
n_hidden=256,
n_code=50,
convolutional=True,
variational=True,
filter_sizes=[3, 3, 3, 3],
dropout=True,
keep_prob=0.8,
activation=tf.nn.relu,
img_step=100,
save_step=100,
ckpt_name="vae.ckpt"):
"""General purpose training of a (Variational) (Convolutional) Autoencoder.
Supply a list of file paths to images, and this will do everything else.
Parameters
----------
files : list of strings
List of paths to images.
input_shape : list
Must define what the input image's shape is.
learning_rate : float, optional
Learning rate.
batch_size : int, optional
Batch size.
n_epochs : int, optional
Number of epochs.
n_examples : int, optional
Number of example to use while demonstrating the current training
iteration's reconstruction. Creates a square montage, so make
sure int(sqrt(n_examples))**2 = n_examples, e.g. 16, 25, 36, ... 100.
crop_shape : list, optional
Size to centrally crop the image to.
crop_factor : float, optional
Resize factor to apply before cropping.
n_filters : list, optional
Same as VAE's n_filters.
n_hidden : int, optional
Same as VAE's n_hidden.
n_code : int, optional
Same as VAE's n_code.
convolutional : bool, optional
Use convolution or not.
variational : bool, optional
Use variational layer or not.
filter_sizes : list, optional
Same as VAE's filter_sizes.
dropout : bool, optional
Use dropout or not
keep_prob : float, optional
Percent of keep for dropout.
activation : function, optional
Which activation function to use.
img_step : int, optional
How often to save training images showing the manifold and
reconstruction.
save_step : int, optional
How often to save checkpoints.
ckpt_name : str, optional
Checkpoints will be named as this, e.g. 'model.ckpt'
"""
batch = create_input_pipeline(
files=files,
batch_size=batch_size,
n_epochs=n_epochs,
crop_shape=crop_shape,
crop_factor=crop_factor,
shape=input_shape)
ae = VAE(input_shape=[None] + crop_shape,
convolutional=convolutional,
variational=variational,
n_filters=n_filters,
n_hidden=n_hidden,
n_code=n_code,
dropout=dropout,
filter_sizes=filter_sizes,
activation=activation)
# Create a manifold of our inner most layer to show
# example reconstructions. This is one way to see
# what the "embedding" or "latent space" of the encoder
# is capable of encoding, though note that this is just
# a random hyperplane within the latent space, and does not
# encompass all possible embeddings.
zs = np.random.uniform(
-1.0, 1.0, [4, n_code]).astype(np.float32)
zs = utils.make_latent_manifold(zs, n_examples)
optimizer = tf.train.AdamOptimizer(
learning_rate=learning_rate).minimize(ae['cost'])
# We create a session to use the graph
sess = tf.Session()
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
# This will handle our threaded image pipeline
coord = tf.train.Coordinator()
# Ensure no more changes to graph
tf.get_default_graph().finalize()
# Start up the queues for handling the image pipeline
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
if os.path.exists(ckpt_name + '.index') or os.path.exists(ckpt_name):
saver.restore(sess, ckpt_name)
# Fit all training data
t_i = 0
batch_i = 0
epoch_i = 0
cost = 0
n_files = len(files)
test_xs = sess.run(batch) / 255.0
utils.montage(test_xs, 'test_xs.png')
try:
while not coord.should_stop() and epoch_i < n_epochs:
batch_i += 1
batch_xs = sess.run(batch) / 255.0
train_cost = sess.run([ae['cost'], optimizer], feed_dict={
ae['x']: batch_xs, ae['train']: True,
ae['keep_prob']: keep_prob})[0]
print(batch_i, train_cost)
cost += train_cost
if batch_i % n_files == 0:
print('epoch:', epoch_i)
print('average cost:', cost / batch_i)
cost = 0
batch_i = 0
epoch_i += 1
if batch_i % img_step == 0:
# Plot example reconstructions from latent layer
recon = sess.run(
ae['y'], feed_dict={
ae['z']: zs,
ae['train']: False,
ae['keep_prob']: 1.0})
utils.montage(recon.reshape([-1] + crop_shape),
'manifold_%08d.png' % t_i)
# Plot example reconstructions
recon = sess.run(
ae['y'], feed_dict={ae['x']: test_xs,
ae['train']: False,
ae['keep_prob']: 1.0})
print('reconstruction (min, max, mean):',
recon.min(), recon.max(), recon.mean())
utils.montage(recon.reshape([-1] + crop_shape),
'reconstruction_%08d.png' % t_i)
t_i += 1
if batch_i % save_step == 0:
# Save the variables to disk.
saver.save(sess, ckpt_name,
global_step=batch_i,
write_meta_graph=False)
except tf.errors.OutOfRangeError:
print('Done.')
finally:
# One of the threads has issued an exception. So let's tell all the
# threads to shutdown.
coord.request_stop()
# Wait until all threads have finished.
coord.join(threads)
# Clean up the session.
sess.close()
# %%
def test_mnist():
"""Train an autoencoder on MNIST.
This function will train an autoencoder on MNIST and also
save many image files during the training process, demonstrating
the latent space of the inner most dimension of the encoder,
as well as reconstructions of the decoder.
"""
# load MNIST
n_code = 2
mnist = MNIST(split=[0.8, 0.1, 0.1])
ae = VAE(input_shape=[None, 784], n_filters=[512, 256],
n_hidden=64, n_code=n_code, activation=tf.nn.sigmoid,
convolutional=False, variational=True)
n_examples = 100
zs = np.random.uniform(
-1.0, 1.0, [4, n_code]).astype(np.float32)
zs = utils.make_latent_manifold(zs, n_examples)
learning_rate = 0.02
optimizer = tf.train.AdamOptimizer(
learning_rate=learning_rate).minimize(ae['cost'])
# We create a session to use the graph
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# Fit all training data
t_i = 0
batch_i = 0
batch_size = 200
n_epochs = 10
test_xs = mnist.test.images[:n_examples]
utils.montage(test_xs.reshape((-1, 28, 28)), 'test_xs.png')
for epoch_i in range(n_epochs):
train_i = 0
train_cost = 0
for batch_xs, _ in mnist.train.next_batch(batch_size):
train_cost += sess.run([ae['cost'], optimizer], feed_dict={
ae['x']: batch_xs, ae['train']: True, ae['keep_prob']: 1.0})[0]
train_i += 1
if batch_i % 10 == 0:
# Plot example reconstructions from latent layer
recon = sess.run(
ae['y'], feed_dict={
ae['z']: zs,
ae['train']: False,
ae['keep_prob']: 1.0})
m = utils.montage(recon.reshape((-1, 28, 28)),
'manifold_%08d.png' % t_i)
# Plot example reconstructions
recon = sess.run(
ae['y'], feed_dict={ae['x']: test_xs,
ae['train']: False,
ae['keep_prob']: 1.0})
m = utils.montage(recon.reshape(
(-1, 28, 28)), 'reconstruction_%08d.png' % t_i)
t_i += 1
batch_i += 1
valid_i = 0
valid_cost = 0
for batch_xs, _ in mnist.valid.next_batch(batch_size):
valid_cost += sess.run([ae['cost']], feed_dict={
ae['x']: batch_xs, ae['train']: False, ae['keep_prob']: 1.0})[0]
valid_i += 1
print('train:', train_cost / train_i, 'valid:', valid_cost / valid_i)
def test_celeb():
"""Train an autoencoder on Celeb Net.
"""
files = CELEB()
train_vae(
files=files,
input_shape=[218, 178, 3],
batch_size=100,
n_epochs=50,
crop_shape=[64, 64, 3],
crop_factor=0.8,
convolutional=True,
variational=True,
n_filters=[100, 100, 100],
n_hidden=250,
n_code=100,
dropout=True,
filter_sizes=[3, 3, 3],
activation=tf.nn.sigmoid,
ckpt_name='./celeb.ckpt')
def test_sita():
"""Train an autoencoder on Sita Sings The Blues.
"""
if not os.path.exists('sita'):
os.system('wget http://ossguy.com/sita/Sita_Sings_the_Blues_640x360_XviD.avi')
os.mkdir('sita')
os.system('ffmpeg -i Sita_Sings_the_Blues_640x360_XviD.avi -r 60 -f' +
' image2 -s 160x90 sita/sita-%08d.jpg')
files = [os.path.join('sita', f) for f in os.listdir('sita')]
train_vae(
files=files,
input_shape=[90, 160, 3],
batch_size=100,
n_epochs=50,
crop_shape=[90, 160, 3],
crop_factor=1.0,
convolutional=True,
variational=True,
n_filters=[100, 100, 100],
n_hidden=250,
n_code=100,
dropout=True,
filter_sizes=[3, 3, 3],
activation=tf.nn.sigmoid,
ckpt_name='./sita.ckpt')
if __name__ == '__main__':
test_celeb()
|
alvaroing12/CADL
|
session-5/libs/vae.py
|
Python
|
apache-2.0
| 19,303
|
[
"Gaussian"
] |
5783ea54ceeec7c8a07e8c53e698abb8fd5626745801153631bd295a1b5592fe
|
#!/usr/bin/env python
# add paths
import os, sys
for p in os.environ['PATH'].split(':'): sys.path.append(p)
# import modules
from re import findall
from os.path import split
from itertools import product
from optparse import OptionParser
from netCDF4 import Dataset as nc
from numpy.ma import masked_array
from metrics import MetricsWrapper
from filespecs import MultimetricsFile
from numpy import where, ones, zeros, logical_and, arange
import ruamel.yaml
parser = OptionParser()
parser.add_option("-i", "--infile", dest = "infile", default = "", type = "string",
help = "Input bias-corrected file", metavar = "FILE")
parser.add_option("-r", "--reffile", dest = "reffile", default = "", type = "string",
help = "Reference data netcdf file", metavar = "FILE")
parser.add_option("-a", "--agglvl", dest = "agglvl", default = "gadm0", type = "string",
help = "Aggregation level (e.g., gadm0, fpu, kg)")
parser.add_option("-m", "--metric", dest = "metric", default = "rmse", type = "string",
help = "Metric name")
parser.add_option("-u", "--munits", dest = "munits", default = "t ha-1 yr-1", type = "string",
help = "Metric units")
parser.add_option("-l", "--mlongname", dest = "mlongname", default = "root mean squared error", type = "string",
help = "Metric long name")
parser.add_option("-o", "--outfile", dest = "outfile", default = "", type = "string",
help = "Output file")
parser.add_option("-p", "--params", dest = "params", default = "", type = "string",
help = "YAML param file")
options, args = parser.parse_args()
infile = options.infile
reffile = options.reffile
agglvl = options.agglvl
metric = options.metric
munits = options.munits
mlongname = options.mlongname
outfile = options.outfile
params = ruamel.yaml.load(open(options.params, 'r'), ruamel.yaml.RoundTripLoader)
tranges = params['time_ranges']
ntimes = len(tranges)
crop = split(infile)[1].split('_')[3]
with nc(reffile) as fref:
aref = fref.variables[agglvl][:]
aggunits = fref.variables[agglvl].units
agglongname = fref.variables[agglvl].long_name
dtref = fref.variables['dt'].long_name.split(', ')
mpref = fref.variables['mp'].long_name.split(', ')
tref = fref.variables['time'][:]
tref_units = fref.variables['time'].units
var = 'yield_' + crop
if var in fref.variables:
yield_ref = fref.variables[var][:]
else:
print 'Crop %s unavailable in reference file %s. Exiting . . .' % (crop, reffile)
sys.exit()
with nc(infile) as fin:
ain = fin.variables[agglvl][:]
scen = fin.variables['scen'].long_name.split(', ')
dt = fin.variables['dt'].long_name.split(', ')
mp = fin.variables['mp'].long_name.split(', ')
cr = fin.variables['cr'].long_name.split(', ')
yield_in = fin.variables['yield_detrend'][:]
tin = fin.variables['time'][:]
tin_units = fin.variables['time'].units
tref += int(findall(r'\d+', tref_units)[0]) # get reference time
tin += int(findall(r'\d+', tin_units)[0]) # get simulation time
naggs, nscen, ndt, nmp, ncr = len(ain), len(scen), len(dt), len(mp), len(cr)
sh = (naggs, nscen, ndt, nmp, ncr, ntimes)
times = [tin]
dtidx, mpidx = dtref.index('none'), mpref.index('true')
mobj = MetricsWrapper(metric)
mmat = masked_array(zeros(sh), mask = ones(sh))
for t in range(ntimes):
tmin = max([tin[0], tref[0], times[t][0]])
tmax = min([tin[-1], tref[-1], times[t][-1]])
yield_refc = yield_ref[:, logical_and(tref >= tmin, tref <= tmax)]
yield_inc = yield_in[:, logical_and(tin >= tmin, tin <= tmax)]
for d, m, c in product(range(ndt), range(nmp), range(ncr)):
for a, s in product(range(naggs), range(nscen)):
refidx = dtref.index(dt[d])
aidx = where(aref == ain[a])[0][0]
dref = yield_refc[aidx, :, refidx, m]
drefnone = yield_refc[aidx, :, dtidx, mpidx]
dsim = yield_inc[a, :, s, d, m, c]
mmat[a, s, d, m, c, t] = mobj.eval(dsim, dref, drefnone, arange(tmin, tmax + 1))
fout = MultimetricsFile(outfile, ain, agglvl, aggunits, agglongname, scen, tranges, dt, mp, cr)
fout.append(metric, mmat, (agglvl, 'scen', 'dt', 'mp', 'cr', 'time_range'), munits, mlongname) # append to file
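# A standalone sketch (illustrative only, not used above) of the kind of
# metric this script evaluates when metric == 'rmse'; the real computation
# lives in MetricsWrapper and may apply additional masking or weighting.
def _example_rmse(dsim, dref):
    from numpy import sqrt
    return sqrt(((dsim - dref) ** 2).mean())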
|
RDCEP/ggcmi
|
bin/multimetrics/multimetrics.py
|
Python
|
agpl-3.0
| 4,428
|
[
"NetCDF"
] |
9186cdf95c4802523b4b73aef43a15ae56143750a5bd7d589f9c972d64f6d097
|
from lya_vm import VirtualMachine
import lexer as lex
from parser import Parser
from semantic import *
import sys
def main():
if len(sys.argv) < 2 or len(sys.argv) > 4:
print("Usage: python3 compile.py file.lya [-d] [-o]")
print("-d: debug mode")
print("-o: generate lvm code only")
return 1
file_name = sys.argv[1]
debug = '-d' in sys.argv
code = '-o' in sys.argv
# Read given file
with open(file_name, "r") as file:
    s = file.read()
result = Parser()
ast = result.parse(s)
nv = Visitor()
nv.visit(ast)
if nv.semantic_error:
print("Error found. Terminating execution")
exit(1)
ast.generate_code()
if debug:
# Print undecorated AST
print("Printing Undecorated AST")
ast.print(False,'')
print("Printing Decorated AST")
ast.print(True,'')
print("Printing LVM Code")
if code or debug:
print('[')
for st in AST.code:
print(st)
print(']')
H = nv.string_literals
if not code:
VirtualMachine.execute(AST.code, H, False)
if __name__ == "__main__": main()
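# Example invocations (illustrative, derived from the flag handling above):
#   python3 compile.py program.lya       # compile and run on the LVM
#   python3 compile.py program.lya -o    # print the generated LVM code only
#   python3 compile.py program.lya -d    # dump ASTs and LVM code, then run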
|
gmCrivelli/Lya-Compiler
|
compile.py
|
Python
|
mit
| 1,161
|
[
"VisIt"
] |
cd683322858188f2b982feb0e45be3f54b34c8eed769bec3de5653287c2f137b
|
#!/usr/bin/env python
###############################################################################
# $Id: ogr_shape_qix.py 32118 2015-12-11 00:40:44Z goatbar $
#
# Project: GDAL/OGR Test Suite
# Purpose: Test shapefile spatial index mechanism (.qix files). This can serve
# as a test for the functionality of shapelib's shptree.c
# Author: Even Rouault <even dot rouault at mines dash paris dot org>
#
###############################################################################
# Copyright (c) 2012, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import sys
import random
sys.path.append( '../pymod' )
import gdaltest
from osgeo import ogr
###############################################################################
#
def check_qix_non_overlapping_geoms(lyr):
geoms = []
lyr.SetSpatialFilter(None)
extents = lyr.GetExtent()
fc_ref = lyr.GetFeatureCount()
feat = lyr.GetNextFeature()
while feat is not None:
geom = feat.GetGeometryRef()
geoms.append(geom.Clone())
feat = lyr.GetNextFeature()
# Test getting each geom 1 by 1
for geom in geoms:
bbox = geom.GetEnvelope()
lyr.SetSpatialFilterRect(bbox[0], bbox[2], bbox[1], bbox[3])
lyr.ResetReading()
feat = lyr.GetNextFeature()
got_geom = feat.GetGeometryRef()
if got_geom.Equals(geom) == 0:
gdaltest.post_reason('expected %s. got %s' % (geom.ExportToWkt(), got_geom.ExportToWkt()))
return 'fail'
# Get all geoms in a single gulp. We do not use exactly the extent bounds, because
# there is an optimization in the shapefile driver to skip the spatial index in that
# case. That trick can only work with non-point geometries, of course.
lyr.SetSpatialFilterRect(extents[0]+0.001, extents[2]+0.001, extents[1]-0.001, extents[3]-0.001)
lyr.ResetReading()
fc = lyr.GetFeatureCount()
if fc != fc_ref:
gdaltest.post_reason('expected %d. got %d' % (fc_ref, fc))
return 'fail'
return 'success'
###############################################################################
def build_rectangle_from_point(x, y, radius = 0.1):
return ogr.CreateGeometryFromWkt('POLYGON((%f %f,%f %f,%f %f,%f %f,%f %f))' % \
(x-radius,y-radius,x-radius,y+radius,x+radius,y+radius,x+radius,y-radius,x-radius,y-radius))
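# Worked example (illustrative): build_rectangle_from_point(0, 0) returns a
# closed 5-point ring, i.e. a geometry whose WKT reads
# POLYGON ((-0.1 -0.1,-0.1 0.1,0.1 0.1,0.1 -0.1,-0.1 -0.1))
# (modulo the six-decimal %f formatting used in the template above).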
###############################################################################
# Test geoms on a 10x10 grid
def ogr_shape_qix_1():
shape_drv = ogr.GetDriverByName('ESRI Shapefile')
ds = shape_drv.CreateDataSource('/vsimem/ogr_shape_qix.shp')
lyr = ds.CreateLayer("ogr_shape_qix")
for x in range(10):
for y in range(10):
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetGeometry(build_rectangle_from_point(x,y))
lyr.CreateFeature(feat)
feat = None
ds.ExecuteSQL('CREATE SPATIAL INDEX ON ogr_shape_qix')
ds = None
ds = ogr.Open('/vsimem/ogr_shape_qix.shp')
lyr = ds.GetLayer(0)
ret = check_qix_non_overlapping_geoms(lyr)
shape_drv.DeleteDataSource('/vsimem/ogr_shape_qix.shp')
return ret
###############################################################################
# Test geoms on a 100x100 grid
def ogr_shape_qix_2():
shape_drv = ogr.GetDriverByName('ESRI Shapefile')
ds = shape_drv.CreateDataSource('/vsimem/ogr_shape_qix.shp')
lyr = ds.CreateLayer("ogr_shape_qix")
for x in range(100):
for y in range(100):
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetGeometry(build_rectangle_from_point(x,y))
lyr.CreateFeature(feat)
feat = None
ds.ExecuteSQL('CREATE SPATIAL INDEX ON ogr_shape_qix')
ds = None
ds = ogr.Open('/vsimem/ogr_shape_qix.shp')
lyr = ds.GetLayer(0)
ret = check_qix_non_overlapping_geoms(lyr)
shape_drv.DeleteDataSource('/vsimem/ogr_shape_qix.shp')
return ret
###############################################################################
# Test 2 separated regions of 10x10 geoms
def ogr_shape_qix_3():
shape_drv = ogr.GetDriverByName('ESRI Shapefile')
ds = shape_drv.CreateDataSource('/vsimem/ogr_shape_qix.shp')
lyr = ds.CreateLayer("ogr_shape_qix")
for x in range(10):
for y in range(10):
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetGeometry(build_rectangle_from_point(x,y))
lyr.CreateFeature(feat)
feat = None
for x in range(10):
for y in range(10):
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetGeometry(build_rectangle_from_point(x+1000,y))
lyr.CreateFeature(feat)
feat = None
ds.ExecuteSQL('CREATE SPATIAL INDEX ON ogr_shape_qix')
ds = None
ds = ogr.Open('/vsimem/ogr_shape_qix.shp')
lyr = ds.GetLayer(0)
ret = check_qix_non_overlapping_geoms(lyr)
shape_drv.DeleteDataSource('/vsimem/ogr_shape_qix.shp')
return ret
###############################################################################
#
def check_qix_random_geoms(lyr):
geoms = []
lyr.SetSpatialFilter(None)
extents = lyr.GetExtent()
fc_ref = lyr.GetFeatureCount()
feat = lyr.GetNextFeature()
while feat is not None:
geom = feat.GetGeometryRef()
geoms.append(geom.Clone())
feat = lyr.GetNextFeature()
# Test getting each geom 1 by 1
for geom in geoms:
bbox = geom.GetEnvelope()
lyr.SetSpatialFilterRect(bbox[0], bbox[2], bbox[1], bbox[3])
lyr.ResetReading()
found_geom = False
feat = lyr.GetNextFeature()
while feat is not None and found_geom is False:
got_geom = feat.GetGeometryRef()
if got_geom.Equals(geom) == 1:
found_geom = True
else:
feat = lyr.GetNextFeature()
if not found_geom:
gdaltest.post_reason('did not find geometry for %s' % (geom.ExportToWkt()))
return 'fail'
# Get all geoms in a single gulp. We do not use exactly the extent bounds, because
# there is an optimization in the shapefile driver to skip the spatial index in that
# case. That trick can only work with non-point geometries, of course.
lyr.SetSpatialFilterRect(extents[0]+0.001, extents[2]+0.001, extents[1]-0.001, extents[3]-0.001)
lyr.ResetReading()
fc = lyr.GetFeatureCount()
if fc != fc_ref:
gdaltest.post_reason('expected %d. got %d' % (fc_ref, fc))
return 'fail'
return 'success'
###############################################################################
def build_rectangle(x1,y1,x2,y2):
return ogr.CreateGeometryFromWkt('POLYGON((%f %f,%f %f,%f %f,%f %f,%f %f))' % \
(x1,y1,x1,y2,x2,y2,x2,y1,x1,y1))
###############################################################################
# Test random geometries
def ogr_shape_qix_4():
shape_drv = ogr.GetDriverByName('ESRI Shapefile')
ds = shape_drv.CreateDataSource('/vsimem/ogr_shape_qix.shp')
lyr = ds.CreateLayer("ogr_shape_qix")
# The 1000,200,10 figures are such that there are
# a bit of overlapping between the geometries
for x in range(1000):
feat = ogr.Feature(lyr.GetLayerDefn())
x1 = random.randint(0,200)
y1 = random.randint(0,200)
x2 = x1 + random.randint(1,10)
y2 = y1 + random.randint(1,10)
feat.SetGeometry(build_rectangle(x1,y1,x2,y2))
lyr.CreateFeature(feat)
feat = None
# And add statistically non overlapping features
for x in range(1000):
feat = ogr.Feature(lyr.GetLayerDefn())
x1 = random.randint(0,10000)
y1 = random.randint(0,10000)
x2 = x1 + random.randint(1,10)
y2 = y1 + random.randint(1,10)
feat.SetGeometry(build_rectangle(x1,y1,x2,y2))
lyr.CreateFeature(feat)
feat = None
ds.ExecuteSQL('CREATE SPATIAL INDEX ON ogr_shape_qix')
ret = check_qix_random_geoms(lyr)
shape_drv.DeleteDataSource('/vsimem/ogr_shape_qix.shp')
return ret
gdaltest_list = [
ogr_shape_qix_1,
ogr_shape_qix_2,
ogr_shape_qix_3,
ogr_shape_qix_4,
]
if __name__ == '__main__':
gdaltest.setup_run( 'ogr_shape_qix' )
gdaltest.run_tests( gdaltest_list )
gdaltest.summarize()
|
nextgis-extra/tests
|
lib_gdal/ogr/ogr_shape_qix.py
|
Python
|
gpl-2.0
| 9,477
|
[
"GULP"
] |
de9a87e6007930ca4640d29f03562be4b3e95113c69e5d9248cfd738ba7a14e7
|
#! /usr/bin/env python
from __pyosshell__ import *
from __lxml__ import *
from ctp__options__ import *
from ctp__cluster__ import *
from ctp__jobfile__ import *
from momo import osio, endl, flush
def safe_remove(path):
cdx = raw_input("Remove '%s' ? (yes/no)" % path)
if cdx == 'yes':
os.system('rm -rf %s' % path)
else:
print "Nothing happened"
return
def safe_mkdir(path):
if not os.path.exists(path):
os.mkdir(path)
return
def write_header(title):
try:
height, width = os.popen('stty size', 'r').read().split()
width = int(width)
leftright = int((width - len(title)-2)/2)
except ValueError:
leftright = 40
print "="*leftright, title, "="*leftright
return
def countdown(t_sec):
osio.os_print_config(tl='')
colours = [osio.ww, osio.mb, osio.mg, osio.my, osio.mr]
N = int(t_sec+0.5)
n = N*10
osio << "|" << flush
for i in range(n):
t = i*0.1
colour = colours[int(t/t_sec*(len(colours)))]
if i % 10 == 0:
osio << colour << " %1.0fs " % t << flush
else:
osio << colour << "\b=>" << flush
time.sleep(0.1)
osio << colours[-1] << " %1.0fs |" % t_sec << endl
osio.os_print_reset()
par = arg.ArgumentParser(description='CTP LITTLE HELPER')
par.add_argument('--gen', dest='gen', action='store_const', const=1, default=0)
par.add_argument('--cpy', dest='cpy', action='store_const', const=1, default=0)
par.add_argument('--exe', dest='exe', action='store_const', const=1, default=0)
par.add_argument('--cln', dest='cln', action='store_const', const=1, default=0)
par.add_argument('--sub', dest='sub', action='store_const', const=1, default=0)
opts = par.parse_args()
RELATIVE_BASE = 'APE_ISO'
RELATIVE_WORKGROUND = 'WORKGROUND'
EWDBGPOL_FOLDER = 'EWDBGPOL'
MP_FILES = 'MP_FILES'
JOBFILE = 'jobs.ewald.xml'
PARTITION_JOBFILE = True
ACCESS_LOC_JOBFILE = True
n_threads = 16
n_procs = n_threads
queue = 'PE_16'
t_job = 30.0/60. # hours
T_wall = 36 # hours
t_comm = 1 # hours
votcarc = '/people/thnfs/homes/poelking/VOTCA_SUSE_12/bin/VOTCARC.csh'
#z_xx = ['TEST']
#z_xx = ['C60_ZNPC_S']
#z_xx = get_dirs('./', '^L..G_2D$')
z_xx = get_dirs('./', '^\d*K_confout')
z_xx = sorted(z_xx)
exclude = []
pres = [ z[:] for z in z_xx ]
n_jobs_total = 0
n_nodes_total = 0
for folder, pre in zip(z_xx, pres):
if folder in exclude: continue
write_header(folder)
os.chdir(folder)
ROOT = os.path.abspath(os.getcwd())
BASE = os.path.join(ROOT, RELATIVE_BASE)
WORKGROUND = os.path.join(BASE, RELATIVE_WORKGROUND)
print "Root = ", ROOT
print "Base = ", BASE
print "Work = ", WORKGROUND
# CLEAN IF APPLICABLE
if opts.cln:
safe_remove(BASE)
# SUPPLY FROM SOURCE
if opts.gen:
# Base directory (local)
safe_mkdir(BASE)
os.chdir(BASE)
print "Copy files to base directory"
sql = os.path.join(ROOT, 'system.sql')
job = os.path.join(ROOT, JOBFILE)
os.system('cp %s .' % sql)
os.system('cp %s jobs.xml' % job)
# Work directory (local)
safe_mkdir(WORKGROUND)
os.chdir(WORKGROUND)
print "Copy files to workground"
sysxml = os.path.join(ROOT, 'system.xml')
mpstable = os.path.join(ROOT, 'mps.tab')
mpfiles = os.path.join(ROOT, MP_FILES)
ptop = os.path.join(ROOT, '%s/bgp_main.ptop' % EWDBGPOL_FOLDER)
os.system('cp %s .' % sysxml)
os.system('cp %s .' % mpstable)
os.system('cp -r %s MP_FILES' % mpfiles)
if os.path.exists(ptop):
os.system('cp %s .' % ptop)
polar_bg = 'bgp_main.ptop'
else:
print "No background polarization available, set ptop = ''"
polar_bg = ''
# Absolute paths to input files
abs_jobxml = os.path.join(BASE, 'jobs.xml')
abs_sql = os.path.join(BASE, 'system.sql')
abs_jobxml_shared_nonloc = abs_jobxml
abs_sql_shared_nonloc = abs_sql
# Calculate number of jobs per node and number of nodes (see the worked example at the end of this script)
n_jobs = count_jobs(abs_jobxml)
n_jobs_total += n_jobs
jobs_per_thread = int(T_wall/t_job)
jobs_per_machine = int(n_procs*T_wall/t_job)
n_nodes = int(n_jobs / jobs_per_machine) + (1 if n_jobs % jobs_per_machine > 0 else 0)
n_nodes_total += n_nodes
# Distribute overhang equally
jobs_per_machine = int(n_jobs/n_nodes)+1
jobs_cache = int(n_procs*t_comm/t_job)
# PARTITION JOBFILES IF APPLICABLE
jobfiles = []
if PARTITION_JOBFILE:
tree = XmlTree(abs_jobxml)
jobs = tree.GetAll('job')
job_idx = -1
for i in range(n_nodes):
jobfile = 'jobs.%d.xml' % (i+1)
root = etree.Element('jobs')
for j in range(jobs_per_machine):
job_idx += 1
if job_idx == len(jobs):
break
root.append(jobs[job_idx].node)
ofs = open(jobfile, 'w')
ofs.write(etree.tostring(root, pretty_print=True))
ofs.close()
if ACCESS_LOC_JOBFILE:
pass
else:
jobfile = os.path.abspath(jobfile)
jobfiles.append(jobfile)
else:
jobfiles = [ abs_jobxml for i in range(n_nodes) ]
# Options
print "Generate options"
for i in range(n_nodes):
write_pewald3d_options(
filename='options.%d.xml' % (i+1),
job_file=jobfiles[i],
mapping='system.xml',
mps_table='mps.tab',
polar_bg=polar_bg,
pdb_check=0,
ewald_cutoff=8,
shape='none',
save_nblist='false',
induce=1,
thole_cutoff=3,
thole_tolerance=0.001,
calculate_fields='true',
polarize_fg='true',
evaluate_energy='true',
cg_background='false',
cg_foreground='false',
cg_radius=3,
cg_anisotropic='true',
energy=1e-5,
kfactor=100,
rfactor=6)
if i == 0:
options_writer_toggle_verbose()
print "... options.?.xml ..."
options_writer_toggle_verbose()
# Generate command
print "Generate command"
cmd_dict = {\
'exe' : 'ctp_parallel',
'calc' : 'pewald3d',
'sql' : abs_sql_shared_nonloc,
'job' : abs_jobxml_shared_nonloc,
'map' : 'system.xml',
'opt' : 'options.{ID:d}.xml',
'n_thd' : n_threads,
'cache' : jobs_cache,
'max' : jobs_per_machine,
'log' : 'ctp_{ID:02d}.log'}
cmd = '{exe} -e {calc} -o {opt} -f {sql} -s 0 -t {n_thd} -c {cache} -m {max} >& {log}'.format(**cmd_dict)
# Generate batch files
tag = pre.upper()+'_{ID:02d}_PEWD3D'
print "Tag", tag
print "Cmd", cmd
print "{0} jobs => {1} nodes @ {2} jobs per node, cache {3}, max {4}".format(n_jobs, n_nodes, jobs_per_machine, jobs_cache, jobs_per_machine)
batch_files = multi_write_cluster_batch(n=n_nodes, command=cmd, tag=tag, queue=queue, source=False,module=['gaussian/g03','votca/icc_cluster'], procs=n_procs)
if opts.sub:
for batch in batch_files:
os.system('qsub %s' % batch)
if PARTITION_JOBFILE:
time.sleep(1)
continue
elif batch == batch_files[-1]:
pass
else:
time.sleep(60)
os.chdir(ROOT)
os.chdir('../')
print "N(jobs,total) =", n_jobs_total
print "N(nodes,total) =", n_nodes_total
sys.exit(0)
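# Worked example (illustrative) of the node-count arithmetic above, using the
# defaults n_procs = 16, T_wall = 36 h, t_job = 0.5 h:
#   jobs_per_machine = int(16 * 36 / 0.5) = 1152
#   2000 jobs => int(2000 / 1152) + 1 = 2 nodes (remainder > 0)
#   redistributed: jobs_per_machine = int(2000 / 2) + 1 = 1001 jobs per node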
|
12AngryMen/votca-scripts
|
Jobadmin/4_partition_pewald3d.py
|
Python
|
apache-2.0
| 6,819
|
[
"Gaussian"
] |
bab81c2d19924441d2b3bf473eca6a865ac73f93667ed3c5bd1984c1b46c9b9e
|
#!/usr/bin/env python
#
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides an interface to start and stop Android emulator.
Assumes system environment ANDROID_NDK_ROOT has been set.
Emulator: The class provides the methods to launch/shutdown the emulator with
the android virtual device named 'avd_armeabi' .
"""
import logging
import os
import shutil
import signal
import subprocess
import sys
import time
import time_profile
# TODO(craigdh): Move these pylib dependencies to pylib/utils/.
from pylib import android_commands
from pylib import cmd_helper
from pylib import constants
from pylib import pexpect
import errors
import run_command
# Android API level
API_TARGET = 'android-%s' % constants.ANDROID_SDK_VERSION
class EmulatorLaunchException(Exception):
"""Emulator failed to launch."""
pass
def _KillAllEmulators():
"""Kill all running emulators that look like ones we started.
There are odd 'sticky' cases where there can be no emulator process
running but a device slot is taken. A little bot trouble and
we're out of room forever.
"""
emulators = android_commands.GetEmulators()
if not emulators:
return
for emu_name in emulators:
cmd_helper.RunCmd(['adb', '-s', emu_name, 'emu', 'kill'])
logging.info('Emulator killing is async; give a few seconds for all to die.')
for i in range(5):
if not android_commands.GetEmulators():
return
time.sleep(1)
def DeleteAllTempAVDs():
"""Delete all temporary AVDs which are created for tests.
If the test exits abnormally, some temporary AVDs created during testing may
be left on the system. Clean these AVDs.
"""
avds = android_commands.GetAVDs()
if not avds:
return
for avd_name in avds:
if 'run_tests_avd' in avd_name:
cmd = ['android', '-s', 'delete', 'avd', '--name', avd_name]
cmd_helper.RunCmd(cmd)
logging.info('Delete AVD %s' % avd_name)
class PortPool(object):
"""Pool for emulator port starting position that changes over time."""
_port_min = 5554
_port_max = 5585
_port_current_index = 0
@classmethod
def port_range(cls):
"""Return a range of valid ports for emulator use.
The port must be an even number between 5554 and 5584. Sometimes
a killed emulator "hangs on" to a port long enough to prevent
relaunch. This is especially true on slow machines (like a bot).
Cycling through a port start position helps make us resilient."""
ports = range(cls._port_min, cls._port_max, 2)
n = cls._port_current_index
cls._port_current_index = (n + 1) % len(ports)
return ports[n:] + ports[:n]
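# Illustrative only: successive calls rotate the starting port, e.g.
#   PortPool.port_range() -> [5554, 5556, ..., 5584]
#   PortPool.port_range() -> [5556, 5558, ..., 5584, 5554]
# so a port that refused to free up is not retried first every time.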
def _GetAvailablePort():
"""Returns an available TCP port for the console."""
used_ports = []
emulators = android_commands.GetEmulators()
for emulator in emulators:
used_ports.append(emulator.split('-')[1])
for port in PortPool.port_range():
if str(port) not in used_ports:
return port
def LaunchEmulators(emulator_count, abi, wait_for_boot=True):
"""Launch multiple emulators and wait for them to boot.
Args:
emulator_count: number of emulators to launch.
abi: the emulator target platform
wait_for_boot: whether or not to wait for emulators to boot up
Returns:
List of emulators.
"""
emulators = []
for n in xrange(emulator_count):
t = time_profile.TimeProfile('Emulator launch %d' % n)
# Creates a temporary AVD.
avd_name = 'run_tests_avd_%d' % n
logging.info('Emulator launch %d with avd_name=%s', n, avd_name)
emulator = Emulator(avd_name, abi)
emulator.Launch(kill_all_emulators=n == 0)
t.Stop()
emulators.append(emulator)
# Wait for all emulators to finish booting.
if wait_for_boot:
for emulator in emulators:
emulator.ConfirmLaunch(True)
return emulators
class Emulator(object):
"""Provides the methods to launch/shutdown the emulator.
The emulator has the android virtual device named 'avd_armeabi'.
The emulator could use any even TCP port between 5554 and 5584 for the
console communication, and this port will be part of the device name like
'emulator-5554'. Assume this is always the case, as the device name is the
id of the emulator managed by this class.
Attributes:
emulator: Path of Android's emulator tool.
popen: Popen object of the running emulator process.
device: Device name of this emulator.
"""
# Signals we listen for to kill the emulator on
_SIGNALS = (signal.SIGINT, signal.SIGHUP)
# Time to wait for an emulator launch, in seconds. This includes
# the time to launch the emulator and a wait-for-device command.
_LAUNCH_TIMEOUT = 120
# Timeout interval of wait-for-device command before bouncing to a
# process life check.
_WAITFORDEVICE_TIMEOUT = 5
# Time to wait for a "wait for boot complete" (property set on device).
_WAITFORBOOT_TIMEOUT = 300
def __init__(self, avd_name, abi):
"""Init an Emulator.
Args:
avd_name: name of the AVD to create
abi: target platform for emulator being created
"""
android_sdk_root = os.path.join(constants.EMULATOR_SDK_ROOT,
'android_tools', 'sdk')
self.emulator = os.path.join(android_sdk_root, 'tools', 'emulator')
self.android = os.path.join(android_sdk_root, 'tools', 'android')
self.popen = None
self.device = None
self.abi = abi
self.avd_name = avd_name
self._CreateAVD()
def _DeviceName(self):
"""Return our device name."""
port = _GetAvailablePort()
return ('emulator-%d' % port, port)
def _CreateAVD(self):
"""Creates an AVD with the given name.
Return avd_name.
"""
if self.abi == 'arm':
abi_option = 'armeabi-v7a'
else:
abi_option = 'x86'
avd_command = [
self.android,
'--silent',
'create', 'avd',
'--name', self.avd_name,
'--abi', abi_option,
'--target', API_TARGET,
'--force',
]
avd_cmd_str = ' '.join(avd_command)
logging.info('Create AVD command: %s', avd_cmd_str)
avd_process = pexpect.spawn(avd_cmd_str)
# Instead of creating a custom profile, we overwrite config files.
avd_process.expect('Do you wish to create a custom hardware profile')
avd_process.sendline('no\n')
avd_process.expect('Created AVD \'%s\'' % self.avd_name)
# Setup test device as default Galaxy Nexus AVD
avd_config_dir = os.path.join(constants.CHROME_DIR, 'build', 'android',
'avd_configs')
avd_config_ini = os.path.join(avd_config_dir,
'AVD_for_Galaxy_Nexus_by_Google_%s.avd' %
self.abi, 'config.ini')
# Replace current configuration with default Galaxy Nexus config.
avds_dir = os.path.join(os.path.expanduser('~'), '.android', 'avd')
ini_file = os.path.join(avds_dir, '%s.ini' % self.avd_name)
new_config_ini = os.path.join(avds_dir, '%s.avd' % self.avd_name,
'config.ini')
# Remove config files with defaults to replace with Google's GN settings.
os.unlink(ini_file)
os.unlink(new_config_ini)
# Create new configuration files with Galaxy Nexus by Google settings.
with open(ini_file, 'w') as new_ini:
new_ini.write('avd.ini.encoding=ISO-8859-1\n')
new_ini.write('target=%s\n' % API_TARGET)
new_ini.write('path=%s/%s.avd\n' % (avds_dir, self.avd_name))
new_ini.write('path.rel=avd/%s.avd\n' % self.avd_name)
shutil.copy(avd_config_ini, new_config_ini)
return self.avd_name
def _DeleteAVD(self):
"""Delete the AVD of this emulator."""
avd_command = [
self.android,
'--silent',
'delete',
'avd',
'--name', self.avd_name,
]
logging.info('Delete AVD command: %s', ' '.join(avd_command))
cmd_helper.RunCmd(avd_command)
def Launch(self, kill_all_emulators):
"""Launches the emulator asynchronously. Call ConfirmLaunch() to ensure the
emulator is ready for use.
If it fails, an exception will be raised.
"""
if kill_all_emulators:
_KillAllEmulators() # just to be sure
self._AggressiveImageCleanup()
(self.device, port) = self._DeviceName()
emulator_command = [
self.emulator,
# Speed up emulator launch by 40%. Really.
'-no-boot-anim',
# The default /data size is 64M.
# That's not enough for 8 unit test bundles and their data.
'-partition-size', '512',
# Use a familiar name and port.
'-avd', self.avd_name,
'-port', str(port),
# Wipe the data. We've seen cases where an emulator gets 'stuck' if we
# don't do this (every thousand runs or so).
'-wipe-data',
# Enable GPU by default.
'-gpu', 'on',
'-qemu', '-m', '1024',
]
if self.abi == 'x86':
emulator_command.extend([
# For x86 emulator --enable-kvm will fail early, avoiding accidental
# runs in a slow mode (i.e. without hardware virtualization support).
'--enable-kvm',
])
logging.info('Emulator launch command: %s', ' '.join(emulator_command))
self.popen = subprocess.Popen(args=emulator_command,
stderr=subprocess.STDOUT)
self._InstallKillHandler()
def _AggressiveImageCleanup(self):
"""Aggressive cleanup of emulator images.
Experimentally it looks like our current emulator use on the bot
leaves image files around in /tmp/android-$USER. If a "random"
name gets reused, we choke with a 'File exists' error.
TODO(jrg): is there a less hacky way to accomplish the same goal?
"""
logging.info('Aggressive Image Cleanup')
emulator_imagedir = '/tmp/android-%s' % os.environ['USER']
if not os.path.exists(emulator_imagedir):
return
for image in os.listdir(emulator_imagedir):
full_name = os.path.join(emulator_imagedir, image)
if 'emulator' in full_name:
logging.info('Deleting emulator image %s', full_name)
os.unlink(full_name)
def ConfirmLaunch(self, wait_for_boot=False):
"""Confirm the emulator launched properly.
Loop on a wait-for-device with a very small timeout. On each
timeout, check the emulator process is still alive.
After confirming a wait-for-device can be successful, make sure
it returns the right answer.
"""
seconds_waited = 0
number_of_waits = 2 # Make sure we can wfd twice
adb_cmd = "adb -s %s %s" % (self.device, 'wait-for-device')
while seconds_waited < self._LAUNCH_TIMEOUT:
try:
run_command.RunCommand(adb_cmd,
timeout_time=self._WAITFORDEVICE_TIMEOUT,
retry_count=1)
number_of_waits -= 1
if not number_of_waits:
break
except errors.WaitForResponseTimedOutError as e:
seconds_waited += self._WAITFORDEVICE_TIMEOUT
adb_cmd = "adb -s %s %s" % (self.device, 'kill-server')
run_command.RunCommand(adb_cmd)
self.popen.poll()
if self.popen.returncode is not None:
raise EmulatorLaunchException('EMULATOR DIED')
if seconds_waited >= self._LAUNCH_TIMEOUT:
raise EmulatorLaunchException('TIMEOUT with wait-for-device')
logging.info('Seconds waited on wait-for-device: %d', seconds_waited)
if wait_for_boot:
# Now that we checked for obvious problems, wait for a boot complete.
# Waiting for the package manager is sometimes problematic.
a = android_commands.AndroidCommands(self.device)
a.WaitForSystemBootCompleted(self._WAITFORBOOT_TIMEOUT)
def Shutdown(self):
"""Shuts down the process started by launch."""
self._DeleteAVD()
if self.popen:
self.popen.poll()
if self.popen.returncode is None:
self.popen.kill()
self.popen = None
def _ShutdownOnSignal(self, signum, frame):
logging.critical('emulator _ShutdownOnSignal')
for sig in self._SIGNALS:
signal.signal(sig, signal.SIG_DFL)
self.Shutdown()
raise KeyboardInterrupt # print a stack
def _InstallKillHandler(self):
"""Install a handler to kill the emulator when we exit unexpectedly."""
for sig in self._SIGNALS:
signal.signal(sig, self._ShutdownOnSignal)
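# A generic sketch (illustrative, not Chromium code) of the cleanup pattern
# implemented by _InstallKillHandler/_ShutdownOnSignal above:
#
#   import signal
#   def _cleanup_on_signal(signum, frame):
#       signal.signal(signum, signal.SIG_DFL)  # restore default; avoid re-entry
#       do_cleanup()                           # hypothetical cleanup hook
#       raise KeyboardInterrupt                # surface a stack trace
#   for sig in (signal.SIGINT, signal.SIGHUP):
#       signal.signal(sig, _cleanup_on_signal)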
|
plxaye/chromium
|
src/build/android/pylib/utils/emulator.py
|
Python
|
apache-2.0
| 12,430
|
[
"Galaxy"
] |
47e4469629c290080981b51e2408c81175b1145d856530879c4ef196ecaaa3e0
|
# -*- coding: utf-8 -*-
import unittest
from typing import Set, Tuple
from pybel import BELGraph
from pybel.constants import ANNOTATIONS
from pybel.dsl import BaseEntity, Protein
from pybel.struct.filters import (
and_edge_predicates,
concatenate_node_predicates,
count_passed_edge_filter,
count_passed_node_filter,
filter_edges,
get_nodes,
invert_edge_predicate,
)
from pybel.struct.filters.edge_predicate_builders import (
_annotation_dict_all_filter,
_annotation_dict_any_filter,
build_annotation_dict_all_filter,
build_annotation_dict_any_filter,
)
from pybel.struct.filters.edge_predicates import true_edge_predicate
from pybel.struct.filters.node_predicates import true_node_predicate
from pybel.struct.filters.typing import EdgeIterator
from pybel.testing.utils import n
def make_edge_iterator_set(it: EdgeIterator) -> Set[Tuple[BaseEntity, BaseEntity]]:
return {(u, v) for u, v, _ in it}
class TestNodeFilters(unittest.TestCase):
def setUp(self):
self.universe = BELGraph()
self.universe.add_edge(1, 2)
self.universe.add_edge(2, 3)
self.universe.add_edge(3, 7)
self.universe.add_edge(1, 4)
self.universe.add_edge(1, 5)
self.universe.add_edge(5, 6)
self.universe.add_edge(8, 2)
self.graph = BELGraph()
self.graph.add_edge(1, 2)
self.all_universe_nodes = {1, 2, 3, 4, 5, 6, 7, 8}
self.all_graph_nodes = {1, 2}
def test_no_node_filter_argument(self):
nodes = get_nodes(self.universe, [])
self.assertEqual(self.all_universe_nodes, nodes)
def test_keep_node_permissive(self):
nodes = get_nodes(self.universe, true_node_predicate)
self.assertEqual(self.all_universe_nodes, nodes)
def test_missing_node_filter(self):
nodes = get_nodes(self.universe, concatenate_node_predicates([]))
self.assertEqual(self.all_universe_nodes, nodes)
def test_concatenate_single_node_filter(self):
nodes = get_nodes(self.universe, [true_node_predicate])
self.assertEqual(self.all_universe_nodes, nodes)
def test_concatenate_multiple_node_filters(self):
def even(_, node) -> bool:
return node % 2 == 0
def big(_, node) -> bool:
return node > 3
nodes = get_nodes(self.universe, [even, big])
self.assertEqual({4, 6, 8}, nodes)
self.assertEqual(3, count_passed_node_filter(self.universe, [even, big]))
def test_no_edge_filter(self):
edges = make_edge_iterator_set(filter_edges(self.graph, []))
self.assertEqual({(1, 2)}, edges)
def test_keep_edge_permissive(self):
edges = make_edge_iterator_set(filter_edges(self.graph, true_edge_predicate))
self.assertEqual({(1, 2)}, edges)
def test_keep_edge_unpermissive(self):
keep_edge_restrictive = invert_edge_predicate(true_edge_predicate)
edges = make_edge_iterator_set(filter_edges(self.graph, keep_edge_restrictive))
self.assertEqual(set(), edges)
def test_missing_edge_filter(self):
edges = make_edge_iterator_set(filter_edges(self.graph, and_edge_predicates([])))
self.assertEqual({(1, 2)}, edges)
def test_concatenate_single_edge_filter(self):
edges = make_edge_iterator_set(filter_edges(self.graph, [true_edge_predicate]))
self.assertEqual({(1, 2)}, edges)
def test_concatenate_multiple_edge_filter(self):
def has_odd_source(graph, u, v, k):
return u % 2 != 0
def has_even_target(graph, u, v, k):
return v % 2 == 0
edges = make_edge_iterator_set(filter_edges(self.universe, [has_odd_source, has_even_target]))
self.assertEqual({(1, 2), (1, 4), (5, 6)}, edges)
self.assertEqual(
3,
count_passed_edge_filter(self.universe, [has_odd_source, has_even_target]),
)
has_even_source = invert_edge_predicate(has_odd_source)
edges = make_edge_iterator_set(filter_edges(self.universe, has_even_source))
self.assertEqual({(2, 3), (8, 2)}, edges)
class TestEdgeFilters(unittest.TestCase):
def test_a(self):
self.assertTrue(_annotation_dict_any_filter({ANNOTATIONS: {"A": {"1", "2"}}}, {"A": {"1"}}))
self.assertTrue(_annotation_dict_any_filter({ANNOTATIONS: {"A": {"1", "2"}}}, {"A": {"1", "2"}}))
self.assertTrue(_annotation_dict_any_filter({ANNOTATIONS: {"A": {"1", "2"}}}, {"A": {"1", "2", "3"}}))
self.assertTrue(
_annotation_dict_any_filter({ANNOTATIONS: {"A": {"1", "2"}, "B": {"X"}}}, {"A": {"3"}, "B": {"X"}})
)
self.assertFalse(_annotation_dict_any_filter({ANNOTATIONS: {"A": {"1", "2"}}}, {"A": {"3"}}))
self.assertFalse(
_annotation_dict_any_filter({ANNOTATIONS: {"A": {"1", "2"}, "B": {"X"}}}, {"A": {"3"}, "B": {"Y"}})
)
def test_any_filter_no_query(self):
    """Test that the any filter returns true when there's no argument"""
graph = BELGraph()
graph.add_increases(Protein(n(), n()), Protein(n(), n()), citation=n(), evidence=n())
self.assertEqual(1, count_passed_edge_filter(graph, build_annotation_dict_any_filter({})))
def test_any_filter_no_annotations(self):
graph = BELGraph()
graph.add_increases(Protein(n(), n()), Protein(n(), n()), citation=n(), evidence=n())
self.assertEqual(
0,
count_passed_edge_filter(graph, build_annotation_dict_any_filter({"A": {"1"}})),
)
def test_any_filter_empty_annotations(self):
graph = BELGraph()
graph.add_increases(
Protein(n(), n()),
Protein(n(), n()),
citation=n(),
evidence=n(),
annotations={},
)
self.assertEqual(
0,
count_passed_edge_filter(graph, build_annotation_dict_any_filter({"A": {"1"}})),
)
def test_any_filter(self):
graph = BELGraph()
graph.annotation_list["A"] = set("12345")
graph.add_increases(
Protein(n(), n()),
Protein(n(), n()),
citation=n(),
evidence=n(),
annotations={"A": {"1", "2", "3"}},
)
self.assertEqual(
1,
count_passed_edge_filter(
graph,
build_annotation_dict_any_filter(graph._clean_annotations({"A": {"1"}})),
),
)
self.assertEqual(
1,
count_passed_edge_filter(
graph,
build_annotation_dict_any_filter(graph._clean_annotations({"A": {"1", "2"}})),
),
)
self.assertEqual(
1,
count_passed_edge_filter(
graph,
build_annotation_dict_any_filter(graph._clean_annotations({"A": {"1", "2", "3"}})),
),
)
def test_b(self):
self.assertTrue(_annotation_dict_all_filter({ANNOTATIONS: {"A": {"1"}}}, {"A": {"1"}}))
self.assertTrue(_annotation_dict_all_filter({ANNOTATIONS: {"A": {"1", "2"}}}, {"A": {"1", "2"}}))
self.assertTrue(
_annotation_dict_all_filter(
{ANNOTATIONS: {"A": {"1", "2"}, "B": {"X"}}},
{"A": {"1", "2"}, "B": {"X"}},
)
)
self.assertFalse(
_annotation_dict_all_filter(
{ANNOTATIONS: {"A": {"1", "2"}, "B": {"X"}}},
{"A": {"1", "2", "3"}, "B": {"X", "Y"}},
)
)
self.assertFalse(_annotation_dict_all_filter({ANNOTATIONS: {"A": {"1"}}}, {"A": {"1", "2"}}))
self.assertFalse(_annotation_dict_all_filter({ANNOTATIONS: {"A": {"1"}}}, {"A": {"2"}}))
self.assertFalse(_annotation_dict_all_filter({ANNOTATIONS: {"A": {"1"}}}, {"B": {"1"}}))
def test_all_filter_no_query(self):
"""Test that the all filter returns true when there's no argument"""
graph = BELGraph()
graph.add_increases(Protein(n(), n()), Protein(n(), n()), citation=n(), evidence=n())
self.assertEqual(1, count_passed_edge_filter(graph, build_annotation_dict_all_filter({})))
def test_all_filter_no_annotations(self):
graph = BELGraph()
graph.add_increases(Protein(n(), n()), Protein(n(), n()), citation=n(), evidence=n())
self.assertEqual(
0,
count_passed_edge_filter(graph, build_annotation_dict_all_filter({"A": {"1"}})),
)
def test_all_filter_empty_annotations(self):
graph = BELGraph()
graph.add_increases(
Protein(n(), n()),
Protein(n(), n()),
citation=n(),
evidence=n(),
annotations={},
)
self.assertEqual(
0,
count_passed_edge_filter(graph, build_annotation_dict_all_filter({"A": {"1"}})),
)
def test_all_filter(self):
graph = BELGraph()
graph.annotation_list["A"] = set("12345")
graph.add_increases(
Protein(n(), n()),
Protein(n(), n()),
citation=n(),
evidence=n(),
annotations={
"A": {"1", "2", "3"},
},
)
self.assertEqual(
1,
count_passed_edge_filter(
graph,
build_annotation_dict_all_filter(graph._clean_annotations({"A": {"1"}})),
),
)
self.assertEqual(
1,
count_passed_edge_filter(
graph,
build_annotation_dict_all_filter(graph._clean_annotations({"A": {"1", "2"}})),
),
)
self.assertEqual(
1,
count_passed_edge_filter(
graph,
build_annotation_dict_all_filter(graph._clean_annotations({"A": {"1", "2", "3"}})),
),
)
self.assertEqual(
0,
count_passed_edge_filter(
graph,
build_annotation_dict_all_filter(graph._clean_annotations({"A": {"1", "2", "3", "4"}})),
),
)
self.assertEqual(
0,
count_passed_edge_filter(
graph,
build_annotation_dict_all_filter(graph._clean_annotations({"A": {"4"}})),
),
)
def test_all_filter_dict(self):
graph = BELGraph()
graph.annotation_list["A"] = set("12345")
a, b = Protein(namespace="hgnc", identifier="1", name="A"), Protein(namespace="hgnc", identifier="2", name="B")
graph.add_increases(
a,
b,
citation=n(),
evidence=n(),
annotations={
"A": {"1", "2", "3"},
},
)
self.assertEqual(
1,
count_passed_edge_filter(
graph,
build_annotation_dict_all_filter(graph._clean_annotations({"A": {"1": True}})),
),
)
self.assertEqual(
1,
count_passed_edge_filter(
graph,
build_annotation_dict_all_filter(graph._clean_annotations({"A": {"1": True, "2": True}})),
),
)
self.assertEqual(
1,
count_passed_edge_filter(
graph,
build_annotation_dict_all_filter(graph._clean_annotations({"A": {"1": True, "2": True, "3": True}})),
),
)
self.assertEqual(
0,
count_passed_edge_filter(
graph,
build_annotation_dict_all_filter(
graph._clean_annotations({"A": {"1": True, "2": True, "3": True, "4": True}})
),
),
)
self.assertEqual(
0,
count_passed_edge_filter(
graph,
build_annotation_dict_all_filter(graph._clean_annotations({"A": {"4": True}})),
),
)
|
pybel/pybel
|
tests/test_struct/test_filters/test_struct_filters.py
|
Python
|
mit
| 12,140
|
[
"Pybel"
] |
c1bbd4e1c52881e07317c288a3d14de44f66b2bf72cb827f877cf26af5bb3205
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
__author__ = "Bharat Medasani"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "bkmedasani@lbl.gov"
__date__ = "Aug 2, 2013"
import unittest2 as unittest
import os
import re
from pymatgen.core.periodic_table import Specie
from pymatgen.core.structure import Structure, Molecule
from pymatgen.io.cif import CifParser
from pymatgen.io.zeopp import ZeoCssr, ZeoVoronoiXYZ, get_voronoi_nodes, \
get_high_accuracy_voronoi_nodes, get_void_volume_surfarea, \
get_free_sphere_params
from pymatgen.io.vasp.inputs import Poscar
from pymatgen.analysis.bond_valence import BVAnalyzer
try:
import zeo
except ImportError:
zeo = None
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
@unittest.skipIf(not zeo, "zeo not present.")
class ZeoCssrTest(unittest.TestCase):
def setUp(self):
filepath = os.path.join(test_dir, 'POSCAR')
p = Poscar.from_file(filepath)
self.zeocssr = ZeoCssr(p.structure)
def test_str(self):
expected_string = """4.7595 10.4118 6.0672
90.00 90.00 90.00 SPGR = 1 P 1 OPT = 1
24 0
0 Fe4 P4 O16
1 Fe 0.4749 0.2187 0.7500 0 0 0 0 0 0 0 0 0.0000
2 Fe 0.9749 0.2813 0.2500 0 0 0 0 0 0 0 0 0.0000
3 Fe 0.0251 0.7187 0.7500 0 0 0 0 0 0 0 0 0.0000
4 Fe 0.5251 0.7813 0.2500 0 0 0 0 0 0 0 0 0.0000
5 P 0.4182 0.0946 0.2500 0 0 0 0 0 0 0 0 0.0000
6 P 0.9182 0.4054 0.7500 0 0 0 0 0 0 0 0 0.0000
7 P 0.0818 0.5946 0.2500 0 0 0 0 0 0 0 0 0.0000
8 P 0.5818 0.9054 0.7500 0 0 0 0 0 0 0 0 0.0000
9 O 0.7071 0.0434 0.7500 0 0 0 0 0 0 0 0 0.0000
10 O 0.7413 0.0966 0.2500 0 0 0 0 0 0 0 0 0.0000
11 O 0.2854 0.1657 0.0461 0 0 0 0 0 0 0 0 0.0000
12 O 0.2854 0.1657 0.4539 0 0 0 0 0 0 0 0 0.0000
13 O 0.7854 0.3343 0.5461 0 0 0 0 0 0 0 0 0.0000
14 O 0.7854 0.3343 0.9539 0 0 0 0 0 0 0 0 0.0000
15 O 0.2413 0.4034 0.7500 0 0 0 0 0 0 0 0 0.0000
16 O 0.2071 0.4566 0.2500 0 0 0 0 0 0 0 0 0.0000
17 O 0.7929 0.5434 0.7500 0 0 0 0 0 0 0 0 0.0000
18 O 0.7587 0.5966 0.2500 0 0 0 0 0 0 0 0 0.0000
19 O 0.2146 0.6657 0.0461 0 0 0 0 0 0 0 0 0.0000
20 O 0.2146 0.6657 0.4539 0 0 0 0 0 0 0 0 0.0000
21 O 0.7146 0.8343 0.5461 0 0 0 0 0 0 0 0 0.0000
22 O 0.7146 0.8343 0.9539 0 0 0 0 0 0 0 0 0.0000
23 O 0.2587 0.9034 0.7500 0 0 0 0 0 0 0 0 0.0000
24 O 0.2929 0.9566 0.2500 0 0 0 0 0 0 0 0 0.0000"""
self.assertEqual(str(self.zeocssr), expected_string)
def test_from_file(self):
filename = os.path.join(test_dir, "EDI.cssr")
zeocssr = ZeoCssr.from_file(filename)
self.assertIsInstance(zeocssr.structure, Structure)
#@unittest.skipIf(not zeo, "zeo not present.")
class ZeoCssrOxiTest(unittest.TestCase):
def setUp(self):
filepath = os.path.join(test_dir, 'POSCAR')
p = Poscar.from_file(filepath)
structure = BVAnalyzer().get_oxi_state_decorated_structure(p.structure)
self.zeocssr = ZeoCssr(structure)
def test_str(self):
expected_string = """4.7595 10.4118 6.0672
90.00 90.00 90.00 SPGR = 1 P 1 OPT = 1
24 0
0 Fe4 P4 O16
1 Fe3+ 0.4749 0.2187 0.7500 0 0 0 0 0 0 0 0 0.0000
2 Fe3+ 0.9749 0.2813 0.2500 0 0 0 0 0 0 0 0 0.0000
3 Fe3+ 0.0251 0.7187 0.7500 0 0 0 0 0 0 0 0 0.0000
4 Fe3+ 0.5251 0.7813 0.2500 0 0 0 0 0 0 0 0 0.0000
5 P5+ 0.4182 0.0946 0.2500 0 0 0 0 0 0 0 0 0.0000
6 P5+ 0.9182 0.4054 0.7500 0 0 0 0 0 0 0 0 0.0000
7 P5+ 0.0818 0.5946 0.2500 0 0 0 0 0 0 0 0 0.0000
8 P5+ 0.5818 0.9054 0.7500 0 0 0 0 0 0 0 0 0.0000
9 O2- 0.7071 0.0434 0.7500 0 0 0 0 0 0 0 0 0.0000
10 O2- 0.7413 0.0966 0.2500 0 0 0 0 0 0 0 0 0.0000
11 O2- 0.2854 0.1657 0.0461 0 0 0 0 0 0 0 0 0.0000
12 O2- 0.2854 0.1657 0.4539 0 0 0 0 0 0 0 0 0.0000
13 O2- 0.7854 0.3343 0.5461 0 0 0 0 0 0 0 0 0.0000
14 O2- 0.7854 0.3343 0.9539 0 0 0 0 0 0 0 0 0.0000
15 O2- 0.2413 0.4034 0.7500 0 0 0 0 0 0 0 0 0.0000
16 O2- 0.2071 0.4566 0.2500 0 0 0 0 0 0 0 0 0.0000
17 O2- 0.7929 0.5434 0.7500 0 0 0 0 0 0 0 0 0.0000
18 O2- 0.7587 0.5966 0.2500 0 0 0 0 0 0 0 0 0.0000
19 O2- 0.2146 0.6657 0.0461 0 0 0 0 0 0 0 0 0.0000
20 O2- 0.2146 0.6657 0.4539 0 0 0 0 0 0 0 0 0.0000
21 O2- 0.7146 0.8343 0.5461 0 0 0 0 0 0 0 0 0.0000
22 O2- 0.7146 0.8343 0.9539 0 0 0 0 0 0 0 0 0.0000
23 O2- 0.2587 0.9034 0.7500 0 0 0 0 0 0 0 0 0.0000
24 O2- 0.2929 0.9566 0.2500 0 0 0 0 0 0 0 0 0.0000"""
self.assertEqual(str(self.zeocssr), expected_string)
def test_from_file(self):
filename = os.path.join(test_dir, "EDI_oxistate_decorated.cssr")
zeocssr = ZeoCssr.from_file(filename)
self.assertIsInstance(zeocssr.structure, Structure)
@unittest.skipIf(not zeo, "zeo not present.")
class ZeoVoronoiXYZTest(unittest.TestCase):
def setUp(self):
coords = [
[0.000000, 0.000000, 0.000000],
[0.000000, 0.000000, 1.089000],
[1.026719, 0.000000, -0.363000],
[-0.513360, -0.889165, -0.363000],
[-0.513360, 0.889165, -0.363000]]
prop = [0.4, 0.2, 0.2, 0.2, 0.2]
self.mol = Molecule(
["C", "H", "H", "H", "H"], coords,
site_properties={"voronoi_radius": prop})
self.xyz = ZeoVoronoiXYZ(self.mol)
def test_str(self):
ans = """5
H4 C1
C 0.000000 0.000000 0.000000 0.400000
H 1.089000 0.000000 0.000000 0.200000
H -0.363000 1.026719 0.000000 0.200000
H -0.363000 -0.513360 -0.889165 0.200000
H -0.363000 -0.513360 0.889165 0.200000"""
self.assertEqual(str(self.xyz), ans)
def test_from_file(self):
filename = os.path.join(test_dir, "EDI_voro.xyz")
vor = ZeoVoronoiXYZ.from_file(filename)
self.assertIsInstance(vor.molecule, Molecule)
@unittest.skipIf(not zeo, "zeo not present.")
class GetVoronoiNodesTest(unittest.TestCase):
def setUp(self):
filepath = os.path.join(test_dir, 'POSCAR')
p = Poscar.from_file(filepath)
self.structure = p.structure
bv = BVAnalyzer()
valences = bv.get_valences(self.structure)
el = [site.species_string for site in self.structure.sites]
valence_dict = dict(zip(el, valences))
self.rad_dict = {}
for k, v in valence_dict.items():
self.rad_dict[k] = float(Specie(k, v).ionic_radius)
assert len(self.rad_dict) == len(self.structure.composition)
def test_get_voronoi_nodes(self):
vor_node_struct, vor_edge_center_struct, vor_face_center_struct = \
get_voronoi_nodes(self.structure, self.rad_dict)
self.assertIsInstance(vor_node_struct, Structure)
self.assertIsInstance(vor_edge_center_struct, Structure)
self.assertIsInstance(vor_face_center_struct, Structure)
print(len(vor_node_struct.sites))
print(len(vor_face_center_struct.sites))
@unittest.skipIf(not zeo, "zeo not present.")
class GetFreeSphereParamsTest(unittest.TestCase):
def setUp(self):
filepath = os.path.join(test_dir, 'free_sph.cif')
self.structure = Structure.from_file(filepath)
self.rad_dict = {'Ge':0.67,'P':0.52,'S':1.7,
'La':1.17,'Zr':0.86,'O':1.26}
def test_get_free_sphere_params(self):
free_sph_params = get_free_sphere_params(self.structure,
rad_dict=self.rad_dict)
# Zeo results can change in the future; hence the loose comparison.
self.assertAlmostEqual(
free_sph_params['inc_sph_max_dia'], 2.58251, places=1)
self.assertAlmostEqual(
free_sph_params['free_sph_max_dia'], 1.29452, places=1)
self.assertAlmostEqual(
free_sph_params['inc_sph_along_free_sph_path_max_dia'],
2.58251, places=1)
@unittest.skipIf(not zeo, "zeo not present.")
class GetHighAccuracyVoronoiNodesTest(unittest.TestCase):
def setUp(self):
filepath = os.path.join(test_dir, 'POSCAR')
p = Poscar.from_file(filepath)
self.structure = p.structure
bv = BVAnalyzer()
valences = bv.get_valences(self.structure)
el = [site.species_string for site in self.structure.sites]
valence_dict = dict(zip(el, valences))
self.rad_dict = {}
for k, v in valence_dict.items():
self.rad_dict[k] = float(Specie(k, v).ionic_radius)
assert len(self.rad_dict) == len(self.structure.composition)
def test_get_voronoi_nodes(self):
#vor_node_struct, vor_ec_struct, vor_fc_struct = \
# get_high_accuracy_voronoi_nodes(self.structure, self.rad_dict)
vor_node_struct = \
get_high_accuracy_voronoi_nodes(self.structure, self.rad_dict)
self.assertIsInstance(vor_node_struct, Structure)
#self.assertIsInstance(vor_ec_struct, Structure)
#self.assertIsInstance(vor_fc_struct, Structure)
print(len(vor_node_struct.sites))
#print(len(vor_fc_struct.sites))
@unittest.skipIf(not zeo, "zeo not present.")
class GetVoronoiNodesMultiOxiTest(unittest.TestCase):
def setUp(self):
filepath = os.path.join(test_dir, 'POSCAR')
p = Poscar.from_file(filepath)
self.structure = p.structure
bv = BVAnalyzer()
self.structure = bv.get_oxi_state_decorated_structure(self.structure)
valences = bv.get_valences(self.structure)
radii = []
for i in range(len(valences)):
el = self.structure.sites[i].specie.symbol
radius = Specie(el, valences[i]).ionic_radius
radii.append(radius)
el = [site.species_string for site in self.structure.sites]
self.rad_dict = dict(zip(el, radii))
for el in self.rad_dict.keys():
print((el, self.rad_dict[el].real))
def test_get_voronoi_nodes(self):
vor_node_struct, vor_edge_center_struct, vor_face_center_struct =\
get_voronoi_nodes(self.structure, self.rad_dict)
self.assertIsInstance(vor_node_struct, Structure)
self.assertIsInstance(vor_edge_center_struct, Structure)
self.assertIsInstance(vor_face_center_struct, Structure)
@unittest.skip("The function is deprecated")
class GetVoidVolumeSurfaceTest(unittest.TestCase):
def setUp(self):
filepath1 = os.path.join(test_dir, 'Li2O.cif')
p = CifParser(filepath1).get_structures(False)[0]
bv = BVAnalyzer()
valences = bv.get_valences(p)
el = [site.species_string for site in p.sites]
val_dict = dict(zip(el, valences))
self._radii = {}
for k, v in val_dict.items():
k1 = re.sub('[1-9,+,\-]', '', k)
self._radii[k1] = float(Specie(k1, v).ionic_radius)
p.remove(0)
self._vac_struct = p
def test_void_volume_surface_area(self):
vol, sa = get_void_volume_surfarea(self._vac_struct, self._radii)
#print "vol: ", vol, "sa: ", sa
self.assertIsInstance(vol, float)
self.assertIsInstance(sa, float)
if __name__ == "__main__":
unittest.main()
|
aykol/pymatgen
|
pymatgen/io/tests/test_zeopp.py
|
Python
|
mit
| 11,117
|
[
"VASP",
"pymatgen"
] |
bf221d85bd21f53e047be24a26c10b954cc36c93392d29b055ab5351f3843c72
|
#!/usr/bin/env python
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import sys
from DIRAC.Core.Utilities.DIRACScript import DIRACScript as Script
__RCSID__ = "$Id$"
@Script()
def main():
Script.parseCommandLine(ignoreErrors=True)
fieldsToShow = ("ComponentName", "Type", "Host", "Port", "Status", "Message")
from DIRAC.FrameworkSystem.Client.MonitoringClient import gMonitor
result = gMonitor.getComponentsStatusWebFormatted(sortingList=[["ComponentName", "ASC"]])
if not result["OK"]:
print("ERROR: %s" % result["Message"])
sys.exit(1)
paramNames = result["Value"]["ParameterNames"]
records = result["Value"]["Records"]
fieldLengths = []
for param in paramNames:
fieldLengths.append(len(param))
for record in records:
for i, _ in enumerate(record):
if paramNames[i] in fieldsToShow:
fieldLengths[i] = max(fieldLengths[i], len(str(record[i])))
# Print time!
line = []
sepLine = []
for i, param in enumerate(paramNames):
if param in fieldsToShow:
line.append("%s%s" % (param, " " * (fieldLengths[i] - len(param))))
sepLine.append("-" * fieldLengths[i])
print("|".join(line))
sepLine = "+".join(sepLine)
print(sepLine)
for record in records:
line = []
for i, _ in enumerate(record):
if paramNames[i] in fieldsToShow:
val = str(record[i])
line.append("%s%s" % (val, " " * (fieldLengths[i] - len(val))))
print("|".join(line))
# print sepLine
if __name__ == "__main__":
main()
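# Standalone sketch (illustrative only) of the fixed-width padding scheme used
# in main(): each cell is padded to the widest value in its column, e.g.
#   rows = [("ComponentName", "Status"), ("WorkloadManagement/Matcher", "OK")]
#   widths = [max(len(str(r[i])) for r in rows) for i in range(len(rows[0]))]
#   for r in rows:
#       print("|".join(str(v).ljust(w) for v, w in zip(r, widths)))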
|
ic-hep/DIRAC
|
src/DIRAC/FrameworkSystem/scripts/dirac_monitoring_get_components_status.py
|
Python
|
gpl-3.0
| 1,689
|
[
"DIRAC"
] |
2565a9217776fcd95d9cfb26be605484d849e1298ae13ac055a5ebc96fe8e573
|
"""MSMBuilder: Statistical models for Biomolecular Dynamics
"""
from __future__ import print_function, absolute_import
DOCLINES = __doc__.split("\n")
import sys
import traceback
import numpy as np
from os.path import join as pjoin
from setuptools import setup, Extension, find_packages
try:
sys.dont_write_bytecode = True
sys.path.insert(0, '.')
from basesetup import write_version_py, CompilerDetection, \
check_dependencies
finally:
sys.dont_write_bytecode = False
try:
import mdtraj
mdtraj_capi = mdtraj.capi()
except (ImportError, AttributeError):
print('=' * 80)
print('MDTraj version 1.1.X or later is required')
print('=' * 80)
traceback.print_exc()
sys.exit(1)
if '--debug' in sys.argv:
sys.argv.remove('--debug')
DEBUG = True
else:
DEBUG = False
if '--disable-openmp' in sys.argv:
sys.argv.remove('--disable-openmp')
DISABLE_OPENMP = True
else:
DISABLE_OPENMP = False
try:
import Cython
from Cython.Distutils import build_ext
if Cython.__version__ < '0.18':
raise ImportError()
except ImportError:
print(
'Cython version 0.18 or later is required. Try "conda install cython"')
sys.exit(1)
# #########################
VERSION = '3.9.0.dev0'
ISRELEASED = False
__version__ = VERSION
# #########################
CLASSIFIERS = """\
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: OSI Approved :: GNU Lesser General Public License v2 or later (LGPLv2+)
Programming Language :: C++
Programming Language :: Python
Development Status :: 5 - Production/Stable
Topic :: Software Development
Topic :: Scientific/Engineering
Operating System :: POSIX
Operating System :: Unix
Operating System :: MacOS
Programming Language :: Python :: 2
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
Programming Language :: Python :: 3.4
Programming Language :: Python :: 3.5
"""
if any(cmd in sys.argv for cmd in ('install', 'build', 'develop')):
check_dependencies((
('numpy',),
('scipy',),
('pandas',),
('six',),
('mdtraj',),
('sklearn', 'scikit-learn'),
('numpydoc',),
('tables', 'pytables'),
))
# Where to find extensions
MSMDIR = 'msmbuilder/msm/'
HMMDIR = 'msmbuilder/hmm/'
CLUSTERDIR = 'msmbuilder/cluster/'
compiler = CompilerDetection(DISABLE_OPENMP)
with open('msmbuilder/src/config.pxi', 'w') as f:
f.write('''
DEF DEBUG = {debug}
DEF OPENMP = {openmp}
'''.format(openmp=compiler.openmp_enabled, debug=DEBUG))
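# Aside (illustrative, not part of this build script): a .pyx module consumes
# these compile-time constants via Cython's include/IF mechanism, e.g.
#
#   include "config.pxi"
#   IF OPENMP:
#       from cython.parallel import prange
#   IF DEBUG:
#       print("debug build")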
extensions = []
extensions.append(
Extension('msmbuilder.tpt.hub_scores',
sources=[pjoin('msmbuilder', 'tpt', 'hub_scores.pyx')],
include_dirs=[np.get_include()]))
extensions.append(
Extension('msmbuilder.example_datasets._muller',
sources=[pjoin('msmbuilder', 'example_datasets', '_muller.pyx')],
include_dirs=[np.get_include()]))
extensions.append(
Extension('msmbuilder.msm._markovstatemodel',
sources=[pjoin(MSMDIR, '_markovstatemodel.pyx'),
pjoin(MSMDIR, 'src/transmat_mle_prinz.c')],
include_dirs=[pjoin(MSMDIR, 'src'), np.get_include()]))
extensions.append(
Extension('msmbuilder.tests.test_cyblas',
sources=['msmbuilder/tests/test_cyblas.pyx'],
include_dirs=['msmbuilder/src', np.get_include()]))
extensions.append(
Extension('msmbuilder.msm._ratematrix',
sources=[pjoin(MSMDIR, '_ratematrix.pyx')],
language='c++',
extra_compile_args=compiler.compiler_args_openmp,
libraries=compiler.compiler_libraries_openmp,
include_dirs=['msmbuilder/src', np.get_include()]))
extensions.append(
Extension('msmbuilder.decomposition._speigh',
sources=[pjoin('msmbuilder', 'decomposition', '_speigh.pyx')],
language='c++',
extra_compile_args=compiler.compiler_args_openmp,
libraries=compiler.compiler_libraries_openmp,
include_dirs=['msmbuilder/src', np.get_include()]))
extensions.append(
Extension('msmbuilder.msm._metzner_mcmc_fast',
sources=[pjoin(MSMDIR, '_metzner_mcmc_fast.pyx'),
pjoin(MSMDIR, 'src/metzner_mcmc.c')],
libraries=compiler.compiler_libraries_openmp,
extra_compile_args=compiler.compiler_args_openmp,
include_dirs=[pjoin(MSMDIR, 'src'), np.get_include()]))
extensions.append(
Extension('msmbuilder.libdistance',
language='c++',
sources=['msmbuilder/libdistance/libdistance.pyx'],
# msvc needs to be told "libtheobald", gcc wants just "theobald"
libraries=['%stheobald' % ('lib' if compiler.msvc else '')],
include_dirs=["msmbuilder/libdistance/src",
mdtraj_capi['include_dir'], np.get_include()],
library_dirs=[mdtraj_capi['lib_dir']],
))
extensions.append(
Extension('msmbuilder.cluster._kmedoids',
language='c++',
sources=[pjoin(CLUSTERDIR, '_kmedoids.pyx'),
pjoin(CLUSTERDIR, 'src', 'kmedoids.cc')],
include_dirs=[np.get_include()]))
# To get debug symbols on Windows, use
# extra_link_args=['/DEBUG']
# extra_compile_args=['/Zi']
extensions.append(
Extension('msmbuilder.hmm.gaussian',
language='c++',
sources=[pjoin(HMMDIR, 'gaussian.pyx'),
pjoin(HMMDIR, 'src/GaussianHMMFitter.cpp')],
libraries=compiler.compiler_libraries_openmp,
extra_compile_args=compiler.compiler_args_sse3
+ compiler.compiler_args_openmp,
include_dirs=[np.get_include(),
HMMDIR,
pjoin(HMMDIR, 'src/include/'),
pjoin(HMMDIR, 'src/')]))
extensions.append(
Extension('msmbuilder.hmm.vonmises',
language='c++',
sources=[pjoin(HMMDIR, 'vonmises.pyx'),
pjoin(HMMDIR, 'src/VonMisesHMMFitter.cpp'),
pjoin(HMMDIR, 'cephes/i0.c'),
pjoin(HMMDIR, 'cephes/chbevl.c')],
libraries=compiler.compiler_libraries_openmp,
extra_compile_args=compiler.compiler_args_sse3
+ compiler.compiler_args_openmp,
include_dirs=[np.get_include(),
HMMDIR,
pjoin(HMMDIR, 'src/include/'),
pjoin(HMMDIR, 'src/'),
pjoin(HMMDIR, 'cephes/')]))
write_version_py(VERSION, ISRELEASED, filename='msmbuilder/version.py')
setup(name='msmbuilder',
author='Robert McGibbon',
author_email='rmcgibbo@gmail.com',
description=DOCLINES[0],
long_description="\n".join(DOCLINES[2:]),
version=__version__,
url='https://github.com/msmbuilder/msmbuilder',
platforms=['Linux', 'Mac OS-X', 'Unix'],
classifiers=CLASSIFIERS.splitlines(),
packages=find_packages(),
package_data={
'msmbuilder.tests': ['workflows/*'],
'msmbuilder': ['project_templates/*.*',
'project_templates/*/*',
'io_templates/*',
],
},
entry_points={'console_scripts':
['msmb = msmbuilder.scripts.msmb:main']},
zip_safe=False,
ext_modules=extensions,
cmdclass={'build_ext': build_ext})
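# A typical from-source build using the setup script above (standard
# setuptools invocations, shown here only as a usage note):
#   python setup.py build_ext --inplace   # compile the Cython extensions in place
#   python setup.py install               # or: pip install .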
|
Eigenstate/msmbuilder
|
setup.py
|
Python
|
lgpl-2.1
| 7,661
|
[
"Gaussian",
"MDTraj"
] |
4c565611c6ff4ddf97a4731171141e859a52ab00400aaede0f5a00f272cdbde6
|
from django.contrib import admin
from django.db.models.signals import pre_save
from django.dispatch import receiver
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from django.core.mail import send_mail
from django.template.loader import get_template
from django.template import Context
from knowledge.models import Question, Response, Category, Company, Author
def make_public(modeladmin, request, queryset):
queryset.update(status='public')
make_public.short_description = "Mark selected articles public"
def make_rejected(modeladmin, request, queryset):
queryset.update(status='rejected')
for q in queryset:
ctx = {
'article': q.title,
'email': q.email,
'comment': q.comment,
}
#message = 'Your article: '+q.title+' has been rejected, Visit Portalpractices for more information'
message = get_template('registration/article_rejected_template_email.html').render(Context(ctx))
send_mail('Portalpractices: Article rejected', message, 'no-reply@cantemo.com', [q.email])
make_rejected.short_description = "Mark selected articles rejected"
def make_draft(modeladmin, request, queryset):
queryset.update(status='draft')
make_draft.short_description = "Mark selected articles draft"
def make_review(modeladmin, request, queryset):
queryset.update(status='review')
make_review.short_description = "Mark selected articles review"
def make_active(modeladmin, request, queryset):
queryset.update(is_active=True)
for q in queryset:
ctx = {
'username': q.username,
'email': q.email,
}
message = get_template('registration/activate_user_template_email.html').render(Context(ctx))
send_mail('Portalpractices: Account activated', message, 'no-reply@cantemo.com', [q.email])
make_active.short_description = "Mark selected users active"
def make_author_active(modeladmin, request, queryset):
for query in queryset:
query.user.is_active=True
query.user.save(update_fields=['is_active'])
ctx = {
'username': query.user.username,
'email': query.user.email,
}
message = get_template('registration/activate_user_template_email.html').render(Context(ctx))
send_mail('Portalpractices: Account activated', message, 'no-reply@cantemo.com', [query.user.email])
make_author_active.short_description = "Mark selected users active"
class CategoryAdmin(admin.ModelAdmin):
list_display = [f.name for f in Category._meta.fields]
prepopulated_fields = {'slug': ('title', )}
admin.site.register(Category, CategoryAdmin)
class QuestionAdmin(admin.ModelAdmin):
list_display = ('id', 'added', 'lastchanged', 'user', 'alert', 'name', 'email', 'title', 'comment', 'status', 'locked', 'recommended', 'hits' )
list_select_related = True
list_filter = ['status']
raw_id_fields = ['user']
actions = [make_public, make_draft, make_review, make_rejected]
admin.site.register(Question, QuestionAdmin)
class ResponseAdmin(admin.ModelAdmin):
list_display = [f.name for f in Response._meta.fields]
list_select_related = True
raw_id_fields = ['user', 'question']
admin.site.register(Response, ResponseAdmin)
class CompanyAdmin(admin.ModelAdmin):
list_display = [f.name for f in Company._meta.fields]
list_select_related = True
raw_id_fields = ['external_id']
admin.site.register(Company, CompanyAdmin)
class AuthorAdmin(admin.TabularInline):
model = Author
#admin.site.register(Author, AuthorAdmin)
@receiver(pre_save, sender=User)
def send_user_email(sender, instance=None, **kwargs):
    try:
        old_instance = User.objects.get(pk=instance.pk)
if old_instance.is_active == False and instance.is_active == True:
ctx = {
'username': instance.username,
'email': instance.email,
}
message = get_template('registration/activate_user_template_email.html').render(Context(ctx))
send_mail('Portalpractices: Account activated', message, 'no-reply@cantemo.com', [instance.email])
    except User.DoesNotExist:
        # instance is being created for the first time; nothing to compare
        pass
class UserAdmin(admin.ModelAdmin):
inlines = [AuthorAdmin]
actions = [make_active]
list_filter = ['is_active']
list_display = ('email', 'first_name', 'last_name', 'date_joined', 'is_active', 'is_staff', 'is_superuser')
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
|
CantemoInternal/django-knowledge
|
knowledge/admin.py
|
Python
|
isc
| 4,571
|
[
"VisIt"
] |
d53c3d68e05318d55fa70072d013d05a1fce60150a3b7b531d2482481a0ff3b8
|
from math import sqrt
import numpy as np
from scipy import stats
from matplotlib import pyplot
#(0) Set parameters:
np.random.seed(0)
nResponses = 6
nIterations = 10000
### derived parameters:
df = nResponses - 1
sqrtN = sqrt(nResponses)
#(1) Generate Gaussian data and compute test statistic:
T = []
for i in range(nIterations):
y = np.random.randn(nResponses)
t = y.mean() / y.std(ddof=1) * sqrtN
T.append(t)
T = np.asarray(T)
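# A vectorized equivalent of the simulation loop above; a sketch for
# illustration only (the names Y and T_vec are new, not part of the
# original script), relying purely on numpy broadcasting:
Y = np.random.randn(nIterations, nResponses)
T_vec = Y.mean(axis=1) / Y.std(axis=1, ddof=1) * sqrtN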
#(2) Survival functions:
heights = np.linspace(0, 5, 21)
sf = np.array( [ (T>h).mean() for h in heights] )
sfE = stats.t.sf(heights, df)
sfN = stats.norm.sf(heights) #standard normal (for comparison)
#(3) Plot results:
pyplot.close('all')
ax = pyplot.axes()
ax.plot(heights, sf, 'o', label='Simulated')
ax.plot(heights, sfE, '-', label='Theoretical')
ax.plot(heights, sfN, 'r-', label='Standard normal')
ax.set_xlabel('$u$', size=20)
ax.set_ylabel('$P (t > u)$', size=20)
ax.legend()
ax.set_title('One-sample t validation (0D)', size=20)
pyplot.show()
|
0todd0000/spm1d
|
spm1d/rft1d/examples/val_max_1_onesample_t_0d.py
|
Python
|
gpl-3.0
| 1,113
|
[
"Gaussian"
] |
f981bcf01833d427fefcd482cf1dd298b87f8bb33ed93e48e0143cd46af69b63
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# xmlrpctriggers - XMLRPC client with HTTPS user certificate support
# Copyright (C) 2003-2015 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""XMLRPC client with support for HTTPS using client certificates"""
import sys
from xmlrpcsslclient import xmlrpcgetserver
if '__main__' == __name__:
if len(sys.argv) > 1:
vgrid_name = sys.argv[1:]
else:
vgrid_name = ['eScience']
print 'Testing XMLRPC client over HTTPS with user certificates for triggers'
print 'You may get prompted for your MiG key/certificate passphrase before you can continue'
server = xmlrpcgetserver()
methods = server.system.listMethods()
print 'supported remote methods:\n%s' % '\n'.join(methods)
print
print 'submit() signature: %s'\
% server.system.methodSignature('submit')
print 'the signature is a tuple of output object type and a list of expected/default input values'
print 'submit() help: %s' % server.system.methodHelp('submit')
print 'please note that help is not yet available for all methods'
print
print 'Testing some trigger methods:'
print 'checking triggers for vgrid: %s' % vgrid_name
(inlist, retval) = server.lsvgridtriggers({'vgrid_name': vgrid_name})
(returnval, returnmsg) = retval
if returnval != 0:
print 'Error %s:%s ' % (returnval, returnmsg)
for ele in inlist:
if ele['object_type'] == 'list':
for el in ele['list']:
print '%(rule_id)s\t%(path)s\t%(changes)s\t%(action)s\t%(arguments)s\t%(run_as)s\t%(rate_limit)s'% el
print 'adding dummy trigger for vgrid: %s' % vgrid_name
(inlist, retval) = server.addvgridtrigger({'vgrid_name': vgrid_name,
'rule_id': ['xmlrpcdummytrigger'],
'path': 'xmldummy-*.txt',
'changes': ['created'],
'action': ['trigger-modified'],
'arguments': ['xmlrpcdummy.out'],
'rate_limit': ['1/m']})
(returnval, returnmsg) = retval
if returnval != 0:
print 'Error %s:%s' % (returnval, returnmsg)
for ele in inlist:
if ele['object_type'] == 'text':
print "Success: %s" % ele['text']
if ele['object_type'] == 'error_text':
print "ERROR: %s" % ele['text']
print 'removing dummy trigger for vgrid: %s' % vgrid_name
(inlist, retval) = server.rmvgridtrigger({'vgrid_name': vgrid_name,
'rule_id': ['xmlrpcdummytrigger']})
(returnval, returnmsg) = retval
if returnval != 0:
print 'Error %s:%s' % (returnval, returnmsg)
for ele in inlist:
if ele['object_type'] == 'text':
print "Success: %s" % ele['text']
if ele['object_type'] == 'error_text':
print "ERROR: %s" % ele['text']
|
heromod/migrid
|
mig/user/xmlrpctriggers.py
|
Python
|
gpl-2.0
| 3,816
|
[
"Brian"
] |
0b7fe7ce55996246a4dc1d8339d96aafa596443cff3ee484352d52757ce113bd
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import glob
from spack import *
class Autofact(Package):
"""An Automatic Functional Annotation and Classification Tool"""
homepage = "https://megasun.bch.umontreal.ca/Software/AutoFACT.htm"
url = "https://megasun.bch.umontreal.ca/Software/AutoFACT_v3_4.tar"
version('3_4', sha256='1465d263b19adb42f01f6e636ac40ef1c2e3dbd63461f977b89da9493fe9c6f4')
depends_on('perl', type='run')
depends_on('perl-bioperl', type='run')
depends_on('perl-io-string', type='run')
depends_on('perl-libwww-perl', type='run')
depends_on('blast-legacy', type='run')
def patch(self):
with working_dir('scripts'):
files = glob.iglob("*.pl")
for file in files:
change = FileFilter(file)
change.filter('usr/bin/perl', 'usr/bin/env perl')
def install(self, spec, prefix):
install_tree(self.stage.source_path, prefix)
def setup_run_environment(self, env):
env.prepend_path('PATH', self.prefix.scripts)
env.set('PATH2AUTOFACT', self.prefix)
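# Usage sketch once this recipe is available in a Spack repository
# (standard Spack CLI, shown only as an illustration):
#   spack install autofact
#   spack load autofact   # prepends the scripts dir to PATH and sets PATH2AUTOFACT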
|
LLNL/spack
|
var/spack/repos/builtin/packages/autofact/package.py
|
Python
|
lgpl-2.1
| 1,257
|
[
"BLAST",
"BioPerl"
] |
9ac901740b1161b666a5b512963596e60ea67fea456c56650c684d169c01b741
|
##
# Copyright 2009-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing DIRAC, implemented as an easyblock
"""
import os
import re
import shutil
import tempfile
import easybuild.tools.environment as env
import easybuild.tools.toolchain as toolchain
from easybuild.easyblocks.generic.cmakemake import CMakeMake
from easybuild.framework.easyconfig import CUSTOM, MANDATORY
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.run import run_cmd
class EB_DIRAC(CMakeMake):
"""Support for building/installing DIRAC."""
def configure_step(self):
"""Custom configuration procedure for DIRAC."""
        # make very sure the install directory isn't there yet, since it may cause problems if it is used (forced rebuild)
if os.path.exists(self.installdir):
self.log.warning("Found existing install directory %s, removing it to avoid problems", self.installdir)
try:
shutil.rmtree(self.installdir)
except OSError as err:
raise EasyBuildError("Failed to remove existing install directory %s: %s", self.installdir, err)
self.cfg['separate_build_dir'] = True
self.cfg.update('configopts', "-DENABLE_MPI=ON -DCMAKE_BUILD_TYPE=release")
# complete configuration with configure_method of parent
super(EB_DIRAC, self).configure_step()
def test_step(self):
"""Custom built-in test procedure for DIRAC."""
if self.cfg['runtest']:
# set up test environment
# see http://diracprogram.org/doc/release-14/installation/testing.html
env.setvar('DIRAC_TMPDIR', tempfile.mkdtemp(prefix='dirac-test-'))
env.setvar('DIRAC_MPI_COMMAND', self.toolchain.mpi_cmd_for('', self.cfg['parallel']))
# run tests (may take a while, especially if some tests take a while to time out)
self.log.info("Running tests may take a while, especially if some tests timeout (default timeout is 1500s)")
cmd = "make test"
out, ec = run_cmd(cmd, simple=False, log_all=False, log_ok=False)
# check that majority of tests pass
# some may fail due to timeout, but that's acceptable
# cfr. https://groups.google.com/forum/#!msg/dirac-users/zEd5-xflBnY/OQ1pSbuX810J
# over 90% of tests should pass
passed_regex = re.compile('^(9|10)[0-9.]+% tests passed', re.M)
if not passed_regex.search(out) and not self.dry_run:
raise EasyBuildError("Too many failed tests; '%s' not found in test output: %s",
passed_regex.pattern, out)
# extract test results
test_result_regex = re.compile(r'^\s*[0-9]+/[0-9]+ Test \s*#[0-9]+: .*', re.M)
test_results = test_result_regex.findall(out)
if test_results:
self.log.info("Found %d test results: %s", len(test_results), test_results)
elif self.dry_run:
# dummy test result
test_results = ["1/1 Test #1: dft_alda_xcfun ............................. Passed 72.29 sec"]
else:
raise EasyBuildError("Couldn't find *any* test results?")
test_count_regex = re.compile(r'^\s*[0-9]+/([0-9]+)')
res = test_count_regex.search(test_results[0])
if res:
test_count = int(res.group(1))
elif self.dry_run:
# a single dummy test result
test_count = 1
else:
raise EasyBuildError("Failed to determine total test count from %s using regex '%s'",
test_results[0], test_count_regex.pattern)
if len(test_results) != test_count:
raise EasyBuildError("Expected to find %s test results, but found %s", test_count, len(test_results))
# check test results, only 'Passed' or 'Timeout' are acceptable outcomes
faulty_tests = []
for test_result in test_results:
if ' Passed ' not in test_result:
self.log.warning("Found failed test: %s", test_result)
if '***Timeout' not in test_result:
faulty_tests.append(test_result)
if faulty_tests:
raise EasyBuildError("Found tests failing due to something else than timeout: %s", faulty_tests)
def sanity_check_step(self):
"""Custom sanity check for DIRAC."""
custom_paths = {
'files': ['bin/pam-dirac'],
'dirs': ['share/dirac'],
}
super(EB_DIRAC, self).sanity_check_step(custom_paths=custom_paths)
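# A quick sanity check of the summary-line matching used in test_step()
# above (plain Python, independent of EasyBuild; the CTest output lines are
# representative examples, not captured from a real run):
#   >>> import re
#   >>> passed_regex = re.compile('^(9|10)[0-9.]+% tests passed', re.M)
#   >>> bool(passed_regex.search("98% tests passed, 3 tests failed out of 139"))
#   True
#   >>> bool(passed_regex.search("85% tests passed, 21 tests failed out of 139"))
#   False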
|
valtandor/easybuild-easyblocks
|
easybuild/easyblocks/d/dirac.py
|
Python
|
gpl-2.0
| 5,751
|
[
"DIRAC"
] |
5d5b98998ddc75160a1e02fdfb605f59399d41b5b5eb99849d81829454ed92ab
|
# $Id: importsym.py 5148 2015-08-06 06:37:49Z ming $
#
# importsym.py: Import C symbol decls (structs, enums, etc) and write them
# to another file
#
# Copyright (C)2013 Teluu Inc. (http://www.teluu.com)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import pycparser
from pycparser import c_generator
import sys
import os
def which(program):
import os
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
if sys.platform == 'win32' and not program.endswith(".exe"):
program += ".exe"
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
#
PJ_ROOT_PATH = "../../../"
# CPP is needed by pycparser.
CPP_PATH = which("cpp")
if not CPP_PATH:
print 'Error: need to have cpp in PATH'
sys.exit(1)
# Hardcoded!
if sys.platform == 'win32':
PYCPARSER_DIR="C:/devs/tools/pycparser"
elif sys.platform == "linux2":
PYCPARSER_DIR="/home/bennylp/Desktop/opt/src/pycparser-master"
else:
PYCPARSER_DIR="/Library/Python/2.7/site-packages/pycparser"
if not os.path.exists(PYCPARSER_DIR + '/utils/fake_libc_include'):
print "Error: couldn't find pycparser utils in '%s'" % PYCPARSER_DIR
sys.exit(1)
# Heading, to be placed before the source files
C_HEADING_SECTION = """
#define PJ_AUTOCONF 1
#define jmp_buf int
#define __attribute__(x)
"""
# CPP (C preprocessor) settings
CPP_CFLAGS = [
'-I' + PYCPARSER_DIR + '/utils/fake_libc_include',
"-I" + PJ_ROOT_PATH + "pjlib/include",
"-I" + PJ_ROOT_PATH + "pjlib-util/include",
"-I" + PJ_ROOT_PATH + "pjnath/include",
"-I" + PJ_ROOT_PATH + "pjmedia/include",
"-I" + PJ_ROOT_PATH + "pjsip/include"
]
class SymbolVisitor(pycparser.c_ast.NodeVisitor):
def __init__(self, names):
self.nodeDict = {}
for name in names:
self.nodeDict[name] = None
def _add(self, node):
if self.nodeDict.has_key(node.name):
self.nodeDict[node.name] = node
def visit_Struct(self, node):
self._add(node)
def visit_Enum(self, node):
self._add(node)
def visit_Typename(self, node):
self._add(node)
def visit_Typedef(self, node):
self._add(node)
TEMP_FILE="tmpsrc.h"
class SymbolImporter:
"""
Import C selected declarations from C source file and move it
to another file.
Parameters:
- listfile Path of file containing list of C source file
and identifier names to be imported. The format
of the listfile is:
filename name1 name2 name3
for example:
pj/sock_qos.h pj_qos_type pj_qos_flag
pj/types.h pj_status_t PJ_SUCCESS
"""
def __init__(self):
pass
def process(self, listfile, outfile):
# Read listfile
f = open(listfile)
lines = f.readlines()
f.close()
# Process each line in list file, while generating the
# temporary C file to be processed by pycparser
f = open(TEMP_FILE, "w")
f.write(C_HEADING_SECTION)
names = []
fcnt = 0
for line in lines:
spec = line.split()
if len(spec) < 2:
continue
fcnt += 1
f.write("#include <%s>\n" % spec[0])
names.extend(spec[1:])
f.close()
print 'Parsing %d symbols from %d files..' % (len(names), fcnt)
# Parse the temporary C file
ast = pycparser.parse_file(TEMP_FILE, use_cpp=True, cpp_path=CPP_PATH, cpp_args=CPP_CFLAGS)
os.remove(TEMP_FILE)
# Filter the declarations that we wanted
print 'Filtering..'
visitor = SymbolVisitor(names)
visitor.visit(ast)
# Print symbol declarations to outfile
print 'Writing declarations..'
f = open(outfile, 'w')
f.write("// This file is autogenerated by importsym script, do not modify!\n\n")
gen = pycparser.c_generator.CGenerator()
for name in names:
node = visitor.nodeDict[name]
if not node:
print " ** Warning: declaration for '%s' is not found **" % k
else:
print " writing '%s'.." % name
output = gen.visit(node) + ";\n\n"
f.write(output)
f.close()
print "Done."
if __name__ == "__main__":
print "Importing symbols: 'symbols.lst' --> 'symbols.i'"
si = SymbolImporter()
si.process("symbols.lst", "symbols.i")
try:
os.remove("lextab.py")
except OSError:
pass
try:
os.remove("yacctab.py")
except OSError:
pass
|
StrikeForceZero/PJSip-CSharp
|
pjsip-apps/src/swig/importsym.py
|
Python
|
gpl-2.0
| 5,109
|
[
"VisIt"
] |
868cfe009cbb37e83a51ca0db8afa1d23ebcb200087f83712a668b693321198d
|
# -*- coding: utf-8 -*-
#
# gPrime - A web-based genealogy program
#
# Copyright (C) 2003-2007 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Slovak-specific classes for relationships.
"""
#-------------------------------------------------------------------------
#
# Gprime modules
#
#-------------------------------------------------------------------------
from gprime.lib import Person
import gprime.relationship
#-------------------------------------------------------------------------
#
#GRAMPS 3.x - Slovak-specific terms by Lubo Vasko
#
#-------------------------------------------------------------------------
# depth used for labelling / determining relationships from
# generation to generation,
_level_name = [ "prvého", "druhého", "tretieho", "štvrtého", "piateho", "šiesteho",
"siedmeho", "ôsmeho", "deviateho", "desiateho", "jedenásteho", "dvanásteho",
"trinásteho", "štrnásteho", "pätnásteho", "šestnásteho",
"sedemnásteho", "osemnásteho", "devätnásteho", "dvadsiateho", "dvadsiatehoprvého", "dvadsiatehodruhého",
"dvadsiatehotretieho", "dvadsiatehoštvrtého","dvadsiatehopiateho","dvadsiatehošiesteho","dvadsiatehosiedmeho",
"dvadsiatehoôsmeho","dvadsiatehodeviateho","tridsiateho" ]
# distant relatives
_removed_level = [ "prvého", "druhého", "tretieho", "štvrtého", "piateho", "šiesteho",
"siedmeho", "ôsmeho", "deviateho", "desiateho", "jedenásteho", "dvanásteho",
"trinásteho", "štrnásteho", "pätnásteho", "šestnásteho",
"sedemnásteho", "osemnásteho", "devätnásteho", "dvadsiateho", "dvadsiatehoprvého", "dvadsiatehodruhého",
"dvadsiatehotretieho", "dvadsiatehoštvrtého","dvadsiatehopiateho","dvadsiatehošiesteho","dvadsiatehosiedmeho",
"dvadsiatehoôsmeho","dvadsiatehodeviateho","tridsiateho" ]
# small lists, use generation level if > [5]
_father_level = [ "", "otec%s", "starý otec%s", "prastarý otec%s", "prapredok%s", ]
_mother_level = [ "", "matka%s", "stará matka%s",
"prastará matka%s", "prapredok%s", ]
_son_level = [ "", "syn%s", "vnuk%s", "pravnuk%s", ]
_daughter_level = [ "", "dcéra%s", "vnučka%s",
"pravnučka%s", ]
_sister_level = [ "", "sestra%s", "teta%s", "prateta%s", "praprateta%s", ]
_brother_level = [ "", "brat%s", "strýko%s", "prastrýko%s", "praprastrýko%s", ]
_nephew_level = [ "", "synovec%s", "prasynovec%s", "praprasynovec%s", ]
_niece_level = [ "", "neter%s", "praneter%s", "prapraneter%s", ]
# kinship report
_parents_level = [ "", "rodičia", "starí rodičia",
"prastarí rodičia", "predkovia", ]
_children_level = [ "", "deti", "vnúčatá",
"pravnúčatá",
"pra-pravnúčatá", ]
_siblings_level = [ "", "bratia a sestry",
"strýkovia a tety",
"prastrýkovia a pratety",
"pra-prastrýkovia a pra-pratety",
]
_nephews_nieces_level = [ "", "synovci a netere",
"prasynovci a pranetere",
"pra-prasynovci a pra-pranetere",
]
#-------------------------------------------------------------------------
#
#
#
#-------------------------------------------------------------------------
class RelationshipCalculator(gprime.relationship.RelationshipCalculator):
"""
RelationshipCalculator Class
"""
INLAW = ' (m. zväzok)'
def __init__(self):
        gprime.relationship.RelationshipCalculator.__init__(self)
    # from the active person relative to the common ancestor, Ga=[level]
    # used for evaluating relationships
def get_cousin(self, level, removed, dir = '', inlaw=''):
if removed == 0 and level < len(_level_name):
return "bratranec %s %sstupňa" % (_removed_level[level-1],
inlaw)
        elif level < removed:
            return self.get_uncle(level-1, inlaw)
else:
# limitation gen = 29
return "vzdialený bratranec, spojený s %s generáciou" % (
_level_name[removed])
def get_cousine(self, level, removed, dir = '', inlaw=''):
if removed == 0 and level < len(_level_name):
return "sesternica %s %sstupňa" % (_level_name[level-1],
inlaw)
        elif level < removed:
            return self.get_aunt(level-1, inlaw)
else:
return "vzdialená sesternica, spojená s %s generáciou" % (
_level_name[removed])
def get_parents(self, level):
if level > len(_parents_level)-1:
return "vzdialení predkovia z %s generácie" % (
_level_name[level])
else:
return _parents_level[level]
def get_father(self, level, inlaw=''):
if level > len(_father_level)-1:
return "vzdialený predok z %s generácie" % (
_level_name[level])
else:
return _father_level[level] % inlaw
def get_mother(self, level, inlaw=''):
if level > len(_mother_level)-1:
return "vzdialený príbuzný, predok z %s generácie" % (
_level_name[level])
else:
return _mother_level[level] % inlaw
def get_parent_unknown(self, level, inlaw=''):
if level > len(_level_name)-1:
return "vzdialený príbuzný, predok z %s generácie" % (
_level_name[level])
else:
return "vzdialený príbuzný%s" % (inlaw)
def get_son(self, level, inlaw=''):
if level > len(_son_level)-1:
return "vzdialený potomok z %s generácie" % (
_level_name[level+1])
else:
return _son_level[level] % (inlaw)
def get_daughter(self, level, inlaw=''):
if level > len(_daughter_level)-1:
return "vzdialený potomok z %s generácie" % (
_level_name[level+1])
else:
return _daughter_level[level] % (inlaw)
def get_child_unknown(self, level, inlaw=''):
if level > len(_level_name)-1:
return "vzdialený potomok z %s generácie" % (
_level_name[level+1])
else:
return "vzdialený potomok%s" % (inlaw)
def get_sibling_unknown(self, level, inlaw=''):
return "vzdialený príbuzný%s" % (inlaw)
def get_uncle(self, level, inlaw=''):
if level > len(_brother_level)-1:
return "vzdialený strýko z %s generácie" % (
_level_name[level])
else:
return _brother_level[level] % (inlaw)
def get_aunt(self, level, inlaw=''):
if level > len(_sister_level)-1:
return "vzdialená teta z %s generácie" % (
_level_name[level])
else:
return _sister_level[level] % (inlaw)
def get_nephew(self, level, inlaw=''):
if level > len(_nephew_level)-1:
return "vzdialený synovec z %s generácie" % (
_level_name[level])
else:
return _nephew_level[level] % (inlaw)
def get_niece(self, level, inlaw=''):
if level > len(_niece_level)-1:
return "vzdialená neter z %s generácie" % (
_level_name[level])
else:
return _niece_level[level] % (inlaw)
# kinship report
def get_plural_relationship_string(self, Ga, Gb,
reltocommon_a='', reltocommon_b='',
only_birth=True,
in_law_a=False, in_law_b=False):
"""
see relationship.py
"""
rel_str = "vzdialení príbuzní"
gen = " z %s-ej generácie"
bygen = " na %-u generáciu"
cmt = " (bratia alebo sestry predka" + gen % (
Ga) + ")"
if Ga == 0:
# These are descendants
if Gb < len(_children_level):
rel_str = _children_level[Gb]
else:
rel_str = "potomkovia" + gen % (
Gb+1)
elif Gb == 0:
# These are parents/grand parents
if Ga < len(_parents_level):
rel_str = _parents_level[Ga]
else:
rel_str = "predkovia" + gen % (
Ga+1)
elif Gb == 1:
# These are siblings/aunts/uncles
if Ga < len(_siblings_level):
rel_str = _siblings_level[Ga]
else:
rel_str = "deti predka" + gen % (
Ga+1) + cmt
elif Ga == 1:
# These are nieces/nephews
if Gb < len(_nephews_nieces_level):
rel_str = _nephews_nieces_level[Gb-1]
else:
rel_str = "synovci a netere" + gen % (
Gb)
elif Ga > 1 and Ga == Gb:
# These are cousins in the same generation
# use custom level for latin words
if Ga == 2:
rel_str = "vlastní bratranci a sesternice"
elif Ga <= len(_level_name):
# %ss for plural
rel_str = " %ss bratranci a sesternice" % _level_name[Ga-2]
# security
else:
rel_str = "bratranci a sesternice"
elif Ga > 1 and Ga > Gb:
# These are cousins in different generations with the second person
# being in a higher generation from the common ancestor than the
# first person.
# use custom level for latin words and specific relation
if Ga == 3 and Gb == 2:
desc = " (vlastní bratranci niektorého z rodičov)"
rel_str = "strýkovia a tety z ďalšieho kolena" + desc
elif Gb <= len(_level_name) and (Ga-Gb) < len(_removed_level) and (Ga+Gb+1) < len(_removed_level):
can = " z %s do %s stupňa (kan.)" % (
_removed_level[Gb], _removed_level[Ga] )
civ = " a do %s stupňa (civ.)" % ( _removed_level[Ga+Gb+1] )
rel_str = "strýkovia a tety" + can + civ
elif Ga < len(_level_name):
rel_str = "prastrýkovia a pratety" + bygen % (
Ga+1)
elif Gb > 1 and Gb > Ga:
# These are cousins in different generations with the second person
# being in a lower generation from the common ancestor than the
# first person.
# use custom level for latin words and specific relation
if Ga == 2 and Gb == 3:
info = " (potomok bratranca-sesternice)"
rel_str = "synovci a netere z ďalšieho kolena" + info
elif Ga <= len(_level_name) and (Gb-Ga) < len(_removed_level) and (Ga+Gb+1) < len(_removed_level):
can = " z %s do %s stupňa (kan.)" % (
_removed_level[Gb], _removed_level[Ga] )
civ = " a do %s stupňa (civ.)" % ( _removed_level[Ga+Gb+1] )
rel_str = "synovci a netere" + can + civ
elif Ga < len(_level_name):
rel_str = "synovci a netere" + bygen % (
Gb)
if in_law_b == True:
# TODO: Translate this!
rel_str = "spouses of %s" % rel_str
return rel_str
# quick report (missing on RelCalc tool - Status Bar)
def get_single_relationship_string(self, Ga, Gb, gender_a, gender_b,
reltocommon_a, reltocommon_b,
only_birth=True,
in_law_a=False, in_law_b=False):
"""
see relationship.py
"""
if only_birth:
step = ''
else:
step = self.STEP
if in_law_a or in_law_b :
inlaw = self.INLAW
else:
inlaw = ''
rel_str = "vzdialený príbuznýs%s" % (inlaw)
bygen = " z %s generácie"
if Ga == 0:
# b is descendant of a
if Gb == 0 :
rel_str = 'tá istá osoba'
elif gender_b == Person.MALE and Gb < len(_son_level):
# spouse of daughter
if inlaw and Gb == 1 and not step:
rel_str = "zať"
else:
rel_str = self.get_son(Gb)
elif gender_b == Person.FEMALE and Gb < len(_daughter_level):
# spouse of son
if inlaw and Gb == 1 and not step:
rel_str = "nevesta"
else:
rel_str = self.get_daughter(Gb)
# don't display inlaw
elif Gb < len(_level_name) and gender_b == Person.MALE:
rel_str = "vzdialený potomok (%d generácia)" % (
Gb+1)
elif Gb < len(_level_name) and gender_b == Person.FEMALE:
rel_str = "vzdialený potomok(žena) (%d generácia)" % (
Gb+1)
else:
return self.get_child_unknown(Gb)
elif Gb == 0:
# b is parents/grand parent of a
if gender_b == Person.MALE and Ga < len(_father_level):
# other spouse of father (new parent)
if Ga == 1 and inlaw and self.STEP_SIB:
rel_str = "svokor"
# father of spouse (family of spouse)
elif Ga == 1 and inlaw:
rel_str = "otec partnera"
else:
rel_str = self.get_father(Ga, inlaw)
elif gender_b == Person.FEMALE and Ga < len(_mother_level):
# other spouse of mother (new parent)
if Ga == 1 and inlaw and self.STEP_SIB:
rel_str = "svokra"
# mother of spouse (family of spouse)
elif Ga == 1 and inlaw:
rel_str = "matka partnera"
else:
rel_str = self.get_mother(Ga, inlaw)
elif Ga < len(_level_name) and gender_b == Person.MALE:
rel_str = "vzdialený predok%s (%d generácia)" % (
inlaw, Ga+1)
elif Ga < len(_level_name) and gender_b == Person.FEMALE:
rel_str = "vzdialený predok(žena)%s (%d generácia)" % (
inlaw, Ga+1)
else:
return self.get_parent_unknown(Ga, inlaw)
elif Gb == 1:
# b is sibling/aunt/uncle of a
if gender_b == Person.MALE and Ga < len(_brother_level):
rel_str = self.get_uncle(Ga, inlaw)
elif gender_b == Person.FEMALE and Ga < len(_sister_level):
rel_str = self.get_aunt(Ga, inlaw)
else:
# don't display inlaw
if gender_b == Person.MALE:
rel_str = "vzdialený strýko" + bygen % (
Ga+1)
elif gender_b == Person.FEMALE:
rel_str = "vzdialená teta" + bygen % (
Ga+1)
elif gender_b == Person.UNKNOWN:
rel_str = self.get_sibling_unknown(Ga, inlaw)
else:
return rel_str
elif Ga == 1:
# b is niece/nephew of a
if gender_b == Person.MALE and Gb < len(_nephew_level):
rel_str = self.get_nephew(Gb-1, inlaw)
elif gender_b == Person.FEMALE and Gb < len(_niece_level):
rel_str = self.get_niece(Gb-1, inlaw)
else:
if gender_b == Person.MALE:
rel_str = "vzdialený synovec%s (%d generácia)" % (
inlaw, Gb)
elif gender_b == Person.FEMALE:
rel_str = "vzdialená neter%s (%d generácia)" % (
inlaw, Gb)
elif gender_b == Person.UNKNOWN:
rel_str = self.get_sibling_unknown(Ga, inlaw)
else:
return rel_str
elif Ga == Gb:
# a and b cousins in the same generation
if gender_b == Person.MALE:
rel_str = self.get_cousin(Ga-1, 0, dir = '',
inlaw=inlaw)
elif gender_b == Person.FEMALE:
rel_str = self.get_cousine(Ga-1, 0, dir = '',
inlaw=inlaw)
elif gender_b == Person.UNKNOWN:
rel_str = self.get_sibling_unknown(Ga-1, inlaw)
else:
return rel_str
elif Ga > 1 and Ga > Gb:
# These are cousins in different generations with the second person
# being in a higher generation from the common ancestor than the
# first person.
if Ga == 3 and Gb == 2:
if gender_b == Person.MALE:
desc = " (bratranec niektorého z rodičov)"
rel_str = "strýko z druhého kolena" + desc
elif gender_b == Person.FEMALE:
desc = " (sesternica niektorého z rodičov)"
rel_str = "teta z druhého kolena" + desc
elif gender_b == Person.UNKNOWN:
return self.get_sibling_unknown(Ga, inlaw)
else:
return rel_str
elif Gb <= len(_level_name) and (Ga-Gb) < len(_removed_level) and (Ga+Gb+1) < len(_removed_level):
can = " z %s do %s stupňa (kan.)" % (
_removed_level[Gb], _removed_level[Ga] )
civ = " a do %s stupňa (civ.)" % ( _removed_level[Ga+Gb+1] )
if gender_b == Person.MALE:
rel_str = "strýko" + can + civ
elif gender_b == Person.FEMALE:
rel_str = "teta" + can + civ
elif gender_b == Person.UNKNOWN:
rel_str = self.get_sibling_unknown(Ga, inlaw)
else:
return rel_str
else:
if gender_b == Person.MALE:
rel_str = self.get_uncle(Ga, inlaw)
elif gender_b == Person.FEMALE:
rel_str = self.get_aunt(Ga, inlaw)
elif gender_b == Person.UNKNOWN:
rel_str = self.get_sibling_unknown(Ga, inlaw)
else:
return rel_str
elif Gb > 1 and Gb > Ga:
# These are cousins in different generations with the second person
# being in a lower generation from the common ancestor than the
# first person.
if Ga == 2 and Gb == 3:
info = " (potomok bratranca/sesternice)"
if gender_b == Person.MALE:
rel_str = "synovec z druhého kolena" + info
elif gender_b == Person.FEMALE:
rel_str = "neter z druhého kolena" + info
elif gender_b == Person.UNKNOWN:
rel_str = self.get_sibling_unknown(Ga, inlaw)
else:
return rel_str
elif Ga <= len(_level_name) and (Gb-Ga) < len(_removed_level) and (Ga+Gb+1) < len(_removed_level):
can = " z %s do %s stupňa (kan.)" % (
_removed_level[Gb], _removed_level[Ga] )
civ = " a do %s stupňa (civ.)" % ( _removed_level[Ga+Gb+1] )
if gender_b == Person.MALE:
rel_str = "synovec" + can + civ
                elif gender_b == Person.FEMALE:
rel_str = "neter" + can + civ
elif gender_b == Person.UNKNOWN:
rel_str = self.get_sibling_unknown(Ga, inlaw)
else:
return rel_str
elif Ga > len(_level_name):
return rel_str
else:
if gender_b == Person.MALE:
rel_str = self.get_nephew(Ga, inlaw)
elif gender_b ==Person.FEMALE:
rel_str = self.get_niece(Ga, inlaw)
elif gender_b == Person.UNKNOWN:
rel_str = self.get_sibling_unknown(Ga, inlaw)
else:
return rel_str
return rel_str
# RelCalc tool - Status Bar
def get_sibling_relationship_string(self, sib_type, gender_a, gender_b,
in_law_a=False, in_law_b=False):
if in_law_a or in_law_b :
inlaw = self.INLAW
else:
inlaw = ''
if sib_type == self.NORM_SIB:
if not inlaw:
if gender_b == Person.MALE:
rel_str = 'brat (vlastný)'
elif gender_b == Person.FEMALE:
rel_str = 'sestra (vlastná)'
else:
rel_str = 'vlastný brat alebo sestra'
else:
if gender_b == Person.MALE:
rel_str = "švagor"
elif gender_b == Person.FEMALE:
rel_str = "švagriná"
else:
rel_str = "švagor alebo švagriná"
elif sib_type == self.UNKNOWN_SIB:
if not inlaw:
if gender_b == Person.MALE:
rel_str = 'brat'
elif gender_b == Person.FEMALE:
rel_str = 'sestra'
else:
rel_str = 'brat alebo sestra'
else:
if gender_b == Person.MALE:
rel_str = "švagor"
elif gender_b == Person.FEMALE:
rel_str = "švagriná"
else:
rel_str = "švagor alebo švagriná"
        # selection label: common father, reversed
elif sib_type == self.HALF_SIB_MOTHER:
if gender_b == Person.MALE:
rel_str = "nevlastný brat -spoloč.otec"
elif gender_b == Person.FEMALE:
rel_str = "nevlastná sestra -spoloč.otec"
else:
rel_str = "nevlastný brat alebo sestra -spoloč.otec"
        # selection label: common mother, reversed
elif sib_type == self.HALF_SIB_FATHER:
if gender_b == Person.MALE:
rel_str = "nevlastný brat -spoloč.matka"
elif gender_b == Person.FEMALE:
rel_str = "nevlastná sestra -spoloč.matka"
else:
rel_str = "nevlastný brat alebo sestra -spoloč.matka"
elif sib_type == self.STEP_SIB:
if gender_b == Person.MALE:
rel_str = "nevlastný brat"
elif gender_b == Person.FEMALE:
rel_str = "nevlastná sestra"
else:
rel_str = "nevlastný brat alebo sestra"
return rel_str
if __name__ == "__main__":
# Test function. Call it as follows from the command line (so as to find
# imported modules):
# export PYTHONPATH=/path/to/gramps/src
# python src/plugins/rel/rel_sk.py
# (Above not needed here)
"""TRANSLATORS, copy this if statement at the bottom of your
rel_xx.py module, and test your work with:
python src/plugins/rel/rel_xx.py
"""
from gprime.relationship import test
RC = RelationshipCalculator()
test(RC, True)
|
sam-m888/gprime
|
gprime/plugins/rel/rel_sk.py
|
Python
|
gpl-2.0
| 24,614
|
[
"Brian"
] |
dd773bf0f274203e61d42015b26ac00bdc59c00a6a25fe8ab31b0bd0296fe703
|
# Orca
#
# Copyright 2004-2008 Sun Microsystems Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2004-2008 Sun Microsystems Inc."
__license__ = "LGPL"
# Whether we speak spread sheet cell coordinates as the user moves around.
#
speakSpreadsheetCoordinates = True
# Whether or not to use the structural navigation commands (e.g. H
# for heading, T for table, and so on). At the moment, only table
# navigation commands will be enabled.
#
structuralNavigationEnabled = True
|
h4ck3rm1k3/orca-sonar
|
src/orca/scripts/apps/soffice/script_settings.py
|
Python
|
lgpl-2.1
| 1,269
|
[
"ORCA"
] |
20218ab9751dbaa0109c772ab8d01f729252cc30f7f66e87bfdc39e2d0f43a89
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) Philipp Wagner. All rights reserved.
# Licensed under the BSD license. See LICENSE file in the project root for full license information.
import numpy as np
from scipy import ndimage
import os
import sys
sys.path.append("../..")
# try to import the PIL Image
try:
from PIL import Image
except ImportError:
import Image
import matplotlib.pyplot as plt
import textwrap
import logging
from facerec.feature import SpatialHistogram
from facerec.distance import ChiSquareDistance
from facerec.classifier import NearestNeighbor
from facerec.model import PredictableModel
from facerec.lbp import LPQ, ExtendedLBP
from facerec.validation import SimpleValidation, precision
from facerec.util import shuffle_array
EXPERIMENT_NAME = "LocalPhaseQuantizationExperiment"
# ITER_MAX is the number of experimental runs, as described in the
# original paper. For testing purposes, it was set to 1, but it
# should be set to a higher value to get at least a little confidence
# in the results.
ITER_MAX = 1
class FileNameFilter:
"""
Base class used for filtering files.
"""
def __init__(self, name):
self._name = name
def __call__(self, filename):
return True
def __repr__(self):
return "FileNameFilter (name=%s)" % (self._name)
class YaleBaseFilter(FileNameFilter):
"""
This Filter filters files, based on their filetype ending (.pgm) and
their azimuth and elevation. The higher the angle, the more shadows in
the face. This is useful for experiments with illumination and
preprocessing.
"""
def __init__(self, min_azimuth, max_azimuth, min_elevation, max_elevation):
FileNameFilter.__init__(self, "Filter YaleFDB Subset1")
self._min_azimuth = min_azimuth
self._max_azimuth = max_azimuth
self._min_elevation = min_elevation
self._max_elevation = max_elevation
def __call__(self, filename):
# We only want the PGM files:
filetype = filename[-4:]
if filetype != ".pgm":
return False
# There are "Ambient" PGM files, ignore them:
if "Ambient" in filename:
return False
azimuth = abs(int(filename[12:16]))
elevation = abs(int(filename[17:20]))
# Now filter based on angles:
if azimuth < self._min_azimuth or azimuth > self._max_azimuth:
return False
if elevation < self._min_elevation or elevation > self._max_elevation:
return False
return True
def __repr__(self):
return "Yale FDB Filter (min_azimuth=%s, max_azimuth=%s, min_elevation=%s, max_elevation=%s)" % (min_azimuth, max_azimuth, min_elevation, max_elevation)
def read_images(path, fileNameFilter=FileNameFilter("None"), sz=None):
"""Reads the images in a given folder, resizes images on the fly if size is given.
Args:
path: Path to a folder with subfolders representing the subjects (persons).
sz: A tuple with the size Resizes
Returns:
A list [X,y]
X: The images, which is a Python list of numpy arrays.
y: The corresponding labels (the unique number of the subject, person) in a Python list.
"""
c = 0
X,y = [], []
for dirname, dirnames, filenames in os.walk(path):
for subdirname in dirnames:
subject_path = os.path.join(dirname, subdirname)
for filename in os.listdir(subject_path):
if fileNameFilter(filename):
try:
im = Image.open(os.path.join(subject_path, filename))
im = im.convert("L")
# resize to given size (if given)
if (sz is not None):
im = im.resize(sz, Image.ANTIALIAS)
X.append(np.asarray(im, dtype=np.uint8))
y.append(c)
except IOError, (errno, strerror):
print "I/O error({0}): {1}".format(errno, strerror)
except:
print "Unexpected error:", sys.exc_info()[0]
raise
c = c+1
return [X,y]
def apply_gaussian(X, sigma):
"""A simple function to apply a Gaussian Blur on each image in X.
Args:
X: A list of images.
sigma: sigma to apply
Returns:
Y: The processed images
"""
return np.array([ndimage.gaussian_filter(x, sigma) for x in X])
def results_to_list(validation_results):
return [precision(result.true_positives,result.false_positives) for result in validation_results]
def partition_data(X, y):
"""
Shuffles the input data and splits it into a new set of images. This resembles the experimental setup
used in the paper on the Local Phase Quantization descriptor in:
"Recognition of Blurred Faces Using Local Phase Quantization", Timo Ahonen, Esa Rahtu, Ville Ojansivu, Janne Heikkila
What it does is to build a subset for each class, so it has 1 image for training and the rest for testing.
The original dataset is shuffled for each call, hence you always get a new partitioning.
"""
Xs,ys = shuffle_array(X,y)
# Maps index to class:
mapping = {}
for i in xrange(len(y)):
yi = ys[i]
try:
mapping[yi].append(i)
except KeyError:
mapping[yi] = [i]
# Get one image for each subject:
Xtrain, ytrain = [], []
Xtest, ytest = [], []
# Finally build partition:
for key, indices in mapping.iteritems():
# Add images:
Xtrain.extend([ Xs[i] for i in indices[:1] ])
ytrain.extend([ ys[i] for i in indices[:1] ])
Xtest.extend([ Xs[i] for i in indices[1:20]])
ytest.extend([ ys[i] for i in indices[1:20]])
# Return shuffled partitions:
return Xtrain, ytrain, Xtest, ytest
class ModelWrapper:
    def __init__(self, model):
self.model = model
self.result = []
if __name__ == "__main__":
# This is where we write the results to, if an output_dir is given
# in command line:
out_dir = None
# You'll need at least a path to your image data, please see
# the tutorial coming with this source code on how to prepare
# your image data:
if len(sys.argv) < 2:
print "USAGE: lpq_experiment.py </path/to/images>"
sys.exit()
# Define filters for the Dataset:
yale_subset_0_40 = YaleBaseFilter(0, 40, 0, 40)
# Now read in the image data. Apply filters, scale to 128 x 128 pixel:
[X,y] = read_images(sys.argv[1], yale_subset_0_40, sz=(64,64))
# Set up a handler for logging:
handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# Add handler to facerec modules, so we see what's going on inside:
logger = logging.getLogger("facerec")
logger.addHandler(handler)
logger.setLevel(logging.INFO)
# The models we want to evaluate:
model0 = PredictableModel(feature=SpatialHistogram(lbp_operator=ExtendedLBP()), classifier=NearestNeighbor(dist_metric=ChiSquareDistance(), k=1))
model1 = PredictableModel(feature=SpatialHistogram(lbp_operator=LPQ()), classifier=NearestNeighbor(dist_metric=ChiSquareDistance(), k=1))
# The sigmas we'll apply for each run:
sigmas = [0]
print 'The experiment will be run %s times!' % ITER_MAX
# Initialize experiments (with empty results):
experiments = {}
experiments['lbp_model'] = { 'model': model0, 'results' : {}, 'color' : 'r', 'linestyle' : '--', 'marker' : '*'}
experiments['lpq_model'] = { 'model': model1, 'results' : {}, 'color' : 'b', 'linestyle' : '--', 'marker' : 's'}
# Loop to acquire the results for each experiment:
for sigma in sigmas:
print "Setting sigma=%s" % sigma
for key, value in experiments.iteritems():
print 'Running experiment for model=%s' % key
# Define the validators for the model:
cv0 = SimpleValidation(value['model'])
for iteration in xrange(ITER_MAX):
print "Repeating experiment %s/%s." % (iteration + 1, ITER_MAX)
# Split dataset according to the papers description:
Xtrain, ytrain, Xtest, ytest = partition_data(X,y)
# Apply a gaussian blur on the images:
Xs = apply_gaussian(Xtest, sigma)
# Run each validator with the given data:
experiment_description = "%s (iteration=%s, sigma=%.2f)" % (EXPERIMENT_NAME, iteration, sigma)
cv0.validate(Xtrain, ytrain, Xs, ytest, experiment_description)
# Get overall results:
true_positives = sum([validation_result.true_positives for validation_result in cv0.validation_results])
false_positives = sum([validation_result.false_positives for validation_result in cv0.validation_results])
# Calculate overall precision:
prec = precision(true_positives,false_positives)
# Store the result:
print key
experiments[key]['results'][sigma] = prec
# Make a nice plot of this textual output:
fig = plt.figure()
# Holds the legend items:
plot_legend = []
# Add the Validation results:
for experiment_name, experiment_definition in experiments.iteritems():
        print experiment_name, experiment_definition
results = experiment_definition['results']
        (xvalues, yvalues) = zip(*sorted(results.iteritems()))
# Add to the legend:
plot_legend.append(experiment_name)
# Put the results into the plot:
plot_color = experiment_definition['color']
plot_linestyle = experiment_definition['linestyle']
plot_marker = experiment_definition['marker']
        plt.plot(xvalues, yvalues, linestyle=plot_linestyle, marker=plot_marker, color=plot_color)
# Put the legend below the plot (TODO):
plt.legend(plot_legend, prop={'size':6}, numpoints=1, loc='upper center', bbox_to_anchor=(0.5, -0.2), fancybox=True, shadow=True, ncol=1)
# Scale y-axis between 0,1 to see the Precision:
plt.ylim(0,1)
plt.xlim(-0.2, max(sigmas) + 1)
# Finally add the labels:
plt.title(EXPERIMENT_NAME)
plt.ylabel('Precision')
plt.xlabel('Sigma')
fig.subplots_adjust(bottom=0.5)
    # Save the figure and we are out of here!
plt.savefig("lpq_experiment.png", bbox_inches='tight',dpi=100)
|
dashmoment/facerecognition
|
py/apps/scripts/lpq_experiment.py
|
Python
|
bsd-3-clause
| 10,667
|
[
"Gaussian"
] |
cba7bdfd8de70a9ea82d4d582369e7327373c7cb4626049996b014aec8954967
|
#
# CPM modulation and demodulation.
#
#
# Copyright 2005,2006,2007 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
# See gnuradio-examples/python/digital for examples
from gnuradio import gr
from gnuradio import modulation_utils
from math import pi
import numpy
from pprint import pprint
import inspect
# default values (used in __init__ and add_options)
_def_samples_per_symbol = 2
_def_bits_per_symbol = 1
_def_h_numerator = 1
_def_h_denominator = 2
_def_cpm_type = 0 # 0=CPFSK, 1=GMSK, 2=RC, 3=GENERAL
_def_bt = 0.35
_def_symbols_per_pulse = 1
_def_generic_taps = numpy.empty(1)
_def_verbose = False
_def_log = False
# /////////////////////////////////////////////////////////////////////////////
# CPM modulator
# /////////////////////////////////////////////////////////////////////////////
class cpm_mod(gr.hier_block2):
def __init__(self,
samples_per_symbol=_def_samples_per_symbol,
bits_per_symbol=_def_bits_per_symbol,
h_numerator=_def_h_numerator,
h_denominator=_def_h_denominator,
cpm_type=_def_cpm_type,
bt=_def_bt,
symbols_per_pulse=_def_symbols_per_pulse,
generic_taps=_def_generic_taps,
verbose=_def_verbose,
log=_def_log):
"""
Hierarchical block for Continuous Phase
modulation.
The input is a byte stream (unsigned char)
representing packed bits and the
output is the complex modulated signal at baseband.
See Proakis for definition of generic CPM signals:
s(t)=exp(j phi(t))
phi(t)= 2 pi h int_0^t f(t') dt'
f(t)=sum_k a_k g(t-kT)
(normalizing assumption: int_0^infty g(t) dt = 1/2)
@param samples_per_symbol: samples per baud >= 2
@type samples_per_symbol: integer
@param bits_per_symbol: bits per symbol
@type bits_per_symbol: integer
@param h_numerator: numerator of modulation index
@type h_numerator: integer
@param h_denominator: denominator of modulation index (numerator and denominator must be relative primes)
@type h_denominator: integer
@param cpm_type: supported types are: 0=CPFSK, 1=GMSK, 2=RC, 3=GENERAL
@type cpm_type: integer
@param bt: bandwidth symbol time product for GMSK
@type bt: float
@param symbols_per_pulse: shaping pulse duration in symbols
@type symbols_per_pulse: integer
@param generic_taps: define a generic CPM pulse shape (sum = samples_per_symbol/2)
@type generic_taps: array of floats
@param verbose: Print information about modulator?
@type verbose: bool
        @param log: Print modulation data to files?
        @type log: bool
"""
gr.hier_block2.__init__("cpm_mod",
gr.io_signature(1, 1, gr.sizeof_char), # Input signature
gr.io_signature(1, 1, gr.sizeof_gr_complex)) # Output signature
self._samples_per_symbol = samples_per_symbol
self._bits_per_symbol = bits_per_symbol
self._h_numerator = h_numerator
self._h_denominator = h_denominator
self._cpm_type = cpm_type
self._bt=bt
if cpm_type == 0 or cpm_type == 2 or cpm_type == 3: # CPFSK, RC, Generic
self._symbols_per_pulse = symbols_per_pulse
elif cpm_type == 1: # GMSK
self._symbols_per_pulse = 4
else:
raise TypeError, ("cpm_type must be an integer in {0,1,2,3}, is %r" % (cpm_type,))
self._generic_taps=numpy.array(generic_taps)
if not isinstance(samples_per_symbol, int) or samples_per_symbol < 2:
raise TypeError, ("samples_per_symbol must be an integer >= 2, is %r" % (samples_per_symbol,))
self.nsymbols = 2**bits_per_symbol
self.sym_alphabet=numpy.arange(-(self.nsymbols-1),self.nsymbols,2)
self.ntaps = self._symbols_per_pulse * samples_per_symbol
sensitivity = 2 * pi * h_numerator / h_denominator / samples_per_symbol
# Unpack Bytes into bits_per_symbol groups
self.B2s = gr.packed_to_unpacked_bb(bits_per_symbol,gr.GR_MSB_FIRST)
# Turn it into symmetric PAM data.
self.pam = gr.chunks_to_symbols_bf(self.sym_alphabet,1)
# Generate pulse (sum of taps = samples_per_symbol/2)
if cpm_type == 0: # CPFSK
self.taps= (1.0/self._symbols_per_pulse/2,) * self.ntaps
elif cpm_type == 1: # GMSK
gaussian_taps = gr.firdes.gaussian(
1.0/2, # gain
samples_per_symbol, # symbol_rate
bt, # bandwidth * symbol time
self.ntaps # number of taps
)
sqwave = (1,) * samples_per_symbol # rectangular window
self.taps = numpy.convolve(numpy.array(gaussian_taps),numpy.array(sqwave))
elif cpm_type == 2: # Raised Cosine
# generalize it for arbitrary roll-off factor
self.taps = (1-numpy.cos(2*pi*numpy.arange(0,self.ntaps)/samples_per_symbol/self._symbols_per_pulse))/(2*self._symbols_per_pulse)
elif cpm_type == 3: # Generic CPM
self.taps = generic_taps
else:
raise TypeError, ("cpm_type must be an integer in {0,1,2,3}, is %r" % (cpm_type,))
self.filter = gr.interp_fir_filter_fff(samples_per_symbol, self.taps)
# FM modulation
self.fmmod = gr.frequency_modulator_fc(sensitivity)
if verbose:
self._print_verbage()
if log:
self._setup_logging()
# Connect
self.connect(self, self.B2s, self.pam, self.filter, self.fmmod, self)
#def samples_per_symbol(self):
#return self._samples_per_symbol
#def bits_per_symbol(self):
#return self._bits_per_symbol
#def h_numerator(self):
#return self._h_numerator
#def h_denominator(self):
#return self._h_denominator
#def cpm_type(self):
#return self._cpm_type
#def bt(self):
#return self._bt
#def symbols_per_pulse(self):
#return self._symbols_per_pulse
def _print_verbage(self):
print "Samples per symbol = %d" % self._samples_per_symbol
print "Bits per symbol = %d" % self._bits_per_symbol
print "h = " , self._h_numerator , " / " , self._h_denominator
print "Symbol alphabet = " , self.sym_alphabet
print "Symbols per pulse = %d" % self._symbols_per_pulse
print "taps = " , self.taps
print "CPM type = %d" % self._cpm_type
if self._cpm_type == 1:
print "Gaussian filter BT = %.2f" % self._bt
def _setup_logging(self):
print "Modulation logging turned on."
self.connect(self.B2s,
gr.file_sink(gr.sizeof_float, "symbols.dat"))
self.connect(self.pam,
gr.file_sink(gr.sizeof_float, "pam.dat"))
self.connect(self.filter,
gr.file_sink(gr.sizeof_float, "filter.dat"))
self.connect(self.fmmod,
gr.file_sink(gr.sizeof_gr_complex, "fmmod.dat"))
def add_options(parser):
"""
Adds CPM modulation-specific options to the standard parser
"""
parser.add_option("", "--bt", type="float", default=_def_bt,
help="set bandwidth-time product [default=%default] (GMSK)")
add_options=staticmethod(add_options)
def extract_kwargs_from_options(options):
"""
Given command line options, create dictionary suitable for passing to __init__
"""
return modulation_utils.extract_kwargs_from_options(cpm_mod.__init__,
('self',), options)
extract_kwargs_from_options=staticmethod(extract_kwargs_from_options)
# /////////////////////////////////////////////////////////////////////////////
# CPM demodulator
# /////////////////////////////////////////////////////////////////////////////
#
# Not yet implemented
#
#
# Add these to the mod/demod registry
#
modulation_utils.add_type_1_mod('cpm', cpm_mod)
#modulation_utils.add_type_1_demod('cpm', cpm_demod)
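# A minimal usage sketch (not part of this file; assumes a GNU Radio 3.6-era
# environment where this blks2impl module is importable, with illustrative
# parameter values taken from the keyword arguments this class documents):
#
# from gnuradio import gr
# tb = gr.top_block()
# mod = cpm_mod(samples_per_symbol=4, bits_per_symbol=1, cpm_type=1, bt=0.3)
# src = gr.vector_source_b([0, 1, 1, 0] * 100)
# snk = gr.null_sink(gr.sizeof_gr_complex)
# tb.connect(src, mod, snk)
# tb.run()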
|
UpYou/relay
|
my_gnuradio/blks2impl/cpm.py
|
Python
|
gpl-3.0
| 8,851
|
[
"Gaussian"
] |
d5fed84ef220418afe4f24aabcb361617d3f3de18b9a242d61655825326b826d
|
# Twisted Imports
from twisted.internet import reactor, defer
from twisted.web.client import Agent
from twisted.web.http_headers import Headers
from twisted.internet.ssl import ClientContextFactory
from twisted.python import log
# System Imports
from urllib import urlencode
# Sibling Imports
import util as notifier_util
class WebClientContextFactory(ClientContextFactory):
def getContext(self, hostname, port):
return ClientContextFactory.getContext(self)
class ClockworkSMS (object):
def __init__ (self, api_key):
contextFactory = WebClientContextFactory()
self.agent = Agent(reactor, contextFactory)
self._api_key = api_key
def notify (self, destination, message):
destinations = destination.split(",")
if len(destinations) > 50:
log.msg("Max 50 SMS recipients allowed")
params = {
"key": self._api_key,
"to": destination,
"content": message.encode("utf_8", "replace")
}
uri = "https://api.clockworksms.com/http/send.aspx?{:s}"
d = self.agent.request(
"GET",
uri.format(urlencode(params)),
Headers({
'User-Agent': ['octopus'],
}),
None
)
def handle_response(response):
d = defer.Deferred()
response.deliverBody(notifier_util.SimpleReceiver(d))
return d
d.addCallback(handle_response)
return d
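# A minimal usage sketch (illustrative, not part of this module; assumes a
# running Twisted reactor and a placeholder API key):
#
# notifier = ClockworkSMS("YOUR_API_KEY")
# d = notifier.notify("447700900000", u"Experiment complete")
# d.addCallback(lambda body: log.msg("Clockwork response: %s" % body))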
|
rasata/octopus
|
octopus/notifier/sms.py
|
Python
|
mit
| 1,283
|
[
"Octopus"
] |
0f1955b39f22b7f83b2fb2e9cb70803af1de69bc7b7fe8cfee4e0ab41f817e4c
|
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This tests the scafacos p2nfft dipolar calculations by matching against
# reference data from direct summation. In 2d, reference data from the mdlc
# test case is used
import espressomd
import espressomd.magnetostatics as magnetostatics
import espressomd.magnetostatic_extensions as magnetostatic_extensions
import numpy as np
import unittest as ut
import unittest_decorators as utx
from tests_common import abspath
@utx.skipIfMissingFeatures(["DIPOLES", "FFTW"])
class Dipolar_p3m_mdlc_p2nfft(ut.TestCase):
"""Tests mdlc (2d) as well as dipolar p3m and dipolar p2nfft (3d) against
stored data. Validity of the stored data:
2d: as long as this test AND the scafacos_dipolar_1d_2d test passes, we are safe.
3d: as long as the independently written p3m and p2nfft agree, we are safe.
"""
s = espressomd.System(box_l=[1.0, 1.0, 1.0])
s.time_step = 0.01
s.cell_system.skin = .4
s.periodicity = [1, 1, 1]
s.thermostat.turn_off()
def test_mdlc(self):
s = self.s
s.part.clear()
rho = 0.3
# This is only for box size calculation. The actual particle number is
# lower, because particles are removed from the mdlc gap region
n_particle = 100
particle_radius = 0.5
box_l = pow(((4 * n_particle * np.pi) / (3 * rho)),
1.0 / 3.0) * particle_radius
s.box_l = 3 * [box_l]
with open(abspath("data/mdlc_reference_data_energy.dat")) as f:
ref_E = float(f.readline())
# Particles
data = np.genfromtxt(
abspath("data/mdlc_reference_data_forces_torques.dat"))
for p in data[:, :]:
s.part.add(id=int(p[0]), pos=p[1:4], dip=p[4:7])
s.part[:].rotation = (1, 1, 1)
p3m = magnetostatics.DipolarP3M(prefactor=1, mesh=32, accuracy=1E-4)
dlc = magnetostatic_extensions.DLC(maxPWerror=1E-5, gap_size=2.)
s.actors.add(p3m)
s.actors.add(dlc)
s.thermostat.turn_off()
s.integrator.run(0)
err_f = np.sum(np.linalg.norm(
s.part[:].f - data[:, 7:10], axis=1)) / np.sqrt(data.shape[0])
err_t = np.sum(np.linalg.norm(
s.part[:].torque_lab - data[:, 10:13], axis=1)) / np.sqrt(data.shape[0])
err_e = s.analysis.energy()["dipolar"] - ref_E
print("Energy difference", err_e)
print("Force difference", err_f)
print("Torque difference", err_t)
tol_f = 2E-3
tol_t = 2E-3
tol_e = 1E-3
self.assertLessEqual(abs(err_e), tol_e, "Energy difference too large")
self.assertLessEqual(abs(err_t), tol_t, "Torque difference too large")
self.assertLessEqual(abs(err_f), tol_f, "Force difference too large")
s.part.clear()
del s.actors[0]
del s.actors[0]
def test_p3m(self):
s = self.s
s.part.clear()
rho = 0.09
# This is only for box size calculation. The actual particle number is
# lower, because particles are removed from the mdlc gap region
n_particle = 1000
particle_radius = 1
box_l = pow(((4 * n_particle * np.pi) / (3 * rho)),
1.0 / 3.0) * particle_radius
s.box_l = 3 * [box_l]
# Particles
data = np.genfromtxt(abspath("data/p3m_magnetostatics_system.data"))
for p in data[:, :]:
s.part.add(id=int(p[0]), pos=p[1:4], dip=p[4:7])
s.part[:].rotation = (1, 1, 1)
p3m = magnetostatics.DipolarP3M(
prefactor=1, mesh=32, accuracy=1E-6, epsilon="metallic")
s.actors.add(p3m)
s.integrator.run(0)
expected = np.genfromtxt(
abspath("data/p3m_magnetostatics_expected.data"))[:, 1:]
err_f = np.sum(np.linalg.norm(
s.part[:].f - expected[:, 0:3], axis=1)) / np.sqrt(data.shape[0])
err_t = np.sum(np.linalg.norm(
s.part[:].torque_lab - expected[:, 3:6], axis=1)) / np.sqrt(data.shape[0])
ref_E = 5.570
err_e = s.analysis.energy()["dipolar"] - ref_E
print("Energy difference", err_e)
print("Force difference", err_f)
print("Torque difference", err_t)
tol_f = 2E-3
tol_t = 2E-3
tol_e = 1E-3
self.assertLessEqual(abs(err_e), tol_e, "Energy difference too large")
self.assertLessEqual(abs(err_t), tol_t, "Torque difference too large")
self.assertLessEqual(abs(err_f), tol_f, "Force difference too large")
s.part.clear()
del s.actors[0]
@utx.skipIfMissingFeatures("SCAFACOS_DIPOLES")
def test_scafacos_dipoles(self):
s = self.s
s.part.clear()
rho = 0.09
# This is only for box size calculation. The actual particle number is
# lower, because particles are removed from the mdlc gap region
n_particle = 1000
particle_radius = 1
box_l = pow(((4 * n_particle * np.pi) / (3 * rho)),
1.0 / 3.0) * particle_radius
s.box_l = 3 * [box_l]
# Particles
data = np.genfromtxt(abspath("data/p3m_magnetostatics_system.data"))
for p in data[:, :]:
s.part.add(id=int(p[0]), pos=p[1:4],
dip=p[4:7], rotation=(1, 1, 1))
scafacos = magnetostatics.Scafacos(
prefactor=1,
method_name="p2nfft",
method_params={
"p2nfft_verbose_tuning": 0,
"pnfft_N": "32,32,32",
"pnfft_n": "32,32,32",
"pnfft_window_name": "bspline",
"pnfft_m": "4",
"p2nfft_ignore_tolerance": "1",
"pnfft_diff_ik": "0",
"p2nfft_r_cut": "11",
"p2nfft_alpha": "0.31"})
s.actors.add(scafacos)
s.integrator.run(0)
expected = np.genfromtxt(
abspath("data/p3m_magnetostatics_expected.data"))[:, 1:]
err_f = np.sum(np.linalg.norm(
s.part[:].f - expected[:, 0:3], axis=1)) / np.sqrt(data.shape[0])
err_t = np.sum(np.linalg.norm(
s.part[:].torque_lab - expected[:, 3:6], axis=1)) / np.sqrt(data.shape[0])
ref_E = 5.570
err_e = s.analysis.energy()["dipolar"] - ref_E
print("Energy difference", err_e)
print("Force difference", err_f)
print("Torque difference", err_t)
tol_f = 2E-3
tol_t = 2E-3
tol_e = 1E-3
self.assertLessEqual(abs(err_e), tol_e, "Energy difference too large")
self.assertLessEqual(abs(err_t), tol_t, "Torque difference too large")
self.assertLessEqual(abs(err_f), tol_f, "Force difference too large")
s.part.clear()
del s.actors[0]
if __name__ == "__main__":
ut.main()
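# A minimal run sketch (illustrative; assumes an ESPResSo build with the
# DIPOLES and FFTW features, ESPResSo's pypresso launcher, and the data/
# reference files alongside the script):
#
# pypresso testsuite/python/dipolar_mdlc_p3m_scafacos_p2nfft.py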
|
KaiSzuttor/espresso
|
testsuite/python/dipolar_mdlc_p3m_scafacos_p2nfft.py
|
Python
|
gpl-3.0
| 7,493
|
[
"ESPResSo"
] |
32554cdbc6c629793b25159a250711fad3a4f38717e6ee9500dac74b3c103dba
|
#!/usr/bin/python2.4
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility to use a browser to visit multiple URLs.
Prerequisites:
1. The command_line package from tools/site_compare
2. Either the IE BHO or Firefox extension (or both)
Installation:
1. Build the IE BHO, or call regsvr32 on a prebuilt binary
2. Add a file called "measurepageloadtimeextension@google.com" to
the default Firefox profile directory under extensions, containing
the path to the Firefox extension root
Invoke with the command line arguments as documented within
the command line.
"""
import command_line
import pywintypes
import scrapers
import socket
import time
from drivers import windowing
# Constants
MAX_URL = 1024
PORT = 42492
def SetupIterationCommandLine(cmd):
"""Adds the necessary flags for iteration to a command.
Args:
cmd: an object created by command_line.AddCommand
"""
cmd.AddArgument(
["-b", "--browser"], "Browser to use (ie, firefox, chrome)",
type="string", required=True)
cmd.AddArgument(
["-b1v", "--browserver"], "Version of browser", metaname="VERSION")
cmd.AddArgument(
["-p", "--browserpath"], "Path to browser.",
type="string", required=False)
cmd.AddArgument(
["-u", "--url"], "URL to visit")
cmd.AddArgument(
["-l", "--list"], "File containing list of URLs to visit", type="readfile")
cmd.AddMutualExclusion(["--url", "--list"])
cmd.AddArgument(
["-s", "--startline"], "First line of URL list", type="int")
cmd.AddArgument(
["-e", "--endline"], "Last line of URL list (exclusive)", type="int")
cmd.AddArgument(
["-c", "--count"], "Number of lines of URL file to use", type="int")
cmd.AddDependency("--startline", "--list")
cmd.AddRequiredGroup(["--url", "--list"])
cmd.AddDependency("--endline", "--list")
cmd.AddDependency("--count", "--list")
cmd.AddMutualExclusion(["--count", "--endline"])
cmd.AddDependency("--count", "--startline")
cmd.AddArgument(
["-t", "--timeout"], "Amount of time (seconds) to wait for browser to "
"finish loading",
type="int", default=300)
cmd.AddArgument(
["-sz", "--size"], "Browser window size", default=(800, 600), type="coords")
def Iterate(command, iteration_func):
"""Iterates over a list of URLs, calling a function on each.
Args:
command: the command line containing the iteration flags
iteration_func: called for each URL with (url, proc, wnd, result)
"""
# Retrieve the browser scraper to use to invoke the browser
scraper = scrapers.GetScraper((command["--browser"], command["--browserver"]))
def AttachToBrowser(path, timeout):
"""Invoke the browser process and connect to the socket."""
(proc, frame, wnd) = scraper.GetBrowser(path)
if not wnd: raise ValueError("Could not invoke browser.")
# Try to connect the socket. If it fails, wait and try
# again. Do this for ten seconds
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)
for attempt in xrange(10):
try:
s.connect(("localhost", PORT))
except socket.error:
time.sleep(1)
continue
break
try:
s.getpeername()
except socket.error:
raise ValueError("Could not connect to browser")
if command["--size"]:
# Resize and reposition the frame
windowing.MoveAndSizeWindow(frame, (0, 0), command["--size"], wnd)
s.settimeout(timeout)
Iterate.proc = proc
Iterate.wnd = wnd
Iterate.s = s
def DetachFromBrowser():
"""Close the socket and kill the process if necessary."""
if Iterate.s:
Iterate.s.close()
Iterate.s = None
if Iterate.proc:
if not windowing.WaitForProcessExit(Iterate.proc, 0):
try:
windowing.EndProcess(Iterate.proc)
windowing.WaitForProcessExit(Iterate.proc, 0)
except pywintypes.error:
# Exception here most likely means the process died on its own
pass
Iterate.proc = None
if command["--browserpath"]:
browser = command["--browserpath"]
else:
browser = None
# Read the URLs from the file
if command["--url"]:
url_list = [command["--url"]]
else:
startline = command["--startline"]
if command["--count"]:
endline = startline+command["--count"]
else:
endline = command["--endline"]
url_list = []
file = open(command["--list"], "r")
for line in xrange(startline-1):
file.readline()
for line in xrange(endline-startline):
url_list.append(file.readline().strip())
timeout = command["--timeout"]
# Loop through the URLs and send them through the socket
Iterate.s = None
Iterate.proc = None
Iterate.wnd = None
for url in url_list:
# Invoke the browser if necessary
if not Iterate.proc:
AttachToBrowser(browser, timeout)
# Send the URL and wait for a response
Iterate.s.send(url + "\n")
response = ""
while (response.find("\n") < 0):
try:
recv = Iterate.s.recv(MAX_URL)
response = response + recv
# Workaround for an oddity: when Firefox closes
# gracefully, somehow Python doesn't detect it.
# (Telnet does)
if not recv:
raise socket.error
except socket.timeout:
response = url + ",hang\n"
DetachFromBrowser()
except socket.error:
# If there was a socket error, it's probably a crash
response = url + ",crash\n"
DetachFromBrowser()
# If we received a timeout response, restart the browser
if response[-9:] == ",timeout\n":
DetachFromBrowser()
# Invoke the iteration function
iteration_func(url, Iterate.proc, Iterate.wnd, response)
# We're done
DetachFromBrowser()
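# A minimal callback sketch (illustrative; `command` is assumed to come from a
# parser set up with SetupIterationCommandLine):
#
# def PrintResult(url, proc, wnd, result):
#   print "%s -> %s" % (url, result.strip())
#
# Iterate(command, PrintResult)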
|
Crystalnix/house-of-life-chromium
|
tools/site_compare/utils/browser_iterate.py
|
Python
|
bsd-3-clause
| 5,863
|
[
"VisIt"
] |
3e968f7877f9e27935ee8e846f3fd8c9d8e3365667f16b06c7d90d590b8f6f05
|
#!/usr/bin/env python3
"""
========================================================
Extract info on reads that align to a given region
in draft genome assembly.
========================================================
"""
try:
from Bio.SeqIO.FastaIO import SimpleFastaParser
from Bio.SeqIO.QualityIO import FastqGeneralIterator
import pysam
import argparse
import subprocess
import tarfile
import gzip
import sys,os
except ImportError as e:
print('Missing package(s): {}'.format(e))
quit()
verbose = False
log = list()
def main():
# --------------------------------------------------------
# PART 0: Parse input
# --------------------------------------------------------
parser = argparse.ArgumentParser(description='Extract and package reads within region')
parser.add_argument('-v', '--verbose', action="store_true", default=False, required=False, dest="verbose", help="Use for verbose output with info on progress.")
parser.add_argument('-b', '--bam', action="store", required=True, dest="bam", help="Sorted bam file created by aligning reads to the draft genome (refer to reads.sorted.bam in Nanopolish README).")
parser.add_argument('-r', '--reads', action="store", dest="fa_filename", help="Fasta, fastq, fasta.gz, or fastq.gz file (refer to reads.fa in Nanopolish README)")
parser.add_argument('-g', '--genome', action="store", required=True, dest="draft_ga", help="Draft genome assembly (refer to draft.fa in Nanopolish README).")
parser.add_argument('-w', '--window', action="store", required=True, dest="draft_ga_coords", help="Draft genome assembly coordinates wrapped in quotes ex. \"tig000001:10000-20000\".")
parser.add_argument('-o', '--output_prefix', action="store", required=False, default="reads_subset", dest="output_prefix", help="Output prefix for tar.gz file and log file.")
args = parser.parse_args()
# Check to see if user used verbose option
global verbose
if args.verbose:
verbose = True
# Infer readdb file from fasta/q file
readdb = args.fa_filename + ".index.readdb"
custom_print( "===================================================" )
custom_print( "Extract reads that align to given region" )
custom_print( "Package all necessary files to reproduce error" )
custom_print( "===================================================" )
# --------------------------------------------------------
# PART 1: Validate input
# --------------------------------------------------------
custom_print( "[ Input ]" )
custom_print( "[+] Extracting from draft genome assembly coords: " + args.draft_ga_coords )
custom_print( "[+] BAM file (reads.fa aligned to draft.fa): " + args.bam )
custom_print( "[+] Readdb file: " + readdb )
custom_print( "[+] Draft genome assembly (draft.fa): " + args.draft_ga )
custom_print( "[+] FASTA/Q file (reads.fa): " + args.fa_filename )
custom_print( "[+] Output prefix: " + args.output_prefix )
custom_print( "[ Input check ]" )
files = list()
files.append(args.bam)
files.append(readdb)
files.append(args.fa_filename)
files.append(args.draft_ga)
draft_ga_fai = args.draft_ga + ".fai"
files.append(draft_ga_fai)
for i in files:
if not os.path.exists(i) or not os.path.getsize(i) > 0 or not os.access(i, os.R_OK):
print( "Expecting " + i + ". But does not exist, is empty or is not readable." )
sys.exit(1)
custom_print( "[ Validated input ] All input files exist, are not-empty, and are readable." )
# --------------------------------------------------------
# PART 2: Reassign input argument values
# --------------------------------------------------------
# o = old/original, ga = genome assembly, fa = fasta/q file
# coords = coordinates, op = output
o_bam = args.bam
o_readdb = readdb
o_fa = args.fa_filename
op = args.output_prefix
draft_ga_coords = args.draft_ga_coords
# --------------------------------------------------------
# PART 3: With user input ref coords, extract all
# aligned reads within these coordinates,
# store read_ids, and fast5 files.
# --------------------------------------------------------
custom_print( "[ Extracting info on reads aligned to region ] \t" + draft_ga_coords )
samfile = pysam.AlignmentFile(o_bam, "rb")
region_read_ids = list()
region_num_reads = 0
# get all read ids of reads that are aligned to region in draft assembly
for read in samfile.fetch(region=draft_ga_coords):
read_id = read.query_name
# store each read id once (avoids shadowing the builtin id())
if read_id not in region_read_ids:
region_read_ids.append(read_id)
# count number of reads that were aligned to the given region
region_num_reads += 1
# --------------------------------------------------------
# PART 4: Parse readdb file and find path to fast5 files
# associated with each read that aligned to region
# --------------------------------------------------------
# readdb file has 2 columns: one indicating read_id and another indicating the fast5 file the read came from
# each row represents a read
custom_print( "[ Reading readdb file ]" )
region_fast5_files = dict()
with open (o_readdb, "r") as file:
for line in file:
l = line.split("\t")
read_id = l.pop(0)
if read_id in region_read_ids:
fast5_file = l.pop(0)
region_fast5_files[str(read_id)] = fast5_file.rstrip()
# --------------------------------------------------------
# PART 5: Make a region BAM and BAI file
# --------------------------------------------------------
new_bam = "reads.bam"
custom_print( "[ Writing to a new BAM file ] \t" + new_bam )
region_reads = pysam.view("-b", o_bam, draft_ga_coords, "-o", new_bam, catch_stdout=False)
new_bam_index = new_bam + ".bai"
custom_print( "[ Writing to a new BAI file ] \t" + new_bam_index )
pysam.index(new_bam, new_bam_index)
# --------------------------------------------------------
# PART 6: With user input ref coords, extract all
# aligned reads within these coordinates
# and make new FASTA file
# --------------------------------------------------------
# detect type of sequences file then handle accordingly
file_type = detect_fa_filetype(o_fa)
new_fa = "reads.fasta"
custom_print( "[ Writing to a new fasta file ]\t" + new_fa )
with open (new_fa, "w") as fout:
if ".gz" in file_type:
with gzip.open(o_fa, "rt") as fin:
if "fasta.gz" in file_type:
for title, seq in SimpleFastaParser(fin):
name = title.split(None, 1)[0]
if name in region_read_ids:
fout.write(">%s\n%s\n" % (name, seq))
elif "fastq.gz" in file_type:
for title, seq, qual in FastqGeneralIterator(fin):
name = title.split(None, 1)[0]
if name in region_read_ids:
fout.write(">%s\n%s\n" % (name, seq))
else:
with open(o_fa, "rt") as fin:
if "fasta" in file_type:
for title, seq in SimpleFastaParser(fin):
name = title.split(None, 1)[0]
if name in region_read_ids:
fout.write(">%s\n%s\n" % (name, seq))
elif "fastq" in file_type:
for title, seq, qual in FastqGeneralIterator(fin):
name = title.split(None, 1)[0]
if name in region_read_ids:
fout.write(">%s\n%s\n" % (name, seq))
# --------------------------------------------------------
# PART 7: Let's get to tarring
# --------------------------------------------------------
# While tarring, we need to fix the directory structure
# such that the original path to files are not saved.
# For each fast5 file we need to extract the basename,
# and save it in tar such that we save only the basename,
# and not the whole path from the original source.
tar_filename = op + ".tar.gz"
archive = tarfile.open(tar_filename, "w:gz")
custom_print( "[ Creating a tar.gz file ] \t" + tar_filename )
custom_print( "[+] FAST5 files: " + op + "/fast5_files/<FAST5 file(s)>" )
# track missing fast5 files
bad_f5_found = False # true if missing fast5 file
bad_read_id = ""
bad_f5_path = ""
num_bad_cases = 0
for r in list(region_fast5_files.keys()):
read_id = r
f5 = region_fast5_files[r]
# get basename of fast5 file
f5_basename = extract_basename(f5)
an = op + "/fast5_files/" + f5_basename
try:
archive.add(f5, arcname=an)
except (OSError, tarfile.TarError):
bad_f5_found = True
bad_read_id = read_id
bad_f5_path = f5
num_bad_cases += 1
# handle missing fast5 files
if bad_f5_found:
print("\nERROR: For read " + read_id + ", could not add " + str(f5) + ".")
print("This path is inferred from the readdb file.")
print("Please check that this is the correct path in readdb file for this read.")
if num_bad_cases > 1:
print("There are " + str(num_bad_cases) + " other reads with this problem (out of " + str(len(region_fast5_files)) + ").")
print("\n")
sys.exit(1)
# --------------------------------------------------------
# PART 8: Add new files to tar
# new fasta, new bam, and new bai with reads
# in the region given only
# --------------------------------------------------------
an = op + "/" + new_fa
archive.add(new_fa, arcname=an)
custom_print( "[+] New FASTA: " + an )
an_new_bam = op + "/" + new_bam
archive.add(new_bam, arcname=an_new_bam)
custom_print( "[+] New BAM: " + an_new_bam )
an_new_bam_index = op + "/" + new_bam_index
archive.add(new_bam_index, arcname=an_new_bam_index)
custom_print( "[+] New BAI: " + an_new_bam_index )
# --------------------------------------------------------
# PART 9: Add original draft genome assembly file
# and the index file
# --------------------------------------------------------
an_draft_ga = op + "/draft.fa"
archive.add(args.draft_ga, arcname=an_draft_ga)
custom_print( "[+] Original draft ga: " + an_draft_ga )
an_draft_ga_fai = op + "/draft.fa.fai"
archive.add(draft_ga_fai, arcname=an_draft_ga_fai)
custom_print( "[+] Original draft ga index: " + an_draft_ga_fai )
# --------------------------------------------------------
# PART 10: Check the number of reads in all new files
# --------------------------------------------------------
custom_print( "[ Output check ] " )
# check the length of bam file
num_reads_bam = region_num_reads
num_reads_fasta = int(float(file_length(new_fa))/2.0)
num_fast5_files = len(region_fast5_files)
values = list()
values.append(num_reads_bam)
values.append(num_reads_fasta)
custom_print( "[+] Num reads in new BAM: \t" + str(num_reads_bam) )
custom_print( "[+] Num reads in new FASTA: \t" + str(num_reads_fasta) )
custom_print( "[+] Num files in fast5_files/: \t" + str(num_fast5_files))
if not all( v == num_fast5_files for v in values ):
print( "[!] WARNING: The number of reads in the new bam, new fasta, and num of fast5 files tarred are not equal..." )
else:
custom_print( "[ Validated output ] Number of reads in the new bam, new fasta, and num of fast5 files tarred are equal!" )
# --------------------------------------------------------
# FINAL: Output log if verbose flag not used
# --------------------------------------------------------
global log
logfile = op + ".log"
with open (logfile, "w") as lfile:
for s in log:
lfile.write(s + "\n")
an_logfile = op + "/" + logfile
custom_print( "[ Log file ] " + an_logfile )
custom_print( "[ Tar file ] " + str(tar_filename) )
custom_print( "[ Finished ] " )
archive.add(logfile, arcname=an_logfile)
archive.close()
def file_length(filename):
# ========================================================
# Returns number of lines in a file
# --------------------------------------------------------
# Input: Filename
# Output: Number of lines in the file ...
# ========================================================
i = -1  # handles empty files, where the loop body never runs
with open(filename) as f:
for i, l in enumerate(f):
pass
return i + 1
def extract_basename(filename):
# ========================================================
# Returns base filename
# --------------------------------------------------------
# Input: Filenames with paths
# Output: Base filename
# ========================================================
# strip trailing slashes/backslashes that would otherwise yield an empty basename
a = filename.rstrip("\\/")
b = os.path.basename(a)
return str(b)
def detect_fa_filetype(fa_filename):
# ========================================================
# Detects filetype of sequences input
# --------------------------------------------------------
# Input: FASTA/Q filename
# Output: Either ['fa.gz', 'fastq.gz', 'fasta.gz',
# 'fastq', 'fasta']
# ========================================================
path = fa_filename
if path.endswith('fa.gz'):
print("Possibly using the reads file generated by nanopolish index? Use original reads file...")
for ext in ['fastq.gz', 'fasta.gz', 'fastq', 'fasta']:
if path.endswith(ext):
return ext
print("Must be either fasta, fastq, fasta.gz, fastq.gz")
sys.exit(1)
def custom_print(s):
# ========================================================
# Depending on verbose flag, will save all prints to
# log list, or will print to stdout
# --------------------------------------------------------
# Input: string to print
# ========================================================
global verbose
global log
if verbose:
print(s)
log.append(s)
if __name__ == "__main__":
main()
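# A minimal invocation sketch (illustrative file names; see the argparse
# options above for the full flag set):
#
# python3 extract_reads_aligned_to_region.py \
#     -b reads.sorted.bam -r reads.fa -g draft.fa \
#     -w "tig000001:10000-20000" -o reads_subset -v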
|
jts/nanopolish
|
scripts/extract_reads_aligned_to_region.py
|
Python
|
mit
| 13,206
|
[
"pysam"
] |
8687627950c26c6ff708f4b6d6804f6c8e00cd648cc0a7c58cb266b3cecb6ccf
|
#!/usr/bin/python
from setuptools import setup
setup(
name = "dhcpz",
version = "0.2.0",
# setuptools expects strings (not lists) for the author metadata fields
author = "Nicholas VonHollen, Brian Lamar",
author_email = "nicholas.vonhollen@rackspace.com, brian.lamar@rackspace.com",
license = "Apache License 2.0",
packages = ['dhcpz', 'dhcpz.handlers'],
package_dir = {"":"src/py"},
install_requires = ['gevent', 'netifaces'],
data_files = [
('/etc', ['conf/dhcpz.conf']),
('/etc/init.d', ['src/init.d/dhcpz']),
('/usr/bin', ['src/bin/dhcpz']),
],
)
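# A minimal install sketch (standard setuptools workflow; writing the
# data_files under /etc, /etc/init.d and /usr/bin typically requires root):
#
# python setup.py install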
|
blamarvt/dhcpz
|
setup.py
|
Python
|
apache-2.0
| 662
|
[
"Brian"
] |
82b684532296d0ccefbe13232e6a63e8651a50c83c2f525ebb13d9faa9920db5
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides an interface to communicate with the device via the adb command.
Assumes adb binary is currently on system path.
"""
# pylint: skip-file
import collections
import datetime
import inspect
import logging
import os
import random
import re
import shlex
import signal
import subprocess
import sys
import tempfile
import time
import cmd_helper
import constants
import system_properties
from utils import host_utils
try:
from pylib import pexpect
except ImportError:
pexpect = None
sys.path.append(os.path.join(
constants.DIR_SOURCE_ROOT, 'third_party', 'android_testrunner'))
import adb_interface
import am_instrument_parser
import errors
from pylib.device import device_blacklist
from pylib.device import device_errors
# Pattern to search for the next whole line of pexpect output and capture it
# into a match group. We can't use ^ and $ for line start end with pexpect,
# see http://www.noah.org/python/pexpect/#doc for explanation why.
PEXPECT_LINE_RE = re.compile('\n([^\r]*)\r')
# Set the adb shell prompt to be a unique marker that will [hopefully] not
# appear at the start of any line of a command's output.
SHELL_PROMPT = '~+~PQ\x17RS~+~'
# Java properties file
LOCAL_PROPERTIES_PATH = constants.DEVICE_LOCAL_PROPERTIES_PATH
# Property in /data/local.prop that controls Java assertions.
JAVA_ASSERT_PROPERTY = 'dalvik.vm.enableassertions'
# Keycode "enum" suitable for passing to AndroidCommands.SendKey().
KEYCODE_HOME = 3
KEYCODE_BACK = 4
KEYCODE_DPAD_UP = 19
KEYCODE_DPAD_DOWN = 20
KEYCODE_DPAD_RIGHT = 22
KEYCODE_ENTER = 66
KEYCODE_MENU = 82
MD5SUM_DEVICE_FOLDER = constants.TEST_EXECUTABLE_DIR + '/md5sum/'
MD5SUM_DEVICE_PATH = MD5SUM_DEVICE_FOLDER + 'md5sum_bin'
PIE_WRAPPER_PATH = constants.TEST_EXECUTABLE_DIR + '/run_pie'
CONTROL_USB_CHARGING_COMMANDS = [
{
# Nexus 4
'witness_file': '/sys/module/pm8921_charger/parameters/disabled',
'enable_command': 'echo 0 > /sys/module/pm8921_charger/parameters/disabled',
'disable_command':
'echo 1 > /sys/module/pm8921_charger/parameters/disabled',
},
{
# Nexus 5
# Setting the HIZ bit of the bq24192 causes the charger to actually ignore
# energy coming from USB. Setting the power_supply offline just updates the
# Android system to reflect that.
'witness_file': '/sys/kernel/debug/bq24192/INPUT_SRC_CONT',
'enable_command': (
'echo 0x4A > /sys/kernel/debug/bq24192/INPUT_SRC_CONT && '
'echo 1 > /sys/class/power_supply/usb/online'),
'disable_command': (
'echo 0xCA > /sys/kernel/debug/bq24192/INPUT_SRC_CONT && '
'chmod 644 /sys/class/power_supply/usb/online && '
'echo 0 > /sys/class/power_supply/usb/online'),
},
]
class DeviceTempFile(object):
def __init__(self, android_commands, prefix='temp_file', suffix=''):
"""Find an unused temporary file path in the devices external directory.
When this object is closed, the file will be deleted on the device.
"""
self.android_commands = android_commands
while True:
# TODO(cjhopman): This could actually return the same file in multiple
# calls if the caller doesn't write to the files immediately. This is
# expected to never happen.
i = random.randint(0, 1000000)
self.name = '%s/%s-%d-%010d%s' % (
android_commands.GetExternalStorage(),
prefix, int(time.time()), i, suffix)
if not android_commands.FileExistsOnDevice(self.name):
break
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def close(self):
self.android_commands.RunShellCommand('rm ' + self.name)
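# A minimal usage sketch for DeviceTempFile (illustrative; `ac` is assumed to
# be an AndroidCommands instance created elsewhere):
#
# with DeviceTempFile(ac, suffix='.sh') as tmp:
#   ac.RunShellCommand('echo hi > %s' % tmp.name)
# # the device-side file is deleted when the block exits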
def GetAVDs():
"""Returns a list of AVDs."""
re_avd = re.compile('^[ ]+Name: ([a-zA-Z0-9_:.-]+)', re.MULTILINE)
avds = re_avd.findall(cmd_helper.GetCmdOutput(['android', 'list', 'avd']))
return avds
def ResetBadDevices():
"""Removes the blacklist that keeps track of bad devices for a current
build.
"""
device_blacklist.ResetBlacklist()
def ExtendBadDevices(devices):
"""Adds devices to the blacklist that keeps track of bad devices for a
current build.
The devices listed in the bad devices file will not be returned by
GetAttachedDevices.
Args:
devices: list of bad devices to be added to the bad devices file.
"""
device_blacklist.ExtendBlacklist(devices)
def GetAttachedDevices(hardware=True, emulator=True, offline=False):
"""Returns a list of attached, android devices and emulators.
If a preferred device has been set with ANDROID_SERIAL, it will be first in
the returned list. The arguments specify what devices to include in the list.
Example output:
* daemon not running. starting it now on port 5037 *
* daemon started successfully *
List of devices attached
027c10494100b4d7 device
emulator-5554 offline
Args:
hardware: Include attached actual devices that are online.
emulator: Include emulators (i.e. AVD's) currently on host.
offline: Include devices and emulators that are offline.
Returns: List of devices.
"""
adb_devices_output = cmd_helper.GetCmdOutput([constants.GetAdbPath(),
'devices'])
re_device = re.compile('^([a-zA-Z0-9_:.-]+)\tdevice$', re.MULTILINE)
online_devices = re_device.findall(adb_devices_output)
re_device = re.compile('^(emulator-[0-9]+)\tdevice', re.MULTILINE)
emulator_devices = re_device.findall(adb_devices_output)
re_device = re.compile('^([a-zA-Z0-9_:.-]+)\t(?:offline|unauthorized)$',
re.MULTILINE)
offline_devices = re_device.findall(adb_devices_output)
devices = []
# First determine list of online devices (e.g. hardware and/or emulator).
if hardware and emulator:
devices = online_devices
elif hardware:
devices = [device for device in online_devices
if device not in emulator_devices]
elif emulator:
devices = emulator_devices
# Now add offline devices if offline is true
if offline:
devices = devices + offline_devices
# Remove any devices in the blacklist.
blacklist = device_blacklist.ReadBlacklist()
if len(blacklist):
logging.info('Avoiding bad devices %s', ' '.join(blacklist))
devices = [device for device in devices if device not in blacklist]
preferred_device = os.environ.get('ANDROID_SERIAL')
if preferred_device in devices:
devices.remove(preferred_device)
devices.insert(0, preferred_device)
return devices
def IsDeviceAttached(device):
"""Return true if the device is attached and online."""
return device in GetAttachedDevices()
def _GetFilesFromRecursiveLsOutput(path, ls_output, re_file, utc_offset=None):
"""Gets a list of files from `ls` command output.
Python's os.walk isn't used because it doesn't work over adb shell.
Args:
path: The path to list.
ls_output: A list of lines returned by an `ls -lR` command.
re_file: A compiled regular expression which parses a line into named groups
consisting of at minimum "filename", "date", "time", "size" and
optionally "timezone".
utc_offset: A 5-character string of the form +HHMM or -HHMM, where HH is a
2-digit string giving the number of UTC offset hours, and MM is a
2-digit string giving the number of UTC offset minutes. If the input
utc_offset is None, will try to look for the value of "timezone" if it
is specified in re_file.
Returns:
A dict of {"name": (size, lastmod), ...} where:
name: The file name relative to |path|'s directory.
size: The file size in bytes (0 for directories).
lastmod: The file last modification date in UTC.
"""
re_directory = re.compile('^%s/(?P<dir>[^:]+):$' % re.escape(path))
path_dir = os.path.dirname(path)
current_dir = ''
files = {}
for line in ls_output:
directory_match = re_directory.match(line)
if directory_match:
current_dir = directory_match.group('dir')
continue
file_match = re_file.match(line)
if file_match:
filename = os.path.join(current_dir, file_match.group('filename'))
if filename.startswith(path_dir):
filename = filename[len(path_dir) + 1:]
lastmod = datetime.datetime.strptime(
file_match.group('date') + ' ' + file_match.group('time')[:5],
'%Y-%m-%d %H:%M')
if not utc_offset and 'timezone' in re_file.groupindex:
utc_offset = file_match.group('timezone')
if isinstance(utc_offset, str) and len(utc_offset) == 5:
utc_delta = datetime.timedelta(hours=int(utc_offset[1:3]),
minutes=int(utc_offset[3:5]))
if utc_offset[0:1] == '-':
utc_delta = -utc_delta
lastmod -= utc_delta
files[filename] = (int(file_match.group('size')), lastmod)
return files
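# An illustrative re_file for `ls -lR` lines such as
# "-rw-r--r-- root root 1024 2013-01-01 12:00 name" (an assumed output format,
# not taken from this file):
#
# re_file = re.compile(r'^-[\w-]+\s+\w+\s+\w+\s+(?P<size>\d+)\s+'
#                      r'(?P<date>\d{4}-\d{2}-\d{2})\s+(?P<time>\d{2}:\d{2})\s+'
#                      r'(?P<filename>.+)$')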
def _ParseMd5SumOutput(md5sum_output):
"""Returns a list of tuples from the provided md5sum output.
Args:
md5sum_output: output directly from md5sum binary.
Returns:
List of namedtuples with attributes |hash| and |path|, where |path| is the
absolute path to the file with an Md5Sum of |hash|.
"""
HashAndPath = collections.namedtuple('HashAndPath', ['hash', 'path'])
split_lines = [line.split(' ') for line in md5sum_output]
return [HashAndPath._make(s) for s in split_lines if len(s) == 2]
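# Example (illustrative): _ParseMd5SumOutput(['0123abcd /sdcard/f.txt'])
# returns [HashAndPath(hash='0123abcd', path='/sdcard/f.txt')].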
def _HasAdbPushSucceeded(command_output):
"""Returns whether adb push has succeeded from the provided output."""
# TODO(frankf): We should look at the return code instead of the command
# output for many of the commands in this file.
if not command_output:
return True
# Success looks like this: "3035 KB/s (12512056 bytes in 4.025s)"
# Errors look like this: "failed to copy ... "
if not re.search('^[0-9]', command_output.splitlines()[-1]):
logging.critical('PUSH FAILED: ' + command_output)
return False
return True
def GetLogTimestamp(log_line, year):
"""Returns the timestamp of the given |log_line| in the given year."""
try:
return datetime.datetime.strptime('%s-%s' % (year, log_line[:18]),
'%Y-%m-%d %H:%M:%S.%f')
except (ValueError, IndexError):
logging.critical('Error reading timestamp from ' + log_line)
return None
class AndroidCommands(object):
"""Helper class for communicating with Android device via adb."""
def __init__(self, device=None):
"""Constructor.
Args:
device: If given, adb commands are only send to the device of this ID.
Otherwise commands are sent to all attached devices.
"""
self._adb = adb_interface.AdbInterface(constants.GetAdbPath())
if device:
self._adb.SetTargetSerial(device)
self._device = device
self._logcat = None
self.logcat_process = None
self._logcat_tmpoutfile = None
self._pushed_files = []
self._device_utc_offset = None
self._potential_push_size = 0
self._actual_push_size = 0
self._external_storage = ''
self._util_wrapper = ''
self._system_properties = system_properties.SystemProperties(self.Adb())
self._push_if_needed_cache = {}
self._control_usb_charging_command = {
'command': None,
'cached': False,
}
self._protected_file_access_method_initialized = None
self._privileged_command_runner = None
self._pie_wrapper = None
@property
def system_properties(self):
return self._system_properties
def _LogShell(self, cmd):
"""Logs the adb shell command."""
if self._device:
device_repr = self._device[-4:]
else:
device_repr = '????'
logging.info('[%s]> %s', device_repr, cmd)
def Adb(self):
"""Returns our AdbInterface to avoid us wrapping all its methods."""
# TODO(tonyg): Goal should be to get rid of this method by making this API
# complete and alleviating the need.
return self._adb
def GetDevice(self):
"""Returns the device serial."""
return self._device
def IsOnline(self):
"""Checks whether the device is online.
Returns:
True if device is in 'device' mode, False otherwise.
"""
# TODO(aurimas): revert to using adb get-state when android L adb is fixed.
#out = self._adb.SendCommand('get-state')
#return out.strip() == 'device'
out = self._adb.SendCommand('devices')
for line in out.split('\n'):
if self._device in line and 'device' in line:
return True
return False
def IsRootEnabled(self):
"""Checks if root is enabled on the device."""
root_test_output = self.RunShellCommand('ls /root') or ['']
return not 'Permission denied' in root_test_output[0]
def EnableAdbRoot(self):
"""Enables adb root on the device.
Returns:
True: if output from executing adb root was as expected.
False: otherwise.
"""
if self.GetBuildType() == 'user':
logging.warning("Can't enable root in production builds with type user")
return False
else:
return_value = self._adb.EnableAdbRoot()
# EnableAdbRoot inserts a call for wait-for-device only when adb logcat
# output matches what is expected. Just to be safe add a call to
# wait-for-device.
self._adb.SendCommand('wait-for-device')
return return_value
def GetDeviceYear(self):
"""Returns the year information of the date on device."""
return self.RunShellCommand('date +%Y')[0]
def GetExternalStorage(self):
if not self._external_storage:
self._external_storage = self.RunShellCommand('echo $EXTERNAL_STORAGE')[0]
if not self._external_storage:
raise device_errors.CommandFailedError(
['shell', "'echo $EXTERNAL_STORAGE'"],
'Unable to find $EXTERNAL_STORAGE')
return self._external_storage
def WaitForDevicePm(self, timeout=120):
"""Blocks until the device's package manager is available.
To workaround http://b/5201039, we restart the shell and retry if the
package manager isn't back after 120 seconds.
Raises:
errors.WaitForResponseTimedOutError after max retries reached.
"""
last_err = None
retries = 3
while retries:
try:
self._adb.WaitForDevicePm(wait_time=timeout)
return # Success
except errors.WaitForResponseTimedOutError as e:
last_err = e
logging.warning('Restarting and retrying after timeout: %s', e)
retries -= 1
self.RestartShell()
raise last_err # Only reached after max retries, re-raise the last error.
def RestartShell(self):
"""Restarts the shell on the device. Does not block for it to return."""
self.RunShellCommand('stop')
self.RunShellCommand('start')
def Reboot(self, full_reboot=True):
"""Reboots the device and waits for the package manager to return.
Args:
full_reboot: Whether to fully reboot the device or just restart the shell.
"""
# TODO(torne): hive can't reboot the device either way without breaking the
# connection; work out if we can handle this better
if os.environ.get('USING_HIVE'):
logging.warning('Ignoring reboot request as we are on hive')
return
if full_reboot or not self.IsRootEnabled():
self._adb.SendCommand('reboot')
self._system_properties = system_properties.SystemProperties(self.Adb())
timeout = 300
retries = 1
# Wait for the device to disappear.
while retries < 10 and self.IsOnline():
time.sleep(1)
retries += 1
else:
self.RestartShell()
timeout = 120
# To run tests we need at least the package manager and the sd card (or
# other external storage) to be ready.
self.WaitForDevicePm(timeout)
self.WaitForSdCardReady(timeout)
def Shutdown(self):
"""Shuts down the device."""
self._adb.SendCommand('reboot -p')
self._system_properties = system_properties.SystemProperties(self.Adb())
def Uninstall(self, package):
"""Uninstalls the specified package from the device.
Args:
package: Name of the package to remove.
Returns:
A status string returned by adb uninstall
"""
uninstall_command = 'uninstall %s' % package
self._LogShell(uninstall_command)
return self._adb.SendCommand(uninstall_command, timeout_time=60)
def Install(self, package_file_path, reinstall=False):
"""Installs the specified package to the device.
Args:
package_file_path: Path to .apk file to install.
reinstall: Reinstall an existing apk, keeping the data.
Returns:
A status string returned by adb install
"""
assert os.path.isfile(package_file_path), ('<%s> is not file' %
package_file_path)
install_cmd = ['install']
if reinstall:
install_cmd.append('-r')
install_cmd.append(package_file_path)
install_cmd = ' '.join(install_cmd)
self._LogShell(install_cmd)
# FIXME(wang16): Change the timeout here to five minutes. Revert
# the change when slaves can run kvm enabled x86 android emulators.
return self._adb.SendCommand(install_cmd,
timeout_time=5 * 60,
retry_count=0)
def ManagedInstall(self, apk_path, keep_data=False, package_name=None,
reboots_on_timeout=2):
"""Installs specified package and reboots device on timeouts.
If package_name is supplied, checks if the package is already installed and
doesn't reinstall if the apk md5sums match.
Args:
apk_path: Path to .apk file to install.
keep_data: Reinstalls instead of uninstalling first, preserving the
application data.
package_name: Package name (only needed if keep_data=False).
reboots_on_timeout: number of time to reboot if package manager is frozen.
"""
# Check if package is already installed and up to date.
if package_name:
installed_apk_path = self.GetApplicationPath(package_name)
if (installed_apk_path and
not self.GetFilesChanged(apk_path, installed_apk_path,
ignore_filenames=True)):
logging.info('Skipped install: identical %s APK already installed' %
package_name)
return
# Install.
reboots_left = reboots_on_timeout
while True:
try:
if not keep_data:
assert package_name
self.Uninstall(package_name)
install_status = self.Install(apk_path, reinstall=keep_data)
if 'Success' in install_status:
return
else:
raise Exception('Install failure: %s' % install_status)
except errors.WaitForResponseTimedOutError:
print '@@@STEP_WARNINGS@@@'
logging.info('Timeout on installing %s on device %s', apk_path,
self._device)
if reboots_left <= 0:
raise Exception('Install timed out')
# Force a hard reboot on last attempt
self.Reboot(full_reboot=(reboots_left == 1))
reboots_left -= 1
def MakeSystemFolderWritable(self):
"""Remounts the /system folder rw."""
out = self._adb.SendCommand('remount')
if out.strip() != 'remount succeeded':
raise errors.MsgException('Remount failed: %s' % out)
def RestartAdbdOnDevice(self):
logging.info('Restarting adbd on the device...')
with DeviceTempFile(self, suffix=".sh") as temp_script_file:
host_script_path = os.path.join(constants.DIR_SOURCE_ROOT,
'build',
'android',
'pylib',
'restart_adbd.sh')
self._adb.Push(host_script_path, temp_script_file.name)
self.RunShellCommand('. %s' % temp_script_file.name)
self._adb.SendCommand('wait-for-device')
def RestartAdbServer(self):
"""Restart the adb server."""
ret = self.KillAdbServer()
if ret != 0:
raise errors.MsgException('KillAdbServer: %d' % ret)
ret = self.StartAdbServer()
if ret != 0:
raise errors.MsgException('StartAdbServer: %d' % ret)
@staticmethod
def KillAdbServer():
"""Kill adb server."""
adb_cmd = [constants.GetAdbPath(), 'kill-server']
ret = cmd_helper.RunCmd(adb_cmd)
retry = 0
while retry < 3:
ret, _ = cmd_helper.GetCmdStatusAndOutput(['pgrep', 'adb'])
if ret != 0:
# pgrep didn't find adb, kill-server succeeded.
return 0
retry += 1
time.sleep(retry)
return ret
def StartAdbServer(self):
"""Start adb server."""
adb_cmd = ['taskset', '-c', '0', constants.GetAdbPath(), 'start-server']
ret, _ = cmd_helper.GetCmdStatusAndOutput(adb_cmd)
retry = 0
while retry < 3:
ret, _ = cmd_helper.GetCmdStatusAndOutput(['pgrep', 'adb'])
if ret == 0:
# pgrep found adb, start-server succeeded.
# Waiting for device to reconnect before returning success.
self._adb.SendCommand('wait-for-device')
return 0
retry += 1
time.sleep(retry)
return ret
def WaitForSystemBootCompleted(self, wait_time):
"""Waits for targeted system's boot_completed flag to be set.
Args:
wait_time: time in seconds to wait
Raises:
WaitForResponseTimedOutError if wait_time elapses and flag still not
set.
"""
logging.info('Waiting for system boot completed...')
self._adb.SendCommand('wait-for-device')
# Now the device is there, but system not boot completed.
# Query the sys.boot_completed flag with a basic command
boot_completed = False
attempts = 0
wait_period = 5
while not boot_completed and (attempts * wait_period) < wait_time:
output = self.system_properties['sys.boot_completed']
output = output.strip()
if output == '1':
boot_completed = True
else:
# If 'error: xxx' returned when querying the flag, it means
# adb server lost the connection to the emulator, so restart the adb
# server.
if 'error:' in output:
self.RestartAdbServer()
time.sleep(wait_period)
attempts += 1
if not boot_completed:
raise errors.WaitForResponseTimedOutError(
'sys.boot_completed flag was not set after %s seconds' % wait_time)
def WaitForSdCardReady(self, timeout_time):
"""Wait for the SD card ready before pushing data into it."""
logging.info('Waiting for SD card ready...')
sdcard_ready = False
attempts = 0
wait_period = 5
external_storage = self.GetExternalStorage()
while not sdcard_ready and attempts * wait_period < timeout_time:
output = self.RunShellCommand('ls ' + external_storage)
if output:
sdcard_ready = True
else:
time.sleep(wait_period)
attempts += 1
if not sdcard_ready:
raise errors.WaitForResponseTimedOutError(
'SD card not ready after %s seconds' % timeout_time)
def GetAndroidToolStatusAndOutput(self, command, lib_path=None, *args, **kw):
"""Runs a native Android binary, wrapping the command as necessary.
This is a specialization of GetShellCommandStatusAndOutput, which is meant
for running tools/android/ binaries and handle properly: (1) setting the
lib path (for component=shared_library), (2) using the PIE wrapper on ICS.
See crbug.com/373219 for more context.
Args:
command: String containing the command to send.
lib_path: (optional) path to the folder containing the dependent libs.
Same other arguments of GetCmdStatusAndOutput.
"""
# The first time this command is run the device is inspected to check
# whether a wrapper for running PIE executable is needed (only Android ICS)
# or not. The result is cached, so the wrapper is pushed only once.
if self._pie_wrapper is None:
# None: did not check; '': did check and not needed; '/path': use /path.
self._pie_wrapper = ''
if self.GetBuildId().startswith('I'): # Ixxxx = Android ICS.
run_pie_dist_path = os.path.join(constants.GetOutDirectory(), 'run_pie')
assert os.path.exists(run_pie_dist_path), 'Please build run_pie'
# The PIE loader must be pushed manually (i.e. no PushIfNeeded) because
# PushIfNeeded requires md5sum and md5sum requires the wrapper as well.
adb_command = 'push %s %s' % (run_pie_dist_path, PIE_WRAPPER_PATH)
assert _HasAdbPushSucceeded(self._adb.SendCommand(adb_command))
self._pie_wrapper = PIE_WRAPPER_PATH
if self._pie_wrapper:
command = '%s %s' % (self._pie_wrapper, command)
if lib_path:
command = 'LD_LIBRARY_PATH=%s %s' % (lib_path, command)
return self.GetShellCommandStatusAndOutput(command, *args, **kw)
# It is tempting to turn this function into a generator, however this is not
# possible without using a private (local) adb_shell instance (to ensure no
# other command interleaves usage of it), which would defeat the main aim of
# being able to reuse the adb shell instance across commands.
def RunShellCommand(self, command, timeout_time=20, log_result=False):
"""Send a command to the adb shell and return the result.
Args:
command: String containing the shell command to send.
timeout_time: Number of seconds to wait for command to respond before
retrying, used by AdbInterface.SendShellCommand.
log_result: Boolean to indicate whether we should log the result of the
shell command.
Returns:
list containing the lines of output received from running the command
"""
self._LogShell(command)
if "'" in command:
command = command.replace('\'', '\'\\\'\'')
result = self._adb.SendShellCommand(
"'%s'" % command, timeout_time).splitlines()
# TODO(b.kelemen): we should really be able to drop the stderr of the
# command or raise an exception based on what the caller wants.
result = [ l for l in result if not l.startswith('WARNING') ]
if ['error: device not found'] == result:
raise errors.DeviceUnresponsiveError('device not found')
if log_result:
self._LogShell('\n'.join(result))
return result
def GetShellCommandStatusAndOutput(self, command, timeout_time=20,
log_result=False):
"""See RunShellCommand() above.
Returns:
The tuple (exit code, list of output lines).
"""
lines = self.RunShellCommand(
command + '; echo %$?', timeout_time, log_result)
last_line = lines[-1]
status_pos = last_line.rfind('%')
assert status_pos >= 0
status = int(last_line[status_pos + 1:])
if status_pos == 0:
lines = lines[:-1]
else:
lines = lines[:-1] + [last_line[:status_pos]]
return (status, lines)
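# Example (illustrative): for a failing command the shell output ends with a
# line like "%1"; the '%' marker is located, status parses to 1, and the
# marker is stripped from the returned lines.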
def KillAll(self, process, signum=9, with_su=False):
"""Android version of killall, connected via adb.
Args:
process: name of the process to kill off.
signum: signal to use, 9 (SIGKILL) by default.
with_su: whether or not to use su to kill the processes.
Returns:
the number of processes killed
"""
pids = self.ExtractPid(process)
if pids:
cmd = 'kill -%d %s' % (signum, ' '.join(pids))
if with_su:
self.RunShellCommandWithSU(cmd)
else:
self.RunShellCommand(cmd)
return len(pids)
def KillAllBlocking(self, process, timeout_sec, signum=9, with_su=False):
"""Blocking version of killall, connected via adb.
This waits until no process matching the corresponding name appears in ps'
output anymore.
Args:
process: name of the process to kill off
timeout_sec: the timeout in seconds
signum: same as |KillAll|
with_su: same as |KillAll|
Returns:
the number of processes killed
"""
processes_killed = self.KillAll(process, signum=signum, with_su=with_su)
if processes_killed:
elapsed = 0
wait_period = 0.1
# Note that this doesn't take into account the time spent in ExtractPid().
while self.ExtractPid(process) and elapsed < timeout_sec:
time.sleep(wait_period)
elapsed += wait_period
if elapsed >= timeout_sec:
return processes_killed - self.ExtractPid(process)
return processes_killed
@staticmethod
def _GetActivityCommand(package, activity, wait_for_completion, action,
category, data, extras, trace_file_name, force_stop,
flags):
"""Creates command to start |package|'s activity on the device.
Args - as for StartActivity
Returns:
the command to run on the target to start the activity
"""
cmd = 'am start -a %s' % action
if force_stop:
cmd += ' -S'
if wait_for_completion:
cmd += ' -W'
if category:
cmd += ' -c %s' % category
if package and activity:
cmd += ' -n %s/%s' % (package, activity)
if data:
cmd += ' -d "%s"' % data
if extras:
for key in extras:
value = extras[key]
if isinstance(value, str):
cmd += ' --es'
elif isinstance(value, bool):
cmd += ' --ez'
elif isinstance(value, int):
cmd += ' --ei'
else:
raise NotImplementedError(
'Need to teach StartActivity how to pass %s extras' % type(value))
cmd += ' %s %s' % (key, value)
if trace_file_name:
cmd += ' --start-profiler ' + trace_file_name
if flags:
cmd += ' -f %s' % flags
return cmd
def StartActivity(self, package, activity, wait_for_completion=False,
action='android.intent.action.VIEW',
category=None, data=None,
extras=None, trace_file_name=None,
force_stop=False, flags=None):
"""Starts |package|'s activity on the device.
Args:
package: Name of package to start (e.g. 'com.google.android.apps.chrome').
activity: Name of activity (e.g. '.Main' or
'com.google.android.apps.chrome.Main').
wait_for_completion: wait for the activity to finish launching (-W flag).
action: string (e.g. "android.intent.action.MAIN"). Default is VIEW.
category: string (e.g. "android.intent.category.HOME")
data: Data string to pass to activity (e.g. 'http://www.example.com/').
extras: Dict of extras to pass to activity. Values are significant.
trace_file_name: If used, turns on and saves the trace to this file name.
force_stop: force stop the target app before starting the activity (-S
flag).
Returns:
The output of the underlying command as a list of lines.
"""
cmd = self._GetActivityCommand(package, activity, wait_for_completion,
action, category, data, extras,
trace_file_name, force_stop, flags)
return self.RunShellCommand(cmd)
def StartActivityTimed(self, package, activity, wait_for_completion=False,
action='android.intent.action.VIEW',
category=None, data=None,
extras=None, trace_file_name=None,
force_stop=False, flags=None):
"""Starts |package|'s activity on the device, returning the start time
Args - as for StartActivity
Returns:
A tuple containing:
- the output of the underlying command as a list of lines, and
- a timestamp string for the time at which the activity started
"""
cmd = self._GetActivityCommand(package, activity, wait_for_completion,
action, category, data, extras,
trace_file_name, force_stop, flags)
self.StartMonitoringLogcat()
out = self.RunShellCommand('log starting activity; ' + cmd)
activity_started_re = re.compile('.*starting activity.*')
m = self.WaitForLogMatch(activity_started_re, None)
assert m
start_line = m.group(0)
return (out, GetLogTimestamp(start_line, self.GetDeviceYear()))
def StartCrashUploadService(self, package):
# TODO(frankf): We really need a python wrapper around Intent
# to be shared with StartActivity/BroadcastIntent.
cmd = (
'am startservice -a %s.crash.ACTION_FIND_ALL -n '
'%s/%s.crash.MinidumpUploadService' %
(constants.PACKAGE_INFO['chrome'].package,
package,
constants.PACKAGE_INFO['chrome'].package))
am_output = self.RunShellCommandWithSU(cmd)
assert am_output and 'Starting' in am_output[-1], (
'Service failed to start: %s' % am_output)
time.sleep(15)
def BroadcastIntent(self, package, intent, *args):
"""Send a broadcast intent.
Args:
package: Name of package containing the intent.
intent: Name of the intent.
args: Optional extra arguments for the intent.
"""
cmd = 'am broadcast -a %s.%s %s' % (package, intent, ' '.join(args))
self.RunShellCommand(cmd)
def GoHome(self):
"""Tell the device to return to the home screen. Blocks until completion."""
self.RunShellCommand('am start -W '
'-a android.intent.action.MAIN -c android.intent.category.HOME')
def CloseApplication(self, package):
"""Attempt to close down the application, using increasing violence.
Args:
package: Name of the process to kill off, e.g.
com.google.android.apps.chrome
"""
self.RunShellCommand('am force-stop ' + package)
def GetApplicationPath(self, package):
"""Get the installed apk path on the device for the given package.
Args:
package: Name of the package.
Returns:
Path to the apk on the device if it exists, None otherwise.
"""
pm_path_output = self.RunShellCommand('pm path ' + package)
# The path output contains anything if and only if the package
# exists.
if pm_path_output:
# pm_path_output is of the form: "package:/path/to/foo.apk"
return pm_path_output[0].split(':')[1]
else:
return None
def ClearApplicationState(self, package):
"""Closes and clears all state for the given |package|."""
# Check that the package exists before clearing it. Necessary because
# calling pm clear on a package that doesn't exist may never return.
pm_path_output = self.RunShellCommand('pm path ' + package)
# The path output contains anything if and only if the package exists.
if pm_path_output:
self.RunShellCommand('pm clear ' + package)
def SendKeyEvent(self, keycode):
"""Sends keycode to the device.
Args:
keycode: Numeric keycode to send (see "enum" at top of file).
"""
self.RunShellCommand('input keyevent %d' % keycode)
def _RunMd5Sum(self, host_path, device_path):
"""Gets the md5sum of a host path and device path.
Args:
host_path: Path (file or directory) on the host.
device_path: Path on the device.
Returns:
A tuple containing lists of the host and device md5sum results as
created by _ParseMd5SumOutput().
"""
md5sum_dist_path = os.path.join(constants.GetOutDirectory(),
'md5sum_dist')
assert os.path.exists(md5sum_dist_path), 'Please build md5sum.'
md5sum_dist_mtime = os.stat(md5sum_dist_path).st_mtime
if (md5sum_dist_path not in self._push_if_needed_cache or
self._push_if_needed_cache[md5sum_dist_path] != md5sum_dist_mtime):
command = 'push %s %s' % (md5sum_dist_path, MD5SUM_DEVICE_FOLDER)
assert _HasAdbPushSucceeded(self._adb.SendCommand(command))
self._push_if_needed_cache[md5sum_dist_path] = md5sum_dist_mtime
(_, md5_device_output) = self.GetAndroidToolStatusAndOutput(
self._util_wrapper + ' ' + MD5SUM_DEVICE_PATH + ' ' + device_path,
lib_path=MD5SUM_DEVICE_FOLDER,
timeout_time=2 * 60)
device_hash_tuples = _ParseMd5SumOutput(md5_device_output)
assert os.path.exists(host_path), 'Local path not found %s' % host_path
md5sum_output = cmd_helper.GetCmdOutput(
[os.path.join(constants.GetOutDirectory(), 'md5sum_bin_host'),
host_path])
host_hash_tuples = _ParseMd5SumOutput(md5sum_output.splitlines())
return (host_hash_tuples, device_hash_tuples)
def GetFilesChanged(self, host_path, device_path, ignore_filenames=False):
"""Compares the md5sum of a host path against a device path.
Note: Ignores extra files on the device.
Args:
host_path: Path (file or directory) on the host.
device_path: Path on the device.
ignore_filenames: If True only the file contents are considered when
checking whether a file has changed, otherwise the relative path
must also match.
Returns:
A list of tuples of the form (host_path, device_path) for files whose
md5sums do not match.
"""
# Md5Sum resolves symbolic links in path names so the calculation of
# relative path names from its output will need the real path names of the
# base directories. Having calculated these they are used throughout the
# function since this makes us less subject to any future changes to Md5Sum.
real_host_path = os.path.realpath(host_path)
real_device_path = self.RunShellCommand('realpath "%s"' % device_path)[0]
host_hash_tuples, device_hash_tuples = self._RunMd5Sum(
real_host_path, real_device_path)
if len(host_hash_tuples) > len(device_hash_tuples):
logging.info('%s files do not exist on the device' %
(len(host_hash_tuples) - len(device_hash_tuples)))
host_rel = [(os.path.relpath(os.path.normpath(t.path), real_host_path),
t.hash)
for t in host_hash_tuples]
if os.path.isdir(real_host_path):
def RelToRealPaths(rel_path):
return (os.path.join(real_host_path, rel_path),
os.path.join(real_device_path, rel_path))
else:
assert len(host_rel) == 1
def RelToRealPaths(_):
return (real_host_path, real_device_path)
if ignore_filenames:
# If we are ignoring file names, then we want to push any file for which
# a file with an equivalent MD5 sum does not exist on the device.
device_hashes = set([h.hash for h in device_hash_tuples])
ShouldPush = lambda p, h: h not in device_hashes
else:
# Otherwise, we want to push any file on the host for which a file with
# an equivalent MD5 sum does not exist at the same relative path on the
# device.
device_rel = dict([(os.path.relpath(os.path.normpath(t.path),
real_device_path),
t.hash)
for t in device_hash_tuples])
ShouldPush = lambda p, h: p not in device_rel or h != device_rel[p]
return [RelToRealPaths(path) for path, host_hash in host_rel
if ShouldPush(path, host_hash)]
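# Illustrative walk-through of GetFilesChanged (hypothetical paths and hashes):
# host has {a.txt: h1, b.txt: h2}; device has {a.txt: h1, b.txt: h3}.
# With ignore_filenames=False, b.txt differs (h2 != h3), so the result is
# [(<host>/b.txt, <device>/b.txt)]. With ignore_filenames=True, b.txt is
# reported only if no file anywhere under the device path already has hash h2.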
def PushIfNeeded(self, host_path, device_path):
"""Pushes |host_path| to |device_path|.
Works for files and directories. This method skips copying any files under
|host_path| that already exist on the device with the same hash.
All pushed files can be removed by calling RemovePushedFiles().
"""
MAX_INDIVIDUAL_PUSHES = 50
if not os.path.exists(host_path):
raise device_errors.CommandFailedError(
'Local path not found %s' % host_path, device=str(self))
# See if the file on the host changed since the last push (if any) and
# return early if it didn't. Note that this shortcut assumes that the tests
# on the device don't modify the files.
if not os.path.isdir(host_path):
if host_path in self._push_if_needed_cache:
host_path_mtime = self._push_if_needed_cache[host_path]
if host_path_mtime == os.stat(host_path).st_mtime:
return
size = host_utils.GetRecursiveDiskUsage(host_path)
self._pushed_files.append(device_path)
self._potential_push_size += size
if os.path.isdir(host_path):
self.RunShellCommand('mkdir -p "%s"' % device_path)
changed_files = self.GetFilesChanged(host_path, device_path)
logging.info('Found %d files that need to be pushed to %s',
len(changed_files), device_path)
if not changed_files:
return
def Push(host, device):
# NOTE: We can't use adb_interface.Push() because it hardcodes a timeout
# of 60 seconds which isn't sufficient for a lot of users of this method.
push_command = 'push %s %s' % (host, device)
self._LogShell(push_command)
# Retry push with increasing backoff if the device is busy.
retry = 0
while True:
output = self._adb.SendCommand(push_command, timeout_time=30 * 60)
if _HasAdbPushSucceeded(output):
if not os.path.isdir(host_path):
self._push_if_needed_cache[host] = os.stat(host).st_mtime
return
if retry < 3:
retry += 1
wait_time = 5 * retry
logging.error('Push failed, retrying in %d seconds: %s' %
(wait_time, output))
time.sleep(wait_time)
else:
raise Exception('Push failed: %s' % output)
diff_size = 0
if len(changed_files) <= MAX_INDIVIDUAL_PUSHES:
diff_size = sum(host_utils.GetRecursiveDiskUsage(f[0])
for f in changed_files)
# TODO(craigdh): Replace this educated guess with a heuristic that
# approximates the push time for each method.
if len(changed_files) > MAX_INDIVIDUAL_PUSHES or diff_size > 0.5 * size:
self._actual_push_size += size
Push(host_path, device_path)
else:
for f in changed_files:
Push(f[0], f[1])
self._actual_push_size += diff_size
def GetPushSizeInfo(self):
"""Get total size of pushes to the device done via PushIfNeeded()
Returns:
A tuple:
1. Total size of push requests to PushIfNeeded (MB)
2. Total size that was actually pushed (MB)
"""
return (self._potential_push_size, self._actual_push_size)
def GetFileContents(self, filename, log_result=False):
"""Gets contents from the file specified by |filename|."""
return self.RunShellCommand('cat "%s" 2>/dev/null' % filename,
log_result=log_result)
def SetFileContents(self, filename, contents):
"""Writes |contents| to the file specified by |filename|."""
with tempfile.NamedTemporaryFile() as f:
f.write(contents)
f.flush()
self._adb.Push(f.name, filename)
def RunShellCommandWithSU(self, command, timeout_time=20, log_result=False):
return self.RunShellCommand('su -c %s' % command, timeout_time, log_result)
def CanAccessProtectedFileContents(self):
"""Returns True if Get/SetProtectedFileContents would work via "su" or adb
shell running as root.
Devices running user builds don't have adb root, but may provide "su" which
can be used for accessing protected files.
"""
return (self._GetProtectedFileCommandRunner() != None)
def _GetProtectedFileCommandRunner(self):
"""Finds the best method to access protected files on the device.
Returns:
1. None when privileged files cannot be accessed on the device.
2. Otherwise: A function taking a single parameter: a string with command
line arguments. Running that function executes the command with
the appropriate method.
"""
if self._protected_file_access_method_initialized:
return self._privileged_command_runner
self._privileged_command_runner = None
self._protected_file_access_method_initialized = True
for cmd in [self.RunShellCommand, self.RunShellCommandWithSU]:
# Get contents of the auxv vector for the init(8) process from a small
# binary file that always exists on linux and is always read-protected.
contents = cmd('cat /proc/1/auxv')
# The leading 4 or 8 bytes of the auxv vector are a_type. There are not many
# reserved a_type values, hence byte 2 must always be '\0' for a realistic
# auxv. See /usr/include/elf.h.
if len(contents) > 0 and (contents[0][2] == '\0'):
self._privileged_command_runner = cmd
break
return self._privileged_command_runner
def GetProtectedFileContents(self, filename):
"""Gets contents from the protected file specified by |filename|.
This is potentially less efficient than GetFileContents.
"""
command = 'cat "%s" 2> /dev/null' % filename
command_runner = self._GetProtectedFileCommandRunner()
if command_runner:
return command_runner(command)
else:
logging.warning('Could not access protected file: %s' % filename)
return []
def SetProtectedFileContents(self, filename, contents):
"""Writes |contents| to the protected file specified by |filename|.
This is less efficient than SetFileContents.
"""
with DeviceTempFile(self) as temp_file:
with DeviceTempFile(self, suffix=".sh") as temp_script:
# Put the contents in a temporary file
self.SetFileContents(temp_file.name, contents)
# Create a script to copy the file contents to its final destination
self.SetFileContents(temp_script.name,
'cat %s > %s' % (temp_file.name, filename))
command = 'sh %s' % temp_script.name
command_runner = self._GetProtectedFileCommandRunner()
if command_runner:
return command_runner(command)
else:
logging.warning(
'Could not set contents of protected file: %s' % filename)
def RemovePushedFiles(self):
"""Removes all files pushed with PushIfNeeded() from the device."""
for p in self._pushed_files:
self.RunShellCommand('rm -r %s' % p, timeout_time=2 * 60)
def ListPathContents(self, path):
"""Lists files in all subdirectories of |path|.
Args:
path: The path to list.
Returns:
A dict of {"name": (size, lastmod), ...}.
"""
# Example output:
# /foo/bar:
# -rw-r----- user group 102 2011-05-12 12:29:54.131623387 +0100 baz.txt
re_file = re.compile('^-(?P<perms>[^\s]+)\s+'
'(?P<user>[^\s]+)\s+'
'(?P<group>[^\s]+)\s+'
'(?P<size>[^\s]+)\s+'
'(?P<date>[^\s]+)\s+'
'(?P<time>[^\s]+)\s+'
'(?P<filename>[^\s]+)$')
return _GetFilesFromRecursiveLsOutput(
path, self.RunShellCommand('ls -lR %s' % path), re_file,
self.GetUtcOffset())
def GetUtcOffset(self):
if not self._device_utc_offset:
self._device_utc_offset = self.RunShellCommand('date +%z')[0]
return self._device_utc_offset
def SetJavaAssertsEnabled(self, enable):
"""Sets or removes the device java assertions property.
Args:
enable: If True the property will be set.
Returns:
True if the file was modified (reboot is required for it to take effect).
"""
# First ensure the desired property is persisted.
temp_props_file = tempfile.NamedTemporaryFile()
properties = ''
if self._adb.Pull(LOCAL_PROPERTIES_PATH, temp_props_file.name):
with open(temp_props_file.name) as f:
properties = f.read()
re_search = re.compile(r'^\s*' + re.escape(JAVA_ASSERT_PROPERTY) +
r'\s*=\s*all\s*$', re.MULTILINE)
if enable != bool(re.search(re_search, properties)):
re_replace = re.compile(r'^\s*' + re.escape(JAVA_ASSERT_PROPERTY) +
r'\s*=\s*\w+\s*$', re.MULTILINE)
properties = re.sub(re_replace, '', properties)
if enable:
properties += '\n%s=all\n' % JAVA_ASSERT_PROPERTY
file(temp_props_file.name, 'w').write(properties)
self._adb.Push(temp_props_file.name, LOCAL_PROPERTIES_PATH)
# Next, check the current runtime value is what we need, and
# if not, set it and report that a reboot is required.
was_set = 'all' in self.system_properties[JAVA_ASSERT_PROPERTY]
if was_set == enable:
return False
self.system_properties[JAVA_ASSERT_PROPERTY] = enable and 'all' or ''
return True
def GetBuildId(self):
"""Returns the build ID of the system (e.g. JRM79C)."""
build_id = self.system_properties['ro.build.id']
assert build_id
return build_id
def GetBuildType(self):
"""Returns the build type of the system (e.g. eng)."""
build_type = self.system_properties['ro.build.type']
assert build_type
return build_type
def GetBuildProduct(self):
"""Returns the build product of the device (e.g. maguro)."""
build_product = self.system_properties['ro.build.product']
assert build_product
return build_product
def GetProductName(self):
"""Returns the product name of the device (e.g. takju)."""
name = self.system_properties['ro.product.name']
assert name
return name
def GetBuildFingerprint(self):
"""Returns the build fingerprint of the device."""
build_fingerprint = self.system_properties['ro.build.fingerprint']
assert build_fingerprint
return build_fingerprint
def GetDescription(self):
"""Returns the description of the system.
For example, "yakju-userdebug 4.1 JRN54F 364167 dev-keys".
"""
description = self.system_properties['ro.build.description']
assert description
return description
def GetProductModel(self):
"""Returns the name of the product model (e.g. "Galaxy Nexus") """
model = self.system_properties['ro.product.model']
assert model
return model
def GetWifiIP(self):
"""Returns the wifi IP on the device."""
wifi_ip = self.system_properties['dhcp.wlan0.ipaddress']
# Do not assert here. Devices (e.g. emulators) may not have a WifiIP.
return wifi_ip
def GetSubscriberInfo(self):
"""Returns the device subscriber info (e.g. GSM and device ID) as string."""
iphone_sub = self.RunShellCommand('dumpsys iphonesubinfo')
# Do not assert here. Devices (e.g. Nakasi on K) may not have iphonesubinfo.
return '\n'.join(iphone_sub)
def GetBatteryInfo(self):
"""Returns a {str: str} dict of battery info (e.g. status, level, etc)."""
battery = self.RunShellCommand('dumpsys battery')
assert battery
battery_info = {}
for line in battery[1:]:
k, _, v = line.partition(': ')
battery_info[k.strip()] = v.strip()
return battery_info
def GetSetupWizardStatus(self):
"""Returns the status of the device setup wizard (e.g. DISABLED)."""
status = self.system_properties['ro.setupwizard.mode']
# On some devices, the status is empty if not otherwise set. In such cases
# the caller should expect an empty string to be returned.
return status
def StartMonitoringLogcat(self, clear=True, logfile=None, filters=None):
"""Starts monitoring the output of logcat, for use with WaitForLogMatch.
Args:
clear: If True the existing logcat output will be cleared, to avoid
matching historical output lurking in the log.
filters: A list of logcat filters to be used.
"""
if clear:
self.RunShellCommand('logcat -c')
args = []
if self._adb._target_arg:
args += shlex.split(self._adb._target_arg)
args += ['logcat', '-v', 'threadtime']
if filters:
args.extend(filters)
else:
args.append('*:v')
if logfile:
logfile = NewLineNormalizer(logfile)
# Spawn logcat and synchronize with it.
for _ in range(4):
self._logcat = pexpect.spawn(constants.GetAdbPath(), args, timeout=10,
logfile=logfile)
if not clear or self.SyncLogCat():
break
self._logcat.close(force=True)
else:
logging.critical('Error reading from logcat: ' + str(self._logcat.match))
sys.exit(1)
def SyncLogCat(self):
"""Synchronize with logcat.
Synchronize with the monitored logcat so that WaitForLogMatch will only
consider new messages that are received after this point in time.
Returns:
True if the synchronization succeeded.
"""
assert self._logcat
tag = 'logcat_sync_%s' % time.time()
self.RunShellCommand('log ' + tag)
return self._logcat.expect([tag, pexpect.EOF, pexpect.TIMEOUT]) == 0
def GetMonitoredLogCat(self):
"""Returns an "adb logcat" command as created by pexpected.spawn."""
if not self._logcat:
self.StartMonitoringLogcat(clear=False)
return self._logcat
def WaitForLogMatch(self, success_re, error_re, clear=False, timeout=10):
"""Blocks until a matching line is logged or a timeout occurs.
Args:
success_re: A compiled re to search each line for.
error_re: A compiled re which, if found, terminates the search for
|success_re|. If None is given, no error condition will be detected.
clear: If True the existing logcat output will be cleared, defaults to
false.
timeout: Timeout in seconds to wait for a log match.
Raises:
pexpect.TIMEOUT after |timeout| seconds without a match for |success_re|
or |error_re|.
Returns:
The re match object if |success_re| is matched first or None if |error_re|
is matched first.
"""
logging.info('<<< Waiting for logcat:' + str(success_re.pattern))
t0 = time.time()
while True:
if not self._logcat:
self.StartMonitoringLogcat(clear)
try:
while True:
# Note this will block for up to the timeout _per log line_, so we need
# to calculate the overall timeout remaining since t0.
time_remaining = t0 + timeout - time.time()
if time_remaining < 0:
raise pexpect.TIMEOUT(self._logcat)
self._logcat.expect(PEXPECT_LINE_RE, timeout=time_remaining)
line = self._logcat.match.group(1)
if error_re:
error_match = error_re.search(line)
if error_match:
return None
success_match = success_re.search(line)
if success_match:
return success_match
logging.info('<<< Skipped Logcat Line:' + str(line))
except pexpect.TIMEOUT:
raise pexpect.TIMEOUT(
'Timeout (%ds) exceeded waiting for pattern "%s" (tip: use -vv '
'to debug)' %
(timeout, success_re.pattern))
except pexpect.EOF:
# It seems that sometimes logcat can end unexpectedly. This seems
# to happen during Chrome startup after a reboot followed by a cache
# clean. I don't understand why this happens, but this code deals with
# getting EOF in logcat.
logging.critical('Found EOF in adb logcat. Restarting...')
# Rerun spawn with original arguments. Note that self._logcat.args[0] is
# the path of adb, so we don't want it in the arguments.
self._logcat = pexpect.spawn(constants.GetAdbPath(),
self._logcat.args[1:],
timeout=self._logcat.timeout,
logfile=self._logcat.logfile)
def StartRecordingLogcat(self, clear=True, filters=None):
"""Starts recording logcat output to eventually be saved as a string.
This call should come before some series of tests are run, with either
StopRecordingLogcat or SearchLogcatRecord following the tests.
Args:
clear: True if existing log output should be cleared.
filters: A list of logcat filters to be used.
"""
if not filters:
filters = ['*:v']
if clear:
self._adb.SendCommand('logcat -c')
logcat_command = 'adb %s logcat -v threadtime %s' % (self._adb._target_arg,
' '.join(filters))
self._logcat_tmpoutfile = tempfile.NamedTemporaryFile(bufsize=0)
self.logcat_process = subprocess.Popen(logcat_command, shell=True,
stdout=self._logcat_tmpoutfile)
def GetCurrentRecordedLogcat(self):
"""Return the current content of the logcat being recorded.
Call this after StartRecordingLogcat() and before StopRecordingLogcat().
This can be useful to perform timed polling/parsing.
Returns:
Current logcat output as a single string, or None if
StopRecordingLogcat() was already called.
"""
if not self._logcat_tmpoutfile:
return None
with open(self._logcat_tmpoutfile.name) as f:
return f.read()
def StopRecordingLogcat(self):
"""Stops an existing logcat recording subprocess and returns output.
Returns:
The logcat output as a string or an empty string if logcat was not
being recorded at the time.
"""
if not self.logcat_process:
return ''
# Cannot evaluate the poll() result directly, as 0 is a possible value.
# Read self.logcat_process.stdout before killing the process; otherwise
# communicate() may return incomplete output due to a broken pipe.
if self.logcat_process.poll() is None:
self.logcat_process.kill()
self.logcat_process.wait()
self.logcat_process = None
self._logcat_tmpoutfile.seek(0)
output = self._logcat_tmpoutfile.read()
self._logcat_tmpoutfile.close()
self._logcat_tmpoutfile = None
return output
@staticmethod
def SearchLogcatRecord(record, message, thread_id=None, proc_id=None,
log_level=None, component=None):
"""Searches the specified logcat output and returns results.
This method searches through the logcat output specified by record for a
certain message, narrowing results by matching them against any other
specified criteria. It returns all matching lines as described below.
Args:
record: A string generated by Start/StopRecordingLogcat to search.
message: An output string to search for.
thread_id: The thread id that is the origin of the message.
proc_id: The process that is the origin of the message.
log_level: The log level of the message.
component: The name of the component that would create the message.
Returns:
A list of dictionaries representing matching entries, each containing keys
thread_id, proc_id, log_level, component, and message.
"""
if thread_id:
thread_id = str(thread_id)
if proc_id:
proc_id = str(proc_id)
results = []
reg = re.compile('(\d+)\s+(\d+)\s+([A-Z])\s+([A-Za-z]+)\s*:(.*)$',
re.MULTILINE)
log_list = reg.findall(record)
for (tid, pid, log_lev, comp, msg) in log_list:
if ((not thread_id or thread_id == tid) and
(not proc_id or proc_id == pid) and
(not log_level or log_level == log_lev) and
(not component or component == comp) and msg.find(message) > -1):
match = dict({'thread_id': tid, 'proc_id': pid,
'log_level': log_lev, 'component': comp,
'message': msg})
results.append(match)
return results
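# Illustrative example (hypothetical log line): given a recorded line such as
#   '01-01 12:00:00.000  1234  5678 I chromium: Page loaded'
# the regex above captures ('1234', '5678', 'I', 'chromium', ' Page loaded'),
# so SearchLogcatRecord(record, 'Page loaded', component='chromium') returns
# [{'thread_id': '1234', 'proc_id': '5678', 'log_level': 'I',
#   'component': 'chromium', 'message': ' Page loaded'}].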
def ExtractPid(self, process_name):
"""Extracts Process Ids for a given process name from Android Shell.
Args:
process_name: name of the process on the device.
Returns:
List of all the process ids (as strings) that match the given name.
If the name of a process exactly matches the given name, the pid of
that process will be inserted to the front of the pid list.
"""
pids = []
for line in self.RunShellCommand('ps', log_result=False):
data = line.split()
try:
if process_name in data[-1]: # name is in the last column
if process_name == data[-1]:
pids.insert(0, data[1]) # PID is in the second column
else:
pids.append(data[1])
except IndexError:
pass
return pids
def GetIoStats(self):
"""Gets cumulative disk IO stats since boot (for all processes).
Returns:
Dict of {num_reads, num_writes, read_ms, write_ms} or None if there
was an error.
"""
IoStats = collections.namedtuple(
'IoStats',
['device',
'num_reads_issued',
'num_reads_merged',
'num_sectors_read',
'ms_spent_reading',
'num_writes_completed',
'num_writes_merged',
'num_sectors_written',
'ms_spent_writing',
'num_ios_in_progress',
'ms_spent_doing_io',
'ms_spent_doing_io_weighted',
])
for line in self.GetFileContents('/proc/diskstats', log_result=False):
fields = line.split()
stats = IoStats._make([fields[2]] + [int(f) for f in fields[3:]])
if stats.device == 'mmcblk0':
return {
'num_reads': stats.num_reads_issued,
'num_writes': stats.num_writes_completed,
'read_ms': stats.ms_spent_reading,
'write_ms': stats.ms_spent_writing,
}
logging.warning('Could not find disk IO stats.')
return None
def GetMemoryUsageForPid(self, pid):
"""Returns the memory usage for given pid.
Args:
pid: The pid number of the specific process running on device.
Returns:
Dict of {metric:usage_kb}, for the process which has specified pid.
The metric keys which may be included are: Size, Rss, Pss, Shared_Clean,
Shared_Dirty, Private_Clean, Private_Dirty, VmHWM.
"""
showmap = self.RunShellCommand('showmap %d' % pid)
if not showmap or not showmap[-1].endswith('TOTAL'):
logging.warning('Invalid output for showmap %s', str(showmap))
return {}
items = showmap[-1].split()
if len(items) != 9:
logging.warning('Invalid TOTAL for showmap %s', str(items))
return {}
usage_dict = collections.defaultdict(int)
usage_dict.update({
'Size': int(items[0].strip()),
'Rss': int(items[1].strip()),
'Pss': int(items[2].strip()),
'Shared_Clean': int(items[3].strip()),
'Shared_Dirty': int(items[4].strip()),
'Private_Clean': int(items[5].strip()),
'Private_Dirty': int(items[6].strip()),
})
peak_value_kb = 0
for line in self.GetProtectedFileContents('/proc/%s/status' % pid):
if not line.startswith('VmHWM:'): # Format: 'VmHWM: +[0-9]+ kB'
continue
peak_value_kb = int(line.split(':')[1].strip().split(' ')[0])
break
usage_dict['VmHWM'] = peak_value_kb
if not peak_value_kb:
logging.warning('Could not find memory peak value for pid ' + str(pid))
return usage_dict
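# Illustrative showmap TOTAL line (hypothetical values) as parsed above:
#   '4096 1024 512 256 128 64 32 0 TOTAL'
# splits into 9 items and yields Size=4096, Rss=1024, Pss=512,
# Shared_Clean=256, Shared_Dirty=128, Private_Clean=64, Private_Dirty=32.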
def ProcessesUsingDevicePort(self, device_port):
"""Lists processes using the specified device port on loopback interface.
Args:
device_port: Port on device we want to check.
Returns:
A list of (pid, process_name) tuples using the specified port.
"""
tcp_results = self.RunShellCommand('cat /proc/net/tcp', log_result=False)
tcp_address = '0100007F:%04X' % device_port
pids = []
for single_connect in tcp_results:
connect_results = single_connect.split()
# Column 1 is the TCP port, and Column 9 is the inode of the socket
if connect_results[1] == tcp_address:
socket_inode = connect_results[9]
socket_name = 'socket:[%s]' % socket_inode
lsof_results = self.RunShellCommand('lsof', log_result=False)
for single_process in lsof_results:
process_results = single_process.split()
# Ignore the line if it has less than nine columns in it, which may
# be the case when a process stops while lsof is executing.
if len(process_results) <= 8:
continue
# Column 0 is the executable name
# Column 1 is the pid
# Column 8 is the Inode in use
if process_results[8] == socket_name:
pids.append((int(process_results[1]), process_results[0]))
break
logging.info('PidsUsingDevicePort: %s', pids)
return pids
def FileExistsOnDevice(self, file_name):
"""Checks whether the given file exists on the device.
Args:
file_name: Full path of file to check.
Returns:
True if the file exists, False otherwise.
"""
assert '"' not in file_name, 'file_name cannot contain double quotes'
try:
status = self._adb.SendShellCommand(
'\'test -e "%s"; echo $?\'' % (file_name))
if 'test: not found' not in status:
return int(status) == 0
status = self._adb.SendShellCommand(
'\'ls "%s" >/dev/null 2>&1; echo $?\'' % (file_name))
return int(status) == 0
except ValueError:
if IsDeviceAttached(self._device):
raise errors.DeviceUnresponsiveError('Device may be offline.')
return False
def IsFileWritableOnDevice(self, file_name):
"""Checks whether the given file (or directory) is writable on the device.
Args:
file_name: Full path of file/directory to check.
Returns:
True if writable, False otherwise.
"""
assert '"' not in file_name, 'file_name cannot contain double quotes'
try:
status = self._adb.SendShellCommand(
'\'test -w "%s"; echo $?\'' % (file_name))
if 'test: not found' not in status:
return int(status) == 0
raise errors.AbortError('"test" binary not found. OS too old.')
except ValueError:
if IsDeviceAttached(self._device):
raise errors.DeviceUnresponsiveError('Device may be offline.')
return False
@staticmethod
def GetTimestamp():
return time.strftime('%Y-%m-%d-%H%M%S', time.localtime())
@staticmethod
def EnsureHostDirectory(host_file):
host_dir = os.path.dirname(os.path.abspath(host_file))
if not os.path.exists(host_dir):
os.makedirs(host_dir)
def TakeScreenshot(self, host_file=None):
"""Saves a screenshot image to |host_file| on the host.
Args:
host_file: Absolute path to the image file to store on the host or None to
use an autogenerated file name.
Returns:
Resulting host file name of the screenshot.
"""
host_file = os.path.abspath(host_file or
'screenshot-%s.png' % self.GetTimestamp())
self.EnsureHostDirectory(host_file)
device_file = '%s/screenshot.png' % self.GetExternalStorage()
self.RunShellCommand(
'/system/bin/screencap -p %s' % device_file)
self.PullFileFromDevice(device_file, host_file)
self.RunShellCommand('rm -f "%s"' % device_file)
return host_file
def PullFileFromDevice(self, device_file, host_file):
"""Download |device_file| on the device from to |host_file| on the host.
Args:
device_file: Absolute path to the file to retrieve from the device.
host_file: Absolute path to the file to store on the host.
"""
if not self._adb.Pull(device_file, host_file):
raise device_errors.AdbCommandFailedError(
['pull', device_file, host_file], 'Failed to pull file from device.')
assert os.path.exists(host_file)
def SetUtilWrapper(self, util_wrapper):
"""Sets a wrapper prefix to be used when running a locally-built
binary on the device (ex.: md5sum_bin).
"""
self._util_wrapper = util_wrapper
def RunUIAutomatorTest(self, test, test_package, timeout):
"""Runs a single uiautomator test.
Args:
test: Test class/method.
test_package: Name of the test jar.
timeout: Timeout time in seconds.
Returns:
An instance of am_instrument_parser.TestResult object.
"""
cmd = 'uiautomator runtest %s -e class %s' % (test_package, test)
self._LogShell(cmd)
output = self._adb.SendShellCommand(cmd, timeout_time=timeout)
# uiautomator doesn't fully conform to the instrumentation test runner
# convention and doesn't terminate with INSTRUMENTATION_CODE.
# Just assume the first result is valid.
(test_results, _) = am_instrument_parser.ParseAmInstrumentOutput(output)
if not test_results:
raise errors.InstrumentationError(
'no test results... device setup correctly?')
return test_results[0]
def DismissCrashDialogIfNeeded(self):
"""Dismiss the error/ANR dialog if present.
Returns: Name of the crashed package if a dialog is focused,
None otherwise.
"""
re_focus = re.compile(
r'\s*mCurrentFocus.*Application (Error|Not Responding): (\S+)}')
def _FindFocusedWindow():
match = None
for line in self.RunShellCommand('dumpsys window windows'):
match = re.match(re_focus, line)
if match:
break
return match
match = _FindFocusedWindow()
if not match:
return
package = match.group(2)
logging.warning('Trying to dismiss %s dialog for %s' % match.groups())
self.SendKeyEvent(KEYCODE_DPAD_RIGHT)
self.SendKeyEvent(KEYCODE_DPAD_RIGHT)
self.SendKeyEvent(KEYCODE_ENTER)
match = _FindFocusedWindow()
if match:
logging.error('Still showing a %s dialog for %s' % match.groups())
return package
def EfficientDeviceDirectoryCopy(self, source, dest):
""" Copy a directory efficiently on the device
Uses a shell script running on the target to copy new and changed files the
source directory to the destination directory and remove added files. This
is in some cases much faster than cp -r.
Args:
source: absolute path of source directory
dest: absolute path of destination directory
"""
logging.info('In EfficientDeviceDirectoryCopy %s %s', source, dest)
with DeviceTempFile(self, suffix=".sh") as temp_script_file:
host_script_path = os.path.join(constants.DIR_SOURCE_ROOT,
'build',
'android',
'pylib',
'efficient_android_directory_copy.sh')
self._adb.Push(host_script_path, temp_script_file.name)
out = self.RunShellCommand(
'sh %s %s %s' % (temp_script_file.name, source, dest),
timeout_time=120)
if self._device:
device_repr = self._device[-4:]
else:
device_repr = '????'
for line in out:
logging.info('[%s]> %s', device_repr, line)
def _GetControlUsbChargingCommand(self):
if self._control_usb_charging_command['cached']:
return self._control_usb_charging_command['command']
self._control_usb_charging_command['cached'] = True
if not self.IsRootEnabled():
return None
for command in CONTROL_USB_CHARGING_COMMANDS:
# Assert command is valid.
assert 'disable_command' in command
assert 'enable_command' in command
assert 'witness_file' in command
witness_file = command['witness_file']
if self.FileExistsOnDevice(witness_file):
self._control_usb_charging_command['command'] = command
return command
return None
def CanControlUsbCharging(self):
return self._GetControlUsbChargingCommand() is not None
def DisableUsbCharging(self, timeout=10):
command = self._GetControlUsbChargingCommand()
if not command:
raise Exception('Unable to act on usb charging.')
disable_command = command['disable_command']
t0 = time.time()
# Do not loop directly on self.IsDeviceCharging to cut the number of calls
# to the device.
while True:
if t0 + timeout - time.time() < 0:
raise pexpect.TIMEOUT('Unable to disable USB charging in time: %s' % (
self.GetBatteryInfo()))
self.RunShellCommand(disable_command)
if not self.IsDeviceCharging():
break
def EnableUsbCharging(self, timeout=10):
command = self._GetControlUsbChargingCommand()
if not command:
raise Exception('Unable to act on usb charging.')
enable_command = command['enable_command']
t0 = time.time()
# Do not loop directly on self.IsDeviceCharging to cut the number of calls
# to the device.
while True:
if t0 + timeout - time.time() < 0:
raise pexpect.TIMEOUT('Unable to enable USB charging in time.')
self.RunShellCommand(enable_command)
if self.IsDeviceCharging():
break
def IsDeviceCharging(self):
  """Returns True if a 'powered: true' line appears in dumpsys battery."""
  for line in self.RunShellCommand('dumpsys battery'):
    if 'powered: ' in line:
      if line.split('powered: ')[1] == 'true':
        return True
  return False
class NewLineNormalizer(object):
"""A file-like object to normalize EOLs to '\n'.
Pexpect runs adb within a pseudo-tty device (see
http://www.noah.org/wiki/pexpect), so any '\n' printed by adb is written
as '\r\n' to the logfile. Since adb already uses '\r\n' to terminate
lines, the log ends up having '\r\r\n' at the end of each line. This
filter replaces the above with a single '\n' in the data stream.
"""
def __init__(self, output):
self._output = output
def write(self, data):
data = data.replace('\r\r\n', '\n')
self._output.write(data)
def flush(self):
self._output.flush()
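if __name__ == '__main__':
  # Minimal sketch (added for illustration, not part of the original module):
  # exercise NewLineNormalizer on a trivial in-memory sink to demonstrate the
  # '\r\r\n' -> '\n' rewrite described in the class docstring above.
  class _ListSink(object):
    def __init__(self):
      self.chunks = []
    def write(self, data):
      self.chunks.append(data)
    def flush(self):
      pass

  _sink = _ListSink()
  _norm = NewLineNormalizer(_sink)
  _norm.write('line one\r\r\nline two\r\r\n')
  _norm.flush()
  assert ''.join(_sink.chunks) == 'line one\nline two\n'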
|
fujunwei/chromium-crosswalk
|
build/android/pylib/android_commands.py
|
Python
|
bsd-3-clause
| 73,159
|
[
"Galaxy"
] |
c1d1136a115c3a126fcb2ed48756ae143f8790707fa50fb617aaacd173554275
|
# Copyright 1999-2000 by Jeffrey Chang. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Record classes to hold BLAST output.
Classes:
Blast Holds all the information from a blast search.
PSIBlast Holds all the information from a psi-blast search.
Header Holds information from the header.
Description Holds information about one hit description.
Alignment Holds information about one alignment hit.
HSP Holds information about one HSP.
MultipleAlignment Holds information about a multiple alignment.
DatabaseReport Holds information from the database report.
Parameters Holds information from the parameters.
"""
# XXX finish printable BLAST output
from Bio.Align import Generic
class Header(object):
"""Saves information from a blast header.
Members:
application The name of the BLAST flavor that generated this data.
version Version of blast used.
date Date this data was generated.
reference Reference for blast.
query Name of query sequence.
query_letters Number of letters in the query sequence. (int)
database Name of the database.
database_sequences Number of sequences in the database. (int)
database_letters Number of letters in the database. (int)
"""
def __init__(self):
self.application = ''
self.version = ''
self.date = ''
self.reference = ''
self.query = ''
self.query_letters = None
self.database = ''
self.database_sequences = None
self.database_letters = None
class Description(object):
"""Stores information about one hit in the descriptions section.
Members:
title Title of the hit.
score Number of bits. (int)
bits Bit score. (float)
e E value. (float)
num_alignments Number of alignments for the same subject. (int)
"""
def __init__(self):
self.title = ''
self.score = None
self.bits = None
self.e = None
self.num_alignments = None
def __str__(self):
return "%-66s %5s %s" % (self.title, self.score, self.e)
class Alignment(object):
"""Stores information about one hit in the alignments section.
Members:
title Name.
hit_id Hit identifier. (str)
hit_def Hit definition. (str)
length Length. (int)
hsps A list of HSP objects.
"""
def __init__(self):
self.title = ''
self.hit_id = ''
self.hit_def = ''
self.length = None
self.hsps = []
def __str__(self):
lines = self.title.split('\n')
lines.append("Length = %s\n" % self.length)
return '\n '.join(lines)
class HSP(object):
"""Stores information about one hsp in an alignment hit.
Members:
score BLAST score of hit. (float)
bits Number of bits for that score. (float)
expect Expect value. (float)
num_alignments Number of alignments for same subject. (int)
identities Number of identities (int) if using the XML parser.
Tuple of number of identities/total aligned (int, int)
if using the (obsolete) plain text parser.
positives Number of positives (int) if using the XML parser.
Tuple of number of positives/total aligned (int, int)
if using the (obsolete) plain text parser.
gaps Number of gaps (int) if using the XML parser.
Tuple of number of gaps/total aligned (int, int) if
using the (obsolete) plain text parser.
align_length Length of the alignment. (int)
strand Tuple of (query, target) strand.
frame Tuple of 1 or 2 frame shifts, depending on the flavor.
query The query sequence.
query_start The start residue for the query sequence. (1-based)
query_end The end residue for the query sequence. (1-based)
match The match sequence.
sbjct The sbjct sequence.
sbjct_start The start residue for the sbjct sequence. (1-based)
sbjct_end The end residue for the sbjct sequence. (1-based)
Not all flavors of BLAST return values for every attribute:
          score  expect  identities  positives  strand  frame
BLASTP      X       X        X           X
BLASTN      X       X        X           X         X
BLASTX      X       X        X           X                   X
TBLASTN     X       X        X           X                   X
TBLASTX     X       X        X           X                  X/X
Note: for BLASTX, the query sequence is shown as a protein sequence,
but the numbering is based on the nucleotides. Thus, the numbering
is 3x larger than the number of amino acid residues. A similar effect
can be seen for the sbjct sequence in TBLASTN, and for both sequences
in TBLASTX.
Also, for negative frames, the sequence numbering starts from
query_start and counts down.
"""
def __init__(self):
self.score = None
self.bits = None
self.expect = None
self.num_alignments = None
self.identities = (None, None)
self.positives = (None, None)
self.gaps = (None, None)
self.align_length = None
self.strand = (None, None)
self.frame = ()
self.query = ''
self.query_start = None
self.query_end = None
self.match = ''
self.sbjct = ''
self.sbjct_start = None
self.sbjct_end = None
def __str__(self):
lines = ["Score %i (%i bits), expectation %0.1e, alignment length %i" \
% (self.score, self.bits, self.expect, self.align_length)]
if self.align_length < 50:
lines.append("Query:%s %s %s" % (str(self.query_start).rjust(8),
str(self.query),
str(self.query_end)))
lines.append(" %s" \
% (str(self.match)))
lines.append("Sbjct:%s %s %s" % (str(self.sbjct_start).rjust(8),
str(self.sbjct),
str(self.sbjct_end)))
else:
lines.append("Query:%s %s...%s %s" \
% (str(self.query_start).rjust(8),
str(self.query)[:45],
str(self.query)[-3:],
str(self.query_end)))
lines.append(" %s...%s" \
% (str(self.match)[:45],
str(self.match)[-3:]))
lines.append("Sbjct:%s %s...%s %s" \
% (str(self.sbjct_start).rjust(8),
str(self.sbjct)[:45],
str(self.sbjct)[-3:],
str(self.sbjct_end)))
return "\n".join(lines)
class MultipleAlignment(object):
"""Holds information about a multiple alignment.
Members:
alignment A list of tuples (name, start residue, sequence, end residue).
The start residue is 1-based. It may be blank, if that sequence is
not aligned in the multiple alignment.
"""
def __init__(self):
self.alignment = []
def to_generic(self, alphabet):
"""Retrieve generic alignment object for the given alignment.
Instead of the tuples, this returns an Alignment object from
Bio.Align.Generic, through which you can manipulate and query
the object.
alphabet is the specified alphabet for the sequences in the code (for
example IUPAC.IUPACProtein).
Thanks to James Casbon for the code.
"""
#TODO - Switch to new Bio.Align.MultipleSeqAlignment class?
seq_parts = []
seq_names = []
parse_number = 0
n = 0
for name, start, seq, end in self.alignment:
if name == 'QUERY': #QUERY is the first in each alignment block
parse_number += 1
n = 0
if parse_number == 1: # create on first_parse, append on all others
seq_parts.append(seq)
seq_names.append(name)
else:
seq_parts[n] += seq
n += 1
generic = Generic.Alignment(alphabet)
for (name,seq) in zip(seq_names,seq_parts):
generic.add_sequence(name, seq)
return generic
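# Illustrative input for to_generic (hypothetical records): two blocks of the
# same pairwise alignment stored as
#   [('QUERY', 1, 'ACDE', 4), ('gi|1|', 1, 'ACDE', 4),
#    ('QUERY', 5, 'FGHI', 8), ('gi|1|', 5, 'FGHI', 8)]
# are stitched into QUERY -> 'ACDEFGHI' and gi|1| -> 'ACDEFGHI', because each
# 'QUERY' row starts a new block and resets the per-block sequence counter n.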
class Round(object):
"""Holds information from a PSI-BLAST round.
Members:
number Round number. (int)
reused_seqs Sequences in model, found again. List of Description objects.
new_seqs Sequences not found, or below threshold. List of Description.
alignments A list of Alignment objects.
multiple_alignment A MultipleAlignment object.
"""
def __init__(self):
self.number = None
self.reused_seqs = []
self.new_seqs = []
self.alignments = []
self.multiple_alignment = None
class DatabaseReport(object):
"""Holds information about a database report.
Members:
database_name List of database names. (can have multiple dbs)
num_letters_in_database Number of letters in the database. (int)
num_sequences_in_database List of number of sequences in the database.
posted_date List of the dates the databases were posted.
ka_params A tuple of (lambda, k, h) values. (floats)
gapped # XXX this isn't set right!
ka_params_gap A tuple of (lambda, k, h) values. (floats)
"""
def __init__(self):
self.database_name = []
self.posted_date = []
self.num_letters_in_database = []
self.num_sequences_in_database = []
self.ka_params = (None, None, None)
self.gapped = 0
self.ka_params_gap = (None, None, None)
class Parameters(object):
"""Holds information about the parameters.
Members:
matrix Name of the matrix.
gap_penalties Tuple of (open, extend) penalties. (floats)
sc_match Match score for nucleotide-nucleotide comparison
sc_mismatch Mismatch penalty for nucleotide-nucleotide comparison
num_hits Number of hits to the database. (int)
num_sequences Number of sequences. (int)
num_good_extends Number of extensions. (int)
num_seqs_better_e Number of sequences better than e-value. (int)
hsps_no_gap Number of HSP's better, without gapping. (int)
hsps_prelim_gapped Number of HSP's gapped in prelim test. (int)
hsps_prelim_gapped_attemped Number of HSP's attempted in prelim. (int)
hsps_gapped Total number of HSP's gapped. (int)
query_length Length of the query. (int)
query_id Identifier of the query sequence. (str)
database_length Number of letters in the database. (int)
effective_hsp_length Effective HSP length. (int)
effective_query_length Effective length of query. (int)
effective_database_length Effective length of database. (int)
effective_search_space Effective search space. (int)
effective_search_space_used Effective search space used. (int)
frameshift Frameshift window. Tuple of (int, float)
threshold Threshold. (int)
window_size Window size. (int)
dropoff_1st_pass Tuple of (score, bits). (int, float)
gap_x_dropoff Tuple of (score, bits). (int, float)
gap_x_dropoff_final Tuple of (score, bits). (int, float)
gap_trigger Tuple of (score, bits). (int, float)
blast_cutoff Tuple of (score, bits). (int, float)
"""
def __init__(self):
self.matrix = ''
self.gap_penalties = (None, None)
self.sc_match = None
self.sc_mismatch = None
self.num_hits = None
self.num_sequences = None
self.num_good_extends = None
self.num_seqs_better_e = None
self.hsps_no_gap = None
self.hsps_prelim_gapped = None
self.hsps_prelim_gapped_attemped = None
self.hsps_gapped = None
self.query_id = None
self.query_length = None
self.database_length = None
self.effective_hsp_length = None
self.effective_query_length = None
self.effective_database_length = None
self.effective_search_space = None
self.effective_search_space_used = None
self.frameshift = (None, None)
self.threshold = None
self.window_size = None
self.dropoff_1st_pass = (None, None)
self.gap_x_dropoff = (None, None)
self.gap_x_dropoff_final = (None, None)
self.gap_trigger = (None, None)
self.blast_cutoff = (None, None)
class Blast(Header, DatabaseReport, Parameters):
"""Saves the results from a blast search.
Members:
descriptions A list of Description objects.
alignments A list of Alignment objects.
multiple_alignment A MultipleAlignment object.
+ members inherited from base classes
"""
def __init__(self):
Header.__init__(self)
DatabaseReport.__init__(self)
Parameters.__init__(self)
self.descriptions = []
self.alignments = []
self.multiple_alignment = None
class PSIBlast(Header, DatabaseReport, Parameters):
"""Saves the results from a blastpgp search.
Members:
rounds A list of Round objects.
converged Whether the search converged.
+ members inherited from base classes
"""
def __init__(self):
Header.__init__(self)
DatabaseReport.__init__(self)
Parameters.__init__(self)
self.rounds = []
self.converged = 0
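if __name__ == '__main__':
    # Minimal sketch (illustrative only, not part of Biopython): populate a
    # Description and an HSP by hand to show how the record classes print.
    # Real records come from the Bio.Blast parsers rather than manual
    # construction; all values below are made up.
    d = Description()
    d.title = 'gi|1234|ref|NP_000001.1| hypothetical protein'
    d.score = 48
    d.e = 2e-05
    print(d)

    hsp = HSP()
    hsp.score = 97.0
    hsp.bits = 42.3
    hsp.expect = 2e-05
    hsp.align_length = 12
    hsp.query = 'ACDEFGHIKLMN'
    hsp.match = '||||||||||||'
    hsp.sbjct = 'ACDEFGHIKLMN'
    hsp.query_start, hsp.query_end = 1, 12
    hsp.sbjct_start, hsp.sbjct_end = 1, 12
    print(hsp)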
|
asherkhb/coge
|
bin/last_wrapper/Bio/Blast/Record.py
|
Python
|
bsd-2-clause
| 14,321
|
[
"BLAST",
"Biopython"
] |
80e23844c751a9cfe5110ed7b249df5e8e34c015c70096170f72992b25515b53
|
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
from PyQt5 import QtCore, QtWidgets
from peacock.utils import Testing
class TestExodusState(Testing.PeacockAppImageTestCase):
"""
Test for ExodusViewer state when executable is re-run.
"""
qapp = QtWidgets.QApplication([])
def testState(self):
"""
Tests that re-executing doesn't change the state of the exodus viewer.
"""
# The tabs to switch between
exodus = self._app.main_widget.tab_plugin.ExodusViewer
execute = self._app.main_widget.tab_plugin.ExecuteTabPlugin
# Run and check that basic results show up
self.execute()
self.selectTab(exodus)
Testing.process_events(1)
self.assertImage("testDefault.png")
# Select the mesh
mesh_plugin = exodus.currentWidget().MeshPlugin
mesh_plugin.ViewMeshToggle.setCheckState(QtCore.Qt.Checked)
mesh_plugin.ViewMeshToggle.clicked.emit(True)
self.assertImage("testMeshOn.png", allowed=0.98)
# Re-run and check results again
self.selectTab(execute)
self.execute()
self.selectTab(exodus)
self.assertImage("testMeshOn.png", allowed=0.98)
def testTabChange(self):
"""
Tests that changing tabs starts and stops the window's initialize/update timers as expected.
"""
# The tabs to switch between
exodus = self._app.main_widget.tab_plugin.ExodusViewer
execute = self._app.main_widget.tab_plugin.ExecuteTabPlugin
# Execute tab active, but nothing run yet (both timers should be inactive)
self.assertFalse(self._window._timers['initialize'].isActive())
self.assertFalse(self._window._timers['update'].isActive())
# Switch to Exodus tabs (initialize timer should be running)
self.selectTab(exodus)
self.assertTrue(self._window._timers['initialize'].isActive())
self.assertFalse(self._window._timers['update'].isActive())
# Execute (update timer should be running, initialize should be off)
self.execute()
self.assertFalse(self._window._timers['initialize'].isActive())
self.assertTrue(self._window._timers['update'].isActive())
# Switch to Execute tab (both timers should be disabled)
self.selectTab(execute)
self.assertFalse(self._window._timers['initialize'].isActive())
self.assertFalse(self._window._timers['update'].isActive())
def testColorbarState(self):
"""
Test that re-running the simulation maintains colorbar state.
"""
# The tabs to switch between
exodus = self._app.main_widget.tab_plugin.ExodusViewer
execute = self._app.main_widget.tab_plugin.ExecuteTabPlugin
cbar_plugin = exodus.currentWidget().ColorbarPlugin
# Run and check that basic results show up
self.execute()
self.selectTab(exodus)
Testing.process_events(1)
# Disable colorbar
cbar_plugin.ColorBarToggle.setCheckState(QtCore.Qt.Unchecked)
cbar_plugin.ColorBarToggle.clicked.emit(True)
self.assertImage("testColorbarOff.png", allowed=0.98)
# Re-run and check results again
self.selectTab(execute)
self.execute()
self.selectTab(exodus)
Testing.process_events(1)
self.assertImage("testColorbarOff.png", allowed=0.98)
if __name__ == '__main__':
Testing.run_tests()
|
nuclear-wizard/moose
|
python/peacock/tests/peacock_app/check_exodus_state/test_exodus_state.py
|
Python
|
lgpl-2.1
| 3,691
|
[
"MOOSE"
] |
7869e22abee6f72deaa6756c687ab858b3c2135aab02feef5c1d762e66e01490
|
# Lint as: python3
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Probit regression, implemented in Stan."""
import numpy as np
from inference_gym.tools.stan import stan_model
from inference_gym.tools.stan import util
__all__ = [
'probit_regression',
]
def _add_bias(features):
return np.concatenate([features, np.ones([features.shape[0], 1])], axis=-1)
def probit_regression(
train_features,
train_labels,
test_features=None,
test_labels=None,
):
"""Bayesian probit regression with a Gaussian prior.
Args:
train_features: Floating-point `Tensor` with shape `[num_train_points,
num_features]`. Training features.
train_labels: Integer `Tensor` with shape `[num_train_points]`. Training
labels.
test_features: Floating-point `Tensor` with shape `[num_test_points,
num_features]`. Testing features. Can be `None`, in which case
test-related sample transformations are not computed.
test_labels: Integer `Tensor` with shape `[num_test_points]`. Testing
labels. Can be `None`, in which case test-related sample transformations
are not computed.
Returns:
model: `StanModel`.
"""
code = """
data {
int<lower=0> num_train_points;
int<lower=0> num_test_points;
int<lower=0> num_features;
matrix[num_train_points,num_features] train_features;
int<lower=0,upper=1> train_labels[num_train_points];
matrix[num_test_points,num_features] test_features;
int<lower=0,upper=1> test_labels[num_test_points];
}
parameters {
vector[num_features] weights;
}
model {
{
vector[num_train_points] probits;
probits = train_features * weights;
weights ~ normal(0, 1);
# Stan doesn't have a way to do it in log-space.
train_labels ~ bernoulli(Phi(probits));
}
}
generated quantities {
real test_nll;
real per_example_test_nll[num_test_points];
{
vector[num_test_points] probits;
probits = test_features * weights;
test_nll = -bernoulli_lpmf(test_labels | Phi(probits));
for (i in 1:num_test_points) {
per_example_test_nll[i] = -bernoulli_lpmf(
test_labels[i] | Phi(probits[i]));
}
}
}
"""
have_test = test_features is not None
train_features = _add_bias(train_features)
if have_test:
test_features = _add_bias(test_features)
else:
# cmdstanpy can't handle zero-sized arrays at the moment:
# https://github.com/stan-dev/cmdstanpy/issues/203
test_features = train_features[:1]
test_labels = train_labels[:1]
stan_data = {
'num_train_points': train_features.shape[0],
'num_test_points': test_features.shape[0],
'num_features': train_features.shape[1],
'train_features': train_features,
'train_labels': train_labels,
'test_features': test_features,
'test_labels': test_labels,
}
model = util.cached_stan_model(code)
def _ext_identity(samples):
return util.get_columns(samples, r'^weights\[\d+\]$')
def _ext_test_nll(samples):
return util.get_columns(samples, r'^test_nll$')[:, 0]
def _ext_per_example_test_nll(samples):
return util.get_columns(samples, r'^per_example_test_nll\[\d+\]$')
extract_fns = {'identity': _ext_identity}
if have_test:
extract_fns['test_nll'] = _ext_test_nll
extract_fns['per_example_test_nll'] = _ext_per_example_test_nll
return stan_model.StanModel(
extract_fns=extract_fns,
# The default random initialization saturates the 'Phi' function, causing
# initial log-probs to not be finite. Starting things off at 0 is more
# stable.
sample_fn=util.make_sample_fn(
model, data=stan_data, inits={'weights': np.zeros([25])}),
)
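if __name__ == '__main__':
  # Minimal sketch (illustrative, not part of the inference_gym API): the
  # unnormalized log-posterior that the Stan program above encodes, written
  # with NumPy/SciPy on hypothetical random data. scipy.stats.norm.cdf plays
  # the role of Stan's Phi(); scipy is assumed to be available here.
  from scipy.stats import norm

  rng = np.random.RandomState(0)
  features = _add_bias(rng.randn(8, 2))  # 8 points, 2 raw features plus bias
  labels = rng.randint(0, 2, size=8)
  weights = rng.randn(features.shape[1])

  probits = features.dot(weights)
  p = norm.cdf(probits)
  log_lik = np.sum(labels * np.log(p) + (1 - labels) * np.log1p(-p))
  log_prior = np.sum(norm.logpdf(weights))  # weights ~ normal(0, 1)
  print('unnormalized log posterior:', log_lik + log_prior)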
|
tensorflow/probability
|
spinoffs/inference_gym/inference_gym/tools/stan/probit_regression.py
|
Python
|
apache-2.0
| 4,360
|
[
"Gaussian"
] |
2dd3cf80fa9ef3464b5453ce2c7a5b693940f012e30dc883796576c73ed03b65
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2019 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""
The SCF iteration functions
"""
import numpy as np
from psi4.driver import p4util
from psi4.driver import constants
from psi4.driver.p4util.exceptions import SCFConvergenceError, ValidationError
from psi4 import core
from .efp import get_qm_atoms_opts, modify_Fock_permanent, modify_Fock_induced
#import logging
#logger = logging.getLogger("scf.scf_iterator")
#logger.setLevel(logging.DEBUG)
# Q: I expect more local settings of options for part of SCF.
# For convcrit, do we want:
# (A) easy to grep
# with p4util.OptionsStateCM(['SCF', 'E_CONVERGENCE'], ['SCF', 'D_CONVERGENCE']):
# core.set_local_option('SCF', 'E_CONVERGENCE', 1.e-5)
# core.set_local_option('SCF', 'D_CONVERGENCE', 1.e-4)
# self.iterations()
#
# or (B) functional. options never touched
# self.iterations(e_conv=1.e-5, d_conv=1.e-4)
def scf_compute_energy(self):
"""Base class Wavefunction requires this function. Here it is
simply a wrapper around initialize(), iterations(), finalize_energy(). It
returns the SCF energy computed by finalize_energy().
"""
if core.get_option('SCF', 'DF_SCF_GUESS') and (core.get_global_option('SCF_TYPE') == 'DIRECT'):
# speed up DIRECT algorithm (recomputes full (non-DF) integrals
# each iter) by first converging via fast DF iterations, then
# fully converging in fewer slow DIRECT iterations. aka Andy trick 2.0
core.print_out(" Starting with a DF guess...\n\n")
with p4util.OptionsStateCM(['SCF_TYPE']):
core.set_global_option('SCF_TYPE', 'DF')
self.initialize()
try:
self.iterations()
except SCFConvergenceError:
self.finalize()
raise SCFConvergenceError("""SCF DF preiterations""", self.iteration_, self, 0, 0)
core.print_out("\n DF guess converged.\n\n")
# reset the DIIS & JK objects in prep for DIRECT
if self.initialized_diis_manager_:
self.diis_manager().reset_subspace()
self.initialize_jk(self.memory_jk_)
else:
self.initialize()
try:
self.iterations()
except SCFConvergenceError as e:
if core.get_option("SCF", "FAIL_ON_MAXITER"):
core.print_out(" Failed to converge.\n")
# energy = 0.0
# A P::e fn to either throw or protest upon nonconvergence
# die_if_not_converged()
raise e
else:
core.print_out(" Energy and/or wave function did not converge, but proceeding anyway.\n\n")
else:
core.print_out(" Energy and wave function converged.\n\n")
scf_energy = self.finalize_energy()
return scf_energy
def _build_jk(wfn, memory):
jk = core.JK.build(wfn.get_basisset("ORBITAL"),
aux=wfn.get_basisset("DF_BASIS_SCF"),
do_wK=wfn.functional().is_x_lrc(),
memory=memory)
return jk
def initialize_jk(self, memory, jk=None):
functional = self.functional()
if jk is None:
jk = _build_jk(self, memory)
self.set_jk(jk)
jk.set_print(self.get_print())
jk.set_memory(memory)
jk.set_do_K(functional.is_x_hybrid())
jk.set_do_wK(functional.is_x_lrc())
jk.set_omega(functional.x_omega())
jk.set_omega_alpha(functional.x_alpha())
jk.set_omega_beta(functional.x_beta())
jk.initialize()
jk.print_header()
def scf_initialize(self):
"""Specialized initialization, compute integrals and does everything to prepare for iterations"""
# Figure out memory distributions
# Get memory in terms of doubles
total_memory = (core.get_memory() / 8) * core.get_global_option("SCF_MEM_SAFETY_FACTOR")
# Figure out how large the DFT collocation matrices are
vbase = self.V_potential()
if vbase:
collocation_size = vbase.grid().collocation_size()
if vbase.functional().ansatz() == 1:
collocation_size *= 4 # First derivs
elif vbase.functional().ansatz() == 2:
collocation_size *= 10 # Second derivs
else:
collocation_size = 0
# Change allocation for collocation matrices based on DFT type
jk = _build_jk(self, total_memory)
jk_size = jk.memory_estimate()
# Give remaining to collocation
if total_memory > jk_size:
collocation_memory = total_memory - jk_size
# Give up to 10% to collocation
elif (total_memory * 0.1) > collocation_size:
collocation_memory = collocation_size
else:
collocation_memory = total_memory * 0.1
if collocation_memory > collocation_size:
collocation_memory = collocation_size
# Set constants
self.iteration_ = 0
self.memory_jk_ = int(total_memory - collocation_memory)
self.memory_collocation_ = int(collocation_memory)
if self.get_print():
core.print_out(" ==> Integral Setup <==\n\n")
# Initialize EFP
efp_enabled = hasattr(self.molecule(), 'EFP')
if efp_enabled:
# EFP: Set QM system, options, and callback. Display efp geom in [A]
efpobj = self.molecule().EFP
core.print_out(efpobj.banner())
core.print_out(efpobj.geometry_summary(units_to_bohr=constants.bohr2angstroms))
efpptc, efpcoords, efpopts = get_qm_atoms_opts(self.molecule())
efpobj.set_point_charges(efpptc, efpcoords)
efpobj.set_opts(efpopts, label='psi', append='psi')
efpobj.set_electron_density_field_fn(field_fn)
# Initialize all integrals and perform the first guess
if self.attempt_number_ == 1:
mints = core.MintsHelper(self.basisset())
self.initialize_jk(self.memory_jk_, jk=jk)
if self.V_potential():
self.V_potential().build_collocation_cache(self.memory_collocation_)
core.timer_on("HF: Form core H")
self.form_H()
core.timer_off("HF: Form core H")
if efp_enabled:
# EFP: Add in permanent moment contribution and cache
core.timer_on("HF: Form Vefp")
verbose = core.get_option('SCF', "PRINT")
Vefp = modify_Fock_permanent(self.molecule(), mints, verbose=verbose - 1)
Vefp = core.Matrix.from_array(Vefp)
self.H().add(Vefp)
Horig = self.H().clone()
self.Horig = Horig
core.print_out(" QM/EFP: iterating Total Energy including QM/EFP Induction\n")
core.timer_off("HF: Form Vefp")
core.timer_on("HF: Form S/X")
self.form_Shalf()
core.timer_off("HF: Form S/X")
core.print_out("\n ==> Pre-Iterations <==\n\n")
core.timer_on("HF: Guess")
self.guess()
core.timer_off("HF: Guess")
# Print out initial docc/socc/etc data
if self.get_print():
lack_occupancy = core.get_local_option('SCF', 'GUESS') in ['SAD']
if core.get_global_option('GUESS') in ['SAD']:
lack_occupancy = core.get_local_option('SCF', 'GUESS') in ['AUTO']
self.print_preiterations(small=lack_occupancy)
else:
self.print_preiterations(small=lack_occupancy)
else:
# We're reading the orbitals from the previous set of iterations.
self.form_D()
self.set_energies("Total Energy", self.compute_initial_E())
# turn off VV10 for iterations
if core.get_option('SCF', "DFT_VV10_POSTSCF") and self.functional().vv10_b() > 0.0:
core.print_out(" VV10: post-SCF option active \n \n")
self.functional().set_lock(False)
self.functional().set_do_vv10(False)
self.functional().set_lock(True)
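# --- Editor's illustrative sketch (names hypothetical) ----------------------
# scf_initialize() splits a doubles budget between the JK object and the DFT
# collocation matrices. The same arithmetic, isolated and testable:
def _split_scf_memory(total_memory, jk_size, collocation_size):
    """Return (memory_jk, memory_collocation) per the rules above."""
    if total_memory > jk_size:
        collocation_memory = total_memory - jk_size       # JK fits; rest to grid
    elif (total_memory * 0.1) > collocation_size:
        collocation_memory = collocation_size             # grid is small anyway
    else:
        collocation_memory = total_memory * 0.1           # cap grid at 10%
    collocation_memory = min(collocation_memory, collocation_size)
    return int(total_memory - collocation_memory), int(collocation_memory)

# e.g. 1e9 doubles total, JK wants 8e8, grid wants 5e8 -> grid gets the 2e8 left
assert _split_scf_memory(1e9, 8e8, 5e8) == (800000000, 200000000)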
def scf_iterate(self, e_conv=None, d_conv=None):
is_dfjk = core.get_global_option('SCF_TYPE').endswith('DF')
verbose = core.get_option('SCF', "PRINT")
reference = core.get_option('SCF', "REFERENCE")
# self.member_data_ signals are non-local, used internally by c-side fns
self.diis_enabled_ = _validate_diis()
self.MOM_excited_ = _validate_MOM()
self.diis_start_ = core.get_option('SCF', 'DIIS_START')
damping_enabled = _validate_damping()
soscf_enabled = _validate_soscf()
frac_enabled = _validate_frac()
efp_enabled = hasattr(self.molecule(), 'EFP')
diis_rms = core.get_option('SCF', 'DIIS_RMS_ERROR')
if self.iteration_ < 2:
core.print_out(" ==> Iterations <==\n\n")
core.print_out("%s Total Energy Delta E %s |[F,P]|\n\n" %
(" " if is_dfjk else "", "RMS" if diis_rms else "MAX"))
# SCF iterations!
SCFE_old = 0.0
Dnorm = 0.0
while True:
self.iteration_ += 1
diis_performed = False
soscf_performed = False
self.frac_performed_ = False
#self.MOM_performed_ = False # redundant from common_init()
self.save_density_and_energy()
if efp_enabled:
# EFP: Add efp contribution to Fock matrix
self.H().copy(self.Horig)
global mints_psi4_yo
mints_psi4_yo = core.MintsHelper(self.basisset())
Vefp = modify_Fock_induced(self.molecule().EFP, mints_psi4_yo, verbose=verbose - 1)
Vefp = core.Matrix.from_array(Vefp)
self.H().add(Vefp)
SCFE = 0.0
self.clear_external_potentials()
core.timer_on("HF: Form G")
self.form_G()
core.timer_off("HF: Form G")
upcm = 0.0
if core.get_option('SCF', 'PCM'):
calc_type = core.PCM.CalcType.Total
if core.get_option("PCM", "PCM_SCF_TYPE") == "SEPARATE":
calc_type = core.PCM.CalcType.NucAndEle
Dt = self.Da().clone()
Dt.add(self.Db())
upcm, Vpcm = self.get_PCM().compute_PCM_terms(Dt, calc_type)
SCFE += upcm
self.push_back_external_potential(Vpcm)
self.set_variable("PCM POLARIZATION ENERGY", upcm)
self.set_energies("PCM Polarization", upcm)
upe = 0.0
if core.get_option('SCF', 'PE'):
Dt = self.Da().clone()
Dt.add(self.Db())
upe, Vpe = self.pe_state.get_pe_contribution(
Dt, elec_only=False
)
SCFE += upe
self.push_back_external_potential(Vpe)
self.set_variable("PE ENERGY", upe)
self.set_energies("PE Energy", upe)
core.timer_on("HF: Form F")
# SAD: since we don't have orbitals yet, we might not be able
# to form the real Fock matrix. Instead, build an initial one
if (self.iteration_ == 0) and self.sad_:
self.form_initial_F()
else:
self.form_F()
core.timer_off("HF: Form F")
if verbose > 3:
self.Fa().print_out()
self.Fb().print_out()
SCFE += self.compute_E()
if efp_enabled:
global efp_Dt_psi4_yo
# EFP: Add efp contribution to energy
efp_Dt_psi4_yo = self.Da().clone()
efp_Dt_psi4_yo.add(self.Db())
SCFE += self.molecule().EFP.get_wavefunction_dependent_energy()
self.set_energies("Total Energy", SCFE)
core.set_variable("SCF ITERATION ENERGY", SCFE)
Ediff = SCFE - SCFE_old
SCFE_old = SCFE
status = []
# Check if we are doing SOSCF
if (soscf_enabled and (self.iteration_ >= 3) and (Dnorm < core.get_option('SCF', 'SOSCF_START_CONVERGENCE'))):
Dnorm = self.compute_orbital_gradient(False, core.get_option('SCF', 'DIIS_MAX_VECS'))
diis_performed = False
if self.functional().needs_xc():
base_name = "SOKS, nmicro="
else:
base_name = "SOSCF, nmicro="
if not _converged(Ediff, Dnorm, e_conv=e_conv, d_conv=d_conv):
nmicro = self.soscf_update(core.get_option('SCF', 'SOSCF_CONV'),
core.get_option('SCF', 'SOSCF_MIN_ITER'),
core.get_option('SCF', 'SOSCF_MAX_ITER'),
core.get_option('SCF', 'SOSCF_PRINT'))
# if zero, the soscf call bounced for some reason
soscf_performed = (nmicro > 0)
if soscf_performed:
self.find_occupation()
status.append(base_name + str(nmicro))
else:
if verbose > 0:
core.print_out("Did not take a SOSCF step, using normal convergence methods\n")
else:
# need to ensure orthogonal orbitals and set epsilon
status.append(base_name + "conv")
core.timer_on("HF: Form C")
self.form_C()
core.timer_off("HF: Form C")
soscf_performed = True # Stops DIIS
if not soscf_performed:
# Normal convergence procedures if we do not do SOSCF
# SAD: form initial orbitals from the initial Fock matrix, and
# reset the occupations. From here on, the density matrices
# are correct.
if (self.iteration_ == 0) and self.sad_:
self.form_initial_C()
self.reset_occupation()
self.find_occupation()
else:
# Run DIIS
core.timer_on("HF: DIIS")
diis_performed = False
add_to_diis_subspace = self.diis_enabled_ and self.iteration_ >= self.diis_start_
Dnorm = self.compute_orbital_gradient(add_to_diis_subspace, core.get_option('SCF', 'DIIS_MAX_VECS'))
if (add_to_diis_subspace and self.iteration_ >= self.diis_start_ + core.get_option('SCF', 'DIIS_MIN_VECS') - 1):
diis_performed = self.diis()
if diis_performed:
status.append("DIIS")
core.timer_off("HF: DIIS")
if verbose > 4 and diis_performed:
core.print_out(" After DIIS:\n")
self.Fa().print_out()
self.Fb().print_out()
# frac, MOM invoked here from Wfn::HF::find_occupation
core.timer_on("HF: Form C")
self.form_C()
core.timer_off("HF: Form C")
if self.MOM_performed_:
status.append("MOM")
if self.frac_performed_:
status.append("FRAC")
# Reset occupations if necessary
if (self.iteration_ == 0) and self.reset_occ_:
self.reset_occupation()
self.find_occupation()
# Form new density matrix
core.timer_on("HF: Form D")
self.form_D()
core.timer_off("HF: Form D")
self.set_variable("SCF ITERATION ENERGY", SCFE)
# After we've built the new D, damp the update
if (damping_enabled and self.iteration_ > 1 and Dnorm > core.get_option('SCF', 'DAMPING_CONVERGENCE')):
damping_percentage = core.get_option('SCF', "DAMPING_PERCENTAGE")
self.damping_update(damping_percentage * 0.01)
status.append("DAMP={}%".format(round(damping_percentage)))
if verbose > 3:
self.Ca().print_out()
self.Cb().print_out()
self.Da().print_out()
self.Db().print_out()
# Print out the iteration
core.print_out(
" @%s%s iter %3s: %20.14f %12.5e %-11.5e %s\n" %
("DF-" if is_dfjk else "", reference, "SAD" if
((self.iteration_ == 0) and self.sad_) else self.iteration_, SCFE, Ediff, Dnorm, '/'.join(status)))
# if an excited MOM is requested but not started, don't stop yet
if self.MOM_excited_ and not self.MOM_performed_:
continue
# if a fractional occupation is requested but not started, don't stop yet
if frac_enabled and not self.frac_performed_:
continue
# Call any postiteration callbacks
if not ((self.iteration_ == 0) and self.sad_) and _converged(Ediff, Dnorm, e_conv=e_conv, d_conv=d_conv):
break
if self.iteration_ >= core.get_option('SCF', 'MAXITER'):
raise SCFConvergenceError("""SCF iterations""", self.iteration_, self, Ediff, Dnorm)
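# --- Editor's illustrative sketch (names hypothetical) ----------------------
# Stripped to its skeleton, scf_iterate() is: update, measure (Ediff, Dnorm),
# stop on joint convergence, raise on MAXITER. A toy version of that loop:
class _ToyConvergenceError(Exception):
    pass

def _toy_scf_loop(step, e_conv=1e-8, d_conv=1e-6, maxiter=100):
    """step(E_old) returns (E_new, Dnorm); mirrors the control flow above."""
    E_old, iteration = 0.0, 0
    while True:
        iteration += 1
        E_new, Dnorm = step(E_old)
        Ediff, E_old = E_new - E_old, E_new
        if abs(Ediff) < e_conv and Dnorm < d_conv:
            return E_new, iteration
        if iteration >= maxiter:
            raise _ToyConvergenceError("no convergence in %d iterations" % maxiter)

# Toy "SCF": the energy relaxes geometrically toward -1.0 and the gradient
# norm shrinks with it, so the loop terminates via the joint criterion.
_energy, _niter = _toy_scf_loop(lambda E: (0.5 * (E - 1.0), 0.5 * abs(E + 1.0)))
assert abs(_energy + 1.0) < 1e-7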
def scf_finalize_energy(self):
"""Performs stability analysis and calls back SCF with new guess
if needed, Returns the SCF energy. This function should be called
once orbitals are ready for energy/property computations, usually
after iterations() is called.
"""
# post-scf vv10 correlation
if core.get_option('SCF', "DFT_VV10_POSTSCF") and self.functional().vv10_b() > 0.0:
self.functional().set_lock(False)
self.functional().set_do_vv10(True)
self.functional().set_lock(True)
core.print_out(" ==> Computing Non-Self-Consistent VV10 Energy Correction <==\n\n")
SCFE = 0.0
self.form_V()
SCFE += self.compute_E()
self.set_energies("Total Energy", SCFE)
# Perform wavefunction stability analysis before doing
# anything on a wavefunction that may not be truly converged.
if core.get_option('SCF', 'STABILITY_ANALYSIS') != "NONE":
# Don't bother computing needed integrals if we can't do anything with them.
if self.functional().needs_xc():
raise ValidationError("Stability analysis not yet supported for XC functionals.")
# We need the integral file, make sure it is written and
# compute it if needed
if core.get_option('SCF', 'REFERENCE') != "UHF":
#psio = core.IO.shared_object()
#psio.open(constants.PSIF_SO_TEI, 1) # PSIO_OPEN_OLD
#try:
# psio.tocscan(constants.PSIF_SO_TEI, "IWL Buffers")
#except TypeError:
# # "IWL Buffers" actually found but psio_tocentry can't be returned to Py
# psio.close(constants.PSIF_SO_TEI, 1)
#else:
# # tocscan returned None
# psio.close(constants.PSIF_SO_TEI, 1)
# logic above foiled by psio_tocentry not returning None<--nullptr in pb11 2.2.1
# so forcibly recomputing for now until stability revamp
core.print_out(" SO Integrals not on disk. Computing...")
mints = core.MintsHelper(self.basisset())
# computing the integrals here also fixes a bug that prohibits relativistic stability analysis
mints.integrals()
core.print_out("done.\n")
# Q: Not worth exporting all the layers of psio, right?
follow = self.stability_analysis()
while follow and self.attempt_number_ <= core.get_option('SCF', 'MAX_ATTEMPTS'):
self.attempt_number_ += 1
core.print_out(" Running SCF again with the rotated orbitals.\n")
if self.initialized_diis_manager_:
self.diis_manager().reset_subspace()
# reading the rotated orbitals in before starting iterations
self.form_D()
self.set_energies("Total Energy", self.compute_initial_E())
self.iterations()
follow = self.stability_analysis()
if follow and self.attempt_number_ > core.get_option('SCF', 'MAX_ATTEMPTS'):
core.print_out(" There's still a negative eigenvalue. Try modifying FOLLOW_STEP_SCALE\n")
core.print_out(" or increasing MAX_ATTEMPTS (not available for PK integrals).\n")
# At this point, we are not doing any more SCF cycles
# and we can compute and print final quantities.
if hasattr(self.molecule(), 'EFP'):
efpobj = self.molecule().EFP
efpobj.compute() # do_gradient=do_gradient)
efpene = efpobj.get_energy(label='psi')
efp_wfn_independent_energy = efpene['total'] - efpene['ind']
self.set_energies("EFP", efpene['total'])
SCFE = self.get_energies("Total Energy")
SCFE += efp_wfn_independent_energy
self.set_energies("Total Energy", SCFE)
core.print_out(efpobj.energy_summary(scfefp=SCFE, label='psi'))
self.set_variable(
'EFP ELST ENERGY',
efpene['electrostatic'] + efpene['charge_penetration'] + efpene['electrostatic_point_charges'])
self.set_variable('EFP IND ENERGY', efpene['polarization'])
self.set_variable('EFP DISP ENERGY', efpene['dispersion'])
self.set_variable('EFP EXCH ENERGY', efpene['exchange_repulsion'])
self.set_variable('EFP TOTAL ENERGY', efpene['total'])
self.set_variable('CURRENT ENERGY', efpene['total'])
core.print_out("\n ==> Post-Iterations <==\n\n")
if self.V_potential():
quad = self.V_potential().quadrature_values()
rho_a = quad['RHO_A']/2 if self.same_a_b_dens() else quad['RHO_A']
rho_b = quad['RHO_B']/2 if self.same_a_b_dens() else quad['RHO_B']
rho_ab = (rho_a + rho_b)
self.set_variable("GRID ELECTRONS TOTAL",rho_ab)
self.set_variable("GRID ELECTRONS ALPHA",rho_a)
self.set_variable("GRID ELECTRONS BETA",rho_b)
dev_a = rho_a - self.nalpha()
dev_b = rho_b - self.nbeta()
core.print_out(f" Electrons on quadrature grid:\n")
if self.same_a_b_dens():
core.print_out(f" Ntotal = {rho_ab:15.10f} ; deviation = {dev_b+dev_a:.3e} \n\n")
else:
core.print_out(f" Nalpha = {rho_a:15.10f} ; deviation = {dev_a:.3e}\n")
core.print_out(f" Nbeta = {rho_b:15.10f} ; deviation = {dev_b:.3e}\n")
core.print_out(f" Ntotal = {rho_ab:15.10f} ; deviation = {dev_b+dev_a:.3e} \n\n")
if ((dev_b+dev_a) > 0.1):
core.print_out(" WARNING: large deviation in the electron count on grid detected. Check grid size!")
self.check_phases()
self.compute_spin_contamination()
self.frac_renormalize()
reference = core.get_option("SCF", "REFERENCE")
energy = self.get_energies("Total Energy")
# fail_on_maxiter = core.get_option("SCF", "FAIL_ON_MAXITER")
# if converged or not fail_on_maxiter:
#
# if print_lvl > 0:
# self.print_orbitals()
#
# if converged:
# core.print_out(" Energy converged.\n\n")
# else:
# core.print_out(" Energy did not converge, but proceeding anyway.\n\n")
if core.get_option('SCF', 'PRINT') > 0:
self.print_orbitals()
is_dfjk = core.get_global_option('SCF_TYPE').endswith('DF')
core.print_out(" @%s%s Final Energy: %20.14f" % ('DF-' if is_dfjk else '', reference, energy))
# if (perturb_h_) {
# core.print_out(" with %f %f %f perturbation" %
# (dipole_field_strength_[0], dipole_field_strength_[1], dipole_field_strength_[2]))
# }
core.print_out("\n\n")
self.print_energies()
self.clear_external_potentials()
if core.get_option('SCF', 'PCM'):
calc_type = core.PCM.CalcType.Total
if core.get_option("PCM", "PCM_SCF_TYPE") == "SEPARATE":
calc_type = core.PCM.CalcType.NucAndEle
Dt = self.Da().clone()
Dt.add(self.Db())
_, Vpcm = self.get_PCM().compute_PCM_terms(Dt, calc_type)
self.push_back_external_potential(Vpcm)
# Set callback function for CPSCF
self.set_external_cpscf_perturbation("PCM", lambda pert_dm : self.get_PCM().compute_V(pert_dm))
if core.get_option('SCF', 'PE'):
Dt = self.Da().clone()
Dt.add(self.Db())
_, Vpe = self.pe_state.get_pe_contribution(
Dt, elec_only=False
)
self.push_back_external_potential(Vpe)
# Set callback function for CPSCF
self.set_external_cpscf_perturbation("PE", lambda pert_dm : self.pe_state.get_pe_contribution(pert_dm, elec_only=True)[1])
# Properties
# Comments so that autodoc utility will find these PSI variables
# Process::environment.globals["SCF DIPOLE X"] =
# Process::environment.globals["SCF DIPOLE Y"] =
# Process::environment.globals["SCF DIPOLE Z"] =
# Process::environment.globals["SCF QUADRUPOLE XX"] =
# Process::environment.globals["SCF QUADRUPOLE XY"] =
# Process::environment.globals["SCF QUADRUPOLE XZ"] =
# Process::environment.globals["SCF QUADRUPOLE YY"] =
# Process::environment.globals["SCF QUADRUPOLE YZ"] =
# Process::environment.globals["SCF QUADRUPOLE ZZ"] =
# Orbitals are always saved, in case an MO guess is requested later
# save_orbitals()
# Shove variables into global space
for k, v in self.variables().items():
core.set_variable(k, v)
# TODO re-enable
self.finalize()
if self.V_potential():
self.V_potential().clear_collocation_cache()
core.print_out("\nComputation Completed\n")
return energy
def scf_print_energies(self):
enuc = self.get_energies('Nuclear')
e1 = self.get_energies('One-Electron')
e2 = self.get_energies('Two-Electron')
exc = self.get_energies('XC')
ed = self.get_energies('-D')
self.del_variable('-D Energy')
evv10 = self.get_energies('VV10')
eefp = self.get_energies('EFP')
epcm = self.get_energies('PCM Polarization')
epe = self.get_energies('PE Energy')
hf_energy = enuc + e1 + e2
dft_energy = hf_energy + exc + ed + evv10
total_energy = dft_energy + eefp + epcm + epe
core.print_out(" => Energetics <=\n\n")
core.print_out(" Nuclear Repulsion Energy = {:24.16f}\n".format(enuc))
core.print_out(" One-Electron Energy = {:24.16f}\n".format(e1))
core.print_out(" Two-Electron Energy = {:24.16f}\n".format(e2))
if self.functional().needs_xc():
core.print_out(" DFT Exchange-Correlation Energy = {:24.16f}\n".format(exc))
core.print_out(" Empirical Dispersion Energy = {:24.16f}\n".format(ed))
core.print_out(" VV10 Nonlocal Energy = {:24.16f}\n".format(evv10))
if core.get_option('SCF', 'PCM'):
core.print_out(" PCM Polarization Energy = {:24.16f}\n".format(epcm))
if core.get_option('SCF', 'PE'):
core.print_out(" PE Energy = {:24.16f}\n".format(epe))
if hasattr(self.molecule(), 'EFP'):
core.print_out(" EFP Energy = {:24.16f}\n".format(eefp))
core.print_out(" Total Energy = {:24.16f}\n".format(total_energy))
if core.get_option('SCF', 'PE'):
core.print_out(self.pe_state.cppe_state.summary_string)
self.set_variable('NUCLEAR REPULSION ENERGY', enuc)
self.set_variable('ONE-ELECTRON ENERGY', e1)
self.set_variable('TWO-ELECTRON ENERGY', e2)
if self.functional().needs_xc():
self.set_variable('DFT XC ENERGY', exc)
self.set_variable('DFT VV10 ENERGY', evv10)
self.set_variable('DFT FUNCTIONAL TOTAL ENERGY', hf_energy + exc + evv10)
#self.set_variable(self.functional().name() + ' FUNCTIONAL TOTAL ENERGY', hf_energy + exc + evv10)
self.set_variable('DFT TOTAL ENERGY', dft_energy) # overwritten later for DH
else:
self.set_variable('HF TOTAL ENERGY', hf_energy)
if hasattr(self, "_disp_functor"):
self.set_variable('DISPERSION CORRECTION ENERGY', ed)
#if abs(ed) > 1.0e-14:
# for pv, pvv in self.variables().items():
# if abs(pvv - ed) < 1.0e-14:
# if pv.endswith('DISPERSION CORRECTION ENERGY') and pv.startswith(self.functional().name()):
# fctl_plus_disp_name = pv.split()[0]
# self.set_variable(fctl_plus_disp_name + ' TOTAL ENERGY', dft_energy) # overwritten later for DH
#else:
# self.set_variable(self.functional().name() + ' TOTAL ENERGY', dft_energy) # overwritten later for DH
self.set_variable('SCF ITERATIONS', self.iteration_)
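# --- Editor's illustrative sketch (names hypothetical) ----------------------
# scf_print_energies() composes its totals in three layers (HF, DFT,
# environment). The bookkeeping, isolated:
def _compose_scf_totals(enuc, e1, e2, exc=0.0, ed=0.0, evv10=0.0,
                        eefp=0.0, epcm=0.0, epe=0.0):
    hf_energy = enuc + e1 + e2
    dft_energy = hf_energy + exc + ed + evv10
    total_energy = dft_energy + eefp + epcm + epe
    return hf_energy, dft_energy, total_energy

# With every DFT/environment piece zero, all layers collapse to the HF sum.
assert _compose_scf_totals(1.0, -2.0, 0.5) == (-0.5, -0.5, -0.5)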
def scf_print_preiterations(self,small=False):
# small version does not print Nalpha,Nbeta,Ndocc,Nsocc, e.g. for SAD guess where they are not
# available
ct = self.molecule().point_group().char_table()
if not small:
core.print_out(" -------------------------------------------------------\n")
core.print_out(" Irrep Nso Nmo Nalpha Nbeta Ndocc Nsocc\n")
core.print_out(" -------------------------------------------------------\n")
for h in range(self.nirrep()):
core.print_out(
f" {ct.gamma(h).symbol():<3s} {self.nsopi()[h]:6d} {self.nmopi()[h]:6d} {self.nalphapi()[h]:6d} {self.nbetapi()[h]:6d} {self.doccpi()[h]:6d} {self.soccpi()[h]:6d}\n"
)
core.print_out(" -------------------------------------------------------\n")
core.print_out(
f" Total {self.nso():6d} {self.nmo():6d} {self.nalpha():6d} {self.nbeta():6d} {self.nbeta():6d} {self.nalpha() - self.nbeta():6d}\n"
)
core.print_out(" -------------------------------------------------------\n\n")
else:
core.print_out(" -------------------------\n")
core.print_out(" Irrep Nso Nmo \n")
core.print_out(" -------------------------\n")
for h in range(self.nirrep()):
core.print_out(
f" {ct.gamma(h).symbol():<3s} {self.nsopi()[h]:6d} {self.nmopi()[h]:6d} \n"
)
core.print_out(" -------------------------\n")
core.print_out(
f" Total {self.nso():6d} {self.nmo():6d}\n"
)
core.print_out(" -------------------------\n\n")
# Bind functions to core.HF class
core.HF.initialize = scf_initialize
core.HF.initialize_jk = initialize_jk
core.HF.iterations = scf_iterate
core.HF.compute_energy = scf_compute_energy
core.HF.finalize_energy = scf_finalize_energy
core.HF.print_energies = scf_print_energies
core.HF.print_preiterations = scf_print_preiterations
def _converged(e_delta, d_rms, e_conv=None, d_conv=None):
if e_conv is None:
e_conv = core.get_option("SCF", "E_CONVERGENCE")
if d_conv is None:
d_conv = core.get_option("SCF", "D_CONVERGENCE")
return (abs(e_delta) < e_conv and d_rms < d_conv)
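# --- Editor's illustrative usage ---------------------------------------------
# With both thresholds passed explicitly, _converged() never consults the
# options object, so the pure logic can be exercised standalone:
assert _converged(-3.2e-9, 4.1e-7, e_conv=1e-8, d_conv=1e-6)
assert not _converged(-3.2e-9, 4.1e-5, e_conv=1e-8, d_conv=1e-6)  # Dnorm fails
assert not _converged(-3.2e-7, 4.1e-7, e_conv=1e-8, d_conv=1e-6)  # Ediff fails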
def _validate_damping():
"""Sanity-checks DAMPING control options
Raises
------
ValidationError
If any of |scf__damping_percentage|, |scf__damping_convergence|
don't play well together.
Returns
-------
bool
Whether DAMPING is enabled during scf.
"""
# Q: I changed the enabled criterion get_option <-- has_option_changed
enabled = (core.get_option('SCF', 'DAMPING_PERCENTAGE') > 0.0)
if enabled:
parameter = core.get_option('SCF', "DAMPING_PERCENTAGE")
if parameter < 0.0 or parameter > 100.0:
raise ValidationError('SCF DAMPING_PERCENTAGE ({}) must be between 0 and 100'.format(parameter))
stop = core.get_option('SCF', 'DAMPING_CONVERGENCE')
if stop < 0.0:
raise ValidationError('SCF DAMPING_CONVERGENCE ({}) must be > 0'.format(stop))
return enabled
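# --- Editor's illustrative sketch (names hypothetical) ----------------------
# Assuming the conventional mixing that damping_update() applies, a damping
# percentage p replaces the fresh density by (1 - p/100)*D_new + (p/100)*D_old:
import numpy as np

def _damped_density(D_new, D_old, damping_percentage):
    frac = damping_percentage * 0.01
    return (1.0 - frac) * D_new + frac * D_old

_D_old = np.eye(2)
_D_new = np.array([[0.0, 1.0], [1.0, 0.0]])
assert np.allclose(_damped_density(_D_new, _D_old, 20.0),
                   0.8 * _D_new + 0.2 * _D_old)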
def _validate_diis():
"""Sanity-checks DIIS control options
Raises
------
ValidationError
If any of |scf__diis|, |scf__diis_start|,
|scf__diis_min_vecs|, |scf__diis_max_vecs| don't play well together.
Returns
-------
bool
Whether DIIS is enabled during scf.
"""
enabled = bool(core.get_option('SCF', 'DIIS'))
if enabled:
start = core.get_option('SCF', 'DIIS_START')
if start < 1:
raise ValidationError('SCF DIIS_START ({}) must be at least 1'.format(start))
minvecs = core.get_option('SCF', 'DIIS_MIN_VECS')
if minvecs < 1:
raise ValidationError('SCF DIIS_MIN_VECS ({}) must be at least 1'.format(minvecs))
maxvecs = core.get_option('SCF', 'DIIS_MAX_VECS')
if maxvecs < minvecs:
raise ValidationError('SCF DIIS_MAX_VECS ({}) must be at least DIIS_MIN_VECS ({})'.format(
maxvecs, minvecs))
return enabled
def _validate_frac():
"""Sanity-checks FRAC control options
Raises
------
ValidationError
If any of |scf__frac_start| don't play well together.
Returns
-------
bool
Whether FRAC is enabled during scf.
"""
    enabled = (core.get_option('SCF', 'FRAC_START') != 0)
    if enabled:
        start = core.get_option('SCF', 'FRAC_START')
        if start < 0:
            raise ValidationError('SCF FRAC_START ({}) must be at least 1'.format(start))
return enabled
def _validate_MOM():
"""Sanity-checks MOM control options
Raises
------
ValidationError
If any of |scf__mom_start|, |scf__mom_occ| don't play well together.
Returns
-------
bool
Whether excited-state MOM (not just the plain stabilizing MOM) is enabled during scf.
"""
enabled = (core.get_option('SCF', "MOM_START") != 0 and len(core.get_option('SCF', "MOM_OCC")) > 0)
if enabled:
start = core.get_option('SCF', "MOM_START")
if start < 0:
raise ValidationError('SCF MOM_START ({}) must be at least 1'.format(start))
return enabled
def _validate_soscf():
"""Sanity-checks SOSCF control options
Raises
------
ValidationError
If any of |scf__soscf|, |scf__soscf_start_convergence|,
|scf__soscf_min_iter|, |scf__soscf_max_iter| don't play well together.
Returns
-------
bool
Whether SOSCF is enabled during scf.
"""
enabled = core.get_option('SCF', 'SOSCF')
if enabled:
start = core.get_option('SCF', 'SOSCF_START_CONVERGENCE')
if start < 0.0:
raise ValidationError('SCF SOSCF_START_CONVERGENCE ({}) must be positive'.format(start))
miniter = core.get_option('SCF', 'SOSCF_MIN_ITER')
if miniter < 1:
raise ValidationError('SCF SOSCF_MIN_ITER ({}) must be at least 1'.format(miniter))
maxiter = core.get_option('SCF', 'SOSCF_MAX_ITER')
if maxiter < miniter:
raise ValidationError('SCF SOSCF_MAX_ITER ({}) must be at least SOSCF_MIN_ITER ({})'.format(
maxiter, miniter))
conv = core.get_option('SCF', 'SOSCF_CONV')
if conv < 1.e-10:
raise ValidationError('SCF SOSCF_CONV ({}) must be achievable'.format(conv))
return enabled
def field_fn(xyz):
"""Callback function for PylibEFP to compute electric field from electrons
in ab initio part for libefp polarization calculation.
Parameters
----------
xyz : list
(3 * npt, ) flat array of points at which to compute electric field
Returns
-------
list
(3 * npt, ) flat array of electric field at points in `xyz`.
Notes
-----
Function signature defined by libefp, so function uses number of
basis functions and integrals factory `mints_psi4_yo` and total density
matrix `efp_Dt_psi4_yo` from global namespace.
"""
points = np.array(xyz).reshape(-1, 3)
npt = len(points)
# Cartesian basis one-electron EFP perturbation
nbf = mints_psi4_yo.basisset().nbf()
# Electric field at points
field = np.zeros((npt, 3))
for ipt in range(npt):
# get electric field integrals from Psi4
p4_field_ints = mints_psi4_yo.electric_field(origin=points[ipt])
field[ipt] = [
np.vdot(efp_Dt_psi4_yo, np.asarray(p4_field_ints[0])), # Ex
np.vdot(efp_Dt_psi4_yo, np.asarray(p4_field_ints[1])), # Ey
np.vdot(efp_Dt_psi4_yo, np.asarray(p4_field_ints[2])) # Ez
]
field = np.reshape(field, 3 * npt)
return field
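# --- Editor's illustrative sketch (names hypothetical) ----------------------
# field_fn() contracts electric-field integrals with the density matrix. The
# classical point-charge analogue makes the shape contract clear: a flat
# (3*npt,) array of points in, a flat (3*npt,) array of field values out.
import numpy as np

def _point_charge_field(xyz, charges, positions):
    """E(r) = sum_i q_i (r - R_i) / |r - R_i|**3, flattened like field_fn."""
    points = np.array(xyz).reshape(-1, 3)
    field = np.zeros_like(points)
    for q, R in zip(charges, positions):
        dr = points - R                           # (npt, 3)
        r3 = np.linalg.norm(dr, axis=1) ** 3      # (npt,)
        field += q * dr / r3[:, None]
    return field.reshape(-1)

# Unit charge at the origin, probe at (1, 0, 0): field is +x with magnitude 1.
assert np.allclose(_point_charge_field([1.0, 0.0, 0.0], [1.0], [np.zeros(3)]),
                   [1.0, 0.0, 0.0])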
| dgasmith/psi4 | psi4/driver/procrouting/scf_proc/scf_iterator.py | Python | lgpl-3.0 | 36,666 | ["Psi4"] | 10e41ca001be2b637fdc2d1d9fe2248073d6ef7f2188926f8b76ffd095f56d6b |
|
"""
Upload class
"""
import os
import logging
import galaxy.util
from galaxy import web
from galaxy.tools import DefaultToolState
from galaxy.tools import DataSourceTool
from galaxy.tools.actions import upload_common
from galaxy.tools.parameters import params_to_incoming
from galaxy.tools.parameters import visit_input_values
from galaxy.tools.parameters.basic import DataToolParameter
from galaxy.tools.parameters.basic import DataCollectionToolParameter
from galaxy.tools.parameters.basic import UnvalidatedValue
from galaxy.util.bunch import Bunch
from galaxy.util.hash_util import is_hashable
from galaxy.web import error
from galaxy.web import url_for
from galaxy.web.base.controller import BaseUIController
import tool_shed.util.shed_util_common as suc
log = logging.getLogger( __name__ )
class AddFrameData:
def __init__( self ):
self.wiki_url = None
self.debug = None
self.from_noframe = None
class ToolRunner( BaseUIController ):
#Hack to get biomart to work, ideally, we could pass tool_id to biomart and receive it back
@web.expose
def biomart(self, trans, tool_id='biomart', **kwd):
"""Catches the tool id and redirects as needed"""
return self.index(trans, tool_id=tool_id, **kwd)
#test to get hapmap to work, ideally, we could pass tool_id to hapmap biomart and receive it back
@web.expose
def hapmapmart(self, trans, tool_id='hapmapmart', **kwd):
"""Catches the tool id and redirects as needed"""
return self.index(trans, tool_id=tool_id, **kwd)
@web.expose
def default(self, trans, tool_id=None, **kwd):
"""Catches the tool id and redirects as needed"""
return self.index(trans, tool_id=tool_id, **kwd)
def __get_tool_components( self, tool_id, tool_version=None, get_loaded_tools_by_lineage=False, set_selected=False ):
return self.get_toolbox().get_tool_components( tool_id, tool_version, get_loaded_tools_by_lineage, set_selected )
@web.expose
def index( self, trans, tool_id=None, from_noframe=None, **kwd ):
# No tool id passed, redirect to main page
if tool_id is None:
return trans.response.send_redirect( url_for( controller="root", action="welcome" ) )
# When the tool form is initially loaded, the received kwd will not include a 'refresh'
# entry (which only is included when another option is selected in the tool_version_select_field),
# so the default selected option should be the most recent version of the tool. The following
# check will make sure this occurs.
refreshed_on_change = kwd.get( 'refresh', False )
tool_version_select_field, tools, tool = self.__get_tool_components( tool_id,
tool_version=None,
get_loaded_tools_by_lineage=False,
set_selected=refreshed_on_change )
# No tool matching the tool id, display an error (shouldn't happen)
if not tool:
log.error( "index called with tool id '%s' but no such tool exists", tool_id )
trans.log_event( "Tool id '%s' does not exist" % tool_id )
trans.response.status = 404
return "Tool '%s' does not exist, kwd=%s " % ( tool_id, kwd )
if tool.require_login and not trans.user:
message = "You must be logged in to use this tool."
status = "info"
redirect = url_for( controller='tool_runner', action='index', tool_id=tool_id, **kwd )
return trans.response.send_redirect( url_for( controller='user',
action='login',
cntrller='user',
message=message,
status=status,
redirect=redirect ) )
params = galaxy.util.Params( kwd, sanitize = False ) #Sanitize parameters when substituting into command line via input wrappers
#do param translation here, used by datasource tools
if tool.input_translator:
tool.input_translator.translate( params )
# We may be visiting Galaxy for the first time ( e.g., sending data from UCSC ),
# so make sure to create a new history if we've never had one before.
history = tool.get_default_history_by_trans( trans, create=True )
template, vars = tool.handle_input( trans, params.__dict__ )
if len( params ) > 0:
trans.log_event( "Tool params: %s" % ( str( params ) ), tool_id=tool_id )
add_frame = AddFrameData()
add_frame.debug = trans.debug
if from_noframe is not None:
add_frame.wiki_url = trans.app.config.wiki_url
add_frame.from_noframe = True
return trans.fill_template( template,
history=history,
toolbox=self.get_toolbox(),
tool_version_select_field=tool_version_select_field,
tool=tool,
util=galaxy.util,
add_frame=add_frame,
form_input_auto_focus=True,
**vars )
@web.expose
def rerun( self, trans, id=None, from_noframe=None, job_id=None, **kwd ):
"""
Given a HistoryDatasetAssociation id, find the job that created
the dataset, extract the parameters, and display the appropriate tool
form with parameters already filled in.
"""
if job_id:
try:
job_id = trans.security.decode_id( job_id )
job = trans.sa_session.query( trans.app.model.Job ).get( job_id )
except:
error( "Invalid value for 'job_id' parameter" )
if not trans.user_is_admin():
for data_assoc in job.output_datasets:
#only allow rerunning if user is allowed access to the dataset.
if not trans.app.security_agent.can_access_dataset( trans.get_current_user_roles(), data_assoc.dataset.dataset ):
error( "You are not allowed to rerun this job" )
param_error_text = "Failed to get parameters for job id %d " % job_id
else:
if not id:
error( "'id' parameter is required" );
try:
id = int( id )
except:
# it's not an un-encoded id, try to parse as encoded
try:
id = trans.security.decode_id( id )
except:
error( "Invalid value for 'id' parameter" )
# Get the dataset object
data = trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( id )
#only allow rerunning if user is allowed access to the dataset.
if not ( trans.user_is_admin() or trans.app.security_agent.can_access_dataset( trans.get_current_user_roles(), data.dataset ) ):
error( "You are not allowed to access this dataset" )
# Get the associated job, if any.
job = data.creating_job
if not job:
raise Exception("Failed to get job information for dataset hid %d" % data.hid)
param_error_text = "Failed to get parameters for dataset id %d " % data.id
# Get the tool object
tool_id = job.tool_id
tool_version = job.tool_version
try:
tool_version_select_field, tools, tool = self.__get_tool_components( tool_id,
tool_version=tool_version,
get_loaded_tools_by_lineage=False,
set_selected=True )
if ( tool.id == job.tool_id or tool.old_id == job.tool_id ) and tool.version == job.tool_version:
tool_id_version_message = ''
elif tool.id == job.tool_id:
if job.tool_version is None:
# For some reason jobs don't always keep track of the tool version.
tool_id_version_message = ''
else:
tool_id_version_message = 'This job was initially run with tool version "%s", which is not currently available. ' % job.tool_version
if len( tools ) > 1:
tool_id_version_message += 'You can rerun the job with the selected tool or choose another derivation of the tool.'
else:
tool_id_version_message += 'You can rerun the job with this tool version, which is a derivation of the original tool.'
else:
if len( tools ) > 1:
tool_id_version_message = 'This job was initially run with tool version "%s", which is not currently available. ' % job.tool_version
tool_id_version_message += 'You can rerun the job with the selected tool or choose another derivation of the tool.'
else:
tool_id_version_message = 'This job was initially run with tool id "%s", version "%s", which is not ' % ( job.tool_id, job.tool_version )
tool_id_version_message += 'currently available. You can rerun the job with this tool, which is a derivation of the original tool.'
assert tool is not None, 'Requested tool has not been loaded.'
except:
# This is expected so not an exception.
tool_id_version_message = ''
error( "This dataset was created by an obsolete tool (%s). Can't re-run." % tool_id )
# Can't rerun upload, external data sources, et cetera. Workflow compatible will proxy this for now
if not tool.is_workflow_compatible:
error( "The '%s' tool does not currently support rerunning." % tool.name )
# Get the job's parameters
try:
params_objects = job.get_param_values( trans.app, ignore_errors = True )
except:
raise Exception( param_error_text )
upgrade_messages = tool.check_and_update_param_values( params_objects, trans, update_values=False )
# Need to remap dataset parameters. Job parameters point to original
# dataset used; parameter should be the analogous dataset in the
# current history.
history = trans.get_history()
hda_source_dict = {} # Mapping from HDA in history to source HDAs.
for hda in history.datasets:
source_hda = hda.copied_from_history_dataset_association
while source_hda:#should this check library datasets as well?
#FIXME: could be multiple copies of a hda in a single history, this does a better job of matching on cloned histories,
#but is still less than perfect when eg individual datasets are copied between histories
if source_hda not in hda_source_dict or source_hda.hid == hda.hid:
hda_source_dict[ source_hda ] = hda
source_hda = source_hda.copied_from_history_dataset_association
# Ditto for dataset collections.
hdca_source_dict = {}
for hdca in history.dataset_collections:
source_hdca = hdca.copied_from_history_dataset_collection_association
while source_hdca:
if source_hdca not in hdca_source_dict or source_hdca.hid == hdca.hid:
hdca_source_dict[ source_hdca ] = hdca
source_hdca = source_hdca.copied_from_history_dataset_collection_association
# Unpack unvalidated values to strings, they'll be validated when the
# form is submitted (this happens when re-running a job that was
# initially run by a workflow)
#This needs to be done recursively through grouping parameters
def rerun_callback( input, value, prefixed_name, prefixed_label ):
if isinstance( value, UnvalidatedValue ):
try:
return input.to_html_value( value.value, trans.app )
except Exception, e:
# Need to determine when (if ever) the to_html_value call could fail.
log.debug( "Failed to use input.to_html_value to determine value of unvalidated parameter, defaulting to string: %s" % ( e ) )
return str( value )
if isinstance( input, DataToolParameter ):
if isinstance(value,list):
values = []
for val in value:
if is_hashable( val ):
if val in history.datasets:
values.append( val )
elif val in hda_source_dict:
values.append( hda_source_dict[ val ])
return values
if is_hashable( value ) and value not in history.datasets and value in hda_source_dict:
return hda_source_dict[ value ]
elif isinstance( input, DataCollectionToolParameter ):
if is_hashable( value ) and value not in history.dataset_collections and value in hdca_source_dict:
return hdca_source_dict[ value ]
visit_input_values( tool.inputs, params_objects, rerun_callback )
# Create a fake tool_state for the tool, with the parameters values
state = tool.new_state( trans )
state.inputs = params_objects
# If the job failed and has dependencies, allow dependency remap
if job.state == job.states.ERROR:
try:
if [ hda.dependent_jobs for hda in [ jtod.dataset for jtod in job.output_datasets ] if hda.dependent_jobs ]:
state.rerun_remap_job_id = trans.app.security.encode_id(job.id)
except:
# Job has no outputs?
pass
#create an incoming object from the original job's dataset-modified param objects
incoming = {}
params_to_incoming( incoming, tool.inputs, params_objects, trans.app )
incoming[ "tool_state" ] = galaxy.util.object_to_string( state.encode( tool, trans.app ) )
template, vars = tool.handle_input( trans, incoming, old_errors=upgrade_messages ) #update new state with old parameters
# Is the "add frame" stuff neccesary here?
add_frame = AddFrameData()
add_frame.debug = trans.debug
if from_noframe is not None:
add_frame.wiki_url = trans.app.config.wiki_url
add_frame.from_noframe = True
return trans.fill_template( template,
history=history,
toolbox=self.get_toolbox(),
tool_version_select_field=tool_version_select_field,
tool=tool,
util=galaxy.util,
add_frame=add_frame,
tool_id_version_message=tool_id_version_message,
**vars )
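# --- Editor's illustrative sketch (fake classes, not Galaxy models) ---------
# The dataset remapping in rerun() walks each HDA's copied_from_* chain and
# maps every ancestor onto the copy living in the current history. The
# traversal, isolated from the SQLAlchemy models:
class _FakeHDA( object ):
    def __init__( self, hid, copied_from=None ):
        self.hid = hid
        self.copied_from_history_dataset_association = copied_from

def _build_source_dict( datasets ):
    source_dict = {}
    for hda in datasets:
        source_hda = hda.copied_from_history_dataset_association
        while source_hda:
            if source_hda not in source_dict or source_hda.hid == hda.hid:
                source_dict[ source_hda ] = hda
            source_hda = source_hda.copied_from_history_dataset_association
    return source_dict

_original = _FakeHDA( 1 )
_copy = _FakeHDA( 2, copied_from=_original )
assert _build_source_dict( [ _copy ] ) == { _original: _copy }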
@web.expose
def data_source_redirect( self, trans, tool_id=None ):
"""
Redirects a user accessing a Data Source tool to its target action link.
This method will subvert mixed-content blocking in several browsers when
accessing non-https data_source tools from an https Galaxy server.
Tested as working on Safari 7.0 and Firefox 26.
Subverting did not work on Chrome 31.
"""
if tool_id is None:
return trans.response.send_redirect( url_for( controller="root", action="welcome" ) )
tool_version_select_field, tools, tool = self.__get_tool_components( tool_id,
tool_version=None,
get_loaded_tools_by_lineage=False,
set_selected=False )
# No tool matching the tool id, display an error (shouldn't happen)
if not tool:
log.error( "data_source_redirect called with tool id '%s' but no such tool exists", tool_id )
trans.log_event( "Tool id '%s' does not exist" % tool_id )
trans.response.status = 404
return "Tool '%s' does not exist, kwd=%s " % ( tool_id, kwd )
if isinstance( tool, DataSourceTool ):
link = url_for( tool.action, **tool.get_static_param_values( trans ) )
else:
link = url_for( controller='tool_runner', tool_id=tool.id )
return trans.response.send_redirect( link )
@web.expose
def redirect( self, trans, redirect_url=None, **kwd ):
if not redirect_url:
return trans.show_error_message( "Required URL for redirection missing" )
trans.log_event( "Redirecting to: %s" % redirect_url )
return trans.fill_template( 'root/redirect.mako', redirect_url=redirect_url )
@web.json
def upload_async_create( self, trans, tool_id=None, **kwd ):
"""
Precreate datasets for asynchronous uploading.
"""
cntrller = kwd.get( 'cntrller', '' )
roles = kwd.get( 'roles', False )
if roles:
# The user associated the DATASET_ACCESS permission on the uploaded datasets with 1 or more roles.
# We need to ensure that the roles are legitimately derived from the roles associated with the LIBRARY_ACCESS
# permission if the library is not public ( this should always be the case since any illegitimate roles
# were filtered out of the roles displayed on the upload form ). In addition, we need to ensure that the user
# did not associate roles that would make the dataset inaccessible to everyone.
library_id = trans.app.security.decode_id( kwd.get( 'library_id', '' ) )
vars = dict( DATASET_ACCESS_in=roles )
permissions, in_roles, error, msg = trans.app.security_agent.derive_roles_from_access( trans, library_id, cntrller, library=True, **vars )
if error:
return [ 'error', msg ]
def create_dataset( name ):
ud = Bunch( name=name, file_type=None, dbkey=None )
if nonfile_params.get( 'folder_id', False ):
replace_id = nonfile_params.get( 'replace_id', None )
if replace_id not in [ None, 'None' ]:
replace_dataset = trans.sa_session.query( trans.app.model.LibraryDataset ).get( trans.security.decode_id( replace_id ) )
else:
replace_dataset = None
# FIXME: instead of passing params here ( which have been processed by util.Params() ), the original kwd
# should be passed so that complex objects that may have been included in the initial request remain.
library_bunch = upload_common.handle_library_params( trans, nonfile_params, nonfile_params.folder_id, replace_dataset )
else:
library_bunch = None
return upload_common.new_upload( trans, cntrller, ud, library_bunch=library_bunch, state=trans.app.model.HistoryDatasetAssociation.states.UPLOAD )
tool = self.get_toolbox().get_tool( tool_id )
if not tool:
return False # bad tool_id
nonfile_params = galaxy.util.Params( kwd, sanitize=False )
if kwd.get( 'tool_state', None ) not in ( None, 'None' ):
encoded_state = galaxy.util.string_to_object( kwd["tool_state"] )
tool_state = DefaultToolState()
tool_state.decode( encoded_state, tool, trans.app )
else:
tool_state = tool.new_state( trans )
tool.update_state( trans, tool.inputs, tool_state.inputs, kwd, update_only = True )
datasets = []
dataset_upload_inputs = []
for input_name, input in tool.inputs.iteritems():
if input.type == "upload_dataset":
dataset_upload_inputs.append( input )
assert dataset_upload_inputs, "No dataset upload groups were found."
for dataset_upload_input in dataset_upload_inputs:
d_type = dataset_upload_input.get_datatype( trans, kwd )
if d_type.composite_type is not None:
datasets.append( create_dataset( dataset_upload_input.get_composite_dataset_name( kwd ) ) )
else:
params = Bunch( ** tool_state.inputs[dataset_upload_input.name][0] )
if params.file_data not in [ None, "" ]:
name = params.file_data
if name.count('/'):
name = name.rsplit('/',1)[1]
if name.count('\\'):
name = name.rsplit('\\',1)[1]
datasets.append( create_dataset( name ) )
if params.url_paste not in [ None, "" ]:
url_paste = params.url_paste.replace( '\r', '' ).split( '\n' )
url = False
for line in url_paste:
line = line.rstrip( '\r\n' ).strip()
if not line:
continue
elif line.lower().startswith( 'http://' ) or line.lower().startswith( 'ftp://' ) or line.lower().startswith( 'https://' ):
url = True
datasets.append( create_dataset( line ) )
else:
if url:
continue # non-url when we've already processed some urls
else:
# pasted data
datasets.append( create_dataset( 'Pasted Entry' ) )
break
return [ d.id for d in datasets ]
@web.expose
def upload_async_message( self, trans, **kwd ):
# might be more appropriate in a different controller
msg = """<p>Your upload has been queued. History entries that are still uploading will be blue, and turn green upon completion.</p>
<p><b>Please do not use your browser\'s "stop" or "reload" buttons until the upload is complete, or it may be interrupted.</b></p>
<p>You may safely continue to use Galaxy while the upload is in progress. Using "stop" and "reload" on pages other than Galaxy is also safe.</p>
"""
#return trans.show_message( msg, refresh_frames=[ 'history' ] )
return trans.show_message( msg )
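# --- Editor's illustrative sketch (names hypothetical) ----------------------
# upload_async_create() derives dataset names from uploaded file paths (strip
# any '/' or '\' directory part) and treats pasted lines beginning with
# http://, https:// or ftp:// as URLs. The same rules, standalone:
def _dataset_name_from_path( path ):
    name = path
    if name.count( '/' ):
        name = name.rsplit( '/', 1 )[1]
    if name.count( '\\' ):
        name = name.rsplit( '\\', 1 )[1]
    return name

def _is_url_line( line ):
    line = line.rstrip( '\r\n' ).strip().lower()
    return line.startswith( ( 'http://', 'https://', 'ftp://' ) )

assert _dataset_name_from_path( '/tmp/upload/reads.fastq' ) == 'reads.fastq'
assert _dataset_name_from_path( 'C:\\data\\reads.fastq' ) == 'reads.fastq'
assert _is_url_line( 'https://example.org/data.bed\n' )
assert not _is_url_line( '>seq1 pasted fasta' )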
| mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/lib/galaxy/webapps/galaxy/controllers/tool_runner.py | Python | gpl-3.0 | 23,144 | ["Galaxy"] | 3c99175881226a7b06d27f823fa66eca361145ecbaf71568e6b3e23a99342897 |
|
''' Significant lifting from https://jmetzen.github.io/2015-11-27/vae.html '''
import time
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import rnn
import random
import matplotlib.pyplot as plt
import re, string
from sklearn.feature_extraction.text import CountVectorizer
from collections import defaultdict
import pickle as pkl
def load_text(n,num_samples=None):
# fname = 'Oxford_English_Dictionary.txt'
# txt = []
# with open(fname,'rb') as f:
# txt = f.readlines()
# txt = [x.decode('utf-8').strip() for x in txt]
# txt = [re.sub(r'[^a-zA-Z ]+', '', x) for x in txt if len(x) > 1]
# List of words
# word_list = [x.split(' ', 1)[0].strip() for x in txt]
# # List of definitions
# def_list = [x.split(' ', 1)[1].strip()for x in txt]
with open('./training_data/training_data.pkl','rb') as raw:
word_list,dl=pkl.load(raw)
def_list=[]
# def_list=[' '.join(defi) for defi in def_list]
i=0
while i<len( dl):
defi=dl[i]
if len(defi)>0:
def_list+=[' '.join(defi)]
i+=1
else:
dl.pop(i)
word_list.pop(i)
maxlen=0
minlen=100
for defi in def_list:
minlen=min(minlen,len(defi.split()))
maxlen=max(maxlen,len(defi.split()))
print(minlen)
print(maxlen)
maxlen=30
# # Initialize the "CountVectorizer" object, which is scikit-learn's
# # bag of words tool.
# vectorizer = CountVectorizer(analyzer = "word", \
# tokenizer = None, \
# preprocessor = None, \
# stop_words = None, \
# max_features = None, \
# token_pattern='\\b\\w+\\b') # Keep single character words
_map,rev_map=get_one_hot_map(word_list,def_list,n)
if num_samples is None:
num_samples=len(word_list)
# X = (36665, 56210)
mask=None
# X = map_one_hot(word_list[:num_samples],_map,1,n)
# X = map_one_hot(word_list[:num_samples],_map,1,n)
# y = (36665, 56210)
# print _map
# y,mask = map_one_hot(def_list[:num_samples],_map,maxlen,n)
# print (np.max(y))
X=word_list
y=def_list
return X, y, mask,rev_map, _map
def get_one_hot_map(to_def,corpus,n):
# words={}
# for line in to_def:
# if line:
# words[line.split()[0]]=1
# counts=defaultdict(int)
# uniq=defaultdict(int)
# for line in corpus:
# for word in line.split():
# if word not in words:
# counts[word]+=1
# words=list(words.keys())
words=[]
counts=defaultdict(int)
uniq=defaultdict(int)
for line in to_def+corpus:
for word in line.split():
if word not in words:
counts[word]+=1
_map=defaultdict(lambda :n+1)
rev_map=defaultdict(lambda:"<UNK>")
# words=words[:25000]
for i in counts.values():
uniq[i]+=1
print (len(words))
# random.shuffle(words)
words+=list(map(lambda z:z[0],reversed(sorted(counts.items(),key=lambda x:x[1]))))[:n-len(words)]
print (len(words))
i=0
# random.shuffle(words)
for word in words:
i+=1
_map[word]=i
rev_map[i]=word
rev_map[n+1]='<UNK>'
if zero_end_tok:
rev_map[0]='.'
else:
rev_map[0]='Start'
rev_map[n+2]='End'
print (list(reversed(sorted(uniq.items()))))
print (len(list(uniq.items())))
# print rev_map
return _map,rev_map
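# --- Editor's illustrative sketch (names hypothetical) ----------------------
# get_one_hot_map() keeps the most frequent tokens and sends everything else
# to an <UNK> index via defaultdict fallbacks. The core idea, isolated:
from collections import Counter, defaultdict

def _vocab_maps(corpus_lines, n):
    counts = Counter(w for line in corpus_lines for w in line.split())
    _map = defaultdict(lambda: n + 1)            # unseen word -> UNK index
    rev_map = defaultdict(lambda: "<UNK>")       # unseen index -> UNK token
    for i, (word, _) in enumerate(counts.most_common(n), start=1):
        _map[word], rev_map[i] = i, word
    return _map, rev_map

_m, _rm = _vocab_maps(["a b a", "a c"], n=2)
assert _m["a"] == 1 and _rm[1] == "a"            # most frequent word first
assert _m["zzz"] == 3 and _rm[99] == "<UNK>"     # fallbacks for unknowns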
def map_one_hot(corpus,_map,maxlen,n):
if maxlen==1:
if not form2:
total_not=0
rtn=np.zeros([len(corpus),n+3],dtype=np.float32)
for l,line in enumerate(corpus):
if len(line)==0:
rtn[l,-1]=1
else:
mapped=_map[line]
if mapped==75001:
total_not+=1
rtn[l,mapped]=1
return rtn
else:
total_not=0
if not onehot:
rtn=np.zeros([len(corpus),binary_dim],dtype=np.float32)
else:
rtn=np.zeros([len(corpus),2**binary_dim],dtype=np.float32)
for l,line in enumerate(corpus):
# if len(line)==0:
# rtn[l]=n+2
# else:
# if line not in _map:
# total_not+=1
mapped=_map[line]
if mapped==75001:
total_not+=1
if onehot:
binrep=np.zeros(2**binary_dim)
binrep[mapped]=1
else:
binrep=(1&(mapped/(2**np.arange(binary_dim))).astype(np.uint32)).astype(np.float32)
rtn[l]=binrep
return rtn
else:
if form2:
rtn=np.zeros([len(corpus),maxlen+2,n_input],dtype=np.float32)
else:
rtn=np.zeros([len(corpus),maxlen+2],dtype=np.int32)
mask=np.zeros([len(corpus),maxlen+2],dtype=np.float32)
mask[:,1]=1.0
totes=0
nopes=0
wtf=0
for l,_line in enumerate(corpus):
x=0
line=_line.split()
for i in range(min(len(line),maxlen)):
# if line[i] not in _map:
# nopes+=1
mapped=_map[line[i]]
if form2 and not onehot2:
binrep=(1&(mapped/(2**np.arange(binary_dim))).astype(np.uint32)).astype(np.float32)
rtn[l,i+1,:]=binrep
elif form2 and onehot2:
binrep=np.zeros(2**binary_dim)
binrep[mapped]=1
rtn[l,i+1,:]=binrep.astype(np.float32)
else:
rtn[l,i+1]=mapped
if mapped==75001:
wtf+=1
mask[l,i+1]=1.0
totes+=1
x=i+1
to_app=n+2
if zero_end_tok:
to_app=0
if form2 and not onehot2:
rtn[l,x+1,:]=(1&(to_app/(2**np.arange(binary_dim))).astype(np.uint32)).astype(np.float32)
elif form2 and onehot2:
binrep=np.zeros(2**binary_dim)
binrep[to_app]=1
rtn[l,x+1,:]=binrep.astype(np.float32)
else:
rtn[l,x+1]=to_app
mask[l,x+1]=1.0
return rtn,mask
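# --- Editor's illustrative sketch (names hypothetical) ----------------------
# The binrep expressions above pack a vocabulary index into binary_dim bits,
# least-significant bit first. Note they rely on Python 2 integer division;
# the Python 3 spelling needs // (or a right shift). Standalone:
import numpy as np

def _index_to_bits(index, n_bits):
    return (1 & (index // (2 ** np.arange(n_bits)))).astype(np.float32)

assert _index_to_bits(6, 4).tolist() == [0.0, 1.0, 1.0, 0.0]   # 6 = 0b0110
assert int(_index_to_bits(6, 4) @ (2 ** np.arange(4))) == 6    # round-trips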
def xavier_init(fan_in, fan_out, constant=1):
""" Xavier initialization of network weights"""
# https://stackoverflow.com/questions/33640581/how-to-do-xavier-initialization-on-tensorflow
low = -constant*np.sqrt(6.0/(fan_in + fan_out))
high = constant*np.sqrt(6.0/(fan_in + fan_out))
return tf.random_uniform((fan_in, fan_out),
minval=low, maxval=high,
dtype=tf.float32)
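# --- Editor's illustrative sketch (names hypothetical) ----------------------
# xavier_init() draws uniformly from +/- sqrt(6 / (fan_in + fan_out)). A numpy
# twin makes the bound easy to check without building a TF graph:
import numpy as np

def xavier_init_np(fan_in, fan_out, constant=1):
    bound = constant * np.sqrt(6.0 / (fan_in + fan_out))
    return np.random.uniform(-bound, bound, size=(fan_in, fan_out))

_W = xavier_init_np(300, 200)
assert _W.shape == (300, 200)
assert np.abs(_W).max() <= np.sqrt(6.0 / 500)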
class VariationalAutoencoder(object):
""" Variation Autoencoder (VAE) with an sklearn-like interface implemented using TensorFlow.
This implementation uses probabilistic encoders and decoders using Gaussian
distributions and realized by multi-layer perceptrons. The VAE can be learned
end-to-end.
See "Auto-Encoding Variational Bayes" by Kingma and Welling for more details.
"""
def __init__(self, network_architecture, transfer_fct=tf.nn.softplus,
learning_rate=0.001, batch_size=100,generative=False,ctrain=False,test=False,global_step=None):
self.network_architecture = network_architecture
self.transfer_fct = transfer_fct
self.learning_rate = learning_rate
print (self.learning_rate)
self.batch_size = batch_size
# tf Graph input
self.n_words=network_architecture['n_input']
if not form2:
self.x = tf.placeholder(tf.float32, [None,self.n_words],name='x_in')
else:
n_words=self.n_words
if onehot2:
n_words=np.log2(n_words).astype(int)
if onehot:
n_words=2**n_words
self.x = tf.placeholder(tf.float32, [None,n_words],name='x_in')
self.intype=type(self.x)
self.global_step=global_step
if not form2:
self.caption_placeholder = tf.placeholder(tf.int32, [None,network_architecture["maxlen"]],name='caption_placeholder')
else:
self.caption_placeholder = tf.placeholder(tf.float32, [None, network_architecture["maxlen"],self.n_words],name='caption_placeholder')
print (self.caption_placeholder.shape)
self.mask=tf.placeholder(tf.float32, [None, network_architecture["maxlen"]],name='mask')
# Create autoencoder network
if not generative:
self._create_network()
# Define loss function based variational upper-bound and
# corresponding optimizer
self._create_loss_optimizer()
self.test=test
else:
self._build_gen()
# Initializing the tensor flow variables
init = tf.global_variables_initializer()
# Launch the session
self.sess = tf.InteractiveSession()
self.saver = tf.train.Saver(max_to_keep=100)
self.sess.run(init)
if ctrain:
self.saver.restore(self.sess, tf.train.latest_checkpoint(model_path))
def _create_network(self):
# Initialize autoencode network weights and biases
self.debshit=tf.constant(0)
network_weights = self._initialize_weights(**self.network_architecture)
start_token_tensor=tf.constant((np.zeros([self.batch_size,n_input])).astype(np.float32),dtype=tf.float32)
self.network_weights=network_weights
seqlen=tf.cast(tf.reduce_sum(self.mask,reduction_indices=-1),tf.int32)
embedded_input,embedded_input_KLD_loss=self._get_word_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['input_meaning'],tf.reshape(self.caption_placeholder,[-1,self.network_architecture['n_input']]),logit=True)
embedded_input=tf.reshape(embedded_input,[-1,self.network_architecture['maxlen'],self.network_architecture['n_lstm_input']])
if not vanilla:
embedded_input_KLD_loss=tf.reshape(embedded_input_KLD_loss,[-1,self.network_architecture['maxlen']])[:,1:]
encoder_input=embedded_input[:,1:,:]
cell=tf.contrib.rnn.BasicLSTMCell(self.network_architecture['n_lstm_input'])
cell=tf.contrib.rnn.MultiRNNCell([cell]*3)
encoder_outs,encoder_states=rnn.dynamic_rnn(cell,encoder_input,sequence_length=seqlen,dtype=tf.float32,time_major=False)
ix_range=tf.range(0,self.batch_size,1)
ixs=tf.expand_dims(ix_range,-1)
to_cat=tf.expand_dims(seqlen-2,-1)
# to_cat2=tf.expand_dims(seqlen-3,-1)
gather_inds=tf.concat([ixs,to_cat],axis=-1)
# gather_inds2=tf.concat([ixs,to_cat2],axis=-1)
outs=tf.gather_nd(encoder_outs,gather_inds)
# outs2=tf.gather_nd(encoder_outs,gather_inds2)
# self.debshit=tf.gather_nd(self.caption_placeholder[:,1:,:],gather_inds)[:20]
# self.debshit=(outs[:20])#,outs2[:20])
print (outs.shape)
input_embedding,input_embedding_KLD_loss=self._get_middle_embedding([network_weights['middle_encoding'],network_weights['biases_middle_encoding']],network_weights['middle_encoding'],outs,logit=True)
print (input_embedding.shape)
# print embedded_input_KLD_loss.shape,self.mask[:,1:].shape
loss = tf.reduce_sum(input_embedding_KLD_loss)/float(self.batch_size)
self.l1=loss
loss+=tf.reduce_sum(embedded_input_KLD_loss*self.mask[:,1:])/tf.reduce_sum(self.mask[:,1:])
self.l2=loss
# with tf.variable_scope("RNN"):
# for i in range(self.network_architecture['maxlen']):
# if i > 0:
# # current_embedding = tf.nn.embedding_lookup(self.word_embedding, caption_placeholder[:,i-1]) + self.embedding_bias
# if form2:
# current_embedding,KLD_loss = self._get_word_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['LSTM'], self.caption_placeholder[:,i-1,:],logit=True)
# else:
# current_embedding,KLD_loss = self._get_word_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['LSTM'], self.caption_placeholder[:,i-1])
# loss+=KLD_loss
# else:
# current_embedding = input_embedding
# if i > 0:
# tf.get_variable_scope().reuse_variables()
# out, state = self.lstm(current_embedding, state)
# if i > 0:
# # if not form2:
# # labels = tf.expand_dims(self.caption_placeholder[:, i], 1)
# # ix_range=tf.range(0, self.batch_size, 1)
# # ixs = tf.expand_dims(ix_range, 1)
# # concat = tf.concat([ixs, labels],1)
# # onehot = tf.sparse_to_dense(
# # concat, tf.stack([self.batch_size, self.n_words]), 1.0, 0.0)
# # else:
# # onehot=self.caption_placeholder[:,i,:]
logit = tf.matmul(input_embedding, network_weights['LSTM']['encoding_weight']) + network_weights['LSTM']['encoding_bias']
if form2 and not onehot:
# best_word=tf.nn.softmax(logit)
# best_word=tf.round(best_word)
# all_the_f_one_h.append(best_word)
xentropy = tf.nn.sigmoid_cross_entropy_with_logits(logits=logit, labels=self.x)
print (logit.shape)
# self.debshit=(logit[:20])
# self.debshit=[tf.nn.sigmoid(logit)[:20],self.x[:20]]
xentropy1=xentropy
xentropy=tf.reduce_sum(xentropy,reduction_indices=-1)
# self.debshit=xentropy[:15]
self.debshit=[tf.concat([tf.cast(xentropy1[:15]*10, tf.float32),tf.expand_dims(xentropy[:15],-1)],axis=-1)]  # debug tensor; dtype argument added so the cast is valid (float32 assumed)
else:
xentropy = tf.nn.softmax_cross_entropy_with_logits(logits=logit, labels=self.x)
self.debshit=xentropy[:15]
self.exp_loss=tf.reduce_sum(xentropy)/float(self.batch_size)
loss += (self.exp_loss)
# loss += tf.log(self.exp_loss)
loss = loss
self.loss=loss
def _initialize_weights(self, n_lstm_input, maxlen,
n_input, n_z, n_z_m,n_z_m_2):
all_weights = dict()
if not same_embedding:
all_weights['input_meaning'] = {
'affine_weight': tf.Variable(xavier_init(n_z, n_lstm_input),name='affine_weight'),
'affine_bias': tf.Variable(tf.zeros(n_lstm_input),name='affine_bias')}
if not vanilla:
all_weights['biases_variational_encoding'] = {
'out_mean': tf.Variable(tf.zeros([n_z], dtype=tf.float32),name='out_meanb'),
'out_log_sigma': tf.Variable(tf.zeros([n_z], dtype=tf.float32),name='out_log_sigmab')}
all_weights['variational_encoding'] = {
'out_mean': tf.Variable(xavier_init(n_input, n_z),name='out_mean'),
'out_log_sigma': tf.Variable(xavier_init(n_input, n_z),name='out_log_sigma')}
else:
all_weights['biases_variational_encoding'] = {
'out_mean': tf.Variable(tf.zeros([n_z], dtype=tf.float32),name='out_meanb')}
all_weights['variational_encoding'] = {
'out_mean': tf.Variable(xavier_init(n_input, n_z),name='out_mean')}
if mid_vae:
all_weights['biases_middle_encoding'] = {
'out_mean': tf.Variable(tf.zeros([n_z_m], dtype=tf.float32),name='mid_out_meanb'),
'out_log_sigma': tf.Variable(tf.zeros([n_z_m], dtype=tf.float32),name='mid_out_log_sigmab')}
all_weights['middle_encoding'] = {
'out_mean': tf.Variable(xavier_init(n_lstm_input, n_z_m),name='mid_out_mean'),
'out_log_sigma': tf.Variable(xavier_init(n_lstm_input, n_z_m),name='mid_out_log_sigma'),
'affine_weight': tf.Variable(xavier_init(n_z_m, n_z_m_2,10),name='mid_affine_weight'),
'affine_bias': tf.Variable(tf.zeros(n_z_m_2),name='mid_affine_bias')}
else:
all_weights['biases_middle_encoding'] = {
'out_mean': tf.Variable(tf.zeros([n_z_m], dtype=tf.float32),name='mid_out_meanb')}
all_weights['middle_encoding'] = {
'out_mean': tf.Variable(xavier_init(n_lstm_input, n_z_m),name='mid_out_mean'),
'affine_weight': tf.Variable(xavier_init(n_z_m, n_z_m_2,10),name='mid_affine_weight'),
'affine_bias': tf.Variable(tf.zeros(n_z_m_2),name='mid_affine_bias')}
self.lstm=tf.contrib.rnn.BasicLSTMCell(n_lstm_input)
encode_dim=n_input
if onehot2:
encode_dim=np.log2(encode_dim).astype(int)
if onehot:
encode_dim=2**encode_dim
all_weights['LSTM'] = {
'affine_weight': tf.Variable(xavier_init(n_z, n_lstm_input),name='affine_weight2'),
'affine_bias': tf.Variable(tf.zeros(n_lstm_input),name='affine_bias2'),
'encoding_weight': tf.Variable(xavier_init(n_z_m_2,encode_dim),name='encoding_weight'),
'encoding_bias': tf.Variable(tf.zeros(encode_dim),name='encoding_bias'),
'lstm': self.lstm}
return all_weights
def _get_input_embedding(self, ve_weights, aff_weights):
z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],self.x)
embedding=tf.matmul(z,aff_weights['affine_weight'])+aff_weights['affine_bias']
return embedding,vae_loss
def _get_middle_embedding(self, ve_weights, lstm_weights, x,logit=False):
if logit:
z,vae_loss=self._vae_sample_mid(ve_weights[0],ve_weights[1],x)
else:
if not form2:
z,vae_loss=self._vae_sample_mid(ve_weights[0],ve_weights[1],x, True)
else:
z,vae_loss=self._vae_sample_mid(ve_weights[0],ve_weights[1],tf.one_hot(x,depth=self.network_architecture['n_input']))
all_the_f_one_h.append(tf.one_hot(x,depth=self.network_architecture['n_input']))
z=tf.nn.relu(z)
embedding=tf.matmul(z,lstm_weights['affine_weight'])+lstm_weights['affine_bias']
embedding=tf.nn.relu(embedding)
return embedding,vae_loss
def _get_word_embedding(self, ve_weights, lstm_weights, x,logit=False):
# if not onehot2:
# x=tf.log(tf.maximum(1e-8,x))
if logit:
z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],x)
else:
if not form2:
z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],x, True)
else:
z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],tf.one_hot(x,depth=self.network_architecture['n_input']))
all_the_f_one_h.append(tf.one_hot(x,depth=self.network_architecture['n_input']))
# if not onehot2:
# z=tf.exp(z)
embedding=tf.matmul(z,lstm_weights['affine_weight'])+lstm_weights['affine_bias']
# self.debshit=embedding
# embedding=tf.exp(embedding)
return embedding,vae_loss
def _vae_sample(self, weights, biases, x, lookup=False):
#TODO: consider adding a linear transform layer+relu or softplus here first
if not lookup:
mu=tf.matmul(x,weights['out_mean'])+biases['out_mean']
if not vanilla:
logvar=tf.matmul(x,weights['out_log_sigma'])+biases['out_log_sigma']
else:
mu=tf.nn.embedding_lookup(weights['out_mean'],x)+biases['out_mean']
if not vanilla:
logvar=tf.nn.embedding_lookup(weights['out_log_sigma'],x)+biases['out_log_sigma']
if not vanilla:
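            # reparameterization trick: z = mu + sigma * epsilon with epsilon ~ N(0, I),
            # which keeps the sampling step differentiable w.r.t. mu and logvar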
epsilon=tf.random_normal(tf.shape(logvar),name='epsilon')
std=tf.exp(.5*logvar)
z=mu+tf.multiply(std,epsilon)
else:
z=mu
KLD=0.0
if not vanilla:
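            # closed-form KL divergence between N(mu, sigma^2) and N(0, I):
            # KL = -0.5 * sum(1 + log sigma^2 - mu^2 - sigma^2)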
KLD = -0.5 * tf.reduce_sum(1 + logvar - tf.pow(mu, 2) - tf.exp(logvar),axis=-1)
print (logvar.shape,epsilon.shape,std.shape,z.shape,KLD.shape)
return z,KLD
def _vae_sample_mid(self, weights, biases, x, lookup=False):
#TODO: consider adding a linear transform layer+relu or softplus here first
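        # mirrors _vae_sample, but gated by the mid_vae flag so the middle
        # encoding can switch between deterministic and variational behaviour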
if not lookup:
mu=tf.matmul(x,weights['out_mean'])+biases['out_mean']
if mid_vae:
logvar=tf.matmul(x,weights['out_log_sigma'])+biases['out_log_sigma']
else:
mu=tf.nn.embedding_lookup(weights['out_mean'],x)+biases['out_mean']
if mid_vae:
logvar=tf.nn.embedding_lookup(weights['out_log_sigma'],x)+biases['out_log_sigma']
if mid_vae:
epsilon=tf.random_normal(tf.shape(logvar),name='epsilon')
std=tf.exp(.5*logvar)
z=mu+tf.multiply(std,epsilon)
else:
z=mu
KLD=0.0
if mid_vae:
KLD = -0.5 * tf.reduce_sum(1 + logvar - tf.pow(mu, 2) - tf.exp(logvar),axis=-1)
print (logvar.shape,epsilon.shape,std.shape,z.shape,KLD.shape)
return z,KLD
def _create_loss_optimizer(self):
if self.global_step is None:
self.global_step=tf.Variable(0,trainable=False)
if clip_grad:
opt_func = tf.train.RMSPropOptimizer(learning_rate=self.learning_rate)
tvars = tf.trainable_variables()
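            # clip the global gradient norm to 1.0 to keep RNN gradients from exploding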
grads, _ = tf.clip_by_global_norm(tf.gradients(self.loss, tvars), 1.0)
self.optimizer = opt_func.apply_gradients(zip(grads, tvars),global_step=self.global_step)
else:
self.optimizer = \
tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.loss,global_step=self.global_step)
def _create_loss_test(self):
self.test_op = \
tf.test.compute_gradient_error(self.x,np.array([self.batch_size,self.n_words]),self.loss,[1],extra_feed_dict={})
def partial_fit(self, X,y,mask,testify=False):
"""Train model based on mini-batch of input data.
Return cost of mini-batch.
"""
if self.test and testify:
print (tf.test.compute_gradient_error(self.x,np.array([self.batch_size,self.n_words]),self.loss,[self.batch_size],extra_feed_dict={self.caption_placeholder: y, self.mask: mask}))
exit()
else:
            opt, cost, debug_vals, l1, l2, exp_loss = self.sess.run((self.optimizer, self.loss, self.debshit, self.l1, self.l2, self.exp_loss),
                    feed_dict={self.x: X, self.caption_placeholder: y, self.mask: mask})
            if testify:
                print (debug_vals, l1, l2)
        return cost, exp_loss
def _build_gen(self):
#same setup as `_create_network` function
network_weights = self._initialize_weights(**self.network_architecture)
if form2:
start_token_tensor=tf.constant((np.zeros([self.batch_size,n_input])).astype(np.float32),dtype=tf.float32)
else:
start_token_tensor=tf.constant((np.zeros([self.batch_size])).astype(np.int32),dtype=tf.int32)
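        # start token: an all-zeros binary code when form2 (bit-vector inputs),
        # otherwise integer index 0 for the embedding lookup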
self.network_weights=network_weights
if not same_embedding:
input_embedding,_=self._get_input_embedding([self.network_weights['variational_encoding'],self.network_weights['biases_variational_encoding']],self.network_weights['input_meaning'])
else:
input_embedding,_=self._get_input_embedding([self.network_weights['variational_encoding'],self.network_weights['biases_variational_encoding']],self.network_weights['LSTM'])
print (input_embedding.shape)
# image_embedding = tf.matmul(img, self.img_embedding) + self.img_embedding_bias
state = self.lstm.zero_state(self.batch_size,dtype=tf.float32)
#declare list to hold the words of our generated captions
all_words = []
with tf.variable_scope("RNN"):
# in the first iteration we have no previous word, so we directly pass in the image embedding
# and set the `previous_word` to the embedding of the start token ([0]) for the future iterations
output, state = self.lstm(input_embedding, state)
print (state,output.shape)
if form2:
previous_word,_ = self._get_word_embedding([self.network_weights['variational_encoding'],self.network_weights['biases_variational_encoding']],self.network_weights['LSTM'], start_token_tensor,logit=True)
else:
previous_word,_ = self._get_word_embedding([self.network_weights['variational_encoding'],self.network_weights['biases_variational_encoding']],self.network_weights['LSTM'], start_token_tensor)
print (previous_word.shape)
# previous_word = tf.nn.embedding_lookup(self.word_embedding, [0]) + self.embedding_bias
for i in range(self.network_architecture['maxlen']):
tf.get_variable_scope().reuse_variables()
print (i)
out, state = self.lstm(previous_word, state)
                # project the LSTM output to vocabulary logits
logit=tf.matmul(out, network_weights['LSTM']['encoding_weight']) + network_weights['LSTM']['encoding_bias']
if not form2:
best_word = tf.argmax(logit, 1)
else:
best_word=tf.nn.sigmoid(logit)
best_word=tf.round(best_word)
# with tf.device("/cpu:0"):
# # get the embedding of the best_word to use as input to the next iteration of our LSTM
# previous_word = tf.nn.embedding_lookup(self.word_embedding, best_word)
# previous_word += self.embedding_bias
print (logit.shape)
if form2:
previous_word,_ = self._get_word_embedding([self.network_weights['variational_encoding'],self.network_weights['biases_variational_encoding']],self.network_weights['LSTM'], best_word,logit=True)
else:
previous_word,_ = self._get_word_embedding([self.network_weights['variational_encoding'],self.network_weights['biases_variational_encoding']],self.network_weights['LSTM'], best_word)
print (previous_word.shape)
all_words.append(best_word)
self.generated_words=all_words
def generate(self, _map, x):
""" Generate data by sampling from latent space.
If z_mu is not None, data for this point in latent space is
generated. Otherwise, z_mu is drawn from prior in latent
space.
# """
# if z_mu is None:
# z_mu = np.random.normal(size=self.network_architecture["n_z"])
# # Note: This maps to mean of distribution, we could alternatively
# # sample from Gaussian distribution
# return self.sess.run(self.x_reconstr_mean,
# feed_dict={self.z: z_mu})
# saver = tf.train.Saver()
# saver.restore(self.sess, tf.train.latest_checkpoint(model_path))
generated_word_index,f_it= self.sess.run([self.generated_words,all_the_f_one_h], feed_dict={self.x:x})
print (f_it)
print (generated_word_index)
if form2:
generated_word_index=np.array(bin_to_int(generated_word_index))
generated_word_index=np.rollaxis(generated_word_index,1)
else:
generated_word_index=np.array(generated_word_index)
return generated_word_index
# generated_sentence = ixtoword(_map,generated_word_index)
# return generated_sentence
def ixtoword(_map,ixs):
return [[_map[x] for x in y] for y in ixs]
def bin_to_int(a):
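    # collapse the trailing bit axis back to integer ids, treating the bits
    # as little-endian: id = sum_i bit_i * 2**i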
return [(x*(2** np.arange(x.shape[-1] ))).sum(axis=-1).astype(np.uint32) for x in a]
def train(network_architecture, learning_rate=0.001,
batch_size=100, training_epochs=10, display_step=2,gen=False,ctrain=False,test=False):
global_step=tf.Variable(0,trainable=False)
if should_decay and not gen:
learning_rate = tf.train.exponential_decay(learning_rate, global_step,
int(all_samps/batch_size), 0.95, staircase=True)
vae = VariationalAutoencoder(network_architecture,
learning_rate=learning_rate,
batch_size=batch_size,generative=gen,ctrain=ctrain,test=test,global_step=global_step)
# Training cycle
# if test:
# maxlen=network_architecture['maxlen']
# return tf.test.compute_gradient_error([vae.x,vae.caption_placeholder,vae.mask],[np.array([batch_size,n_input]),np.array([batch_size,maxlen,n_input]),np.array([batch_size,maxlen])],vae.loss,[])
if gen:
return vae
costs=[[],[]]
indlist=np.arange(all_samps).astype(int)
for epoch in range(training_epochs):
avg_cost = 0.
avg_log_cost = 0.
total_batch = int(n_samples / batch_size)
# Loop over all batches
        # shuffle inputs and targets together so (X, y) pairs stay aligned
        np.random.shuffle(indlist)
        X, y = X[indlist], y[indlist]
testify=False
for i in range(total_batch):
batch_xs = map_one_hot(X[i*batch_size:(i+1)*batch_size],forward_map,1,n_input)
batch_ys,mask=map_one_hot(y[i*batch_size:(i+1)*batch_size],forward_map,network_architecture['maxlen']-2,n_input)
batch_ys=batch_ys.astype(np.uint32)
# Fit training using batch data
            if epoch == 21 and i == 0:
                testify = True
            else:
                testify = False
            cost, exp_loss = vae.partial_fit(batch_xs, batch_ys, mask, testify=testify)
            if i % display_step == 0:
                print (cost, exp_loss)
# if i==45:
# exit()
# Compute average loss
avg_cost += np.sum(exp_loss) / n_samples * batch_size
avg_log_cost += np.sum(cost) / n_samples * batch_size
costs[0].append(avg_cost)
costs[1].append(avg_log_cost)
# Display logs per epoch step
if epoch % display_step == 0 or epoch==1:
if should_save:
print ('saving')
vae.saver.save(vae.sess, model_path+'model')
pkl.dump(costs,open('100_256_45000_allwords_results.pkl','wb'))
print("Epoch:", '%04d' % (epoch+1),
"cost=", avg_cost)
return vae
if __name__ == "__main__":
form2=True
vanilla=True
mid_vae=False
onehot=True
onehot2=False
same_embedding=False
clip_grad=False
should_save=True
should_train=True
# should_train=not should_train
should_continue=False
should_decay=True
zero_end_tok=True
training_epochs=10000
batch_size=500
binary_dim=16
all_the_f_one_h=[]
maxlen=32
if not zero_end_tok:
X, y, mask, _map,forward_map = load_text(2**binary_dim-4)
else:
X, y, mask, _map,forward_map = load_text(2**binary_dim-3)
n_input =binary_dim
if onehot2:
n_input=2**n_input
n_samples = 30000
lstm_dim=512
model_path = './modelstemp/'
all_samps=len(X)
n_samples=all_samps
# X, y = X[:n_samples, :], y[:n_samples, :]
network_architecture = \
        dict(maxlen=maxlen, # maximum generated sequence length
            n_input=n_input, # bits per word code (2**bits classes when onehot2)
n_lstm_input=lstm_dim, # LSTM cell size
n_z=1024, # dimensionality of latent space
n_z_m=512,
n_z_m_2=256
)
if should_train:
# vae_2d = train(network_architecture, training_epochs=training_epochs, batch_size=batch_size,gen=False,ctrain=should_continue)
# print train(network_architecture, training_epochs=training_epochs, batch_size=batch_size,gen=False,ctrain=should_continue,test=True)
vae_2d = train(network_architecture, training_epochs=training_epochs, batch_size=batch_size,gen=False,ctrain=should_continue,learning_rate=.005)
else:
vae_2d = train(network_architecture, training_epochs=training_epochs, batch_size=batch_size,gen=True,ctrain=True)
# # vae_2d._build_gen()
ind_list=np.arange(len(X)).astype(int)
np.random.shuffle(ind_list)
x_sample = X[ind_list[:1000]]
print (x_sample)
y_sample = y[ind_list[:1000]]
print (y_sample)
y_hat = vae_2d.generate(_map,x_sample)
y_hat=y_hat[:10]
# print y_hat
y_hat_words=ixtoword(_map,y_hat)
print (y_hat_words)
if form2:
y_words=ixtoword(_map,np.array(bin_to_int(y_sample[:10])))
else:
y_words=ixtoword(_map,y_sample)
print(y_hat)
print(y_hat_words)
print(y_words)
# # plt.figure(figsize=(8, 6))
# plt.scatter(z_mu[:, 0], z_mu[:, 1], c=np.argmax(y_sample, 1))
# plt.colorbar()
# plt.grid()
# plt.show()
| dricciardelli/vae2vec | def_VAE.py | Python | mit | 28,459 | ["Gaussian"] | 81ba15b5b19b29cc9395d488d68286b7ed8b7f7a8b136d95277271d83951d2a6 |
#!/usr/bin/python
"""Test of line navigation output of Firefox on the Orca wiki."""
from macaroon.playback import *
import utils
sequence = MacroSequence()
#sequence.append(WaitForDocLoad())
sequence.append(PauseAction(5000))
# Work around some new quirk in Gecko that causes this test to fail if
# run via the test harness rather than manually.
sequence.append(KeyComboAction("<Control>r"))
sequence.append(PauseAction(3000))
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("<Control>End"))
sequence.append(utils.AssertPresentationAction(
"1. Bottom of file",
["BRAILLE LINE: 'Hosted by Red Hat.'",
" VISIBLE: 'Hosted by Red Hat.', cursor=0",
"BRAILLE LINE: 'The GNOME Project'",
" VISIBLE: 'The GNOME Project', cursor=1",
"SPEECH OUTPUT: 'Hosted by'",
"SPEECH OUTPUT: 'Red Hat'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: '.'",
"SPEECH OUTPUT: 'The GNOME Project link.'"]))
#sequence.append(utils.StartRecordingAction())
#sequence.append(KeyComboAction("Up"))
#sequence.append(utils.AssertPresentationAction(
# "2. Line Up",
# ["BRAILLE LINE: 'Copyright \xa9 2005, 2006, 2007 The GNOME Project.'",
# " VISIBLE: 'Copyright \xa9 2005, 2006, 2007 The', cursor=1",
# "SPEECH OUTPUT: 'Copyright \xa9 2005, 2006, 2007'",
# "SPEECH OUTPUT: 'The GNOME Project'",
# "SPEECH OUTPUT: 'link.'",
# "SPEECH OUTPUT: '.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"2. Line Up",
["BRAILLE LINE: 'GnomeWorldWide image'",
" VISIBLE: 'GnomeWorldWide image', cursor=1",
"SPEECH OUTPUT: 'GnomeWorldWide image link'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"3. Line Up",
["BRAILLE LINE: 'Wide h3'",
" VISIBLE: 'Wide h3', cursor=1",
"SPEECH OUTPUT: 'Wide heading level 3'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"4. Line Up",
["BRAILLE LINE: 'GNOME World h3'",
" VISIBLE: 'GNOME World h3', cursor=1",
"SPEECH OUTPUT: 'GNOME World heading level 3'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"5. Line Up",
["BRAILLE LINE: 'More Actions: combo box'",
" VISIBLE: 'More Actions: combo box', cursor=1",
"SPEECH OUTPUT: 'More Actions: combo box.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"6. Line Up",
["BRAILLE LINE: 'Attachments'",
" VISIBLE: 'Attachments', cursor=1",
"SPEECH OUTPUT: 'Attachments'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"7. Line Up",
["BRAILLE LINE: 'Info'",
" VISIBLE: 'Info', cursor=1",
"SPEECH OUTPUT: 'Info'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"8. Line Up",
["BRAILLE LINE: 'Immutable Page'",
" VISIBLE: 'Immutable Page', cursor=1",
"SPEECH OUTPUT: 'Immutable Page.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"9. Line Up",
["BRAILLE LINE: 'Page h3'",
" VISIBLE: 'Page h3', cursor=1",
"SPEECH OUTPUT: 'Page heading level 3'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"10. Line Up",
["BRAILLE LINE: 'Login'",
" VISIBLE: 'Login', cursor=1",
"SPEECH OUTPUT: 'Login'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"11. Line Up",
["BRAILLE LINE: 'User h3'",
" VISIBLE: 'User h3', cursor=1",
"SPEECH OUTPUT: 'User heading level 3'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"12. Line Up",
["BRAILLE LINE: 'Orca (last edited 2007-12-07 22:09:22 by WillieWalker)'",
" VISIBLE: 'Orca (last edited 2007-12-07 22:', cursor=1",
"SPEECH OUTPUT: 'Orca (last edited 2007-12-07 22:09:22 by'",
"SPEECH OUTPUT: 'WillieWalker'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: ')'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"13. Line Up",
["BRAILLE LINE: 'CategoryAccessibility'",
" VISIBLE: 'CategoryAccessibility', cursor=1",
"SPEECH OUTPUT: 'CategoryAccessibility'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"14. Line Up",
["BRAILLE LINE: 'separator'",
" VISIBLE: 'separator', cursor=1",
"SPEECH OUTPUT: 'separator'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"15. Line Up",
["BRAILLE LINE: 'warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.'",
" VISIBLE: 'warranty of MERCHANTABILITY or F', cursor=1",
"SPEECH OUTPUT: 'warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"16. Line Up",
["BRAILLE LINE: 'in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied'",
" VISIBLE: 'in the hope that it will be usef', cursor=1",
"SPEECH OUTPUT: 'in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"17. Line Up",
["BRAILLE LINE: 'The information on this page and the other Orca-related pages on this site are distributed'",
" VISIBLE: 'The information on this page and', cursor=1",
"SPEECH OUTPUT: 'The information on this page and the other Orca-related pages on this site are distributed'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"18. Line Up",
["BRAILLE LINE: 'separator'",
" VISIBLE: 'separator', cursor=1",
"SPEECH OUTPUT: 'separator'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"19. Line Up",
["BRAILLE LINE: '• Python Pocket Reference, Mark Lutz'",
" VISIBLE: '• Python Pocket Reference, Mark ', cursor=1",
"SPEECH OUTPUT: '•.'",
"SPEECH OUTPUT: 'Python Pocket Reference, Mark Lutz'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"20. Line Up",
["BRAILLE LINE: '• Python in a Nutshell, Alex Martelli'",
" VISIBLE: '• Python in a Nutshell, Alex Mar', cursor=1",
"SPEECH OUTPUT: '•.'",
"SPEECH OUTPUT: 'Python in a Nutshell, Alex Martelli'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"21. Line Up",
["BRAILLE LINE: '• Dive Into Python, Mark Pilgrim'",
" VISIBLE: '• Dive Into Python, Mark Pilgrim', cursor=1",
"SPEECH OUTPUT: '•.'",
"SPEECH OUTPUT: 'Dive Into Python, Mark Pilgrim'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"22. Line Up",
["BRAILLE LINE: '• Design documents: Orca Documentation Series'",
" VISIBLE: '• Design documents: Orca Documen', cursor=1",
"SPEECH OUTPUT: '•.'",
"SPEECH OUTPUT: 'Design documents:'",
"SPEECH OUTPUT: 'Orca Documentation Series'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"23. Line Up",
["BRAILLE LINE: '• Bug database: GNOME Bug Tracking System (Bugzilla) (current bug list)'",
" VISIBLE: '• Bug database: GNOME Bug Tracki', cursor=1",
"SPEECH OUTPUT: '•.'",
"SPEECH OUTPUT: 'Bug database:'",
"SPEECH OUTPUT: 'GNOME Bug Tracking System (Bugzilla)'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: '('",
"SPEECH OUTPUT: 'current bug list'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: ')'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"24. Line Up",
["BRAILLE LINE: '• Mailing list: orca-list@gnome.org (Archives)'",
" VISIBLE: '• Mailing list: orca-list@gnome.', cursor=1",
"SPEECH OUTPUT: '•.'",
"SPEECH OUTPUT: 'Mailing list:'",
"SPEECH OUTPUT: 'orca-list@gnome.org'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: '('",
"SPEECH OUTPUT: 'Archives'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: ')'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"25. Line Up",
["BRAILLE LINE: '• Frequently Asked Questions: FAQ'",
" VISIBLE: '• Frequently Asked Questions: FA', cursor=1",
"SPEECH OUTPUT: '•.'",
"SPEECH OUTPUT: 'Frequently Asked Questions:'",
"SPEECH OUTPUT: 'FAQ'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"26. Line Up",
["BRAILLE LINE: 'More Information h1'",
" VISIBLE: 'More Information h1', cursor=1",
"SPEECH OUTPUT: 'More Information heading level 1'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"27. Line Up",
["BRAILLE LINE: 'information.'",
" VISIBLE: 'information.', cursor=1",
"SPEECH OUTPUT: 'information.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"28. Line Up",
["BRAILLE LINE: 'There's a bunch you can do! Please refer to the How Can I Help page for detailed'",
" VISIBLE: 'There's a bunch you can do! Plea', cursor=1",
"SPEECH OUTPUT: 'There's a bunch you can do! Please refer to the'",
"SPEECH OUTPUT: 'How Can I Help page'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: 'for detailed'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"29. Line Up",
["BRAILLE LINE: 'How Can I Help? h1'",
" VISIBLE: 'How Can I Help? h1', cursor=1",
"SPEECH OUTPUT: 'How Can I Help? heading level 1'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"30. Line Up",
["BRAILLE LINE: 'Please also refer to the Accessible Applications page for detailed information.'",
" VISIBLE: 'Please also refer to the Accessi', cursor=1",
"SPEECH OUTPUT: 'Please also refer to the'",
"SPEECH OUTPUT: 'Accessible Applications page'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: 'for detailed information.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"31. Line Up",
["BRAILLE LINE: 'application.'",
" VISIBLE: 'application.', cursor=1",
"SPEECH OUTPUT: 'application.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"32. Line Up",
["BRAILLE LINE: 'See also the Application Specific Settings page for how to configure settings specific to an'",
" VISIBLE: 'See also the Application Specifi', cursor=1",
"SPEECH OUTPUT: 'See also the'",
"SPEECH OUTPUT: 'Application Specific Settings'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: 'page for how to configure settings specific to an'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"33. Line Up",
["BRAILLE LINE: 'tested.'",
" VISIBLE: 'tested.', cursor=1",
"SPEECH OUTPUT: 'tested.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"34. Line Up",
["BRAILLE LINE: 'a repository within which users can share experiences regarding applications they have'",
" VISIBLE: 'a repository within which users ', cursor=1",
"SPEECH OUTPUT: 'a repository within which users can share experiences regarding applications they have'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"35. Line Up",
["BRAILLE LINE: 'them. The list is not to be a conclusive list of all applications. Rather, the goal is to provide'",
" VISIBLE: 'them. The list is not to be a co', cursor=1",
"SPEECH OUTPUT: 'them. The list is not to be a conclusive list of all applications. Rather, the goal is to provide'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"36. Line Up",
["BRAILLE LINE: 'various applications that can be accessed with Orca as well as tips and tricks for using'",
" VISIBLE: 'various applications that can be', cursor=1",
"SPEECH OUTPUT: 'various applications that can be accessed with Orca as well as tips and tricks for using'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"37. Line Up",
["BRAILLE LINE: 'On the Accessible Applications page, you will find a growing list of information regarding'",
" VISIBLE: 'On the Accessible Applications p', cursor=1",
"SPEECH OUTPUT: 'On the'",
"SPEECH OUTPUT: 'Accessible Applications page'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: ', you will find a growing list of information regarding'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"38. Line Up",
["BRAILLE LINE: 'access to more and more applications.'",
" VISIBLE: 'access to more and more applicat', cursor=1",
"SPEECH OUTPUT: 'access to more and more applications.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"39. Line Up",
["BRAILLE LINE: 'than others, however, and the Orca community continually works to provide compelling'",
" VISIBLE: 'than others, however, and the Or', cursor=1",
"SPEECH OUTPUT: 'than others, however, and the Orca community continually works to provide compelling'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"40. Line Up",
["BRAILLE LINE: 'applications, OpenOffice, Firefox, and the Java platform. Some applications work better'",
" VISIBLE: 'applications, OpenOffice, Firefo', cursor=1",
"SPEECH OUTPUT: 'applications,'",
"SPEECH OUTPUT: 'OpenOffice'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: ', Firefox, and the Java platform. Some applications work better'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"41. Line Up",
["BRAILLE LINE: 'technology service provider interface (AT-SPI). This includes the GNOME desktop and its'",
" VISIBLE: 'technology service provider inte', cursor=1",
"SPEECH OUTPUT: 'technology service provider interface (AT-SPI). This includes the GNOME desktop and its'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"42. Line Up",
["BRAILLE LINE: 'Orca is designed to work with applications and toolkits that support the assistive'",
" VISIBLE: 'Orca is designed to work with ap', cursor=1",
"SPEECH OUTPUT: 'Orca is designed to work with applications and toolkits that support the assistive'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"43. Line Up",
["BRAILLE LINE: 'Accessible Applications h1'",
" VISIBLE: 'Accessible Applications h1', cursor=1",
"SPEECH OUTPUT: 'Accessible Applications heading level 1'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"44. Line Up",
["BRAILLE LINE: 'Please also refer to the Configuration/Use page for detailed information.'",
" VISIBLE: 'Please also refer to the Configu', cursor=1",
"SPEECH OUTPUT: 'Please also refer to the'",
"SPEECH OUTPUT: 'Configuration/Use page'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: 'for detailed information.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"45. Line Up",
["BRAILLE LINE: 'of Orca key bindings.'",
" VISIBLE: 'of Orca key bindings.', cursor=1",
"SPEECH OUTPUT: 'of Orca key bindings.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"46. Line Up",
["BRAILLE LINE: 'Configuration GUI also includes a \"Key Bindings\" tab that allows you to get a complete list'",
" VISIBLE: 'Configuration GUI also includes ', cursor=1",
"SPEECH OUTPUT: 'Configuration GUI'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: 'also includes a \"Key Bindings\" tab that allows you to get a complete list'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"47. Line Up",
["BRAILLE LINE: 'Layout\\) for more information on Orca-specific keyboard commands. The Orca'",
" VISIBLE: 'Layout\\) for more information on ', cursor=1",
"SPEECH OUTPUT: 'Layout\\)'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: 'for more information on Orca-specific keyboard commands. The'",
"SPEECH OUTPUT: 'Orca'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"48. Line Up",
["BRAILLE LINE: 'using flat review mode to examine a window. Refer to Orca Keyboard Commands \\(Laptop'",
" VISIBLE: 'using flat review mode to examin', cursor=1",
"SPEECH OUTPUT: 'using flat review mode to examine a window. Refer to'",
"SPEECH OUTPUT: 'Orca Keyboard Commands'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: '\\(Laptop'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"49. Line Up",
["BRAILLE LINE: 'Configuration GUI (accessed by pressing Insert+Space when Orca is running) and for'",
" VISIBLE: 'Configuration GUI (accessed by p', cursor=1",
"SPEECH OUTPUT: 'Configuration GUI'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: '(accessed by pressing Insert+Space when Orca is running) and for'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"50. Line Up",
["BRAILLE LINE: 'You may sometimes wish to control Orca itself, such as bringing up the Orca'",
" VISIBLE: 'You may sometimes wish to contro', cursor=1",
"SPEECH OUTPUT: 'You may sometimes wish to control Orca itself, such as bringing up the'",
"SPEECH OUTPUT: 'Orca'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"51. Line Up",
["BRAILLE LINE: 'desktop applications.'",
" VISIBLE: 'desktop applications.', cursor=1",
"SPEECH OUTPUT: 'desktop applications.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"52. Line Up",
["BRAILLE LINE: 'mechanisms of GNOME. These navigation mechanisms are consistent across most'",
" VISIBLE: 'mechanisms of GNOME. These navig', cursor=1",
"SPEECH OUTPUT: 'mechanisms of GNOME'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: '. These navigation mechanisms are consistent across most'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"53. Line Up",
["BRAILLE LINE: 'designed to present information as you navigate the desktop using the built-in navigation'",
" VISIBLE: 'designed to present information ', cursor=1",
"SPEECH OUTPUT: 'designed to present information as you navigate the desktop using the'",
"SPEECH OUTPUT: 'built-in navigation'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"54. Line Up",
["BRAILLE LINE: 'when logged in, waiting for a second or so, then typing orca and pressing return. Orca is'",
" VISIBLE: 'when logged in, waiting for a se', cursor=1",
"SPEECH OUTPUT: 'when logged in, waiting for a second or so, then typing orca and pressing return. Orca is'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"55. Line Up",
["BRAILLE LINE: 'The command to run orca is orca. You can enter this command by pressing Alt+F2'",
" VISIBLE: 'The command to run orca is orca.', cursor=1",
"SPEECH OUTPUT: 'The command to run orca is orca. You can enter this command by pressing Alt+F2'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"56. Line Up",
["BRAILLE LINE: 'Configuration/Use h1'",
" VISIBLE: 'Configuration/Use h1', cursor=1",
"SPEECH OUTPUT: 'Configuration/Use heading level 1'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"57. Line Up",
["BRAILLE LINE: 'distributions as well as installing Orca directly from source.'",
" VISIBLE: 'distributions as well as install', cursor=1",
"SPEECH OUTPUT: 'distributions as well as installing Orca directly from source.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"58. Line Up",
["BRAILLE LINE: 'Please also refer to the Download/Installation page for detailed information on various'",
" VISIBLE: 'Please also refer to the Downloa', cursor=1",
"SPEECH OUTPUT: 'Please also refer to the'",
"SPEECH OUTPUT: 'Download/Installation page'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: 'for detailed information on various'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"59. Line Up",
["BRAILLE LINE: 'Solaris and Ubuntu.'",
" VISIBLE: 'Solaris and Ubuntu.', cursor=1",
"SPEECH OUTPUT: 'Solaris'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: 'and'",
"SPEECH OUTPUT: 'Ubuntu'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: '.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"60. Line Up",
["BRAILLE LINE: 'provided by default on a number of operating system distributions, including Open'",
" VISIBLE: 'provided by default on a number ', cursor=1",
"SPEECH OUTPUT: 'provided by default on a number of operating system distributions, including'",
"SPEECH OUTPUT: 'Open'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"61. Line Up",
["BRAILLE LINE: 'As of GNOME 2.16, Orca is a part of the GNOME platform. As a result, Orca is already'",
" VISIBLE: 'As of GNOME 2.16, Orca is a part', cursor=1",
"SPEECH OUTPUT: 'As of GNOME 2.16, Orca is a part of the GNOME platform. As a result, Orca is already'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"62. Line Up",
["BRAILLE LINE: 'Download/Installation h1'",
" VISIBLE: 'Download/Installation h1', cursor=1",
"SPEECH OUTPUT: 'Download/Installation heading level 1'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"63. Line Up",
["BRAILLE LINE: '• Guide to installing the latest versions of Firefox and Orca'",
" VISIBLE: '• Guide to installing the latest', cursor=1",
"SPEECH OUTPUT: '•.'",
"SPEECH OUTPUT: 'Guide to installing the latest versions of Firefox and Orca'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"64. Line Up",
["BRAILLE LINE: '• Review of Fedora 7 and the Orca screen reader for the Gnome graphical desktop'",
" VISIBLE: '• Review of Fedora 7 and the Orc', cursor=1",
"SPEECH OUTPUT: '•.'",
"SPEECH OUTPUT: 'Review of Fedora 7 and the Orca screen reader for the Gnome graphical desktop'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"65. Line Up",
["BRAILLE LINE: '• Walk through of the installation of Ubuntu 7.4. Very helpful tutorial'",
" VISIBLE: '• Walk through of the installati', cursor=1",
"SPEECH OUTPUT: '•.'",
"SPEECH OUTPUT: 'Walk through of the installation of Ubuntu 7.4. Very helpful tutorial'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"66. Line Up",
["BRAILLE LINE: 'http://www.digitaldarragh.com/linuxat.asp and include the following:'",
" VISIBLE: 'http://www.digitaldarragh.com/li', cursor=1",
"SPEECH OUTPUT: 'http://www.digitaldarragh.com/linuxat.asp'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: 'and include the following:'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"67. Line Up",
["BRAILLE LINE: 'contribution (THANKS!)!!! The audio guides can be found at'",
" VISIBLE: 'contribution (THANKS!)!!! The au', cursor=1",
"SPEECH OUTPUT: 'contribution (THANKS!)!!! The audio guides can be found at'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"68. Line Up",
["BRAILLE LINE: 'Darragh Ó Héiligh has created several audio guides for Orca. This is a fantastic'",
" VISIBLE: 'Darragh Ó Héiligh has created se', cursor=1",
"SPEECH OUTPUT: 'Darragh Ó Héiligh'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: 'has created several audio guides for Orca. This is a fantastic'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"69. Line Up",
["BRAILLE LINE: 'Audio Guides h1'",
" VISIBLE: 'Audio Guides h1', cursor=1",
"SPEECH OUTPUT: 'Audio Guides heading level 1'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"70. Line Up",
["BRAILLE LINE: 'productive environment composed of users and developers.'",
" VISIBLE: 'productive environment composed ', cursor=1",
"SPEECH OUTPUT: 'productive environment composed of users and developers.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"71. Line Up",
["BRAILLE LINE: 'Please join and participate on the Orca mailing list (archives): it's a helpful, kind, and'",
" VISIBLE: 'Please join and participate on t', cursor=1",
"SPEECH OUTPUT: 'Please join and participate on the'",
"SPEECH OUTPUT: 'Orca mailing list'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: '('",
"SPEECH OUTPUT: 'archives'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: '): it's a helpful, kind, and'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"72. Line Up",
["BRAILLE LINE: 'we use Bugzilla\\).'",
" VISIBLE: 'we use Bugzilla\\).', cursor=1",
"SPEECH OUTPUT: 'we use Bugzilla'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: '\\).'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"73. Line Up",
["BRAILLE LINE: 'problems in other components, is maintained in Bugzilla \\(please see our notes on how'",
" VISIBLE: 'problems in other components, is', cursor=1",
"SPEECH OUTPUT: 'problems in other components, is maintained in'",
"SPEECH OUTPUT: 'Bugzilla'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: '\\(please see our'",
"SPEECH OUTPUT: 'notes on how'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"74. Line Up",
["BRAILLE LINE: 'The complete list of work to do, including bugs and feature requests, along with known'",
" VISIBLE: 'The complete list of work to do,', cursor=1",
"SPEECH OUTPUT: 'The complete list of work to do, including bugs and feature requests, along with known'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"75. Line Up",
["BRAILLE LINE: 'community members.'",
" VISIBLE: 'community members.', cursor=1",
"SPEECH OUTPUT: 'community members'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: '.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"76. Line Up",
["BRAILLE LINE: 'Microsystems, Inc. with contributions from many'",
" VISIBLE: 'Microsystems, Inc. with contribu', cursor=1",
"SPEECH OUTPUT: 'Microsystems, Inc.'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: 'with'",
"SPEECH OUTPUT: 'contributions from many'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"77. Line Up",
["BRAILLE LINE: 'been led by the Accessibility Program Office of Sun'",
" VISIBLE: 'been led by the Accessibility Pr', cursor=1",
"SPEECH OUTPUT: 'been led by the'",
"SPEECH OUTPUT: 'Accessibility Program Office of Sun'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"78. Line Up",
["BRAILLE LINE: '(e.g., the GNOME desktop). The development of Orca has'",
" VISIBLE: '(e.g., the GNOME desktop). The d', cursor=1",
"SPEECH OUTPUT: '(e.g., the GNOME desktop). The development of Orca has'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"79. Line Up",
["BRAILLE LINE: 'access to applications and toolkits that support the AT-SPI'",
" VISIBLE: 'access to applications and toolk', cursor=1",
"SPEECH OUTPUT: 'access to applications and toolkits that support the AT-SPI'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"80. Line Up",
["BRAILLE LINE: 'synthesis, braille, and magnification, Orca helps provide'",
" VISIBLE: 'synthesis, braille, and magnific', cursor=1",
"SPEECH OUTPUT: 'synthesis, braille, and magnification, Orca helps provide'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"81. Line Up",
["BRAILLE LINE: 'impairments. Using various combinations of speech'",
" VISIBLE: 'impairments. Using various combi', cursor=1",
"SPEECH OUTPUT: 'impairments. Using various combinations of speech'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"82. Line Up",
["BRAILLE LINE: 'powerful assistive technology for people with visual'",
" VISIBLE: 'powerful assistive technology fo', cursor=1",
"SPEECH OUTPUT: 'powerful assistive technology for people with visual'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"83. Line Up",
["BRAILLE LINE: 'Orca is a free, open source, flexible, extensible, and'",
" VISIBLE: 'Orca is a free, open source, fle', cursor=1",
"SPEECH OUTPUT: 'Orca is a free, open source, flexible, extensible, and'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"84. Line Up",
["BRAILLE LINE: 'About h1'",
" VISIBLE: 'About h1', cursor=1",
"SPEECH OUTPUT: 'About heading level 1'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"85. Line Up",
["BRAILLE LINE: '8. More Information'",
" VISIBLE: '8. More Information', cursor=1",
"SPEECH OUTPUT: '8.'",
"SPEECH OUTPUT: 'More Information'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"86. Line Up",
["BRAILLE LINE: '7. How Can I Help?'",
" VISIBLE: '7. How Can I Help?', cursor=1",
"SPEECH OUTPUT: '7.'",
"SPEECH OUTPUT: 'How Can I Help?'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"87. Line Up",
["BRAILLE LINE: '6. Accessible Applications'",
" VISIBLE: '6. Accessible Applications', cursor=1",
"SPEECH OUTPUT: '6.'",
"SPEECH OUTPUT: 'Accessible Applications'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"88. Line Up",
["BRAILLE LINE: '5. Configuration/Use'",
" VISIBLE: '5. Configuration/Use', cursor=1",
"SPEECH OUTPUT: '5.'",
"SPEECH OUTPUT: 'Configuration/Use'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"89. Line Up",
["BRAILLE LINE: '4. Download/Installation'",
" VISIBLE: '4. Download/Installation', cursor=1",
"SPEECH OUTPUT: '4.'",
"SPEECH OUTPUT: 'Download/Installation'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"90. Line Up",
["BRAILLE LINE: '3. Audio Guides'",
" VISIBLE: '3. Audio Guides', cursor=1",
"SPEECH OUTPUT: '3.'",
"SPEECH OUTPUT: 'Audio Guides'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"91. Line Up",
["BRAILLE LINE: '2. About'",
" VISIBLE: '2. About', cursor=1",
"SPEECH OUTPUT: '2.'",
"SPEECH OUTPUT: 'About'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"92. Line Up",
["BRAILLE LINE: '1. Welcome to Orca!'",
" VISIBLE: '1. Welcome to Orca!', cursor=1",
"SPEECH OUTPUT: '1.'",
"SPEECH OUTPUT: 'Welcome to Orca!'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"93. Line Up",
["BRAILLE LINE: 'Contents'",
" VISIBLE: 'Contents', cursor=1",
"SPEECH OUTPUT: 'Contents'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"94. Line Up",
["BRAILLE LINE: 'HOT HOT HOT: Notes on access to Firefox 3.0'",
" VISIBLE: 'HOT HOT HOT: Notes on access to ', cursor=1",
"SPEECH OUTPUT: 'HOT HOT HOT: Notes on'",
"SPEECH OUTPUT: 'access to Firefox 3.0'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"95. Line Up",
["BRAILLE LINE: 'Orca Logo'",
" VISIBLE: 'Orca Logo', cursor=1",
"SPEECH OUTPUT: 'Orca Logo link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"96. Line Up",
["BRAILLE LINE: 'Welcome to Orca! h1'",
" VISIBLE: 'Welcome to Orca! h1', cursor=1",
"SPEECH OUTPUT: 'Welcome to Orca! heading level 1'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"97. Line Up",
["BRAILLE LINE: 'Archives\\) | FAQ | DocIndex'",
" VISIBLE: 'Archives\\) | FAQ | DocIndex', cursor=1",
"SPEECH OUTPUT: 'Archives'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: '\\) |'",
"SPEECH OUTPUT: 'FAQ'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: '|'",
"SPEECH OUTPUT: 'DocIndex'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"98. Line Up",
["BRAILLE LINE: 'Home | Download/Installation | Configuration/Use | Accessible Applications | Mailing List \\('",
" VISIBLE: 'Home | Download/Installation | C', cursor=1",
"SPEECH OUTPUT: 'Home'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: '|'",
"SPEECH OUTPUT: 'Download/Installation'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: '|'",
"SPEECH OUTPUT: 'Configuration/Use'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: '|'",
"SPEECH OUTPUT: 'Accessible Applications'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: '|'",
"SPEECH OUTPUT: 'Mailing List'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: '('"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"99. Line Up",
["BRAILLE LINE: 'en Español'",
" VISIBLE: 'en Español', cursor=1",
"SPEECH OUTPUT: 'en Español'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"100. Line Up",
["BRAILLE LINE: 'Home RecentChanges FindPage HelpContents Orca'",
" VISIBLE: 'Home RecentChanges FindPage Help', cursor=1",
"SPEECH OUTPUT: 'Home'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: 'RecentChanges'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: 'FindPage'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: 'HelpContents'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: 'Orca'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"101. Line Up",
["BRAILLE LINE: 'live.gnome.org h1 Search $l Titles push button Text push button'",
" VISIBLE: 'live.gnome.org h1 Search $l Tit', cursor=1",
"SPEECH OUTPUT: 'live.gnome.org heading level 1'",
"SPEECH OUTPUT: 'entry Search.'",
"SPEECH OUTPUT: 'Titles push button'",
"SPEECH OUTPUT: 'Text push button'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"102. Line Up",
["BRAILLE LINE: 'Home News Projects Art Support Development Community'",
" VISIBLE: 'Home News Projects Art Support D', cursor=1",
"SPEECH OUTPUT: 'Home'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: 'News'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: 'Projects'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: 'Art'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: 'Support'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: 'Development'",
"SPEECH OUTPUT: 'link.'",
"SPEECH OUTPUT: 'Community'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
| GNOME/orca | test/keystrokes/firefox/line_nav_wiki_up.py | Python | lgpl-2.1 | 43,250 | ["ORCA"] | 472398a0f25dc079efa6b64edade0dec1fb19e3efe2f8d9384971e5764a8ac2e |
from django.http import HttpResponse
import datetime
from api.models import Citation, Violation
from dateutil import parser
from django.db.models import Q
from django.contrib.auth import logout
from custom_decorators import print_errors
@print_errors
def sms_received(request):
#Automatically reset user's sessions if they haven't communicated in 5 minutes
if 'last_validated' in request.session:
session_expiry = (parser.parse(request.session.get('last_validated', '2000-01-01')) + datetime.timedelta(minutes=5))
if session_expiry < datetime.datetime.now():
print "Session expired! Session expiry time", session_expiry, " | current time", datetime.datetime.now()
del request.session['last_validated']
logout(request)
else:
request.session['last_validated'] = datetime.datetime.now().isoformat()
if 'Body' in request.POST and request.POST['Body'].lower() == "restart":
logout(request)
input_from_user = request.POST.get('Body', '')
if 'auth_type' not in request.session:
#New user!
request.session['auth_type'] = "citation_or_license"
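        # TwiML reply: Twilio POSTs each inbound SMS to this view and reads the
        # <Message> body of the returned XML back to the sender.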
twil = '''<?xml version="1.0" encoding="UTF-8"?>
<Response>
<Message method="GET">Welcome to the St. Louis Regional Municipal Court System Helpline! Please enter your citation number or driver's license number. If you do not have either, reply with "none".</Message>
</Response>
'''
return HttpResponse(twil, content_type='application/xml', status=200)
else:
#Existing user, trying to authenticate
print "Existing user, trying to authenticate"
if not request.session.get('authenticated', False):
if request.session['auth_type'] == "citation_or_license":
#Check and see if valid citation number / driver's license number.
print "Check and see if valid citation number / driver's license number."
                try:
                    potential_citation_number = int(input_from_user)
                except (TypeError, ValueError):
                    potential_citation_number = -1
citation_in_db = Citation.objects.filter(Q(citation_number=potential_citation_number) | Q(drivers_license_number=input_from_user))
if not citation_in_db.exists() or not input_from_user:
#if not, change auth_type to last_name and send user message to send last name
print "if not, change auth_type to last_name and send user message to send last name"
request.session['auth_type'] = "last_name"
twil = '''<?xml version="1.0" encoding="UTF-8"?>
<Response>
<Message method="GET">What is your last name?</Message>
</Response>
'''
return HttpResponse(twil, content_type='application/xml', status=200)
else:
#if so, user authenticated and move on to next step [authenticated=True]
print "if so, user authenticated and move on to next step [authenticated=True][1]"
request.session['citation_number'] = citation_in_db[0].citation_number
request.session['authenticated'] = True
elif request.session['auth_type'] == "last_name":
#Check and make sure users exist with that last name
print "Check and make sure users exist with that last name"
citation_in_db = Citation.objects.filter(last_name__iexact=input_from_user)
if not citation_in_db.exists():
#if not, throw error to user
print "if not, throw error to user2"
request.session['auth_type'] = "last_name"
twil = '''<?xml version="1.0" encoding="UTF-8"?>
<Response>
<Message method="GET">No citations found with that last name. Thanks for using our app!</Message>
</Response>
'''
logout(request)
return HttpResponse(twil, content_type='application/xml', status=200)
else:
#if so, change auth_type to dob and send user message to send dob
print "if so, change auth_type to dob and send user message to send dob"
request.session['last_name'] = input_from_user
request.session['auth_type'] = "dob"
twil = '''<?xml version="1.0" encoding="UTF-8"?>
<Response>
<Message method="GET">What is your date of birth?</Message>
</Response>
'''
return HttpResponse(twil, content_type='application/xml', status=200)
elif request.session['auth_type'] == "dob":
#Check and make sure citations exist with that last name and dob
print "Check and make sure citations exist with that last name and dob"
citation_in_db = Citation.objects.filter(last_name__iexact=request.session['last_name']).filter(date_of_birth=parser.parse(input_from_user))
if not citation_in_db.exists():
#if not, throw error to user
print "if not, throw error to user3"
twil = '''<?xml version="1.0" encoding="UTF-8"?>
<Response>
<Message method="GET">No citations found with that last name and date of birth. Thanks for using our app!</Message>
</Response>
'''
logout(request)
return HttpResponse(twil, content_type='application/xml', status=200)
else:
#if so, authenticated=True and move on to next step
request.session['dob'] = input_from_user
del request.session['auth_type']
request.session['citation_number'] = citation_in_db[0].citation_number
request.session['authenticated'] = True
#user authenticated, send citation info!
print "user authenticated, send citation info!"
citation_in_db = Citation.objects.filter(citation_number=int(request.session['citation_number']))
violations_in_db = Violation.objects.filter(citation_number=request.session['citation_number'])
citation_obj = list(citation_in_db.values())[0]
citation_obj['violations'] = list(violations_in_db.values())
total_owed = float(0)
has_warrant = False
for v in violations_in_db:
total_owed += float(v.fine_amount.strip('$').strip()) if v.fine_amount and v.fine_amount.strip('$').strip() else 0
total_owed += float(v.court_cost.strip('$').strip()) if v.court_cost and v.court_cost.strip('$').strip() else 0
if v.warrant_status:
has_warrant = True
citation_obj['total_owed'] = total_owed
citation_obj['has_warrant'] = has_warrant
twil = '''<?xml version="1.0" encoding="UTF-8"?><Response>'''
if input_from_user == "1":
    for v in violations_in_db:
        violation_info = "Your violation is " + str(v.violation_description) + ", with a fine amount of $" + str(v.fine_amount or 0) + " and a court cost of $" + str(v.court_cost or 0)
        twil += '<Message>' + violation_info + '</Message>'
elif input_from_user == "2":
    citation_info = "Your citation number is " + str(citation_obj['citation_number']) + ", and its date is " + str(citation_obj['citation_date']).split(' ')[0]
    twil += '<Message>' + citation_info + '</Message>'
elif input_from_user == "3":
twil += "<Message>To pay by phone, call (314) 382-6544. To pay in person, go to 7150 Natural Bridge Rd, St Louis, MO 63121. For community service options, visit YourSTLCourts.com or contact your judge to see if you are eligible.</Message>"
else:
    #send general citation info
    ticket_info = "You have a court hearing on " + str(citation_in_db[0].court_date).split(" ")[0] + ", at " + str(citation_in_db[0].court_location) + ", located at " + str(citation_in_db[0].court_address) + ". "
    if has_warrant:
        ticket_info += "You have an outstanding warrant. "
    else:
        ticket_info += "You do not have an outstanding warrant. "
    ticket_info += "You currently have an outstanding balance of $" + str(total_owed) + ". "
    twil += '<Message method="GET">' + ticket_info + '</Message>'
twil += "<Message>For a list of violations, send 1. For citation information, send 2. For options on how to pay outstanding fines, send 3. For additional assistance, please call the court clerk at (314) 382-6544</Message>"
twil += "</Response>"
return HttpResponse(twil, content_type='application/xml', status=200)
|
emeth-/the-foot-globalhack5
|
api/views/text.py
|
Python
|
mit
| 9,578
|
[
"VisIt"
] |
ba856582178beee5a2b03c5a3660da18946176ab45591d0178620ac8690565ad
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras image preprocessing layers."""
# pylint: disable=g-classes-have-attributes
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.compat import compat
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.engine import base_preprocessing_layer
from tensorflow.python.keras.engine.base_preprocessing_layer import PreprocessingLayer
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.keras.utils import control_flow_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_image_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import stateful_random_ops
from tensorflow.python.ops import stateless_random_ops
from tensorflow.python.ops import variables
from tensorflow.python.util.tf_export import keras_export
ResizeMethod = image_ops.ResizeMethod
_RESIZE_METHODS = {
'bilinear': ResizeMethod.BILINEAR,
'nearest': ResizeMethod.NEAREST_NEIGHBOR,
'bicubic': ResizeMethod.BICUBIC,
'area': ResizeMethod.AREA,
'lanczos3': ResizeMethod.LANCZOS3,
'lanczos5': ResizeMethod.LANCZOS5,
'gaussian': ResizeMethod.GAUSSIAN,
'mitchellcubic': ResizeMethod.MITCHELLCUBIC
}
H_AXIS = 1
W_AXIS = 2
def check_fill_mode_and_interpolation(fill_mode, interpolation):
if fill_mode not in {'reflect', 'wrap', 'constant', 'nearest'}:
raise NotImplementedError(
'Unknown `fill_mode` {}. Only `reflect`, `wrap`, '
'`constant` and `nearest` are supported.'.format(fill_mode))
if interpolation not in {'nearest', 'bilinear'}:
raise NotImplementedError('Unknown `interpolation` {}. Only `nearest` and '
'`bilinear` are supported.'.format(interpolation))
@keras_export('keras.layers.experimental.preprocessing.Resizing')
class Resizing(PreprocessingLayer):
"""Image resizing layer.
Resize the batched image input to target height and width. The input should
be a 4-D tensor in the format of NHWC.
Arguments:
height: Integer, the height of the output shape.
width: Integer, the width of the output shape.
interpolation: String, the interpolation method. Defaults to `bilinear`.
Supports `bilinear`, `nearest`, `bicubic`, `area`, `lanczos3`, `lanczos5`,
`gaussian`, `mitchellcubic`
name: A string, the name of the layer.
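Example (a minimal usage sketch; assumes `tf` and `np` are in scope, as in
the `RandomZoom` example elsewhere in this file):
>>> input_img = np.random.random((4, 32, 32, 3))
>>> layer = tf.keras.layers.experimental.preprocessing.Resizing(16, 16)
>>> layer(input_img).shape
TensorShape([4, 16, 16, 3])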
"""
def __init__(self,
height,
width,
interpolation='bilinear',
name=None,
**kwargs):
self.target_height = height
self.target_width = width
self.interpolation = interpolation
self._interpolation_method = get_interpolation(interpolation)
self.input_spec = InputSpec(ndim=4)
super(Resizing, self).__init__(name=name, **kwargs)
base_preprocessing_layer._kpl_gauge.get_cell('V2').set('Resizing')
def call(self, inputs):
outputs = image_ops.resize_images_v2(
images=inputs,
size=[self.target_height, self.target_width],
method=self._interpolation_method)
return outputs
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
return tensor_shape.TensorShape(
[input_shape[0], self.target_height, self.target_width, input_shape[3]])
def get_config(self):
config = {
'height': self.target_height,
'width': self.target_width,
'interpolation': self.interpolation,
}
base_config = super(Resizing, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.experimental.preprocessing.CenterCrop')
class CenterCrop(PreprocessingLayer):
"""Crop the central portion of the images to target height and width.
Input shape:
4D tensor with shape:
`(samples, height, width, channels)`, data_format='channels_last'.
Output shape:
4D tensor with shape:
`(samples, target_height, target_width, channels)`.
If the input height/width is even and the target height/width is odd (or
inversely), the input image is left-padded by 1 pixel.
Arguments:
height: Integer, the height of the output shape.
width: Integer, the width of the output shape.
name: A string, the name of the layer.
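Example (a minimal usage sketch; assumes `tf` and `np` are in scope):
>>> input_img = np.random.random((4, 32, 32, 3))
>>> layer = tf.keras.layers.experimental.preprocessing.CenterCrop(16, 16)
>>> layer(input_img).shape
TensorShape([4, 16, 16, 3])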
"""
def __init__(self, height, width, name=None, **kwargs):
self.target_height = height
self.target_width = width
self.input_spec = InputSpec(ndim=4)
super(CenterCrop, self).__init__(name=name, **kwargs)
base_preprocessing_layer._kpl_gauge.get_cell('V2').set('CenterCrop')
def call(self, inputs):
inputs_shape = array_ops.shape(inputs)
img_hd = inputs_shape[H_AXIS]
img_wd = inputs_shape[W_AXIS]
img_hd_diff = img_hd - self.target_height
img_wd_diff = img_wd - self.target_width
checks = []
checks.append(
check_ops.assert_non_negative(
img_hd_diff,
message='The crop height {} should not be greater than input '
'height.'.format(self.target_height)))
checks.append(
check_ops.assert_non_negative(
img_wd_diff,
message='The crop width {} should not be greater than input '
'width.'.format(self.target_width)))
with ops.control_dependencies(checks):
bbox_h_start = math_ops.cast(img_hd_diff / 2, dtypes.int32)
bbox_w_start = math_ops.cast(img_wd_diff / 2, dtypes.int32)
bbox_begin = array_ops.stack([0, bbox_h_start, bbox_w_start, 0])
bbox_size = array_ops.stack(
[-1, self.target_height, self.target_width, -1])
outputs = array_ops.slice(inputs, bbox_begin, bbox_size)
return outputs
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
return tensor_shape.TensorShape(
[input_shape[0], self.target_height, self.target_width, input_shape[3]])
def get_config(self):
config = {
'height': self.target_height,
'width': self.target_width,
}
base_config = super(CenterCrop, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.experimental.preprocessing.RandomCrop')
class RandomCrop(PreprocessingLayer):
"""Randomly crop the images to target height and width.
This layer will crop all the images in the same batch to the same cropping
location.
By default, random cropping is only applied during training. At inference
time, the images will be first rescaled to preserve the shorter side, and
center cropped. If you need to apply random cropping at inference time,
set `training` to True when calling the layer.
Input shape:
4D tensor with shape:
`(samples, height, width, channels)`, data_format='channels_last'.
Output shape:
4D tensor with shape:
`(samples, target_height, target_width, channels)`.
Arguments:
height: Integer, the height of the output shape.
width: Integer, the width of the output shape.
seed: Integer. Used to create a random seed.
name: A string, the name of the layer.
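Example (a minimal usage sketch; assumes `tf` and `np` are in scope):
>>> input_img = np.random.random((4, 32, 32, 3))
>>> layer = tf.keras.layers.experimental.preprocessing.RandomCrop(16, 16, seed=1)
>>> layer(input_img, training=True).shape
TensorShape([4, 16, 16, 3])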
"""
def __init__(self, height, width, seed=None, name=None, **kwargs):
self.height = height
self.width = width
self.seed = seed
self._rng = make_generator(self.seed)
self.input_spec = InputSpec(ndim=4)
super(RandomCrop, self).__init__(name=name, **kwargs)
base_preprocessing_layer._kpl_gauge.get_cell('V2').set('RandomCrop')
def call(self, inputs, training=True):
if training is None:
training = K.learning_phase()
def random_cropped_inputs():
"""Cropped inputs with stateless random ops."""
input_shape = array_ops.shape(inputs)
crop_size = array_ops.stack(
[input_shape[0], self.height, self.width, input_shape[3]])
check = control_flow_ops.Assert(
math_ops.reduce_all(input_shape >= crop_size),
[self.height, self.width])
input_shape = control_flow_ops.with_dependencies([check], input_shape)
limit = input_shape - crop_size + 1
offset = stateless_random_ops.stateless_random_uniform(
array_ops.shape(input_shape),
dtype=crop_size.dtype,
maxval=crop_size.dtype.max,
seed=self._rng.make_seeds()[:, 0]) % limit
return array_ops.slice(inputs, offset, crop_size)
# TODO(b/143885775): Share logic with Resize and CenterCrop.
def resize_and_center_cropped_inputs():
"""Deterministically resize to shorter side and center crop."""
input_shape = array_ops.shape(inputs)
input_height_t = input_shape[H_AXIS]
input_width_t = input_shape[W_AXIS]
ratio_cond = (input_height_t / input_width_t > (self.height / self.width))
# pylint: disable=g-long-lambda
resized_height = control_flow_util.smart_cond(
ratio_cond,
lambda: math_ops.cast(self.width * input_height_t / input_width_t,
input_height_t.dtype), lambda: self.height)
resized_width = control_flow_util.smart_cond(
ratio_cond, lambda: self.width,
lambda: math_ops.cast(self.height * input_width_t / input_height_t,
input_width_t.dtype))
# pylint: enable=g-long-lambda
resized_inputs = image_ops.resize_images_v2(
images=inputs, size=array_ops.stack([resized_height, resized_width]))
img_hd_diff = resized_height - self.height
img_wd_diff = resized_width - self.width
bbox_h_start = math_ops.cast(img_hd_diff / 2, dtypes.int32)
bbox_w_start = math_ops.cast(img_wd_diff / 2, dtypes.int32)
bbox_begin = array_ops.stack([0, bbox_h_start, bbox_w_start, 0])
bbox_size = array_ops.stack([-1, self.height, self.width, -1])
outputs = array_ops.slice(resized_inputs, bbox_begin, bbox_size)
return outputs
output = control_flow_util.smart_cond(training, random_cropped_inputs,
resize_and_center_cropped_inputs)
original_shape = inputs.shape.as_list()
batch_size, num_channels = original_shape[0], original_shape[3]
output_shape = [batch_size] + [self.height, self.width] + [num_channels]
output.set_shape(output_shape)
return output
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
return tensor_shape.TensorShape(
[input_shape[0], self.height, self.width, input_shape[3]])
def get_config(self):
config = {
'height': self.height,
'width': self.width,
'seed': self.seed,
}
base_config = super(RandomCrop, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.experimental.preprocessing.Rescaling')
class Rescaling(PreprocessingLayer):
"""Multiply inputs by `scale` and adds `offset`.
For instance:
1. To rescale an input in the `[0, 255]` range
to be in the `[0, 1]` range, you would pass `scale=1./255`.
2. To rescale an input in the `[0, 255]` range to be in the `[-1, 1]` range,
you would pass `scale=1./127.5, offset=-1`.
The rescaling is applied both during training and inference.
Input shape:
Arbitrary.
Output shape:
Same as input.
Arguments:
scale: Float, the scale to apply to the inputs.
offset: Float, the offset to apply to the inputs.
name: A string, the name of the layer.
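Example (a minimal sketch of the `[0, 255]` -> `[-1, 1]` mapping described
above; assumes `tf` and `np` are in scope):
>>> layer = tf.keras.layers.experimental.preprocessing.Rescaling(
...     scale=1./127.5, offset=-1)
>>> out = layer(np.array([[0., 127.5, 255.]]))  # approximately [-1., 0., 1.]
>>> out.shape
TensorShape([1, 3])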
"""
def __init__(self, scale, offset=0., name=None, **kwargs):
self.scale = scale
self.offset = offset
super(Rescaling, self).__init__(name=name, **kwargs)
base_preprocessing_layer._kpl_gauge.get_cell('V2').set('Rescaling')
def call(self, inputs):
dtype = self._compute_dtype
scale = math_ops.cast(self.scale, dtype)
offset = math_ops.cast(self.offset, dtype)
return math_ops.cast(inputs, dtype) * scale + offset
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
'scale': self.scale,
'offset': self.offset,
}
base_config = super(Rescaling, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
HORIZONTAL = 'horizontal'
VERTICAL = 'vertical'
HORIZONTAL_AND_VERTICAL = 'horizontal_and_vertical'
@keras_export('keras.layers.experimental.preprocessing.RandomFlip')
class RandomFlip(PreprocessingLayer):
"""Randomly flip each image horizontally and vertically.
This layer will flip the images based on the `mode` attribute.
During inference time, the output will be identical to input. Call the layer
with `training=True` to flip the input.
Input shape:
4D tensor with shape:
`(samples, height, width, channels)`, data_format='channels_last'.
Output shape:
4D tensor with shape:
`(samples, height, width, channels)`, data_format='channels_last'.
Attributes:
mode: String indicating which flip mode to use. Can be "horizontal",
"vertical", or "horizontal_and_vertical". Defaults to
"horizontal_and_vertical". "horizontal" is a left-right flip and
"vertical" is a top-bottom flip.
seed: Integer. Used to create a random seed.
name: A string, the name of the layer.
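Example (a minimal usage sketch; assumes `tf` and `np` are in scope):
>>> layer = tf.keras.layers.experimental.preprocessing.RandomFlip(
...     "horizontal", seed=1)
>>> layer(np.random.random((4, 32, 32, 3)), training=True).shape
TensorShape([4, 32, 32, 3])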
"""
def __init__(self,
mode=HORIZONTAL_AND_VERTICAL,
seed=None,
name=None,
**kwargs):
super(RandomFlip, self).__init__(name=name, **kwargs)
base_preprocessing_layer._kpl_gauge.get_cell('V2').set('RandomFlip')
self.mode = mode
if mode == HORIZONTAL:
self.horizontal = True
self.vertical = False
elif mode == VERTICAL:
self.horizontal = False
self.vertical = True
elif mode == HORIZONTAL_AND_VERTICAL:
self.horizontal = True
self.vertical = True
else:
raise ValueError('RandomFlip layer {name} received an unknown mode '
'argument {arg}'.format(name=name, arg=mode))
self.seed = seed
self._rng = make_generator(self.seed)
self.input_spec = InputSpec(ndim=4)
def call(self, inputs, training=True):
if training is None:
training = K.learning_phase()
def random_flipped_inputs():
flipped_outputs = inputs
if self.horizontal:
flipped_outputs = image_ops.random_flip_left_right(flipped_outputs,
self.seed)
if self.vertical:
flipped_outputs = image_ops.random_flip_up_down(
flipped_outputs, self.seed)
return flipped_outputs
output = control_flow_util.smart_cond(training, random_flipped_inputs,
lambda: inputs)
output.set_shape(inputs.shape)
return output
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
'mode': self.mode,
'seed': self.seed,
}
base_config = super(RandomFlip, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
# TODO(tanzheny): Add examples, here and everywhere.
@keras_export('keras.layers.experimental.preprocessing.RandomTranslation')
class RandomTranslation(PreprocessingLayer):
"""Randomly translate each image during training.
Arguments:
height_factor: a float represented as fraction of value, or a tuple
of size 2 representing lower and upper bound for shifting vertically.
A negative value means shifting image up, while a positive value
means shifting image down. When represented as a single positive float,
this value is used for both the upper and lower bound. For instance,
`height_factor=(-0.2, 0.3)` results in an output shifted by a random
amount in the range [-20%, +30%].
`height_factor=0.2` results in an output shifted vertically by a random
amount in the range [-20%, +20%].
width_factor: a float represented as fraction of value, or a tuple
of size 2 representing lower and upper bound for shifting horizontally.
A negative value means shifting image left, while a positive value
means shifting image right. When represented as a single positive float,
this value is used for both the upper and lower bound. For instance,
`width_factor=(-0.2, 0.3)` results in an output shifted left by up to 20%
or shifted right by up to 30%.
`width_factor=0.2` results in an output shifted left or right by a random
amount in the range [-20%, +20%].
fill_mode: Points outside the boundaries of the input are filled according
to the given mode (one of `{'constant', 'reflect', 'wrap', 'nearest'}`).
- *reflect*: `(d c b a | a b c d | d c b a)`
The input is extended by reflecting about the edge of the last pixel.
- *constant*: `(k k k k | a b c d | k k k k)`
The input is extended by filling all values beyond the edge with the
same constant value k = 0.
- *wrap*: `(a b c d | a b c d | a b c d)`
The input is extended by wrapping around to the opposite edge.
- *nearest*: `(a a a a | a b c d | d d d d)`
The input is extended by the nearest pixel.
interpolation: Interpolation mode. Supported values: "nearest", "bilinear".
seed: Integer. Used to create a random seed.
name: A string, the name of the layer.
fill_value: a float represents the value to be filled outside the
boundaries when `fill_mode` is "constant".
Input shape:
4D tensor with shape: `(samples, height, width, channels)`,
data_format='channels_last'.
Output shape:
4D tensor with shape: `(samples, height, width, channels)`,
data_format='channels_last'.
Raises:
ValueError: if either bound is not between [-1, 1], or upper bound is
less than lower bound.
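Example (a minimal usage sketch; the output keeps the input shape; assumes
`tf` and `np` are in scope):
>>> layer = tf.keras.layers.experimental.preprocessing.RandomTranslation(
...     height_factor=0.2, width_factor=0.2, fill_mode='constant', seed=1)
>>> layer(np.random.random((4, 32, 32, 3)), training=True).shape
TensorShape([4, 32, 32, 3])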
"""
def __init__(self,
height_factor,
width_factor,
fill_mode='reflect',
interpolation='bilinear',
seed=None,
name=None,
fill_value=0.0,
**kwargs):
self.height_factor = height_factor
if isinstance(height_factor, (tuple, list)):
self.height_lower = height_factor[0]
self.height_upper = height_factor[1]
else:
self.height_lower = -height_factor
self.height_upper = height_factor
if self.height_upper < self.height_lower:
raise ValueError('`height_factor` cannot have upper bound less than '
'lower bound, got {}'.format(height_factor))
if abs(self.height_lower) > 1. or abs(self.height_upper) > 1.:
raise ValueError('`height_factor` must have values between [-1, 1], '
'got {}'.format(height_factor))
self.width_factor = width_factor
if isinstance(width_factor, (tuple, list)):
self.width_lower = width_factor[0]
self.width_upper = width_factor[1]
else:
self.width_lower = -width_factor
self.width_upper = width_factor
if self.width_upper < self.width_lower:
raise ValueError('`width_factor` cannot have upper bound less than '
'lower bound, got {}'.format(width_factor))
if abs(self.width_lower) > 1. or abs(self.width_upper) > 1.:
raise ValueError('`width_factor` must have values between [-1, 1], '
'got {}'.format(width_factor))
check_fill_mode_and_interpolation(fill_mode, interpolation)
self.fill_mode = fill_mode
self.fill_value = fill_value
self.interpolation = interpolation
self.seed = seed
self._rng = make_generator(self.seed)
self.input_spec = InputSpec(ndim=4)
super(RandomTranslation, self).__init__(name=name, **kwargs)
base_preprocessing_layer._kpl_gauge.get_cell('V2').set('RandomTranslation')
def call(self, inputs, training=True):
if training is None:
training = K.learning_phase()
def random_translated_inputs():
"""Translated inputs with random ops."""
inputs_shape = array_ops.shape(inputs)
batch_size = inputs_shape[0]
h_axis, w_axis = H_AXIS, W_AXIS
img_hd = math_ops.cast(inputs_shape[h_axis], dtypes.float32)
img_wd = math_ops.cast(inputs_shape[w_axis], dtypes.float32)
height_translate = self._rng.uniform(
shape=[batch_size, 1],
minval=self.height_lower,
maxval=self.height_upper,
dtype=dtypes.float32)
height_translate = height_translate * img_hd
width_translate = self._rng.uniform(
shape=[batch_size, 1],
minval=self.width_lower,
maxval=self.width_upper,
dtype=dtypes.float32)
width_translate = width_translate * img_wd
translations = math_ops.cast(
array_ops.concat([width_translate, height_translate], axis=1),
dtype=dtypes.float32)
return transform(
inputs,
get_translation_matrix(translations),
interpolation=self.interpolation,
fill_mode=self.fill_mode,
fill_value=self.fill_value)
output = control_flow_util.smart_cond(training, random_translated_inputs,
lambda: inputs)
output.set_shape(inputs.shape)
return output
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
'height_factor': self.height_factor,
'width_factor': self.width_factor,
'fill_mode': self.fill_mode,
'fill_value': self.fill_value,
'interpolation': self.interpolation,
'seed': self.seed,
}
base_config = super(RandomTranslation, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def get_translation_matrix(translations, name=None):
"""Returns projective transform(s) for the given translation(s).
Args:
translations: A matrix of 2-element lists representing [dx, dy] to translate
for each image (for a batch of images).
name: The name of the op.
Returns:
A tensor of shape (num_images, 8) projective transforms which can be given
to `transform`.
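For example (an illustrative sketch; assumes `tf` is in scope), a single
`[dx=2, dy=3]` translation yields one row of 8 transform parameters:
>>> get_translation_matrix(tf.constant([[2., 3.]])).shape
TensorShape([1, 8])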
"""
with K.name_scope(name or 'translation_matrix'):
num_translations = array_ops.shape(translations)[0]
# The translation matrix looks like:
# [[1 0 -dx]
# [0 1 -dy]
# [0 0 1]]
# where the last entry is implicit.
# Translation matrices are always float32.
return array_ops.concat(
values=[
array_ops.ones((num_translations, 1), dtypes.float32),
array_ops.zeros((num_translations, 1), dtypes.float32),
-translations[:, 0, None],
array_ops.zeros((num_translations, 1), dtypes.float32),
array_ops.ones((num_translations, 1), dtypes.float32),
-translations[:, 1, None],
array_ops.zeros((num_translations, 2), dtypes.float32),
],
axis=1)
def transform(images,
transforms,
fill_mode='reflect',
fill_value=0.0,
interpolation='bilinear',
output_shape=None,
name=None):
"""Applies the given transform(s) to the image(s).
Args:
images: A tensor of shape (num_images, num_rows, num_columns, num_channels)
(NHWC), (num_rows, num_columns, num_channels) (HWC), or (num_rows,
num_columns) (HW). The rank must be statically known (the shape is not
`TensorShape(None)`).
transforms: Projective transform matrix/matrices. A vector of length 8 or
tensor of size N x 8. If one row of transforms is [a0, a1, a2, b0, b1, b2,
c0, c1], then it maps the *output* point `(x, y)` to a transformed *input*
point `(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`, where
`k = c0 x + c1 y + 1`. The transforms are *inverted* compared to the
transform mapping input points to output points. Note that gradients are
not backpropagated into transformation parameters.
fill_mode: Points outside the boundaries of the input are filled according
to the given mode (one of `{'constant', 'reflect', 'wrap', 'nearest'}`).
fill_value: a float represents the value to be filled outside the
boundaries when `fill_mode` is "constant".
interpolation: Interpolation mode. Supported values: "nearest", "bilinear".
output_shape: Output dimension after the transform, [height, width]. If None,
output is the same size as input image.
name: The name of the op.
## Fill mode.
Behavior for each valid value is as follows:
reflect (d c b a | a b c d | d c b a)
The input is extended by reflecting about the edge of the last pixel.
constant (k k k k | a b c d | k k k k)
The input is extended by filling all values beyond the edge with the same
constant value k = 0.
wrap (a b c d | a b c d | a b c d)
The input is extended by wrapping around to the opposite edge.
nearest (a a a a | a b c d | d d d d)
The input is extended by the nearest pixel.
Input shape:
4D tensor with shape: `(samples, height, width, channels)`,
data_format='channels_last'.
Output shape:
4D tensor with shape: `(samples, height, width, channels)`,
data_format='channels_last'.
Returns:
Image(s) with the same type and shape as `images`, with the given
transform(s) applied. Transformed coordinates outside of the input image
will be filled with zeros.
Raises:
TypeError: If `image` is an invalid type.
ValueError: If output shape is not 1-D int32 Tensor.
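Example (an illustrative sketch; the identity transform
`[1, 0, 0, 0, 1, 0, 0, 0]` maps every output point to the same input point,
so the image passes through unchanged; assumes `tf` and `np` are in scope):
>>> img = np.random.random((1, 4, 4, 1)).astype('float32')
>>> transform(img, [[1., 0., 0., 0., 1., 0., 0., 0.]]).shape
TensorShape([1, 4, 4, 1])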
"""
with K.name_scope(name or 'transform'):
if output_shape is None:
output_shape = array_ops.shape(images)[1:3]
if not context.executing_eagerly():
output_shape_value = tensor_util.constant_value(output_shape)
if output_shape_value is not None:
output_shape = output_shape_value
output_shape = ops.convert_to_tensor_v2_with_dispatch(
output_shape, dtypes.int32, name='output_shape')
if not output_shape.get_shape().is_compatible_with([2]):
raise ValueError('output_shape must be a 1-D Tensor of 2 elements: '
'new_height, new_width, instead got '
'{}'.format(output_shape))
fill_value = ops.convert_to_tensor_v2_with_dispatch(
fill_value, dtypes.float32, name='fill_value')
if compat.forward_compatible(2020, 8, 5):
return gen_image_ops.ImageProjectiveTransformV3(
images=images,
output_shape=output_shape,
fill_value=fill_value,
transforms=transforms,
fill_mode=fill_mode.upper(),
interpolation=interpolation.upper())
return gen_image_ops.ImageProjectiveTransformV2(
images=images,
output_shape=output_shape,
transforms=transforms,
fill_mode=fill_mode.upper(),
interpolation=interpolation.upper())
def get_rotation_matrix(angles, image_height, image_width, name=None):
"""Returns projective transform(s) for the given angle(s).
Args:
angles: A scalar angle to rotate all images by, or (for batches of images) a
vector with an angle to rotate each image in the batch. The rank must be
statically known (the shape is not `TensorShape(None)`).
image_height: Height of the image(s) to be transformed.
image_width: Width of the image(s) to be transformed.
name: The name of the op.
Returns:
A tensor of shape (num_images, 8). Projective transforms which can be given
to operation `image_projective_transform_v2`. If one row of transforms is
[a0, a1, a2, b0, b1, b2, c0, c1], then it maps the *output* point
`(x, y)` to a transformed *input* point
`(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`,
where `k = c0 x + c1 y + 1`.
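For example (an illustrative sketch; assumes `tf` is in scope), a zero
angle yields the identity-like transform row for a single image:
>>> get_rotation_matrix(tf.constant([0.]), 4., 4.).shape
TensorShape([1, 8])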
"""
with K.name_scope(name or 'rotation_matrix'):
x_offset = ((image_width - 1) - (math_ops.cos(angles) *
(image_width - 1) - math_ops.sin(angles) *
(image_height - 1))) / 2.0
y_offset = ((image_height - 1) - (math_ops.sin(angles) *
(image_width - 1) + math_ops.cos(angles) *
(image_height - 1))) / 2.0
num_angles = array_ops.shape(angles)[0]
return array_ops.concat(
values=[
math_ops.cos(angles)[:, None],
-math_ops.sin(angles)[:, None],
x_offset[:, None],
math_ops.sin(angles)[:, None],
math_ops.cos(angles)[:, None],
y_offset[:, None],
array_ops.zeros((num_angles, 2), dtypes.float32),
],
axis=1)
@keras_export('keras.layers.experimental.preprocessing.RandomRotation')
class RandomRotation(PreprocessingLayer):
"""Randomly rotate each image.
By default, random rotations are only applied during training.
At inference time, the layer does nothing. If you need to apply random
rotations at inference time, set `training` to True when calling the layer.
Input shape:
4D tensor with shape:
`(samples, height, width, channels)`, data_format='channels_last'.
Output shape:
4D tensor with shape:
`(samples, height, width, channels)`, data_format='channels_last'.
Attributes:
factor: a float represented as fraction of 2pi, or a tuple of size
2 representing lower and upper bound for rotating clockwise and
counter-clockwise. A positive value means rotating counter-clockwise,
while a negative value means rotating clockwise. When represented as a single
float, this value is used for both the upper and lower bound. For
instance, `factor=(-0.2, 0.3)` results in an output
rotation by a random amount in the range `[-20% * 2pi, 30% * 2pi]`.
`factor=0.2` results in an output rotating by a random amount in the range
`[-20% * 2pi, 20% * 2pi]`.
fill_mode: Points outside the boundaries of the input are filled according
to the given mode (one of `{'constant', 'reflect', 'wrap', 'nearest'}`).
- *reflect*: `(d c b a | a b c d | d c b a)`
The input is extended by reflecting about the edge of the last pixel.
- *constant*: `(k k k k | a b c d | k k k k)`
The input is extended by filling all values beyond the edge with the
same constant value k = 0.
- *wrap*: `(a b c d | a b c d | a b c d)`
The input is extended by wrapping around to the opposite edge.
- *nearest*: `(a a a a | a b c d | d d d d)`
The input is extended by the nearest pixel.
interpolation: Interpolation mode. Supported values: "nearest", "bilinear".
seed: Integer. Used to create a random seed.
name: A string, the name of the layer.
fill_value: a float represents the value to be filled outside the
boundaries when `fill_mode` is "constant".
Raises:
ValueError: if upper bound of `factor` is less than lower bound.
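Example (a minimal usage sketch; the output keeps the input shape; assumes
`tf` and `np` are in scope):
>>> layer = tf.keras.layers.experimental.preprocessing.RandomRotation(
...     factor=0.2, seed=1)
>>> layer(np.random.random((4, 32, 32, 3)), training=True).shape
TensorShape([4, 32, 32, 3])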
"""
def __init__(self,
factor,
fill_mode='reflect',
interpolation='bilinear',
seed=None,
name=None,
fill_value=0.0,
**kwargs):
self.factor = factor
if isinstance(factor, (tuple, list)):
self.lower = factor[0]
self.upper = factor[1]
else:
self.lower = -factor
self.upper = factor
if self.upper < self.lower:
raise ValueError('`factor` cannot have upper bound less than '
                 'lower bound, got {}'.format(factor))
check_fill_mode_and_interpolation(fill_mode, interpolation)
self.fill_mode = fill_mode
self.fill_value = fill_value
self.interpolation = interpolation
self.seed = seed
self._rng = make_generator(self.seed)
self.input_spec = InputSpec(ndim=4)
super(RandomRotation, self).__init__(name=name, **kwargs)
base_preprocessing_layer._kpl_gauge.get_cell('V2').set('RandomRotation')
def call(self, inputs, training=True):
if training is None:
training = K.learning_phase()
def random_rotated_inputs():
"""Rotated inputs with random ops."""
inputs_shape = array_ops.shape(inputs)
batch_size = inputs_shape[0]
img_hd = math_ops.cast(inputs_shape[H_AXIS], dtypes.float32)
img_wd = math_ops.cast(inputs_shape[W_AXIS], dtypes.float32)
min_angle = self.lower * 2. * np.pi
max_angle = self.upper * 2. * np.pi
angles = self._rng.uniform(
shape=[batch_size], minval=min_angle, maxval=max_angle)
return transform(
inputs,
get_rotation_matrix(angles, img_hd, img_wd),
fill_mode=self.fill_mode,
fill_value=self.fill_value,
interpolation=self.interpolation)
output = control_flow_util.smart_cond(training, random_rotated_inputs,
lambda: inputs)
output.set_shape(inputs.shape)
return output
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
'factor': self.factor,
'fill_mode': self.fill_mode,
'fill_value': self.fill_value,
'interpolation': self.interpolation,
'seed': self.seed,
}
base_config = super(RandomRotation, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.experimental.preprocessing.RandomZoom')
class RandomZoom(PreprocessingLayer):
"""Randomly zoom each image during training.
Arguments:
height_factor: a float represented as fraction of value, or a tuple
of size 2 representing lower and upper bound for zooming vertically.
When represented as a single float, this value is used for both the
upper and lower bound. A positive value means zooming out, while a
negative value means zooming in.
For instance, `height_factor=(0.2, 0.3)` results in an output zoomed out
by a random amount in the range [+20%, +30%].
`height_factor=(-0.3, -0.2)` results in an output zoomed in by a random
amount in the range [+20%, +30%].
width_factor: a float represented as fraction of value, or a tuple
of size 2 representing lower and upper bound for zooming horizontally.
When represented as a single float, this value is used for both the
upper and lower bound.
For instance, `width_factor=(0.2, 0.3)` results in an output zooming out
between 20% and 30%.
`width_factor=(-0.3, -0.2)` results in an output zooming in between 20%
and 30%. Defaults to `None`, i.e., zooming in both vertical and horizontal
directions while preserving the aspect ratio.
fill_mode: Points outside the boundaries of the input are filled according
to the given mode (one of `{'constant', 'reflect', 'wrap', 'nearest'}`).
- *reflect*: `(d c b a | a b c d | d c b a)`
The input is extended by reflecting about the edge of the last pixel.
- *constant*: `(k k k k | a b c d | k k k k)`
The input is extended by filling all values beyond the edge with the
same constant value k = 0.
- *wrap*: `(a b c d | a b c d | a b c d)`
The input is extended by wrapping around to the opposite edge.
- *nearest*: `(a a a a | a b c d | d d d d)`
The input is extended by the nearest pixel.
interpolation: Interpolation mode. Supported values: "nearest", "bilinear".
seed: Integer. Used to create a random seed.
name: A string, the name of the layer.
fill_value: a float represents the value to be filled outside the
boundaries when `fill_mode` is "constant".
Example:
>>> input_img = np.random.random((32, 224, 224, 3))
>>> layer = tf.keras.layers.experimental.preprocessing.RandomZoom(.5, .2)
>>> out_img = layer(input_img)
>>> out_img.shape
TensorShape([32, 224, 224, 3])
Input shape:
4D tensor with shape:
`(samples, height, width, channels)`, data_format='channels_last'.
Output shape:
4D tensor with shape:
`(samples, height, width, channels)`, data_format='channels_last'.
Raises:
ValueError: if `height_factor` has values not between [-1, 1], or if
`width_factor` has values smaller than -1.
"""
def __init__(self,
height_factor,
width_factor=None,
fill_mode='reflect',
interpolation='bilinear',
seed=None,
name=None,
fill_value=0.0,
**kwargs):
self.height_factor = height_factor
if isinstance(height_factor, (tuple, list)):
self.height_lower = height_factor[0]
self.height_upper = height_factor[1]
else:
self.height_lower = -height_factor
self.height_upper = height_factor
if abs(self.height_lower) > 1. or abs(self.height_upper) > 1.:
raise ValueError('`height_factor` must have values between [-1, 1], '
'got {}'.format(height_factor))
self.width_factor = width_factor
if width_factor is not None:
if isinstance(width_factor, (tuple, list)):
self.width_lower = width_factor[0]
self.width_upper = width_factor[1]
else:
self.width_lower = -width_factor # pylint: disable=invalid-unary-operand-type
self.width_upper = width_factor
if self.width_lower < -1. or self.width_upper < -1.:
raise ValueError('`width_factor` must have values larger than -1, '
'got {}'.format(width_factor))
check_fill_mode_and_interpolation(fill_mode, interpolation)
self.fill_mode = fill_mode
self.fill_value = fill_value
self.interpolation = interpolation
self.seed = seed
self._rng = make_generator(self.seed)
self.input_spec = InputSpec(ndim=4)
super(RandomZoom, self).__init__(name=name, **kwargs)
base_preprocessing_layer._kpl_gauge.get_cell('V2').set('RandomZoom')
def call(self, inputs, training=True):
if training is None:
training = K.learning_phase()
def random_zoomed_inputs():
"""Zoomed inputs with random ops."""
inputs_shape = array_ops.shape(inputs)
batch_size = inputs_shape[0]
img_hd = math_ops.cast(inputs_shape[H_AXIS], dtypes.float32)
img_wd = math_ops.cast(inputs_shape[W_AXIS], dtypes.float32)
height_zoom = self._rng.uniform(
shape=[batch_size, 1],
minval=1. + self.height_lower,
maxval=1. + self.height_upper)
if self.width_factor is not None:
width_zoom = self._rng.uniform(
shape=[batch_size, 1],
minval=1. + self.width_lower,
maxval=1. + self.width_upper)
else:
width_zoom = height_zoom
zooms = math_ops.cast(
array_ops.concat([width_zoom, height_zoom], axis=1),
dtype=dtypes.float32)
return transform(
inputs,
get_zoom_matrix(zooms, img_hd, img_wd),
fill_mode=self.fill_mode,
fill_value=self.fill_value,
interpolation=self.interpolation)
output = control_flow_util.smart_cond(training, random_zoomed_inputs,
lambda: inputs)
output.set_shape(inputs.shape)
return output
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
'height_factor': self.height_factor,
'width_factor': self.width_factor,
'fill_mode': self.fill_mode,
'fill_value': self.fill_value,
'interpolation': self.interpolation,
'seed': self.seed,
}
base_config = super(RandomZoom, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def get_zoom_matrix(zooms, image_height, image_width, name=None):
"""Returns projective transform(s) for the given zoom(s).
Args:
zooms: A matrix of 2-element lists representing [zx, zy] to zoom
for each image (for a batch of images).
image_height: Height of the image(s) to be transformed.
image_width: Width of the image(s) to be transformed.
name: The name of the op.
Returns:
A tensor of shape (num_images, 8). Projective transforms which can be given
to operation `image_projective_transform_v2`. If one row of transforms is
[a0, a1, a2, b0, b1, b2, c0, c1], then it maps the *output* point
`(x, y)` to a transformed *input* point
`(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`,
where `k = c0 x + c1 y + 1`.
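For example (an illustrative sketch; assumes `tf` is in scope), a unit zoom
`[zx=1, zy=1]` yields the identity transform row for a 4x4 image:
>>> get_zoom_matrix(tf.constant([[1., 1.]]), 4., 4.).shape
TensorShape([1, 8])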
"""
with K.name_scope(name or 'zoom_matrix'):
num_zooms = array_ops.shape(zooms)[0]
# The zoom matrix looks like:
# [[zx 0 0]
# [0 zy 0]
# [0 0 1]]
# where the last entry is implicit.
# Zoom matrices are always float32.
x_offset = ((image_width - 1.) / 2.0) * (1.0 - zooms[:, 0, None])
y_offset = ((image_height - 1.) / 2.0) * (1.0 - zooms[:, 1, None])
return array_ops.concat(
values=[
zooms[:, 0, None],
array_ops.zeros((num_zooms, 1), dtypes.float32),
x_offset,
array_ops.zeros((num_zooms, 1), dtypes.float32),
zooms[:, 1, None],
y_offset,
array_ops.zeros((num_zooms, 2), dtypes.float32),
],
axis=1)
@keras_export('keras.layers.experimental.preprocessing.RandomContrast')
class RandomContrast(PreprocessingLayer):
"""Adjust the contrast of an image or images by a random factor.
Contrast is adjusted independently for each channel of each image during
training.
For each channel, this layer computes the mean of the image pixels in the
channel and then adjusts each component `x` of each pixel to
`(x - mean) * contrast_factor + mean`.
Input shape:
4D tensor with shape:
`(samples, height, width, channels)`, data_format='channels_last'.
Output shape:
4D tensor with shape:
`(samples, height, width, channels)`, data_format='channels_last'.
Attributes:
factor: a positive float represented as fraction of value, or a tuple of
size 2 representing lower and upper bound. When represented as a single
float, lower = upper. The contrast factor will be randomly picked between
[1.0 - lower, 1.0 + upper].
seed: Integer. Used to create a random seed.
name: A string, the name of the layer.
Raises:
ValueError: if lower bound is not between [0, 1], or upper bound is
negative.
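Example (a minimal usage sketch; assumes `tf` and `np` are in scope):
>>> layer = tf.keras.layers.experimental.preprocessing.RandomContrast(
...     factor=0.2, seed=1)
>>> layer(np.random.random((4, 32, 32, 3)), training=True).shape
TensorShape([4, 32, 32, 3])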
"""
def __init__(self, factor, seed=None, name=None, **kwargs):
self.factor = factor
if isinstance(factor, (tuple, list)):
self.lower = factor[0]
self.upper = factor[1]
else:
self.lower = self.upper = factor
if self.lower < 0. or self.upper < 0. or self.lower > 1.:
raise ValueError('Factor cannot have negative values or greater than 1.0,'
' got {}'.format(factor))
self.seed = seed
self.input_spec = InputSpec(ndim=4)
super(RandomContrast, self).__init__(name=name, **kwargs)
base_preprocessing_layer._kpl_gauge.get_cell('V2').set('RandomContrast')
def call(self, inputs, training=True):
if training is None:
training = K.learning_phase()
def random_contrasted_inputs():
return image_ops.random_contrast(inputs, 1. - self.lower, 1. + self.upper,
self.seed)
output = control_flow_util.smart_cond(training, random_contrasted_inputs,
lambda: inputs)
output.set_shape(inputs.shape)
return output
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
'factor': self.factor,
'seed': self.seed,
}
base_config = super(RandomContrast, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.experimental.preprocessing.RandomHeight')
class RandomHeight(PreprocessingLayer):
"""Randomly vary the height of a batch of images during training.
Adjusts the height of a batch of images by a random factor. The input
should be a 4-D tensor in the "channels_last" image data format.
By default, this layer is inactive during inference.
Arguments:
factor: A positive float (fraction of original height), or a tuple of size 2
representing lower and upper bound for resizing vertically. When
represented as a single float, this value is used for both the upper and
lower bound. For instance, `factor=(0.2, 0.3)` results in an output with
height changed by a random amount in the range `[20%, 30%]`.
`factor=(-0.2, 0.3)` results in an output with height changed by a random
amount in the range `[-20%, +30%]`. `factor=0.2` results in an output with
height changed by a random amount in the range `[-20%, +20%]`.
interpolation: String, the interpolation method. Defaults to `bilinear`.
Supports `bilinear`, `nearest`, `bicubic`, `area`, `lanczos3`, `lanczos5`,
`gaussian`, `mitchellcubic`
seed: Integer. Used to create a random seed.
name: A string, the name of the layer.
Input shape:
4D tensor with shape: `(samples, height, width, channels)`
(data_format='channels_last').
Output shape:
4D tensor with shape: `(samples, random_height, width, channels)`.
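Example (a minimal usage sketch; the height dimension becomes dynamic, so
only `None` is known statically; assumes `tf` and `np` are in scope):
>>> layer = tf.keras.layers.experimental.preprocessing.RandomHeight(0.2, seed=1)
>>> out = layer(np.random.random((4, 32, 32, 3)), training=True)
>>> out.shape.as_list()
[4, None, 32, 3]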
"""
def __init__(self,
factor,
interpolation='bilinear',
seed=None,
name=None,
**kwargs):
self.factor = factor
if isinstance(factor, (tuple, list)):
self.height_lower = factor[0]
self.height_upper = factor[1]
else:
self.height_lower = -factor
self.height_upper = factor
if self.height_upper < self.height_lower:
raise ValueError('`factor` cannot have upper bound less than '
'lower bound, got {}'.format(factor))
if self.height_lower < -1. or self.height_upper < -1.:
raise ValueError('`factor` must have values larger than -1, '
'got {}'.format(factor))
self.interpolation = interpolation
self._interpolation_method = get_interpolation(interpolation)
self.input_spec = InputSpec(ndim=4)
self.seed = seed
self._rng = make_generator(self.seed)
super(RandomHeight, self).__init__(name=name, **kwargs)
base_preprocessing_layer._kpl_gauge.get_cell('V2').set('RandomHeight')
def call(self, inputs, training=True):
if training is None:
training = K.learning_phase()
def random_height_inputs():
"""Inputs height-adjusted with random ops."""
inputs_shape = array_ops.shape(inputs)
img_hd = math_ops.cast(inputs_shape[H_AXIS], dtypes.float32)
img_wd = inputs_shape[W_AXIS]
height_factor = self._rng.uniform(
shape=[],
minval=(1.0 + self.height_lower),
maxval=(1.0 + self.height_upper))
adjusted_height = math_ops.cast(height_factor * img_hd, dtypes.int32)
adjusted_size = array_ops.stack([adjusted_height, img_wd])
output = image_ops.resize_images_v2(
images=inputs, size=adjusted_size, method=self._interpolation_method)
original_shape = inputs.shape.as_list()
output_shape = [original_shape[0]] + [None] + original_shape[2:4]
output.set_shape(output_shape)
return output
return control_flow_util.smart_cond(training, random_height_inputs,
lambda: inputs)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
return tensor_shape.TensorShape(
[input_shape[0], None, input_shape[2], input_shape[3]])
def get_config(self):
config = {
'factor': self.factor,
'interpolation': self.interpolation,
'seed': self.seed,
}
base_config = super(RandomHeight, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.experimental.preprocessing.RandomWidth')
class RandomWidth(PreprocessingLayer):
"""Randomly vary the width of a batch of images during training.
Adjusts the width of a batch of images by a random factor. The input
should be a 4-D tensor in the "channels_last" image data format.
By default, this layer is inactive during inference.
Arguments:
factor: A positive float (fraction of original width), or a tuple of size 2
representing lower and upper bound for resizing horizontally. When
represented as a single float, this value is used for both the upper and
lower bound. For instance, `factor=(0.2, 0.3)` results in an output with
width changed by a random amount in the range `[20%, 30%]`.
`factor=(-0.2, 0.3)` results in an output with width changed by a random
amount in the range `[-20%, +30%]`. `factor=0.2` results in an output with
width changed by a random amount in the range `[-20%, +20%]`.
interpolation: String, the interpolation method. Defaults to `bilinear`.
Supports `bilinear`, `nearest`, `bicubic`, `area`, `lanczos3`, `lanczos5`,
`gaussian`, `mitchellcubic`
seed: Integer. Used to create a random seed.
name: A string, the name of the layer.
Input shape:
4D tensor with shape:
`(samples, height, width, channels)` (data_format='channels_last').
Output shape:
4D tensor with shape:
`(samples, height, random_width, channels)`.
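Example (a minimal usage sketch; the width dimension becomes dynamic, so
only `None` is known statically; assumes `tf` and `np` are in scope):
>>> layer = tf.keras.layers.experimental.preprocessing.RandomWidth(0.2, seed=1)
>>> out = layer(np.random.random((4, 32, 32, 3)), training=True)
>>> out.shape.as_list()
[4, 32, None, 3]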
"""
def __init__(self,
factor,
interpolation='bilinear',
seed=None,
name=None,
**kwargs):
self.factor = factor
if isinstance(factor, (tuple, list)):
self.width_lower = factor[0]
self.width_upper = factor[1]
else:
self.width_lower = -factor
self.width_upper = factor
if self.width_upper < self.width_lower:
raise ValueError('`factor` cannot have upper bound less than '
'lower bound, got {}'.format(factor))
if self.width_lower < -1. or self.width_upper < -1.:
raise ValueError('`factor` must have values larger than -1, '
'got {}'.format(factor))
self.interpolation = interpolation
self._interpolation_method = get_interpolation(interpolation)
self.input_spec = InputSpec(ndim=4)
self.seed = seed
self._rng = make_generator(self.seed)
super(RandomWidth, self).__init__(name=name, **kwargs)
base_preprocessing_layer._kpl_gauge.get_cell('V2').set('RandomWidth')
def call(self, inputs, training=True):
if training is None:
training = K.learning_phase()
def random_width_inputs():
"""Inputs width-adjusted with random ops."""
inputs_shape = array_ops.shape(inputs)
img_hd = inputs_shape[H_AXIS]
img_wd = math_ops.cast(inputs_shape[W_AXIS], dtypes.float32)
width_factor = self._rng.uniform(
shape=[],
minval=(1.0 + self.width_lower),
maxval=(1.0 + self.width_upper))
adjusted_width = math_ops.cast(width_factor * img_wd, dtypes.int32)
adjusted_size = array_ops.stack([img_hd, adjusted_width])
output = image_ops.resize_images_v2(
images=inputs, size=adjusted_size, method=self._interpolation_method)
original_shape = inputs.shape.as_list()
output_shape = original_shape[0:2] + [None] + [original_shape[3]]
output.set_shape(output_shape)
return output
return control_flow_util.smart_cond(training, random_width_inputs,
lambda: inputs)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
return tensor_shape.TensorShape(
[input_shape[0], input_shape[1], None, input_shape[3]])
def get_config(self):
config = {
'factor': self.factor,
'interpolation': self.interpolation,
'seed': self.seed,
}
base_config = super(RandomWidth, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
# TODO(b/147877541, b/158339556): This class is added to temporarily enable
# creating generators within distribution strategies. Remove it when the proper
# API is in place.
class _RandomGenerator(stateful_random_ops.Generator):
"""A subclass that allows creation inside distribution strategies.
This is a temporary solution to allow creating tf.random.Generator inside
distribution strategies. It will be removed when proper API is in place.
All replicas will have the same RNG state and generate the same random
numbers.
"""
# TODO(b/157995497): Temporarily use primary variable handle inside cross
# replica context.
@property
def state(self):
"""The internal state of the RNG."""
state_var = self._state_var
try:
_ = getattr(state_var, 'handle')
return state_var
except ValueError:
return state_var.values[0]
def _create_variable(self, *args, **kwargs):
# This function does the same thing as the base class's namesake, except
# that it skips the distribution-strategy check. When we are inside a
# distribution-strategy scope, variables.Variable will pick a proper
# variable class (e.g. MirroredVariable).
return variables.Variable(*args, **kwargs)
def make_generator(seed=None):
if seed is not None:  # treat seed=0 as a valid deterministic seed
return _RandomGenerator.from_seed(seed)
else:
return _RandomGenerator.from_non_deterministic_state()
def get_interpolation(interpolation):
interpolation = interpolation.lower()
if interpolation not in _RESIZE_METHODS:
raise NotImplementedError(
'Value not recognized for `interpolation`: {}. Supported values '
'are: {}'.format(interpolation, _RESIZE_METHODS.keys()))
return _RESIZE_METHODS[interpolation]
|
karllessard/tensorflow
|
tensorflow/python/keras/layers/preprocessing/image_preprocessing.py
|
Python
|
apache-2.0
| 54,907
|
[
"Gaussian"
] |
e572344b24224c337f46896795b8f6c70f4c259b47683fb753d698253db32953
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.core.validators
import multiselectfield.db.fields
import re
class Migration(migrations.Migration):
dependencies = [
('main', '0033_auto_20141204_1041'),
]
operations = [
migrations.AlterField(
model_name='contact',
name='comments',
field=models.CharField(blank=True, verbose_name='Additional comments', max_length=255),
preserve_default=True,
),
migrations.AlterField(
model_name='contact',
name='email',
field=models.EmailField(blank=True, verbose_name='Email address', max_length=75),
preserve_default=True,
),
migrations.AlterField(
model_name='contact',
name='first_name',
field=models.CharField(verbose_name='First name', max_length=30),
preserve_default=True,
),
migrations.AlterField(
model_name='contact',
name='languages',
field=multiselectfield.db.fields.MultiSelectField(blank=True, verbose_name='Spoken languages', max_length=8, choices=[('fr', 'French'), ('en', 'English'), ('nl', 'Dutch')]),
preserve_default=True,
),
migrations.AlterField(
model_name='contact',
name='last_name',
field=models.CharField(verbose_name='Name', max_length=30),
preserve_default=True,
),
migrations.AlterField(
model_name='contact',
name='location',
field=models.CharField(blank=True, null=True, verbose_name='Address', max_length=256),
preserve_default=True,
),
migrations.AlterField(
model_name='contact',
name='mobile_number',
field=models.CharField(blank=True, verbose_name='Phone number (mobile)', validators=[django.core.validators.RegexValidator(message="Your phone number must be in format '+99999999'. Up to 15 digits.", regex='^\\+?1?\\d{9,15}$')], max_length=16),
preserve_default=True,
),
migrations.AlterField(
model_name='contact',
name='phone_number',
field=models.CharField(blank=True, verbose_name='Phone number (home)', validators=[django.core.validators.RegexValidator(message="Your phone number must be in format '+99999999'. Up to 15 digits.", regex='^\\+?1?\\d{9,15}$')], max_length=16),
preserve_default=True,
),
migrations.AlterField(
model_name='contact',
name='relationship',
field=models.CharField(blank=True, verbose_name='Your relationship with that person', max_length=255),
preserve_default=True,
),
migrations.AlterField(
model_name='emergencycontact',
name='first_name',
field=models.CharField(verbose_name='First name', max_length=30),
preserve_default=True,
),
migrations.AlterField(
model_name='emergencycontact',
name='languages',
field=multiselectfield.db.fields.MultiSelectField(blank=True, verbose_name='Spoken languages', max_length=8, choices=[('fr', 'French'), ('en', 'English'), ('nl', 'Dutch')]),
preserve_default=True,
),
migrations.AlterField(
model_name='emergencycontact',
name='last_name',
field=models.CharField(verbose_name='Name', max_length=30),
preserve_default=True,
),
migrations.AlterField(
model_name='emergencycontact',
name='location',
field=models.CharField(blank=True, null=True, verbose_name='Address', max_length=256),
preserve_default=True,
),
migrations.AlterField(
model_name='emergencycontact',
name='mobile_number',
field=models.CharField(blank=True, verbose_name='Phone number (mobile)', validators=[django.core.validators.RegexValidator(message="Your phone number must be in format '+99999999'. Up to 15 digits.", regex='^\\+?1?\\d{9,15}$')], max_length=16),
preserve_default=True,
),
migrations.AlterField(
model_name='emergencycontact',
name='order',
field=models.IntegerField(default=0, verbose_name='Priority', choices=[(1, 'First contact'), (2, 'Contact'), (3, 'Last contact')]),
preserve_default=True,
),
migrations.AlterField(
model_name='emergencycontact',
name='phone_number',
field=models.CharField(blank=True, verbose_name='Phone number (home)', validators=[django.core.validators.RegexValidator(message="Your phone number must be in format '+99999999'. Up to 15 digits.", regex='^\\+?1?\\d{9,15}$')], max_length=16),
preserve_default=True,
),
migrations.AlterField(
model_name='user',
name='birth_date',
field=models.DateField(blank=True, null=True, verbose_name='Birthday'),
preserve_default=True,
),
migrations.AlterField(
model_name='user',
name='credit',
field=models.IntegerField(default=0, verbose_name='Remaining credit'),
preserve_default=True,
),
migrations.AlterField(
model_name='user',
name='email',
field=models.EmailField(verbose_name='Email address', max_length=75),
preserve_default=True,
),
migrations.AlterField(
model_name='user',
name='first_name',
field=models.CharField(verbose_name='First name', max_length=30),
preserve_default=True,
),
migrations.AlterField(
model_name='user',
name='how_found',
field=multiselectfield.db.fields.MultiSelectField(verbose_name='How did you hear about care4care?', max_length=41, choices=[('internet', 'The Internet'), ('show', 'A presentation, brochure, flyer,... '), ('branch', 'The local branch'), ('member', 'Another member'), ('friends', 'Friends or family'), ('other', 'Other ...')]),
preserve_default=True,
),
migrations.AlterField(
model_name='user',
name='languages',
field=multiselectfield.db.fields.MultiSelectField(blank=True, verbose_name='Spoken languages', max_length=8, choices=[('fr', 'French'), ('en', 'English'), ('nl', 'Dutch')]),
preserve_default=True,
),
migrations.AlterField(
model_name='user',
name='last_name',
field=models.CharField(verbose_name='Name', max_length=30),
preserve_default=True,
),
migrations.AlterField(
model_name='user',
name='location',
field=models.CharField(blank=True, null=True, verbose_name='Address', max_length=256),
preserve_default=True,
),
migrations.AlterField(
model_name='user',
name='mobile_number',
field=models.CharField(blank=True, verbose_name='Phone number (mobile)', validators=[django.core.validators.RegexValidator(message="Your phone number must be in format '+99999999'. Up to 15 digits.", regex='^\\+?1?\\d{9,15}$')], max_length=16),
preserve_default=True,
),
migrations.AlterField(
model_name='user',
name='phone_number',
field=models.CharField(blank=True, verbose_name='Phone number (home)', validators=[django.core.validators.RegexValidator(message="Your phone number must be in format '+99999999'. Up to 15 digits.", regex='^\\+?1?\\d{9,15}$')], max_length=16),
preserve_default=True,
),
migrations.AlterField(
model_name='user',
name='status',
field=models.IntegerField(default=1, choices=[(1, 'Active'), (2, 'On vacation'), (3, 'Disabled')]),
preserve_default=True,
),
migrations.AlterField(
model_name='user',
name='user_type',
field=models.IntegerField(default=1, help_text='A member can help or be helped while a non-member is a professional who registers to access patient data. Please choose the one that suits you', verbose_name='Account type', choices=[(1, 'Member'), (2, 'Non-member'), (3, 'Verified member')]),
preserve_default=True,
),
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(verbose_name='Username', validators=[django.core.validators.RegexValidator(re.compile('^[\\w.@+-]+$', 32), 'Enter a valid username. No more than 30 characters. There may be numbers and characters @/./+/-/_', 'invalid')], unique=True, max_length=30),
preserve_default=True,
),
migrations.AlterField(
model_name='verifiedinformation',
name='criminal_record',
field=models.FileField(null=True, verbose_name='Criminal record', upload_to='documents/'),
preserve_default=True,
),
migrations.AlterField(
model_name='verifiedinformation',
name='recomendation_letter_1',
field=models.FileField(null=True, verbose_name='Letter of recommendation n°1', upload_to='documents/'),
preserve_default=True,
),
migrations.AlterField(
model_name='verifiedinformation',
name='recomendation_letter_2',
field=models.FileField(null=True, verbose_name='Letter of recommendation n°2', upload_to='documents/'),
preserve_default=True,
),
migrations.AlterField(
model_name='verifieduser',
name='additional_info',
field=models.TextField(blank=True, verbose_name='Additional information', max_length=300),
preserve_default=True,
),
migrations.AlterField(
model_name='verifieduser',
name='can_wheelchair',
field=models.BooleanField(default=False, verbose_name='Can you carry a wheelchair in your car?', choices=[(True, 'Yes'), (False, 'No')]),
preserve_default=True,
),
migrations.AlterField(
model_name='verifieduser',
name='drive_license',
field=multiselectfield.db.fields.MultiSelectField(blank=True, verbose_name='Type of driving license', max_length=11, choices=[(1, 'Moped'), (2, 'Motorcycle'), (3, 'Car'), (4, 'Truck'), (5, 'Bus'), (6, 'Tractor')]),
preserve_default=True,
),
migrations.AlterField(
model_name='verifieduser',
name='have_car',
field=models.BooleanField(default=False, verbose_name='Do you have a car?', choices=[(True, 'Yes'), (False, 'No')]),
preserve_default=True,
),
migrations.AlterField(
model_name='verifieduser',
name='hobbies',
field=models.TextField(blank=True, verbose_name='Your hobby', max_length=200),
preserve_default=True,
),
migrations.AlterField(
model_name='verifieduser',
name='mail_preferences',
field=models.IntegerField(default=1, verbose_name='Receive my messages', choices=[(1, 'Message box'), (2, 'Mail')]),
preserve_default=True,
),
migrations.AlterField(
model_name='verifieduser',
name='offered_job',
field=multiselectfield.db.fields.MultiSelectField(blank=True, verbose_name='What jobs do you want to do?', max_length=21, choices=[('1', 'Visit home'), ('2', 'Companionship'), ('3', 'Transport by car'), ('4', 'Shopping'), ('5', 'House sitting'), ('6', 'Manual jobs'), ('7', 'Gardening'), ('8', 'Pet sitting'), ('9', 'Personal care'), ('a', 'Administrative'), ('b', 'Other ...')]),
preserve_default=True,
),
migrations.AlterField(
model_name='verifieduser',
name='receive_help_from_who',
field=models.IntegerField(default=5, verbose_name='Receive offers and requests', choices=[(5, 'All'), (3, 'Verified member'), (6, 'My favorite members')]),
preserve_default=True,
),
]
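# Illustrative sketch (not part of the generated migration; assumes stock
# Django): the phone-number fields above share one RegexValidator, which
# behaves roughly like this:
#
#     from django.core.validators import RegexValidator
#     phone = RegexValidator(regex=r'^\+?1?\d{9,15}$',
#                            message="Your phone number must be in format '+99999999'. Up to 15 digits.")
#     phone('+32470123456')  # 11 digits after the optional '+': passes silently
#     phone('12ab34')        # raises django.core.exceptions.ValidationError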
|
MaximeBiset/care4care
|
main/migrations/0034_auto_20141204_1122.py
|
Python
|
agpl-3.0
| 12,435
|
[
"VisIt"
] |
9bdd869efd10c0e320e2462adbcb55752b17dd66d6d668216196f6b23bfbee50
|
#
# Copyright 2017-2021 European Centre for Medium-Range Weather Forecasts (ECMWF).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# Alessandro Amici - B-Open - https://bopen.eu
# Aureliana Barghini - B-Open - https://bopen.eu
#
import datetime
import json
import logging
import os
import typing as T
import attr
import numpy as np
from . import __version__, abc, cfmessage, messages
LOG = logging.getLogger(__name__)
#
# Edition-independent keys in ecCodes namespaces. Documented in:
# https://software.ecmwf.int/wiki/display/ECC/GRIB%3A+Namespaces
#
GLOBAL_ATTRIBUTES_KEYS = ["edition", "centre", "centreDescription", "subCentre"]
DATA_ATTRIBUTES_KEYS = [
"paramId",
"dataType",
"numberOfPoints",
"typeOfLevel",
"stepUnits",
"stepType",
"gridType",
]
EXTRA_DATA_ATTRIBUTES_KEYS = [
"shortName",
"units",
"name",
"cfName",
"cfVarName",
"missingValue",
"totalNumber",
"numberOfDirections",
"numberOfFrequencies",
"NV",
"gridDefinitionDescription",
]
GRID_TYPE_MAP = {
"regular_ll": [
"Nx",
"iDirectionIncrementInDegrees",
"iScansNegatively",
"longitudeOfFirstGridPointInDegrees",
"longitudeOfLastGridPointInDegrees",
"Ny",
"jDirectionIncrementInDegrees",
"jPointsAreConsecutive",
"jScansPositively",
"latitudeOfFirstGridPointInDegrees",
"latitudeOfLastGridPointInDegrees",
],
"rotated_ll": [
"Nx",
"Ny",
"angleOfRotationInDegrees",
"iDirectionIncrementInDegrees",
"iScansNegatively",
"jDirectionIncrementInDegrees",
"jPointsAreConsecutive",
"jScansPositively",
"latitudeOfFirstGridPointInDegrees",
"latitudeOfLastGridPointInDegrees",
"latitudeOfSouthernPoleInDegrees",
"longitudeOfFirstGridPointInDegrees",
"longitudeOfLastGridPointInDegrees",
"longitudeOfSouthernPoleInDegrees",
],
"reduced_ll": [
"Ny",
"jDirectionIncrementInDegrees",
"jPointsAreConsecutive",
"jScansPositively",
"latitudeOfFirstGridPointInDegrees",
"latitudeOfLastGridPointInDegrees",
],
"regular_gg": [
"Nx",
"iDirectionIncrementInDegrees",
"iScansNegatively",
"longitudeOfFirstGridPointInDegrees",
"longitudeOfLastGridPointInDegrees",
"N",
"Ny",
],
"rotated_gg": [
"Nx",
"Ny",
"angleOfRotationInDegrees",
"iDirectionIncrementInDegrees",
"iScansNegatively",
"jPointsAreConsecutive",
"jScansPositively",
"latitudeOfFirstGridPointInDegrees",
"latitudeOfLastGridPointInDegrees",
"latitudeOfSouthernPoleInDegrees",
"longitudeOfFirstGridPointInDegrees",
"longitudeOfLastGridPointInDegrees",
"longitudeOfSouthernPoleInDegrees",
"N",
],
"lambert": [
"LaDInDegrees",
"LoVInDegrees",
"iScansNegatively",
"jPointsAreConsecutive",
"jScansPositively",
"latitudeOfFirstGridPointInDegrees",
"latitudeOfSouthernPoleInDegrees",
"longitudeOfFirstGridPointInDegrees",
"longitudeOfSouthernPoleInDegrees",
"DyInMetres",
"DxInMetres",
"Latin2InDegrees",
"Latin1InDegrees",
"Ny",
"Nx",
],
"reduced_gg": ["N", "pl"],
"sh": ["M", "K", "J"],
}
GRID_TYPE_KEYS = sorted(set(k for _, ks in GRID_TYPE_MAP.items() for k in ks))
ENSEMBLE_KEYS = ["number"]
VERTICAL_KEYS = ["level:float"]
DATA_TIME_KEYS = ["dataDate", "dataTime", "endStep"]
ALL_REF_TIME_KEYS = [
"time",
"step",
"valid_time",
"verifying_time",
"forecastMonth",
"indexing_time",
]
SPECTRA_KEYS = ["directionNumber", "frequencyNumber"]
ALL_HEADER_DIMS = ENSEMBLE_KEYS + VERTICAL_KEYS + SPECTRA_KEYS
INDEX_KEYS = sorted(
GLOBAL_ATTRIBUTES_KEYS + DATA_ATTRIBUTES_KEYS + DATA_TIME_KEYS + ALL_HEADER_DIMS
)
COORD_ATTRS = {
# geography
"latitude": {"units": "degrees_north", "standard_name": "latitude", "long_name": "latitude"},
"longitude": {"units": "degrees_east", "standard_name": "longitude", "long_name": "longitude"},
# vertical
"depthBelowLand": {
"units": "m",
"positive": "down",
"long_name": "soil depth",
"standard_name": "depth",
},
"depthBelowLandLayer": {
"units": "m",
"positive": "down",
"long_name": "soil depth",
"standard_name": "depth",
},
"hybrid": {
"units": "1",
"positive": "down",
"long_name": "hybrid level",
"standard_name": "atmosphere_hybrid_sigma_pressure_coordinate",
},
"heightAboveGround": {
"units": "m",
"positive": "up",
"long_name": "height above the surface",
"standard_name": "height",
},
"isobaricInhPa": {
"units": "hPa",
"positive": "down",
"stored_direction": "decreasing",
"standard_name": "air_pressure",
"long_name": "pressure",
},
"isobaricInPa": {
"units": "Pa",
"positive": "down",
"stored_direction": "decreasing",
"standard_name": "air_pressure",
"long_name": "pressure",
},
"isobaricLayer": {
"units": "Pa",
"positive": "down",
"standard_name": "air_pressure",
"long_name": "pressure",
},
# ensemble
"number": {
"units": "1",
"standard_name": "realization",
"long_name": "ensemble member numerical id",
},
# time
"step": {
"units": "hours",
"standard_name": "forecast_period",
"long_name": "time since forecast_reference_time",
},
"time": {
"units": "seconds since 1970-01-01T00:00:00",
"calendar": "proleptic_gregorian",
"standard_name": "forecast_reference_time",
"long_name": "initial time of forecast",
},
"indexing_time": {
"units": "seconds since 1970-01-01T00:00:00",
"calendar": "proleptic_gregorian",
"standard_name": "forecast_reference_time",
"long_name": "nominal initial time of forecast",
},
"valid_time": {
"units": "seconds since 1970-01-01T00:00:00",
"calendar": "proleptic_gregorian",
"standard_name": "time",
"long_name": "time",
},
"verifying_time": {
"units": "seconds since 1970-01-01T00:00:00",
"calendar": "proleptic_gregorian",
"standard_name": "time",
"long_name": "time",
},
"forecastMonth": {"units": "1", "long_name": "months since forecast_reference_time"},
}
class DatasetBuildError(ValueError):
def __str__(self) -> str:
return str(self.args[0])
def enforce_unique_attributes(index, attributes_keys, filter_by_keys={}):
# type: (T.Mapping[str, T.List[T.Any]], T.Sequence[str], T.Dict[str, T.Any]) -> T.Dict[str, T.Any]
attributes = {} # type: T.Dict[str, T.Any]
for key in attributes_keys:
values = index.get(key, [])
if len(values) > 1:
fbks = []
for value in values:
fbk = {key: value}
fbk.update(filter_by_keys)
fbks.append(fbk)
raise DatasetBuildError("multiple values for key %r" % key, key, fbks)
if values and values[0] not in ("undef", "unknown"):
attributes["GRIB_" + key] = values[0]
return attributes
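# Illustrative sketch (not part of the original module): enforce_unique_attributes
# turns single-valued index keys into GRIB_-prefixed attributes and refuses
# ambiguous ones. With a toy mapping standing in for the index:
#
#     >>> enforce_unique_attributes({"centre": ["ecmf"]}, ["centre"])
#     {'GRIB_centre': 'ecmf'}
#
# whereas {"dataType": ["an", "fc"]} would raise DatasetBuildError carrying the
# candidate filters ({'dataType': 'an'} and {'dataType': 'fc'}).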
@attr.attrs(auto_attribs=True, eq=False)
class Variable:
dimensions: T.Tuple[str, ...]
data: np.ndarray
attributes: T.Dict[str, T.Any] = attr.attrib(default={}, repr=False)
def __eq__(self, other):
# type: (T.Any) -> bool
if other.__class__ is not self.__class__:
return NotImplemented
equal = (self.dimensions, self.attributes) == (other.dimensions, other.attributes)
return equal and np.array_equal(self.data, other.data)
def expand_item(item, shape):
# type: (T.Tuple[T.Any, ...], T.Sequence[int]) -> T.Tuple[T.List[int], ...]
expanded_item = []
for i, size in zip(item, shape):
if isinstance(i, (list, np.ndarray)):
expanded_item.append([int(e) for e in i])
elif isinstance(i, slice):
expanded_item.append(list(range(i.start or 0, i.stop or size, i.step or 1)))
elif isinstance(i, int):
expanded_item.append([i])
else:
raise TypeError("Unsupported index type %r" % type(i))
return tuple(expanded_item)
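# Illustrative sketch: expand_item normalises a mixed tuple of ints, slices and
# lists into explicit per-dimension index lists, e.g.
#
#     >>> expand_item((0, slice(None), [0, 2]), (2, 3, 4))
#     ([0], [0, 1, 2], [0, 2])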
@attr.attrs(auto_attribs=True)
class OnDiskArray:
index: abc.Index[T.Any, abc.Field]
shape: T.Tuple[int, ...]
field_id_index: T.Dict[
T.Tuple[T.Any, ...], T.List[T.Union[int, T.Tuple[int, int]]]
] = attr.attrib(repr=False)
missing_value: float
geo_ndim: int = attr.attrib(default=1, repr=False)
dtype = np.dtype("float32")
def build_array(self) -> np.ndarray:
"""Helper method used to test __getitem__"""
array = np.full(self.shape, fill_value=np.nan, dtype="float32")
for header_indexes, message_ids in self.field_id_index.items():
# NOTE: fill a single field as found in the message
message = self.index.get_field(message_ids[0]) # type: ignore
values = message["values"]
array.__getitem__(header_indexes).flat[:] = values
array[array == self.missing_value] = np.nan
return array
def __getitem__(self, item):
# type: (T.Tuple[T.Any, ...]) -> np.ndarray
header_item_list = expand_item(item[: -self.geo_ndim], self.shape)
header_item = [{ix: i for i, ix in enumerate(it)} for it in header_item_list]
array_field_shape = tuple(len(i) for i in header_item_list) + self.shape[-self.geo_ndim :]
array_field = np.full(array_field_shape, fill_value=np.nan, dtype="float32")
for header_indexes, message_ids in self.field_id_index.items():
try:
array_field_indexes = [it[ix] for it, ix in zip(header_item, header_indexes)]
except KeyError:
continue
# NOTE: fill a single field as found in the message
message = self.index.get_field(message_ids[0]) # type: ignore
values = message["values"]
array_field.__getitem__(tuple(array_field_indexes)).flat[:] = values
array = np.asarray(array_field[(Ellipsis,) + item[-self.geo_ndim :]])
array[array == self.missing_value] = np.nan
for i, it in reversed(list(enumerate(item[: -self.geo_ndim]))):
if isinstance(it, int):
array = array[(slice(None, None, None),) * i + (0,)]
return array
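# Illustrative note: because __getitem__ skips (via the KeyError above) any
# field whose header indexes fall outside the requested selection, a selection
# such as arr[0:2, :, :] on an array with a leading ensemble dimension of size
# 10 decodes only the GRIB messages of the first two members; the rest stay on
# disk.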
GRID_TYPES_DIMENSION_COORDS = {"regular_ll", "regular_gg"}
GRID_TYPES_2D_NON_DIMENSION_COORDS = {
"rotated_ll",
"rotated_gg",
"lambert",
"lambert_azimuthal_equal_area",
"albers",
"polar_stereographic",
}
def build_geography_coordinates(
first: abc.Field, encode_cf: T.Sequence[str], errors: str, log: logging.Logger = LOG,
) -> T.Tuple[T.Tuple[str, ...], T.Tuple[int, ...], T.Dict[str, Variable]]:
geo_coord_vars = {} # type: T.Dict[str, Variable]
grid_type = first["gridType"]
if "geography" in encode_cf and grid_type in GRID_TYPES_DIMENSION_COORDS:
geo_dims = ("latitude", "longitude") # type: T.Tuple[str, ...]
geo_shape = (first["Ny"], first["Nx"]) # type: T.Tuple[int, ...]
latitudes = np.array(first["distinctLatitudes"], ndmin=1)
geo_coord_vars["latitude"] = Variable(
dimensions=("latitude",), data=latitudes, attributes=COORD_ATTRS["latitude"].copy()
)
if latitudes[0] > latitudes[-1]:
geo_coord_vars["latitude"].attributes["stored_direction"] = "decreasing"
geo_coord_vars["longitude"] = Variable(
dimensions=("longitude",),
data=np.array(first["distinctLongitudes"], ndmin=1),
attributes=COORD_ATTRS["longitude"],
)
elif "geography" in encode_cf and grid_type in GRID_TYPES_2D_NON_DIMENSION_COORDS:
geo_dims = ("y", "x")
geo_shape = (first["Ny"], first["Nx"])
try:
geo_coord_vars["latitude"] = Variable(
dimensions=("y", "x"),
data=np.array(first["latitudes"]).reshape(geo_shape),
attributes=COORD_ATTRS["latitude"],
)
geo_coord_vars["longitude"] = Variable(
dimensions=("y", "x"),
data=np.array(first["longitudes"]).reshape(geo_shape),
attributes=COORD_ATTRS["longitude"],
)
except KeyError: # pragma: no cover
if errors != "ignore":
log.warning("ecCodes provides no latitudes/longitudes for gridType=%r", grid_type)
else:
geo_dims = ("values",)
geo_shape = (first["numberOfPoints"],)
# add secondary coordinates if ecCodes provides them
try:
latitude = first["latitudes"]
geo_coord_vars["latitude"] = Variable(
dimensions=("values",), data=np.array(latitude), attributes=COORD_ATTRS["latitude"]
)
longitude = first["longitudes"]
geo_coord_vars["longitude"] = Variable(
dimensions=("values",),
data=np.array(longitude),
attributes=COORD_ATTRS["longitude"],
)
except KeyError: # pragma: no cover
if errors != "ignore":
log.warning("ecCodes provides no latitudes/longitudes for gridType=%r", grid_type)
return geo_dims, geo_shape, geo_coord_vars
def encode_cf_first(data_var_attrs, encode_cf=("parameter", "time"), time_dims=("time", "step")):
# type: (T.MutableMapping[str, T.Any], T.Sequence[str], T.Sequence[str]) -> T.List[str]
coords_map = ENSEMBLE_KEYS[:]
param_id = data_var_attrs.get("GRIB_paramId", "undef")
data_var_attrs["long_name"] = "original GRIB paramId: %s" % param_id
data_var_attrs["units"] = "1"
if "parameter" in encode_cf:
if "GRIB_cfName" in data_var_attrs:
data_var_attrs["standard_name"] = data_var_attrs["GRIB_cfName"]
if "GRIB_name" in data_var_attrs:
data_var_attrs["long_name"] = data_var_attrs["GRIB_name"]
if "GRIB_units" in data_var_attrs:
data_var_attrs["units"] = data_var_attrs["GRIB_units"]
if "time" in encode_cf:
if set(time_dims).issubset(ALL_REF_TIME_KEYS):
coords_map.extend(time_dims)
else:
raise ValueError("time_dims %r not a subset of %r" % (time_dims, ALL_REF_TIME_KEYS))
else:
coords_map.extend(DATA_TIME_KEYS)
coords_map.extend(VERTICAL_KEYS)
coords_map.extend(SPECTRA_KEYS)
return coords_map
def read_data_var_attrs(first: abc.Field, extra_keys: T.List[str]) -> T.Dict[str, T.Any]:
attributes = {}
for key in extra_keys:
try:
value = first[key]
if value is not None:
attributes["GRIB_" + key] = value
except Exception:
pass
return attributes
def build_variable_components(
index: abc.Index[T.Any, abc.Field],
encode_cf: T.Sequence[str] = (),
filter_by_keys: T.Dict[str, T.Any] = {},
log: logging.Logger = LOG,
errors: str = "warn",
squeeze: bool = True,
read_keys: T.Iterable[str] = (),
time_dims: T.Sequence[str] = ("time", "step"),
extra_coords: T.Dict[str, str] = {},
) -> T.Tuple[T.Dict[str, int], Variable, T.Dict[str, Variable]]:
data_var_attrs = enforce_unique_attributes(index, DATA_ATTRIBUTES_KEYS, filter_by_keys)
grid_type_keys = GRID_TYPE_MAP.get(index.getone("gridType"), [])
extra_keys = sorted(list(read_keys) + EXTRA_DATA_ATTRIBUTES_KEYS + grid_type_keys)
first = index.first()
extra_attrs = read_data_var_attrs(first, extra_keys)
data_var_attrs.update(**extra_attrs)
coords_map = encode_cf_first(data_var_attrs, encode_cf, time_dims)
coord_name_key_map = {}
coord_vars = {}
for coord_key in coords_map:
values = index[coord_key]
if len(values) == 1 and values[0] == "undef":
log.debug("missing from GRIB stream: %r" % coord_key)
continue
orig_name = coord_key.partition(":")[0]
coord_name = orig_name
if (
"vertical" in encode_cf
and coord_name == "level"
and "GRIB_typeOfLevel" in data_var_attrs
):
coord_name = data_var_attrs["GRIB_typeOfLevel"]
coord_name_key_map[coord_name] = coord_key
attributes = {
"long_name": "original GRIB coordinate for key: %s(%s)" % (orig_name, coord_name),
"units": "1",
}
attributes.update(COORD_ATTRS.get(coord_name, {}).copy())
data = np.array(sorted(values, reverse=attributes.get("stored_direction") == "decreasing"))
dimensions = (coord_name,) # type: T.Tuple[str, ...]
if squeeze and len(values) == 1:
data = data[0]
dimensions = ()
coord_vars[coord_name] = Variable(dimensions=dimensions, data=data, attributes=attributes)
header_dimensions = tuple(d for d, c in coord_vars.items() if not squeeze or c.data.size > 1)
header_shape = tuple(coord_vars[d].data.size for d in header_dimensions)
geo_dims, geo_shape, geo_coord_vars = build_geography_coordinates(first, encode_cf, errors)
dimensions = header_dimensions + geo_dims
shape = header_shape + geo_shape
coord_vars.update(geo_coord_vars)
offsets = {} # type: T.Dict[T.Tuple[int, ...], T.List[T.Union[int, T.Tuple[int, int]]]]
header_value_index = {}
extra_coords_data: T.Dict[str, T.Dict[str, T.Any]] = {
coord_name: {} for coord_name in extra_coords
}
extra_dims = tuple(extra_coords.values())
for dim in header_dimensions + extra_dims:
if np.isscalar(coord_vars[dim].data):
header_value_index[dim] = {coord_vars[dim].data.item(): 0}
else:
header_value_index[dim] = {v: i for i, v in enumerate(coord_vars[dim].data.tolist())}
for header_values, message_ids in index.iter_index():
header_indexes = [] # type: T.List[int]
for dim in header_dimensions + extra_dims:
header_value = header_values[index.index_keys.index(coord_name_key_map.get(dim, dim))]
if dim in header_dimensions:
header_indexes.append(header_value_index[dim][header_value])
for coord_name in extra_coords:
coord_value = header_values[
index.index_keys.index(coord_name_key_map.get(coord_name, coord_name))
]
if dim == extra_coords[coord_name]:
saved_coord_value = extra_coords_data[coord_name].get(
header_value, coord_value
)
if saved_coord_value != coord_value:
raise ValueError(
f"'{coord_name}' cannot be indexed by dimension '{extra_coords[coord_name]}': \n"
f"found two '{coord_name}' distinct values ({saved_coord_value}, {coord_value}) "
f"for '{extra_coords[coord_name]}' value {header_value}."
)
extra_coords_data[coord_name][header_value] = coord_value
offsets[tuple(header_indexes)] = message_ids
missing_value = data_var_attrs.get("missingValue", 9999)
on_disk_array = OnDiskArray(
index=index,
shape=shape,
field_id_index=offsets,
missing_value=missing_value,
geo_ndim=len(geo_dims),
)
if "time" in coord_vars and "step" in coord_vars:
# add the 'valid_time' secondary coordinate
time_dims, time_data = cfmessage.build_valid_time(
coord_vars["time"].data, coord_vars["step"].data,
)
attrs = COORD_ATTRS["valid_time"]
coord_vars["valid_time"] = Variable(dimensions=time_dims, data=time_data, attributes=attrs)
for coord_name in extra_coords:
coord_data = np.array(list(extra_coords_data[coord_name].values()))
if extra_coords[coord_name] not in header_dimensions:
coord_dims: T.Tuple[str, ...] = ()
coord_data = coord_data.reshape(())
else:
coord_dims = (extra_coords[coord_name],)
attrs = COORD_ATTRS.get(coord_name, {}).copy()
coord_vars[coord_name] = Variable(dimensions=coord_dims, data=coord_data, attributes=attrs)
data_var_attrs["coordinates"] = " ".join(coord_vars.keys())
# OnDiskArray is close enough to np.ndarray to work, but not to make mypy happy
data_var = Variable(dimensions=dimensions, data=on_disk_array, attributes=data_var_attrs) # type: ignore
dims = {d: s for d, s in zip(dimensions, data_var.data.shape)}
return dims, data_var, coord_vars
def dict_merge(master, update):
# type: (T.Dict[str, T.Any], T.Dict[str, T.Any]) -> None
for key, value in update.items():
if key not in master:
master[key] = value
elif master[key] == value:
pass
else:
raise DatasetBuildError(
"key present and new value is different: "
"key=%r value=%r new_value=%r" % (key, master[key], value)
)
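# Illustrative sketch: dict_merge extends `master` in place and only complains
# on conflicting values, e.g.
#
#     >>> master = {"a": 1}
#     >>> dict_merge(master, {"b": 2})   # master is now {'a': 1, 'b': 2}
#     >>> dict_merge(master, {"a": 2})   # raises DatasetBuildError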
def build_dataset_attributes(index, filter_by_keys, encoding):
# type: (abc.Index[T.Any, abc.Field], T.Dict[str, T.Any], T.Dict[str, T.Any]) -> T.Dict[str, T.Any]
attributes = enforce_unique_attributes(index, GLOBAL_ATTRIBUTES_KEYS, filter_by_keys)
attributes["Conventions"] = "CF-1.7"
if "GRIB_centreDescription" in attributes:
attributes["institution"] = attributes["GRIB_centreDescription"]
attributes_namespace = {
"cfgrib_version": __version__,
"cfgrib_open_kwargs": json.dumps(encoding),
"eccodes_version": messages.eccodes_version,
"timestamp": datetime.datetime.now().isoformat().partition(".")[0][:16],
}
history_in = (
"{timestamp} GRIB to CDM+CF via "
"cfgrib-{cfgrib_version}/ecCodes-{eccodes_version} with {cfgrib_open_kwargs}"
)
attributes["history"] = history_in.format(**attributes_namespace)
return attributes
def build_dataset_components(
index: abc.Index[T.Any, abc.Field],
errors: str = "warn",
encode_cf: T.Sequence[str] = ("parameter", "time", "geography", "vertical"),
squeeze: bool = True,
log: logging.Logger = LOG,
read_keys: T.Iterable[str] = (),
time_dims: T.Sequence[str] = ("time", "step"),
extra_coords: T.Dict[str, str] = {},
) -> T.Tuple[T.Dict[str, int], T.Dict[str, Variable], T.Dict[str, T.Any], T.Dict[str, T.Any]]:
dimensions = {} # type: T.Dict[str, int]
variables = {} # type: T.Dict[str, Variable]
filter_by_keys = index.filter_by_keys
for param_id in index.get("paramId", []):
var_index = index.subindex(paramId=param_id)
try:
dims, data_var, coord_vars = build_variable_components(
var_index,
encode_cf,
filter_by_keys,
errors=errors,
squeeze=squeeze,
read_keys=read_keys,
time_dims=time_dims,
extra_coords=extra_coords,
)
except DatasetBuildError as ex:
# NOTE: When a variable has more than one value for an attribute we need to raise all
# the values in the file, not just the ones associated with that variable. See #54.
key = ex.args[1]
error_message = "multiple values for unique key, try re-open the file with one of:"
fbks = []
for value in index[key]:
fbk = {key: value}
fbk.update(filter_by_keys)
fbks.append(fbk)
error_message += "\n filter_by_keys=%r" % fbk
raise DatasetBuildError(error_message, key, fbks)
short_name = data_var.attributes.get("GRIB_shortName", "paramId_%d" % param_id)
var_name = data_var.attributes.get("GRIB_cfVarName", "unknown")
if "parameter" in encode_cf and var_name not in ("undef", "unknown"):
short_name = var_name
try:
dict_merge(variables, coord_vars)
dict_merge(variables, {short_name: data_var})
dict_merge(dimensions, dims)
except ValueError:
if errors == "ignore":
pass
elif errors == "raise":
raise
else:
log.exception("skipping variable: paramId==%r shortName=%r", param_id, short_name)
encoding = {
"source": index.source(),
"filter_by_keys": filter_by_keys,
"encode_cf": encode_cf,
}
attributes = build_dataset_attributes(index, filter_by_keys, encoding)
return dimensions, variables, attributes, encoding
@attr.attrs(auto_attribs=True)
class Dataset:
"""
Map a GRIB file to the NetCDF Common Data Model with CF Conventions.
"""
dimensions: T.Dict[str, int]
variables: T.Dict[str, Variable]
attributes: T.Dict[str, T.Any]
encoding: T.Dict[str, T.Any]
def compute_index_keys(
time_dims: T.Sequence[str] = ("time", "step"),
extra_coords: T.Dict[str, str] = {},
filter_by_keys: T.Dict[str, T.Any] = {},
) -> T.List[str]:
return sorted(set(INDEX_KEYS) | set(filter_by_keys) | set(time_dims) | set(extra_coords))
def open_from_index(
index: abc.Index[T.Any, abc.Field],
read_keys: T.Sequence[str] = (),
time_dims: T.Sequence[str] = ("time", "step"),
extra_coords: T.Dict[str, str] = {},
**kwargs: T.Any,
) -> Dataset:
dimensions, variables, attributes, encoding = build_dataset_components(
index, read_keys=read_keys, time_dims=time_dims, extra_coords=extra_coords, **kwargs
)
return Dataset(dimensions, variables, attributes, encoding)
def open_fieldset(
fieldset: T.Union[abc.Fieldset[abc.Field], abc.MappingFieldset[T.Any, abc.Field]],
indexpath: T.Optional[str] = None,
filter_by_keys: T.Dict[str, T.Any] = {},
read_keys: T.Sequence[str] = (),
time_dims: T.Sequence[str] = ("time", "step"),
extra_coords: T.Dict[str, str] = {},
computed_keys: messages.ComputedKeysType = cfmessage.COMPUTED_KEYS,
log: logging.Logger = LOG,
**kwargs: T.Any,
) -> Dataset:
"""Builds a ``cfgrib.Dataset`` form a mapping of mappings."""
if indexpath is not None and indexpath is not messages.DEFAULT_INDEXPATH:
log.warning(f"indexpath value {indexpath} is ignored")
index_keys = compute_index_keys(time_dims, extra_coords, filter_by_keys)
index = messages.FieldsetIndex.from_fieldset(fieldset, index_keys, computed_keys)
filtered_index = index.subindex(filter_by_keys)
return open_from_index(filtered_index, read_keys, time_dims, extra_coords, **kwargs)
def open_fileindex(
stream: messages.FileStream,
indexpath: str = messages.DEFAULT_INDEXPATH,
index_keys: T.Sequence[str] = INDEX_KEYS + ["time", "step"],
filter_by_keys: T.Dict[str, T.Any] = {},
computed_keys: messages.ComputedKeysType = cfmessage.COMPUTED_KEYS,
) -> messages.FileIndex:
index_keys = sorted(set(index_keys) | set(filter_by_keys))
index = messages.FileIndex.from_indexpath_or_filestream(
stream, index_keys, indexpath=indexpath, computed_keys=computed_keys
)
return index.subindex(filter_by_keys)
def open_file(
path: T.Union[str, "os.PathLike[str]"],
grib_errors: str = "warn",
indexpath: str = messages.DEFAULT_INDEXPATH,
filter_by_keys: T.Dict[str, T.Any] = {},
read_keys: T.Sequence[str] = (),
time_dims: T.Sequence[str] = ("time", "step"),
extra_coords: T.Dict[str, str] = {},
**kwargs: T.Any,
) -> Dataset:
"""Open a GRIB file as a ``cfgrib.Dataset``."""
path = os.fspath(path)
stream = messages.FileStream(path, errors=grib_errors)
index_keys = compute_index_keys(time_dims, extra_coords)
index = open_fileindex(stream, indexpath, index_keys, filter_by_keys=filter_by_keys)
return open_from_index(index, read_keys, time_dims, extra_coords, **kwargs)
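# Illustrative usage sketch ('era5.grib' and the shown sizes are hypothetical):
#
#     >>> ds = open_file("era5.grib", filter_by_keys={"typeOfLevel": "surface"})
#     >>> ds.dimensions          # e.g. {'time': 4, 'latitude': 361, 'longitude': 720}
#     >>> sorted(ds.variables)   # coordinates plus one Variable per GRIB shortName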
|
ecmwf/cfgrib
|
cfgrib/dataset.py
|
Python
|
apache-2.0
| 28,814
|
[
"NetCDF"
] |
64cddddc8036141919e096ecded5b750630119cee3d35c9ac3609d88bb46f8f8
|
# coding=utf-8
# (The line above is necessary so that I can use 世界 in the
# *comment* below without Python getting all bent out of shape.)
# Copyright 2007-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Mercurial interface to codereview.appspot.com.
To configure, set the following options in
your repository's .hg/hgrc file.
[extensions]
codereview = /path/to/codereview.py
[codereview]
server = codereview.appspot.com
The server should be running Rietveld; see http://code.google.com/p/rietveld/.
In addition to the new commands, this extension introduces
the file pattern syntax @nnnnnn, where nnnnnn is a change list
number, to mean the files included in that change list, which
must be associated with the current client.
For example, if change 123456 contains the files x.go and y.go,
"hg diff @123456" is equivalent to"hg diff x.go y.go".
'''
import sys
if __name__ == "__main__":
print >>sys.stderr, "This is a Mercurial extension and should not be invoked directly."
sys.exit(2)
# We require Python 2.6 for the json package.
if sys.version < '2.6':
print >>sys.stderr, "The codereview extension requires Python 2.6 or newer."
print >>sys.stderr, "You are running Python " + sys.version
sys.exit(2)
import json
import os
import re
import stat
import subprocess
import threading
import time
from mercurial import commands as hg_commands
from mercurial import util as hg_util
defaultcc = None
codereview_disabled = None
real_rollback = None
releaseBranch = None
server = "codereview.appspot.com"
server_url_base = None
#######################################################################
# Normally I would split this into multiple files, but it simplifies
# import path headaches to keep it all in one file. Sorry.
# The different parts of the file are separated by banners like this one.
#######################################################################
# Helpers
def RelativePath(path, cwd):
n = len(cwd)
if path.startswith(cwd) and path[n] == '/':
return path[n+1:]
return path
def Sub(l1, l2):
return [l for l in l1 if l not in l2]
def Add(l1, l2):
l = l1 + Sub(l2, l1)
l.sort()
return l
def Intersect(l1, l2):
return [l for l in l1 if l in l2]
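# Illustrative sketch of the list helpers above: with l1 = ["a", "b"] and
# l2 = ["b", "c"]:
#
#     Sub(l1, l2)       == ["a"]
#     Add(l1, l2)       == ["a", "b", "c"]   (sorted union)
#     Intersect(l1, l2) == ["b"]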
#######################################################################
# RE: UNICODE STRING HANDLING
#
# Python distinguishes between the str (string of bytes)
# and unicode (string of code points) types. Most operations
# work on either one just fine, but some (like regexp matching)
# require unicode, and others (like write) require str.
#
# As befits the language, Python hides the distinction between
# unicode and str by converting between them silently, but
# *only* if all the bytes/code points involved are 7-bit ASCII.
# This means that if you're not careful, your program works
# fine on "hello, world" and fails on "hello, 世界". And of course,
# the obvious way to be careful - use static types - is unavailable.
# So the only way is trial and error to find where to put explicit
# conversions.
#
# Because more functions do implicit conversion to str (string of bytes)
# than do implicit conversion to unicode (string of code points),
# the convention in this module is to represent all text as str,
# converting to unicode only when calling a unicode-only function
# and then converting back to str as soon as possible.
def typecheck(s, t):
if type(s) != t:
raise hg_util.Abort("type check failed: %s has type %s != %s" % (repr(s), type(s), t))
# If we have to pass unicode instead of str, ustr does that conversion clearly.
def ustr(s):
typecheck(s, str)
return s.decode("utf-8")
# Even with those, Mercurial still sometimes turns unicode into str
# and then tries to use it as ascii. Change Mercurial's default.
def set_mercurial_encoding_to_utf8():
from mercurial import encoding
encoding.encoding = 'utf-8'
set_mercurial_encoding_to_utf8()
# Even with those we still run into problems.
# I tried to do things by the book but could not convince
# Mercurial to let me check in a change with UTF-8 in the
# CL description or author field, no matter how many conversions
# between str and unicode I inserted and despite changing the
# default encoding. I'm tired of this game, so set the default
# encoding for all of Python to 'utf-8', not 'ascii'.
def default_to_utf8():
import sys
stdout, __stdout__ = sys.stdout, sys.__stdout__
reload(sys) # site.py deleted setdefaultencoding; get it back
sys.stdout, sys.__stdout__ = stdout, __stdout__
sys.setdefaultencoding('utf-8')
default_to_utf8()
#######################################################################
# Status printer for long-running commands
global_status = None
def set_status(s):
# print >>sys.stderr, "\t", time.asctime(), s
global global_status
global_status = s
class StatusThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
def run(self):
# pause a reasonable amount of time before
# starting to display status messages, so that
# most hg commands won't ever see them.
time.sleep(30)
# now show status every 15 seconds
while True:
time.sleep(15 - time.time() % 15)
s = global_status
if s is None:
continue
if s == "":
s = "(unknown status)"
print >>sys.stderr, time.asctime(), s
def start_status_thread():
t = StatusThread()
t.setDaemon(True) # allowed to exit if t is still running
t.start()
#######################################################################
# Change list parsing.
#
# Change lists are stored in .hg/codereview/cl.nnnnnn
# where nnnnnn is the number assigned by the code review server.
# Most data about a change list is stored on the code review server
# too: the description, reviewer, and cc list are all stored there.
# The only thing in the cl.nnnnnn file is the list of relevant files.
# Also, the existence of the cl.nnnnnn file marks this repository
# as the one where the change list lives.
emptydiff = """Index: ~rietveld~placeholder~
===================================================================
diff --git a/~rietveld~placeholder~ b/~rietveld~placeholder~
new file mode 100644
"""
class CL(object):
def __init__(self, name):
typecheck(name, str)
self.name = name
self.desc = ''
self.files = []
self.reviewer = []
self.cc = []
self.url = ''
self.local = False
self.web = False
self.copied_from = None # None means current user
self.mailed = False
self.private = False
self.lgtm = []
def DiskText(self):
cl = self
s = ""
if cl.copied_from:
s += "Author: " + cl.copied_from + "\n\n"
if cl.private:
s += "Private: " + str(self.private) + "\n"
s += "Mailed: " + str(self.mailed) + "\n"
s += "Description:\n"
s += Indent(cl.desc, "\t")
s += "Files:\n"
for f in cl.files:
s += "\t" + f + "\n"
typecheck(s, str)
return s
def EditorText(self):
cl = self
s = _change_prolog
s += "\n"
if cl.copied_from:
s += "Author: " + cl.copied_from + "\n"
if cl.url != '':
s += 'URL: ' + cl.url + ' # cannot edit\n\n'
if cl.private:
s += "Private: True\n"
s += "Reviewer: " + JoinComma(cl.reviewer) + "\n"
s += "CC: " + JoinComma(cl.cc) + "\n"
s += "\n"
s += "Description:\n"
if cl.desc == '':
s += "\t<enter description here>\n"
else:
s += Indent(cl.desc, "\t")
s += "\n"
if cl.local or cl.name == "new":
s += "Files:\n"
for f in cl.files:
s += "\t" + f + "\n"
s += "\n"
typecheck(s, str)
return s
def PendingText(self, quick=False):
cl = self
s = cl.name + ":" + "\n"
s += Indent(cl.desc, "\t")
s += "\n"
if cl.copied_from:
s += "\tAuthor: " + cl.copied_from + "\n"
if not quick:
s += "\tReviewer: " + JoinComma(cl.reviewer) + "\n"
for (who, line) in cl.lgtm:
s += "\t\t" + who + ": " + line + "\n"
s += "\tCC: " + JoinComma(cl.cc) + "\n"
s += "\tFiles:\n"
for f in cl.files:
s += "\t\t" + f + "\n"
typecheck(s, str)
return s
def Flush(self, ui, repo):
if self.name == "new":
self.Upload(ui, repo, gofmt_just_warn=True, creating=True)
dir = CodeReviewDir(ui, repo)
path = dir + '/cl.' + self.name
f = open(path+'!', "w")
f.write(self.DiskText())
f.close()
if sys.platform == "win32" and os.path.isfile(path):
os.remove(path)
os.rename(path+'!', path)
if self.web and not self.copied_from:
EditDesc(self.name, desc=self.desc,
reviewers=JoinComma(self.reviewer), cc=JoinComma(self.cc),
private=self.private)
def Delete(self, ui, repo):
dir = CodeReviewDir(ui, repo)
os.unlink(dir + "/cl." + self.name)
def Subject(self):
s = line1(self.desc)
if len(s) > 60:
s = s[0:55] + "..."
if self.name != "new":
s = "code review %s: %s" % (self.name, s)
typecheck(s, str)
return s
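# Illustrative sketch: for a CL named "123456" whose description begins with a
# 70-character line, Subject() keeps the first 55 characters, appends "...",
# and prefixes "code review 123456: ".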
def Upload(self, ui, repo, send_mail=False, gofmt=True, gofmt_just_warn=False, creating=False, quiet=False):
if not self.files and not creating:
ui.warn("no files in change list\n")
if ui.configbool("codereview", "force_gofmt", True) and gofmt:
CheckFormat(ui, repo, self.files, just_warn=gofmt_just_warn)
set_status("uploading CL metadata + diffs")
os.chdir(repo.root)
form_fields = [
("content_upload", "1"),
("reviewers", JoinComma(self.reviewer)),
("cc", JoinComma(self.cc)),
("description", self.desc),
("base_hashes", ""),
]
if self.name != "new":
form_fields.append(("issue", self.name))
vcs = None
# We do not include files when creating the issue,
# because we want the patch sets to record the repository
# and base revision they are diffs against. We use the patch
# set message for that purpose, but there is no message with
# the first patch set. Instead the message gets used as the
# new CL's overall subject. So omit the diffs when creating
# and then we'll run an immediate upload.
# This has the effect that every CL begins with an empty "Patch set 1".
if self.files and not creating:
vcs = MercurialVCS(upload_options, ui, repo)
data = vcs.GenerateDiff(self.files)
files = vcs.GetBaseFiles(data)
if len(data) > MAX_UPLOAD_SIZE:
uploaded_diff_file = []
form_fields.append(("separate_patches", "1"))
else:
uploaded_diff_file = [("data", "data.diff", data)]
else:
uploaded_diff_file = [("data", "data.diff", emptydiff)]
if vcs and self.name != "new":
form_fields.append(("subject", "diff -r " + vcs.base_rev + " " + ui.expandpath("default")))
else:
# First upload sets the subject for the CL itself.
form_fields.append(("subject", self.Subject()))
ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
response_body = MySend("/upload", body, content_type=ctype)
patchset = None
msg = response_body
lines = msg.splitlines()
if len(lines) >= 2:
msg = lines[0]
patchset = lines[1].strip()
patches = [x.split(" ", 1) for x in lines[2:]]
if response_body.startswith("Issue updated.") and quiet:
pass
else:
ui.status(msg + "\n")
set_status("uploaded CL metadata + diffs")
if not response_body.startswith("Issue created.") and not response_body.startswith("Issue updated."):
raise hg_util.Abort("failed to update issue: " + response_body)
issue = msg[msg.rfind("/")+1:]
self.name = issue
if not self.url:
self.url = server_url_base + self.name
if not uploaded_diff_file:
set_status("uploading patches")
patches = UploadSeparatePatches(issue, rpc, patchset, data, upload_options)
if vcs:
set_status("uploading base files")
vcs.UploadBaseFiles(issue, rpc, patches, patchset, upload_options, files)
if send_mail:
set_status("sending mail")
MySend("/" + issue + "/mail", payload="")
self.web = True
set_status("flushing changes to disk")
self.Flush(ui, repo)
return
def Mail(self, ui, repo):
pmsg = "Hello " + JoinComma(self.reviewer)
if self.cc:
pmsg += " (cc: %s)" % (', '.join(self.cc),)
pmsg += ",\n"
pmsg += "\n"
repourl = ui.expandpath("default")
if not self.mailed:
pmsg += "I'd like you to review this change to\n" + repourl + "\n"
else:
pmsg += "Please take another look.\n"
typecheck(pmsg, str)
PostMessage(ui, self.name, pmsg, subject=self.Subject())
self.mailed = True
self.Flush(ui, repo)
def GoodCLName(name):
typecheck(name, str)
return re.match("^[0-9]+$", name)
def ParseCL(text, name):
typecheck(text, str)
typecheck(name, str)
sname = None
lineno = 0
sections = {
'Author': '',
'Description': '',
'Files': '',
'URL': '',
'Reviewer': '',
'CC': '',
'Mailed': '',
'Private': '',
}
for line in text.split('\n'):
lineno += 1
line = line.rstrip()
if line != '' and line[0] == '#':
continue
if line == '' or line[0] == ' ' or line[0] == '\t':
if sname == None and line != '':
return None, lineno, 'text outside section'
if sname != None:
sections[sname] += line + '\n'
continue
p = line.find(':')
if p >= 0:
s, val = line[:p].strip(), line[p+1:].strip()
if s in sections:
sname = s
if val != '':
sections[sname] += val + '\n'
continue
return None, lineno, 'malformed section header'
for k in sections:
sections[k] = StripCommon(sections[k]).rstrip()
cl = CL(name)
if sections['Author']:
cl.copied_from = sections['Author']
cl.desc = sections['Description']
for line in sections['Files'].split('\n'):
i = line.find('#')
if i >= 0:
line = line[0:i].rstrip()
line = line.strip()
if line == '':
continue
cl.files.append(line)
cl.reviewer = SplitCommaSpace(sections['Reviewer'])
cl.cc = SplitCommaSpace(sections['CC'])
cl.url = sections['URL']
if sections['Mailed'] != 'False':
# Odd default, but avoids spurious mailings when
# reading old CLs that do not have a Mailed: line.
# CLs created with this update will always have
# Mailed: False on disk.
cl.mailed = True
if sections['Private'] in ('True', 'true', 'Yes', 'yes'):
cl.private = True
if cl.desc == '<enter description here>':
cl.desc = ''
return cl, 0, ''
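# Illustrative sketch (hypothetical field values): ParseCL accepts on-disk text
# in the section format written by DiskText/EditorText, e.g.
#
#     Reviewer: rsc
#     Description:
#             math: add IsInf
#     Files:
#             src/pkg/math/bits.go
#
# which yields a CL with reviewer ["rsc"], desc "math: add IsInf" and
# files ["src/pkg/math/bits.go"].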
def SplitCommaSpace(s):
typecheck(s, str)
s = s.strip()
if s == "":
return []
return re.split(", *", s)
def CutDomain(s):
typecheck(s, str)
i = s.find('@')
if i >= 0:
s = s[0:i]
return s
def JoinComma(l):
for s in l:
typecheck(s, str)
return ", ".join(l)
def ExceptionDetail():
s = str(sys.exc_info()[0])
if s.startswith("<type '") and s.endswith("'>"):
s = s[7:-2]
elif s.startswith("<class '") and s.endswith("'>"):
s = s[8:-2]
arg = str(sys.exc_info()[1])
if len(arg) > 0:
s += ": " + arg
return s
def IsLocalCL(ui, repo, name):
return GoodCLName(name) and os.access(CodeReviewDir(ui, repo) + "/cl." + name, 0)
# Load CL from disk and/or the web.
def LoadCL(ui, repo, name, web=True):
typecheck(name, str)
set_status("loading CL " + name)
if not GoodCLName(name):
return None, "invalid CL name"
dir = CodeReviewDir(ui, repo)
path = dir + "cl." + name
if os.access(path, 0):
ff = open(path)
text = ff.read()
ff.close()
cl, lineno, err = ParseCL(text, name)
if err != "":
return None, "malformed CL data: "+err
cl.local = True
else:
cl = CL(name)
if web:
set_status("getting issue metadata from web")
d = JSONGet(ui, "/api/" + name + "?messages=true")
set_status(None)
if d is None:
return None, "cannot load CL %s from server" % (name,)
if 'owner_email' not in d or 'issue' not in d or str(d['issue']) != name:
return None, "malformed response loading CL data from code review server"
cl.dict = d
cl.reviewer = d.get('reviewers', [])
cl.cc = d.get('cc', [])
if cl.local and cl.copied_from and cl.desc:
# local copy of CL written by someone else
# and we saved a description. use that one,
# so that committers can edit the description
# before doing hg submit.
pass
else:
cl.desc = d.get('description', "")
cl.url = server_url_base + name
cl.web = True
cl.private = d.get('private', False) != False
cl.lgtm = []
for m in d.get('messages', []):
if m.get('approval', False) == True:
who = re.sub('@.*', '', m.get('sender', ''))
text = re.sub("\n(.|\n)*", '', m.get('text', ''))
cl.lgtm.append((who, text))
set_status("loaded CL " + name)
return cl, ''
class LoadCLThread(threading.Thread):
def __init__(self, ui, repo, dir, f, web):
threading.Thread.__init__(self)
self.ui = ui
self.repo = repo
self.dir = dir
self.f = f
self.web = web
self.cl = None
def run(self):
cl, err = LoadCL(self.ui, self.repo, self.f[3:], web=self.web)
if err != '':
self.ui.warn("loading "+self.dir+self.f+": " + err + "\n")
return
self.cl = cl
# Load all the CLs from this repository.
def LoadAllCL(ui, repo, web=True):
dir = CodeReviewDir(ui, repo)
m = {}
files = [f for f in os.listdir(dir) if f.startswith('cl.')]
if not files:
return m
active = []
first = True
for f in files:
t = LoadCLThread(ui, repo, dir, f, web)
t.start()
if web and first:
# first request: wait in case it needs to authenticate
# otherwise we get lots of user/password prompts
# running in parallel.
t.join()
if t.cl:
m[t.cl.name] = t.cl
first = False
else:
active.append(t)
for t in active:
t.join()
if t.cl:
m[t.cl.name] = t.cl
return m
# Find repository root. On error, ui.warn and return None
def RepoDir(ui, repo):
url = repo.url();
if not url.startswith('file:'):
ui.warn("repository %s is not in local file system\n" % (url,))
return None
url = url[5:]
if url.endswith('/'):
url = url[:-1]
typecheck(url, str)
return url
# Find (or make) code review directory. On error, ui.warn and return None
def CodeReviewDir(ui, repo):
dir = RepoDir(ui, repo)
if dir == None:
return None
dir += '/.hg/codereview/'
if not os.path.isdir(dir):
try:
os.mkdir(dir, 0700)
except:
ui.warn('cannot mkdir %s: %s\n' % (dir, ExceptionDetail()))
return None
typecheck(dir, str)
return dir
# Turn leading tabs into spaces, so that the common white space
# prefix doesn't get confused when people's editors write out
# some lines with spaces, some with tabs. Only a heuristic
# (some editors don't use 8 spaces either) but a useful one.
def TabsToSpaces(line):
i = 0
while i < len(line) and line[i] == '\t':
i += 1
return ' '*(8*i) + line[i:]
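# Illustrative sketch: TabsToSpaces("\t\tif x:") returns 16 spaces followed by
# "if x:"; only the leading run of tabs is rewritten.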
# Strip maximal common leading white space prefix from text
def StripCommon(text):
typecheck(text, str)
ws = None
for line in text.split('\n'):
line = line.rstrip()
if line == '':
continue
line = TabsToSpaces(line)
white = line[:len(line)-len(line.lstrip())]
if ws == None:
ws = white
else:
common = ''
for i in range(min(len(white), len(ws))+1):
if white[0:i] == ws[0:i]:
common = white[0:i]
ws = common
if ws == '':
break
if ws == None:
return text
t = ''
for line in text.split('\n'):
line = line.rstrip()
line = TabsToSpaces(line)
if line.startswith(ws):
line = line[len(ws):]
if line == '' and t == '':
continue
t += line + '\n'
while len(t) >= 2 and t[-2:] == '\n\n':
t = t[:-1]
typecheck(t, str)
return t
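# Illustrative sketch: StripCommon("  a\n    b\n") drops the common two-space
# prefix (after tab expansion) and returns "a\n  b\n".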
# Indent text with indent.
def Indent(text, indent):
typecheck(text, str)
typecheck(indent, str)
t = ''
for line in text.split('\n'):
t += indent + line + '\n'
typecheck(t, str)
return t
# Return the first line of l
def line1(text):
typecheck(text, str)
return text.split('\n')[0]
_change_prolog = """# Change list.
# Lines beginning with # are ignored.
# Multi-line values should be indented.
"""
desc_re = '^(.+: |(tag )?(release|weekly)\.|fix build|undo CL)'
desc_msg = '''Your CL description appears not to use the standard form.
The first line of your change description is conventionally a
one-line summary of the change, prefixed by the primary affected package,
and is used as the subject for code review mail; the rest of the description
elaborates.
Examples:
encoding/rot13: new package
math: add IsInf, IsNaN
net: fix cname in LookupHost
unicode: update to Unicode 5.0.2
'''
def promptyesno(ui, msg):
return ui.promptchoice(msg, ["&yes", "&no"], 0) == 0
def promptremove(ui, repo, f):
if promptyesno(ui, "hg remove %s (y/n)?" % (f,)):
if hg_commands.remove(ui, repo, 'path:'+f) != 0:
ui.warn("error removing %s" % (f,))
def promptadd(ui, repo, f):
if promptyesno(ui, "hg add %s (y/n)?" % (f,)):
if hg_commands.add(ui, repo, 'path:'+f) != 0:
ui.warn("error adding %s" % (f,))
def EditCL(ui, repo, cl):
set_status(None) # do not show status
s = cl.EditorText()
while True:
s = ui.edit(s, ui.username())
# We can't trust Mercurial + Python not to die before making the change,
# so, by popular demand, just scribble the most recent CL edit into
# $(hg root)/last-change so that if Mercurial does die, people
# can look there for their work.
try:
f = open(repo.root+"/last-change", "w")
f.write(s)
f.close()
except:
pass
clx, line, err = ParseCL(s, cl.name)
if err != '':
if not promptyesno(ui, "error parsing change list: line %d: %s\nre-edit (y/n)?" % (line, err)):
return "change list not modified"
continue
# Check description.
if clx.desc == '':
if promptyesno(ui, "change list should have a description\nre-edit (y/n)?"):
continue
elif re.search('<enter reason for undo>', clx.desc):
if promptyesno(ui, "change list description omits reason for undo\nre-edit (y/n)?"):
continue
elif not re.match(desc_re, clx.desc.split('\n')[0]):
if promptyesno(ui, desc_msg + "re-edit (y/n)?"):
continue
# Check file list for files that need to be hg added or hg removed
# or simply aren't understood.
pats = ['path:'+f for f in clx.files]
changed = hg_matchPattern(ui, repo, *pats, modified=True, added=True, removed=True)
deleted = hg_matchPattern(ui, repo, *pats, deleted=True)
unknown = hg_matchPattern(ui, repo, *pats, unknown=True)
ignored = hg_matchPattern(ui, repo, *pats, ignored=True)
clean = hg_matchPattern(ui, repo, *pats, clean=True)
files = []
for f in clx.files:
if f in changed:
files.append(f)
continue
if f in deleted:
promptremove(ui, repo, f)
files.append(f)
continue
if f in unknown:
promptadd(ui, repo, f)
files.append(f)
continue
if f in ignored:
ui.warn("error: %s is excluded by .hgignore; omitting\n" % (f,))
continue
if f in clean:
ui.warn("warning: %s is listed in the CL but unchanged\n" % (f,))
files.append(f)
continue
p = repo.root + '/' + f
if os.path.isfile(p):
ui.warn("warning: %s is a file but not known to hg\n" % (f,))
files.append(f)
continue
if os.path.isdir(p):
ui.warn("error: %s is a directory, not a file; omitting\n" % (f,))
continue
ui.warn("error: %s does not exist; omitting\n" % (f,))
clx.files = files
cl.desc = clx.desc
cl.reviewer = clx.reviewer
cl.cc = clx.cc
cl.files = clx.files
cl.private = clx.private
break
return ""
# For use by submit, etc. (NOT by change)
# Get change list number or list of files from command line.
# If files are given, make a new change list.
def CommandLineCL(ui, repo, pats, opts, defaultcc=None):
if len(pats) > 0 and GoodCLName(pats[0]):
if len(pats) != 1:
return None, "cannot specify change number and file names"
if opts.get('message'):
return None, "cannot use -m with existing CL"
cl, err = LoadCL(ui, repo, pats[0], web=True)
if err != "":
return None, err
else:
cl = CL("new")
cl.local = True
cl.files = ChangedFiles(ui, repo, pats, taken=Taken(ui, repo))
if not cl.files:
return None, "no files changed"
if opts.get('reviewer'):
cl.reviewer = Add(cl.reviewer, SplitCommaSpace(opts.get('reviewer')))
if opts.get('cc'):
cl.cc = Add(cl.cc, SplitCommaSpace(opts.get('cc')))
if defaultcc:
cl.cc = Add(cl.cc, defaultcc)
if cl.name == "new":
if opts.get('message'):
cl.desc = opts.get('message')
else:
err = EditCL(ui, repo, cl)
if err != '':
return None, err
return cl, ""
#######################################################################
# Change list file management
# Return list of changed files in repository that match pats.
# The patterns came from the command line, so we warn
# if they have no effect or cannot be understood.
def ChangedFiles(ui, repo, pats, taken=None):
taken = taken or {}
# Run each pattern separately so that we can warn about
# patterns that didn't do anything useful.
for p in pats:
for f in hg_matchPattern(ui, repo, p, unknown=True):
promptadd(ui, repo, f)
for f in hg_matchPattern(ui, repo, p, removed=True):
promptremove(ui, repo, f)
files = hg_matchPattern(ui, repo, p, modified=True, added=True, removed=True)
for f in files:
if f in taken:
ui.warn("warning: %s already in CL %s\n" % (f, taken[f].name))
if not files:
ui.warn("warning: %s did not match any modified files\n" % (p,))
# Again, all at once (eliminates duplicates)
l = hg_matchPattern(ui, repo, *pats, modified=True, added=True, removed=True)
l.sort()
if taken:
l = Sub(l, taken.keys())
return l
# Return list of changed files in repository that match pats and still exist.
def ChangedExistingFiles(ui, repo, pats, opts):
l = hg_matchPattern(ui, repo, *pats, modified=True, added=True)
l.sort()
return l
# Return list of files claimed by existing CLs
def Taken(ui, repo):
all = LoadAllCL(ui, repo, web=False)
taken = {}
for _, cl in all.items():
for f in cl.files:
taken[f] = cl
return taken
# Return list of changed files that are not claimed by other CLs
def DefaultFiles(ui, repo, pats):
return ChangedFiles(ui, repo, pats, taken=Taken(ui, repo))
#######################################################################
# File format checking.
def CheckFormat(ui, repo, files, just_warn=False):
set_status("running gofmt")
CheckGofmt(ui, repo, files, just_warn)
CheckTabfmt(ui, repo, files, just_warn)
# Check that gofmt run on the list of files does not change them
def CheckGofmt(ui, repo, files, just_warn):
files = gofmt_required(files)
if not files:
return
cwd = os.getcwd()
files = [RelativePath(repo.root + '/' + f, cwd) for f in files]
files = [f for f in files if os.access(f, 0)]
if not files:
return
try:
cmd = subprocess.Popen(["gofmt", "-l"] + files, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=sys.platform != "win32")
cmd.stdin.close()
except:
raise hg_util.Abort("gofmt: " + ExceptionDetail())
data = cmd.stdout.read()
errors = cmd.stderr.read()
cmd.wait()
set_status("done with gofmt")
if len(errors) > 0:
ui.warn("gofmt errors:\n" + errors.rstrip() + "\n")
return
if len(data) > 0:
msg = "gofmt needs to format these files (run hg gofmt):\n" + Indent(data, "\t").rstrip()
if just_warn:
ui.warn("warning: " + msg + "\n")
else:
raise hg_util.Abort(msg)
return
# Check that *.[chys] files indent using tabs.
def CheckTabfmt(ui, repo, files, just_warn):
files = [f for f in files if f.startswith('src/') and re.search(r"\.[chys]$", f) and not re.search(r"\.tab\.[ch]$", f)]
if not files:
return
cwd = os.getcwd()
files = [RelativePath(repo.root + '/' + f, cwd) for f in files]
files = [f for f in files if os.access(f, 0)]
badfiles = []
for f in files:
try:
for line in open(f, 'r'):
# Four leading spaces is enough to complain about,
# except that some Plan 9 code uses four spaces as the label indent,
# so allow that.
if line.startswith(' ') and not re.match(' [A-Za-z0-9_]+:', line):
badfiles.append(f)
break
except:
# ignore cannot open file, etc.
pass
if len(badfiles) > 0:
msg = "these files use spaces for indentation (use tabs instead):\n\t" + "\n\t".join(badfiles)
if just_warn:
ui.warn("warning: " + msg + "\n")
else:
raise hg_util.Abort(msg)
return
#######################################################################
# CONTRIBUTORS file parsing
contributorsCache = None
contributorsURL = None
def ReadContributors(ui, repo):
global contributorsCache
if contributorsCache is not None:
return contributorsCache
try:
if contributorsURL is not None:
opening = contributorsURL
f = urllib2.urlopen(contributorsURL)
else:
opening = repo.root + '/CONTRIBUTORS'
f = open(repo.root + '/CONTRIBUTORS', 'r')
except:
ui.write("warning: cannot open %s: %s\n" % (opening, ExceptionDetail()))
return
contributors = {}
for line in f:
# CONTRIBUTORS is a list of lines like:
# Person <email>
# Person <email> <alt-email>
# The first email address is the one used in commit logs.
if line.startswith('#'):
continue
m = re.match(r"([^<>]+\S)\s+(<[^<>\s]+>)((\s+<[^<>\s]+>)*)\s*$", line)
if m:
name = m.group(1)
email = m.group(2)[1:-1]
contributors[email.lower()] = (name, email)
for extra in m.group(3).split():
contributors[extra[1:-1].lower()] = (name, email)
contributorsCache = contributors
return contributors
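# Illustrative sketch (hypothetical entry): a CONTRIBUTORS line
#     Ann Author <ann@example.com> <ann@alt.org>
# maps both 'ann@example.com' and 'ann@alt.org' to ('Ann Author', 'ann@example.com'),
# so either address resolves to the canonical commit-log identity.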
def CheckContributor(ui, repo, user=None):
set_status("checking CONTRIBUTORS file")
user, userline = FindContributor(ui, repo, user, warn=False)
if not userline:
raise hg_util.Abort("cannot find %s in CONTRIBUTORS" % (user,))
return userline
def FindContributor(ui, repo, user=None, warn=True):
if not user:
user = ui.config("ui", "username")
if not user:
raise hg_util.Abort("[ui] username is not configured in .hgrc")
user = user.lower()
m = re.match(r".*<(.*)>", user)
if m:
user = m.group(1)
contributors = ReadContributors(ui, repo)
if user not in contributors:
if warn:
ui.warn("warning: cannot find %s in CONTRIBUTORS\n" % (user,))
return user, None
user, email = contributors[user]
return email, "%s <%s>" % (user, email)
#######################################################################
# Mercurial helper functions.
# Read http://mercurial.selenic.com/wiki/MercurialApi before writing any of these.
# We use the ui.pushbuffer/ui.popbuffer + hg_commands.xxx tricks for all interaction
# with Mercurial. It has proved the most stable as they make changes.
hgversion = hg_util.version()
# We require Mercurial 1.9 and suggest Mercurial 2.0.
# The details of the scmutil package changed then,
# so allowing earlier versions would require extra band-aids below.
# Ubuntu 11.10 ships with Mercurial 1.9.1 as the default version.
hg_required = "1.9"
hg_suggested = "2.0"
old_message = """
The code review extension requires Mercurial """+hg_required+""" or newer.
You are using Mercurial """+hgversion+""".
To install a new Mercurial, use
sudo easy_install mercurial=="""+hg_suggested+"""
or visit http://mercurial.selenic.com/downloads/.
"""
linux_message = """
You may need to clear your current Mercurial installation by running:
sudo apt-get remove mercurial mercurial-common
sudo rm -rf /etc/mercurial
"""
if hgversion < hg_required:
msg = old_message
if os.access("/etc/mercurial", 0):
msg += linux_message
raise hg_util.Abort(msg)
from mercurial.hg import clean as hg_clean
from mercurial import cmdutil as hg_cmdutil
from mercurial import error as hg_error
from mercurial import match as hg_match
from mercurial import node as hg_node
class uiwrap(object):
def __init__(self, ui):
self.ui = ui
ui.pushbuffer()
self.oldQuiet = ui.quiet
ui.quiet = True
self.oldVerbose = ui.verbose
ui.verbose = False
def output(self):
ui = self.ui
ui.quiet = self.oldQuiet
ui.verbose = self.oldVerbose
return ui.popbuffer()
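# A minimal sketch of the capture pattern described above: wrap the ui, run
# any hg_commands call, then collect what it printed. The function is
# illustrative only; ui and repo come from the extension entry points.
def _demo_capture(ui, repo):
	w = uiwrap(ui)
	hg_commands.status(ui, repo)
	return w.output()  # buffered status text, with quiet/verbose restored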
def to_slash(path):
if sys.platform == "win32":
return path.replace('\\', '/')
return path
def hg_matchPattern(ui, repo, *pats, **opts):
w = uiwrap(ui)
hg_commands.status(ui, repo, *pats, **opts)
text = w.output()
ret = []
prefix = to_slash(os.path.realpath(repo.root))+'/'
for line in text.split('\n'):
f = line.split()
if len(f) > 1:
if len(pats) > 0:
# Given patterns, Mercurial shows relative to cwd
p = to_slash(os.path.realpath(f[1]))
if not p.startswith(prefix):
print >>sys.stderr, "File %s not in repo root %s.\n" % (p, prefix)
else:
ret.append(p[len(prefix):])
else:
# Without patterns, Mercurial shows relative to root (what we want)
ret.append(to_slash(f[1]))
return ret
def hg_heads(ui, repo):
w = uiwrap(ui)
hg_commands.heads(ui, repo)
return w.output()
noise = [
"",
"resolving manifests",
"searching for changes",
"couldn't find merge tool hgmerge",
"adding changesets",
"adding manifests",
"adding file changes",
"all local heads known remotely",
]
def isNoise(line):
line = str(line)
for x in noise:
if line == x:
return True
return False
def hg_incoming(ui, repo):
w = uiwrap(ui)
ret = hg_commands.incoming(ui, repo, force=False, bundle="")
if ret and ret != 1:
raise hg_util.Abort(ret)
return w.output()
def hg_log(ui, repo, **opts):
for k in ['date', 'keyword', 'rev', 'user']:
if not opts.has_key(k):
opts[k] = ""
w = uiwrap(ui)
ret = hg_commands.log(ui, repo, **opts)
if ret:
raise hg_util.Abort(ret)
return w.output()
def hg_outgoing(ui, repo, **opts):
w = uiwrap(ui)
ret = hg_commands.outgoing(ui, repo, **opts)
if ret and ret != 1:
raise hg_util.Abort(ret)
return w.output()
def hg_pull(ui, repo, **opts):
w = uiwrap(ui)
ui.quiet = False
ui.verbose = True # for file list
err = hg_commands.pull(ui, repo, **opts)
for line in w.output().split('\n'):
if isNoise(line):
continue
if line.startswith('moving '):
line = 'mv ' + line[len('moving '):]
if line.startswith('getting ') and line.find(' to ') >= 0:
line = 'mv ' + line[len('getting '):]
if line.startswith('getting '):
line = '+ ' + line[len('getting '):]
if line.startswith('removing '):
line = '- ' + line[len('removing '):]
ui.write(line + '\n')
return err
def hg_push(ui, repo, **opts):
w = uiwrap(ui)
ui.quiet = False
ui.verbose = True
err = hg_commands.push(ui, repo, **opts)
for line in w.output().split('\n'):
if not isNoise(line):
ui.write(line + '\n')
return err
def hg_commit(ui, repo, *pats, **opts):
return hg_commands.commit(ui, repo, *pats, **opts)
#######################################################################
# Mercurial precommit hook to disable commit except through this interface.
commit_okay = False
def precommithook(ui, repo, **opts):
if commit_okay:
return False # False means okay.
ui.write("\ncodereview extension enabled; use mail, upload, or submit instead of commit\n\n")
return True
#######################################################################
# @clnumber file pattern support
# We replace scmutil.match with the MatchAt wrapper to add the @clnumber pattern.
match_repo = None
match_ui = None
match_orig = None
def InstallMatch(ui, repo):
global match_repo
global match_ui
global match_orig
match_ui = ui
match_repo = repo
from mercurial import scmutil
match_orig = scmutil.match
scmutil.match = MatchAt
def MatchAt(ctx, pats=None, opts=None, globbed=False, default='relpath'):
taken = []
files = []
pats = pats or []
opts = opts or {}
for p in pats:
if p.startswith('@'):
taken.append(p)
clname = p[1:]
if clname == "default":
files = DefaultFiles(match_ui, match_repo, [])
else:
if not GoodCLName(clname):
raise hg_util.Abort("invalid CL name " + clname)
cl, err = LoadCL(match_repo.ui, match_repo, clname, web=False)
if err != '':
raise hg_util.Abort("loading CL " + clname + ": " + err)
if not cl.files:
raise hg_util.Abort("no files in CL " + clname)
files = Add(files, cl.files)
pats = Sub(pats, taken) + ['path:'+f for f in files]
# work-around for http://selenic.com/hg/rev/785bbc8634f8
if not hasattr(ctx, 'match'):
ctx = ctx[None]
return match_orig(ctx, pats=pats, opts=opts, globbed=globbed, default=default)
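# Usage note (illustrative): with MatchAt installed, a plain Mercurial command
# such as "hg diff @123456" expands the @clnumber argument into explicit
# path: patterns for every file recorded in CL 123456.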
#######################################################################
# Commands added by code review extension.
#######################################################################
# hg change
def change(ui, repo, *pats, **opts):
"""create, edit or delete a change list
Create, edit or delete a change list.
A change list is a group of files to be reviewed and submitted together,
plus a textual description of the change.
Change lists are referred to by simple alphanumeric names.
Changes must be reviewed before they can be submitted.
In the absence of options, the change command opens the
change list for editing in the default editor.
Deleting a change with the -d or -D flag does not affect
the contents of the files listed in that change. To revert
the files listed in a change, use
hg revert @123456
before running hg change -d 123456.
"""
if codereview_disabled:
return codereview_disabled
dirty = {}
if len(pats) > 0 and GoodCLName(pats[0]):
name = pats[0]
if len(pats) != 1:
return "cannot specify CL name and file patterns"
pats = pats[1:]
cl, err = LoadCL(ui, repo, name, web=True)
if err != '':
return err
if not cl.local and (opts["stdin"] or not opts["stdout"]):
return "cannot change non-local CL " + name
else:
name = "new"
cl = CL("new")
if repo[None].branch() != "default":
return "cannot create CL outside default branch"
dirty[cl] = True
files = ChangedFiles(ui, repo, pats, taken=Taken(ui, repo))
if opts["delete"] or opts["deletelocal"]:
if opts["delete"] and opts["deletelocal"]:
return "cannot use -d and -D together"
flag = "-d"
if opts["deletelocal"]:
flag = "-D"
if name == "new":
return "cannot use "+flag+" with file patterns"
if opts["stdin"] or opts["stdout"]:
return "cannot use "+flag+" with -i or -o"
if not cl.local:
return "cannot change non-local CL " + name
if opts["delete"]:
if cl.copied_from:
return "original author must delete CL; hg change -D will remove locally"
PostMessage(ui, cl.name, "*** Abandoned ***", send_mail=cl.mailed)
EditDesc(cl.name, closed=True, private=cl.private)
cl.Delete(ui, repo)
return
if opts["stdin"]:
s = sys.stdin.read()
clx, line, err = ParseCL(s, name)
if err != '':
return "error parsing change list: line %d: %s" % (line, err)
if clx.desc is not None:
			cl.desc = clx.desc
dirty[cl] = True
if clx.reviewer is not None:
cl.reviewer = clx.reviewer
dirty[cl] = True
if clx.cc is not None:
cl.cc = clx.cc
dirty[cl] = True
if clx.files is not None:
cl.files = clx.files
dirty[cl] = True
if clx.private != cl.private:
cl.private = clx.private
dirty[cl] = True
if not opts["stdin"] and not opts["stdout"]:
if name == "new":
cl.files = files
err = EditCL(ui, repo, cl)
if err != "":
return err
dirty[cl] = True
for d, _ in dirty.items():
name = d.name
d.Flush(ui, repo)
if name == "new":
d.Upload(ui, repo, quiet=True)
if opts["stdout"]:
ui.write(cl.EditorText())
elif opts["pending"]:
ui.write(cl.PendingText())
elif name == "new":
if ui.quiet:
ui.write(cl.name)
else:
ui.write("CL created: " + cl.url + "\n")
return
#######################################################################
# hg code-login (broken?)
def code_login(ui, repo, **opts):
"""log in to code review server
Logs in to the code review server, saving a cookie in
a file in your home directory.
"""
if codereview_disabled:
return codereview_disabled
MySend(None)
#######################################################################
# hg clpatch / undo / release-apply / download
# All concerned with applying or unapplying patches to the repository.
def clpatch(ui, repo, clname, **opts):
"""import a patch from the code review server
Imports a patch from the code review server into the local client.
If the local client has already modified any of the files that the
patch modifies, this command will refuse to apply the patch.
Submitting an imported patch will keep the original author's
name as the Author: line but add your own name to a Committer: line.
"""
if repo[None].branch() != "default":
return "cannot run hg clpatch outside default branch"
return clpatch_or_undo(ui, repo, clname, opts, mode="clpatch")
def undo(ui, repo, clname, **opts):
"""undo the effect of a CL
Creates a new CL that undoes an earlier CL.
After creating the CL, opens the CL text for editing so that
you can add the reason for the undo to the description.
"""
if repo[None].branch() != "default":
return "cannot run hg undo outside default branch"
return clpatch_or_undo(ui, repo, clname, opts, mode="undo")
def release_apply(ui, repo, clname, **opts):
"""apply a CL to the release branch
Creates a new CL copying a previously committed change
from the main branch to the release branch.
The current client must either be clean or already be in
the release branch.
The release branch must be created by starting with a
clean client, disabling the code review plugin, and running:
hg update weekly.YYYY-MM-DD
hg branch release-branch.rNN
hg commit -m 'create release-branch.rNN'
hg push --new-branch
Then re-enable the code review plugin.
People can test the release branch by running
hg update release-branch.rNN
in a clean client. To return to the normal tree,
hg update default
Move changes since the weekly into the release branch
using hg release-apply followed by the usual code review
process and hg submit.
When it comes time to tag the release, record the
final long-form tag of the release-branch.rNN
in the *default* branch's .hgtags file. That is, run
hg update default
and then edit .hgtags as you would for a weekly.
"""
c = repo[None]
if not releaseBranch:
return "no active release branches"
if c.branch() != releaseBranch:
if c.modified() or c.added() or c.removed():
raise hg_util.Abort("uncommitted local changes - cannot switch branches")
err = hg_clean(repo, releaseBranch)
if err:
return err
try:
err = clpatch_or_undo(ui, repo, clname, opts, mode="backport")
if err:
raise hg_util.Abort(err)
except Exception, e:
hg_clean(repo, "default")
raise e
return None
def rev2clname(rev):
# Extract CL name from revision description.
# The last line in the description that is a codereview URL is the real one.
# Earlier lines might be part of the user-written description.
all = re.findall('(?m)^http://codereview.appspot.com/([0-9]+)$', rev.description())
if len(all) > 0:
return all[-1]
return ""
undoHeader = """undo CL %s / %s
<enter reason for undo>
««« original CL description
"""
undoFooter = """
»»»
"""
backportHeader = """[%s] %s
««« CL %s / %s
"""
backportFooter = """
»»»
"""
# Implementation of clpatch/undo.
def clpatch_or_undo(ui, repo, clname, opts, mode):
if codereview_disabled:
return codereview_disabled
if mode == "undo" or mode == "backport":
# Find revision in Mercurial repository.
# Assume CL number is 7+ decimal digits.
		# Otherwise it is either a change log sequence number (fewer decimal digits),
# hexadecimal hash, or tag name.
# Mercurial will fall over long before the change log
# sequence numbers get to be 7 digits long.
if re.match('^[0-9]{7,}$', clname):
found = False
for r in hg_log(ui, repo, keyword="codereview.appspot.com/"+clname, limit=100, template="{node}\n").split():
rev = repo[r]
# Last line with a code review URL is the actual review URL.
# Earlier ones might be part of the CL description.
n = rev2clname(rev)
if n == clname:
found = True
break
if not found:
return "cannot find CL %s in local repository" % clname
else:
rev = repo[clname]
if not rev:
return "unknown revision %s" % clname
clname = rev2clname(rev)
if clname == "":
return "cannot find CL name in revision description"
# Create fresh CL and start with patch that would reverse the change.
vers = hg_node.short(rev.node())
cl = CL("new")
desc = str(rev.description())
if mode == "undo":
cl.desc = (undoHeader % (clname, vers)) + desc + undoFooter
else:
			cl.desc = (backportHeader % (releaseBranch, line1(desc), clname, vers)) + desc + backportFooter
v1 = vers
v0 = hg_node.short(rev.parents()[0].node())
if mode == "undo":
arg = v1 + ":" + v0
else:
vers = v0
arg = v0 + ":" + v1
patch = RunShell(["hg", "diff", "--git", "-r", arg])
else: # clpatch
cl, vers, patch, err = DownloadCL(ui, repo, clname)
if err != "":
return err
if patch == emptydiff:
return "codereview issue %s has no diff" % clname
# find current hg version (hg identify)
ctx = repo[None]
parents = ctx.parents()
id = '+'.join([hg_node.short(p.node()) for p in parents])
# if version does not match the patch version,
# try to update the patch line numbers.
if vers != "" and id != vers:
# "vers in repo" gives the wrong answer
# on some versions of Mercurial. Instead, do the actual
# lookup and catch the exception.
try:
repo[vers].description()
except:
return "local repository is out of date; sync to get %s" % (vers)
patch1, err = portPatch(repo, patch, vers, id)
if err != "":
if not opts["ignore_hgpatch_failure"]:
return "codereview issue %s is out of date: %s (%s->%s)" % (clname, err, vers, id)
else:
patch = patch1
argv = ["hgpatch"]
if opts["no_incoming"] or mode == "backport":
argv += ["--checksync=false"]
try:
cmd = subprocess.Popen(argv, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=None, close_fds=sys.platform != "win32")
except:
return "hgpatch: " + ExceptionDetail() + "\nInstall hgpatch with:\n$ go get code.google.com/p/go.codereview/cmd/hgpatch\n"
out, err = cmd.communicate(patch)
if cmd.returncode != 0 and not opts["ignore_hgpatch_failure"]:
return "hgpatch failed"
cl.local = True
cl.files = out.strip().split()
if not cl.files and not opts["ignore_hgpatch_failure"]:
return "codereview issue %s has no changed files" % clname
files = ChangedFiles(ui, repo, [])
extra = Sub(cl.files, files)
if extra:
ui.warn("warning: these files were listed in the patch but not changed:\n\t" + "\n\t".join(extra) + "\n")
cl.Flush(ui, repo)
if mode == "undo":
err = EditCL(ui, repo, cl)
if err != "":
return "CL created, but error editing: " + err
cl.Flush(ui, repo)
else:
ui.write(cl.PendingText() + "\n")
# portPatch rewrites patch from being a patch against
# oldver to being a patch against newver.
def portPatch(repo, patch, oldver, newver):
lines = patch.splitlines(True) # True = keep \n
delta = None
for i in range(len(lines)):
line = lines[i]
if line.startswith('--- a/'):
file = line[6:-1]
delta = fileDeltas(repo, file, oldver, newver)
if not delta or not line.startswith('@@ '):
continue
# @@ -x,y +z,w @@ means the patch chunk replaces
# the original file's line numbers x up to x+y with the
# line numbers z up to z+w in the new file.
# Find the delta from x in the original to the same
# line in the current version and add that delta to both
# x and z.
m = re.match('@@ -([0-9]+),([0-9]+) \+([0-9]+),([0-9]+) @@', line)
if not m:
return None, "error parsing patch line numbers"
n1, len1, n2, len2 = int(m.group(1)), int(m.group(2)), int(m.group(3)), int(m.group(4))
d, err = lineDelta(delta, n1, len1)
if err != "":
return "", err
n1 += d
n2 += d
lines[i] = "@@ -%d,%d +%d,%d @@\n" % (n1, len1, n2, len2)
newpatch = ''.join(lines)
return newpatch, ""
# fileDelta returns the line number deltas for the given file's
# changes from oldver to newver.
# The deltas are a list of (n, len, newdelta) triples that say
# lines [n, n+len) were modified, and after that range the
# line numbers are +newdelta from what they were before.
def fileDeltas(repo, file, oldver, newver):
cmd = ["hg", "diff", "--git", "-r", oldver + ":" + newver, "path:" + file]
data = RunShell(cmd, silent_ok=True)
deltas = []
for line in data.splitlines():
m = re.match('@@ -([0-9]+),([0-9]+) \+([0-9]+),([0-9]+) @@', line)
if not m:
continue
n1, len1, n2, len2 = int(m.group(1)), int(m.group(2)), int(m.group(3)), int(m.group(4))
deltas.append((n1, len1, n2+len2-(n1+len1)))
return deltas
# lineDelta finds the appropriate line number delta to apply to the lines [n, n+len).
# It returns an error if those lines were rewritten by the patch.
def lineDelta(deltas, n, len):
d = 0
for (old, oldlen, newdelta) in deltas:
if old >= n+len:
break
		if old+oldlen > n:
return 0, "patch and recent changes conflict"
d = newdelta
return d, ""
def download(ui, repo, clname, **opts):
"""download a change from the code review server
Download prints a description of the given change list
followed by its diff, downloaded from the code review server.
"""
if codereview_disabled:
return codereview_disabled
cl, vers, patch, err = DownloadCL(ui, repo, clname)
if err != "":
return err
ui.write(cl.EditorText() + "\n")
ui.write(patch + "\n")
return
#######################################################################
# hg file
def file(ui, repo, clname, pat, *pats, **opts):
"""assign files to or remove files from a change list
Assign files to or (with -d) remove files from a change list.
The -d option only removes files from the change list.
It does not edit them or remove them from the repository.
"""
if codereview_disabled:
return codereview_disabled
pats = tuple([pat] + list(pats))
if not GoodCLName(clname):
return "invalid CL name " + clname
dirty = {}
cl, err = LoadCL(ui, repo, clname, web=False)
if err != '':
return err
if not cl.local:
return "cannot change non-local CL " + clname
files = ChangedFiles(ui, repo, pats)
if opts["delete"]:
oldfiles = Intersect(files, cl.files)
if oldfiles:
if not ui.quiet:
ui.status("# Removing files from CL. To undo:\n")
ui.status("# cd %s\n" % (repo.root))
for f in oldfiles:
ui.status("# hg file %s %s\n" % (cl.name, f))
cl.files = Sub(cl.files, oldfiles)
cl.Flush(ui, repo)
else:
ui.status("no such files in CL")
return
if not files:
return "no such modified files"
files = Sub(files, cl.files)
taken = Taken(ui, repo)
warned = False
for f in files:
if f in taken:
if not warned and not ui.quiet:
ui.status("# Taking files from other CLs. To undo:\n")
ui.status("# cd %s\n" % (repo.root))
warned = True
ocl = taken[f]
if not ui.quiet:
ui.status("# hg file %s %s\n" % (ocl.name, f))
if ocl not in dirty:
ocl.files = Sub(ocl.files, files)
dirty[ocl] = True
cl.files = Add(cl.files, files)
dirty[cl] = True
for d, _ in dirty.items():
d.Flush(ui, repo)
return
#######################################################################
# hg gofmt
def gofmt(ui, repo, *pats, **opts):
"""apply gofmt to modified files
Applies gofmt to the modified files in the repository that match
the given patterns.
"""
if codereview_disabled:
return codereview_disabled
files = ChangedExistingFiles(ui, repo, pats, opts)
files = gofmt_required(files)
if not files:
return "no modified go files"
cwd = os.getcwd()
files = [RelativePath(repo.root + '/' + f, cwd) for f in files]
try:
cmd = ["gofmt", "-l"]
if not opts["list"]:
cmd += ["-w"]
if os.spawnvp(os.P_WAIT, "gofmt", cmd + files) != 0:
raise hg_util.Abort("gofmt did not exit cleanly")
except hg_error.Abort, e:
raise
except:
raise hg_util.Abort("gofmt: " + ExceptionDetail())
return
def gofmt_required(files):
return [f for f in files if (not f.startswith('test/') or f.startswith('test/bench/')) and f.endswith('.go')]
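# Illustrative check of the filter above (paths invented): files under test/
# are skipped unless they live in test/bench/, and only .go files qualify.
def _demo_gofmt_required():
	sample = ["src/pkg/fmt/print.go", "test/run.go", "test/bench/garbage.go"]
	return gofmt_required(sample)  # -> ["src/pkg/fmt/print.go", "test/bench/garbage.go"]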
#######################################################################
# hg mail
def mail(ui, repo, *pats, **opts):
"""mail a change for review
Uploads a patch to the code review server and then sends mail
to the reviewer and CC list asking for a review.
"""
if codereview_disabled:
return codereview_disabled
cl, err = CommandLineCL(ui, repo, pats, opts, defaultcc=defaultcc)
if err != "":
return err
cl.Upload(ui, repo, gofmt_just_warn=True)
if not cl.reviewer:
# If no reviewer is listed, assign the review to defaultcc.
# This makes sure that it appears in the
# codereview.appspot.com/user/defaultcc
# page, so that it doesn't get dropped on the floor.
if not defaultcc:
return "no reviewers listed in CL"
cl.cc = Sub(cl.cc, defaultcc)
cl.reviewer = defaultcc
cl.Flush(ui, repo)
if cl.files == []:
return "no changed files, not sending mail"
cl.Mail(ui, repo)
#######################################################################
# hg p / hg pq / hg ps / hg pending
def ps(ui, repo, *pats, **opts):
"""alias for hg p --short
"""
opts['short'] = True
return pending(ui, repo, *pats, **opts)
def pq(ui, repo, *pats, **opts):
"""alias for hg p --quick
"""
opts['quick'] = True
return pending(ui, repo, *pats, **opts)
def pending(ui, repo, *pats, **opts):
"""show pending changes
Lists pending changes followed by a list of unassigned but modified files.
"""
if codereview_disabled:
return codereview_disabled
quick = opts.get('quick', False)
short = opts.get('short', False)
m = LoadAllCL(ui, repo, web=not quick and not short)
names = m.keys()
names.sort()
for name in names:
cl = m[name]
if short:
ui.write(name + "\t" + line1(cl.desc) + "\n")
else:
ui.write(cl.PendingText(quick=quick) + "\n")
if short:
return
files = DefaultFiles(ui, repo, [])
if len(files) > 0:
s = "Changed files not in any CL:\n"
for f in files:
s += "\t" + f + "\n"
ui.write(s)
#######################################################################
# hg submit
def need_sync():
raise hg_util.Abort("local repository out of date; must sync before submit")
def submit(ui, repo, *pats, **opts):
"""submit change to remote repository
Submits change to remote repository.
Bails out if the local repository is not in sync with the remote one.
"""
if codereview_disabled:
return codereview_disabled
# We already called this on startup but sometimes Mercurial forgets.
set_mercurial_encoding_to_utf8()
if not opts["no_incoming"] and hg_incoming(ui, repo):
need_sync()
cl, err = CommandLineCL(ui, repo, pats, opts, defaultcc=defaultcc)
if err != "":
return err
user = None
if cl.copied_from:
user = cl.copied_from
userline = CheckContributor(ui, repo, user)
typecheck(userline, str)
about = ""
if cl.reviewer:
about += "R=" + JoinComma([CutDomain(s) for s in cl.reviewer]) + "\n"
if opts.get('tbr'):
tbr = SplitCommaSpace(opts.get('tbr'))
cl.reviewer = Add(cl.reviewer, tbr)
about += "TBR=" + JoinComma([CutDomain(s) for s in tbr]) + "\n"
if cl.cc:
about += "CC=" + JoinComma([CutDomain(s) for s in cl.cc]) + "\n"
if not cl.reviewer:
return "no reviewers listed in CL"
if not cl.local:
return "cannot submit non-local CL"
# upload, to sync current patch and also get change number if CL is new.
if not cl.copied_from:
cl.Upload(ui, repo, gofmt_just_warn=True)
# check gofmt for real; allowed upload to warn in order to save CL.
cl.Flush(ui, repo)
CheckFormat(ui, repo, cl.files)
about += "%s%s\n" % (server_url_base, cl.name)
if cl.copied_from:
about += "\nCommitter: " + CheckContributor(ui, repo, None) + "\n"
typecheck(about, str)
if not cl.mailed and not cl.copied_from: # in case this is TBR
cl.Mail(ui, repo)
# submit changes locally
message = cl.desc.rstrip() + "\n\n" + about
typecheck(message, str)
set_status("pushing " + cl.name + " to remote server")
if hg_outgoing(ui, repo):
raise hg_util.Abort("local repository corrupt or out-of-phase with remote: found outgoing changes")
old_heads = len(hg_heads(ui, repo).split())
global commit_okay
commit_okay = True
ret = hg_commit(ui, repo, *['path:'+f for f in cl.files], message=message, user=userline)
commit_okay = False
if ret:
return "nothing changed"
node = repo["-1"].node()
# push to remote; if it fails for any reason, roll back
try:
new_heads = len(hg_heads(ui, repo).split())
if old_heads != new_heads and not (old_heads == 0 and new_heads == 1):
# Created new head, so we weren't up to date.
need_sync()
# Push changes to remote. If it works, we're committed. If not, roll back.
try:
hg_push(ui, repo)
except hg_error.Abort, e:
if e.message.find("push creates new heads") >= 0:
# Remote repository had changes we missed.
need_sync()
raise
except:
real_rollback()
raise
# We're committed. Upload final patch, close review, add commit message.
changeURL = hg_node.short(node)
url = ui.expandpath("default")
m = re.match("(^https?://([^@/]+@)?([^.]+)\.googlecode\.com/hg/?)" + "|" +
"(^https?://([^@/]+@)?code\.google\.com/p/([^/.]+)(\.[^./]+)?/?)", url)
if m:
if m.group(1): # prj.googlecode.com/hg/ case
changeURL = "http://code.google.com/p/%s/source/detail?r=%s" % (m.group(3), changeURL)
elif m.group(4) and m.group(7): # code.google.com/p/prj.subrepo/ case
changeURL = "http://code.google.com/p/%s/source/detail?r=%s&repo=%s" % (m.group(6), changeURL, m.group(7)[1:])
elif m.group(4): # code.google.com/p/prj/ case
changeURL = "http://code.google.com/p/%s/source/detail?r=%s" % (m.group(6), changeURL)
else:
print >>sys.stderr, "URL: ", url
else:
print >>sys.stderr, "URL: ", url
pmsg = "*** Submitted as " + changeURL + " ***\n\n" + message
# When posting, move reviewers to CC line,
# so that the issue stops showing up in their "My Issues" page.
PostMessage(ui, cl.name, pmsg, reviewers="", cc=JoinComma(cl.reviewer+cl.cc))
if not cl.copied_from:
EditDesc(cl.name, closed=True, private=cl.private)
cl.Delete(ui, repo)
c = repo[None]
if c.branch() == releaseBranch and not c.modified() and not c.added() and not c.removed():
ui.write("switching from %s to default branch.\n" % releaseBranch)
err = hg_clean(repo, "default")
if err:
return err
return None
#######################################################################
# hg sync
def sync(ui, repo, **opts):
"""synchronize with remote repository
Incorporates recent changes from the remote repository
into the local repository.
"""
if codereview_disabled:
return codereview_disabled
if not opts["local"]:
err = hg_pull(ui, repo, update=True)
if err:
return err
sync_changes(ui, repo)
def sync_changes(ui, repo):
# Look through recent change log descriptions to find
# potential references to http://.*/our-CL-number.
# Double-check them by looking at the Rietveld log.
for rev in hg_log(ui, repo, limit=100, template="{node}\n").split():
desc = repo[rev].description().strip()
for clname in re.findall('(?m)^http://(?:[^\n]+)/([0-9]+)$', desc):
if IsLocalCL(ui, repo, clname) and IsRietveldSubmitted(ui, clname, repo[rev].hex()):
ui.warn("CL %s submitted as %s; closing\n" % (clname, repo[rev]))
cl, err = LoadCL(ui, repo, clname, web=False)
if err != "":
ui.warn("loading CL %s: %s\n" % (clname, err))
continue
if not cl.copied_from:
EditDesc(cl.name, closed=True, private=cl.private)
cl.Delete(ui, repo)
# Remove files that are not modified from the CLs in which they appear.
all = LoadAllCL(ui, repo, web=False)
changed = ChangedFiles(ui, repo, [])
for cl in all.values():
extra = Sub(cl.files, changed)
if extra:
ui.warn("Removing unmodified files from CL %s:\n" % (cl.name,))
for f in extra:
ui.warn("\t%s\n" % (f,))
cl.files = Sub(cl.files, extra)
cl.Flush(ui, repo)
if not cl.files:
if not cl.copied_from:
ui.warn("CL %s has no files; delete (abandon) with hg change -d %s\n" % (cl.name, cl.name))
else:
ui.warn("CL %s has no files; delete locally with hg change -D %s\n" % (cl.name, cl.name))
return
#######################################################################
# hg upload
def upload(ui, repo, name, **opts):
"""upload diffs to the code review server
Uploads the current modifications for a given change to the server.
"""
if codereview_disabled:
return codereview_disabled
repo.ui.quiet = True
cl, err = LoadCL(ui, repo, name, web=True)
if err != "":
return err
if not cl.local:
return "cannot upload non-local change"
cl.Upload(ui, repo)
print "%s%s\n" % (server_url_base, cl.name)
return
#######################################################################
# Table of commands, supplied to Mercurial for installation.
review_opts = [
('r', 'reviewer', '', 'add reviewer'),
('', 'cc', '', 'add cc'),
('', 'tbr', '', 'add future reviewer'),
('m', 'message', '', 'change description (for new change)'),
]
cmdtable = {
# The ^ means to show this command in the help text that
# is printed when running hg with no arguments.
"^change": (
change,
[
('d', 'delete', None, 'delete existing change list'),
('D', 'deletelocal', None, 'delete locally, but do not change CL on server'),
('i', 'stdin', None, 'read change list from standard input'),
('o', 'stdout', None, 'print change list to standard output'),
('p', 'pending', None, 'print pending summary to standard output'),
],
"[-d | -D] [-i] [-o] change# or FILE ..."
),
"^clpatch": (
clpatch,
[
('', 'ignore_hgpatch_failure', None, 'create CL metadata even if hgpatch fails'),
('', 'no_incoming', None, 'disable check for incoming changes'),
],
"change#"
),
# Would prefer to call this codereview-login, but then
# hg help codereview prints the help for this command
# instead of the help for the extension.
"code-login": (
code_login,
[],
"",
),
"^download": (
download,
[],
"change#"
),
"^file": (
file,
[
('d', 'delete', None, 'delete files from change list (but not repository)'),
],
"[-d] change# FILE ..."
),
"^gofmt": (
gofmt,
[
('l', 'list', None, 'list files that would change, but do not edit them'),
],
"FILE ..."
),
"^pending|p": (
pending,
[
('s', 'short', False, 'show short result form'),
('', 'quick', False, 'do not consult codereview server'),
],
"[FILE ...]"
),
"^ps": (
ps,
[],
"[FILE ...]"
),
"^pq": (
pq,
[],
"[FILE ...]"
),
"^mail": (
mail,
		review_opts + hg_commands.walkopts,
"[-r reviewer] [--cc cc] [change# | file ...]"
),
"^release-apply": (
release_apply,
[
('', 'ignore_hgpatch_failure', None, 'create CL metadata even if hgpatch fails'),
('', 'no_incoming', None, 'disable check for incoming changes'),
],
"change#"
),
# TODO: release-start, release-tag, weekly-tag
"^submit": (
submit,
review_opts + [
('', 'no_incoming', None, 'disable initial incoming check (for testing)'),
] + hg_commands.walkopts + hg_commands.commitopts + hg_commands.commitopts2,
"[-r reviewer] [--cc cc] [change# | file ...]"
),
"^sync": (
sync,
[
('', 'local', None, 'do not pull changes from remote repository')
],
"[--local]",
),
"^undo": (
undo,
[
('', 'ignore_hgpatch_failure', None, 'create CL metadata even if hgpatch fails'),
('', 'no_incoming', None, 'disable check for incoming changes'),
],
"change#"
),
"^upload": (
upload,
[],
"change#"
),
}
#######################################################################
# Mercurial extension initialization
def norollback(*pats, **opts):
"""(disabled when using this extension)"""
raise hg_util.Abort("codereview extension enabled; use undo instead of rollback")
codereview_init = False
def reposetup(ui, repo):
global codereview_disabled
global defaultcc
# reposetup gets called both for the local repository
# and also for any repository we are pulling or pushing to.
# Only initialize the first time.
global codereview_init
if codereview_init:
return
codereview_init = True
# Read repository-specific options from lib/codereview/codereview.cfg or codereview.cfg.
root = ''
try:
root = repo.root
except:
# Yes, repo might not have root; see issue 959.
codereview_disabled = 'codereview disabled: repository has no root'
return
repo_config_path = ''
p1 = root + '/lib/codereview/codereview.cfg'
p2 = root + '/codereview.cfg'
if os.access(p1, os.F_OK):
repo_config_path = p1
else:
repo_config_path = p2
try:
f = open(repo_config_path)
for line in f:
if line.startswith('defaultcc:'):
defaultcc = SplitCommaSpace(line[len('defaultcc:'):])
if line.startswith('contributors:'):
global contributorsURL
contributorsURL = line[len('contributors:'):].strip()
except:
codereview_disabled = 'codereview disabled: cannot open ' + repo_config_path
return
remote = ui.config("paths", "default", "")
if remote.find("://") < 0:
raise hg_util.Abort("codereview: default path '%s' is not a URL" % (remote,))
InstallMatch(ui, repo)
RietveldSetup(ui, repo)
# Disable the Mercurial commands that might change the repository.
# Only commands in this extension are supposed to do that.
ui.setconfig("hooks", "precommit.codereview", precommithook)
# Rollback removes an existing commit. Don't do that either.
global real_rollback
real_rollback = repo.rollback
repo.rollback = norollback
#######################################################################
# Wrappers around upload.py for interacting with Rietveld
from HTMLParser import HTMLParser
# HTML form parser
class FormParser(HTMLParser):
def __init__(self):
self.map = {}
self.curtag = None
self.curdata = None
HTMLParser.__init__(self)
def handle_starttag(self, tag, attrs):
if tag == "input":
key = None
value = ''
for a in attrs:
if a[0] == 'name':
key = a[1]
if a[0] == 'value':
value = a[1]
if key is not None:
self.map[key] = value
if tag == "textarea":
key = None
for a in attrs:
if a[0] == 'name':
key = a[1]
if key is not None:
self.curtag = key
self.curdata = ''
def handle_endtag(self, tag):
if tag == "textarea" and self.curtag is not None:
self.map[self.curtag] = self.curdata
self.curtag = None
self.curdata = None
def handle_charref(self, name):
self.handle_data(unichr(int(name)))
def handle_entityref(self, name):
import htmlentitydefs
if name in htmlentitydefs.entitydefs:
self.handle_data(htmlentitydefs.entitydefs[name])
else:
self.handle_data("&" + name + ";")
def handle_data(self, data):
if self.curdata is not None:
self.curdata += data
def JSONGet(ui, path):
try:
data = MySend(path, force_auth=False)
typecheck(data, str)
d = fix_json(json.loads(data))
except:
ui.warn("JSONGet %s: %s\n" % (path, ExceptionDetail()))
return None
return d
# Clean up json parser output to match our expectations:
# * all strings are UTF-8-encoded str, not unicode.
# * missing fields are missing, not None,
# so that d.get("foo", defaultvalue) works.
def fix_json(x):
if type(x) in [str, int, float, bool, type(None)]:
pass
elif type(x) is unicode:
x = x.encode("utf-8")
elif type(x) is list:
for i in range(len(x)):
x[i] = fix_json(x[i])
elif type(x) is dict:
todel = []
for k in x:
if x[k] is None:
todel.append(k)
else:
x[k] = fix_json(x[k])
for k in todel:
del x[k]
else:
raise hg_util.Abort("unknown type " + str(type(x)) + " in fix_json")
if type(x) is str:
x = x.replace('\r\n', '\n')
return x
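# Minimal sketch of fix_json's normalization (sample data invented): unicode
# values become UTF-8 str, keys whose value is None are dropped, and CRLF
# inside strings becomes LF.
def _demo_fix_json():
	d = fix_json({u"desc": u"line1\r\nline2", u"cc": None})
	return d  # 'cc' is gone; d[u"desc"] == 'line1\nline2' as a str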
def IsRietveldSubmitted(ui, clname, hex):
dict = JSONGet(ui, "/api/" + clname + "?messages=true")
if dict is None:
return False
for msg in dict.get("messages", []):
text = msg.get("text", "")
m = re.match('\*\*\* Submitted as [^*]*?([0-9a-f]+) \*\*\*', text)
if m is not None and len(m.group(1)) >= 8 and hex.startswith(m.group(1)):
return True
return False
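# Sketch of the message matching above (hash and text invented): the commit
# hash quoted in a "*** Submitted as ... ***" message must be a prefix of the
# local revision's hex for the CL to count as submitted.
def _demo_submitted_match():
	text = "*** Submitted as 0123abcdef9876 ***\n\noriginal description"
	m = re.match('\*\*\* Submitted as [^*]*?([0-9a-f]+) \*\*\*', text)
	return m.group(1)  # -> '0123abcdef9876'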
def IsRietveldMailed(cl):
for msg in cl.dict.get("messages", []):
if msg.get("text", "").find("I'd like you to review this change") >= 0:
return True
return False
def DownloadCL(ui, repo, clname):
set_status("downloading CL " + clname)
cl, err = LoadCL(ui, repo, clname, web=True)
if err != "":
return None, None, None, "error loading CL %s: %s" % (clname, err)
# Find most recent diff
diffs = cl.dict.get("patchsets", [])
if not diffs:
return None, None, None, "CL has no patch sets"
patchid = diffs[-1]
patchset = JSONGet(ui, "/api/" + clname + "/" + str(patchid))
if patchset is None:
return None, None, None, "error loading CL patchset %s/%d" % (clname, patchid)
if patchset.get("patchset", 0) != patchid:
return None, None, None, "malformed patchset information"
vers = ""
msg = patchset.get("message", "").split()
if len(msg) >= 3 and msg[0] == "diff" and msg[1] == "-r":
vers = msg[2]
diff = "/download/issue" + clname + "_" + str(patchid) + ".diff"
diffdata = MySend(diff, force_auth=False)
# Print warning if email is not in CONTRIBUTORS file.
email = cl.dict.get("owner_email", "")
if not email:
return None, None, None, "cannot find owner for %s" % (clname)
him = FindContributor(ui, repo, email)
me = FindContributor(ui, repo, None)
if him == me:
cl.mailed = IsRietveldMailed(cl)
else:
cl.copied_from = email
return cl, vers, diffdata, ""
def MySend(request_path, payload=None,
content_type="application/octet-stream",
timeout=None, force_auth=True,
**kwargs):
"""Run MySend1 maybe twice, because Rietveld is unreliable."""
try:
return MySend1(request_path, payload, content_type, timeout, force_auth, **kwargs)
except Exception, e:
if type(e) != urllib2.HTTPError or e.code != 500: # only retry on HTTP 500 error
raise
print >>sys.stderr, "Loading "+request_path+": "+ExceptionDetail()+"; trying again in 2 seconds."
time.sleep(2)
return MySend1(request_path, payload, content_type, timeout, force_auth, **kwargs)
# Like upload.py Send but only authenticates when the
# redirect is to www.google.com/accounts. This keeps
# unnecessary redirects from happening during testing.
def MySend1(request_path, payload=None,
content_type="application/octet-stream",
timeout=None, force_auth=True,
**kwargs):
"""Sends an RPC and returns the response.
Args:
request_path: The path to send the request to, eg /api/appversion/create.
payload: The body of the request, or None to send an empty request.
content_type: The Content-Type header to use.
timeout: timeout in seconds; default None i.e. no timeout.
(Note: for large requests on OS X, the timeout doesn't work right.)
kwargs: Any keyword arguments are converted into query string parameters.
Returns:
The response body, as a string.
"""
# TODO: Don't require authentication. Let the server say
# whether it is necessary.
global rpc
	if rpc is None:
rpc = GetRpcServer(upload_options)
self = rpc
if not self.authenticated and force_auth:
self._Authenticate()
if request_path is None:
return
old_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
try:
tries = 0
while True:
tries += 1
args = dict(kwargs)
url = "http://%s%s" % (self.host, request_path)
if args:
url += "?" + urllib.urlencode(args)
req = self._CreateRequest(url=url, data=payload)
req.add_header("Content-Type", content_type)
try:
f = self.opener.open(req)
response = f.read()
f.close()
# Translate \r\n into \n, because Rietveld doesn't.
response = response.replace('\r\n', '\n')
# who knows what urllib will give us
if type(response) == unicode:
response = response.encode("utf-8")
typecheck(response, str)
return response
except urllib2.HTTPError, e:
if tries > 3:
raise
elif e.code == 401:
self._Authenticate()
elif e.code == 302:
loc = e.info()["location"]
if not loc.startswith('https://www.google.com/a') or loc.find('/ServiceLogin') < 0:
return ''
self._Authenticate()
else:
raise
finally:
socket.setdefaulttimeout(old_timeout)
def GetForm(url):
f = FormParser()
f.feed(ustr(MySend(url))) # f.feed wants unicode
f.close()
# convert back to utf-8 to restore sanity
m = {}
for k,v in f.map.items():
m[k.encode("utf-8")] = v.replace("\r\n", "\n").encode("utf-8")
return m
def EditDesc(issue, subject=None, desc=None, reviewers=None, cc=None, closed=False, private=False):
set_status("uploading change to description")
form_fields = GetForm("/" + issue + "/edit")
if subject is not None:
form_fields['subject'] = subject
if desc is not None:
form_fields['description'] = desc
if reviewers is not None:
form_fields['reviewers'] = reviewers
if cc is not None:
form_fields['cc'] = cc
if closed:
form_fields['closed'] = "checked"
if private:
form_fields['private'] = "checked"
ctype, body = EncodeMultipartFormData(form_fields.items(), [])
response = MySend("/" + issue + "/edit", body, content_type=ctype)
if response != "":
print >>sys.stderr, "Error editing description:\n" + "Sent form: \n", form_fields, "\n", response
sys.exit(2)
def PostMessage(ui, issue, message, reviewers=None, cc=None, send_mail=True, subject=None):
set_status("uploading message")
form_fields = GetForm("/" + issue + "/publish")
if reviewers is not None:
form_fields['reviewers'] = reviewers
if cc is not None:
form_fields['cc'] = cc
if send_mail:
form_fields['send_mail'] = "checked"
else:
del form_fields['send_mail']
if subject is not None:
form_fields['subject'] = subject
form_fields['message'] = message
form_fields['message_only'] = '1' # Don't include draft comments
if reviewers is not None or cc is not None:
form_fields['message_only'] = '' # Must set '' in order to override cc/reviewer
ctype = "applications/x-www-form-urlencoded"
body = urllib.urlencode(form_fields)
response = MySend("/" + issue + "/publish", body, content_type=ctype)
if response != "":
print response
sys.exit(2)
class opt(object):
pass
def RietveldSetup(ui, repo):
global force_google_account
global rpc
global server
global server_url_base
global upload_options
global verbosity
if not ui.verbose:
verbosity = 0
# Config options.
x = ui.config("codereview", "server")
if x is not None:
server = x
# TODO(rsc): Take from ui.username?
email = None
x = ui.config("codereview", "email")
if x is not None:
email = x
server_url_base = "http://" + server + "/"
testing = ui.config("codereview", "testing")
force_google_account = ui.configbool("codereview", "force_google_account", False)
upload_options = opt()
upload_options.email = email
upload_options.host = None
upload_options.verbose = 0
upload_options.description = None
upload_options.description_file = None
upload_options.reviewers = None
upload_options.cc = None
upload_options.message = None
upload_options.issue = None
upload_options.download_base = False
upload_options.revision = None
upload_options.send_mail = False
upload_options.vcs = None
upload_options.server = server
upload_options.save_cookies = True
if testing:
upload_options.save_cookies = False
upload_options.email = "test@example.com"
rpc = None
global releaseBranch
tags = repo.branchtags().keys()
if 'release-branch.r100' in tags:
# NOTE(rsc): This tags.sort is going to get the wrong
# answer when comparing release-branch.r99 with
# release-branch.r100. If we do ten releases a year
# that gives us 4 years before we have to worry about this.
raise hg_util.Abort('tags.sort needs to be fixed for release-branch.r100')
tags.sort()
for t in tags:
if t.startswith('release-branch.'):
releaseBranch = t
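# Why the r100 guard above matters (illustrative): a plain string sort puts
# 'release-branch.r100' before 'release-branch.r99', so the loop would pick
# r99 as the newest branch. A numeric sort key, e.g.
#	sorted(tags, key=lambda t: int(t.rsplit('.r', 1)[1]) if '.r' in t else 0)
# (sketch only, not used here) would order the release branches correctly.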
#######################################################################
# http://codereview.appspot.com/static/upload.py, heavily edited.
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for uploading diffs from a version control system to the codereview app.
Usage summary: upload.py [options] [-- diff_options]
Diff options are passed to the diff command of the underlying system.
Supported version control systems:
Git
Mercurial
Subversion
It is important for Git/Mercurial users to specify a tree/node/branch to diff
against by using the '--rev' option.
"""
# This code is derived from appcfg.py in the App Engine SDK (open source),
# and from ASPN recipe #146306.
import cookielib
import getpass
import logging
import mimetypes
import optparse
import os
import re
import socket
import subprocess
import sys
import urllib
import urllib2
import urlparse
# The md5 module was deprecated in Python 2.5.
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
import readline
except ImportError:
pass
# The logging verbosity:
# 0: Errors only.
# 1: Status messages.
# 2: Info logs.
# 3: Debug logs.
verbosity = 1
# Max size of patch or base file.
MAX_UPLOAD_SIZE = 900 * 1024
# whitelist for non-binary filetypes which do not start with "text/"
# .mm (Objective-C) shows up as application/x-freemind on my Linux box.
TEXT_MIMETYPES = [
'application/javascript',
'application/x-javascript',
'application/x-freemind'
]
def GetEmail(prompt):
"""Prompts the user for their email address and returns it.
The last used email address is saved to a file and offered up as a suggestion
to the user. If the user presses enter without typing in anything the last
used email address is used. If the user enters a new address, it is saved
for next time we prompt.
"""
last_email_file_name = os.path.expanduser("~/.last_codereview_email_address")
last_email = ""
if os.path.exists(last_email_file_name):
try:
last_email_file = open(last_email_file_name, "r")
last_email = last_email_file.readline().strip("\n")
last_email_file.close()
prompt += " [%s]" % last_email
except IOError, e:
pass
email = raw_input(prompt + ": ").strip()
if email:
try:
last_email_file = open(last_email_file_name, "w")
last_email_file.write(email)
last_email_file.close()
except IOError, e:
pass
else:
email = last_email
return email
def StatusUpdate(msg):
"""Print a status message to stdout.
If 'verbosity' is greater than 0, print the message.
Args:
msg: The string to print.
"""
if verbosity > 0:
print msg
def ErrorExit(msg):
"""Print an error message to stderr and exit."""
print >>sys.stderr, msg
sys.exit(1)
class ClientLoginError(urllib2.HTTPError):
"""Raised to indicate there was an error authenticating with ClientLogin."""
def __init__(self, url, code, msg, headers, args):
urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
self.args = args
self.reason = args["Error"]
class AbstractRpcServer(object):
"""Provides a common interface for a simple RPC server."""
def __init__(self, host, auth_function, host_override=None, extra_headers={}, save_cookies=False):
"""Creates a new HttpRpcServer.
Args:
host: The host to send requests to.
auth_function: A function that takes no arguments and returns an
(email, password) tuple when called. Will be called if authentication
is required.
host_override: The host header to send to the server (defaults to host).
extra_headers: A dict of extra headers to append to every request.
save_cookies: If True, save the authentication cookies to local disk.
If False, use an in-memory cookiejar instead. Subclasses must
implement this functionality. Defaults to False.
"""
self.host = host
self.host_override = host_override
self.auth_function = auth_function
self.authenticated = False
self.extra_headers = extra_headers
self.save_cookies = save_cookies
self.opener = self._GetOpener()
if self.host_override:
logging.info("Server: %s; Host: %s", self.host, self.host_override)
else:
logging.info("Server: %s", self.host)
def _GetOpener(self):
"""Returns an OpenerDirector for making HTTP requests.
Returns:
A urllib2.OpenerDirector object.
"""
raise NotImplementedError()
def _CreateRequest(self, url, data=None):
"""Creates a new urllib request."""
logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
req = urllib2.Request(url, data=data)
if self.host_override:
req.add_header("Host", self.host_override)
for key, value in self.extra_headers.iteritems():
req.add_header(key, value)
return req
def _GetAuthToken(self, email, password):
"""Uses ClientLogin to authenticate the user, returning an auth token.
Args:
email: The user's email address
password: The user's password
Raises:
ClientLoginError: If there was an error authenticating with ClientLogin.
HTTPError: If there was some other form of HTTP error.
Returns:
The authentication token returned by ClientLogin.
"""
account_type = "GOOGLE"
if self.host.endswith(".google.com") and not force_google_account:
# Needed for use inside Google.
account_type = "HOSTED"
req = self._CreateRequest(
url="https://www.google.com/accounts/ClientLogin",
data=urllib.urlencode({
"Email": email,
"Passwd": password,
"service": "ah",
"source": "rietveld-codereview-upload",
"accountType": account_type,
}),
)
try:
response = self.opener.open(req)
response_body = response.read()
response_dict = dict(x.split("=") for x in response_body.split("\n") if x)
return response_dict["Auth"]
except urllib2.HTTPError, e:
if e.code == 403:
body = e.read()
response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
raise ClientLoginError(req.get_full_url(), e.code, e.msg, e.headers, response_dict)
else:
raise
def _GetAuthCookie(self, auth_token):
"""Fetches authentication cookies for an authentication token.
Args:
auth_token: The authentication token returned by ClientLogin.
Raises:
HTTPError: If there was an error fetching the authentication cookies.
"""
# This is a dummy value to allow us to identify when we're successful.
continue_location = "http://localhost/"
args = {"continue": continue_location, "auth": auth_token}
req = self._CreateRequest("http://%s/_ah/login?%s" % (self.host, urllib.urlencode(args)))
try:
response = self.opener.open(req)
except urllib2.HTTPError, e:
response = e
if (response.code != 302 or
response.info()["location"] != continue_location):
raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg, response.headers, response.fp)
self.authenticated = True
def _Authenticate(self):
"""Authenticates the user.
The authentication process works as follows:
1) We get a username and password from the user
2) We use ClientLogin to obtain an AUTH token for the user
(see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
3) We pass the auth token to /_ah/login on the server to obtain an
authentication cookie. If login was successful, it tries to redirect
us to the URL we provided.
If we attempt to access the upload API without first obtaining an
authentication cookie, it returns a 401 response (or a 302) and
directs us to authenticate ourselves with ClientLogin.
"""
for i in range(3):
credentials = self.auth_function()
try:
auth_token = self._GetAuthToken(credentials[0], credentials[1])
except ClientLoginError, e:
if e.reason == "BadAuthentication":
print >>sys.stderr, "Invalid username or password."
continue
if e.reason == "CaptchaRequired":
print >>sys.stderr, (
"Please go to\n"
"https://www.google.com/accounts/DisplayUnlockCaptcha\n"
"and verify you are a human. Then try again.")
break
if e.reason == "NotVerified":
print >>sys.stderr, "Account not verified."
break
if e.reason == "TermsNotAgreed":
print >>sys.stderr, "User has not agreed to TOS."
break
if e.reason == "AccountDeleted":
print >>sys.stderr, "The user account has been deleted."
break
if e.reason == "AccountDisabled":
print >>sys.stderr, "The user account has been disabled."
break
if e.reason == "ServiceDisabled":
print >>sys.stderr, "The user's access to the service has been disabled."
break
if e.reason == "ServiceUnavailable":
print >>sys.stderr, "The service is not available; try again later."
break
raise
self._GetAuthCookie(auth_token)
return
def Send(self, request_path, payload=None,
content_type="application/octet-stream",
timeout=None,
**kwargs):
"""Sends an RPC and returns the response.
Args:
request_path: The path to send the request to, eg /api/appversion/create.
payload: The body of the request, or None to send an empty request.
content_type: The Content-Type header to use.
timeout: timeout in seconds; default None i.e. no timeout.
(Note: for large requests on OS X, the timeout doesn't work right.)
kwargs: Any keyword arguments are converted into query string parameters.
Returns:
The response body, as a string.
"""
# TODO: Don't require authentication. Let the server say
# whether it is necessary.
if not self.authenticated:
self._Authenticate()
old_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
try:
tries = 0
while True:
tries += 1
args = dict(kwargs)
url = "http://%s%s" % (self.host, request_path)
if args:
url += "?" + urllib.urlencode(args)
req = self._CreateRequest(url=url, data=payload)
req.add_header("Content-Type", content_type)
try:
f = self.opener.open(req)
response = f.read()
f.close()
return response
except urllib2.HTTPError, e:
if tries > 3:
raise
elif e.code == 401 or e.code == 302:
self._Authenticate()
else:
raise
finally:
socket.setdefaulttimeout(old_timeout)
class HttpRpcServer(AbstractRpcServer):
"""Provides a simplified RPC-style interface for HTTP requests."""
def _Authenticate(self):
"""Save the cookie jar after authentication."""
super(HttpRpcServer, self)._Authenticate()
if self.save_cookies:
StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
self.cookie_jar.save()
def _GetOpener(self):
"""Returns an OpenerDirector that supports cookies and ignores redirects.
Returns:
A urllib2.OpenerDirector object.
"""
opener = urllib2.OpenerDirector()
opener.add_handler(urllib2.ProxyHandler())
opener.add_handler(urllib2.UnknownHandler())
opener.add_handler(urllib2.HTTPHandler())
opener.add_handler(urllib2.HTTPDefaultErrorHandler())
opener.add_handler(urllib2.HTTPSHandler())
opener.add_handler(urllib2.HTTPErrorProcessor())
if self.save_cookies:
self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies_" + server)
self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
if os.path.exists(self.cookie_file):
try:
self.cookie_jar.load()
self.authenticated = True
StatusUpdate("Loaded authentication cookies from %s" % self.cookie_file)
except (cookielib.LoadError, IOError):
# Failed to load cookies - just ignore them.
pass
else:
# Create an empty cookie file with mode 600
fd = os.open(self.cookie_file, os.O_CREAT, 0600)
os.close(fd)
# Always chmod the cookie file
os.chmod(self.cookie_file, 0600)
else:
# Don't save cookies across runs of update.py.
self.cookie_jar = cookielib.CookieJar()
opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
return opener
def GetRpcServer(options):
"""Returns an instance of an AbstractRpcServer.
Returns:
A new AbstractRpcServer, on which RPC calls can be made.
"""
rpc_server_class = HttpRpcServer
def GetUserCredentials():
"""Prompts the user for a username and password."""
# Disable status prints so they don't obscure the password prompt.
global global_status
st = global_status
global_status = None
email = options.email
if email is None:
email = GetEmail("Email (login for uploading to %s)" % options.server)
password = getpass.getpass("Password for %s: " % email)
# Put status back.
global_status = st
return (email, password)
# If this is the dev_appserver, use fake authentication.
host = (options.host or options.server).lower()
if host == "localhost" or host.startswith("localhost:"):
email = options.email
if email is None:
email = "test@example.com"
logging.info("Using debug user %s. Override with --email" % email)
server = rpc_server_class(
options.server,
lambda: (email, "password"),
host_override=options.host,
extra_headers={"Cookie": 'dev_appserver_login="%s:False"' % email},
save_cookies=options.save_cookies)
# Don't try to talk to ClientLogin.
server.authenticated = True
return server
return rpc_server_class(options.server, GetUserCredentials,
host_override=options.host, save_cookies=options.save_cookies)
def EncodeMultipartFormData(fields, files):
"""Encode form fields for multipart/form-data.
Args:
fields: A sequence of (name, value) elements for regular form fields.
files: A sequence of (name, filename, value) elements for data to be
uploaded as files.
Returns:
(content_type, body) ready for httplib.HTTP instance.
Source:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
"""
BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
CRLF = '\r\n'
lines = []
for (key, value) in fields:
typecheck(key, str)
typecheck(value, str)
lines.append('--' + BOUNDARY)
lines.append('Content-Disposition: form-data; name="%s"' % key)
lines.append('')
lines.append(value)
for (key, filename, value) in files:
typecheck(key, str)
typecheck(filename, str)
typecheck(value, str)
lines.append('--' + BOUNDARY)
lines.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename))
lines.append('Content-Type: %s' % GetContentType(filename))
lines.append('')
lines.append(value)
lines.append('--' + BOUNDARY + '--')
lines.append('')
body = CRLF.join(lines)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, body
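# Minimal sketch of the encoder above (field name and value invented):
def _demo_multipart():
	ctype, body = EncodeMultipartFormData([("subject", "demo")], [])
	# ctype carries the fixed boundary; body holds one form-data part for
	# "subject" followed by the closing boundary, joined with CRLF.
	return ctype, body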
def GetContentType(filename):
"""Helper to guess the content-type from the filename."""
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
# Use a shell for subcommands on Windows to get a PATH search.
use_shell = sys.platform.startswith("win")
def RunShellWithReturnCode(command, print_output=False,
universal_newlines=True, env=os.environ):
"""Executes a command and returns the output from stdout and the return code.
Args:
command: Command to execute.
    print_output: If True, stdout is echoed line by line as it is produced
      and stderr is printed at the end; if False, stdout is captured and
      returned silently and stderr is discarded.
    universal_newlines: Use the universal_newlines flag (default: True).
    env: Environment to use when running the command (default: os.environ).
Returns:
Tuple (output, return code)
"""
logging.info("Running %s", command)
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=use_shell, universal_newlines=universal_newlines, env=env)
if print_output:
output_array = []
while True:
line = p.stdout.readline()
if not line:
break
print line.strip("\n")
output_array.append(line)
output = "".join(output_array)
else:
output = p.stdout.read()
p.wait()
errout = p.stderr.read()
if print_output and errout:
print >>sys.stderr, errout
p.stdout.close()
p.stderr.close()
return output, p.returncode
def RunShell(command, silent_ok=False, universal_newlines=True,
print_output=False, env=os.environ):
data, retcode = RunShellWithReturnCode(command, print_output, universal_newlines, env)
if retcode:
ErrorExit("Got error status from %s:\n%s" % (command, data))
if not silent_ok and not data:
ErrorExit("No output from %s" % command)
return data
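# Illustrative sketch, not part of the original script. Assumes an "echo"
# command is available on the PATH; RunShell captures stdout and calls
# ErrorExit on a non-zero exit status or (unless silent_ok) empty output.
def _demo_run_shell():
  out = RunShell(["echo", "hello"])
  assert out.strip() == "hello"
  return out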
class VersionControlSystem(object):
"""Abstract base class providing an interface to the VCS."""
def __init__(self, options):
"""Constructor.
Args:
options: Command line options.
"""
self.options = options
def GenerateDiff(self, args):
"""Return the current diff as a string.
Args:
args: Extra arguments to pass to the diff command.
"""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def GetUnknownFiles(self):
"""Return a list of files unknown to the VCS."""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def CheckForUnknownFiles(self):
"""Show an "are you sure?" prompt if there are unknown files."""
unknown_files = self.GetUnknownFiles()
if unknown_files:
print "The following files are not added to version control:"
for line in unknown_files:
print line
prompt = "Are you sure to continue?(y/N) "
answer = raw_input(prompt).strip()
if answer != "y":
ErrorExit("User aborted")
def GetBaseFile(self, filename):
"""Get the content of the upstream version of a file.
Returns:
A tuple (base_content, new_content, is_binary, status)
base_content: The contents of the base file.
new_content: For text files, this is empty. For binary files, this is
the contents of the new file, since the diff output won't contain
information to reconstruct the current file.
is_binary: True iff the file is binary.
status: The status of the file.
"""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def GetBaseFiles(self, diff):
"""Helper that calls GetBase file for each file in the patch.
Returns:
A dictionary that maps from filename to GetBaseFile's tuple. Filenames
are retrieved based on lines that start with "Index:" or
"Property changes on:".
"""
files = {}
for line in diff.splitlines(True):
if line.startswith('Index:') or line.startswith('Property changes on:'):
unused, filename = line.split(':', 1)
# On Windows if a file has property changes its filename uses '\'
# instead of '/'.
filename = to_slash(filename.strip())
files[filename] = self.GetBaseFile(filename)
return files
def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,
files):
"""Uploads the base files (and if necessary, the current ones as well)."""
def UploadFile(filename, file_id, content, is_binary, status, is_base):
"""Uploads a file to the server."""
set_status("uploading " + filename)
file_too_large = False
if is_base:
type = "base"
else:
type = "current"
if len(content) > MAX_UPLOAD_SIZE:
print ("Not uploading the %s file for %s because it's too large." %
(type, filename))
file_too_large = True
content = ""
checksum = md5(content).hexdigest()
if options.verbose > 0 and not file_too_large:
print "Uploading %s file for %s" % (type, filename)
url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
form_fields = [
("filename", filename),
("status", status),
("checksum", checksum),
("is_binary", str(is_binary)),
("is_current", str(not is_base)),
]
if file_too_large:
form_fields.append(("file_too_large", "1"))
if options.email:
form_fields.append(("user", options.email))
ctype, body = EncodeMultipartFormData(form_fields, [("data", filename, content)])
response_body = rpc_server.Send(url, body, content_type=ctype)
if not response_body.startswith("OK"):
StatusUpdate(" --> %s" % response_body)
sys.exit(1)
# Don't want to spawn too many threads, nor do we want to
# hit Rietveld too hard, or it will start serving 500 errors.
# When 8 works, it's no better than 4, and sometimes 8 is
# too many for Rietveld to handle.
MAX_PARALLEL_UPLOADS = 4
sema = threading.BoundedSemaphore(MAX_PARALLEL_UPLOADS)
upload_threads = []
finished_upload_threads = []
class UploadFileThread(threading.Thread):
def __init__(self, args):
threading.Thread.__init__(self)
self.args = args
def run(self):
UploadFile(*self.args)
finished_upload_threads.append(self)
sema.release()
def StartUploadFile(*args):
sema.acquire()
while len(finished_upload_threads) > 0:
t = finished_upload_threads.pop()
upload_threads.remove(t)
t.join()
t = UploadFileThread(args)
upload_threads.append(t)
t.start()
def WaitForUploads():
for t in upload_threads:
t.join()
patches = dict()
[patches.setdefault(v, k) for k, v in patch_list]
for filename in patches.keys():
base_content, new_content, is_binary, status = files[filename]
file_id_str = patches.get(filename)
if file_id_str.find("nobase") != -1:
base_content = None
file_id_str = file_id_str[file_id_str.rfind("_") + 1:]
file_id = int(file_id_str)
if base_content != None:
StartUploadFile(filename, file_id, base_content, is_binary, status, True)
if new_content != None:
StartUploadFile(filename, file_id, new_content, is_binary, status, False)
WaitForUploads()
def IsImage(self, filename):
"""Returns true if the filename has an image extension."""
mimetype = mimetypes.guess_type(filename)[0]
if not mimetype:
return False
return mimetype.startswith("image/")
def IsBinary(self, filename):
"""Returns true if the guessed mimetyped isnt't in text group."""
mimetype = mimetypes.guess_type(filename)[0]
if not mimetype:
return False # e.g. README, "real" binaries usually have an extension
# special case for text files which don't start with text/
if mimetype in TEXT_MIMETYPES:
return False
return not mimetype.startswith("text/")
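# Illustrative sketch, not part of the original file: guess_type() is purely
# extension-driven, which is why IsImage/IsBinary above treat files without
# an extension (e.g. README) as text.
def _demo_mimetype_guess():
  assert mimetypes.guess_type("photo.png")[0] == "image/png"
  assert mimetypes.guess_type("README")[0] is None
  assert GetContentType("README") == "application/octet-stream"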
class FakeMercurialUI(object):
def __init__(self):
self.quiet = True
self.output = ''
def write(self, *args, **opts):
self.output += ' '.join(args)
def copy(self):
return self
def status(self, *args, **opts):
pass
def readconfig(self, *args, **opts):
pass
def expandpath(self, *args, **opts):
return global_ui.expandpath(*args, **opts)
def configitems(self, *args, **opts):
return global_ui.configitems(*args, **opts)
def config(self, *args, **opts):
return global_ui.config(*args, **opts)
use_hg_shell = False # set to True to shell out to hg always; slower
class MercurialVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Mercurial."""
def __init__(self, options, ui, repo):
super(MercurialVCS, self).__init__(options)
self.ui = ui
self.repo = repo
self.status = None
# Absolute path to repository (we can be in a subdir)
self.repo_dir = os.path.normpath(repo.root)
# Compute the subdir
cwd = os.path.normpath(os.getcwd())
assert cwd.startswith(self.repo_dir)
self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/")
if self.options.revision:
self.base_rev = self.options.revision
else:
mqparent, err = RunShellWithReturnCode(['hg', 'log', '--rev', 'qparent', '--template={node}'])
if not err and mqparent != "":
self.base_rev = mqparent
else:
out = RunShell(["hg", "parents", "-q"], silent_ok=True).strip()
if not out:
# No revisions; use 0 to mean a repository with nothing.
out = "0:0"
self.base_rev = out.split(':')[1].strip()
def _GetRelPath(self, filename):
"""Get relative path of a file according to the current directory,
given its logical path in the repo."""
assert filename.startswith(self.subdir), (filename, self.subdir)
return filename[len(self.subdir):].lstrip(r"\/")
def GenerateDiff(self, extra_args):
# If no file specified, restrict to the current subdir
extra_args = extra_args or ["."]
cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args
data = RunShell(cmd, silent_ok=True)
svndiff = []
filecount = 0
for line in data.splitlines():
m = re.match("diff --git a/(\S+) b/(\S+)", line)
if m:
        # Modify the line to make it look as if it comes from svn diff.
# With this modification no changes on the server side are required
# to make upload.py work with Mercurial repos.
# NOTE: for proper handling of moved/copied files, we have to use
# the second filename.
filename = m.group(2)
svndiff.append("Index: %s" % filename)
svndiff.append("=" * 67)
filecount += 1
logging.info(line)
else:
svndiff.append(line)
if not filecount:
ErrorExit("No valid patches found in output from hg diff")
return "\n".join(svndiff) + "\n"
def GetUnknownFiles(self):
"""Return a list of files unknown to the VCS."""
args = []
status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."],
silent_ok=True)
unknown_files = []
for line in status.splitlines():
st, fn = line.split(" ", 1)
if st == "?":
unknown_files.append(fn)
return unknown_files
def get_hg_status(self, rev, path):
# We'd like to use 'hg status -C path', but that is buggy
# (see http://mercurial.selenic.com/bts/issue3023).
# Instead, run 'hg status -C' without a path
# and skim the output for the path we want.
if self.status is None:
if use_hg_shell:
out = RunShell(["hg", "status", "-C", "--rev", rev])
else:
fui = FakeMercurialUI()
ret = hg_commands.status(fui, self.repo, *[], **{'rev': [rev], 'copies': True})
if ret:
raise hg_util.Abort(ret)
out = fui.output
self.status = out.splitlines()
for i in range(len(self.status)):
# line is
# A path
# M path
# etc
line = to_slash(self.status[i])
if line[2:] == path:
if i+1 < len(self.status) and self.status[i+1][:2] == ' ':
return self.status[i:i+2]
return self.status[i:i+1]
raise hg_util.Abort("no status for " + path)
def GetBaseFile(self, filename):
set_status("inspecting " + filename)
# "hg status" and "hg cat" both take a path relative to the current subdir
# rather than to the repo root, but "hg diff" has given us the full path
# to the repo root.
base_content = ""
new_content = None
is_binary = False
oldrelpath = relpath = self._GetRelPath(filename)
out = self.get_hg_status(self.base_rev, relpath)
status, what = out[0].split(' ', 1)
if len(out) > 1 and status == "A" and what == relpath:
oldrelpath = out[1].strip()
status = "M"
if ":" in self.base_rev:
base_rev = self.base_rev.split(":", 1)[0]
else:
base_rev = self.base_rev
if status != "A":
if use_hg_shell:
base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath], silent_ok=True)
else:
base_content = str(self.repo[base_rev][oldrelpath].data())
is_binary = "\0" in base_content # Mercurial's heuristic
if status != "R":
new_content = open(relpath, "rb").read()
is_binary = is_binary or "\0" in new_content
if is_binary and base_content and use_hg_shell:
# Fetch again without converting newlines
base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath],
silent_ok=True, universal_newlines=False)
if not is_binary or not self.IsImage(relpath):
new_content = None
return base_content, new_content, is_binary, status
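# Illustrative sketch, not part of the original file: the header rewrite
# performed in MercurialVCS.GenerateDiff, shown standalone on a fake
# "diff --git" line.
def _demo_git_to_svn_header():
  line = "diff --git a/dir/file.py b/dir/file.py"
  m = re.match(r"diff --git a/(\S+) b/(\S+)", line)
  # the second filename is used so moves/copies point at the new path
  assert m and m.group(2) == "dir/file.py"
  return ["Index: %s" % m.group(2), "=" * 67]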
# NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync.
def SplitPatch(data):
"""Splits a patch into separate pieces for each file.
Args:
data: A string containing the output of svn diff.
Returns:
A list of 2-tuple (filename, text) where text is the svn diff output
pertaining to filename.
"""
patches = []
filename = None
diff = []
for line in data.splitlines(True):
new_filename = None
if line.startswith('Index:'):
unused, new_filename = line.split(':', 1)
new_filename = new_filename.strip()
elif line.startswith('Property changes on:'):
unused, temp_filename = line.split(':', 1)
# When a file is modified, paths use '/' between directories, however
# when a property is modified '\' is used on Windows. Make them the same
# otherwise the file shows up twice.
temp_filename = to_slash(temp_filename.strip())
if temp_filename != filename:
# File has property changes but no modifications, create a new diff.
new_filename = temp_filename
if new_filename:
if filename and diff:
patches.append((filename, ''.join(diff)))
filename = new_filename
diff = [line]
continue
if diff is not None:
diff.append(line)
if filename and diff:
patches.append((filename, ''.join(diff)))
return patches
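# Illustrative sketch, not part of the original file: SplitPatch on a toy
# two-file svn-style diff.
def _demo_split_patch():
  data = ("Index: a.py\n--- a.py\n+++ a.py\n"
          "Index: b.py\n--- b.py\n+++ b.py\n")
  patches = SplitPatch(data)
  assert [name for (name, _) in patches] == ["a.py", "b.py"]
  return patches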
def UploadSeparatePatches(issue, rpc_server, patchset, data, options):
"""Uploads a separate patch for each file in the diff output.
Returns a list of [patch_key, filename] for each file.
"""
patches = SplitPatch(data)
rv = []
for patch in patches:
set_status("uploading patch for " + patch[0])
if len(patch[1]) > MAX_UPLOAD_SIZE:
print ("Not uploading the patch for " + patch[0] +
" because the file is too large.")
continue
form_fields = [("filename", patch[0])]
if not options.download_base:
form_fields.append(("content_upload", "1"))
files = [("data", "data.diff", patch[1])]
ctype, body = EncodeMultipartFormData(form_fields, files)
url = "/%d/upload_patch/%d" % (int(issue), int(patchset))
print "Uploading patch for " + patch[0]
response_body = rpc_server.Send(url, body, content_type=ctype)
lines = response_body.splitlines()
if not lines or lines[0] != "OK":
StatusUpdate(" --> %s" % response_body)
sys.exit(1)
rv.append([lines[1], patch[0]])
return rv
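# Illustrative sketch, not part of the original file: the bounded-semaphore
# pattern used in VersionControlSystem.UploadBaseFiles, with a dummy task in
# place of the Rietveld RPC.
def _demo_bounded_parallelism(n_tasks=8, max_parallel=4):
  sema = threading.BoundedSemaphore(max_parallel)
  done = []
  def task(i):
    try:
      done.append(i)
    finally:
      sema.release()
  workers = []
  for i in range(n_tasks):
    sema.acquire()  # blocks once max_parallel tasks are in flight
    t = threading.Thread(target=task, args=(i,))
    workers.append(t)
    t.start()
  for t in workers:
    t.join()
  return sorted(done)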
|
tav/go
|
lib/codereview/codereview.py
|
Python
|
bsd-3-clause
| 105,552
|
[
"VisIt"
] |
798df0a2134e63ca368d4b945fdf7bf4174152bd8e46b6ffc82dd0eb542c313d
|
# Copyright 2021 The TensorFlow Ranking Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""The DASALC model in the following ICLR paper.
Zhen Qin, Le Yan, Honglei Zhuang, Yi Tay, Rama Kumar Pasumarthi, Xuanhui Wang,
Mike Bendersky, Marc Najork
"Are Neural Rankers still Outperformed by Gradient Boosted Decision Trees?"
ICLR 2021
The default hyperparameters set in this file were used on the Web30K dataset.
For the Yahoo and Istella datasets we used the following configurations:
Yahoo:
  train_batch_size: 256
  learning_rate: 0.0001
  dropout_rate: 0.7
  input_noise_stddev: 4.5
  num_attention_heads: 4
Istella:
  hidden_layer_dims: 3072
  input_noise_stddev: 0.2
  num_attention_layers: 2
Note that the metrics reported by TF-Ranking are usually lower than the
numbers reported in the paper: in TF-Ranking evaluation, queries with no
relevant docs receive zero ranking metrics, whereas such queries are ignored
in the paper's evaluation, as is the norm in the literature.
The numbers reported in the paper were based on models trained in a distributed
training environment. Due to the hardware difference and randomness, you may
still need to re-tune some of the hyperparameters.
The supported proto formats are listed at ../python/data.py.
--------------------------------------------------------------------------------
Sample command lines:
MODEL_DIR=/tmp/output && \
TRAIN=tensorflow_ranking/examples/data/train_numerical_elwc.tfrecord && \
EVAL=tensorflow_ranking/examples/data/vali_numerical_elwc.tfrecord && \
rm -rf $MODEL_DIR && \
bazel build -c opt \
tensorflow_ranking/research/dasalc_py_binary && \
./bazel-bin/tensorflow_ranking/research/dasalc_py_binary \
--train_input_pattern=$TRAIN \
--eval_input_pattern=$EVAL \
--model_dir=$MODEL_DIR
You can use TensorBoard to display the training results stored in $MODEL_DIR.
Notes:
* Use --alsologtostderr if the output is not printed into screen.
"""
from absl import flags
import tensorflow as tf
from tensorflow.python.estimator.canned import optimizers
import tensorflow_ranking as tfr
flags.DEFINE_string("train_input_pattern", None,
"Input file path used for training.")
flags.DEFINE_string("eval_input_pattern", None,
"Input file path used for eval.")
flags.DEFINE_string("model_dir", None, "Output directory for models.")
flags.DEFINE_integer("batch_size", 128, "The batch size for train.")
flags.DEFINE_integer("num_train_steps", 15000, "Number of steps for train.")
flags.DEFINE_integer("num_eval_steps", 100, "Number of steps for evaluation.")
flags.DEFINE_integer("checkpoint_secs", 30,
"Saves a model checkpoint every checkpoint_secs seconds.")
flags.DEFINE_integer("num_checkpoints", 100,
"Saves at most num_checkpoints checkpoints in workspace.")
flags.DEFINE_integer("num_features", 136, "Number of features per example.")
flags.DEFINE_integer(
"list_size", 200,
"List size used for training. Use None for dynamic list size.")
flags.DEFINE_float("learning_rate", 0.001, "Learning rate for optimizer.")
flags.DEFINE_float("dropout_rate", 0.4, "The dropout rate before output layer.")
flags.DEFINE_integer("hidden_layer_dims", 512,
"Number of units in each hidden layer.")
flags.DEFINE_string("loss", "softmax_loss",
"The RankingLossKey for the loss function.")
flags.DEFINE_float("batch_norm_moment", 0.9,
"Batch Normalization's momentum hyperparameter.")
flags.DEFINE_float("input_noise_stddev", 1.5,
"Input Gaussian noise standard deviation.")
flags.DEFINE_integer("num_attention_layers", 4,
"number of self attention layers.")
flags.DEFINE_integer("num_attention_heads", 2,
"number of self attention heads.")
flags.DEFINE_integer("head_size", 100, "Size of attention head.")
FLAGS = flags.FLAGS
_LABEL_FEATURE = "utility"
_CUTOFF = 30.
_MASK = "example_list_mask"
_PADDING_LABEL = -1
def context_feature_columns():
"""Returns context feature columns."""
return {}
def example_feature_columns():
"""Returns the example feature columns."""
feature_names = [
"custom_features_{}".format(i + 1) for i in range(FLAGS.num_features)
]
def log1p_cutoff(t):
return tf.clip_by_value(
tf.math.log1p(tf.abs(t)) * tf.sign(t), -_CUTOFF, _CUTOFF)
example_feature_columns_ = {}
for name in feature_names:
example_feature_columns_[name] = tf.feature_column.numeric_column(
name, shape=(1,), default_value=0.0, normalizer_fn=log1p_cutoff)
return example_feature_columns_
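# Illustrative sketch, not part of the original file: the sign-preserving,
# clipped log1p transform applied by the numeric columns above.
def _demo_log1p_cutoff():
  t = tf.constant([-10.0, 0.0, 10.0])
  out = tf.clip_by_value(
      tf.math.log1p(tf.abs(t)) * tf.sign(t), -_CUTOFF, _CUTOFF)
  # log1p(10) ~= 2.398, so these values stay well inside the +/-30 cutoff.
  return out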
def transform_function(features, mode):
"""Transform function for DASALC model."""
mask = features.pop(_MASK)
context_features, example_features = tfr.feature.encode_listwise_features(
features=features,
context_feature_columns=context_feature_columns(),
example_feature_columns=example_feature_columns(),
mode=mode,
scope="transform_layer")
training = (mode == tf.estimator.ModeKeys.TRAIN)
concat_tensor = tfr.keras.layers.ConcatFeatures()(
inputs=(context_features, example_features, mask))
din_layer = tfr.keras.layers.DocumentInteractionAttention(
num_heads=FLAGS.num_attention_heads,
head_size=FLAGS.head_size,
num_layers=FLAGS.num_attention_layers,
dropout=FLAGS.dropout_rate,
input_noise_stddev=FLAGS.input_noise_stddev)
example_features["document_interaction_network_embedding"] = din_layer(
inputs=(concat_tensor, mask), training=training)
return context_features, example_features
def scoring_function(context_features, example_features, mode):
"""A feed-forward network to score query-document pairs."""
del context_features
with tf.compat.v1.name_scope("input_layer"):
input_features = [
tf.compat.v1.layers.flatten(example_features[name])
for name in sorted(example_features)
if name != "document_interaction_network_embedding"
]
input_layer = tf.concat(input_features, 1)
context_input = tf.compat.v1.layers.flatten(
example_features["document_interaction_network_embedding"])
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
cur_layer = tf.compat.v1.layers.batch_normalization(
input_layer, training=is_training, momentum=FLAGS.batch_norm_moment)
cur_layer = tf.keras.layers.GaussianNoise(FLAGS.input_noise_stddev)(
cur_layer, training=is_training)
context_layer = tf.compat.v1.layers.batch_normalization(
context_input, training=is_training, momentum=FLAGS.batch_norm_moment)
last_dim = FLAGS.hidden_layer_dims
for layer_width in [FLAGS.hidden_layer_dims, FLAGS.hidden_layer_dims]:
cur_layer = tf.compat.v1.layers.dense(cur_layer, units=layer_width)
cur_layer = tf.nn.relu(cur_layer)
cur_layer = tf.compat.v1.layers.batch_normalization(
cur_layer, training=is_training, momentum=FLAGS.batch_norm_moment)
cur_layer = tf.compat.v1.layers.dropout(
inputs=cur_layer, rate=FLAGS.dropout_rate, training=is_training)
cur_layer = tf.compat.v1.layers.dense(cur_layer, units=last_dim)
context_layer = tf.compat.v1.layers.dense(context_layer, units=last_dim)
output_layer = tf.math.multiply(cur_layer, context_layer)
output_layer = tf.math.add(output_layer, cur_layer)
output_layer = tf.nn.relu(output_layer)
output_layer = tf.compat.v1.layers.dropout(
inputs=output_layer, rate=FLAGS.dropout_rate, training=is_training)
return tf.compat.v1.layers.dense(output_layer, units=1)
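# Illustrative sketch, not part of the original file: the final combination
# in scoring_function is relu(cur * context + cur), i.e. the DIN context
# embedding gates the feed-forward branch before a residual add.
def _demo_gated_residual():
  cur = tf.constant([[1.0, -2.0]])
  ctx = tf.constant([[0.5, 3.0]])
  return tf.nn.relu(cur * ctx + cur)  # [[1.5, 0.0]]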
class DASALCPipeline(tfr.ext.pipeline.RankingPipeline):
"""A custom ranking pipeline for dasalc model."""
def _make_serving_input_fn(self):
"""Returns `Estimator` `input_fn` for serving the model."""
context_feature_spec = tf.feature_column.make_parse_example_spec(
context_feature_columns().values())
example_feature_spec = tf.feature_column.make_parse_example_spec(
example_feature_columns().values())
serving_input_receiver_fn = (
tfr.data.build_ranking_serving_input_receiver_fn(
data_format="example_list_with_context",
context_feature_spec=context_feature_spec,
example_feature_spec=example_feature_spec,
mask_feature_name=_MASK))
return serving_input_receiver_fn
def _make_dataset(self,
batch_size,
list_size,
input_pattern,
randomize_input=True,
num_epochs=None):
"""Overwrites the inner immplementation of input function.
Args:
batch_size: (int) The number of input examples to process per batch. Use
params['batch_size'] for TPUEstimator, and `batch_size` for Estimator.
list_size: (int) The list size for an ELWC example.
input_pattern: (str) File pattern for the input data.
randomize_input: (bool) If true, randomize input example order. It should
almost always be true except for unittest/debug purposes.
num_epochs: (int) The number of times the input dataset must be repeated.
None to repeat the data indefinitely.
Returns:
A tuple of (feature tensors, label tensor).
"""
context_feature_spec = tf.feature_column.make_parse_example_spec(
self._context_feature_columns.values())
label_column = tf.feature_column.numeric_column(
self._label_feature_name,
dtype=self._label_feature_type,
default_value=_PADDING_LABEL)
example_feature_spec = tf.feature_column.make_parse_example_spec(
list(self._example_feature_columns.values()) + [label_column])
dataset = tfr.data.build_ranking_dataset(
file_pattern=input_pattern,
data_format="example_list_with_context",
batch_size=batch_size,
list_size=list_size,
context_feature_spec=context_feature_spec,
example_feature_spec=example_feature_spec,
reader=tf.data.TFRecordDataset,
shuffle=randomize_input,
num_epochs=num_epochs,
prefetch_buffer_size=10000,
reader_num_threads=64,
mask_feature_name=_MASK)
return dataset.map(self._features_and_labels)
def train_and_eval():
"""Train and Evaluate."""
hparams = {
"train_input_pattern": FLAGS.train_input_pattern,
"eval_input_pattern": FLAGS.eval_input_pattern,
"learning_rate": FLAGS.learning_rate,
"train_batch_size": FLAGS.batch_size,
"eval_batch_size": FLAGS.batch_size,
"predict_batch_size": FLAGS.batch_size,
"num_train_steps": FLAGS.num_train_steps,
"num_eval_steps": FLAGS.num_eval_steps,
"checkpoint_secs": FLAGS.checkpoint_secs,
"num_checkpoints": FLAGS.num_checkpoints,
"loss": FLAGS.loss,
"list_size": FLAGS.list_size,
"listwise_inference": True,
"convert_labels_to_binary": False,
"model_dir": FLAGS.model_dir
}
optimizer = optimizers.get_optimizer_instance(
"Adam", learning_rate=FLAGS.learning_rate)
estimator = tfr.estimator.EstimatorBuilder(
context_feature_columns=context_feature_columns(),
example_feature_columns=example_feature_columns(),
scoring_function=scoring_function,
transform_function=transform_function,
optimizer=optimizer,
loss_reduction=tf.compat.v1.losses.Reduction.MEAN,
hparams=hparams).make_estimator()
ranking_pipeline = DASALCPipeline(
context_feature_columns=context_feature_columns(),
example_feature_columns=example_feature_columns(),
hparams=hparams,
estimator=estimator,
label_feature_name="utility",
label_feature_type=tf.int64,
best_exporter_metric="metric/ndcg_5")
ranking_pipeline.train_and_eval()
def main(_):
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
train_and_eval()
if __name__ == "__main__":
flags.mark_flag_as_required("train_input_pattern")
flags.mark_flag_as_required("eval_input_pattern")
flags.mark_flag_as_required("model_dir")
tf.compat.v1.app.run()
|
tensorflow/ranking
|
tensorflow_ranking/research/dasalc.py
|
Python
|
apache-2.0
| 12,510
|
[
"Gaussian"
] |
b9f8e151ff4d8c957778b2f51937637b7b262bd6c1e26aba4b74d1b9aab3cd5a
|
import numpy as np
import glob
import deepdish as dd
import os
#
# This may win awards for the quickest and dirtiest script I've made
# recently...
#
# compute averages:
# convert to loading factors - compute averages
file_list = np.sort(glob.glob('loading_factors*.dat'))
data_list = [None] * len(file_list)
i = 0
for name in file_list:
data_list[i] = np.genfromtxt(name,names=True)
i = i + 1
#data = np.genfromtxt('loading_factors.dat',names=True)
#data2 = np.genfromtxt('loading_factors2.dat',names=True)
data = {}
for k in data_list[0].dtype.names:
data[k] = list(data_list[0][k])
for loaded_data in data_list[1:]:
data[k] = data[k] + list(loaded_data[k])
data[k] = np.array( data[k] )
if os.path.isfile('orate.dat'):
#print("Using adjusted outflow rate")
correct_orate = np.genfromtxt('orate.dat', names=True)
data['O_rate'] = np.interp( data['time'],
correct_orate['time'],
correct_orate['O_rate'])
#
#correct_orate = np.genfromtxt('orate.dat',names=True)
#
#data['O_rate'] = correct_orate['O_rate'] #####
loading_data = {}
for k in ['M_out','M_out_hot','M_out_cold']:
select = data['M_out'] > 0
loading_data[k] = np.average( data[k][select] / data['SFR'][select])
for k in ['Metal_out','Metal_out_hot','Metal_out_cold']:
select = data['Metal_out'] > 0
loading_data[k] = np.average( data[k][select] / data['O_rate'][select])
for k in ['E_out','E_hot_out','E_cold_out','E_colder_out']:
select = data['E_out'] > 0
loading_data[k] = np.average( data[k][select] / (data['SNR'][select]*1.0E51))
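#
# Illustrative sketch, not part of the original script: each loading factor
# above is a masked average over snapshots with nonzero outflow, e.g.
# eta_mass = < M_out / SFR >.
def _demo_masked_average():
    M_out = np.array([0.0, 2.0, 4.0])
    SFR = np.array([1.0, 1.0, 2.0])
    select = M_out > 0
    return np.average(M_out[select] / SFR[select])  # (2/1 + 4/2) / 2 = 2.0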
#
# compute SFR for gas and stars
#
files = np.sort(glob.glob("./*galaxy_data*.h5"))
gassdens = np.zeros(np.size(files))
for i,f in enumerate(files):
    try:
        # probe the file; fall back to the running average if it is unreadable
        dd.io.load(f, "/gas_profiles/surface_density/disk/xbins")
    except Exception:
        gassdens[i] = np.average(gassdens[gassdens > 0])
        continue
    gassdens[i] = dd.io.load(f, "/meta_data/M_gas")
A = np.pi * 600.0 * 600.0 # surface area of galaxy disk
gassdens = np.average(gassdens) / A # in Msun / pc^2
starsdens = np.average( dd.io.load( files[-1], '/time_data/SFR_100')) / A * 1.0E6 # in Msun / yr / kpc^2
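# Illustrative helper, not used by the script: converts a surface density
# from per-pc^2 to per-kpc^2 (1 kpc^2 = 1.0E6 pc^2), the factor applied to
# SFR_100 / A above.
def _per_kpc2_from_per_pc2(value_per_pc2):
    return value_per_pc2 * 1.0E6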
# compute other values
loading_data['Eta_h-Eta_c'] = loading_data['E_hot_out'] / loading_data['E_cold_out']
select = data['M_out'] > 0
loading_data['e_s'] = np.average( data['E_out'][select] / (data['M_out'][select]*1.989E33 )) # erg / g
select = data['M_out_hot'] > 0
loading_data['e_s_hot'] = np.average( data['E_hot_out'][select] / (data['M_out_hot'][select]*1.989E33 )) # erg / g
select = data['M_out_cold'] > 0
loading_data['e_s_cold'] = np.average( data['E_cold_out'][select] / (data['M_out_cold'][select]*1.989E33 )) # erg / g
loading_data['e_s_h-e_s_c'] = loading_data['e_s_hot'] / loading_data['e_s_cold']
loading_data['Sigma_gas'] = gassdens
loading_data['Sigma_sfr'] = starsdens
# these entries are scalar averages at this point, so take the ratio directly
loading_data['E_h-Metal_h'] = loading_data['E_hot_out'] / loading_data['Metal_out_hot']
names = { 'M_out' : 'Eta_{mass}',
'M_out_hot' : "Eta_{mass,hot}",
'M_out_cold' : "Eta_{mass,cold}",
'Metal_out' : "Eta_{metal}",
'Metal_out_hot' : "Eta_{metal,hot}",
'Metal_out_cold' : "Eta_{metal,cold}",
'E_out' : "Eta_{E}",
'E_hot_out' : "Eta_{E,hot}",
'E_cold_out' : "Eta_{E,cold}",
'Eta_h-Eta_c' : "Eta_{E,hot} / Eta_{E,cold}",
'E_h-Metal_h' : 'Eta_{E,hot} / Eta_{metal,hot}',
'e_s' : 'e_s (erg/g)',
'e_s_hot' : 'e_s,h (erg/g)',
'e_s_cold' : 'e_s,c (erg/g)',
'e_s_h-e_s_c' : 'e_{s,h} / e_{s,c}',
'Sigma_gas' : 'Sigma_gas (Msun / pc^2)',
'Sigma_sfr' : 'Sigma_sfr (Msun / yr / kpc^2)'}
for k in loading_data.keys():
if 'colder' in k:
continue
print("%30s %8.3E"%(names[k],loading_data[k]))
|
aemerick/galaxy_analysis
|
notebooks/loading_table.py
|
Python
|
mit
| 4,045
|
[
"Galaxy"
] |
46c0c81beb192c803a444f54a9bb38d44e16880ea05fd51b6309d7941888c985
|
import optparse
import datetime
import random
import sys
#import numpy
from itertools import groupby
from operator import itemgetter
from rosetta import *
from rosetta.protocols.loops.loop_mover.refine import *
from rosetta.protocols.loops.loop_closure.ccd import *
import cheshift as cs
init()
#init(extra_options = "-constant_seed")
#random.seed(100)
path_db = 'CS_DB'
path_triple = 'TRIPLE_5'
tolerance = 1
def get_groups(walkers, max_length_loop, min_length_loop):
    group_walkers = []
    grouped = []
    for k, g in groupby(enumerate(walkers), lambda (i, x): i - x):
        grouped.append(map(itemgetter(1), g))
    #print grouped
    for i in range(0, len(grouped)):
        for j in range(0, len(grouped[i]), max_length_loop):
            if len(grouped[i][j:j+max_length_loop]) >= min_length_loop:
                group_walkers.append(grouped[i][j:j+max_length_loop])
    return group_walkers
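# Illustrative example, not part of the original protocol: consecutive
# residue indices are grouped into loops of length min..max, e.g.
#   get_groups([1, 2, 3, 4, 10, 11, 12], 5, 3) -> [[1, 2, 3, 4], [10, 11, 12]]
def _demo_get_groups():
    groups = get_groups([1, 2, 3, 4, 10, 11, 12], 5, 3)
    assert groups == [[1, 2, 3, 4], [10, 11, 12]]
    return groups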
# def get_groups(walkers, max_lenght_loop):
# group_walkers = []
# grouped = []
# for k, g in groupby(enumerate(walkers), lambda (i,x):i-x):
# grouped.append(map(itemgetter(1), g))
# for i in range(0, len(grouped)):
# for j in range(0, len(grouped[i]), max_lenght_loop):
# group_walkers.append(grouped[i][j:j+max_lenght_loop])
# return group_walkers
def GiveWalkers(pdb_file):
reference = 0.
walkers = cs.CheShift(pdb_file, cs_exp, reference, Db, path_triple, tolerance)
return walkers
def minimize(pdb_file, counter):
pose = Pose()
pose_from_pdb(pose, pdb_file)
pymover = PyMOL_Mover()
scorefxn= create_score_function('talaris2013')
scorefxn.set_weight(rg , 1)
pymover.apply(pose)
scorefxn(pose)
pymover.update_energy(True)
pymover.send_energy(pose)
pymover.keep_history(True)
aarmsd = all_atom_rmsd(pose, rmsd_pose)
carmsd = native_CA_rmsd(pose, rmsd_pose)
gdt = CA_gdtmm(pose, rmsd_pose)
movemap = MoveMap()
walkers = GiveWalkers(pdb_file)
fd.write("**WALKERS MIN %s : %s\n" % (counter, walkers))
walkerlist.append(len(walkers))
fe.write("%*s%*s scorefxn:%*s flaws:%*s carmsd:%*s aarmsd:%*s gdt:%*s\n" % (15, "minimize", 4, counter, 14, str(scorefxn(pose)), 3, str(len(walkers)), 14, str(carmsd), 14, str(aarmsd), 14, str(gdt)))
minmover = MinMover()
minmover.score_function(scorefxn)
minmover.movemap(movemap)
minmover.apply(pose)
pose.pdb_info().name('refine_minimize_%s' % counter)
#pymover.apply(pose)
pymover.send_energy(pose)
pose.dump_pdb("refine_minimize%s.pdb" % counter)
pdb_file = "refine_minimize%s.pdb" % counter
return pdb_file
def refine_chi(pdb_file, counter,
kT = 1.0, cycles = 9,
jobs = 1, job_output = 'refine_chi'):
pose = Pose()
pose_from_pdb(pose, pdb_file)
starting_pose = Pose()
starting_pose.assign(pose)
scorefxn = create_score_function('talaris2013') # scorefxn = get_fa_scorefxn() #
scorefxn.set_weight(rg , 1)
aarmsd = all_atom_rmsd(pose, rmsd_pose)
carmsd = native_CA_rmsd(pose, rmsd_pose)
gdt = CA_gdtmm(pose, rmsd_pose)
movemap = MoveMap()
movemap.set_bb(False)
movemap.set_chi(False)
walkers = GiveWalkers(pdb_file)
fd.write("**WALKERS CHI %s : %s\n" % (counter, walkers))
walkerlist.append(len(walkers))
fe.write("%*s%*s scorefxn:%*s flaws:%*s carmsd:%*s aarmsd:%*s gdt:%*s\n" % (15, "refine_chi", 4, counter, 14, str(scorefxn(pose)), 3, str(len(walkers)), 14, str(carmsd), 14, str(aarmsd), 14, str(gdt)))
for residue in walkers:
movemap.set_chi(residue, True)
minmover = MinMover()
minmover.movemap(movemap)
minmover.score_function(scorefxn)
to_pack = standard_packer_task(starting_pose)
to_pack.restrict_to_repacking() # prevents design, packing only
length = pose.total_residue()
vector = rosetta.utility.vector1_bool()
for i in range (1, length+1):
if i in walkers:
vector.append(True)
else:
vector.append(False)
to_pack.restrict_to_residues(vector)
# to_pack.temporarily_fix_everything()
# for residue in walkers:
# to_pack.temporarily_set_pack_residue(residue, True)
to_pack.or_include_current(True) # considers the original sidechains
packmover = PackRotamersMover(scorefxn, to_pack)
scorefxn(pose)
pymover = PyMOL_Mover()
pymover.update_energy(True)
pymover.apply(pose)
pymover.send_energy(pose)
pymover.keep_history(True)
combined_mover = SequenceMover()
combined_mover.add_mover(packmover)
combined_mover.add_mover(minmover)
combined_mover.add_mover(pymover)
mc = MonteCarlo(pose, scorefxn, kT) # must reset for each trajectory!
trial = TrialMover(combined_mover, mc)
chi_refinement = RepeatMover(trial, cycles)
jd = PyJobDistributor(job_output, jobs, scorefxn)
jd.native_pose = starting_pose
scores = [0]*(jobs + 1)
scores[0] = scorefxn(starting_pose)
cycle_counter = 0 # for exporting to PyMOL
while not jd.job_complete:
# -reload the starting pose
pose.assign(starting_pose)
cycle_counter += 1
pose.pdb_info().name(job_output + '_' + str(cycle_counter))
# -reset the MonteCarlo object (sets lowest_score to that of p)
mc.reset(pose)
chi_refinement.apply(pose)
mc.recover_low(pose)
jd.output_decoy(pose)
pose.pdb_info().name( job_output + '_' + str(cycle_counter) + '_final')
pymover.apply(pose)
pymover.send_energy(pose) # see the total score in color
# -store the final score for this trajectory
scores[cycle_counter] = scorefxn(pose)
#print scorefxn.show(pose)
# Final print
print 'Original Score\t:\t' , scores[0]
for i in range(1, len(scores)): # print out the job scores
print job_output + '_' + str(i) + '\t:\t', scores[i]
pdb_file = "%s_1.pdb" % (job_output)
return pdb_file
def refine_bb(pdb_file, counter,
kT = 1.0, smallmoves = 3, shearmoves = 5,
backbone_angle_max = 7, cycles = 9,
jobs = 1, job_output = 'refine_bb'):
pose = Pose()
pose_from_pdb(pose, pdb_file)
starting_pose = Pose()
starting_pose.assign(pose)
#scorefxn_low = create_score_function('cen_std')
scorefxn= create_score_function('talaris2013') # scorefxn = get_fa_scorefxn() #
scorefxn.set_weight(rg , 1)
aarmsd = all_atom_rmsd(pose, rmsd_pose)
carmsd = native_CA_rmsd(pose, rmsd_pose)
gdt = CA_gdtmm(pose, rmsd_pose)
movemap = MoveMap()
movemap.set_bb(False)
movemap.set_chi(False)
walkers = GiveWalkers(pdb_file)
fd.write("**WALKERS -BB %s : %s\n" % (counter, walkers))
walkerlist.append(len(walkers))
fe.write("%*s%*s scorefxn:%*s flaws:%*s carmsd:%*s aarmsd:%*s gdt:%*s\n" % (15, "refine_bb", 4, counter, 14, str(scorefxn(pose)), 3, str(len(walkers)), 14, str(carmsd), 14, str(aarmsd), 14, str(gdt)))
for residue in walkers:
movemap.set_bb(residue, True)
for residue in walkers:
movemap.set_chi(residue, True)
smallmover = SmallMover(movemap, kT, smallmoves)
smallmover.angle_max(backbone_angle_max)
shearmover = ShearMover(movemap, kT, shearmoves)
shearmover.angle_max(backbone_angle_max)
minmover = MinMover()
minmover.movemap(movemap)
minmover.score_function(scorefxn)
to_pack = standard_packer_task(starting_pose)
to_pack.restrict_to_repacking() # prevents design, packing only
length = pose.total_residue()
vector = rosetta.utility.vector1_bool()
for i in range (1, length+1):
if i in walkers:
vector.append(True)
else:
vector.append(False)
to_pack.restrict_to_residues(vector)
# to_pack.temporarily_fix_everything()
# for residue in walkers:
# to_pack.temporarily_set_pack_residue(residue, True)
to_pack.or_include_current(True) # considers the original sidechains
packmover = PackRotamersMover(scorefxn, to_pack)
scorefxn(pose)
pymover = PyMOL_Mover()
pymover.update_energy(True)
pymover.apply(pose)
pymover.send_energy(pose)
pymover.keep_history(True)
combined_mover = SequenceMover()
combined_mover.add_mover(smallmover)
combined_mover.add_mover(shearmover)
combined_mover.add_mover(minmover)
combined_mover.add_mover(packmover)
combined_mover.add_mover(pymover)
mc = MonteCarlo(pose, scorefxn, kT) # must reset for each trajectory!
trial = TrialMover(combined_mover, mc)
bb_refinement = RepeatMover(trial, cycles)
jd = PyJobDistributor(job_output, jobs, scorefxn)
jd.native_pose = starting_pose
scores = [0]*(jobs + 1)
scores[0] = scorefxn(starting_pose)
cycle_counter = 0 # for exporting to PyMOL
while not jd.job_complete:
# -reload the starting pose
pose.assign(starting_pose)
cycle_counter += 1
pose.pdb_info().name(job_output + '_' + str(cycle_counter))
# -reset the MonteCarlo object (sets lowest_score to that of p)
mc.reset(pose)
bb_refinement.apply(pose)
mc.recover_low(pose)
jd.output_decoy(pose)
pose.pdb_info().name( job_output + '_' + str(cycle_counter) + '_final')
pymover.apply(pose)
pymover.send_energy(pose) # see the total score in color
# -store the final score for this trajectory
scores[cycle_counter] = scorefxn(pose)
#print scorefxn.show(pose)
# Final print
print 'Original Score\t:\t' , scores[0]
for i in range(1, len(scores)): # print out the job scores
print job_output + '_' + str(i) + '\t:\t', scores[i]
pdb_file = "%s_1.pdb" % job_output
return pdb_file
def low_res(pdb_file, counter,
kT = 1.0, smallmoves = 3, shearmoves = 5,
backbone_angle_max = 7, cycles = 9):
pose = Pose()
pose_from_pdb(pose, pdb_file)
starting_pose = Pose()
starting_pose.assign(pose)
scorefxn_high = create_score_function('talaris2013')
scorefxn_low = create_score_function('cen_std')
scorefxn_low.apply_patch_from_file('score4L')
#scorefxn_low.set_weight(rg , 50)
to_centroid = SwitchResidueTypeSetMover('centroid')
to_fullatom = SwitchResidueTypeSetMover('fa_standard')
walkers = GiveWalkers(pdb_file)
fd.write("**WALKERS LOW %s : %s\n" % (counter, walkers))
walkerlist.append(len(walkers))
aarmsd = all_atom_rmsd(pose, rmsd_pose)
carmsd = native_CA_rmsd(pose, rmsd_pose)
gdt = CA_gdtmm(pose, rmsd_pose)
fe.write("%*s%*s scorefxn:%*s flaws:%*s carmsd:%*s aarmsd:%*s gdt:%*s\n" % (15, "low_res", 4, counter, 14, str(scorefxn(pose)), 3, str(len(walkers)), 14, str(carmsd), 14, str(aarmsd), 14, str(gdt)))
movemap = MoveMap()
movemap.set_bb(False)
movemap.set_chi(False)
for residue in walkers:
movemap.set_bb(residue, True)
for residue in walkers:
movemap.set_chi(residue, True)
to_centroid.apply(pose)
pymover = PyMOL_Mover()
scorefxn_low(pose)
#pymover.update_energy(True)
pymover.apply(pose)
#pymover.send_energy(pose)
#pymover.keep_history(True)
smallmover = SmallMover(movemap, kT, smallmoves)
smallmover.angle_max(backbone_angle_max)
shearmover = ShearMover(movemap, kT, shearmoves)
shearmover.angle_max(backbone_angle_max)
minmover = MinMover()
minmover.movemap(movemap)
minmover.score_function(scorefxn_low)
combined_mover = SequenceMover()
combined_mover.add_mover(smallmover)
combined_mover.add_mover(shearmover)
combined_mover.add_mover(minmover)
mc = MonteCarlo(pose, scorefxn_low, kT) # must reset for each trajectory!
trial = TrialMover(combined_mover, mc)
bb_refinement = RepeatMover(trial, cycles)
bb_refinement.apply(pose)
mc.recover_low(pose)
to_fullatom.apply(pose)
recover_sidechains = ReturnSidechainMover(starting_pose)
recover_sidechains.apply(pose)
pose.pdb_info().name( 'lowres_' + str(counter))
pymover.apply(pose)
pose.dump_pdb("refine_lowres%s.pdb" % counter)
pdb_file = "refine_lowres%s.pdb" % counter
return pdb_file
def loop_modeler(pdb_file, counter):
pose = Pose()
pose_from_pdb(pose, pdb_file)
starting_pose = Pose()
starting_pose.assign(pose)
scorefxn_high = create_score_function('talaris2013')
scorefxn_low = create_score_function('cen_std') # scorefxn = get_fa_scorefxn() #
scorefxn_low.apply_patch_from_file('score4L')
#scorefxn_low.set_weight(chainbreak, 1)
#scorefxn_low.set_weight(rg , 50)
to_centroid = SwitchResidueTypeSetMover('centroid')
to_fullatom = SwitchResidueTypeSetMover('fa_standard')
pymov = PyMOL_Mover()
scorefxn_high(starting_pose) # for exporting the scores
pymov.apply(starting_pose)
pymov.send_energy(starting_pose)
aarmsd = all_atom_rmsd(pose, rmsd_pose)
carmsd = native_CA_rmsd(pose, rmsd_pose)
gdt = CA_gdtmm(pose, rmsd_pose)
walkers = GiveWalkers(pdb_file)
fd.write("**WALKERS LOO %s : %s\n" % (counter, walkers))
walkerlist.append(len(walkers))
fe.write("%*s%*s scorefxn:%*s flaws:%*s carmsd:%*s aarmsd:%*s gdt:%*s\n" % (15, "loop_modeler", 4, counter, 14, str(scorefxn(pose)), 3, str(len(walkers)), 14, str(carmsd), 14, str(aarmsd), 14, str(gdt)))
group_walkers = get_groups(walkers, 5, 3)
print group_walkers
cycle_counter = 0
for i in range(0, len(group_walkers)):
looplist = group_walkers.pop(random.randrange(0, len(group_walkers)))
print looplist
loop_begin = looplist[0]
loop_end = looplist[-1]
loop_cutpoint = (loop_begin + loop_end) / 2
my_loop = Loop(loop_begin, loop_end, loop_cutpoint)
set_single_loop_fold_tree(pose, my_loop)
add_cutpoint_variants(pose)
loops = Loops()
loops.add_loop(my_loop)
movemap = MoveMap()
movemap.set_bb(False)
movemap.set_chi(False)
movemap.set_bb_true_range(loop_begin, loop_end)
movemap.set_chi_true_range(loop_begin, loop_end)
smallmover = SmallMover(movemap, kT, smallmoves)
smallmover.angle_max(backbone_angle_max)
shearmover = ShearMover(movemap, kT, shearmoves)
shearmover.angle_max(backbone_angle_max)
minmover = MinMover()
minmover.movemap(movemap)
minmover.score_function(scorefxn_low)
to_pack = standard_packer_task(starting_pose)
to_pack.restrict_to_repacking() # prevents design, packing only
length = pose.total_residue()
vector = rosetta.utility.vector1_bool()
for i in range (1, length+1):
if i in looplist:
vector.append(True)
else:
vector.append(False)
to_pack.restrict_to_residues(vector)
to_pack.or_include_current(True) # considers the original sidechains
packmover = PackRotamersMover(scorefxn_low, to_pack)
combined_mover = SequenceMover()
combined_mover.add_mover(smallmover)
combined_mover.add_mover(shearmover)
combined_mover.add_mover(minmover)
#combined_mover.add_mover(packmover)
to_centroid.apply(pose)
mc = MonteCarlo(pose, scorefxn_low, kT) # must reset for each trajectory!
trial = TrialMover(combined_mover, mc)
bb_refinement = RepeatMover(combined_mover, 9)
print "STARTING"
print "BB_REFINEMENT"
bb_refinement.apply(pose)
print "DONE"
mc.recover_low(pose)
loop_refine = LoopMover_Refine_CCD(loops)
#loop_refine.apply(pose)
#print "loop refine done"
ccd_closure = CcdLoopClosureMover(my_loop, movemap)
ccd_closure.apply(pose)
print "CCD CLOSURE DONE"
to_fullatom.apply(pose)
recover_sidechains = ReturnSidechainMover(starting_pose)
recover_sidechains.apply(pose)
pose.pdb_info().name( 'looprefine_' + str(counter) + '_' + str(cycle_counter))
pymov.apply(pose)
cycle_counter += 1
pose.pdb_info().name( 'looprefine_' + str(counter) + '_final')
pymov.apply(pose)
scorefxn_high(pose)
pymov.send_energy(pose)
pose.dump_pdb("refine_loop%s.pdb" % counter)
pdb_file = "refine_loop%s.pdb" % counter
return pdb_file
def fast_relaxation(pdb_file, counter):
pose = Pose()
pose_from_pdb(pose, pdb_file)
pymover = PyMOL_Mover()
scorefxn= create_score_function('talaris2013')
scorefxn.set_weight(rg , 1)
pymover.apply(pose)
scorefxn(pose)
pymover.update_energy(True)
pymover.send_energy(pose)
pymover.keep_history(True)
aarmsd = all_atom_rmsd(pose, rmsd_pose)
carmsd = native_CA_rmsd(pose, rmsd_pose)
gdt = CA_gdtmm(pose, rmsd_pose)
movemap = MoveMap()
movemap.set_bb(False)
movemap.set_chi(False)
walkers = GiveWalkers(pdb_file)
fd.write("**WALKERS REL %s : %s\n" % (counter, walkers))
walkerlist.append(len(walkers))
fe.write("%*s%*s scorefxn:%*s flaws:%*s carmsd:%*s aarmsd:%*s gdt:%*s\n" % (15, "fast_relaxation", 4, counter, 14, str(scorefxn(pose)), 3, str(len(walkers)), 14, str(carmsd), 14, str(aarmsd), 14, str(gdt)))
for residue in walkers:
movemap.set_bb(residue, True)
for residue in walkers:
movemap.set_chi(residue, True)
    ######## This is to allow the whole structure to relax
#if counter % 5 == 0:
#movemap.set_bb(True)
#movemap.set_chi(True)
    ######## Probably a bad idea
relax = FastRelax()
relax.set_scorefxn(scorefxn)
relax.set_movemap(movemap)
relax.apply(pose)
# pose.pdb_info().name( 'relax_' + str(counter) + '_final')
# pymover.apply(pose)
# pymover.send_energy(pose)
pose.dump_pdb("refine_relax%s.pdb" % counter)
pdb_file = "refine_relax%s.pdb" % counter
return pdb_file
##### OPTIONS ######
parser = optparse.OptionParser()
parser.add_option('--pdb_filename', dest = 'pdb_filename',
default = '1HKO', # default example PDB
help = 'the PDB file containing the protein to refine')
parser.add_option('--cs_exp', dest = 'cs_exp',
default = '4803',
help = 'the experimental chemical shifts files for the structure to refine')
parser.add_option('--rmsd_pdb', dest = 'rmsd_pdb',
default = '1CYO.clean2',
help = 'the pdb file containing the structure to compare rmsd with')
# custom refinement options
parser.add_option('--kT', dest='kT',
default = '1.0',
help = 'the \"temperature\" of the sample refinement protocol')
parser.add_option( '--smallmoves', dest='smallmoves',
default = '3',
                  help = 'the number of times SmallMover is applied in\
the custom refinement protocol' )
parser.add_option('--shearmoves', dest='shearmoves',
default = '5',
help = 'the number of times ShearMover is applied in\
the custom refinement protocol' )
parser.add_option( '--backbone_angle_max', dest='backbone_angle_max',
default = '7',
help = 'the maximum angle perturbation of SmallMover and ShearMover in\
the custom refinement protocol')
parser.add_option("--iterations", dest="iterations",
default = "6",
help = "the number of times the structure will cycle between cheshift\
and pyrosetta")
parser.add_option("--finals", dest="finals",
default = "1",
help = "the number of times the final structures that will be\
obtained")
parser.add_option('--cycles', dest='cycles',
default = '9',
help = 'the number of refinement rounds (small, shear, min, pack) in\
the sample refinement protocol')
# PyJobDistributor options
parser.add_option('--jobs', dest='jobs',
default = '1', # default to single trajectory for speed
help = 'the number of jobs (trajectories) to perform')
# parser.add_option('--job_output', dest = 'job_output',
# default = 'refine', # if a specific output name is desired
# help = 'the name preceding all output, output PDB files and .fasc')
(options,args) = parser.parse_args()
# PDB file option
cs_exp = str(options.cs_exp)
pdb_filename = str(options.pdb_filename)
rmsd_pdb = str(options.rmsd_pdb)
# custom refinement options
kT = float(options.kT)
smallmoves = int(options.smallmoves)
shearmoves = int(options.shearmoves)
backbone_angle_max = int(options.backbone_angle_max)
cycles = int(options.cycles)
iterations = int(options.iterations)
finals = int(options.finals)
# JobDistributor options
jobs = int(options.jobs)
#job_output = options.job_output
reference, outliers, Db, ok = cs.setup_CheShift(cs_exp, pdb_filename, path_db)
pdb_file = "%s_00000.pdb" % pdb_filename
rmsd_file = "%s.pdb" % rmsd_pdb
rmsd_pose = Pose()
pose_from_pdb(rmsd_pose, rmsd_file)
rmsd_pose.dump_pdb("%s_TRANSFORMED.pdb" % rmsd_pdb)
counter = 0
final_structures = 0
fd = open('rosettaflaws.log', 'w')
fe = open('rosetta.log', 'w')
now = datetime.datetime.now()
fe.write("%s --pdb_filename %s --cs_exp %s --rmsd_pdb %s --iterations %s\n" % (str(now), pdb_filename, cs_exp, rmsd_pdb,iterations))
walkerlist = []
pdb_file = minimize(pdb_file, counter)
for i in range(0, iterations):
print walkerlist
if i >= 4:
if walkerlist[-1] == walkerlist[-2] == walkerlist[-3] == walkerlist[-4] == walkerlist[-5] == walkerlist[-6] == walkerlist[-7] == walkerlist[-8] == walkerlist[-9] == walkerlist[-10] == walkerlist[-11]:
pose = Pose()
pose_from_pdb(pose, pdb_file)
scorefxn = create_score_function('talaris2013')
scorefxn.set_weight(rg , 1)
aarmsd = all_atom_rmsd(pose, rmsd_pose)
carmsd = native_CA_rmsd(pose, rmsd_pose)
gdt = CA_gdtmm(pose, rmsd_pose)
walkers = GiveWalkers(pdb_file)
pose.dump_pdb("reFINAL_%s_%s.pdb" % (pdb_filename, counter))
pdb_file = "%s_00000.pdb" % pdb_filename
fd.write("**WALKERS FIN %s : %s\n" % (counter, walkers))
walkerlist.append(len(walkers))
fe.write("%*s%*s scorefxn:%*s flaws:%*s carmsd:%*s aarmsd:%*s gdt:%*s\n" % (15, "FINAL", 4, counter, 14, str(scorefxn(pose)), 3, str(len(walkers)), 14, str(carmsd), 14, str(aarmsd), 14, str(gdt)))
fe.write("**Number of flaws unchanged in last 10 movements, protocol restarting.**\n")
pdb_file = minimize(pdb_file, counter)
#print(pdb_file)
#pdb_file = fast_relaxation(pdb_file, counter)
#pdb_file = loop_modeler(pdb_file, counter)
job_output = "refine_chi%s" % counter
pdb_file = refine_chi(pdb_file, counter,
kT, cycles, jobs, job_output)
#print(pdb_file)
#pdb_file = low_res(pdb_file, counter,
#kT, smallmoves, shearmoves, backbone_angle_max, cycles)
job_output = "refine_bb%s" % counter
pdb_file = refine_bb(pdb_file, counter,
kT, smallmoves, shearmoves, backbone_angle_max, cycles, jobs, job_output)
#if counter % 2 == 0 and counter != 0:
#pdb_file = fast_relaxation(pdb_file, counter)
pdb_file = fast_relaxation(pdb_file, counter)
counter += 1
# for i in range(0, iterations):
# if i >= 4:
# if walkerlist[-1] == walkerlist[-2] == walkerlist[-3] == walkerlist[-4] == walkerlist[-5] == walkerlist[-6] == walkerlist[-7] == walkerlist[-8] == walkerlist[-9] == walkerlist[-10] == walkerlist[-11]:
# fd.close()
# sys.exit("**Sin cambios en las ultimas 11 lineas, protocolo terminado.**")
# pdb_file = fast_relaxation(pdb_file, counter)
# pdb_file = low_res(pdb_file, counter,
# kT, smallmoves, shearmoves, backbone_angle_max, cycles)
# counter += 1
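# Illustrative sketch, not part of the original protocol: a compact
# equivalent of the chained comparison used above to detect a stalled run
# (flaw count unchanged over the last 11 records).
def _demo_stagnant(walker_counts):
    return len(walker_counts) >= 11 and len(set(walker_counts[-11:])) == 1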
pose = Pose()
pose_from_pdb(pose, pdb_file)
scorefxn= create_score_function('talaris2013')
scorefxn.set_weight(rg , 1)
aarmsd = all_atom_rmsd(pose, rmsd_pose)
carmsd = native_CA_rmsd(pose, rmsd_pose)
gdt = CA_gdtmm(pose, rmsd_pose)
pymover = PyMOL_Mover()
pymover.apply(rmsd_pose)
#pose.dump_pdb("reFINAL_%s.pdb" % pdb_filename)
walkers = GiveWalkers(pdb_file)
fd.write("**WALKERS FIN %s : %s\n" % (counter, walkers))
walkerlist.append(len(walkers))
fe.write("%*s%*s scorefxn:%*s flaws:%*s carmsd:%*s aarmsd:%*s gdt:%*s\n" % (15, "FINAL", 4, counter, 14, str(scorefxn(pose)), 3, str(len(walkers)), 14, str(carmsd), 14, str(aarmsd), 14, str(gdt)))
fd.close()
fe.write("**Number of iterations completed, protocol finished.**\n")
now = datetime.datetime.now()
fe.write(str(now))
fe.close()
sys.exit("**Number of iterations completed, protocol finished.**")
|
pgerramirez/13Ca-and-13Cb-CS-driven-refinement
|
protocolo_refinamiento_R.py
|
Python
|
gpl-3.0
| 22,781
|
[
"PyMOL"
] |
45d794463c60696424c7cf666907b42ea9aaba7f4f88b6667fa2727ec3129bd3
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from six import string_types
from six.moves import zip_longest
import re
from types import GeneratorType
from collections import Counter, defaultdict, Hashable
from unittest import TestCase, main
import numpy as np
import numpy.testing as npt
import pandas as pd
from skbio import Sequence
class SequenceSubclass(Sequence):
"""Used for testing purposes."""
pass
class TestSequence(TestCase):
def setUp(self):
self.sequence_kinds = frozenset([
str, Sequence, lambda s: np.fromstring(s, dtype='|S1'),
lambda s: np.fromstring(s, dtype=np.uint8)])
def empty_generator():
raise StopIteration()
yield
self.getitem_empty_indices = [
[],
(),
{},
empty_generator(),
# ndarray of implicit float dtype
np.array([]),
np.array([], dtype=int)]
def test_init_default_parameters(self):
seq = Sequence('.ABC123xyz-')
npt.assert_equal(seq.values, np.array('.ABC123xyz-', dtype='c'))
self.assertEqual(seq.id, "")
self.assertEqual(seq.description, "")
self.assertIsNone(seq.quality)
def test_init_nondefault_parameters(self):
seq = Sequence('.ABC123xyz-', id='foo', description='bar baz',
quality=range(11))
npt.assert_equal(seq.values, np.array('.ABC123xyz-', dtype='c'))
self.assertEqual(seq.id, 'foo')
self.assertEqual(seq.description, 'bar baz')
npt.assert_equal(seq.quality, np.array(range(11), dtype='int'))
def test_init_empty_sequence(self):
# Test constructing an empty sequence using each supported input type.
for s in (b'', # bytes
u'', # unicode
np.array('', dtype='c'), # char vector
np.fromstring('', dtype=np.uint8), # byte vec
Sequence('')): # another Sequence object
seq = Sequence(s)
self.assertIsInstance(seq.values, np.ndarray)
self.assertEqual(seq.values.dtype, '|S1')
self.assertEqual(seq.values.shape, (0,))
npt.assert_equal(seq.values, np.array('', dtype='c'))
def test_init_single_character_sequence(self):
for s in (b'A',
u'A',
np.array('A', dtype='c'),
np.fromstring('A', dtype=np.uint8),
Sequence('A')):
seq = Sequence(s)
self.assertIsInstance(seq.values, np.ndarray)
self.assertEqual(seq.values.dtype, '|S1')
self.assertEqual(seq.values.shape, (1,))
npt.assert_equal(seq.values, np.array('A', dtype='c'))
def test_init_multiple_character_sequence(self):
for s in (b'.ABC\t123 xyz-',
u'.ABC\t123 xyz-',
np.array('.ABC\t123 xyz-', dtype='c'),
np.fromstring('.ABC\t123 xyz-', dtype=np.uint8),
Sequence('.ABC\t123 xyz-')):
seq = Sequence(s)
self.assertIsInstance(seq.values, np.ndarray)
self.assertEqual(seq.values.dtype, '|S1')
self.assertEqual(seq.values.shape, (14,))
npt.assert_equal(seq.values,
np.array('.ABC\t123 xyz-', dtype='c'))
def test_init_from_sequence_object(self):
# We're testing this in its simplest form in other tests. This test
# exercises more complicated cases of building a sequence from another
# sequence.
# just the sequence, no other metadata
seq = Sequence('ACGT')
self.assertEqual(Sequence(seq), seq)
# sequence with metadata should have everything propagated
seq = Sequence('ACGT', id='foo', description='bar baz',
quality=range(4))
self.assertEqual(Sequence(seq), seq)
# should be able to override metadata
self.assertEqual(
Sequence(seq, id='abc', description='123', quality=[42] * 4),
Sequence('ACGT', id='abc', description='123', quality=[42] * 4))
# subclasses work too
seq = SequenceSubclass('ACGT', id='foo', description='bar baz',
quality=range(4))
self.assertEqual(
Sequence(seq),
Sequence('ACGT', id='foo', description='bar baz',
quality=range(4)))
def test_init_from_contiguous_sequence_bytes_view(self):
bytes = np.array([65, 42, 66, 42, 65], dtype=np.uint8)
view = bytes[:3]
seq = Sequence(view)
# sequence should be what we'd expect
self.assertEqual(seq, Sequence('A*B'))
# we shouldn't own the memory because no copy should have been made
self.assertFalse(seq._owns_bytes)
# can't mutate view because it isn't writeable anymore
with self.assertRaises(ValueError):
view[1] = 100
# sequence shouldn't have changed
self.assertEqual(seq, Sequence('A*B'))
# mutate bytes (*not* the view)
bytes[0] = 99
# Sequence changed because we are only able to make the view read-only,
# not its source (bytes). This is somewhat inconsistent behavior that
# is (to the best of our knowledge) outside our control.
self.assertEqual(seq, Sequence('c*B'))
def test_init_from_noncontiguous_sequence_bytes_view(self):
bytes = np.array([65, 42, 66, 42, 65], dtype=np.uint8)
view = bytes[::2]
seq = Sequence(view)
# sequence should be what we'd expect
self.assertEqual(seq, Sequence('ABA'))
# we should own the memory because a copy should have been made
self.assertTrue(seq._owns_bytes)
# mutate bytes and its view
bytes[0] = 99
view[1] = 100
# sequence shouldn't have changed
self.assertEqual(seq, Sequence('ABA'))
def test_init_no_copy_of_sequence(self):
bytes = np.array([65, 66, 65], dtype=np.uint8)
seq = Sequence(bytes)
# should share the same memory
self.assertIs(seq._bytes, bytes)
# shouldn't be able to mutate the Sequence object's internals by
# mutating the shared memory
with self.assertRaises(ValueError):
bytes[1] = 42
def test_init_empty_id(self):
seq = Sequence('', id='')
self.assertIsInstance(seq.id, string_types)
self.assertEqual(seq.id, '')
def test_init_single_character_id(self):
seq = Sequence('', id='z')
self.assertIsInstance(seq.id, string_types)
self.assertEqual(seq.id, 'z')
def test_init_multiple_character_id(self):
seq = Sequence('', id='\nabc\tdef G123')
self.assertIsInstance(seq.id, string_types)
self.assertEqual(seq.id, '\nabc\tdef G123')
def test_init_empty_description(self):
seq = Sequence('', description='')
self.assertIsInstance(seq.description, string_types)
self.assertEqual(seq.description, '')
def test_init_single_character_description(self):
seq = Sequence('', description='z')
self.assertIsInstance(seq.description, string_types)
self.assertEqual(seq.description, 'z')
def test_init_multiple_character_description(self):
seq = Sequence('', description='\nabc\tdef G123')
self.assertIsInstance(seq.description, string_types)
self.assertEqual(seq.description, '\nabc\tdef G123')
def test_init_empty_quality(self):
for q in ([], (), np.array([])):
seq = Sequence('', quality=q)
self.assertIsInstance(seq.quality, np.ndarray)
self.assertEqual(seq.quality.dtype, np.int)
self.assertEqual(seq.quality.shape, (0,))
npt.assert_equal(seq.quality, np.array([]))
def test_init_single_quality_score(self):
for q in (2, [2], (2,), np.array([2])):
seq = Sequence('G', quality=q)
self.assertIsInstance(seq.quality, np.ndarray)
self.assertEqual(seq.quality.dtype, np.int)
self.assertEqual(seq.quality.shape, (1,))
npt.assert_equal(seq.quality, np.array([2]))
def test_init_multiple_quality_scores(self):
for q in ([0, 42, 42, 1, 0, 8, 100, 0, 0],
(0, 42, 42, 1, 0, 8, 100, 0, 0),
np.array([0, 42, 42, 1, 0, 8, 100, 0, 0])):
seq = Sequence('G' * 9, quality=q)
self.assertIsInstance(seq.quality, np.ndarray)
self.assertEqual(seq.quality.dtype, np.int)
self.assertEqual(seq.quality.shape, (9,))
npt.assert_equal(seq.quality,
np.array([0, 42, 42, 1, 0, 8, 100, 0, 0]))
def test_init_no_copy_of_quality(self):
qual = np.array([22, 22, 1])
seq = Sequence('ACA', quality=qual)
self.assertIs(seq.quality, qual)
with self.assertRaises(ValueError):
qual[1] = 42
def test_init_invalid_sequence(self):
# invalid dtype (numpy.ndarray input)
with self.assertRaises(TypeError):
# int64
Sequence(np.array([1, 2, 3]))
with self.assertRaises(TypeError):
# |S21
Sequence(np.array([1, "23", 3]))
with self.assertRaises(TypeError):
# object
Sequence(np.array([1, {}, ()]))
# invalid input type (non-numpy.ndarray input)
with self.assertRaisesRegexp(TypeError, 'tuple'):
Sequence(('a', 'b', 'c'))
with self.assertRaisesRegexp(TypeError, 'list'):
Sequence(['a', 'b', 'c'])
with self.assertRaisesRegexp(TypeError, 'set'):
Sequence({'a', 'b', 'c'})
with self.assertRaisesRegexp(TypeError, 'dict'):
Sequence({'a': 42, 'b': 43, 'c': 44})
with self.assertRaisesRegexp(TypeError, 'int'):
Sequence(42)
with self.assertRaisesRegexp(TypeError, 'float'):
Sequence(4.2)
with self.assertRaisesRegexp(TypeError, 'int64'):
Sequence(np.int_(50))
with self.assertRaisesRegexp(TypeError, 'float64'):
Sequence(np.float_(50))
with self.assertRaisesRegexp(TypeError, 'Foo'):
class Foo(object):
pass
Sequence(Foo())
# out of ASCII range
with self.assertRaises(UnicodeEncodeError):
Sequence(u'abc\u1F30')
def test_init_invalid_id(self):
with self.assertRaises(TypeError):
Sequence('abc', id=('f', 'o', 'o'))
def test_init_invalid_description(self):
with self.assertRaises(TypeError):
Sequence('abc', description=('f', 'o', 'o'))
def test_init_invalid_quality(self):
# invalid dtype
with self.assertRaises(TypeError):
Sequence('ACGT', quality=[2, 3, 4.1, 5])
with self.assertRaises(TypeError):
Sequence('ACGT', quality=[2, np.nan, 4, 5])
# wrong number of dimensions
with self.assertRaisesRegexp(ValueError, '2.*1-D'):
Sequence('ACGT', quality=[[2, 3], [4, 5]])
# wrong number of elements
with self.assertRaisesRegexp(ValueError, '\(3\).*\(4\)'):
Sequence('ACGT', quality=[2, 3, 4])
# negatives
with self.assertRaisesRegexp(ValueError,
'Quality scores.*greater than.*zero'):
Sequence('ACGT', quality=[2, 3, -1, 4])
def test_value_property(self):
# Property tests are only concerned with testing the interface
# provided by the property: that it can be accessed, can't be
# reassigned or mutated in place, and that the correct type is
# returned. More extensive testing of border cases (e.g., different
# sequence lengths or input types, odd characters, etc.) are performed
# in Sequence.__init__ tests.
seq = Sequence('ACGT')
# should get back a numpy.ndarray of '|S1' dtype
self.assertIsInstance(seq.values, np.ndarray)
self.assertEqual(seq.values.dtype, '|S1')
npt.assert_equal(seq.values, np.array('ACGT', dtype='c'))
# test that we can't mutate the property
with self.assertRaises(ValueError):
seq.values[1] = 'A'
# test that we can't set the property
with self.assertRaises(AttributeError):
seq.values = np.array("GGGG", dtype='c')
def test_id_property(self):
seq = Sequence('', id='foo')
self.assertIsInstance(seq.id, string_types)
self.assertEqual(seq.id, 'foo')
with self.assertRaises(TypeError):
seq.id[1] = 42
with self.assertRaises(AttributeError):
seq.id = 'bar'
def test_description_property(self):
seq = Sequence('', description='foo')
self.assertIsInstance(seq.description, string_types)
self.assertEqual(seq.description, 'foo')
with self.assertRaises(TypeError):
seq.description[1] = 42
with self.assertRaises(AttributeError):
seq.description = 'bar'
def test_quality_property(self):
seq = Sequence('ACA', quality=[22, 22, 0])
self.assertIsInstance(seq.quality, np.ndarray)
self.assertEqual(seq.quality.dtype, np.int)
npt.assert_equal(seq.quality, np.array([22, 22, 0]))
with self.assertRaises(ValueError):
seq.quality[1] = 42
with self.assertRaises(AttributeError):
seq.quality = [22, 22, 42]
def test_has_quality(self):
seq = Sequence('')
self.assertFalse(seq._has_quality())
seq = Sequence('', quality=[])
self.assertTrue(seq._has_quality())
seq = Sequence('ACA', quality=(5, 4, 67))
self.assertTrue(seq._has_quality())
seq = Sequence('ACA')
self.assertFalse(seq._has_quality())
def test_eq_and_ne(self):
seq_a = Sequence("A")
seq_b = Sequence("B")
self.assertTrue(seq_a == seq_a)
self.assertTrue(Sequence("a") == Sequence("a"))
self.assertTrue(Sequence("a", id='b') == Sequence("a", id='b'))
self.assertTrue(Sequence("a", id='b', description='c') ==
Sequence("a", id='b', description='c'))
self.assertTrue(Sequence("a", id='b', description='c', quality=[1]) ==
Sequence("a", id='b', description='c', quality=[1]))
self.assertTrue(seq_a != seq_b)
self.assertTrue(SequenceSubclass("a") != Sequence("a"))
self.assertTrue(Sequence("a") != Sequence("b"))
self.assertTrue(Sequence("a") != Sequence("a", id='b'))
self.assertTrue(Sequence("a", id='c') !=
Sequence("a", id='c', description='t'))
self.assertTrue(Sequence("a", quality=[1]) != Sequence("a"))
self.assertTrue(Sequence("a", quality=[2]) !=
Sequence("a", quality=[1]))
self.assertTrue(Sequence("c", quality=[3]) !=
Sequence("b", quality=[3]))
self.assertTrue(Sequence("a", id='b') != Sequence("c", id='b'))
def test_getitem_gives_new_sequence(self):
seq = Sequence("Sequence string !1@2#3?.,")
self.assertFalse(seq is seq[:])
def test_getitem_with_int_has_qual(self):
s = "Sequence string !1@2#3?.,"
length = len(s)
seq = Sequence(s, id='id', description='dsc',
quality=np.arange(length))
eseq = Sequence("S", id='id', description='dsc', quality=np.array([0]))
self.assertEqual(seq[0], eseq)
eseq = Sequence(",", id='id', description='dsc',
quality=np.array([len(seq) - 1]))
self.assertEqual(seq[len(seq) - 1], eseq)
eseq = Sequence("t", id='id', description='dsc',
quality=[10])
self.assertEqual(seq[10], eseq)
def test_getitem_with_int_no_qual(self):
seq = Sequence("Sequence string !1@2#3?.,", id='id2',
description='no_qual')
eseq = Sequence("t", id='id2', description='no_qual')
self.assertEqual(seq[10], eseq)
def test_getitem_with_slice_has_qual(self):
s = "0123456789abcdef"
length = len(s)
seq = Sequence(s, id='id3', description="dsc3",
quality=np.arange(length))
eseq = Sequence("012", id='id3', description="dsc3",
quality=np.arange(3))
self.assertEquals(seq[0:3], eseq)
self.assertEquals(seq[:3], eseq)
self.assertEquals(seq[:3:1], eseq)
eseq = Sequence("def", id='id3', description="dsc3",
quality=[13, 14, 15])
self.assertEquals(seq[-3:], eseq)
self.assertEquals(seq[-3::1], eseq)
eseq = Sequence("02468ace", id='id3', description='dsc3',
quality=[0, 2, 4, 6, 8, 10, 12, 14])
self.assertEquals(seq[0:length:2], eseq)
self.assertEquals(seq[::2], eseq)
eseq = Sequence(s[::-1], id='id3', description='dsc3',
quality=np.arange(length)[::-1])
self.assertEquals(seq[length::-1], eseq)
self.assertEquals(seq[::-1], eseq)
eseq = Sequence('fdb97531', id='id3', description='dsc3',
quality=[15, 13, 11, 9, 7, 5, 3, 1])
self.assertEquals(seq[length::-2], eseq)
self.assertEquals(seq[::-2], eseq)
self.assertEquals(seq[0:500:], seq)
eseq = Sequence('', id='id3', description='dsc3',
quality=[])
self.assertEquals(seq[length:0], eseq)
self.assertEquals(seq[-length:0], eseq)
self.assertEquals(seq[1:0], eseq)
eseq = Sequence("0", id='id3', description='dsc3',
quality=[0])
self.assertEquals(seq[0:1], eseq)
self.assertEquals(seq[0:1:1], eseq)
self.assertEquals(seq[-length::-1], eseq)
def test_getitem_with_slice_no_qual(self):
s = "0123456789abcdef"
length = len(s)
seq = Sequence(s, id='id4', description="no_qual4")
eseq = Sequence("02468ace", id='id4', description='no_qual4')
self.assertEquals(seq[0:length:2], eseq)
self.assertEquals(seq[::2], eseq)
def test_getitem_with_tuple_of_mixed_with_qual(self):
s = "0123456789abcdef"
length = len(s)
seq = Sequence(s, id='id5', description="dsc5",
quality=np.arange(length))
eseq = Sequence("00000", id='id5', description='dsc5',
quality=[0, 0, 0, 0, 0])
self.assertEquals(seq[0, 0, 0, 0, 0], eseq)
self.assertEquals(seq[0, 0:1, 0, -length::-1, 0, 1:0], eseq)
self.assertEquals(seq[0:1, 0:1, 0:1, 0:1, 0:1], eseq)
self.assertEquals(seq[0:1, 0, 0, 0, 0], eseq)
eseq = Sequence("0123fed9", id='id5', description='dsc5',
quality=[0, 1, 2, 3, 15, 14, 13, 9])
self.assertEquals(seq[0, 1, 2, 3, 15, 14, 13, 9], eseq)
self.assertEquals(seq[0, 1, 2, 3, :-4:-1, 9], eseq)
self.assertEquals(seq[0:4, :-4:-1, 9, 1:0], eseq)
self.assertEquals(seq[0:4, :-4:-1, 9:10], eseq)
def test_getitem_with_tuple_of_mixed_no_qual(self):
seq = Sequence("0123456789abcdef", id='id6', description="no_qual6")
eseq = Sequence("0123fed9", id='id6', description='no_qual6')
self.assertEquals(seq[0, 1, 2, 3, 15, 14, 13, 9], eseq)
self.assertEquals(seq[0, 1, 2, 3, :-4:-1, 9], eseq)
self.assertEquals(seq[0:4, :-4:-1, 9], eseq)
self.assertEquals(seq[0:4, :-4:-1, 9:10], eseq)
def test_getitem_with_iterable_of_mixed_has_qual(self):
s = "0123456789abcdef"
length = len(s)
seq = Sequence(s, id='id7', description="dsc7",
quality=np.arange(length))
def generator():
yield slice(0, 4)
yield slice(200, 400)
yield -1
yield slice(-2, -4, -1)
yield 9
eseq = Sequence("0123fed9", id='id7', description='dsc7',
quality=[0, 1, 2, 3, 15, 14, 13, 9])
self.assertEquals(seq[[0, 1, 2, 3, 15, 14, 13, 9]], eseq)
self.assertEquals(seq[generator()], eseq)
self.assertEquals(seq[[slice(0, 4), slice(None, -4, -1), 9]], eseq)
self.assertEquals(seq[
[slice(0, 4), slice(None, -4, -1), slice(9, 10)]], eseq)
def test_getitem_with_iterable_of_mixed_no_qual(self):
s = "0123456789abcdef"
seq = Sequence(s, id='id7', description="dsc7")
def generator():
yield slice(0, 4)
yield slice(200, 400)
yield slice(None, -4, -1)
yield 9
eseq = Sequence("0123fed9", id='id7', description='dsc7')
self.assertEquals(seq[[0, 1, 2, 3, 15, 14, 13, 9]], eseq)
self.assertEquals(seq[generator()], eseq)
self.assertEquals(seq[[slice(0, 4), slice(None, -4, -1), 9]], eseq)
self.assertEquals(seq[
[slice(0, 4), slice(None, -4, -1), slice(9, 10)]], eseq)
def test_getitem_with_numpy_index_has_qual(self):
s = "0123456789abcdef"
length = len(s)
seq = Sequence(s, id='id9', description="dsc9",
quality=np.arange(length))
eseq = Sequence("0123fed9", id='id9', description='dsc9',
quality=[0, 1, 2, 3, 15, 14, 13, 9])
self.assertEquals(seq[np.array([0, 1, 2, 3, 15, 14, 13, 9])], eseq)
def test_getitem_with_numpy_index_no_qual(self):
s = "0123456789abcdef"
seq = Sequence(s, id='id10', description="dsc10")
eseq = Sequence("0123fed9", id='id10', description='dsc10')
self.assertEquals(seq[np.array([0, 1, 2, 3, 15, 14, 13, 9])], eseq)
def test_getitem_with_empty_indices_empty_seq_has_qual(self):
s = ""
length = len(s)
seq = Sequence(s, id='id9', description="dsc9",
quality=np.arange(length))
eseq = Sequence('', id='id9', description='dsc9', quality=[])
tested = 0
for index in self.getitem_empty_indices:
tested += 1
self.assertEqual(seq[index], eseq)
self.assertEqual(tested, 6)
def test_getitem_with_empty_indices_empty_seq_no_qual(self):
s = ""
seq = Sequence(s, id='id10', description="dsc10")
eseq = Sequence('', id='id10', description='dsc10')
tested = 0
for index in self.getitem_empty_indices:
tested += 1
self.assertEqual(seq[index], eseq)
self.assertEqual(tested, 6)
def test_getitem_with_empty_indices_non_empty_seq_has_qual(self):
s = "0123456789abcdef"
length = len(s)
seq = Sequence(s, id='id9', description="dsc9",
quality=np.arange(length))
eseq = Sequence('', id='id9', description='dsc9', quality=[])
tested = 0
for index in self.getitem_empty_indices:
tested += 1
self.assertEqual(seq[index], eseq)
self.assertEqual(tested, 6)
def test_getitem_with_empty_indices_non_empty_seq_no_qual(self):
s = "0123456789abcdef"
seq = Sequence(s, id='id10', description="dsc10")
eseq = Sequence('', id='id10', description='dsc10')
tested = 0
for index in self.getitem_empty_indices:
tested += 1
self.assertEqual(seq[index], eseq)
self.assertEqual(tested, 6)
def test_getitem_with_boolean_vector_has_qual(self):
s = "0123456789abcdef"
length = len(s)
seq = Sequence(s, id='id11', description="dsc11",
quality=np.arange(length))
eseq = Sequence("13579bdf", id='id11', description="dsc11",
quality=[1, 3, 5, 7, 9, 11, 13, 15])
self.assertEqual(seq[np.array([False, True] * 8)], eseq)
self.assertEqual(seq[[False, True] * 8], eseq)
def test_getitem_with_boolean_vector_no_qual(self):
s = "0123456789abcdef"
seq = Sequence(s, id='id11', description="dsc11")
eseq = Sequence("13579bdf", id='id11', description="dsc11")
self.assertEqual(seq[np.array([False, True] * 8)], eseq)
def test_getitem_with_invalid(self):
seq = Sequence("123456", id='idm', description='description',
quality=[1, 2, 3, 4, 5, 6])
with self.assertRaises(IndexError):
seq['not an index']
with self.assertRaises(IndexError):
seq[['1', '2']]
with self.assertRaises(IndexError):
seq[[1, slice(1, 2), 'a']]
with self.assertRaises(IndexError):
seq[[1, slice(1, 2), True]]
with self.assertRaises(IndexError):
seq[True]
with self.assertRaises(IndexError):
seq[np.array([True, False])]
with self.assertRaises(IndexError):
seq[99999999999999999]
with self.assertRaises(IndexError):
seq[0, 0, 99999999999999999]
# numpy 1.8.1 and 1.9.2 raise different error types
# (ValueError, IndexError).
with self.assertRaises(Exception):
seq[100 * [True, False, True]]
def test_len(self):
self.assertEqual(len(Sequence("")), 0)
self.assertEqual(len(Sequence("a")), 1)
self.assertEqual(len(Sequence("abcdef")), 6)
def test_contains(self):
seq = Sequence("#@ACGT,24.13**02")
tested = 0
for c in self.sequence_kinds:
tested += 1
self.assertTrue(c(',24') in seq)
self.assertTrue(c('*') in seq)
self.assertTrue(c('') in seq)
self.assertFalse(c("$") in seq)
self.assertFalse(c("AGT") in seq)
self.assertEqual(tested, 4)
def test_contains_sequence_subclass(self):
with self.assertRaises(TypeError):
SequenceSubclass("A") in Sequence("AAA")
self.assertTrue(SequenceSubclass("A").values in Sequence("AAA"))
def test_hash(self):
with self.assertRaises(TypeError):
hash(Sequence("ABCDEFG"))
self.assertNotIsInstance(Sequence("ABCDEFG"), Hashable)
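    # Sequence implements value-based equality, so hashing must be disabled
    # to stay consistent with Python's data model (in Python 3 this happens
    # automatically when __eq__ is defined without __hash__; a class may also
    # set __hash__ = None explicitly - which mechanism scikit-bio uses is not
    # visible in this file).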
def test_iter_has_quality(self):
tested = False
seq = Sequence("0123456789", id="a", description="b",
quality=np.arange(10))
for i, s in enumerate(seq):
tested = True
self.assertEqual(s, Sequence(str(i), id='a', description='b',
quality=[i]))
self.assertTrue(tested)
def test_iter_no_quality(self):
tested = False
seq = Sequence("0123456789", id="a", description="b")
for i, s in enumerate(seq):
tested = True
self.assertEqual(s, Sequence(str(i), id='a', description='b'))
self.assertTrue(tested)
def test_reversed_has_quality(self):
tested = False
seq = Sequence("0123456789", id="a", description="b",
quality=np.arange(10))
for i, s in enumerate(reversed(seq)):
tested = True
self.assertEqual(s, Sequence(str(9 - i), id='a', description='b',
quality=[9 - i]))
self.assertTrue(tested)
def test_reversed_no_quality(self):
tested = False
seq = Sequence("0123456789", id="a", description="b")
for i, s in enumerate(reversed(seq)):
tested = True
self.assertEqual(s, Sequence(str(9 - i), id='a', description='b'))
self.assertTrue(tested)
def test_repr(self):
seq_simple = Sequence("ACGT")
seq_med = Sequence("ACGT", id="id", description="desc",
quality=[1, 2, 3, 4])
seq_complex = Sequence(("ASDKJHDJHFGUGF*&@KFHKHSDGKASDHGKDUYGKFHJ#&*YJ"
"FE&I@#JH@#ASJDHGF*&@#IG#*&IGUJKSADHAKSDJHI#*Y"
"LFUFLIU#RHL*Y#HHFLI#*FHL@#(*HJ"),
id="This is a long id", description="desc",
quality=([1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2] *
10))
self.assertEqual(repr(seq_simple), "Sequence('ACGT', length=4)")
self.assertEqual(repr(seq_med),
("Sequence('ACGT', length=4, id='id',"
" description='desc', quality=[1, 2, 3, 4])"))
self.assertEqual(repr(seq_complex),
("Sequence('ASDKJH ... @#(*HJ', length=120, id='This"
" is a long id', \n description='desc', "
"quality=[1, 2, 3, 4, 5, 6, ..., 7, 8, 9, 0, 1, 2])")
)
def test_str(self):
self.assertEqual(str(Sequence("GATTACA")), "GATTACA")
self.assertEqual(str(Sequence("ACCGGTACC")), "ACCGGTACC")
self.assertEqual(str(Sequence("GREG")), "GREG")
self.assertEqual(str(Sequence("ABC", quality=[1, 2, 3])), "ABC")
self.assertIs(type(str(Sequence("A"))), str)
def test_to_default_behavior(self):
# minimal sequence, sequence with all optional attributes present, and
# a subclass of Sequence
for seq in (Sequence('ACGT'),
Sequence('ACGT', id='foo', description='bar',
quality=range(4)),
SequenceSubclass('ACGU', id='rna seq')):
to = seq._to()
self.assertTrue(seq.equals(to))
self.assertFalse(seq is to)
def test_to_update_single_attribute(self):
seq = Sequence('HE..--..LLO', id='hello',
description='gapped hello',
quality=range(11))
to = seq._to(id='new id')
self.assertFalse(seq is to)
# they don't compare equal when we compare all attributes...
self.assertFalse(seq.equals(to))
# ...but they *do* compare equal when we ignore id, as that was the
# only attribute that changed
self.assertTrue(seq.equals(to, ignore=['id']))
# id should be what we specified in the _to call...
self.assertEqual(to.id, 'new id')
# ...and shouldn't have changed on the original sequence
self.assertEqual(seq.id, 'hello')
def test_to_update_multiple_attributes(self):
seq = Sequence('HE..--..LLO', id='hello',
description='gapped hello',
quality=range(11))
to = seq._to(id='new id', quality=range(20, 25),
sequence='ACGTA', description='new desc')
self.assertFalse(seq is to)
self.assertFalse(seq.equals(to))
# attributes should be what we specified in the _to call...
self.assertEqual(to.id, 'new id')
npt.assert_array_equal(to.quality, np.array([20, 21, 22, 23, 24]))
npt.assert_array_equal(to.values, np.array('ACGTA', dtype='c'))
self.assertEqual(to.description, 'new desc')
# ...and shouldn't have changed on the original sequence
self.assertEqual(seq.id, 'hello')
npt.assert_array_equal(seq.quality, range(11))
npt.assert_array_equal(seq.values, np.array('HE..--..LLO',
dtype='c'))
self.assertEqual(seq.description, 'gapped hello')
def test_to_invalid_kwargs(self):
seq = Sequence('ACCGGTACC', id="test-seq",
description="A test sequence")
with self.assertRaises(TypeError):
seq._to(id='bar', unrecognized_kwarg='baz')
def test_to_extra_non_attribute_kwargs(self):
# test that we can pass through additional kwargs to the constructor
# that aren't related to biological sequence attributes (i.e., they
# aren't state that has to be copied)
class SequenceSubclassWithNewSignature(Sequence):
def __init__(self, sequence, id='', description='', quality=None,
foo=False):
super(SequenceSubclassWithNewSignature, self).__init__(
sequence, id=id, description=description, quality=quality)
self.foo = foo
seq = SequenceSubclassWithNewSignature('ACTG', description='foo')
# _to() without specifying `foo`
to = seq._to()
self.assertTrue(seq.equals(to))
self.assertFalse(seq is to)
self.assertFalse(seq.foo)
# `foo` should default to False
self.assertFalse(to.foo)
# _to() with `foo` specified
to = seq._to(foo=True)
self.assertTrue(seq.equals(to))
self.assertFalse(seq is to)
self.assertFalse(seq.foo)
# `foo` should now be True
self.assertTrue(to.foo)
def test_equals_sequences_without_metadata_compare_equal(self):
self.assertTrue(Sequence('').equals(Sequence('')))
self.assertTrue(Sequence('z').equals(Sequence('z')))
self.assertTrue(
Sequence('ACGT').equals(Sequence('ACGT')))
def test_equals_sequences_with_metadata_compare_equal(self):
seq1 = Sequence('ACGT', id='foo', description='abc',
quality=[1, 2, 3, 4])
seq2 = Sequence('ACGT', id='foo', description='abc',
quality=[1, 2, 3, 4])
self.assertTrue(seq1.equals(seq2))
# order shouldn't matter
self.assertTrue(seq2.equals(seq1))
def test_equals_sequences_from_different_sources_compare_equal(self):
# sequences that have the same data but are constructed from different
# types of data should compare equal
seq1 = Sequence('ACGT', id='foo', description='abc',
quality=(1, 2, 3, 4))
seq2 = Sequence(np.array([65, 67, 71, 84], dtype=np.uint8),
id='foo', description='abc',
quality=np.array([1, 2, 3, 4]))
self.assertTrue(seq1.equals(seq2))
def test_equals_ignore_type(self):
seq1 = Sequence('ACGT')
seq2 = SequenceSubclass('ACGT')
self.assertTrue(seq1.equals(seq2, ignore=['type']))
def test_equals_ignore_id(self):
seq1 = Sequence('ACGT', id='foo')
seq2 = Sequence('ACGT', id='bar')
self.assertTrue(seq1.equals(seq2, ignore=['id']))
def test_equals_ignore_description(self):
seq1 = Sequence('ACGT', description='foo')
seq2 = Sequence('ACGT', description='bar')
self.assertTrue(seq1.equals(seq2, ignore=['description']))
def test_equals_ignore_quality(self):
seq1 = Sequence('ACGT', quality=[1, 2, 3, 4])
seq2 = Sequence('ACGT', quality=[5, 6, 7, 8])
self.assertTrue(seq1.equals(seq2, ignore=['quality']))
def test_equals_ignore_sequence(self):
seq1 = Sequence('ACGA')
seq2 = Sequence('ACGT')
self.assertTrue(seq1.equals(seq2, ignore=['sequence']))
def test_equals_ignore_everything(self):
seq1 = Sequence('ACGA', id='foo', description='abc',
quality=[1, 2, 3, 4])
seq2 = SequenceSubclass('ACGT', id='bar', description='def',
quality=[5, 6, 7, 8])
self.assertTrue(seq1.equals(seq2,
ignore=['quality', 'description', 'id',
'sequence', 'type']))
def test_equals_type_mismatch(self):
seq1 = Sequence('ACGT', id='foo', description='abc',
quality=[1, 2, 3, 4])
seq2 = SequenceSubclass('ACGT', id='bar', description='def',
quality=[5, 6, 7, 8])
self.assertFalse(seq1.equals(seq2,
ignore=['quality', 'description', 'id']))
def test_equals_id_mismatch(self):
seq1 = Sequence('ACGT', id='foo')
seq2 = Sequence('ACGT', id='bar')
self.assertFalse(seq1.equals(seq2))
def test_equals_description_mismatch(self):
seq1 = Sequence('ACGT', description='foo')
seq2 = Sequence('ACGT', description='bar')
self.assertFalse(seq1.equals(seq2))
def test_equals_quality_mismatch(self):
# both provided
seq1 = Sequence('ACGT', quality=[1, 2, 3, 4])
seq2 = Sequence('ACGT', quality=[1, 2, 3, 5])
self.assertFalse(seq1.equals(seq2))
# one provided
seq1 = Sequence('ACGT', quality=[1, 2, 3, 4])
seq2 = Sequence('ACGT')
self.assertFalse(seq1.equals(seq2))
def test_equals_sequence_mismatch(self):
seq1 = Sequence('ACGT')
seq2 = Sequence('TGCA')
self.assertFalse(seq1.equals(seq2))
def test_count(self):
def construct_char_array(s):
return np.fromstring(s, dtype='|S1')
def construct_uint8_array(s):
return np.fromstring(s, dtype=np.uint8)
seq = Sequence("1234567899876555")
tested = 0
for c in self.sequence_kinds:
tested += 1
self.assertEqual(seq.count(c('4')), 1)
self.assertEqual(seq.count(c('8')), 2)
self.assertEqual(seq.count(c('5')), 4)
self.assertEqual(seq.count(c('555')), 1)
self.assertEqual(seq.count(c('555'), 0, 4), 0)
self.assertEqual(seq.count(c('555'), start=0, end=4), 0)
self.assertEqual(seq.count(c('5'), start=10), 3)
self.assertEqual(seq.count(c('5'), end=10), 1)
with self.assertRaises(ValueError):
seq.count(c(''))
self.assertEquals(tested, 4)
def test_count_on_subclass(self):
with self.assertRaises(TypeError) as cm:
Sequence("abcd").count(SequenceSubclass("a"))
self.assertIn("Sequence", str(cm.exception))
self.assertIn("SequenceSubclass", str(cm.exception))
def test_distance(self):
tested = 0
for constructor in self.sequence_kinds:
tested += 1
seq1 = Sequence("abcdef")
seq2 = constructor("12bcef")
self.assertIsInstance(seq1.distance(seq1), float)
self.assertEqual(seq1.distance(seq2), 2.0/3.0)
self.assertEqual(tested, 4)
def test_distance_arbitrary_function(self):
def metric(x, y):
return len(x) ** 2 + len(y) ** 2
seq1 = Sequence("12345678")
seq2 = Sequence("1234")
result = seq1.distance(seq2, metric=metric)
self.assertIsInstance(result, float)
self.assertEqual(result, 80.0)
def test_distance_default_metric(self):
seq1 = Sequence("abcdef")
seq2 = Sequence("12bcef")
seq_wrong = Sequence("abcdefghijklmnop")
self.assertIsInstance(seq1.distance(seq1), float)
self.assertEqual(seq1.distance(seq1), 0.0)
self.assertEqual(seq1.distance(seq2), 2.0/3.0)
with self.assertRaises(ValueError):
seq1.distance(seq_wrong)
with self.assertRaises(ValueError):
seq_wrong.distance(seq1)
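    # For reference, the expected values above correspond to the Hamming
    # fraction (mismatching positions / sequence length); a hand computation
    # for the pair used in this test (sketch, not part of the suite):
    #
    #   sum(a != b for a, b in zip("abcdef", "12bcef")) / 6.0  # 4/6 == 2/3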
def test_distance_on_subclass(self):
seq1 = Sequence("abcdef")
seq2 = SequenceSubclass("12bcef")
with self.assertRaises(TypeError):
seq1.distance(seq2)
def test_matches(self):
tested = 0
for constructor in self.sequence_kinds:
tested += 1
seq1 = Sequence("AACCEEGG")
seq2 = constructor("ABCDEFGH")
expected = np.array([True, False] * 4)
npt.assert_equal(seq1.matches(seq2), expected)
self.assertEqual(tested, 4)
def test_matches_on_subclass(self):
seq1 = Sequence("AACCEEGG")
seq2 = SequenceSubclass("ABCDEFGH")
with self.assertRaises(TypeError):
seq1.matches(seq2)
def test_matches_unequal_length(self):
seq1 = Sequence("AACCEEGG")
seq2 = Sequence("TOOLONGTOCOMPARE")
with self.assertRaises(ValueError):
seq1.matches(seq2)
def test_mismatches(self):
tested = 0
for constructor in self.sequence_kinds:
tested += 1
seq1 = Sequence("AACCEEGG")
seq2 = constructor("ABCDEFGH")
expected = np.array([False, True] * 4)
npt.assert_equal(seq1.mismatches(seq2), expected)
self.assertEqual(tested, 4)
def test_mismatches_on_subclass(self):
seq1 = Sequence("AACCEEGG")
seq2 = SequenceSubclass("ABCDEFGH")
with self.assertRaises(TypeError):
seq1.mismatches(seq2)
def test_mismatches_unequal_length(self):
seq1 = Sequence("AACCEEGG")
seq2 = Sequence("TOOLONGTOCOMPARE")
with self.assertRaises(ValueError):
seq1.mismatches(seq2)
def test_mismatch_frequency(self):
seq1 = Sequence("AACCEEGG")
seq2 = Sequence("ABCDEFGH")
seq3 = Sequence("TTTTTTTT")
self.assertIs(type(seq1.mismatch_frequency(seq1)), int)
self.assertEqual(seq1.mismatch_frequency(seq1), 0)
self.assertEqual(seq1.mismatch_frequency(seq2), 4)
self.assertEqual(seq1.mismatch_frequency(seq3), 8)
def test_mismatch_frequency_relative(self):
seq1 = Sequence("AACCEEGG")
seq2 = Sequence("ABCDEFGH")
seq3 = Sequence("TTTTTTTT")
self.assertIs(type(seq1.mismatch_frequency(seq1, relative=True)),
float)
self.assertEqual(seq1.mismatch_frequency(seq1, relative=True), 0.0)
self.assertEqual(seq1.mismatch_frequency(seq2, relative=True), 0.5)
self.assertEqual(seq1.mismatch_frequency(seq3, relative=True), 1.0)
def test_mismatch_frequency_unequal_length(self):
seq1 = Sequence("AACCEEGG")
seq2 = Sequence("TOOLONGTOCOMPARE")
with self.assertRaises(ValueError):
seq1.mismatch_frequency(seq2)
    def test_mismatch_frequency_on_subclass(self):
seq1 = Sequence("AACCEEGG")
seq2 = SequenceSubclass("ABCDEFGH")
with self.assertRaises(TypeError):
seq1.mismatch_frequency(seq2)
def test_match_frequency(self):
seq1 = Sequence("AACCEEGG")
seq2 = Sequence("ABCDEFGH")
seq3 = Sequence("TTTTTTTT")
self.assertIs(type(seq1.match_frequency(seq1)), int)
self.assertEqual(seq1.match_frequency(seq1), 8)
self.assertEqual(seq1.match_frequency(seq2), 4)
self.assertEqual(seq1.match_frequency(seq3), 0)
def test_match_frequency_relative(self):
seq1 = Sequence("AACCEEGG")
seq2 = Sequence("ABCDEFGH")
seq3 = Sequence("TTTTTTTT")
self.assertIs(type(seq1.match_frequency(seq1, relative=True)),
float)
self.assertEqual(seq1.match_frequency(seq1, relative=True), 1.0)
self.assertEqual(seq1.match_frequency(seq2, relative=True), 0.5)
self.assertEqual(seq1.match_frequency(seq3, relative=True), 0.0)
def test_match_frequency_unequal_length(self):
seq1 = Sequence("AACCEEGG")
seq2 = Sequence("TOOLONGTOCOMPARE")
with self.assertRaises(ValueError):
seq1.match_frequency(seq2)
def test_match_frequency_on_subclass(self):
seq1 = Sequence("AACCEEGG")
seq2 = SequenceSubclass("ABCDEFGH")
with self.assertRaises(TypeError):
seq1.match_frequency(seq2)
def test_index(self):
tested = 0
for c in self.sequence_kinds:
tested += 1
seq = Sequence("ABCDEFG@@ABCDFOO")
self.assertEqual(seq.index(c("A")), 0)
self.assertEqual(seq.index(c("@")), 7)
self.assertEqual(seq.index(c("@@")), 7)
with self.assertRaises(ValueError):
seq.index("A", start=1, end=5)
self.assertEqual(tested, 4)
def test_index_on_subclass(self):
with self.assertRaises(TypeError):
Sequence("ABCDEFG").index(SequenceSubclass("A"))
self.assertEqual(
SequenceSubclass("ABCDEFG").index(SequenceSubclass("A")), 0)
def _compare_kmers_results(self, observed, expected):
for obs, exp in zip_longest(observed, expected, fillvalue=None):
self.assertEqual(obs, exp)
def test_iter_kmers(self):
seq = Sequence('GATTACA', quality=range(7))
expected = [
Sequence('G', quality=[0]),
Sequence('A', quality=[1]),
Sequence('T', quality=[2]),
Sequence('T', quality=[3]),
Sequence('A', quality=[4]),
Sequence('C', quality=[5]),
Sequence('A', quality=[6])
]
self._compare_kmers_results(
seq.iter_kmers(1, overlap=False), expected)
expected = [
Sequence('GA', quality=[0, 1]),
Sequence('TT', quality=[2, 3]),
Sequence('AC', quality=[4, 5])
]
self._compare_kmers_results(
seq.iter_kmers(2, overlap=False), expected)
expected = [
Sequence('GAT', quality=[0, 1, 2]),
Sequence('TAC', quality=[3, 4, 5])
]
self._compare_kmers_results(
seq.iter_kmers(3, overlap=False), expected)
expected = [
Sequence('GATTACA', quality=[0, 1, 2, 3, 4, 5, 6])
]
self._compare_kmers_results(
seq.iter_kmers(7, overlap=False), expected)
expected = []
self._compare_kmers_results(
seq.iter_kmers(8, overlap=False), expected)
self.assertIs(type(seq.iter_kmers(1)), GeneratorType)
def test_iter_kmers_with_overlap(self):
seq = Sequence('GATTACA', quality=range(7))
expected = [
Sequence('G', quality=[0]),
Sequence('A', quality=[1]),
Sequence('T', quality=[2]),
Sequence('T', quality=[3]),
Sequence('A', quality=[4]),
Sequence('C', quality=[5]),
Sequence('A', quality=[6])
]
self._compare_kmers_results(
seq.iter_kmers(1, overlap=True), expected)
expected = [
Sequence('GA', quality=[0, 1]),
Sequence('AT', quality=[1, 2]),
Sequence('TT', quality=[2, 3]),
Sequence('TA', quality=[3, 4]),
Sequence('AC', quality=[4, 5]),
Sequence('CA', quality=[5, 6])
]
self._compare_kmers_results(
seq.iter_kmers(2, overlap=True), expected)
expected = [
Sequence('GAT', quality=[0, 1, 2]),
Sequence('ATT', quality=[1, 2, 3]),
Sequence('TTA', quality=[2, 3, 4]),
Sequence('TAC', quality=[3, 4, 5]),
Sequence('ACA', quality=[4, 5, 6])
]
self._compare_kmers_results(
seq.iter_kmers(3, overlap=True), expected)
expected = [
Sequence('GATTACA', quality=[0, 1, 2, 3, 4, 5, 6])
]
self._compare_kmers_results(
seq.iter_kmers(7, overlap=True), expected)
expected = []
self._compare_kmers_results(
seq.iter_kmers(8, overlap=True), expected)
self.assertIs(type(seq.iter_kmers(1)), GeneratorType)
def test_iter_kmers_invalid_k(self):
seq = Sequence('GATTACA', quality=range(7))
with self.assertRaises(ValueError):
list(seq.iter_kmers(0))
with self.assertRaises(ValueError):
list(seq.iter_kmers(-42))
def test_iter_kmers_different_sequences(self):
seq = Sequence('HE..--..LLO', id='hello', description='gapped hello',
quality=range(11))
expected = [
Sequence('HE.', quality=[0, 1, 2], id='hello',
description='gapped hello'),
Sequence('.--', quality=[3, 4, 5], id='hello',
description='gapped hello'),
Sequence('..L', quality=[6, 7, 8], id='hello',
description='gapped hello')
]
self._compare_kmers_results(seq.iter_kmers(3, overlap=False), expected)
def test_kmer_frequencies(self):
seq = Sequence('GATTACA', quality=range(7))
# overlap = True
expected = Counter('GATTACA')
self.assertEqual(seq.kmer_frequencies(1, overlap=True), expected)
expected = Counter(['GAT', 'ATT', 'TTA', 'TAC', 'ACA'])
self.assertEqual(seq.kmer_frequencies(3, overlap=True), expected)
expected = Counter([])
self.assertEqual(seq.kmer_frequencies(8, overlap=True), expected)
# overlap = False
expected = Counter(['GAT', 'TAC'])
self.assertEqual(seq.kmer_frequencies(3, overlap=False), expected)
expected = Counter(['GATTACA'])
self.assertEqual(seq.kmer_frequencies(7, overlap=False), expected)
expected = Counter([])
self.assertEqual(seq.kmer_frequencies(8, overlap=False), expected)
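    # For orientation: with overlap=False, 'GATTACA' and k=3 yield only
    # ['GAT', 'TAC'] - the trailing 'A' cannot fill a complete window and is
    # dropped - while overlap=True slides the window one position at a time,
    # producing len(seq) - k + 1 k-mers.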
def test_kmer_frequencies_relative(self):
seq = Sequence('GATTACA', quality=range(7))
# overlap = True
expected = defaultdict(float)
expected['A'] = 3/7.
expected['C'] = 1/7.
expected['G'] = 1/7.
expected['T'] = 2/7.
self.assertEqual(seq.kmer_frequencies(1, overlap=True, relative=True),
expected)
expected = defaultdict(float)
expected['GAT'] = 1/5.
expected['ATT'] = 1/5.
expected['TTA'] = 1/5.
expected['TAC'] = 1/5.
expected['ACA'] = 1/5.
self.assertEqual(seq.kmer_frequencies(3, overlap=True, relative=True),
expected)
expected = defaultdict(float)
self.assertEqual(seq.kmer_frequencies(8, overlap=True, relative=True),
expected)
# overlap = False
expected = defaultdict(float)
expected['GAT'] = 1/2.
expected['TAC'] = 1/2.
self.assertEqual(seq.kmer_frequencies(3, overlap=False, relative=True),
expected)
expected = defaultdict(float)
expected['GATTACA'] = 1.0
self.assertEqual(seq.kmer_frequencies(7, overlap=False, relative=True),
expected)
expected = defaultdict(float)
self.assertEqual(seq.kmer_frequencies(8, overlap=False, relative=True),
expected)
def test_kmer_frequencies_floating_point_precision(self):
# Test that a sequence having no variation in k-words yields a
# frequency of exactly 1.0. Note that it is important to use
# self.assertEqual here instead of self.assertAlmostEqual because we
# want to test for exactly 1.0. A previous implementation of
# Sequence.kmer_frequencies(relative=True) added (1 / num_words) for
# each occurrence of a k-word to compute the frequencies (see
# https://github.com/biocore/scikit-bio/issues/801). In certain cases,
# this yielded a frequency slightly less than 1.0 due to roundoff
# error. The test case here uses a sequence with 10 characters that are
# all identical and computes k-word frequencies with k=1. This test
# case exposes the roundoff error present in the previous
# implementation because there are 10 k-words (which are all
# identical), so 1/10 added 10 times yields a number slightly less than
# 1.0. This occurs because 1/10 cannot be represented exactly as a
# floating point number.
seq = Sequence('AAAAAAAAAA')
self.assertEqual(seq.kmer_frequencies(1, relative=True),
defaultdict(float, {'A': 1.0}))
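    # A minimal sketch of the roundoff issue described above (plain Python,
    # independent of this suite): summing 1/10 ten times undershoots 1.0
    # because 0.1 has no exact binary floating point representation.
    #
    #   total = sum([1.0 / 10] * 10)
    #   total == 1.0   # False
    #   total          # 0.9999999999999999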
def test_find_with_regex(self):
seq = Sequence('GATTACA', quality=range(7))
pat = re.compile('(T+A)(CA)')
obs = list(seq.find_with_regex(pat))
exp = [slice(2, 5), slice(5, 7)]
self.assertEqual(obs, exp)
self.assertIs(type(seq.find_with_regex(pat)), GeneratorType)
def test_find_with_regex_string_as_input(self):
seq = Sequence('GATTACA', quality=range(7))
pat = '(T+A)(CA)'
obs = list(seq.find_with_regex(pat))
exp = [slice(2, 5), slice(5, 7)]
self.assertEqual(obs, exp)
self.assertIs(type(seq.find_with_regex(pat)), GeneratorType)
def test_find_with_regex_no_groups(self):
seq = Sequence('GATTACA', quality=range(7))
pat = re.compile('(FOO)')
self.assertEqual(list(seq.find_with_regex(pat)), [])
def test_find_with_regex_ignore_no_difference(self):
seq = Sequence('..ABCDEFG..')
pat = "([A-Z]+)"
exp = [slice(2, 9)]
self.assertEqual(list(seq.find_with_regex(pat)), exp)
obs = seq.find_with_regex(
pat, ignore=np.array([1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1],
dtype=bool))
self.assertEqual(list(obs), exp)
def test_find_with_regex_ignore(self):
obs = Sequence('A..A..BBAAB.A..AB..A.').find_with_regex(
"(A+)", ignore=np.array([0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1,
1, 0, 0, 1, 1, 0, 1], dtype=bool))
self.assertEqual(list(obs), [slice(0, 4), slice(8, 10), slice(12, 16),
slice(19, 20)])
def test_find_with_regex_ignore_index_array(self):
obs = Sequence('A..A..BBAAB.A..AB..A.').find_with_regex(
"(A+)", ignore=np.array([1, 2, 4, 5, 11, 13, 14, 17, 18, 20]))
self.assertEqual(list(obs), [slice(0, 4), slice(8, 10), slice(12, 16),
slice(19, 20)])
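    # In the two ignore tests above, masked positions (the '.' characters)
    # are removed before the regex is applied, and the matched slices are
    # mapped back to coordinates in the original, unmasked sequence - e.g.
    # slice(0, 4) spans 'A..A'.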
def test_iter_contiguous_index_array(self):
s = Sequence("0123456789abcdef")
for c in list, tuple, np.array, pd.Series:
exp = [Sequence("0123"), Sequence("89ab")]
obs = s.iter_contiguous(c([0, 1, 2, 3, 8, 9, 10, 11]))
self.assertEqual(list(obs), exp)
def test_iter_contiguous_boolean_vector(self):
s = Sequence("0123456789abcdef")
for c in list, tuple, np.array, pd.Series:
exp = [Sequence("0123"), Sequence("89ab")]
obs = s.iter_contiguous(c(([True] * 4 + [False] * 4) * 2))
self.assertEqual(list(obs), exp)
def test_iter_contiguous_iterable_slices(self):
def spaced_out():
yield slice(0, 4)
yield slice(8, 12)
def contiguous():
yield slice(0, 4)
yield slice(4, 8)
yield slice(12, 16)
s = Sequence("0123456789abcdef")
for c in (lambda x: x, list, tuple, lambda x: np.array(tuple(x)),
lambda x: pd.Series(tuple(x))):
exp = [Sequence("0123"), Sequence("89ab")]
obs = s.iter_contiguous(c(spaced_out()))
self.assertEqual(list(obs), exp)
exp = [Sequence("01234567"), Sequence("cdef")]
obs = s.iter_contiguous(c(contiguous()))
self.assertEqual(list(obs), exp)
def test_iter_contiguous_with_max_length(self):
s = Sequence("0123456789abcdef")
for c in list, tuple, np.array, pd.Series:
exp = [Sequence("234"), Sequence("678"), Sequence("abc")]
obs = s.iter_contiguous(c([True, False, True, True] * 4),
min_length=3)
self.assertEqual(list(obs), exp)
exp = [Sequence("0"), Sequence("234"), Sequence("678"),
Sequence("abc"), Sequence("ef")]
obs1 = list(s.iter_contiguous(c([True, False, True, True] * 4),
min_length=1))
obs2 = list(s.iter_contiguous(c([True, False, True, True] * 4)))
self.assertEqual(obs1, obs2)
self.assertEqual(obs1, exp)
def test_iter_contiguous_with_invert(self):
def spaced_out():
yield slice(0, 4)
yield slice(8, 12)
def contiguous():
yield slice(0, 4)
yield slice(4, 8)
yield slice(12, 16)
s = Sequence("0123456789abcdef")
for c in (lambda x: x, list, tuple, lambda x: np.array(tuple(x)),
lambda x: pd.Series(tuple(x))):
exp = [Sequence("4567"), Sequence("cdef")]
obs = s.iter_contiguous(c(spaced_out()), invert=True)
self.assertEqual(list(obs), exp)
exp = [Sequence("89ab")]
obs = s.iter_contiguous(c(contiguous()), invert=True)
self.assertEqual(list(obs), exp)
def test_munge_to_index_array_valid_index_array(self):
s = Sequence('123456')
for c in list, tuple, np.array, pd.Series:
exp = np.array([1, 2, 3], dtype=int)
obs = s._munge_to_index_array(c([1, 2, 3]))
npt.assert_equal(obs, exp)
exp = np.array([1, 3, 5], dtype=int)
obs = s._munge_to_index_array(c([1, 3, 5]))
npt.assert_equal(obs, exp)
def test_munge_to_index_array_invalid_index_array(self):
s = Sequence("12345678")
for c in list, tuple, np.array, pd.Series:
with self.assertRaises(ValueError):
s._munge_to_index_array(c([3, 2, 1]))
with self.assertRaises(ValueError):
s._munge_to_index_array(c([5, 6, 7, 2]))
with self.assertRaises(ValueError):
s._munge_to_index_array(c([0, 1, 2, 1]))
def test_munge_to_index_array_valid_bool_array(self):
s = Sequence('123456')
for c in list, tuple, np.array, pd.Series:
exp = np.array([2, 3, 5], dtype=int)
obs = s._munge_to_index_array(
c([False, False, True, True, False, True]))
npt.assert_equal(obs, exp)
exp = np.array([], dtype=int)
obs = s._munge_to_index_array(
c([False] * 6))
npt.assert_equal(obs, exp)
exp = np.arange(6)
obs = s._munge_to_index_array(
c([True] * 6))
npt.assert_equal(obs, exp)
def test_munge_to_index_array_invalid_bool_array(self):
s = Sequence('123456')
for c in (list, tuple, lambda x: np.array(x, dtype=bool),
lambda x: pd.Series(x, dtype=bool)):
with self.assertRaises(ValueError):
s._munge_to_index_array(c([]))
with self.assertRaises(ValueError):
s._munge_to_index_array(c([True]))
with self.assertRaises(ValueError):
s._munge_to_index_array(c([True] * 10))
def test_munge_to_index_array_valid_iterable(self):
s = Sequence('')
def slices_only():
return (slice(i, i+1) for i in range(0, 10, 2))
def mixed():
return (slice(i, i+1) if i % 2 == 0 else i for i in range(10))
def unthinkable():
for i in range(10):
if i % 3 == 0:
yield slice(i, i+1)
elif i % 3 == 1:
yield i
else:
yield np.array([i], dtype=int)
for c in (lambda x: x, list, tuple, lambda x: np.array(tuple(x)),
lambda x: pd.Series(tuple(x))):
exp = np.arange(10, dtype=int)
obs = s._munge_to_index_array(c(mixed()))
npt.assert_equal(obs, exp)
exp = np.arange(10, dtype=int)
obs = s._munge_to_index_array(c(unthinkable()))
npt.assert_equal(obs, exp)
exp = np.arange(10, step=2, dtype=int)
obs = s._munge_to_index_array(c(slices_only()))
npt.assert_equal(obs, exp)
def test_munge_to_index_array_invalid_iterable(self):
s = Sequence('')
def bad1():
yield "r"
yield [1, 2, 3]
def bad2():
yield 1
yield 'str'
def bad3():
yield False
yield True
yield 2
def bad4():
yield np.array([False, True])
yield slice(2, 5)
for c in (lambda x: x, list, tuple, lambda x: np.array(tuple(x)),
lambda x: pd.Series(tuple(x))):
with self.assertRaises(TypeError):
s._munge_to_index_array(bad1())
with self.assertRaises(TypeError):
s._munge_to_index_array(bad2())
with self.assertRaises(TypeError):
s._munge_to_index_array(bad3())
with self.assertRaises(TypeError):
s._munge_to_index_array(bad4())
if __name__ == "__main__":
main()
|
jensreeder/scikit-bio
|
skbio/sequence/tests/test_sequence.py
|
Python
|
bsd-3-clause
| 60,998
|
[
"scikit-bio"
] |
0bd97d8db480ff2bb48a9f21e506a65377695d7b9d0a1684f9fca3ef20df5ab7
|
import pandas as pd
import numpy as np
from elephant.utils import smooth, norm
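# NOTE (assumption): `neurons`, `datasets`, `spike_size_kernel` and
# `spike_SD_kernel` are used below but never defined in this file; they are
# presumably set up by the surrounding pipeline before this script is run.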
# discard neuron 130 (spikes and calcium uncorrelated)
neurons = np.delete(neurons, 129, 0)
datasets = np.delete(datasets, 129, 0)
alldata2 = {}
for i in range(len(neurons)):
dataset_index = int(datasets[i])
neuron_index = int(neurons[i])
    if dataset_index not in alldata2:
alldata2[ dataset_index ] = {}
    if neuron_index not in alldata2[ dataset_index ]:
alldata2[ dataset_index ][ neuron_index ] = {"spikes": None, "calcium": None}
# kernel for smoothing the ground truth (spikes) to facilitate gradient descent
kernelX = np.exp(-(np.array(range(-spike_size_kernel,spike_size_kernel+1),'float32'))**2/spike_SD_kernel**2)
kernelX = kernelX/np.sum(kernelX)
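# The two lines above build a discrete Gaussian kernel,
# k(i) = exp(-i**2 / spike_SD_kernel**2) for i in
# [-spike_size_kernel, spike_size_kernel], normalized to sum to 1 so that
# convolving with it preserves the total spike count (up to edge effects
# with mode="same").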
for iii, n_dataset in enumerate(range(1,11)):
# load using pandas
x1 = pd.read_csv("spikefinder.train/%d.train.calcium.csv" % n_dataset)
y1 = pd.read_csv("spikefinder.train/%d.train.spikes.csv" % n_dataset)
# convert to numpy arrays
x1 = x1.values
y1 = y1.values
number_of_neurons = x1.shape[1]
for neuron_index in range(number_of_neurons):
y1x = y1[:,neuron_index]
x1x = x1[:,neuron_index]
# discard NaNs
idx = ~np.isnan(x1x)
if np.any(neurons[datasets == n_dataset] == neuron_index):
alldata2[ n_dataset ][ neuron_index ][ "spikes" ] = y1x[idx]
alldata2[ n_dataset ][ neuron_index ][ "calcium" ] = norm(x1x[idx])
alldata2[ n_dataset ][ neuron_index ][ "spikes_smooth" ] = norm(np.convolve(y1x[idx], kernelX, mode="same"))
# alldata2[ n_dataset ][ neuron_index ][ "calcium_smooth" ] = smooth(x1x[idx], window_len=calcium_smoothing_windowsize)
# alldata2[ n_dataset ][ neuron_index ][ "calcium_smooth_norm" ] = norm(smooth(x1x[idx], window_len=calcium_smoothing_windowsize))
print("DONE")
|
PTRRupprecht/Spikefinder-Elephant
|
elephant/1_load_data.py
|
Python
|
mit
| 1,881
|
[
"NEURON"
] |
e6f40310e6b1572e7e1647961c16f4e9ec039b5e58f07ef94f7c12470c3868f5
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# OpenNest, an open source thermostat
# Copyright (C) 2014 Brian Gregg
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import threading, glob, time, math, os
from PyQt4.QtCore import QObject, QEvent, pyqtProperty, pyqtSignal
import RPi.GPIO as GPIO
class Thermostat(QObject):
changed = pyqtSignal(QObject)
HEAT = 17
COOL = 18
FAN = 22
LIGHT = 252
HIST = 10
def __init__(self, parent=None, units='imperial', test=False):
QObject.__init__(self, parent)
self._t = None
self._test = test
self._temp = (0, 0)
self._history = []
self._set = 38.0 * 5.0 / 9.0
self._state = [False, False, False]
self._units = units
self._auto = True
self._mode = 1
self._secondsSinceTouch = 0
self._lightOn = True
if parent is not None:
parent.setProperty('thermostat', self)
self.changed.connect(self.onChange)
base_dir = '/sys/bus/w1/devices/'
device_folder = glob.glob(base_dir + '10-*')[0]
self.device_file = device_folder + '/w1_slave'
GPIO.setmode(GPIO.BCM)
GPIO.setup(self.HEAT, GPIO.OUT)
GPIO.output(self.HEAT, self._state[0])
GPIO.setup(self.COOL, GPIO.OUT)
GPIO.output(self.COOL, self._state[1])
GPIO.setup(self.FAN, GPIO.OUT)
GPIO.output(self.FAN, self._state[2])
os.system("echo 'out' > /sys/class/gpio/gpio%i/direction" % self.LIGHT)
self.light(self._lightOn)
@pyqtProperty(bool)
def heat(self):
return self._state[0]
@heat.setter
def heat(self, value):
self._state[0] = value
GPIO.output(self.HEAT, value)
@pyqtProperty(bool)
def cool(self):
return self._state[1]
@cool.setter
def cool(self, value):
self._state[1] = value
GPIO.output(self.COOL, value)
@pyqtProperty(bool)
def fan(self):
return self._state[2]
@fan.setter
def fan(self, value):
self._state[2] = value
GPIO.output(self.FAN, value)
@pyqtProperty(bool)
def auto(self):
return self._auto
@auto.setter
def auto(self, value):
self._auto = value
self.changed.emit(self)
@pyqtProperty(int)
def mode(self):
return self._mode
@mode.setter
def mode(self, value):
self._mode = value
self.changed.emit(self)
@pyqtProperty(str)
def units(self):
return self._units
@units.setter
def units(self, value):
self._units = value
if value == 'metric':
self._set = round(self._set)
else:
self._set = self.convertFromDisp(round(self.convertToDisp(self._set)))
self.stop()
self.start()
@pyqtProperty(int)
def temp(self):
if(self.units == 'imperial'): return int(self._temp[1])
return int(self._temp[0])
@pyqtProperty(int)
def setTemp(self):
return self.convertToDisp(self._set)
@setTemp.setter
def setTemp(self, value):
self._set = self.convertFromDisp(value)
self.changed.emit(self)
def convertFromDisp(self, t):
if self.units == 'imperial':
return (t - 32.0) * 5.0 / 9.0
return t
def convertToDisp(self, t):
if self.units == 'imperial':
return t * 9.0 / 5.0 + 32.0
return t
def run(self):
self._temp = self.read_temp()
self._history.append(self._temp[0])
while len(self._history) > self.HIST:
self._history.pop(0)
self._t = threading.Timer(1, self.run)
self._t.daemon = True
self._t.start()
self._secondsSinceTouch += 1
if self._secondsSinceTouch > 30 and self._lightOn:
self.light(False)
def light(self, val):
os.system("echo '%i' > /sys/class/gpio/gpio%i/value" % (int(val), self.LIGHT))
self._lightOn = val
if val: self._secondsSinceTouch = 0
def read_temp_raw(self):
f = open(self.device_file, 'r')
lines = f.readlines()
f.close()
return lines
def read_temp(self):
lines = self.read_temp_raw()
while lines[0].strip()[-3:] != 'YES':
time.sleep(0.2)
lines = self.read_temp_raw()
equals_pos = lines[1].find('t=')
if equals_pos != -1:
temp_string = lines[1][equals_pos+2:]
temp_c = float(temp_string) / 1000.0
if self.units == 'metric' and self._test: print '%f C' % temp_c
temp_f = temp_c * 9.0 / 5.0 + 32.0
if self.units == 'imperial' and self._test: print '%f F' % temp_f
self.changed.emit(self)
return (temp_c, temp_f)
return self._temp
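    # For reference, a typical DS18B20 w1_slave file contains two lines like
    # the following (illustrative values, not captured from this device):
    #
    #   6e 01 4b 46 7f ff 02 10 71 : crc=71 YES
    #   6e 01 4b 46 7f ff 02 10 71 t=22875
    #
    # read_temp() above waits for the CRC line to end in 'YES', then parses
    # the millidegree-Celsius value after 't=' on the second line.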
def onChange(self):
if self._test: print '%f C' % self._temp[0], self._set, self._history
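        # The control logic below applies a +/-0.5 degree C deadband over the
        # whole HIST-sample history: every recent reading must cross the
        # threshold before a relay switches, damping noise-induced chatter.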
if self._mode == 1 and all(t < (self._set - 0.5) for t in self._history):
self.heat = True
self.cool = False
self.fan = not self._auto
elif self._mode == -1 and all(t > (self._set + 0.5) for t in self._history):
self.heat = False
self.cool = True
self.fan = True
else:
self.heat = False
self.cool = False
self.fan = False if self._mode == 0 else not self._auto
def eventFilter(self, obj, event):
if event.type() == QEvent.MouseButtonPress:
self.light(True)
return False
def start(self):
self.stop()
self.run()
def stop(self):
if(self._t is not None):
print 'Stopping thread...'
self._t.cancel()
self._t = None
GPIO.output(self.HEAT, False)
GPIO.output(self.COOL, False)
GPIO.output(self.FAN, False)
if __name__ == "__main__":
from PyQt4.QtCore import QCoreApplication
import sys, pdb
app = QCoreApplication(sys.argv)
# Testing the thermostat on the console
t = Thermostat(parent=app, test=True)
t.start()
def uncaught(type, value, traceback):
print type, value, traceback
QCoreApplication.quit()
sys.excepthook = uncaught
app.exec_()
print 'Stopping...'
t.stop()
|
baritonehands/opennest
|
thermostat.py
|
Python
|
gpl-3.0
| 7,132
|
[
"Brian"
] |
e1fc2d43298808741809faef9fe2df40c6530a834f17b779bb87cb9c6c91559a
|
import datetime
from datetime import timedelta
from unittest import skip
from unittest.mock import patch
from django.core import mail
from django.utils import timezone
from django.test import TestCase
from attivita.forms import ModuloOrganizzaAttivitaReferente
from attivita.models import Attivita, Area, Turno, Partecipazione
from anagrafica.costanti import LOCALE
from anagrafica.models import Sede, Persona, Appartenenza, Delega
from anagrafica.permessi.applicazioni import REFERENTE, PRESIDENTE, DELEGATO_CO
from anagrafica.permessi.costanti import GESTIONE_CENTRALE_OPERATIVA_SEDE
from autenticazione.utils_test import TestFunzionale
from base.utils import poco_fa
from base.utils_tests import crea_persona, crea_persona_sede_appartenenza, crea_area_attivita, crea_turno, crea_partecipazione, \
email_fittizzia, crea_appartenenza
from base.models import Autorizzazione
class TestAttivita(TestCase):
def test_attivita(self):
        ## Sede hierarchy under test: Sicilia -> [Fiumefreddo, Mascali]
        ## Calabria -> [] (no child committees)
sicilia = Sede(
nome="Comitato Regionale di Sicilia",
tipo=Sede.COMITATO,
estensione=LOCALE,
)
sicilia.save()
fiumefreddo = Sede(
nome="Comitato Locale di Fiumefreddo di Sicilia",
tipo=Sede.COMITATO,
estensione=LOCALE,
genitore=sicilia,
)
fiumefreddo.save()
mascali = Sede(
nome="Comitato Locale di Mascali",
tipo=Sede.COMITATO,
estensione=LOCALE,
genitore=sicilia,
)
mascali.save()
calabria = Sede(
nome="Comitato Regionale di Calabria",
tipo=Sede.COMITATO,
estensione=LOCALE,
)
calabria.save()
area = Area(
nome="6",
obiettivo=6,
sede=sicilia,
)
area.save()
a = Attivita(
stato=Attivita.VISIBILE,
nome="Att 1",
apertura=Attivita.APERTA,
area=area,
descrizione="1",
sede=sicilia,
estensione=sicilia,
)
a.save()
a1 = Attivita(
stato=Attivita.VISIBILE,
nome="Att 1",
apertura=Attivita.APERTA,
area=area,
descrizione="1",
sede=fiumefreddo,
estensione=sicilia,
)
a1.save()
t = Turno(
attivita=a,
prenotazione=datetime.datetime(2015, 11, 10),
inizio=datetime.datetime(2015, 11, 10),
fine=datetime.datetime(2015, 11, 30),
minimo=1,
massimo=6,
)
t.save()
t1 = Turno(
attivita=a,
prenotazione=datetime.datetime(2015, 11, 10),
inizio=datetime.datetime(2015, 10, 10),
fine=datetime.datetime(2015, 10, 30)
)
t1.save()
t2 = Turno(
attivita=a1,
prenotazione=datetime.datetime(2015, 11, 10),
inizio=datetime.datetime(2015, 11, 10),
fine=datetime.datetime(2015, 11, 30)
)
t2.save()
p = Persona(
nome="Mario",
cognome="Rossi",
codice_fiscale="FRSSAKJSIKAJDO",
data_nascita="1994-2-5"
)
p.save()
p1 = Persona(
nome="Mario",
cognome="Rossi",
codice_fiscale="FRSSAKJSIRAJDO",
data_nascita="1994-2-5"
)
p1.save()
p2 = Persona(
nome="Mario",
cognome="Rossi",
codice_fiscale="FRSSAKJNOKAJDO",
data_nascita="1994-2-5"
)
p2.save()
p3 = Persona(
nome="Mario",
cognome="Rossi",
codice_fiscale="FRSSAKJNOKAJMI",
data_nascita="1994-2-5"
)
p3.save()
app = Appartenenza(
persona=p,
sede=sicilia,
membro=Appartenenza.VOLONTARIO,
inizio="1980-12-10",
)
app.save()
app1 = Appartenenza(
persona=p1,
sede=fiumefreddo,
membro=Appartenenza.VOLONTARIO,
inizio="1980-12-10",
)
app1.save()
app2 = Appartenenza(
persona=p2,
sede=mascali,
membro=Appartenenza.VOLONTARIO,
inizio="1980-12-10",
)
app2.save()
app3 = Appartenenza(
persona=p3,
sede=calabria,
membro=Appartenenza.VOLONTARIO,
inizio="1980-12-10",
)
app3.save()
self.assertTrue(
p.calendario_turni(datetime.date(2015, 11, 1), datetime.date(2015, 11, 30)).filter(pk=t.pk).exists(),
msg="Il turno viene trovato nel calendario - attivita' creata dalla sede del volontario"
)
self.assertFalse(
p.calendario_turni(datetime.date(2015, 11, 1), datetime.date(2015, 11, 30)).filter(pk=t1.pk).exists(),
msg="Il turno non viene trovato nel calendario - attivita' creata dalla sede del volontario"
)
def test_pagina_turni(self):
sicilia = Sede.objects.create(
nome="Comitato Regionale di Sicilia",
tipo=Sede.COMITATO,
estensione=LOCALE,
)
area = Area.objects.create(
nome="6",
obiettivo=6,
sede=sicilia,
)
attivita = Attivita.objects.create(
stato=Attivita.VISIBILE,
nome="Att 1",
apertura=Attivita.APERTA,
area=area,
descrizione="1",
sede=sicilia,
estensione=sicilia,
)
oggi = timezone.now()
for day in range(1, 11):
giorno_1 = oggi - timedelta(days=day)
Turno.objects.create(
attivita=attivita,
prenotazione=giorno_1 - timedelta(days=1),
inizio=giorno_1,
fine=giorno_1 + timedelta(days=1),
minimo=1,
massimo=6,
)
giorno_1 = oggi + timedelta(days=20 + day)
Turno.objects.create(
attivita=attivita,
prenotazione=giorno_1 - timedelta(days=1),
inizio=giorno_1,
fine=giorno_1 + timedelta(days=1),
minimo=1,
massimo=6,
)
        # There are 10 shifts that end before today, so we land on the first page
self.assertEqual(attivita.pagina_turni_oggi(), 1)
for day in range(1, 2):
giorno_1 = oggi - timedelta(days=day)
Turno.objects.create(
attivita=attivita,
prenotazione=giorno_1 - timedelta(days=1),
inizio=giorno_1,
fine=giorno_1 + timedelta(days=1),
minimo=1,
massimo=6,
)
        # There are 11 shifts that end before today, so we land on the second page
self.assertEqual(attivita.pagina_turni_oggi(), 2)
def test_attivita_estesa(self):
sicilia = Sede(
nome="Comitato Regionale di Sicilia",
tipo=Sede.COMITATO,
estensione=LOCALE,
)
sicilia.save()
fiumefreddo = Sede(
nome="Comitato Locale di Fiumefreddo di Sicilia",
tipo=Sede.COMITATO,
estensione=LOCALE,
genitore=sicilia,
)
fiumefreddo.save()
mascali = Sede(
nome="Comitato Locale di Mascali",
tipo=Sede.COMITATO,
estensione=LOCALE,
genitore=sicilia,
)
mascali.save()
calabria = Sede(
nome="Comitato Regionale di Calabria",
tipo=Sede.COMITATO,
estensione=LOCALE,
)
calabria.save()
area = Area(
nome="6",
obiettivo=6,
sede=sicilia,
)
area.save()
a = Attivita(
stato=Attivita.VISIBILE,
nome="Att 1",
apertura=Attivita.APERTA,
area=area,
descrizione="1",
sede=sicilia,
estensione=sicilia,
)
a.save()
a1 = Attivita(
stato=Attivita.VISIBILE,
nome="Att 1",
apertura=Attivita.APERTA,
area=area,
descrizione="1",
sede=fiumefreddo,
estensione=sicilia,
)
a1.save()
t = Turno(
attivita=a,
prenotazione=datetime.datetime(2015, 11, 10),
inizio=datetime.datetime(2015, 11, 10),
fine=datetime.datetime(2015, 11, 30),
minimo=1,
massimo=6,
)
t.save()
t1 = Turno(
attivita=a,
prenotazione=datetime.datetime(2015, 11, 10),
inizio=datetime.datetime(2015, 10, 10),
fine=datetime.datetime(2015, 10, 30)
)
t1.save()
t2 = Turno(
attivita=a1,
prenotazione=datetime.datetime(2015, 11, 10),
inizio=datetime.datetime(2015, 11, 10),
fine=datetime.datetime(2015, 11, 30)
)
t2.save()
p = Persona(
nome="Mario",
cognome="Rossi",
codice_fiscale="FRSSAKJSIKAJDO",
data_nascita="1994-2-5"
)
p.save()
p1 = Persona(
nome="Mario",
cognome="Rossi",
codice_fiscale="FRSSAKJSIRAJDO",
data_nascita="1994-2-5"
)
p1.save()
p2 = Persona(
nome="Mario",
cognome="Rossi",
codice_fiscale="FRSSAKJNOKAJDO",
data_nascita="1994-2-5"
)
p2.save()
p3 = Persona(
nome="Mario",
cognome="Rossi",
codice_fiscale="FRSSAKJNOKAJMI",
data_nascita="1994-2-5"
)
p3.save()
app = Appartenenza(
persona=p,
sede=sicilia,
membro=Appartenenza.VOLONTARIO,
inizio="1980-12-10",
)
app.save()
app1 = Appartenenza(
persona=p1,
sede=fiumefreddo,
membro=Appartenenza.VOLONTARIO,
inizio="1980-12-10",
)
app1.save()
app2 = Appartenenza(
persona=p2,
sede=mascali,
membro=Appartenenza.VOLONTARIO,
inizio="1980-12-10",
)
app2.save()
app3 = Appartenenza(
persona=p3,
sede=calabria,
membro=Appartenenza.VOLONTARIO,
inizio="1980-12-10",
)
app3.save()
self.assertTrue(
p2.calendario_turni(datetime.date(2015, 11, 1), datetime.date(2015, 11, 30)).filter(pk=t2.pk).exists(),
msg="Il turno viene trovato nel calendario - attivita' estesa al volontario"
)
self.assertFalse(
p3.calendario_turni(datetime.date(2015, 11, 1), datetime.date(2015, 11, 30)).filter(pk=t2.pk).exists(),
msg="Il turno non viene trovato nel calendario - attivita' estesa al volontario"
)
def test_permessi_attivita(self):
fiumefreddo = Sede(
nome="Comitato Locale di Fiumefreddo di Sicilia",
tipo=Sede.COMITATO,
estensione=LOCALE,
)
fiumefreddo.save()
mascali = Sede(
nome="Comitato Locale di Mascali",
tipo=Sede.COMITATO,
estensione=LOCALE,
)
mascali.save()
area = Area(
nome="6",
obiettivo=6,
sede=fiumefreddo,
)
area.save()
a = Attivita(
stato=Attivita.VISIBILE,
nome="Att 1",
apertura=Attivita.APERTA,
area=area,
descrizione="1",
sede=mascali,
)
a.save()
p = Persona(
nome="Mario",
cognome="Rossi",
codice_fiscale="FRSSAKJNOKAJMI",
data_nascita="1994-2-5"
)
p.save()
app = Appartenenza(
persona=p,
sede=fiumefreddo,
membro=Appartenenza.VOLONTARIO,
inizio="1980-12-10",
)
app.save()
t = Turno(
attivita=a,
prenotazione=datetime.datetime(2015, 11, 10),
inizio=datetime.datetime(2015, 11, 10),
fine=datetime.datetime(2015, 11, 30),
minimo=1,
massimo=6,
)
t.save()
delega = Delega(
oggetto=a,
persona=p,
tipo=REFERENTE,
inizio="2015-11-15",
)
delega.save()
self.assertTrue(
p.calendario_turni(datetime.date(2015, 11, 1), datetime.date(2015, 11, 30)).filter(pk=t.pk).exists(),
msg="Il turno viene trovato nel calendario - attivita' creata dalla sede del volontario"
)
def test_autorizzazioni_automatiche_non_scadute(self):
presidente = crea_persona()
persona, sede, app = crea_persona_sede_appartenenza(presidente=presidente)
ora = timezone.now()
area, attivita = crea_area_attivita(sede)
domani_inizio = ora + timedelta(days=24)
domani_fine = ora + timedelta(days=180)
t1 = crea_turno(attivita, inizio=domani_inizio, fine=domani_fine)
partecipazione = crea_partecipazione(persona, t1)
attivita.centrale_operativa = Attivita.CO_AUTO
attivita.save()
self.assertEqual(0, Autorizzazione.objects.count())
partecipazione.richiedi()
self.assertEqual(0, len(mail.outbox))
self.assertEqual(1, Autorizzazione.objects.count())
autorizzazione = Autorizzazione.objects.first()
Autorizzazione.gestisci_automatiche()
self.assertEqual(0, len(mail.outbox))
self.assertFalse(partecipazione.automatica)
Autorizzazione.gestisci_automatiche()
self.assertEqual(0, len(mail.outbox))
self.assertFalse(partecipazione.automatica)
def test_autorizzazioni_automatiche_scadute(self):
presidente = crea_persona()
persona, sede, app = crea_persona_sede_appartenenza(presidente=presidente)
persona.email_contatto = email_fittizzia()
persona.save()
ora = timezone.now()
area, attivita = crea_area_attivita(sede)
domani_inizio = ora + timedelta(days=24)
domani_fine = ora + timedelta(days=180)
t1 = crea_turno(attivita, inizio=domani_inizio, fine=domani_fine)
partecipazione = crea_partecipazione(persona, t1)
attivita.centrale_operativa = Attivita.CO_AUTO
attivita.save()
self.assertEqual(0, Autorizzazione.objects.count())
partecipazione.richiedi()
self.assertNotIn(partecipazione, Partecipazione.con_esito_ok())
self.assertEqual(0, len(mail.outbox))
self.assertEqual(1, Autorizzazione.objects.count())
autorizzazione = Autorizzazione.objects.first()
self.assertNotEqual(autorizzazione.scadenza, None)
autorizzazione.scadenza = timezone.now() - timedelta(days=10)
autorizzazione.save()
self.assertFalse(autorizzazione.concessa)
Autorizzazione.gestisci_automatiche()
self.assertEqual(1, len(mail.outbox))
messaggio = mail.outbox[0]
self.assertTrue(messaggio.subject.find('Richiesta di partecipazione attività RESPINTA') > -1)
self.assertFalse(messaggio.subject.find('Richiesta di partecipazione attività APPROVATA') > -1)
self.assertTrue(messaggio.body.find('una tua richiesta è rimasta in attesa per 30 giorni e come da policy') == -1)
self.assertTrue(autorizzazione.oggetto.automatica)
Autorizzazione.gestisci_automatiche()
self.assertEqual(1, len(mail.outbox))
self.assertEqual(autorizzazione.concessa, None)
self.assertIn(partecipazione, Partecipazione.con_esito_no())
class TestFunzionaleAttivita(TestFunzionale):
def test_crea_area(self):
presidente = crea_persona()
persona, sede, appartenenza = crea_persona_sede_appartenenza(presidente=presidente)
if not presidente.volontario:
crea_appartenenza(presidente, sede)
sessione_presidente = self.sessione_utente(persona=presidente)
#sessione_persona = self.sessione_utente(persona=persona)
        # Create the intervention area
sessione_presidente.click_link_by_partial_href("/attivita/")
sessione_presidente.click_link_by_partial_text("Aree di intervento")
sessione_presidente.click_link_by_partial_text(sede.nome)
sessione_presidente.fill('nome', "Area 42")
sessione_presidente.fill('obiettivo', '6')
sessione_presidente.find_by_xpath("//button[@type='submit']").first.click()
        # Appoint the person as the area manager
self.seleziona_delegato(sessione_presidente, persona)
self.assertTrue(
sessione_presidente.is_text_present("Area 42"),
"La nuova area è stata creata con successo",
)
self.assertTrue(
sessione_presidente.is_text_present(persona.nome_completo),
"La nuova area ha il responsabile assegnato",
)
self.assertTrue(
sessione_presidente.is_text_present("0 attività"),
"La nuova area non ha alcuna attività",
)
def test_crea_attivita(self):
presidente = crea_persona()
persona, sede, appartenenza = crea_persona_sede_appartenenza(presidente=presidente)
if not presidente.volontario:
crea_appartenenza(presidente, sede)
area = Area(sede=sede, nome="Area 42", obiettivo=6)
area.save()
        # Create the sessions
sessione_presidente = self.sessione_utente(persona=presidente)
sessione_persona = self.sessione_utente(persona=persona)
        # President: go to organize activity
sessione_presidente.click_link_by_partial_href("/attivita/")
sessione_presidente.click_link_by_partial_text("Organizza attività")
        # President: fill in the activity details
sessione_presidente.fill('nome', "Fine del mondo")
sessione_presidente.select('area', area.pk)
sessione_presidente.select('scelta', ModuloOrganizzaAttivitaReferente.SONO_IO)
        # President: submit the form
sessione_presidente.find_by_xpath("//button[@type='submit']").first.click()
        # President: back to the activity list, navigate to the new shift.
sessione_presidente.click_link_by_partial_text("Gestione turni")
sessione_presidente.click_link_by_partial_text("Crea nuovo turno")
        inizio = (timezone.now()).strftime("%d/%m/%Y %H:%M")
        fine = (timezone.now() + timedelta(hours=30)).strftime("%d/%m/%Y %H:%M")
        # President: fill in the new shift's details
sessione_presidente.fill('nome', "Vedetta")
sessione_presidente.fill('inizio', inizio)
sessione_presidente.fill('fine', fine)
sessione_presidente.fill('minimo', 1)
sessione_presidente.fill('massimo', 5)
sessione_presidente.fill('prenotazione', inizio)
sessione_presidente.execute_script('window.scrollTo(0, document.body.scrollHeight)')
        # President: submit the form
sessione_presidente.find_by_css("button.btn-primary").first.click()
        # Volunteer: go to activities
sessione_persona.click_link_by_partial_text("Attività")
self.assertFalse(sessione_persona.is_text_present("Vedetta"),
msg="L'attività non è visibile.")
# Presidente: Modifica attività
sessione_presidente.click_link_by_partial_text("Elenco attività")
sessione_presidente.click_link_by_partial_text("modifica info")
sessione_presidente.click_link_by_partial_text("Gestione attività")
        # President: set the state to VISIBILE
sessione_presidente.select('stato', Attivita.VISIBILE)
        # President: submit the form
sessione_presidente.find_by_xpath("//button[@type='submit']").first.click()
        # Volunteer: go to activities
sessione_persona.click_link_by_partial_text("Attività")
self.assertTrue(sessione_persona.is_text_present("Vedetta"),
msg="L'attività è ora visibile.")
# Volontario: Clicca sul turno
sessione_persona.click_link_by_partial_text("Vedetta")
self.assertTrue(sessione_persona.is_text_present("Scoperto!"),
msg="Viene mostrata correttamente come scoperta.")
def test_richiesta_partecipazione(self):
referente = crea_persona()
volontario, sede, appartenenza = crea_persona_sede_appartenenza()
area, attivita = crea_area_attivita(sede=sede)
inizio = timezone.now() + timedelta(hours=12)
fine = inizio + timedelta(hours=2)
turno = crea_turno(attivita, inizio=inizio, fine=fine)
attivita.aggiungi_delegato(REFERENTE, referente)
        # Create the sessions
sessione_referente = self.sessione_utente(persona=referente)
sessione_volontario = self.sessione_utente(persona=volontario)
        # Volunteer: open the activity page
sessione_volontario.visit("%s%s" % (self.live_server_url, attivita.url))
        # Volunteer: open the shifts page
sessione_volontario.click_link_by_partial_text("Turni")
        # Volunteer: ask to participate
sessione_volontario.click_link_by_partial_text("Partecipa")
self.assertTrue(sessione_volontario.is_text_present("Richiesta inoltrata"),
msg="La richiesta e stata inoltrata")
# Volontario: Apri la pagina dell'attivita', pagina turni
sessione_volontario.visit("%s%s" % (self.live_server_url, attivita.url))
sessione_volontario.click_link_by_partial_text("Turni")
self.assertTrue(sessione_volontario.is_text_present("Hai chiesto di partecipare"),
msg="Utente ha feedback sull'aver chiesto di partecipare")
# Volontario: Vai allo storico
sessione_volontario.click_link_by_partial_text("Miei turni")
self.assertTrue(sessione_volontario.is_text_present("In attesa"),
msg="Storico mostra stato in attesa della richiesta")
# Referente: Trova la richiesta
sessione_referente.click_link_by_partial_text("Richieste")
self.assertTrue(sessione_referente.is_text_present(volontario.nome_completo),
msg="La richiesta mostra il nome del volontario")
self.assertTrue(sessione_referente.is_text_present(turno.nome),
msg="La richiesta mostra il nome del turno")
# Referente: Trova la richiesta
sessione_referente.click_link_by_partial_text("Conferma")
        # Volunteer: go to the history
sessione_volontario.click_link_by_partial_text("Miei turni")
self.assertTrue(sessione_volontario.is_text_present("Approvata"),
msg="La richiesta risulta ora approvata")
# Volontario: Vai al turno
sessione_volontario.click_link_by_partial_text(turno.nome)
self.assertTrue(sessione_volontario.is_text_present("Partecipazione confermata"),
msg="La partecipazione risulta nel turno")
def test_campo_centrale_operativa_disabilitata(self):
presidente = crea_persona()
referente = crea_persona()
volontario, sede, appartenenza = crea_persona_sede_appartenenza()
delega = Delega(
oggetto=sede,
persona=presidente,
tipo=PRESIDENTE,
inizio="2005-11-15",
)
delega.save()
delega_2 = Delega(
oggetto=sede,
persona=referente,
tipo=DELEGATO_CO,
inizio="2005-11-15",
)
delega_2.save()
area, attivita = crea_area_attivita(sede=sede)
inizio = timezone.now() + timedelta(hours=12)
fine = inizio + timedelta(hours=2)
turno = crea_turno(attivita, inizio=inizio, fine=fine)
attivita.aggiungi_delegato(REFERENTE, volontario)
attivita.aggiungi_delegato(REFERENTE, referente)
        # Create the sessions
sessione_referente = self.sessione_utente(persona=referente)
sessione_volontario = self.sessione_utente(persona=volontario)
sessione_presidente = self.sessione_utente(persona=presidente)
        # Volunteer: open the activity page
sessione_volontario.visit("%s%smodifica/" % (self.live_server_url, attivita.url))
self.assertIn('disabled', sessione_volontario.find_by_id('id_centrale_operativa')[0].outer_html)
sessione_presidente.visit("%s%smodifica/" % (self.live_server_url, attivita.url))
self.assertNotIn('disabled', sessione_presidente.find_by_id('id_centrale_operativa')[0].outer_html)
sessione_referente.visit("%s%smodifica/" % (self.live_server_url, attivita.url))
self.assertNotIn('disabled', sessione_referente.find_by_id('id_centrale_operativa')[0].outer_html)
|
CroceRossaItaliana/jorvik
|
attivita/tests.py
|
Python
|
gpl-3.0
| 25,417
|
[
"VisIt"
] |
38bff532ce8dfcbe656ddd4ff994ffb1700c704cbafc6eac84029c99d40e755d
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class AtomDft(MakefilePackage):
"""ATOM is a program for DFT calculations in atoms and pseudopotential
generation."""
homepage = "https://departments.icmab.es/leem/siesta/Pseudopotentials/"
url = "https://departments.icmab.es/leem/siesta/Pseudopotentials/Code/atom-4.2.6.tgz"
version('4.2.6', sha256='489f0d883af35525647a8b8f691e7845c92fe6b5a25b13e1ed368edfd0391ed2')
depends_on('libgridxc')
depends_on('xmlf90')
def edit(self, spec, prefix):
copy('arch.make.sample', 'arch.make')
@property
def build_targets(self):
return ['XMLF90_ROOT=%s' % self.spec['xmlf90'].prefix,
'GRIDXC_ROOT=%s' % self.spec['libgridxc'].prefix,
'FC=fc']
def install(self, spec, prefix):
mkdir(prefix.bin)
install('atm', prefix.bin)
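# Sketch of the effective build command (prefixes are illustrative, not real
# Spack paths): MakefilePackage appends build_targets to the `make` call, so
# the build phase is roughly:
#
#   make XMLF90_ROOT=<xmlf90-prefix> GRIDXC_ROOT=<libgridxc-prefix> FC=fc
#
# where each NAME=value pair overrides the matching variable in arch.make.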
|
iulian787/spack
|
var/spack/repos/builtin/packages/atom-dft/package.py
|
Python
|
lgpl-2.1
| 1,050
|
[
"SIESTA"
] |
a582db82d30f57337c9f0a898ecf2367d475052d9042e2223a2570833bb6e5b4
|
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy
from pyscf.pbc.df import mdf
import pyscf.pbc.gto as pgto
from pyscf.pbc.lib import kpts_helper
from pyscf import ao2mo
L = 5.
n = 3
cell = pgto.Cell()
cell.a = numpy.diag([L,L,L])
cell.mesh = numpy.array([n,n,n])
cell.atom = '''He 3. 2. 3.
He 1. 1. 1.'''
cell.basis = 'ccpvdz'
cell.verbose = 0
cell.rcut = 17
cell.build(0,0)
nao = cell.nao_nr()
def finger(a):
    w = numpy.cos(numpy.arange(a.size))
    return numpy.dot(w, a.ravel())
class KnowValues(unittest.TestCase):
def test_eri1111(self):
kpts = numpy.random.random((4,3)) * .25
kpts[3] = -numpy.einsum('ij->j', kpts[:3])
with_df = mdf.MDF(cell).set(auxbasis='weigend')
with_df.linear_dep_threshold = 1e-7
with_df.kpts = kpts
mo =(numpy.random.random((nao,nao)) +
numpy.random.random((nao,nao))*1j)
eri = with_df.get_eri(kpts).reshape((nao,)*4)
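        # AO->MO "quarter transformations": rotate one index of the 4-index
        # integral tensor at a time, with conjugated MO coefficients on bra
        # indices and plain coefficients on ket indices; the chained result
        # is then checked against with_df.ao2mo() below.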
eri0 = numpy.einsum('pjkl,pi->ijkl', eri , mo.conj())
eri0 = numpy.einsum('ipkl,pj->ijkl', eri0, mo )
eri0 = numpy.einsum('ijpl,pk->ijkl', eri0, mo.conj())
eri0 = numpy.einsum('ijkp,pl->ijkl', eri0, mo )
eri1 = with_df.ao2mo(mo, kpts)
self.assertAlmostEqual(abs(eri1.reshape(eri0.shape)-eri0).sum(), 0, 9)
def test_eri0110(self):
kpts = numpy.random.random((4,3)) * .25
kpts[3] = kpts[0]
kpts[2] = kpts[1]
with_df = mdf.MDF(cell).set(auxbasis='weigend')
with_df.linear_dep_threshold = 1e-7
with_df.kpts = kpts
mo =(numpy.random.random((nao,nao)) +
numpy.random.random((nao,nao))*1j)
eri = with_df.get_eri(kpts).reshape((nao,)*4)
eri0 = numpy.einsum('pjkl,pi->ijkl', eri , mo.conj())
eri0 = numpy.einsum('ipkl,pj->ijkl', eri0, mo )
eri0 = numpy.einsum('ijpl,pk->ijkl', eri0, mo.conj())
eri0 = numpy.einsum('ijkp,pl->ijkl', eri0, mo )
eri1 = with_df.ao2mo(mo, kpts)
self.assertAlmostEqual(abs(eri1.reshape(eri0.shape)-eri0).sum(), 0, 8)
def test_eri0000(self):
with_df = mdf.MDF(cell).set(auxbasis='weigend')
with_df.linear_dep_threshold = 1e-7
with_df.kpts = numpy.zeros((4,3))
mo =(numpy.random.random((nao,nao)) +
numpy.random.random((nao,nao))*1j)
eri = ao2mo.restore(1, with_df.get_eri(with_df.kpts), nao)
eri0 = numpy.einsum('pjkl,pi->ijkl', eri , mo.conj())
eri0 = numpy.einsum('ipkl,pj->ijkl', eri0, mo )
eri0 = numpy.einsum('ijpl,pk->ijkl', eri0, mo.conj())
eri0 = numpy.einsum('ijkp,pl->ijkl', eri0, mo )
eri1 = with_df.ao2mo(mo, with_df.kpts)
self.assertAlmostEqual(abs(eri1.reshape(eri0.shape)-eri0).sum(), 0, 9)
mo = mo.real
eri0 = numpy.einsum('pjkl,pi->ijkl', eri , mo.conj())
eri0 = numpy.einsum('ipkl,pj->ijkl', eri0, mo )
eri0 = numpy.einsum('ijpl,pk->ijkl', eri0, mo.conj())
eri0 = numpy.einsum('ijkp,pl->ijkl', eri0, mo )
eri1 = with_df.ao2mo(mo, with_df.kpts, compact=False)
self.assertAlmostEqual(abs(eri1.reshape(eri0.shape)-eri0).sum(), 0, 9)
def test_ao2mo_7d(self):
L = 3.
n = 6
cell = pgto.Cell()
cell.a = numpy.diag([L,L,L])
cell.mesh = [n,n,n]
cell.atom = '''He 2. 2.2 2.
He 1.2 1. 1.'''
cell.basis = {'He': [[0, (1.2, 1)], [1, (0.6, 1)]]}
cell.verbose = 0
cell.build(0,0)
kpts = cell.make_kpts([1,3,1])
nkpts = len(kpts)
nao = cell.nao_nr()
numpy.random.seed(1)
mo =(numpy.random.random((nkpts,nao,nao)) +
numpy.random.random((nkpts,nao,nao))*1j)
with_df = mdf.MDF(cell, kpts)
out = with_df.ao2mo_7d(mo, kpts)
ref = numpy.empty_like(out)
kconserv = kpts_helper.get_kconserv(cell, kpts)
for ki, kj, kk in kpts_helper.loop_kkk(nkpts):
kl = kconserv[ki, kj, kk]
tmp = with_df.ao2mo((mo[ki], mo[kj], mo[kk], mo[kl]), kpts[[ki,kj,kk,kl]])
ref[ki,kj,kk] = tmp.reshape([nao]*4)
self.assertAlmostEqual(abs(out-ref).max(), 0, 12)
if __name__ == '__main__':
print("Full Tests for mdf ao2mo")
unittest.main()
|
gkc1000/pyscf
|
pyscf/pbc/df/test/test_mdf_ao2mo.py
|
Python
|
apache-2.0
| 4,940
|
[
"PySCF"
] |
709a77549b42e6013c46e61eb8a4e5e7d160ffa0f3186b0de6004108c2682109
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# generated by wxGlade 0.6.3 on Thu Feb 11 04:08:55 2010
import wx
import webbrowser
import wx.lib.iewin_old as iewin
import wx.lib.sized_controls as sc
import config
import update
import time
import httplib
import os
# begin wxGlade: extracode
# end wxGlade
class UpdatedDialog(wx.Dialog):
def __init__(self, *args, **kwds):
# begin wxGlade: UpdatedDialog.__init__
kwds["style"] = wx.DEFAULT_DIALOG_STYLE
wx.Dialog.__init__(self, *args, **kwds)
self.label_updated = wx.StaticText(self, -1, "Your Database is up to date.")
self.button_updated_ok = wx.Button(self, wx.ID_OK, "")
self.__set_properties()
self.__do_layout()
# end wxGlade
def __set_properties(self):
# begin wxGlade: UpdatedDialog.__set_properties
self.SetTitle("Your DB is up to date")
# end wxGlade
def __do_layout(self):
# begin wxGlade: UpdatedDialog.__do_layout
sizer_updated = wx.BoxSizer(wx.VERTICAL)
sizer_updated.Add(self.label_updated, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL, 5)
sizer_updated.Add(self.button_updated_ok, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL, 5)
self.SetSizer(sizer_updated)
sizer_updated.Fit(self)
self.Layout()
# end wxGlade
# end of class UpdatedDialog
class ConfigDialog(sc.SizedDialog):
def __init__(self, parent, id, title):
sc.SizedDialog.__init__(self, None, -1, title,
style=wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)
self.pane = self.GetContentsPane()
self.pane.SetSizerType("form")
# row port
wx.StaticText(self.pane, -1, "Port")
self.textCtrl_port = wx.TextCtrl(self.pane, -1, config.SS_PORT)
self.textCtrl_port.SetSizerProps(expand=True)
# row timestamp
wx.StaticText(self.pane, -1, "TimeStamp(GMT)")
dt = time.strftime('%Y/%m/%d %H:%M:%S',time.localtime(int(config.SS_DB_UPDTIME)))
self.textCtrl_updt = wx.TextCtrl(self.pane, -1, dt)
self.textCtrl_updt.SetSizerProps(expand=True)
# row threads
wx.StaticText(self.pane, -1, "Max Number of Threads(VeryCD only)")
self.textCtrl_mt = wx.TextCtrl(self.pane, -1, config.SS_MAX_THREADS)
self.textCtrl_mt.SetSizerProps(expand=True)
# row title
wx.StaticText(self.pane, -1, "Update From(simplecd is usually 4-5 times faster)")
# here's how to add a 'nested sizer' using sized_controls
self.radioPane = sc.SizedPanel(self.pane, -1)
self.radioPane.SetSizerType("horizontal")
self.radioPane.SetSizerProps(expand=True)
# make these children of the radioPane to have them use
# the horizontal layout
self.rb1 = wx.RadioButton(self.radioPane, -1, "SimpleCD")
self.rb2 = wx.RadioButton(self.radioPane, -1, "VeryCD")
if config.SS_UPDATE_SOURCE == 'simplecd':
self.rb1.SetValue(True)
elif config.SS_UPDATE_SOURCE == 'verycd':
self.rb2.SetValue(True)
# end row title
# add dialog buttons
self.SetButtonSizer(self.CreateStdDialogButtonSizer(wx.OK | wx.CANCEL))
self.Bind(wx.EVT_BUTTON,self.onOK,id=wx.ID_OK)
# a little trick to make sure that you can't resize the dialog to
# less screen space than the controls need
self.Fit()
self.SetMinSize(self.GetSize())
def onOK(self,event):
config.SS_PORT = self.textCtrl_port.GetValue()
try:
config.SS_DB_UPDTIME = str(int(time.mktime(time.strptime(self.textCtrl_updt.GetValue(),'%Y/%m/%d %H:%M:%S'))))
except Exception as what:
print what.__str__()
config.SS_MAX_THREADS = self.textCtrl_mt.GetValue()
config.SS_UPDATE_SOURCE = self.rb2.GetValue() and 'verycd' or 'simplecd'
config.savecfg()
self.Close()
class AboutFrame(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: AboutFrame.__init__
kwds["style"] = wx.CAPTION|wx.CLOSE_BOX|wx.MINIMIZE_BOX|wx.SYSTEM_MENU
wx.Frame.__init__(self, *args, **kwds)
self.about_png = wx.StaticBitmap(self, -1, wx.Bitmap("res\\about_dialog.png", wx.BITMAP_TYPE_ANY))
self.Visit = wx.Button(self, -1, "Visit Homepage")
self.__set_properties()
self.__do_layout()
self.Bind(wx.EVT_BUTTON, self.visit_home_handler, self.Visit)
# end wxGlade
def __set_properties(self):
# begin wxGlade: AboutFrame.__set_properties
self.SetTitle("About SimpleCD Desktop")
_icon = wx.EmptyIcon()
_icon.CopyFromBitmap(wx.Bitmap("res\\logo.png", wx.BITMAP_TYPE_ANY))
self.SetIcon(_icon)
self.SetSize((480, 320))
self.SetBackgroundColour(wx.Colour(255, 255, 255))
self.about_png.SetBackgroundColour(wx.Colour(255, 255, 255))
# end wxGlade
def __do_layout(self):
# begin wxGlade: AboutFrame.__do_layout
sizer_about = wx.BoxSizer(wx.VERTICAL)
sizer_about.Add(self.about_png, 0, wx.ALL, 10)
sizer_about.Add(self.Visit, 0, wx.RIGHT|wx.ALIGN_RIGHT|wx.ALIGN_BOTTOM|wx.ALIGN_CENTER_VERTICAL, 20)
self.SetSizer(sizer_about)
self.Layout()
# end wxGlade
def visit_home_handler(self, event): # wxGlade: AboutFrame.<event_handler>
webbrowser.open_new_tab('http://www.simplecd.org')
# end of class AboutFrame
class MainFrame(wx.Frame):
def __init__(self, *args, **kwds):
# run web server
#config.run_server()
# begin wxGlade: MainFrame.__init__
kwds["style"] = wx.CAPTION|wx.CLOSE_BOX|wx.MINIMIZE_BOX|wx.MAXIMIZE_BOX|wx.SYSTEM_MENU|wx.RESIZE_BORDER|wx.FULL_REPAINT_ON_RESIZE|wx.CLIP_CHILDREN
wx.Frame.__init__(self, *args, **kwds)
# Menu Bar
self.frame_main_menubar = wx.MenuBar()
wxglade_tmp_menu = wx.Menu()
wxglade_tmp_menu.Append(3001, "Home", "", wx.ITEM_NORMAL)
wxglade_tmp_menu.Append(wx.ID_SAVEAS, "Save as", "", wx.ITEM_NORMAL)
wxglade_tmp_menu.AppendSeparator()
wxglade_tmp_menu.Append(wx.ID_EXIT, "Exit", "", wx.ITEM_NORMAL)
self.frame_main_menubar.Append(wxglade_tmp_menu, "File")
wxglade_tmp_menu = wx.Menu()
wxglade_tmp_menu.Append(3002, "Search", "", wx.ITEM_NORMAL)
wxglade_tmp_menu.Append(3003, "Ranking", "", wx.ITEM_NORMAL)
self.frame_main_menubar.Append(wxglade_tmp_menu, "View")
wxglade_tmp_menu = wx.Menu()
wxglade_tmp_menu.Append(3004, "Config", "", wx.ITEM_NORMAL)
self.frame_main_menubar.Append(wxglade_tmp_menu, "Config")
wxglade_tmp_menu = wx.Menu()
wxglade_tmp_menu.Append(3007, "Update from ...", "", wx.ITEM_NORMAL)
self.frame_main_menubar.Append(wxglade_tmp_menu, "DB")
wxglade_tmp_menu = wx.Menu()
wxglade_tmp_menu.Append(3006, "Help", "", wx.ITEM_NORMAL)
wxglade_tmp_menu.Append(3005, "Update", "", wx.ITEM_NORMAL)
wxglade_tmp_menu.Append(wx.ID_ABOUT, "About", "", wx.ITEM_NORMAL)
self.frame_main_menubar.Append(wxglade_tmp_menu, "Help")
self.SetMenuBar(self.frame_main_menubar)
# Menu Bar end
self.frame_main_statusbar = self.CreateStatusBar(1, 0)
# Tool Bar
self.frame_main_toolbar = wx.ToolBar(self, -1, style=wx.TB_HORIZONTAL|wx.TB_DOCKABLE|wx.TB_3DBUTTONS)
self.SetToolBar(self.frame_main_toolbar)
self.frame_main_toolbar.AddLabelTool(2001, "Home", wx.Bitmap("res\\homepage.png", wx.BITMAP_TYPE_ANY), wx.NullBitmap, wx.ITEM_NORMAL, "Home", "")
self.frame_main_toolbar.AddLabelTool(2002, "Search", wx.Bitmap("res\\search.png", wx.BITMAP_TYPE_ANY), wx.NullBitmap, wx.ITEM_NORMAL, "Search", "")
self.frame_main_toolbar.AddLabelTool(2003, "Ranking", wx.Bitmap("res\\ranking.png", wx.BITMAP_TYPE_ANY), wx.NullBitmap, wx.ITEM_NORMAL, "Ranking", "")
self.frame_main_toolbar.AddSeparator()
self.frame_main_toolbar.AddLabelTool(2004, "Config", wx.Bitmap("res\\config.png", wx.BITMAP_TYPE_ANY), wx.NullBitmap, wx.ITEM_NORMAL, "Config", "")
self.frame_main_toolbar.AddLabelTool(2005, "Update", wx.Bitmap("res\\update.png", wx.BITMAP_TYPE_ANY), wx.NullBitmap, wx.ITEM_NORMAL, "Update", "")
self.frame_main_toolbar.AddSeparator()
self.frame_main_toolbar.AddLabelTool(wx.ID_ABOUT, "About", wx.Bitmap("res\\about.png", wx.BITMAP_TYPE_ANY), wx.NullBitmap, wx.ITEM_NORMAL, "About", "")
# Tool Bar end
self.__set_properties()
self.__do_layout()
self.Bind(wx.EVT_MENU, self.home_handler, id=3001)
self.Bind(wx.EVT_MENU, self.saveas_handler, id=wx.ID_SAVEAS)
self.Bind(wx.EVT_MENU, self.exit_handler, id=wx.ID_EXIT)
self.Bind(wx.EVT_MENU, self.search_handler, id=3002)
self.Bind(wx.EVT_MENU, self.ranking_handler, id=3003)
self.Bind(wx.EVT_MENU, self.config_handler, id=3004)
self.Bind(wx.EVT_MENU, self.db_update_handler, id=3007)
self.Bind(wx.EVT_MENU, self.help_handler, id=3006)
self.Bind(wx.EVT_MENU, self.update_handler, id=3005)
self.Bind(wx.EVT_MENU, self.about_handler, id=wx.ID_ABOUT)
self.Bind(wx.EVT_TOOL, self.home_handler, id=2001)
self.Bind(wx.EVT_TOOL, self.search_handler, id=2002)
self.Bind(wx.EVT_TOOL, self.ranking_handler, id=2003)
self.Bind(wx.EVT_TOOL, self.config_handler, id=2004)
self.Bind(wx.EVT_TOOL, self.update_handler, id=2005)
self.Bind(wx.EVT_TOOL, self.about_handler, id=wx.ID_ABOUT)
# end wxGlade
def __set_properties(self):
# begin wxGlade: MainFrame.__set_properties
self.SetTitle("SimpleCD Desktop 0.1.2c")
_icon = wx.EmptyIcon()
_icon.CopyFromBitmap(wx.Bitmap("res\\logo.png", wx.BITMAP_TYPE_ANY))
self.SetIcon(_icon)
self.SetSize((900, 599))
self.SetFocus()
self.frame_main_statusbar.SetStatusWidths([-1])
# statusbar fields
frame_main_statusbar_fields = ["Welcome to SimpleCD Desktop"]
for i in range(len(frame_main_statusbar_fields)):
self.frame_main_statusbar.SetStatusText(frame_main_statusbar_fields[i], i)
self.frame_main_toolbar.SetToolBitmapSize((32, 32))
self.frame_main_toolbar.Realize()
# end wxGlade
def __do_layout(self):
# begin wxGlade: MainFrame.__do_layout
sizer_main = wx.BoxSizer(wx.VERTICAL)
self.SetSizer(sizer_main)
self.Layout()
self.Centre()
# end wxGlade
# add homepage
self.ie = iewin.IEHtmlWindow(self)
sizer_main.Add(self.ie, 1, wx.GROW)
self.ie.LoadUrl('http://localhost:%s/'%config.SS_PORT)
def saveas_handler(self, event): # wxGlade: MainFrame.<event_handler>
print "Event handler `saveas_handler' not implemented!"
event.Skip()
def exit_handler(self, event): # wxGlade: MainFrame.<event_handler>
self.Close()
def config_handler(self, event): # wxGlade: MainFrame.<event_handler>
dialog_config = ConfigDialog(None, -1, "Configurations")
app.SetTopWindow(dialog_config)
dialog_config.ShowModal()
dialog_config.Destroy()
def about_handler(self, event): # wxGlade: MainFrame.<event_handler>
frame_about = AboutFrame(None, -1, "")
        app.SetTopWindow(frame_about)
frame_about.Show()
def home_handler(self, event): # wxGlade: MainFrame.<event_handler>
self.ie.LoadUrl('http://localhost:%s/'%config.SS_PORT)
def ranking_handler(self, event): # wxGlade: MainFrame.<event_handler>
print "Event handler `ranking_handler' not implemented!"
event.Skip()
def search_handler(self, event): # wxGlade: MainFrame.<event_handler>
print "Event handler `search_handler' not implemented"
event.Skip()
def update_handler(self, event): # wxGlade: MainFrame.<event_handler>
ids = update.get_update_ids()
if len(ids) == 0:
dialog_updated = UpdatedDialog(None, -1, "")
app.SetTopWindow(dialog_updated)
dialog_updated.ShowModal()
dialog_updated.Destroy()
return
elif ids[0].startswith('new:'):
dlg = wx.MessageDialog(self, ids[0][4:],
'Update Error',
wx.OK | wx.ICON_INFORMATION
#wx.YES_NO | wx.NO_DEFAULT | wx.CANCEL | wx.ICON_INFORMATION
)
dlg.ShowModal()
dlg.Destroy()
elif config.SS_UPDATE_SOURCE == 'verycd':
max = len(ids)
print max
dlg = wx.ProgressDialog("Updating Databases...",
'Processing 1/%d' % (max*2/3),
maximum = max,
parent=self,
style = wx.PD_CAN_ABORT
| wx.PD_APP_MODAL
| wx.PD_ELAPSED_TIME
| wx.PD_ESTIMATED_TIME
#| wx.PD_REMAINING_TIME
)
import fetchvc
lastid = fetchvc.update_ids(ids,dlg.Update,max)
print lastid
dlg.Update(max,'Finished')
update.update_db_updtime(lastid)
dlg.Destroy()
elif config.SS_UPDATE_SOURCE == 'simplecd':
group = 5
max = ((len(ids)-1)/group+1)*3*2
dlg = wx.ProgressDialog("Updating Databases...",
"Downloading 1/%d"%(max/2),
maximum = max,
parent=self,
style = wx.PD_CAN_ABORT
| wx.PD_APP_MODAL
| wx.PD_ELAPSED_TIME
| wx.PD_ESTIMATED_TIME
#| wx.PD_REMAINING_TIME
)
keepGoing = True
count = 0
httpconn = httplib.HTTPConnection("www.simplecd.org")
            # group the ids, `group` (= 5) per batch
for i in range(0,max/3/2):
subids = ids[i*group:i*group+group]
for dbname in ['verycd','lock']:
update.download_updates(dbname,subids,httpconn=httpconn)
count += 1
(keepGoing, skip) = dlg.Update(count, "Downloading %d/%d"%(count,max/2))
if not keepGoing:
break
if not keepGoing:
break
# Apply_updates
update.apply_updates('verycd',dlg.Update,max)
#update.apply_updates('comment',dlg.Update,max)
open(config.SS_HOME_DIR+'/comment.updates','w').write('')
update.apply_updates('lock',dlg.Update,max)
dlg.Update(max,'Finished')
# others
if not update.delete_tempfiles().startswith('error'):
update.update_db_updtime(ids[i*group-1])
if i>=max/6:
print ids
update.update_db_updtime(ids[-1])
dlg.Destroy()
def help_handler(self, event): # wxGlade: MainFrame.<event_handler>
webbrowser.open_new_tab('http://www.simplecd.org/bbs/')
def db_update_handler(self, event): # wxGlade: MainFrame.<event_handler>
dlg = wx.FileDialog(
self, message="Choose a file",
defaultDir=os.getcwd(),
defaultFile="",
wildcard="targz files (*.tar.gz)|*.tar.gz",
style=wx.OPEN | wx.MULTIPLE | wx.CHANGE_DIR
)
paths = None
if dlg.ShowModal() == wx.ID_OK:
paths = dlg.GetPaths()
dlg.Destroy()
updts = paths
if updts:
max = len(updts)*3*100000
dlg = wx.ProgressDialog("Updating from files...",
"Extracting...Please be patient ",
maximum = max,
parent=self,
style = wx.PD_CAN_ABORT
| wx.PD_APP_MODAL
| wx.PD_ELAPSED_TIME
| wx.PD_ESTIMATED_TIME
#| wx.PD_REMAINING_TIME
)
count = 1
for updt in updts:
import tarfile
tar = tarfile.open(updt)
tar.extractall(path=config.SS_HOME_DIR)
tar.close()
update.apply_updates('verycd',dlg.Update,max-1)
update.apply_updates('comment',dlg.Update,max-1)
update.apply_updates('lock',dlg.Update,max-1)
dlg.Update(max,'Finished')
update.delete_tempfiles()
update.update_timestamp()
dlg.Destroy()
# end of class MainFrame
if __name__ == "__main__":
app = wx.PySimpleApp(0)
wx.InitAllImageHandlers()
frame_main = MainFrame(None, -1, "")
app.SetTopWindow(frame_main)
frame_main.Show()
app.MainLoop()
config.stop_server()
|
ptphp/PyLib
|
src/wx/desktop.py
|
Python
|
apache-2.0
| 17,345
|
[
"VisIt"
] |
3e991806301b424b59d34a891b20bd73c686382a714552e2a3178df41853af03
|
# -*- coding: utf-8 -*-
"""
Smoothers.
:copyright: 2015 Agile Geoscience
:license: Apache 2.0
"""
import numpy as np
import scipy.ndimage
import scipy.stats
from bruges.bruges import BrugesError
from bruges.util import nearest
from bruges.util import rms as rms_
# TODO:
# - 1D and 2D Gaussian (or, better, n-D)
# - See how these handle Nans, consider removing, interpolating, replacing.
def mean(arr, size=5):
"""
A linear n-D smoothing filter. Can be used as a moving average on 1D data.
Args:
arr (ndarray): an n-dimensional array, such as a seismic horizon.
size (int): the kernel size, e.g. 5 for 5x5. Should be odd,
rounded up if not.
Returns:
ndarray: the resulting smoothed array.
"""
arr = np.array(arr, dtype=np.float)
    if not size % 2:
size += 1
return scipy.ndimage.generic_filter(arr, np.mean, size=size)
def rms(arr, size=5):
"""
A linear n-D smoothing filter. Can be used as a moving average on 1D data.
Args:
arr (ndarray): an n-dimensional array, such as a seismic horizon.
size (int): the kernel size, e.g. 5 for 5x5. Should be odd,
rounded up if not.
Returns:
ndarray: the resulting smoothed array.
"""
arr = np.array(arr, dtype=np.float)
    if not size % 2:
size += 1
return scipy.ndimage.generic_filter(arr, rms_, size=size)
def median(arr, size=5):
"""
A nonlinear n-D edge-preserving smoothing filter.
Args:
arr (ndarray): an n-dimensional array, such as a seismic horizon.
size (int): the kernel size, e.g. 5 for 5x5. Should be odd,
rounded up if not.
Returns:
ndarray: the resulting smoothed array.
"""
arr = np.array(arr, dtype=np.float)
    if not size % 2:
size += 1
return scipy.ndimage.generic_filter(arr, np.median, size=size)
def mode(arr, size=5, tie='smallest'):
"""
A nonlinear n-D categorical smoothing filter. Use this to filter non-
continuous variables, such as categorical integers, e.g. to label facies.
Args:
arr (ndarray): an n-dimensional array, such as a seismic horizon.
size (int): the kernel size, e.g. 5 for 5x5. Should be odd,
rounded up if not.
tie (str): `'smallest'` or `'largest`'. In the event of a tie (i.e. two
or more values having the same count in the kernel), whether to
give back the smallest of the tying values, or the largest.
Returns:
ndarray: the resulting smoothed array.
"""
def func(this, tie):
if tie == 'smallest':
m, _ = scipy.stats.mode(this)
else:
m, _ = -scipy.stats.mode(-this)
return np.squeeze(m)
arr = np.array(arr, dtype=np.float)
    if not size % 2:
size += 1
return scipy.ndimage.generic_filter(arr, func, size=size,
extra_keywords={'tie': tie}
)
def snn(arr, size=5, include=True):
"""
Symmetric nearest neighbour, a nonlinear 2D smoothing filter.
http://subsurfwiki.org/wiki/Symmetric_nearest_neighbour_filter
Args:
arr (ndarray): a 2D array, such as a seismic horizon.
size (int): the kernel size, e.g. 5 for 5x5. Should be odd,
rounded up if not.
include (bool): whether to include the central pixel itself.
Returns:
ndarray: the resulting smoothed array.
"""
def func(this, pairs, include):
"""
Deal with this patch.
"""
centre = this[this.size // 2]
select = [nearest(this[p], centre) for p in pairs]
if include:
select += [centre]
return np.mean(select)
arr = np.array(arr, dtype=np.float)
if arr.ndim != 2:
raise BrugesError("arr must have 2-dimensions")
    if not size % 2:
size += 1
pairs = [[i, size**2-1 - i] for i in range(size**2 // 2)]
return scipy.ndimage.generic_filter(arr,
func,
size=size,
extra_keywords={'pairs': pairs,
'include': include}
)
def kuwahara(arr, size=5):
"""
Kuwahara, a nonlinear 2D smoothing filter.
http://subsurfwiki.org/wiki/Kuwahara_filter
Args:
arr (ndarray): a 2D array, such as a seismic horizon.
size (int): the kernel size, e.g. 5 for 5x5. Should be odd,
rounded up if not.
Returns:
ndarray: the resulting smoothed array.
"""
def func(this, s, k):
"""
Deal with this patch.
"""
t = this.reshape((s, s))
sub = np.array([t[:k, :k].flatten(),
t[:k, k-1:].flatten(),
t[k-1:, :k].flatten(),
t[k-1:, k-1:].flatten()]
)
select = sub[np.argmin(np.var(sub, axis=1))]
return np.mean(select)
arr = np.array(arr, dtype=np.float)
if arr.ndim != 2:
raise BrugesError("arr must have 2-dimensions")
    if not size % 2:
size += 1
k = int(np.ceil(size / 2))
return scipy.ndimage.generic_filter(arr,
func,
size=size,
extra_keywords={'s': size,
'k': k,
}
)
def conservative(arr, size=5, supercon=False):
"""
Conservative, a nonlinear n-D despiking filter. Very conservative! Only
changes centre value if it is outside the range of all the other values
in the kernel. Read http://subsurfwiki.org/wiki/Conservative_filter
Args:
arr (ndarray): an n-dimensional array, such as a seismic horizon.
size (int): the kernel size, e.g. 5 for 5x5 (in a 2D arr). Should be
odd, rounded up if not.
supercon (bool): whether to be superconservative. If True, replaces
pixel with min or max of kernel. If False (default), replaces pixel
with mean of kernel.
Returns:
ndarray: the resulting smoothed array.
"""
def func(this, k, supercon):
this = this.flatten()
centre = this[k]
rest = [this[:k], this[-k:]]
mi, ma = np.nanmin(rest), np.nanmax(rest)
if centre < mi:
return mi if supercon else np.mean(rest)
elif centre > ma:
return ma if supercon else np.mean(rest)
else:
return centre
arr = np.array(arr, dtype=np.float)
    if not size % 2:
size += 1
k = int(np.floor(size**arr.ndim / 2))
return scipy.ndimage.generic_filter(arr,
func,
size=size,
extra_keywords={'k': k,
'supercon': supercon,
}
)
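
# Usage sketch (hypothetical data; the filters are the functions above):
#
#   import numpy as np
#   horizon = np.random.rand(100, 100)      # stand-in for a seismic horizon
#   smoothed = median(horizon, size=4)      # even size is rounded up to 5
#   despiked = conservative(horizon, supercon=True)
#
# All filters delegate to scipy.ndimage.generic_filter; the n-D ones accept
# arrays of any rank, while snn and kuwahara are 2D-only and raise
# BrugesError otherwise.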
|
agile-geoscience/agilegeo
|
bruges/filters/filters.py
|
Python
|
apache-2.0
| 7,285
|
[
"Gaussian"
] |
fb141f4bcb3f81a50b51f47d3510500623fabb3380fbb6fa20c946f9ecb40bbc
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Magnetic space groups.
"""
import os
import sqlite3
import textwrap
from array import array
from fractions import Fraction
import numpy as np
from monty.design_patterns import cached_class
from pymatgen.core.operations import MagSymmOp
from pymatgen.electronic_structure.core import Magmom
from pymatgen.symmetry.groups import SymmetryGroup, in_array_list
from pymatgen.symmetry.settings import JonesFaithfulTransformation
from pymatgen.util.string import transformation_to_string
__author__ = "Matthew Horton, Shyue Ping Ong"
MAGSYMM_DATA = os.path.join(os.path.dirname(__file__), "symm_data_magnetic.sqlite")
@cached_class
class MagneticSpaceGroup(SymmetryGroup):
"""
Representation of a magnetic space group.
"""
def __init__(self, id, setting_transformation="a,b,c;0,0,0"):
"""
Initializes a MagneticSpaceGroup from its Belov, Neronova and
Smirnova (BNS) number supplied as a list or its label supplied
as a string. To create a magnetic structure in pymatgen, the
Structure.from_magnetic_spacegroup() method can be used, which
relies on this class.
The main difference between magnetic space groups and normal
crystallographic space groups is the inclusion of a time reversal
operator that acts on an atom's magnetic moment. This is
indicated by a prime symbol (') next to the respective symmetry
operation in its label, e.g. the standard crystallographic
space group Pnma has magnetic subgroups Pn'ma, Pnm'a, Pnma',
Pn'm'a, Pnm'a', Pn'ma', Pn'm'a'.
The magnetic space groups are classified as one of 4 types
where G = magnetic space group, and F = parent crystallographic
space group:
1. G=F no time reversal, i.e. the same as corresponding
crystallographic group
2. G=F+F1', "grey" groups, where avg. magnetic moment is zero,
e.g. a paramagnet in zero ext. mag. field
3. G=D+(F-D)1', where D is an equi-translation subgroup of F of
index 2, lattice translations do not include time reversal
4. G=D+(F-D)1', where D is an equi-class subgroup of F of index 2
There are two common settings for magnetic space groups, BNS
and OG. In case 4, the BNS setting != OG setting, and so a
transformation to go between the two settings is required:
specifically, the BNS setting is derived from D, and the OG
setting is derived from F.
This means that the OG setting refers to the unit cell if magnetic
order is neglected, and requires multiple unit cells to reproduce
the full crystal periodicity when magnetic moments are present.
This does not make the OG setting, in general, useful for
electronic structure calculations and the BNS setting is preferred.
However, this class does contain information on the OG setting and
can be initialized from OG labels or numbers if required.
Conventions: ITC monoclinic unique axis b, monoclinic cell choice 1,
hexagonal axis for trigonal groups, origin choice 2 for groups with
more than one origin choice (ISO-MAG).
Raw data comes from ISO-MAG, ISOTROPY Software Suite, iso.byu.edu
http://stokes.byu.edu/iso/magnetic_data.txt
with kind permission from Professor Branton Campbell, BYU
Data originally compiled from:
(1) Daniel B. Litvin, Magnetic Group Tables (International Union
of Crystallography, 2013) www.iucr.org/publ/978-0-9553602-2-0.
(2) C. J. Bradley and A. P. Cracknell, The Mathematical Theory of
Symmetry in Solids (Clarendon Press, Oxford, 1972).
See http://stokes.byu.edu/iso/magneticspacegroupshelp.php for more
information on magnetic symmetry.
:param id: BNS number supplied as list of 2 ints or BNS label as
str or index as int (1-1651) to iterate over all space groups"""
self._data = {}
# Datafile is stored as sqlite3 database since (a) it can be easily
# queried for various different indexes (BNS/OG number/labels) and (b)
# allows binary data to be stored in a compact form similar to that in
# the source data file, significantly reducing file size.
# Note that a human-readable JSON format was tested first but was 20x
# larger and required *much* longer initial loading times.
# retrieve raw data
db = sqlite3.connect(MAGSYMM_DATA)
c = db.cursor()
if isinstance(id, str):
id = "".join(id.split()) # remove any white space
c.execute("SELECT * FROM space_groups WHERE BNS_label=?;", (id,))
elif isinstance(id, list):
c.execute("SELECT * FROM space_groups WHERE BNS1=? AND BNS2=?;", (id[0], id[1]))
elif isinstance(id, int):
# OG3 index is a 'master' index, going from 1 to 1651
c.execute("SELECT * FROM space_groups WHERE OG3=?;", (id,))
raw_data = list(c.fetchone())
# Jones Faithful transformation
self.jf = JonesFaithfulTransformation.from_transformation_string("a,b,c;0,0,0")
if isinstance(setting_transformation, str):
if setting_transformation != "a,b,c;0,0,0":
self.jf = JonesFaithfulTransformation.from_transformation_string(setting_transformation)
elif isinstance(setting_transformation, JonesFaithfulTransformation):
if setting_transformation != self.jf:
self.jf = setting_transformation
self._data["magtype"] = raw_data[0] # int from 1 to 4
self._data["bns_number"] = [raw_data[1], raw_data[2]]
self._data["bns_label"] = raw_data[3]
self._data["og_number"] = [raw_data[4], raw_data[5], raw_data[6]]
self._data["og_label"] = raw_data[7] # can differ from BNS_label
def _get_point_operator(idx):
"""Retrieve information on point operator (rotation matrix and Seitz label)."""
hex = self._data["bns_number"][0] >= 143 and self._data["bns_number"][0] <= 194
c.execute(
"SELECT symbol, matrix FROM point_operators WHERE idx=? AND hex=?;",
(idx - 1, hex),
)
op = c.fetchone()
op = {
"symbol": op[0],
"matrix": np.array(op[1].split(","), dtype="f").reshape(3, 3),
}
return op
def _parse_operators(b):
"""Parses compact binary representation into list of MagSymmOps."""
if len(b) == 0: # e.g. if magtype != 4, OG setting == BNS setting, and b == [] for OG symmops
return None
raw_symops = [b[i : i + 6] for i in range(0, len(b), 6)]
symops = []
for r in raw_symops:
point_operator = _get_point_operator(r[0])
translation_vec = [r[1] / r[4], r[2] / r[4], r[3] / r[4]]
time_reversal = r[5]
op = MagSymmOp.from_rotation_and_translation_and_time_reversal(
rotation_matrix=point_operator["matrix"],
translation_vec=translation_vec,
time_reversal=time_reversal,
)
# store string representation, e.g. (2x|1/2,1/2,1/2)'
seitz = "({}|{},{},{})".format(
point_operator["symbol"],
Fraction(translation_vec[0]),
Fraction(translation_vec[1]),
Fraction(translation_vec[2]),
)
if time_reversal == -1:
seitz += "'"
symops.append({"op": op, "str": seitz})
return symops
def _parse_wyckoff(b):
"""Parses compact binary representation into list of Wyckoff sites."""
if len(b) == 0:
return None
wyckoff_sites = []
def get_label(idx):
if idx <= 25:
return chr(97 + idx) # returns a-z when idx 0-25
return "alpha" # when a-z labels exhausted, use alpha, only relevant for a few space groups
o = 0 # offset
n = 1 # nth Wyckoff site
num_wyckoff = b[0]
while len(wyckoff_sites) < num_wyckoff:
m = b[1 + o] # multiplicity
label = str(b[2 + o] * m) + get_label(num_wyckoff - n)
sites = []
for j in range(m):
s = b[3 + o + (j * 22) : 3 + o + (j * 22) + 22] # data corresponding to specific Wyckoff position
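                    # layout of the 22 ints per site: s[0:3]/s[3] is the
                    # fractional translation, s[4:13] fills the 3x3 position
                    # matrix column by column, and s[13:22] fills the 3x3
                    # magnetic-moment matrix the same way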
translation_vec = [s[0] / s[3], s[1] / s[3], s[2] / s[3]]
matrix = [
[s[4], s[7], s[10]],
[s[5], s[8], s[11]],
[s[6], s[9], s[12]],
]
matrix_magmom = [
[s[13], s[16], s[19]],
[s[14], s[17], s[20]],
[s[15], s[18], s[21]],
]
# store string representation, e.g. (x,y,z;mx,my,mz)
wyckoff_str = "({};{})".format(
transformation_to_string(matrix, translation_vec),
transformation_to_string(matrix_magmom, c="m"),
)
sites.append(
{
"translation_vec": translation_vec,
"matrix": matrix,
"matrix_magnetic": matrix_magmom,
"str": wyckoff_str,
}
)
# only keeping string representation of Wyckoff sites for now
# could do something else with these in future
wyckoff_sites.append({"label": label, "str": " ".join([s["str"] for s in sites])})
n += 1
o += m * 22 + 2
return wyckoff_sites
def _parse_lattice(b):
"""Parses compact binary representation into list of lattice vectors/centerings."""
if len(b) == 0:
return None
raw_lattice = [b[i : i + 4] for i in range(0, len(b), 4)]
lattice = []
for r in raw_lattice:
lattice.append(
{
"vector": [r[0] / r[3], r[1] / r[3], r[2] / r[3]],
"str": "({},{},{})+".format(
Fraction(r[0] / r[3]).limit_denominator(),
Fraction(r[1] / r[3]).limit_denominator(),
Fraction(r[2] / r[3]).limit_denominator(),
),
}
)
return lattice
def _parse_transformation(b):
"""Parses compact binary representation into transformation between OG and BNS settings."""
if len(b) == 0:
return None
# capital letters used here by convention,
# IUCr defines P and p specifically
P = [[b[0], b[3], b[6]], [b[1], b[4], b[7]], [b[2], b[5], b[8]]]
p = [b[9] / b[12], b[10] / b[12], b[11] / b[12]]
P = np.array(P).transpose()
P_string = transformation_to_string(P, components=("a", "b", "c"))
p_string = "{},{},{}".format(
Fraction(p[0]).limit_denominator(),
Fraction(p[1]).limit_denominator(),
Fraction(p[2]).limit_denominator(),
)
return P_string + ";" + p_string
for i in range(8, 15):
try:
raw_data[i] = array("b", raw_data[i]) # construct array from sql binary blobs
except Exception:
# array() behavior changed, need to explicitly convert buffer to str in earlier Python
raw_data[i] = array("b", str(raw_data[i]))
self._data["og_bns_transform"] = _parse_transformation(raw_data[8])
self._data["bns_operators"] = _parse_operators(raw_data[9])
self._data["bns_lattice"] = _parse_lattice(raw_data[10])
self._data["bns_wyckoff"] = _parse_wyckoff(raw_data[11])
self._data["og_operators"] = _parse_operators(raw_data[12])
self._data["og_lattice"] = _parse_lattice(raw_data[13])
self._data["og_wyckoff"] = _parse_wyckoff(raw_data[14])
db.close()
@classmethod
def from_og(cls, id):
"""
Initialize from Opechowski and Guccione (OG) label or number.
:param id: OG number supplied as list of 3 ints or
or OG label as str
:return:
"""
db = sqlite3.connect(MAGSYMM_DATA)
c = db.cursor()
if isinstance(id, str):
c.execute("SELECT BNS_label FROM space_groups WHERE OG_label=?", (id,))
elif isinstance(id, list):
c.execute(
"SELECT BNS_label FROM space_groups WHERE OG1=? and OG2=? and OG3=?",
(id[0], id[1], id[2]),
)
bns_label = c.fetchone()[0]
db.close()
return cls(bns_label)
def __eq__(self, other):
return self._data == other._data
@property
def crystal_system(self):
"""
:return: Crystal system, e.g., cubic, hexagonal, etc.
"""
i = self._data["bns_number"][0]
if i <= 2:
return "triclinic"
if i <= 15:
return "monoclinic"
if i <= 74:
return "orthorhombic"
if i <= 142:
return "tetragonal"
if i <= 167:
return "trigonal"
if i <= 194:
return "hexagonal"
return "cubic"
@property
def sg_symbol(self):
"""
:return: Space group symbol
"""
return self._data["bns_label"]
@property
def symmetry_ops(self):
"""
Retrieve magnetic symmetry operations of the space group.
:return: List of :class:`pymatgen.core.operations.MagSymmOp`
"""
ops = [op_data["op"] for op_data in self._data["bns_operators"]]
# add lattice centerings
centered_ops = []
lattice_vectors = [l["vector"] for l in self._data["bns_lattice"]]
for vec in lattice_vectors:
if not (np.array_equal(vec, [1, 0, 0]) or np.array_equal(vec, [0, 1, 0]) or np.array_equal(vec, [0, 0, 1])):
for op in ops:
new_vec = op.translation_vector + vec
new_op = MagSymmOp.from_rotation_and_translation_and_time_reversal(
op.rotation_matrix,
translation_vec=new_vec,
time_reversal=op.time_reversal,
)
centered_ops.append(new_op)
ops = ops + centered_ops
# apply jones faithful transformation
ops = [self.jf.transform_symmop(op) for op in ops]
return ops
def get_orbit(self, p, m, tol=1e-5):
"""
Returns the orbit for a point and its associated magnetic moment.
Args:
p: Point as a 3x1 array.
m: A magnetic moment, compatible with
:class:`pymatgen.electronic_structure.core.Magmom`
tol: Tolerance for determining if sites are the same. 1e-5 should
be sufficient for most purposes. Set to 0 for exact matching
(and also needed for symbolic orbits).
Returns:
(([array], [array])) Tuple of orbit for point and magnetic moments for orbit.
"""
orbit = []
orbit_magmoms = []
m = Magmom(m)
for o in self.symmetry_ops:
pp = o.operate(p)
pp = np.mod(np.round(pp, decimals=10), 1)
mm = o.operate_magmom(m)
if not in_array_list(orbit, pp, tol=tol):
orbit.append(pp)
orbit_magmoms.append(mm)
return orbit, orbit_magmoms
def is_compatible(self, lattice, tol=1e-5, angle_tol=5):
"""
Checks whether a particular lattice is compatible with the
*conventional* unit cell.
Args:
lattice (Lattice): A Lattice.
tol (float): The tolerance to check for equality of lengths.
angle_tol (float): The tolerance to check for equality of angles
in degrees.
"""
# function from pymatgen.symmetry.groups.SpaceGroup
abc = lattice.lengths
angles = lattice.angles
crys_system = self.crystal_system
def check(param, ref, tolerance):
return all(abs(i - j) < tolerance for i, j in zip(param, ref) if j is not None)
if crys_system == "cubic":
a = abc[0]
return check(abc, [a, a, a], tol) and check(angles, [90, 90, 90], angle_tol)
if crys_system == "hexagonal" or (crys_system == "trigonal" and self.sg_symbol.endswith("H")):
a = abc[0]
return check(abc, [a, a, None], tol) and check(angles, [90, 90, 120], angle_tol)
if crys_system == "trigonal":
a = abc[0]
return check(abc, [a, a, a], tol)
if crys_system == "tetragonal":
a = abc[0]
return check(abc, [a, a, None], tol) and check(angles, [90, 90, 90], angle_tol)
if crys_system == "orthorhombic":
return check(angles, [90, 90, 90], angle_tol)
if crys_system == "monoclinic":
return check(angles, [90, None, 90], angle_tol)
return True
def data_str(self, include_og=True):
"""
Get description of all data, including information for OG setting.
:return: str
"""
# __str__() omits information on OG setting to reduce confusion
# as to which set of symops are active, this property gives
# all stored data including OG setting
desc = {} # dictionary to hold description strings
description = ""
# parse data into strings
# indicate if non-standard setting specified
if self.jf != JonesFaithfulTransformation.from_transformation_string("a,b,c;0,0,0"):
description += "Non-standard setting: .....\n"
description += self.jf.__repr__()
description += "\n\nStandard setting information: \n"
desc["magtype"] = self._data["magtype"]
desc["bns_number"] = ".".join(map(str, self._data["bns_number"]))
desc["bns_label"] = self._data["bns_label"]
desc["og_id"] = (
"\t\tOG: " + ".".join(map(str, self._data["og_number"])) + " " + self._data["og_label"]
if include_og
else ""
)
desc["bns_operators"] = " ".join([op_data["str"] for op_data in self._data["bns_operators"]])
desc["bns_lattice"] = (
" ".join([lattice_data["str"] for lattice_data in self._data["bns_lattice"][3:]])
if len(self._data["bns_lattice"]) > 3
else ""
) # don't show (1,0,0)+ (0,1,0)+ (0,0,1)+
desc["bns_wyckoff"] = "\n".join(
[
textwrap.fill(
wyckoff_data["str"],
initial_indent=wyckoff_data["label"] + " ",
subsequent_indent=" " * len(wyckoff_data["label"] + " "),
break_long_words=False,
break_on_hyphens=False,
)
for wyckoff_data in self._data["bns_wyckoff"]
]
)
desc["og_bns_transformation"] = (
f"OG-BNS Transform: ({self._data['og_bns_transform']})\n" if desc["magtype"] == 4 and include_og else ""
)
bns_operators_prefix = f"Operators{' (BNS)' if desc['magtype'] == 4 and include_og else ''}: "
bns_wyckoff_prefix = f"Wyckoff Positions{' (BNS)' if desc['magtype'] == 4 and include_og else ''}: "
# apply textwrap on long lines
desc["bns_operators"] = textwrap.fill(
desc["bns_operators"],
initial_indent=bns_operators_prefix,
subsequent_indent=" " * len(bns_operators_prefix),
break_long_words=False,
break_on_hyphens=False,
)
description += (
"BNS: {d[bns_number]} {d[bns_label]}{d[og_id]}\n"
"{d[og_bns_transformation]}"
"{d[bns_operators]}\n"
"{bns_wyckoff_prefix}{d[bns_lattice]}\n"
"{d[bns_wyckoff]}"
).format(d=desc, bns_wyckoff_prefix=bns_wyckoff_prefix)
if desc["magtype"] == 4 and include_og:
desc["og_operators"] = " ".join([op_data["str"] for op_data in self._data["og_operators"]])
# include all lattice vectors because (1,0,0)+ (0,1,0)+ (0,0,1)+
# not always present in OG setting
desc["og_lattice"] = " ".join([lattice_data["str"] for lattice_data in self._data["og_lattice"]])
desc["og_wyckoff"] = "\n".join(
[
textwrap.fill(
wyckoff_data["str"],
initial_indent=wyckoff_data["label"] + " ",
subsequent_indent=" " * len(wyckoff_data["label"] + " "),
break_long_words=False,
break_on_hyphens=False,
)
for wyckoff_data in self._data["og_wyckoff"]
]
)
og_operators_prefix = "Operators (OG): "
# apply textwrap on long lines
desc["og_operators"] = textwrap.fill(
desc["og_operators"],
initial_indent=og_operators_prefix,
subsequent_indent=" " * len(og_operators_prefix),
break_long_words=False,
break_on_hyphens=False,
)
description += ("\n{d[og_operators]}\nWyckoff Positions (OG): {d[og_lattice]}\n" "{d[og_wyckoff]}").format(
d=desc
)
elif desc["magtype"] == 4:
description += "\nAlternative OG setting exists for this space group."
return description
def __str__(self):
"""
String representation of the space group, specifying the setting
of the space group, its magnetic symmetry operators and Wyckoff
positions.
:return: str
"""
return self.data_str(include_og=False)
def _write_all_magnetic_space_groups_to_file(filename):
"""
Write all magnetic space groups to a human-readable text file.
Should contain same information as text files provided by ISO-MAG.
:param filename:
:return:
"""
s = (
"Data parsed from raw data from:\n"
"ISO-MAG, ISOTROPY Software Suite, iso.byu.edu\n"
"http://stokes.byu.edu/iso/magnetic_data.txt\n"
"Used with kind permission from Professor Branton Campbell, BYU\n\n"
)
all_msgs = []
for i in range(1, 1652):
all_msgs.append(MagneticSpaceGroup(i))
for msg in all_msgs:
s += f"\n{msg.data_str()}\n\n--------\n"
with open(filename, "w") as f:
f.write(s)
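
# Usage sketch ("P1" / BNS 1.1 chosen purely for illustration):
#
#   msg = MagneticSpaceGroup("P1")            # or MagneticSpaceGroup([1, 1])
#   print(msg.sg_symbol, msg.crystal_system)
#   ops = msg.symmetry_ops                    # MagSymmOps incl. time reversal
#
# MagneticSpaceGroup.from_og() builds the same object from an OG label or
# OG number instead.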
|
materialsproject/pymatgen
|
pymatgen/symmetry/maggroups.py
|
Python
|
mit
| 23,343
|
[
"CRYSTAL",
"pymatgen"
] |
5724b69ee77335c97d5ba6dadb2be9a29ec1ee6168dbb2b69329a706e54e205b
|
#!/usr/bin/env python
########################################################################
# File : dirac-admin-proxy-upload.py
# Author : Adrian Casajus
########################################################################
import sys
from DIRAC.Core.Base import Script
from DIRAC.FrameworkSystem.Client.ProxyUpload import CLIParams, uploadProxy
__RCSID__ = "$Id$"
if __name__ == "__main__":
cliParams = CLIParams()
cliParams.registerCLISwitches()
Script.parseCommandLine()
retVal = uploadProxy( cliParams )
if not retVal[ 'OK' ]:
print retVal[ 'Message' ]
sys.exit( 1 )
sys.exit( 0 )
|
Andrew-McNab-UK/DIRAC
|
FrameworkSystem/scripts/dirac-admin-proxy-upload.py
|
Python
|
gpl-3.0
| 655
|
[
"DIRAC"
] |
2a49f6d73155507aa58b04d528fe81d865c4407d14e6413a3b06d8bbeebe8166
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2014, 2015 Adam.Dybbroe
# Author(s):
# Adam.Dybbroe <a000680@c14526.ad.smhi.se>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Read new PPS netCDF formattet data on swath, and remap and store in old hdf5
format. Start with Cloud Type, and extend to other products!
"""
import time
import h5py
import numpy as np
import logging
from datetime import datetime
from nwcsaf_formats.pps_conversions import (map_cloudtypes,
old_ctype_palette,
old_ctype_palette_data,
old_ctth_press_palette_data,
old_ctth_temp_palette_data,
old_ctth_height_palette_data,
ctype_convert_flags,
ctth_convert_flags,
old_processing_flag_palette
)
LOG = logging.getLogger(__name__)
def write_product(ppsobj, filename):
"""Write the product data to hdf5, pps v2012 format.
"""
h5f = h5py.File(filename, 'w')
try:
h5f.attrs['description'] = np.string_(ppsobj.mda['title'])
except KeyError:
h5f.attrs['description'] = np.string_(ppsobj.mda['description'])
try:
h5f.attrs['orbit_number'] = np.int32(ppsobj.mda['orbit_number'])
except KeyError:
h5f.attrs['orbit_number'] = np.int32(ppsobj.mda['orbit'])
try:
h5f.attrs['satellite_id'] = np.string_(ppsobj.mda['platform'])
except KeyError:
h5f.attrs['satellite_id'] = np.string_(ppsobj.mda['satellite'])
try:
dtobj = datetime.strptime(ppsobj.mda['time_coverage_start'][:-2],
'%Y%m%dT%H%M%S')
except KeyError:
dtobj = ppsobj.mda['time_slot']
h5f.attrs['sec_1970'] = time.mktime(dtobj.timetuple())
try:
h5f.attrs['version'] = np.string_(ppsobj.mda['source'])
except KeyError:
h5f.attrs['version'] = np.string_(ppsobj.mda['version'])
# Create the region data:
comp_type = np.dtype([('area_extent', 'f8', (4,)),
('xsize', 'i4'),
('ysize', 'i4'),
('xscale', 'f4'),
('yscale', 'f4'),
('lat_0', 'f4'),
('lon_0', 'f4'),
('lat_ts', 'f4'),
                          ('id', np.str_, 64),
('name', np.str_, 64),
('pcs_id', np.str_, 128),
('pcs_def', np.str_, 128)])
# Save the datatype!
h5f['Region'] = comp_type
region = h5f.create_dataset("region", (1,), comp_type)
# Outputvaluenamelist:
ov_comp_type = get_output_valuenamelist_compund_dtype()
# The datatype for the palette:
h5f['OutputValueNameList'] = ov_comp_type
aobj = ppsobj.area
pcs_def = aobj.proj4_string.replace(' +', ',').strip('+')
data = np.array([(aobj.area_extent,
aobj.x_size, aobj.y_size,
aobj.pixel_size_x, aobj.pixel_size_y,
aobj.proj_dict.get('lat_0', 0.0),
aobj.proj_dict.get('lon_0', 0.0),
aobj.proj_dict.get('lat_ts', 0.0),
aobj.area_id, aobj.name,
aobj.proj_id, pcs_def)], dtype=comp_type)
region[...] = data
product = ppsobj.name
make_palettes[product](h5f, ppsobj)
make_dataset[product](h5f, ppsobj)
make_flags[product](h5f, ppsobj)
h5f.close()
return
def make_palettes_ct(h5f, ppsobj):
# Make the palette:
shape = (256, 3)
palette = h5f.create_dataset("PALETTE", shape, dtype='u1')
try:
dummy = ppsobj.mda['ct_pal'].data
palette_data = old_ctype_palette_data()
except KeyError:
palette_data = ppsobj.mda['PALETTE']
palette[...] = palette_data
palette.attrs['CLASS'] = np.string_("PALETTE")
palette.attrs['PAL_COLORMODEL'] = np.string_("RGB")
palette.attrs['PAL_TYPE'] = np.string_("STANDARD8")
palette.attrs['PAL_VERSION'] = np.string_("1.2")
def make_palettes_pc(h5f, ppsobj):
pass
def make_palettes_ctth(h5f, ppsobj):
# Make the palette:
shape = (256, 3)
palette = h5f.create_dataset("HEIGHT_PALETTE", shape, dtype='u1')
palette_data = old_ctth_height_palette_data()
palette[...] = palette_data
palette.attrs['CLASS'] = np.string_("PALETTE")
palette.attrs['PAL_COLORMODEL'] = np.string_("RGB")
palette.attrs['PAL_TYPE'] = np.string_("STANDARD8")
palette.attrs['PAL_VERSION'] = np.string_("1.2")
palette = h5f.create_dataset("PRESSURE_PALETTE", shape, dtype='u1')
palette_data = old_ctth_press_palette_data()
palette[...] = palette_data
palette.attrs['CLASS'] = np.string_("PALETTE")
palette.attrs['PAL_COLORMODEL'] = np.string_("RGB")
palette.attrs['PAL_TYPE'] = np.string_("STANDARD8")
palette.attrs['PAL_VERSION'] = np.string_("1.2")
palette = h5f.create_dataset("TEMPERATURE_PALETTE", shape, dtype='u1')
palette_data = old_ctth_temp_palette_data()
palette[...] = palette_data
palette.attrs['CLASS'] = np.string_("PALETTE")
palette.attrs['PAL_COLORMODEL'] = np.string_("RGB")
palette.attrs['PAL_TYPE'] = np.string_("STANDARD8")
palette.attrs['PAL_VERSION'] = np.string_("1.2")
def get_output_valuenamelist_compund_dtype():
"""Define the compound datatype for the Outputvaluenamelist and return the
numpy dtype"""
# Outputvaluenamelist:
return np.dtype([('outval_name', np.string_, 128), ])
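# For reference, arrays built with this dtype hold one 128-character string per
# category, exactly as assembled in make_dataset_ct below, e.g. (the value is
# illustrative only):
#   np.array(['0: Not processed'], dtype=get_output_valuenamelist_compund_dtype())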
def make_dataset_ct(h5f, ppsobj):
# Make the cloudtype dataset:
# shape = (2, 3)
try:
shape = ppsobj.ct.data.shape
except AttributeError:
shape = ppsobj.cloudtype.data.shape
cloudtype = h5f.create_dataset("cloudtype", shape, dtype='u1',
compression="gzip", compression_opts=6)
# Outputvaluenamelist:
ov_comp_type = get_output_valuenamelist_compund_dtype()
try:
cloudtype[...] = map_cloudtypes(ppsobj.ct.data.filled(0))
print("Cloudtype categories mapped!")
palette = old_ctype_palette()
except AttributeError:
print("Cloudtype categories *not* mapped!")
cloudtype[...] = ppsobj.cloudtype.data.filled(0)
vnamelist = []
for i, item in zip(ppsobj.ct.info['flag_values'],
str(ppsobj.ct.info['flag_meanings']).split(' ')):
vnamelist.append(str(i) + ":" + " " + item)
vnamelist.insert(0, '0: Not processed')
palette = np.array(vnamelist, dtype=ov_comp_type)
cloudtype.attrs["output_value_namelist"] = palette
cloudtype.attrs['CLASS'] = np.string_("IMAGE")
cloudtype.attrs['IMAGE_VERSION'] = np.string_("1.2")
#cloudtype.attrs['PALETTE'] = h5f['PALETTE'].ref
cloudtype.attrs['description'] = np.string_("Cloud type classification")
def make_dataset_pc(h5f, ppsobj):
shape = ppsobj.pc_precip_moderate.data.shape
precipitation1 = h5f.create_dataset("precipitation1", shape, dtype='u1',
compression="gzip", compression_opts=6)
precipitation1_data = ppsobj.pc_precip_light.data.filled()
precipitation1[...] = precipitation1_data.astype(np.uint8)
precipitation1.attrs['description'] = np.string_(
"Likelihood for precipitation intensity in class 1")
precipitation1.attrs['intensity_class_lowerlimit'] = np.float32(0.1)
precipitation1.attrs['intensity_class_upperlimit'] = np.float32(0.5)
precipitation2 = h5f.create_dataset("precipitation2", shape, dtype='u1',
compression="gzip", compression_opts=6)
precipitation2_data = ppsobj.pc_precip_moderate.data.filled()
precipitation2[...] = precipitation2_data.astype(np.uint8)
precipitation2.attrs['description'] = np.string_(
"Likelihood for precipitation intensity in class 2")
precipitation2.attrs['intensity_class_lowerlimit'] = np.float32(0.5)
precipitation2.attrs['intensity_class_upperlimit'] = np.float32(5.0)
precipitation3 = h5f.create_dataset("precipitation3", shape, dtype='u1',
compression="gzip", compression_opts=6)
precipitation3_data = ppsobj.pc_precip_intense.data.filled()
precipitation3[...] = precipitation3_data.astype(np.uint8)
precipitation3.attrs['description'] = np.string_(
"Likelihood for precipitation intensity in class 3")
precipitation3.attrs['intensity_class_lowerlimit'] = np.float32(5.0)
precipitation3.attrs['intensity_class_upperlimit'] = np.float32(1000.0)
def make_dataset_ctth(h5f, ppsobj):
shape = ppsobj.ctth_alti.data.shape
alti = h5f.create_dataset("height", shape, dtype='u1',
compression="gzip", compression_opts=6)
alti_data = (ppsobj.ctth_alti.data.filled() / 200.0)
alti_data[alti_data > 255] = 255
alti[...] = alti_data.astype(np.uint8)
alti.attrs['CLASS'] = np.string_("IMAGE")
alti.attrs['IMAGE_VERSION'] = np.string_("1.2")
#alti.attrs['PALETTE'] = h5f['HEIGHT_PALETTE'].ref
alti.attrs['description'] = np.string_("scaled Height (m)")
alti.attrs['gain'] = np.float32(200.0)
alti.attrs['intercept'] = np.float32(0.0)
alti.attrs['no_data_value'] = np.uint8(255)
tempe = h5f.create_dataset("temperature", shape, dtype='u1',
compression="gzip", compression_opts=6)
tempe_data = (ppsobj.ctth_tempe.data.filled() - 100.0)
tempe_data[tempe_data > 255] = 255
tempe[...] = tempe_data.astype(np.uint8)
tempe.attrs['CLASS'] = np.string_("IMAGE")
tempe.attrs['IMAGE_VERSION'] = np.string_("1.2")
#tempe.attrs['PALETTE'] = h5f['TEMPERATURE_PALETTE'].ref
tempe.attrs['description'] = np.string_("scaled Temperature (K)")
tempe.attrs['gain'] = np.float32(1.0)
tempe.attrs['intercept'] = np.float32(100.0)
tempe.attrs['no_data_value'] = np.uint8(255)
pres = h5f.create_dataset("pressure", shape, dtype='u1',
compression="gzip", compression_opts=6)
pres_data = (ppsobj.ctth_pres.data.filled() / 2500.0) # scale 25, Pa->hPa
pres_data[pres_data > 255] = 255
pres[...] = pres_data.astype(np.uint8)
pres.attrs['CLASS'] = np.string_("IMAGE")
pres.attrs['IMAGE_VERSION'] = np.string_("1.2")
#pres.attrs['PALETTE'] = h5f['PRESSURE_PALETTE'].ref
pres.attrs['description'] = np.string_("scaled Pressure (hPa)")
pres.attrs['gain'] = np.float32(25.0)
pres.attrs['intercept'] = np.float32(0.0)
pres.attrs['no_data_value'] = np.uint8(255)
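# Decoding convention implied by the gain/intercept attributes above (the
# worked numbers are illustrative): physical = stored * gain + intercept, so
#   height:      stored 25  -> 25 * 200.0 + 0.0   = 5000 m
#   temperature: stored 140 -> 140 * 1.0  + 100.0 = 240 K
#   pressure:    stored 20  -> 20 * 25.0  + 0.0   = 500 hPa
# with 255 reserved as the no-data value in all three datasets.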
def make_flags_ct(h5f, ppsobj):
"""Map the cloudtype quality flags from new to old"""
shape = ppsobj.ct.data.shape
# quality_flag:
qualityflags = h5f.create_dataset("quality_flag", shape, dtype='u2',
compression="gzip", compression_opts=6)
sflags = ppsobj.ct_status_flag.data.filled(0)
cflags = ppsobj.ct_conditions.data.filled(0)
qflags = ppsobj.ct_quality.data.filled(0)
oldflags = ctype_convert_flags(sflags, cflags, qflags)
qualityflags[...] = oldflags
qualityflags.attrs[
'description'] = np.string_("Bitwise quality or AVHRR Processing flag")
qualityflags.attrs[
"output_value_namelist"] = old_processing_flag_palette('cloudtype')
def make_flags_ctth(h5f, ppsobj):
"""Map the ctth flags from new to old"""
shape = ppsobj.ctth_tempe.data.shape
# processing_flag:
qualityflags = h5f.create_dataset("processing_flag", shape, dtype='u2',
compression="gzip", compression_opts=6)
sflags = ppsobj.ctth_status_flag.data.filled(0)
cflags = ppsobj.ctth_conditions.data.filled(0)
qflags = ppsobj.ctth_quality.data.filled(0)
oldflags = ctth_convert_flags(sflags, cflags, qflags)
qualityflags[...] = oldflags
qualityflags.attrs[
'description'] = np.string_("16 bit Processing flag")
qualityflags.attrs[
"output_value_namelist"] = old_processing_flag_palette('ctth')
return
def make_flags_pc(h5f, ppsobj):
pass
make_palettes = {
"CT": make_palettes_ct,
"CTTH": make_palettes_ctth,
"PC": make_palettes_pc,
}
make_dataset = {
"CT": make_dataset_ct,
"CTTH": make_dataset_ctth,
"PC": make_dataset_pc,
}
make_flags = {
"CT": make_flags_ct,
"CTTH": make_flags_ctth,
"PC": make_flags_pc,
}
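# These dicts dispatch on the product name carried by the pps object: in
# write_product above, make_palettes[product](h5f, ppsobj) etc. resolve to the
# CT/CTTH/PC specific writers, so e.g. write_product(lcd["CT"], filename)
# runs make_palettes_ct, make_dataset_ct and make_flags_ct in turn.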
if __name__ == '__main__':
from mpop.satellites import PolarFactory
import datetime
from mpop.utils import debug_on
debug_on()
time_slot = datetime.datetime(2014, 11, 11, 6, 39, 59)
# time_slot = datetime.datetime(2014, 11, 11, 6, 39)
orbit = '48832'
gbd = PolarFactory.create_scene("noaa", "18", "avhrr", time_slot, orbit)
# gbd.load(['CMA', 'CT', 'CTTH', 'PC'])
# gbd.load(['CT'])
# lcd = gbd.project('euron1')
# filename = (lcd.satname + lcd.number +
# lcd.time_slot.strftime('_%Y%m%d_%H%M_') +
# lcd.orbit + '.' + lcd.area.area_id + '.cloudtype.hdf')
# write_product(lcd["CT"], filename)
gbd.load(['CTTH'])
lcd = gbd.project('euron1')
filename = (lcd.satname + lcd.number +
lcd.time_slot.strftime('_%Y%m%d_%H%M_') +
lcd.orbit + '.' + lcd.area.area_id + '.ctth.hdf')
write_product(lcd["CTTH"], filename)
# gbd.load(['PC'])
# lcd = gbd.project('euron1')
# filename = (lcd.satname + lcd.number +
# lcd.time_slot.strftime('_%Y%m%d_%H%M_') +
# lcd.orbit + '.' + lcd.area.area_id + '.precipclouds.hdf')
# write_product(lcd["PC"], filename)
|
adybbroe/mesan_compositer
|
nwcsaf_formats/ppsv2014_to_oldformat.py
|
Python
|
gpl-3.0
| 14,534
|
[
"NetCDF"
] |
72c49d2d76d9ead795377aa7bf46984ee7b9298e38f4fc60b5f0ca39614f599f
|
"""
Bayesian hidden Markov models.
"""
import numpy as np
import copy
import time
#from scipy.misc import logsumexp
import bhmm.hidden as hidden
from bhmm.util.logger import logger
from bhmm.util import config
import msmtools.estimation as msmest
#from bhmm.msm.transition_matrix_sampling_rev import TransitionMatrixSamplerRev
__author__ = "John D. Chodera, Frank Noe"
__copyright__ = "Copyright 2015, John D. Chodera and Frank Noe"
__credits__ = ["John D. Chodera", "Frank Noe"]
__license__ = "LGPL"
__maintainer__ = "John D. Chodera"
__email__="jchodera AT gmail DOT com"
class BayesianHMMSampler(object):
"""Bayesian hidden Markov model sampler.
Examples
--------
First, create some synthetic test data.
>>> import bhmm
>>> bhmm.config.verbose = False
>>> nstates = 3
>>> model = bhmm.testsystems.dalton_model(nstates)
>>> [observations, hidden_states] = model.generate_synthetic_observation_trajectories(ntrajectories=5, length=1000)
Initialize a new BHMM model.
>>> from bhmm import BHMM
>>> bhmm_sampler = BHMM(observations, nstates)
Sample from the posterior.
>>> models = bhmm_sampler.sample(nsamples=10)
"""
def __init__(self, observations, nstates, initial_model=None,
reversible=True, transition_matrix_sampling_steps=1000, transition_matrix_prior=None,
type='gaussian'):
"""Initialize a Bayesian hidden Markov model sampler.
Parameters
----------
observations : list of numpy arrays representing temporal data
`observations[i]` is a 1d numpy array corresponding to the observed trajectory index `i`
nstates : int
The number of states in the model.
initial_model : HMM, optional, default=None
If specified, the given initial model will be used to initialize the BHMM.
Otherwise, a heuristic scheme is used to generate an initial guess.
reversible : bool, optional, default=True
If True, a prior that enforces reversible transition matrices (detailed balance) is used;
otherwise, a standard non-reversible prior is used.
transition_matrix_sampling_steps : int, optional, default=1000
number of transition matrix sampling steps per BHMM cycle
        transition_matrix_prior : str or ndarray(n,n)
            prior count matrix to be used for transition matrix sampling, or a keyword specifying the prior mode
            | None (default): a -1 prior is used that ensures consistency between mean and MLE. This can lead
              to sampling disconnected matrices in the low-data regime. If you have disconnectivity problems,
              consider using 'init-connect'.
            | 'init-connect': a prior count matrix ensuring the same connectivity as in the initial model. 1 count
              is added to all diagonals. All off-diagonals share one prior count, distributed proportionally to
              the rows of the initial transition matrix.
        type : str, optional, default='gaussian'
            Output model type, one of ['gaussian', 'discrete'].
"""
# Sanity checks.
if len(observations) == 0:
raise Exception("No observations were provided.")
# Store options.
self.reversible = reversible
# Store the number of states.
self.nstates = nstates
# Store a copy of the observations.
self.observations = copy.deepcopy(observations)
self.nobs = len(observations)
self.Ts = [len(o) for o in observations]
self.maxT = np.max(self.Ts)
# initial model
if initial_model:
# Use user-specified initial model, if provided.
self.model = copy.deepcopy(initial_model)
else:
# Generate our own initial model.
self.model = self._generateInitialModel(type)
# prior counts
if transition_matrix_prior is None:
self.prior = np.zeros((self.nstates, self.nstates))
        elif isinstance(transition_matrix_prior, np.ndarray):
            if np.array_equal(transition_matrix_prior.shape, (self.nstates, self.nstates)):
                self.prior = np.array(transition_matrix_prior)
            else:
                raise ValueError('transition matrix prior must have shape (%d, %d)'
                                 % (self.nstates, self.nstates))
elif transition_matrix_prior == 'init-connect':
Pinit = self.model.transition_matrix
            self.prior = Pinit - np.diag(np.diag(Pinit))  # keep off-diagonals from initial T-matrix, zero the diagonal
self.prior /= self.prior.sum(axis=1)[:, None] # scale off-diagonals to row sum 1
self.prior += np.eye(nstates) # add diagonal 1.
else:
raise ValueError('transition matrix prior mode undefined: '+str(transition_matrix_prior))
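        # Illustrative walk-through of the 'init-connect' prior (numbers are
        # hypothetical, not part of the algorithm): for a 3-state
        #   Pinit = [[0.8, 0.15, 0.05],
        #            [0.1, 0.8,  0.1 ],
        #            [0.0, 0.2,  0.8 ]]
        # the off-diagonals become [[0, 0.75, 0.25], [0.5, 0, 0.5], [0, 1, 0]]
        # after row-normalization, and adding the identity gives
        #   prior = [[1, 0.75, 0.25], [0.5, 1, 0.5], [0, 1, 1]],
        # i.e. zero entries of Pinit stay zero, preserving its connectivity.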
# sampling options
self.transition_matrix_sampling_steps = transition_matrix_sampling_steps
# implementation options
hidden.set_implementation(config.kernel)
self.model.output_model.set_implementation(config.kernel)
# pre-construct hidden variables
self.alpha = np.zeros((self.maxT,self.nstates), config.dtype, order='C')
self.pobs = np.zeros((self.maxT,self.nstates), config.dtype, order='C')
return
def sample(self, nsamples, nburn=0, nthin=1, save_hidden_state_trajectory=False,
call_back=None):
"""Sample from the BHMM posterior.
Parameters
----------
nsamples : int
The number of samples to generate.
nburn : int, optional, default=0
The number of samples to discard to burn-in, following which `nsamples` will be generated.
nthin : int, optional, default=1
The number of Gibbs sampling updates used to generate each returned sample.
save_hidden_state_trajectory : bool, optional, default=False
If True, the hidden state trajectory for each sample will be saved as well.
call_back : function, optional, default=None
a call back function with no arguments, which if given is being called
after each computed sample. This is useful for implementing progress bars.
Returns
-------
models : list of bhmm.HMM
The sampled HMM models from the Bayesian posterior.
Examples
--------
>>> from bhmm import testsystems
>>> [model, observations, states, sampled_model] = testsystems.generate_random_bhmm(ntrajectories=5, length=1000)
>>> nburn = 5 # run the sampler a bit before recording samples
>>> nsamples = 10 # generate 10 samples
>>> nthin = 2 # discard one sample in between each recorded sample
>>> samples = sampled_model.sample(nsamples, nburn=nburn, nthin=nthin)
"""
# Run burn-in.
for iteration in range(nburn):
logger().info("Burn-in %8d / %8d" % (iteration, nburn))
self._update()
# Collect data.
models = list()
for iteration in range(nsamples):
logger().info("Iteration %8d / %8d" % (iteration, nsamples))
# Run a number of Gibbs sampling updates to generate each sample.
for thin in range(nthin):
self._update()
# Save a copy of the current model.
model_copy = copy.deepcopy(self.model)
#print "Sampled: \n",repr(model_copy)
if not save_hidden_state_trajectory:
model_copy.hidden_state_trajectory = None
models.append(model_copy)
if call_back is not None:
call_back()
# Return the list of models saved.
return models
def _update(self):
"""Update the current model using one round of Gibbs sampling.
"""
initial_time = time.time()
self._updateHiddenStateTrajectories()
self._updateEmissionProbabilities()
self._updateTransitionMatrix()
final_time = time.time()
elapsed_time = final_time - initial_time
logger().info("BHMM update iteration took %.3f s" % elapsed_time)
def _updateHiddenStateTrajectories(self):
"""Sample a new set of state trajectories from the conditional distribution P(S | T, E, O)
"""
self.model.hidden_state_trajectories = list()
for trajectory_index in range(self.nobs):
hidden_state_trajectory = self._sampleHiddenStateTrajectory(self.observations[trajectory_index])
self.model.hidden_state_trajectories.append(hidden_state_trajectory)
return
def _sampleHiddenStateTrajectory(self, obs, dtype=np.int32):
"""Sample a hidden state trajectory from the conditional distribution P(s | T, E, o)
Parameters
----------
        obs : numpy.array with dimensions (T,)
            obs[n] is the nth observation
dtype : numpy.dtype, optional, default=numpy.int32
The dtype to to use for returned state trajectory.
Returns
-------
s_t : numpy.array with dimensions (T,) of type `dtype`
Hidden state trajectory, with s_t[t] the hidden state corresponding to observation o_t[t]
Examples
--------
>>> import bhmm
>>> [model, observations, states, sampled_model] = bhmm.testsystems.generate_random_bhmm(ntrajectories=5, length=1000)
>>> o_t = observations[0]
>>> s_t = sampled_model._sampleHiddenStateTrajectory(o_t)
"""
# Determine observation trajectory length
T = obs.shape[0]
# Convenience access.
A = self.model.transition_matrix
pi = self.model.initial_distribution
# compute output probability matrix
self.model.output_model.p_obs(obs, out=self.pobs)
# forward variables
logprob = hidden.forward(A, self.pobs, pi, T = T, alpha_out=self.alpha)[0]
# sample path
S = hidden.sample_path(self.alpha, A, self.pobs, T = T)
return S
def _updateEmissionProbabilities(self):
"""Sample a new set of emission probabilites from the conditional distribution P(E | S, O)
"""
observations_by_state = [ self.model.collect_observations_in_state(self.observations, state) for state in range(self.model.nstates) ]
self.model.output_model._sample_output_model(observations_by_state)
return
def _updateTransitionMatrix(self):
"""
Updates the hidden-state transition matrix
"""
C = self.model.count_matrix()
# apply prior
C += self.prior
# sample T-matrix
Tij = msmest.sample_tmatrix(C, nsample=1, nsteps=self.transition_matrix_sampling_steps, reversible=self.reversible)
self.model.update(Tij)
def _generateInitialModel(self, output_model_type):
"""Initialize using an MLHMM.
"""
logger().info("Generating initial model for BHMM using MLHMM...")
from bhmm.estimators.maximum_likelihood import MaximumLikelihoodEstimator
mlhmm = MaximumLikelihoodEstimator(self.observations, self.nstates, reversible=self.reversible, type=output_model_type)
model = mlhmm.fit()
return model
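# Usage sketch (mirrors the class docstring above; names such as dalton_model
# come from bhmm.testsystems and are assumed to be available):
#   model = bhmm.testsystems.dalton_model(3)
#   observations, _ = model.generate_synthetic_observation_trajectories(ntrajectories=5, length=1000)
#   sampler = BayesianHMMSampler(observations, nstates=3, reversible=True)
#   models = sampler.sample(nsamples=10, nburn=5, nthin=2)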
|
marscher/bhmm
|
bhmm/estimators/bayesian_sampling.py
|
Python
|
lgpl-3.0
| 11,187
|
[
"Gaussian"
] |
82402be73878c411cc53f331f09b5fefa0682939315717fa22f159aab25fe1b8
|
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1447321436.21237
__CHEETAH_genTimestamp__ = 'Thu Nov 12 18:43:56 2015'
__CHEETAH_src__ = '/home/knuth/openpli-oe-core/build/tmp/work/fusionhd-oe-linux/enigma2-plugin-extensions-openwebif/1+gitAUTOINC+5837c87afc-r0/git/plugin/controllers/views/web/getcurrlocation.tmpl'
__CHEETAH_srcLastModified__ = 'Thu Nov 12 18:43:41 2015'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class getcurrlocation(Template):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
super(getcurrlocation, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
_orig_filter_93545997 = _filter
filterName = u'WebSafe'
if self._CHEETAH__filters.has_key("WebSafe"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
write(u'''<?xml version="1.0" encoding="UTF-8"?>
<e2locations>
\t<e2location>''')
_v = VFFSL(SL,"location",True) # u'$location' on line 4, col 14
if _v is not None: write(_filter(_v, rawExpr=u'$location')) # from line 4, col 14.
write(u'''</e2location>
</e2locations>
''')
_filter = self._CHEETAH__currentFilter = _orig_filter_93545997
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_getcurrlocation= 'respond'
## END CLASS DEFINITION
if not hasattr(getcurrlocation, '_initCheetahAttributes'):
templateAPIClass = getattr(getcurrlocation, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(getcurrlocation)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=getcurrlocation()).run()
|
pli3/e2-openwbif
|
plugin/controllers/views/web/getcurrlocation.py
|
Python
|
gpl-2.0
| 5,000
|
[
"VisIt"
] |
85be7c3803b551428ecfa0c45bfe79d8be8b28fc0d2ebe88839481033a9d2976
|
#!/usr/bin/env python
# HTSeq_QA.py
#
# (c) Simon Anders, European Molecular Biology Laboratory, 2010
# released under GNU General Public License
import sys, time, os.path, optparse
from itertools import *
import numpy
import HTSeq
def main():
try:
import matplotlib
except ImportError:
sys.stderr.write("This script needs the 'matplotlib' library, which ")
sys.stderr.write("was not found. Please install it." )
matplotlib.use('PDF')
from matplotlib import pyplot
# **** Parse command line ****
optParser = optparse.OptionParser( usage = "%prog [options] read_file",
description=
"This script take a file with high-throughput sequencing reads " +
"(supported formats: SAM, Solexa _export.txt, FASTQ, Solexa " +
"_sequence.txt) and performs a simply quality assessment by " +
"producing plots showing the distribution of called bases and " +
"base-call quality scores by position within the reads. The " +
"plots are output as a PDF file.",
epilog =
"Written by Simon Anders (sanders@fs.tum.de), European Molecular Biology " +
" Laboratory (EMBL). (c) 2010. Released under the terms of the GNU General " +
" Public License v3. Part of the 'HTSeq' framework, version %s." % HTSeq.__version__ )
optParser.add_option( "-t", "--type", type="choice", dest="type",
choices = ("sam", "bam", "solexa-export", "fastq", "solexa-fastq"),
default = "sam", help="type of read_file (one of: sam [default], bam, " +
"solexa-export, fastq, solexa-fastq)" )
optParser.add_option( "-o", "--outfile", type="string", dest="outfile",
help="output filename (default is <read_file>.pdf)" )
optParser.add_option( "-r", "--readlength", type="int", dest="readlen",
help="the maximum read length (when not specified, the script guesses from the file" )
optParser.add_option( "-g", "--gamma", type="float", dest="gamma",
default = 0.3,
help="the gamma factor for the contrast adjustment of the quality score plot" )
optParser.add_option( "-n", "--nosplit", action="store_true", dest="nosplit",
help="do not split reads in unaligned and aligned ones" )
optParser.add_option( "-m", "--maxqual", type="int", dest="maxqual", default=41,
help="the maximum quality score that appears in the data (default: 41)" )
if len( sys.argv ) == 1:
optParser.print_help()
sys.exit(1)
(opts, args) = optParser.parse_args()
if len( args ) != 1:
sys.stderr.write( sys.argv[0] + ": Error: Please provide one argument (the read_file).\n" )
sys.stderr.write( " Call with '-h' to get usage information.\n" )
sys.exit( 1 )
readfilename = args[0]
if opts.type == "sam":
readfile = HTSeq.SAM_Reader( readfilename )
isAlnmntFile = True
elif opts.type == "bam":
readfile = HTSeq.BAM_Reader( readfilename )
isAlnmntFile = True
elif opts.type == "solexa-export":
readfile = HTSeq.SolexaExportReader( readfilename )
isAlnmntFile = True
elif opts.type == "fastq":
readfile = HTSeq.FastqReader( readfilename )
isAlnmntFile = False
elif opts.type == "solexa-fastq":
readfile = HTSeq.FastqReader( readfilename, "solexa" )
isAlnmntFile = False
else:
sys.error( "Oops." )
twoColumns = isAlnmntFile and not opts.nosplit
if opts.outfile is None:
outfilename = os.path.basename( readfilename ) + ".pdf"
else:
outfilename = opts.outfile
# **** Get read length ****
if opts.readlen is not None:
readlen = opts.readlen
else:
readlen = 0
if isAlnmntFile:
reads = ( a.read for a in readfile )
else:
reads = readfile
for r in islice( reads, 10000 ):
if len( r ) > readlen:
readlen = len( r )
max_qual = opts.maxqual
gamma = opts.gamma
# **** Initialize count arrays ****
base_arr_U = numpy.zeros( ( readlen, 5 ), numpy.int )
qual_arr_U = numpy.zeros( ( readlen, max_qual+1 ), numpy.int )
if twoColumns:
base_arr_A = numpy.zeros( ( readlen, 5 ), numpy.int )
qual_arr_A = numpy.zeros( ( readlen, max_qual+1 ), numpy.int )
# **** Main counting loop ****
i = 0
try:
for a in readfile:
if isAlnmntFile:
r = a.read
else:
r = a
if twoColumns and (isAlnmntFile and a.aligned):
r.add_bases_to_count_array( base_arr_A )
r.add_qual_to_count_array( qual_arr_A )
else:
r.add_bases_to_count_array( base_arr_U )
r.add_qual_to_count_array( qual_arr_U )
i += 1
if i % 200000 == 0:
print i, "reads processed"
except:
sys.stderr.write( "Error occured in: %s\n" %
readfile.get_line_number_string() )
raise
print i, "reads processed"
# **** Normalize result ****
def norm_by_pos( arr ):
arr = numpy.array( arr, numpy.float )
arr_n = ( arr.T / arr.sum( 1 ) ).T
arr_n[ arr == 0 ] = 0
return arr_n
def norm_by_start( arr ):
arr = numpy.array( arr, numpy.float )
arr_n = ( arr.T / arr.sum( 1 )[ 0 ] ).T
arr_n[ arr == 0 ] = 0
return arr_n
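    # Reading of the two helpers above: norm_by_pos scales each row (read
    # position) to fractions that sum to 1, while norm_by_start divides every
    # row by the total count at position 0, so positions beyond the length of
    # shorter reads sum to less than 1.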
base_arr_U_n = norm_by_pos( base_arr_U )
qual_arr_U_n = norm_by_start( qual_arr_U )
nreads_U = base_arr_U[0,:].sum()
if twoColumns:
base_arr_A_n = norm_by_pos( base_arr_A )
qual_arr_A_n = norm_by_start( qual_arr_A )
nreads_A = base_arr_A[0,:].sum()
# **** Make plot ****
def plot_bases( arr ):
xg = numpy.arange( readlen )
pyplot.plot( xg, arr[ : , 0 ], marker='.', color='red')
pyplot.plot( xg, arr[ : , 1 ], marker='.', color='darkgreen')
pyplot.plot( xg, arr[ : , 2 ], marker='.',color='lightgreen')
pyplot.plot( xg, arr[ : , 3 ], marker='.',color='orange')
pyplot.plot( xg, arr[ : , 4 ], marker='.',color='grey')
pyplot.axis( (0, readlen-1, 0, 1 ) )
pyplot.text( readlen*.70, .9, "A", color="red" )
pyplot.text( readlen*.75, .9, "C", color="darkgreen" )
pyplot.text( readlen*.80, .9, "G", color="lightgreen" )
pyplot.text( readlen*.85, .9, "T", color="orange" )
pyplot.text( readlen*.90, .9, "N", color="grey" )
pyplot.figure()
pyplot.subplots_adjust( top=.85 )
pyplot.suptitle( os.path.basename(readfilename), fontweight='bold' )
if twoColumns:
pyplot.subplot( 221 )
plot_bases( base_arr_U_n )
pyplot.ylabel( "proportion of base" )
pyplot.title( "non-aligned reads\n%.0f%% (%.3f million)" %
( 100. * nreads_U / (nreads_U+nreads_A), nreads_U / 1e6 ) )
pyplot.subplot( 222 )
plot_bases( base_arr_A_n )
pyplot.title( "aligned reads\n%.0f%% (%.3f million)" %
( 100. * nreads_A / (nreads_U+nreads_A), nreads_A / 1e6 ) )
pyplot.subplot( 223 )
pyplot.pcolor( qual_arr_U_n.T ** gamma, cmap=pyplot.cm.Greens,
norm=pyplot.normalize( 0, 1 ) )
pyplot.axis( (0, readlen-1, 0, max_qual+1 ) )
pyplot.xlabel( "position in read" )
pyplot.ylabel( "base-call quality score" )
pyplot.subplot( 224 )
pyplot.pcolor( qual_arr_A_n.T ** gamma, cmap=pyplot.cm.Greens,
norm=pyplot.normalize( 0, 1 ) )
pyplot.axis( (0, readlen-1, 0, max_qual+1 ) )
pyplot.xlabel( "position in read" )
else:
pyplot.subplot( 211 )
plot_bases( base_arr_U_n )
pyplot.ylabel( "proportion of base" )
pyplot.title( "%.3f million reads" % ( nreads_U / 1e6 ) )
pyplot.subplot( 212 )
pyplot.pcolor( qual_arr_U_n.T ** gamma, cmap=pyplot.cm.Greens,
norm=pyplot.normalize( 0, 1 ) )
pyplot.axis( (0, readlen-1, 0, max_qual+1 ) )
pyplot.xlabel( "position in read" )
pyplot.ylabel( "base-call quality score" )
pyplot.savefig( outfilename )
if __name__ == "__main__":
main()
|
detrout/python-htseq
|
HTSeq/scripts/qa.py
|
Python
|
gpl-3.0
| 7,729
|
[
"HTSeq"
] |
ddc96d5d63a273387489e1292a7365b8d55fc4a59f384fca3d52e1fae5fc72a1
|
# this is from https://github.com/mledoze/countries
countries_info = [
{
"name": {
"common": "Afghanistan",
"official": "Islamic Republic of Afghanistan",
"native": {
"common": "\u0627\u0641\u063a\u0627\u0646\u0633\u062a\u0627\u0646",
"official": "\u062f \u0627\u0641\u063a\u0627\u0646\u0633\u062a\u0627\u0646 \u0627\u0633\u0644\u0627\u0645\u064a \u062c\u0645\u0647\u0648\u0631\u06cc\u062a"
}
},
"tld": [".af"],
"cca2": "AF",
"ccn3": "004",
"cca3": "AFG",
"currency": ["AFN"],
"callingCode": ["93"],
"capital": "Kabul",
"altSpellings": ["AF", "Af\u0121\u0101nist\u0101n"],
"relevance": "0",
"region": "Asia",
"subregion": "Southern Asia",
"nativeLanguage": "pus",
"languages": {
"prs": "Dari",
"pus": "Pashto",
"tuk": "Turkmen"
},
"translations": {
"cym": "Affganistan",
"deu": "Afghanistan",
"fra": "Afghanistan",
"hrv": "Afganistan",
"ita": "Afghanistan",
"jpn": "\u30a2\u30d5\u30ac\u30cb\u30b9\u30bf\u30f3",
"nld": "Afghanistan",
"rus": "\u0410\u0444\u0433\u0430\u043d\u0438\u0441\u0442\u0430\u043d",
"spa": "Afganist\u00e1n"
},
"latlng": [33, 65],
"demonym": "Afghan",
"borders": ["IRN", "PAK", "TKM", "UZB", "TJK", "CHN"],
"area": 652230
},
{
"name": {
"common": "\u00c5land Islands",
"official": "\u00c5land Islands",
"native": {
"common": "\u00c5land",
"official": "Landskapet \u00c5land"
}
},
"tld": [".ax"],
"cca2": "AX",
"ccn3": "248",
"cca3": "ALA",
"currency": ["EUR"],
"callingCode": ["358"],
"capital": "Mariehamn",
"altSpellings": ["AX", "Aaland", "Aland", "Ahvenanmaa"],
"relevance": "0",
"region": "Europe",
"subregion": "Northern Europe",
"nativeLanguage": "swe",
"languages": {
"swe": "Swedish"
},
"translations": {
"deu": "\u00c5land",
"fra": "\u00c5land",
"hrv": "\u00c5landski otoci",
"ita": "Isole Aland",
"jpn": "\u30aa\u30fc\u30e9\u30f3\u30c9\u8af8\u5cf6",
"nld": "\u00c5landeilanden",
"rus": "\u0410\u043b\u0430\u043d\u0434\u0441\u043a\u0438\u0435 \u043e\u0441\u0442\u0440\u043e\u0432\u0430",
"spa": "Alandia"
},
"latlng": [60.116667, 19.9],
"demonym": "\u00c5landish",
"borders": [],
"area": 1580
},
{
"name": {
"common": "Albania",
"official": "Republic of Albania",
"native": {
"common": "Shqip\u00ebria",
"official": "Republika e Shqip\u00ebris\u00eb"
}
},
"tld": [".al"],
"cca2": "AL",
"ccn3": "008",
"cca3": "ALB",
"currency": ["ALL"],
"callingCode": ["355"],
"capital": "Tirana",
"altSpellings": [
"AL",
"Shqip\u00ebri",
"Shqip\u00ebria",
"Shqipnia"
],
"relevance": "0",
"region": "Europe",
"subregion": "Southern Europe",
"nativeLanguage": "sqi",
"languages": {
"sqi": "Albanian"
},
"translations": {
"cym": "Albania",
"deu": "Albanien",
"fra": "Albanie",
"hrv": "Albanija",
"ita": "Albania",
"jpn": "\u30a2\u30eb\u30d0\u30cb\u30a2",
"nld": "Albani\u00eb",
"rus": "\u0410\u043b\u0431\u0430\u043d\u0438\u044f",
"spa": "Albania"
},
"latlng": [41, 20],
"demonym": "Albanian",
"borders": ["MNE", "GRC", "MKD", "KOS"],
"area": 28748
},
{
"name": {
"common": "Algeria",
"official": "People's Democratic Republic of Algeria",
"native": {
"common": "\u0627\u0644\u062c\u0632\u0627\u0626\u0631",
"official": "\u0627\u0644\u062c\u0645\u0647\u0648\u0631\u064a\u0629 \u0627\u0644\u062f\u064a\u0645\u0642\u0631\u0627\u0637\u064a\u0629 \u0627\u0644\u0634\u0639\u0628\u064a\u0629 \u0627\u0644\u062c\u0632\u0627\u0626\u0631\u064a\u0629"
}
},
"tld": [".dz", "\u0627\u0644\u062c\u0632\u0627\u0626\u0631."],
"cca2": "DZ",
"ccn3": "012",
"cca3": "DZA",
"currency": ["DZD"],
"callingCode": ["213"],
"capital": "Algiers",
"altSpellings": ["DZ", "Dzayer", "Alg\u00e9rie"],
"relevance": "0",
"region": "Africa",
"subregion": "Northern Africa",
"nativeLanguage": "ara",
"languages": {
"ara": "Arabic"
},
"translations": {
"cym": "Algeria",
"deu": "Algerien",
"fra": "Alg\u00e9rie",
"hrv": "Al\u017eir",
"ita": "Algeria",
"jpn": "\u30a2\u30eb\u30b8\u30a7\u30ea\u30a2",
"nld": "Algerije",
"rus": "\u0410\u043b\u0436\u0438\u0440",
"spa": "Argelia"
},
"latlng": [28, 3],
"demonym": "Algerian",
"borders": ["TUN", "LBY", "NER", "ESH", "MRT", "MLI", "MAR"],
"area": 2381741
},
{
"name": {
"common": "American Samoa",
"official": "American Samoa",
"native": {
"common": "American Samoa",
"official": "American Samoa"
}
},
"tld": [".as"],
"cca2": "AS",
"ccn3": "016",
"cca3": "ASM",
"currency": ["USD"],
"callingCode": ["1684"],
"capital": "Pago Pago",
"altSpellings": ["AS", "Amerika S\u0101moa", "Amelika S\u0101moa", "S\u0101moa Amelika"],
"relevance": "0.5",
"region": "Oceania",
"subregion": "Polynesia",
"nativeLanguage": "eng",
"languages": {
"eng": "English",
"smo": "Samoan"
},
"translations": {
"deu": "Amerikanisch-Samoa",
"fra": "Samoa am\u00e9ricaines",
"hrv": "Ameri\u010dka Samoa",
"ita": "Samoa Americane",
"jpn": "\u30a2\u30e1\u30ea\u30ab\u9818\u30b5\u30e2\u30a2",
"nld": "Amerikaans Samoa",
"rus": "\u0410\u043c\u0435\u0440\u0438\u043a\u0430\u043d\u0441\u043a\u043e\u0435 \u0421\u0430\u043c\u043e\u0430",
"spa": "Samoa Americana"
},
"latlng": [-14.33333333, -170],
"demonym": "American Samoan",
"borders": [],
"area": 199
},
{
"name": {
"common": "Andorra",
"official": "Principality of Andorra",
"native": {
"common": "Andorra",
"official": "Principat d'Andorra"
}
},
"tld": [".ad"],
"cca2": "AD",
"ccn3": "020",
"cca3": "AND",
"currency": ["EUR"],
"callingCode": ["376"],
"capital": "Andorra la Vella",
"altSpellings": ["AD", "Principality of Andorra", "Principat d'Andorra"],
"relevance": "0.5",
"region": "Europe",
"subregion": "Southern Europe",
"nativeLanguage": "cat",
"languages": {
"cat": "Catalan"
},
"translations": {
"cym": "Andorra",
"deu": "Andorra",
"fra": "Andorre",
"hrv": "Andora",
"ita": "Andorra",
"jpn": "\u30a2\u30f3\u30c9\u30e9",
"nld": "Andorra",
"rus": "\u0410\u043d\u0434\u043e\u0440\u0440\u0430",
"spa": "Andorra"
},
"latlng": [42.5, 1.5],
"demonym": "Andorran",
"borders": ["FRA", "ESP"],
"area": 468
},
{
"name": {
"common": "Angola",
"official": "Republic of Angola",
"native": {
"common": "Angola",
"official": "Rep\u00fablica de Angola"
}
},
"tld": [".ao"],
"cca2": "AO",
"ccn3": "024",
"cca3": "AGO",
"currency": ["AOA"],
"callingCode": ["244"],
"capital": "Luanda",
"altSpellings": ["AO", "Rep\u00fablica de Angola", "\u0281\u025bpublika de an'\u0261\u0254la"],
"relevance": "0",
"region": "Africa",
"subregion": "Middle Africa",
"nativeLanguage": "por",
"languages": {
"por": "Portuguese"
},
"translations": {
"cym": "Angola",
"deu": "Angola",
"fra": "Angola",
"hrv": "Angola",
"ita": "Angola",
"jpn": "\u30a2\u30f3\u30b4\u30e9",
"nld": "Angola",
"rus": "\u0410\u043d\u0433\u043e\u043b\u0430",
"spa": "Angola"
},
"latlng": [-12.5, 18.5],
"demonym": "Angolan",
"borders": ["COG", "COD", "ZMB", "NAM"],
"area": 1246700
},
{
"name": {
"common": "Anguilla",
"official": "Anguilla",
"native": {
"common": "Anguilla",
"official": "Anguilla"
}
},
"tld": [".ai"],
"cca2": "AI",
"ccn3": "660",
"cca3": "AIA",
"currency": ["XCD"],
"callingCode": ["1264"],
"capital": "The Valley",
"altSpellings": ["AI"],
"relevance": "0.5",
"region": "Americas",
"subregion": "Caribbean",
"nativeLanguage": "eng",
"languages": {
"eng": "English"
},
"translations": {
"deu": "Anguilla",
"fra": "Anguilla",
"hrv": "Angvila",
"ita": "Anguilla",
"jpn": "\u30a2\u30f3\u30ae\u30e9",
"nld": "Anguilla",
"rus": "\u0410\u043d\u0433\u0438\u043b\u044c\u044f",
"spa": "Anguilla"
},
"latlng": [18.25, -63.16666666],
"demonym": "Anguillian",
"borders": [],
"area": 91
},
{
"name": {
"common": "Antarctica",
"official": "Antarctica",
"native": {
"common": "",
"official": ""
}
},
"tld": [".aq"],
"cca2": "AQ",
"ccn3": "010",
"cca3": "ATA",
"currency": [],
"callingCode": [],
"capital": "",
"altSpellings": ["AQ"],
"relevance": "0",
"region": "",
"subregion": "",
"nativeLanguage": "",
"languages": {},
"translations": {
"cym": "Antarctica",
"deu": "Antarktis",
"fra": "Antarctique",
"hrv": "Antarktika",
"ita": "Antartide",
"jpn": "\u5357\u6975",
"nld": "Antarctica",
"rus": "\u0410\u043d\u0442\u0430\u0440\u043a\u0442\u0438\u0434\u0430",
"spa": "Ant\u00e1rtida"
},
"latlng": [-90, 0],
"demonym": "Antarctican",
"borders": [],
"area": 14000000
},
{
"name": {
"common": "Antigua and Barbuda",
"official": "Antigua and Barbuda",
"native": {
"common": "Antigua and Barbuda",
"official": "Antigua and Barbuda"
}
},
"tld": [".ag"],
"cca2": "AG",
"ccn3": "028",
"cca3": "ATG",
"currency": ["XCD"],
"callingCode": ["1268"],
"capital": "Saint John's",
"altSpellings": ["AG"],
"relevance": "0.5",
"region": "Americas",
"subregion": "Caribbean",
"nativeLanguage": "eng",
"languages": {
"eng": "English"
},
"translations": {
"cym": "Antigwa a Barbiwda",
"deu": "Antigua und Barbuda",
"fra": "Antigua-et-Barbuda",
"hrv": "Antigva i Barbuda",
"ita": "Antigua e Barbuda",
"jpn": "\u30a2\u30f3\u30c6\u30a3\u30b0\u30a2\u30fb\u30d0\u30fc\u30d6\u30fc\u30c0",
"nld": "Antigua en Barbuda",
"rus": "\u0410\u043d\u0442\u0438\u0433\u0443\u0430 \u0438 \u0411\u0430\u0440\u0431\u0443\u0434\u0430",
"spa": "Antigua y Barbuda"
},
"latlng": [17.05, -61.8],
"demonym": "Antiguan, Barbudan",
"borders": [],
"area": 442
},
{
"name": {
"common": "Argentina",
"official": "Argentine Republic",
"native": {
"common": "Argentina",
"official": "Rep\u00fablica Argentina"
}
},
"tld": [".ar"],
"cca2": "AR",
"ccn3": "032",
"cca3": "ARG",
"currency": ["ARS"],
"callingCode": ["54"],
"capital": "Buenos Aires",
"altSpellings": ["AR", "Argentine Republic", "Rep\u00fablica Argentina"],
"relevance": "0",
"region": "Americas",
"subregion": "South America",
"nativeLanguage": "spa",
"languages": {
"grn": "Guaran\u00ed",
"spa": "Spanish"
},
"translations": {
"cym": "Ariannin",
"deu": "Argentinien",
"fra": "Argentine",
"hrv": "Argentina",
"ita": "Argentina",
"jpn": "\u30a2\u30eb\u30bc\u30f3\u30c1\u30f3",
"nld": "Argentini\u00eb",
"rus": "\u0410\u0440\u0433\u0435\u043d\u0442\u0438\u043d\u0430",
"spa": "Argentina"
},
"latlng": [-34, -64],
"demonym": "Argentinean",
"borders": ["BOL", "BRA", "CHL", "PRY", "URY"],
"area": 2780400
},
{
"name": {
"common": "Armenia",
"official": "Republic of Armenia",
"native": {
"common": "\u0540\u0561\u0575\u0561\u057d\u057f\u0561\u0576",
"official": "\u0540\u0561\u0575\u0561\u057d\u057f\u0561\u0576\u056b \u0540\u0561\u0576\u0580\u0561\u057a\u0565\u057f\u0578\u0582\u0569\u0575\u0578\u0582\u0576"
}
},
"tld": [".am"],
"cca2": "AM",
"ccn3": "051",
"cca3": "ARM",
"currency": ["AMD"],
"callingCode": ["374"],
"capital": "Yerevan",
"altSpellings": ["AM", "Hayastan", "Republic of Armenia", "\u0540\u0561\u0575\u0561\u057d\u057f\u0561\u0576\u056b \u0540\u0561\u0576\u0580\u0561\u057a\u0565\u057f\u0578\u0582\u0569\u0575\u0578\u0582\u0576"],
"relevance": "0",
"region": "Asia",
"subregion": "Western Asia",
"nativeLanguage": "hye",
"languages": {
"hye": "Armenian",
"rus": "Russian"
},
"translations": {
"cym": "Armenia",
"deu": "Armenien",
"fra": "Arm\u00e9nie",
"hrv": "Armenija",
"ita": "Armenia",
"jpn": "\u30a2\u30eb\u30e1\u30cb\u30a2",
"nld": "Armeni\u00eb",
"rus": "\u0410\u0440\u043c\u0435\u043d\u0438\u044f",
"spa": "Armenia"
},
"latlng": [40, 45],
"demonym": "Armenian",
"borders": ["AZE", "GEO", "IRN", "TUR"],
"area": 29743
},
{
"name": {
"common": "Aruba",
"official": "Aruba",
"native": {
"common": "Aruba",
"official": "Aruba"
}
},
"tld": [".aw"],
"cca2": "AW",
"ccn3": "533",
"cca3": "ABW",
"currency": ["AWG"],
"callingCode": ["297"],
"capital": "Oranjestad",
"altSpellings": ["AW"],
"relevance": "0.5",
"region": "Americas",
"subregion": "Caribbean",
"nativeLanguage": "nld",
"languages": {
"nld": "Dutch",
"pap": "Papiamento"
},
"translations": {
"deu": "Aruba",
"fra": "Aruba",
"hrv": "Aruba",
"ita": "Aruba",
"jpn": "\u30a2\u30eb\u30d0",
"nld": "Aruba",
"rus": "\u0410\u0440\u0443\u0431\u0430",
"spa": "Aruba"
},
"latlng": [12.5, -69.96666666],
"demonym": "Aruban",
"borders": [],
"area": 180
},
{
"name": {
"common": "Australia",
"official": "Commonwealth of Australia",
"native": {
"common": "Australia",
"official": "Commonwealth of Australia"
}
},
"tld": [".au"],
"cca2": "AU",
"ccn3": "036",
"cca3": "AUS",
"currency": ["AUD"],
"callingCode": ["61"],
"capital": "Canberra",
"altSpellings": ["AU"],
"relevance": "1.5",
"region": "Oceania",
"subregion": "Australia and New Zealand",
"nativeLanguage": "eng",
"languages": {
"eng": "English"
},
"translations": {
"cym": "Awstralia",
"deu": "Australien",
"fra": "Australie",
"hrv": "Australija",
"ita": "Australia",
"jpn": "\u30aa\u30fc\u30b9\u30c8\u30e9\u30ea\u30a2",
"nld": "Australi\u00eb",
"rus": "\u0410\u0432\u0441\u0442\u0440\u0430\u043b\u0438\u044f",
"spa": "Australia"
},
"latlng": [-27, 133],
"demonym": "Australian",
"borders": [],
"area": 7692024
},
{
"name": {
"common": "Austria",
"official": "Republic of Austria",
"native": {
"common": "\u00d6sterreich",
"official": "Republik \u00d6sterreich"
}
},
"tld": [".at"],
"cca2": "AT",
"ccn3": "040",
"cca3": "AUT",
"currency": ["EUR"],
"callingCode": ["43"],
"capital": "Vienna",
"altSpellings": ["AT", "\u00d6sterreich", "Osterreich", "Oesterreich"],
"relevance": "0",
"region": "Europe",
"subregion": "Western Europe",
"nativeLanguage": "deu",
"languages": {
"deu": "German"
},
"translations": {
"cym": "Awstria",
"deu": "\u00d6sterreich",
"fra": "Autriche",
"hrv": "Austrija",
"ita": "Austria",
"jpn": "\u30aa\u30fc\u30b9\u30c8\u30ea\u30a2",
"nld": "Oostenrijk",
"rus": "\u0410\u0432\u0441\u0442\u0440\u0438\u044f",
"spa": "Austria"
},
"latlng": [47.33333333, 13.33333333],
"demonym": "Austrian",
"borders": ["CZE", "DEU", "HUN", "ITA", "LIE", "SVK", "SVN", "CHE"],
"area": 83871
},
{
"name": {
"common": "Azerbaijan",
"official": "Republic of Azerbaijan",
"native": {
"common": "Az\u0259rbaycan",
"official": "Az\u0259rbaycan Respublikas\u0131"
}
},
"tld": [".az"],
"cca2": "AZ",
"ccn3": "031",
"cca3": "AZE",
"currency": ["AZN"],
"callingCode": ["994"],
"capital": "Baku",
"altSpellings": ["AZ", "Republic of Azerbaijan", "Az\u0259rbaycan Respublikas\u0131"],
"relevance": "0",
"region": "Asia",
"subregion": "Western Asia",
"nativeLanguage": "aze",
"languages": {
"aze": "Azerbaijani",
"hye": "Armenian"
},
"translations": {
"cym": "Aserbaijan",
"deu": "Aserbaidschan",
"fra": "Azerba\u00efdjan",
"hrv": "Azerbajd\u017ean",
"ita": "Azerbaijan",
"jpn": "\u30a2\u30bc\u30eb\u30d0\u30a4\u30b8\u30e3\u30f3",
"nld": "Azerbeidzjan",
"rus": "\u0410\u0437\u0435\u0440\u0431\u0430\u0439\u0434\u0436\u0430\u043d",
"spa": "Azerbaiy\u00e1n"
},
"latlng": [40.5, 47.5],
"demonym": "Azerbaijani",
"borders": ["ARM", "GEO", "IRN", "RUS", "TUR"],
"area": 86600
},
{
"name": {
"common": "Bahamas",
"official": "Commonwealth of the Bahamas",
"native": {
"common": "Bahamas",
"official": "Commonwealth of the Bahamas"
}
},
"tld": [".bs"],
"cca2": "BS",
"ccn3": "044",
"cca3": "BHS",
"currency": ["BSD"],
"callingCode": ["1242"],
"capital": "Nassau",
"altSpellings": ["BS", "Commonwealth of the Bahamas"],
"relevance": "0",
"region": "Americas",
"subregion": "Caribbean",
"nativeLanguage": "eng",
"languages": {
"eng": "English"
},
"translations": {
"cym": "Bahamas",
"deu": "Bahamas",
"fra": "Bahamas",
"hrv": "Bahami",
"ita": "Bahamas",
"jpn": "\u30d0\u30cf\u30de",
"nld": "Bahama\u2019s",
"rus": "\u0411\u0430\u0433\u0430\u043c\u0441\u043a\u0438\u0435 \u041e\u0441\u0442\u0440\u043e\u0432\u0430",
"spa": "Bahamas"
},
"latlng": [24.25, -76],
"demonym": "Bahamian",
"borders": [],
"area": 13943
},
{
"name": {
"common": "Bahrain",
"official": "Kingdom of Bahrain",
"native": {
"common": "\u200f\u0627\u0644\u0628\u062d\u0631\u064a\u0646",
"official": "\u0645\u0645\u0644\u0643\u0629 \u0627\u0644\u0628\u062d\u0631\u064a\u0646"
}
},
"tld": [".bh"],
"cca2": "BH",
"ccn3": "048",
"cca3": "BHR",
"currency": ["BHD"],
"callingCode": ["973"],
"capital": "Manama",
"altSpellings": ["BH", "Kingdom of Bahrain", "Mamlakat al-Ba\u1e25rayn"],
"relevance": "0",
"region": "Asia",
"subregion": "Western Asia",
"nativeLanguage": "ara",
"languages": {
"ara": "Arabic"
},
"translations": {
"cym": "Bahrain",
"deu": "Bahrain",
"fra": "Bahre\u00efn",
"hrv": "Bahrein",
"ita": "Bahrein",
"jpn": "\u30d0\u30fc\u30ec\u30fc\u30f3",
"nld": "Bahrein",
"rus": "\u0411\u0430\u0445\u0440\u0435\u0439\u043d",
"spa": "Bahrein"
},
"latlng": [26, 50.55],
"demonym": "Bahraini",
"borders": [],
"area": 765
},
{
"name": {
"common": "Bangladesh",
"official": "People's Republic of Bangladesh",
"native": {
"common": "Bangladesh",
"official": "\u09ac\u09be\u0982\u09b2\u09be\u09a6\u09c7\u09b6 \u0997\u09a3\u09aa\u09cd\u09b0\u099c\u09be\u09a4\u09a8\u09cd\u09a4\u09cd\u09b0\u09c0"
}
},
"tld": [".bd"],
"cca2": "BD",
"ccn3": "050",
"cca3": "BGD",
"currency": ["BDT"],
"callingCode": ["880"],
"capital": "Dhaka",
"altSpellings": ["BD", "People's Republic of Bangladesh", "G\u00f4n\u00f4pr\u00f4jat\u00f4ntri Bangladesh"],
"relevance": "2",
"region": "Asia",
"subregion": "Southern Asia",
"nativeLanguage": "ben",
"languages": {
"ben": "Bengali"
},
"translations": {
"cym": "Bangladesh",
"deu": "Bangladesch",
"fra": "Bangladesh",
"hrv": "Banglade\u0161",
"ita": "Bangladesh",
"jpn": "\u30d0\u30f3\u30b0\u30e9\u30c7\u30b7\u30e5",
"nld": "Bangladesh",
"rus": "\u0411\u0430\u043d\u0433\u043b\u0430\u0434\u0435\u0448",
"spa": "Bangladesh"
},
"latlng": [24, 90],
"demonym": "Bangladeshi",
"borders": ["MMR", "IND"],
"area": 147570
},
{
"name": {
"common": "Barbados",
"official": "Barbados",
"native": {
"common": "Barbados",
"official": "Barbados"
}
},
"tld": [".bb"],
"cca2": "BB",
"ccn3": "052",
"cca3": "BRB",
"currency": ["BBD"],
"callingCode": ["1246"],
"capital": "Bridgetown",
"altSpellings": ["BB"],
"relevance": "0",
"region": "Americas",
"subregion": "Caribbean",
"nativeLanguage": "eng",
"languages": {
"eng": "English"
},
"translations": {
"cym": "Barbados",
"deu": "Barbados",
"fra": "Barbade",
"hrv": "Barbados",
"ita": "Barbados",
"jpn": "\u30d0\u30eb\u30d0\u30c9\u30b9",
"nld": "Barbados",
"rus": "\u0411\u0430\u0440\u0431\u0430\u0434\u043e\u0441",
"spa": "Barbados"
},
"latlng": [13.16666666, -59.53333333],
"demonym": "Barbadian",
"borders": [],
"area": 430
},
{
"name": {
"common": "Belarus",
"official": "Republic of Belarus",
"native": {
"common": "\u0411\u0435\u043b\u0430\u0440\u0443\u0301\u0441\u044c",
"official": "\u0420\u044d\u0441\u043f\u0443\u0431\u043b\u0456\u043a\u0430 \u0411\u0435\u043b\u0430\u0440\u0443\u0441\u044c"
}
},
"tld": [".by"],
"cca2": "BY",
"ccn3": "112",
"cca3": "BLR",
"currency": ["BYR"],
"callingCode": ["375"],
"capital": "Minsk",
"altSpellings": ["BY", "Bielaru\u015b", "Republic of Belarus", "\u0411\u0435\u043b\u043e\u0440\u0443\u0441\u0441\u0438\u044f", "\u0420\u0435\u0441\u043f\u0443\u0431\u043b\u0438\u043a\u0430 \u0411\u0435\u043b\u0430\u0440\u0443\u0441\u044c", "Belorussiya", "Respublika Belarus\u2019"],
"relevance": "0",
"region": "Europe",
"subregion": "Eastern Europe",
"nativeLanguage": "bel",
"languages": {
"bel": "Belarusian",
"rus": "Russian"
},
"translations": {
"cym": "Belarws",
"deu": "Wei\u00dfrussland",
"fra": "Bi\u00e9lorussie",
"hrv": "Bjelorusija",
"ita": "Bielorussia",
"jpn": "\u30d9\u30e9\u30eb\u30fc\u30b7",
"nld": "Wit-Rusland",
"rus": "\u0411\u0435\u043b\u043e\u0440\u0443\u0441\u0441\u0438\u044f",
"spa": "Bielorrusia"
},
"latlng": [53, 28],
"demonym": "Belarusian",
"borders": ["LVA", "LTU", "POL", "RUS", "UKR"],
"area": 207600
},
{
"name": {
"common": "Belgium",
"official": "Kingdom of Belgium",
"native": {
"common": "Belgi\u00eb",
"official": "Koninkrijk Belgi\u00eb"
}
},
"tld": [".be"],
"cca2": "BE",
"ccn3": "056",
"cca3": "BEL",
"currency": ["EUR"],
"callingCode": ["32"],
"capital": "Brussels",
"altSpellings": ["BE", "Belgi\u00eb", "Belgie", "Belgien", "Belgique", "Kingdom of Belgium", "Koninkrijk Belgi\u00eb", "Royaume de Belgique", "K\u00f6nigreich Belgien"],
"relevance": "1.5",
"region": "Europe",
"subregion": "Western Europe",
"nativeLanguage": "nld",
"languages": {
"deu": "German",
"fra": "French",
"nld": "Dutch"
},
"translations": {
"cym": "Gwlad Belg",
"deu": "Belgien",
"fra": "Belgique",
"hrv": "Belgija",
"ita": "Belgio",
"jpn": "\u30d9\u30eb\u30ae\u30fc",
"nld": "Belgi\u00eb",
"rus": "\u0411\u0435\u043b\u044c\u0433\u0438\u044f",
"spa": "B\u00e9lgica"
},
"latlng": [50.83333333, 4],
"demonym": "Belgian",
"borders": ["FRA", "DEU", "LUX", "NLD"],
"area": 30528
},
{
"name": {
"common": "Belize",
"official": "Belize",
"native": {
"common": "Belize",
"official": "Belize"
}
},
"tld": [".bz"],
"cca2": "BZ",
"ccn3": "084",
"cca3": "BLZ",
"currency": ["BZD"],
"callingCode": ["501"],
"capital": "Belmopan",
"altSpellings": ["BZ"],
"relevance": "0",
"region": "Americas",
"subregion": "Central America",
"nativeLanguage": "eng",
"languages": {
"eng": "English",
"spa": "Spanish"
},
"translations": {
"cym": "Belize",
"deu": "Belize",
"fra": "Belize",
"hrv": "Belize",
"ita": "Belize",
"jpn": "\u30d9\u30ea\u30fc\u30ba",
"nld": "Belize",
"rus": "\u0411\u0435\u043b\u0438\u0437",
"spa": "Belice"
},
"latlng": [17.25, -88.75],
"demonym": "Belizean",
"borders": ["GTM", "MEX"],
"area": 22966
},
{
"name": {
"common": "Benin",
"official": "Republic of Benin",
"native": {
"common": "B\u00e9nin",
"official": "R\u00e9publique du B\u00e9nin"
}
},
"tld": [".bj"],
"cca2": "BJ",
"ccn3": "204",
"cca3": "BEN",
"currency": ["XOF"],
"callingCode": ["229"],
"capital": "Porto-Novo",
"altSpellings": ["BJ", "Republic of Benin", "R\u00e9publique du B\u00e9nin"],
"relevance": "0",
"region": "Africa",
"subregion": "Western Africa",
"nativeLanguage": "fra",
"languages": {
"fra": "French"
},
"translations": {
"cym": "Benin",
"deu": "Benin",
"fra": "B\u00e9nin",
"hrv": "Benin",
"ita": "Benin",
"jpn": "\u30d9\u30ca\u30f3",
"nld": "Benin",
"rus": "\u0411\u0435\u043d\u0438\u043d",
"spa": "Ben\u00edn"
},
"latlng": [9.5, 2.25],
"demonym": "Beninese",
"borders": ["BFA", "NER", "NGA", "TGO"],
"area": 112622
},
{
"name": {
"common": "Bermuda",
"official": "Bermuda",
"native": {
"common": "Bermuda",
"official": "Bermuda"
}
},
"tld": [".bm"],
"cca2": "BM",
"ccn3": "060",
"cca3": "BMU",
"currency": ["BMD"],
"callingCode": ["1441"],
"capital": "Hamilton",
"altSpellings": ["BM", "The Islands of Bermuda", "The Bermudas", "Somers Isles"],
"relevance": "0.5",
"region": "Americas",
"subregion": "Northern America",
"nativeLanguage": "eng",
"languages": {
"eng": "English"
},
"translations": {
"cym": "Bermiwda",
"deu": "Bermuda",
"fra": "Bermudes",
"hrv": "Bermudi",
"ita": "Bermuda",
"jpn": "\u30d0\u30df\u30e5\u30fc\u30c0",
"nld": "Bermuda",
"rus": "\u0411\u0435\u0440\u043c\u0443\u0434\u0441\u043a\u0438\u0435 \u041e\u0441\u0442\u0440\u043e\u0432\u0430",
"spa": "Bermudas"
},
"latlng": [32.33333333, -64.75],
"demonym": "Bermudian",
"borders": [],
"area": 54
},
{
"name": {
"common": "Bhutan",
"official": "Kingdom of Bhutan",
"native": {
"common": "\u0f60\u0f56\u0fb2\u0f74\u0f42\u0f0b\u0f61\u0f74\u0f63\u0f0b",
"official": "\u0f60\u0f56\u0fb2\u0f74\u0f42\u0f0b\u0f62\u0f92\u0fb1\u0f63\u0f0b\u0f41\u0f56\u0f0b"
}
},
"tld": [".bt"],
"cca2": "BT",
"ccn3": "064",
"cca3": "BTN",
"currency": ["BTN", "INR"],
"callingCode": ["975"],
"capital": "Thimphu",
"altSpellings": ["BT", "Kingdom of Bhutan"],
"relevance": "0",
"region": "Asia",
"subregion": "Southern Asia",
"nativeLanguage": "dzo",
"languages": {
"dzo": "Dzongkha"
},
"translations": {
"cym": "Bhwtan",
"deu": "Bhutan",
"fra": "Bhoutan",
"hrv": "Butan",
"ita": "Bhutan",
"jpn": "\u30d6\u30fc\u30bf\u30f3",
"nld": "Bhutan",
"rus": "\u0411\u0443\u0442\u0430\u043d",
"spa": "But\u00e1n"
},
"latlng": [27.5, 90.5],
"demonym": "Bhutanese",
"borders": ["CHN", "IND"],
"area": 38394
},
{
"name": {
"common": "Bolivia",
"official": "Plurinational State of Bolivia",
"native": {
"common": "Bolivia",
"official": "Estado Plurinacional de Bolivia"
}
},
"tld": [".bo"],
"cca2": "BO",
"ccn3": "068",
"cca3": "BOL",
"currency": ["BOB", "BOV"],
"callingCode": ["591"],
"capital": "Sucre",
"altSpellings": ["BO", "Buliwya", "Wuliwya", "Plurinational State of Bolivia", "Estado Plurinacional de Bolivia", "Buliwya Mamallaqta", "Wuliwya Suyu", "Tet\u00e3 Vol\u00edvia"],
"relevance": "0",
"region": "Americas",
"subregion": "South America",
"nativeLanguage": "spa",
"languages": {
"aym": "Aymara",
"grn": "Guaran\u00ed",
"que": "Quechua",
"spa": "Spanish"
},
"translations": {
"cym": "Bolifia",
"deu": "Bolivien",
"fra": "Bolivie",
"hrv": "Bolivija",
"ita": "Bolivia",
"jpn": "\u30dc\u30ea\u30d3\u30a2\u591a\u6c11\u65cf\u56fd",
"nld": "Bolivia",
"rus": "\u0411\u043e\u043b\u0438\u0432\u0438\u044f",
"spa": "Bolivia"
},
"latlng": [-17, -65],
"demonym": "Bolivian",
"borders": ["ARG", "BRA", "CHL", "PRY", "PER"],
"area": 1098581
},
{
"name": {
"common": "Bonaire",
"official": "Bonaire",
"native": {
"common": "Bonaire",
"official": "Bonaire"
}
},
"tld": [".an", ".nl"],
"cca2": "BQ",
"ccn3": "535",
"cca3": "BES",
"currency": ["USD"],
"callingCode": ["5997"],
"capital": "Kralendijk",
"altSpellings": ["BQ", "Boneiru"],
"relevance": "0",
"region": "Americas",
"subregion": "Caribbean",
"nativeLanguage": "nld",
"languages": {
"nld": "Dutch"
},
"translations": {
"rus": "\u0411\u043e\u043d\u044d\u0439\u0440"
},
"latlng": [12.15, -68.266667],
"demonym": "Dutch",
"borders": [],
"area": 294
},
{
"name": {
"common": "Bosnia and Herzegovina",
"official": "Bosnia and Herzegovina",
"native": {
"common": "Bosna i Hercegovina",
"official": "Bosna i Hercegovina"
}
},
"tld": [".ba"],
"cca2": "BA",
"ccn3": "070",
"cca3": "BIH",
"currency": ["BAM"],
"callingCode": ["387"],
"capital": "Sarajevo",
"altSpellings": ["BA", "Bosnia-Herzegovina", "\u0411\u043e\u0441\u043d\u0430 \u0438 \u0425\u0435\u0440\u0446\u0435\u0433\u043e\u0432\u0438\u043d\u0430"],
"relevance": "0",
"region": "Europe",
"subregion": "Southern Europe",
"nativeLanguage": "bos",
"languages": {
"bos": "Bosnian",
"hrv": "Croatian",
"srp": "Serbian"
},
"translations": {
"cym": "Bosnia a Hercegovina",
"deu": "Bosnien und Herzegowina",
"fra": "Bosnie-Herz\u00e9govine",
"hrv": "Bosna i Hercegovina",
"ita": "Bosnia ed Erzegovina",
"jpn": "\u30dc\u30b9\u30cb\u30a2\u30fb\u30d8\u30eb\u30c4\u30a7\u30b4\u30d3\u30ca",
"nld": "Bosni\u00eb en Herzegovina",
"rus": "\u0411\u043e\u0441\u043d\u0438\u044f \u0438 \u0413\u0435\u0440\u0446\u0435\u0433\u043e\u0432\u0438\u043d\u0430",
"spa": "Bosnia y Herzegovina"
},
"latlng": [44, 18],
"demonym": "Bosnian, Herzegovinian",
"borders": ["HRV", "MNE", "SRB"],
"area": 51209
},
{
"name": {
"common": "Botswana",
"official": "Republic of Botswana",
"native": {
"common": "Botswana",
"official": "Republic of Botswana"
}
},
"tld": [".bw"],
"cca2": "BW",
"ccn3": "072",
"cca3": "BWA",
"currency": ["BWP"],
"callingCode": ["267"],
"capital": "Gaborone",
"altSpellings": ["BW", "Republic of Botswana", "Lefatshe la Botswana"],
"relevance": "0",
"region": "Africa",
"subregion": "Southern Africa",
"nativeLanguage": "eng",
"languages": {
"eng": "English",
"tsn": "Tswana"
},
"translations": {
"deu": "Botswana",
"fra": "Botswana",
"hrv": "Bocvana",
"ita": "Botswana",
"jpn": "\u30dc\u30c4\u30ef\u30ca",
"nld": "Botswana",
"rus": "\u0411\u043e\u0442\u0441\u0432\u0430\u043d\u0430",
"spa": "Botswana"
},
"latlng": [-22, 24],
"demonym": "Motswana",
"borders": ["NAM", "ZAF", "ZMB", "ZWE"],
"area": 582000
},
{
"name": {
"common": "Bouvet Island",
"official": "Bouvet Island",
"native": {
"common": "Bouvet\u00f8ya",
"official": "Bouvet\u00f8ya"
}
},
"tld": [".bv"],
"cca2": "BV",
"ccn3": "074",
"cca3": "BVT",
"currency": ["NOK"],
"callingCode": [],
"capital": "",
"altSpellings": ["BV", "Bouvet\u00f8ya", "Bouvet-\u00f8ya"],
"relevance": "0",
"region": "",
"subregion": "",
"nativeLanguage": "nor",
"languages": {
"nor": "Norwegian"
},
"translations": {
"deu": "Bouvetinsel",
"fra": "\u00cele Bouvet",
"hrv": "Otok Bouvet",
"ita": "Isola Bouvet",
"jpn": "\u30d6\u30fc\u30d9\u5cf6",
"nld": "Bouveteiland",
"rus": "\u041e\u0441\u0442\u0440\u043e\u0432 \u0411\u0443\u0432\u0435",
"spa": "Isla Bouvet"
},
"latlng": [-54.43333333, 3.4],
"demonym": "",
"borders": [],
"area": 49
},
{
"name": {
"common": "Brazil",
"official": "Federative Republic of Brazil",
"native": {
"common": "Brasil",
"official": "Rep\u00fablica Federativa do Brasil"
}
},
"tld": [".br"],
"cca2": "BR",
"ccn3": "076",
"cca3": "BRA",
"currency": ["BRL"],
"callingCode": ["55"],
"capital": "Bras\u00edlia",
"altSpellings": ["BR", "Brasil", "Federative Republic of Brazil", "Rep\u00fablica Federativa do Brasil"],
"relevance": "2",
"region": "Americas",
"subregion": "South America",
"nativeLanguage": "por",
"languages": {
"por": "Portuguese"
},
"translations": {
"cym": "Brasil",
"deu": "Brasilien",
"fra": "Br\u00e9sil",
"hrv": "Brazil",
"ita": "Brasile",
"jpn": "\u30d6\u30e9\u30b8\u30eb",
"nld": "Brazili\u00eb",
"rus": "\u0411\u0440\u0430\u0437\u0438\u043b\u0438\u044f",
"spa": "Brasil"
},
"latlng": [-10, -55],
"demonym": "Brazilian",
"borders": ["ARG", "BOL", "COL", "GUF", "GUY", "PRY", "PER", "SUR", "URY", "VEN"],
"area": 8515767
},
{
"name": {
"common": "British Indian Ocean Territory",
"official": "British Indian Ocean Territory",
"native": {
"common": "British Indian Ocean Territory",
"official": "British Indian Ocean Territory"
}
},
"tld": [".io"],
"cca2": "IO",
"ccn3": "086",
"cca3": "IOT",
"currency": ["USD"],
"callingCode": ["246"],
"capital": "Diego Garcia",
"altSpellings": ["IO"],
"relevance": "0",
"region": "Africa",
"subregion": "Eastern Africa",
"nativeLanguage": "eng",
"languages": {
"eng": "English"
},
"translations": {
"cym": "Tiriogaeth Brydeinig Cefnfor India",
"deu": "Britisches Territorium im Indischen Ozean",
"fra": "Territoire britannique de l'oc\u00e9an Indien",
"hrv": "Britanski Indijskooceanski teritorij",
"ita": "Territorio britannico dell'oceano indiano",
"jpn": "\u30a4\u30ae\u30ea\u30b9\u9818\u30a4\u30f3\u30c9\u6d0b\u5730\u57df",
"nld": "Britse Gebieden in de Indische Oceaan",
"rus": "\u0411\u0440\u0438\u0442\u0430\u043d\u0441\u043a\u0430\u044f \u0442\u0435\u0440\u0440\u0438\u0442\u043e\u0440\u0438\u044f \u0432 \u0418\u043d\u0434\u0438\u0439\u0441\u043a\u043e\u043c \u043e\u043a\u0435\u0430\u043d\u0435",
"spa": "Territorio Brit\u00e1nico del Oc\u00e9ano \u00cdndico"
},
"latlng": [-6, 71.5],
"demonym": "Indian",
"borders": [],
"area": 60
},
{
"name": {
"common": "British Virgin Islands",
"official": "Virgin Islands",
"native": {
"common": "British Virgin Islands",
"official": "Virgin Islands"
}
},
"tld": [".vg"],
"cca2": "VG",
"ccn3": "092",
"cca3": "VGB",
"currency": ["USD"],
"callingCode": ["1284"],
"capital": "Road Town",
"altSpellings": ["VG"],
"relevance": "0.5",
"region": "Americas",
"subregion": "Caribbean",
"nativeLanguage": "eng",
"languages": {
"eng": "English"
},
"translations": {
"deu": "Britische Jungferninseln",
"fra": "\u00celes Vierges britanniques",
"hrv": "Britanski Djevi\u010danski Otoci",
"ita": "Isole Vergini Britanniche",
"jpn": "\u30a4\u30ae\u30ea\u30b9\u9818\u30f4\u30a1\u30fc\u30b8\u30f3\u8af8\u5cf6",
"nld": "Britse Maagdeneilanden",
"rus": "\u0411\u0440\u0438\u0442\u0430\u043d\u0441\u043a\u0438\u0435 \u0412\u0438\u0440\u0433\u0438\u043d\u0441\u043a\u0438\u0435 \u043e\u0441\u0442\u0440\u043e\u0432\u0430",
"spa": "Islas V\u00edrgenes del Reino Unido"
},
"latlng": [18.431383, -64.62305],
"demonym": "Virgin Islander",
"borders": [],
"area": 151
},
{
"name": {
"common": "Brunei",
"official": "Nation of Brunei, Abode of Peace",
"native": {
"common": "Negara Brunei Darussalam",
"official": "Nation of Brunei, Abode Damai"
}
},
"tld": [".bn"],
"cca2": "BN",
"ccn3": "096",
"cca3": "BRN",
"currency": ["BND"],
"callingCode": ["673"],
"capital": "Bandar Seri Begawan",
"altSpellings": ["BN", "Nation of Brunei", " the Abode of Peace"],
"relevance": "0",
"region": "Asia",
"subregion": "South-Eastern Asia",
"nativeLanguage": "msa",
"languages": {
"msa": "Malay"
},
"translations": {
"cym": "Brunei",
"deu": "Brunei",
"fra": "Brunei",
"hrv": "Brunej",
"ita": "Brunei",
"jpn": "\u30d6\u30eb\u30cd\u30a4\u30fb\u30c0\u30eb\u30b5\u30e9\u30fc\u30e0",
"nld": "Brunei",
"rus": "\u0411\u0440\u0443\u043d\u0435\u0439",
"spa": "Brunei"
},
"latlng": [4.5, 114.66666666],
"demonym": "Bruneian",
"borders": ["MYS"],
"area": 5765
},
{
"name": {
"common": "Bulgaria",
"official": "Republic of Bulgaria",
"native": {
"common": "\u0411\u044a\u043b\u0433\u0430\u0440\u0438\u044f",
"official": "\u0420\u0435\u043f\u0443\u0431\u043b\u0438\u043a\u0430 \u0411\u044a\u043b\u0433\u0430\u0440\u0438\u044f"
}
},
"tld": [".bg"],
"cca2": "BG",
"ccn3": "100",
"cca3": "BGR",
"currency": ["BGN"],
"callingCode": ["359"],
"capital": "Sofia",
"altSpellings": ["BG", "Republic of Bulgaria", "\u0420\u0435\u043f\u0443\u0431\u043b\u0438\u043a\u0430 \u0411\u044a\u043b\u0433\u0430\u0440\u0438\u044f"],
"relevance": "0",
"region": "Europe",
"subregion": "Eastern Europe",
"nativeLanguage": "bul",
"languages": {
"bul": "Bulgarian"
},
"translations": {
"cym": "Bwlgaria",
"deu": "Bulgarien",
"fra": "Bulgarie",
"hrv": "Bugarska",
"ita": "Bulgaria",
"jpn": "\u30d6\u30eb\u30ac\u30ea\u30a2",
"nld": "Bulgarije",
"rus": "\u0411\u043e\u043b\u0433\u0430\u0440\u0438\u044f",
"spa": "Bulgaria"
},
"latlng": [43, 25],
"demonym": "Bulgarian",
"borders": ["GRC", "MKD", "ROU", "SRB", "TUR"],
"area": 110879
},
{
"name": {
"common": "Burkina Faso",
"official": "Burkina Faso",
"native": {
"common": "Burkina Faso",
"official": "Burkina Faso"
}
},
"tld": [".bf"],
"cca2": "BF",
"ccn3": "854",
"cca3": "BFA",
"currency": ["XOF"],
"callingCode": ["226"],
"capital": "Ouagadougou",
"altSpellings": ["BF"],
"relevance": "0",
"region": "Africa",
"subregion": "Western Africa",
"nativeLanguage": "fra",
"languages": {
"fra": "French"
},
"translations": {
"cym": "Burkina Faso",
"deu": "Burkina Faso",
"fra": "Burkina Faso",
"hrv": "Burkina Faso",
"ita": "Burkina Faso",
"jpn": "\u30d6\u30eb\u30ad\u30ca\u30d5\u30a1\u30bd",
"nld": "Burkina Faso",
"rus": "\u0411\u0443\u0440\u043a\u0438\u043d\u0430-\u0424\u0430\u0441\u043e",
"spa": "Burkina Faso"
},
"latlng": [13, -2],
"demonym": "Burkinabe",
"borders": ["BEN", "CIV", "GHA", "MLI", "NER", "TGO"],
"area": 272967
},
{
"name": {
"common": "Burundi",
"official": "Republic of Burundi",
"native": {
"common": "Burundi",
"official": "R\u00e9publique du Burundi"
}
},
"tld": [".bi"],
"cca2": "BI",
"ccn3": "108",
"cca3": "BDI",
"currency": ["BIF"],
"callingCode": ["257"],
"capital": "Bujumbura",
"altSpellings": ["BI", "Republic of Burundi", "Republika y'Uburundi", "R\u00e9publique du Burundi"],
"relevance": "0",
"region": "Africa",
"subregion": "Eastern Africa",
"nativeLanguage": "run",
"languages": {
"fra": "French",
"run": "Kirundi"
},
"translations": {
"cym": "Bwrwndi",
"deu": "Burundi",
"fra": "Burundi",
"hrv": "Burundi",
"ita": "Burundi",
"jpn": "\u30d6\u30eb\u30f3\u30b8",
"nld": "Burundi",
"rus": "\u0411\u0443\u0440\u0443\u043d\u0434\u0438",
"spa": "Burundi"
},
"latlng": [-3.5, 30],
"demonym": "Burundian",
"borders": ["COD", "RWA", "TZA"],
"area": 27834
},
{
"name": {
"common": "Cambodia",
"official": "Kingdom of Cambodia",
"native": {
"common": "K\u00e2mp\u016dch\u00e9a",
"official": "\u1796\u17d2\u179a\u17c7\u179a\u17b6\u1787\u17b6\u178e\u17b6\u1785\u1780\u17d2\u179a\u1780\u1798\u17d2\u1796\u17bb\u1787\u17b6"
}
},
"tld": [".kh"],
"cca2": "KH",
"ccn3": "116",
"cca3": "KHM",
"currency": ["KHR"],
"callingCode": ["855"],
"capital": "Phnom Penh",
"altSpellings": ["KH", "Kingdom of Cambodia"],
"relevance": "0",
"region": "Asia",
"subregion": "South-Eastern Asia",
"nativeLanguage": "khm",
"languages": {
"khm": "Khmer"
},
"translations": {
"cym": "Cambodia",
"deu": "Kambodscha",
"fra": "Cambodge",
"hrv": "Kambod\u017ea",
"ita": "Cambogia",
"jpn": "\u30ab\u30f3\u30dc\u30b8\u30a2",
"nld": "Cambodja",
"rus": "\u041a\u0430\u043c\u0431\u043e\u0434\u0436\u0430",
"spa": "Camboya"
},
"latlng": [13, 105],
"demonym": "Cambodian",
"borders": ["LAO", "THA", "VNM"],
"area": 181035
},
{
"name": {
"common": "Cameroon",
"official": "Republic of Cameroon",
"native": {
"common": "Cameroun",
"official": "R\u00e9publique du Cameroun"
}
},
"tld": [".cm"],
"cca2": "CM",
"ccn3": "120",
"cca3": "CMR",
"currency": ["XAF"],
"callingCode": ["237"],
"capital": "Yaound\u00e9",
"altSpellings": ["CM", "Republic of Cameroon", "R\u00e9publique du Cameroun"],
"relevance": "0",
"region": "Africa",
"subregion": "Middle Africa",
"nativeLanguage": "fra",
"languages": {
"eng": "English",
"fra": "French"
},
"translations": {
"cym": "Camer\u0175n",
"deu": "Kamerun",
"fra": "Cameroun",
"hrv": "Kamerun",
"ita": "Camerun",
"jpn": "\u30ab\u30e1\u30eb\u30fc\u30f3",
"nld": "Kameroen",
"rus": "\u041a\u0430\u043c\u0435\u0440\u0443\u043d",
"spa": "Camer\u00fan"
},
"latlng": [6, 12],
"demonym": "Cameroonian",
"borders": ["CAF", "TCD", "COG", "GNQ", "GAB", "NGA"],
"area": 475442
},
{
"name": {
"common": "Canada",
"official": "Canada",
"native": {
"common": "Canada",
"official": "Canada"
}
},
"tld": [".ca"],
"cca2": "CA",
"ccn3": "124",
"cca3": "CAN",
"currency": ["CAD"],
"callingCode": ["1"],
"capital": "Ottawa",
"altSpellings": ["CA"],
"relevance": "2",
"region": "Americas",
"subregion": "Northern America",
"nativeLanguage": "eng",
"languages": {
"eng": "English",
"fra": "French"
},
"translations": {
"cym": "Canada",
"deu": "Kanada",
"fra": "Canada",
"hrv": "Kanada",
"ita": "Canada",
"jpn": "\u30ab\u30ca\u30c0",
"nld": "Canada",
"rus": "\u041a\u0430\u043d\u0430\u0434\u0430",
"spa": "Canad\u00e1"
},
"latlng": [60, -95],
"demonym": "Canadian",
"borders": ["USA"],
"area": 9984670
},
{
"name": {
"common": "Cape Verde",
"official": "Republic of Cabo Verde",
"native": {
"common": "Cabo Verde",
"official": "Rep\u00fablica de Cabo Verde"
}
},
"tld": [".cv"],
"cca2": "CV",
"ccn3": "132",
"cca3": "CPV",
"currency": ["CVE"],
"callingCode": ["238"],
"capital": "Praia",
"altSpellings": ["CV", "Republic of Cabo Verde", "Rep\u00fablica de Cabo Verde"],
"relevance": "0",
"region": "Africa",
"subregion": "Western Africa",
"nativeLanguage": "por",
"languages": {
"por": "Portuguese"
},
"translations": {
"cym": "Cape Verde",
"deu": "Kap Verde",
"fra": "Cap Vert",
"hrv": "Zelenortska Republika",
"ita": "Capo Verde",
"jpn": "\u30ab\u30fc\u30dc\u30d9\u30eb\u30c7",
"nld": "Kaapverdi\u00eb",
"rus": "\u041a\u0430\u0431\u043e-\u0412\u0435\u0440\u0434\u0435",
"spa": "Cabo Verde"
},
"latlng": [16, -24],
"demonym": "Cape Verdian",
"borders": [],
"area": 4033
},
{
"name": {
"common": "Cayman Islands",
"official": "Cayman Islands",
"native": {
"common": "Cayman Islands",
"official": "Cayman Islands"
}
},
"tld": [".ky"],
"cca2": "KY",
"ccn3": "136",
"cca3": "CYM",
"currency": ["KYD"],
"callingCode": ["1345"],
"capital": "George Town",
"altSpellings": ["KY"],
"relevance": "0.5",
"region": "Americas",
"subregion": "Caribbean",
"nativeLanguage": "eng",
"languages": {
"eng": "English"
},
"translations": {
"cym": "Ynysoedd_Cayman",
"deu": "Kaimaninseln",
"fra": "\u00celes Ca\u00efmans",
"hrv": "Kajmanski otoci",
"ita": "Isole Cayman",
"jpn": "\u30b1\u30a4\u30de\u30f3\u8af8\u5cf6",
"nld": "Caymaneilanden",
"rus": "\u041a\u0430\u0439\u043c\u0430\u043d\u043e\u0432\u044b \u043e\u0441\u0442\u0440\u043e\u0432\u0430",
"spa": "Islas Caim\u00e1n"
},
"latlng": [19.5, -80.5],
"demonym": "Caymanian",
"borders": [],
"area": 264
},
{
"name": {
"common": "Central African Republic",
"official": "Central African Republic",
"native": {
"common": "B\u00eaafr\u00eeka",
"official": "K\u00f6d\u00f6r\u00f6s\u00ease t\u00ee B\u00eaafr\u00eeka"
}
},
"tld": [".cf"],
"cca2": "CF",
"ccn3": "140",
"cca3": "CAF",
"currency": ["XAF"],
"callingCode": ["236"],
"capital": "Bangui",
"altSpellings": ["CF", "Central African Republic", "R\u00e9publique centrafricaine"],
"relevance": "0",
"region": "Africa",
"subregion": "Middle Africa",
"nativeLanguage": "sag",
"languages": {
"fra": "French",
"sag": "Sango"
},
"translations": {
"cym": "Gweriniaeth Canolbarth Affrica",
"deu": "Zentralafrikanische Republik",
"fra": "R\u00e9publique centrafricaine",
"hrv": "Srednjoafri\u010dka Republika",
"ita": "Repubblica Centrafricana",
"jpn": "\u4e2d\u592e\u30a2\u30d5\u30ea\u30ab\u5171\u548c\u56fd",
"nld": "Centraal-Afrikaanse Republiek",
"rus": "\u0426\u0435\u043d\u0442\u0440\u0430\u043b\u044c\u043d\u043e\u0430\u0444\u0440\u0438\u043a\u0430\u043d\u0441\u043a\u0430\u044f \u0420\u0435\u0441\u043f\u0443\u0431\u043b\u0438\u043a\u0430",
"spa": "Rep\u00fablica Centroafricana"
},
"latlng": [7, 21],
"demonym": "Central African",
"borders": ["CMR", "TCD", "COD", "COG", "SSD", "SDN"],
"area": 622984
},
{
"name": {
"common": "Chad",
"official": "Republic of Chad",
"native": {
"common": "Tchad",
"official": "R\u00e9publique du Tchad"
}
},
"tld": [".td"],
"cca2": "TD",
"ccn3": "148",
"cca3": "TCD",
"currency": ["XAF"],
"callingCode": ["235"],
"capital": "N'Djamena",
"altSpellings": ["TD", "Tchad", "Republic of Chad", "R\u00e9publique du Tchad"],
"relevance": "0",
"region": "Africa",
"subregion": "Middle Africa",
"nativeLanguage": "ara",
"languages": {
"ara": "Arabic",
"fra": "French"
},
"translations": {
"cym": "Tsiad",
"deu": "Tschad",
"fra": "Tchad",
"hrv": "\u010cad",
"ita": "Ciad",
"jpn": "\u30c1\u30e3\u30c9",
"nld": "Tsjaad",
"rus": "\u0427\u0430\u0434",
"spa": "Chad"
},
"latlng": [15, 19],
"demonym": "Chadian",
"borders": ["CMR", "CAF", "LBY", "NER", "NGA", "SSD"],
"area": 1284000
},
{
"name": {
"common": "Chile",
"official": "Republic of Chile",
"native": {
"common": "Chile",
"official": "Rep\u00fablica de Chile"
}
},
"tld": [".cl"],
"cca2": "CL",
"ccn3": "152",
"cca3": "CHL",
"currency": ["CLF", "CLP"],
"callingCode": ["56"],
"capital": "Santiago",
"altSpellings": ["CL", "Republic of Chile", "Rep\u00fablica de Chile"],
"relevance": "0",
"region": "Americas",
"subregion": "South America",
"nativeLanguage": "spa",
"languages": {
"spa": "Spanish"
},
"translations": {
"cym": "Chile",
"deu": "Chile",
"fra": "Chili",
"hrv": "\u010cile",
"ita": "Cile",
"jpn": "\u30c1\u30ea",
"nld": "Chili",
"rus": "\u0427\u0438\u043b\u0438",
"spa": "Chile"
},
"latlng": [-30, -71],
"demonym": "Chilean",
"borders": ["ARG", "BOL", "PER"],
"area": 756102
},
{
"name": {
"common": "China",
"official": "People's Republic of China",
"native": {
"common": "\u4e2d\u56fd",
"official": "\u4e2d\u534e\u4eba\u6c11\u5171\u548c\u56fd"
}
},
"tld": [".cn", ".\u4e2d\u56fd", ".\u4e2d\u570b", ".\u516c\u53f8", ".\u7f51\u7edc"],
"cca2": "CN",
"ccn3": "156",
"cca3": "CHN",
"currency": ["CNY"],
"callingCode": ["86"],
"capital": "Beijing",
"altSpellings": ["CN", "Zh\u014dnggu\u00f3", "Zhongguo", "Zhonghua", "People's Republic of China", "\u4e2d\u534e\u4eba\u6c11\u5171\u548c\u56fd", "Zh\u014dnghu\u00e1 R\u00e9nm\u00edn G\u00f2ngh\u00e9gu\u00f3"],
"relevance": "0",
"region": "Asia",
"subregion": "Eastern Asia",
"nativeLanguage": "cmn",
"languages": {
"cmn": "Mandarin"
},
"translations": {
"cym": "Tsieina",
"deu": "China",
"fra": "Chine",
"hrv": "Kina",
"ita": "Cina",
"jpn": "\u4e2d\u56fd",
"nld": "China",
"rus": "\u041a\u0438\u0442\u0430\u0439",
"spa": "China"
},
"latlng": [35, 105],
"demonym": "Chinese",
"borders": ["AFG", "BTN", "MMR", "HKG", "IND", "KAZ", "PRK", "KGZ", "LAO", "MAC", "MNG", "PAK", "RUS", "TJK", "VNM"],
"area": 9706961
},
{
"name": {
"common": "Christmas Island",
"official": "Territory of Christmas Island",
"native": {
"common": "Christmas Island",
"official": "Territory of Christmas Island"
}
},
"tld": [".cx"],
"cca2": "CX",
"ccn3": "162",
"cca3": "CXR",
"currency": ["AUD"],
"callingCode": ["61"],
"capital": "Flying Fish Cove",
"altSpellings": ["CX", "Territory of Christmas Island"],
"relevance": "0.5",
"region": "Oceania",
"subregion": "Australia and New Zealand",
"nativeLanguage": "eng",
"languages": {
"eng": "English"
},
"translations": {
"cym": "Ynys y Nadolig",
"deu": "Weihnachtsinsel",
"fra": "\u00cele Christmas",
"hrv": "Bo\u017ei\u0107ni otok",
"ita": "Isola di Natale",
"jpn": "\u30af\u30ea\u30b9\u30de\u30b9\u5cf6",
"nld": "Christmaseiland",
"rus": "\u041e\u0441\u0442\u0440\u043e\u0432 \u0420\u043e\u0436\u0434\u0435\u0441\u0442\u0432\u0430",
"spa": "Isla de Navidad"
},
"latlng": [-10.5, 105.66666666],
"demonym": "Christmas Island",
"borders": [],
"area": 135
},
{
"name": {
"common": "Cocos (Keeling) Islands",
"official": "Territory of the Cocos (Keeling) Islands",
"native": {
"common": "Cocos (Keeling) Islands",
"official": "Territory of the Cocos (Keeling) Islands"
}
},
"tld": [".cc"],
"cca2": "CC",
"ccn3": "166",
"cca3": "CCK",
"currency": ["AUD"],
"callingCode": ["61"],
"capital": "West Island",
"altSpellings": ["CC", "Territory of the Cocos (Keeling) Islands", "Keeling Islands"],
"relevance": "0",
"region": "Oceania",
"subregion": "Australia and New Zealand",
"nativeLanguage": "eng",
"languages": {
"eng": "English"
},
"translations": {
"cym": "Ynysoedd Cocos",
"deu": "Kokosinseln",
"fra": "\u00celes Cocos",
"hrv": "Kokosovi Otoci",
"ita": "Isole Cocos e Keeling",
"jpn": "\u30b3\u30b3\u30b9\uff08\u30ad\u30fc\u30ea\u30f3\u30b0\uff09\u8af8\u5cf6",
"nld": "Cocoseilanden",
"rus": "\u041a\u043e\u043a\u043e\u0441\u043e\u0432\u044b\u0435 \u043e\u0441\u0442\u0440\u043e\u0432\u0430",
"spa": "Islas Cocos o Islas Keeling"
},
"latlng": [-12.5, 96.83333333],
"demonym": "Cocos Islander",
"borders": [],
"area": 14
},
{
"name": {
"common": "Colombia",
"official": "Republic of Colombia",
"native": {
"common": "Colombia",
"official": "Rep\u00fablica de Colombia"
}
},
"tld": [".co"],
"cca2": "CO",
"ccn3": "170",
"cca3": "COL",
"currency": ["COP"],
"callingCode": ["57"],
"capital": "Bogot\u00e1",
"altSpellings": ["CO", "Republic of Colombia", "Rep\u00fablica de Colombia"],
"relevance": "0",
"region": "Americas",
"subregion": "South America",
"nativeLanguage": "spa",
"languages": {
"spa": "Spanish"
},
"translations": {
"cym": "Colombia",
"deu": "Kolumbien",
"fra": "Colombie",
"hrv": "Kolumbija",
"ita": "Colombia",
"jpn": "\u30b3\u30ed\u30f3\u30d3\u30a2",
"nld": "Colombia",
"rus": "\u041a\u043e\u043b\u0443\u043c\u0431\u0438\u044f",
"spa": "Colombia"
},
"latlng": [4, -72],
"demonym": "Colombian",
"borders": ["BRA", "ECU", "PAN", "PER", "VEN"],
"area": 1141748
},
{
"name": {
"common": "Comoros",
"official": "Union of the Comoros",
"native": {
"common": "Komori",
"official": "Udzima wa Komori"
}
},
"tld": [".km"],
"cca2": "KM",
"ccn3": "174",
"cca3": "COM",
"currency": ["KMF"],
"callingCode": ["269"],
"capital": "Moroni",
"altSpellings": ["KM", "Union of the Comoros", "Union des Comores", "Udzima wa Komori", "al-Itti\u1e25\u0101d al-Qumur\u012b"],
"relevance": "0",
"region": "Africa",
"subregion": "Eastern Africa",
"nativeLanguage": "zdj",
"languages": {
"ara": "Arabic",
"fra": "French",
"zdj": "Comorian"
},
"translations": {
"cym": "Comoros",
"deu": "Union der Komoren",
"fra": "Comores",
"hrv": "Komori",
"ita": "Comore",
"jpn": "\u30b3\u30e2\u30ed",
"nld": "Comoren",
"rus": "\u041a\u043e\u043c\u043e\u0440\u044b",
"spa": "Comoras"
},
"latlng": [-12.16666666, 44.25],
"demonym": "Comoran",
"borders": [],
"area": 1862
},
{
"name": {
"common": "Republic of the Congo",
"official": "Republic of the Congo",
"native": {
"common": "R\u00e9publique du Congo",
"official": "R\u00e9publique du Congo"
}
},
"tld": [".cg"],
"cca2": "CG",
"ccn3": "178",
"cca3": "COG",
"currency": ["XAF"],
"callingCode": ["242"],
"capital": "Brazzaville",
"altSpellings": ["CG", "Congo-Brazzaville"],
"relevance": "0",
"region": "Africa",
"subregion": "Middle Africa",
"nativeLanguage": "fra",
"languages": {
"fra": "French",
"lin": "Lingala"
},
"translations": {
"cym": "Gweriniaeth y Congo",
"deu": "Kongo",
"fra": "Congo",
"hrv": "Kongo",
"ita": "Congo",
"jpn": "\u30b3\u30f3\u30b4\u5171\u548c\u56fd",
"nld": "Congo",
"rus": "\u0420\u0435\u0441\u043f\u0443\u0431\u043b\u0438\u043a\u0430 \u041a\u043e\u043d\u0433\u043e",
"spa": "Congo"
},
"latlng": [-1, 15],
"demonym": "Congolese",
"borders": ["AGO", "CMR", "CAF", "COD", "GAB"],
"area": 342000
},
{
"name": {
"common": "DR Congo",
"official": "Democratic Republic of the Congo",
"native": {
"common": "RD Congo",
"official": "R\u00e9publique d\u00e9mocratique du Congo"
}
},
"tld": [".cd"],
"cca2": "CD",
"ccn3": "180",
"cca3": "COD",
"currency": ["CDF"],
"callingCode": ["243"],
"capital": "Kinshasa",
"altSpellings": ["CD", "DR Congo", "Congo-Kinshasa", "DRC"],
"relevance": "0",
"region": "Africa",
"subregion": "Middle Africa",
"nativeLanguage": "swa",
"languages": {
"fra": "French",
"kon": "Kikongo",
"lin": "Lingala",
"lua": "Tshiluba",
"swa": "Swahili"
},
"translations": {
"cym": "Gweriniaeth Ddemocrataidd Congo",
"deu": "Kongo (Dem. Rep.)",
"fra": "Congo (R\u00e9p. d\u00e9m.)",
"hrv": "Kongo, Demokratska Republika",
"ita": "Congo (Rep. Dem.)",
"jpn": "\u30b3\u30f3\u30b4\u6c11\u4e3b\u5171\u548c\u56fd",
"nld": "Congo (DRC)",
"rus": "\u0414\u0435\u043c\u043e\u043a\u0440\u0430\u0442\u0438\u0447\u0435\u0441\u043a\u0430\u044f \u0420\u0435\u0441\u043f\u0443\u0431\u043b\u0438\u043a\u0430 \u041a\u043e\u043d\u0433\u043e",
"spa": "Congo (Rep. Dem.)"
},
"latlng": [0, 25],
"demonym": "Congolese",
"borders": ["AGO", "BDI", "CAF", "COG", "RWA", "SSD", "TZA", "UGA", "ZMB"],
"area": 2344858
},
{
"name": {
"common": "Cook Islands",
"official": "Cook Islands",
"native": {
"common": "Cook Islands",
"official": "Cook Islands"
}
},
"tld": [".ck"],
"cca2": "CK",
"ccn3": "184",
"cca3": "COK",
"currency": ["NZD"],
"callingCode": ["682"],
"capital": "Avarua",
"altSpellings": ["CK", "K\u016bki '\u0100irani"],
"relevance": "0.5",
"region": "Oceania",
"subregion": "Polynesia",
"nativeLanguage": "eng",
"languages": {
"eng": "English",
"rar": "Cook Islands M\u0101ori"
},
"translations": {
"cym": "Ynysoedd Cook",
"deu": "Cookinseln",
"fra": "\u00celes Cook",
"hrv": "Cookovo Oto\u010dje",
"ita": "Isole Cook",
"jpn": "\u30af\u30c3\u30af\u8af8\u5cf6",
"nld": "Cookeilanden",
"rus": "\u041e\u0441\u0442\u0440\u043e\u0432\u0430 \u041a\u0443\u043a\u0430",
"spa": "Islas Cook"
},
"latlng": [-21.23333333, -159.76666666],
"demonym": "Cook Islander",
"borders": [],
"area": 236
},
{
"name": {
"common": "Costa Rica",
"official": "Republic of Costa Rica",
"native": {
"common": "Costa Rica",
"official": "Rep\u00fablica de Costa Rica"
}
},
"tld": [".cr"],
"cca2": "CR",
"ccn3": "188",
"cca3": "CRI",
"currency": ["CRC"],
"callingCode": ["506"],
"capital": "San Jos\u00e9",
"altSpellings": ["CR", "Republic of Costa Rica", "Rep\u00fablica de Costa Rica"],
"relevance": "0",
"region": "Americas",
"subregion": "Central America",
"nativeLanguage": "spa",
"languages": {
"spa": "Spanish"
},
"translations": {
"cym": "Costa Rica",
"deu": "Costa Rica",
"fra": "Costa Rica",
"hrv": "Kostarika",
"ita": "Costa Rica",
"jpn": "\u30b3\u30b9\u30bf\u30ea\u30ab",
"nld": "Costa Rica",
"rus": "\u041a\u043e\u0441\u0442\u0430-\u0420\u0438\u043a\u0430",
"spa": "Costa Rica"
},
"latlng": [10, -84],
"demonym": "Costa Rican",
"borders": ["NIC", "PAN"],
"area": 51100
},
{
"name": {
"common": "Croatia",
"official": "Republic of Croatia",
"native": {
"common": "Hrvatska",
"official": "Republika Hrvatska"
}
},
"tld": [".hr"],
"cca2": "HR",
"ccn3": "191",
"cca3": "HRV",
"currency": ["HRK"],
"callingCode": ["385"],
"capital": "Zagreb",
"altSpellings": ["HR", "Hrvatska", "Republic of Croatia", "Republika Hrvatska"],
"relevance": "0",
"region": "Europe",
"subregion": "Southern Europe",
"nativeLanguage": "hrv",
"languages": {
"hrv": "Croatian"
},
"translations": {
"cym": "Croatia",
"deu": "Kroatien",
"fra": "Croatie",
"hrv": "Hrvatska",
"ita": "Croazia",
"jpn": "\u30af\u30ed\u30a2\u30c1\u30a2",
"nld": "Kroati\u00eb",
"rus": "\u0425\u043e\u0440\u0432\u0430\u0442\u0438\u044f",
"spa": "Croacia"
},
"latlng": [45.16666666, 15.5],
"demonym": "Croatian",
"borders": ["BIH", "HUN", "MNE", "SRB", "SVN"],
"area": 56594
},
{
"name": {
"common": "Cuba",
"official": "Republic of Cuba",
"native": {
"common": "Cuba",
"official": "Rep\u00fablica de Cuba"
}
},
"tld": [".cu"],
"cca2": "CU",
"ccn3": "192",
"cca3": "CUB",
"currency": ["CUC", "CUP"],
"callingCode": ["53"],
"capital": "Havana",
"altSpellings": ["CU", "Republic of Cuba", "Rep\u00fablica de Cuba"],
"relevance": "0",
"region": "Americas",
"subregion": "Caribbean",
"nativeLanguage": "spa",
"languages": {
"spa": "Spanish"
},
"translations": {
"cym": "Ciwba",
"deu": "Kuba",
"fra": "Cuba",
"hrv": "Kuba",
"ita": "Cuba",
"jpn": "\u30ad\u30e5\u30fc\u30d0",
"nld": "Cuba",
"rus": "\u041a\u0443\u0431\u0430",
"spa": "Cuba"
},
"latlng": [21.5, -80],
"demonym": "Cuban",
"borders": [],
"area": 109884
},
{
"name": {
"common": "Cura\u00e7ao",
"official": "Country of Cura\u00e7ao",
"native": {
"common": "Cura\u00e7ao",
"official": "Land Cura\u00e7ao"
}
},
"tld": [".cw"],
"cca2": "CW",
"ccn3": "531",
"cca3": "CUW",
"currency": ["ANG"],
"callingCode": ["5999"],
"capital": "Willemstad",
"altSpellings": ["CW", "Curacao", "K\u00f2rsou", "Country of Cura\u00e7ao", "Land Cura\u00e7ao", "Pais K\u00f2rsou"],
"relevance": "0",
"region": "Americas",
"subregion": "Caribbean",
"nativeLanguage": "nld",
"languages": {
"eng": "English",
"nld": "Dutch",
"pap": "Papiamento"
},
"translations": {
"nld": "Cura\u00e7ao",
"rus": "\u041a\u044e\u0440\u0430\u0441\u0430\u043e"
},
"latlng": [12.116667, -68.933333],
"demonym": "Dutch",
"borders": [],
"area": 444
},
{
"name": {
"common": "Cyprus",
"official": "Republic of Cyprus",
"native": {
"common": "\u039a\u03cd\u03c0\u03c1\u03bf\u03c2",
"official": "\u0394\u03b7\u03bc\u03bf\u03ba\u03c1\u03b1\u03c4\u03af\u03b1 \u03c4\u03b7\u03c2 \u039a\u03cd\u03c0\u03c1\u03bf\u03c2"
}
},
"tld": [".cy"],
"cca2": "CY",
"ccn3": "196",
"cca3": "CYP",
"currency": ["EUR"],
"callingCode": ["357"],
"capital": "Nicosia",
"altSpellings": ["CY", "K\u00fdpros", "K\u0131br\u0131s", "Republic of Cyprus", "\u039a\u03c5\u03c0\u03c1\u03b9\u03b1\u03ba\u03ae \u0394\u03b7\u03bc\u03bf\u03ba\u03c1\u03b1\u03c4\u03af\u03b1", "K\u0131br\u0131s Cumhuriyeti"],
"relevance": "0",
"region": "Europe",
"subregion": "Eastern Europe",
"nativeLanguage": "ell",
"languages": {
"ell": "Greek",
"tur": "Turkish"
},
"translations": {
"cym": "Cyprus",
"deu": "Zypern",
"fra": "Chypre",
"hrv": "Cipar",
"ita": "Cipro",
"jpn": "\u30ad\u30d7\u30ed\u30b9",
"nld": "Cyprus",
"rus": "\u041a\u0438\u043f\u0440",
"spa": "Chipre"
},
"latlng": [35, 33],
"demonym": "Cypriot",
"borders": ["GBR"],
"area": 9251
},
{
"name": {
"common": "Czech Republic",
"official": "Czech Republic",
"native": {
"common": "\u010cesk\u00e1 republika",
"official": "\u010desk\u00e1 republika"
}
},
"tld": [".cz"],
"cca2": "CZ",
"ccn3": "203",
"cca3": "CZE",
"currency": ["CZK"],
"callingCode": ["420"],
"capital": "Prague",
"altSpellings": ["CZ", "\u010cesk\u00e1 republika", "\u010cesko"],
"relevance": "0",
"region": "Europe",
"subregion": "Eastern Europe",
"nativeLanguage": "ces",
"languages": {
"ces": "Czech",
"slk": "Slovak"
},
"translations": {
"cym": "Y Weriniaeth Tsiec",
"deu": "Tschechische Republik",
"fra": "R\u00e9publique tch\u00e8que",
"hrv": "\u010ce\u0161ka",
"ita": "Repubblica Ceca",
"jpn": "\u30c1\u30a7\u30b3",
"nld": "Tsjechi\u00eb",
"rus": "\u0427\u0435\u0445\u0438\u044f",
"spa": "Rep\u00fablica Checa"
},
"latlng": [49.75, 15.5],
"demonym": "Czech",
"borders": ["AUT", "DEU", "POL", "SVK"],
"area": 78865
},
{
"name": {
"common": "Denmark",
"official": "Kingdom of Denmark",
"native": {
"common": "Danmark",
"official": "Kongeriget Danmark"
}
},
"tld": [".dk"],
"cca2": "DK",
"ccn3": "208",
"cca3": "DNK",
"currency": ["DKK"],
"callingCode": ["45"],
"capital": "Copenhagen",
"altSpellings": ["DK", "Danmark", "Kingdom of Denmark", "Kongeriget Danmark"],
"relevance": "1.5",
"region": "Europe",
"subregion": "Northern Europe",
"nativeLanguage": "dan",
"languages": {
"dan": "Danish"
},
"translations": {
"cym": "Denmarc",
"deu": "D\u00e4nemark",
"fra": "Danemark",
"hrv": "Danska",
"ita": "Danimarca",
"jpn": "\u30c7\u30f3\u30de\u30fc\u30af",
"nld": "Denemarken",
"rus": "\u0414\u0430\u043d\u0438\u044f",
"spa": "Dinamarca"
},
"latlng": [56, 10],
"demonym": "Danish",
"borders": ["DEU"],
"area": 43094
},
{
"name": {
"common": "Djibouti",
"official": "Republic of Djibouti",
"native": {
"common": "Djibouti",
"official": "R\u00e9publique de Djibouti"
}
},
"tld": [".dj"],
"cca2": "DJ",
"ccn3": "262",
"cca3": "DJI",
"currency": ["DJF"],
"callingCode": ["253"],
"capital": "Djibouti",
"altSpellings": ["DJ", "Jabuuti", "Gabuuti", "Republic of Djibouti", "R\u00e9publique de Djibouti", "Gabuutih Ummuuno", "Jamhuuriyadda Jabuuti"],
"relevance": "0",
"region": "Africa",
"subregion": "Eastern Africa",
"nativeLanguage": "ara",
"languages": {
"ara": "Arabic",
"fra": "French"
},
"translations": {
"cym": "Djibouti",
"deu": "Dschibuti",
"fra": "Djibouti",
"hrv": "D\u017eibuti",
"ita": "Gibuti",
"jpn": "\u30b8\u30d6\u30c1",
"nld": "Djibouti",
"rus": "\u0414\u0436\u0438\u0431\u0443\u0442\u0438",
"spa": "Yibuti"
},
"latlng": [11.5, 43],
"demonym": "Djibouti",
"borders": ["ERI", "ETH", "SOM"],
"area": 23200
},
{
"name": {
"common": "Dominica",
"official": "Commonwealth of Dominica",
"native": {
"common": "Dominica",
"official": "Commonwealth of Dominica"
}
},
"tld": [".dm"],
"cca2": "DM",
"ccn3": "212",
"cca3": "DMA",
"currency": ["XCD"],
"callingCode": ["1767"],
"capital": "Roseau",
"altSpellings": ["DM", "Dominique", "Wai\u2018tu kubuli", "Commonwealth of Dominica"],
"relevance": "0.5",
"region": "Americas",
"subregion": "Caribbean",
"nativeLanguage": "eng",
"languages": {
"eng": "English"
},
"translations": {
"cym": "Dominica",
"deu": "Dominica",
"fra": "Dominique",
"hrv": "Dominika",
"ita": "Dominica",
"jpn": "\u30c9\u30df\u30cb\u30ab\u56fd",
"nld": "Dominica",
"rus": "\u0414\u043e\u043c\u0438\u043d\u0438\u043a\u0430",
"spa": "Dominica"
},
"latlng": [15.41666666, -61.33333333],
"demonym": "Dominican",
"borders": [],
"area": 751
},
{
"name": {
"common": "Dominican Republic",
"official": "Dominican Republic",
"native": {
"common": "Rep\u00fablica Dominicana",
"official": "Rep\u00fablica Dominicana"
}
},
"tld": [".do"],
"cca2": "DO",
"ccn3": "214",
"cca3": "DOM",
"currency": ["DOP"],
"callingCode": ["1809", "1829", "1849"],
"capital": "Santo Domingo",
"altSpellings": ["DO"],
"relevance": "0",
"region": "Americas",
"subregion": "Caribbean",
"nativeLanguage": "spa",
"languages": {
"spa": "Spanish"
},
"translations": {
"cym": "Gweriniaeth_Dominica",
"deu": "Dominikanische Republik",
"fra": "R\u00e9publique dominicaine",
"hrv": "Dominikanska Republika",
"ita": "Repubblica Dominicana",
"jpn": "\u30c9\u30df\u30cb\u30ab\u5171\u548c\u56fd",
"nld": "Dominicaanse Republiek",
"rus": "\u0414\u043e\u043c\u0438\u043d\u0438\u043a\u0430\u043d\u0441\u043a\u0430\u044f \u0420\u0435\u0441\u043f\u0443\u0431\u043b\u0438\u043a\u0430",
"spa": "Rep\u00fablica Dominicana"
},
"latlng": [19, -70.66666666],
"demonym": "Dominican",
"borders": ["HTI"],
"area": 48671
},
{
"name": {
"common": "Ecuador",
"official": "Republic of Ecuador",
"native": {
"common": "Ecuador",
"official": "Rep\u00fablica del Ecuador"
}
},
"tld": [".ec"],
"cca2": "EC",
"ccn3": "218",
"cca3": "ECU",
"currency": ["USD"],
"callingCode": ["593"],
"capital": "Quito",
"altSpellings": ["EC", "Republic of Ecuador", "Rep\u00fablica del Ecuador"],
"relevance": "0",
"region": "Americas",
"subregion": "South America",
"nativeLanguage": "spa",
"languages": {
"spa": "Spanish"
},
"translations": {
"cym": "Ecwador",
"deu": "Ecuador",
"fra": "\u00c9quateur",
"hrv": "Ekvador",
"ita": "Ecuador",
"jpn": "\u30a8\u30af\u30a2\u30c9\u30eb",
"nld": "Ecuador",
"rus": "\u042d\u043a\u0432\u0430\u0434\u043e\u0440",
"spa": "Ecuador"
},
"latlng": [-2, -77.5],
"demonym": "Ecuadorean",
"borders": ["COL", "PER"],
"area": 276841
},
{
"name": {
"common": "Egypt",
"official": "Arab Republic of Egypt",
"native": {
"common": "\u0645\u0635\u0631",
"official": "\u062c\u0645\u0647\u0648\u0631\u064a\u0629 \u0645\u0635\u0631 \u0627\u0644\u0639\u0631\u0628\u064a\u0629"
}
},
"tld": [".eg", ".\u0645\u0635\u0631"],
"cca2": "EG",
"ccn3": "818",
"cca3": "EGY",
"currency": ["EGP"],
"callingCode": ["20"],
"capital": "Cairo",
"altSpellings": ["EG", "Arab Republic of Egypt"],
"relevance": "1.5",
"region": "Africa",
"subregion": "Northern Africa",
"nativeLanguage": "ara",
"languages": {
"ara": "Arabic"
},
"translations": {
"cym": "Yr Aifft",
"deu": "\u00c4gypten",
"fra": "\u00c9gypte",
"hrv": "Egipat",
"ita": "Egitto",
"jpn": "\u30a8\u30b8\u30d7\u30c8",
"nld": "Egypte",
"rus": "\u0415\u0433\u0438\u043f\u0435\u0442",
"spa": "Egipto"
},
"latlng": [27, 30],
"demonym": "Egyptian",
"borders": ["ISR", "LBY", "SDN"],
"area": 1002450
},
{
"name": {
"common": "El Salvador",
"official": "Republic of El Salvador",
"native": {
"common": "El Salvador",
"official": "Rep\u00fablica de El Salvador"
}
},
"tld": [".sv"],
"cca2": "SV",
"ccn3": "222",
"cca3": "SLV",
"currency": ["SVC", "USD"],
"callingCode": ["503"],
"capital": "San Salvador",
"altSpellings": ["SV", "Republic of El Salvador", "Rep\u00fablica de El Salvador"],
"relevance": "0",
"region": "Americas",
"subregion": "Central America",
"nativeLanguage": "spa",
"languages": {
"spa": "Spanish"
},
"translations": {
"cym": "El Salvador",
"deu": "El Salvador",
"fra": "Salvador",
"hrv": "Salvador",
"ita": "El Salvador",
"jpn": "\u30a8\u30eb\u30b5\u30eb\u30d0\u30c9\u30eb",
"nld": "El Salvador",
"rus": "\u0421\u0430\u043b\u044c\u0432\u0430\u0434\u043e\u0440",
"spa": "El Salvador"
},
"latlng": [13.83333333, -88.91666666],
"demonym": "Salvadoran",
"borders": ["GTM", "HND"],
"area": 21041
},
{
"name": {
"common": "Equatorial Guinea",
"official": "Republic of Equatorial Guinea",
"native": {
"common": "Guinea Ecuatorial",
"official": "Rep\u00fablica de Guinea Ecuatorial"
}
},
"tld": [".gq"],
"cca2": "GQ",
"ccn3": "226",
"cca3": "GNQ",
"currency": ["XAF"],
"callingCode": ["240"],
"capital": "Malabo",
"altSpellings": ["GQ", "Republic of Equatorial Guinea", "Rep\u00fablica de Guinea Ecuatorial", "R\u00e9publique de Guin\u00e9e \u00e9quatoriale", "Rep\u00fablica da Guin\u00e9 Equatorial"],
"relevance": "0",
"region": "Africa",
"subregion": "Middle Africa",
"nativeLanguage": "spa",
"languages": {
"fra": "French",
"por": "Portuguese",
"spa": "Spanish"
},
"translations": {
"cym": "Gini Gyhydeddol",
"deu": "\u00c4quatorial-Guinea",
"fra": "Guin\u00e9e-\u00c9quatoriale",
"hrv": "Ekvatorijalna Gvineja",
"ita": "Guinea Equatoriale",
"jpn": "\u8d64\u9053\u30ae\u30cb\u30a2",
"nld": "Equatoriaal-Guinea",
"rus": "\u042d\u043a\u0432\u0430\u0442\u043e\u0440\u0438\u0430\u043b\u044c\u043d\u0430\u044f \u0413\u0432\u0438\u043d\u0435\u044f",
"spa": "Guinea Ecuatorial"
},
"latlng": [2, 10],
"demonym": "Equatorial Guinean",
"borders": ["CMR", "GAB"],
"area": 28051
},
{
"name": {
"common": "Eritrea",
"official": "State of Eritrea",
"native": {
"common": "\u12a4\u122d\u1275\u122b",
"official": "\u1203\u1308\u1228 \u12a4\u122d\u1275\u122b"
}
},
"tld": [".er"],
"cca2": "ER",
"ccn3": "232",
"cca3": "ERI",
"currency": ["ERN"],
"callingCode": ["291"],
"capital": "Asmara",
"altSpellings": ["ER", "State of Eritrea", "\u1203\u1308\u1228 \u12a4\u122d\u1275\u122b", "Dawlat Iritriy\u00e1", "\u02beErtr\u0101", "Iritriy\u0101", ""],
"relevance": "0",
"region": "Africa",
"subregion": "Eastern Africa",
"nativeLanguage": "tir",
"languages": {
"ara": "Arabic",
"eng": "English",
"tir": "Tigrinya"
},
"translations": {
"cym": "Eritrea",
"deu": "Eritrea",
"fra": "\u00c9rythr\u00e9e",
"hrv": "Eritreja",
"ita": "Eritrea",
"jpn": "\u30a8\u30ea\u30c8\u30ea\u30a2",
"nld": "Eritrea",
"rus": "\u042d\u0440\u0438\u0442\u0440\u0435\u044f",
"spa": "Eritrea"
},
"latlng": [15, 39],
"demonym": "Eritrean",
"borders": ["DJI", "ETH", "SDN"],
"area": 117600
},
{
"name": {
"common": "Estonia",
"official": "Republic of Estonia",
"native": {
"common": "Eesti",
"official": "Eesti Vabariik"
}
},
"tld": [".ee"],
"cca2": "EE",
"ccn3": "233",
"cca3": "EST",
"currency": ["EUR"],
"callingCode": ["372"],
"capital": "Tallinn",
"altSpellings": ["EE", "Eesti", "Republic of Estonia", "Eesti Vabariik"],
"relevance": "0",
"region": "Europe",
"subregion": "Northern Europe",
"nativeLanguage": "est",
"languages": {
"est": "Estonian"
},
"translations": {
"cym": "Estonia",
"deu": "Estland",
"fra": "Estonie",
"hrv": "Estonija",
"ita": "Estonia",
"jpn": "\u30a8\u30b9\u30c8\u30cb\u30a2",
"nld": "Estland",
"rus": "\u042d\u0441\u0442\u043e\u043d\u0438\u044f",
"spa": "Estonia"
},
"latlng": [59, 26],
"demonym": "Estonian",
"borders": ["LVA", "RUS"],
"area": 45227
},
{
"name": {
"common": "Ethiopia",
"official": "Federal Democratic Republic of Ethiopia",
"native": {
"common": "\u12a2\u1275\u12ee\u1335\u12eb",
"official": "\u12e8\u12a2\u1275\u12ee\u1335\u12eb \u134c\u12f4\u122b\u120b\u12ca \u12f2\u121e\u12ad\u122b\u1232\u12eb\u12ca \u122a\u1350\u1265\u120a\u12ad"
}
},
"tld": [".et"],
"cca2": "ET",
"ccn3": "231",
"cca3": "ETH",
"currency": ["ETB"],
"callingCode": ["251"],
"capital": "Addis Ababa",
"altSpellings": ["ET", "\u02be\u012aty\u014d\u1e57\u1e57y\u0101", "Federal Democratic Republic of Ethiopia", "\u12e8\u12a2\u1275\u12ee\u1335\u12eb \u134c\u12f4\u122b\u120b\u12ca \u12f2\u121e\u12ad\u122b\u1232\u12eb\u12ca \u122a\u1350\u1265\u120a\u12ad"],
"relevance": "0",
"region": "Africa",
"subregion": "Eastern Africa",
"nativeLanguage": "amh",
"languages": {
"amh": "Amharic"
},
"translations": {
"cym": "Ethiopia",
"deu": "\u00c4thiopien",
"fra": "\u00c9thiopie",
"hrv": "Etiopija",
"ita": "Etiopia",
"jpn": "\u30a8\u30c1\u30aa\u30d4\u30a2",
"nld": "Ethiopi\u00eb",
"rus": "\u042d\u0444\u0438\u043e\u043f\u0438\u044f",
"spa": "Etiop\u00eda"
},
"latlng": [8, 38],
"demonym": "Ethiopian",
"borders": ["DJI", "ERI", "KEN", "SOM", "SSD", "SDN"],
"area": 1104300
},
{
"name": {
"common": "Falkland Islands",
"official": "Falkland Islands",
"native": {
"common": "Falkland Islands",
"official": "Falkland Islands"
}
},
"tld": [".fk"],
"cca2": "FK",
"ccn3": "238",
"cca3": "FLK",
"currency": ["FKP"],
"callingCode": ["500"],
"capital": "Stanley",
"altSpellings": ["FK", "Islas Malvinas"],
"relevance": "0.5",
"region": "Americas",
"subregion": "South America",
"nativeLanguage": "eng",
"languages": {
"eng": "English"
},
"translations": {
"deu": "Falklandinseln",
"fra": "\u00celes Malouines",
"hrv": "Falklandski Otoci",
"ita": "Isole Falkland o Isole Malvine",
"jpn": "\u30d5\u30a9\u30fc\u30af\u30e9\u30f3\u30c9\uff08\u30de\u30eb\u30d3\u30ca\u30b9\uff09\u8af8\u5cf6",
"nld": "Falklandeilanden",
"rus": "\u0424\u043e\u043b\u043a\u043b\u0435\u043d\u0434\u0441\u043a\u0438\u0435 \u043e\u0441\u0442\u0440\u043e\u0432\u0430",
"spa": "Islas Malvinas"
},
"latlng": [-51.75, -59],
"demonym": "Falkland Islander",
"borders": [],
"area": 12173
},
{
"name": {
"common": "Faroe Islands",
"official": "Faroe Islands",
"native": {
"common": "F\u00f8royar",
"official": "F\u00f8royar"
}
},
"tld": [".fo"],
"cca2": "FO",
"ccn3": "234",
"cca3": "FRO",
"currency": ["DKK"],
"callingCode": ["298"],
"capital": "T\u00f3rshavn",
"altSpellings": ["FO", "F\u00f8royar", "F\u00e6r\u00f8erne"],
"relevance": "0.5",
"region": "Europe",
"subregion": "Northern Europe",
"nativeLanguage": "fao",
"languages": {
"dan": "Danish",
"fao": "Faroese"
},
"translations": {
"deu": "F\u00e4r\u00f6er-Inseln",
"fra": "\u00celes F\u00e9ro\u00e9",
"hrv": "Farski Otoci",
"ita": "Isole Far Oer",
"jpn": "\u30d5\u30a7\u30ed\u30fc\u8af8\u5cf6",
"nld": "Faer\u00f6er",
"rus": "\u0424\u0430\u0440\u0435\u0440\u0441\u043a\u0438\u0435 \u043e\u0441\u0442\u0440\u043e\u0432\u0430",
"spa": "Islas Faroe"
},
"latlng": [62, -7],
"demonym": "Faroese",
"borders": [],
"area": 1393
},
{
"name": {
"common": "Fiji",
"official": "Republic of Fiji",
"native": {
"common": "Fiji",
"official": "Republic of Fiji"
}
},
"tld": [".fj"],
"cca2": "FJ",
"ccn3": "242",
"cca3": "FJI",
"currency": ["FJD"],
"callingCode": ["679"],
"capital": "Suva",
"altSpellings": ["FJ", "Viti", "Republic of Fiji", "Matanitu ko Viti", "Fij\u012b Ga\u1e47ar\u0101jya"],
"relevance": "0",
"region": "Oceania",
"subregion": "Melanesia",
"nativeLanguage": "eng",
"languages": {
"eng": "English",
"fij": "Fijian",
"hif": "Fiji Hindi"
},
"translations": {
"deu": "Fidschi",
"fra": "Fidji",
"hrv": "Fi\u0111i",
"ita": "Figi",
"jpn": "\u30d5\u30a3\u30b8\u30fc",
"nld": "Fiji",
"rus": "\u0424\u0438\u0434\u0436\u0438",
"spa": "Fiyi"
},
"latlng": [-18, 175],
"demonym": "Fijian",
"borders": [],
"area": 18272
},
{
"name": {
"common": "Finland",
"official": "Republic of FinlandFinland",
"native": {
"common": "Suomi",
"official": "Tasavallan FinlandFinland"
}
},
"tld": [".fi"],
"cca2": "FI",
"ccn3": "246",
"cca3": "FIN",
"currency": ["EUR"],
"callingCode": ["358"],
"capital": "Helsinki",
"altSpellings": ["FI", "Suomi", "Republic of Finland", "Suomen tasavalta", "Republiken Finland"],
"relevance": "0.5",
"region": "Europe",
"subregion": "Northern Europe",
"nativeLanguage": "fin",
"languages": {
"fin": "Finnish",
"swe": "Swedish"
},
"translations": {
"deu": "Finnland",
"fra": "Finlande",
"hrv": "Finska",
"ita": "Finlandia",
"jpn": "\u30d5\u30a3\u30f3\u30e9\u30f3\u30c9",
"nld": "Finland",
"rus": "\u0424\u0438\u043d\u043b\u044f\u043d\u0434\u0438\u044f",
"spa": "Finlandia"
},
"latlng": [64, 26],
"demonym": "Finnish",
"borders": ["NOR", "SWE", "RUS"],
"area": 338424
},
{
"name": {
"common": "France",
"official": "French Republic",
"native": {
"common": "France",
"official": "R\u00e9publique fran\u00e7aise"
}
},
"tld": [".fr"],
"cca2": "FR",
"ccn3": "250",
"cca3": "FRA",
"currency": ["EUR"],
"callingCode": ["33"],
"capital": "Paris",
"altSpellings": ["FR", "French Republic", "R\u00e9publique fran\u00e7aise"],
"relevance": "2.5",
"region": "Europe",
"subregion": "Western Europe",
"nativeLanguage": "fra",
"languages": {
"fra": "French"
},
"translations": {
"deu": "Frankreich",
"fra": "France",
"hrv": "Francuska",
"ita": "Francia",
"jpn": "\u30d5\u30e9\u30f3\u30b9",
"nld": "Frankrijk",
"rus": "\u0424\u0440\u0430\u043d\u0446\u0438\u044f",
"spa": "Francia"
},
"latlng": [46, 2],
"demonym": "French",
"borders": ["AND", "BEL", "DEU", "ITA", "LUX", "MCO", "ESP", "CHE"],
"area": 551695
},
{
"name": {
"common": "French Guiana",
"official": "Guiana",
"native": {
"common": "Guyane fran\u00e7aise",
"official": "Guyanes"
}
},
"tld": [".gf"],
"cca2": "GF",
"ccn3": "254",
"cca3": "GUF",
"currency": ["EUR"],
"callingCode": ["594"],
"capital": "Cayenne",
"altSpellings": ["GF", "Guiana", "Guyane"],
"relevance": "0",
"region": "Americas",
"subregion": "South America",
"nativeLanguage": "fra",
"languages": {
"fra": "French"
},
"translations": {
"deu": "Franz\u00f6sisch Guyana",
"fra": "Guayane",
"hrv": "Francuska Gvajana",
"ita": "Guyana francese",
"jpn": "\u30d5\u30e9\u30f3\u30b9\u9818\u30ae\u30a2\u30ca",
"nld": "Frans-Guyana",
"rus": "\u0424\u0440\u0430\u043d\u0446\u0443\u0437\u0441\u043a\u0430\u044f \u0413\u0432\u0438\u0430\u043d\u0430",
"spa": "Guayana Francesa"
},
"latlng": [4, -53],
"demonym": "",
"borders": ["BRA", "SUR"],
"area": 83534
},
{
"name": {
"common": "French Polynesia",
"official": "French Polynesia",
"native": {
"common": "Polyn\u00e9sie fran\u00e7aise",
"official": "Polyn\u00e9sie fran\u00e7aise"
}
},
"tld": [".pf"],
"cca2": "PF",
"ccn3": "258",
"cca3": "PYF",
"currency": ["XPF"],
"callingCode": ["689"],
"capital": "Papeet\u0113",
"altSpellings": ["PF", "Polyn\u00e9sie fran\u00e7aise", "French Polynesia", "P\u014dr\u012bnetia Far\u0101ni"],
"relevance": "0",
"region": "Oceania",
"subregion": "Polynesia",
"nativeLanguage": "fra",
"languages": {
"fra": "French"
},
"translations": {
"deu": "Franz\u00f6sisch-Polynesien",
"fra": "Polyn\u00e9sie fran\u00e7aise",
"hrv": "Francuska Polinezija",
"ita": "Polinesia Francese",
"jpn": "\u30d5\u30e9\u30f3\u30b9\u9818\u30dd\u30ea\u30cd\u30b7\u30a2",
"nld": "Frans-Polynesi\u00eb",
"rus": "\u0424\u0440\u0430\u043d\u0446\u0443\u0437\u0441\u043a\u0430\u044f \u041f\u043e\u043b\u0438\u043d\u0435\u0437\u0438\u044f",
"spa": "Polinesia Francesa"
},
"latlng": [-15, -140],
"demonym": "French Polynesian",
"borders": [],
"area": 4167
},
{
"name": {
"common": "French Southern and Antarctic Lands",
"official": "Territory of the French Southern and Antarctic Lands",
"native": {
"common": "Territoire des Terres australes et antarctiques fran\u00e7aises",
"official": "Territoire du Sud fran\u00e7aises et des terres de l'Antarctique"
}
},
"tld": [".tf"],
"cca2": "TF",
"ccn3": "260",
"cca3": "ATF",
"currency": ["EUR"],
"callingCode": [],
"capital": "Port-aux-Fran\u00e7ais",
"altSpellings": ["TF"],
"relevance": "0",
"region": "",
"subregion": "",
"nativeLanguage": "fra",
"languages": {
"fra": "French"
},
"translations": {
"deu": "Franz\u00f6sische S\u00fcd- und Antarktisgebiete",
"fra": "Terres australes et antarctiques fran\u00e7aises",
"hrv": "Francuski ju\u017eni i antarkti\u010dki teritoriji",
"ita": "Territori Francesi del Sud",
"jpn": "\u30d5\u30e9\u30f3\u30b9\u9818\u5357\u65b9\u30fb\u5357\u6975\u5730\u57df",
"nld": "Franse Gebieden in de zuidelijke Indische Oceaan",
"rus": "\u0424\u0440\u0430\u043d\u0446\u0443\u0437\u0441\u043a\u0438\u0435 \u042e\u0436\u043d\u044b\u0435 \u0438 \u0410\u043d\u0442\u0430\u0440\u043a\u0442\u0438\u0447\u0435\u0441\u043a\u0438\u0435 \u0442\u0435\u0440\u0440\u0438\u0442\u043e\u0440\u0438\u0438",
"spa": "Tierras Australes y Ant\u00e1rticas Francesas"
},
"latlng": [-49.25, 69.167],
"demonym": "French",
"borders": [],
"area": 7747
},
{
"name": {
"common": "Gabon",
"official": "Gabonese Republic",
"native": {
"common": "Gabon",
"official": "R\u00e9publique gabonaise"
}
},
"tld": [".ga"],
"cca2": "GA",
"ccn3": "266",
"cca3": "GAB",
"currency": ["XAF"],
"callingCode": ["241"],
"capital": "Libreville",
"altSpellings": ["GA", "Gabonese Republic", "R\u00e9publique Gabonaise"],
"relevance": "0",
"region": "Africa",
"subregion": "Middle Africa",
"nativeLanguage": "fra",
"languages": {
"fra": "French"
},
"translations": {
"deu": "Gabun",
"fra": "Gabon",
"hrv": "Gabon",
"ita": "Gabon",
"jpn": "\u30ac\u30dc\u30f3",
"nld": "Gabon",
"rus": "\u0413\u0430\u0431\u043e\u043d",
"spa": "Gab\u00f3n"
},
"latlng": [-1, 11.75],
"demonym": "Gabonese",
"borders": ["CMR", "COG", "GNQ"],
"area": 267668
},
{
"name": {
"common": "Gambia",
"official": "Republic of the Gambia",
"native": {
"common": "Gambia",
"official": "Republic of the Gambia"
}
},
"tld": [".gm"],
"cca2": "GM",
"ccn3": "270",
"cca3": "GMB",
"currency": ["GMD"],
"callingCode": ["220"],
"capital": "Banjul",
"altSpellings": ["GM", "Republic of the Gambia"],
"relevance": "0",
"region": "Africa",
"subregion": "Western Africa",
"nativeLanguage": "eng",
"languages": {
"eng": "English"
},
"translations": {
"deu": "Gambia",
"fra": "Gambie",
"hrv": "Gambija",
"ita": "Gambia",
"jpn": "\u30ac\u30f3\u30d3\u30a2",
"nld": "Gambia",
"rus": "\u0413\u0430\u043c\u0431\u0438\u044f",
"spa": "Gambia"
},
"latlng": [13.46666666, -16.56666666],
"demonym": "Gambian",
"borders": ["SEN"],
"area": 10689
},
{
"name": {
"common": "Georgia",
"official": "Georgia",
"native": {
"common": "\u10e1\u10d0\u10e5\u10d0\u10e0\u10d7\u10d5\u10d4\u10da\u10dd",
"official": "\u10e1\u10d0\u10e5\u10d0\u10e0\u10d7\u10d5\u10d4\u10da\u10dd"
}
},
"tld": [".ge"],
"cca2": "GE",
"ccn3": "268",
"cca3": "GEO",
"currency": ["GEL"],
"callingCode": ["995"],
"capital": "Tbilisi",
"altSpellings": ["GE", "Sakartvelo"],
"relevance": "0",
"region": "Asia",
"subregion": "Western Asia",
"nativeLanguage": "kat",
"languages": {
"kat": "Georgian"
},
"translations": {
"deu": "Georgien",
"fra": "G\u00e9orgie",
"hrv": "Gruzija",
"ita": "Georgia",
"jpn": "\u30b0\u30eb\u30b8\u30a2",
"nld": "Georgi\u00eb",
"rus": "\u0413\u0440\u0443\u0437\u0438\u044f",
"spa": "Georgia"
},
"latlng": [42, 43.5],
"demonym": "Georgian",
"borders": ["ARM", "AZE", "RUS", "TUR"],
"area": 69700
},
{
"name": {
"common": "Germany",
"official": "Federal Republic of Germany",
"native": {
"common": "Deutschland",
"official": "Bundesrepublik Deutschland"
}
},
"tld": [".de"],
"cca2": "DE",
"ccn3": "276",
"cca3": "DEU",
"currency": ["EUR"],
"callingCode": ["49"],
"capital": "Berlin",
"altSpellings": ["DE", "Federal Republic of Germany", "Bundesrepublik Deutschland"],
"relevance": "3",
"region": "Europe",
"subregion": "Western Europe",
"nativeLanguage": "deu",
"languages": {
"deu": "German"
},
"translations": {
"deu": "Deutschland",
"fra": "Allemagne",
"hrv": "Njema\u010dka",
"ita": "Germania",
"jpn": "\u30c9\u30a4\u30c4",
"nld": "Duitsland",
"rus": "\u0413\u0435\u0440\u043c\u0430\u043d\u0438\u044f",
"spa": "Alemania"
},
"latlng": [51, 9],
"demonym": "German",
"borders": ["AUT", "BEL", "CZE", "DNK", "FRA", "LUX", "NLD", "POL", "CHE"],
"area": 357114
},
{
"name": {
"common": "Ghana",
"official": "Republic of Ghana",
"native": {
"common": "Ghana",
"official": "Republic of Ghana"
}
},
"tld": [".gh"],
"cca2": "GH",
"ccn3": "288",
"cca3": "GHA",
"currency": ["GHS"],
"callingCode": ["233"],
"capital": "Accra",
"altSpellings": ["GH"],
"relevance": "0",
"region": "Africa",
"subregion": "Western Africa",
"nativeLanguage": "eng",
"languages": {
"eng": "English"
},
"translations": {
"deu": "Ghana",
"fra": "Ghana",
"hrv": "Gana",
"ita": "Ghana",
"jpn": "\u30ac\u30fc\u30ca",
"nld": "Ghana",
"rus": "\u0413\u0430\u043d\u0430",
"spa": "Ghana"
},
"latlng": [8, -2],
"demonym": "Ghanaian",
"borders": ["BFA", "CIV", "TGO"],
"area": 238533
},
{
"name": {
"common": "Gibraltar",
"official": "Gibraltar",
"native": {
"common": "Gibraltar",
"official": "Gibraltar"
}
},
"tld": [".gi"],
"cca2": "GI",
"ccn3": "292",
"cca3": "GIB",
"currency": ["GIP"],
"callingCode": ["350"],
"capital": "Gibraltar",
"altSpellings": ["GI"],
"relevance": "0.5",
"region": "Europe",
"subregion": "Southern Europe",
"nativeLanguage": "eng",
"languages": {
"eng": "English"
},
"translations": {
"deu": "Gibraltar",
"fra": "Gibraltar",
"hrv": "Gibraltar",
"ita": "Gibilterra",
"jpn": "\u30b8\u30d6\u30e9\u30eb\u30bf\u30eb",
"nld": "Gibraltar",
"rus": "\u0413\u0438\u0431\u0440\u0430\u043b\u0442\u0430\u0440",
"spa": "Gibraltar"
},
"latlng": [36.13333333, -5.35],
"demonym": "Gibraltar",
"borders": ["ESP"],
"area": 6
},
{
"name": {
"common": "Greece",
"official": "Hellenic Republic",
"native": {
"common": "\u0395\u03bb\u03bb\u03ac\u03b4\u03b1",
"official": "\u0395\u03bb\u03bb\u03b7\u03bd\u03b9\u03ba\u03ae \u0394\u03b7\u03bc\u03bf\u03ba\u03c1\u03b1\u03c4\u03af\u03b1"
}
},
"tld": [".gr"],
"cca2": "GR",
"ccn3": "300",
"cca3": "GRC",
"currency": ["EUR"],
"callingCode": ["30"],
"capital": "Athens",
"altSpellings": ["GR", "Ell\u00e1da", "Hellenic Republic", "\u0395\u03bb\u03bb\u03b7\u03bd\u03b9\u03ba\u03ae \u0394\u03b7\u03bc\u03bf\u03ba\u03c1\u03b1\u03c4\u03af\u03b1"],
"relevance": "1.5",
"region": "Europe",
"subregion": "Southern Europe",
"nativeLanguage": "ell",
"languages": {
"ell": "Greek"
},
"translations": {
"deu": "Griechenland",
"fra": "Gr\u00e8ce",
"hrv": "Gr\u010dka",
"ita": "Grecia",
"jpn": "\u30ae\u30ea\u30b7\u30e3",
"nld": "Griekenland",
"rus": "\u0413\u0440\u0435\u0446\u0438\u044f",
"spa": "Grecia"
},
"latlng": [39, 22],
"demonym": "Greek",
"borders": ["ALB", "BGR", "TUR", "MKD"],
"area": 131990
},
{
"name": {
"common": "Greenland",
"official": "Greenland",
"native": {
"common": "Kalaallit Nunaat",
"official": "Kalaallit Nunaat"
}
},
"tld": [".gl"],
"cca2": "GL",
"ccn3": "304",
"cca3": "GRL",
"currency": ["DKK"],
"callingCode": ["299"],
"capital": "Nuuk",
"altSpellings": ["GL", "Gr\u00f8nland"],
"relevance": "0.5",
"region": "Americas",
"subregion": "Northern America",
"nativeLanguage": "kal",
"languages": {
"kal": "Greenlandic"
},
"translations": {
"deu": "Gr\u00f6nland",
"fra": "Groenland",
"hrv": "Grenland",
"ita": "Groenlandia",
"jpn": "\u30b0\u30ea\u30fc\u30f3\u30e9\u30f3\u30c9",
"nld": "Groenland",
"rus": "\u0413\u0440\u0435\u043d\u043b\u0430\u043d\u0434\u0438\u044f",
"spa": "Groenlandia"
},
"latlng": [72, -40],
"demonym": "Greenlandic",
"borders": [],
"area": 2166086
},
{
"name": {
"common": "Grenada",
"official": "Grenada",
"native": {
"common": "Grenada",
"official": "Grenada"
}
},
"tld": [".gd"],
"cca2": "GD",
"ccn3": "308",
"cca3": "GRD",
"currency": ["XCD"],
"callingCode": ["1473"],
"capital": "St. George's",
"altSpellings": ["GD"],
"relevance": "0",
"region": "Americas",
"subregion": "Caribbean",
"nativeLanguage": "eng",
"languages": {
"eng": "English"
},
"translations": {
"deu": "Grenada",
"fra": "Grenade",
"hrv": "Grenada",
"ita": "Grenada",
"jpn": "\u30b0\u30ec\u30ca\u30c0",
"nld": "Grenada",
"rus": "\u0413\u0440\u0435\u043d\u0430\u0434\u0430",
"spa": "Grenada"
},
"latlng": [12.11666666, -61.66666666],
"demonym": "Grenadian",
"borders": [],
"area": 344
},
{
"name": {
"common": "Guadeloupe",
"official": "Guadeloupe",
"native": {
"common": "Guadeloupe",
"official": "Guadeloupe"
}
},
"tld": [".gp"],
"cca2": "GP",
"ccn3": "312",
"cca3": "GLP",
"currency": ["EUR"],
"callingCode": ["590"],
"capital": "Basse-Terre",
"altSpellings": ["GP", "Gwadloup"],
"relevance": "0",
"region": "Americas",
"subregion": "Caribbean",
"nativeLanguage": "fra",
"languages": {
"fra": "French"
},
"translations": {
"deu": "Guadeloupe",
"fra": "Guadeloupe",
"hrv": "Gvadalupa",
"ita": "Guadeloupa",
"jpn": "\u30b0\u30a2\u30c9\u30eb\u30fc\u30d7",
"nld": "Guadeloupe",
"rus": "\u0413\u0432\u0430\u0434\u0435\u043b\u0443\u043f\u0430",
"spa": "Guadalupe"
},
"latlng": [16.25, -61.583333],
"demonym": "Guadeloupian",
"borders": [],
"area": 1628
},
{
"name": {
"common": "Guam",
"official": "Guam",
"native": {
"common": "Guam",
"official": "Guam"
}
},
"tld": [".gu"],
"cca2": "GU",
"ccn3": "316",
"cca3": "GUM",
"currency": ["USD"],
"callingCode": ["1671"],
"capital": "Hag\u00e5t\u00f1a",
"altSpellings": ["GU", "Gu\u00e5h\u00e5n"],
"relevance": "0",
"region": "Oceania",
"subregion": "Micronesia",
"nativeLanguage": "eng",
"languages": {
"cha": "Chamorro",
"eng": "English",
"spa": "Spanish"
},
"translations": {
"deu": "Guam",
"fra": "Guam",
"hrv": "Guam",
"ita": "Guam",
"jpn": "\u30b0\u30a2\u30e0",
"nld": "Guam",
"rus": "\u0413\u0443\u0430\u043c",
"spa": "Guam"
},
"latlng": [13.46666666, 144.78333333],
"demonym": "Guamanian",
"borders": [],
"area": 549
},
{
"name": {
"common": "Guatemala",
"official": "Republic of Guatemala",
"native": {
"common": "Guatemala",
"official": "Rep\u00fablica de Guatemala"
}
},
"tld": [".gt"],
"cca2": "GT",
"ccn3": "320",
"cca3": "GTM",
"currency": ["GTQ"],
"callingCode": ["502"],
"capital": "Guatemala City",
"altSpellings": ["GT"],
"relevance": "0",
"region": "Americas",
"subregion": "Central America",
"nativeLanguage": "spa",
"languages": {
"spa": "Spanish"
},
"translations": {
"deu": "Guatemala",
"fra": "Guatemala",
"hrv": "Gvatemala",
"ita": "Guatemala",
"jpn": "\u30b0\u30a2\u30c6\u30de\u30e9",
"nld": "Guatemala",
"rus": "\u0413\u0432\u0430\u0442\u0435\u043c\u0430\u043b\u0430",
"spa": "Guatemala"
},
"latlng": [15.5, -90.25],
"demonym": "Guatemalan",
"borders": ["BLZ", "SLV", "HND", "MEX"],
"area": 108889
},
{
"name": {
"common": "Guernsey",
"official": "Bailiwick of Guernsey",
"native": {
"common": "Guernsey",
"official": "Bailiwick of Guernsey"
}
},
"tld": [".gg"],
"cca2": "GG",
"ccn3": "831",
"cca3": "GGY",
"currency": ["GBP"],
"callingCode": ["44"],
"capital": "St. Peter Port",
"altSpellings": ["GG", "Bailiwick of Guernsey", "Bailliage de Guernesey"],
"relevance": "0.5",
"region": "Europe",
"subregion": "Northern Europe",
"nativeLanguage": "eng",
"languages": {
"eng": "English",
"fra": "French"
},
"translations": {
"deu": "Guernsey",
"fra": "Guernesey",
"hrv": "Guernsey",
"ita": "Guernsey",
"jpn": "\u30ac\u30fc\u30f3\u30b8\u30fc",
"nld": "Guernsey",
"rus": "\u0413\u0435\u0440\u043d\u0441\u0438",
"spa": "Guernsey"
},
"latlng": [49.46666666, -2.58333333],
"demonym": "Channel Islander",
"borders": [],
"area": 78
},
{
"name": {
"common": "Guinea",
"official": "Republic of Guinea",
"native": {
"common": "Guin\u00e9e",
"official": "R\u00e9publique de Guin\u00e9e"
}
},
"tld": [".gn"],
"cca2": "GN",
"ccn3": "324",
"cca3": "GIN",
"currency": ["GNF"],
"callingCode": ["224"],
"capital": "Conakry",
"altSpellings": ["GN", "Republic of Guinea", "R\u00e9publique de Guin\u00e9e"],
"relevance": "0",
"region": "Africa",
"subregion": "Western Africa",
"nativeLanguage": "fra",
"languages": {
"fra": "French"
},
"translations": {
"deu": "Guinea",
"fra": "Guin\u00e9e",
"hrv": "Gvineja",
"ita": "Guinea",
"jpn": "\u30ae\u30cb\u30a2",
"nld": "Guinee",
"rus": "\u0413\u0432\u0438\u043d\u0435\u044f",
"spa": "Guinea"
},
"latlng": [11, -10],
"demonym": "Guinean",
"borders": ["CIV", "GNB", "LBR", "MLI", "SEN", "SLE"],
"area": 245857
},
{
"name": {
"common": "Guinea-Bissau",
"official": "Republic of Guinea-Bissau",
"native": {
"common": "Guin\u00e9-Bissau",
"official": "Rep\u00fablica da Guin\u00e9-Bissau"
}
},
"tld": [".gw"],
"cca2": "GW",
"ccn3": "624",
"cca3": "GNB",
"currency": ["XOF"],
"callingCode": ["245"],
"capital": "Bissau",
"altSpellings": ["GW", "Republic of Guinea-Bissau", "Rep\u00fablica da Guin\u00e9-Bissau"],
"relevance": "0",
"region": "Africa",
"subregion": "Western Africa",
"nativeLanguage": "por",
"languages": {
"por": "Portuguese"
},
"translations": {
"deu": "Guinea-Bissau",
"fra": "Guin\u00e9e-Bissau",
"hrv": "Gvineja Bisau",
"ita": "Guinea-Bissau",
"jpn": "\u30ae\u30cb\u30a2\u30d3\u30b5\u30a6",
"nld": "Guinee-Bissau",
"rus": "\u0413\u0432\u0438\u043d\u0435\u044f-\u0411\u0438\u0441\u0430\u0443",
"spa": "Guinea-Bis\u00e1u"
},
"latlng": [12, -15],
"demonym": "Guinea-Bissauan",
"borders": ["GIN", "SEN"],
"area": 36125
},
{
"name": {
"common": "Guyana",
"official": "Co-operative Republic of Guyana",
"native": {
"common": "Guyana",
"official": "Co-operative Republic of Guyana"
}
},
"tld": [".gy"],
"cca2": "GY",
"ccn3": "328",
"cca3": "GUY",
"currency": ["GYD"],
"callingCode": ["592"],
"capital": "Georgetown",
"altSpellings": ["GY", "Co-operative Republic of Guyana"],
"relevance": "0",
"region": "Americas",
"subregion": "South America",
"nativeLanguage": "eng",
"languages": {
"eng": "English"
},
"translations": {
"deu": "Guyana",
"fra": "Guyane",
"hrv": "Gvajana",
"ita": "Guyana",
"jpn": "\u30ac\u30a4\u30a2\u30ca",
"nld": "Guyana",
"rus": "\u0413\u0430\u0439\u0430\u043d\u0430",
"spa": "Guyana"
},
"latlng": [5, -59],
"demonym": "Guyanese",
"borders": ["BRA", "SUR", "VEN"],
"area": 214969
},
{
"name": {
"common": "Haiti",
"official": "Republic of Haiti",
"native": {
"common": "Ha\u00efti",
"official": "R\u00e9publique d'Ha\u00efti"
}
},
"tld": [".ht"],
"cca2": "HT",
"ccn3": "332",
"cca3": "HTI",
"currency": ["HTG", "USD"],
"callingCode": ["509"],
"capital": "Port-au-Prince",
"altSpellings": ["HT", "Republic of Haiti", "R\u00e9publique d'Ha\u00efti", "Repiblik Ayiti"],
"relevance": "0",
"region": "Americas",
"subregion": "Caribbean",
"nativeLanguage": "fra",
"languages": {
"fra": "French",
"hat": "Haitian Creole"
},
"translations": {
"deu": "Haiti",
"fra": "Ha\u00efti",
"hrv": "Haiti",
"ita": "Haiti",
"jpn": "\u30cf\u30a4\u30c1",
"nld": "Ha\u00efti",
"rus": "\u0413\u0430\u0438\u0442\u0438",
"spa": "Haiti"
},
"latlng": [19, -72.41666666],
"demonym": "Haitian",
"borders": ["DOM"],
"area": 27750
},
{
"name": {
"common": "Heard Island and McDonald Islands",
"official": "Heard Island and McDonald Islands",
"native": {
"common": "Heard Island and McDonald Islands",
"official": "Heard Island and McDonald Islands"
}
},
"tld": [".hm", ".aq"],
"cca2": "HM",
"ccn3": "334",
"cca3": "HMD",
"currency": ["AUD"],
"callingCode": [],
"capital": "",
"altSpellings": ["HM"],
"relevance": "0",
"region": "",
"subregion": "",
"nativeLanguage": "eng",
"languages": {
"eng": "English"
},
"translations": {
"deu": "Heard und die McDonaldinseln",
"fra": "\u00celes Heard-et-MacDonald",
"hrv": "Otok Heard i oto\u010dje McDonald",
"ita": "Isole Heard e McDonald",
"jpn": "\u30cf\u30fc\u30c9\u5cf6\u3068\u30de\u30af\u30c9\u30ca\u30eb\u30c9\u8af8\u5cf6",
"nld": "Heard- en McDonaldeilanden",
"rus": "\u041e\u0441\u0442\u0440\u043e\u0432 \u0425\u0435\u0440\u0434 \u0438 \u043e\u0441\u0442\u0440\u043e\u0432\u0430 \u041c\u0430\u043a\u0434\u043e\u043d\u0430\u043b\u044c\u0434",
"spa": "Islas Heard y McDonald"
},
"latlng": [-53.1, 72.51666666],
"demonym": "Heard and McDonald Islander",
"borders": [],
"area": 412
},
{
"name": {
"common": "Vatican City",
"official": "Vatican City State",
"native": {
"common": "Vaticano",
"official": "Stato della Citt\u00E0 del Vaticano"
}
},
"tld": [".va"],
"cca2": "VA",
"ccn3": "336",
"cca3": "VAT",
"currency": ["EUR"],
"callingCode": ["3906698", "379"],
"capital": "Vatican City",
"altSpellings": ["VA", "Vatican City State", "Stato della Citt\u00e0 del Vaticano"],
"relevance": "0.5",
"region": "Europe",
"subregion": "Southern Europe",
"nativeLanguage": "ita",
"languages": {
"ita": "Italian",
"lat": "Latin"
},
"translations": {
"deu": "Vatikanstadt",
"fra": "Cit\u00e9 du Vatican",
"hrv": "Vatikan",
"ita": "Citt\u00e0 del Vaticano",
"jpn": "\u30d0\u30c1\u30ab\u30f3\u5e02\u56fd",
"nld": "Vaticaanstad",
"rus": "\u0412\u0430\u0442\u0438\u043a\u0430\u043d",
"spa": "Ciudad del Vaticano"
},
"latlng": [41.9, 12.45],
"demonym": "Italian",
"borders": ["ITA"],
"area": 0.44
},
{
"name": {
"common": "Honduras",
"official": "Republic of Honduras",
"native": {
"common": "Honduras",
"official": "Rep\u00fablica de Honduras"
}
},
"tld": [".hn"],
"cca2": "HN",
"ccn3": "340",
"cca3": "HND",
"currency": ["HNL"],
"callingCode": ["504"],
"capital": "Tegucigalpa",
"altSpellings": ["HN", "Republic of Honduras", "Rep\u00fablica de Honduras"],
"relevance": "0",
"region": "Americas",
"subregion": "Central America",
"nativeLanguage": "spa",
"languages": {
"spa": "Spanish"
},
"translations": {
"deu": "Honduras",
"fra": "Honduras",
"hrv": "Honduras",
"ita": "Honduras",
"jpn": "\u30db\u30f3\u30b8\u30e5\u30e9\u30b9",
"nld": "Honduras",
"rus": "\u0413\u043e\u043d\u0434\u0443\u0440\u0430\u0441",
"spa": "Honduras"
},
"latlng": [15, -86.5],
"demonym": "Honduran",
"borders": ["GTM", "SLV", "NIC"],
"area": 112492
},
{
"name": {
"common": "Hong Kong",
"official": "Hong Kong Special Administrative Region of the People's Republic of China",
"native": {
"common": "\u9999\u6e2f",
"official": "\u9999\u6e2f\u4e2d\u56fd\u7279\u522b\u884c\u653f\u533a\u7684\u4eba\u6c11\u5171\u548c\u56fd"
}
},
"tld": [".hk", ".\u9999\u6e2f"],
"cca2": "HK",
"ccn3": "344",
"cca3": "HKG",
"currency": ["HKD"],
"callingCode": ["852"],
"capital": "City of Victoria",
"altSpellings": ["HK"],
"relevance": "0",
"region": "Asia",
"subregion": "Eastern Asia",
"nativeLanguage": "zho",
"languages": {
"eng": "English",
"zho": "Chinese"
},
"translations": {
"deu": "Hongkong",
"fra": "Hong Kong",
"hrv": "Hong Kong",
"ita": "Hong Kong",
"jpn": "\u9999\u6e2f",
"nld": "Hongkong",
"rus": "\u0413\u043e\u043d\u043a\u043e\u043d\u0433",
"spa": "Hong Kong"
},
"latlng": [22.267, 114.188],
"demonym": "Hong Konger",
"borders": ["CHN"],
"area": 1104
},
{
"name": {
"common": "Hungary",
"official": "Hungary",
"native": {
"common": "Magyarorsz\u00e1g",
"official": "Magyarorsz\u00e1g"
}
},
"tld": [".hu"],
"cca2": "HU",
"ccn3": "348",
"cca3": "HUN",
"currency": ["HUF"],
"callingCode": ["36"],
"capital": "Budapest",
"altSpellings": ["HU"],
"relevance": "0",
"region": "Europe",
"subregion": "Eastern Europe",
"nativeLanguage": "hun",
"languages": {
"hun": "Hungarian"
},
"translations": {
"deu": "Ungarn",
"fra": "Hongrie",
"hrv": "Ma\u0111arska",
"ita": "Ungheria",
"jpn": "\u30cf\u30f3\u30ac\u30ea\u30fc",
"nld": "Hongarije",
"rus": "\u0412\u0435\u043d\u0433\u0440\u0438\u044f",
"spa": "Hungr\u00eda"
},
"latlng": [47, 20],
"demonym": "Hungarian",
"borders": ["AUT", "HRV", "ROU", "SRB", "SVK", "SVN", "UKR"],
"area": 93028
},
{
"name": {
"common": "Iceland",
"official": "Iceland",
"native": {
"common": "\u00cdsland",
"official": "\u00cdsland"
}
},
"tld": [".is"],
"cca2": "IS",
"ccn3": "352",
"cca3": "ISL",
"currency": ["ISK"],
"callingCode": ["354"],
"capital": "Reykjavik",
"altSpellings": ["IS", "Island", "Republic of Iceland", "L\u00fd\u00f0veldi\u00f0 \u00cdsland"],
"relevance": "0",
"region": "Europe",
"subregion": "Northern Europe",
"nativeLanguage": "isl",
"languages": {
"isl": "Icelandic"
},
"translations": {
"deu": "Island",
"fra": "Islande",
"hrv": "Island",
"ita": "Islanda",
"jpn": "\u30a2\u30a4\u30b9\u30e9\u30f3\u30c9",
"nld": "IJsland",
"rus": "\u0418\u0441\u043b\u0430\u043d\u0434\u0438\u044f",
"spa": "Islandia"
},
"latlng": [65, -18],
"demonym": "Icelander",
"borders": [],
"area": 103000
},
{
"name": {
"common": "India",
"official": "Republic of India",
"native": {
"common": "\u092d\u093e\u0930\u0924",
"official": "\u092d\u093e\u0930\u0924 \u0917\u0923\u0930\u093e\u091c\u094d\u092f"
}
},
"tld": [".in"],
"cca2": "IN",
"ccn3": "356",
"cca3": "IND",
"currency": ["INR"],
"callingCode": ["91"],
"capital": "New Delhi",
"altSpellings": ["IN", "Bh\u0101rat", "Republic of India", "Bharat Ganrajya"],
"relevance": "3",
"region": "Asia",
"subregion": "Southern Asia",
"nativeLanguage": "hin",
"languages": {
"eng": "English",
"hin": "Hindi"
},
"translations": {
"deu": "Indien",
"fra": "Inde",
"hrv": "Indija",
"ita": "India",
"jpn": "\u30a4\u30f3\u30c9",
"nld": "India",
"rus": "\u0418\u043d\u0434\u0438\u044f",
"spa": "India"
},
"latlng": [20, 77],
"demonym": "Indian",
"borders": ["AFG", "BGD", "BTN", "MMR", "CHN", "NPL", "PAK", "LKA"],
"area": 3287590
},
{
"name": {
"common": "Indonesia",
"official": "Republic of Indonesia",
"native": {
"common": "Indonesia",
"official": "Republik Indonesia"
}
},
"tld": [".id"],
"cca2": "ID",
"ccn3": "360",
"cca3": "IDN",
"currency": ["IDR"],
"callingCode": ["62"],
"capital": "Jakarta",
"altSpellings": ["ID", "Republic of Indonesia", "Republik Indonesia"],
"relevance": "2",
"region": "Asia",
"subregion": "South-Eastern Asia",
"nativeLanguage": "ind",
"languages": {
"ind": "Indonesian"
},
"translations": {
"deu": "Indonesien",
"fra": "Indon\u00e9sie",
"hrv": "Indonezija",
"ita": "Indonesia",
"jpn": "\u30a4\u30f3\u30c9\u30cd\u30b7\u30a2",
"nld": "Indonesi\u00eb",
"rus": "\u0418\u043d\u0434\u043e\u043d\u0435\u0437\u0438\u044f",
"spa": "Indonesia"
},
"latlng": [-5, 120],
"demonym": "Indonesian",
"borders": ["TLS", "MYS", "PNG"],
"area": 1904569
},
{
"name": {
"common": "Ivory Coast",
"official": "Republic of C\u00f4te d'Ivoire",
"native": {
"common": "C\u00f4te d'Ivoire",
"official": "R\u00e9publique de C\u00f4te d'Ivoire"
}
},
"tld": [".ci"],
"cca2": "CI",
"ccn3": "384",
"cca3": "CIV",
"currency": ["XOF"],
"callingCode": ["225"],
"capital": "Yamoussoukro",
"altSpellings": ["CI", "Ivory Coast", "Republic of C\u00f4te d'Ivoire", "R\u00e9publique de C\u00f4te d'Ivoire"],
"relevance": "0",
"region": "Africa",
"subregion": "Western Africa",
"nativeLanguage": "fra",
"languages": {
"fra": "French"
},
"translations": {
"deu": "Elfenbeink\u00fcste",
"fra": "C\u00f4te d'Ivoire",
"hrv": "Obala Bjelokosti",
"ita": "Costa D'Avorio",
"jpn": "\u30b3\u30fc\u30c8\u30b8\u30dc\u30ef\u30fc\u30eb",
"nld": "Ivoorkust",
"rus": "\u041a\u043e\u0442-\u0434\u2019\u0418\u0432\u0443\u0430\u0440",
"spa": "Costa de Marfil"
},
"latlng": [8, -5],
"demonym": "Ivorian",
"borders": ["BFA", "GHA", "GIN", "LBR", "MLI"],
"area": 322463
},
{
"name": {
"common": "Iran",
"official": "Islamic Republic of Iran",
"native": {
"common": "\u0627\u06cc\u0631\u0627\u0646",
"official": "\u062c\u0645\u0647\u0648\u0631\u06cc \u0627\u0633\u0644\u0627\u0645\u06cc \u0627\u06cc\u0631\u0627\u0646"
}
},
"tld": [".ir", "\u0627\u06cc\u0631\u0627\u0646."],
"cca2": "IR",
"ccn3": "364",
"cca3": "IRN",
"currency": ["IRR"],
"callingCode": ["98"],
"capital": "Tehran",
"altSpellings": ["IR", "Islamic Republic of Iran", "Jomhuri-ye Esl\u0101mi-ye Ir\u0101n"],
"relevance": "0",
"region": "Asia",
"subregion": "Southern Asia",
"nativeLanguage": "fas",
"languages": {
"fas": "Persian"
},
"translations": {
"deu": "Iran",
"fra": "Iran",
"hrv": "Iran",
"jpn": "\u30a4\u30e9\u30f3\u30fb\u30a4\u30b9\u30e9\u30e0\u5171\u548c\u56fd",
"nld": "Iran",
"rus": "\u0418\u0440\u0430\u043d",
"spa": "Iran"
},
"latlng": [32, 53],
"demonym": "Iranian",
"borders": ["AFG", "ARM", "AZE", "IRQ", "PAK", "TUR", "TKM"],
"area": 1648195
},
{
"name": {
"common": "Iraq",
"official": "Republic of Iraq",
"native": {
"common": "\u0627\u0644\u0639\u0631\u0627\u0642",
"official": "\u062c\u0645\u0647\u0648\u0631\u064a\u0629 \u0627\u0644\u0639\u0631\u0627\u0642"
}
},
"tld": [".iq"],
"cca2": "IQ",
"ccn3": "368",
"cca3": "IRQ",
"currency": ["IQD"],
"callingCode": ["964"],
"capital": "Baghdad",
"altSpellings": ["IQ", "Republic of Iraq", "Jumh\u016briyyat al-\u2018Ir\u0101q"],
"relevance": "0",
"region": "Asia",
"subregion": "Western Asia",
"nativeLanguage": "ara",
"languages": {
"ara": "Arabic",
"arc": "Aramaic",
"kur": "Kurdish"
},
"translations": {
"deu": "Irak",
"fra": "Irak",
"hrv": "Irak",
"ita": "Iraq",
"jpn": "\u30a4\u30e9\u30af",
"nld": "Irak",
"rus": "\u0418\u0440\u0430\u043a",
"spa": "Irak"
},
"latlng": [33, 44],
"demonym": "Iraqi",
"borders": ["IRN", "JOR", "KWT", "SAU", "SYR", "TUR"],
"area": 438317
},
{
"name": {
"common": "Ireland",
"official": "Republic of Ireland",
"native": {
"common": "\u00c9ire",
"official": "Poblacht na h\u00c9ireann"
}
},
"tld": [".ie"],
"cca2": "IE",
"ccn3": "372",
"cca3": "IRL",
"currency": ["EUR"],
"callingCode": ["353"],
"capital": "Dublin",
"altSpellings": ["IE", "\u00c9ire", "Republic of Ireland", "Poblacht na h\u00c9ireann"],
"relevance": "1.2",
"region": "Europe",
"subregion": "Northern Europe",
"nativeLanguage": "gle",
"languages": {
"eng": "English",
"gle": "Irish"
},
"translations": {
"deu": "Irland",
"fra": "Irlande",
"hrv": "Irska",
"ita": "Irlanda",
"jpn": "\u30a2\u30a4\u30eb\u30e9\u30f3\u30c9",
"nld": "Ierland",
"rus": "\u0418\u0440\u043b\u0430\u043d\u0434\u0438\u044f",
"spa": "Irlanda"
},
"latlng": [53, -8],
"demonym": "Irish",
"borders": ["GBR"],
"area": 70273
},
{
"name": {
"common": "Isle of Man",
"official": "Isle of Man",
"native": {
"common": "Isle of Man",
"official": "Isle of Man"
}
},
"tld": [".im"],
"cca2": "IM",
"ccn3": "833",
"cca3": "IMN",
"currency": ["GBP"],
"callingCode": ["44"],
"capital": "Douglas",
"altSpellings": ["IM", "Ellan Vannin", "Mann", "Mannin"],
"relevance": "0.5",
"region": "Europe",
"subregion": "Northern Europe",
"nativeLanguage": "eng",
"languages": {
"eng": "English",
"glv": "Manx"
},
"translations": {
"deu": "Insel Man",
"fra": "\u00cele de Man",
"hrv": "Otok Man",
"ita": "Isola di Man",
"jpn": "\u30de\u30f3\u5cf6",
"nld": "Isle of Man",
"rus": "\u041e\u0441\u0442\u0440\u043e\u0432 \u041c\u044d\u043d",
"spa": "Isla de Man"
},
"latlng": [54.25, -4.5],
"demonym": "Manx",
"borders": [],
"area": 572
},
{
"name": {
"common": "Israel",
"official": "State of Israel",
"native": {
"common": "\u05d9\u05e9\u05e8\u05d0\u05dc",
"official": "\u05de\u05d3\u05d9\u05e0\u05ea \u05d9\u05e9\u05e8\u05d0\u05dc"
}
},
"tld": [".il"],
"cca2": "IL",
"ccn3": "376",
"cca3": "ISR",
"currency": ["ILS"],
"callingCode": ["972"],
"capital": "Jerusalem",
"altSpellings": ["IL", "State of Israel", "Med\u012bnat Yisr\u0101'el"],
"relevance": "0",
"region": "Asia",
"subregion": "Western Asia",
"nativeLanguage": "heb",
"languages": {
"ara": "Arabic",
"heb": "Hebrew"
},
"translations": {
"deu": "Israel",
"fra": "Isra\u00ebl",
"hrv": "Izrael",
"ita": "Israele",
"jpn": "\u30a4\u30b9\u30e9\u30a8\u30eb",
"nld": "Isra\u00ebl",
"rus": "\u0418\u0437\u0440\u0430\u0438\u043b\u044c",
"spa": "Israel"
},
"latlng": [31.47, 35.13],
"demonym": "Israeli",
"borders": ["EGY", "JOR", "LBN", "SYR"],
"area": 20770
},
{
"name": {
"common": "Italy",
"official": "Italian Republic",
"native": {
"common": "Italia",
"official": "Repubblica italiana"
}
},
"tld": [".it"],
"cca2": "IT",
"ccn3": "380",
"cca3": "ITA",
"currency": ["EUR"],
"callingCode": ["39"],
"capital": "Rome",
"altSpellings": ["IT", "Italian Republic", "Repubblica italiana"],
"relevance": "2",
"region": "Europe",
"subregion": "Southern Europe",
"nativeLanguage": "ita",
"languages": {
"ita": "Italian"
},
"translations": {
"deu": "Italien",
"fra": "Italie",
"hrv": "Italija",
"ita": "Italia",
"jpn": "\u30a4\u30bf\u30ea\u30a2",
"nld": "Itali\u00eb",
"rus": "\u0418\u0442\u0430\u043b\u0438\u044f",
"spa": "Italia"
},
"latlng": [42.83333333, 12.83333333],
"demonym": "Italian",
"borders": ["AUT", "FRA", "SMR", "SVN", "CHE", "VAT"],
"area": 301336
},
{
"name": {
"common": "Jamaica",
"official": "Jamaica",
"native": {
"common": "Jamaica",
"official": "Jamaica"
}
},
"tld": [".jm"],
"cca2": "JM",
"ccn3": "388",
"cca3": "JAM",
"currency": ["JMD"],
"callingCode": ["1876"],
"capital": "Kingston",
"altSpellings": ["JM"],
"relevance": "0",
"region": "Americas",
"subregion": "Caribbean",
"nativeLanguage": "eng",
"languages": {
"eng": "English",
"jam": "Jamaican Patois"
},
"translations": {
"deu": "Jamaika",
"fra": "Jama\u00efque",
"hrv": "Jamajka",
"ita": "Giamaica",
"jpn": "\u30b8\u30e3\u30de\u30a4\u30ab",
"nld": "Jamaica",
"rus": "\u042f\u043c\u0430\u0439\u043a\u0430",
"spa": "Jamaica"
},
"latlng": [18.25, -77.5],
"demonym": "Jamaican",
"borders": [],
"area": 10991
},
{
"name": {
"common": "Japan",
"official": "Japan",
"native": {
"common": "\u65e5\u672c",
"official": "\u65e5\u672c"
}
},
"tld": [".jp", ".\u307f\u3093\u306a"],
"cca2": "JP",
"ccn3": "392",
"cca3": "JPN",
"currency": ["JPY"],
"callingCode": ["81"],
"capital": "Tokyo",
"altSpellings": ["JP", "Nippon", "Nihon"],
"relevance": "2.5",
"region": "Asia",
"subregion": "Eastern Asia",
"nativeLanguage": "jpn",
"languages": {
"jpn": "Japanese"
},
"translations": {
"deu": "Japan",
"fra": "Japon",
"hrv": "Japan",
"ita": "Giappone",
"jpn": "\u65e5\u672c",
"nld": "Japan",
"rus": "\u042f\u043f\u043e\u043d\u0438\u044f",
"spa": "Jap\u00f3n"
},
"latlng": [36, 138],
"demonym": "Japanese",
"borders": [],
"area": 377930
},
{
"name": {
"common": "Jersey",
"official": "Bailiwick of Jersey",
"native": {
"common": "Jersey",
"official": "Bailiwick of Jersey"
}
},
"tld": [".je"],
"cca2": "JE",
"ccn3": "832",
"cca3": "JEY",
"currency": ["GBP"],
"callingCode": ["44"],
"capital": "Saint Helier",
"altSpellings": ["JE", "Bailiwick of Jersey", "Bailliage de Jersey", "Bailliage d\u00e9 J\u00e8rri"],
"relevance": "0.5",
"region": "Europe",
"subregion": "Northern Europe",
"nativeLanguage": "eng",
"languages": {
"eng": "English",
"fra": "French"
},
"translations": {
"deu": "Jersey",
"fra": "Jersey",
"hrv": "Jersey",
"ita": "Isola di Jersey",
"jpn": "\u30b8\u30e3\u30fc\u30b8\u30fc",
"nld": "Jersey",
"rus": "\u0414\u0436\u0435\u0440\u0441\u0438",
"spa": "Jersey"
},
"latlng": [49.25, -2.16666666],
"demonym": "Channel Islander",
"borders": [],
"area": 116
},
{
"name": {
"common": "Jordan",
"official": "Hashemite Kingdom of Jordan",
"native": {
"common": "\u0627\u0644\u0623\u0631\u062f\u0646",
"official": "\u0627\u0644\u0645\u0645\u0644\u0643\u0629 \u0627\u0644\u0623\u0631\u062f\u0646\u064a\u0629 \u0627\u0644\u0647\u0627\u0634\u0645\u064a\u0629"
}
},
"tld": [".jo", "\u0627\u0644\u0627\u0631\u062f\u0646."],
"cca2": "JO",
"ccn3": "400",
"cca3": "JOR",
"currency": ["JOD"],
"callingCode": ["962"],
"capital": "Amman",
"altSpellings": ["JO", "Hashemite Kingdom of Jordan", "al-Mamlakah al-Urdun\u012byah al-H\u0101shim\u012byah"],
"relevance": "0",
"region": "Asia",
"subregion": "Western Asia",
"nativeLanguage": "ara",
"languages": {
"ara": "Arabic"
},
"translations": {
"deu": "Jordanien",
"fra": "Jordanie",
"hrv": "Jordan",
"ita": "Giordania",
"jpn": "\u30e8\u30eb\u30c0\u30f3",
"nld": "Jordani\u00eb",
"rus": "\u0418\u043e\u0440\u0434\u0430\u043d\u0438\u044f",
"spa": "Jordania"
},
"latlng": [31, 36],
"demonym": "Jordanian",
"borders": ["IRQ", "ISR", "SAU", "SYR"],
"area": 89342
},
{
"name": {
"common": "Kazakhstan",
"official": "Republic of Kazakhstan",
"native": {
"common": "\u049a\u0430\u0437\u0430\u049b\u0441\u0442\u0430\u043d",
"official": "\u049a\u0430\u0437\u0430\u049b\u0441\u0442\u0430\u043d \u0420\u0435\u0441\u043f\u0443\u0431\u043b\u0438\u043a\u0430\u0441\u044b"
}
},
"tld": [".kz", ".\u049b\u0430\u0437"],
"cca2": "KZ",
"ccn3": "398",
"cca3": "KAZ",
"currency": ["KZT"],
"callingCode": ["76", "77"],
"capital": "Astana",
"altSpellings": ["KZ", "Qazaqstan", "\u041a\u0430\u0437\u0430\u0445\u0441\u0442\u0430\u043d", "Republic of Kazakhstan", "\u049a\u0430\u0437\u0430\u049b\u0441\u0442\u0430\u043d \u0420\u0435\u0441\u043f\u0443\u0431\u043b\u0438\u043a\u0430\u0441\u044b", "Qazaqstan Respubl\u00efkas\u0131", "\u0420\u0435\u0441\u043f\u0443\u0431\u043b\u0438\u043a\u0430 \u041a\u0430\u0437\u0430\u0445\u0441\u0442\u0430\u043d", "Respublika Kazakhstan"],
"relevance": "0",
"region": "Asia",
"subregion": "Central Asia",
"nativeLanguage": "kaz",
"languages": {
"kaz": "Kazakh",
"rus": "Russian"
},
"translations": {
"deu": "Kasachstan",
"fra": "Kazakhstan",
"hrv": "Kazahstan",
"ita": "Kazakistan",
"jpn": "\u30ab\u30b6\u30d5\u30b9\u30bf\u30f3",
"nld": "Kazachstan",
"rus": "\u041a\u0430\u0437\u0430\u0445\u0441\u0442\u0430\u043d",
"spa": "Kazajist\u00e1n"
},
"latlng": [48, 68],
"demonym": "Kazakhstani",
"borders": ["CHN", "KGZ", "RUS", "TKM", "UZB"],
"area": 2724900
},
{
"name": {
"common": "Kenya",
"official": "Republic of Kenya",
"native": {
"common": "Kenya",
"official": "Republic of Kenya"
}
},
"tld": [".ke"],
"cca2": "KE",
"ccn3": "404",
"cca3": "KEN",
"currency": ["KES"],
"callingCode": ["254"],
"capital": "Nairobi",
"altSpellings": ["KE", "Republic of Kenya", "Jamhuri ya Kenya"],
"relevance": "0",
"region": "Africa",
"subregion": "Eastern Africa",
"nativeLanguage": "swa",
"languages": {
"eng": "English",
"swa": "Swahili"
},
"translations": {
"deu": "Kenia",
"fra": "Kenya",
"hrv": "Kenija",
"ita": "Kenya",
"jpn": "\u30b1\u30cb\u30a2",
"nld": "Kenia",
"rus": "\u041a\u0435\u043d\u0438\u044f",
"spa": "Kenia"
},
"latlng": [1, 38],
"demonym": "Kenyan",
"borders": ["ETH", "SOM", "SSD", "TZA", "UGA"],
"area": 580367
},
{
"name": {
"common": "Kiribati",
"official": "Independent and Sovereign Republic of Kiribati",
"native": {
"common": "Kiribati",
"official": "Independent and Sovereign Republic of Kiribati"
}
},
"tld": [".ki"],
"cca2": "KI",
"ccn3": "296",
"cca3": "KIR",
"currency": ["AUD"],
"callingCode": ["686"],
"capital": "South Tarawa",
"altSpellings": ["KI", "Republic of Kiribati", "Ribaberiki Kiribati"],
"relevance": "0",
"region": "Oceania",
"subregion": "Micronesia",
"nativeLanguage": "eng",
"languages": {
"eng": "English",
"gil": "Gilbertese"
},
"translations": {
"deu": "Kiribati",
"fra": "Kiribati",
"hrv": "Kiribati",
"ita": "Kiribati",
"jpn": "\u30ad\u30ea\u30d0\u30b9",
"nld": "Kiribati",
"rus": "\u041a\u0438\u0440\u0438\u0431\u0430\u0442\u0438",
"spa": "Kiribati"
},
"latlng": [1.41666666, 173],
"demonym": "I-Kiribati",
"borders": [],
"area": 811
},
{
"name": {
"common": "Kuwait",
"official": "State of Kuwait",
"native": {
"common": "\u0627\u0644\u0643\u0648\u064a\u062a",
"official": "\u062f\u0648\u0644\u0629 \u0627\u0644\u0643\u0648\u064a\u062a"
}
},
"tld": [".kw"],
"cca2": "KW",
"ccn3": "414",
"cca3": "KWT",
"currency": ["KWD"],
"callingCode": ["965"],
"capital": "Kuwait City",
"altSpellings": ["KW", "State of Kuwait", "Dawlat al-Kuwait"],
"relevance": "0",
"region": "Asia",
"subregion": "Western Asia",
"nativeLanguage": "ara",
"languages": {
"ara": "Arabic"
},
"translations": {
"deu": "Kuwait",
"fra": "Kowe\u00eft",
"hrv": "Kuvajt",
"ita": "Kuwait",
"jpn": "\u30af\u30a6\u30a7\u30fc\u30c8",
"nld": "Koeweit",
"rus": "\u041a\u0443\u0432\u0435\u0439\u0442",
"spa": "Kuwait"
},
"latlng": [29.5, 45.75],
"demonym": "Kuwaiti",
"borders": ["IRN", "SAU"],
"area": 17818
},
{
"name": {
"common": "Kyrgyzstan",
"official": "Kyrgyz Republic",
"native": {
"common": "\u041a\u044b\u0440\u0433\u044b\u0437\u0441\u0442\u0430\u043d",
"official": "\u041a\u044b\u0440\u0433\u044b\u0437 \u0420\u0435\u0441\u043f\u0443\u0431\u043b\u0438\u043a\u0430\u0441\u044b"
}
},
"tld": [".kg"],
"cca2": "KG",
"ccn3": "417",
"cca3": "KGZ",
"currency": ["KGS"],
"callingCode": ["996"],
"capital": "Bishkek",
"altSpellings": ["KG", "\u041a\u0438\u0440\u0433\u0438\u0437\u0438\u044f", "Kyrgyz Republic", "\u041a\u044b\u0440\u0433\u044b\u0437 \u0420\u0435\u0441\u043f\u0443\u0431\u043b\u0438\u043a\u0430\u0441\u044b", "Kyrgyz Respublikasy"],
"relevance": "0",
"region": "Asia",
"subregion": "Central Asia",
"nativeLanguage": "kir",
"languages": {
"kir": "Kyrgyz",
"rus": "Russian"
},
"translations": {
"deu": "Kirgisistan",
"fra": "Kirghizistan",
"hrv": "Kirgistan",
"ita": "Kirghizistan",
"jpn": "\u30ad\u30eb\u30ae\u30b9",
"nld": "Kirgizi\u00eb",
"rus": "\u041a\u0438\u0440\u0433\u0438\u0437\u0438\u044f",
"spa": "Kirguizist\u00e1n"
},
"latlng": [41, 75],
"demonym": "Kirghiz",
"borders": ["CHN", "KAZ", "TJK", "UZB"],
"area": 199951
},
{
"name": {
"common": "Laos",
"official": "Lao People's Democratic Republic",
"native": {
"common": "\u0eaa\u0e9b\u0e9b\u0ea5\u0eb2\u0ea7",
"official": "\u0eaa\u0eb2\u0e97\u0eb2\u0ea5\u0eb0\u0e99\u0eb0 \u0e8a\u0eb2\u0e97\u0eb4\u0e9b\u0eb0\u0ec4\u0e95 \u0e84\u0ebb\u0e99\u0ea5\u0eb2\u0ea7 \u0e82\u0ead\u0e87"
}
},
"tld": [".la"],
"cca2": "LA",
"ccn3": "418",
"cca3": "LAO",
"currency": ["LAK"],
"callingCode": ["856"],
"capital": "Vientiane",
"altSpellings": ["LA", "Lao", "Lao People's Democratic Republic", "Sathalanalat Paxathipatai Paxaxon Lao"],
"relevance": "0",
"region": "Asia",
"subregion": "South-Eastern Asia",
"nativeLanguage": "lao",
"languages": {
"lao": "Lao"
},
"translations": {
"deu": "Laos",
"fra": "Laos",
"hrv": "Laos",
"ita": "Laos",
"jpn": "\u30e9\u30aa\u30b9\u4eba\u6c11\u6c11\u4e3b\u5171\u548c\u56fd",
"nld": "Laos",
"rus": "\u041b\u0430\u043e\u0441",
"spa": "Laos"
},
"latlng": [18, 105],
"demonym": "Laotian",
"borders": ["MMR", "KHM", "CHN", "THA", "VNM"],
"area": 236800
},
{
"name": {
"common": "Latvia",
"official": "Republic of Latvia",
"native": {
"common": "Latvija",
"official": "Latvijas Republikas"
}
},
"tld": [".lv"],
"cca2": "LV",
"ccn3": "428",
"cca3": "LVA",
"currency": ["EUR"],
"callingCode": ["371"],
"capital": "Riga",
"altSpellings": ["LV", "Republic of Latvia", "Latvijas Republika"],
"relevance": "0",
"region": "Europe",
"subregion": "Northern Europe",
"nativeLanguage": "lav",
"languages": {
"lav": "Latvian"
},
"translations": {
"deu": "Lettland",
"fra": "Lettonie",
"hrv": "Latvija",
"ita": "Lettonia",
"jpn": "\u30e9\u30c8\u30d3\u30a2",
"nld": "Letland",
"rus": "\u041b\u0430\u0442\u0432\u0438\u044f",
"spa": "Letonia"
},
"latlng": [57, 25],
"demonym": "Latvian",
"borders": ["BLR", "EST", "LTU", "RUS"],
"area": 64559
},
{
"name": {
"common": "Lebanon",
"official": "Lebanese Republic",
"native": {
"common": "\u0644\u0628\u0646\u0627\u0646",
"official": "\u0627\u0644\u062c\u0645\u0647\u0648\u0631\u064a\u0629 \u0627\u0644\u0644\u0628\u0646\u0627\u0646\u064a\u0629"
}
},
"tld": [".lb"],
"cca2": "LB",
"ccn3": "422",
"cca3": "LBN",
"currency": ["LBP"],
"callingCode": ["961"],
"capital": "Beirut",
"altSpellings": ["LB", "Lebanese Republic", "Al-Jumh\u016br\u012byah Al-Libn\u0101n\u012byah"],
"relevance": "0",
"region": "Asia",
"subregion": "Western Asia",
"nativeLanguage": "ara",
"languages": {
"ara": "Arabic",
"fra": "French"
},
"translations": {
"deu": "Libanon",
"fra": "Liban",
"hrv": "Libanon",
"ita": "Libano",
"jpn": "\u30ec\u30d0\u30ce\u30f3",
"nld": "Libanon",
"rus": "\u041b\u0438\u0432\u0430\u043d",
"spa": "L\u00edbano"
},
"latlng": [33.83333333, 35.83333333],
"demonym": "Lebanese",
"borders": ["ISR", "SYR"],
"area": 10452
},
{
"name": {
"common": "Lesotho",
"official": "Kingdom of Lesotho",
"native": {
"common": "Lesotho",
"official": "Kingdom of Lesotho"
}
},
"tld": [".ls"],
"cca2": "LS",
"ccn3": "426",
"cca3": "LSO",
"currency": ["LSL", "ZAR"],
"callingCode": ["266"],
"capital": "Maseru",
"altSpellings": ["LS", "Kingdom of Lesotho", "Muso oa Lesotho"],
"relevance": "0",
"region": "Africa",
"subregion": "Southern Africa",
"nativeLanguage": "sot",
"languages": {
"eng": "English",
"sot": "Sotho"
},
"translations": {
"deu": "Lesotho",
"fra": "Lesotho",
"hrv": "Lesoto",
"ita": "Lesotho",
"jpn": "\u30ec\u30bd\u30c8",
"nld": "Lesotho",
"rus": "\u041b\u0435\u0441\u043e\u0442\u043e",
"spa": "Lesotho"
},
"latlng": [-29.5, 28.5],
"demonym": "Mosotho",
"borders": ["ZAF"],
"area": 30355
},
{
"name": {
"common": "Liberia",
"official": "Republic of Liberia",
"native": {
"common": "Liberia",
"official": "Republic of Liberia"
}
},
"tld": [".lr"],
"cca2": "LR",
"ccn3": "430",
"cca3": "LBR",
"currency": ["LRD"],
"callingCode": ["231"],
"capital": "Monrovia",
"altSpellings": ["LR", "Republic of Liberia"],
"relevance": "0",
"region": "Africa",
"subregion": "Western Africa",
"nativeLanguage": "eng",
"languages": {
"eng": "English"
},
"translations": {
"deu": "Liberia",
"fra": "Liberia",
"hrv": "Liberija",
"ita": "Liberia",
"jpn": "\u30ea\u30d9\u30ea\u30a2",
"nld": "Liberia",
"rus": "\u041b\u0438\u0431\u0435\u0440\u0438\u044f",
"spa": "Liberia"
},
"latlng": [6.5, -9.5],
"demonym": "Liberian",
"borders": ["GIN", "CIV", "SLE"],
"area": 111369
},
{
"name": {
"common": "Libya",
"official": "State of Libya",
"native": {
"common": "\u200f\u0644\u064a\u0628\u064a\u0627",
"official": "\u0627\u0644\u062f\u0648\u0644\u0629 \u0644\u064a\u0628\u064a\u0627"
}
},
"tld": [".ly"],
"cca2": "LY",
"ccn3": "434",
"cca3": "LBY",
"currency": ["LYD"],
"callingCode": ["218"],
"capital": "Tripoli",
"altSpellings": ["LY", "State of Libya", "Dawlat Libya"],
"relevance": "0",
"region": "Africa",
"subregion": "Northern Africa",
"nativeLanguage": "ara",
"languages": {
"ara": "Arabic"
},
"translations": {
"deu": "Libyen",
"fra": "Libye",
"hrv": "Libija",
"ita": "Libia",
"jpn": "\u30ea\u30d3\u30a2",
"nld": "Libi\u00eb",
"rus": "\u041b\u0438\u0432\u0438\u044f",
"spa": "Libia"
},
"latlng": [25, 17],
"demonym": "Libyan",
"borders": ["DZA", "TCD", "EGY", "NER", "SDN", "TUN"],
"area": 1759540
},
{
"name": {
"common": "Liechtenstein",
"official": "Principality of Liechtenstein",
"native": {
"common": "Liechtenstein",
"official": "F\u00fcrstentum Liechtenstein"
}
},
"tld": [".li"],
"cca2": "LI",
"ccn3": "438",
"cca3": "LIE",
"currency": ["CHF"],
"callingCode": ["423"],
"capital": "Vaduz",
"altSpellings": ["LI", "Principality of Liechtenstein", "F\u00fcrstentum Liechtenstein"],
"relevance": "0",
"region": "Europe",
"subregion": "Western Europe",
"nativeLanguage": "deu",
"languages": {
"deu": "German"
},
"translations": {
"deu": "Liechtenstein",
"fra": "Liechtenstein",
"hrv": "Lihten\u0161tajn",
"ita": "Liechtenstein",
"jpn": "\u30ea\u30d2\u30c6\u30f3\u30b7\u30e5\u30bf\u30a4\u30f3",
"nld": "Liechtenstein",
"rus": "\u041b\u0438\u0445\u0442\u0435\u043d\u0448\u0442\u0435\u0439\u043d",
"spa": "Liechtenstein"
},
"latlng": [47.26666666, 9.53333333],
"demonym": "Liechtensteiner",
"borders": ["AUT", "CHE"],
"area": 160
},
{
"name": {
"common": "Lithuania",
"official": "Republic of Lithuania",
"native": {
"common": "Lietuva",
"official": "Lietuvos Respublikos"
}
},
"tld": [".lt"],
"cca2": "LT",
"ccn3": "440",
"cca3": "LTU",
"currency": ["LTL"],
"callingCode": ["370"],
"capital": "Vilnius",
"altSpellings": ["LT", "Republic of Lithuania", "Lietuvos Respublika"],
"relevance": "0",
"region": "Europe",
"subregion": "Northern Europe",
"nativeLanguage": "lit",
"languages": {
"lit": "Lithuanian"
},
"translations": {
"deu": "Litauen",
"fra": "Lituanie",
"hrv": "Litva",
"ita": "Lituania",
"jpn": "\u30ea\u30c8\u30a2\u30cb\u30a2",
"nld": "Litouwen",
"rus": "\u041b\u0438\u0442\u0432\u0430",
"spa": "Lituania"
},
"latlng": [56, 24],
"demonym": "Lithuanian",
"borders": ["BLR", "LVA", "POL", "RUS"],
"area": 65300
},
{
"name": {
"common": "Luxembourg",
"official": "Grand Duchy of Luxembourg",
"native": {
"common": "Luxembourg",
"official": "Grand-Duch\u00e9 de Luxembourg"
}
},
"tld": [".lu"],
"cca2": "LU",
"ccn3": "442",
"cca3": "LUX",
"currency": ["EUR"],
"callingCode": ["352"],
"capital": "Luxembourg",
"altSpellings": ["LU", "Grand Duchy of Luxembourg", "Grand-Duch\u00e9 de Luxembourg", "Gro\u00dfherzogtum Luxemburg", "Groussherzogtum L\u00ebtzebuerg"],
"relevance": "0",
"region": "Europe",
"subregion": "Western Europe",
"nativeLanguage": "fra",
"languages": {
"deu": "German",
"fra": "French",
"ltz": "Luxembourgish"
},
"translations": {
"deu": "Luxemburg",
"fra": "Luxembourg",
"hrv": "Luksemburg",
"ita": "Lussemburgo",
"jpn": "\u30eb\u30af\u30bb\u30f3\u30d6\u30eb\u30af",
"nld": "Luxemburg",
"rus": "\u041b\u044e\u043a\u0441\u0435\u043c\u0431\u0443\u0440\u0433",
"spa": "Luxemburgo"
},
"latlng": [49.75, 6.16666666],
"demonym": "Luxembourger",
"borders": ["BEL", "FRA", "DEU"],
"area": 2586
},
{
"name": {
"common": "Macau",
"official": "Macao Special Administrative Region of the People's Republic of China",
"native": {
"common": "\u6fb3\u9580",
"official": "\u6fb3\u95e8\u7279\u522b\u884c\u653f\u533a\u4e2d\u56fd\u4eba\u6c11\u5171\u548c\u56fd"
}
},
"tld": [".mo"],
"cca2": "MO",
"ccn3": "446",
"cca3": "MAC",
"currency": ["MOP"],
"callingCode": ["853"],
"capital": "",
"altSpellings": ["MO", "\u6fb3\u95e8", "Macao Special Administrative Region of the People's Republic of China", "\u4e2d\u83ef\u4eba\u6c11\u5171\u548c\u570b\u6fb3\u9580\u7279\u5225\u884c\u653f\u5340", "Regi\u00e3o Administrativa Especial de Macau da Rep\u00fablica Popular da China"],
"relevance": "0",
"region": "Asia",
"subregion": "Eastern Asia",
"nativeLanguage": "zho",
"languages": {
"por": "Portuguese",
"zho": "Chinese"
},
"translations": {
"deu": "Macao",
"fra": "Macao",
"hrv": "Makao",
"ita": "Macao",
"jpn": "\u30de\u30ab\u30aa",
"nld": "Macao",
"rus": "\u041c\u0430\u043a\u0430\u043e",
"spa": "Macao"
},
"latlng": [22.16666666, 113.55],
"demonym": "Chinese",
"borders": ["CHN"],
"area": 30
},
{
"name": {
"common": "Macedonia",
"official": "Republic of Macedonia",
"native": {
"common": "\u041c\u0430\u043a\u0435\u0434\u043e\u043d\u0438\u0458\u0430",
"official": "\u0420\u0435\u043f\u0443\u0431\u043b\u0438\u043a\u0430 \u041c\u0430\u043a\u0435\u0434\u043e\u043d\u0438\u0458\u0430"
}
},
"tld": [".mk"],
"cca2": "MK",
"ccn3": "807",
"cca3": "MKD",
"currency": ["MKD"],
"callingCode": ["389"],
"capital": "Skopje",
"altSpellings": ["MK", "Republic of Macedonia", "\u0420\u0435\u043f\u0443\u0431\u043b\u0438\u043a\u0430 \u041c\u0430\u043a\u0435\u0434\u043e\u043d\u0438\u0458\u0430"],
"relevance": "0",
"region": "Europe",
"subregion": "Southern Europe",
"nativeLanguage": "mkd",
"languages": {
"mkd": "Macedonian"
},
"translations": {
"deu": "Mazedonien",
"fra": "Mac\u00e9doine",
"hrv": "Makedonija",
"ita": "Macedonia",
"jpn": "\u30de\u30b1\u30c9\u30cb\u30a2\u65e7\u30e6\u30fc\u30b4\u30b9\u30e9\u30d3\u30a2\u5171\u548c\u56fd",
"nld": "Macedoni\u00eb",
"rus": "\u0420\u0435\u0441\u043f\u0443\u0431\u043b\u0438\u043a\u0430 \u041c\u0430\u043a\u0435\u0434\u043e\u043d\u0438\u044f",
"spa": "Macedonia"
},
"latlng": [41.83333333, 22],
"demonym": "Macedonian",
"borders": ["ALB", "BGR", "GRC", "KOS", "SRB"],
"area": 25713
},
{
"name": {
"common": "Madagascar",
"official": "Republic of Madagascar",
"native": {
"common": "Madagasikara",
"official": "R\u00e9publique de Madagascar"
}
},
"tld": [".mg"],
"cca2": "MG",
"ccn3": "450",
"cca3": "MDG",
"currency": ["MGA"],
"callingCode": ["261"],
"capital": "Antananarivo",
"altSpellings": ["MG", "Republic of Madagascar", "Repoblikan'i Madagasikara", "R\u00e9publique de Madagascar"],
"relevance": "0",
"region": "Africa",
"subregion": "Eastern Africa",
"nativeLanguage": "fra",
"languages": {
"fra": "French",
"mlg": "Malagasy"
},
"translations": {
"deu": "Madagaskar",
"fra": "Madagascar",
"hrv": "Madagaskar",
"ita": "Madagascar",
"jpn": "\u30de\u30c0\u30ac\u30b9\u30ab\u30eb",
"nld": "Madagaskar",
"rus": "\u041c\u0430\u0434\u0430\u0433\u0430\u0441\u043a\u0430\u0440",
"spa": "Madagascar"
},
"latlng": [-20, 47],
"demonym": "Malagasy",
"borders": [],
"area": 587041
},
{
"name": {
"common": "Malawi",
"official": "Republic of Malawi",
"native": {
"common": "Mala\u0175i",
"official": "Chalo cha Malawi, Dziko la Mala\u0175i"
}
},
"tld": [".mw"],
"cca2": "MW",
"ccn3": "454",
"cca3": "MWI",
"currency": ["MWK"],
"callingCode": ["265"],
"capital": "Lilongwe",
"altSpellings": ["MW", "Republic of Malawi"],
"relevance": "0",
"region": "Africa",
"subregion": "Eastern Africa",
"nativeLanguage": "nya",
"languages": {
"eng": "English",
"nya": "Chewa"
},
"translations": {
"deu": "Malawi",
"fra": "Malawi",
"hrv": "Malavi",
"ita": "Malawi",
"jpn": "\u30de\u30e9\u30a6\u30a4",
"nld": "Malawi",
"rus": "\u041c\u0430\u043b\u0430\u0432\u0438",
"spa": "Malawi"
},
"latlng": [-13.5, 34],
"demonym": "Malawian",
"borders": ["MOZ", "TZA", "ZMB"],
"area": 118484
},
{
"name": {
"common": "Malaysia",
"official": "Malaysia",
"native": {
"common": "\u0645\u0644\u064a\u0633\u064a\u0627",
"official": "\u0645\u0644\u064a\u0633\u064a\u0627"
}
},
"tld": [".my"],
"cca2": "MY",
"ccn3": "458",
"cca3": "MYS",
"currency": ["MYR"],
"callingCode": ["60"],
"capital": "Kuala Lumpur",
"altSpellings": ["MY"],
"relevance": "0",
"region": "Asia",
"subregion": "South-Eastern Asia",
"nativeLanguage": "msa",
"languages": {
"eng": "English",
"msa": "Malay"
},
"translations": {
"deu": "Malaysia",
"fra": "Malaisie",
"hrv": "Malezija",
"ita": "Malesia",
"jpn": "\u30de\u30ec\u30fc\u30b7\u30a2",
"nld": "Maleisi\u00eb",
"rus": "\u041c\u0430\u043b\u0430\u0439\u0437\u0438\u044f",
"spa": "Malasia"
},
"latlng": [2.5, 112.5],
"demonym": "Malaysian",
"borders": ["BRN", "IDN", "THA"],
"area": 330803
},
{
"name": {
"common": "Maldives",
"official": "Republic of the Maldives",
"native": {
"common": "\u078b\u07a8\u0788\u07ac\u0780\u07a8\u0783\u07a7\u0787\u07b0\u0796\u07ad\u078e\u07ac",
"official": "\u078b\u07a8\u0788\u07ac\u0780\u07a8\u0783\u07a7\u0787\u07b0\u0796\u07ad\u078e\u07ac \u0796\u07aa\u0789\u07b0\u0780\u07ab\u0783\u07a8\u0787\u07b0\u0794\u07a7"
}
},
"tld": [".mv"],
"cca2": "MV",
"ccn3": "462",
"cca3": "MDV",
"currency": ["MVR"],
"callingCode": ["960"],
"capital": "Mal\u00e9",
"altSpellings": ["MV", "Maldive Islands", "Republic of the Maldives", "Dhivehi Raajjeyge Jumhooriyya"],
"relevance": "0",
"region": "Asia",
"subregion": "Southern Asia",
"nativeLanguage": "div",
"languages": {
"div": "Maldivian"
},
"translations": {
"deu": "Malediven",
"fra": "Maldives",
"hrv": "Maldivi",
"ita": "Maldive",
"jpn": "\u30e2\u30eb\u30c7\u30a3\u30d6",
"nld": "Maldiven",
"rus": "\u041c\u0430\u043b\u044c\u0434\u0438\u0432\u044b",
"spa": "Maldivas"
},
"latlng": [3.25, 73],
"demonym": "Maldivan",
"borders": [],
"area": 300
},
{
"name": {
"common": "Mali",
"official": "Republic of Mali",
"native": {
"common": "Mali",
"official": "R\u00e9publique du Mali"
}
},
"tld": [".ml"],
"cca2": "ML",
"ccn3": "466",
"cca3": "MLI",
"currency": ["XOF"],
"callingCode": ["223"],
"capital": "Bamako",
"altSpellings": ["ML", "Republic of Mali", "R\u00e9publique du Mali"],
"relevance": "0",
"region": "Africa",
"subregion": "Western Africa",
"nativeLanguage": "fra",
"languages": {
"fra": "French"
},
"translations": {
"deu": "Mali",
"fra": "Mali",
"hrv": "Mali",
"ita": "Mali",
"jpn": "\u30de\u30ea",
"nld": "Mali",
"rus": "\u041c\u0430\u043b\u0438",
"spa": "Mali"
},
"latlng": [17, -4],
"demonym": "Malian",
"borders": ["DZA", "BFA", "GIN", "CIV", "MRT", "NER", "SEN"],
"area": 1240192
},
{
"name": {
"common": "Malta",
"official": "Republic of Malta",
"native": {
"common": "Malta",
"official": "Repubblika ta ' Malta"
}
},
"tld": [".mt"],
"cca2": "MT",
"ccn3": "470",
"cca3": "MLT",
"currency": ["EUR"],
"callingCode": ["356"],
"capital": "Valletta",
"altSpellings": ["MT", "Republic of Malta", "Repubblika ta' Malta"],
"relevance": "0",
"region": "Europe",
"subregion": "Southern Europe",
"nativeLanguage": "mlt",
"languages": {
"eng": "English",
"mlt": "Maltese"
},
"translations": {
"deu": "Malta",
"fra": "Malte",
"hrv": "Malta",
"ita": "Malta",
"jpn": "\u30de\u30eb\u30bf",
"nld": "Malta",
"rus": "\u041c\u0430\u043b\u044c\u0442\u0430",
"spa": "Malta"
},
"latlng": [35.83333333, 14.58333333],
"demonym": "Maltese",
"borders": [],
"area": 316
},
{
"name": {
"common": "Marshall Islands",
"official": "Republic of the Marshall Islands",
"native": {
"common": "M\u0327aje\u013c",
"official": "Republic of the Marshall Islands"
}
},
"tld": [".mh"],
"cca2": "MH",
"ccn3": "584",
"cca3": "MHL",
"currency": ["USD"],
"callingCode": ["692"],
"capital": "Majuro",
"altSpellings": ["MH", "Republic of the Marshall Islands", "Aolep\u0101n Aor\u014dkin M\u0327aje\u013c"],
"relevance": "0.5",
"region": "Oceania",
"subregion": "Micronesia",
"nativeLanguage": "mah",
"languages": {
"eng": "English",
"mah": "Marshallese"
},
"translations": {
"deu": "Marshallinseln",
"fra": "\u00celes Marshall",
"hrv": "Mar\u0161alovi Otoci",
"ita": "Isole Marshall",
"jpn": "\u30de\u30fc\u30b7\u30e3\u30eb\u8af8\u5cf6",
"nld": "Marshalleilanden",
"rus": "\u041c\u0430\u0440\u0448\u0430\u043b\u043b\u043e\u0432\u044b \u041e\u0441\u0442\u0440\u043e\u0432\u0430",
"spa": "Islas Marshall"
},
"latlng": [9, 168],
"demonym": "Marshallese",
"borders": [],
"area": 181
},
{
"name": {
"common": "Martinique",
"official": "Martinique",
"native": {
"common": "Martinique",
"official": "Martinique"
}
},
"tld": [".mq"],
"cca2": "MQ",
"ccn3": "474",
"cca3": "MTQ",
"currency": ["EUR"],
"callingCode": ["596"],
"capital": "Fort-de-France",
"altSpellings": ["MQ"],
"relevance": "0",
"region": "Americas",
"subregion": "Caribbean",
"nativeLanguage": "fra",
"languages": {
"fra": "French"
},
"translations": {
"deu": "Martinique",
"fra": "Martinique",
"hrv": "Martinique",
"ita": "Martinica",
"jpn": "\u30de\u30eb\u30c6\u30a3\u30cb\u30fc\u30af",
"nld": "Martinique",
"rus": "\u041c\u0430\u0440\u0442\u0438\u043d\u0438\u043a\u0430",
"spa": "Martinica"
},
"latlng": [14.666667, -61],
"demonym": "French",
"borders": [],
"area": 1128
},
{
"name": {
"common": "Mauritania",
"official": "Islamic Republic of Mauritania",
"native": {
"common": "\u0645\u0648\u0631\u064a\u062a\u0627\u0646\u064a\u0627",
"official": "\u0627\u0644\u062c\u0645\u0647\u0648\u0631\u064a\u0629 \u0627\u0644\u0625\u0633\u0644\u0627\u0645\u064a\u0629 \u0627\u0644\u0645\u0648\u0631\u064a\u062a\u0627\u0646\u064a\u0629"
}
},
"tld": [".mr"],
"cca2": "MR",
"ccn3": "478",
"cca3": "MRT",
"currency": ["MRO"],
"callingCode": ["222"],
"capital": "Nouakchott",
"altSpellings": ["MR", "Islamic Republic of Mauritania", "al-Jumh\u016briyyah al-\u02beIsl\u0101miyyah al-M\u016br\u012bt\u0101niyyah"],
"relevance": "0",
"region": "Africa",
"subregion": "Western Africa",
"nativeLanguage": "ara",
"languages": {
"ara": "Arabic"
},
"translations": {
"deu": "Mauretanien",
"fra": "Mauritanie",
"hrv": "Mauritanija",
"ita": "Mauritania",
"jpn": "\u30e2\u30fc\u30ea\u30bf\u30cb\u30a2",
"nld": "Mauritani\u00eb",
"rus": "\u041c\u0430\u0432\u0440\u0438\u0442\u0430\u043d\u0438\u044f",
"spa": "Mauritania"
},
"latlng": [20, -12],
"demonym": "Mauritanian",
"borders": ["DZA", "MLI", "SEN", "ESH"],
"area": 1030700
},
{
"name": {
"common": "Mauritius",
"official": "Republic of Mauritius",
"native": {
"common": "Maurice",
"official": "Republic of Mauritius"
}
},
"tld": [".mu"],
"cca2": "MU",
"ccn3": "480",
"cca3": "MUS",
"currency": ["MUR"],
"callingCode": ["230"],
"capital": "Port Louis",
"altSpellings": ["MU", "Republic of Mauritius", "R\u00e9publique de Maurice"],
"relevance": "0",
"region": "Africa",
"subregion": "Eastern Africa",
"nativeLanguage": "mfe",
"languages": {
"eng": "English",
"fra": "French",
"mfe": "Mauritian Creole"
},
"translations": {
"deu": "Mauritius",
"fra": "\u00cele Maurice",
"hrv": "Mauricijus",
"ita": "Mauritius",
"jpn": "\u30e2\u30fc\u30ea\u30b7\u30e3\u30b9",
"nld": "Mauritius",
"rus": "\u041c\u0430\u0432\u0440\u0438\u043a\u0438\u0439",
"spa": "Mauricio"
},
"latlng": [-20.28333333, 57.55],
"demonym": "Mauritian",
"borders": [],
"area": 2040
},
{
"name": {
"common": "Mayotte",
"official": "Department of Mayotte",
"native": {
"common": "Mayotte",
"official": "D\u00e9partement de Mayotte"
}
},
"tld": [".yt"],
"cca2": "YT",
"ccn3": "175",
"cca3": "MYT",
"currency": ["EUR"],
"callingCode": ["262"],
"capital": "Mamoudzou",
"altSpellings": ["YT", "Department of Mayotte", "D\u00e9partement de Mayotte"],
"relevance": "0",
"region": "Africa",
"subregion": "Eastern Africa",
"nativeLanguage": "fra",
"languages": {
"fra": "French"
},
"translations": {
"deu": "Mayotte",
"fra": "Mayotte",
"hrv": "Mayotte",
"ita": "Mayotte",
"jpn": "\u30de\u30e8\u30c3\u30c8",
"nld": "Mayotte",
"rus": "\u041c\u0430\u0439\u043e\u0442\u0442\u0430",
"spa": "Mayotte"
},
"latlng": [-12.83333333, 45.16666666],
"demonym": "Mahoran",
"borders": [],
"area": 374
},
{
"name": {
"common": "Mexico",
"official": "United Mexican States",
"native": {
"common": "M\u00e9xico",
"official": "Estados Unidos Mexicanos"
}
},
"tld": [".mx"],
"cca2": "MX",
"ccn3": "484",
"cca3": "MEX",
"currency": ["MXN"],
"callingCode": ["52"],
"capital": "Mexico City",
"altSpellings": ["MX", "Mexicanos", "United Mexican States", "Estados Unidos Mexicanos"],
"relevance": "1.5",
"region": "Americas",
"subregion": "Central America",
"nativeLanguage": "spa",
"languages": {
"spa": "Spanish"
},
"translations": {
"deu": "Mexiko",
"fra": "Mexique",
"hrv": "Meksiko",
"ita": "Messico",
"jpn": "\u30e1\u30ad\u30b7\u30b3",
"nld": "Mexico",
"rus": "\u041c\u0435\u043a\u0441\u0438\u043a\u0430",
"spa": "M\u00e9xico"
},
"latlng": [23, -102],
"demonym": "Mexican",
"borders": ["BLZ", "GTM", "USA"],
"area": 1964375
},
{
"name": {
"common": "Micronesia",
"official": "Federated States of Micronesia",
"native": {
"common": "Micronesia",
"official": "Federated States of Micronesia"
}
},
"tld": [".fm"],
"cca2": "FM",
"ccn3": "583",
"cca3": "FSM",
"currency": ["USD"],
"callingCode": ["691"],
"capital": "Palikir",
"altSpellings": ["FM", "Federated States of Micronesia"],
"relevance": "0",
"region": "Oceania",
"subregion": "Micronesia",
"nativeLanguage": "eng",
"languages": {
"eng": "English"
},
"translations": {
"deu": "Mikronesien",
"fra": "Micron\u00e9sie",
"hrv": "Mikronezija",
"ita": "Micronesia",
"jpn": "\u30df\u30af\u30ed\u30cd\u30b7\u30a2\u9023\u90a6",
"nld": "Micronesi\u00eb",
"rus": "\u0424\u0435\u0434\u0435\u0440\u0430\u0442\u0438\u0432\u043d\u044b\u0435 \u0428\u0442\u0430\u0442\u044b \u041c\u0438\u043a\u0440\u043e\u043d\u0435\u0437\u0438\u0438",
"spa": "Micronesia"
},
"latlng": [6.91666666, 158.25],
"demonym": "Micronesian",
"borders": [],
"area": 702
},
{
"name": {
"common": "Moldova",
"official": "Republic of Moldova",
"native": {
"common": "Moldova",
"official": "Republica Moldova"
}
},
"tld": [".md"],
"cca2": "MD",
"ccn3": "498",
"cca3": "MDA",
"currency": ["MDL"],
"callingCode": ["373"],
"capital": "Chi\u0219in\u0103u",
"altSpellings": ["MD", "Republic of Moldova", "Republica Moldova"],
"relevance": "0",
"region": "Europe",
"subregion": "Eastern Europe",
"nativeLanguage": "ron",
"languages": {
"ron": "Moldavian"
},
"translations": {
"deu": "Moldawie",
"fra": "Moldavie",
"hrv": "Moldova",
"ita": "Moldavia",
"jpn": "\u30e2\u30eb\u30c9\u30d0\u5171\u548c\u56fd",
"nld": "Moldavi\u00eb",
"rus": "\u041c\u043e\u043b\u0434\u0430\u0432\u0438\u044f",
"spa": "Moldavia"
},
"latlng": [47, 29],
"demonym": "Moldovan",
"borders": ["ROU", "UKR"],
"area": 33846
},
{
"name": {
"common": "Monaco",
"official": "Principality of Monaco",
"native": {
"common": "Monaco",
"official": "Principaut\u00e9 de Monaco"
}
},
"tld": [".mc"],
"cca2": "MC",
"ccn3": "492",
"cca3": "MCO",
"currency": ["EUR"],
"callingCode": ["377"],
"capital": "Monaco",
"altSpellings": ["MC", "Principality of Monaco", "Principaut\u00e9 de Monaco"],
"relevance": "0",
"region": "Europe",
"subregion": "Western Europe",
"nativeLanguage": "fra",
"languages": {
"fra": "French"
},
"translations": {
"deu": "Monaco",
"fra": "Monaco",
"hrv": "Monako",
"ita": "Principato di Monaco",
"jpn": "\u30e2\u30ca\u30b3",
"nld": "Monaco",
"rus": "\u041c\u043e\u043d\u0430\u043a\u043e",
"spa": "M\u00f3naco"
},
"latlng": [43.73333333, 7.4],
"demonym": "Monegasque",
"borders": ["FRA"],
"area": 2.02
},
{
"name": {
"common": "Mongolia",
"official": "Mongolia",
"native": {
"common": "\u041c\u043e\u043d\u0433\u043e\u043b \u0443\u043b\u0441",
"official": "\u041c\u043e\u043d\u0433\u043e\u043b \u0443\u043b\u0441"
}
},
"tld": [".mn"],
"cca2": "MN",
"ccn3": "496",
"cca3": "MNG",
"currency": ["MNT"],
"callingCode": ["976"],
"capital": "Ulan Bator",
"altSpellings": ["MN"],
"relevance": "0",
"region": "Asia",
"subregion": "Eastern Asia",
"nativeLanguage": "mon",
"languages": {
"mon": "Mongolian"
},
"translations": {
"deu": "Mongolei",
"fra": "Mongolie",
"hrv": "Mongolija",
"ita": "Mongolia",
"jpn": "\u30e2\u30f3\u30b4\u30eb",
"nld": "Mongoli\u00eb",
"rus": "\u041c\u043e\u043d\u0433\u043e\u043b\u0438\u044f",
"spa": "Mongolia"
},
"latlng": [46, 105],
"demonym": "Mongolian",
"borders": ["CHN", "RUS"],
"area": 1564110
},
{
"name": {
"common": "Montenegro",
"official": "Montenegro",
"native": {
"common": "\u0426\u0440\u043d\u0430 \u0413\u043e\u0440\u0430",
"official": "\u0426\u0440\u043d\u0430 \u0413\u043e\u0440\u0430"
}
},
"tld": [".me"],
"cca2": "ME",
"ccn3": "499",
"cca3": "MNE",
"currency": ["EUR"],
"callingCode": ["382"],
"capital": "Podgorica",
"altSpellings": ["ME", "Crna Gora"],
"relevance": "0",
"region": "Europe",
"subregion": "Southern Europe",
"nativeLanguage": "srp",
"languages": {
"srp": "Montenegrin"
},
"translations": {
"deu": "Montenegro",
"fra": "Mont\u00e9n\u00e9gro",
"hrv": "Crna Gora",
"ita": "Montenegro",
"jpn": "\u30e2\u30f3\u30c6\u30cd\u30b0\u30ed",
"nld": "Montenegro",
"rus": "\u0427\u0435\u0440\u043d\u043e\u0433\u043e\u0440\u0438\u044f",
"spa": "Montenegro"
},
"latlng": [42.5, 19.3],
"demonym": "Montenegrin",
"borders": ["ALB", "BIH", "HRV", "KOS", "SRB"],
"area": 13812
},
{
"name": {
"common": "Montserrat",
"official": "Montserrat",
"native": {
"common": "Montserrat",
"official": "Montserrat"
}
},
"tld": [".ms"],
"cca2": "MS",
"ccn3": "500",
"cca3": "MSR",
"currency": ["XCD"],
"callingCode": ["1664"],
"capital": "Plymouth",
"altSpellings": ["MS"],
"relevance": "0.5",
"region": "Americas",
"subregion": "Caribbean",
"nativeLanguage": "eng",
"languages": {
"eng": "English"
},
"translations": {
"deu": "Montserrat",
"fra": "Montserrat",
"hrv": "Montserrat",
"ita": "Montserrat",
"jpn": "\u30e2\u30f3\u30c8\u30bb\u30e9\u30c8",
"nld": "Montserrat",
"rus": "\u041c\u043e\u043d\u0442\u0441\u0435\u0440\u0440\u0430\u0442",
"spa": "Montserrat"
},
"latlng": [16.75, -62.2],
"demonym": "Montserratian",
"borders": [],
"area": 102
},
{
"name": {
"common": "Morocco",
"official": "Kingdom of Morocco",
"native": {
"common": "\u0627\u0644\u0645\u063a\u0631\u0628",
"official": "\u0627\u0644\u0645\u0645\u0644\u0643\u0629 \u0627\u0644\u0645\u063a\u0631\u0628\u064a\u0629"
}
},
"tld": [".ma", "\u0627\u0644\u0645\u063a\u0631\u0628."],
"cca2": "MA",
"ccn3": "504",
"cca3": "MAR",
"currency": ["MAD"],
"callingCode": ["212"],
"capital": "Rabat",
"altSpellings": ["MA", "Kingdom of Morocco", "Al-Mamlakah al-Ma\u0121ribiyah"],
"relevance": "0",
"region": "Africa",
"subregion": "Northern Africa",
"nativeLanguage": "ara",
"languages": {
"ara": "Arabic",
"ber": "Berber"
},
"translations": {
"deu": "Marokko",
"fra": "Maroc",
"hrv": "Maroko",
"ita": "Marocco",
"jpn": "\u30e2\u30ed\u30c3\u30b3",
"nld": "Marokko",
"rus": "\u041c\u0430\u0440\u043e\u043a\u043a\u043e",
"spa": "Marruecos"
},
"latlng": [32, -5],
"demonym": "Moroccan",
"borders": ["DZA", "ESH", "ESP"],
"area": 446550
},
{
"name": {
"common": "Mozambique",
"official": "Republic of Mozambique",
"native": {
"common": "Mo\u00e7ambique",
"official": "Rep\u00fablica de Mo\u00e7ambique"
}
},
"tld": [".mz"],
"cca2": "MZ",
"ccn3": "508",
"cca3": "MOZ",
"currency": ["MZN"],
"callingCode": ["258"],
"capital": "Maputo",
"altSpellings": ["MZ", "Republic of Mozambique", "Rep\u00fablica de Mo\u00e7ambique"],
"relevance": "0",
"region": "Africa",
"subregion": "Eastern Africa",
"nativeLanguage": "por",
"languages": {
"por": "Portuguese"
},
"translations": {
"deu": "Mosambik",
"fra": "Mozambique",
"hrv": "Mozambik",
"ita": "Mozambico",
"jpn": "\u30e2\u30b6\u30f3\u30d3\u30fc\u30af",
"nld": "Mozambique",
"rus": "\u041c\u043e\u0437\u0430\u043c\u0431\u0438\u043a",
"spa": "Mozambique"
},
"latlng": [-18.25, 35],
"demonym": "Mozambican",
"borders": ["MWI", "ZAF", "SWZ", "TZA", "ZMB", "ZWE"],
"area": 801590
},
{
"name": {
"common": "Myanmar",
"official": "Republic of the Union of Myanmar",
"native": {
"common": "\u1019\u103c\u1014\u103a\u1019\u102c",
"official": "\u1015\u103c\u100a\u103a\u1011\u1031\u102c\u1004\u103a\u1005\u102f \u101e\u1019\u1039\u1019\u1010 \u1019\u103c\u1014\u103a\u1019\u102c\u1014\u102d\u102f\u1004\u103a\u1004\u1036\u1010\u1031\u102c\u103a"
}
},
"tld": [".mm"],
"cca2": "MM",
"ccn3": "104",
"cca3": "MMR",
"currency": ["MMK"],
"callingCode": ["95"],
"capital": "Naypyidaw",
"altSpellings": ["MM", "Burma", "Republic of the Union of Myanmar", "Pyidaunzu Thanm\u0103da My\u0103ma Nainngandaw"],
"relevance": "0",
"region": "Asia",
"subregion": "South-Eastern Asia",
"nativeLanguage": "mya",
"languages": {
"mya": "Burmese"
},
"translations": {
"deu": "Myanmar",
"fra": "Myanmar",
"hrv": "Mijanmar",
"ita": "Birmania",
"jpn": "\u30df\u30e3\u30f3\u30de\u30fc",
"nld": "Myanmar",
"rus": "\u041c\u044c\u044f\u043d\u043c\u0430",
"spa": "Myanmar"
},
"latlng": [22, 98],
"demonym": "Myanmarian",
"borders": ["BGD", "CHN", "IND", "LAO", "THA"],
"area": 676578
},
{
"name": {
"common": "Namibia",
"official": "Republic of Namibia",
"native": {
"common": "Namibia",
"official": "Republic of Namibia"
}
},
"tld": [".na"],
"cca2": "NA",
"ccn3": "516",
"cca3": "NAM",
"currency": ["NAD", "ZAR"],
"callingCode": ["264"],
"capital": "Windhoek",
"altSpellings": ["NA", "Namibi\u00eb", "Republic of Namibia"],
"relevance": "0",
"region": "Africa",
"subregion": "Southern Africa",
"nativeLanguage": "afr",
"languages": {
"afr": "Afrikaans",
"deu": "German",
"eng": "English",
"her": "Herero",
"hgm": "Khoekhoe",
"kwn": "Kwangali",
"loz": "Lozi",
"ndo": "Ndonga",
"tsn": "Tswana"
},
"translations": {
"deu": "Namibia",
"fra": "Namibie",
"hrv": "Namibija",
"ita": "Namibia",
"jpn": "\u30ca\u30df\u30d3\u30a2",
"nld": "Namibi\u00eb",
"rus": "\u041d\u0430\u043c\u0438\u0431\u0438\u044f",
"spa": "Namibia"
},
"latlng": [-22, 17],
"demonym": "Namibian",
"borders": ["AGO", "BWA", "ZAF", "ZMB"],
"area": 825615
},
{
"name": {
"common": "Nauru",
"official": "Republic of Nauru",
"native": {
"common": "Nauru",
"official": "Republic of Nauru"
}
},
"tld": [".nr"],
"cca2": "NR",
"ccn3": "520",
"cca3": "NRU",
"currency": ["AUD"],
"callingCode": ["674"],
"capital": "Yaren",
"altSpellings": ["NR", "Naoero", "Pleasant Island", "Republic of Nauru", "Ripublik Naoero"],
"relevance": "0.5",
"region": "Oceania",
"subregion": "Micronesia",
"nativeLanguage": "nau",
"languages": {
"eng": "English",
"nau": "Nauru"
},
"translations": {
"deu": "Nauru",
"fra": "Nauru",
"hrv": "Nauru",
"ita": "Nauru",
"jpn": "\u30ca\u30a6\u30eb",
"nld": "Nauru",
"rus": "\u041d\u0430\u0443\u0440\u0443",
"spa": "Nauru"
},
"latlng": [-0.53333333, 166.91666666],
"demonym": "Nauruan",
"borders": [],
"area": 21
},
{
"name": {
"common": "Nepal",
"official": "Federal Democratic Republic of Nepal",
"native": {
"common": "\u0928\u092a\u0932",
"official": "\u0928\u0947\u092a\u093e\u0932 \u0938\u0902\u0918\u0940\u092f \u0932\u094b\u0915\u0924\u093e\u0928\u094d\u0924\u094d\u0930\u093f\u0915 \u0917\u0923\u0924\u0928\u094d\u0924\u094d\u0930"
}
},
"tld": [".np"],
"cca2": "NP",
"ccn3": "524",
"cca3": "NPL",
"currency": ["NPR"],
"callingCode": ["977"],
"capital": "Kathmandu",
"altSpellings": ["NP", "Federal Democratic Republic of Nepal", "Lokt\u0101ntrik Ganatantra Nep\u0101l"],
"relevance": "0",
"region": "Asia",
"subregion": "Southern Asia",
"nativeLanguage": "nep",
"languages": {
"nep": "Nepali"
},
"translations": {
"deu": "N\u00e9pal",
"fra": "N\u00e9pal",
"hrv": "Nepal",
"ita": "Nepal",
"jpn": "\u30cd\u30d1\u30fc\u30eb",
"nld": "Nepal",
"rus": "\u041d\u0435\u043f\u0430\u043b",
"spa": "Nepal"
},
"latlng": [28, 84],
"demonym": "Nepalese",
"borders": ["CHN", "IND"],
"area": 147181
},
{
"name": {
"common": "Netherlands",
"official": "Netherlands",
"native": {
"common": "Nederland",
"official": "Nederland"
}
},
"tld": [".nl"],
"cca2": "NL",
"ccn3": "528",
"cca3": "NLD",
"currency": ["EUR"],
"callingCode": ["31"],
"capital": "Amsterdam",
"altSpellings": ["NL", "Holland", "Nederland"],
"relevance": "1.5",
"region": "Europe",
"subregion": "Western Europe",
"nativeLanguage": "nld",
"languages": {
"nld": "Dutch"
},
"translations": {
"deu": "Niederlande",
"fra": "Pays-Bas",
"hrv": "Nizozemska",
"ita": "Paesi Bassi",
"jpn": "\u30aa\u30e9\u30f3\u30c0",
"nld": "Nederland",
"rus": "\u041d\u0438\u0434\u0435\u0440\u043b\u0430\u043d\u0434\u044b",
"spa": "Pa\u00edses Bajos"
},
"latlng": [52.5, 5.75],
"demonym": "Dutch",
"borders": ["BEL", "DEU"],
"area": 41850
},
{
"name": {
"common": "New Caledonia",
"official": "New Caledonia",
"native": {
"common": "Nouvelle-Cal\u00e9donie",
"official": "Nouvelle-Cal\u00e9donie"
}
},
"tld": [".nc"],
"cca2": "NC",
"ccn3": "540",
"cca3": "NCL",
"currency": ["XPF"],
"callingCode": ["687"],
"capital": "Noum\u00e9a",
"altSpellings": ["NC"],
"relevance": "0.5",
"region": "Oceania",
"subregion": "Melanesia",
"nativeLanguage": "fra",
"languages": {
"fra": "French"
},
"translations": {
"deu": "Neukaledonien",
"fra": "Nouvelle-Cal\u00e9donie",
"hrv": "Nova Kaledonija",
"ita": "Nuova Caledonia",
"jpn": "\u30cb\u30e5\u30fc\u30ab\u30ec\u30c9\u30cb\u30a2",
"nld": "Nieuw-Caledoni\u00eb",
"rus": "\u041d\u043e\u0432\u0430\u044f \u041a\u0430\u043b\u0435\u0434\u043e\u043d\u0438\u044f",
"spa": "Nueva Caledonia"
},
"latlng": [-21.5, 165.5],
"demonym": "New Caledonian",
"borders": [],
"area": 18575
},
{
"name": {
"common": "New Zealand",
"official": "New Zealand",
"native": {
"common": "New Zealand",
"official": "New Zealand"
}
},
"tld": [".nz"],
"cca2": "NZ",
"ccn3": "554",
"cca3": "NZL",
"currency": ["NZD"],
"callingCode": ["64"],
"capital": "Wellington",
"altSpellings": ["NZ", "Aotearoa"],
"relevance": "1.0",
"region": "Oceania",
"subregion": "Australia and New Zealand",
"nativeLanguage": "eng",
"languages": {
"eng": "English",
"mri": "M\u0101ori",
"nzs": "New Zealand Sign Language"
},
"translations": {
"deu": "Neuseeland",
"fra": "Nouvelle-Z\u00e9lande",
"hrv": "Novi Zeland",
"ita": "Nuova Zelanda",
"jpn": "\u30cb\u30e5\u30fc\u30b8\u30fc\u30e9\u30f3\u30c9",
"nld": "Nieuw-Zeeland",
"rus": "\u041d\u043e\u0432\u0430\u044f \u0417\u0435\u043b\u0430\u043d\u0434\u0438\u044f",
"spa": "Nueva Zelanda"
},
"latlng": [-41, 174],
"demonym": "New Zealander",
"borders": [],
"area": 270467
},
{
"name": {
"common": "Nicaragua",
"official": "Republic of Nicaragua",
"native": {
"common": "Nicaragua",
"official": "Rep\u00fablica de Nicaragua"
}
},
"tld": [".ni"],
"cca2": "NI",
"ccn3": "558",
"cca3": "NIC",
"currency": ["NIO"],
"callingCode": ["505"],
"capital": "Managua",
"altSpellings": ["NI", "Republic of Nicaragua", "Rep\u00fablica de Nicaragua"],
"relevance": "0",
"region": "Americas",
"subregion": "Central America",
"nativeLanguage": "spa",
"languages": {
"spa": "Spanish"
},
"translations": {
"deu": "Nicaragua",
"fra": "Nicaragua",
"hrv": "Nikaragva",
"ita": "Nicaragua",
"jpn": "\u30cb\u30ab\u30e9\u30b0\u30a2",
"nld": "Nicaragua",
"rus": "\u041d\u0438\u043a\u0430\u0440\u0430\u0433\u0443\u0430",
"spa": "Nicaragua"
},
"latlng": [13, -85],
"demonym": "Nicaraguan",
"borders": ["CRI", "HND"],
"area": 130373
},
{
"name": {
"common": "Niger",
"official": "Republic of Niger",
"native": {
"common": "Niger",
"official": "R\u00e9publique du Niger"
}
},
"tld": [".ne"],
"cca2": "NE",
"ccn3": "562",
"cca3": "NER",
"currency": ["XOF"],
"callingCode": ["227"],
"capital": "Niamey",
"altSpellings": ["NE", "Nijar", "Republic of Niger", "R\u00e9publique du Niger"],
"relevance": "0",
"region": "Africa",
"subregion": "Western Africa",
"nativeLanguage": "fra",
"languages": {
"fra": "French"
},
"translations": {
"deu": "Niger",
"fra": "Niger",
"hrv": "Niger",
"ita": "Niger",
"jpn": "\u30cb\u30b8\u30a7\u30fc\u30eb",
"nld": "Niger",
"rus": "\u041d\u0438\u0433\u0435\u0440",
"spa": "N\u00edger"
},
"latlng": [16, 8],
"demonym": "Nigerien",
"borders": ["DZA", "BEN", "BFA", "TCD", "LBY", "MLI", "NGA"],
"area": 1267000
},
{
"name": {
"common": "Nigeria",
"official": "Federal Republic of Nigeria",
"native": {
"common": "Nigeria",
"official": "Federal Republic of Nigeria"
}
},
"tld": [".ng"],
"cca2": "NG",
"ccn3": "566",
"cca3": "NGA",
"currency": ["NGN"],
"callingCode": ["234"],
"capital": "Abuja",
"altSpellings": ["NG", "Nijeriya", "Na\u00edj\u00edr\u00ed\u00e0", "Federal Republic of Nigeria"],
"relevance": "1.5",
"region": "Africa",
"subregion": "Western Africa",
"nativeLanguage": "eng",
"languages": {
"eng": "English"
},
"translations": {
"deu": "Nigeria",
"fra": "Nig\u00e9ria",
"hrv": "Nigerija",
"ita": "Nigeria",
"jpn": "\u30ca\u30a4\u30b8\u30a7\u30ea\u30a2",
"nld": "Nigeria",
"rus": "\u041d\u0438\u0433\u0435\u0440\u0438\u044f",
"spa": "Nigeria"
},
"latlng": [10, 8],
"demonym": "Nigerian",
"borders": ["BEN", "CMR", "TCD", "NER"],
"area": 923768
},
{
"name": {
"common": "Niue",
"official": "Niue",
"native": {
"common": "Niu\u0113",
"official": "Niu\u0113"
}
},
"tld": [".nu"],
"cca2": "NU",
"ccn3": "570",
"cca3": "NIU",
"currency": ["NZD"],
"callingCode": ["683"],
"capital": "Alofi",
"altSpellings": ["NU"],
"relevance": "0.5",
"region": "Oceania",
"subregion": "Polynesia",
"nativeLanguage": "niu",
"languages": {
"eng": "English",
"niu": "Niuean"
},
"translations": {
"deu": "Niue",
"fra": "Niue",
"hrv": "Niue",
"ita": "Niue",
"jpn": "\u30cb\u30a6\u30a8",
"nld": "Niue",
"rus": "\u041d\u0438\u0443\u044d",
"spa": "Niue"
},
"latlng": [-19.03333333, -169.86666666],
"demonym": "Niuean",
"borders": [],
"area": 260
},
{
"name": {
"common": "Norfolk Island",
"official": "Territory of Norfolk Island",
"native": {
"common": "Norfolk Island",
"official": "Territory of Norfolk Island"
}
},
"tld": [".nf"],
"cca2": "NF",
"ccn3": "574",
"cca3": "NFK",
"currency": ["AUD"],
"callingCode": ["672"],
"capital": "Kingston",
"altSpellings": ["NF", "Territory of Norfolk Island", "Teratri of Norf'k Ailen"],
"relevance": "0.5",
"region": "Oceania",
"subregion": "Australia and New Zealand",
"nativeLanguage": "eng",
"languages": {
"eng": "English",
"pih": "Norfuk"
},
"translations": {
"deu": "Norfolkinsel",
"fra": "\u00cele de Norfolk",
"hrv": "Otok Norfolk",
"ita": "Isola Norfolk",
"jpn": "\u30ce\u30fc\u30d5\u30a9\u30fc\u30af\u5cf6",
"nld": "Norfolkeiland",
"rus": "\u041d\u043e\u0440\u0444\u043e\u043b\u043a",
"spa": "Isla de Norfolk"
},
"latlng": [-29.03333333, 167.95],
"demonym": "Norfolk Islander",
"borders": [],
"area": 36
},
{
"name": {
"common": "North Korea",
"official": "Democratic People's Republic of Korea",
"native": {
"common": "\ubd81\ud55c",
"official": "\uc870\uc120 \ubbfc\uc8fc\uc8fc\uc758 \uc778\ubbfc \uacf5\ud654\uad6d"
}
},
"tld": [".kp"],
"cca2": "KP",
"ccn3": "408",
"cca3": "PRK",
"currency": ["KPW"],
"callingCode": ["850"],
"capital": "Pyongyang",
"altSpellings": ["KP", "Democratic People's Republic of Korea", "\uc870\uc120\ubbfc\uc8fc\uc8fc\uc758\uc778\ubbfc\uacf5\ud654\uad6d", "Chos\u014fn Minjuju\u016di Inmin Konghwaguk"],
"relevance": "0",
"region": "Asia",
"subregion": "Eastern Asia",
"nativeLanguage": "kor",
"languages": {
"kor": "Korean"
},
"translations": {
"deu": "Nordkorea",
"fra": "Cor\u00e9e du Nord",
"hrv": "Sjeverna Koreja",
"ita": "Corea del Nord",
"jpn": "\u671d\u9bae\u6c11\u4e3b\u4e3b\u7fa9\u4eba\u6c11\u5171\u548c\u56fd",
"nld": "Noord-Korea",
"rus": "\u0421\u0435\u0432\u0435\u0440\u043d\u0430\u044f \u041a\u043e\u0440\u0435\u044f",
"spa": "Corea del Norte"
},
"latlng": [40, 127],
"demonym": "North Korean",
"borders": ["CHN", "KOR", "RUS"],
"area": 120538
},
{
"name": {
"common": "Northern Mariana Islands",
"official": "Commonwealth of the Northern Mariana Islands",
"native": {
"common": "Northern Mariana Islands",
"official": "Commonwealth of the Northern Mariana Islands"
}
},
"tld": [".mp"],
"cca2": "MP",
"ccn3": "580",
"cca3": "MNP",
"currency": ["USD"],
"callingCode": ["1670"],
"capital": "Saipan",
"altSpellings": ["MP", "Commonwealth of the Northern Mariana Islands", "Sankattan Siha Na Islas Mari\u00e5nas"],
"relevance": "0.5",
"region": "Oceania",
"subregion": "Micronesia",
"nativeLanguage": "eng",
"languages": {
"cal": "Carolinian",
"cha": "Chamorro",
"eng": "English"
},
"translations": {
"deu": "N\u00f6rdliche Marianen",
"fra": "\u00celes Mariannes du Nord",
"hrv": "Sjevernomarijanski otoci",
"ita": "Isole Marianne Settentrionali",
"jpn": "\u5317\u30de\u30ea\u30a2\u30ca\u8af8\u5cf6",
"nld": "Noordelijke Marianeneilanden",
"rus": "\u0421\u0435\u0432\u0435\u0440\u043d\u044b\u0435 \u041c\u0430\u0440\u0438\u0430\u043d\u0441\u043a\u0438\u0435 \u043e\u0441\u0442\u0440\u043e\u0432\u0430",
"spa": "Islas Marianas del Norte"
},
"latlng": [15.2, 145.75],
"demonym": "American",
"borders": [],
"area": 464
},
{
"name": {
"common": "Norway",
"official": "Kingdom of Norway",
"native": {
"common": "Norge",
"official": "Kongeriket Norge"
}
},
"tld": [".no"],
"cca2": "NO",
"ccn3": "578",
"cca3": "NOR",
"currency": ["NOK"],
"callingCode": ["47"],
"capital": "Oslo",
"altSpellings": ["NO", "Norge", "Noreg", "Kingdom of Norway", "Kongeriket Norge", "Kongeriket Noreg"],
"relevance": "1.5",
"region": "Europe",
"subregion": "Northern Europe",
"nativeLanguage": "nor",
"languages": {
"nno": "Nynorsk",
"nob": "Bokm\u00e5l",
"nor": "Norwegian"
},
"translations": {
"deu": "Norwegen",
"fra": "Norv\u00e8ge",
"hrv": "Norve\u0161ka",
"ita": "Norvegia",
"jpn": "\u30ce\u30eb\u30a6\u30a7\u30fc",
"nld": "Noorwegen",
"rus": "\u041d\u043e\u0440\u0432\u0435\u0433\u0438\u044f",
"spa": "Noruega"
},
"latlng": [62, 10],
"demonym": "Norwegian",
"borders": ["FIN", "SWE", "RUS"],
"area": 323802
},
{
"name": {
"common": "Oman",
"official": "Sultanate of Oman",
"native": {
"common": "\u0639\u0645\u0627\u0646",
"official": "\u0633\u0644\u0637\u0646\u0629 \u0639\u0645\u0627\u0646"
}
},
"tld": [".om"],
"cca2": "OM",
"ccn3": "512",
"cca3": "OMN",
"currency": ["OMR"],
"callingCode": ["968"],
"capital": "Muscat",
"altSpellings": ["OM", "Sultanate of Oman", "Sal\u1e6danat \u02bbUm\u0101n"],
"relevance": "0",
"region": "Asia",
"subregion": "Western Asia",
"nativeLanguage": "ara",
"languages": {
"ara": "Arabic"
},
"translations": {
"deu": "Oman",
"fra": "Oman",
"hrv": "Oman",
"ita": "oman",
"jpn": "\u30aa\u30de\u30fc\u30f3",
"nld": "Oman",
"rus": "\u041e\u043c\u0430\u043d",
"spa": "Om\u00e1n"
},
"latlng": [21, 57],
"demonym": "Omani",
"borders": ["SAU", "ARE", "YEM"],
"area": 309500
},
{
"name": {
"common": "Pakistan",
"official": "Islamic Republic of Pakistan",
"native": {
"common": "Pakistan",
"official": "Islamic Republic of Pakistan"
}
},
"tld": [".pk"],
"cca2": "PK",
"ccn3": "586",
"cca3": "PAK",
"currency": ["PKR"],
"callingCode": ["92"],
"capital": "Islamabad",
"altSpellings": ["PK", "P\u0101kist\u0101n", "Islamic Republic of Pakistan", "Isl\u0101m\u012b Jumh\u016briya'eh P\u0101kist\u0101n"],
"relevance": "2",
"region": "Asia",
"subregion": "Southern Asia",
"nativeLanguage": "eng",
"languages": {
"eng": "English",
"urd": "Urdu"
},
"translations": {
"deu": "Pakistan",
"fra": "Pakistan",
"hrv": "Pakistan",
"ita": "Pakistan",
"jpn": "\u30d1\u30ad\u30b9\u30bf\u30f3",
"nld": "Pakistan",
"rus": "\u041f\u0430\u043a\u0438\u0441\u0442\u0430\u043d",
"spa": "Pakist\u00e1n"
},
"latlng": [30, 70],
"demonym": "Pakistani",
"borders": ["AFG", "CHN", "IND", "IRN"],
"area": 881912
},
{
"name": {
"common": "Palau",
"official": "Republic of Palau",
"native": {
"common": "Palau",
"official": "Republic of Palau"
}
},
"tld": [".pw"],
"cca2": "PW",
"ccn3": "585",
"cca3": "PLW",
"currency": ["USD"],
"callingCode": ["680"],
"capital": "Ngerulmud",
"altSpellings": ["PW", "Republic of Palau", "Beluu er a Belau"],
"relevance": "0.5",
"region": "Oceania",
"subregion": "Micronesia",
"nativeLanguage": "eng",
"languages": {
"eng": "English",
"pau": "Palauan"
},
"translations": {
"deu": "Palau",
"fra": "Palaos",
"hrv": "Palau",
"ita": "Palau",
"jpn": "\u30d1\u30e9\u30aa",
"nld": "Palau",
"rus": "\u041f\u0430\u043b\u0430\u0443",
"spa": "Palau"
},
"latlng": [7.5, 134.5],
"demonym": "Palauan",
"borders": [],
"area": 459
},
{
"name": {
"common": "Palestine",
"official": "State of Palestine",
"native": {
"common": "\u0641\u0644\u0633\u0637\u064a\u0646",
"official": "\u062f\u0648\u0644\u0629 \u0641\u0644\u0633\u0637\u064a\u0646"
}
},
"tld": [".ps", "\u0641\u0644\u0633\u0637\u064a\u0646."],
"cca2": "PS",
"ccn3": "275",
"cca3": "PSE",
"currency": ["ILS"],
"callingCode": ["970"],
"capital": "Ramallah",
"altSpellings": ["PS", "State of Palestine", "Dawlat Filas\u1e6din"],
"relevance": "0",
"region": "Asia",
"subregion": "Western Asia",
"nativeLanguage": "ara",
"languages": {
"ara": "Arabic"
},
"translations": {
"deu": "Pal\u00e4stina",
"fra": "Palestine",
"hrv": "Palestina",
"ita": "Palestina",
"jpn": "\u30d1\u30ec\u30b9\u30c1\u30ca",
"nld": "Palestijnse gebieden",
"rus": "\u041f\u0430\u043b\u0435\u0441\u0442\u0438\u043d\u0430",
"spa": "Palestina"
},
"latlng": [31.9, 35.2],
"demonym": "Palestinian",
"borders": ["ISR", "EGY", "JOR"],
"area": 6220
},
{
"name": {
"common": "Panama",
"official": "Republic of Panama",
"native": {
"common": "Panam\u00e1",
"official": "Rep\u00fablica de Panam\u00e1"
}
},
"tld": [".pa"],
"cca2": "PA",
"ccn3": "591",
"cca3": "PAN",
"currency": ["PAB", "USD"],
"callingCode": ["507"],
"capital": "Panama City",
"altSpellings": ["PA", "Republic of Panama", "Rep\u00fablica de Panam\u00e1"],
"relevance": "0",
"region": "Americas",
"subregion": "Central America",
"nativeLanguage": "spa",
"languages": {
"spa": "Spanish"
},
"translations": {
"deu": "Panama",
"fra": "Panama",
"hrv": "Panama",
"ita": "Panama",
"jpn": "\u30d1\u30ca\u30de",
"nld": "Panama",
"rus": "\u041f\u0430\u043d\u0430\u043c\u0430",
"spa": "Panam\u00e1"
},
"latlng": [9, -80],
"demonym": "Panamanian",
"borders": ["COL", "CRI"],
"area": 75417
},
{
"name": {
"common": "Papua New Guinea",
"official": "Independent State of Papua New Guinea",
"native": {
"common": "Papua Niugini",
"official": "Independent State of Papua New Guinea"
}
},
"tld": [".pg"],
"cca2": "PG",
"ccn3": "598",
"cca3": "PNG",
"currency": ["PGK"],
"callingCode": ["675"],
"capital": "Port Moresby",
"altSpellings": ["PG", "Independent State of Papua New Guinea", "Independen Stet bilong Papua Niugini"],
"relevance": "0",
"region": "Oceania",
"subregion": "Melanesia",
"nativeLanguage": "hmo",
"languages": {
"eng": "English",
"hmo": "Hiri Motu",
"tpi": "Tok Pisin"
},
"translations": {
"deu": "Papua-Neuguinea",
"fra": "Papouasie-Nouvelle-Guin\u00e9e",
"hrv": "Papua Nova Gvineja",
"ita": "Papua Nuova Guinea",
"jpn": "\u30d1\u30d7\u30a2\u30cb\u30e5\u30fc\u30ae\u30cb\u30a2",
"nld": "Papoea-Nieuw-Guinea",
"rus": "\u041f\u0430\u043f\u0443\u0430 \u2014 \u041d\u043e\u0432\u0430\u044f \u0413\u0432\u0438\u043d\u0435\u044f",
"spa": "Pap\u00faa Nueva Guinea"
},
"latlng": [-6, 147],
"demonym": "Papua New Guinean",
"borders": ["IDN"],
"area": 462840
},
{
"name": {
"common": "Paraguay",
"official": "Republic of Paraguay",
"native": {
"common": "Paraguay",
"official": "Rep\u00fablica de Paraguay"
}
},
"tld": [".py"],
"cca2": "PY",
"ccn3": "600",
"cca3": "PRY",
"currency": ["PYG"],
"callingCode": ["595"],
"capital": "Asunci\u00f3n",
"altSpellings": ["PY", "Republic of Paraguay", "Rep\u00fablica del Paraguay", "Tet\u00e3 Paragu\u00e1i"],
"relevance": "0",
"region": "Americas",
"subregion": "South America",
"nativeLanguage": "spa",
"languages": {
"grn": "Guaran\u00ed",
"spa": "Spanish"
},
"translations": {
"deu": "Paraguay",
"fra": "Paraguay",
"hrv": "Paragvaj",
"ita": "Paraguay",
"jpn": "\u30d1\u30e9\u30b0\u30a2\u30a4",
"nld": "Paraguay",
"rus": "\u041f\u0430\u0440\u0430\u0433\u0432\u0430\u0439",
"spa": "Paraguay"
},
"latlng": [-23, -58],
"demonym": "Paraguayan",
"borders": ["ARG", "BOL", "BRA"],
"area": 406752
},
{
"name": {
"common": "Peru",
"official": "Republic of Peru",
"native": {
"common": "Per\u00fa",
"official": "Rep\u00fablica del Per\u00fa"
}
},
"tld": [".pe"],
"cca2": "PE",
"ccn3": "604",
"cca3": "PER",
"currency": ["PEN"],
"callingCode": ["51"],
"capital": "Lima",
"altSpellings": ["PE", "Republic of Peru", " Rep\u00fablica del Per\u00fa"],
"relevance": "0",
"region": "Americas",
"subregion": "South America",
"nativeLanguage": "spa",
"languages": {
"aym": "Aymara",
"que": "Quechua",
"spa": "Spanish"
},
"translations": {
"deu": "Peru",
"fra": "P\u00e9rou",
"hrv": "Peru",
"ita": "Per\u00f9",
"jpn": "\u30da\u30eb\u30fc",
"nld": "Peru",
"rus": "\u041f\u0435\u0440\u0443",
"spa": "Per\u00fa"
},
"latlng": [-10, -76],
"demonym": "Peruvian",
"borders": ["BOL", "BRA", "CHL", "COL", "ECU"],
"area": 1285216
},
{
"name": {
"common": "Philippines",
"official": "Republic of the Philippines",
"native": {
"common": "Pilipinas",
"official": "Republic of the Philippines"
}
},
"tld": [".ph"],
"cca2": "PH",
"ccn3": "608",
"cca3": "PHL",
"currency": ["PHP"],
"callingCode": ["63"],
"capital": "Manila",
"altSpellings": ["PH", "Republic of the Philippines", "Rep\u00fablika ng Pilipinas"],
"relevance": "1.5",
"region": "Asia",
"subregion": "South-Eastern Asia",
"nativeLanguage": "fil",
"languages": {
"eng": "English",
"fil": "Filipino"
},
"translations": {
"deu": "Philippinen",
"fra": "Philippines",
"hrv": "Filipini",
"ita": "Filippine",
"jpn": "\u30d5\u30a3\u30ea\u30d4\u30f3",
"nld": "Filipijnen",
"rus": "\u0424\u0438\u043b\u0438\u043f\u043f\u0438\u043d\u044b",
"spa": "Filipinas"
},
"latlng": [13, 122],
"demonym": "Filipino",
"borders": [],
"area": 342353
},
{
"name": {
"common": "Pitcairn Islands",
"official": "Pitcairn Group of Islands",
"native": {
"common": "Pitcairn Islands",
"official": "Pitcairn Group of Islands"
}
},
"tld": [".pn"],
"cca2": "PN",
"ccn3": "612",
"cca3": "PCN",
"currency": ["NZD"],
"callingCode": ["64"],
"capital": "Adamstown",
"altSpellings": ["PN", "Pitcairn Henderson Ducie and Oeno Islands"],
"relevance": "0.5",
"region": "Oceania",
"subregion": "Polynesia",
"nativeLanguage": "eng",
"languages": {
"eng": "English"
},
"translations": {
"deu": "Pitcairn",
"fra": "\u00celes Pitcairn",
"hrv": "Pitcairnovo oto\u010dje",
"ita": "Isole Pitcairn",
"jpn": "\u30d4\u30c8\u30b1\u30a2\u30f3",
"nld": "Pitcairneilanden",
"rus": "\u041e\u0441\u0442\u0440\u043e\u0432\u0430 \u041f\u0438\u0442\u043a\u044d\u0440\u043d",
"spa": "Islas Pitcairn"
},
"latlng": [-25.06666666, -130.1],
"demonym": "Pitcairn Islander",
"borders": [],
"area": 47
},
{
"name": {
"common": "Poland",
"official": "Republic of Poland",
"native": {
"common": "Polska",
"official": "Rzeczpospolita Polska"
}
},
"tld": [".pl"],
"cca2": "PL",
"ccn3": "616",
"cca3": "POL",
"currency": ["PLN"],
"callingCode": ["48"],
"capital": "Warsaw",
"altSpellings": ["PL", "Republic of Poland", "Rzeczpospolita Polska"],
"relevance": "1.25",
"region": "Europe",
"subregion": "Eastern Europe",
"nativeLanguage": "pol",
"languages": {
"pol": "Polish"
},
"translations": {
"deu": "Polen",
"fra": "Pologne",
"hrv": "Poljska",
"ita": "Polonia",
"jpn": "\u30dd\u30fc\u30e9\u30f3\u30c9",
"nld": "Polen",
"rus": "\u041f\u043e\u043b\u044c\u0448\u0430",
"spa": "Polonia"
},
"latlng": [52, 20],
"demonym": "Polish",
"borders": ["BLR", "CZE", "DEU", "LTU", "RUS", "SVK", "UKR"],
"area": 312679
},
{
"name": {
"common": "Portugal",
"official": "Portuguese Republic",
"native": {
"common": "Portugal",
"official": "Rep\u00fablica portugu\u00eas"
}
},
"tld": [".pt"],
"cca2": "PT",
"ccn3": "620",
"cca3": "PRT",
"currency": ["EUR"],
"callingCode": ["351"],
"capital": "Lisbon",
"altSpellings": ["PT", "Portuguesa", "Portuguese Republic", "Rep\u00fablica Portuguesa"],
"relevance": "1.5",
"region": "Europe",
"subregion": "Southern Europe",
"nativeLanguage": "por",
"languages": {
"por": "Portuguese"
},
"translations": {
"deu": "Portugal",
"fra": "Portugal",
"hrv": "Portugal",
"ita": "Portogallo",
"jpn": "\u30dd\u30eb\u30c8\u30ac\u30eb",
"nld": "Portugal",
"rus": "\u041f\u043e\u0440\u0442\u0443\u0433\u0430\u043b\u0438\u044f",
"spa": "Portugal"
},
"latlng": [39.5, -8],
"demonym": "Portuguese",
"borders": ["ESP"],
"area": 92090
},
{
"name": {
"common": "Puerto Rico",
"official": "Commonwealth of Puerto Rico",
"native": {
"common": "Puerto Rico",
"official": "Estado Libre Asociado de Puerto Rico"
}
},
"tld": [".pr"],
"cca2": "PR",
"ccn3": "630",
"cca3": "PRI",
"currency": ["USD"],
"callingCode": ["1787", "1939"],
"capital": "San Juan",
"altSpellings": ["PR", "Commonwealth of Puerto Rico", "Estado Libre Asociado de Puerto Rico"],
"relevance": "0",
"region": "Americas",
"subregion": "Caribbean",
"nativeLanguage": "spa",
"languages": {
"eng": "English",
"spa": "Spanish"
},
"translations": {
"deu": "Puerto Rico",
"fra": "Porto Rico",
"hrv": "Portoriko",
"ita": "Porto Rico",
"jpn": "\u30d7\u30a8\u30eb\u30c8\u30ea\u30b3",
"nld": "Puerto Rico",
"rus": "\u041f\u0443\u044d\u0440\u0442\u043e-\u0420\u0438\u043a\u043e",
"spa": "Puerto Rico"
},
"latlng": [18.25, -66.5],
"demonym": "Puerto Rican",
"borders": [],
"area": 8870
},
{
"name": {
"common": "Qatar",
"official": "State of Qatar",
"native": {
"common": "\u0642\u0637\u0631",
"official": "\u062f\u0648\u0644\u0629 \u0642\u0637\u0631"
}
},
"tld": [".qa", "\u0642\u0637\u0631."],
"cca2": "QA",
"ccn3": "634",
"cca3": "QAT",
"currency": ["QAR"],
"callingCode": ["974"],
"capital": "Doha",
"altSpellings": ["QA", "State of Qatar", "Dawlat Qa\u1e6dar"],
"relevance": "0",
"region": "Asia",
"subregion": "Western Asia",
"nativeLanguage": "ara",
"languages": {
"ara": "Arabic"
},
"translations": {
"deu": "Katar",
"fra": "Qatar",
"hrv": "Katar",
"ita": "Qatar",
"jpn": "\u30ab\u30bf\u30fc\u30eb",
"nld": "Qatar",
"rus": "\u041a\u0430\u0442\u0430\u0440",
"spa": "Catar"
},
"latlng": [25.5, 51.25],
"demonym": "Qatari",
"borders": ["SAU"],
"area": 11586
},
{
"name": {
"common": "Kosovo",
"official": "Republic of Kosovo",
"native": {
"common": "Kosova",
"official": "Republika e Kosov\u00ebs"
}
},
"tld": [],
"cca2": "XK",
"ccn3": "780",
"cca3": "KOS",
"currency": ["EUR"],
"callingCode": ["377", "381", "386"],
"capital": "Pristina",
"altSpellings": ["XK", "\u0420\u0435\u043f\u0443\u0431\u043b\u0438\u043a\u0430 \u041a\u043e\u0441\u043e\u0432\u043e"],
"relevance": "0",
"region": "Europe",
"subregion": "Eastern Europe",
"nativeLanguage": "sqi",
"languages": {
"sqi": "Albanian",
"srp": "Serbian"
},
"translations": {
"hrv": "Kosovo",
"rus": "\u0420\u0435\u0441\u043f\u0443\u0431\u043b\u0438\u043a\u0430 \u041a\u043e\u0441\u043e\u0432\u043e",
"spa": "Kosovo"
},
"latlng": [42.666667, 21.166667],
"demonym": "Kosovar",
"borders": ["ALB", "MKD", "MNE", "SRB"],
"area": 10908
},
{
"name": {
"common": "R\u00e9union",
"official": "R\u00e9union Island",
"native": {
"common": "La R\u00e9union",
"official": "Ile de la R\u00e9union"
}
},
"tld": [".re"],
"cca2": "RE",
"ccn3": "638",
"cca3": "REU",
"currency": ["EUR"],
"callingCode": ["262"],
"capital": "Saint-Denis",
"altSpellings": ["RE", "Reunion"],
"relevance": "0",
"region": "Africa",
"subregion": "Eastern Africa",
"nativeLanguage": "fra",
"languages": {
"fra": "French"
},
"translations": {
"deu": "R\u00e9union",
"fra": "R\u00e9union",
"hrv": "R\u00e9union",
"ita": "Riunione",
"jpn": "\u30ec\u30e6\u30cb\u30aa\u30f3",
"nld": "R\u00e9union",
"rus": "\u0420\u0435\u044e\u043d\u044c\u043e\u043d",
"spa": "Reuni\u00f3n"
},
"latlng": [-21.15, 55.5],
"demonym": "French",
"borders": [],
"area": 2511
},
{
"name": {
"common": "Romania",
"official": "Romania",
"native": {
"common": "Rom\u00e2nia",
"official": "Rom\u00e2nia"
}
},
"tld": [".ro"],
"cca2": "RO",
"ccn3": "642",
"cca3": "ROU",
"currency": ["RON"],
"callingCode": ["40"],
"capital": "Bucharest",
"altSpellings": ["RO", "Rumania", "Roumania", "Rom\u00e2nia"],
"relevance": "0",
"region": "Europe",
"subregion": "Eastern Europe",
"nativeLanguage": "ron",
"languages": {
"ron": "Romanian"
},
"translations": {
"deu": "Rum\u00e4nien",
"fra": "Roumanie",
"hrv": "Rumunjska",
"ita": "Romania",
"jpn": "\u30eb\u30fc\u30de\u30cb\u30a2",
"nld": "Roemeni\u00eb",
"rus": "\u0420\u0443\u043c\u044b\u043d\u0438\u044f",
"spa": "Rumania"
},
"latlng": [46, 25],
"demonym": "Romanian",
"borders": ["BGR", "HUN", "MDA", "SRB", "UKR"],
"area": 238391
},
{
"name": {
"common": "Russia",
"official": "Russian Federation",
"native": {
"common": "\u0420\u043e\u0441\u0441\u0438\u044f",
"official": "\u0420\u0443\u0441\u0441\u043a\u0430\u044f \u0424\u0435\u0434\u0435\u0440\u0430\u0446\u0438\u044f"
}
},
"tld": [".ru", ".su", ".\u0440\u0444"],
"cca2": "RU",
"ccn3": "643",
"cca3": "RUS",
"currency": ["RUB"],
"callingCode": ["7"],
"capital": "Moscow",
"altSpellings": ["RU", "Rossiya", "Russian Federation", "\u0420\u043e\u0441\u0441\u0438\u0439\u0441\u043a\u0430\u044f \u0424\u0435\u0434\u0435\u0440\u0430\u0446\u0438\u044f", "Rossiyskaya Federatsiya"],
"relevance": "2.5",
"region": "Europe",
"subregion": "Eastern Europe",
"nativeLanguage": "rus",
"languages": {
"rus": "Russian"
},
"translations": {
"deu": "Russland",
"fra": "Russie",
"hrv": "Rusija",
"ita": "Russia",
"jpn": "\u30ed\u30b7\u30a2\u9023\u90a6",
"nld": "Rusland",
"rus": "\u0420\u043e\u0441\u0441\u0438\u044f",
"spa": "Rusia"
},
"latlng": [60, 100],
"demonym": "Russian",
"borders": ["AZE", "BLR", "CHN", "EST", "FIN", "GEO", "KAZ", "PRK", "LVA", "LTU", "MNG", "NOR", "POL", "UKR"],
"area": 17098242
},
{
"name": {
"common": "Rwanda",
"official": "Republic of Rwanda",
"native": {
"common": "Rwanda",
"official": "Repubulika y'u Rwanda"
}
},
"tld": [".rw"],
"cca2": "RW",
"ccn3": "646",
"cca3": "RWA",
"currency": ["RWF"],
"callingCode": ["250"],
"capital": "Kigali",
"altSpellings": ["RW", "Republic of Rwanda", "Repubulika y'u Rwanda", "R\u00e9publique du Rwanda"],
"relevance": "0",
"region": "Africa",
"subregion": "Eastern Africa",
"nativeLanguage": "kin",
"languages": {
"eng": "English",
"fra": "French",
"kin": "Kinyarwanda"
},
"translations": {
"deu": "Ruanda",
"fra": "Rwanda",
"hrv": "Ruanda",
"ita": "Ruanda",
"jpn": "\u30eb\u30ef\u30f3\u30c0",
"nld": "Rwanda",
"rus": "\u0420\u0443\u0430\u043d\u0434\u0430",
"spa": "Ruanda"
},
"latlng": [-2, 30],
"demonym": "Rwandan",
"borders": ["BDI", "COD", "TZA", "UGA"],
"area": 26338
},
{
"name": {
"common": "Saint Barth\u00e9lemy",
"official": "Collectivity of Saint Barth\u00e9lemySaint Barth\u00e9lemy",
"native": {
"common": "Saint-Barth\u00e9lemy",
"official": "Collectivit\u00e9 de Saint Barth\u00e9lemy Barth\u00e9lemySaint"
}
},
"tld": [".bl"],
"cca2": "BL",
"ccn3": "652",
"cca3": "BLM",
"currency": ["EUR"],
"callingCode": ["590"],
"capital": "Gustavia",
"altSpellings": ["BL", "St. Barthelemy", "Collectivity of Saint Barth\u00e9lemy", "Collectivit\u00e9 de Saint-Barth\u00e9lemy"],
"relevance": "0",
"region": "Americas",
"subregion": "Caribbean",
"nativeLanguage": "fra",
"languages": {
"fra": "French"
},
"translations": {
"deu": "Saint-Barth\u00e9lemy",
"fra": "Saint-Barth\u00e9lemy",
"hrv": "Saint Barth\u00e9lemy",
"ita": "Antille Francesi",
"jpn": "\u30b5\u30f3\u30fb\u30d0\u30eb\u30c6\u30eb\u30df\u30fc",
"nld": "Saint Barth\u00e9lemy",
"rus": "\u0421\u0435\u043d-\u0411\u0430\u0440\u0442\u0435\u043b\u0435\u043c\u0438",
"spa": "San Bartolom\u00e9"
},
"latlng": [18.5, -63.41666666],
"demonym": "Saint Barth\u00e9lemy Islander",
"borders": [],
"area": 21
},
{
"name": {
"common": "Saint Helena, Ascension and Tristan da Cunha",
"official": "Saint Helena, Ascension and Tristan da Cunha",
"native": {
"common": "Saint Helena, Ascension and Tristan da Cunha",
"official": "Saint Helena, Ascension and Tristan da Cunha"
}
},
"tld": [".sh"],
"cca2": "SH",
"ccn3": "654",
"cca3": "SHN",
"currency": ["SHP"],
"callingCode": ["290"],
"capital": "Jamestown",
"altSpellings": ["SH"],
"relevance": "0",
"region": "Africa",
"subregion": "Western Africa",
"nativeLanguage": "eng",
"languages": {
"eng": "English"
},
"translations": {
"deu": "Sankt Helena",
"fra": "Sainte-H\u00e9l\u00e8ne",
"hrv": "Sveta Helena",
"ita": "Sant'Elena",
"jpn": "\u30bb\u30f3\u30c8\u30d8\u30ec\u30ca\u30fb\u30a2\u30bb\u30f3\u30b7\u30e7\u30f3\u304a\u3088\u3073\u30c8\u30ea\u30b9\u30bf\u30f3\u30c0\u30af\u30fc\u30cb\u30e3",
"nld": "Sint-Helena",
"rus": "\u041e\u0441\u0442\u0440\u043e\u0432 \u0421\u0432\u044f\u0442\u043e\u0439 \u0415\u043b\u0435\u043d\u044b",
"spa": "Santa Helena"
},
"latlng": [-15.95, -5.7],
"demonym": "Saint Helenian",
"borders": [],
"area": 397
},
{
"name": {
"common": "Saint Kitts and Nevis",
"official": "Federation of Saint Christopher and Nevisa",
"native": {
"common": "Saint Kitts and Nevis",
"official": "Federation of Saint Christopher and Nevisa"
}
},
"tld": [".kn"],
"cca2": "KN",
"ccn3": "659",
"cca3": "KNA",
"currency": ["XCD"],
"callingCode": ["1869"],
"capital": "Basseterre",
"altSpellings": ["KN", "Federation of Saint Christopher and Nevis"],
"relevance": "0",
"region": "Americas",
"subregion": "Caribbean",
"nativeLanguage": "eng",
"languages": {
"eng": "English"
},
"translations": {
"deu": "Saint Christopher und Nevis",
"fra": "Saint-Christophe-et-Ni\u00e9v\u00e8s",
"hrv": "Sveti Kristof i Nevis",
"ita": "Saint Kitts e Nevis",
"jpn": "\u30bb\u30f3\u30c8\u30af\u30ea\u30b9\u30c8\u30d5\u30a1\u30fc\u30fb\u30cd\u30a4\u30d3\u30b9",
"nld": "Saint Kitts en Nevis",
"rus": "\u0421\u0435\u043d\u0442-\u041a\u0438\u0442\u0441 \u0438 \u041d\u0435\u0432\u0438\u0441",
"spa": "San Crist\u00f3bal y Nieves"
},
"latlng": [17.33333333, -62.75],
"demonym": "Kittitian or Nevisian",
"borders": [],
"area": 261
},
{
"name": {
"common": "Saint Lucia",
"official": "Saint Lucia",
"native": {
"common": "Saint Lucia",
"official": "Saint Lucia"
}
},
"tld": [".lc"],
"cca2": "LC",
"ccn3": "662",
"cca3": "LCA",
"currency": ["XCD"],
"callingCode": ["1758"],
"capital": "Castries",
"altSpellings": ["LC"],
"relevance": "0",
"region": "Americas",
"subregion": "Caribbean",
"nativeLanguage": "eng",
"languages": {
"eng": "English"
},
"translations": {
"deu": "Saint Lucia",
"fra": "Saint-Lucie",
"hrv": "Sveta Lucija",
"ita": "Santa Lucia",
"jpn": "\u30bb\u30f3\u30c8\u30eb\u30b7\u30a2",
"nld": "Saint Lucia",
"rus": "\u0421\u0435\u043d\u0442-\u041b\u044e\u0441\u0438\u044f",
"spa": "Santa Luc\u00eda"
},
"latlng": [13.88333333, -60.96666666],
"demonym": "Saint Lucian",
"borders": [],
"area": 616
},
{
"name": {
"common": "Saint Martin",
"official": "Saint Pierre and Miquelon",
"native": {
"common": "Saint-Martin",
"official": "Saint-Pierre-et-Miquelon"
}
},
"tld": [".fr", ".gp"],
"cca2": "MF",
"ccn3": "663",
"cca3": "MAF",
"currency": ["EUR"],
"callingCode": ["590"],
"capital": "Marigot",
"altSpellings": ["MF", "Collectivity of Saint Martin", "Collectivit\u00e9 de Saint-Martin"],
"relevance": "0",
"region": "Americas",
"subregion": "Caribbean",
"nativeLanguage": "fra",
"languages": {
"fra": "French"
},
"translations": {
"deu": "Saint Martin",
"fra": "Saint-Martin",
"hrv": "Sveti Martin",
"ita": "Saint Martin",
"jpn": "\u30b5\u30f3\u30fb\u30de\u30eb\u30bf\u30f3\uff08\u30d5\u30e9\u30f3\u30b9\u9818\uff09",
"nld": "Saint-Martin",
"rus": "\u0421\u0435\u043d-\u041c\u0430\u0440\u0442\u0435\u043d",
"spa": "Saint Martin"
},
"latlng": [18.08333333, -63.95],
"demonym": "Saint Martin Islander",
"borders": ["SXM"],
"area": 53
},
{
"name": {
"common": "Saint Pierre and Miquelon",
"official": "Saint-Pierre-et-Miquelon",
"native": {
"common": "Saint-Pierre-et-Miquelon",
"official": "Collectivit\u00E9 territoriale de Saint-Pierre-et-Miquelon"
}
},
"tld": [".pm"],
"cca2": "PM",
"ccn3": "666",
"cca3": "SPM",
"currency": ["EUR"],
"callingCode": ["508"],
"capital": "Saint-Pierre",
"altSpellings": ["PM", "Collectivit\u00e9 territoriale de Saint-Pierre-et-Miquelon"],
"relevance": "0",
"region": "Americas",
"subregion": "Northern America",
"nativeLanguage": "fra",
"languages": {
"fra": "French"
},
"translations": {
"deu": "Saint-Pierre und Miquelon",
"fra": "Saint-Pierre-et-Miquelon",
"hrv": "Sveti Petar i Mikelon",
"ita": "Saint-Pierre e Miquelon",
"jpn": "\u30b5\u30f3\u30d4\u30a8\u30fc\u30eb\u5cf6\u30fb\u30df\u30af\u30ed\u30f3\u5cf6",
"nld": "Saint Pierre en Miquelon",
"rus": "\u0421\u0435\u043d-\u041f\u044c\u0435\u0440 \u0438 \u041c\u0438\u043a\u0435\u043b\u043e\u043d",
"spa": "San Pedro y Miquel\u00f3n"
},
"latlng": [46.83333333, -56.33333333],
"demonym": "French",
"borders": [],
"area": 242
},
{
"name": {
"common": "Saint Vincent and the Grenadines",
"official": "Saint Vincent and the Grenadines",
"native": {
"common": "Saint Vincent and the Grenadines",
"official": "Saint Vincent and the Grenadines"
}
},
"tld": [".vc"],
"cca2": "VC",
"ccn3": "670",
"cca3": "VCT",
"currency": ["XCD"],
"callingCode": ["1784"],
"capital": "Kingstown",
"altSpellings": ["VC"],
"relevance": "0",
"region": "Americas",
"subregion": "Caribbean",
"nativeLanguage": "eng",
"languages": {
"eng": "English"
},
"translations": {
"deu": "Saint Vincent und die Grenadinen",
"fra": "Saint-Vincent-et-les-Grenadines",
"hrv": "Sveti Vincent i Grenadini",
"ita": "Saint Vincent e Grenadine",
"jpn": "\u30bb\u30f3\u30c8\u30d3\u30f3\u30bb\u30f3\u30c8\u304a\u3088\u3073\u30b0\u30ec\u30ca\u30c7\u30a3\u30fc\u30f3\u8af8\u5cf6",
"nld": "Saint Vincent en de Grenadines",
"rus": "\u0421\u0435\u043d\u0442-\u0412\u0438\u043d\u0441\u0435\u043d\u0442 \u0438 \u0413\u0440\u0435\u043d\u0430\u0434\u0438\u043d\u044b",
"spa": "San Vicente y Granadinas"
},
"latlng": [13.25, -61.2],
"demonym": "Saint Vincentian",
"borders": [],
"area": 389
},
{
"name": {
"common": "Samoa",
"official": "Independent State of Samoa",
"native": {
"common": "S\u0101moa",
"official": "Malo Sa\u02bboloto Tuto\u02bbatasi o S\u0101moa"
}
},
"tld": [".ws"],
"cca2": "WS",
"ccn3": "882",
"cca3": "WSM",
"currency": ["WST"],
"callingCode": ["685"],
"capital": "Apia",
"altSpellings": ["WS", "Independent State of Samoa", "Malo Sa\u02bboloto Tuto\u02bbatasi o S\u0101moa"],
"relevance": "0",
"region": "Oceania",
"subregion": "Polynesia",
"nativeLanguage": "smo",
"languages": {
"eng": "English",
"smo": "Samoan"
},
"translations": {
"deu": "Samoa",
"fra": "Samoa",
"hrv": "Samoa",
"ita": "Samoa",
"jpn": "\u30b5\u30e2\u30a2",
"nld": "Samoa",
"rus": "\u0421\u0430\u043c\u043e\u0430",
"spa": "Samoa"
},
"latlng": [-13.58333333, -172.33333333],
"demonym": "Samoan",
"borders": [],
"area": 2842
},
{
"name": {
"common": "San Marino",
"official": "Most Serene Republic of San Marino",
"native": {
"common": "San Marino",
"official": "Serenissima Repubblica di San Marino"
}
},
"tld": [".sm"],
"cca2": "SM",
"ccn3": "674",
"cca3": "SMR",
"currency": ["EUR"],
"callingCode": ["378"],
"capital": "City of San Marino",
"altSpellings": ["SM", "Republic of San Marino", "Repubblica di San Marino"],
"relevance": "0",
"region": "Europe",
"subregion": "Southern Europe",
"nativeLanguage": "ita",
"languages": {
"ita": "Italian"
},
"translations": {
"deu": "San Marino",
"fra": "Saint-Marin",
"hrv": "San Marino",
"ita": "San Marino",
"jpn": "\u30b5\u30f3\u30de\u30ea\u30ce",
"nld": "San Marino",
"rus": "\u0421\u0430\u043d-\u041c\u0430\u0440\u0438\u043d\u043e",
"spa": "San Marino"
},
"latlng": [43.76666666, 12.41666666],
"demonym": "Sammarinese",
"borders": ["ITA"],
"area": 61
},
{
"name": {
"common": "S\u00e3o Tom\u00e9 and Pr\u00edncipe",
"official": "Democratic Republic of S\u00e3o Tom\u00e9 and Pr\u00edncipe",
"native": {
"common": "S\u00e3o Tom\u00e9 e Pr\u00edncipe",
"official": "Rep\u00fablica Democr\u00e1tica do S\u00e3o Tom\u00e9 e Pr\u00edncipe"
}
},
"tld": [".st"],
"cca2": "ST",
"ccn3": "678",
"cca3": "STP",
"currency": ["STD"],
"callingCode": ["239"],
"capital": "S\u00e3o Tom\u00e9",
"altSpellings": ["ST", "Democratic Republic of S\u00e3o Tom\u00e9 and Pr\u00edncipe", "Rep\u00fablica Democr\u00e1tica de S\u00e3o Tom\u00e9 e Pr\u00edncipe"],
"relevance": "0",
"region": "Africa",
"subregion": "Middle Africa",
"nativeLanguage": "por",
"languages": {
"por": "Portuguese"
},
"translations": {
"deu": "S\u00e3o Tom\u00e9 und Pr\u00edncipe",
"fra": "Sao Tom\u00e9-et-Principe",
"hrv": "Sveti Toma i Princip",
"ita": "S\u00e3o Tom\u00e9 e Pr\u00edncipe",
"jpn": "\u30b5\u30f3\u30c8\u30e1\u30fb\u30d7\u30ea\u30f3\u30b7\u30da",
"nld": "Sao Tom\u00e9 en Principe",
"rus": "\u0421\u0430\u043d-\u0422\u043e\u043c\u0435 \u0438 \u041f\u0440\u0438\u043d\u0441\u0438\u043f\u0438",
"spa": "Santo Tom\u00e9 y Pr\u00edncipe"
},
"latlng": [1, 7],
"demonym": "Sao Tomean",
"borders": [],
"area": 964
},
{
"name": {
"common": "Saudi Arabia",
"official": "Kingdom of Saudi Arabia",
"native": {
"common": "\u0627\u0644\u0639\u0631\u0628\u064a\u0629 \u0627\u0644\u0633\u0639\u0648\u062f\u064a\u0629",
"official": "\u0627\u0644\u0645\u0645\u0644\u0643\u0629 \u0627\u0644\u0639\u0631\u0628\u064a\u0629 \u0627\u0644\u0633\u0639\u0648\u062f\u064a\u0629"
}
},
"tld": [".sa", ".\u0627\u0644\u0633\u0639\u0648\u062f\u064a\u0629"],
"cca2": "SA",
"ccn3": "682",
"cca3": "SAU",
"currency": ["SAR"],
"callingCode": ["966"],
"capital": "Riyadh",
"altSpellings": ["Saudi", "SA", "Kingdom of Saudi Arabia", "Al-Mamlakah al-\u2018Arabiyyah as-Su\u2018\u016bdiyyah"],
"relevance": "0",
"region": "Asia",
"subregion": "Western Asia",
"nativeLanguage": "ara",
"languages": {
"ara": "Arabic"
},
"translations": {
"deu": "Saudi-Arabien",
"fra": "Arabie Saoudite",
"hrv": "Saudijska Arabija",
"ita": "Arabia Saudita",
"jpn": "\u30b5\u30a6\u30b8\u30a2\u30e9\u30d3\u30a2",
"nld": "Saoedi-Arabi\u00eb",
"rus": "\u0421\u0430\u0443\u0434\u043e\u0432\u0441\u043a\u0430\u044f \u0410\u0440\u0430\u0432\u0438\u044f",
"spa": "Arabia Saud\u00ed"
},
"latlng": [25, 45],
"demonym": "Saudi Arabian",
"borders": ["IRQ", "JOR", "KWT", "OMN", "QAT", "ARE", "YEM"],
"area": 2149690
},
{
"name": {
"common": "Senegal",
"official": "Republic of Senegal",
"native": {
"common": "S\u00e9n\u00e9gal",
"official": "R\u00e9publique du S\u00e9n\u00e9gal"
}
},
"tld": [".sn"],
"cca2": "SN",
"ccn3": "686",
"cca3": "SEN",
"currency": ["XOF"],
"callingCode": ["221"],
"capital": "Dakar",
"altSpellings": ["SN", "Republic of Senegal", "R\u00e9publique du S\u00e9n\u00e9gal"],
"relevance": "0",
"region": "Africa",
"subregion": "Western Africa",
"nativeLanguage": "fra",
"languages": {
"fra": "French"
},
"translations": {
"deu": "Senegal",
"fra": "S\u00e9n\u00e9gal",
"hrv": "Senegal",
"ita": "Senegal",
"jpn": "\u30bb\u30cd\u30ac\u30eb",
"nld": "Senegal",
"rus": "\u0421\u0435\u043d\u0435\u0433\u0430\u043b",
"spa": "Senegal"
},
"latlng": [14, -14],
"demonym": "Senegalese",
"borders": ["GMB", "GIN", "GNB", "MLI", "MRT"],
"area": 196722
},
{
"name": {
"common": "Serbia",
"official": "Republic of Serbia",
"native": {
"common": "\u0421\u0440\u0431\u0438\u0458\u0430",
"official": "\u0420\u0435\u043f\u0443\u0431\u043b\u0438\u043a\u0430 \u0421\u0440\u0431\u0438\u0458\u0430"
}
},
"tld": [".rs", ".\u0441\u0440\u0431"],
"cca2": "RS",
"ccn3": "688",
"cca3": "SRB",
"currency": ["RSD"],
"callingCode": ["381"],
"capital": "Belgrade",
"altSpellings": ["RS", "Srbija", "Republic of Serbia", "\u0420\u0435\u043f\u0443\u0431\u043b\u0438\u043a\u0430 \u0421\u0440\u0431\u0438\u0458\u0430", "Republika Srbija"],
"relevance": "0",
"region": "Europe",
"subregion": "Southern Europe",
"nativeLanguage": "srp",
"languages": {
"srp": "Serbian"
},
"translations": {
"deu": "Serbien",
"fra": "Serbie",
"hrv": "Srbija",
"ita": "Serbia",
"jpn": "\u30bb\u30eb\u30d3\u30a2",
"nld": "Servi\u00eb",
"rus": "\u0421\u0435\u0440\u0431\u0438\u044f",
"spa": "Serbia"
},
"latlng": [44, 21],
"demonym": "Serbian",
"borders": ["BIH", "BGR", "HRV", "HUN", "KOS", "MKD", "MNE", "ROU"],
"area": 88361
},
{
"name": {
"common": "Seychelles",
"official": "Republic of Seychelles",
"native": {
"common": "Seychelles",
"official": "R\u00e9publique des Seychelles"
}
},
"tld": [".sc"],
"cca2": "SC",
"ccn3": "690",
"cca3": "SYC",
"currency": ["SCR"],
"callingCode": ["248"],
"capital": "Victoria",
"altSpellings": ["SC", "Republic of Seychelles", "Repiblik Sesel", "R\u00e9publique des Seychelles"],
"relevance": "0.5",
"region": "Africa",
"subregion": "Eastern Africa",
"nativeLanguage": "fra",
"languages": {
"crs": "Seychellois Creole",
"eng": "English",
"fra": "French"
},
"translations": {
"deu": "Seychellen",
"fra": "Seychelles",
"hrv": "Sej\u0161eli",
"ita": "Seychelles",
"jpn": "\u30bb\u30fc\u30b7\u30a7\u30eb",
"nld": "Seychellen",
"rus": "\u0421\u0435\u0439\u0448\u0435\u043b\u044c\u0441\u043a\u0438\u0435 \u041e\u0441\u0442\u0440\u043e\u0432\u0430",
"spa": "Seychelles"
},
"latlng": [-4.58333333, 55.66666666],
"demonym": "Seychellois",
"borders": [],
"area": 452
},
{
"name": {
"common": "Sierra Leone",
"official": "Republic of Sierra Leone",
"native": {
"common": "Sierra Leone",
"official": "Republic of Sierra Leone"
}
},
"tld": [".sl"],
"cca2": "SL",
"ccn3": "694",
"cca3": "SLE",
"currency": ["SLL"],
"callingCode": ["232"],
"capital": "Freetown",
"altSpellings": ["SL", "Republic of Sierra Leone"],
"relevance": "0",
"region": "Africa",
"subregion": "Western Africa",
"nativeLanguage": "eng",
"languages": {
"eng": "English"
},
"translations": {
"deu": "Sierra Leone",
"fra": "Sierra Leone",
"hrv": "Sijera Leone",
"ita": "Sierra Leone",
"jpn": "\u30b7\u30a8\u30e9\u30ec\u30aa\u30cd",
"nld": "Sierra Leone",
"rus": "\u0421\u044c\u0435\u0440\u0440\u0430-\u041b\u0435\u043e\u043d\u0435",
"spa": "Sierra Leone"
},
"latlng": [8.5, -11.5],
"demonym": "Sierra Leonean",
"borders": ["GIN", "LBR"],
"area": 71740
},
{
"name": {
"common": "Singapore",
"official": "Republic of Singapore",
"native": {
"common": "Singapore",
"official": "Republic of Singapore"
}
},
"tld": [".sg", ".\u65b0\u52a0\u5761", ".\u0b9a\u0bbf\u0b99\u0bcd\u0b95\u0baa\u0bcd\u0baa\u0bc2\u0bb0\u0bcd"],
"cca2": "SG",
"ccn3": "702",
"cca3": "SGP",
"currency": ["SGD"],
"callingCode": ["65"],
"capital": "Singapore",
"altSpellings": ["SG", "Singapura", "Republik Singapura", "\u65b0\u52a0\u5761\u5171\u548c\u56fd"],
"relevance": "0",
"region": "Asia",
"subregion": "South-Eastern Asia",
"nativeLanguage": "eng",
"languages": {
"cmn": "Mandarin",
"eng": "English",
"msa": "Malay",
"tam": "Tamil"
},
"translations": {
"deu": "Singapur",
"fra": "Singapour",
"hrv": "Singapur",
"ita": "Singapore",
"jpn": "\u30b7\u30f3\u30ac\u30dd\u30fc\u30eb",
"nld": "Singapore",
"rus": "\u0421\u0438\u043d\u0433\u0430\u043f\u0443\u0440",
"spa": "Singapur"
},
"latlng": [1.36666666, 103.8],
"demonym": "Singaporean",
"borders": [],
"area": 710
},
{
"name": {
"common": "Sint Maarten",
"official": "Sint Maarten",
"native": {
"common": "Sint Maarten",
"official": "Sint Maarten"
}
},
"tld": [".sx"],
"cca2": "SX",
"ccn3": "534",
"cca3": "SXM",
"currency": ["ANG"],
"callingCode": ["1721"],
"capital": "Philipsburg",
"altSpellings": ["SX"],
"relevance": "0",
"region": "Americas",
"subregion": "Caribbean",
"nativeLanguage": "nld",
"languages": {
"eng": "English",
"fra": "French",
"nld": "Dutch"
},
"translations": {
"deu": "Sint Maarten",
"fra": "Saint-Martin",
"ita": "Sint Maarten",
"jpn": "\u30b7\u30f3\u30c8\u30fb\u30de\u30fc\u30eb\u30c6\u30f3",
"nld": "Sint Maarten",
"rus": "\u0421\u0438\u043d\u0442-\u041c\u0430\u0440\u0442\u0435\u043d",
"spa": "Sint Maarten"
},
"latlng": [18.033333, -63.05],
"demonym": "St. Maartener",
"borders": ["MAF"],
"area": 34
},
{
"name": {
"common": "Slovakia",
"official": "Slovak Republic",
"native": {
"common": "Slovensko",
"official": "slovensk\u00e1 republika"
}
},
"tld": [".sk"],
"cca2": "SK",
"ccn3": "703",
"cca3": "SVK",
"currency": ["EUR"],
"callingCode": ["421"],
"capital": "Bratislava",
"altSpellings": ["SK", "Slovak Republic", "Slovensk\u00e1 republika"],
"relevance": "0",
"region": "Europe",
"subregion": "Eastern Europe",
"nativeLanguage": "slk",
"languages": {
"slk": "Slovak"
},
"translations": {
"deu": "Slowakei",
"fra": "Slovaquie",
"hrv": "Slova\u010dka",
"ita": "Slovacchia",
"jpn": "\u30b9\u30ed\u30d0\u30ad\u30a2",
"nld": "Slowakije",
"rus": "\u0421\u043b\u043e\u0432\u0430\u043a\u0438\u044f",
"spa": "Rep\u00fablica Eslovaca"
},
"latlng": [48.66666666, 19.5],
"demonym": "Slovak",
"borders": ["AUT", "CZE", "HUN", "POL", "UKR"],
"area": 49037
},
{
"name": {
"common": "Slovenia",
"official": "Republic of Slovenia",
"native": {
"common": "Slovenija",
"official": "Republika Slovenija"
}
},
"tld": [".si"],
"cca2": "SI",
"ccn3": "705",
"cca3": "SVN",
"currency": ["EUR"],
"callingCode": ["386"],
"capital": "Ljubljana",
"altSpellings": ["SI", "Republic of Slovenia", "Republika Slovenija"],
"relevance": "0",
"region": "Europe",
"subregion": "Southern Europe",
"nativeLanguage": "slv",
"languages": {
"slv": "Slovene"
},
"translations": {
"deu": "Slowenien",
"fra": "Slov\u00e9nie",
"hrv": "Slovenija",
"ita": "Slovenia",
"jpn": "\u30b9\u30ed\u30d9\u30cb\u30a2",
"nld": "Sloveni\u00eb",
"rus": "\u0421\u043b\u043e\u0432\u0435\u043d\u0438\u044f",
"spa": "Eslovenia"
},
"latlng": [46.11666666, 14.81666666],
"demonym": "Slovene",
"borders": ["AUT", "HRV", "ITA", "HUN"],
"area": 20273
},
{
"name": {
"common": "Solomon Islands",
"official": "Solomon Islands",
"native": {
"common": "Solomon Islands",
"official": "Solomon Islands"
}
},
"tld": [".sb"],
"cca2": "SB",
"ccn3": "090",
"cca3": "SLB",
"currency": ["SDB"],
"callingCode": ["677"],
"capital": "Honiara",
"altSpellings": ["SB"],
"relevance": "0",
"region": "Oceania",
"subregion": "Melanesia",
"nativeLanguage": "eng",
"languages": {
"eng": "English"
},
"translations": {
"deu": "Salomonen",
"fra": "\u00celes Salomon",
"hrv": "Solomonski Otoci",
"ita": "Isole Salomone",
"jpn": "\u30bd\u30ed\u30e2\u30f3\u8af8\u5cf6",
"nld": "Salomonseilanden",
"rus": "\u0421\u043e\u043b\u043e\u043c\u043e\u043d\u043e\u0432\u044b \u041e\u0441\u0442\u0440\u043e\u0432\u0430",
"spa": "Islas Salom\u00f3n"
},
"latlng": [-8, 159],
"demonym": "Solomon Islander",
"borders": [],
"area": 28896
},
{
"name": {
"common": "Somalia",
"official": "Federal Republic of Somalia",
"native": {
"common": "Soomaaliya",
"official": "Jamhuuriyadda Federaalka Soomaaliya"
}
},
"tld": [".so"],
"cca2": "SO",
"ccn3": "706",
"cca3": "SOM",
"currency": ["SOS"],
"callingCode": ["252"],
"capital": "Mogadishu",
"altSpellings": ["SO", "a\u1e63-\u1e62\u016bm\u0101l", "Federal Republic of Somalia", "Jamhuuriyadda Federaalka Soomaaliya", "Jumh\u016briyyat a\u1e63-\u1e62\u016bm\u0101l al-Fider\u0101liyya"],
"relevance": "0",
"region": "Africa",
"subregion": "Eastern Africa",
"nativeLanguage": "som",
"languages": {
"ara": "Arabic",
"som": "Somali"
},
"translations": {
"deu": "Somalia",
"fra": "Somalie",
"hrv": "Somalija",
"ita": "Somalia",
"jpn": "\u30bd\u30de\u30ea\u30a2",
"nld": "Somali\u00eb",
"rus": "\u0421\u043e\u043c\u0430\u043b\u0438",
"spa": "Somalia"
},
"latlng": [10, 49],
"demonym": "Somali",
"borders": ["DJI", "ETH", "KEN"],
"area": 637657
},
{
"name": {
"common": "South Africa",
"official": "Republic of South Africa",
"native": {
"common": "South Africa",
"official": "Republiek van Suid-Afrika"
}
},
"tld": [".za"],
"cca2": "ZA",
"ccn3": "710",
"cca3": "ZAF",
"currency": ["ZAR"],
"callingCode": ["27"],
"capital": "Pretoria",
"altSpellings": ["ZA", "RSA", "Suid-Afrika", "Republic of South Africa"],
"relevance": "0",
"region": "Africa",
"subregion": "Southern Africa",
"nativeLanguage": "afr",
"languages": {
"afr": "Afrikaans",
"eng": "English",
"nbl": "Southern Ndebele",
"nso": "Northern Sotho",
"sot": "Sotho",
"ssw": "Swazi",
"tsn": "Tswana",
"tso": "Tsonga",
"ven": "Venda",
"xho": "Xhosa",
"zul": "Zulu"
},
"translations": {
"deu": "Republik S\u00fcdafrika",
"fra": "Afrique du Sud",
"hrv": "Ju\u017enoafri\u010dka Republika",
"ita": "Sud Africa",
"jpn": "\u5357\u30a2\u30d5\u30ea\u30ab",
"nld": "Zuid-Afrika",
"rus": "\u042e\u0436\u043d\u043e-\u0410\u0444\u0440\u0438\u043a\u0430\u043d\u0441\u043a\u0430\u044f \u0420\u0435\u0441\u043f\u0443\u0431\u043b\u0438\u043a\u0430",
"spa": "Rep\u00fablica de Sud\u00e1frica"
},
"latlng": [-29, 24],
"demonym": "South African",
"borders": ["BWA", "LSO", "MOZ", "NAM", "SWZ", "ZWE"],
"area": 1221037
},
{
"name": {
"common": "South Georgia",
"official": "South Georgia and the South Sandwich Islands",
"native": {
"common": "South Georgia",
"official": "South Georgia and the South Sandwich Islands"
}
},
"tld": [".gs"],
"cca2": "GS",
"ccn3": "239",
"cca3": "SGS",
"currency": ["GBP"],
"callingCode": ["500"],
"capital": "King Edward Point",
"altSpellings": ["GS", "South Georgia and the South Sandwich Islands"],
"relevance": "0",
"region": "Americas",
"subregion": "South America",
"nativeLanguage": "eng",
"languages": {
"eng": "English"
},
"translations": {
"deu": "S\u00fcdgeorgien und die S\u00fcdlichen Sandwichinseln",
"fra": "G\u00e9orgie du Sud-et-les \u00celes Sandwich du Sud",
"hrv": "Ju\u017ena Georgija i oto\u010dje Ju\u017eni Sandwich",
"ita": "Georgia del Sud e Isole Sandwich Meridionali",
"jpn": "\u30b5\u30a6\u30b9\u30b8\u30e7\u30fc\u30b8\u30a2\u30fb\u30b5\u30a6\u30b9\u30b5\u30f3\u30c9\u30a6\u30a3\u30c3\u30c1\u8af8\u5cf6",
"nld": "Zuid-Georgia en Zuidelijke Sandwicheilanden",
"rus": "\u042e\u0436\u043d\u0430\u044f \u0413\u0435\u043e\u0440\u0433\u0438\u044f \u0438 \u042e\u0436\u043d\u044b\u0435 \u0421\u0430\u043d\u0434\u0432\u0438\u0447\u0435\u0432\u044b \u043e\u0441\u0442\u0440\u043e\u0432\u0430",
"spa": "Islas Georgias del Sur y Sandwich del Sur"
},
"latlng": [-54.5, -37],
"demonym": "South Georgian South Sandwich Islander",
"borders": [],
"area": 3903
},
{
"name": {
"common": "South Korea",
"official": "Republic of Korea",
"native": {
"common": "\ub300\ud55c\ubbfc\uad6d",
"official": "\ud55c\uad6d"
}
},
"tld": [".kr", ".\ud55c\uad6d"],
"cca2": "KR",
"ccn3": "410",
"cca3": "KOR",
"currency": ["KRW"],
"callingCode": ["82"],
"capital": "Seoul",
"altSpellings": ["KR", "Republic of Korea"],
"relevance": "1.5",
"region": "Asia",
"subregion": "Eastern Asia",
"nativeLanguage": "kor",
"languages": {
"kor": "Korean"
},
"translations": {
"deu": "S\u00fcdkorea",
"fra": "Cor\u00e9e du Sud",
"hrv": "Ju\u017ena Koreja",
"ita": "Corea del Sud",
"jpn": "\u5927\u97d3\u6c11\u56fd",
"nld": "Zuid-Korea",
"rus": "\u042e\u0436\u043d\u0430\u044f \u041a\u043e\u0440\u0435\u044f",
"spa": "Corea del Sur"
},
"latlng": [37, 127.5],
"demonym": "South Korean",
"borders": ["PRK"],
"area": 100210
},
{
"name": {
"common": "South Sudan",
"official": "Republic of South SudanSouth Sudan",
"native": {
"common": "South Sudan",
"official": "Republic of South SudanSouth Sudan"
}
},
"tld": [".ss"],
"cca2": "SS",
"ccn3": "728",
"cca3": "SSD",
"currency": ["SSP"],
"callingCode": ["211"],
"capital": "Juba",
"altSpellings": ["SS"],
"relevance": "0",
"region": "Africa",
"subregion": "Middle Africa",
"nativeLanguage": "eng",
"languages": {
"eng": "English"
},
"translations": {
"deu": "S\u00fcdsudan",
"fra": "Soudan du Sud",
"hrv": "Ju\u017eni Sudan",
"ita": "Sudan del sud",
"jpn": "\u5357\u30b9\u30fc\u30c0\u30f3",
"nld": "Zuid-Soedan",
"rus": "\u042e\u0436\u043d\u044b\u0439 \u0421\u0443\u0434\u0430\u043d",
"spa": "Sud\u00e1n del Sur"
},
"latlng": [7, 30],
"demonym": "South Sudanese",
"borders": ["CAF", "COD", "ETH", "KEN", "SDN", "UGA"],
"area": 619745
},
{
"name": {
"common": "Spain",
"official": "Kingdom of Spain",
"native": {
"common": "Espa\u00f1a",
"official": "Reino de Espa\u00f1a"
}
},
"tld": [".es"],
"cca2": "ES",
"ccn3": "724",
"cca3": "ESP",
"currency": ["EUR"],
"callingCode": ["34"],
"capital": "Madrid",
"altSpellings": ["ES", "Kingdom of Spain", "Reino de Espa\u00f1a"],
"relevance": "2",
"region": "Europe",
"subregion": "Southern Europe",
"nativeLanguage": "spa",
"languages": {
"cat": "Catalan",
"eus": "Basque",
"glg": "Galician",
"oci": "Occitan",
"spa": "Spanish"
},
"translations": {
"deu": "Spanien",
"fra": "Espagne",
"hrv": "\u0160panjolska",
"ita": "Spagna",
"jpn": "\u30b9\u30da\u30a4\u30f3",
"nld": "Spanje",
"rus": "\u0418\u0441\u043f\u0430\u043d\u0438\u044f",
"spa": "Espa\u00f1a"
},
"latlng": [40, -4],
"demonym": "Spanish",
"borders": ["AND", "FRA", "GIB", "PRT", "MAR"],
"area": 505992
},
{
"name": {
"common": "Sri Lanka",
"official": "Democratic Socialist Republic of Sri Lanka",
"native": {
"common": "\u0dc1\u0dca\u200d\u0dbb\u0dd3 \u0dbd\u0d82\u0d9a\u0dcf\u0dc0",
"official": "\u0dc1\u0dca\u200d\u0dbb\u0dd3 \u0dbd\u0d82\u0d9a\u0dcf \u0db4\u0dca\u200d\u0dbb\u0da2\u0dcf\u0dad\u0dcf\u0db1\u0dca\u0dad\u0dca\u200d\u0dbb\u0dd2\u0d9a \u0dc3\u0db8\u0dcf\u0da2\u0dc0\u0dcf\u0daf\u0dd3 \u0da2\u0db1\u0dbb\u0da2\u0dba"
}
},
"tld": [".lk", ".\u0b87\u0bb2\u0b99\u0bcd\u0b95\u0bc8", ".\u0dbd\u0d82\u0d9a\u0dcf"],
"cca2": "LK",
"ccn3": "144",
"cca3": "LKA",
"currency": ["LKR"],
"callingCode": ["94"],
"capital": "Colombo",
"altSpellings": ["LK", "ila\u1e45kai", "Democratic Socialist Republic of Sri Lanka"],
"relevance": "0",
"region": "Asia",
"subregion": "Southern Asia",
"nativeLanguage": "sin",
"languages": {
"sin": "Sinhala",
"tam": "Tamil"
},
"translations": {
"deu": "Sri Lanka",
"fra": "Sri Lanka",
"hrv": "\u0160ri Lanka",
"ita": "Sri Lanka",
"jpn": "\u30b9\u30ea\u30e9\u30f3\u30ab",
"nld": "Sri Lanka",
"rus": "\u0428\u0440\u0438-\u041b\u0430\u043d\u043a\u0430",
"spa": "Sri Lanka"
},
"latlng": [7, 81],
"demonym": "Sri Lankan",
"borders": ["IND"],
"area": 65610
},
{
"name": {
"common": "Sudan",
"official": "Republic of the Sudan",
"native": {
"common": "\u0627\u0644\u0633\u0648\u062f\u0627\u0646",
"official": "\u062c\u0645\u0647\u0648\u0631\u064a\u0629 \u0627\u0644\u0633\u0648\u062f\u0627\u0646"
}
},
"tld": [".sd"],
"cca2": "SD",
"ccn3": "729",
"cca3": "SDN",
"currency": ["SDG"],
"callingCode": ["249"],
"capital": "Khartoum",
"altSpellings": ["SD", "Republic of the Sudan", "Jumh\u016br\u012byat as-S\u016bd\u0101n"],
"relevance": "0",
"region": "Africa",
"subregion": "Northern Africa",
"nativeLanguage": "ara",
"languages": {
"ara": "Arabic",
"eng": "English"
},
"translations": {
"deu": "Sudan",
"fra": "Soudan",
"hrv": "Sudan",
"ita": "Sudan",
"jpn": "\u30b9\u30fc\u30c0\u30f3",
"nld": "Soedan",
"rus": "\u0421\u0443\u0434\u0430\u043d",
"spa": "Sud\u00e1n"
},
"latlng": [15, 30],
"demonym": "Sudanese",
"borders": ["CAF", "TCD", "EGY", "ERI", "ETH", "LBY", "SSD"],
"area": 1886068
},
{
"name": {
"common": "Suriname",
"official": "Republic of Suriname",
"native": {
"common": "Suriname",
"official": "Republiek Suriname"
}
},
"tld": [".sr"],
"cca2": "SR",
"ccn3": "740",
"cca3": "SUR",
"currency": ["SRD"],
"callingCode": ["597"],
"capital": "Paramaribo",
"altSpellings": ["SR", "Sarnam", "Sranangron", "Republic of Suriname", "Republiek Suriname"],
"relevance": "0",
"region": "Americas",
"subregion": "South America",
"nativeLanguage": "nld",
"languages": {
"nld": "Dutch"
},
"translations": {
"deu": "Suriname",
"fra": "Surinam",
"hrv": "Surinam",
"ita": "Suriname",
"jpn": "\u30b9\u30ea\u30ca\u30e0",
"nld": "Suriname",
"rus": "\u0421\u0443\u0440\u0438\u043d\u0430\u043c",
"spa": "Surinam"
},
"latlng": [4, -56],
"demonym": "Surinamer",
"borders": ["BRA", "GUF", "GUY"],
"area": 163820
},
{
"name": {
"common": "Svalbard and Jan Mayen",
"official": "Svalbard og Jan Mayen",
"native": {
"common": "Svalbard og Jan Mayen",
"official": "Svalbard og Jan Mayen"
}
},
"tld": [".sj"],
"cca2": "SJ",
"ccn3": "744",
"cca3": "SJM",
"currency": ["NOK"],
"callingCode": ["4779"],
"capital": "Longyearbyen",
"altSpellings": ["SJ", "Svalbard and Jan Mayen Islands"],
"relevance": "0.5",
"region": "Europe",
"subregion": "Northern Europe",
"nativeLanguage": "nor",
"languages": {
"nor": "Norwegian"
},
"translations": {
"deu": "Svalbard und Jan Mayen",
"fra": "Svalbard et Jan Mayen",
"hrv": "Svalbard i Jan Mayen",
"ita": "Svalbard e Jan Mayen",
"jpn": "\u30b9\u30f4\u30a1\u30fc\u30eb\u30d0\u30eb\u8af8\u5cf6\u304a\u3088\u3073\u30e4\u30f3\u30de\u30a4\u30a8\u30f3\u5cf6",
"nld": "Svalbard en Jan Mayen",
"rus": "\u0428\u043f\u0438\u0446\u0431\u0435\u0440\u0433\u0435\u043d \u0438 \u042f\u043d-\u041c\u0430\u0439\u0435\u043d",
"spa": "Islas Svalbard y Jan Mayen"
},
"latlng": [78, 20],
"demonym": "Norwegian",
"borders": [],
"area": -1
},
{
"name": {
"common": "Swaziland",
"official": "Kingdom of Swaziland",
"native": {
"common": "Swaziland",
"official": "Kingdom of Swaziland"
}
},
"tld": [".sz"],
"cca2": "SZ",
"ccn3": "748",
"cca3": "SWZ",
"currency": ["SZL"],
"callingCode": ["268"],
"capital": "Lobamba",
"altSpellings": ["SZ", "weSwatini", "Swatini", "Ngwane", "Kingdom of Swaziland", "Umbuso waseSwatini"],
"relevance": "0",
"region": "Africa",
"subregion": "Southern Africa",
"nativeLanguage": "ssw",
"languages": {
"eng": "English",
"ssw": "Swazi"
},
"translations": {
"deu": "Swasiland",
"fra": "Swaziland",
"hrv": "Svazi",
"ita": "Swaziland",
"jpn": "\u30b9\u30ef\u30b8\u30e9\u30f3\u30c9",
"nld": "Swaziland",
"rus": "\u0421\u0432\u0430\u0437\u0438\u043b\u0435\u043d\u0434",
"spa": "Suazilandia"
},
"latlng": [-26.5, 31.5],
"demonym": "Swazi",
"borders": ["MOZ", "ZAF"],
"area": 17364
},
{
"name": {
"common": "Sweden",
"official": "Kingdom of Sweden",
"native": {
"common": "Sverige",
"official": "Konungariket Sverige"
}
},
"tld": [".se"],
"cca2": "SE",
"ccn3": "752",
"cca3": "SWE",
"currency": ["SEK"],
"callingCode": ["46"],
"capital": "Stockholm",
"altSpellings": ["SE", "Kingdom of Sweden", "Konungariket Sverige"],
"relevance": "1.5",
"region": "Europe",
"subregion": "Northern Europe",
"nativeLanguage": "swe",
"languages": {
"swe": "Swedish"
},
"translations": {
"deu": "Schweden",
"fra": "Su\u00e8de",
"hrv": "\u0160vedska",
"ita": "Svezia",
"jpn": "\u30b9\u30a6\u30a7\u30fc\u30c7\u30f3",
"nld": "Zweden",
"rus": "\u0428\u0432\u0435\u0446\u0438\u044f",
"spa": "Suecia"
},
"latlng": [62, 15],
"demonym": "Swedish",
"borders": ["FIN", "NOR"],
"area": 450295
},
{
"name": {
"common": "Switzerland",
"official": "Swiss Confederation",
"native": {
"common": "Schweiz",
"official": "Schweizerische Eidgenossenschaft"
}
},
"tld": [".ch"],
"cca2": "CH",
"ccn3": "756",
"cca3": "CHE",
"currency": ["CHE", "CHF", "CHW"],
"callingCode": ["41"],
"capital": "Bern",
"altSpellings": ["CH", "Swiss Confederation", "Schweiz", "Suisse", "Svizzera", "Svizra"],
"relevance": "1.5",
"region": "Europe",
"subregion": "Western Europe",
"nativeLanguage": "deu",
"languages": {
"deu": "German",
"fra": "French",
"ita": "Italian",
"roh": "Romansh"
},
"translations": {
"deu": "Schweiz",
"fra": "Suisse",
"hrv": "\u0160vicarska",
"ita": "Svizzera",
"jpn": "\u30b9\u30a4\u30b9",
"nld": "Zwitserland",
"rus": "\u0428\u0432\u0435\u0439\u0446\u0430\u0440\u0438\u044f",
"spa": "Suiza"
},
"latlng": [47, 8],
"demonym": "Swiss",
"borders": ["AUT", "FRA", "ITA", "LIE", "DEU"],
"area": 41284
},
{
"name": {
"common": "Syria",
"official": "Syrian Arab Republic",
"native": {
"common": "\u0633\u0648\u0631\u064a\u0627",
"official": "\u0627\u0644\u062c\u0645\u0647\u0648\u0631\u064a\u0629 \u0627\u0644\u0639\u0631\u0628\u064a\u0629 \u0627\u0644\u0633\u0648\u0631\u064a\u0629"
}
},
"tld": [".sy", "\u0633\u0648\u0631\u064a\u0627."],
"cca2": "SY",
"ccn3": "760",
"cca3": "SYR",
"currency": ["SYP"],
"callingCode": ["963"],
"capital": "Damascus",
"altSpellings": ["SY", "Syrian Arab Republic", "Al-Jumh\u016br\u012byah Al-\u02bbArab\u012byah As-S\u016br\u012byah"],
"relevance": "0",
"region": "Asia",
"subregion": "Western Asia",
"nativeLanguage": "ara",
"languages": {
"ara": "Arabic"
},
"translations": {
"deu": "Syrien",
"fra": "Syrie",
"hrv": "Sirija",
"ita": "Siria",
"jpn": "\u30b7\u30ea\u30a2\u30fb\u30a2\u30e9\u30d6\u5171\u548c\u56fd",
"nld": "Syri\u00eb",
"rus": "\u0421\u0438\u0440\u0438\u044f",
"spa": "Siria"
},
"latlng": [35, 38],
"demonym": "Syrian",
"borders": ["IRQ", "ISR", "JOR", "LBN", "TUR"],
"area": 185180
},
{
"name": {
"common": "Taiwan",
"official": "Republic of China",
"native": {
"common": "\u81fa\u7063",
"official": "\u4e2d\u534e\u6c11\u56fd"
}
},
"tld": [".tw", ".\u53f0\u6e7e", ".\u53f0\u7063"],
"cca2": "TW",
"ccn3": "158",
"cca3": "TWN",
"currency": ["TWD"],
"callingCode": ["886"],
"capital": "Taipei",
"altSpellings": ["TW", "T\u00e1iw\u0101n", "Republic of China", "\u4e2d\u83ef\u6c11\u570b", "Zh\u014dnghu\u00e1 M\u00edngu\u00f3"],
"relevance": "0",
"region": "Asia",
"subregion": "Eastern Asia",
"nativeLanguage": "cmn",
"languages": {
"cmn": "Mandarin"
},
"translations": {
"deu": "Taiwan",
"fra": "Ta\u00efwan",
"hrv": "Tajvan",
"ita": "Taiwan",
"jpn": "\u53f0\u6e7e\uff08\u53f0\u6e7e\u7701/\u4e2d\u83ef\u6c11\u56fd\uff09",
"nld": "Taiwan",
"rus": "\u0422\u0430\u0439\u0432\u0430\u043d\u044c",
"spa": "Taiw\u00e1n"
},
"latlng": [23.5, 121],
"demonym": "Taiwanese",
"borders": [],
"area": 36193
},
{
"name": {
"common": "Tajikistan",
"official": "Republic of Tajikistan",
"native": {
"common": "\u0422\u043e\u04b7\u0438\u043a\u0438\u0441\u0442\u043e\u043d",
"official": "\u04b6\u0443\u043c\u04b3\u0443\u0440\u0438\u0438 \u0422\u043e\u04b7\u0438\u043a\u0438\u0441\u0442\u043e\u043d"
}
},
"tld": [".tj"],
"cca2": "TJ",
"ccn3": "762",
"cca3": "TJK",
"currency": ["TJS"],
"callingCode": ["992"],
"capital": "Dushanbe",
"altSpellings": ["TJ", "To\u00e7ikiston", "Republic of Tajikistan", "\u04b6\u0443\u043c\u04b3\u0443\u0440\u0438\u0438 \u0422\u043e\u04b7\u0438\u043a\u0438\u0441\u0442\u043e\u043d", "\u00c7umhuriyi To\u00e7ikiston"],
"relevance": "0",
"region": "Asia",
"subregion": "Central Asia",
"nativeLanguage": "tgk",
"languages": {
"rus": "Russian",
"tgk": "Tajik"
},
"translations": {
"deu": "Tadschikistan",
"fra": "Tadjikistan",
"hrv": "Ta\u0111ikistan",
"ita": "Tagikistan",
"jpn": "\u30bf\u30b8\u30ad\u30b9\u30bf\u30f3",
"nld": "Tadzjikistan",
"rus": "\u0422\u0430\u0434\u0436\u0438\u043a\u0438\u0441\u0442\u0430\u043d",
"spa": "Tayikist\u00e1n"
},
"latlng": [39, 71],
"demonym": "Tadzhik",
"borders": ["AFG", "CHN", "KGZ", "UZB"],
"area": 143100
},
{
"name": {
"common": "Tanzania",
"official": "United Republic of Tanzania",
"native": {
"common": "Tanzania",
"official": "Jamhuri ya Muungano wa Tanzania"
}
},
"tld": [".tz"],
"cca2": "TZ",
"ccn3": "834",
"cca3": "TZA",
"currency": ["TZS"],
"callingCode": ["255"],
"capital": "Dodoma",
"altSpellings": ["TZ", "United Republic of Tanzania", "Jamhuri ya Muungano wa Tanzania"],
"relevance": "0",
"region": "Africa",
"subregion": "Eastern Africa",
"nativeLanguage": "swa",
"languages": {
"eng": "English",
"swa": "Swahili"
},
"translations": {
"deu": "Tansania",
"fra": "Tanzanie",
"hrv": "Tanzanija",
"ita": "Tanzania",
"jpn": "\u30bf\u30f3\u30b6\u30cb\u30a2",
"nld": "Tanzania",
"rus": "\u0422\u0430\u043d\u0437\u0430\u043d\u0438\u044f",
"spa": "Tanzania"
},
"latlng": [-6, 35],
"demonym": "Tanzanian",
"borders": ["BDI", "COD", "KEN", "MWI", "MOZ", "RWA", "UGA", "ZMB"],
"area": 945087
},
{
"name": {
"common": "Thailand",
"official": "Kingdom of Thailand",
"native": {
"common": "\u0e1b\u0e23\u0e30\u0e40\u0e17\u0e28\u0e44\u0e17\u0e22",
"official": "\u0e23\u0e32\u0e0a\u0e2d\u0e32\u0e13\u0e32\u0e08\u0e31\u0e01\u0e23\u0e44\u0e17\u0e22"
}
},
"tld": [".th", ".\u0e44\u0e17\u0e22"],
"cca2": "TH",
"ccn3": "764",
"cca3": "THA",
"currency": ["THB"],
"callingCode": ["66"],
"capital": "Bangkok",
"altSpellings": ["TH", "Prathet", "Thai", "Kingdom of Thailand", "\u0e23\u0e32\u0e0a\u0e2d\u0e32\u0e13\u0e32\u0e08\u0e31\u0e01\u0e23\u0e44\u0e17\u0e22", "Ratcha Anachak Thai"],
"relevance": "0",
"region": "Asia",
"subregion": "South-Eastern Asia",
"nativeLanguage": "tha",
"languages": {
"tha": "Thai"
},
"translations": {
"deu": "Thailand",
"fra": "Tha\u00eflande",
"hrv": "Tajland",
"ita": "Tailandia",
"jpn": "\u30bf\u30a4",
"nld": "Thailand",
"rus": "\u0422\u0430\u0438\u043b\u0430\u043d\u0434",
"spa": "Tailandia"
},
"latlng": [15, 100],
"demonym": "Thai",
"borders": ["MMR", "KHM", "LAO", "MYS"],
"area": 513120
},
{
"name": {
"common": "Timor-Leste",
"official": "Democratic Republic of Timor-Leste",
"native": {
"common": "Timor-Leste",
"official": "Rep\u00fablica Democr\u00e1tica de Timor-Leste"
}
},
"tld": [".tl"],
"cca2": "TL",
"ccn3": "626",
"cca3": "TLS",
"currency": ["USD"],
"callingCode": ["670"],
"capital": "Dili",
"altSpellings": ["TL", "East Timor", "Democratic Republic of Timor-Leste", "Rep\u00fablica Democr\u00e1tica de Timor-Leste", "Rep\u00fablika Demokr\u00e1tika Tim\u00f3r-Leste"],
"relevance": "0",
"region": "Asia",
"subregion": "South-Eastern Asia",
"nativeLanguage": "por",
"languages": {
"por": "Portuguese",
"tet": "Tetum"
},
"translations": {
"deu": "Timor-Leste",
"fra": "Timor oriental",
"hrv": "Isto\u010dni Timor",
"ita": "Timor Est",
"jpn": "\u6771\u30c6\u30a3\u30e2\u30fc\u30eb",
"nld": "Oost-Timor",
"rus": "\u0412\u043e\u0441\u0442\u043e\u0447\u043d\u044b\u0439 \u0422\u0438\u043c\u043e\u0440",
"spa": "Timor Oriental"
},
"latlng": [-8.83333333, 125.91666666],
"demonym": "East Timorese",
"borders": ["IDN"],
"area": 14874
},
{
"name": {
"common": "Togo",
"official": "Togolese Republic",
"native": {
"common": "Togo",
"official": "R\u00e9publique togolaise"
}
},
"tld": [".tg"],
"cca2": "TG",
"ccn3": "768",
"cca3": "TGO",
"currency": ["XOF"],
"callingCode": ["228"],
"capital": "Lom\u00e9",
"altSpellings": ["TG", "Togolese", "Togolese Republic", "R\u00e9publique Togolaise"],
"relevance": "0",
"region": "Africa",
"subregion": "Western Africa",
"nativeLanguage": "fra",
"languages": {
"fra": "French"
},
"translations": {
"deu": "Togo",
"fra": "Togo",
"hrv": "Togo",
"ita": "Togo",
"jpn": "\u30c8\u30fc\u30b4",
"nld": "Togo",
"rus": "\u0422\u043e\u0433\u043e",
"spa": "Togo"
},
"latlng": [8, 1.16666666],
"demonym": "Togolese",
"borders": ["BEN", "BFA", "GHA"],
"area": 56785
},
{
"name": {
"common": "Tokelau",
"official": "Tokelau",
"native": {
"common": "Tokelau",
"official": "Tokelau"
}
},
"tld": [".tk"],
"cca2": "TK",
"ccn3": "772",
"cca3": "TKL",
"currency": ["NZD"],
"callingCode": ["690"],
"capital": "Fakaofo",
"altSpellings": ["TK"],
"relevance": "0.5",
"region": "Oceania",
"subregion": "Polynesia",
"nativeLanguage": "tkl",
"languages": {
"eng": "English",
"smo": "Samoan",
"tkl": "Tokelauan"
},
"translations": {
"deu": "Tokelau",
"fra": "Tokelau",
"hrv": "Tokelau",
"ita": "Isole Tokelau",
"jpn": "\u30c8\u30b1\u30e9\u30a6",
"nld": "Tokelau",
"rus": "\u0422\u043e\u043a\u0435\u043b\u0430\u0443",
"spa": "Islas Tokelau"
},
"latlng": [-9, -172],
"demonym": "Tokelauan",
"borders": [],
"area": 12
},
{
"name": {
"common": "Tonga",
"official": "Kingdom of Tonga",
"native": {
"common": "Tonga",
"official": "Kingdom of Tonga"
}
},
"tld": [".to"],
"cca2": "TO",
"ccn3": "776",
"cca3": "TON",
"currency": ["TOP"],
"callingCode": ["676"],
"capital": "Nuku'alofa",
"altSpellings": ["TO"],
"relevance": "0",
"region": "Oceania",
"subregion": "Polynesia",
"nativeLanguage": "ton",
"languages": {
"eng": "English",
"ton": "Tongan"
},
"translations": {
"deu": "Tonga",
"fra": "Tonga",
"hrv": "Tonga",
"ita": "Tonga",
"jpn": "\u30c8\u30f3\u30ac",
"nld": "Tonga",
"rus": "\u0422\u043e\u043d\u0433\u0430",
"spa": "Tonga"
},
"latlng": [-20, -175],
"demonym": "Tongan",
"borders": [],
"area": 747
},
{
"name": {
"common": "Trinidad and Tobago",
"official": "Republic of Trinidad and Tobago",
"native": {
"common": "Trinidad and Tobago",
"official": "Republic of Trinidad and Tobago"
}
},
"tld": [".tt"],
"cca2": "TT",
"ccn3": "780",
"cca3": "TTO",
"currency": ["TTD"],
"callingCode": ["1868"],
"capital": "Port of Spain",
"altSpellings": ["TT", "Republic of Trinidad and Tobago"],
"relevance": "0",
"region": "Americas",
"subregion": "Caribbean",
"nativeLanguage": "eng",
"languages": {
"eng": "English"
},
"translations": {
"deu": "Trinidad und Tobago",
"fra": "Trinit\u00e9 et Tobago",
"hrv": "Trinidad i Tobago",
"ita": "Trinidad e Tobago",
"jpn": "\u30c8\u30ea\u30cb\u30c0\u30fc\u30c9\u30fb\u30c8\u30d0\u30b4",
"nld": "Trinidad en Tobago",
"rus": "\u0422\u0440\u0438\u043d\u0438\u0434\u0430\u0434 \u0438 \u0422\u043e\u0431\u0430\u0433\u043e",
"spa": "Trinidad y Tobago"
},
"latlng": [11, -61],
"demonym": "Trinidadian",
"borders": [],
"area": 5130
},
{
"name": {
"common": "Tunisia",
"official": "Tunisian Republic",
"native": {
"common": "\u062a\u0648\u0646\u0633",
"official": "\u0627\u0644\u062c\u0645\u0647\u0648\u0631\u064a\u0629 \u0627\u0644\u062a\u0648\u0646\u0633\u064a\u0629"
}
},
"tld": [".tn"],
"cca2": "TN",
"ccn3": "788",
"cca3": "TUN",
"currency": ["TND"],
"callingCode": ["216"],
"capital": "Tunis",
"altSpellings": ["TN", "Republic of Tunisia", "al-Jumh\u016briyyah at-T\u016bnisiyyah"],
"relevance": "0",
"region": "Africa",
"subregion": "Northern Africa",
"nativeLanguage": "ara",
"languages": {
"ara": "Arabic"
},
"translations": {
"deu": "Tunesien",
"fra": "Tunisie",
"hrv": "Tunis",
"ita": "Tunisia",
"jpn": "\u30c1\u30e5\u30cb\u30b8\u30a2",
"nld": "Tunesi\u00eb",
"rus": "\u0422\u0443\u043d\u0438\u0441",
"spa": "T\u00fanez"
},
"latlng": [34, 9],
"demonym": "Tunisian",
"borders": ["DZA", "LBY"],
"area": 163610
},
{
"name": {
"common": "Turkey",
"official": "Republic of Turkey",
"native": {
"common": "T\u00fcrkiye",
"official": "T\u00fcrkiye Cumhuriyeti"
}
},
"tld": [".tr"],
"cca2": "TR",
"ccn3": "792",
"cca3": "TUR",
"currency": ["TRY"],
"callingCode": ["90"],
"capital": "Ankara",
"altSpellings": ["TR", "Turkiye", "Republic of Turkey", "T\u00fcrkiye Cumhuriyeti"],
"relevance": "0",
"region": "Asia",
"subregion": "Western Asia",
"nativeLanguage": "tur",
"languages": {
"tur": "Turkish"
},
"translations": {
"deu": "T\u00fcrkei",
"fra": "Turquie",
"hrv": "Turska",
"ita": "Turchia",
"jpn": "\u30c8\u30eb\u30b3",
"nld": "Turkije",
"rus": "\u0422\u0443\u0440\u0446\u0438\u044f",
"spa": "Turqu\u00eda"
},
"latlng": [39, 35],
"demonym": "Turkish",
"borders": ["ARM", "AZE", "BGR", "GEO", "GRC", "IRN", "IRQ", "SYR"],
"area": 783562
},
{
"name": {
"common": "Turkmenistan",
"official": "Turkmenistan",
"native": {
"common": "T\u00fcrkmenistan",
"official": "T\u00fcrkmenistan"
}
},
"tld": [".tm"],
"cca2": "TM",
"ccn3": "795",
"cca3": "TKM",
"currency": ["TMT"],
"callingCode": ["993"],
"capital": "Ashgabat",
"altSpellings": ["TM"],
"relevance": "0",
"region": "Asia",
"subregion": "Central Asia",
"nativeLanguage": "tuk",
"languages": {
"rus": "Russian",
"tuk": "Turkmen"
},
"translations": {
"deu": "Turkmenistan",
"fra": "Turkm\u00e9nistan",
"hrv": "Turkmenistan",
"ita": "Turkmenistan",
"jpn": "\u30c8\u30eb\u30af\u30e1\u30cb\u30b9\u30bf\u30f3",
"nld": "Turkmenistan",
"rus": "\u0422\u0443\u0440\u043a\u043c\u0435\u043d\u0438\u044f",
"spa": "Turkmenist\u00e1n"
},
"latlng": [40, 60],
"demonym": "Turkmen",
"borders": ["AFG", "IRN", "KAZ", "UZB"],
"area": 488100
},
{
"name": {
"common": "Turks and Caicos Islands",
"official": "Turks and Caicos Islands",
"native": {
"common": "Turks and Caicos Islands",
"official": "Turks and Caicos Islands"
}
},
"tld": [".tc"],
"cca2": "TC",
"ccn3": "796",
"cca3": "TCA",
"currency": ["USD"],
"callingCode": ["1649"],
"capital": "Cockburn Town",
"altSpellings": ["TC"],
"relevance": "0.5",
"region": "Americas",
"subregion": "Caribbean",
"nativeLanguage": "eng",
"languages": {
"eng": "English"
},
"translations": {
"deu": "Turks- und Caicosinseln",
"fra": "\u00celes Turques-et-Ca\u00efques",
"hrv": "Otoci Turks i Caicos",
"ita": "Isole Turks e Caicos",
"jpn": "\u30bf\u30fc\u30af\u30b9\u30fb\u30ab\u30a4\u30b3\u30b9\u8af8\u5cf6",
"nld": "Turks- en Caicoseilanden",
"rus": "\u0422\u0435\u0440\u043a\u0441 \u0438 \u041a\u0430\u0439\u043a\u043e\u0441",
"spa": "Islas Turks y Caicos"
},
"latlng": [21.75, -71.58333333],
"demonym": "Turks and Caicos Islander",
"borders": [],
"area": 948
},
{
"name": {
"common": "Tuvalu",
"official": "Tuvalu",
"native": {
"common": "Tuvalu",
"official": "Tuvalu"
}
},
"tld": [".tv"],
"cca2": "TV",
"ccn3": "798",
"cca3": "TUV",
"currency": ["AUD"],
"callingCode": ["688"],
"capital": "Funafuti",
"altSpellings": ["TV"],
"relevance": "0.5",
"region": "Oceania",
"subregion": "Polynesia",
"nativeLanguage": "tvl",
"languages": {
"eng": "English",
"tvl": "Tuvaluan"
},
"translations": {
"deu": "Tuvalu",
"fra": "Tuvalu",
"hrv": "Tuvalu",
"ita": "Tuvalu",
"jpn": "\u30c4\u30d0\u30eb",
"nld": "Tuvalu",
"rus": "\u0422\u0443\u0432\u0430\u043b\u0443",
"spa": "Tuvalu"
},
"latlng": [-8, 178],
"demonym": "Tuvaluan",
"borders": [],
"area": 26
},
{
"name": {
"common": "Uganda",
"official": "Republic of Uganda",
"native": {
"common": "Uganda",
"official": "Republic of Uganda"
}
},
"tld": [".ug"],
"cca2": "UG",
"ccn3": "800",
"cca3": "UGA",
"currency": ["UGX"],
"callingCode": ["256"],
"capital": "Kampala",
"altSpellings": ["UG", "Republic of Uganda", "Jamhuri ya Uganda"],
"relevance": "0",
"region": "Africa",
"subregion": "Eastern Africa",
"nativeLanguage": "swa",
"languages": {
"eng": "English",
"swa": "Swahili"
},
"translations": {
"deu": "Uganda",
"fra": "Uganda",
"hrv": "Uganda",
"ita": "Uganda",
"jpn": "\u30a6\u30ac\u30f3\u30c0",
"nld": "Oeganda",
"rus": "\u0423\u0433\u0430\u043d\u0434\u0430",
"spa": "Uganda"
},
"latlng": [1, 32],
"demonym": "Ugandan",
"borders": ["COD", "KEN", "RWA", "SSD", "TZA"],
"area": 241550
},
{
"name": {
"common": "Ukraine",
"official": "Ukraine",
"native": {
"common": "\u0423\u043a\u0440\u0430\u0457\u043d\u0430",
"official": "\u0423\u043a\u0440\u0430\u0457\u043d\u0430"
}
},
"tld": [".ua", ".\u0443\u043a\u0440"],
"cca2": "UA",
"ccn3": "804",
"cca3": "UKR",
"currency": ["UAH"],
"callingCode": ["380"],
"capital": "Kiev",
"altSpellings": ["UA", "Ukrayina"],
"relevance": "0",
"region": "Europe",
"subregion": "Eastern Europe",
"nativeLanguage": "ukr",
"languages": {
"ukr": "Ukrainian"
},
"translations": {
"deu": "Ukraine",
"fra": "Ukraine",
"hrv": "Ukrajina",
"ita": "Ucraina",
"jpn": "\u30a6\u30af\u30e9\u30a4\u30ca",
"nld": "Oekra\u00efne",
"rus": "\u0423\u043a\u0440\u0430\u0438\u043d\u0430",
"spa": "Ucrania"
},
"latlng": [49, 32],
"demonym": "Ukrainian",
"borders": ["BLR", "HUN", "MDA", "POL", "ROU", "RUS", "SVK"],
"area": 603500
},
{
"name": {
"common": "United Arab Emirates",
"official": "United Arab Emirates",
"native": {
"common": "\u062f\u0648\u0644\u0629 \u0627\u0644\u0625\u0645\u0627\u0631\u0627\u062a \u0627\u0644\u0639\u0631\u0628\u064a\u0629 \u0627\u0644\u0645\u062a\u062d\u062f\u0629",
"official": "\u0627\u0644\u0625\u0645\u0627\u0631\u0627\u062a \u0627\u0644\u0639\u0631\u0628\u064a\u0629 \u0627\u0644\u0645\u062a\u062d\u062f\u0629"
}
},
"tld": [".ae", "\u0627\u0645\u0627\u0631\u0627\u062a."],
"cca2": "AE",
"ccn3": "784",
"cca3": "ARE",
"currency": ["AED"],
"callingCode": ["971"],
"capital": "Abu Dhabi",
"altSpellings": ["AE", "UAE", "Emirates"],
"relevance": "0",
"region": "Asia",
"subregion": "Western Asia",
"nativeLanguage": "ara",
"languages": {
"ara": "Arabic"
},
"translations": {
"deu": "Vereinigte Arabische Emirate",
"fra": "\u00c9mirats arabes unis",
"hrv": "Ujedinjeni Arapski Emirati",
"ita": "Emirati Arabi Uniti",
"jpn": "\u30a2\u30e9\u30d6\u9996\u9577\u56fd\u9023\u90a6",
"nld": "Verenigde Arabische Emiraten",
"rus": "\u041e\u0431\u044a\u0435\u0434\u0438\u043d\u0451\u043d\u043d\u044b\u0435 \u0410\u0440\u0430\u0431\u0441\u043a\u0438\u0435 \u042d\u043c\u0438\u0440\u0430\u0442\u044b",
"spa": "Emiratos \u00c1rabes Unidos"
},
"latlng": [24, 54],
"demonym": "Emirati",
"borders": ["OMN", "SAU"],
"area": 83600
},
{
"name": {
"common": "United Kingdom",
"official": "United Kingdom of Great Britain and Northern Ireland",
"native": {
"common": "United Kingdom",
"official": "United Kingdom of Great Britain and Northern Ireland"
}
},
"tld": [".uk"],
"cca2": "GB",
"ccn3": "826",
"cca3": "GBR",
"currency": ["GBP"],
"callingCode": ["44"],
"capital": "London",
"altSpellings": ["GB", "UK", "Great Britain"],
"relevance": "2.5",
"region": "Europe",
"subregion": "Northern Europe",
"nativeLanguage": "eng",
"languages": {
"eng": "English"
},
"translations": {
"deu": "Vereinigtes K\u00f6nigreich",
"fra": "Royaume-Uni",
"hrv": "Ujedinjeno Kraljevstvo",
"ita": "Regno Unito",
"jpn": "\u30a4\u30ae\u30ea\u30b9",
"nld": "Verenigd Koninkrijk",
"rus": "\u0412\u0435\u043b\u0438\u043a\u043e\u0431\u0440\u0438\u0442\u0430\u043d\u0438\u044f",
"spa": "Reino Unido"
},
"latlng": [54, -2],
"demonym": "British",
"borders": ["IRL"],
"area": 242900
},
{
"name": {
"common": "United States",
"official": "United States of America",
"native": {
"common": "United States",
"official": "United States of America"
}
},
"tld": [".us"],
"cca2": "US",
"ccn3": "840",
"cca3": "USA",
"currency": ["USD", "USN", "USS"],
"callingCode": ["1"],
"capital": "Washington D.C.",
"altSpellings": ["US", "USA", "United States of America"],
"relevance": "3.5",
"region": "Americas",
"subregion": "Northern America",
"nativeLanguage": "eng",
"languages": {
"eng": "English"
},
"translations": {
"deu": "Vereinigte Staaten von Amerika",
"fra": "\u00c9tats-Unis",
"hrv": "Sjedinjene Ameri\u010dke Dr\u017eave",
"ita": "Stati Uniti D'America",
"jpn": "\u30a2\u30e1\u30ea\u30ab\u5408\u8846\u56fd",
"nld": "Verenigde Staten",
"rus": "\u0421\u043e\u0435\u0434\u0438\u043d\u0451\u043d\u043d\u044b\u0435 \u0428\u0442\u0430\u0442\u044b \u0410\u043c\u0435\u0440\u0438\u043a\u0438",
"spa": "Estados Unidos"
},
"latlng": [38, -97],
"demonym": "American",
"borders": ["CAN", "MEX"],
"area": 9372610
},
{
"name": {
"common": "United States Minor Outlying Islands",
"official": "United States Minor Outlying Islands",
"native": {
"common": "United States Minor Outlying Islands",
"official": "United States Minor Outlying Islands"
}
},
"tld": [".us"],
"cca2": "UM",
"ccn3": "581",
"cca3": "UMI",
"currency": ["USD"],
"callingCode": [],
"capital": "",
"altSpellings": ["UM"],
"relevance": "0",
"region": "Americas",
"subregion": "Northern America",
"nativeLanguage": "eng",
"languages": {
"eng": "English"
},
"translations": {
"deu": "Kleinere Inselbesitzungen der Vereinigten Staaten",
"fra": "\u00celes mineures \u00e9loign\u00e9es des \u00c9tats-Unis",
"hrv": "Mali udaljeni otoci SAD-a",
"ita": "Isole minori esterne degli Stati Uniti d'America",
"jpn": "\u5408\u8846\u56fd\u9818\u6709\u5c0f\u96e2\u5cf6",
"nld": "Kleine afgelegen eilanden van de Verenigde Staten",
"rus": "\u0412\u043d\u0435\u0448\u043d\u0438\u0435 \u043c\u0430\u043b\u044b\u0435 \u043e\u0441\u0442\u0440\u043e\u0432\u0430 \u0421\u0428\u0410",
"spa": "Islas Ultramarinas Menores de Estados Unidos"
},
"latlng": [],
"demonym": "American",
"borders": [],
"area": 34.2
},
{
"name": {
"common": "United States Virgin Islands",
"official": "Virgin Islands of the United States",
"native": {
"common": "United States Virgin Islands",
"official": "Virgin Islands of the United States"
}
},
"tld": [".vi"],
"cca2": "VI",
"ccn3": "850",
"cca3": "VIR",
"currency": ["USD"],
"callingCode": ["1340"],
"capital": "Charlotte Amalie",
"altSpellings": ["VI"],
"relevance": "0.5",
"region": "Americas",
"subregion": "Caribbean",
"nativeLanguage": "eng",
"languages": {
"eng": "English"
},
"translations": {
"deu": "Amerikanische Jungferninseln",
"fra": "\u00celes Vierges des \u00c9tats-Unis",
"hrv": "Ameri\u010dki Djevi\u010danski Otoci",
"ita": "Isole Vergini americane",
"jpn": "\u30a2\u30e1\u30ea\u30ab\u9818\u30f4\u30a1\u30fc\u30b8\u30f3\u8af8\u5cf6",
"nld": "Amerikaanse Maagdeneilanden",
"rus": "\u0412\u0438\u0440\u0433\u0438\u043d\u0441\u043a\u0438\u0435 \u041e\u0441\u0442\u0440\u043e\u0432\u0430",
"spa": "Islas V\u00edrgenes de los Estados Unidos"
},
"latlng": [18.35, -64.933333],
"demonym": "Virgin Islander",
"borders": [],
"area": 347
},
{
"name": {
"common": "Uruguay",
"official": "Oriental Republic of Uruguay",
"native": {
"common": "Uruguay",
"official": "Rep\u00fablica Oriental del Uruguay"
}
},
"tld": [".uy"],
"cca2": "UY",
"ccn3": "858",
"cca3": "URY",
"currency": ["UYI", "UYU"],
"callingCode": ["598"],
"capital": "Montevideo",
"altSpellings": ["UY", "Oriental Republic of Uruguay", "Rep\u00fablica Oriental del Uruguay"],
"relevance": "0",
"region": "Americas",
"subregion": "South America",
"nativeLanguage": "spa",
"languages": {
"spa": "Spanish"
},
"translations": {
"deu": "Uruguay",
"fra": "Uruguay",
"hrv": "Urugvaj",
"ita": "Uruguay",
"jpn": "\u30a6\u30eb\u30b0\u30a2\u30a4",
"nld": "Uruguay",
"rus": "\u0423\u0440\u0443\u0433\u0432\u0430\u0439",
"spa": "Uruguay"
},
"latlng": [-33, -56],
"demonym": "Uruguayan",
"borders": ["ARG", "BRA"],
"area": 181034
},
{
"name": {
"common": "Uzbekistan",
"official": "Republic of Uzbekistan",
"native": {
"common": "O\u2018zbekiston",
"official": "O'zbekiston Respublikasi"
}
},
"tld": [".uz"],
"cca2": "UZ",
"ccn3": "860",
"cca3": "UZB",
"currency": ["UZS"],
"callingCode": ["998"],
"capital": "Tashkent",
"altSpellings": ["UZ", "Republic of Uzbekistan", "O\u2018zbekiston Respublikasi", "\u040e\u0437\u0431\u0435\u043a\u0438\u0441\u0442\u043e\u043d \u0420\u0435\u0441\u043f\u0443\u0431\u043b\u0438\u043a\u0430\u0441\u0438"],
"relevance": "0",
"region": "Asia",
"subregion": "Central Asia",
"nativeLanguage": "uzb",
"languages": {
"rus": "Russian",
"uzb": "Uzbek"
},
"translations": {
"deu": "Usbekistan",
"fra": "Ouzb\u00e9kistan",
"hrv": "Uzbekistan",
"ita": "Uzbekistan",
"jpn": "\u30a6\u30ba\u30d9\u30ad\u30b9\u30bf\u30f3",
"nld": "Oezbekistan",
"rus": "\u0423\u0437\u0431\u0435\u043a\u0438\u0441\u0442\u0430\u043d",
"spa": "Uzbekist\u00e1n"
},
"latlng": [41, 64],
"demonym": "Uzbekistani",
"borders": ["AFG", "KAZ", "KGZ", "TJK", "TKM"],
"area": 447400
},
{
"name": {
"common": "Vanuatu",
"official": "Republic of Vanuatu",
"native": {
"common": "Vanuatu",
"official": "Ripablik blong Vanuatu"
}
},
"tld": [".vu"],
"cca2": "VU",
"ccn3": "548",
"cca3": "VUT",
"currency": ["VUV"],
"callingCode": ["678"],
"capital": "Port Vila",
"altSpellings": ["VU", "Republic of Vanuatu", "Ripablik blong Vanuatu", "R\u00e9publique de Vanuatu"],
"relevance": "0",
"region": "Oceania",
"subregion": "Melanesia",
"nativeLanguage": "bis",
"languages": {
"bis": "Bislama",
"eng": "English",
"fra": "French"
},
"translations": {
"deu": "Vanuatu",
"fra": "Vanuatu",
"hrv": "Vanuatu",
"ita": "Vanuatu",
"jpn": "\u30d0\u30cc\u30a2\u30c4",
"nld": "Vanuatu",
"rus": "\u0412\u0430\u043d\u0443\u0430\u0442\u0443",
"spa": "Vanuatu"
},
"latlng": [-16, 167],
"demonym": "Ni-Vanuatu",
"borders": [],
"area": 12189
},
{
"name": {
"common": "Venezuela",
"official": "Bolivarian Republic of Venezuela",
"native": {
"common": "Venezuela",
"official": "Rep\u00fablica Bolivariana de Venezuela"
}
},
"tld": [".ve"],
"cca2": "VE",
"ccn3": "862",
"cca3": "VEN",
"currency": ["VEF"],
"callingCode": ["58"],
"capital": "Caracas",
"altSpellings": ["VE", "Bolivarian Republic of Venezuela", "Rep\u00fablica Bolivariana de Venezuela"],
"relevance": "0",
"region": "Americas",
"subregion": "South America",
"nativeLanguage": "spa",
"languages": {
"spa": "Spanish"
},
"translations": {
"deu": "Venezuela",
"fra": "Venezuela",
"hrv": "Venezuela",
"ita": "Venezuela",
"jpn": "\u30d9\u30cd\u30ba\u30a8\u30e9\u30fb\u30dc\u30ea\u30d0\u30eb\u5171\u548c\u56fd",
"nld": "Venezuela",
"rus": "\u0412\u0435\u043d\u0435\u0441\u0443\u044d\u043b\u0430",
"spa": "Venezuela"
},
"latlng": [8, -66],
"demonym": "Venezuelan",
"borders": ["BRA", "COL", "GUY"],
"area": 916445
},
{
"name": {
"common": "Vietnam",
"official": "Socialist Republic of Vietnam",
"native": {
"common": "Vi\u1ec7t Nam",
"official": "C\u1ed9ng h\u00f2a x\u00e3 h\u1ed9i ch\u1ee7 ngh\u0129a Vi\u1ec7t Nam"
}
},
"tld": [".vn"],
"cca2": "VN",
"ccn3": "704",
"cca3": "VNM",
"currency": ["VND"],
"callingCode": ["84"],
"capital": "Hanoi",
"altSpellings": ["VN", "Socialist Republic of Vietnam", "C\u1ed9ng h\u00f2a X\u00e3 h\u1ed9i ch\u1ee7 ngh\u0129a Vi\u1ec7t Nam"],
"relevance": "1.5",
"region": "Asia",
"subregion": "South-Eastern Asia",
"nativeLanguage": "vie",
"languages": {
"vie": "Vietnamese"
},
"translations": {
"deu": "Vietnam",
"fra": "Vi\u00eat Nam",
"hrv": "Vijetnam",
"ita": "Vietnam",
"jpn": "\u30d9\u30c8\u30ca\u30e0",
"nld": "Vietnam",
"rus": "\u0412\u044c\u0435\u0442\u043d\u0430\u043c",
"spa": "Vietnam"
},
"latlng": [16.16666666, 107.83333333],
"demonym": "Vietnamese",
"borders": ["KHM", "CHN", "LAO"],
"area": 331212
},
{
"name": {
"common": "Wallis and Futuna",
"official": "Territory of the Wallis and Futuna Islands",
"native": {
"common": "Wallis et Futuna",
"official": "Territoire des \u00eeles Wallis et Futuna"
}
},
"tld": [".wf"],
"cca2": "WF",
"ccn3": "876",
"cca3": "WLF",
"currency": ["XPF"],
"callingCode": ["681"],
"capital": "Mata-Utu",
"altSpellings": ["WF", "Territory of the Wallis and Futuna Islands", "Territoire des \u00eeles Wallis et Futuna"],
"relevance": "0.5",
"region": "Oceania",
"subregion": "Polynesia",
"nativeLanguage": "fra",
"languages": {
"fra": "French"
},
"translations": {
"deu": "Wallis und Futuna",
"fra": "Wallis-et-Futuna",
"hrv": "Wallis i Fortuna",
"ita": "Wallis e Futuna",
"jpn": "\u30a6\u30a9\u30ea\u30b9\u30fb\u30d5\u30c4\u30ca",
"nld": "Wallis en Futuna",
"rus": "\u0423\u043e\u043b\u043b\u0438\u0441 \u0438 \u0424\u0443\u0442\u0443\u043d\u0430",
"spa": "Wallis y Futuna"
},
"latlng": [-13.3, -176.2],
"demonym": "Wallis and Futuna Islander",
"borders": [],
"area": 142
},
{
"name": {
"common": "Western Sahara",
"official": "Western Sahara",
"native": {
"common": "\u0627\u0644\u0635\u062d\u0631\u0627\u0621 \u0627\u0644\u063a\u0631\u0628\u064a\u0629",
"official": "S\u00e1hara Occidental"
}
},
"tld": [".eh"],
"cca2": "EH",
"ccn3": "732",
"cca3": "ESH",
"currency": ["MAD", "DZD", "MRO"],
"callingCode": ["212"],
"capital": "El Aai\u00fan",
"altSpellings": ["EH", "Tane\u1e93roft Tutrimt"],
"relevance": "0",
"region": "Africa",
"subregion": "Northern Africa",
"nativeLanguage": "ber",
"languages": {
"ber": "Berber",
"mey": "Hassaniya",
"spa": "Spanish"
},
"translations": {
"deu": "Westsahara",
"fra": "Sahara Occidental",
"hrv": "Zapadna Sahara",
"ita": "Sahara Occidentale",
"jpn": "\u897f\u30b5\u30cf\u30e9",
"nld": "Westelijke Sahara",
"rus": "\u0417\u0430\u043f\u0430\u0434\u043d\u0430\u044f \u0421\u0430\u0445\u0430\u0440\u0430",
"spa": "Sahara Occidental"
},
"latlng": [24.5, -13],
"demonym": "Sahrawi",
"borders": ["DZA", "MRT", "MAR"],
"area": 266000
},
{
"name": {
"common": "Yemen",
"official": "Republic of Yemen",
"native": {
"common": "\u0627\u0644\u064a\u064e\u0645\u064e\u0646",
"official": "\u0627\u0644\u062c\u0645\u0647\u0648\u0631\u064a\u0629 \u0627\u0644\u064a\u0645\u0646\u064a\u0629"
}
},
"tld": [".ye"],
"cca2": "YE",
"ccn3": "887",
"cca3": "YEM",
"currency": ["YER"],
"callingCode": ["967"],
"capital": "Sana'a",
"altSpellings": ["YE", "Yemeni Republic", "al-Jumh\u016briyyah al-Yamaniyyah"],
"relevance": "0",
"region": "Asia",
"subregion": "Western Asia",
"nativeLanguage": "ara",
"languages": {
"ara": "Arabic"
},
"translations": {
"deu": "Jemen",
"fra": "Y\u00e9men",
"hrv": "Jemen",
"ita": "Yemen",
"jpn": "\u30a4\u30a8\u30e1\u30f3",
"nld": "Jemen",
"rus": "\u0419\u0435\u043c\u0435\u043d",
"spa": "Yemen"
},
"latlng": [15, 48],
"demonym": "Yemeni",
"borders": ["OMN", "SAU"],
"area": 527968
},
{
"name": {
"common": "Zambia",
"official": "Republic of Zambia",
"native": {
"common": "Zambia",
"official": "Republic of Zambia"
}
},
"tld": [".zm"],
"cca2": "ZM",
"ccn3": "894",
"cca3": "ZMB",
"currency": ["ZMK"],
"callingCode": ["260"],
"capital": "Lusaka",
"altSpellings": ["ZM", "Republic of Zambia"],
"relevance": "0",
"region": "Africa",
"subregion": "Eastern Africa",
"nativeLanguage": "eng",
"languages": {
"eng": "English"
},
"translations": {
"deu": "Sambia",
"fra": "Zambie",
"hrv": "Zambija",
"ita": "Zambia",
"jpn": "\u30b6\u30f3\u30d3\u30a2",
"nld": "Zambia",
"rus": "\u0417\u0430\u043c\u0431\u0438\u044f",
"spa": "Zambia"
},
"latlng": [-15, 30],
"demonym": "Zambian",
"borders": ["AGO", "BWA", "COD", "MWI", "MOZ", "NAM", "TZA", "ZWE"],
"area": 752612
},
{
"name": {
"common": "Zimbabwe",
"official": "Republic of Zimbabwe",
"native": {
"common": "Zimbabwe",
"official": "Republic of Zimbabwe"
}
},
"tld": [".zw"],
"cca2": "ZW",
"ccn3": "716",
"cca3": "ZWE",
"currency": ["ZWL"],
"callingCode": ["263"],
"capital": "Harare",
"altSpellings": ["ZW", "Republic of Zimbabwe"],
"relevance": "0",
"region": "Africa",
"subregion": "Eastern Africa",
"nativeLanguage": "nya",
"languages": {
"bwg": "Chibarwe",
"eng": "English",
"kck": "Kalanga",
"khi": "Khoisan",
"ndc": "Ndau",
"nde": "Northern Ndebele",
"nya": "Chewa",
"sna": "Shona",
"sot": "Sotho",
"toi": "Tonga",
"tsn": "Tswana",
"tso": "Tsonga",
"ven": "Venda",
"xho": "Xhosa",
"zib": "Zimbabwean Sign Language"
},
"translations": {
"deu": "Simbabwe",
"fra": "Zimbabwe",
"hrv": "Zimbabve",
"ita": "Zimbabwe",
"jpn": "\u30b8\u30f3\u30d0\u30d6\u30a8",
"nld": "Zimbabwe",
"rus": "\u0417\u0438\u043c\u0431\u0430\u0431\u0432\u0435",
"spa": "Zimbabue"
},
"latlng": [-20, 30],
"demonym": "Zimbabwean",
"borders": ["BWA", "MOZ", "ZAF", "ZMB"],
"area": 390757
}
]
country_iso_by_name = dict((country["name"]["common"], country["cca2"]) for country in countries_info)
country_iso_by_name.update(dict((country["name"]["official"], country["cca2"]) for country in countries_info))
country_iso_by_name["Korea (South)"] = "KR"
country_iso_by_name["Serbia and Montenegro"] = "RS"
country_iso_by_name["Reunion"] = "RE"
country_iso_by_name["Macao"] = "MO"
country_iso_by_name["Fiji Islands"] = "FJ"
|
Impactstory/total-impact-core
|
totalimpact/providers/countries_info.py
|
Python
|
mit
| 322,055
|
[
"BWA"
] |
f4c641649b9d91853f12e25e6e6dfdf712824bb97bcce418506fc2a0a96f8183
|
# This file is part of Merlin.
# Merlin is the Copyright (C)2008,2009,2010 of Robin K. Hansen, Elliot Rosemarine, Andreas Jacobsen.
# Individual portions may be copyright by individual contributors, and
# are included in this collective work with permission of the copyright
# owners.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from math import floor, e, log, sqrt
from Core.paconf import PA
from Core.maps import Ship
from Core.loadable import loadable, route
class rprod(loadable):
"""Calculate how many <ship> you can build in <ticks> with <factories>. Specify population and/or government for bonuses."""
usage = " <ship> <ticks> <factories> [population] [government]"
dx = tolerance = 0.00001
@route(r"(\S+)\s+(\d+)\s+(\d+)(?:\s+(.*))?")
def execute(self, message, user, params):
name, ticks, factories = params.group(1,2,3)
ship = Ship.load(name=name)
if ship is None:
message.alert("%s is not a ship." % name)
return
ticks = int(ticks)
factories = int(factories)
race = ship.race[:3].lower()
race = "etd" if race == "eit" else race
gov = None
pop = 0
for p in (params.group(4) or "").split():
m=self.govre.match(p)
if m and not gov:
gov=m.group(1).lower()
continue
if p.isdigit() and not pop:
pop = int(p)
continue
m = ship.metal
c = ship.crystal
e = ship.eonium
bonus = 1 + pop/100.0
if gov:
m *= (1+PA.getfloat(gov,"prodcost"))
c *= (1+PA.getfloat(gov,"prodcost"))
e *= (1+PA.getfloat(gov,"prodcost"))
bonus += PA.getfloat(gov,"prodtime")
if race:
bonus += PA.getfloat(race,"prodtime")
cost = floor(m)+floor(c)+floor(e)
res = int(self.revprod(ticks, factories, bonus))
ships = int(res / cost)
reply = "You can build %s %s (%s) in %d ticks" % (self.num2short(ships), ship.name, self.num2short(ships*ship.total_cost//PA.getint("numbers", "ship_value")), ticks)
reply += " using %s factories" % (factories,) if factories > 1 else ""
reply += " with a" if race or gov else ""
reply += " %s"%(PA.get(gov,"name"),) if gov else ""
reply += " %s"%(PA.get(race,"name"),) if race else ""
reply += " planet" if race or gov else ""
reply += " with %s%% population"%(pop,) if pop else ""
message.reply(reply)
def derive(self, f):
"""Numerical derivation of the function f."""
return lambda x: (f(x + self.dx) - f(x)) / self.dx
def close(self, a, b):
"""Is the result acceptable?"""
return abs(a - b) < self.tolerance
def newton_transform(self, f):
"""Do a newton transform of the function f."""
return lambda x: x - (f(x) / self.derive(f)(x))
def fixed_point(self, f, guess):
"""Fixed point search."""
while not self.close(guess, f(guess)):
guess = f(guess)
return guess
def newton(self, f, guess):
"""Generic equation solver using newtons method."""
return self.fixed_point(self.newton_transform(f),
guess)
def rpu(self, y):
"""Curry it."""
return lambda x: 2 * sqrt(x) * log(x, e) - y
def revprod(self, ticks, facs, bonus):
"""Reversed production formula."""
output = ((4000 * facs) ** 0.98) * bonus
return self.newton(self.rpu(ticks * output - 10000 * facs), 10)
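# Editor's sketch (hypothetical helper, not part of the original hook): the
# revprod() solve above is plain Newton iteration on f(x) = 2*sqrt(x)*ln(x) - y
# with a numerical derivative. A self-contained equivalent, reusing only the
# math imports at the top of this file:
def _newton_demo(y, guess=10.0, dx=0.00001, tolerance=0.00001):
    f = lambda x: 2 * sqrt(x) * log(x, e) - y
    df = lambda x: (f(x + dx) - f(x)) / dx
    step = lambda x: x - f(x) / df(x)
    while abs(guess - step(guess)) >= tolerance:
        guess = step(guess)
    return guess  # _newton_demo(100.0) -> ~112, since 2*sqrt(112)*ln(112) ~= 100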
|
d7415/merlin
|
Hooks/ships/rprod.py
|
Python
|
gpl-2.0
| 4,276
|
[
"CRYSTAL"
] |
ec7aac24ebcd86ed69d1e48cd5676288a96ae88767a5ad74949a3515ec835122
|
"""
Tests for molecule_database.py.
"""
import shutil
import tempfile
import unittest
from rdkit import Chem
from rdkit_utils import conformers, serial
from vs_utils.scripts.molecule_database import main, parse_args
from vs_utils.utils.dataset_utils import MoleculeDatabase
class TestMoleculeDatabase(unittest.TestCase):
"""
Tests for molecule_database.py.
"""
def setUp(self):
"""
Set up tests.
"""
smiles = ['CC(=O)OC1=CC=CC=C1C(=O)O', 'CC(C)CC1=CC=C(C=C1)C(C)C(=O)O',
'CC1=CC=C(C=C1)C2=CC(=NN2C3=CC=C(C=C3)S(=O)(=O)N)C(F)(F)F']
names = ['aspirin', 'ibuprofen', 'celecoxib']
self.cids = [2244, 3672, 2662]
self.mols = []
for s, n in zip(smiles, names):
mol = Chem.MolFromSmiles(s)
mol.SetProp('_Name', n)
self.mols.append(mol)
self.temp_dir = tempfile.mkdtemp()
_, self.input_filename = tempfile.mkstemp(dir=self.temp_dir,
suffix='.smi')
with open(self.input_filename, 'wb') as f:
for this_smiles, name in zip(smiles, names):
f.write('{}\t{}\n'.format(this_smiles, name))
_, self.output_filename = tempfile.mkstemp(dir=self.temp_dir)
def tearDown(self):
"""
Clean up tests.
"""
shutil.rmtree(self.temp_dir)
def check_output(self, input_args):
"""
Run main and examine the resulting database.
Parameters
----------
args : list
Command-line arguments.
"""
args = parse_args(input_args)
main(args.input, args.output, args.database, args.stereo_from_3d)
database = MoleculeDatabase()
database.load(args.output)
assert len(database) == len(self.mols)
return database
def test_defaults(self):
"""
Test default arguments.
"""
self.check_output(
['-i', self.input_filename, '-o', self.output_filename])
def test_update(self):
"""
Test updating an existing database.
"""
_, database_filename = tempfile.mkstemp(dir=self.temp_dir)
database = MoleculeDatabase()
database.add_mol(self.mols[0])
database.save(database_filename)
self.check_output(
['-i', self.input_filename, '-o', self.output_filename, '-d',
database_filename])
def test_assign_stereo_from_3d(self):
"""
Test --stereo-from-3d.
"""
# generate conformers for ibuprofen
engine = conformers.ConformerGenerator()
mol = engine.generate_conformers(self.mols[1])
assert mol.GetNumConformers() > 0
self.mols[1] = mol
# rewrite input file
_, self.input_filename = tempfile.mkstemp(dir=self.temp_dir,
suffix='.sdf')
with serial.MolWriter().open(self.input_filename) as writer:
writer.write(self.mols)
# check for absence of chirality using default arguments
database = self.check_output(
['-i', self.input_filename, '-o', self.output_filename])
chiral = False
for smiles in database:
if '@' in smiles:
chiral = True
assert not chiral
        # check for presence of chirality using --stereo-from-3d
database = self.check_output(
['-i', self.input_filename, '-o', self.output_filename,
'--stereo-from-3d'])
chiral = False
for smiles in database:
if '@' in smiles:
chiral = True
assert chiral
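# Editor's note: these are plain unittest.TestCase tests, so (assuming the
# rdkit/vs_utils dependencies are installed) they can be run with a standard
# runner, e.g. `python -m unittest` pointed at this module.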
|
rbharath/pande-gas
|
vs_utils/scripts/tests/test_molecule_database.py
|
Python
|
bsd-3-clause
| 3,701
|
[
"RDKit"
] |
fa56fb155421639de2e9ebe3144694f66349072dda87febfad04644b360701ed
|
# -*- coding:iso-8859-10 -*-
__docformat__ = 'restructuredtext'
'''
Doc...
:Author: kmu
:Created: 28. jan. 2011
'''
# Built-in
import os
execfile("../themes/set_pysenorge_path.py") # Adds folder containing the "pysenorge" package to the PYTHONPATH @UnusedImport
# Additional
from netCDF4 import Dataset, num2date
# Own
from pysenorge.set_environment import timeunit
from pysenorge.io.png import writePNG
def netCDF2PNG(ncfile, parameter, timendx=0, CLTfile=None, outdir=None):
'''
Convenience function converting a parameter in the given netCDF file to PNG.
Parameters:
==========
- ncfile: Input netCDF file.
- parameter: parameter in netCDF file
- outdir: Output directory.
Return:
=======
PNG file in the output directory.
'''
if outdir==None:
outdir = os.path.dirname(os.path.abspath(ncfile))
rootgrp = Dataset(ncfile, 'r')
dt = num2date(rootgrp.variables['time'][timendx], timeunit).isoformat()
print dt
dt = dt.split('T')[0].replace('-','_')
data = rootgrp.variables[parameter][timendx,:,:]
# Write to PNG file
writePNG(data, os.path.join(outdir, parameter+'_'+dt), CLTfile)
def _test():
netCDF2PNG(r'Z:\tmp\wind_10m_daily\2011\wind_10m_daily_2011_02_02.nc',
'wind_direction',
CLTfile=r'Z:\tmp\wind_10m_daily\wind_direction_10_no.clt'
)
netCDF2PNG(r'Z:\tmp\wind_10m_daily\2011\wind_10m_daily_2011_02_02.nc',
'avg_wind_speed',
CLTfile=r'Z:\tmp\wind_10m_daily\avg_wind_speed_10_no.clt'
)
netCDF2PNG(r'Z:\tmp\wind_10m_daily\2011\wind_10m_daily_2011_02_02.nc',
'max_wind_speed',
CLTfile=r'Z:\tmp\wind_10m_daily\max_wind_speed_10_no.clt'
)
if __name__ == '__main__':
_test()
|
kmunve/pysenorge
|
pysenorge/tools/netCDF2PNG.py
|
Python
|
gpl-3.0
| 1,893
|
[
"NetCDF"
] |
5e7614eb26a92e7f10aa2cf062f923b1f8accf334158b8fb03ca898c1b9401f3
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
***********************************
espressopp.analysis.Autocorrelation
***********************************
.. function:: espressopp.analysis.Autocorrelation(system)
:param system: the espressopp system object
:type system:
.. function:: espressopp.analysis.Autocorrelation.clear()
:rtype:
.. function:: espressopp.analysis.Autocorrelation.compute()
:rtype:
.. function:: espressopp.analysis.Autocorrelation.gather(value)
:param value: sample to append to the gathered time series
:type value:
:rtype:
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from _espressopp import analysis_Autocorrelation
class AutocorrelationLocal(analysis_Autocorrelation):
def __init__(self, system):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, analysis_Autocorrelation, system)
def gather(self, value):
return self.cxxclass.gather(self, value)
def clear(self):
return self.cxxclass.clear(self)
def compute(self):
return self.cxxclass.compute(self)
if pmi.isController:
class Autocorrelation(object):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.analysis.AutocorrelationLocal',
pmicall = [ "gather", "clear", "compute" ],
localcall = ["__getitem__", "all"],
pmiproperty = ["size"]
)
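# Editor's sketch (hypothetical usage; assumes an initialised espressopp
# system, an integrator and an observable() function, none of which this
# module provides):
#
#   ac = espressopp.analysis.Autocorrelation(system)
#   for _ in range(n_samples):
#       integrator.run(sampling_interval)
#       ac.gather(observable())   # append one sample per call
#   acf = ac.compute()            # autocorrelation of the gathered series
#   ac.clear()                    # reset before the next measurement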
|
kkreis/espressopp
|
src/analysis/Autocorrelation.py
|
Python
|
gpl-3.0
| 2,187
|
[
"ESPResSo"
] |
86a7a63dadf36d26d9194e118b99d00f91562fe825ce02093990e072ba255645
|
"""
Implementation of Bilateral filter
Inputs:
img: A 2d image with values in between 0 and 1
varS: variance in space dimension.
varI: variance in Intensity.
N: Kernel size(Must be an odd number)
Output:
img:A 2d zero padded image with values in between 0 and 1
"""
import math
import sys
import cv2
import numpy as np
def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
# For applying gaussian function for each element in matrix.
sigma = math.sqrt(variance)
cons = 1 / (sigma * math.sqrt(2 * math.pi))
return cons * np.exp(-((img / sigma) ** 2) * 0.5)
def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
half = kernel_size // 2
return img[x - half : x + half + 1, y - half : y + half + 1]
def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
# Creates a gaussian kernel of given dimension.
arr = np.zeros((kernel_size, kernel_size))
for i in range(0, kernel_size):
for j in range(0, kernel_size):
arr[i, j] = math.sqrt(
abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
)
return vec_gaussian(arr, spatial_variance)
def bilateral_filter(
img: np.ndarray,
spatial_variance: float,
intensity_variance: float,
kernel_size: int,
) -> np.ndarray:
img2 = np.zeros(img.shape)
gaussKer = get_gauss_kernel(kernel_size, spatial_variance)
sizeX, sizeY = img.shape
for i in range(kernel_size // 2, sizeX - kernel_size // 2):
for j in range(kernel_size // 2, sizeY - kernel_size // 2):
imgS = get_slice(img, i, j, kernel_size)
imgI = imgS - imgS[kernel_size // 2, kernel_size // 2]
imgIG = vec_gaussian(imgI, intensity_variance)
weights = np.multiply(gaussKer, imgIG)
vals = np.multiply(imgS, weights)
val = np.sum(vals) / np.sum(weights)
img2[i, j] = val
return img2
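# Editor's sketch (not part of the original module): exercising the filter on
# a synthetic noisy step edge, without the OpenCV I/O used below. The edge
# should survive smoothing while the flat regions are denoised.
def _demo_on_step_edge() -> np.ndarray:
    rng = np.random.default_rng(0)  # assumes numpy >= 1.17
    img = np.zeros((32, 32))
    img[:, 16:] = 1.0  # left half dark, right half bright
    img += rng.normal(0.0, 0.05, img.shape)  # mild additive noise
    return bilateral_filter(img, spatial_variance=1.0,
                            intensity_variance=0.1, kernel_size=5)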
def parse_args(args: list) -> tuple:
filename = args[1] if args[1:] else "../image_data/lena.jpg"
spatial_variance = float(args[2]) if args[2:] else 1.0
intensity_variance = float(args[3]) if args[3:] else 1.0
if args[4:]:
kernel_size = int(args[4])
kernel_size = kernel_size + abs(kernel_size % 2 - 1)
else:
kernel_size = 5
return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
img = cv2.imread(filename, 0)
cv2.imshow("input image", img)
out = img / 255
out = out.astype("float32")
out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
out = out * 255
out = np.uint8(out)
cv2.imshow("output image", out)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
TheAlgorithms/Python
|
digital_image_processing/filters/bilateral_filter.py
|
Python
|
mit
| 2,875
|
[
"Gaussian"
] |
92b088386a96c31178d9dd8faa35d33071a3bd422c9420a8a03465da21aa3200
|
import numpy as np
np.seterr(divide='ignore') #ignore errors in log division
np.seterr(all='ignore') #ignore errors in log division
import sys
import time
##########################################################################################
def MCMC(LogPosterior,gp,post_args,ch_len,ep,chain_filenames=['MCMC_chain'],n_chains=0,\
adapt_limits=(0,0,0),glob_limits=(0,0,0),thin=1,orth=0,acc=0.234):
"""
    Python adaptation of the MyMCMC (C) code. Marginally slower than the C version, but the
    slowdown is insignificant when using an expensive posterior function, e.g. for GPs. It
    should be much easier to extend with better adaptation algorithms to make up for this,
    although the choice of python functions needs care, as it has a significant effect on
    efficiency.
Note it is very inefficient to have expressions within the chain loop, so as much as
possible, calculations should be done outside the loop in large arrays, eg the random
numbers are generated for the whole chain at once (and multiplied by stepsizes). This
is also done when the stepsizes are adapted, which is much much quicker than doing it
at each step of the loop.
LogPosterior - log posterior distribution
gp - array/list of guess parameters
ep - array/list of (initial) steps
chain_filenames - list of chain filenames, used to calculate no of chains
n_chains - no of chains, overwrites chains_filenames, uses generic names for chains
adapt_limits/glob_limits - tuple of (lower,upper,number), defining the range and number
of adaptions within that range. adapt for relative step sizes and glob for the global
stepsize
thin - int >= 1 - thin the chains by this amount, ie only output every 'thin' steps of
the chain
orth - default is to use the covariance matrix at each adaption step and make correlated
steps in parameter space, this flag forces orthogonal steps, ie along each axis
independently
acc - target acceptance ratio - for infinite iid Gaussian dist -> 23.4%, for single par
is 44%
"""
#first set chain filenames
if n_chains > 0: chain_filenames = ["MCMC_chain_%d" % ch_no for ch_no in range(1,n_chains+1)]
#print parameters
PrintParams(chain_filenames,ch_len,LogPosterior,adapt_limits,glob_limits,gp,ep)
print ('-' * 80)
####### loop over chains ###############
for n,chain in enumerate(chain_filenames):
#initialise parameters
p,e = np.copy(gp),np.copy(ep)
p_acc,L_acc = np.copy(p),-np.inf
#arrays for storing results
ParArr = np.zeros(ch_len/thin*len(p)).reshape(ch_len/thin,len(p))
PostArr = np.zeros(ch_len/thin)
AccArr = np.zeros(ch_len)
#jump parameters
#error array computed in advance - much faster to compute as a block
G = np.float(2.4**2 / (e>0).sum() )
K = np.diag(e**2) #create starting (diagonal) covariance matrix
#RA = np.random.normal(0.,1.,len(p)*ch_len).reshape(ch_len,len(p)) * e * G
np.random.seed()
        RandArr = np.random.multivariate_normal(np.zeros(p.size),K,ch_len) * G
#set columns to zero after too! - for large K sometimes zero variance parameters have small random scatter
RandArr[:][:,np.where(e==0.)[0]] = 0.
#print "Computing Chain %d: '%s' " % (n+1,chain),
####### individual chain ###############
start = time.time()
for i in xrange(ch_len):
if i % ((ch_len)/20) == 0:
PrintBar(n,chain,i,ch_len,AccArr,start)
#create proposal parameters, and calculate posterior
# p_prop = p_acc + RandArr[i] * e * G #for diag covariance matrix
p_prop = p_acc + RandArr[i] #this line has the largest (extra) overhead from C -> python version
L_prop = LogPosterior(p_prop,*post_args)
#Metropolis algorithm to accept step
if np.random.rand() < np.exp(L_prop - L_acc):
p_acc,L_acc = p_prop,L_prop
AccArr[i] = 1 #update acceptance array
#add new posterior and parameters to chain
if i%thin==0: ParArr[i/thin],PostArr[i/thin] = p_acc,L_acc
#adaptive stepsizes
if (i <= adapt_limits[1]) and (i > adapt_limits[0]):
if (i-adapt_limits[0]) % ((adapt_limits[1]-adapt_limits[0])/adapt_limits[2]) == 0:
#RA = np.random.normal(0.,1.,len(p)*ch_len).reshape(ch_len,len(p)) * e * G
if orth: K = np.diag(((e + 4*ParArr[adapt_limits[0]/thin:i/thin].std(axis=0))/5.)**2.) #for diagonal covariance matrix
else: K = (K + 4.*np.cov(ParArr[adapt_limits[0]/thin:i/thin],rowvar=0))/5.
K[np.where(e==0.)],K[:,np.where(e==0.)] = 0.,0. #reset error=0. values to 0.
                    RandArr[i:] = np.random.multivariate_normal(np.zeros(p.size),K,ch_len-i) * G
RandArr[i:][:,np.where(e==0.)[0]] = 0. #set columns to zero after too!
#adaptive global step size
if (i <= glob_limits[1]) and (i > glob_limits[0]):
if (i-glob_limits[0]) % ((glob_limits[1]-glob_limits[0])/glob_limits[2]) == 0:
G *= (1./acc) * min(0.9,max(0.1,AccArr[i-(glob_limits[1]-glob_limits[0])/glob_limits[2]:i].sum()/((glob_limits[1]-glob_limits[0])/glob_limits[2])))
                    RandArr[i:] = np.random.multivariate_normal(np.zeros(p.size),K,ch_len-i) * G
RandArr[i:][:,np.where(e==0.)[0]] = 0.
####### end individual chain ###########
        PrintBar(n,chain,i,ch_len,AccArr,start); print()
np.save(chain+".npy",np.concatenate([PostArr.reshape(PostArr.size,1),ParArr],axis=1))
####### end loop over chains ############
print ('-' * 80)
##########################################################################################
def PrintBar(n,chain,i,ch_len,AccArr,start):
"Print the status bar - probably a more elegant way to write this..."
ts = time.time()-start
a_str = "" if i <= ch_len/5 else ", acc = %.2f%%" % (100.*np.float(AccArr[ch_len/5:i].sum())/(i-ch_len/5+1))
print ("\rComputing Chain %d: '%s' %-20s t = %dm %.2fs%s" % (n+1,chain,'#'*(i/(ch_len/20)+1),ts // 60., ts % 60.,a_str),end='')
sys.stdout.flush();
##########################################################################################
def PrintParams(ch_filenames,ch_len,posterior,adapt_limits,glob_limits,gp,ep):
print ("Infer.MCMC runnning...")
print ("MCMC parameters:")
print (" No Chains: %d" % len(ch_filenames))
print (" Chain Length: %d" % ch_len)
if(adapt_limits[2]): print (" Relative-step adaption limits: (%d,%d,%d)" % (adapt_limits[0],adapt_limits[1],adapt_limits[2]))
if(glob_limits[2]): print (" Global-step adaption limits: (%d,%d,%d)" % (glob_limits[0],glob_limits[1],glob_limits[2]))
print (" Computing chains:", ch_filenames)
print (" Posterior probability function: ", posterior)
print (" Function params <value prop_size>:")
for q in range(len(gp)):
print (" p[%d] = %f +- %f" % (q,gp[q],ep[q]))
##########################################################################################
|
nealegibson/Infer
|
src/MCMC.py
|
Python
|
gpl-3.0
| 6,930
|
[
"Gaussian"
] |
9de6f09f33d952bc948d754a22361023e1bbc0a1553d44a5dc1202a29ecfb747
|
# !/usr/local/bin/python3.4.2
# ----Copyright (c) 2017 Carnegie Hall | The MIT License (MIT)----
# ----For the full license terms, please visit https://github.com/CarnegieHall/quality-control/blob/master/LICENSE----
# run the script with 4 arguments:
# argument 0 is the script name
# argument 1 is the path to the database IDs and filenames
# argument 2 is the path to the database IDs and asset IDs
# argument 3 is the path you want to output the matched CSV to
# argument 4 is the Batch ID of the subset of photographs (e.g. GB-006)
import csv
import sys
#Set filepath variables
filePath_1 = str(sys.argv[1])
filePath_2 = str(sys.argv[2])
filePath_3 = str(sys.argv[3])
batch = str(sys.argv[4])
filenameDict = {}
assetidDict = {}
simplifiedDict = {}
with open(filePath_1, 'rU') as f, open(filePath_2, 'rU') as g:
filenameData = csv.reader(f, dialect='excel', delimiter=',')
next(filenameData, None) # skip the headers
assetData = csv.reader(g,dialect='excel', delimiter=',')
for row in assetData:
database_id = row[0]
asset_id = row[1]
assetidDict[database_id] = asset_id
for row in filenameData:
database_id_2 = row[0]
uploadfilename = row[1]
asset_id = assetidDict[database_id_2]
simplifiedDict[asset_id] = uploadfilename
outputPath = ''.join([str(filePath_3), '/SimplifiedMatching', batch, '.csv'])
with open(outputPath, 'w', newline='') as csvfile:
w = csv.writer(csvfile, dialect='excel', delimiter=',')
for k,v in simplifiedDict.items():
w.writerow([k,v])
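# Example invocation (editor's sketch; the CSV file names are hypothetical):
#   python matchvaluesfromlists.py db_ids_filenames.csv db_ids_asset_ids.csv ./out GB-006
# which writes ./out/SimplifiedMatchingGB-006.csv, one "asset_id,filename" row per match.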
|
CarnegieHall/quality-control
|
matchvaluesfromlists.py
|
Python
|
mit
| 1,562
|
[
"VisIt"
] |
ebee1d9b6dca2041d63554abaa438e759308571c8877e36a744313bc81311148
|
from __future__ import print_function
#import sys
import os
import os.path as op
import traceback
#from os import listdir
#from os.path import isfile, join
import re
import logging
from seqcluster.libs.classes import sequence_unique
from seqcluster.libs.classes import quality
from seqcluster.libs.fastq import is_fastq, open_fastq
logger = logging.getLogger('prepare')
def prepare(args):
"""
    Read all seq.fa files and create a matrix and unique fasta files.
:param args: options parsed from command line
:returns: files - matrix and fasta files that should be used with
and aligner (as bowtie) and run `seqcluster cluster`
"""
try:
f = open(args.config, 'r')
seq_out = open(op.join(args.out, "seqs.fastq"), 'w')
ma_out = open(op.join(args.out, "seqs.ma"), 'w')
except IOError as e:
traceback.print_exc()
raise IOError("Can not create output files: %s, %s or read %s" % (op.join(args.out, "seqs.ma"), op.join(args.out, "seqs.fastq"), args.config))
logger.info("Reading sequences")
seq_l, sample_l = _read_fastq_files(f, args)
logger.info("Creating matrix with unique sequences")
logger.info("Filtering: min counts %s, min size %s, max size %s, min shared %s" % (args.minc, args.minl, args.maxl, args.min_shared))
_create_matrix_uniq_seq(sample_l, seq_l, ma_out, seq_out, args.min_shared)
logger.info("Finish preprocessing. Get a sorted BAM file of seqs.fa and run seqcluster cluster.")
def _read_fasta_files(f, args):
""" read fasta files of each sample and generate a seq_obj
with the information of each unique sequence in each sample
:param f: file containing the path for each fasta file and
the name of the sample. Two column format with `tab` as field
separator
:returns: * :code:`seq_l`: is a list of seq_obj objects, containing
the information of each sequence
* :code:`sample_l`: is a list with the name of the samples
(column two of the config file)
"""
seq_l = {}
sample_l = []
idx = 1
for line1 in f:
line1 = line1.strip()
cols = line1.split("\t")
with open(cols[0], 'r') as fasta:
sample_l.append(cols[1])
for line in fasta:
if line.startswith(">"):
idx += 1
counts = int(re.search("x([0-9]+)", line.strip()).group(1))
else:
seq = line.strip()
seq = seq[0:int(args.maxl)] if len(seq) > int(args.maxl) else seq
if counts > int(args.minc) and len(seq) > int(args.minl):
if seq not in seq_l:
seq_l[seq] = sequence_unique(idx, seq)
seq_l[seq].add_exp(cols[1], counts)
return seq_l, sample_l
def _read_fastq_files(f, args):
""" read fasta files of each sample and generate a seq_obj
with the information of each unique sequence in each sample
:param f: file containing the path for each fasta file and
the name of the sample. Two column format with `tab` as field
separator
:returns: * :code:`seq_l`: is a list of seq_obj objects, containing
the information of each sequence
* :code:`sample_l`: is a list with the name of the samples
(column two of the config file)
"""
seq_l = {}
sample_l = []
idx = 1
p = re.compile("^[ATCGNU]+$")
with open(op.join(args.out, "stats_prepare.tsv"), 'w') as out_handle:
for line1 in f:
line1 = line1.strip()
cols = line1.split("\t")
# if not is_fastq(cols[0]):
# raise ValueError("file is not fastq: %s" % cols[0])
with open_fastq(cols[0]) as handle:
sample_l.append(cols[1])
                total = added = 0
                keep = {}  # aggregate the quality of duplicate reads per unique sequence
                line = handle.readline()
while line:
if line.startswith("@") or line.startswith(">"):
seq = handle.readline().strip()
                        if not p.match(seq):
                            # advance before skipping: `continue` alone left `line`
                            # unchanged and could loop forever at end of file
                            line = handle.readline()
                            continue
idx += 1
total += 1
counts = int(re.search("x([0-9]+)", line.strip()).group(1))
if is_fastq(cols[0]):
handle.readline().strip()
qual = handle.readline().strip()
else:
qual = "I" * len(seq)
qual = qual[0:int(args.maxl)] if len(qual) > int(args.maxl) else qual
seq = seq[0:int(args.maxl)] if len(seq) > int(args.maxl) else seq
if counts > int(args.minc) and len(seq) > int(args.minl):
added += 1
if seq in keep:
keep[seq].update(qual)
else:
keep[seq] = quality(qual)
if seq not in seq_l:
seq_l[seq] = sequence_unique(idx, seq)
seq_l[seq].add_exp(cols[1], counts)
seq_l[seq].quality = keep[seq].get()
                    line = handle.readline()
print("total\t%s\t%s" % (idx, cols[1]), file=out_handle, end="")
print("added\t%s\t%s" % (len(seq_l), cols[1]), file=out_handle, end="")
logger.info("%s: Total read %s ; Total added %s" % (cols[1], idx, len(seq_l)))
return seq_l, sample_l
def _create_matrix_uniq_seq(sample_l, seq_l, maout, out, min_shared):
""" create matrix counts for each different sequence in all the fasta files
    :param sample_l: list of sample names, the second value returned by :code:`_read_fasta_files`
    :param seq_l: dict of unique sequences, the first value returned by :code:`_read_fasta_files`
    :param maout: file handler to write the matrix count information
    :param out: file handler to write the unique sequences (fastq format)
    :returns: None
"""
skip = 0
if int(min_shared) > len(sample_l):
min_shared = len(sample_l)
maout.write("id\tseq")
for g in sample_l:
maout.write("\t%s" % g)
for s in seq_l.keys():
seen = sum([1 for g in seq_l[s].group if seq_l[s].group[g] > 0])
if seen < int(min_shared):
skip += 1
continue
maout.write("\nseq_%s\t%s" % (seq_l[s].idx, seq_l[s].seq))
for g in sample_l:
if g in seq_l[s].group:
maout.write("\t%s" % seq_l[s].group[g])
else:
maout.write("\t0")
qual = "".join(seq_l[s].quality)
out.write("@seq_%s\n%s\n+\n%s\n" % (seq_l[s].idx, seq_l[s].seq, qual))
out.close()
maout.close()
logger.info("Total skipped due to --min-shared parameter (%s) : %s" % (min_shared, skip))
|
lpantano/seqcluster
|
seqcluster/prepare_data.py
|
Python
|
mit
| 7,124
|
[
"Bowtie"
] |
94570b0ade20ef68b030c29a6bfeb52abb2ff78d344a40ed7577b8efe093f7d9
|
from __future__ import print_function
from .diagnostic import unitroot_adf
import statsmodels.datasets.macrodata.data as macro
macrod = macro.load().data
print(macro.NOTE)
print(macrod.dtype.names)
datatrendli = [
('realgdp', 1),
('realcons', 1),
('realinv', 1),
('realgovt', 1),
('realdpi', 1),
('cpi', 1),
('m1', 1),
('tbilrate', 0),
('unemp',0),
('pop', 1),
('infl',0),
('realint', 0)
]
print('%-10s %5s %8s %8s' % ('variable', 'trend', 'adf', 'pval'))
for name, torder in datatrendli:
adf_, pval = unitroot_adf(macrod[name], trendorder=torder)[:2]
print('%-10s %5d %8.4f %8.4f' % (name, torder, adf_, pval))
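# Reading the output (a sketch, not part of the original script): unitroot_adf
# returns the ADF test statistic and its p-value; a small p-value (e.g. below
# 0.05) rejects the unit-root null hypothesis for the chosen trend order.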
|
hlin117/statsmodels
|
statsmodels/sandbox/stats/ex_newtests.py
|
Python
|
bsd-3-clause
| 815
|
[
"ADF"
] |
e093b0b3fa766ca4cf014bb27767dc50abe89ac0930d60cf07bf886e12f49d76
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'NeuronEphysDataMap'
db.create_table('neuroelectro_neuronephysdatamap', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('neuron_concept_map', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['neuroelectro.NeuronConceptMap'])),
('ephys_concept_map', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['neuroelectro.EphysConceptMap'])),
('val', self.gf('django.db.models.fields.FloatField')()),
('err', self.gf('django.db.models.fields.FloatField')(null=True)),
('n', self.gf('django.db.models.fields.IntegerField')(null=True)),
('ref_text', self.gf('django.db.models.fields.CharField')(max_length=200)),
('data_table', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['neuroelectro.DataTable'], null=True)),
('dt_id', self.gf('django.db.models.fields.CharField')(max_length=20, null=True)),
('date_mod', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('added_by', self.gf('django.db.models.fields.CharField')(default='robot', max_length=20)),
('times_validated', self.gf('django.db.models.fields.IntegerField')(default=0)),
))
db.send_create_signal('neuroelectro', ['NeuronEphysDataMap'])
def backwards(self, orm):
# Deleting model 'NeuronEphysDataMap'
db.delete_table('neuroelectro_neuronephysdatamap')
models = {
'neuroelectro.article': {
'Meta': {'object_name': 'Article'},
'abstract': ('django.db.models.fields.CharField', [], {'max_length': '10000', 'null': 'True'}),
'full_text_link': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Journal']", 'null': 'True'}),
'pmid': ('django.db.models.fields.IntegerField', [], {}),
'substances': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.Substance']", 'null': 'True', 'symmetrical': 'False'}),
'terms': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.MeshTerm']", 'null': 'True', 'symmetrical': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'neuroelectro.articlefulltext': {
'Meta': {'object_name': 'ArticleFullText'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Article']"}),
'full_text': ('picklefield.fields.PickledObjectField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'neuroelectro.brainregion': {
'Meta': {'object_name': 'BrainRegion'},
'abbrev': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'allenid': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'}),
'color': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isallen': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'treedepth': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
'neuroelectro.datatable': {
'Meta': {'object_name': 'DataTable'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Article']"}),
'ephys_props': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.EphysProp']", 'null': 'True', 'through': "orm['neuroelectro.EphysConceptMap']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'neurons': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.Neuron']", 'null': 'True', 'symmetrical': 'False'}),
'table_html': ('picklefield.fields.PickledObjectField', [], {'null': 'True'}),
'table_text': ('django.db.models.fields.CharField', [], {'max_length': '10000', 'null': 'True'})
},
'neuroelectro.datatabletag': {
'Meta': {'object_name': 'DataTableTag'},
'ephys_prop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.EphysProp']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'neuron': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Neuron']"})
},
'neuroelectro.ephysconceptmap': {
'Meta': {'object_name': 'EphysConceptMap'},
'added_by': ('django.db.models.fields.CharField', [], {'default': "'robot'", 'max_length': '20'}),
'data_table': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.DataTable']", 'null': 'True'}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'dt_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'ephys_prop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.EphysProp']"}),
'ephys_prop_syn': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.EphysPropSyn']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'match_quality': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'ref_text': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'times_validated': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'neuroelectro.ephysprop': {
'Meta': {'object_name': 'EphysProp'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'unit': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'})
},
'neuroelectro.ephyspropsyn': {
'Meta': {'object_name': 'EphysPropSyn'},
'ephys_prop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.EphysProp']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'neuroelectro.insituexpt': {
'Meta': {'object_name': 'InSituExpt'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'imageseriesid': ('django.db.models.fields.IntegerField', [], {}),
'plane': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'regionexprs': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.RegionExpr']", 'null': 'True', 'symmetrical': 'False'}),
'valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'neuroelectro.journal': {
'Meta': {'object_name': 'Journal'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'})
},
'neuroelectro.meshterm': {
'Meta': {'object_name': 'MeshTerm'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '300'})
},
'neuroelectro.neuron': {
'Meta': {'object_name': 'Neuron'},
'added_by': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'defining_articles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.Article']", 'null': 'True', 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'nlex_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'regions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.BrainRegion']", 'null': 'True', 'symmetrical': 'False'}),
'synonyms': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.NeuronSyn']", 'null': 'True', 'symmetrical': 'False'})
},
'neuroelectro.neuronarticlemap': {
'Meta': {'object_name': 'NeuronArticleMap'},
'added_by': ('django.db.models.fields.CharField', [], {'default': "'robot'", 'max_length': '20'}),
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Article']", 'null': 'True'}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'neuron': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Neuron']"}),
'neuron_syn': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.NeuronSyn']", 'null': 'True'}),
'num_mentions': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
'neuroelectro.neuronconceptmap': {
'Meta': {'object_name': 'NeuronConceptMap'},
'added_by': ('django.db.models.fields.CharField', [], {'default': "'robot'", 'max_length': '20'}),
'data_table': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.DataTable']", 'null': 'True'}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'dt_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'match_quality': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'neuron': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Neuron']"}),
'neuron_syn': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.NeuronSyn']", 'null': 'True'}),
'ref_text': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'times_validated': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'neuroelectro.neuronephysdatamap': {
'Meta': {'object_name': 'NeuronEphysDataMap'},
'added_by': ('django.db.models.fields.CharField', [], {'default': "'robot'", 'max_length': '20'}),
'data_table': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.DataTable']", 'null': 'True'}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'dt_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'ephys_concept_map': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.EphysConceptMap']"}),
'err': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'n': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'neuron_concept_map': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.NeuronConceptMap']"}),
'ref_text': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'times_validated': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'val': ('django.db.models.fields.FloatField', [], {})
},
'neuroelectro.neuronephyslink': {
'Meta': {'object_name': 'NeuronEphysLink'},
'data_table': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.DataTable']"}),
'ephys_prop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.EphysProp']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'neuron': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Neuron']"}),
'num_reps': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'val': ('django.db.models.fields.FloatField', [], {}),
'val_err': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'neuroelectro.neuronsyn': {
'Meta': {'object_name': 'NeuronSyn'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'neuroelectro.protein': {
'Meta': {'object_name': 'Protein'},
'allenid': ('django.db.models.fields.IntegerField', [], {}),
'common_name': ('django.db.models.fields.CharField', [], {'max_length': '400', 'null': 'True'}),
'entrezid': ('django.db.models.fields.IntegerField', [], {}),
'gene': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_situ_expts': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.InSituExpt']", 'null': 'True', 'symmetrical': 'False'}),
'is_channel': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
'synonyms': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.ProteinSyn']", 'null': 'True', 'symmetrical': 'False'})
},
'neuroelectro.proteinsyn': {
'Meta': {'object_name': 'ProteinSyn'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'neuroelectro.regionexpr': {
'Meta': {'object_name': 'RegionExpr'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'default': '0', 'to': "orm['neuroelectro.BrainRegion']"}),
'val': ('django.db.models.fields.FloatField', [], {})
},
'neuroelectro.species': {
'Meta': {'object_name': 'Species'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'specie': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'neuroelectro.substance': {
'Meta': {'object_name': 'Substance'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '300'})
},
'neuroelectro.superprotein': {
'Meta': {'object_name': 'SuperProtein'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_channel': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
'synonyms': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.ProteinSyn']", 'null': 'True', 'symmetrical': 'False'})
},
'neuroelectro.unit': {
'Meta': {'object_name': 'Unit'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'})
}
}
complete_apps = ['neuroelectro']
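# Usage sketch (South-era Django; assumes South is installed and the app has
# been synced): "python manage.py migrate neuroelectro" applies forwards();
# migrating to the previous migration number (e.g. 0024) invokes backwards().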
|
neuroelectro/neuroelectro_org
|
neuroelectro/south_migrations/0025_auto__add_neuronephysdatamap.py
|
Python
|
gpl-2.0
| 17,158
|
[
"NEURON"
] |
6851f43e0bebfa77218e7702b947b2c56b9e89493d185d6869865a304d057949
|
# mako/codegen.py
# Copyright (C) 2006-2012 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""provides functionality for rendering a parsetree constructing into module
source code."""
import time
import re
from mako.pygen import PythonPrinter
from mako import util, ast, parsetree, filters, exceptions
MAGIC_NUMBER = 8
# names which are hardwired into the
# template and are not accessed via the
# context itself
RESERVED_NAMES = set(['context', 'loop', 'UNDEFINED'])
def compile(node,
uri,
filename=None,
default_filters=None,
buffer_filters=None,
imports=None,
source_encoding=None,
generate_magic_comment=True,
disable_unicode=False,
strict_undefined=False,
enable_loop=True,
reserved_names=()):
"""Generate module source code given a parsetree node,
uri, and optional source filename"""
# if on Py2K, push the "source_encoding" string to be
# a bytestring itself, as we will be embedding it into
# the generated source and we don't want to coerce the
# result into a unicode object, in "disable_unicode" mode
if not util.py3k and isinstance(source_encoding, unicode):
source_encoding = source_encoding.encode(source_encoding)
buf = util.FastEncodingBuffer()
printer = PythonPrinter(buf)
_GenerateRenderMethod(printer,
_CompileContext(uri,
filename,
default_filters,
buffer_filters,
imports,
source_encoding,
generate_magic_comment,
disable_unicode,
strict_undefined,
enable_loop,
reserved_names),
node)
return buf.getvalue()
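# Usage sketch (not from this module): user code normally reaches compile()
# through the public Template API, whose .code attribute exposes the module
# source generated here:
#   from mako.template import Template
#   print(Template("hello ${name}").code)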
class _CompileContext(object):
def __init__(self,
uri,
filename,
default_filters,
buffer_filters,
imports,
source_encoding,
generate_magic_comment,
disable_unicode,
strict_undefined,
enable_loop,
reserved_names):
self.uri = uri
self.filename = filename
self.default_filters = default_filters
self.buffer_filters = buffer_filters
self.imports = imports
self.source_encoding = source_encoding
self.generate_magic_comment = generate_magic_comment
self.disable_unicode = disable_unicode
self.strict_undefined = strict_undefined
self.enable_loop = enable_loop
self.reserved_names = reserved_names
class _GenerateRenderMethod(object):
"""A template visitor object which generates the
full module source for a template.
"""
def __init__(self, printer, compiler, node):
self.printer = printer
self.last_source_line = -1
self.compiler = compiler
self.node = node
self.identifier_stack = [None]
self.in_def = isinstance(node, (parsetree.DefTag, parsetree.BlockTag))
if self.in_def:
name = "render_%s" % node.funcname
args = node.get_argument_expressions()
filtered = len(node.filter_args.args) > 0
buffered = eval(node.attributes.get('buffered', 'False'))
cached = eval(node.attributes.get('cached', 'False'))
defs = None
pagetag = None
if node.is_block and not node.is_anonymous:
args += ['**pageargs']
else:
defs = self.write_toplevel()
pagetag = self.compiler.pagetag
name = "render_body"
if pagetag is not None:
args = pagetag.body_decl.get_argument_expressions()
if not pagetag.body_decl.kwargs:
args += ['**pageargs']
cached = eval(pagetag.attributes.get('cached', 'False'))
self.compiler.enable_loop = self.compiler.enable_loop or eval(
pagetag.attributes.get(
'enable_loop', 'False')
)
else:
args = ['**pageargs']
cached = False
buffered = filtered = False
if args is None:
args = ['context']
else:
args = [a for a in ['context'] + args]
self.write_render_callable(
pagetag or node,
name, args,
buffered, filtered, cached)
if defs is not None:
for node in defs:
_GenerateRenderMethod(printer, compiler, node)
@property
def identifiers(self):
return self.identifier_stack[-1]
def write_toplevel(self):
"""Traverse a template structure for module-level directives and
generate the start of module-level code.
"""
inherit = []
namespaces = {}
module_code = []
        encoding = [None]
self.compiler.pagetag = None
class FindTopLevel(object):
def visitInheritTag(s, node):
inherit.append(node)
def visitNamespaceTag(s, node):
namespaces[node.name] = node
def visitPageTag(s, node):
self.compiler.pagetag = node
def visitCode(s, node):
if node.ismodule:
module_code.append(node)
f = FindTopLevel()
for n in self.node.nodes:
n.accept_visitor(f)
self.compiler.namespaces = namespaces
module_ident = set()
for n in module_code:
module_ident = module_ident.union(n.declared_identifiers())
module_identifiers = _Identifiers(self.compiler)
module_identifiers.declared = module_ident
# module-level names, python code
if self.compiler.generate_magic_comment and \
self.compiler.source_encoding:
self.printer.writeline("# -*- encoding:%s -*-" %
self.compiler.source_encoding)
self.printer.writeline("from mako import runtime, filters, cache")
self.printer.writeline("UNDEFINED = runtime.UNDEFINED")
self.printer.writeline("__M_dict_builtin = dict")
self.printer.writeline("__M_locals_builtin = locals")
self.printer.writeline("_magic_number = %r" % MAGIC_NUMBER)
self.printer.writeline("_modified_time = %r" % time.time())
self.printer.writeline("_enable_loop = %r" % self.compiler.enable_loop)
self.printer.writeline(
"_template_filename = %r" % self.compiler.filename)
self.printer.writeline("_template_uri = %r" % self.compiler.uri)
self.printer.writeline(
"_source_encoding = %r" % self.compiler.source_encoding)
if self.compiler.imports:
buf = ''
for imp in self.compiler.imports:
buf += imp + "\n"
self.printer.writeline(imp)
impcode = ast.PythonCode(
buf,
source='', lineno=0,
pos=0,
filename='template defined imports')
else:
impcode = None
main_identifiers = module_identifiers.branch(self.node)
module_identifiers.topleveldefs = \
module_identifiers.topleveldefs.\
union(main_identifiers.topleveldefs)
module_identifiers.declared.add("UNDEFINED")
if impcode:
module_identifiers.declared.update(impcode.declared_identifiers)
self.compiler.identifiers = module_identifiers
self.printer.writeline("_exports = %r" %
[n.name for n in
main_identifiers.topleveldefs.values()]
)
self.printer.write("\n\n")
if len(module_code):
self.write_module_code(module_code)
if len(inherit):
self.write_namespaces(namespaces)
self.write_inherit(inherit[-1])
elif len(namespaces):
self.write_namespaces(namespaces)
return main_identifiers.topleveldefs.values()
def write_render_callable(self, node, name, args, buffered, filtered,
cached):
"""write a top-level render callable.
this could be the main render() method or that of a top-level def."""
if self.in_def:
decorator = node.decorator
if decorator:
self.printer.writeline(
"@runtime._decorate_toplevel(%s)" % decorator)
self.printer.writelines(
"def %s(%s):" % (name, ','.join(args)),
# push new frame, assign current frame to __M_caller
"__M_caller = context.caller_stack._push_frame()",
"try:"
)
if buffered or filtered or cached:
self.printer.writeline("context._push_buffer()")
self.identifier_stack.append(
self.compiler.identifiers.branch(self.node))
if (not self.in_def or self.node.is_block) and '**pageargs' in args:
self.identifier_stack[-1].argument_declared.add('pageargs')
if not self.in_def and (
len(self.identifiers.locally_assigned) > 0 or
len(self.identifiers.argument_declared) > 0
):
self.printer.writeline("__M_locals = __M_dict_builtin(%s)" %
','.join([
"%s=%s" % (x, x) for x in
self.identifiers.argument_declared
]))
self.write_variable_declares(self.identifiers, toplevel=True)
for n in self.node.nodes:
n.accept_visitor(self)
self.write_def_finish(self.node, buffered, filtered, cached)
self.printer.writeline(None)
self.printer.write("\n\n")
if cached:
self.write_cache_decorator(
node, name,
args, buffered,
self.identifiers, toplevel=True)
def write_module_code(self, module_code):
"""write module-level template code, i.e. that which
is enclosed in <%! %> tags in the template."""
for n in module_code:
self.write_source_comment(n)
self.printer.write_indented_block(n.text)
def write_inherit(self, node):
"""write the module-level inheritance-determination callable."""
self.printer.writelines(
"def _mako_inherit(template, context):",
"_mako_generate_namespaces(context)",
"return runtime._inherit_from(context, %s, _template_uri)" %
(node.parsed_attributes['file']),
None
)
def write_namespaces(self, namespaces):
"""write the module-level namespace-generating callable."""
self.printer.writelines(
"def _mako_get_namespace(context, name):",
"try:",
"return context.namespaces[(__name__, name)]",
"except KeyError:",
"_mako_generate_namespaces(context)",
"return context.namespaces[(__name__, name)]",
            None, None
)
self.printer.writeline("def _mako_generate_namespaces(context):")
for node in namespaces.values():
if node.attributes.has_key('import'):
self.compiler.has_ns_imports = True
self.write_source_comment(node)
if len(node.nodes):
self.printer.writeline("def make_namespace():")
export = []
identifiers = self.compiler.identifiers.branch(node)
self.in_def = True
class NSDefVisitor(object):
def visitDefTag(s, node):
s.visitDefOrBase(node)
def visitBlockTag(s, node):
s.visitDefOrBase(node)
def visitDefOrBase(s, node):
if node.is_anonymous:
raise exceptions.CompileException(
"Can't put anonymous blocks inside "
"<%namespace>",
**node.exception_kwargs
)
self.write_inline_def(node, identifiers, nested=False)
export.append(node.funcname)
vis = NSDefVisitor()
for n in node.nodes:
n.accept_visitor(vis)
self.printer.writeline("return [%s]" % (','.join(export)))
self.printer.writeline(None)
self.in_def = False
callable_name = "make_namespace()"
else:
callable_name = "None"
if 'file' in node.parsed_attributes:
self.printer.writeline(
"ns = runtime.TemplateNamespace(%r,"
" context._clean_inheritance_tokens(),"
" templateuri=%s, callables=%s, "
" calling_uri=_template_uri)" %
(
node.name,
node.parsed_attributes.get('file', 'None'),
callable_name,
)
)
elif 'module' in node.parsed_attributes:
self.printer.writeline(
"ns = runtime.ModuleNamespace(%r,"
" context._clean_inheritance_tokens(),"
" callables=%s, calling_uri=_template_uri,"
" module=%s)" %
(
node.name,
callable_name,
node.parsed_attributes.get('module', 'None')
)
)
else:
self.printer.writeline(
"ns = runtime.Namespace(%r,"
" context._clean_inheritance_tokens(),"
" callables=%s, calling_uri=_template_uri)" %
(
node.name,
callable_name,
)
)
if eval(node.attributes.get('inheritable', "False")):
self.printer.writeline("context['self'].%s = ns" % (node.name))
self.printer.writeline(
"context.namespaces[(__name__, %s)] = ns" % repr(node.name))
self.printer.write("\n")
if not len(namespaces):
self.printer.writeline("pass")
self.printer.writeline(None)
def write_variable_declares(self, identifiers, toplevel=False, limit=None):
"""write variable declarations at the top of a function.
the variable declarations are in the form of callable
definitions for defs and/or name lookup within the
function's context argument. the names declared are based
on the names that are referenced in the function body,
which don't otherwise have any explicit assignment
operation. names that are assigned within the body are
assumed to be locally-scoped variables and are not
separately declared.
for def callable definitions, if the def is a top-level
callable then a 'stub' callable is generated which wraps
the current Context into a closure. if the def is not
top-level, it is fully rendered as a local closure.
"""
# collection of all defs available to us in this scope
comp_idents = dict([(c.funcname, c) for c in identifiers.defs])
to_write = set()
# write "context.get()" for all variables we are going to
        # need that aren't in the namespace yet
to_write = to_write.union(identifiers.undeclared)
# write closure functions for closures that we define
# right here
to_write = to_write.union(
[c.funcname for c in identifiers.closuredefs.values()])
# remove identifiers that are declared in the argument
# signature of the callable
to_write = to_write.difference(identifiers.argument_declared)
# remove identifiers that we are going to assign to.
# in this way we mimic Python's behavior,
# i.e. assignment to a variable within a block
# means that variable is now a "locally declared" var,
# which cannot be referenced beforehand.
to_write = to_write.difference(identifiers.locally_declared)
if self.compiler.enable_loop:
has_loop = "loop" in to_write
to_write.discard("loop")
else:
has_loop = False
        # if a limiting set was sent, constrain to those items in that list
# (this is used for the caching decorator)
if limit is not None:
to_write = to_write.intersection(limit)
if toplevel and getattr(self.compiler, 'has_ns_imports', False):
self.printer.writeline("_import_ns = {}")
self.compiler.has_imports = True
for ident, ns in self.compiler.namespaces.iteritems():
if ns.attributes.has_key('import'):
self.printer.writeline(
"_mako_get_namespace(context, %r)."\
"_populate(_import_ns, %r)" %
(
ident,
re.split(r'\s*,\s*', ns.attributes['import'])
))
if has_loop:
self.printer.writeline(
'loop = __M_loop = runtime.LoopStack()'
)
for ident in to_write:
if ident in comp_idents:
comp = comp_idents[ident]
if comp.is_block:
if not comp.is_anonymous:
self.write_def_decl(comp, identifiers)
else:
self.write_inline_def(comp, identifiers, nested=True)
else:
if comp.is_root():
self.write_def_decl(comp, identifiers)
else:
self.write_inline_def(comp, identifiers, nested=True)
elif ident in self.compiler.namespaces:
self.printer.writeline(
"%s = _mako_get_namespace(context, %r)" %
(ident, ident)
)
else:
if getattr(self.compiler, 'has_ns_imports', False):
if self.compiler.strict_undefined:
self.printer.writelines(
"%s = _import_ns.get(%r, UNDEFINED)" %
(ident, ident),
"if %s is UNDEFINED:" % ident,
"try:",
"%s = context[%r]" % (ident, ident),
"except KeyError:",
"raise NameError(\"'%s' is not defined\")" %
ident,
None, None
)
else:
self.printer.writeline(
"%s = _import_ns.get(%r, context.get(%r, UNDEFINED))" %
(ident, ident, ident))
else:
if self.compiler.strict_undefined:
self.printer.writelines(
"try:",
"%s = context[%r]" % (ident, ident),
"except KeyError:",
"raise NameError(\"'%s' is not defined\")" %
ident,
None
)
else:
self.printer.writeline(
"%s = context.get(%r, UNDEFINED)" % (ident, ident)
)
self.printer.writeline("__M_writer = context.writer()")
def write_source_comment(self, node):
"""write a source comment containing the line number of the
corresponding template line."""
if self.last_source_line != node.lineno:
self.printer.writeline("# SOURCE LINE %d" % node.lineno)
self.last_source_line = node.lineno
def write_def_decl(self, node, identifiers):
"""write a locally-available callable referencing a top-level def"""
funcname = node.funcname
namedecls = node.get_argument_expressions()
nameargs = node.get_argument_expressions(include_defaults=False)
if not self.in_def and (
len(self.identifiers.locally_assigned) > 0 or
len(self.identifiers.argument_declared) > 0):
nameargs.insert(0, 'context.locals_(__M_locals)')
else:
nameargs.insert(0, 'context')
self.printer.writeline("def %s(%s):" % (funcname, ",".join(namedecls)))
self.printer.writeline(
"return render_%s(%s)" % (funcname, ",".join(nameargs)))
self.printer.writeline(None)
def write_inline_def(self, node, identifiers, nested):
"""write a locally-available def callable inside an enclosing def."""
namedecls = node.get_argument_expressions()
decorator = node.decorator
if decorator:
self.printer.writeline(
"@runtime._decorate_inline(context, %s)" % decorator)
self.printer.writeline(
"def %s(%s):" % (node.funcname, ",".join(namedecls)))
filtered = len(node.filter_args.args) > 0
buffered = eval(node.attributes.get('buffered', 'False'))
cached = eval(node.attributes.get('cached', 'False'))
self.printer.writelines(
# push new frame, assign current frame to __M_caller
"__M_caller = context.caller_stack._push_frame()",
"try:"
)
if buffered or filtered or cached:
self.printer.writelines(
"context._push_buffer()",
)
identifiers = identifiers.branch(node, nested=nested)
self.write_variable_declares(identifiers)
self.identifier_stack.append(identifiers)
for n in node.nodes:
n.accept_visitor(self)
self.identifier_stack.pop()
self.write_def_finish(node, buffered, filtered, cached)
self.printer.writeline(None)
if cached:
self.write_cache_decorator(node, node.funcname,
namedecls, False, identifiers,
inline=True, toplevel=False)
def write_def_finish(self, node, buffered, filtered, cached,
callstack=True):
"""write the end section of a rendering function, either outermost or
inline.
this takes into account if the rendering function was filtered,
buffered, etc. and closes the corresponding try: block if any, and
writes code to retrieve captured content, apply filters, send proper
return value."""
if not buffered and not cached and not filtered:
self.printer.writeline("return ''")
if callstack:
self.printer.writelines(
"finally:",
"context.caller_stack._pop_frame()",
None
)
if buffered or filtered or cached:
if buffered or cached:
# in a caching scenario, don't try to get a writer
# from the context after popping; assume the caching
            # implementation might be using a context with no
# extra buffers
self.printer.writelines(
"finally:",
"__M_buf = context._pop_buffer()"
)
else:
self.printer.writelines(
"finally:",
"__M_buf, __M_writer = context._pop_buffer_and_writer()"
)
if callstack:
self.printer.writeline("context.caller_stack._pop_frame()")
s = "__M_buf.getvalue()"
if filtered:
s = self.create_filter_callable(node.filter_args.args, s,
False)
self.printer.writeline(None)
if buffered and not cached:
s = self.create_filter_callable(self.compiler.buffer_filters,
s, False)
if buffered or cached:
self.printer.writeline("return %s" % s)
else:
self.printer.writelines(
"__M_writer(%s)" % s,
"return ''"
)
def write_cache_decorator(self, node_or_pagetag, name,
args, buffered, identifiers,
inline=False, toplevel=False):
"""write a post-function decorator to replace a rendering
callable with a cached version of itself."""
self.printer.writeline("__M_%s = %s" % (name, name))
cachekey = node_or_pagetag.parsed_attributes.get('cache_key',
repr(name))
cache_args = {}
if self.compiler.pagetag is not None:
cache_args.update(
(
pa[6:],
self.compiler.pagetag.parsed_attributes[pa]
)
for pa in self.compiler.pagetag.parsed_attributes
if pa.startswith('cache_') and pa != 'cache_key'
)
cache_args.update(
(
pa[6:],
node_or_pagetag.parsed_attributes[pa]
) for pa in node_or_pagetag.parsed_attributes
if pa.startswith('cache_') and pa != 'cache_key'
)
if 'timeout' in cache_args:
cache_args['timeout'] = int(eval(cache_args['timeout']))
self.printer.writeline("def %s(%s):" % (name, ','.join(args)))
# form "arg1, arg2, arg3=arg3, arg4=arg4", etc.
pass_args = [
'=' in a and "%s=%s" % ((a.split('=')[0],)*2) or a
for a in args
]
self.write_variable_declares(
identifiers,
toplevel=toplevel,
limit=node_or_pagetag.undeclared_identifiers()
)
if buffered:
s = "context.get('local')."\
"cache._ctx_get_or_create("\
"%s, lambda:__M_%s(%s), context, %s__M_defname=%r)" % \
(cachekey, name, ','.join(pass_args),
''.join(["%s=%s, " % (k,v)
for k, v in cache_args.items()]),
name
)
# apply buffer_filters
s = self.create_filter_callable(self.compiler.buffer_filters, s,
False)
self.printer.writelines("return " + s,None)
else:
self.printer.writelines(
"__M_writer(context.get('local')."
"cache._ctx_get_or_create("\
"%s, lambda:__M_%s(%s), context, %s__M_defname=%r))" %
(cachekey, name, ','.join(pass_args),
''.join(["%s=%s, " % (k,v)
for k, v in cache_args.items()]),
name,
),
"return ''",
None
)
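    # Example sketch: a template def such as
    #   <%def name="hot()" cached="True" cache_timeout="300">...</%def>
    # yields cache_args == {'timeout': 300} above, so the def is replaced by
    # a wrapper that consults context.get('local').cache.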
def create_filter_callable(self, args, target, is_expression):
"""write a filter-applying expression based on the filters
present in the given filter names, adjusting for the global
'default' filter aliases as needed."""
def locate_encode(name):
if re.match(r'decode\..+', name):
return "filters." + name
elif self.compiler.disable_unicode:
return filters.NON_UNICODE_ESCAPES.get(name, name)
else:
return filters.DEFAULT_ESCAPES.get(name, name)
if 'n' not in args:
if is_expression:
if self.compiler.pagetag:
args = self.compiler.pagetag.filter_args.args + args
if self.compiler.default_filters:
args = self.compiler.default_filters + args
for e in args:
# if filter given as a function, get just the identifier portion
if e == 'n':
continue
m = re.match(r'(.+?)(\(.*\))', e)
if m:
(ident, fargs) = m.group(1,2)
f = locate_encode(ident)
e = f + fargs
else:
x = e
e = locate_encode(e)
assert e is not None
target = "%s(%s)" % (e, target)
return target
def visitExpression(self, node):
self.write_source_comment(node)
if len(node.escapes) or \
(
self.compiler.pagetag is not None and
len(self.compiler.pagetag.filter_args.args)
) or \
len(self.compiler.default_filters):
s = self.create_filter_callable(node.escapes_code.args,
"%s" % node.text, True)
self.printer.writeline("__M_writer(%s)" % s)
else:
self.printer.writeline("__M_writer(%s)" % node.text)
def visitControlLine(self, node):
if node.isend:
self.printer.writeline(None)
if node.has_loop_context:
self.printer.writeline('finally:')
self.printer.writeline("loop = __M_loop._exit()")
self.printer.writeline(None)
else:
self.write_source_comment(node)
if self.compiler.enable_loop and node.keyword == 'for':
text = mangle_mako_loop(node, self.printer)
else:
text = node.text
self.printer.writeline(text)
children = node.get_children()
# this covers the three situations where we want to insert a pass:
# 1) a ternary control line with no children,
# 2) a primary control line with nothing but its own ternary
# and end control lines, and
# 3) any control line with no content other than comments
if not children or (
util.all(isinstance(c, (parsetree.Comment,
parsetree.ControlLine))
for c in children) and
util.all((node.is_ternary(c.keyword) or c.isend)
for c in children
if isinstance(c, parsetree.ControlLine))):
self.printer.writeline("pass")
def visitText(self, node):
self.write_source_comment(node)
self.printer.writeline("__M_writer(%s)" % repr(node.content))
def visitTextTag(self, node):
filtered = len(node.filter_args.args) > 0
if filtered:
self.printer.writelines(
"__M_writer = context._push_writer()",
"try:",
)
for n in node.nodes:
n.accept_visitor(self)
if filtered:
self.printer.writelines(
"finally:",
"__M_buf, __M_writer = context._pop_buffer_and_writer()",
"__M_writer(%s)" %
self.create_filter_callable(
node.filter_args.args,
"__M_buf.getvalue()",
False),
None
)
def visitCode(self, node):
if not node.ismodule:
self.write_source_comment(node)
self.printer.write_indented_block(node.text)
if not self.in_def and len(self.identifiers.locally_assigned) > 0:
# if we are the "template" def, fudge locally
# declared/modified variables into the "__M_locals" dictionary,
# which is used for def calls within the same template,
# to simulate "enclosing scope"
self.printer.writeline(
'__M_locals_builtin_stored = __M_locals_builtin()')
self.printer.writeline(
'__M_locals.update(__M_dict_builtin([(__M_key,'
' __M_locals_builtin_stored[__M_key]) for __M_key in'
' [%s] if __M_key in __M_locals_builtin_stored]))' %
','.join([repr(x) for x in node.declared_identifiers()]))
def visitIncludeTag(self, node):
self.write_source_comment(node)
args = node.attributes.get('args')
if args:
self.printer.writeline(
"runtime._include_file(context, %s, _template_uri, %s)" %
(node.parsed_attributes['file'], args))
else:
self.printer.writeline(
"runtime._include_file(context, %s, _template_uri)" %
(node.parsed_attributes['file']))
def visitNamespaceTag(self, node):
pass
def visitDefTag(self, node):
pass
def visitBlockTag(self, node):
if node.is_anonymous:
self.printer.writeline("%s()" % node.funcname)
else:
nameargs = node.get_argument_expressions(include_defaults=False)
nameargs += ['**pageargs']
self.printer.writeline("if 'parent' not in context._data or "
"not hasattr(context._data['parent'], '%s'):"
% node.funcname)
self.printer.writeline(
"context['self'].%s(%s)" % (node.funcname, ",".join(nameargs)))
self.printer.writeline("\n")
def visitCallNamespaceTag(self, node):
# TODO: we can put namespace-specific checks here, such
# as ensure the given namespace will be imported,
# pre-import the namespace, etc.
self.visitCallTag(node)
def visitCallTag(self, node):
self.printer.writeline("def ccall(caller):")
export = ['body']
callable_identifiers = self.identifiers.branch(node, nested=True)
body_identifiers = callable_identifiers.branch(node, nested=False)
# we want the 'caller' passed to ccall to be used
# for the body() function, but for other non-body()
# <%def>s within <%call> we want the current caller
# off the call stack (if any)
body_identifiers.add_declared('caller')
self.identifier_stack.append(body_identifiers)
class DefVisitor(object):
def visitDefTag(s, node):
s.visitDefOrBase(node)
def visitBlockTag(s, node):
s.visitDefOrBase(node)
def visitDefOrBase(s, node):
self.write_inline_def(node, callable_identifiers, nested=False)
if not node.is_anonymous:
export.append(node.funcname)
# remove defs that are within the <%call> from the
# "closuredefs" defined in the body, so they dont render twice
if node.funcname in body_identifiers.closuredefs:
del body_identifiers.closuredefs[node.funcname]
vis = DefVisitor()
for n in node.nodes:
n.accept_visitor(vis)
self.identifier_stack.pop()
bodyargs = node.body_decl.get_argument_expressions()
self.printer.writeline("def body(%s):" % ','.join(bodyargs))
# TODO: figure out best way to specify
# buffering/nonbuffering (at call time would be better)
buffered = False
if buffered:
self.printer.writelines(
"context._push_buffer()",
"try:"
)
self.write_variable_declares(body_identifiers)
self.identifier_stack.append(body_identifiers)
for n in node.nodes:
n.accept_visitor(self)
self.identifier_stack.pop()
self.write_def_finish(node, buffered, False, False, callstack=False)
self.printer.writelines(
None,
"return [%s]" % (','.join(export)),
None
)
self.printer.writelines(
# push on caller for nested call
"context.caller_stack.nextcaller = "
"runtime.Namespace('caller', context, "
"callables=ccall(__M_caller))",
"try:")
self.write_source_comment(node)
self.printer.writelines(
"__M_writer(%s)" % self.create_filter_callable(
[], node.expression, True),
"finally:",
"context.caller_stack.nextcaller = None",
None
)
class _Identifiers(object):
"""tracks the status of identifier names as template code is rendered."""
def __init__(self, compiler, node=None, parent=None, nested=False):
if parent is not None:
# if we are the branch created in write_namespaces(),
# we don't share any context from the main body().
if isinstance(node, parsetree.NamespaceTag):
self.declared = set()
self.topleveldefs = util.SetLikeDict()
else:
# things that have already been declared
# in an enclosing namespace (i.e. names we can just use)
self.declared = set(parent.declared).\
union([c.name for c in parent.closuredefs.values()]).\
union(parent.locally_declared).\
union(parent.argument_declared)
# if these identifiers correspond to a "nested"
# scope, it means whatever the parent identifiers
# had as undeclared will have been declared by that parent,
# and therefore we have them in our scope.
if nested:
self.declared = self.declared.union(parent.undeclared)
# top level defs that are available
self.topleveldefs = util.SetLikeDict(**parent.topleveldefs)
else:
self.declared = set()
self.topleveldefs = util.SetLikeDict()
self.compiler = compiler
# things within this level that are referenced before they
# are declared (e.g. assigned to)
self.undeclared = set()
# things that are declared locally. some of these things
# could be in the "undeclared" list as well if they are
# referenced before declared
self.locally_declared = set()
# assignments made in explicit python blocks.
# these will be propagated to
# the context of local def calls.
self.locally_assigned = set()
# things that are declared in the argument
# signature of the def callable
self.argument_declared = set()
# closure defs that are defined in this level
self.closuredefs = util.SetLikeDict()
self.node = node
if node is not None:
node.accept_visitor(self)
illegal_names = self.compiler.reserved_names.intersection(
self.locally_declared)
if illegal_names:
raise exceptions.NameConflictError(
"Reserved words declared in template: %s" %
", ".join(illegal_names))
def branch(self, node, **kwargs):
"""create a new Identifiers for a new Node, with
this Identifiers as the parent."""
return _Identifiers(self.compiler, node, self, **kwargs)
@property
def defs(self):
return set(self.topleveldefs.union(self.closuredefs).values())
def __repr__(self):
return "Identifiers(declared=%r, locally_declared=%r, "\
"undeclared=%r, topleveldefs=%r, closuredefs=%r, "\
"argumentdeclared=%r)" %\
(
list(self.declared),
list(self.locally_declared),
list(self.undeclared),
[c.name for c in self.topleveldefs.values()],
[c.name for c in self.closuredefs.values()],
self.argument_declared)
def check_declared(self, node):
"""update the state of this Identifiers with the undeclared
and declared identifiers of the given node."""
for ident in node.undeclared_identifiers():
if ident != 'context' and\
ident not in self.declared.union(self.locally_declared):
self.undeclared.add(ident)
for ident in node.declared_identifiers():
self.locally_declared.add(ident)
def add_declared(self, ident):
self.declared.add(ident)
if ident in self.undeclared:
self.undeclared.remove(ident)
def visitExpression(self, node):
self.check_declared(node)
def visitControlLine(self, node):
self.check_declared(node)
def visitCode(self, node):
if not node.ismodule:
self.check_declared(node)
self.locally_assigned = self.locally_assigned.union(
node.declared_identifiers())
def visitNamespaceTag(self, node):
# only traverse into the sub-elements of a
# <%namespace> tag if we are the branch created in
# write_namespaces()
if self.node is node:
for n in node.nodes:
n.accept_visitor(self)
def _check_name_exists(self, collection, node):
existing = collection.get(node.funcname)
collection[node.funcname] = node
if existing is not None and \
existing is not node and \
(node.is_block or existing.is_block):
raise exceptions.CompileException(
"%%def or %%block named '%s' already "
"exists in this template." %
node.funcname, **node.exception_kwargs)
def visitDefTag(self, node):
if node.is_root() and not node.is_anonymous:
self._check_name_exists(self.topleveldefs, node)
elif node is not self.node:
self._check_name_exists(self.closuredefs, node)
for ident in node.undeclared_identifiers():
if ident != 'context' and\
ident not in self.declared.union(self.locally_declared):
self.undeclared.add(ident)
# visit defs only one level deep
if node is self.node:
for ident in node.declared_identifiers():
self.argument_declared.add(ident)
for n in node.nodes:
n.accept_visitor(self)
def visitBlockTag(self, node):
if node is not self.node and \
not node.is_anonymous:
if isinstance(self.node, parsetree.DefTag):
raise exceptions.CompileException(
"Named block '%s' not allowed inside of def '%s'"
% (node.name, self.node.name), **node.exception_kwargs)
elif isinstance(self.node,
(parsetree.CallTag, parsetree.CallNamespaceTag)):
raise exceptions.CompileException(
"Named block '%s' not allowed inside of <%%call> tag"
% (node.name, ), **node.exception_kwargs)
for ident in node.undeclared_identifiers():
if ident != 'context' and\
ident not in self.declared.union(self.locally_declared):
self.undeclared.add(ident)
if not node.is_anonymous:
self._check_name_exists(self.topleveldefs, node)
self.undeclared.add(node.funcname)
elif node is not self.node:
self._check_name_exists(self.closuredefs, node)
for ident in node.declared_identifiers():
self.argument_declared.add(ident)
for n in node.nodes:
n.accept_visitor(self)
def visitIncludeTag(self, node):
self.check_declared(node)
def visitPageTag(self, node):
for ident in node.declared_identifiers():
self.argument_declared.add(ident)
self.check_declared(node)
def visitCallNamespaceTag(self, node):
self.visitCallTag(node)
def visitCallTag(self, node):
if node is self.node:
for ident in node.undeclared_identifiers():
if ident != 'context' and\
ident not in self.declared.union(self.locally_declared):
self.undeclared.add(ident)
for ident in node.declared_identifiers():
self.argument_declared.add(ident)
for n in node.nodes:
n.accept_visitor(self)
else:
for ident in node.undeclared_identifiers():
if ident != 'context' and\
ident not in self.declared.union(self.locally_declared):
self.undeclared.add(ident)
_FOR_LOOP = re.compile(
r'^for\s+((?:\(?)\s*[A-Za-z_][A-Za-z_0-9]*'
r'(?:\s*,\s*(?:[A-Za-z_][A-Za-z0-9_]*),??)*\s*(?:\)?))\s+in\s+(.*):'
)
def mangle_mako_loop(node, printer):
"""converts a for loop into a context manager wrapped around a for loop
when access to the `loop` variable has been detected in the for loop body
"""
loop_variable = LoopVariable()
node.accept_visitor(loop_variable)
if loop_variable.detected:
node.nodes[-1].has_loop_context = True
match = _FOR_LOOP.match(node.text)
if match:
printer.writelines(
'loop = __M_loop._enter(%s)' % match.group(2),
'try:'
#'with __M_loop(%s) as loop:' % match.group(2)
)
text = 'for %s in loop:' % match.group(1)
else:
raise SyntaxError("Couldn't apply loop context: %s" % node.text)
else:
text = node.text
return text
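# Example sketch: a template loop that references `loop`, e.g.
#   % for x in items:
#   ${loop.index}: ${x}
#   % endfor
# is rewritten to "loop = __M_loop._enter(items)" / "for x in loop:" inside
# a try block; visitControlLine() emits the matching _exit() in the finally.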
class LoopVariable(object):
"""A node visitor which looks for the name 'loop' within undeclared
identifiers."""
def __init__(self):
self.detected = False
def _loop_reference_detected(self, node):
if 'loop' in node.undeclared_identifiers():
self.detected = True
else:
for n in node.get_children():
n.accept_visitor(self)
def visitControlLine(self, node):
self._loop_reference_detected(node)
def visitCode(self, node):
self._loop_reference_detected(node)
def visitExpression(self, node):
self._loop_reference_detected(node)
|
swangui/ggrid
|
mako/codegen.py
|
Python
|
mit
| 48,628
|
[
"VisIt"
] |
ac2eb0be8e907fd886a929404bbab903d178bfab358bcf065345fea2d68d1d1c
|
# Portions Copyright (c) Facebook, Inc. and its affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
# dagutil.py - dag utilities for mercurial
#
# Copyright 2010 Benoit Boissinot <bboissin@gmail.com>
# and Peter Arrenbrecht <peter@arrenbrecht.ch>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
from .i18n import _
from .node import nullrev
class basedag(object):
"""generic interface for DAGs
terms:
"ix" (short for index) identifies a nodes internally,
"id" identifies one externally.
All params are ixs unless explicitly suffixed otherwise.
Pluralized params are lists or sets.
"""
def __init__(self):
self._inverse = None
def nodeset(self):
"""set of all node ixs"""
raise NotImplementedError
def heads(self):
"""list of head ixs"""
raise NotImplementedError
def parents(self, ix):
"""list of parents ixs of ix"""
raise NotImplementedError
def inverse(self):
"""inverse DAG, where parents becomes children, etc."""
raise NotImplementedError
def ancestorset(self, starts, stops=None):
"""
set of all ancestors of starts (incl), but stop walk at stops (excl)
"""
raise NotImplementedError
def descendantset(self, starts, stops=None):
"""
set of all descendants of starts (incl), but stop walk at stops (excl)
"""
return self.inverse().ancestorset(starts, stops)
def headsetofconnecteds(self, ixs):
"""
subset of connected list of ixs so that no node has a descendant in it
By "connected list" we mean that if an ancestor and a descendant are in
the list, then so is at least one path connecting them.
"""
raise NotImplementedError
def externalize(self, ix):
"""return a node id"""
return self._externalize(ix)
def externalizeall(self, ixs):
"""return a list of (or set if given a set) of node ids"""
ids = self._externalizeall(ixs)
if isinstance(ixs, set):
return set(ids)
return list(ids)
def internalize(self, id):
"""return a node ix"""
return self._internalize(id)
def internalizeall(self, ids, filterunknown=False):
"""return a list of (or set if given a set) of node ixs"""
ixs = self._internalizeall(ids, filterunknown)
if isinstance(ids, set):
return set(ixs)
return list(ixs)
class genericdag(basedag):
"""generic implementations for DAGs"""
def ancestorset(self, starts, stops=None):
if stops:
stops = set(stops)
else:
stops = set()
seen = set()
pending = list(starts)
while pending:
n = pending.pop()
if n not in seen and n not in stops:
seen.add(n)
pending.extend(self.parents(n))
return seen
def headsetofconnecteds(self, ixs):
hds = set(ixs)
if not hds:
return hds
for n in ixs:
for p in self.parents(n):
hds.discard(p)
assert hds
return hds
class revlogbaseddag(basedag):
"""generic dag interface to a revlog"""
def __init__(self, revlog, nodeset):
basedag.__init__(self)
self._revlog = revlog
self._heads = None
self._nodeset = nodeset
def nodeset(self):
return self._nodeset
def heads(self):
if self._heads is None:
self._heads = self._getheads()
return self._heads
def _externalize(self, ix):
return self._revlog.index[ix][7]
def _externalizeall(self, ixs):
idx = self._revlog.index
return [idx[i][7] for i in ixs]
def _internalize(self, id):
ix = self._revlog.rev(id)
if ix == nullrev:
raise LookupError(id, self._revlog.indexfile, _("nullid"))
return ix
def _internalizeall(self, ids, filterunknown):
rl = self._revlog
if filterunknown:
return [
r for r in map(rl.nodemap.get, ids) if (r is not None and r != nullrev)
]
return [self._internalize(i) for i in ids]
class revlogdag(revlogbaseddag):
"""dag interface to a revlog"""
def __init__(self, revlog, localsubset=None):
revlogbaseddag.__init__(self, revlog, set(revlog))
self._heads = localsubset
def _getheads(self):
# See docstring on rawheads about the use-case.
return [r for r in self._revlog.rawheadrevs() if r != nullrev]
def parents(self, ix):
rlog = self._revlog
idx = rlog.index
revdata = idx[ix]
prev = revdata[5]
if prev != nullrev:
prev2 = revdata[6]
if prev2 == nullrev:
return [prev]
return [prev, prev2]
prev2 = revdata[6]
if prev2 != nullrev:
return [prev2]
return []
def inverse(self):
if self._inverse is None:
self._inverse = inverserevlogdag(self)
return self._inverse
def ancestorset(self, starts, stops=None):
rlog = self._revlog
idx = rlog.index
if stops:
stops = set(stops)
else:
stops = set()
seen = set()
pending = list(starts)
while pending:
rev = pending.pop()
if rev not in seen and rev not in stops:
seen.add(rev)
revdata = idx[rev]
for i in [5, 6]:
prev = revdata[i]
if prev != nullrev:
pending.append(prev)
return seen
def headsetofconnecteds(self, ixs):
if not ixs:
return set()
rlog = self._revlog
idx = rlog.index
headrevs = set(ixs)
for rev in ixs:
revdata = idx[rev]
for i in [5, 6]:
prev = revdata[i]
if prev != nullrev:
headrevs.discard(prev)
assert headrevs
return headrevs
def linearize(self, ixs):
"""linearize and topologically sort a list of revisions
The linearization process tries to create long runs of revs where
a child rev comes immediately after its first parent. This is done by
visiting the heads of the given revs in inverse topological order,
and for each visited rev, visiting its second parent, then its first
parent, then adding the rev itself to the output list.
"""
sorted = []
visit = list(self.headsetofconnecteds(ixs))
visit.sort(reverse=True)
finished = set()
while visit:
cur = visit.pop()
if cur < 0:
cur = -cur - 1
if cur not in finished:
sorted.append(cur)
finished.add(cur)
else:
visit.append(-cur - 1)
visit += [
p for p in self.parents(cur) if p in ixs and p not in finished
]
assert len(sorted) == len(ixs)
return sorted
class inverserevlogdag(revlogbaseddag, genericdag):
"""inverse of an existing revlog dag; see revlogdag.inverse()"""
def __init__(self, orig):
revlogbaseddag.__init__(self, orig._revlog, orig._nodeset)
self._orig = orig
self._children = {}
self._roots = []
self._walkfrom = len(self._revlog) - 1
def _walkto(self, walkto):
rev = self._walkfrom
cs = self._children
roots = self._roots
idx = self._revlog.index
while rev >= walkto:
data = idx[rev]
isroot = True
for prev in [data[5], data[6]]: # parent revs
if prev != nullrev:
cs.setdefault(prev, []).append(rev)
isroot = False
if isroot:
roots.append(rev)
rev -= 1
self._walkfrom = rev
def _getheads(self):
self._walkto(nullrev)
return self._roots
def parents(self, ix):
if ix is None:
return []
if ix <= self._walkfrom:
self._walkto(ix)
return self._children.get(ix, [])
def inverse(self):
return self._orig
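# A minimal sketch (not part of mercurial): a dict-backed DAG that exercises
# the generic ancestorset()/headsetofconnecteds() implementations above.
class _dictdag(genericdag):
    def __init__(self, parentmap):
        genericdag.__init__(self)
        self._parentmap = parentmap  # {ix: [parent ixs]}
    def nodeset(self):
        return set(self._parentmap)
    def heads(self):
        return self.headsetofconnecteds(self.nodeset())
    def parents(self, ix):
        return self._parentmap.get(ix, [])

# e.g. with edges 0 <- 1 <- 2 and 0 <- 3:
#   _dictdag({0: [], 1: [0], 2: [1], 3: [0]}).ancestorset([2]) == {0, 1, 2}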
|
facebookexperimental/eden
|
eden/hg-server/edenscm/mercurial/dagutil.py
|
Python
|
gpl-2.0
| 8,602
|
[
"VisIt"
] |
af0cb044e157bfc7f4cd1d6113af8f8ecb7646f29a5c86afab8778d0243ea36c
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Bowtie(MakefilePackage):
"""Bowtie is an ultrafast, memory-efficient short read aligner
for short DNA sequences (reads) from next-gen sequencers."""
homepage = "https://sourceforge.net/projects/bowtie-bio/"
url = "https://downloads.sourceforge.net/project/bowtie-bio/bowtie/1.2.0/bowtie-1.2-source.zip"
version('1.2', '6d97f0ea1a65af11d17cc270cfac4af9')
variant('tbb', default=False, description='Use Intel thread building block')
depends_on('tbb', when='+tbb')
def edit(self, spec, prefix):
makefile = FileFilter('Makefile')
makefile.filter('CC = .*', 'CC = ' + env['CC'])
makefile.filter('CXX = .*', 'CPP = ' + env['CXX'])
def build(self, spec, prefix):
if '+tbb' in spec:
make()
else:
make('NO_TBB=1')
def install(self, spec, prefix):
make('prefix={0}'.format(self.prefix), 'install')
|
krafczyk/spack
|
var/spack/repos/builtin/packages/bowtie/package.py
|
Python
|
lgpl-2.1
| 2,175
|
[
"Bowtie"
] |
dcb113dd82707fb7d17f1eceeb9a190fffeb671912704551e0285160ea4a18a9
|
# -*- coding: utf-8 -*-
"""
Models used to implement SAML SSO support in third_party_auth
(including Shibboleth support)
"""
import json
import logging
import re
from config_models.models import ConfigurationModel, cache
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.exceptions import ValidationError
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from organizations.models import Organization
from social_core.backends.base import BaseAuth
from social_core.backends.oauth import OAuthAuth
from social_core.backends.saml import SAMLAuth
from social_core.exceptions import SocialAuthBaseException
from social_core.utils import module_member
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangoapps.theming.helpers import get_current_request
from openedx.core.lib.hash_utils import create_hash256
from .lti import LTI_PARAMS_KEY, LTIAuthBackend
from .saml import STANDARD_SAML_PROVIDER_KEY, get_saml_idp_choices, get_saml_idp_class
log = logging.getLogger(__name__)
REGISTRATION_FORM_FIELD_BLACKLIST = [
'name',
'username'
]
# A dictionary of {name: class} entries for each python-social-auth backend available.
# Because this setting can specify arbitrary code to load and execute, it is set via
# normal Django settings only and cannot be changed at runtime:
def _load_backend_classes(base_class=BaseAuth):
""" Load the list of python-social-auth backend classes from Django settings """
for class_path in settings.AUTHENTICATION_BACKENDS:
auth_class = module_member(class_path)
if issubclass(auth_class, base_class):
yield auth_class
_PSA_BACKENDS = {backend_class.name: backend_class for backend_class in _load_backend_classes()}
_PSA_OAUTH2_BACKENDS = [backend_class.name for backend_class in _load_backend_classes(OAuthAuth)]
_PSA_SAML_BACKENDS = [backend_class.name for backend_class in _load_backend_classes(SAMLAuth)]
_LTI_BACKENDS = [backend_class.name for backend_class in _load_backend_classes(LTIAuthBackend)]
def clean_json(value, of_type):
""" Simple helper method to parse and clean JSON """
if not value.strip():
return json.dumps(of_type())
try:
value_python = json.loads(value)
except ValueError as err:
raise ValidationError(u"Invalid JSON: {}".format(err))
if not isinstance(value_python, of_type):
raise ValidationError(u"Expected a JSON {}".format(of_type))
return json.dumps(value_python, indent=4)
def clean_username(username=''):
""" Simple helper method to ensure a username is compatible with our system requirements. """
return re.sub(r'[^-\w]+', '_', username)[:30]
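# Illustrative examples (assumed inputs, not from the original module) of the
# two helpers above:
#
#   clean_json('', dict)          -> '{}'
#   clean_json('{"a": 1}', dict)  -> '{\n    "a": 1\n}'
#   clean_json('[1]', dict)       -> raises ValidationError (a list, not a dict)
#
#   clean_username('user name!')  -> 'user_name_'   (hyphens like 'a-b' survive)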
class AuthNotConfigured(SocialAuthBaseException):
""" Exception when SAMLProviderData or other required info is missing """
def __init__(self, provider_name):
super(AuthNotConfigured, self).__init__()
self.provider_name = provider_name
def __str__(self):
return _('Authentication with {} is currently unavailable.').format(
self.provider_name
)
class ProviderConfig(ConfigurationModel):
"""
Abstract Base Class for configuring a third_party_auth provider
.. no_pii:
"""
KEY_FIELDS = ('slug',)
icon_class = models.CharField(
max_length=50,
blank=True,
default=u'fa-sign-in',
help_text=(
u'The Font Awesome (or custom) icon class to use on the login button for this provider. '
'Examples: fa-google-plus, fa-facebook, fa-linkedin, fa-sign-in, fa-university'
),
)
# We use a FileField instead of an ImageField here because ImageField
# doesn't support SVG. This means we don't get any image validation, but
# that should be fine because only trusted users should be uploading these
# anyway.
icon_image = models.FileField(
blank=True,
help_text=(
u'If there is no Font Awesome icon available for this provider, upload a custom image. '
'SVG images are recommended as they can scale to any size.'
),
)
name = models.CharField(max_length=50, blank=False, help_text=u"Name of this provider (shown to users)")
slug = models.SlugField(
max_length=30, db_index=True, default=u'default',
help_text=(
u'A short string uniquely identifying this provider. '
'Cannot contain spaces and should be a usable as a CSS class. Examples: "ubc", "mit-staging"'
))
secondary = models.BooleanField(
default=False,
help_text=_(
'Secondary providers are displayed less prominently, '
'in a separate list of "Institution" login providers.'
),
)
organization = models.ForeignKey(
Organization,
blank=True,
null=True,
on_delete=models.CASCADE,
help_text=_(
            'Optional. If this provider is an Organization, this attribute '
            'can be used to reference users in that Organization.'
)
)
site = models.ForeignKey(
Site,
default=settings.SITE_ID,
related_name='%(class)ss',
help_text=_(
'The Site that this provider configuration belongs to.'
),
on_delete=models.CASCADE,
)
skip_hinted_login_dialog = models.BooleanField(
default=False,
help_text=_(
"If this option is enabled, users that visit a \"TPA hinted\" URL for this provider "
"(e.g. a URL ending with `?tpa_hint=[provider_name]`) will be forwarded directly to "
"the login URL of the provider instead of being first prompted with a login dialog."
),
)
skip_registration_form = models.BooleanField(
default=False,
help_text=_(
"If this option is enabled, users will not be asked to confirm their details "
"(name, email, etc.) during the registration process. Only select this option "
"for trusted providers that are known to provide accurate user information."
),
)
skip_email_verification = models.BooleanField(
default=False,
help_text=_(
"If this option is selected, users will not be required to confirm their "
"email, and their account will be activated immediately upon registration."
),
)
send_welcome_email = models.BooleanField(
default=False,
help_text=_(
"If this option is selected, users will be sent a welcome email upon registration."
),
)
visible = models.BooleanField(
default=False,
help_text=_(
"If this option is not selected, users will not be presented with the provider "
"as an option to authenticate with on the login screen, but manual "
"authentication using the correct link is still possible."
),
)
max_session_length = models.PositiveIntegerField(
null=True,
blank=True,
default=None,
verbose_name=u'Max session length (seconds)',
help_text=_(
"If this option is set, then users logging in using this SSO provider will have "
"their session length limited to no longer than this value. If set to 0 (zero), "
"the session will expire upon the user closing their browser. If left blank, the "
"Django platform session default length will be used."
)
)
send_to_registration_first = models.BooleanField(
default=False,
help_text=_(
"If this option is selected, users will be directed to the registration page "
"immediately after authenticating with the third party instead of the login page."
),
)
sync_learner_profile_data = models.BooleanField(
default=False,
help_text=_(
"Synchronize user profile data received from the identity provider with the edX user "
"account on each SSO login. The user will be notified if the email address associated "
"with their account is changed as a part of this synchronization."
)
)
enable_sso_id_verification = models.BooleanField(
default=False,
help_text=u"Use the presence of a profile from a trusted third party as proof of identity verification.",
)
prefix = None # used for provider_id. Set to a string value in subclass
backend_name = None # Set to a field or fixed value in subclass
accepts_logins = True # Whether to display a sign-in button when the provider is enabled
# "enabled" field is inherited from ConfigurationModel
class Meta(object):
app_label = "third_party_auth"
abstract = True
def clean(self):
""" Ensure that either `icon_class` or `icon_image` is set """
super(ProviderConfig, self).clean()
if bool(self.icon_class) == bool(self.icon_image):
raise ValidationError('Either an icon class or an icon image must be given (but not both)')
@property
def provider_id(self):
""" Unique string key identifying this provider. Must be URL and css class friendly. """
assert self.prefix is not None
return "-".join((self.prefix, ) + tuple(getattr(self, field) for field in self.KEY_FIELDS))
@property
def backend_class(self):
""" Get the python-social-auth backend class used for this provider """
return _PSA_BACKENDS[self.backend_name]
@property
def full_class_name(self):
""" Get the fully qualified class name of this provider. """
return '{}.{}'.format(self.__module__, self.__class__.__name__)
def get_url_params(self):
""" Get a dict of GET parameters to append to login links for this provider """
return {}
def is_active_for_pipeline(self, pipeline):
""" Is this provider being used for the specified pipeline? """
return self.backend_name == pipeline['backend']
def match_social_auth(self, social_auth):
""" Is this provider being used for this UserSocialAuth entry? """
return self.backend_name == social_auth.provider
def get_remote_id_from_social_auth(self, social_auth):
""" Given a UserSocialAuth object, return the remote ID used by this provider. """
        # This is generally the same thing as the UID, except when one backend is used for multiple providers
assert self.match_social_auth(social_auth)
return social_auth.uid
def get_social_auth_uid(self, remote_id):
"""
Return the uid in social auth.
        This is the default implementation. Subclasses may override it with a different one.
"""
return remote_id
@classmethod
def get_register_form_data(cls, pipeline_kwargs):
"""Gets dict of data to display on the register form.
register_user uses this to populate
the new account creation form with values supplied by the user's chosen
provider, preventing duplicate data entry.
Args:
pipeline_kwargs: dict of string -> object. Keyword arguments
accumulated by the pipeline thus far.
Returns:
Dict of string -> string. Keys are names of form fields; values are
values for that field. Where there is no value, the empty string
must be used.
"""
registration_form_data = {}
# Details about the user sent back from the provider.
details = pipeline_kwargs.get('details').copy()
# Set the registration form to use the `fullname` detail for the `name` field.
registration_form_data['name'] = details.get('fullname', '')
# Get the username separately to take advantage of the de-duping logic
# built into the pipeline. The provider cannot de-dupe because it can't
# check the state of taken usernames in our system. Note that there is
# technically a data race between the creation of this value and the
# creation of the user object, so it is still possible for users to get
# an error on submit.
registration_form_data['username'] = clean_username(pipeline_kwargs.get('username') or '')
# Any other values that are present in the details dict should be copied
# into the registration form details. This may include details that do
# not map to a value that exists in the registration form. However,
# because the fields that are actually rendered are not based on this
# list, only those values that map to a valid registration form field
# will actually be sent to the form as default values.
for blacklisted_field in REGISTRATION_FORM_FIELD_BLACKLIST:
details.pop(blacklisted_field, None)
registration_form_data.update(details)
return registration_form_data
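    # Illustrative sketch (assumed pipeline data, not from the original code):
    # calling get_register_form_data({'details': {'fullname': 'Jane Doe',
    # 'email': 'jane@example.com'}, 'username': 'jane.doe'}) would yield
    # {'name': 'Jane Doe', 'username': 'jane_doe', 'fullname': 'Jane Doe',
    #  'email': 'jane@example.com'}; 'name' and 'username' come from the
    # dedicated logic above, the rest is copied through from 'details'.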
def get_authentication_backend(self):
"""Gets associated Django settings.AUTHENTICATION_BACKEND string."""
return '{}.{}'.format(self.backend_class.__module__, self.backend_class.__name__)
@property
def display_for_login(self):
"""
Determines whether the provider ought to be shown as an option with
which to authenticate on the login screen, registration screen, and elsewhere.
"""
return bool(self.enabled_for_current_site and self.accepts_logins and self.visible)
@property
def enabled_for_current_site(self):
"""
Determines if the provider is able to be used with the current site.
"""
return self.enabled and self.site_id == Site.objects.get_current(get_current_request()).id
class OAuth2ProviderConfig(ProviderConfig):
"""
Configuration Entry for an OAuth2 based provider.
Also works for OAuth1 providers.
.. no_pii:
"""
# We are keying the provider config by backend_name here as suggested in the python social
# auth documentation. In order to reuse a backend for a second provider, a subclass can be
    # created with a separate name.
# example:
# class SecondOpenIDProvider(OpenIDAuth):
# name = "second-openId-provider"
KEY_FIELDS = ('backend_name',)
prefix = 'oa2'
backend_name = models.CharField(
max_length=50, blank=False, db_index=True,
help_text=(
u"Which python-social-auth OAuth2 provider backend to use. "
"The list of backend choices is determined by the THIRD_PARTY_AUTH_BACKENDS setting."
# To be precise, it's set by AUTHENTICATION_BACKENDS
# which production.py sets from THIRD_PARTY_AUTH_BACKENDS
)
)
key = models.TextField(blank=True, verbose_name=u"Client ID")
secret = models.TextField(
blank=True,
verbose_name=u"Client Secret",
help_text=(
u'For increased security, you can avoid storing this in your database by leaving '
' this field blank and setting '
'SOCIAL_AUTH_OAUTH_SECRETS = {"(backend name)": "secret", ...} '
'in your instance\'s Django settings (or lms.auth.json)'
)
)
other_settings = models.TextField(blank=True, help_text=u"Optional JSON object with advanced settings, if any.")
class Meta(object):
app_label = "third_party_auth"
verbose_name = u"Provider Configuration (OAuth)"
verbose_name_plural = verbose_name
def clean(self):
""" Standardize and validate fields """
super(OAuth2ProviderConfig, self).clean()
self.other_settings = clean_json(self.other_settings, dict)
def get_setting(self, name):
""" Get the value of a setting, or raise KeyError """
if name == "KEY":
return self.key
if name == "SECRET":
if self.secret:
return self.secret
# To allow instances to avoid storing secrets in the DB, the secret can also be set via Django:
return getattr(settings, 'SOCIAL_AUTH_OAUTH_SECRETS', {}).get(self.backend_name, '')
if self.other_settings:
other_settings = json.loads(self.other_settings)
assert isinstance(other_settings, dict), "other_settings should be a JSON object (dictionary)"
return other_settings[name]
raise KeyError
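# Illustrative sketch (assumed backend name and placeholder value, not part of
# the original module): to keep an OAuth2 client secret out of the database,
# leave the "secret" field above blank and let get_setting("SECRET") fall back
# to Django settings:
#
#     SOCIAL_AUTH_OAUTH_SECRETS = {
#         "google-oauth2": "not-a-real-secret",
#     }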
class SAMLConfiguration(ConfigurationModel):
"""
General configuration required for this edX instance to act as a SAML
Service Provider and allow users to authenticate via third party SAML
Identity Providers (IdPs)
.. no_pii:
"""
KEY_FIELDS = ('site_id', 'slug')
site = models.ForeignKey(
Site,
default=settings.SITE_ID,
related_name='%(class)ss',
help_text=_(
'The Site that this SAML configuration belongs to.'
),
on_delete=models.CASCADE,
)
slug = models.SlugField(
max_length=30,
default=u'default',
help_text=(
u'A short string uniquely identifying this configuration. '
'Cannot contain spaces. Examples: "ubc", "mit-staging"'
),
)
private_key = models.TextField(
help_text=(
u'To generate a key pair as two files, run '
'"openssl req -new -x509 -days 3652 -nodes -out saml.crt -keyout saml.key". '
'Paste the contents of saml.key here. '
'For increased security, you can avoid storing this in your database by leaving '
'this field blank and setting it via the SOCIAL_AUTH_SAML_SP_PRIVATE_KEY setting '
'in your instance\'s Django settings (or lms.auth.json).'
),
blank=True,
)
public_key = models.TextField(
help_text=(
u'Public key certificate. '
'For increased security, you can avoid storing this in your database by leaving '
'this field blank and setting it via the SOCIAL_AUTH_SAML_SP_PUBLIC_CERT setting '
'in your instance\'s Django settings (or lms.auth.json).'
),
blank=True,
)
entity_id = models.CharField(max_length=255, default="http://saml.example.com", verbose_name=u"Entity ID")
org_info_str = models.TextField(
verbose_name=u"Organization Info",
default=u'{"en-US": {"url": "http://www.example.com", "displayname": "Example Inc.", "name": "example"}}',
help_text=u"JSON dictionary of 'url', 'displayname', and 'name' for each language",
)
other_config_str = models.TextField(
default=u'{\n"SECURITY_CONFIG": {"metadataCacheDuration": 604800, "signMetadata": false}\n}',
help_text=(
u"JSON object defining advanced settings that are passed on to python-saml. "
"Valid keys that can be set here include: SECURITY_CONFIG and SP_EXTRA"
),
)
class Meta(object):
app_label = "third_party_auth"
verbose_name = u"SAML Configuration"
verbose_name_plural = verbose_name
def __str__(self):
"""
Return human-readable string representation.
"""
return u"SAMLConfiguration {site}: {slug} on {date:%Y-%m-%d %H:%M:%S}".format(
site=self.site.name,
slug=self.slug,
date=self.change_date,
)
def clean(self):
""" Standardize and validate fields """
super(SAMLConfiguration, self).clean()
self.org_info_str = clean_json(self.org_info_str, dict)
self.other_config_str = clean_json(self.other_config_str, dict)
self.private_key = (
self.private_key
.replace("-----BEGIN RSA PRIVATE KEY-----", "")
.replace("-----BEGIN PRIVATE KEY-----", "")
.replace("-----END RSA PRIVATE KEY-----", "")
.replace("-----END PRIVATE KEY-----", "")
.strip()
)
self.public_key = (
self.public_key
.replace("-----BEGIN CERTIFICATE-----", "")
.replace("-----END CERTIFICATE-----", "")
.strip()
)
def get_setting(self, name):
""" Get the value of a setting, or raise KeyError """
default_saml_contact = {
# Default contact information to put into the SAML metadata that gets generated by python-saml.
"givenName": _(u"{platform_name} Support").format(
platform_name=configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME)
),
"emailAddress": configuration_helpers.get_value('TECH_SUPPORT_EMAIL', settings.TECH_SUPPORT_EMAIL),
}
if name == "ORG_INFO":
return json.loads(self.org_info_str)
if name == "SP_ENTITY_ID":
return self.entity_id
if name == "SP_PUBLIC_CERT":
if self.public_key:
return self.public_key
# To allow instances to avoid storing keys in the DB, the key pair can also be set via Django:
if self.slug == 'default':
return getattr(settings, 'SOCIAL_AUTH_SAML_SP_PUBLIC_CERT', '')
else:
public_certs = getattr(settings, 'SOCIAL_AUTH_SAML_SP_PUBLIC_CERT_DICT', {})
return public_certs.get(self.slug, '')
if name == "SP_PRIVATE_KEY":
if self.private_key:
return self.private_key
# To allow instances to avoid storing keys in the DB, the private key can also be set via Django:
if self.slug == 'default':
return getattr(settings, 'SOCIAL_AUTH_SAML_SP_PRIVATE_KEY', '')
else:
private_keys = getattr(settings, 'SOCIAL_AUTH_SAML_SP_PRIVATE_KEY_DICT', {})
return private_keys.get(self.slug, '')
other_config = {
# These defaults can be overriden by self.other_config_str
"GET_ALL_EXTRA_DATA": True, # Save all attribute values the IdP sends into the UserSocialAuth table
"TECHNICAL_CONTACT": default_saml_contact,
"SUPPORT_CONTACT": default_saml_contact,
}
other_config.update(json.loads(self.other_config_str))
return other_config[name] # SECURITY_CONFIG, SP_EXTRA, or similar extra settings
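# Illustrative sketch (assumed slug and placeholder values): non-default
# SAMLConfiguration rows can likewise keep their key pair in Django settings,
# keyed by slug, as resolved by get_setting() above:
#
#     SOCIAL_AUTH_SAML_SP_PUBLIC_CERT_DICT = {"campus-a": "<certificate body>"}
#     SOCIAL_AUTH_SAML_SP_PRIVATE_KEY_DICT = {"campus-a": "<private key body>"}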
def active_saml_configurations_filter():
"""
Returns a mapping to be used for the SAMLProviderConfig to limit the SAMLConfiguration choices to the current set.
"""
query_set = SAMLConfiguration.objects.current_set()
return {'id__in': query_set.values_list('id', flat=True)}
class SAMLProviderConfig(ProviderConfig):
"""
Configuration Entry for a SAML/Shibboleth provider.
.. no_pii:
"""
prefix = 'saml'
backend_name = models.CharField(
max_length=50, default=u'tpa-saml', blank=False,
help_text=u"Which python-social-auth provider backend to use. 'tpa-saml' is the standard edX SAML backend.")
entity_id = models.CharField(
max_length=255, verbose_name=u"Entity ID", help_text=u"Example: https://idp.testshib.org/idp/shibboleth")
metadata_source = models.CharField(
max_length=255,
help_text=(
u"URL to this provider's XML metadata. Should be an HTTPS URL. "
"Example: https://www.testshib.org/metadata/testshib-providers.xml"
))
attr_user_permanent_id = models.CharField(
max_length=128, blank=True, verbose_name=u"User ID Attribute",
help_text=(
u"URN of the SAML attribute that we can use as a unique, "
"persistent user ID. Leave blank for default."
))
attr_full_name = models.CharField(
max_length=128, blank=True, verbose_name=u"Full Name Attribute",
help_text=u"URN of SAML attribute containing the user's full name. Leave blank for default.")
default_full_name = models.CharField(
max_length=255, blank=True, verbose_name=u"Default Value for Full Name",
help_text=u"Default value for full name to be used if not present in SAML response.")
attr_first_name = models.CharField(
max_length=128, blank=True, verbose_name=u"First Name Attribute",
help_text=u"URN of SAML attribute containing the user's first name. Leave blank for default.")
default_first_name = models.CharField(
max_length=255, blank=True, verbose_name=u"Default Value for First Name",
help_text=u"Default value for first name to be used if not present in SAML response.")
attr_last_name = models.CharField(
max_length=128, blank=True, verbose_name=u"Last Name Attribute",
help_text=u"URN of SAML attribute containing the user's last name. Leave blank for default.")
default_last_name = models.CharField(
max_length=255, blank=True, verbose_name=u"Default Value for Last Name",
help_text=u"Default value for last name to be used if not present in SAML response.")
attr_username = models.CharField(
max_length=128, blank=True, verbose_name=u"Username Hint Attribute",
help_text=u"URN of SAML attribute to use as a suggested username for this user. Leave blank for default.")
default_username = models.CharField(
max_length=255, blank=True, verbose_name=u"Default Value for Username",
help_text=u"Default value for username to be used if not present in SAML response.")
attr_email = models.CharField(
max_length=128, blank=True, verbose_name=u"Email Attribute",
help_text=u"URN of SAML attribute containing the user's email address[es]. Leave blank for default.")
default_email = models.CharField(
max_length=255, blank=True, verbose_name=u"Default Value for Email",
help_text=u"Default value for email to be used if not present in SAML response.")
automatic_refresh_enabled = models.BooleanField(
default=True, verbose_name=u"Enable automatic metadata refresh",
help_text=u"When checked, the SAML provider's metadata will be included "
"in the automatic refresh job, if configured."
)
identity_provider_type = models.CharField(
max_length=128, blank=False, verbose_name=u"Identity Provider Type", default=STANDARD_SAML_PROVIDER_KEY,
choices=get_saml_idp_choices(), help_text=(
u"Some SAML providers require special behavior. For example, SAP SuccessFactors SAML providers require an "
"additional API call to retrieve user metadata not provided in the SAML response. Select the provider type "
"which best matches your use case. If in doubt, choose the Standard SAML Provider type."
)
)
debug_mode = models.BooleanField(
default=False, verbose_name=u"Debug Mode",
help_text=(
u"In debug mode, all SAML XML requests and responses will be logged. "
"This is helpful for testing/setup but should always be disabled before users start using this provider."
),
)
country = models.CharField(
max_length=128,
help_text=(
            u"URN of SAML attribute containing the user's country."
),
blank=True,
)
skip_hinted_login_dialog = models.BooleanField(
default=True,
help_text=_(
"If this option is enabled, users that visit a \"TPA hinted\" URL for this provider "
"(e.g. a URL ending with `?tpa_hint=[provider_name]`) will be forwarded directly to "
"the login URL of the provider instead of being first prompted with a login dialog."
),
)
skip_registration_form = models.BooleanField(
default=True,
help_text=_(
"If this option is enabled, users will not be asked to confirm their details "
"(name, email, etc.) during the registration process. Only select this option "
"for trusted providers that are known to provide accurate user information."
),
)
skip_email_verification = models.BooleanField(
default=True,
help_text=_(
"If this option is selected, users will not be required to confirm their "
"email, and their account will be activated immediately upon registration."
),
)
send_to_registration_first = models.BooleanField(
default=True,
help_text=_(
"If this option is selected, users will be directed to the registration page "
"immediately after authenticating with the third party instead of the login page."
),
)
other_settings = models.TextField(
verbose_name=u"Advanced settings", blank=True,
help_text=(
        u'For advanced use cases, enter a JSON object with additional configuration. '
'The tpa-saml backend supports {"requiredEntitlements": ["urn:..."]}, '
'which can be used to require the presence of a specific eduPersonEntitlement, '
'and {"extra_field_definitions": [{"name": "...", "urn": "..."},...]}, which can be '
'used to define registration form fields and the URNs that can be used to retrieve '
'the relevant values from the SAML response. Custom provider types, as selected '
'in the "Identity Provider Type" field, may make use of the information stored '
'in this field for additional configuration.'
))
archived = models.BooleanField(default=False)
saml_configuration = models.ForeignKey(
SAMLConfiguration,
on_delete=models.SET_NULL,
limit_choices_to=active_saml_configurations_filter,
null=True,
blank=True,
)
def clean(self):
""" Standardize and validate fields """
super(SAMLProviderConfig, self).clean()
self.other_settings = clean_json(self.other_settings, dict)
class Meta(object):
app_label = "third_party_auth"
verbose_name = u"Provider Configuration (SAML IdP)"
verbose_name_plural = "Provider Configuration (SAML IdPs)"
def get_url_params(self):
""" Get a dict of GET parameters to append to login links for this provider """
return {'idp': self.slug}
def is_active_for_pipeline(self, pipeline):
""" Is this provider being used for the specified pipeline? """
return self.backend_name == pipeline['backend'] and self.slug == pipeline['kwargs']['response']['idp_name']
def match_social_auth(self, social_auth):
""" Is this provider being used for this UserSocialAuth entry? """
prefix = self.slug + ":"
return self.backend_name == social_auth.provider and social_auth.uid.startswith(prefix)
def get_remote_id_from_social_auth(self, social_auth):
""" Given a UserSocialAuth object, return the remote ID used by this provider. """
assert self.match_social_auth(social_auth)
# Remove the prefix from the UID
return social_auth.uid[len(self.slug) + 1:]
def get_social_auth_uid(self, remote_id):
""" Get social auth uid from remote id by prepending idp_slug to the remote id """
return '{}:{}'.format(self.slug, remote_id)
def get_setting(self, name):
""" Get the value of a setting, or raise KeyError """
if self.other_settings:
other_settings = json.loads(self.other_settings)
return other_settings[name]
raise KeyError
def get_config(self):
"""
Return a SAMLIdentityProvider instance for use by SAMLAuthBackend.
Essentially this just returns the values of this object and its
associated 'SAMLProviderData' entry.
"""
if self.other_settings:
conf = json.loads(self.other_settings)
else:
conf = {}
attrs = (
'attr_user_permanent_id', 'attr_full_name', 'attr_first_name',
'attr_last_name', 'attr_username', 'attr_email', 'entity_id', 'country')
attr_defaults = {
'attr_full_name': 'default_full_name',
'attr_first_name': 'default_first_name',
'attr_last_name': 'default_last_name',
'attr_username': 'default_username',
'attr_email': 'default_email',
}
# Defaults for missing attributes in SAML Response
conf['attr_defaults'] = {}
for field in attrs:
field_name = attr_defaults.get(field)
val = getattr(self, field)
if val:
conf[field] = val
# Default values for SAML attributes
default = getattr(self, field_name) if field_name else None
conf['attr_defaults'][field] = default
# Now get the data fetched automatically from the metadata.xml:
data = SAMLProviderData.current(self.entity_id)
if not data or not data.is_valid():
log.error(
'No SAMLProviderData found for provider "%s" with entity id "%s" and IdP slug "%s". '
'Run "manage.py saml pull" to fix or debug.',
self.name, self.entity_id, self.slug
)
raise AuthNotConfigured(provider_name=self.name)
conf['x509cert'] = data.public_key
conf['url'] = data.sso_url
# Add SAMLConfiguration appropriate for this IdP
conf['saml_sp_configuration'] = (
self.saml_configuration or
SAMLConfiguration.current(self.site.id, 'default')
)
idp_class = get_saml_idp_class(self.identity_provider_type)
return idp_class(self.slug, **conf)
class SAMLProviderData(models.Model):
"""
Data about a SAML IdP that is fetched automatically by 'manage.py saml pull'
This data is only required during the actual authentication process.
.. no_pii:
"""
cache_timeout = 600
fetched_at = models.DateTimeField(db_index=True, null=False)
expires_at = models.DateTimeField(db_index=True, null=True)
entity_id = models.CharField(max_length=255, db_index=True) # This is the key for lookups in this table
sso_url = models.URLField(verbose_name=u"SSO URL")
public_key = models.TextField()
class Meta(object):
app_label = "third_party_auth"
verbose_name = u"SAML Provider Data"
verbose_name_plural = verbose_name
ordering = ('-fetched_at', )
def is_valid(self):
""" Is this data valid? """
if self.expires_at and timezone.now() > self.expires_at:
return False
return bool(self.entity_id and self.sso_url and self.public_key)
is_valid.boolean = True
@classmethod
def cache_key_name(cls, entity_id):
""" Return the name of the key to use to cache the current data """
return 'configuration/{}/current/{}'.format(cls.__name__, entity_id)
@classmethod
def current(cls, entity_id):
"""
Return the active data entry, if any, otherwise None
"""
cached = cache.get(cls.cache_key_name(entity_id))
if cached is not None:
return cached
try:
current = cls.objects.filter(entity_id=entity_id).order_by('-fetched_at')[0]
except IndexError:
current = None
cache.set(cls.cache_key_name(entity_id), current, cls.cache_timeout)
return current
class LTIProviderConfig(ProviderConfig):
"""
Configuration required for this edX instance to act as a LTI
Tool Provider and allow users to authenticate and be enrolled in a
course via third party LTI Tool Consumers.
.. no_pii:
"""
prefix = 'lti'
backend_name = 'lti'
# This provider is not visible to users
icon_class = None
icon_image = None
secondary = False
# LTI login cannot be initiated by the tool provider
accepts_logins = False
KEY_FIELDS = ('lti_consumer_key', )
lti_consumer_key = models.CharField(
max_length=255,
help_text=(
u'The name that the LTI Tool Consumer will use to identify itself'
)
)
lti_hostname = models.CharField(
default=u'localhost',
max_length=255,
help_text=(
u'The domain that will be acting as the LTI consumer.'
),
db_index=True
)
lti_consumer_secret = models.CharField(
default=create_hash256,
max_length=255,
help_text=(
u'The shared secret that the LTI Tool Consumer will use to '
'authenticate requests. Only this edX instance and this '
'tool consumer instance should know this value. '
'For increased security, you can avoid storing this in '
'your database by leaving this field blank and setting '
'SOCIAL_AUTH_LTI_CONSUMER_SECRETS = {"consumer key": "secret", ...} '
            'in your instance\'s Django settings (or lms.auth.json)'
),
blank=True,
)
lti_max_timestamp_age = models.IntegerField(
default=10,
help_text=(
u'The maximum age of oauth_timestamp values, in seconds.'
)
)
def match_social_auth(self, social_auth):
""" Is this provider being used for this UserSocialAuth entry? """
prefix = self.lti_consumer_key + ":"
return self.backend_name == social_auth.provider and social_auth.uid.startswith(prefix)
def get_remote_id_from_social_auth(self, social_auth):
""" Given a UserSocialAuth object, return the remote ID used by this provider. """
assert self.match_social_auth(social_auth)
# Remove the prefix from the UID
return social_auth.uid[len(self.lti_consumer_key) + 1:]
def is_active_for_pipeline(self, pipeline):
""" Is this provider being used for the specified pipeline? """
try:
return (
self.backend_name == pipeline['backend'] and
self.lti_consumer_key == pipeline['kwargs']['response'][LTI_PARAMS_KEY]['oauth_consumer_key']
)
except KeyError:
return False
def get_lti_consumer_secret(self):
""" If the LTI consumer secret is not stored in the database, check Django settings instead """
if self.lti_consumer_secret:
return self.lti_consumer_secret
return getattr(settings, 'SOCIAL_AUTH_LTI_CONSUMER_SECRETS', {}).get(self.lti_consumer_key, '')
class Meta(object):
app_label = "third_party_auth"
verbose_name = u"Provider Configuration (LTI)"
verbose_name_plural = verbose_name
|
msegado/edx-platform
|
common/djangoapps/third_party_auth/models.py
|
Python
|
agpl-3.0
| 38,532
|
[
"VisIt"
] |
5b2a64639869a8468fef76144c2c232a4e3facba06e2774a35e80f3340c28a52
|
__author__ = 'Christoph Heindl'
__copyright__ = 'Copyright 2017, Profactor GmbH'
__license__ = 'BSD'
import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel, WhiteKernel
from sklearn.externals import joblib
from scipy.linalg import solve_triangular as stri  # triangular solves used by GPRegressorStandalone
class GPRegressor:
'''Gaussian Process regressor on CPU.
Takes input feature vectors X and target regression values Y and fits a Gaussian process,
that can be used to predict for query points X*. This implementation uses a squared
exponential kernel for determining the similarity between feature vectors. Tuning of
hyper-parameters is supported by optimizing the negative log-marginal-likelihood through
sklearn provided code.
'''
def fit(self, X, Y, length_scale=1.0, signal_std=1.0, noise_std=1e-10, normalize=False, optimize=False, repeat=0):
'''Fit a Gaussian Process regressor.
Params
------
X : mx4 array
Training feature vectors
Y : mx1 array
Target values
Kwargs
------
length_scale : scalar or 4x1 array, optional
Kernel length scaling input feature dimensions
signal_std : scalar, optional
Signal sigma
noise_std : scalar, optional
Observation noise sigma
normalize : bool, optional
Whether or not to normalize Y by mean adjustment
        optimize : bool or list, optional
            Turn on/off optimization. If a list, only the parameters in the list will be tuned.
        repeat : int, optional
            Number of optimizer restarts (passed through as n_restarts_optimizer).
'''
optimizer = 'fmin_l_bfgs_b'
bounds_ls = bounds_ss = bounds_ns = (1e-3, 1e3)
signal_var = signal_std**2
noise_var = noise_std**2
if isinstance(optimize, list):
bounds_ls = (1e-3, 1e3) if 'length_scale' in optimize else (length_scale, length_scale)
bounds_ss = (1e-3, 1e3) if 'signal_std' in optimize else (signal_var, signal_var)
bounds_ns = (1e-3, 1e3) if 'noise_std' in optimize else (noise_var, noise_var)
elif not optimize:
optimizer = None
kernel = ConstantKernel(signal_var, bounds_ss) * RBF(length_scale, bounds_ls) + WhiteKernel(noise_var, bounds_ns)
self.gpr = GaussianProcessRegressor(kernel=kernel, alpha=0.0, normalize_y=normalize, optimizer=optimizer, n_restarts_optimizer=repeat)
self.gpr.fit(X, Y)
def predict(self, X, return_std=False):
'''Predict values.
Params
------
X : nx4 array
Input feature vectors.
Kwargs
------
return_std : bool, optional
            If true, returns the uncertainty standard deviations for query points. Useful for
computing confidence values.
Returns
-------
Y : nx1 array
Predictions
K : nx1 array
            Standard deviations for query points. Only if return_std = true
'''
return self.gpr.predict(X, return_std=return_std)
@property
def length_scale(self):
return self.gpr.kernel_.k1.k2.length_scale
@property
def signal_std(self):
return np.sqrt(self.gpr.kernel_.k1.k1.constant_value)
@property
def noise_std(self):
return np.sqrt(self.gpr.kernel_.k2.noise_level)
def save(self, fname):
joblib.dump(self.gpr, fname)
def load(self, fname):
self.gpr = joblib.load(fname)
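# Illustrative usage sketch (synthetic data, not part of the original module):
# fit GPRegressor on random 4-D features and a smooth target, then predict
# with uncertainties at the training points.
def _demo_gp_regressor():
    rng = np.random.RandomState(0)
    X = rng.uniform(-1.0, 1.0, size=(50, 4))   # mx4 feature vectors
    Y = np.sin(X.sum(axis=1)) + rng.normal(scale=0.05, size=50)
    gpr = GPRegressor()
    gpr.fit(X, Y, length_scale=1.0, signal_std=1.0, noise_std=0.05,
            normalize=True, optimize=True, repeat=2)
    pred, std = gpr.predict(X, return_std=True)
    return pred, std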
class GPRegressorStandalone:
    '''Standalone Gaussian Process regressor.
    Implements the same squared-exponential GP directly via a Cholesky
    factorization of the kernel matrix. W is a weight matrix applied to the
    pairwise squared distances between feature vectors.
    '''
def fit(self, X, Y, W, signal_std=1.0, noise_std=1e-10, normalize=False):
self.noise_std = noise_std
self.signal_std = signal_std
self.W = W
self.X = X
if normalize:
self.ymean = np.mean(Y)
Y = Y - self.ymean
else:
self.ymean = np.zeros(1)
self.K = GPRegressorStandalone.kernel(X, X, self.W, self.signal_std) + np.eye(X.shape[0]) * self.noise_std
self.L = np.linalg.cholesky(self.K)
self.Li = stri(self.L.T, np.eye(self.L.shape[0]))
self.Ki = self.Li.dot(self.Li.T)
self.alpha = stri(self.L.T, stri(self.L, Y, check_finite=False, lower=True))
def predict(self, X, return_std=False):
Ks = GPRegressorStandalone.kernel(self.X, X, self.W, self.signal_std)
pred = Ks.T.dot(self.alpha) # Zero mean
pred += self.ymean
if return_std:
Kss = GPRegressorStandalone.kernel(X, X, self.W, self.signal_std)
sigma = np.copy(np.diag(Kss))
sigma -= np.einsum("ij,ij->i", np.dot(Ks.T, self.Ki), Ks.T)
sigma[sigma < 0.] = 0.
sigma = np.sqrt(sigma)
return pred, sigma
else:
return pred
@staticmethod
def dist(A, B, W):
'''Pairwise squared weighted distance.'''
diff = A[np.newaxis, :, :] - B[:, np.newaxis, :]
d = np.einsum('jil,jil->ij', np.tensordot(diff, W, axes=(2,0)), diff)
return d
@staticmethod
def kernel(A, B, W, signal_std=1.):
'''Squared exponential covariance function.'''
d = GPRegressorStandalone.dist(A, B, W)
return signal_std**2 * np.exp(-0.5 * d)
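# Illustrative usage sketch (synthetic data, not part of the original module):
# with an identity weight matrix W the standalone regressor reduces to an
# isotropic squared-exponential kernel, and predictions at the training
# points should closely reproduce Y.
def _demo_gp_standalone():
    rng = np.random.RandomState(1)
    X = rng.uniform(-1.0, 1.0, size=(30, 4))
    Y = np.cos(X.sum(axis=1))
    gp = GPRegressorStandalone()
    gp.fit(X, Y, W=np.eye(4), signal_std=1.0, noise_std=1e-6, normalize=True)
    pred, sigma = gp.predict(X, return_std=True)
    return pred, sigma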
|
cheind/rgbd-correction
|
sensor_correction/gp_cpu.py
|
Python
|
bsd-3-clause
| 5,241
|
[
"Gaussian"
] |
60c42b9bb146400e8fe176500eb8caa94537e7902dfade37ef5e2180ab9640c2
|
# -*- coding: utf-8 -*-
"""
ORCA Open Remote Control Application
Copyright (C) 2013-2020 Carsten Thielepape
Please contact me by : http://www.orca-remote.org/
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from typing import Dict
from typing import Union
from ORCA.scripts.BaseScript import cBaseScript
class cSystemTemplate(cBaseScript):
""" template class for discover scripts """
def __init__(self):
cBaseScript.__init__(self)
self.uType:str = u'SYSTEM'
self.iHash:int = 0
def RunScript(self, *args, **kwargs) -> Union[Dict,None]:
""" main entry point to run the script """
if 'register' in args or kwargs.get("caller")=="appstart":
return self.Register(*args,**kwargs)
elif "unregister" in args:
return self.UnRegister(*args,**kwargs)
return None
def Register(self,*args,**kwargs) -> None:
return None
# noinspection PyUnusedLocal,PyMethodMayBeStatic
def UnRegister(self,*args,**kwargs) -> None:
return None
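# Illustrative sketch (assumed class name, not part of ORCA): a concrete
# system script would subclass cSystemTemplate and override the hooks that
# RunScript dispatches to.
class cExampleSystem(cSystemTemplate):
    """ minimal example system script """
    def Register(self, *args, **kwargs) -> None:
        # a real script would register its handlers with the framework here
        return None
    def UnRegister(self, *args, **kwargs) -> None:
        return None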
|
thica/ORCA-Remote
|
src/ORCA/scripttemplates/Template_System.py
|
Python
|
gpl-3.0
| 1,717
|
[
"ORCA"
] |
0e9e2fa26e3cac0218f8860d7c94ea4141370dce472027f8ed0e63c867a1c461
|
#this reviews Single Dish weblog
# --------------------------------------------------------------------------------------------------
def ReadDataFromWeb(mousid):
# --------------------------------------------------------------------------------------------------
"""
This function reads the metadata about the project for the given MOUS
"""
import urllib2
dataurl="http://www.eso.org/~fstoehr/project_ous_eb_hierarchy.txt"
dataurl2="http://www.eso.org/~fstoehr/ous_eb_qa0status.txt"
#print "Gathering metadata..."
response = urllib2.urlopen(dataurl)
html = response.read().splitlines()
response = None
mousid = str(mousid)
    datadict = {'mous': mousid}
mousid2=mousid.replace("___","://").replace("_","/")
for line in html:
line=line.split()
#print line[0],line[1],line[2],line[3],line[4]
if line[4]==mousid:
#print "found MOUS"
#print line[0]
datadict['code']=line[0]
datadict['sgous']=line[2]
datadict['gous']=line[3]
datadict['mous']= line[4]
if datadict.has_key('sbuids'):
datadict['sbuids'].append(line[9])
else:
datadict['sbuids']=[line[9]]
if datadict.has_key('sbnames'):
datadict['sbnames'].append(line[10])
else:
datadict['sbnames']=[line[10]]
response2 = urllib2.urlopen(dataurl2)
html2 = response2.read().splitlines()
response2 = None
for line2 in html2:
line2=line2.split("|")
if line2[2]=='SemiPass':
continue
if line2[0]==mousid2:
if datadict.has_key('ebuids'):
datadict['ebuids'].append(line2[1])
else:
datadict['ebuids']=[line2[1]]
return datadict
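# Illustrative sketch of the returned metadata (assumed values): for a MOUS id
# such as 'uid___A001_X123_X45', ReadDataFromWeb returns something like
# {'mous': 'uid___A001_X123_X45', 'code': '2016.1.00001.S',
#  'sgous': '...', 'gous': '...', 'sbuids': [...], 'sbnames': [...],
#  'ebuids': ['uid://A002/X.../X...']}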
# --------------------------------------------------------------------------------------------------
def DirectoryTree(path):
# --------------------------------------------------------------------------------------------------
'''
This maps the directory tree of the given path
'''
import os
import glob
os.chdir(path)
sous_dir = glob.glob('SOUS*') #figuring out SOUS and cd'ing into it
os.chdir('%s' % sous_dir[0])
gous_dir = glob.glob('GOUS*')
os.chdir('%s' % gous_dir[0])
mous_dir = glob.glob('MOUS*')
os.chdir('%s' % mous_dir[0])
project_sgm_dir = os.getcwd() #storing the whole path for later if ever needed
#continuing down to the HTML area to get the weblog directory
os.chdir('working')
pipeline_runs = sorted(glob.glob('pipeline-*/'), key=os.path.getmtime) #this sorts the glob list from oldest-to-newest
pipeline_dir = pipeline_runs[-1] #-1 selects the newest
os.chdir('%s/html' % pipeline_dir)
project_html_dir = os.getcwd()
return project_sgm_dir, project_html_dir
# --------------------------------------------------------------------------------------------------
def DisplayTelescopePointings(project_html_dir):
# --------------------------------------------------------------------------------------------------
'''
Typically, TP datasets have a lot of observations and it's a pain to navigate to every Telescope Pointing image in the weblog (Home > MS > Telescope Pointing)
'''
import os
import glob
#collecting all Telescope Pointing plots
os.chdir(project_html_dir)
sessions = glob.glob('sessionsession_*')
plots = []
for session in sessions:
os.chdir('%s/%s' % (project_html_dir, session))
uids = glob.glob('uid___*.ms')
for uid in uids:
os.chdir('%s/%s' % (project_html_dir, session))
os.chdir(uid)
whole_pointings = glob.glob('whole_pointing*') #there are target pointings as well in this directory - want the raster w/ reference
for pointing in whole_pointings:
plot_path = os.path.abspath(pointing)
plots.append(plot_path)
#copying the plots to another location and then viewing them together in a single session
os.chdir('%s/working' % project_sgm_dir)
os.system('rm -rf reference_plots') #removing the directory if this has been run before
os.mkdir('reference_plots')
for plot in plots:
os.system('cp %s %s/working/reference_plots/.' % (plot, project_sgm_dir))
#the plots all have the same name which gets overwritten when copied, so they are renamed to have unique identifiers
os.system('mv %s/working/reference_plots/%s %s/working/reference_plots/%s_%s_%s' % (project_sgm_dir, plot.split('/')[-1], project_sgm_dir, plot.split('/')[-3], plot.split('/')[-2], plot.split('/')[-1]))
os.chdir('%s/working/reference_plots' % project_sgm_dir)
#print here instead of raw_input bc raw_input would block the eog line from running
print """Scroll through every Telescope Pointing plot here. The off position needs to be ~3* from the target mapping. If it is not, do the following:
1.) Put the SB in suspended state in the project tracker
2.) Tell the contact scientist
3.) Report this in the P2G ticket
    Note: Near zenith the antennas cannot rotate fast enough; if the raster line is not straight, flag the antenna.
Close the window to continue
"""
whole_raster_plots = glob.glob('*.png')
os.system('eog %s' % whole_raster_plots[0])
# --------------------------------------------------------------------------------------------------
def CheckConversionFactors(project_sgm_dir, project_html_dir):
# --------------------------------------------------------------------------------------------------
'''
    This checks the conversion factors in hsd_k2jycal. The factors need to be between 40-50.
'''
import os
import glob
#first we need a robust way to locate where the jyperk.csv file is (which contains all the factors) I'll use os.walk for the robust search
os.chdir(project_html_dir)
for path, directories, files in os.walk(project_html_dir):
for item in files:
if item.endswith('.csv'):
factors_path = os.path.join(path, item)
#now scraping the values from the factors file
factors_page = open(factors_path).readlines()
factors = []
for line in factors_page:
factors.append(line.split(',')[-1].strip())
factors.remove('Factor') #this is removing the heading
    #If a factor of 1 appears, the task failed (the factors are read in as strings)
    if any(float(factor) == 1.0 for factor in factors):
print("hsd_k2jycal task failed, notify DRM")
quit()
#checking values are between 40-50
for factor in factors:
if (float(factor) >= 50.0) or (float(factor) <= 30.0):
print("Value %s: hsd_k2jycal factor outside allowable range of 40-50. Notify DRM" % factor)
quit()
print 'Factors within allowable range'
# --------------------------------------------------------------------------------------------------
def Stager(path, project_sgm_dir, output, username):
# --------------------------------------------------------------------------------------------------
'''
#Running the stager, this makes the "--analysis" directory
'''
import os
import subprocess
import glob
os.chdir(path)
os.system('rm -f %s/products/PPR*.xml.original' % project_sgm_dir) # when the pipeline is run via PPR instead of calibPipeIF a copy of the PPR is made -- this stuffs up the Stager if 2 PPR files are present
stager_directions = open('StagerDirections','a')
stager_directions.write('QA_Pipeline_Stager(\'%s\', \'%s\', mode=\'copy\', PIscript=\'/home/casa/contrib/AIV/science/qa2/scriptForPI.py\', fake_flux_calibration=False)' % (path, output))
stager_directions.close()
subprocess.call(['casa -c StagerDirections'], shell=True)
os.chdir(path)
os.system('rm -f StagerDirections')
#Moving the package to the QA2 area, putting the scriptForImaging.py there, and copying the edited-README there
os.system('mv %s /lustre/naasc/sciops/qa2/%s' % (output, username))
package_sgm_dir = '/lustre/naasc/sciops/qa2/%s/%s/sg_ouss_id/group_ouss_id/member_ouss_id/' % (username, output)
os.system('cp /home/da_data/scripts/DRM/scriptForImaging.py %s/script/.' % package_sgm_dir)
os.system('cp /home/da_data/scripts/DRM/scriptForImagingPrep.py %s/script/.' % package_sgm_dir)
if os.path.isfile('%s/README.header.txt' % project_sgm_dir) == True:
os.system('cp %s/README.header.txt %s' % (project_sgm_dir, package_sgm_dir)) #Putting the README here as well which is more convenient for running packaging
else:
raw_input('README file not named README.header.txt, please edit and press enter to continue')
os.system('cp %s/README.header.txt %s' % (project_sgm_dir, package_sgm_dir)) #Putting the README here as well which is more convenient for running packaging
package_sgm_dir = '/lustre/naasc/sciops/qa2/%s/%s/sg_ouss_id/group_ouss_id/member_ouss_id/' % (username, output)
#Unpacking the weblog in the -analysis package
os.chdir('%s/qa' % package_sgm_dir)
weblog = glob.glob('*.weblog.tgz')
os.system('tar -xvf %s' % weblog[0])
return package_sgm_dir
# --------------------------------------------------------------------------------------------------
def Packaging(datadict, username, qa2_dir):
# --------------------------------------------------------------------------------------------------
'''
#Running the QA2 Packaging (creates the tarballs and directory to be checked by DRMs)
'''
import os
import glob
import subprocess
if datadict['code'].startswith('2015'):
cycle_num = '3'
if datadict['code'].startswith('2016'):
cycle_num = '4'
#moving to user's qa2 directory and checking if Packages directory is there -- if not, create it
os.chdir(qa2_dir)
if os.path.isdir('%s/Packages' % qa2_dir) == False:
os.mkdir('Packages')
#writing instructions to run in CASA (these are CASA functions)
packaging_instructions = open('PackagingInstructions','a')
packaging_instructions.write('from QA2_Packaging_module import *\n')
#packaging_instructions.write('QA_Packager(origpath=\'%s/%s\', readme=\'%s/README.header.cycle4.txt\', packpath=\'%s/Packages/%s\', gzip_caltables=True, style=\'cycle%s-pipe1\', mode=\'hard\')' % (qa2_dir, output, package_sgm_dir, qa2_dir, package_name, cycle_num)) #this is a more unique way of naming the package with the MOUS and SBNAME included to avoid overlap but the archive packager which writes the directory structure in the README doesn't like it
packaging_instructions.write('QA_Packager(origpath=\'%s/%s\', readme=\'%s/README.header.txt\', packpath=\'%s/Packages/%s\', gzip_caltables=True, style=\'cycle%s-pipe1\', mode=\'hard\')' % (qa2_dir, output, package_sgm_dir, qa2_dir, datadict['code'], cycle_num))
packaging_instructions.close()
#Running the packaging instructions and removing some of the output that's not used
subprocess.call(['casa -c PackagingInstructions'], shell=True)
os.system('rm -f PackagingInstructions')
os.system('rm -f %s.ticket.tar' % datadict['code'])
os.system('rm -f %s.ticket.zip' % datadict['code'])
    #Running tarsplit, which makes the tarball off the packager-directory and then removing the directory after the tarball is made (not needed by DRMs and reduces issues of overwriting w/ science goals of the same project code)
os.chdir('%s/Packages' % qa2_dir)
subprocess.call(['/home/casa/contrib/AIV/science/DSO/tarsplit.py -f -o %s_%s %s' % (datadict['code'], mous2, datadict['code'])], shell=True)
os.system('rm -rf %s' % datadict['code'])
#Getting the name of the tarball
os.chdir('%s/Packages' % qa2_dir)
tarball_name = glob.glob('%s_%s*.tar' % (datadict['code'], mous2))
#Removing casa/ipython logfiles
os.chdir(qa2_dir)
os.system('rm -f casa-*.log')
os.system('rm -f ipython-*.log')
return tarball_name
# --------------------------------------------------------------------------------------------------
def ScrapeListobs(project_sgm_dir, datadict):
# --------------------------------------------------------------------------------------------------
'''
This checks the various listobs files to determine certain characteristics (check source, spw info) for when the calibrated_final.ms is created
'''
import os
import glob
import subprocess
#moving to the working directory and making a directions file for CASA
os.chdir('%s/working' % project_sgm_dir)
ms_code = datadict['ebuids'][0] #selecting the 1st one, no matter how many there are -- not sure if this is always right to do
ms_code = ms_code.replace(':','_').replace('/','_')
ms_name = '%s.ms' % ms_code
target_ms = '%s_target.ms' % ms_code
#opening a file to write directions to CASA to make listobs
os.system('rm -f ms.listobs') #removing if it already exists
create_listobs = open('CreateListobs', 'a')
create_listobs.write('listobs(vis=\'%s\', listfile=\'ms.listobs\')\n' % ms_name)
create_listobs.close()
#running the listobs commands and removing the directions file
subprocess.call(['casa -c CreateListobs'], shell=True)
os.system('rm -f CreateListobs')
#now scraping the ms listobs files for CHECK SOURCE information
ms_listobs = open('ms.listobs').readlines()
check_source = []
for line in ms_listobs:
if 'CHECK' in line:
check_source.append('Check source detected')
if not check_source: #if this array is empty, there is no check source
check_source = 'False'
else:
check_source = 'True'
#Since the TP doesn't have *_target.ms to make their listobs easy, I'll scrape the weblog for SPW information
single_EB = datadict['ebuids'][0].replace('://','___').replace('/','_')
os.chdir('%s/stage7/' % project_html_dir)
applycal_page = open('t2-4m_details.html').readlines()
strong_lines = []
for line in applycal_page:
if '</strong>' in line: #this character is bold in HTML; they mark the science SPWS with this in the weblog
strong_lines.append(line)
spw_list = []
for line in strong_lines[0].split('<strong>'):
if '</strong>' in line:
spw_list.append(line.split('</strong>')[0])
spw_string = ','.join(spw_list)
return check_source, spw_string
# --------------------------------------------------------------------------------------------------
def CreateSplitFiles(project_sgm_dir, datadict, spw_string):
# --------------------------------------------------------------------------------------------------
'''
Instructions were adapted from generateReducScript step to create the .split files. **This assumes you have the same number of spws for each MS**
These instructions are written into a file and then executed by CASA
'''
import os
import subprocess
#moving to the working directory and making a directions file for CASA
os.chdir('%s/working' % project_sgm_dir)
create_splits = open('CreateSplits','a')
asdms = datadict['ebuids']
for asdm in asdms:
asdm2 = asdm.replace('://','___').replace('/','_')
#removing any previous versions of the .split.cals
os.system('rm -rf %s.ms.split' % asdm2)
os.system('rm -rf %s.ms.split.flagversions' % asdm2)
#writing the CASA commands
create_splits.write('split(vis = \'%s.ms\', outputvis = \'%s.ms.split\', datacolumn = \'corrected\', spw = \'%s\', keepflags = True)\n' % (asdm2, asdm2, spw_string))
create_splits.close()
subprocess.call(['casa -c CreateSplits'], shell=True)
os.system('rm -f CreateSplits')
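# Illustrative sketch (assumed UID and spws): each line written to the
# CreateSplits instructions file above looks like
#   split(vis = 'uid___A002_Xabc_X1.ms', outputvis = 'uid___A002_Xabc_X1.ms.split',
#         datacolumn = 'corrected', spw = '17,19,21,23', keepflags = True)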
# --------------------------------------------------------------------------------------------------
def CreateSplitCalFiles(project_sgm_dir, datadict, check_source):
# --------------------------------------------------------------------------------------------------
'''
This creates the value-added data product that we provide to North American ALMA users, the calibrated visibilities. This happens in the original pipeline output's "working" directory (not to be confused with the current directory which is colloquially called 'working directory')
Instructions were adapted from generateReducScript step to create the .split.cal files
These instructions are written into a file and then executed by CASA
'''
import os
import subprocess
#moving to the working directory and making a directions file for CASA
os.chdir('%s/working' % project_sgm_dir)
create_split_cals = open('CreateSplitCals','a')
#getting a list of ASDMs to make .split.cal files from
if check_source == 'True':
#listOfIntents = ['CALIBRATE_BANDPASS#ON_SOURCE','CALIBRATE_FLUX#ON_SOURCE','CALIBRATE_PHASE#ON_SOURCE','CALIBRATE_WVR#ON_SOURCE','OBSERVE_CHECK_SOURCE#ON_SOURCE','OBSERVE_TARGET#ON_SOURCE']
        listOfIntents = [] #not sure what INTENTS are needed for a TP with a check source
if check_source == 'False':
listOfIntents = ['OBSERVE_TARGET#ON_SOURCE'] #this is only thing needed for TP?
asdms = datadict['ebuids']
for asdm in asdms:
asdm2 = asdm.replace('://','___').replace('/','_')
#removing any previous versions of the .split.cals
os.system('rm -rf %s.ms.split.cal' % asdm2)
os.system('rm -rf %s.ms.split.cal.flagversions' % asdm2)
        # Split out *data* column; typically, this is the corrected column but since the corrected has already been separated, another corrected doesn't exist
create_split_cals.write('split(vis = \'%s.ms.split\', outputvis = \'%s.ms.split.cal\', datacolumn = \'data\', intent = \',\'.join(%s), keepflags = True)\n' % (asdm2,asdm2,listOfIntents))
create_split_cals.close()
subprocess.call(['casa -c CreateSplitCals'], shell=True)
os.system('rm -f CreateSplitCals')
# --------------------------------------------------------------------------------------------------
def CreateCalibratedFinal(project_sgm_dir, datadict):
# --------------------------------------------------------------------------------------------------
'''
This creates the value-added data product that we provide to North American ALMA users: the calibrated visibilities. This happens in the original pipeline output's "working" directory (not to be confused with the directory the script is run from, which is also colloquially called the 'working directory')
Instructions were adapted from scriptForImagingPrep.py to combine MS's and create calibration_final.ms
These instructions are written into a file and then executed by CASA
'''
import os
import glob
import subprocess
#moving to the working directory and making a directions file for CASA
os.chdir('%s/working' % project_sgm_dir)
os.system('rm -rf calibrated.ms')
os.system('rm -rf calibrated.ms.flagversions')
os.system('rm -rf calibrated_source.ms')
os.system('rm -rf calibrated_source.ms.flagversions')
os.system('rm -rf calibrated_final.ms')
os.system('rm -rf calibrated_final.ms.backup')
#writing instructions file to CASA
vislist = glob.glob('*.ms.split.cal') #this vislist is for the for-loop
create_calibrated_final = open('CreateCalibratedFinal','w') #'w' so a stale file from an earlier run is not appended to
create_calibrated_final.write('vislist = glob.glob(\'*.ms.split.cal\')\n') #this vislist is for the CASA commands
create_calibrated_final.write('concatvis=\'calibrated.ms\'\n')
create_calibrated_final.write('rmtables(concatvis)\n')
if len(vislist) == 1:
create_calibrated_final.write('concatvis = vislist[0]\n')
else:
create_calibrated_final.write('concat(vis=%s, concatvis=concatvis)\n' % vislist)
create_calibrated_final.write('concatvis = \'calibrated.ms\'\n')
create_calibrated_final.write('sourcevis=\'calibrated_source.ms\'\n')
create_calibrated_final.write('rmtables(sourcevis)\n')
create_calibrated_final.write('split(vis=concatvis, intent=\'*TARGET*\', outputvis=sourcevis, datacolumn=\'data\')\n')
create_calibrated_final.write('listobs(vis=\'calibrated_source.ms\',listfile=\'calibrated_final.ms.listobs.txt\')\n')
create_calibrated_final.close()
#Running the CASA instructions and deleting them
subprocess.call(['casa -c CreateCalibratedFinal'], shell=True)
os.system('rm -f CreateCalibratedFinal')
# Rename and backup data set
os.system('mv -f calibrated_source.ms calibrated_final.ms')
os.system('cp -rf calibrated_final.ms calibrated_final.ms.backup')
os.system('tar -cvf calibrated_final.ms.tar calibrated_final.ms') #tarring this for SRDP delivery next
# --------------------------------------------------------------------------------------------------
def ProprietaryAccess(project_html_dir, project_sgm_dir, datadict, username):
# --------------------------------------------------------------------------------------------------
'''
This takes care of putting the SRDP (calibrated_final.ms) in the proprietary area for the PI to pick up
'''
import os
import glob
import sys
#gathering the PI name by loading the weblog's main html page: find the 'Principal Investigator' row and strip off the html tags
os.chdir(project_html_dir)
main_page = open('t1-1.html').readlines()
line_index = 0
for line in main_page:
if 'Principal Investigator' in line:
pi_index = line_index + 1
pi_code = main_page[pi_index].strip().split('>')[1].split('<')[0]
line_index = line_index + 1
#creating a directory for this PI in the area they can download it (if it doesn't already exist)
os.chdir('/lustre/naasc/ALMA_Data_Delivery/proprietary/')
if not os.path.isdir(pi_code):
os.mkdir(pi_code)
#checking and creating/appending an .htaccess file
os.chdir(pi_code)
if not os.path.isfile('.htaccess'):
htaccess = open('.htaccess', 'a')
htaccess.write('AuthType CAS\n')
htaccess.write('Require user %s %s dckim cbrogan aremijan ksharp cubach swood pmurphy teuben\n' % (pi_code, username))
htaccess.write('Order deny,allow\n')
htaccess.write('AuthName \"Authentication Required\"\n')
htaccess.close()
else:
#if the file exists, see if the current user is already listed for access
htaccess_lines = open('.htaccess').readlines()
if not any(username in line for line in htaccess_lines):
#the user is not listed yet; rewrite the file with the name appended to the Require line, preserving all other lines
new_htaccess = open('.htaccess_new', 'w')
for line in htaccess_lines:
if line.startswith('Require user'):
new_htaccess.write(line.strip() + ' %s\n' % username)
else:
new_htaccess.write(line)
new_htaccess.close()
#putting the new file in place of the old one
os.system('mv -f .htaccess .htaccess_old')
os.system('mv -f .htaccess_new .htaccess')
#creating a directory in the PI area for this specific MOUS; if it somehow already exists, continue on
os.chdir('/lustre/naasc/ALMA_Data_Delivery/proprietary/%s' % pi_code)
mous_dir = datadict['mous'].split('_')[-1]
if not os.path.isdir(mous_dir):
os.mkdir(mous_dir)
os.chdir(mous_dir)
delivery_path = os.getcwd()
#moving the calibrated_final.ms.tar into this directory
os.chdir('%s/working' % project_sgm_dir)
os.system('mv calibrated_final.ms.tar %s' % delivery_path)
#checking the directory and file are there (the web-visible path below is assumed to mirror the delivery area above):
SRDP_path = '/lustre/naasc/web/almadata/proprietary/%s/%s/calibrated_final.ms.tar' % (pi_code, mous_dir)
if not os.path.exists(SRDP_path):
raw_input('Error: package not in delivery area')
return mous_dir, pi_code, SRDP_path
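# For reference, a freshly created .htaccess (usernames illustrative) looks like:
#   AuthType CAS
#   Require user <pi_code> <username> dckim cbrogan aremijan ksharp cubach swood pmurphy teuben
#   Order deny,allow
#   AuthName "Authentication Required"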
# --------------------------------------------------------------------------------------------------
#main
# --------------------------------------------------------------------------------------------------
import os
import sys
import webbrowser
import glob
username = raw_input('What is your lustre username?:').strip()
mousid_orig = raw_input('What is the MOUS code? (from SCOPS-ticket):').strip()
path = raw_input('Enter path to the pipeline output:').strip()
project_sgm_dir, project_html_dir = DirectoryTree(path)
mousid=mousid_orig.replace('://','___').replace('/','_')
#Generating the datadict
datadict = ReadDataFromWeb(mousid)
print 'Scanning metadata...'
#Pipeline can fail with 40+ executions
if len(datadict['sbuids']) >= 40:
raw_input('This dataset has 40 or more executions, which can cause the pipeline to fail. If a failure occurred, notify the DRM. Press Enter to continue')
print 'Opening the weblog...'
webbrowser.open('%s/index.html' % project_html_dir)
print 'Displaying Telescope Pointing files.'
DisplayTelescopePointings(project_html_dir)
print 'Checking Kelvin to Jy conversion factors...'
CheckConversionFactors(project_sgm_dir, project_html_dir)
raw_input('Review applycal, spectral baseline subtractions, and images. Press Enter if everything is correct')
print 'Generating your EPT stamps...\n'
#EPT stamp for calibration
print '#%s %s CALIBRATED YES/NO' % (datadict['code'], datadict['sbuids'][0].replace('___','://').replace('_','/'))
print 'SBName: %s\n' % datadict['sbnames'][0].strip('"')
#EPT stamps for imaging
print '#%s %s IMAGING NA_Pipeline' % (datadict['code'], datadict['sbuids'][0].replace('___','://').replace('_','/'))
print 'SBName: %s' % datadict['sbnames'][0].strip('"')
raw_input('\nPress Enter to continue with packaging')
#Creating the -analysis directory (staging), the packaged file, and the tarballs
mous2=mousid_orig.replace(':','_').replace('/','_')
output = datadict['code']+'.MOUS.'+mous2+'.SBNAME.'+datadict['sbnames'][0].strip('"')+'-analysis'
qa2_dir = '/lustre/naasc/sciops/qa2/%s' % username
package_sgm_dir = output+'/sg_ouss_id/group_ouss_id/member_ouss_id'
Stager(path, project_sgm_dir, output, username)
tarball_name = Packaging(datadict, username, qa2_dir)
raw_input('Post tarball package path to SCOPS for DRM check. Press Enter to continue')
#Creating the calibrated_final.ms files in the pipeline working directory
check_source, spw_string = ScrapeListobs(project_sgm_dir, datadict)
CreateSplitFiles(project_sgm_dir, datadict, spw_string)
CreateSplitCalFiles(project_sgm_dir, datadict, check_source)
CreateCalibratedFinal(project_sgm_dir, datadict)
#---------------------------
#moving tarball to the cycleX_release
os.chdir('/lustre/naasc/sciops/qa2/%s/Packages' % username)
if datadict['code'].startswith('2016'):
cycle = '4'
if datadict['code'].startswith('2015'):
cycle = '3'
os.system('mv %s_%s_001_of_001.tar /lustre/naasc/sciops/cycle%s_release' % (datadict['code'], datadict['mous'], cycle))
os.system('rm %s.ticket.tar' % datadict['code'])
os.system('rm %s.ticket.zip' % datadict['code'])
#---------------------------
#moving *-analysis package to deliveries
os.chdir('/lustre/naasc/sciops/qa2/%s' % username)
os.system('mv %s /lustre/naasc/sciops/deliveries' % output)
#Putting the SRDP in the PI area for pickup
mous_dir, pi_code, SRDP_path = ProprietaryAccess(project_html_dir, project_sgm_dir, datadict, username)
# ---------------------------
#Print out for helpdesk ticket
print """\n\n\nFor normal delivery:
To: helpdesk-cv@nrao.edu
CC: mlacy, cubach, jmangum, dkunneri
Subject: Cycle %s data for ingestion: %s, MOUS: %s
Content:
Please upload the following file to JAO:
File Path: /lustre/naasc/sciops/cycle%s_release/%s_%s_001_of_001.tar
Project code: %s
GOUS: %s
MOUS: %s
SBName: %s
SBuid: %s
ASDMs: %s
Thanks!\n\n\n""" % (cycle, datadict['code'], datadict['mous'], cycle, datadict['code'], mous2, datadict['code'], datadict['gous'], datadict['mous'], datadict['sbnames'][0].strip('"'), datadict['sbuids'][0].strip('"'), datadict['ebuids'][0].strip('"'))
print """For re-delivery (re-ingestion):
Create an APO ticket: http://jira.alma.cl/projects/APO
Set Issue Type as ARCHIVE
Summary: Cycle %s data for RE-ingestion: %s, %s
Category: Data Project Ingestion
Assignee: Jose Parra
Description: Cycle %s data for ingestion: %s, MOUS: %s
Please upload the following file to JAO:
File Path: /lustre/naasc/sciops/cycle%s_release/%s_%s_001_of_001.tar
Project code: %s
GOUS: %s
MOUS: %s
SBName: %s
SBuid: %s
ASDMs: %s
Thanks!
Additional users to email: alejandro.barrientos@alma.cl, bernardo.malet@alma.cl, cubach@nrao.edu, jotey@nrao.edu, mhatz@nrao.edu, mlacy@nrao.edu, nicolas.gonzalez@alma.cl\n\n\n""" % (cycle, datadict['code'], datadict['mous'], cycle, datadict['code'], datadict['mous'], cycle, datadict['code'], mous2, datadict['code'], datadict['gous'], datadict['mous'], datadict['sbnames'][0].strip('"'), datadict['sbuids'][0].strip('"'), datadict['ebuids'][0].strip('"'))
# ---------------------------
#The final steps: needed webpages are opened for you
raw_input('Once JAO ingests the package into the archive an automated delivery email is sent to the PI and to data_delivery@alma.cl. Press enter when notification email is received to start the final process:\n\n\n')
print('Loading webpages...')
webbrowser.open('http://help.almascience.org')
webbrowser.open('http://rcmail.cv.nrao.edu')
print """
Dear PI,
You should have recently received an email announcing that data for member ObsUnitSet %s of your project %s are now available for download through the ALMA Science Portal Request Handler.
For your convenience, a fully-calibrated MS is also available for download through the NAASC Web server. Unlike the data obtained through the Request Handler, these calibrated data will only be available for the next 30 days. A concatenated measurement set containing only target data is available as calibrated_final.ms. Your data may be found here:
https://bulk.cv.nrao.edu/almadata/proprietary/%s/%s/calibrated_final.ms.tar
In addition, we would like to recommend to you the services that NRAO can provide to assist you in the analysis of your data. We welcome visits from PIs who would like to rereduce their data from the original raw files, and from PIs who would like advice on how to further interpret and display the images they have received. Financial support for travel costs is available for PIs based in the US. Even if you don't feel you need to visit but have a few questions, we have scientists available who can talk with you by phone or video connection, or respond to email and Helpdesk queries.
With kind regards,
The North American ALMA Archive at the NAASC\n\n\n""" % (datadict['mous'], datadict['code'], pi_code, mous_dir)
raw_input('1.) Post the previous stamp to the P2G Helpdesk Ticket and email the stamp to teuben@astro.umd.edu. Enter to continue\n')
webbrowser.open('https://asa.alma.cl/protrack/')
raw_input('2.) Update Project Tracker: change MOUS state \"Delivered\". Enter to continue\n')
webbrowser.open('https://webtest2.cv.nrao.edu/php/pfisher/drspreadCycle%s.php' % cycle)
raw_input('3.) Update DrSpreadSheet: set Delivery Date and QA2 status. Enter to continue\n')
raw_input('4.) Press Enter to move pipeline output directory to deliveries')
os.system('mv %s /lustre/naasc/sciops/deliveries/' % path)
raw_input('5.) Press Enter for the final SCOPS stamp and you\'re finished.')
print '<DRM>, for %s, the delivery letter to the PI has been sent. The dataset has been moved to /lustre/naasc/sciops/deliveries' % datadict['sbnames'][0]
|
bmarshallk/NAASC
|
totalpower_scripts/ProcessSingleDish.py
|
Python
|
gpl-3.0
| 30,986
|
[
"VisIt"
] |
ab4249957abc175717f565e6fffdcab3493e10b68ef9841137285ab3b0cadbfe
|
"""
Contains Experiment and Injection classes.
"""
import os
import logging
import numpy
from pint import DimensionalityError
from bitc.units import ureg, Quantity
# Use logger with name of module
logger = logging.getLogger(__name__)
class Injection(object):
"""
Data from a single injection.
Several types of information are stored about each injection:
* the ordinal number of the injection
* the programmed volume of the injection
* duration of the injection
* time between the beginning of the injection and the beginning of the next injection
* filtering period over which data channel is averaged to produce a single measurement of applied power
EXAMPLES
"""
# TODO Add docstring examples.
def __init__(self, number, volume, duration, spacing, filter_period, evolved_heat=None, titrant_amount=None, titrant_concentration=None):
# sequence number of injection
self.number = number
# programmed volume of injection
self.volume = volume
# duration of injection
self.duration = duration
# time between beginning of injection and beginning of next injection
self.spacing = spacing
# time over which data channel is averaged to produce a single measurement
# of applied power
self.filter_period = filter_period
# If provided, set the evolved_heat, making sure the unit is compatible
# with microcalorie
if evolved_heat is not None:
self.evolved_heat = evolved_heat.to('microcalorie')
# the quantity of compound(s) injected
if titrant_amount is not None:
self.titrant = titrant_amount
elif titrant_concentration is not None:
self.contents(titrant_concentration)
else:
# No titrant information yet; callers such as ExperimentMicroCal
# fill this in later via contents(), so this is not an error here.
pass
def contents(self, titrant_concentration):
"""
Define the contents of what was injected
Takes a list/array of concentrations
"""
# Concentration of syringe contents
self.titrant_concentration = Quantity(
numpy.array(titrant_concentration), ureg.millimole / ureg.liter)
self.titrant = Quantity(
numpy.zeros(self.titrant_concentration.size), ureg.millimole)
for titr in range(self.titrant_concentration.size):
# Amount of titrant in the syringe (mole). A single-component
# concentration is stored as a 0-d array, which cannot be indexed.
if self.titrant_concentration.size == 1:
self.titrant[titr] = self.volume * self.titrant_concentration
else:
self.titrant[titr] = self.volume * self.titrant_concentration[titr]
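# Illustrative sketch (not part of the original module): constructing an
# Injection directly with pint quantities; all values here are arbitrary.
def _example_injection():
    return Injection(number=1,
                     volume=10.0 * ureg.microliter,
                     duration=4.0 * ureg.second,
                     spacing=300.0 * ureg.second,
                     filter_period=2.0 * ureg.second,
                     titrant_concentration=1.0)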
class BaseExperiment(object):
"""
Abstract base class for an ITC experiment
"""
def __init__(self, data_source, experiment_name, instrument):
"""
Base init, prepare all the variables
:param data_source:
:type data_source: str
:param experiment_name:
:type experiment_name: str
:return:
:rtype:
"""
# Initialize.
# the source filename from which data is read
self.data_filename = None
self.instrument = instrument # the instrument that was used
self.number_of_injections = None # number of syringe injections
self.target_temperature = None # target temperature
# initial equilibration (delay) time before injections
self.equilibration_time = None
self.stir_speed = None # rate of stirring
self.reference_power = None # power applied to reference cell
# concentrations of various species in syringe
self.syringe_contents = None
# concentrations of various species in sample cell
self.sample_cell_contents = None
self.cell_volume = instrument.V0 # volume of liquid in sample cell
# list of injections (and their associated data)
self.injections = None
# time at end of filtering period
self.filter_period_end_time = None
# time at midpoint of filtering period
self.filter_period_midpoint_time = None
# "differential" power applied to sample cell
self.differential_power = None
self.cell_temperature = None # cell temperature
self.name = experiment_name
self.data_source = data_source
# Extract and store data about the experiment.
self.number_of_injections = None
self.target_temperature = None
self.equilibration_time = None
self.stir_rate = None
self.reference_power = None
# Store additional data about experiment.
self.syringe_concentration = None
# supposed concentration of receptor in cell
self.cell_concentration = None
# Allocate storage for power measurements.
self.time = None
self.heat = None
self.temperature = None
# Store data about measured heat liberated during each injection.
# time at end of filtering period (s)
self.filter_period_end_time = None
# "differential" power applied to sample cell (ucal/s)
self.differential_power = None
self.cell_temperature = None # cell temperature (K)
self.jacket_temperature = None # adiabatic jacket temperature (K)
def __str__(self):
"""
Show details of experiment in human-readable form.
"""
# TODO Clean up this definition
string = ""
string += "EXPERIMENT\n"
string += "\n"
string += "Source filename: %s\n" % self.data_filename
string += "Number of injections: %d\n" % self.number_of_injections
string += "Target temperature: %.1f K\n" % (
self.target_temperature / ureg.kelvin)
try:
string += "Equilibration time before first injection: %.1f s\n" % (
self.equilibration_time / ureg.second)
except TypeError:
string += "Equilibration time unknown"
# TODO temporary, needs to be uniform type among all experiment classes
if isinstance(self.syringe_concentration, Quantity):
string += "Syringe concentration: %.3f mM\n" % (self.syringe_concentration / (ureg.millimole / ureg.liter))
if isinstance(self.cell_concentration, Quantity):
string += "Cell concentration: %.3f mM\n" % (self.cell_concentration / (ureg.millimole / ureg.liter))
string += "Cell volume: %.3f ml\n" % (
self.cell_volume / ureg.milliliter)
if isinstance(self.reference_power, Quantity):
string += "Reference power: %.3f ucal/s\n" % (self.reference_power / (ureg.microcalorie / ureg.second))
string += "\n"
string += "INJECTIONS\n"
string += "\n"
string += "%16s %24s %24s %24s %24s %24s\n" % (
'injection',
'volume (uL)',
'duration (s)',
'spacing (s)',
'filter period (s)',
'evolved heat (ucal)'
)
# for injection in range(self.number_of_injections):
# string += "%16d %16.3f %16.3f %16.3f %16.3f" % (injection, self.injection_volume[injection] / unit.microliter, self.injection_duration[injection] / unit.second, self.collection_time[injection] / unit.second, self.time_step[injection] / unit.second)
for injection in self.injections:
string += "%16d %24.3f %24.3f %24.3f %24.3f %24.3f\n" % (
injection.number,
injection.volume /
ureg.microliter, injection.duration / ureg.second,
injection.spacing / ureg.second, injection.filter_period /
ureg.second, injection.evolved_heat / ureg.microcalorie)
return string
def write_integrated_heats(self, filename):
"""
Write integrated heats in a format similar to that used by Origin.
"""
DeltaV = self.injections[0].volume
V0 = self.cell_volume
P0 = self.cell_concentration
Ls = self.syringe_concentration
string = "%12s %5s %12s %12s %12s %12s\n" % ("DH", "INJV", "Xt", "Mt", "XMt", "NDH")
for (n, injection) in enumerate(self.injections):
# Instantaneous injection model (perfusion)
# d = 1.0 - (DeltaV / V0) # dilution factor (dimensionless)
# P = V0 * P0 * d**(n+1) # total quantity of protein in sample cell after n injections (mol)
# L = V0 * Ls * (1. - d**(n+1)) # total quantity of ligand in sample cell after n injections (mol)
# PLn = 0.5/V0 * ((P + L + Kd*V0) - numpy.sqrt((P + L + Kd*V0)**2 - 4*P*L)); # complex concentration (M)
# Pn = P/V0 - PLn; # free protein concentration in sample cell after n injections (M)
# Ln = L/V0 - PLn; # free ligand concentration in sample cell after
# n injections (M)
Pn = 0.0 * (ureg.millimole / ureg.liter)
Ln = 0.0 * (ureg.millimole / ureg.liter)
PLn = 0.0 * (ureg.millimole / ureg.liter)
NDH = 0.0 # review Not sure what this is
# Form string.
string += "%12.5f %5.1f %12.5f %12.5f %12.5f %12.5f\n" % (
injection.evolved_heat / ureg.microcalorie, injection.volume /
ureg.microliter, Pn /
(ureg.millimole / ureg.liter), Ln /
(ureg.millimole / ureg.liter),
PLn / (ureg.millimole / ureg.liter), NDH)
# Final line.
string += " -- %12.5f %12.5f --\n" % (
Pn / (ureg.millimole / ureg.liter), Ln / (ureg.millimole / ureg.liter))
# Write file contents.
outfile = open(filename, 'w')
outfile.write(string)
outfile.close()
return
def write_heats_csv(self, filename):
"""
Write integrated heats in a csv format
"""
DeltaV = self.injections[0].volume
V0 = self.cell_volume
P0 = self.cell_concentration
Ls = self.syringe_concentration
string = "%12s, %5s, %12s, %12s, %12s, %12s\n" % (
"DH", "INJV", "Xt", "Mt", "XMt", "NDH")
for (n, injection) in enumerate(self.injections):
# Instantaneous injection model (perfusion)
# d = 1.0 - (DeltaV / V0) # dilution factor (dimensionless)
# P = V0 * P0 * d**(n+1) # total quantity of protein in sample cell after n injections (mol)
# L = V0 * Ls * (1. - d**(n+1)) # total quantity of ligand in sample cell after n injections (mol)
# PLn = 0.5/V0 * ((P + L + Kd*V0) - numpy.sqrt((P + L + Kd*V0)**2 - 4*P*L)); # complex concentration (M)
# Pn = P/V0 - PLn; # free protein concentration in sample cell after n injections (M)
# Ln = L/V0 - PLn; # free ligand concentration in sample cell after
# n injections (M)
Pn = 0.0 * (ureg.millimole / ureg.liter)
Ln = 0.0 * (ureg.millimole / ureg.liter)
PLn = 0.0 * (ureg.millimole / ureg.liter)
NDH = 0.0 # review Not sure what this is
# Form string.
string += "%12.5f %5.1f %12.5f %12.5f %12.5f %12.5f\n" % (
injection.evolved_heat / ureg.microcalorie, injection.volume /
ureg.microliter, Pn /
(ureg.millimole / ureg.liter), Ln /
(ureg.millimole / ureg.liter),
PLn / (ureg.millimole / ureg.liter), NDH)
# Final line.
string += " -- %12.5f %12.5f --\n" % (Pn / (ureg.millimole / ureg.liter), Ln / (ureg.millimole / ureg.liter))
# Write file contents.
outfile = open(filename, 'w')
outfile.write(string)
outfile.close()
return
# TODO do we want all the details, including volumes?
def read_integrated_heats(self, heats_file, unit='microcalorie'):
"""
Read integrated heats from an origin file
:param heats_file:
:type heats_file:
:return:
:rtype:
"""
heats = self._parse_heats(heats_file, unit)
if heats.size != self.number_of_injections:
raise ValueError("The number of injections does not match the number of integrated heats in %s" % heats_file)
for inj, heat in enumerate(heats):
self.injections[inj].evolved_heat = heat
@staticmethod
def _parse_heats(heats_file, unit):
"""
Take as input a file with integrated heats, in the format written by write_integrated_heats. Output the heats as a Quantity array in the given unit.
:param heats_file:
:type heats_file:
:param unit:
:type unit:
:return:
:rtype:
"""
import pandas as pd
assert isinstance(heats_file, str)
# Need python engine for skip_footer
dataframe = pd.read_table(heats_file, skip_footer=1, engine='python')
heats = numpy.array(dataframe['DH'])
return Quantity(heats, unit)
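# Illustrative sketch (layout assumed) of a heats file _parse_heats accepts: a
# whitespace-delimited table with a 'DH' column and one footer line, as
# written by write_integrated_heats above:
#   DH        INJV  Xt       Mt       XMt      NDH
#   -12.34567 5.0   0.00000  0.00000  0.00000  0.00000
#   ...
#   -- 0.00000 0.00000 --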
class ExperimentMicroCal(BaseExperiment):
"""
Data from an ITC experiment.
The experiment consists of several types of data:
* the instrument that was used
* experimental conditions (temperature, stir speed, etc.)
* concentrations of various components in syringe and sample cell
* injection volumes and durations, collection times
* time record of applied power and temperature difference
"""
# TODO Add type verification
def __init__(self, data_filename, experiment_name, instrument):
"""
Initialize an experiment from a Microcal VP-ITC formatted .itc file.
ARGUMENTS
data_filename (String) - the filename of the Microcal VP-ITC formatted .itc file to initialize the experiment from
TODO
* Add support for other formats of datafiles (XML, etc.).
"""
# Initialize.
super(ExperimentMicroCal, self).__init__(data_filename, experiment_name, instrument)
# the source filename from which data is read
# concentrations of various species in syringe
self.syringe_contents = list()
# concentrations of various species in sample cell
self.sample_cell_contents = list()
# list of injections (and their associated data)
self.injections = list()
# time at end of filtering period
# cell temperature
self.name = experiment_name
# Check to make sure we can access the file.
if not os.access(data_filename, os.R_OK):
raise "The file '%s' cannot be opened." % data_filename
# Open the file and read its contents.
infile = open(data_filename, 'r')
lines = infile.readlines()
infile.close()
# Check the header to make sure it is a VP-ITC text-formatted .itc
# file.
if lines[0][0:4] != '$ITC':
raise "File '%s' doesn't appear to be a Microcal VP-ITC data file." % data_filename
# Store the datafile filename.
self.data_filename = data_filename
# Extract and store data about the experiment.
self.number_of_injections = int(lines[1][1:].strip())
self.target_temperature = (int(lines[3][1:].strip()) + 273.15) * ureg.kelvin # convert from C to K
self.equilibration_time = int(lines[4][1:].strip()) * ureg.second
self.stir_rate = int(lines[5][1:].strip()) * ureg.revolutions_per_minute
self.reference_power = float(lines[6][1:].strip()) * ureg.microcalorie / ureg.second
# Extract and store metadata about injections.
injection_number = 0
for line in lines[10:]:
if line[0] == '$':
# Increment injection counter.
injection_number += 1
# Read data about injection.
(injection_volume,
injection_duration,
spacing,
filter_period) = line[1:].strip().split(",")
# Extract data for injection and apply appropriate unit
# conversions.
injectiondict = dict()
injectiondict['number'] = injection_number
injectiondict['volume'] = float(injection_volume) * ureg.microliter
injectiondict['duration'] = float(injection_duration) * ureg.second
# time between beginning of injection and beginning of next injection
injectiondict['spacing'] = float(spacing) * ureg.second
# time over which data channel is averaged to produce a single measurement
injectiondict['filter_period'] = float(filter_period) * ureg.second
self.injections.append(Injection(**injectiondict))
else:
break
# Store additional data about experiment.
parsecline = 11 + self.number_of_injections
# supposed concentration of compound in syringe
self.syringe_concentration = {'ligand': float(lines[parsecline][1:].strip()) * ureg.millimole / ureg.liter}
for inj in self.injections:
# TODO add support for multiple components
inj.contents(sum(self.syringe_concentration.values()))
# supposed concentration of receptor in cell
self.cell_concentration = {'macromolecule': float(lines[parsecline + 1][1:].strip()) * ureg.millimole / ureg.liter}
self.cell_volume = float(lines[parsecline + 2][1:].strip()) * ureg.milliliter # cell volume
self.injection_tick = [0]
# Allocate storage for power measurements.
self.time = list()
self.heat = list()
self.temperature = list()
# Extract lines containing heat measurements.
for (index, line) in enumerate(lines):
if line[:2] == '@0':
break
measurement_lines = lines[index:]
# Count number of power measurements.
nmeasurements = 0
for line in measurement_lines:
if line[0] != '@':
nmeasurements += 1
logger.info("There are %d power measurements." % nmeasurements)
# Store data about measured heat liberated during each injection.
# time at end of filtering period (s)
self.filter_period_end_time = ureg.Quantity(numpy.zeros([nmeasurements], numpy.float64), ureg.second)
# "differential" power applied to sample cell (ucal/s)
self.differential_power = ureg.Quantity(numpy.zeros([nmeasurements], numpy.float64), ureg.microcalorie / ureg.second)
# cell temperature (K)
self.cell_temperature = ureg.Quantity(numpy.zeros([nmeasurements], numpy.float64), ureg.kelvin)
# adiabatic jacket temperature (K)
self.jacket_temperature = ureg.Quantity(numpy.zeros([nmeasurements], numpy.float64), ureg.kelvin)
# Process data.
# TODO this is a mess, need to clean up and do proper input
# verification
nmeasurements = 0
injection_labels = list()
for (index, line) in enumerate(measurement_lines):
if line[0] == '@':
injection_labels.append(nmeasurements)
else:
# Extract data for power measurement.
# TODO: Auto-detect file format?
#
jacket_temperature = 0.0
try:
(time,
power,
temperature,
a,
jacket_temperature,
c,
d,
e,
f) = line.strip().split(",") # Berkeley Auto iTC-200
except ValueError:
try:
# works with Shoichet lab VP-ITC .itc files---what are other readings (a,b,c,d)?
(time,
power,
temperature,
a,
jacket_temperature,
c,
d) = line.strip().split(",")
# b looks like adiabatic jacket temperature (~1 degree C below sample temperature)
except ValueError:
# works with David Minh's VP-ITC .itc files
(time, power, temperature) = line.strip().split(",")
# Store data about this measurement.
self.filter_period_end_time[nmeasurements] = float(time) * ureg.second
self.differential_power[nmeasurements] = float(power) * ureg.microcalorie / ureg.second
self.cell_temperature[nmeasurements] = (float(temperature) + 273.15) * ureg.kelvin
self.jacket_temperature[nmeasurements] = (float(jacket_temperature) + 273.15) * ureg.kelvin
nmeasurements += 1
# number of injections read, not including @0
number_of_injections_read = len(injection_labels) - 1
# Perform a self-consistency check on the data to make sure all injections are accounted for.
if number_of_injections_read != self.number_of_injections:
logger.warning("Number of injections read (%d) is not equal to number of injections declared (%d)." % (number_of_injections_read, self.number_of_injections) +
"This is usually a sign that the experimental run was terminated prematurely." +
"The analysis will not include the final %d injections declared." % (self.number_of_injections - number_of_injections_read))
# Remove extra injections.
self.injections = self.injections[0:number_of_injections_read]
self.number_of_injections = number_of_injections_read
logger.debug("self.injections has %d elements" % (len(self.injections)))
# Annotate list of injections.
for injection in self.injections:
injection_number = injection.number
logger.debug("%5d %8d" % (injection_number, injection_labels[injection_number]))
injection.first_index = injection_labels[injection_number]
if injection_number < len(injection_labels) - 1:
injection.last_index = injection_labels[
injection_number + 1] - 1
else:
injection.last_index = nmeasurements - 1
# Fit baseline.
self.fit_gaussian_process_baseline()
# Integrate heat evolved from each injection.
self.integrate_heat()
return
def write_power(self, filename):
"""
DEBUG: Write power.
"""
outfile = open(filename, 'w')
outfile.write("%%%7s %16s %16s\n" % ('time (s)', 'heat (ucal/s)', 'temperature (K)'))
for index in range(len(self.filter_period_end_time)):
outfile.write("%8.1f %16.8f %16.8f\n" % (self.filter_period_end_time[index] / ureg.second,
self.differential_power[index] / (ureg.microcalorie / ureg.second),
self.cell_temperature[index] / ureg.kelvin
)
)
outfile.close()
return
@staticmethod
def _plot_confidence_interval(axes, full_x, sigma, y_pred):
# Confidence interval
axes.fill(numpy.concatenate([full_x, full_x[::-1]]),
numpy.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]
]),
alpha=.7, fc='black', ec='None', label='95% confidence interval')
def _plot_gaussian_baseline(self, full_x, full_y, sigma, x, y, y_pred):
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
figure = Figure()
canvas = FigureCanvas(figure)
axes = figure.add_subplot(1, 1, 1, axisbg='whitesmoke')
# Adds a 95% confidence interval to the plot
ExperimentMicroCal._plot_confidence_interval(axes, full_x, sigma, y_pred)
# Entire set of data
axes.plot(full_x, full_y, 'o', markersize=2, lw=1, color='deepskyblue', alpha=.5, label='Raw data')
# Points for fit
axes.plot(x, y, 'o', color='crimson', markersize=2, alpha=.8, label='Fitted data')
# Prediction
axes.plot(full_x, y_pred, 'o', markersize=1, mec='w', mew=1, color='k', alpha=.5, label='Predicted baseline')
# Plot injection time markers.
[ymin, ymax] = axes.get_ybound()
for injection in self.injections:
# timepoint at start of syringe injection
first_index = injection.first_index
t = self.filter_period_end_time[first_index] / ureg.second
axes.plot([t, t], [ymin, ymax], '-', color='crimson')
# Adjust axis to zoom in on baseline.
ymax = self.baseline_power.max() / (ureg.microcalorie / ureg.second)
ymin = self.baseline_power.min() / (ureg.microcalorie / ureg.second)
width = ymax - ymin
ymax += width / 2
ymin -= width / 2
axes.set_ybound(ymin, ymax)
axes.set_xlabel('time (s)')
axes.set_ylabel(r'differential power ($\mu$cal / s)')
axes.legend(loc='upper center', bbox_to_anchor=(0.5, 0.1), ncol=4, fancybox=True, shadow=True, markerscale=3, prop={'size': 6})
axes.set_title(self.data_filename)
canvas.print_figure(self.name + '-baseline.png', dpi=500)
def _plot_baseline_subtracted(self, x, y, raw=True, baseline=True):
"""Plot the baseline-subtracted data"""
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
figure = Figure()
canvas = FigureCanvas(figure)
axes1 = figure.add_subplot(1, 1, 1, axisbg='whitesmoke')
# Points for fit
axes1.plot(x, y, 'o', color='deepskyblue', markersize=2, alpha=1, label='Baseline-subtracted data')
axes1.set_xlabel('time (s)')
axes1.set_ylabel(r' corr. differential power ($\mu$cal / s)')
axes1.legend(loc='upper center', bbox_to_anchor=(0.2, 0.95), ncol=1, fancybox=True, shadow=True, markerscale=3, prop={'size': 6})
if raw:
axes2 = axes1.twinx()
axes2.plot(x, self.differential_power, 'o', color='gray', markersize=2, alpha=.3, label='Raw data')
axes2.set_ylabel(r'raw differential power ($\mu$cal / s)')
axes2.legend(loc='upper center', bbox_to_anchor=(0.8, 0.95), ncol=1, fancybox=True, shadow=True, markerscale=3, prop={'size': 6})
if baseline:
axes2.plot(x, self.baseline_power, '-', color='black', alpha=.3, label='baseline')
axes1.set_title(self.data_filename)
canvas.print_figure(self.name + '-subtracted.png', dpi=500)
def _retrieve_fit_indices(self, frac):
"""Form list of data to fit.
"""
x = list()
y = list()
fit_indices = list()
# Add data prior to first injection
for index in range(0, self.injections[0].first_index):
x.append(self.filter_period_end_time[index] / ureg.second)
y.append(self.differential_power[index] / (ureg.microcalorie / ureg.second))
fit_indices.append(index)
# Add last x% of each injection.
for injection in self.injections:
start_index = injection.first_index
end_index = injection.last_index + 1
start_index = end_index - int((end_index - start_index) * frac)
for index in range(start_index, end_index):
x.append(self.filter_period_end_time[index] / ureg.second)
y.append(self.differential_power[index] / (ureg.microcalorie / ureg.second))
fit_indices.append(index)
x = numpy.array(x)
y = numpy.array(y)
fit_indices = numpy.array(fit_indices)
return fit_indices, x, y
def fit_gaussian_process_baseline(self, frac=0.3, theta0=4.7, nugget=1.0, plot=True):
"""
Gaussian Process fit of baseline.
frac = fraction of baseline to use for fit
:return:
:rtype:
"""
from sklearn import gaussian_process
# Retrieve a reduced set of data
# (data up until first injection and x percent before every injection)
fit_indices, x, y = self._retrieve_fit_indices(frac)
# sklearn requires a 2d array, so make it pseudo 2d
full_x = numpy.atleast_2d(self.filter_period_end_time).T
x = numpy.atleast_2d(x).T
full_y = numpy.array(self.differential_power).T
y = numpy.array(y).T
gp = gaussian_process.GaussianProcess(regr='quadratic',
corr='squared_exponential',
theta0=theta0,
nugget=nugget,
random_start=100)
# Fit only based on the reduced set of the data
gp.fit(x, y)
y_pred, mean_squared_error = gp.predict(full_x, eval_MSE=True)
sigma = numpy.sqrt(mean_squared_error)
self.baseline_power = Quantity(y_pred, 'microcalories per second')
self.baseline_fit_data = {'x': full_x, 'y': y_pred, 'indices': fit_indices}
self.baseline_subtracted = self.differential_power - self.baseline_power
if plot:
self._plot_gaussian_baseline(full_x, full_y, sigma, x, y, y_pred)
self._plot_baseline_subtracted(full_x, self.baseline_subtracted)
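# Usage sketch (hypothetical 'experiment' instance): the baseline can be
# refit after construction with a different fraction of each injection, e.g.
#   experiment.fit_gaussian_process_baseline(frac=0.5, plot=False)
#   experiment.integrate_heat()
# to refresh the evolved heats with the new baseline.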
def integrate_heat(self):
"""
Compute the heat evolved from each injection from differental power timeseries data.
"""
# Integrate heat produced by each injection.
for injection in self.injections:
# determine initial and final samples for injection i
# index of timepoint for first filtered differential power measurement
first_index = injection.first_index
# index of timepoint for last filtered differential power measurement
last_index = injection.last_index
# Determine excess energy input into sample cell (with respect to reference cell) throughout this injection and measurement period.
excess_energy_input = injection.filter_period * (
self.differential_power[
first_index:(last_index + 1)] - self.baseline_power[
first_index:(last_index + 1)]).sum()
logger.debug("injection %d, filter period %f s, integrating sample %d to %d" % (
injection.number,
injection.filter_period / ureg.second,
first_index,
last_index))
# Determine total heat evolved.
evolved_heat = - excess_energy_input
# Store heat evolved from this injection.
injection.evolved_heat = evolved_heat
return
class ExperimentYaml(BaseExperiment):
@staticmethod
def _parse_yaml(yaml_filename):
"""Open the yaml file and read is contents"""
import yaml
with open(yaml_filename, 'r') as infile:
# Experiment parameters
yaml_input = yaml.load(infile)
infile.close()
return yaml_input
def __init__(self, yaml_filename, experiment_name, instrument):
"""
Initialize an experiment from a Microcal VP-ITC formatted .itc file.
ARGUMENTS
data_filename (String) - the filename of the Microcal VP-ITC formatted .itc file to initialize the experiment from
TODO
* Add support for other formats of datafiles (XML, etc.).
"""
# Initialize.
super(ExperimentYaml, self).__init__(yaml_filename, experiment_name, instrument)
# the source filename from which data is read
# concentrations of various species in syringe
self.syringe_contents = dict()
self.syringe_concentration = dict()
# concentrations of various species in sample cell
self.sample_cell_contents = dict()
self.cell_concentration = dict()
# list of injections (and their associated data)
self.injections = list()
# time at end of filtering period
self.name = experiment_name
# Store the datafile filename.
self.data_filename = yaml_filename
# Check to make sure we can access the file.
if not os.access(yaml_filename, os.R_OK):
raise IOError("The file '%s' cannot be opened." % yaml_filename)
yaml_input = self._parse_yaml(yaml_filename)
# TODO more preliminary dict entry validations
if len(yaml_input['injection_heats']) != len(yaml_input['injection_volumes']):
raise ValueError('Mismatch between number of heats and volumes per injection in %s.' % yaml_filename)
# Extract and store data about the experiment.
self.number_of_injections = len(yaml_input['injection_heats'])
self.temperature = Quantity(yaml_input['temperature'],
yaml_input['temperature_unit'])
# Store the stated syringe concentration(s)
for key in yaml_input['syringe_concentrations'].keys():
self.syringe_concentration[key] = Quantity(yaml_input['syringe_concentrations'][key],
yaml_input['concentration_unit']).to('millimole per liter')
# Store the stated cell concentration(s)
for key in yaml_input['sample_cell_concentrations'].keys():
self.cell_concentration[key] = Quantity(yaml_input['sample_cell_concentrations'][key],
yaml_input['concentration_unit']).to('millimole per liter')
# Extract and store metadata about injections.
for index, (heat, volume) in enumerate(zip(yaml_input['injection_heats'], yaml_input['injection_volumes']), start=1):
# Extract data for injection and apply appropriate unit conversions.
# Entering 0.0 for any values not in the yaml.
# TODO some values are set in integrate_heat functions, but we
# currently ignore all but the heat
injectiondict = dict()
injectiondict['number'] = index
injectiondict['volume'] = Quantity(volume, yaml_input['volume_unit'])
injectiondict['duration'] = 0.0 * ureg.second
# time between beginning of injection and beginning of next
# injection
injectiondict['spacing'] = 0.0 * ureg.second
# time over which data channel is averaged to produce a single
# measurement
injectiondict['filter_period'] = 0.0 * ureg.second
# Possible input includes heat / moles of injectant, or raw heat
injectiondict['titrant_amount'] = sum(
self.syringe_concentration.values()) * Quantity(volume, yaml_input['volume_unit'])
try:
injectiondict['evolved_heat'] = Quantity(heat, yaml_input['heat_unit']).to('microcalorie')
except DimensionalityError:
# TODO This is probably only really correct for one syringe component
# Multipy by number of moles injected
evolved_heat = Quantity(heat, yaml_input['heat_unit']) * (Quantity(volume, yaml_input['volume_unit']) * sum(self.syringe_concentration.values()))
injectiondict['evolved_heat'] = evolved_heat.to('microcalorie')
# Store injection.
self.injections.append(Injection(**injectiondict))
self.observed_injection_heats = Quantity(numpy.zeros(len(self.injections)), 'microcalorie')
self.injection_volumes = Quantity(numpy.zeros(len(self.injections)), 'milliliter')
for index, injection in enumerate(self.injections):
self.observed_injection_heats[index] = injection.evolved_heat
self.injection_volumes[index] = injection.volume
return
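# Illustrative sketch (reconstructed from the keys read above) of a YAML file
# ExperimentYaml accepts; all values are arbitrary:
#   temperature: 25.0
#   temperature_unit: celsius
#   heat_unit: microcalorie
#   volume_unit: microliter
#   concentration_unit: millimole per liter
#   syringe_concentrations: {ligand: 1.0}
#   sample_cell_concentrations: {macromolecule: 0.1}
#   injection_volumes: [10.0, 10.0]
#   injection_heats: [-12.3, -11.8]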
class ExperimentOrigin(BaseExperiment):
pass
|
MehtapIsik/bayesian-itc
|
bitc/experiments.py
|
Python
|
gpl-3.0
| 36,275
|
[
"Gaussian"
] |
da6ca78cf8da2254630e5eaa4a820e523bf061dde68993f58a234a94ea363b8c
|
'''
Helper to perform MMD tests.
Assumes you have the feature/bigtest branch of shogun installed (including the
modular Python bindings).
'''
from __future__ import division, print_function
import os
import numpy as np
from scipy import linalg, stats
try:
import modshogun as sg
except ImportError: # new versions just call it shogun
import shogun as sg
if 'OMP_NUM_THREADS' in os.environ:
num_threads = int(os.environ['OMP_NUM_THREADS'])
else:
import multiprocessing as mp
num_threads = mp.cpu_count()
sg.get_global_parallel().set_num_threads(num_threads)
def rbf_mmd_test(X, Y, bandwidth='median', null_samples=1000,
median_samples=1000, cache_size=32):
'''
Run an MMD test using a Gaussian kernel.
Parameters
----------
X : row-instance feature array
Y : row-instance feature array
bandwidth : float or 'median'
The bandwidth of the RBF kernel (sigma).
If 'median', estimates the median pairwise distance in the
aggregate sample and uses that.
null_samples : int
How many times to sample from the null distribution.
median_samples : int
How many points to use for estimating the bandwidth.
Returns
-------
p_val : float
The obtained p value of the test.
stat : float
The test statistic.
samps : array of length null_samples
The samples from the null distribution.
bandwidth : float
The kernel bandwidth that was used.
'''
if bandwidth == 'median':
from sklearn.metrics.pairwise import euclidean_distances
sub = lambda feats, n: feats[np.random.choice(
feats.shape[0], min(feats.shape[0], n), replace=False)]
Z = np.r_[sub(X, median_samples // 2), sub(Y, median_samples // 2)]
D2 = euclidean_distances(Z, squared=True)
upper = D2[np.triu_indices_from(D2, k=1)]
kernel_width = np.median(upper, overwrite_input=True)
bandwidth = np.sqrt(kernel_width / 2)
# sigma = median / sqrt(2); works better, sometimes at least
del Z, D2, upper
else:
kernel_width = 2 * bandwidth**2
mmd = sg.QuadraticTimeMMD()
mmd.set_p(sg.RealFeatures(X.T.astype(np.float64)))
mmd.set_q(sg.RealFeatures(Y.T.astype(np.float64)))
mmd.set_kernel(sg.GaussianKernel(cache_size, kernel_width))
mmd.set_num_null_samples(null_samples)
samps = mmd.sample_null()
stat = mmd.compute_statistic()
p_val = np.mean(stat <= samps)
return p_val, stat, samps, bandwidth
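# Usage sketch (illustrative, not part of the original module): two Gaussian
# samples that differ in mean; sizes and dimension are arbitrary choices.
def _example_rbf_mmd_test(seed=0):
    rng = np.random.RandomState(seed)
    X = rng.randn(200, 3)        # draws from N(0, I)
    Y = rng.randn(200, 3) + 0.5  # draws from a mean-shifted Gaussian
    # median-heuristic bandwidth and 1000 null samples (the defaults above)
    return rbf_mmd_test(X, Y)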
def linear_mmd_test(X, Y, null_samples=1000):
mmd = sg.QuadraticTimeMMD()
mmd.set_p(sg.RealFeatures(X.T.astype(np.float64)))
mmd.set_q(sg.RealFeatures(Y.T.astype(np.float64)))
mmd.set_kernel(sg.LinearKernel())
mmd.set_num_null_samples(null_samples)
samps = mmd.sample_null()
stat = mmd.compute_statistic()
p_val = np.mean(stat <= samps)
return p_val, stat, samps
def linear_hotelling_test(X, Y, reg=0):
n, p = X.shape
Z = X - Y
Z_bar = Z.mean(axis=0)
Z -= Z_bar
S = Z.T.dot(Z)
S /= (n - 1)
if reg:
S.flat[::p + 1] += reg  # add the ridge to the diagonal of S
# z' inv(S) z = z' inv(L L') z = z' inv(L)' inv(L) z = ||inv(L) z||^2
L = linalg.cholesky(S, lower=True, overwrite_a=True)
Linv_Z_bar = linalg.solve_triangular(L, Z_bar, lower=True, overwrite_b=True)
stat = n * Linv_Z_bar.dot(Linv_Z_bar)
p_val = stats.chi2.sf(stat, p)
return p_val, stat
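# Usage sketch (illustrative): paired samples with a small mean shift; the
# ridge term keeps the estimated covariance well conditioned.
def _example_linear_hotelling_test(seed=0):
    rng = np.random.RandomState(seed)
    X = rng.randn(100, 5)
    Y = X + 0.1 * rng.randn(100, 5) + 0.05  # paired, slightly shifted samples
    return linear_hotelling_test(X, Y, reg=1e-6)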
|
dougalsutherland/opt-mmd
|
two_sample/mmd_test.py
|
Python
|
bsd-3-clause
| 3,458
|
[
"Gaussian"
] |
8c71c8972d2e9cc5a0f5ba77a219ac52d49b55da4cbfde6e3e1cc71a693899f0
|
from __future__ import unicode_literals
import logging
import warnings
from django.contrib.auth.models import User
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.db.models import Count, Q
from django.utils import six, timezone
from django.utils.translation import ugettext_lazy as _
from djblets.cache.backend import make_cache_key
from djblets.db.fields import CounterField, ModificationTimestampField
from djblets.db.query import get_object_or_none
from reviewboard.attachments.models import (FileAttachment,
FileAttachmentHistory)
from reviewboard.changedescs.models import ChangeDescription
from reviewboard.diffviewer.models import DiffSet, DiffSetHistory
from reviewboard.reviews.errors import (PermissionError,
PublishError)
from reviewboard.reviews.fields import get_review_request_field
from reviewboard.reviews.managers import ReviewRequestManager
from reviewboard.reviews.models.base_comment import BaseComment
from reviewboard.reviews.models.base_review_request_details import \
BaseReviewRequestDetails
from reviewboard.reviews.models.group import Group
from reviewboard.reviews.models.screenshot import Screenshot
from reviewboard.reviews.signals import (review_request_closed,
review_request_closing,
review_request_published,
review_request_publishing,
review_request_reopened,
review_request_reopening)
from reviewboard.scmtools.models import Repository
from reviewboard.signals import deprecated_signal_argument
from reviewboard.site.models import LocalSite
from reviewboard.site.urlresolvers import local_site_reverse
def fetch_issue_counts(review_request, extra_query=None):
"""Fetches all issue counts for a review request.
This queries all opened issues across all public comments on a
review request and returns them.
"""
issue_counts = {
BaseComment.OPEN: 0,
BaseComment.RESOLVED: 0,
BaseComment.DROPPED: 0
}
q = Q(public=True) & Q(base_reply_to__isnull=True)
if extra_query:
q = q & extra_query
issue_statuses = review_request.reviews.filter(q).values(
'comments__pk',
'comments__issue_opened',
'comments__issue_status',
'file_attachment_comments__pk',
'file_attachment_comments__issue_opened',
'file_attachment_comments__issue_status',
'general_comments__pk',
'general_comments__issue_opened',
'general_comments__issue_status',
'screenshot_comments__pk',
'screenshot_comments__issue_opened',
'screenshot_comments__issue_status')
if issue_statuses:
comment_fields = {
'comments': set(),
'file_attachment_comments': set(),
'general_comments': set(),
'screenshot_comments': set(),
}
for issue_fields in issue_statuses:
for key, comments in six.iteritems(comment_fields):
issue_opened = issue_fields[key + '__issue_opened']
comment_pk = issue_fields[key + '__pk']
if issue_opened and comment_pk not in comments:
comments.add(comment_pk)
issue_status = issue_fields[key + '__issue_status']
if issue_status:
issue_counts[issue_status] += 1
logging.debug('Calculated issue counts for review request ID %s '
'across %s review(s): Resulting counts = %r; '
'DB values = %r; Field IDs = %r',
review_request.pk, len(issue_statuses), issue_counts,
issue_statuses, comment_fields)
return issue_counts
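# Usage sketch (hypothetical): restrict the counts to one reviewer's comments.
#   counts = fetch_issue_counts(review_request,
#                               extra_query=Q(user__username='alice'))
#   open_issues = counts[BaseComment.OPEN]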
def _initialize_issue_counts(review_request):
"""Initializes the issue counter fields for a review request.
This will fetch all the issue counts and populate the counter fields.
Due to the way that CounterField works, this will only be called once
per review request, instead of once per field, due to all the fields
being set at once. This will also take care of the actual saving of
fields, rather than leaving that up to CounterField, in order to save
all at once.
"""
if review_request.pk is None:
return 0
issue_counts = fetch_issue_counts(review_request)
review_request.issue_open_count = issue_counts[BaseComment.OPEN]
review_request.issue_resolved_count = issue_counts[BaseComment.RESOLVED]
review_request.issue_dropped_count = issue_counts[BaseComment.DROPPED]
review_request.save(update_fields=[
'issue_open_count',
'issue_resolved_count',
'issue_dropped_count'
])
# Tell CounterField not to set or save any values.
return None
class ReviewRequest(BaseReviewRequestDetails):
"""A review request.
This is one of the primary models in Review Board. Almost everything
is associated with a review request.
The ReviewRequest model contains detailed information on a review
request. Some fields are user-modifiable, while some are used for
internal state.
"""
PENDING_REVIEW = "P"
SUBMITTED = "S"
DISCARDED = "D"
STATUSES = (
(PENDING_REVIEW, _('Pending Review')),
(SUBMITTED, _('Submitted')),
(DISCARDED, _('Discarded')),
)
ISSUE_COUNTER_FIELDS = {
BaseComment.OPEN: 'issue_open_count',
BaseComment.RESOLVED: 'issue_resolved_count',
BaseComment.DROPPED: 'issue_dropped_count',
}
summary = models.CharField(
_("summary"),
max_length=BaseReviewRequestDetails.MAX_SUMMARY_LENGTH)
submitter = models.ForeignKey(User, verbose_name=_("submitter"),
related_name="review_requests")
time_added = models.DateTimeField(_("time added"), default=timezone.now)
last_updated = ModificationTimestampField(_("last updated"))
status = models.CharField(_("status"), max_length=1, choices=STATUSES,
db_index=True)
public = models.BooleanField(_("public"), default=False)
changenum = models.PositiveIntegerField(_("change number"), blank=True,
null=True, db_index=True)
repository = models.ForeignKey(Repository,
related_name="review_requests",
verbose_name=_("repository"),
null=True,
blank=True)
email_message_id = models.CharField(_("e-mail message ID"), max_length=255,
blank=True, null=True)
time_emailed = models.DateTimeField(_("time e-mailed"), null=True,
default=None, blank=True)
diffset_history = models.ForeignKey(DiffSetHistory,
related_name="review_request",
verbose_name=_('diff set history'),
blank=True)
target_groups = models.ManyToManyField(
Group,
related_name="review_requests",
verbose_name=_("target groups"),
blank=True)
target_people = models.ManyToManyField(
User,
verbose_name=_("target people"),
related_name="directed_review_requests",
blank=True)
screenshots = models.ManyToManyField(
Screenshot,
related_name="review_request",
verbose_name=_("screenshots"),
blank=True)
inactive_screenshots = models.ManyToManyField(
Screenshot,
verbose_name=_("inactive screenshots"),
help_text=_("A list of screenshots that used to be but are no "
"longer associated with this review request."),
related_name="inactive_review_request",
blank=True)
file_attachments = models.ManyToManyField(
FileAttachment,
related_name="review_request",
verbose_name=_("file attachments"),
blank=True)
inactive_file_attachments = models.ManyToManyField(
FileAttachment,
verbose_name=_("inactive file attachments"),
help_text=_("A list of file attachments that used to be but are no "
"longer associated with this review request."),
related_name="inactive_review_request",
blank=True)
file_attachment_histories = models.ManyToManyField(
FileAttachmentHistory,
related_name='review_request',
verbose_name=_('file attachment histories'),
blank=True)
changedescs = models.ManyToManyField(
ChangeDescription,
verbose_name=_("change descriptions"),
related_name="review_request",
blank=True)
depends_on = models.ManyToManyField('ReviewRequest',
blank=True, null=True,
verbose_name=_('Dependencies'),
related_name='blocks')
# Review-related information
# The timestamp representing the last public activity of a review.
# This includes publishing reviews and manipulating issues.
last_review_activity_timestamp = models.DateTimeField(
_("last review activity timestamp"),
db_column='last_review_timestamp',
null=True,
default=None,
blank=True)
shipit_count = CounterField(_("ship-it count"), default=0)
issue_open_count = CounterField(
_('open issue count'),
initializer=_initialize_issue_counts)
issue_resolved_count = CounterField(
_('resolved issue count'),
initializer=_initialize_issue_counts)
issue_dropped_count = CounterField(
_('dropped issue count'),
initializer=_initialize_issue_counts)
local_site = models.ForeignKey(LocalSite, blank=True, null=True,
related_name='review_requests')
local_id = models.IntegerField('site-local ID', blank=True, null=True)
# Set this up with the ReviewRequestManager
objects = ReviewRequestManager()
@staticmethod
def status_to_string(status):
"""Return a string representation of a review request status.
Args:
status (unicode):
A single-character string representing the status.
Returns:
unicode:
A longer string representation of the status suitable for use in
the API.
"""
if status == ReviewRequest.PENDING_REVIEW:
return 'pending'
elif status == ReviewRequest.SUBMITTED:
return 'submitted'
elif status == ReviewRequest.DISCARDED:
return 'discarded'
elif status is None:
return 'all'
else:
raise ValueError('Invalid status "%s"' % status)
@staticmethod
def string_to_status(status):
"""Return a review request status from an API string.
Args:
status (unicode):
A string from the API representing the status.
Returns:
unicode:
A single-character string representing the status, suitable for
storage in the ``status`` field.
"""
if status == 'pending':
return ReviewRequest.PENDING_REVIEW
elif status == 'submitted':
return ReviewRequest.SUBMITTED
elif status == 'discarded':
return ReviewRequest.DISCARDED
elif status == 'all':
return None
else:
raise ValueError('Invalid status string "%s"' % status)
def get_commit(self):
if self.commit_id is not None:
return self.commit_id
elif self.changenum is not None:
self.commit_id = six.text_type(self.changenum)
# Update the state in the database, but don't save this
# model, or we could end up writing back incomplete state
# (if we haven't fully loaded everything yet). This affects
# docs.db generation, and may cause problems in the wild.
ReviewRequest.objects.filter(pk=self.pk).update(
commit_id=six.text_type(self.changenum))
return self.commit_id
return None
def set_commit(self, commit_id):
try:
self.changenum = int(commit_id)
except (TypeError, ValueError):
pass
self.commit_id = commit_id
commit = property(get_commit, set_commit)
@property
def approved(self):
"""Returns whether or not a review request is approved by reviewers.
On a default installation, a review request is approved if it has
at least one Ship It!, and doesn't have any open issues.
Extensions may customize approval by providing their own
ReviewRequestApprovalHook.
"""
if not hasattr(self, '_approved'):
self._calculate_approval()
return self._approved
@property
def approval_failure(self):
"""Returns the error indicating why a review request isn't approved.
If ``approved`` is ``False``, this will provide the text describing
why it wasn't approved.
Extensions may customize approval by providing their own
ReviewRequestApprovalHook.
"""
if not hasattr(self, '_approval_failure'):
self._calculate_approval()
return self._approval_failure
def get_participants(self):
"""Returns a list of users who have discussed this review request."""
# See the comment in Review.get_participants for this list
# comprehension.
return [u for review in self.reviews.all()
for u in review.participants]
participants = property(get_participants)
def get_new_reviews(self, user):
"""Returns all new reviews since last viewing this review request.
This will factor in the time the user last visited the review request,
and find any reviews that have been added or updated since.
"""
if user.is_authenticated():
# If this ReviewRequest was queried using with_counts=True,
# then we should know the new review count and can use this to
# decide whether we have anything at all to show.
if hasattr(self, "new_review_count") and self.new_review_count > 0:
query = self.visits.filter(user=user)
try:
visit = query[0]
return self.reviews.filter(
public=True,
timestamp__gt=visit.timestamp).exclude(user=user)
except IndexError:
# This visit doesn't exist, so bail.
pass
return self.reviews.get_empty_query_set()
def get_display_id(self):
"""Returns the ID that should be exposed to the user."""
if self.local_site_id:
return self.local_id
else:
return self.id
display_id = property(get_display_id)
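# Sketch: prefer ``display_id`` over ``pk`` in anything user-facing, since
# requests bound to a LocalSite use a site-local counter (values here are
# hypothetical):
#
#     >>> review_request.id, review_request.display_id
#     (9001, 42)   # LocalSite-bound request with local_id == 42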
def get_public_reviews(self):
"""Returns all public top-level reviews for this review request."""
return self.reviews.filter(public=True, base_reply_to__isnull=True)
def is_accessible_by(self, user, local_site=None, request=None,
silent=False):
"""Returns whether or not the user can read this review request.
This performs several checks to ensure that the user has access.
The user has access if:
* The review request is public or the user can modify it (either
by being an owner or having special permissions).
* The repository is public or the user has access to it (either by
being explicitly on the allowed users list, or by being a member
of a review group on that list).
* The user is listed as a requested reviewer or the user has access
to one or more groups listed as requested reviewers (either by
being a member of an invite-only group, or the group being public).
"""
# Users always have access to their own review requests.
if self.submitter == user:
return True
if not self.public and not self.is_mutable_by(user):
if not silent:
logging.warning('Review Request pk=%d (display_id=%d) is not '
'accessible by user %s because it has not yet '
'been published.',
self.pk, self.display_id, user,
request=request)
return False
if self.repository and not self.repository.is_accessible_by(user):
if not silent:
logging.warning('Review Request pk=%d (display_id=%d) is not '
'accessible by user %s because its repository '
'is not accessible by that user.',
self.pk, self.display_id, user,
request=request)
return False
if local_site and not local_site.is_accessible_by(user):
if not silent:
logging.warning('Review Request pk=%d (display_id=%d) is not '
'accessible by user %s because its local_site '
'is not accessible by that user.',
self.pk, self.display_id, user,
request=request)
return False
if (user.is_authenticated() and
self.target_people.filter(pk=user.pk).count() > 0):
return True
groups = list(self.target_groups.all())
if not groups:
return True
# We specifically iterate over these instead of making it part
# of the query in order to keep the logic in Group, and to allow
# for future expansion (extensions, more advanced policy).
#
# We're looking for at least one group that the user has access
# to. If they can access any of the groups, then they have access
# to the review request.
for group in groups:
if group.is_accessible_by(user, silent=silent):
return True
if not silent:
logging.warning('Review Request pk=%d (display_id=%d) is not '
'accessible by user %s because they are not '
'directly listed as a reviewer, and none of '
'the target groups are accessible by that user.',
self.pk, self.display_id, user, request=request)
return False
def is_mutable_by(self, user):
"""Returns whether the user can modify this review request."""
return (self.submitter == user or
user.has_perm('reviews.can_edit_reviewrequest',
self.local_site))
def is_status_mutable_by(self, user):
"""Returns whether the user can modify this review request's status."""
return (self.submitter == user or
user.has_perm('reviews.can_change_status', self.local_site))
def is_deletable_by(self, user):
"""Returns whether the user can delete this review request."""
return user.has_perm('reviews.delete_reviewrequest')
def get_draft(self, user=None):
"""Returns the draft of the review request.
If a user is specified, then the draft will be returned only if it is
owned by that user. Otherwise, None will be returned.
"""
if not user:
return get_object_or_none(self.draft)
elif user.is_authenticated():
return get_object_or_none(self.draft,
review_request__submitter=user)
return None
def get_pending_review(self, user):
"""Returns the pending review owned by the specified user, if any.
This will return an actual review, not a reply to a review.
"""
from reviewboard.reviews.models.review import Review
return Review.objects.get_pending_review(self, user)
def get_last_activity(self, diffsets=None, reviews=None):
"""Returns the last public activity information on the review request.
This will return the last object updated, along with the timestamp
of that object. It can be used to judge whether something on a
review request has been made public more recently.
"""
timestamp = self.last_updated
updated_object = self
# Check if the diff was updated along with this.
if not diffsets and self.repository_id:
latest_diffset = self.get_latest_diffset()
diffsets = []
if latest_diffset:
diffsets.append(latest_diffset)
if diffsets:
for diffset in diffsets:
if diffset.timestamp >= timestamp:
timestamp = diffset.timestamp
updated_object = diffset
# Check for the latest review or reply.
if not reviews:
try:
reviews = [self.reviews.filter(public=True).latest()]
except ObjectDoesNotExist:
reviews = []
for review in reviews:
if review.public and review.timestamp >= timestamp:
timestamp = review.timestamp
updated_object = review
return timestamp, updated_object
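# Usage sketch (hypothetical objects): decide whether anything was made
# public after a given point in time.
#
#     >>> timestamp, updated_object = review_request.get_last_activity()
#     >>> timestamp > last_email_sent   # ``last_email_sent`` is assumed
#     True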
def changeset_is_pending(self, commit_id):
"""Returns whether the associated changeset is pending commit.
For repositories that support it, this will return whether the
associated changeset is pending commit. This requires server-side
knowledge of the change.
"""
cache_key = make_cache_key(
'commit-id-is-pending-%d-%s' % (self.pk, commit_id))
cached_values = cache.get(cache_key)
if cached_values:
return cached_values
is_pending = False
scmtool = self.repository.get_scmtool()
if (scmtool.supports_pending_changesets and
commit_id is not None):
changeset = scmtool.get_changeset(commit_id, allow_empty=True)
if changeset:
is_pending = changeset.pending
new_commit_id = six.text_type(changeset.changenum)
if commit_id != new_commit_id:
self.commit_id = new_commit_id
self.save(update_fields=['commit_id'])
commit_id = new_commit_id
draft = self.get_draft()
if draft:
draft.commit_id = new_commit_id
draft.save(update_fields=['commit_id'])
# If the changeset is pending, we cache for only one minute to
# speed things up a little bit when navigating through
# different pages. If the changeset is no longer pending, cache
# for the full default time.
if is_pending:
cache.set(cache_key, (is_pending, commit_id), 60)
else:
cache.set(cache_key, (is_pending, commit_id))
return is_pending, commit_id
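# Usage sketch (assumes a repository whose SCMTool supports pending
# changesets, such as Perforce). Pending results are cached for only
# 60 seconds; settled results use the default cache timeout:
#
#     >>> is_pending, commit_id = review_request.changeset_is_pending('12345')
#     >>> is_pending
#     True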
def get_absolute_url(self):
if self.local_site:
local_site_name = self.local_site.name
else:
local_site_name = None
return local_site_reverse(
'review-request-detail',
local_site_name=local_site_name,
kwargs={'review_request_id': self.display_id})
def get_diffsets(self):
"""Returns a list of all diffsets on this review request.
This will also fetch all associated FileDiffs, as well as a count
of the number of files (stored in DiffSet.file_count).
"""
if not self.repository_id:
return []
if not hasattr(self, '_diffsets'):
self._diffsets = list(
DiffSet.objects
.filter(history__pk=self.diffset_history_id)
.annotate(file_count=Count('files'))
.prefetch_related('files'))
return self._diffsets
def get_latest_diffset(self):
"""Returns the latest diffset for this review request."""
try:
return DiffSet.objects.filter(
history=self.diffset_history_id).latest()
except DiffSet.DoesNotExist:
return None
def get_close_description(self):
"""Returns a tuple (description, is_rich_text) for the close text.
This is a helper which is used to gather the data which is rendered in
the close description boxes on various pages.
"""
# We're fetching all entries instead of just public ones because
# another query may have already prefetched the list of
# changedescs. In this case, a new filter() would result in more
# queries.
#
# Realistically, there will only ever be at most a single
# non-public change description (the current draft), so we
# wouldn't be saving much of anything with a filter.
changedescs = list(self.changedescs.all())
latest_changedesc = None
for changedesc in changedescs:
if changedesc.public:
latest_changedesc = changedesc
break
close_description = ''
is_rich_text = False
if latest_changedesc and 'status' in latest_changedesc.fields_changed:
status = latest_changedesc.fields_changed['status']['new'][0]
if status in (ReviewRequest.DISCARDED, ReviewRequest.SUBMITTED):
close_description = latest_changedesc.text
is_rich_text = latest_changedesc.rich_text
return (close_description, is_rich_text)
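# Usage sketch: feed the close banner on a detail page (the rendering
# helper named here is hypothetical):
#
#     >>> text, is_rich_text = review_request.get_close_description()
#     >>> html = render_text(text, rich=is_rich_text)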
def get_blocks(self):
"""Returns the list of review request this one blocks.
The returned value will be cached for future lookups.
"""
if not hasattr(self, '_blocks'):
self._blocks = list(self.blocks.all())
return self._blocks
def save(self, update_counts=False, old_submitter=None, **kwargs):
if update_counts or self.id is None:
self._update_counts(old_submitter)
if self.status != self.PENDING_REVIEW:
# If this is not a pending review request now, delete any
# and all ReviewRequestVisit objects.
self.visits.all().delete()
super(ReviewRequest, self).save(**kwargs)
def delete(self, **kwargs):
from reviewboard.accounts.models import Profile, LocalSiteProfile
profile, profile_is_new = \
Profile.objects.get_or_create(user=self.submitter)
if profile_is_new:
profile.save()
local_site = self.local_site
site_profile, site_profile_is_new = \
LocalSiteProfile.objects.get_or_create(user=self.submitter,
profile=profile,
local_site=local_site)
site_profile.decrement_total_outgoing_request_count()
if self.status == self.PENDING_REVIEW:
site_profile.decrement_pending_outgoing_request_count()
if self.public:
self._decrement_reviewer_counts()
super(ReviewRequest, self).delete(**kwargs)
def can_publish(self):
return not self.public or get_object_or_none(self.draft) is not None
def close(self, close_type=None, user=None, description=None,
rich_text=False, **kwargs):
"""Closes the review request.
Args:
close_type (unicode):
How the close occurs. This should be one of
:py:attr:`SUBMITTED` or :py:attr:`DISCARDED`.
user (django.contrib.auth.models.User):
The user who is closing the review request.
description (unicode):
An optional description that indicates why the review request
was closed.
rich_text (bool):
Indicates whether or not the description is rich text.
Raises:
ValueError:
The provided close type is not a valid value.
PermissionError:
The user does not have permission to close the review request.
TypeError:
Keyword arguments were supplied to the function.
.. versionchanged:: 3.0
The ``type`` argument is deprecated: ``close_type`` should be used
instead.
This method raises :py:exc:`ValueError` instead of
:py:exc:`AttributeError` when the ``close_type`` has an incorrect
value.
"""
if close_type is None:
try:
close_type = kwargs.pop('type')
except KeyError:
raise AttributeError('close_type must be provided')
warnings.warn(
'The "type" argument was deprecated in Review Board 3.0 and '
'will be removed in a future version. Use "close_type" '
'instead.'
)
if kwargs:
raise TypeError('close() does not accept keyword arguments.')
if (user and not self.is_mutable_by(user) and
not user.has_perm("reviews.can_change_status", self.local_site)):
raise PermissionError
if close_type not in [self.SUBMITTED, self.DISCARDED]:
raise ValueError("%s is not a valid close type" % type)
review_request_closing.send(
sender=type(self),
user=user,
review_request=self,
close_type=close_type,
type=deprecated_signal_argument(
signal_name='review_request_closing',
old_name='type',
new_name='close_type',
value=close_type),
description=description,
rich_text=rich_text)
draft = get_object_or_none(self.draft)
if self.status != close_type:
if (draft is not None and
not self.public and close_type == self.DISCARDED):
# Copy over the draft information if this is a private discard.
draft.copy_fields_to_request(self)
# TODO: Use the user's default for rich_text.
changedesc = ChangeDescription(public=True,
text=description or "",
rich_text=rich_text or False,
user=user or self.submitter)
status_field = get_review_request_field('status')(self)
status_field.record_change_entry(changedesc, self.status,
close_type)
changedesc.save()
self.changedescs.add(changedesc)
if close_type == self.SUBMITTED:
if not self.public:
raise PublishError("The draft must be public first.")
else:
self.commit_id = None
self.status = close_type
self.save(update_counts=True)
review_request_closed.send(
sender=type(self),
user=user,
review_request=self,
close_type=close_type,
type=deprecated_signal_argument(
signal_name='review_request_closed',
old_name='type',
new_name='close_type',
value=close_type),
description=description,
rich_text=rich_text)
else:
# Update submission description.
changedesc = self.changedescs.filter(public=True).latest()
changedesc.timestamp = timezone.now()
changedesc.text = description or ""
changedesc.rich_text = rich_text
changedesc.save()
# Needed to renew the last-updated timestamp.
self.save()
# Delete the associated draft review request.
if draft is not None:
draft.delete()
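# Usage sketch (hypothetical user and description): close as submitted
# with a short note.
#
#     >>> review_request.close(close_type=ReviewRequest.SUBMITTED,
#     ...                      user=request.user,
#     ...                      description='Landed on release-3.0.x.')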
def reopen(self, user=None):
"""Reopens the review request for review."""
from reviewboard.reviews.models.review_request_draft import \
ReviewRequestDraft
if (user and not self.is_mutable_by(user) and
not user.has_perm("reviews.can_change_status", self.local_site)):
raise PermissionError
old_status = self.status
old_public = self.public
if old_status != self.PENDING_REVIEW:
# The reopening signal is only fired when actually making a status
# change since the main consumers (extensions) probably only care
# about changes.
review_request_reopening.send(sender=self.__class__,
user=user,
review_request=self)
changedesc = ChangeDescription(user=user or self.submitter)
status_field = get_review_request_field('status')(self)
status_field.record_change_entry(changedesc, old_status,
self.PENDING_REVIEW)
if old_status == self.DISCARDED:
# A draft is needed if reopening a discarded review request.
self.public = False
changedesc.save()
draft = ReviewRequestDraft.create(self)
draft.changedesc = changedesc
draft.save()
else:
changedesc.public = True
changedesc.save()
self.changedescs.add(changedesc)
self.status = self.PENDING_REVIEW
self.save(update_counts=True)
review_request_reopened.send(sender=self.__class__, user=user,
review_request=self,
old_status=old_status,
old_public=old_public)
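# Usage sketch: reopening a discarded request takes it private again, so
# its draft must be re-published afterwards (illustrative):
#
#     >>> review_request.reopen(user=request.user)
#     >>> review_request.public   # False if it had been discarded
#     False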
def publish(self, user, trivial=False):
"""Publishes the current draft attached to this review request.
The review request will be marked as public, and signals will be
emitted for any listeners.
"""
if not self.is_mutable_by(user):
raise PermissionError
draft = get_object_or_none(self.draft)
old_submitter = self.submitter
review_request_publishing.send(sender=self.__class__, user=user,
review_request_draft=draft)
# Decrement the counts on everything, since the sets of target
# people and groups may change. We'll increment the resulting set
# during ReviewRequest.save. This must be done before the draft is
# published, because publishing updates the target people and
# groups with new values. The decrement should not happen when
# publishing a brand-new or a discarded request, since those are
# not yet public.
if self.public:
self._decrement_reviewer_counts()
if draft is not None:
# This will in turn save the review request, so we'll be done.
try:
changes = draft.publish(self, send_notification=False,
user=user)
except Exception:
# The draft failed to publish, for one reason or another.
# Check if we need to re-increment those counters we
# previously decremented.
if self.public:
self._increment_reviewer_counts()
raise
draft.delete()
else:
changes = None
if not self.public and self.changedescs.count() == 0:
# This is a brand new review request that we're publishing
# for the first time. Set the creation timestamp to now.
self.time_added = timezone.now()
self.public = True
self.save(update_counts=True, old_submitter=old_submitter)
review_request_published.send(sender=self.__class__, user=user,
review_request=self, trivial=trivial,
changedesc=changes)
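# Usage sketch (hypothetical caller): publish the pending draft without
# triggering notifications for a trivial change.
#
#     >>> review_request.publish(user=request.user, trivial=True)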
def determine_user_for_changedesc(self, changedesc):
"""Determine the user associated with the change description.
Args:
changedesc (reviewboard.changedescs.models.ChangeDescription):
The change description.
Returns:
django.contrib.auth.models.User:
The user associated with the change description.
"""
if 'submitter' in changedesc.fields_changed:
entry = changedesc.fields_changed['submitter']['old'][0]
return User.objects.get(pk=entry[2])
user_pk = None
changes = (
self.changedescs
.filter(pk__lt=changedesc.pk)
.order_by('-pk')
)
for changedesc in changes:
if 'submitter' in changedesc.fields_changed:
user_pk = changedesc.fields_changed['submitter']['new'][0][2]
break
if user_pk:
return User.objects.get(pk=user_pk)
return self.submitter
def _update_counts(self, old_submitter):
from reviewboard.accounts.models import Profile, LocalSiteProfile
submitter_changed = (old_submitter is not None and
old_submitter != self.submitter)
profile, profile_is_new = \
Profile.objects.get_or_create(user=self.submitter)
if profile_is_new:
profile.save()
local_site = self.local_site
site_profile, site_profile_is_new = \
LocalSiteProfile.objects.get_or_create(
user=self.submitter,
profile=profile,
local_site=local_site)
if site_profile_is_new:
site_profile.save()
if self.id is None:
# This hasn't been created yet. Bump up the outgoing request
# count for the user.
site_profile.increment_total_outgoing_request_count()
old_status = None
old_public = False
else:
# We need to see if the status has changed, so that means
# finding out what's in the database.
r = ReviewRequest.objects.get(pk=self.id)
old_status = r.status
old_public = r.public
if submitter_changed:
if not site_profile_is_new:
site_profile.increment_total_outgoing_request_count()
if self.status == self.PENDING_REVIEW:
site_profile.increment_pending_outgoing_request_count()
try:
old_profile = LocalSiteProfile.objects.get(
user=old_submitter, local_site=local_site)
old_profile.decrement_total_outgoing_request_count()
if old_status == self.PENDING_REVIEW:
old_profile.decrement_pending_outgoing_request_count()
except LocalSiteProfile.DoesNotExist:
pass
if self.status == self.PENDING_REVIEW:
if old_status != self.status and not submitter_changed:
site_profile.increment_pending_outgoing_request_count()
if self.public and self.id is not None:
self._increment_reviewer_counts()
elif old_status == self.PENDING_REVIEW:
if old_status != self.status and not submitter_changed:
site_profile.decrement_pending_outgoing_request_count()
if old_public:
self._decrement_reviewer_counts()
def _increment_reviewer_counts(self):
from reviewboard.accounts.models import LocalSiteProfile
groups = self.target_groups.all()
people = self.target_people.all()
Group.incoming_request_count.increment(groups)
LocalSiteProfile.direct_incoming_request_count.increment(
LocalSiteProfile.objects.filter(user__in=people,
local_site=self.local_site))
LocalSiteProfile.total_incoming_request_count.increment(
LocalSiteProfile.objects.filter(
Q(local_site=self.local_site) &
Q(Q(user__review_groups__in=groups) |
Q(user__in=people))))
LocalSiteProfile.starred_public_request_count.increment(
LocalSiteProfile.objects.filter(
profile__starred_review_requests=self,
local_site=self.local_site))
def _decrement_reviewer_counts(self):
from reviewboard.accounts.models import LocalSiteProfile
groups = self.target_groups.all()
people = self.target_people.all()
Group.incoming_request_count.decrement(groups)
LocalSiteProfile.direct_incoming_request_count.decrement(
LocalSiteProfile.objects.filter(
user__in=people,
local_site=self.local_site))
LocalSiteProfile.total_incoming_request_count.decrement(
LocalSiteProfile.objects.filter(
Q(local_site=self.local_site) &
Q(Q(user__review_groups__in=groups) |
Q(user__in=people))))
LocalSiteProfile.starred_public_request_count.decrement(
LocalSiteProfile.objects.filter(
profile__starred_review_requests=self,
local_site=self.local_site))
def _calculate_approval(self):
"""Calculates the approval information for the review request."""
from reviewboard.extensions.hooks import ReviewRequestApprovalHook
approved = True
failure = None
if self.shipit_count == 0:
approved = False
failure = 'The review request has not been marked "Ship It!"'
elif self.issue_open_count > 0:
approved = False
failure = 'The review request has open issues.'
for hook in ReviewRequestApprovalHook.hooks:
try:
result = hook.is_approved(self, approved, failure)
if isinstance(result, tuple):
approved, failure = result
elif isinstance(result, bool):
approved = result
else:
raise ValueError('%r returned an invalid value %r from '
'is_approved'
% (hook, result))
if approved:
failure = None
except Exception as e:
extension = hook.extension
logging.error('Error when running ReviewRequestApprovalHook.'
'is_approved function in extension: "%s": %s',
extension.id, e, exc_info=1)
self._approval_failure = failure
self._approved = approved
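# A hedged extension sketch: a stricter approval policy layered on top of
# the default checks above. The hook contract (``is_approved`` returning a
# bool or a ``(bool, unicode)`` tuple) matches the loop in
# _calculate_approval; the extension class itself is hypothetical.
#
#     from reviewboard.extensions.hooks import ReviewRequestApprovalHook
#
#     class TwoShipItsApprovalHook(ReviewRequestApprovalHook):
#         def is_approved(self, review_request, prev_approved, prev_failure):
#             # Require two Ship It!'s on top of the default rules.
#             if review_request.shipit_count < 2:
#                 return False, 'Needs at least two "Ship It!" reviews.'
#             return prev_approved, prev_failure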
def get_review_request(self):
"""Returns this review request.
This is provided so that consumers can be passed either a
ReviewRequest or a ReviewRequestDraft and retrieve the actual
ReviewRequest regardless of the object.
"""
return self
class Meta:
app_label = 'reviews'
db_table = 'reviews_reviewrequest'
ordering = ['-last_updated', 'submitter', 'summary']
unique_together = (('commit_id', 'repository'),
('changenum', 'repository'),
('local_site', 'local_id'))
permissions = (
("can_change_status", "Can change status"),
("can_submit_as_another_user", "Can submit as another user"),
("can_edit_reviewrequest", "Can edit review request"),
)
verbose_name = _('Review Request')
verbose_name_plural = _('Review Requests')
| brennie/reviewboard | reviewboard/reviews/models/review_request.py | Python | mit | 44,587 | ["VisIt"] | 6296e94cd4225f58ece44f187fbe65bf7f2c7097d41a35658a9661b3d6d9ffea |