import os
import json

from mod import get_unique_name, safe_json_load, TextFileFlusher
from mod.settings import BANKS_JSON_FILE, LAST_STATE_JSON_FILE


def list_banks(brokenpedalbundles=[], shouldSave=True):
    banks = safe_json_load(BANKS_JSON_FILE, list)

    if len(banks) == 0:
        return []

    changed = False
    checkbroken = len(brokenpedalbundles) > 0
    banknames = []

    for bank in banks:
        # check for unique names
        ntitle = get_unique_name(bank['title'], banknames)
        if ntitle is not None:
            bank['title'] = ntitle
            changed = True
        banknames.append(bank['title'])

        # check for valid pedalboards
        validpedals = []
        for pb in bank['pedalboards']:
            if 'bundle' not in pb.keys() or not pb['bundle']:
                title = pb['title'].encode("ascii", "ignore").decode("ascii")
                print("Auto-removing pedalboard '%s' from bank (missing bundle)" % title)
                changed = True
                continue
            if not os.path.exists(pb['bundle']):
                bundle = pb['bundle'].encode("ascii", "ignore").decode("ascii")
                print("ERROR in banks.py: referenced pedalboard does not exist:", bundle)
                changed = True
                continue
            if checkbroken and os.path.abspath(pb['bundle']) in brokenpedalbundles:
                title = pb['title'].encode("ascii", "ignore").decode("ascii")
                print("Auto-removing pedalboard '%s' from bank (it's broken)" % title)
                changed = True
                continue
            validpedals.append(pb)

        if len(validpedals) == 0:
            title = bank['title'].encode("ascii", "ignore").decode("ascii")
            print("NOTE: bank with name '%s' does not contain any pedalboards" % title)

        bank['pedalboards'] = validpedals

    if changed and shouldSave:
        save_banks(banks)

    return banks


def save_banks(banks):
    with TextFileFlusher(BANKS_JSON_FILE) as fh:
        json.dump(banks, fh, indent=4)


def save_last_bank_and_pedalboard(bank, pedalboard):
    if bank is None:
        return
    try:
        with TextFileFlusher(LAST_STATE_JSON_FILE) as fh:
            json.dump({
                'bank': bank - 1,
                'pedalboard': pedalboard
            }, fh)
    except OSError:
        return


def get_last_bank_and_pedalboard():
    data = safe_json_load(LAST_STATE_JSON_FILE, dict)
    keys = data.keys()

    if len(keys) == 0 or "bank" not in keys or "pedalboard" not in keys or not isinstance(data['bank'], int):
        print("last state file does not exist or is corrupt")
        return (-1, None)

    return (data['bank'] + 1, data['pedalboard'])


def remove_pedalboard_from_banks(pedalboard):
    newbanks = []
    banks = safe_json_load(BANKS_JSON_FILE, list)

    for bank in banks:
        newpedalboards = []

        for oldpedalboard in bank['pedalboards']:
            if os.path.abspath(oldpedalboard['bundle']) != os.path.abspath(pedalboard):
                newpedalboards.append(oldpedalboard)

        # if there's no pedalboards left ignore this bank (ie, delete it)
        if len(newpedalboards) == 0:
            title = bank['title'].encode("ascii", "ignore").decode("ascii")
            print("NOTE: bank with name '%s' does not contain any pedalboards" % title)

        bank['pedalboards'] = newpedalboards
        newbanks.append(bank)

    save_banks(newbanks)
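
# Usage sketch (hypothetical data, not part of the original module): banks are
# stored as a list of {'title': ..., 'pedalboards': [{'title': ..., 'bundle': ...}]}
# dicts, which is the shape list_banks() validates above.
if __name__ == '__main__':
    for bank in list_banks(shouldSave=False):
        print(bank['title'], '->', [pb['title'] for pb in bank['pedalboards']])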
__author__ = 'Mathias'

import timeit


def coin_sum(amount, coins):
    if amount == 0 or len(coins) == 1:
        return 1
    else:
        different_ways = 0
        coins = sorted(coins)
        largest_coin = coins[-1]
        ways = amount // largest_coin
        for n in range(ways + 1):
            different_ways += coin_sum(amount - largest_coin * n, coins[:-1])
        return different_ways


start = timeit.default_timer()
print(coin_sum(200, [1, 2, 5, 10, 20, 50, 100, 200]))
print(timeit.default_timer() - start)
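
# A memoized variant (sketch, not in the original): the recursion above
# recomputes many subproblems; caching on (amount, number of usable coins)
# avoids that. Like the original, it assumes the smallest denomination is 1.
from functools import lru_cache


def coin_sum_cached(amount, coins):
    coins = tuple(sorted(coins))

    @lru_cache(maxsize=None)
    def count(amount, n):
        # ways to form `amount` from the n smallest denominations
        if amount == 0 or n == 1:
            return 1
        largest = coins[n - 1]
        return sum(count(amount - largest * k, n - 1)
                   for k in range(amount // largest + 1))

    return count(amount, len(coins))


print(coin_sum_cached(200, [1, 2, 5, 10, 20, 50, 100, 200]))  # same result, far fewer calls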
from trepan.processor.command.base_subcmd import DebuggerSubcommand
from pprint import pformat


class InfoReturn(DebuggerSubcommand):
    """return value

    Show the value that is to be returned from a function. This command is
    useful after a 'finish' command or stepping just after a 'return'
    statement."""

    min_abbrev = 1
    need_stack = True
    short_help = 'Show function return value'

    def run(self, args):
        # pdb checks to see if __return__ is in the frame's f_locals, which
        # doesn't work, at least on any Python I am aware of back to 2.4.
        # Testing on the event, however, does work.
        if self.proc.event in ['return', 'exception']:
            val = self.proc.event_arg
            formatted_val = pformat(val)
            self.msg("return value (type %s):\n\t%s" % (type(val), formatted_val))
        else:
            self.errmsg("Must be in a 'return' or 'exception' event "
                        "rather than a %s event." % self.proc.event)
        return


if __name__ == '__main__':
    from trepan.processor.command import mock, info as Minfo
    d, cp = mock.dbg_setup()
    i = Minfo.InfoCommand(cp)
    sub = InfoReturn(i)
    print(sub.run([]))
from __future__ import unicode_literals

import frappe
from frappe.model.document import Document
import json
from six import iteritems
from frappe.utils import flt, getdate
from erpnext.regional.india import state_numbers


class GSTR3BReport(Document):
    def before_save(self):
        self.get_data()

    def get_data(self):
        self.report_dict = {
            "gstin": "",
            "ret_period": "",
            "inward_sup": {
                "isup_details": [
                    {"ty": "GST", "intra": 0, "inter": 0},
                    {"ty": "NONGST", "inter": 0, "intra": 0}
                ]
            },
            "sup_details": {
                "osup_zero": {"csamt": 0, "txval": 0, "iamt": 0},
                "osup_nil_exmp": {"txval": 0},
                "osup_det": {"samt": 0, "csamt": 0, "txval": 0, "camt": 0, "iamt": 0},
                "isup_rev": {"samt": 0, "csamt": 0, "txval": 0, "camt": 0, "iamt": 0},
                "osup_nongst": {"txval": 0}
            },
            "inter_sup": {
                "unreg_details": [],
                "comp_details": [],
                "uin_details": []
            },
            "itc_elg": {
                "itc_avl": [
                    {"csamt": 0, "samt": 0, "ty": "IMPG", "camt": 0, "iamt": 0},
                    {"csamt": 0, "samt": 0, "ty": "IMPS", "camt": 0, "iamt": 0},
                    {"samt": 0, "csamt": 0, "ty": "ISRC", "camt": 0, "iamt": 0},
                    {"ty": "ISD", "iamt": 1, "camt": 1, "samt": 1, "csamt": 1},
                    {"samt": 0, "csamt": 0, "ty": "OTH", "camt": 0, "iamt": 0}
                ],
                "itc_net": {"samt": 0, "csamt": 0, "camt": 0, "iamt": 0},
                "itc_inelg": [
                    {"ty": "RUL", "iamt": 0, "camt": 0, "samt": 0, "csamt": 0},
                    {"ty": "OTH", "iamt": 0, "camt": 0, "samt": 0, "csamt": 0}
                ]
            }
        }

        self.gst_details = self.get_company_gst_details()
        self.report_dict["gstin"] = self.gst_details.get("gstin")
        self.report_dict["ret_period"] = get_period(self.month, self.year)
        self.month_no = get_period(self.month)
        self.account_heads = self.get_account_heads()

        outward_supply_tax_amounts = self.get_tax_amounts("Sales Invoice")
        inward_supply_tax_amounts = self.get_tax_amounts("Purchase Invoice", reverse_charge="Y")
        itc_details = self.get_itc_details()
        inter_state_supplies = self.get_inter_state_supplies(self.gst_details.get("gst_state"))
        inward_nil_exempt = self.get_inward_nil_exempt(self.gst_details.get("gst_state"))

        self.prepare_data("Sales Invoice", outward_supply_tax_amounts, "sup_details", "osup_det", ["Registered Regular"])
        self.prepare_data("Sales Invoice", outward_supply_tax_amounts, "sup_details", "osup_zero", ["SEZ", "Deemed Export", "Overseas"])
        self.prepare_data("Purchase Invoice", inward_supply_tax_amounts, "sup_details", "isup_rev", ["Registered Regular"], reverse_charge="Y")
        self.report_dict["sup_details"]["osup_nil_exmp"]["txval"] = flt(self.get_nil_rated_supply_value(), 2)
        self.set_itc_details(itc_details)
        self.set_inter_state_supply(inter_state_supplies)
        self.set_inward_nil_exempt(inward_nil_exempt)
        self.missing_field_invoices = self.get_missing_field_invoices()
        self.json_output = frappe.as_json(self.report_dict)

    def set_inward_nil_exempt(self, inward_nil_exempt):
        self.report_dict["inward_sup"]["isup_details"][0]["inter"] = flt(inward_nil_exempt.get("gst").get("inter"), 2)
        self.report_dict["inward_sup"]["isup_details"][0]["intra"] = flt(inward_nil_exempt.get("gst").get("intra"), 2)
        self.report_dict["inward_sup"]["isup_details"][1]["inter"] = flt(inward_nil_exempt.get("non_gst").get("inter"), 2)
        self.report_dict["inward_sup"]["isup_details"][1]["intra"] = flt(inward_nil_exempt.get("non_gst").get("intra"), 2)

    def set_itc_details(self, itc_details):
        itc_type_map = {
            'IMPG': 'Import Of Capital Goods',
            'IMPS': 'Import Of Service',
            'ISD': 'Input Service Distributor',
            'OTH': 'All Other ITC'
        }

        net_itc = self.report_dict["itc_elg"]["itc_net"]

        for d in self.report_dict["itc_elg"]["itc_avl"]:
            if d["ty"] == 'ISRC':
                reverse_charge = "Y"
            else:
                reverse_charge = "N"

            for account_head in self.account_heads:
                d["iamt"] = flt(itc_details.get((itc_type_map.get(d["ty"]), reverse_charge, account_head.get('igst_account')), {}).get("amount"), 2)
                net_itc["iamt"] += flt(d["iamt"], 2)

                d["camt"] = flt(itc_details.get((itc_type_map.get(d["ty"]), reverse_charge, account_head.get('cgst_account')), {}).get("amount"), 2)
                net_itc["camt"] += flt(d["camt"], 2)

                d["samt"] = flt(itc_details.get((itc_type_map.get(d["ty"]), reverse_charge, account_head.get('sgst_account')), {}).get("amount"), 2)
                net_itc["samt"] += flt(d["samt"], 2)

                d["csamt"] = flt(itc_details.get((itc_type_map.get(d["ty"]), reverse_charge, account_head.get('cess_account')), {}).get("amount"), 2)
                net_itc["csamt"] += flt(d["csamt"], 2)

        for account_head in self.account_heads:
            self.report_dict["itc_elg"]["itc_inelg"][1]["iamt"] = flt(itc_details.get(("Ineligible", "N", account_head.get("igst_account")), {}).get("amount"), 2)
            self.report_dict["itc_elg"]["itc_inelg"][1]["camt"] = flt(itc_details.get(("Ineligible", "N", account_head.get("cgst_account")), {}).get("amount"), 2)
            self.report_dict["itc_elg"]["itc_inelg"][1]["samt"] = flt(itc_details.get(("Ineligible", "N", account_head.get("sgst_account")), {}).get("amount"), 2)
            self.report_dict["itc_elg"]["itc_inelg"][1]["csamt"] = flt(itc_details.get(("Ineligible", "N", account_head.get("cess_account")), {}).get("amount"), 2)

    def prepare_data(self, doctype, tax_details, supply_type, supply_category, gst_category_list, reverse_charge="N"):
        account_map = {
            'sgst_account': 'samt',
            'cess_account': 'csamt',
            'cgst_account': 'camt',
            'igst_account': 'iamt'
        }

        txval = 0
        total_taxable_value = self.get_total_taxable_value(doctype, reverse_charge)

        for gst_category in gst_category_list:
            txval += total_taxable_value.get(gst_category, 0)
            for account_head in self.account_heads:
                for account_type, account_name in iteritems(account_head):
                    if account_map.get(account_type) in self.report_dict.get(supply_type).get(supply_category):
                        self.report_dict[supply_type][supply_category][account_map.get(account_type)] += \
                            flt(tax_details.get((account_name, gst_category), {}).get("amount"), 2)

        for k, v in iteritems(account_map):
            txval -= self.report_dict.get(supply_type, {}).get(supply_category, {}).get(v, 0)

        self.report_dict[supply_type][supply_category]["txval"] = flt(txval, 2)

    def set_inter_state_supply(self, inter_state_supply):
        for d in inter_state_supply.get("Unregistered", []):
            self.report_dict["inter_sup"]["unreg_details"].append(d)

        for d in inter_state_supply.get("Registered Composition", []):
            self.report_dict["inter_sup"]["comp_details"].append(d)

        for d in inter_state_supply.get("UIN Holders", []):
            self.report_dict["inter_sup"]["uin_details"].append(d)

    def get_total_taxable_value(self, doctype, reverse_charge):
        return frappe._dict(frappe.db.sql("""
            select gst_category, sum(base_grand_total) as total
            from `tab{doctype}`
            where docstatus = 1 and month(posting_date) = %s
            and year(posting_date) = %s and reverse_charge = %s
            and company = %s and company_gstin = %s
            group by gst_category
            """ #nosec
            .format(doctype=doctype),
            (self.month_no, self.year, reverse_charge, self.company, self.gst_details.get("gstin"))))

    def get_itc_details(self, reverse_charge='N'):
        itc_amount = frappe.db.sql("""
            select s.gst_category, sum(t.tax_amount) as tax_amount, t.account_head,
            s.eligibility_for_itc, s.reverse_charge
            from `tabPurchase Invoice` s, `tabPurchase Taxes and Charges` t
            where s.docstatus = 1 and t.parent = s.name and s.reverse_charge = %s
            and month(s.posting_date) = %s and year(s.posting_date) = %s
            and s.company = %s and s.company_gstin = %s
            group by t.account_head, s.gst_category, s.eligibility_for_itc
            """,
            (reverse_charge, self.month_no, self.year, self.company, self.gst_details.get("gstin")), as_dict=1)

        itc_details = {}
        for d in itc_amount:
            itc_details.setdefault((d.eligibility_for_itc, d.reverse_charge, d.account_head), {
                "amount": d.tax_amount
            })

        return itc_details

    def get_nil_rated_supply_value(self):
        return frappe.db.sql("""
            select sum(i.base_amount) as total
            from `tabSales Invoice Item` i, `tabSales Invoice` s
            where s.docstatus = 1 and i.parent = s.name and i.is_nil_exempt = 1
            and month(s.posting_date) = %s and year(s.posting_date) = %s
            and s.company = %s and s.company_gstin = %s""",
            (self.month_no, self.year, self.company, self.gst_details.get("gstin")), as_dict=1)[0].total

    def get_inter_state_supplies(self, state):
        inter_state_supply = frappe.db.sql("""
            select sum(s.grand_total) as total, t.tax_amount, a.gst_state, s.gst_category
            from `tabSales Invoice` s, `tabSales Taxes and Charges` t, `tabAddress` a
            where t.parent = s.name and s.customer_address = a.name and s.docstatus = 1
            and month(s.posting_date) = %s and year(s.posting_date) = %s
            and a.gst_state <> %s and s.company = %s and s.company_gstin = %s
            and s.gst_category in ('Unregistered', 'Registered Composition', 'UIN Holders')
            group by s.gst_category, a.state""",
            (self.month_no, self.year, state, self.company, self.gst_details.get("gstin")), as_dict=1)

        inter_state_supply_details = {}
        for d in inter_state_supply:
            inter_state_supply_details.setdefault(d.gst_category, [])
            inter_state_supply_details[d.gst_category].append({
                "pos": get_state_code(d.gst_state),
                "txval": d.total,
                "iamt": d.tax_amount
            })

        return inter_state_supply_details

    def get_inward_nil_exempt(self, state):
        # parentheses around the two flags so that the period and company
        # filters apply to both; without them `or` binds too loosely
        inward_nil_exempt = frappe.db.sql("""
            select a.gst_state, sum(i.base_amount) as base_amount, i.is_nil_exempt, i.is_non_gst
            from `tabPurchase Invoice` p, `tabPurchase Invoice Item` i, `tabAddress` a
            where p.docstatus = 1 and p.name = i.parent and p.supplier_address = a.name
            and (i.is_nil_exempt = 1 or i.is_non_gst = 1)
            and month(p.posting_date) = %s and year(p.posting_date) = %s
            and p.company = %s and p.company_gstin = %s
            group by a.gst_state
            """,
            (self.month_no, self.year, self.company, self.gst_details.get("gstin")), as_dict=1)

        inward_nil_exempt_details = {
            "gst": {"intra": 0.0, "inter": 0.0},
            "non_gst": {"intra": 0.0, "inter": 0.0}
        }

        # same state means intra-state; the non-GST branches mirror the GST ones
        for d in inward_nil_exempt:
            if d.is_nil_exempt == 1 and state == d.gst_state:
                inward_nil_exempt_details["gst"]["intra"] += d.base_amount
            elif d.is_nil_exempt == 1 and state != d.gst_state:
                inward_nil_exempt_details["gst"]["inter"] += d.base_amount
            elif d.is_non_gst == 1 and state == d.gst_state:
                inward_nil_exempt_details["non_gst"]["intra"] += d.base_amount
            elif d.is_non_gst == 1 and state != d.gst_state:
                inward_nil_exempt_details["non_gst"]["inter"] += d.base_amount

        return inward_nil_exempt_details

    def get_tax_amounts(self, doctype, reverse_charge="N"):
        if doctype == "Sales Invoice":
            tax_template = 'Sales Taxes and Charges'
        elif doctype == "Purchase Invoice":
            tax_template = 'Purchase Taxes and Charges'

        tax_amounts = frappe.db.sql("""
            select s.gst_category, sum(t.tax_amount) as tax_amount, t.account_head
            from `tab{doctype}` s, `tab{template}` t
            where s.docstatus = 1 and t.parent = s.name and s.reverse_charge = %s
            and month(s.posting_date) = %s and year(s.posting_date) = %s
            and s.company = %s and s.company_gstin = %s
            group by t.account_head, s.gst_category
            """ #nosec
            .format(doctype=doctype, template=tax_template),
            (reverse_charge, self.month_no, self.year, self.company, self.gst_details.get("gstin")), as_dict=1)

        tax_details = {}
        for d in tax_amounts:
            tax_details.setdefault((d.account_head, d.gst_category), {
                "amount": d.get("tax_amount"),
            })

        return tax_details

    def get_company_gst_details(self):
        gst_details = frappe.get_all("Address",
            fields=["gstin", "gst_state", "gst_state_number"],
            filters={"name": self.company_address})

        if gst_details:
            return gst_details[0]
        else:
            frappe.throw("Please enter GSTIN and state for the Company Address {0}".format(self.company_address))

    def get_account_heads(self):
        account_heads = frappe.get_all("GST Account",
            fields=["cgst_account", "sgst_account", "igst_account", "cess_account"],
            filters={"company": self.company})

        if account_heads:
            return account_heads
        else:
            frappe.throw("Please set account heads in GST Settings for Company {0}".format(self.company))

    def get_missing_field_invoices(self):
        missing_field_invoices = []

        for doctype in ["Sales Invoice", "Purchase Invoice"]:
            if doctype == "Sales Invoice":
                party_type = 'Customer'
                party = 'customer'
            else:
                party_type = 'Supplier'
                party = 'supplier'

            docnames = frappe.db.sql("""
                select t1.name from `tab{doctype}` t1, `tab{party_type}` t2
                where t1.docstatus = 1 and month(t1.posting_date) = %s and year(t1.posting_date) = %s
                and t1.company = %s and t1.place_of_supply IS NULL and t1.{party} = t2.name
                and t2.gst_category != 'Overseas'
                """.format(doctype=doctype, party_type=party_type, party=party),
                (self.month_no, self.year, self.company), as_dict=1)  #nosec

            for d in docnames:
                missing_field_invoices.append(d.name)

        return ",".join(missing_field_invoices)


def get_state_code(state):
    state_code = state_numbers.get(state)
    return state_code


def get_period(month, year=None):
    month_no = {
        "January": 1, "February": 2, "March": 3, "April": 4,
        "May": 5, "June": 6, "July": 7, "August": 8,
        "September": 9, "October": 10, "November": 11, "December": 12
    }.get(month)

    if year:
        return str(month_no).zfill(2) + str(year)
    else:
        return month_no


@frappe.whitelist()
def view_report(name):
    json_data = frappe.get_value("GSTR 3B Report", name, 'json_output')
    return json.loads(json_data)


@frappe.whitelist()
def make_json(name):
    json_data = frappe.get_value("GSTR 3B Report", name, 'json_output')
    file_name = "GST3B.json"
    frappe.local.response.filename = file_name
    frappe.local.response.filecontent = json_data
    frappe.local.response.type = "download"
from functools import reduce

import ciso8601
import codecs
import datetime
import h5py
import logging
import numpy as np
import os

from osgeo import osr
from matplotlib import colors
from matplotlib import cm
from PIL import Image

from openradar import config

UTM = 3405
DHDN = 4314
WGS84 = 4326
GOOGLE = 900913
AEQD_PROJ4 = ('+proj=aeqd +a=6378.137 +b=6356.752 +R_A'
              ' +lat_0={lat} +lon_0={lon} +x_0=0 +y_0=0')
RD = ("+proj=sterea +lat_0=52.15616055555555 +lon_0=5.38763888888889 "
      "+k=0.999908 +x_0=155000 +y_0=463000 +ellps=bessel "
      "+towgs84=565.237,50.0087,465.658,-0.406857,0.350733,-1.87035,4.0812 "
      "+units=m +no_defs")


def projection_aeqd(lat=None, lon=None):
    sr = osr.SpatialReference()
    sr.ImportFromProj4(str(AEQD_PROJ4.format(lat=lat, lon=lon)))
    return sr.ExportToWkt()


def projection(desc, export='wkt'):
    sr = osr.SpatialReference()
    if isinstance(desc, int):
        sr.ImportFromEPSG(desc)
    elif isinstance(desc, str):
        if desc.startswith('+proj='):
            sr.ImportFromProj4(str(desc))
        else:
            sr.ImportFromWkt(str(desc))
    if export == 'wkt':
        return sr.ExportToWkt()
    if export == 'proj4':
        return sr.ExportToProj4()
    return sr


def transform(point, desc):
    srs = [projection(e, export=None) for e in desc]
    ct = osr.CoordinateTransformation(*srs)
    return ct.TransformPoint(*point)[0:2]


class CoordinateTransformer(object):
    """
    Transform coordinates or give cached coordinates back.

    TODO Make this a simple function, caching is not trivial here.
    """
    def __init__(self):
        self.cache = {}

    def transform(self, points, projections):
        """ Transform arrays of points from one projection to another. """
        shape = np.array([p.shape for p in points]).max(0)
        points_in = np.array([
            points[0].flatten(),
            points[1].flatten(),
        ]).transpose()
        ct = osr.CoordinateTransformation(
            projection(projections[0], export=None),
            projection(projections[1], export=None),
        )
        points_out = np.array(ct.TransformPoints(points_in))[:, 0:2]
        result = points_out.reshape(shape[0], shape[1], 2).transpose(2, 0, 1)
        return result


coordinate_transformer = CoordinateTransformer()


def datetime_range(start, stop, step):
    """ Return generator of datetimes. """
    datetime = start
    while datetime <= stop:
        yield datetime
        datetime += step


def variabletimestamp2datetime(ts, fmt='%Y%m%d%H%M%S'):
    """ Try to match an increasingly detailed timestamp. """
    return datetime.datetime.strptime(ts, fmt[:len(ts) - 2])


def timestamp2datetime(ts, fmt='%Y%m%d%H%M%S'):
    return datetime.datetime.strptime(ts, fmt)


def datetime2timestamp(dt, fmt='%Y%m%d%H%M%S'):
    return dt.strftime(fmt)


class DateRange(object):
    STEP = datetime.timedelta(minutes=5)

    def __init__(self, text, step=STEP):
        self.step = step
        self.start, self.stop = self._start_stop_from_text(text)

    def _start_stop_from_text(self, text):
        """ Return start and end timestamps. """
        if '-' in text:
            text1, text2 = text.split('-')
            start = self._single_from_text(text=text1)
            stop = self._single_from_text(text=text2, last=True)
        else:
            start = self._single_from_text(text=text)
            stop = self._single_from_text(text=text)
        return start, stop

    def _single_from_text(self, text, last=False):
        """
        Return datetime that matches text. If last, return the last
        possible step for text.
        """
        first = variabletimestamp2datetime(text)
        if not last:
            return first

        td_kwargs = {
            8: {'days': 1},
            10: {'hours': 1},
            12: {'minutes': 5},  # Makes a range of minutes possible.
        }[len(text)]

        return first - self.step + datetime.timedelta(**td_kwargs)

    def iterdatetimes(self):
        dt = self.start
        while dt <= self.stop:
            yield dt
            dt += self.step


class MultiDateRange(object):
    """ Hold a series of datetime ranges and generate it. """
    def __init__(self, text, step=5):
        step = datetime.timedelta(minutes=step)
        self._dateranges = [DateRange(subtext, step)
                            for subtext in text.strip(',').split(',')]

    def iterdatetimes(self):
        for dr in self._dateranges:
            for dt in dr.iterdatetimes():
                yield dt


class PathHelper(object):
    """
    >>> import datetime
    >>> dt = datetime.datetime(2011, 3, 5, 14, 15, 0)
    >>> ph = PathHelper('a', 'c', '{code}:{timestamp}')
    >>> ph.path(dt)
    u'a/c/2011/03/05/c:20110305141500'
    """
    TIMESTAMP_FORMAT = config.TIMESTAMP_FORMAT

    def __init__(self, basedir, code, template):
        """ Filetemplate must be something like '{code}_{timestamp}.' """
        self._basedir = os.path.join(basedir, code)
        self._code = code
        self._template = template

    def path(self, dt):
        return os.path.join(
            self._basedir,
            dt.strftime('%Y'),
            dt.strftime('%m'),
            dt.strftime('%d'),
            self._template.format(
                code=self._code,
                timestamp=dt.strftime(self.TIMESTAMP_FORMAT)
            )
        )

    def path_with_hour(self, dt):
        return os.path.join(
            self._basedir,
            dt.strftime('%Y'),
            dt.strftime('%m'),
            dt.strftime('%d'),
            dt.strftime('%H'),
            self._template.format(
                code=self._code,
                timestamp=dt.strftime(self.TIMESTAMP_FORMAT)
            )
        )


def closest_time(timeframe='f', dt_close=None):
    '''
    Get corresponding datetime based on the timeframe.
    e.g. with dt_close = 2012-04-13 12:27
    timeframe = 'f' returns 2012-04-13 12:25
    timeframe = 'h' returns 2012-04-13 12:00
    timeframe = 'd' returns 2012-04-12 08:00
    '''
    if dt_close is not None:
        now = dt_close
    else:
        now = datetime.datetime.utcnow()

    if timeframe == 'h':
        closesttime = now.replace(minute=0, second=0, microsecond=0)
    elif timeframe == 'd':
        closesttime = now.replace(hour=8, minute=0, second=0, microsecond=0)
        if closesttime > now:
            closesttime = closesttime - datetime.timedelta(days=1)
    else:
        closesttime = now.replace(
            minute=now.minute - (now.minute % 5), second=0, microsecond=0,
        )
    return closesttime


def get_valid_timeframes(datetime):
    """ Return a list of timeframe codes corresponding to a datetime. """
    result = []
    if datetime.second == 0 and datetime.microsecond == 0:
        if datetime.minute == (datetime.minute // 5) * 5:
            result.append('f')
            # if datetime.minute == 0:
            #     result.append('h')
            #     if datetime.hour == 8:
            #         result.append('d')
    return result


def get_aggregate_combinations(datetimes, timeframes=['f', 'h', 'd']):
    """ Return generator of dictionaries. """
    for _datetime in datetimes:
        valid_timeframes = get_valid_timeframes(datetime=_datetime)
        for timeframe in timeframes:
            if timeframe in valid_timeframes:
                yield dict(nowcast=False,
                           datetime=_datetime,
                           timeframe=timeframe)
                if timeframe == 'f':
                    yield dict(nowcast=True,
                               datetime=_datetime,
                               timeframe=timeframe)


def get_product_combinations(datetimes, prodcodes=['r', 'n', 'a', 'u'],
                             timeframes=['f', 'h', 'd']):
    """ Return generator of dictionaries. """
    for _datetime in datetimes:
        valid_timeframes = get_valid_timeframes(datetime=_datetime)
        for prodcode in prodcodes:
            for timeframe in timeframes:
                if timeframe in valid_timeframes:
                    yield dict(nowcast=False,
                               datetime=_datetime,
                               prodcode=prodcode,
                               timeframe=timeframe)
                    if timeframe == 'f' and prodcode == 'r':
                        yield dict(nowcast=True,
                                   datetime=_datetime,
                                   prodcode=prodcode,
                                   timeframe=timeframe)


def consistent_product_expected(prodcode, timeframe):
    """
    Return if a consistent product is expected, for a product.

    It can be used to determine if a product needs to be published, or not.
    If not, the rescaled equivalent should be published.
    """
    if prodcode in 'au' and timeframe in 'fh':
        return True
    if prodcode == 'n' and timeframe == 'f':
        return True
    return False


def get_groundfile_datetimes(prodcode, timeframe, date):
    '''
    Return datetime for groundfile for a given product code and datetime.

    For ground data to be more complete, the datetime of the grounddata must
    be a certain amount later than the radar datetime. So for a product at
    2012-12-18 09:05 the groundfile datetimes must be:
        real-time       => 2012-12-18 09:05
        near-real-time  => 2012-12-18 10:05
        afterwards      => 2012-12-20 09:05

    Unfortunately, for technical reasons, the groundfile dating is not
    consistent over history. Therefore, this function now takes a timeframe
    argument and returns a generator of datetimes to try. The first datetime
    for which a ground file exists must be used.
    '''
    delta = dict(
        r=datetime.timedelta(minutes=0),
        n=datetime.timedelta(hours=1),
        a=datetime.timedelta(days=2),
    )
    base_datetime = date + delta[prodcode]

    if timeframe == 'f':
        # It used to be on exact 5 minutes, but now it is 1 minute late.
        for minutes in [1, 0]:
            yield base_datetime + datetime.timedelta(minutes=minutes)
    elif timeframe == 'd' and prodcode == 'n':
        # It used to be on 8 o'clock utc, but now it is (correctly) at 9.
        for hours in [0, -1]:
            yield base_datetime + datetime.timedelta(hours=hours)
    else:
        yield base_datetime


def rain_kwargs(max_rain=120, name='buienradar', threshold=0.1):
    """ Return colormap and normalizer suitable for rain. """
    if name == 'buienradar':
        rain_colors = {
            'red': ((0, 240, 240), (2, 158, 110), (5, 88, 0),
                    (10, 0, 255), (100, 131, 192), (max_rain, 192, 192)),
            'green': ((0, 240, 240), (2, 158, 110), (5, 88, 0),
                      (10, 0, 0), (100, 0, 0), (max_rain, 0, 0)),
            'blue': ((0, 255, 255), (2, 255, 255), (5, 255, 200),
                     (10, 110, 0), (100, 0, 192), (max_rain, 192, 192)),
        }
        cdict = {}
        for key, value in rain_colors.items():
            cdict[key] = []
            for element in value:
                cdict[key].append((
                    element[0] / max_rain,
                    element[1] / 255,
                    element[2] / 255,
                ))
        colormap = colors.LinearSegmentedColormap('rain', cdict)
        normalize = colors.Normalize(vmin=0, vmax=max_rain)
        return dict(colormap=colormap, normalize=normalize)
    if name == 'jet':
        colormap = cm.jet

        def normalize(data):
            ma = np.ma.array(data)
            ma[np.less(ma, threshold)] = np.ma.masked
            return colors.LogNorm(vmin=threshold, vmax=max_rain)(ma)

        return dict(colormap=colormap, normalize=normalize)


def merge(images):
    """
    Return a pil image.

    Merge a list of pil images with equal sizes top down based on the
    alpha channel.
    """
    def paste(image1, image2):
        image = image2.copy()
        mask = Image.fromarray(np.array(image1)[:, :, 3])
        rgb = Image.fromarray(np.array(image1)[:, :, 0:3])
        image.paste(rgb, None, mask)
        return image

    return reduce(paste, images)


def makedir(dirname):
    """ Return True if directory was created, else False. """
    try:
        os.makedirs(dirname)
        return True
    except OSError:
        return False


class UTF8Recoder:
    """
    Iterator that reads an encoded stream and reencodes the input to UTF-8.
    """
    def __init__(self, f, encoding):
        self.reader = codecs.getreader(encoding)(f)

    def __iter__(self):
        return self

    def next(self):
        return self.reader.next().encode("utf-8")


def save_attrs(h5, attrs):
    for key, value in attrs.items():
        if isinstance(value, dict):
            group = h5.create_group(key)
            save_attrs(h5=group, attrs=value)
            continue
        if hasattr(value, 'encode'):
            value = value.encode('ascii')
        h5.attrs[key] = np.array([value])


def save_dataset(data, meta, path):
    '''
    Accepts an array jampacked with data, a metadata file and a path to
    produce a fresh h5 file.
    '''
    logging.debug('Saving hdf5 dataset: {}'.format(os.path.basename(path)))
    logging.debug(path)
    tmp_path = path + '.in'
    makedir(os.path.dirname(path))
    h5 = h5py.File(tmp_path, 'w')

    # Geographic group
    left, right, top, bottom = meta['grid_extent']
    width, height = meta['grid_size']
    geographic = dict(
        geo_par_pixel=b'X,Y',
        geo_dim_pixel=b'KM,KM',
        geo_pixel_def=b'CENTRE',
        geo_number_columns=width,
        geo_number_rows=height,
        geo_pixel_size_x=1.000,
        geo_pixel_size_y=-1.000,
        geo_product_corners=[left, bottom, left, top,
                             right, top, right, bottom],
        map_projection=dict(
            projection_indication=b'Y',
            projection_name=b'STEREOGRAPHIC',
            projection_proj4_params=projection(RD, export='proj4'),
        ),
    )

    # Overview group
    availables = meta['available']
    try:
        availables_any = [any(r) for r in zip(*availables)]
    except TypeError:
        availables_any = availables
    products_missing = str(', '.join(
        [radar for radar, available in zip(meta['radars'], availables_any)
         if not available],
    ))
    timestamp_last_composite = meta['timestamp_last_composite']
    if hasattr(timestamp_last_composite, "decode"):
        timestamp_last_composite = timestamp_last_composite.decode("ascii")
    product_datetime_start = (timestamp2datetime(
        timestamp_last_composite,
    ) + config.TIMEFRAME_DELTA['f']).strftime('%d-%b-%Y;%H:%M:%S.%f').upper()
    product_datetime_end = product_datetime_start
    overview = dict(
        hdftag_version_number=b'3.5',
        number_image_groups=1,
        number_radar_groups=0,
        number_satellite_groups=0,
        number_station_groups=0,
        product_datetime_end=product_datetime_end,
        product_datetime_start=product_datetime_start,
        product_group_name=str(os.path.splitext(os.path.basename(path))[0]),
        products_missing=products_missing,
    )

    # Image group
    calibration = dict(
        calibration_flag=b'Y',
        calibration_formulas=b'GEO = 0.010000 * PV + 0.000000',
        calibration_missing_data=0,
        calibration_out_of_image=65535,
    )
    image1 = dict(
        calibration=calibration,
        image_bytes_per_pixel=2,
        image_geo_parameter=b'PRECIP_[MM]',
        image_product_name=overview['product_group_name'],
        image_size=data['precipitation'].size,
    )

    groups = dict(
        overview=overview,
        geographic=geographic,
        image1=image1,
    )
    save_attrs(h5, groups)

    dataset = h5.create_dataset('image1/image_data',
                                data['precipitation'].shape,
                                dtype='u2', compression='gzip', shuffle=True)
    image_data = dict(
        CLASS=b'IMAGE',
        VERSION=b'1.2',
    )
    save_attrs(dataset, image_data)

    # Creating the pixel values
    dataset[...] = np.uint16(np.round(data['precipitation'] * 100)).filled(
        calibration['calibration_out_of_image'],
    )

    # Keep the old way for compatibility with various products
    for name, value in data.items():
        dataset = h5.create_dataset(name, value.shape, dtype='f4',
                                    compression='gzip', shuffle=True)
        dataset[...] = value.filled(config.NODATAVALUE)

    for name, value in meta.items():
        if hasattr(value, 'encode'):
            value = value.encode('ascii')
        elif isinstance(value, list):
            value = [e.encode('ascii') if hasattr(e, 'encode') else e
                     for e in value]
        h5.attrs[name] = value

    h5.close()
    os.rename(tmp_path, path)
    return path


def get_countrymask():
    """
    Get a prefabricated country mask, 1 everywhere in the country and 0
    50 km outside the country. If extent and cellsize are not as in
    config.py, this breaks.
    """
    countrymask_path = os.path.join(config.MISC_DIR, 'countrymask.h5')
    h5 = h5py.File(countrymask_path, 'r')
    mask = h5['mask'][...]
    h5.close()
    return mask


def get_geo_transform():
    left, right, top, bottom = config.COMPOSITE_EXTENT
    width, height = config.COMPOSITE_CELLSIZE
    return left, width, 0, top, 0, -height


def parse_datetime(text):
    """ Return a datetime instance. """
    if len(text) == 4:
        # ciso8601 requires at least months to be specified
        return ciso8601.parse_datetime_unaware(text + '01')
    return ciso8601.parse_datetime_unaware(text)


def convert_to_lists_and_unicode(meta):
    """ Updates dict. Return None. """
    for k, v in meta.items():
        # numpy array
        if hasattr(v, 'size'):
            if v.size == 0:
                meta[k] = []
            elif v.size == 1:
                meta[k] = v.item()
            else:
                meta[k] = [e.decode('ascii') if hasattr(e, 'decode') else e
                           for e in v.tolist()]
        # bytes
        elif hasattr(v, 'decode'):
            meta[k] = v.decode('ascii')
from cmio import ILImageService
from cmio import ILImageService_init as init
from cmio import ILImageService_v as v
from agui import Signal
from agui.awidgets import AWidget


class AComboBox(AWidget):
    def __init__(self, item=None):
        self._list = []
        self._selected = 0
        self.changed = Signal()
        AWidget.__init__(self, item)

    def emit_changed(self, *args):
        self.changed.emit(self.selected)

    def clear(self):
        self._list = []

    def append(self, text):
        self._list.append(text)

    def prepend(self, text):
        self._list.insert(0, text)

    def remove(self, index):
        # remove by position; list.remove() would remove by value
        self._list.pop(index)

    def insert(self, index, text):
        self._list.insert(index, text)

    @property
    def items(self):
        return self._list

    @items.setter
    def items(self, items):
        self.clear()
        for item in items:
            self.append(item)

    @property
    def selected(self):
        return self._selected

    @selected.setter
    def selected(self, value):
        self._selected = value

    @property
    def selected_text(self):
        return self._list[self.selected]

    @selected_text.setter
    def selected_text(self, value):
        index = 0
        for item in self._list:
            if value == item:  # was `text`, an undefined name
                self.selected = index
                break
            index += 1
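
# Usage sketch (hypothetical, not part of the original module). Assumes
# AWidget can be constructed without a backing toolkit item and that agui's
# Signal exposes a connect() counterpart to the emit() used above.
if __name__ == '__main__':
    combo = AComboBox()
    combo.items = ["red", "green", "blue"]
    combo.selected_text = "green"
    assert combo.selected == 1
    combo.changed.connect(lambda index: print("selected index:", index))
    combo.emit_changed()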
import re
from itertools import chain

import mwparserfromhell

from .encodings import dotencode
from .title import canonicalize

__all__ = [
    "strip_markup", "get_adjacent_node", "get_parent_wikicode",
    "remove_and_squash", "get_section_headings", "get_anchors",
    "ensure_flagged_by_template", "ensure_unflagged_by_template",
    "is_flagged_by_template", "is_redirect", "parented_ifilter",
]


def strip_markup(text, normalize=True, collapse=True):
    """
    Parses the given text and returns the text after stripping all MediaWiki
    markup, leaving only the plain text.

    :param normalize: passed to :py:func:`mwparserfromhell.wikicode.Wikicode.strip_code`
    :param collapse: passed to :py:func:`mwparserfromhell.wikicode.Wikicode.strip_code`
    :returns: :py:obj:`str`
    """
    wikicode = mwparserfromhell.parse(text)
    return wikicode.strip_code(normalize, collapse)


def get_adjacent_node(wikicode, node, ignore_whitespace=False):
    """
    Get the node immediately following `node` in `wikicode`.

    :param wikicode: a :py:class:`mwparserfromhell.wikicode.Wikicode` object
    :param node: a :py:class:`mwparserfromhell.nodes.Node` object
    :param ignore_whitespace: When True, the whitespace between `node` and the
        node being returned is ignored, i.e. the returned object is guaranteed
        to not be an all white space text, but it can still be a text with
        leading space.
    :returns: a :py:class:`mwparserfromhell.nodes.Node` object or None if
        `node` is the last object in `wikicode`
    """
    i = wikicode.index(node) + 1
    try:
        n = wikicode.get(i)
        while ignore_whitespace and n.isspace():
            i += 1
            n = wikicode.get(i)
        return n
    except IndexError:
        return None


def get_parent_wikicode(wikicode, node):
    """
    Returns the parent of `node` as a `wikicode` object.
    Raises :exc:`ValueError` if `node` is not a descendant of `wikicode`.
    """
    context, index = wikicode._do_strong_search(node, True)
    return context


def remove_and_squash(wikicode, obj):
    """
    Remove `obj` from `wikicode` and fix whitespace in the place it was
    removed from.
    """
    parent = get_parent_wikicode(wikicode, obj)
    index = parent.index(obj)
    parent.remove(obj)

    def _get_text(index):
        # the first node has no previous node, especially not the last node
        if index < 0:
            return None, None
        try:
            node = parent.get(index)
            # don't EVER remove whitespace from non-Text nodes (it would
            # modify the objects by converting to str, making the operation
            # and replacing the object with str, but we keep references to
            # the old nodes)
            if not isinstance(node, mwparserfromhell.nodes.text.Text):
                return None, mwparserfromhell.nodes.text.Text
            return node, mwparserfromhell.nodes.text.Text
        except IndexError:
            return None, None

    prev, prev_cls = _get_text(index - 1)
    next_, next_cls = _get_text(index)

    if prev is None and next_ is not None:
        # strip only at the beginning of the document, not after non-text
        # elements, see https://github.com/lahwaacz/wiki-scripts/issues/44
        if prev_cls is None:
            next_.value = next_.lstrip()
    elif prev is not None and next_ is None:
        # strip only at the end of the document, not before non-text
        # elements, see https://github.com/lahwaacz/wiki-scripts/issues/44
        if next_cls is None:
            prev.value = prev.value.rstrip()
    elif prev is not None and next_ is not None:
        if prev.endswith(" ") and next_.startswith(" "):
            prev.value = prev.rstrip(" ")
            next_.value = " " + next_.lstrip(" ")
        elif prev.endswith("\n") and next_.startswith("\n"):
            if prev[:-1].endswith("\n") or next_[1:].startswith("\n"):
                # preserve preceding blank line
                prev.value = prev.rstrip("\n") + "\n\n"
                next_.value = next_.lstrip("\n")
            else:
                # leave one linebreak
                prev.value = prev.rstrip("\n") + "\n"
                next_.value = next_.lstrip("\n")
        elif prev.endswith("\n"):
            next_.value = next_.lstrip()
        elif next_.startswith("\n"):    # pragma: no branch
            prev.value = prev.rstrip()
        # merge successive Text nodes
        prev.value += next_.value
        parent.remove(next_)


def get_section_headings(text):
    """
    Extracts section headings from given text. Custom regular expression is
    used instead of :py:mod:`mwparserfromhell` for performance reasons.

    .. note::
        Known issues:

        - templates are not handled (use
          :py:func:`ws.parser_helpers.template_expansion.expand_templates`
          prior to calling this function)

    :param str text: content of the wiki page
    :returns: list of section headings (without the ``=`` marks)
    """
    # re.findall returns a list of tuples of the matched groups
    # gotcha: the line must start with '=', but does not have to end with '='
    # (trailing whitespace is ignored)
    matches = re.findall(r"^((\={1,6})[^\S\n]*)([^\n]+?)([^\S\n]*(\2))[^\S\n]*$",
                         text, flags=re.MULTILINE | re.DOTALL)
    return [match[2] for match in matches]


def get_anchors(headings, pretty=False, suffix_sep="_"):
    """
    Converts section headings to anchors.

    .. note::
        Known issues:

        - templates are not handled (call
          :py:func:`ws.parser_helpers.template_expansion.expand_templates`
          on the wikitext before extracting section headings)
        - all tags are always stripped, even invalid tags
          (:py:mod:`mwparserfromhell` is not that configurable)
        - if ``pretty`` is ``True``, tags escaped with <nowiki> in the input
          are not encoded in the output

    :param list headings:
        section headings (obtained e.g. with :py:func:`get_section_headings`)
    :param bool pretty:
        if ``True``, the anchors will be as pretty as possible (e.g. for use
        in wikilinks), otherwise they are fully dot-encoded
    :param str suffix_sep:
        the separator between the base anchor and numeric suffix for
        duplicate section names
    :returns: list of section anchors
    """
    # MediaWiki markup should be stripped, but the text has to be parsed as a
    # heading, otherwise e.g. starting '#' would be understood as a list and
    # stripped as well.
    anchors = [strip_markup("={}=".format(heading)) for heading in headings]
    if pretty is False:
        anchors = [dotencode(a) for a in anchors]
    else:
        # anchors can't contain '[', '|', ']' and tags; encode them manually
        anchors = [a.replace("[", "%5B").replace("|", "%7C").replace("]", "%5D")
                   for a in anchors]

    # handle equivalent headings duplicated on the page
    for i, anchor in enumerate(anchors):
        j = 2
        # this check should be case-insensitive, see
        # https://wiki.archlinux.org/index.php/User:Lahwaacz/Notes#Section_anchors
        while anchor.lower() in [a.lower() for a in anchors[:i]]:
            anchor = anchors[i] + suffix_sep + "{}".format(j)
            j += 1
        # update the main array
        anchors[i] = anchor

    return anchors


def ensure_flagged_by_template(wikicode, node, template_name, *template_parameters, overwrite_parameters=True):
    """
    Makes sure that ``node`` in ``wikicode`` is immediately (except for
    whitespace) followed by a template with ``template_name`` and optional
    ``template_parameters``.

    :param wikicode: a :py:class:`mwparserfromhell.wikicode.Wikicode` object
    :param node: a :py:class:`mwparserfromhell.nodes.Node` object
    :param str template_name: the name of the template flag
    :param template_parameters: optional template parameters
    :returns: the template flag, as a
        :py:class:`mwparserfromhell.nodes.template.Template` object
    """
    parent = get_parent_wikicode(wikicode, node)
    adjacent = get_adjacent_node(parent, node, ignore_whitespace=True)

    if template_parameters:
        flag = "{{%s}}" % "|".join([template_name, *template_parameters])
    else:
        flag = "{{%s}}" % template_name
    flag = mwparserfromhell.parse(flag).nodes[0]
    assert(isinstance(flag, mwparserfromhell.nodes.Template))

    if isinstance(adjacent, mwparserfromhell.nodes.Template) and adjacent.name.matches(template_name):
        # in case of {{Dead link}} we want to preserve the original parameters
        if overwrite_parameters is True:
            wikicode.replace(adjacent, flag)
        else:
            flag = adjacent
    else:
        wikicode.insert_after(node, flag)

    assert(get_parent_wikicode(wikicode, flag) is parent)
    return flag


def ensure_unflagged_by_template(wikicode, node, template_name, *, match_only_prefix=False):
    """
    Makes sure that ``node`` in ``wikicode`` is not immediately (except for
    whitespace) followed by a template with ``template_name``.

    :param wikicode: a :py:class:`mwparserfromhell.wikicode.Wikicode` object
    :param node: a :py:class:`mwparserfromhell.nodes.Node` object
    :param str template_name: the name of the template flag
    :param bool match_only_prefix: if ``True``, only the prefix of the
        adjacent template must match ``template_name``
    """
    parent = get_parent_wikicode(wikicode, node)
    adjacent = get_adjacent_node(parent, node, ignore_whitespace=True)

    if isinstance(adjacent, mwparserfromhell.nodes.Template):
        if match_only_prefix is True:
            if canonicalize(adjacent.name).startswith(canonicalize(template_name)):
                remove_and_squash(wikicode, adjacent)
        else:
            if adjacent.name.matches(template_name):
                remove_and_squash(wikicode, adjacent)


def is_flagged_by_template(wikicode, node, template_name, *, match_only_prefix=False):
    """
    Checks if ``node`` in ``wikicode`` is immediately (except for whitespace)
    followed by a template with ``template_name``.

    :param wikicode: a :py:class:`mwparserfromhell.wikicode.Wikicode` object
    :param node: a :py:class:`mwparserfromhell.nodes.Node` object
    :param str template_name: the name of the template flag
    :param bool match_only_prefix: if ``True``, only the prefix of the
        adjacent template must match ``template_name``
    """
    parent = get_parent_wikicode(wikicode, node)
    adjacent = get_adjacent_node(parent, node, ignore_whitespace=True)

    if isinstance(adjacent, mwparserfromhell.nodes.Template):
        if match_only_prefix is True:
            if canonicalize(adjacent.name).startswith(canonicalize(template_name)):
                return True
        else:
            if adjacent.name.matches(template_name):
                return True
    return False


def is_redirect(text, *, full_match=False):
    """
    Checks if the text represents a MediaWiki `redirect page`_.

    :param bool full_match:
        Restricts the behaviour to return ``True`` only for pages which do
        not contain anything else but the redirect line.

    .. _`redirect page`: https://www.mediawiki.org/wiki/Help:Redirects
    """
    if full_match is True:
        f = re.fullmatch
    else:
        f = re.match
    match = f(r"#redirect\s*:?\s*\[\[[^[\]{}]+\]\]", text.strip(),
              flags=re.MULTILINE | re.IGNORECASE)
    return bool(match)


FLAGS = re.IGNORECASE | re.DOTALL | re.UNICODE


def parented_ifilter(wikicode, recursive=True, matches=None, flags=FLAGS, forcetype=None):
    """Iterate over nodes and their corresponding parents.

    The arguments are interpreted as for :meth:`ifilter`. For each tuple
    ``(parent, node)`` yielded by this method, ``parent`` is the direct
    parent wikicode of ``node``.

    The method is intended for performance optimization by avoiding expensive
    search e.g. in the ``replace`` method. See the :py:mod:`mwparserfromhell`
    issue for details: https://github.com/earwig/mwparserfromhell/issues/195
    """
    match = wikicode._build_matcher(matches, flags)
    if recursive:
        restrict = forcetype if recursive == wikicode.RECURSE_OTHERS else None

        def getter(node):
            for parent, ch in wikicode._get_children(node, restrict=restrict,
                                                     contexts=True, parent=wikicode):
                yield (parent, ch)

        inodes = chain(*(getter(n) for n in wikicode.nodes))
    else:
        inodes = ((wikicode, node) for node in wikicode.nodes)

    for parent, node in inodes:
        if (not forcetype or isinstance(node, forcetype)) and match(node):
            yield (parent, node)
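
# Usage sketch (hypothetical wikitext, not part of the original module):
# flag an external link with a template, check the flag, then remove it
# again with the helpers above.
if __name__ == '__main__':
    wikicode = mwparserfromhell.parse("See [http://example.com the docs].\n")
    link = wikicode.filter_external_links()[0]

    flag = ensure_flagged_by_template(wikicode, link, "Dead link")
    assert is_flagged_by_template(wikicode, link, "Dead link")
    ensure_unflagged_by_template(wikicode, link, "Dead link")
    print(wikicode)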
import os
import sys

TEST_ROOT_DIR = os.path.dirname(__file__)
parentdir = os.path.dirname(TEST_ROOT_DIR)
parentdir2 = os.path.dirname(TEST_ROOT_DIR)
sys.path.insert(0, parentdir)
sys.path.insert(0, parentdir2)
os.chdir(TEST_ROOT_DIR)
""" The top-level *pypers* package export just one utility function (see below). The rest of the functionality is in the four sub-packages listed above. """ def import_all(namespace, dir): """ Import all Python files in the directory `dir` and add all the symbols they define into the namespace `namespace.__name__`. This utility function is meant to be called in a __init__.py file in order to load all the Python source files in the same directory as the __init__.py file itself. In these cases, the input parameter `namespace` is typically set to globals(). """ from imp import find_module, load_module import os from sys import modules from types import ModuleType _this_module_name = namespace['__name__'] _this_module = modules[_this_module_name] assert namespace is _this_module.__dict__ # Find all the .py files to import... _sources = [_f for _f in os.listdir(dir) if _f.endswith('.py') and _f != '__init__.py'] # ...and import them, one by one. for _source in _sources: _name = _source[:-3] _fp, _path, _desc = find_module(_name, [dir, ]) try: _m = load_module(_this_module_name + '.' + _name, _fp, _path, _desc) finally: if _fp: _fp.close() # Now add all the Step subclasses to the current package. for (_name, _symbol) in _m.__dict__.items(): if(not isinstance(_symbol, ModuleType) and not _name.startswith('_')): setattr(_this_module, _name, _symbol) return
""" This file contains the `Board` class, which implements the rules for the game Isolation as described in lecture, modified so that the players move like knights in chess rather than queens. You MAY use and modify this class, however ALL function signatures must remain compatible with the defaults provided, and none of your changes will be available to project reviewers. """ import timeit from copy import deepcopy from copy import copy TIME_LIMIT_MILLIS = 200 class Board(object): """ Implement a model for the game Isolation assuming each player moves like a knight in chess. Parameters ---------- player_1 : object An object with a get_move() function. This is the only function directly called by the Board class for each player. player_2 : object An object with a get_move() function. This is the only function directly called by the Board class for each player. width : int (optional) The number of columns that the board should have. height : int (optional) The number of rows that the board should have. """ BLANK = 0 NOT_MOVED = None def __init__(self, player_1, player_2, width=7, height=7): self.width = width self.height = height self.move_count = 0 self.__player_1__ = player_1 self.__player_2__ = player_2 self.__active_player__ = player_1 self.__inactive_player__ = player_2 self.__board_state__ = [[Board.BLANK for i in range(width)] for j in range(height)] self.__last_player_move__ = {player_1: Board.NOT_MOVED, player_2: Board.NOT_MOVED} self.__player_symbols__ = {Board.BLANK: Board.BLANK, player_1: 1, player_2: 2} @property def active_player(self): """ The object registered as the player holding initiative in the current game state. """ return self.__active_player__ @property def inactive_player(self): """ The object registered as the player in waiting for the current game state. """ return self.__inactive_player__ def get_opponent(self, player): """ Return the opponent of the supplied player. Parameters ---------- player : object An object registered as a player in the current game. Raises an error if the supplied object is not registered as a player in this game. Returns ---------- object The opponent of the input player object. """ if player == self.__active_player__: return self.__inactive_player__ elif player == self.__inactive_player__: return self.__active_player__ raise RuntimeError("`player` must be an object registered as a player in the current game.") def copy(self): """ Return a deep copy of the current board. """ new_board = Board(self.__player_1__, self.__player_2__, width=self.width, height=self.height) new_board.move_count = self.move_count new_board.__active_player__ = self.__active_player__ new_board.__inactive_player__ = self.__inactive_player__ new_board.__last_player_move__ = copy(self.__last_player_move__) new_board.__player_symbols__ = copy(self.__player_symbols__) new_board.__board_state__ = deepcopy(self.__board_state__) return new_board def forecast_move(self, move): """ Return a deep copy of the current game with an input move applied to advance the game one ply. Parameters ---------- move : (int, int) A coordinate pair (row, column) indicating the next position for the active player on the board. Returns ---------- `isolation.Board` A deep copy of the board with the input move applied. """ new_board = self.copy() new_board.apply_move(move) return new_board def move_is_legal(self, move): """ Test whether a move is legal in the current game state. 
Parameters ---------- move : (int, int) A coordinate pair (row, column) indicating the next position for the active player on the board. Returns ---------- bool Returns True if the move is legal, False otherwise """ row, col = move return 0 <= row < self.height and \ 0 <= col < self.width and \ self.__board_state__[row][col] == Board.BLANK def get_blank_spaces(self): """ Return a list of the locations that are still available on the board. """ return [(i, j) for j in range(self.width) for i in range(self.height) if self.__board_state__[i][j] == Board.BLANK] def get_player_location(self, player): """ Find the current location of the specified player on the board. Parameters ---------- player : object An object registered as a player in the current game. Returns ---------- (int, int) The coordinate pair (row, column) of the input player. """ return self.__last_player_move__[player] def get_legal_moves(self, player=None): """ Return the list of all legal moves for the specified player. Parameters ---------- player : object (optional) An object registered as a player in the current game. If None, return the legal moves for the active player on the board. Returns ---------- list<(int, int)> The list of coordinate pairs (row, column) of all legal moves for the player constrained by the current game state. """ if player is None: player = self.active_player return self.__get_moves__(self.__last_player_move__[player]) def apply_move(self, move): """ Move the active player to a specified location. Parameters ---------- move : (int, int) A coordinate pair (row, column) indicating the next position for the active player on the board. Returns ---------- None """ row, col = move self.__last_player_move__[self.active_player] = move self.__board_state__[row][col] = self.__player_symbols__[self.active_player] self.__active_player__, self.__inactive_player__ = self.__inactive_player__, self.__active_player__ self.move_count += 1 def is_winner(self, player): """ Test whether the specified player has won the game. """ return player == self.inactive_player and not self.get_legal_moves(self.active_player) def is_loser(self, player): """ Test whether the specified player has lost the game. """ return player == self.active_player and not self.get_legal_moves(self.active_player) def utility(self, player): """ Returns the utility of the current game state from the perspective of the specified player. / +infinity, "player" wins utility = | -infinity, "player" loses \ 0, otherwise Parameters ---------- player : object (optional) An object registered as a player in the current game. If None, return the utility for the active player on the board. Returns ---------- float The utility value of the current game state for the specified player. The game has a utility of +inf if the player has won, a value of -inf if the player has lost, and a value of 0 otherwise. """ if not self.get_legal_moves(self.active_player): if player == self.inactive_player: return float("inf") if player == self.active_player: return float("-inf") return 0. def __get_moves__(self, move): """ Generate the list of possible moves for an L-shaped motion (like a knight in chess). 
""" if move == Board.NOT_MOVED: return self.get_blank_spaces() r, c = move directions = [(-2, -1), (-2, 1), (-1, -2), (-1, 2), (1, -2), (1, 2), (2, -1), (2, 1)] valid_moves = [(r+dr,c+dc) for dr, dc in directions if self.move_is_legal((r+dr, c+dc))] return valid_moves def print_board(self): """DEPRECATED - use Board.to_string()""" return self.to_string() def to_string(self): """Generate a string representation of the current game state, marking the location of each player and indicating which cells have been blocked, and which remain open. """ p1_loc = self.__last_player_move__[self.__player_1__] p2_loc = self.__last_player_move__[self.__player_2__] out = '' for i in range(self.height): out += ' | ' for j in range(self.width): if not self.__board_state__[i][j]: out += ' ' elif p1_loc and i == p1_loc[0] and j == p1_loc[1]: out += '1' elif p2_loc and i == p2_loc[0] and j == p2_loc[1]: out += '2' else: out += '-' out += ' | ' out += '\n\r' return out def play(self, time_limit=TIME_LIMIT_MILLIS): """ Execute a match between the players by alternately soliciting them to select a move and applying it in the game. Parameters ---------- time_limit : numeric (optional) The maximum number of milliseconds to allow before timeout during each turn. Returns ---------- (player, list<[(int, int),]>, str) Return multiple including the winning player, the complete game move history, and a string indicating the reason for losing (e.g., timeout or invalid move). """ move_history = [] curr_time_millis = lambda: 1000 * timeit.default_timer() while True: print(self.to_string()) legal_player_moves = self.get_legal_moves() game_copy = self.copy() move_start = curr_time_millis() time_left = lambda : time_limit - (curr_time_millis() - move_start) curr_move = self.active_player.get_move(game_copy, legal_player_moves, time_left) move_end = time_left() # print move_end if curr_move is None: curr_move = Board.NOT_MOVED if self.active_player == self.__player_1__: move_history.append([curr_move]) else: move_history[-1].append(curr_move) if move_end < 0: return self.__inactive_player__, move_history, "timeout" if curr_move not in legal_player_moves: return self.__inactive_player__, move_history, "illegal move" self.apply_move(curr_move)
import uuid
import os
import sys
import datetime
import json

sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), 'lib', 'transit')))
import Transit

import ConfigParser

config = ConfigParser.RawConfigParser()
config.read(os.path.abspath(os.path.join(os.path.dirname(__file__), 'settings.cfg')))

SESSIONS_HOST = config.get('sessions', 'host')
SESSIONS_PORT = config.get('sessions', 'port')
SESSIONS_DBNAME = config.get('sessions', 'dbname')
SESSIONS_USER = config.get('sessions', 'user')
SESSIONS_PASSWORD = config.get('sessions', 'password')
SESSIONS_CONN_STRING = "host='"+SESSIONS_HOST+"' port='"+SESSIONS_PORT+"' dbname='"+SESSIONS_DBNAME+"' user='"+SESSIONS_USER+"' password='"+SESSIONS_PASSWORD+"'"

SESSIONS_SECRET_KEY_PUBLIC = int(config.get('sessions', 'secret_key_public'), 16)
SESSIONS_SECRET_KEY_PRIVATE = int(config.get('sessions', 'secret_key_private'), 16)
SESSION_EXPIRATION_TIME = int(config.get('sessions', 'expiration_time'))


class EnmodalSessionManager(object):
    def __init__(self):
        self.sessions = []

    # `redis_store` and `save_session` are expected to be provided by the
    # surrounding application.
    def get_by_sid(self, sid):
        return redis_store.get(str(sid))

    def get_by_public_key(self, h):
        sid = self.get_sid_from_public_key(h)
        return redis_store.get(str(sid))

    def get_by_private_key(self, h):
        sid = self.get_sid_from_private_key(h)
        return redis_store.get(str(sid))

    def get_sid_from_public_key(self, h):
        return h ^ SESSIONS_SECRET_KEY_PUBLIC

    def get_sid_from_private_key(self, h):
        return h ^ SESSIONS_SECRET_KEY_PRIVATE

    def add(self, s):
        self.sessions.append(s)
        # Whenever we add a new session, check for old ones to remove.
        purged = self.purge()
        print str(len(self.sessions))+" active sessions, "+str(purged)+" purged."

    def save(self, s):
        # no body in the source; kept as a no-op
        pass

    def remove_by_sid(self, sid):
        s = self.get_by_sid(sid)
        if s is not None:
            self.sessions.remove(s)

    def auth_by_key(self, h):
        public_session = self.get_by_public_key(h)
        if public_session is not None:
            public_session.keep_alive()
            a = EnmodalSessionAuthentication(public_session, False)
            return a
        private_session = self.get_by_private_key(h)
        if private_session is not None:
            private_session.keep_alive()
            a = EnmodalSessionAuthentication(private_session, True)
            return a
        return None

    def purge(self):
        num_sessions_start = len(self.sessions)
        for session in self.sessions:
            if session.is_expired():
                save_session(session, None, False)
        self.sessions = [x for x in self.sessions if not x.is_expired()]
        return num_sessions_start - len(self.sessions)


class EnmodalSession(object):
    def __init__(self):
        self.sid = uuid.uuid4().int & (1 << 63) - 1
        self.map = Transit.Map(0)
        self.last_edit_time = datetime.datetime.now()

    def public_key(self):
        return self.sid ^ SESSIONS_SECRET_KEY_PUBLIC

    def private_key(self):
        return self.sid ^ SESSIONS_SECRET_KEY_PRIVATE

    def keep_alive(self):
        self.last_edit_time = datetime.datetime.now()

    def is_expired(self):
        return (datetime.datetime.now() - self.last_edit_time).total_seconds() > SESSION_EXPIRATION_TIME


class EnmodalSessionAuthentication(object):
    def __init__(self, s, editable):
        self.session = s
        self.editable = editable

    def returnable_key(self):
        if self.editable:
            return '{:16x}'.format(self.session.private_key())
        else:
            return '{:16x}'.format(self.session.public_key())


def check_for_session_errors(h):
    if session_manager.auth_by_key(h) is None:
        print("session auth problem with key "+str(h))
        return json.dumps({"error": "Invalid session"})
    return 0


session_manager = EnmodalSessionManager()
__author__ = 'https://github.com/arkadoel'

"""
Build on Linux:

    sudo /opt/miniconda3/bin/python3 ./setup.py build
"""
import sys
from cx_Freeze import setup, Executable
import core.Constantes as const

build_exe_options = {
    "packages": ['', 'gui', 'core', 'core.manzana', 'directORM'],
    "excludes": ["tkinter"]
}

base = None
if sys.platform == "win32":
    base = "Win32GUI"

setup(  name = const.APP_NAME,
        version = const.APP_VERSION,
        description = const.APP_NAME + ' ' + const.APP_VERSION,
        options = {"build_exe": build_exe_options},
        executables = [Executable("__init__.py", base=base)])
""" Tests for ReducePyCkovPlot """ import base64 import json import unittest from ReducePyCkovPlot import ReducePyCkovPlot class ReducePyCkovPlotTestCase(unittest.TestCase): # pylint: disable=R0904, C0301 """ Test class for ReducePyCkovPlot """ @classmethod def setUpClass(self): # pylint: disable=C0202 """ Prepare for test by setting up worker. @param self Object reference. """ self.__reducer = ReducePyCkovPlot() def setUp(self): """ Invoke "birth" and check for success. @param self Object reference. """ success = self.__reducer.birth("""{"root_batch_mode":1}""") if not success: raise Exception('Test setUp failed '+str(success), 'reducer.birth() failed') def test_birth_default(self): """ Check default configuration after "birth" is called. @param self Object reference. """ self.assertEquals(0, self.__reducer.spill_count, "Unexpected reducer.spill_count") self.assertEquals("eps", self.__reducer.image_type, "Unexpected reducer.image_type") self.assertTrue(not self.__reducer.auto_number, "Unexpected reducer.auto_number") def test_birth_file_type(self): """ Test configuration when "birth" is called with a supported file type. @param self Object reference. """ self.__reducer = ReducePyCkovPlot() success = self.__reducer.birth("""{"histogram_image_type":"png"}""") self.assertTrue(success, "reducer.birth() failed") self.assertEquals("png", self.__reducer.image_type, "Unexpected reducer.image_type") def test_invalid_json(self): """ Test "process" with a bad JSON document as an argument string. @param self Object reference. """ result_str = self.__reducer.process("{") result = json.loads(result_str) self.assertTrue("errors" in result, "No errors field") errors = result["errors"] self.assertTrue("ReducePyCkovPlot" in errors, "No ReducePyCkovPlot field") errors = errors["ReducePyCkovPlot"] self.assertTrue(len(errors) >= 1, "Missing error trace") def test_no_digits(self): """ Test "process" with a JSON document with no "digits" entry. @param self Object reference.# """ noDigit_json = {"run_number": 1, "maus_event_type": "Spill", "recon_events": [], "spill_number": 0, "errors": {}, "daq_event_type": "physics_event", "daq_data": {}} # pylint: disable=C0301 result = self.__process(noDigit_json) self.assertTrue("errors" in result, "No errors field") errors = result["errors"] self.assertTrue("ReducePyCkovPlot" in errors, "space points not in spill") def __process(self, json_doc): """ Convert given JSON document to a string and pass to "process". @param self Object reference. @param json_doc JSON document. @returns JSON document string from "process". """ json_str = json.dumps(json_doc) result_str = self.__reducer.process(json_str) return json.loads(result_str) def __check_result(self, spill_id, result): """ Validate results from "process". Check the current spill count in the worker and the image name is as expected. Then check the "image" contents. This method assumes the image type is "eps". @param self Object reference. @param spill_id ID of spill just processed. @param result JSON document from "process". 
""" json_doc = ('%s/src/reduce/ReducePyCkovPlot/processTest.txt' % os.environ.get("MAUS_ROOT_DIR"))#pylint: disable=E0602 result = self.__process(json_doc) self.assertEquals(spill_id + 1, self.__reducer.spill_count, "Unexpected reducer.spill_count") self.assertTrue("image" in result, "No image field") image = result["image"] self.assertEquals(self.__reducer.image_type, image["image_type"], "Unexpected image_type") if (self.__reducer.auto_number): tag = "CkovChargePMT1-8%06d" % (spill_id + 1) else: tag = "CkovChargePMT1-8" self.assertEquals(tag, image["tag"], "Unexpected tag") self.assertTrue("content" in image, "No content field") self.assertTrue("data" in image, "No data field") decoded_data = base64.b64decode(image["data"]) self.assertTrue(decoded_data.find("EPS") != -1, "Unexpected image data") def tearDown(self): """ Invoke "death". @param self Object reference. """ success = self.__reducer.death() if success != None: raise Exception('Test tearDown failed', 'reducer.death() failed') if __name__ == '__main__': unittest.main()
from __future__ import print_function from __future__ import absolute_import from __future__ import unicode_literals from __future__ import division from builtins import * # noqa from hamcrest import ( assert_that, contains, contains_inanyorder, empty, matches_regexp, has_entries, has_item ) from nose.tools import eq_ from pprint import pformat import requests from ycmd import handlers from ycmd.tests.java import ( DEFAULT_PROJECT_DIR, IsolatedYcmd, PathToTestFile, SharedYcmd ) from ycmd.tests.test_utils import ( CombineRequest, ChunkMatcher, CompletionEntryMatcher, LocationMatcher, WithRetry ) from ycmd.utils import ReadFile from mock import patch def ProjectPath( *args ): return PathToTestFile( DEFAULT_PROJECT_DIR, 'src', 'com', 'test', *args ) @WithRetry def RunTest( app, test ): """ Method to run a simple completion test and verify the result test is a dictionary containing: 'request': kwargs for BuildRequest 'expect': { 'response': server response code (e.g. httplib.OK) 'data': matcher for the server response json } """ contents = ReadFile( test[ 'request' ][ 'filepath' ] ) app.post_json( '/event_notification', CombineRequest( test[ 'request' ], { 'event_name': 'FileReadyToParse', 'contents': contents, } ), expect_errors = True ) # We ignore errors here and we check the response code ourself. # This is to allow testing of requests returning errors. response = app.post_json( '/completions', CombineRequest( test[ 'request' ], { 'contents': contents } ), expect_errors = True ) print( 'completer response: {0}'.format( pformat( response.json ) ) ) eq_( response.status_code, test[ 'expect' ][ 'response' ] ) assert_that( response.json, test[ 'expect' ][ 'data' ] ) PUBLIC_OBJECT_METHODS = [ CompletionEntryMatcher( 'equals', 'Object', { 'kind': 'Method' } ), CompletionEntryMatcher( 'getClass', 'Object', { 'kind': 'Method' } ), CompletionEntryMatcher( 'hashCode', 'Object', { 'kind': 'Method' } ), CompletionEntryMatcher( 'notify', 'Object', { 'kind': 'Method' } ), CompletionEntryMatcher( 'notifyAll', 'Object', { 'kind': 'Method' } ), CompletionEntryMatcher( 'toString', 'Object', { 'kind': 'Method' } ), CompletionEntryMatcher( 'wait', 'Object', { 'menu_text': matches_regexp( 'wait\\(long .*, int .*\\) : void' ), 'kind': 'Method', } ), CompletionEntryMatcher( 'wait', 'Object', { 'menu_text': matches_regexp( 'wait\\(long .*\\) : void' ), 'kind': 'Method', } ), CompletionEntryMatcher( 'wait', 'Object', { 'menu_text': 'wait() : void', 'kind': 'Method', } ), ] def WithObjectMethods( *args ): return list( PUBLIC_OBJECT_METHODS ) + list( args ) @SharedYcmd def GetCompletions_NoQuery_test( app ): RunTest( app, { 'description': 'semantic completion works for builtin types (no query)', 'request': { 'filetype' : 'java', 'filepath' : ProjectPath( 'TestFactory.java' ), 'line_num' : 27, 'column_num': 12, }, 'expect': { 'response': requests.codes.ok, 'data': has_entries( { 'completions': contains_inanyorder( *WithObjectMethods( CompletionEntryMatcher( 'test', 'TestFactory.Bar', { 'kind': 'Field' } ), CompletionEntryMatcher( 'testString', 'TestFactory.Bar', { 'kind': 'Field' } ) ) ), 'errors': empty(), } ) }, } ) @SharedYcmd def GetCompletions_WithQuery_test( app ): RunTest( app, { 'description': 'semantic completion works for builtin types (with query)', 'request': { 'filetype' : 'java', 'filepath' : ProjectPath( 'TestFactory.java' ), 'line_num' : 27, 'column_num': 15, }, 'expect': { 'response': requests.codes.ok, 'data': has_entries( { 'completions': contains_inanyorder( CompletionEntryMatcher( 'test', 
                                  'TestFactory.Bar',
                                  { 'kind': 'Field' } ),
          CompletionEntryMatcher( 'testString',
                                  'TestFactory.Bar',
                                  { 'kind': 'Field' } )
        ),
        'errors': empty(),
      } )
    },
  } )


@SharedYcmd
def GetCompletions_DetailFromCache_test( app ):
  for i in range( 0, 2 ):
    RunTest( app, {
      'description': 'completion works when the elements come from the cache',
      'request': {
        'filetype'  : 'java',
        'filepath'  : ProjectPath( 'TestLauncher.java' ),
        'line_num'  : 32,
        'column_num': 15,
      },
      'expect': {
        'response': requests.codes.ok,
        'data': has_entries( {
          'completion_start_column': 11,
          'completions': has_item(
            CompletionEntryMatcher(
              'doSomethingVaguelyUseful',
              'AbstractTestWidget',
              { 'kind': 'Method',
                'menu_text': 'doSomethingVaguelyUseful() : void' } ) ),
          'errors': empty(),
        } )
      },
    } )


@SharedYcmd
def GetCompletions_Package_test( app ):
  RunTest( app, {
    'description': 'completion works for package statements',
    'request': {
      'filetype'  : 'java',
      'filepath'  : ProjectPath( 'wobble', 'Wibble.java' ),
      'line_num'  : 1,
      'column_num': 18,
    },
    'expect': {
      'response': requests.codes.ok,
      'data': has_entries( {
        'completion_start_column': 9,
        'completions': contains(
          CompletionEntryMatcher( 'com.test.wobble', None,
                                  { 'kind': 'Module' } ),
        ),
        'errors': empty(),
      } )
    },
  } )


@SharedYcmd
def GetCompletions_Import_Class_test( app ):
  RunTest( app, {
    'description': 'completion works for import statements with a single class',
    'request': {
      'filetype'  : 'java',
      'filepath'  : ProjectPath( 'TestLauncher.java' ),
      'line_num'  : 3,
      'column_num': 34,
    },
    'expect': {
      'response': requests.codes.ok,
      'data': has_entries( {
        'completion_start_column': 34,
        'completions': contains(
          CompletionEntryMatcher( 'Tset;', None, {
            'menu_text': 'Tset - com.youcompleteme.testing',
            'kind': 'Class',
          } )
        ),
        'errors': empty(),
      } )
    },
  } )


@SharedYcmd
def GetCompletions_Import_Classes_test( app ):
  filepath = ProjectPath( 'TestLauncher.java' )
  RunTest( app, {
    'description': 'completion works for imports with multiple classes',
    'request': {
      'filetype'  : 'java',
      'filepath'  : filepath,
      'line_num'  : 4,
      'column_num': 52,
    },
    'expect': {
      'response': requests.codes.ok,
      'data': has_entries( {
        'completion_start_column': 52,
        'completions': contains(
          CompletionEntryMatcher( 'A;', None, {
            'menu_text': 'A - com.test.wobble',
            'kind': 'Class',
          } ),
          CompletionEntryMatcher( 'A_Very_Long_Class_Here;', None, {
            'menu_text': 'A_Very_Long_Class_Here - com.test.wobble',
            'kind': 'Class',
          } ),
          CompletionEntryMatcher( 'Waggle;', None, {
            'menu_text': 'Waggle - com.test.wobble',
            'kind': 'Interface',
          } ),
          CompletionEntryMatcher( 'Wibble;', None, {
            'menu_text': 'Wibble - com.test.wobble',
            'kind': 'Enum',
          } ),
        ),
        'errors': empty(),
      } )
    },
  } )


@SharedYcmd
def GetCompletions_Import_ModuleAndClass_test( app ):
  filepath = ProjectPath( 'TestLauncher.java' )
  RunTest( app, {
    'description': 'completion works for imports of classes and modules',
    'request': {
      'filetype'  : 'java',
      'filepath'  : filepath,
      'line_num'  : 3,
      'column_num': 26,
    },
    'expect': {
      'response': requests.codes.ok,
      'data': has_entries( {
        'completion_start_column': 26,
        'completions': contains(
          CompletionEntryMatcher( 'testing.*;', None, {
            'menu_text': 'com.youcompleteme.testing',
            'kind': 'Module',
          } ),
          CompletionEntryMatcher( 'Test;', None, {
            'menu_text': 'Test - com.youcompleteme',
            'kind': 'Class',
          } ),
        ),
        'errors': empty(),
      } )
    },
  } )


@SharedYcmd
def GetCompletions_WithFixIt_test( app ):
  filepath = ProjectPath( 'TestFactory.java' )
  RunTest( app, {
    'description': 'semantic completion works when an additional textEdit '
                   'is required',
    'request': {
      'filetype'  : 'java',
      'filepath'  : filepath,
'line_num' : 19, 'column_num': 25, }, 'expect': { 'response': requests.codes.ok, 'data': has_entries( { 'completion_start_column': 22, 'completions': contains_inanyorder( CompletionEntryMatcher( 'CUTHBERT', 'com.test.wobble.Wibble', { 'kind': 'EnumMember', 'extra_data': has_entries( { 'fixits': contains( has_entries( { 'chunks': contains( ChunkMatcher( 'Wibble', LocationMatcher( filepath, 19, 15 ), LocationMatcher( filepath, 19, 21 ) ), # OK, so it inserts the import ChunkMatcher( '\n\nimport com.test.wobble.Wibble;\n\n', LocationMatcher( filepath, 1, 18 ), LocationMatcher( filepath, 3, 1 ) ), ), } ) ), } ), } ), ), 'errors': empty(), } ) }, } ) @SharedYcmd def GetCompletions_RejectMultiLineInsertion_test( app ): filepath = ProjectPath( 'TestLauncher.java' ) RunTest( app, { 'description': 'completion item discarded when not valid', 'request': { 'filetype' : 'java', 'filepath' : filepath, 'line_num' : 28, 'column_num' : 16, 'force_semantic': True }, 'expect': { 'response': requests.codes.ok, 'data': has_entries( { 'completion_start_column': 16, 'completions': contains( CompletionEntryMatcher( 'TestLauncher', 'com.test.TestLauncher', { 'kind': 'Constructor' } ) # Note: There would be a suggestion here for the _real_ thing we want, # which is a TestLauncher.Launchable, but this would generate the code # for an anonymous inner class via a completion TextEdit (not # AdditionalTextEdit) which we don't support. ), 'errors': empty(), } ) }, } ) @SharedYcmd def GetCompletions_UnicodeIdentifier_test( app ): filepath = PathToTestFile( DEFAULT_PROJECT_DIR, 'src', 'com', 'youcompleteme', 'Test.java' ) RunTest( app, { 'description': 'Completion works for unicode identifier', 'request': { 'filetype' : 'java', 'filepath' : filepath, 'line_num' : 16, 'column_num' : 35, 'force_semantic': True }, 'expect': { 'response': requests.codes.ok, 'data': has_entries( { 'completion_start_column': 35, 'completions': contains_inanyorder( *WithObjectMethods( CompletionEntryMatcher( 'a_test', 'Test.TéstClass', { 'kind': 'Field', 'detailed_info': 'a_test : int\n\n', } ), CompletionEntryMatcher( 'åtest', 'Test.TéstClass', { 'kind': 'Field', 'detailed_info': 'åtest : boolean\n\n', } ), CompletionEntryMatcher( 'testywesty', 'Test.TéstClass', { 'kind': 'Field', } ), ) ), 'errors': empty(), } ) }, } ) @SharedYcmd def GetCompletions_ResolveFailed_test( app ): filepath = PathToTestFile( DEFAULT_PROJECT_DIR, 'src', 'com', 'youcompleteme', 'Test.java' ) from ycmd.completers.language_server import language_server_protocol as lsapi def BrokenResolveCompletion( request_id, completion ): return lsapi.BuildRequest( request_id, 'completionItem/FAIL', completion ) with patch( 'ycmd.completers.language_server.language_server_protocol.' 
              'ResolveCompletion',
              side_effect = BrokenResolveCompletion ):
    RunTest( app, {
      'description': 'Completion works for unicode identifier even when '
                     'the resolve request fails',
      'request': {
        'filetype'      : 'java',
        'filepath'      : filepath,
        'line_num'      : 16,
        'column_num'    : 35,
        'force_semantic': True
      },
      'expect': {
        'response': requests.codes.ok,
        'data': has_entries( {
          'completion_start_column': 35,
          'completions': contains_inanyorder( *WithObjectMethods(
            CompletionEntryMatcher( 'a_test', 'Test.TéstClass', {
              'kind': 'Field',
              'detailed_info': 'a_test : int\n\n',
            } ),
            CompletionEntryMatcher( 'åtest', 'Test.TéstClass', {
              'kind': 'Field',
              'detailed_info': 'åtest : boolean\n\n',
            } ),
            CompletionEntryMatcher( 'testywesty', 'Test.TéstClass', {
              'kind': 'Field',
            } ),
          ) ),
          'errors': empty(),
        } )
      },
    } )


@IsolatedYcmd
def GetCompletions_ServerNotInitialized_test( app ):
  filepath = PathToTestFile( 'simple_eclipse_project',
                             'src',
                             'com',
                             'test',
                             'AbstractTestWidget.java' )

  completer = handlers._server_state.GetFiletypeCompleter( [ 'java' ] )

  def MockHandleInitializeInPollThread( self, response ):
    pass

  with patch.object( completer,
                     '_HandleInitializeInPollThread',
                     MockHandleInitializeInPollThread ):
    RunTest( app, {
      'description': 'Completion requests return no candidates while the '
                     'server is still initializing',
      'request': {
        'filetype'      : 'java',
        'filepath'      : filepath,
        'line_num'      : 16,
        'column_num'    : 35,
        'force_semantic': True
      },
      'expect': {
        'response': requests.codes.ok,
        'data': has_entries( {
          'errors': empty(),
          'completions': empty(),
          'completion_start_column': 6
        } ),
      }
    } )


@SharedYcmd
def GetCompletions_MoreThan100FilteredResolve_test( app ):
  RunTest( app, {
    'description': 'More than 100 items match, but the filtered set is '
                   'smaller, as this depends on max_num_candidates',
    'request': {
      'filetype'  : 'java',
      'filepath'  : ProjectPath( 'TestLauncher.java' ),
      'line_num'  : 4,
      'column_num': 15,
    },
    'expect': {
      'response': requests.codes.ok,
      'data': has_entries( {
        'completions': has_item(
          CompletionEntryMatcher( 'com.youcompleteme.*;', None, {
            'kind': 'Module',
            'detailed_info': 'com.youcompleteme\n\n',
          } ),
        ),
        'completion_start_column': 8,
        'errors': empty(),
      } )
    },
  } )


@SharedYcmd
def GetCompletions_MoreThan100ForceSemantic_test( app ):
  RunTest( app, {
    'description': 'When forcing we pass the query, which reduces candidates',
    'request': {
      'filetype'      : 'java',
      'filepath'      : ProjectPath( 'TestLauncher.java' ),
      'line_num'      : 4,
      'column_num'    : 15,
      'force_semantic': True
    },
    'expect': {
      'response': requests.codes.ok,
      'data': has_entries( {
        'completions': contains(
          CompletionEntryMatcher( 'com.youcompleteme.*;', None, {
            'kind': 'Module',
            'detailed_info': 'com.youcompleteme\n\n',
          } ),
          CompletionEntryMatcher( 'com.youcompleteme.testing.*;', None, {
            'kind': 'Module',
            'detailed_info': 'com.youcompleteme.testing\n\n',
          } ),
        ),
        'completion_start_column': 8,
        'errors': empty(),
      } )
    },
  } )


@SharedYcmd
def GetCompletions_ForceAtTopLevel_NoImport_test( app ):
  RunTest( app, {
    'description': 'When forcing semantic completion, pass the query to '
                   'the server',
    'request': {
      'filetype'      : 'java',
      'filepath'      : ProjectPath( 'TestWidgetImpl.java' ),
      'line_num'      : 30,
      'column_num'    : 20,
      'force_semantic': True,
    },
    'expect': {
      'response': requests.codes.ok,
      'data': has_entries( {
        'completions': contains(
          CompletionEntryMatcher( 'TestFactory', None, {
            'kind': 'Class',
            'menu_text': 'TestFactory - com.test',
          } ),
        ),
        'completion_start_column': 12,
        'errors': empty(),
      } )
    },
  } )


@SharedYcmd
def GetCompletions_NoForceAtTopLevel_NoImport_test( app ):
  RunTest( app, {
    'description': 'When not forcing semantic completion, use no context',
    'request': {
      'filetype' :
'java', 'filepath' : ProjectPath( 'TestWidgetImpl.java' ), 'line_num' : 30, 'column_num': 20, 'force_semantic': False, }, 'expect': { 'response': requests.codes.ok, 'data': has_entries( { 'completions': contains( CompletionEntryMatcher( 'TestFactory', '[ID]', {} ), ), 'completion_start_column': 12, 'errors': empty(), } ) }, } ) @SharedYcmd def GetCompletions_ForceAtTopLevel_WithImport_test( app ): filepath = ProjectPath( 'TestWidgetImpl.java' ) RunTest( app, { 'description': 'Top level completions have import FixIts', 'request': { 'filetype' : 'java', 'filepath' : filepath, 'line_num' : 34, 'column_num': 16, 'force_semantic': True, }, 'expect': { 'response': requests.codes.ok, 'data': has_entries( { 'completions': has_item( CompletionEntryMatcher( 'InputStreamReader', None, { 'kind': 'Class', 'menu_text': 'InputStreamReader - java.io', 'extra_data': has_entries( { 'fixits': contains( has_entries( { 'chunks': contains( ChunkMatcher( '\n\nimport java.io.InputStreamReader;\n\n', LocationMatcher( filepath, 1, 18 ), LocationMatcher( filepath, 3, 1 ) ), ), } ) ), } ), } ), ), 'completion_start_column': 12, 'errors': empty(), } ) }, } ) @SharedYcmd def GetCompletions_UseServerTriggers_test( app ): filepath = ProjectPath( 'TestWidgetImpl.java' ) RunTest( app, { 'description': 'We use the semantic triggers from the server (@ here)', 'request': { 'filetype' : 'java', 'filepath' : filepath, 'line_num' : 24, 'column_num': 7, 'force_semantic': False, }, 'expect': { 'response': requests.codes.ok, 'data': has_entries( { 'completion_start_column': 4, 'completions': has_item( CompletionEntryMatcher( 'Override', None, { 'kind': 'Interface', 'menu_text': 'Override - java.lang', } ) ) } ) } } )
version_string = "0.1dev"
import os import sys if __name__ == "__main__": os.environ.setdefault("DJANGO_SETTINGS_MODULE", "multidatabase_project.settings") from django.core.management import execute_from_command_line execute_from_command_line(sys.argv)
__author__ = 'onelife'
__license__ = "GPLv3"
__version__ = '1.30'

__setup = False
__depth = 1

import sys
from os import path

# Walk up the call stack; if any caller is setup.py, we are being imported
# during installation and must not pull in the runtime modules below.
while True:
    try:
        stack = sys._getframe(__depth)
        __depth += 1
    except ValueError:
        # sys._getframe raises ValueError once __depth exceeds the stack.
        break
    if path.basename(stack.f_globals.get('__file__', '')) == 'setup.py':
        __setup = True

if not __setup:
    from .pyusblamp import *
    from .imap2usblamp import *
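# A standalone sketch (not executed here) of the same stack-walking pattern,
# usable to list the whole import chain:
#
#     import sys
#     depth = 0
#     while True:
#         try:
#             frame = sys._getframe(depth)
#         except ValueError:
#             break
#         print(depth, frame.f_globals.get('__file__', '<interactive>'))
#         depth += 1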
""" RASP: Rapid Amplicon Sequence Pipeline Copyright (C) 2016, Jakob Willforss and Björn Canbäck All rights reserved. This file is part of RASP. RASP is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. RASP is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with RASP. If not, <http://www.gnu.org/licenses/>. """ import argparse import re program_description = """ Filter OTUs based on their taxa level Uses table containing information and OTUs and their taxa depth Filters away OTUs in OTU file that hasn't been determined on sufficient depth Can also be used to filter abundancy matrix and rdp-fixrank-output """ FILTER_DEPTH_THRESHOLD = 3 OTU_PATTERN = re.compile(r'OTU\d+') def main(): args = parse_arguments() filehandles = open_filehandles(args) # Mandatory filehandles. Will never return None. otu_fh = filehandles[0] tax_table_fh = filehandles[1] filtered_otu_fh = filehandles[2] # Optional, None is returned if option isn't specified. abund_matrix_fh = filehandles[3] filtered_abund_matrix_fh = filehandles[4] fixrank_fh = filehandles[5] # filtered_fixrank_fh = filehandles[6] tax_depth_dict = get_tax_depth_dict(tax_table_fh) tax_table_fh.close() # Produces filtered otu file remaining_labels = output_filtered_otus(otu_fh, filtered_otu_fh, tax_depth_dict, FILTER_DEPTH_THRESHOLD) # Produces filtered abundancy-matrix file if args.abund_matrix: output_linefiltered_file(abund_matrix_fh, filtered_abund_matrix_fh, remaining_labels) # Produces filtered fix-rank file if args.fixed_rank: output_linefiltered_file(fixrank_fh, tax_depth_dict, remaining_labels) for fh in filehandles: if fh is not None: fh.close() def parse_arguments(): """Parses command line arguments""" parser = argparse.ArgumentParser(description=program_description) parser.add_argument('-i', '--input', required=True) parser.add_argument('-t', '--taxa_table', required=True) # Optional files to parse parser.add_argument('-a', '--abund_matrix') parser.add_argument('-f', '--fixed_rank') parser.add_argument('-d', '--output_dir', required=True) parser.add_argument('-s', '--suffix', default='.taxfiltered') args = parser.parse_args() return args def open_filehandles(args): """ Opens and returns file handles Mandatory (1) OTUs (input) (2) OTU/taxa-table (input) (3) Filtered OTUs (output) Optional (returns None-handle if option not specified) (4) Abundancy matrix (input) (5) Filtered abundancy matrix (output) (6) Fixrank file (input) (7) Filtered fixrank file (output) """ out_format_string = args.output_dir + '/{}' + args.suffix otu_name = args.input.split('/')[-1] otu_fh = open(args.input, 'r') tax_table_fh = open(args.taxa_table, 'r') filtered_otu_fh = open(out_format_string.format(otu_name), 'w') abund_matrix_fh = None filtered_abund_matrix_fh = None if args.abund_matrix: abund_matrix_name = args.abund_matrix.split('/')[-1] abund_matrix_fh = open(args.abund_matrix, 'r') filtered_abund_matrix_fh = open(out_format_string.format(abund_matrix_name), 'w') fixrank_fh = None filtered_fixrank_fh = None if args.fixed_rank: fixrank_name = args.fixed_rank.split('/')[-1] fixrank_fh = open(args.fixed_rank, 'r') filtered_fixrank_fh = 
    fhs = [otu_fh,                    # (1)
           tax_table_fh,              # (2)
           filtered_otu_fh,           # (3)
           abund_matrix_fh,           # (4)
           filtered_abund_matrix_fh,  # (5)
           fixrank_fh,                # (6)
           filtered_fixrank_fh]       # (7)

    return fhs


def output_linefiltered_file(input_fh, output_fh, remaining_labels):
    """
    Output filtered files with lines starting with 'OTU\\d+'
    """
    for line in input_fh:
        line = line.rstrip()
        re_match = re.match(OTU_PATTERN, line)

        if re_match is None:
            raise Exception('Unexpected input, each line should start '
                            'with "OTU\\d+"')

        otu = re_match.group(0)
        if otu in remaining_labels:
            output_fh.write('{}\n'.format(line))


def output_filtered_otus(input_otu_fh, output_otu_fh, tax_depth_dict,
                         tax_depth_threshold):
    """
    Output those OTUs that are identified at a taxa depth of filter_depth
    or deeper
    """
    remaining_labels = []
    output_flag = False
    for line in input_otu_fh:
        line = line.rstrip()
        if line.startswith('>'):
            label = line.replace('>', '')
            output_flag = evaluate_otu(label, tax_depth_dict,
                                       tax_depth_threshold)
            if output_flag:
                remaining_labels.append(label)

        if output_flag:
            output_otu_fh.write('{}\n'.format(line))
    return remaining_labels


def evaluate_otu(label, tax_depth_dict, tax_depth_threshold):
    """
    Check OTU header in OTU file in the tax_depth_dict to evaluate
    how deep the RDP taxa identification managed to go

    Returns a bool showing whether the identification went deep enough
    """
    otu_taxa_depth = tax_depth_dict.get(label)

    if otu_taxa_depth is None:
        print('WARNING - The key {} was not present in dictionary'
              .format(label))
        return False

    return otu_taxa_depth >= tax_depth_threshold


def get_tax_depth_dict(tax_table_fh):
    """Retrieve dictionary with taxa depth from otu taxa table"""
    tax_depth_dict = {}
    for line in tax_table_fh:
        line_args = line.split('\t')
        otu = line_args[0]
        depth = line_args[2]
        tax_depth_dict[otu] = int(depth)
    return tax_depth_dict


if __name__ == '__main__':
    main()
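# A small illustration (sketch with fabricated values) of how the pieces
# above fit together; FILTER_DEPTH_THRESHOLD is 3:
#
#     taxa-table line 'OTU1\tBacteria;Firmicutes\t4' yields
#         tax_depth_dict == {'OTU1': 4}
#     evaluate_otu('OTU1', {'OTU1': 4}, FILTER_DEPTH_THRESHOLD)  # True  (4 >= 3)
#     evaluate_otu('OTU2', {'OTU2': 2}, FILTER_DEPTH_THRESHOLD)  # False (2 < 3)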
__author__ = 'Andrea "TexZK" Zoppi' __all__ = ('audio', 'compression', 'game', 'graphics', 'persistence', 'utils')
import datetime import logging import pytz def setup_logging(log_file_path=None): logging.basicConfig( filename=log_file_path, # stream=sys.stdout, # format='[%(asctime)s][%(name)s][PID: %(process)d][%(levelname)6s]: %(message)s', format='[%(asctime)s][%(threadName)10s][%(levelname)6s][%(name)s]: %(message)s', level=logging.DEBUG ) def utc_now(): """ :return datetime.datetime: Current utc datetime with specified timezone """ u = datetime.datetime.utcnow() u = u.replace(tzinfo=pytz.utc) return u
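# Usage sketch (illustrative; the log path is made up):
#
#     setup_logging('/tmp/app.log')
#     logging.getLogger(__name__).info('started at %s', utc_now())
#
# utc_now() is timezone-aware (tzinfo=pytz.utc), so it can safely be
# compared with or subtracted from other aware datetimes.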
"""Factory functions for creating proximal operators. Functions with ``convex_conj`` mean the proximal of the convex conjugate and are provided for convenience. For more details see :ref:`proximal_operators` and references therein. For more details on proximal operators including how to evaluate the proximal operator of a variety of functions see [PB2014]. References ---------- [PB2014] Parikh, N, and Boyd, S. *Proximal Algorithms*. Foundations and Trends in Optimization, 1 (2014), pp 127-239. """ from __future__ import print_function, division, absolute_import import numpy as np from odl.operator import (Operator, IdentityOperator, ScalingOperator, ConstantOperator, DiagonalOperator, PointwiseNorm, MultiplyOperator) from odl.space import ProductSpace from odl.set.space import LinearSpaceElement __all__ = ('combine_proximals', 'proximal_convex_conj', 'proximal_translation', 'proximal_arg_scaling', 'proximal_quadratic_perturbation', 'proximal_composition', 'proximal_const_func', 'proximal_box_constraint', 'proximal_nonnegativity', 'proximal_l1', 'proximal_convex_conj_l1', 'proximal_l2', 'proximal_convex_conj_l2', 'proximal_l2_squared', 'proximal_convex_conj_l2_squared', 'proximal_l1_l2', 'proximal_convex_conj_l1_l2', 'proximal_convex_conj_kl', 'proximal_convex_conj_kl_cross_entropy', 'proximal_huber') def combine_proximals(*factory_list): """Combine proximal operators into a diagonal product space operator. This assumes the functional to be separable across variables in order to make use of the separable sum property of proximal operators. Parameters ---------- factory_list : sequence of callables Proximal operator factories to be combined. Returns ------- diag_op : function Returns a diagonal product space operator factory to be initialized with the same step size parameter Notes ----- That two functionals :math:`F` and :math:`G` are separable across variables means that :math:`F((x, y)) = F(x)` and :math:`G((x, y)) = G(y)`, and in this case the proximal operator of the sum is given by .. math:: \mathrm{prox}_{\\sigma (F(x) + G(y))}(x, y) = (\mathrm{prox}_{\\sigma F}(x), \mathrm{prox}_{\\sigma G}(y)). """ def diag_op_factory(sigma): """Diagonal matrix of operators. Parameters ---------- sigma : positive float or sequence of positive floats Step size parameter(s), if a sequence, the length must match the length of the ``factory_list``. Returns ------- diag_op : `DiagonalOperator` """ if np.isscalar(sigma): sigma = [sigma] * len(factory_list) return DiagonalOperator( *[factory(sigmai) for sigmai, factory in zip(sigma, factory_list)]) return diag_op_factory def proximal_convex_conj(prox_factory): """Calculate the proximal of the dual using Moreau decomposition. Parameters ---------- prox_factory : callable A factory function that, when called with a step size, returns the proximal operator of ``F`` Returns ------- prox_factory : function Factory for the proximal operator to be initialized Notes ----- The Moreau identity states that for any convex function :math:`F` with convex conjugate :math:`F^*`, the proximals satisfy .. math:: \mathrm{prox}_{\\sigma F^*}(x) +\\sigma \, \mathrm{prox}_{F / \\sigma}(x / \\sigma) = x where :math:`\\sigma` is a scalar step size. Using this, the proximal of the convex conjugate is given by .. math:: \mathrm{prox}_{\\sigma F^*}(x) = x - \\sigma \, \mathrm{prox}_{F / \\sigma}(x / \\sigma) Note that since :math:`(F^*)^* = F`, this can be used to get the proximal of the original function from the proximal of the convex conjugate. 
    For reference on the Moreau identity, see [CP2011c].

    References
    ----------
    [CP2011c] Combettes, P L, and Pesquet, J-C. *Proximal splitting
    methods in signal processing.* In: Bauschke, H H, Burachik, R S,
    Combettes, P L, Elser, V, Luke, D R, and Wolkowicz, H. Fixed-point
    algorithms for inverse problems in science and engineering, Springer,
    2011.
    """
    def convex_conj_prox_factory(sigma):
        """Create proximal for the dual with a given sigma.

        Parameters
        ----------
        sigma : positive float or array-like
            Step size parameter. Can be a pointwise positive space element
            or a sequence of positive floats if `prox_factory` supports
            that.

        Returns
        -------
        proximal : `Operator`
            The proximal operator of ``s * F^*`` where ``s`` is the
            step size
        """
        # Get the underlying space. At the same time, check if the given
        # prox_factory accepts stepsize objects of the type given by sigma.
        space = prox_factory(sigma).domain
        mult_inner = MultiplyOperator(1.0 / sigma, domain=space, range=space)
        mult_outer = MultiplyOperator(sigma, domain=space, range=space)
        result = (IdentityOperator(space) -
                  mult_outer * prox_factory(1.0 / sigma) * mult_inner)
        return result

    return convex_conj_prox_factory


def proximal_translation(prox_factory, y):
    """Calculate the proximal of the translated function F(x - y).

    Parameters
    ----------
    prox_factory : callable
        A factory function that, when called with a step size, returns the
        proximal operator of ``F``.
    y : Element in domain of ``F``.

    Returns
    -------
    prox_factory : function
        Factory for the proximal operator to be initialized

    Notes
    -----
    Given a functional :math:`F`, this is calculated according to the rule

    .. math::
        \mathrm{prox}_{\\sigma F( \cdot - y)}(x) =
        y + \mathrm{prox}_{\\sigma F}(x - y)

    where :math:`y` is the translation, and :math:`\\sigma` is the step
    size.

    For reference on the identity used, see [CP2011c].

    References
    ----------
    [CP2011c] Combettes, P L, and Pesquet, J-C. *Proximal splitting
    methods in signal processing.* In: Bauschke, H H, Burachik, R S,
    Combettes, P L, Elser, V, Luke, D R, and Wolkowicz, H. Fixed-point
    algorithms for inverse problems in science and engineering, Springer,
    2011.
    """
    def translation_prox_factory(sigma):
        """Create proximal for the translation with a given sigma.

        Parameters
        ----------
        sigma : positive float
            Step size parameter

        Returns
        -------
        proximal : `Operator`
            The proximal operator of ``s * F( . - y)`` where ``s`` is the
            step size
        """
        return (ConstantOperator(y) + prox_factory(sigma) *
                (IdentityOperator(y.space) - ConstantOperator(y)))

    return translation_prox_factory


def proximal_arg_scaling(prox_factory, scaling):
    """Calculate the proximal of function F(x * scaling).

    Parameters
    ----------
    prox_factory : callable
        A factory function that, when called with a step size, returns the
        proximal operator of ``F``
    scaling : float or sequence of floats or space element
        Scaling parameter. The permissible types depend on the stepsizes
        accepted by prox_factory. It may not contain any nonzero imaginary
        parts. If it is a scalar, it may be zero, in which case the
        resulting proximal operator is the identity. If not a scalar, it
        may not contain any zero components.

    Returns
    -------
    prox_factory : function
        Factory for the proximal operator to be initialized

    Notes
    -----
    Given a functional :math:`F`, and scaling factor :math:`\\alpha` this
    is calculated according to the rule

    .. math::
        \mathrm{prox}_{\\sigma F(\\alpha \, \cdot)}(x) =
        \\frac{1}{\\alpha}
        \mathrm{prox}_{\\sigma \\alpha^2 F(\cdot) }(\\alpha x)

    where :math:`\\sigma` is the step size.
    For reference on the identity used, see [CP2011c].

    References
    ----------
    [CP2011c] Combettes, P L, and Pesquet, J-C. *Proximal splitting
    methods in signal processing.* In: Bauschke, H H, Burachik, R S,
    Combettes, P L, Elser, V, Luke, D R, and Wolkowicz, H. Fixed-point
    algorithms for inverse problems in science and engineering, Springer,
    2011.
    """
    # To begin, we could check for two things:
    # * Currently, we do not support complex scaling. We could therefore
    #   catch nonempty imaginary parts.
    # * If some components of scaling are zero, then the following routine
    #   will crash with a division-by-zero error. The correct solution would
    #   be to just keep these components and do the following computations
    #   only for the others.
    # Since these checks are computationally expensive, we do not execute
    # them unconditionally, but only if the scaling factor is a scalar:
    if np.isscalar(scaling):
        if scaling == 0:
            return proximal_const_func(prox_factory(1.0).domain)
        elif scaling.imag != 0:
            raise ValueError("Complex scaling not supported.")
        else:
            scaling = float(scaling)
    else:
        scaling = np.asarray(scaling)

    def arg_scaling_prox_factory(sigma):
        """Create proximal for the argument scaling with a given sigma.

        Parameters
        ----------
        sigma : positive float
            Step size parameter

        Returns
        -------
        proximal : `Operator`
            The proximal operator of ``sigma * F( . * a)`` where ``sigma``
            is the step size
        """
        scaling_square = scaling * scaling
        prox = prox_factory(sigma * scaling_square)
        space = prox.domain
        mult_inner = MultiplyOperator(scaling, domain=space, range=space)
        mult_outer = MultiplyOperator(1 / scaling, domain=space, range=space)
        return mult_outer * prox * mult_inner

    return arg_scaling_prox_factory


def proximal_quadratic_perturbation(prox_factory, a, u=None):
    """Calculate the proximal of function F(x) + a * \|x\|^2 + <u,x>.

    Parameters
    ----------
    prox_factory : callable
        A factory function that, when called with a step size, returns the
        proximal operator of ``F``
    a : non-negative float
        Scaling of the quadratic term
    u : Element in domain of F, optional
        Defines the linear functional. For ``None``, the zero element is
        taken.

    Returns
    -------
    prox_factory : function
        Factory for the proximal operator to be initialized

    Notes
    -----
    Given a functional :math:`F`, this is calculated according to the rule

    .. math::
        \mathrm{prox}_{\\sigma \left(F( \cdot ) + a \| \cdot \|^2 +
        <u, \cdot >\\right)}(x) =
        c \; \mathrm{prox}_{\\sigma F( \cdot \, c)}((x - \\sigma u) c)

    where :math:`c` is the constant

    .. math::
        c = \\frac{1}{\\sqrt{2 \\sigma a + 1}},

    :math:`a` is the scaling parameter belonging to the quadratic term,
    :math:`u` is the space element defining the linear functional, and
    :math:`\\sigma` is the step size.

    For reference on the identity used, see [CP2011c]. Note that this
    identity is not the exact one given in the reference, but was
    recalculated for arbitrary step lengths.

    References
    ----------
    [CP2011c] Combettes, P L, and Pesquet, J-C. *Proximal splitting
    methods in signal processing.* In: Bauschke, H H, Burachik, R S,
    Combettes, P L, Elser, V, Luke, D R, and Wolkowicz, H. Fixed-point
    algorithms for inverse problems in science and engineering, Springer,
    2011.
    """
    a = float(a)
    if a < 0:
        raise ValueError('scaling parameter must be non-negative, got {}'
                         ''.format(a))

    if u is not None and not isinstance(u, LinearSpaceElement):
        raise TypeError('`u` must be `None` or a `LinearSpaceElement` '
                        'instance, got {!r}.'.format(u))

    def quadratic_perturbation_prox_factory(sigma):
        """Create proximal for the quadratic perturbation with a given sigma.

        Parameters
        ----------
        sigma : positive float
            Step size parameter

        Returns
        -------
        proximal : `Operator`
            The proximal operator of
            ``sigma * (F(x) + a * \|x\|^2 + <u,x>)``, where ``sigma`` is
            the step size
        """
        if np.isscalar(sigma):
            sigma = float(sigma)
        else:
            sigma = np.asarray(sigma)

        const = 1.0 / np.sqrt(sigma * 2.0 * a + 1)
        prox = proximal_arg_scaling(prox_factory, const)(sigma)
        if u is not None:
            return (MultiplyOperator(const, domain=u.space, range=u.space) *
                    prox *
                    (MultiplyOperator(const, domain=u.space, range=u.space) -
                     sigma * const * u))
        else:
            space = prox.domain
            return (MultiplyOperator(const, domain=space, range=space) *
                    prox * MultiplyOperator(const, domain=space, range=space))

    return quadratic_perturbation_prox_factory


def proximal_composition(proximal, operator, mu):
    """Proximal operator factory of functional composed with unitary operator.

    For a functional ``F`` and a linear unitary `Operator` ``L`` this is the
    factory for the proximal operator of ``F * L``.

    Parameters
    ----------
    proximal : callable
        A factory function that, when called with a step size, returns the
        proximal operator of ``F``
    operator : `Operator`
        The operator to compose the functional with
    mu : ``operator.field`` element
        Scalar such that ``(operator.adjoint * operator)(x) = mu * x``

    Returns
    -------
    prox_factory : function
        Factory for the proximal operator to be initialized

    Notes
    -----
    Given a linear operator :math:`L` with the property that for a scalar
    :math:`\\mu`

    .. math::
        L^*(L(x)) = \\mu * x

    and a convex function :math:`F`, the following identity holds

    .. math::
        \mathrm{prox}_{\\sigma F \circ L}(x) = x + \\frac{1}{\\mu}
        L^* \left( \mathrm{prox}_{\\mu \\sigma F}(Lx) - Lx \\right)

    This factory function implements this functionality.

    There is no simple formula for more general operators.

    The function cannot verify that the operator is unitary, the user needs
    to verify this.

    For reference on the identity used, see [CP2011c].

    References
    ----------
    [CP2011c] Combettes, P L, and Pesquet, J-C. *Proximal splitting
    methods in signal processing.* In: Bauschke, H H, Burachik, R S,
    Combettes, P L, Elser, V, Luke, D R, and Wolkowicz, H. Fixed-point
    algorithms for inverse problems in science and engineering, Springer,
    2011.
    """
    def proximal_composition_factory(sigma):
        """Create proximal for the composition with a given sigma.

        Parameters
        ----------
        sigma : positive float
            Step size parameter

        Returns
        -------
        proximal : `Operator`
            The proximal operator of ``prox[sigma * F * L](x)``
        """
        Id = IdentityOperator(operator.domain)
        Ir = IdentityOperator(operator.range)
        prox_muf = proximal(mu * sigma)
        return (Id + (1.0 / mu) * operator.adjoint * ((prox_muf - Ir) *
                                                      operator))

    return proximal_composition_factory


def proximal_const_func(space):
    """Proximal operator factory of the constant functional.

    Function to initialize the proximal operator of the constant functional
    defined on ``space``.

    Parameters
    ----------
    space : `LinearSpace`
        Domain of the functional G=constant

    Returns
    -------
    prox_factory : function
        Factory for the proximal operator to be initialized

    Notes
    -----
    The constant functional :math:`G` is defined as :math:`G(x) = constant`
    for all values of :math:`x`. The proximal operator of this functional
    is the identity operator

    .. math::
        \mathrm{prox}_{\\sigma G}(x) = x

    Note that it is independent of :math:`\\sigma`.
    """
    def identity_factory(sigma):
        """Return an instance of the proximal operator.

        Parameters
        ----------
        sigma : positive float
            Unused step size parameter. Introduced to provide a unified
            interface.
Returns ------- id : `IdentityOperator` The proximal operator instance of G = 0 which is the identity operator """ return IdentityOperator(space) return identity_factory def proximal_box_constraint(space, lower=None, upper=None): """Proximal operator factory for ``G(x) = ind(a <= x <= b)``. If P is the set of elements with a <= x <= b, the indicator function of which is defined as:: ind(a <= x <= b) = {0 if x in P, infinity if x is not in P} with x being an element in ``space``. Parameters ---------- space : `LinearSpace` Domain of the functional G(x) lower : ``space.field`` element or ``space`` `element-like`, optional The lower bound. Default: ``None``, interpreted as -infinity upper : ``space.field`` element or ``space`` `element-like`, optional The upper bound. Default: ``None``, interpreted as +infinity Returns ------- prox_factory : function Factory for the proximal operator to be initialized Notes ----- If :math:`P` is an interval :math:`[a,b]`, the indicator function is defined as .. math:: I_{P}(x) = \\begin{cases} 0 & \\text{if } x \\in P, \\\\ \\infty & \\text{if } x \\not \\in P \\end{cases} For a step size :math:`\\sigma`, the proximal operator of :math:`\\sigma I_{P}` is given by the projection onto the interval .. math:: \mathrm{prox}_{\\sigma I_{P}}(x) = \\begin{cases} a & \\text{if } x < a, \\\\ x & \\text{if } x \\in [a,b], \\\\ b & \\text{if } x > b. \\end{cases} The proximal operator is independent of :math:`\\sigma` and invariant under a positive rescaling of :math:`I_{P}(x)`, since that leaves the indicator function unchanged. For spaces of the form :math:`R^n`, the definition extends naturally in each component. See Also -------- proximal_nonnegativity : Special case with ``lower=0, upper=infty`` """ # Convert element-likes if needed, also does some space checking if lower is not None and lower not in space and lower not in space.field: lower = space.element(lower) if upper is not None and upper not in space and upper not in space.field: upper = space.element(upper) if lower in space.field and upper in space.field: if lower > upper: raise ValueError('invalid values, `lower` ({}) > `upper` ({})' ''.format(lower, upper)) class ProxOpBoxConstraint(Operator): """Proximal operator for G(x) = ind(a <= x <= b).""" def __init__(self, sigma): """Initialize a new instance. Parameters ---------- sigma : positive float Step size parameter, not used. """ super(ProxOpBoxConstraint, self).__init__( domain=space, range=space, linear=False) def _call(self, x, out): """Apply the operator to ``x`` and store the result in ``out``.""" if lower is not None and upper is None: x.ufuncs.maximum(lower, out=out) elif lower is None and upper is not None: x.ufuncs.minimum(upper, out=out) elif lower is not None and upper is not None: x.ufuncs.maximum(lower, out=out) out.ufuncs.minimum(upper, out=out) else: out.assign(x) return ProxOpBoxConstraint def proximal_nonnegativity(space): """Function to create the proximal operator of ``G(x) = ind(x >= 0)``. Function for the proximal operator of the functional ``G(x)=ind(x >= 0)`` to be initialized. Parameters ---------- space : `LinearSpace` Domain of the functional G(x) Returns ------- prox_factory : function Factory for the proximal operator to be initialized See Also -------- proximal_box_constraint """ return proximal_box_constraint(space, lower=0) def proximal_convex_conj_l2(space, lam=1, g=None): """Proximal operator factory of the convex conj of the l2-norm/distance. 
    Function for the proximal operator of the convex conjugate of the
    functional F where F is the l2-norm (or distance to g, if given)::

        F(x) = lam ||x - g||_2

    with x and g elements in ``space``, scaling factor lam, and given
    data g.

    Parameters
    ----------
    space : `LinearSpace`
        Domain of F(x). Needs to be a Hilbert space.
        That is, have an inner product (`LinearSpace.inner`).
    lam : positive float, optional
        Scaling factor or regularization parameter.
    g : ``space`` element, optional
        An element in ``space``. Default: ``space.zero``.

    Returns
    -------
    prox_factory : function
        Factory for the proximal operator to be initialized

    Notes
    -----
    Most problems are formulated for the squared norm/distance, in that
    case use the `proximal_convex_conj_l2_squared` instead.

    The :math:`L_2`-norm/distance :math:`F` is given by

    .. math::
        F(x) = \\lambda \|x - g\|_2

    The convex conjugate :math:`F^*` of :math:`F` is given by

    .. math::
        F^*(y) = \\langle y, g \\rangle +
        \\begin{cases}
        0 & \\text{if } \|y\|_2 \leq \\lambda, \\\\
        \\infty & \\text{else.}
        \\end{cases}

    For a step size :math:`\\sigma`, the proximal operator of
    :math:`\\sigma F^*` is given by the projection of :math:`y - \\sigma g`
    onto the ball of radius :math:`\\lambda`, i.e., by

    .. math::
        \mathrm{prox}_{\\sigma F^*}(y) =
        \\begin{cases}
        \\lambda \\frac{y - \\sigma g}{\|y - \\sigma g\|_2} &
        \\text{if } \|y - \\sigma g\|_2 > \\lambda, \\\\
        y - \\sigma g &
        \\text{if } \|y - \\sigma g\|_2 \leq \\lambda
        \\end{cases}

    Note that for :math:`g = 0` the expression is independent of
    :math:`\\sigma`.

    See Also
    --------
    proximal_l2 : proximal without convex conjugate
    proximal_convex_conj_l2_squared : proximal for squared norm/distance
    """
    prox_l2 = proximal_l2(space, lam=lam, g=g)
    return proximal_convex_conj(prox_l2)


def proximal_l2(space, lam=1, g=None):
    """Proximal operator factory of the l2-norm/distance.

    Function for the proximal operator of the functional ``F`` where ``F``
    is the l2-norm (or distance to g, if given)::

        ``F(x) = lam ||x - g||_2``

    Parameters
    ----------
    space : `LinearSpace`
        Domain of F(x). Needs to be a Hilbert space.
        That is, have an inner product (`LinearSpace.inner`).
    lam : positive float, optional
        Scaling factor or regularization parameter.
    g : ``space`` element, optional
        An element in ``space``. Default: ``space.zero``.

    Returns
    -------
    prox_factory : callable
        Factory for the proximal operator to be initialized.

    Notes
    -----
    Most problems are formulated for the squared norm/distance, in that
    case use `proximal_l2_squared` instead.

    The :math:`L_2`-norm/distance :math:`F` is given by

    .. math::
        F(x) = \\lambda \|x - g\|_2

    For a step size :math:`\\sigma`, the proximal operator of
    :math:`\\sigma F` is given by

    .. math::
        \mathrm{prox}_{\\sigma F}(y) =
        \\begin{cases}
        (1 - c) \, y + c \, g &
        \\text{if } c < 1, \\\\
        g &
        \\text{else},
        \\end{cases}

    where :math:`c = \\sigma \\frac{\\lambda}{\|y - g\|_2}`.

    See Also
    --------
    proximal_l2_squared : proximal for squared norm/distance
    proximal_convex_conj_l2 : proximal for convex conjugate
    """
    lam = float(lam)

    if g is not None and g not in space:
        raise TypeError('{!r} is not an element of {!r}'.format(g, space))

    class ProximalL2(Operator):

        """Proximal operator of the l2-norm/distance."""

        def __init__(self, sigma):
            """Initialize a new instance.

            Parameters
            ----------
            sigma : positive float
                Step size parameter
            """
            super(ProximalL2, self).__init__(
                domain=space, range=space, linear=False)
            self.sigma = float(sigma)

        def _call(self, x, out):
            """Apply the operator to ``x`` and store the result in ``out``."""
            dtype = getattr(self.domain, 'dtype', float)
            eps = np.finfo(dtype).resolution * 10

            if g is None:
                x_norm = x.norm() * (1 + eps)
                if x_norm > 0:
                    step = self.sigma * lam / x_norm
                else:
                    step = np.infty

                if step < 1.0:
                    out.lincomb(1.0 - step, x)
                else:
                    out.set_zero()
            else:
                x_norm = (x - g).norm() * (1 + eps)
                if x_norm > 0:
                    step = self.sigma * lam / x_norm
                else:
                    step = np.infty

                if step < 1.0:
                    out.lincomb(1.0 - step, x, step, g)
                else:
                    out.assign(g)

    return ProximalL2


def proximal_convex_conj_l2_squared(space, lam=1, g=None):
    """Proximal operator factory of the convex conj of the squared l2-norm/dist.

    Function for the proximal operator of the convex conjugate of the
    functional F where F is the squared l2-norm (or distance to g, if
    given)::

        F(x) = lam ||x - g||_2^2

    with x and g elements in ``space``, scaling factor lam, and given
    data g.

    Parameters
    ----------
    space : `LinearSpace`
        Domain of F(x). Needs to be a Hilbert space.
        That is, have an inner product (`LinearSpace.inner`).
    lam : positive float, optional
        Scaling factor or regularization parameter.
    g : ``space`` element, optional
        An element in ``space``. Default: ``space.zero``.

    Returns
    -------
    prox_factory : function
        Factory for the proximal operator to be initialized

    Notes
    -----
    The squared :math:`L_2`-norm/distance :math:`F` is given by

    .. math::
        F(x) = \\lambda \|x - g\|_2^2.

    The convex conjugate :math:`F^*` of :math:`F` is given by

    .. math::
        F^*(y) = \\frac{1}{4\\lambda} \| y\|_2^2 + \\langle y, g \\rangle

    For a step size :math:`\\sigma`, the proximal operator of
    :math:`\\sigma F^*` is given by

    .. math::
        \mathrm{prox}_{\\sigma F^*}(y) = \\frac{y - \\sigma g}
        {1 + \\sigma/(2 \\lambda)}

    See Also
    --------
    proximal_convex_conj_l2 : proximal without square
    proximal_l2_squared : proximal without convex conjugate
    """
    lam = float(lam)

    if g is not None and g not in space:
        raise TypeError('{!r} is not an element of {!r}'.format(g, space))

    class ProximalConvexConjL2Squared(Operator):

        """Proximal operator of the convex conj of the squared l2-norm/dist."""

        def __init__(self, sigma):
            """Initialize a new instance.

            Parameters
            ----------
            sigma : positive float or pointwise positive space.element
                Step size parameter. If scalar, it contains a global
                stepsize, otherwise the space.element defines a stepsize
                for each point.
            """
            super(ProximalConvexConjL2Squared, self).__init__(
                domain=space, range=space, linear=g is None)
            if np.isscalar(sigma):
                self.sigma = float(sigma)
            else:
                self.sigma = space.element(sigma)

        def _call(self, x, out):
            """Apply the operator to ``x`` and store the result in ``out``"""
            # (x - sig*g) / (1 + sig/(2 lam))
            sig = self.sigma
            if np.isscalar(sig):
                if g is None:
                    out.lincomb(1.0 / (1 + 0.5 * sig / lam), x)
                else:
                    out.lincomb(1.0 / (1 + 0.5 * sig / lam), x,
                                -sig / (1 + 0.5 * sig / lam), g)
            elif sig in space:
                if g is None:
                    x.divide(1 + 0.5 / lam * sig, out=out)
                else:
                    sig.multiply(g, out=out)
                    out.lincomb(1.0, x, -1.0, out)
                    out.divide(1 + 0.5 / lam * sig, out=out)
            else:
                raise RuntimeError('Error in ProximalConvexConjL2Squared: '
                                   'sig is neither a scalar nor a space '
                                   'element.')

    return ProximalConvexConjL2Squared


def proximal_l2_squared(space, lam=1, g=None):
    """Proximal operator factory of the squared l2-norm/distance.

    Function for the proximal operator of the functional F where F is the
    squared l2-norm (or distance to g, if given)::

        F(x) = lam ||x - g||_2^2

    with x and g elements in ``space``, scaling factor lam, and given
    data g.

    Parameters
    ----------
    space : `LinearSpace`
        Domain of F(x). Needs to be a Hilbert space.
        That is, have an inner product (`LinearSpace.inner`).
    lam : positive float, optional
        Scaling factor or regularization parameter.
    g : ``space`` element, optional
        An element in ``space``. Default: ``space.zero``.

    Returns
    -------
    prox_factory : function
        Factory for the proximal operator to be initialized

    Notes
    -----
    The squared :math:`L_2`-norm/distance :math:`F` is given by

    .. math::
        F(x) = \\lambda \|x - g\|_2^2.

    For a step size :math:`\\sigma`, the proximal operator of
    :math:`\\sigma F` is given by

    .. math::
        \mathrm{prox}_{\\sigma F}(x) = \\frac{x + 2 \\sigma \\lambda g}
        {1 + 2 \\sigma \\lambda}.

    See Also
    --------
    proximal_l2 : proximal without square
    proximal_convex_conj_l2_squared : proximal for convex conjugate
    """
    class ProximalL2Squared(Operator):

        """Proximal operator of the squared l2-norm/dist."""

        def __init__(self, sigma):
            """Initialize a new instance.

            Parameters
            ----------
            sigma : positive float or pointwise positive space.element
                Step size parameter. If scalar, it contains a global
                stepsize, otherwise the space.element defines a stepsize
                for each point.
            """
            super(ProximalL2Squared, self).__init__(
                domain=space, range=space, linear=g is None)
            if np.isscalar(sigma):
                self.sigma = float(sigma)
            else:
                self.sigma = space.element(sigma)

        def _call(self, x, out):
            """Apply the operator to ``x`` and store the result in ``out``"""
            # (x + 2*sig*lam*g) / (1 + 2*sig*lam)
            sig = self.sigma
            if np.isscalar(sig):
                if g is None:
                    out.lincomb(1.0 / (1 + 2 * sig * lam), x)
                else:
                    out.lincomb(1.0 / (1 + 2 * sig * lam), x,
                                2 * sig * lam / (1 + 2 * sig * lam), g)
            else:  # sig in space
                if g is None:
                    x.divide(1.0 + 2.0 * sig * lam, out=out)
                else:
                    sig.multiply(2.0 * lam * g, out=out)
                    out.lincomb(1.0, x, 1.0, out)
                    out.divide(1.0 + 2 * sig * lam, out=out)

    return ProximalL2Squared


def proximal_convex_conj_l1(space, lam=1, g=None):
    """Proximal operator factory of the L1 norm/distance convex conjugate.

    Implements the proximal operator of the convex conjugate of the
    functional ::

        F(x) = lam ||x - g||_1

    with ``x`` and ``g`` elements in ``space``, and scaling factor ``lam``.

    Parameters
    ----------
    space : `LinearSpace` or `ProductSpace` of `LinearSpace` spaces
        Domain of the functional F
    lam : positive float, optional
        Scaling factor or regularization parameter.
    g : ``space`` element, optional
        Element to which the L1 distance is taken.
        Default: ``space.zero``.

    Returns
    -------
    prox_factory : function
        Factory for the proximal operator to be initialized.

    Notes
    -----
    The convex conjugate :math:`F^*` of the functional

    .. math::
        F(x) = \\lambda \|x - g\|_1.

    is in the case of scalar-valued functions given by

    .. math::
        F^*(y) = \iota_{B_\infty} \\big( \\lambda^{-1}\, y \\big) +
        \\left\\langle y,\: g \\right\\rangle,

    where :math:`\iota_{B_\infty}` is the indicator function of the unit
    ball with respect to :math:`\|\cdot\|_\infty`. For vector-valued
    functions, the convex conjugate is

    .. math::
        F^*(y) = \sum_{k=1}^d F^*(y_k)

    due to separability of the (non-isotropic) 1-norm.

    For a step size :math:`\\sigma`, the proximal operator of
    :math:`\\sigma F^*` is given by
    .. math::
        \mathrm{prox}_{\\sigma F^*}(y) = \\frac{\\lambda (y - \\sigma g)}{
        \\max(\\lambda, |y - \\sigma g|)}

    Here, all operations are to be read pointwise.

    For vector-valued :math:`x` and :math:`g`, the (non-isotropic) proximal
    operator is the component-wise scalar proximal:

    .. math::
        \mathrm{prox}_{\\sigma F^*}(x) = \\left(
        \mathrm{prox}_{\\sigma F^*}(x_1), \dots,
        \mathrm{prox}_{\\sigma F^*}(x_d) \\right),

    where :math:`d` is the number of components of :math:`x`.

    See Also
    --------
    proximal_convex_conj_l1_l2 : isotropic variant for vector-valued functions
    proximal_l1 : proximal without convex conjugate
    """
    # Fix for rounding errors
    dtype = getattr(space, 'dtype', float)
    eps = np.finfo(dtype).resolution * 10
    lam = float(lam * (1 - eps))

    if g is not None and g not in space:
        raise TypeError('{!r} is not an element of {!r}'.format(g, space))

    class ProximalConvexConjL1(Operator):

        """Proximal operator of the L1 norm/distance convex conjugate."""

        def __init__(self, sigma):
            """Initialize a new instance.

            Parameters
            ----------
            sigma : positive float or pointwise positive space.element
                Step size parameter. If scalar, it contains a global
                stepsize, otherwise the space.element defines a stepsize
                for each point.
            """
            super(ProximalConvexConjL1, self).__init__(
                domain=space, range=space, linear=False)
            if np.isscalar(sigma):
                self.sigma = float(sigma)
            else:
                self.sigma = space.element(sigma)

        def _call(self, x, out):
            """Return ``self(x, out=out)``."""
            # lam * (x - sig * g) / max(lam, |x - sig * g|)

            # diff = x - sig * g
            if g is not None:
                diff = self.domain.element()
                diff.lincomb(1, x, -self.sigma, g)
            else:
                if x is out:
                    # Handle aliased `x` and `out`
                    # This is necessary since we write to both `diff` and
                    # `out`.
                    diff = x.copy()
                else:
                    diff = x

            # out = max( |x-sig*g|, lam ) / lam
            diff.ufuncs.absolute(out=out)
            out.ufuncs.maximum(lam, out=out)
            out /= lam

            # out = diff / ...
            diff.divide(out, out=out)

    return ProximalConvexConjL1


def proximal_convex_conj_l1_l2(space, lam=1, g=None):
    """Proximal operator factory of the L1-L2 norm/distance convex conjugate.

    Implements the proximal operator of the convex conjugate of the
    functional ::

        F(x) = lam || |x - g|_2 ||_1

    with ``x`` and ``g`` elements in ``space``, and scaling factor ``lam``.
    Here, ``|.|_2`` is the pointwise Euclidean norm of a vector-valued
    function.

    Parameters
    ----------
    space : `LinearSpace` or `ProductSpace` of `LinearSpace` spaces
        Domain of the functional F
    lam : positive float, optional
        Scaling factor or regularization parameter.
    g : ``space`` element, optional
        Element to which the L1 distance is taken.
        Default: ``space.zero``.

    Returns
    -------
    prox_factory : function
        Factory for the proximal operator to be initialized.

    Notes
    -----
    The convex conjugate :math:`F^*` of the functional

    .. math::
        F(x) = \\lambda \| |x - g|_2 \|_1.

    is given by

    .. math::
        F^*(y) = \iota_{B_\infty} \\big( \\lambda^{-1}\, |y|_2 \\big) +
        \\left\\langle y,\: g \\right\\rangle,

    where :math:`\iota_{B_\infty}` is the indicator function of the unit
    ball with respect to :math:`\|\cdot\|_\infty`.

    For a step size :math:`\\sigma`, the proximal operator of
    :math:`\\sigma F^*` is given by

    .. math::
        \mathrm{prox}_{\\sigma F^*}(y) = \\frac{\\lambda (y - \\sigma g)}{
        \\max(\\lambda, |y - \\sigma g|_2)}

    Here, all operations are to be read pointwise.
See Also -------- proximal_convex_conj_l1 : Scalar or non-isotropic vectorial variant """ # Fix for rounding errors dtype = getattr(space, 'dtype', float) eps = np.finfo(dtype).resolution * 10 lam = float(lam * (1 - eps)) if g is not None and g not in space: raise TypeError('{!r} is not an element of {!r}'.format(g, space)) class ProximalConvexConjL1L2(Operator): """Proximal operator of the convex conj of the l1-norm/distance.""" def __init__(self, sigma): """Initialize a new instance. Parameters ---------- sigma : positive float Step size parameter """ super(ProximalConvexConjL1L2, self).__init__( domain=space, range=space, linear=False) self.sigma = float(sigma) def _call(self, x, out): """Return ``self(x, out=out)``.""" # lam * (x - sig * g) / max(lam, |x - sig * g|) # diff = x - sig * g if g is not None: diff = self.domain.element() diff.lincomb(1, x, -self.sigma, g) else: diff = x # denom = max( |x-sig*g|_2, lam ) / lam (|.|_2 pointwise) pwnorm = PointwiseNorm(self.domain, exponent=2) denom = pwnorm(diff) denom.ufuncs.maximum(lam, out=denom) denom /= lam # Pointwise division for out_i, diff_i in zip(out, diff): diff_i.divide(denom, out=out_i) return ProximalConvexConjL1L2 def proximal_l1(space, lam=1, g=None): """Proximal operator factory of the L1 norm/distance. Implements the proximal operator of the functional :: F(x) = lam ||x - g||_1 with ``x`` and ``g`` elements in ``space``, and scaling factor ``lam``. Parameters ---------- space : `LinearSpace` or `ProductSpace` Domain of the functional. lam : positive float, optional Scaling factor or regularization parameter. g : ``space`` element, optional Element to which the L1 distance is taken. Default: ``space.zero``. Returns ------- prox_factory : function Factory for the proximal operator to be initialized Notes ----- For the functional .. math:: F(x) = \\lambda \|x - g\|_1, and a step size :math:`\\sigma`, the proximal operator of :math:`\\sigma F` is given as the "soft-shrinkage" operator .. math:: \mathrm{prox}_{\\sigma F}(x) = \\begin{cases} g, & \\text{where } |x - g| \\leq \sigma\\lambda, \\\\ x - \sigma\\lambda \mathrm{sign}(x - g), & \\text{elsewhere.} \\end{cases} Here, all operations are to be read pointwise. For vector-valued :math:`x` and :math:`g`, the (non-isotropic) proximal operator is the component-wise scalar proximal: .. math:: \mathrm{prox}_{\\sigma F}(x) = \\left( \mathrm{prox}_{\\sigma F}(x_1), \dots, \mathrm{prox}_{\\sigma F}(x_d) \\right), where :math:`d` is the number of components of :math:`x`. See Also -------- proximal_convex_conj_l1 : proximal for convex conjugate proximal_l1_l2 : isotropic variant of the group L1 norm proximal """ lam = float(lam) if g is not None and g not in space: raise TypeError('{!r} is not an element of {!r}'.format(g, space)) class ProximalL1(Operator): """Proximal operator of the L1 norm/distance.""" def __init__(self, sigma): """Initialize a new instance. Parameters ---------- sigma : positive float or pointwise positive space.element Step size parameter. If scalar, it contains a global stepsize, otherwise the space.element defines a stepsize for each point. 
""" super(ProximalL1, self).__init__( domain=space, range=space, linear=False) if np.isscalar(sigma): self.sigma = float(sigma) else: self.sigma = space.element(sigma) def _call(self, x, out): """Return ``self(x, out=out)``.""" # diff = x - g if g is not None: diff = x - g else: if x is out: # Handle aliased `x` and `out` (original `x` needed later) diff = x.copy() else: diff = x # We write the operator as # x - (x - g) / max(|x - g| / sig*lam, 1) denom = diff.ufuncs.absolute() denom /= self.sigma * lam denom.ufuncs.maximum(1, out=denom) # out = (x - g) / denom diff.ufuncs.divide(denom, out=out) # out = x - ... out.lincomb(1, x, -1, out) return ProximalL1 def proximal_l1_l2(space, lam=1, g=None): """Proximal operator factory of the group-L1-L2 norm/distance. Implements the proximal operator of the functional :: F(x) = lam || |x - g|_2 ||_1 with ``x`` and ``g`` elements in ``space``, and scaling factor ``lam``. Here, ``|.|_2`` is the pointwise Euclidean norm of a vector-valued function. Parameters ---------- space : `LinearSpace` or `ProductSpace` Domain of the functional. lam : positive float, optional Scaling factor or regularization parameter. g : ``space`` element, optional Element to which the L1-L2 distance is taken. Default: ``space.zero``. Returns ------- prox_factory : function Factory for the proximal operator to be initialized Notes ----- For the functional .. math:: F(x) = \\lambda \| |x - g|_2 \|_1, and a step size :math:`\\sigma`, the proximal operator of :math:`\\sigma F` is given as the "soft-shrinkage" operator .. math:: \mathrm{prox}_{\\sigma F}(x) = \\begin{cases} g, & \\text{where } |x - g|_2 \\leq \sigma\\lambda, \\\\ x - \sigma\\lambda \\frac{x - g}{|x - g|_2}, & \\text{elsewhere.} \\end{cases} Here, all operations are to be read pointwise. See Also -------- proximal_l1 : Scalar or non-isotropic vectorial variant """ lam = float(lam) if g is not None and g not in space: raise TypeError('{!r} is not an element of {!r}'.format(g, space)) class ProximalL1L2(Operator): """Proximal operator of the group-L1-L2 norm/distance.""" def __init__(self, sigma): """Initialize a new instance. Parameters ---------- sigma : positive float Step size parameter. """ super(ProximalL1L2, self).__init__( domain=space, range=space, linear=False) self.sigma = float(sigma) def _call(self, x, out): """Return ``self(x, out=out)``.""" # diff = x - g if g is not None: diff = x - g else: if x is out: # Handle aliased `x` and `out` (original `x` needed later) diff = x.copy() else: diff = x # We write the operator as # x - (x - g) / max(|x - g|_2 / sig*lam, 1) pwnorm = PointwiseNorm(self.domain, exponent=2) denom = pwnorm(diff) denom /= self.sigma * lam denom.ufuncs.maximum(1, out=denom) # out = (x - g) / denom for out_i, diff_i in zip(out, diff): diff_i.divide(denom, out=out_i) # out = x - ... out.lincomb(1, x, -1, out) return ProximalL1L2 def proximal_convex_conj_kl(space, lam=1, g=None): """Proximal operator factory of the convex conjugate of the KL divergence. Function returning the proximal operator of the convex conjugate of the functional F where F is the entropy-type Kullback-Leibler (KL) divergence:: F(x) = sum_i (x_i - g_i + g_i ln(g_i) - g_i ln(pos(x_i))) + ind_P(x) with ``x`` and ``g`` elements in the linear space ``X``, and ``g`` non-negative. Here, ``pos`` denotes the nonnegative part, and ``ind_P`` is the indicator function for nonnegativity. Parameters ---------- space : `TensorSpace` Space X which is the domain of the functional F lam : positive float, optional Scaling factor. 
g : ``space`` element, optional Data term, positive. If None it is take as the one-element. Returns ------- prox_factory : function Factory for the proximal operator to be initialized. See Also -------- proximal_convex_conj_kl_cross_entropy : proximal for releated functional Notes ----- The functional is given by the expression .. math:: F(x) = \\sum_i (x_i - g_i + g_i \\ln(g_i) - g_i \\ln(pos(x_i))) + I_{x \\geq 0}(x) The indicator function :math:`I_{x \geq 0}(x)` is used to restrict the domain of :math:`F` such that :math:`F` is defined over whole space :math:`X`. The non-negativity thresholding :math:`pos` is used to define :math:`F` in the real numbers. Note that the functional is not well-defined without a prior g. Hence, if g is omitted this will be interpreted as if g is equal to the one-element. The convex conjugate :math:`F^*` of :math:`F` is .. math:: F^*(p) = \\sum_i (-g_i \\ln(\\text{pos}({1_X}_i - p_i))) + I_{1_X - p \geq 0}(p) where :math:`p` is the variable dual to :math:`x`, and :math:`1_X` is an element of the space :math:`X` with all components set to 1. The proximal operator of the convex conjugate of F is .. math:: \mathrm{prox}_{\\sigma (\\lambda F)^*}(x) = \\frac{\\lambda 1_X + x - \\sqrt{(x - \\lambda 1_X)^2 + 4 \\lambda \\sigma g}}{2} where :math:`\\sigma` is the step size-like parameter, and :math:`\\lambda` is the weighting in front of the function :math:`F`. KL based objectives are common in MLEM optimization problems and are often used when data noise governed by a multivariate Poisson probability distribution is significant. The intermediate image estimates can have negative values even though the converged solution will be non-negative. Non-negative intermediate image estimates can be enforced by adding an indicator function ind_P the primal objective. This functional :math:`F`, described above, is related to the Kullback-Leibler cross entropy functional. The KL cross entropy is the one described in `this Wikipedia article <https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence>`_, and the functional :math:`F` is obtained by switching place of the prior and the varialbe in the KL cross entropy functional. See the See Also section. """ lam = float(lam) if g is not None and g not in space: raise TypeError('{} is not an element of {}'.format(g, space)) class ProximalConvexConjKL(Operator): """Proximal operator of the convex conjugate of the KL divergence.""" def __init__(self, sigma): """Initialize a new instance. Parameters ---------- sigma : positive float """ super(ProximalConvexConjKL, self).__init__( domain=space, range=space, linear=False) self.sigma = float(sigma) def _call(self, x, out): """Return ``self(x, out=out)``.""" # (x + lam - sqrt((x - lam)^2 + 4*lam*sig*g)) / 2 # out = (x - lam)^2 if x is out: # Handle aliased `x` and `out` (need original `x` later on) x = x.copy() else: out.assign(x) out -= lam out.ufuncs.square(out=out) # out = ... + 4*lam*sigma*g # If g is None, it is taken as the one element if g is None: out += 4.0 * lam * self.sigma else: out.lincomb(1, out, 4.0 * lam * self.sigma, g) # out = x - sqrt(...) + lam out.ufuncs.sqrt(out=out) out.lincomb(1, x, -1, out) out += lam # out = 1/2 * ... out /= 2 return ProximalConvexConjKL def proximal_convex_conj_kl_cross_entropy(space, lam=1, g=None): """Proximal factory of the convex conjugate of cross entropy KL divergence. 
Function returning the proximal factory of the convex conjugate of the functional F, where F is the cross entropy Kullback-Leibler (KL) divergence given by:: F(x) = sum_i (x_i ln(pos(x_i)) - x_i ln(g_i) + g_i - x_i) + ind_P(x) with ``x`` and ``g`` in the linear space ``X``, and ``g`` non-negative. Here, ``pos`` denotes the nonnegative part, and ``ind_P`` is the indicator function for nonnegativity. Parameters ---------- space : `TensorSpace` Space X which is the domain of the functional F lam : positive float, optional Scaling factor. g : ``space`` element, optional Data term, positive. If None it is take as the one-element. Returns ------- prox_factory : function Factory for the proximal operator to be initialized. See Also -------- proximal_convex_conj_kl : proximal for related functional Notes ----- The functional is given by the expression .. math:: F(x) = \\sum_i (x_i \\ln(pos(x_i)) - x_i \\ln(g_i) + g_i - x_i) + I_{x \\geq 0}(x) The indicator function :math:`I_{x \geq 0}(x)` is used to restrict the domain of :math:`F` such that :math:`F` is defined over whole space :math:`X`. The non-negativity thresholding :math:`pos` is used to define :math:`F` in the real numbers. Note that the functional is not well-defined without a prior g. Hence, if g is omitted this will be interpreted as if g is equal to the one-element. The convex conjugate :math:`F^*` of :math:`F` is .. math:: F^*(p) = \\sum_i g_i (exp(p_i) - 1) where :math:`p` is the variable dual to :math:`x`. The proximal operator of the convex conjugate of :math:`F` is .. math:: \mathrm{prox}_{\\sigma (\\lambda F)^*}(x) = x - \\lambda W(\\frac{\\sigma}{\\lambda} g e^{x/\\lambda}) where :math:`\\sigma` is the step size-like parameter, :math:`\\lambda` is the weighting in front of the function :math:`F`, and :math:`W` is the Lambert W function (see, for example, the `Wikipedia article <https://en.wikipedia.org/wiki/Lambert_W_function>`_). For real-valued input x, the Lambert :math:`W` function is defined only for :math:`x \\geq -1/e`, and it has two branches for values :math:`-1/e \\leq x < 0`. However, for inteneded use-cases, where :math:`\\lambda` and :math:`g` are positive, the argument of :math:`W` will always be positive. `Wikipedia article on Kullback Leibler divergence <https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence>`_. For further information about the functional, see for example `this article <http://ieeexplore.ieee.org/document/1056144/?arnumber=1056144>`_. The KL cross entropy functional :math:`F`, described above, is related to another functional functional also know as KL divergence. This functional is often used as data discrepancy term in inverse problems, when data is corrupted with Poisson noise. This functional is obtained by changing place of the prior and the variable. See the See Also section. """ lam = float(lam) if g is not None and g not in space: raise TypeError('{} is not an element of {}'.format(g, space)) class ProximalConvexConjKLCrossEntropy(Operator): """Proximal operator of conjugate of cross entropy KL divergence.""" def __init__(self, sigma): """Initialize a new instance. 
Parameters ---------- sigma : positive float """ self.sigma = float(sigma) super(ProximalConvexConjKLCrossEntropy, self).__init__( domain=space, range=space, linear=False) def _call(self, x, out): """Return ``self(x, out=out)``.""" # Lazy import to improve `import odl` time import scipy.special if g is None: # If g is None, it is taken as the one element # Different branches of lambertw is not an issue, see Notes lambw = scipy.special.lambertw( (self.sigma / lam) * np.exp(x / lam)) else: # Different branches of lambertw is not an issue, see Notes lambw = scipy.special.lambertw( (self.sigma / lam) * g * np.exp(x / lam)) if not np.issubsctype(self.domain.dtype, np.complexfloating): lambw = lambw.real lambw = x.space.element(lambw) out.lincomb(1, x, -lam, lambw) return ProximalConvexConjKLCrossEntropy def proximal_huber(space, gamma): """Proximal factory of the Huber norm. Parameters ---------- space : `TensorSpace` The domain of the functional gamma : float The smoothing parameter of the Huber norm functional. Returns ------- prox_factory : function Factory for the proximal operator to be initialized. See Also -------- odl.solvers.Huber : the Huber norm functional Notes ----- The proximal operator is given by given by the proximal operator of ``1/(2*gamma) * L2 norm`` in points that are ``<= gamma``, and by the proximal operator of the l1 norm in points that are ``> gamma``. """ gamma = float(gamma) class ProximalHuber(Operator): """Proximal operator of Huber norm.""" def __init__(self, sigma): """Initialize a new instance. Parameters ---------- sigma : positive float """ self.sigma = float(sigma) super(ProximalHuber, self).__init__(domain=space, range=space, linear=False) def _call(self, x, out): """Return ``self(x, out=out)``.""" if isinstance(self.domain, ProductSpace): norm = PointwiseNorm(self.domain, 2)(x) else: norm = x.ufuncs.absolute() idx = norm.ufuncs.less_equal(gamma + self.sigma) out[idx] = gamma / (gamma + self.sigma) * x[idx] idx.ufuncs.logical_not(out=idx) sign_x = x.ufuncs.sign() out[idx] = x[idx] - self.sigma * sign_x[idx] return out return ProximalHuber if __name__ == '__main__': from odl.util.testutils import run_doctests run_doctests()
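# A minimal NumPy check of the two closed forms quoted in the docstrings
# above: the prox of the squared L2 distance,
#     prox_{sigma*F}(x) = (x + 2*sigma*lam*g) / (1 + 2*sigma*lam),
# and the "soft-shrinkage" prox of the L1 distance. These helper names are
# local illustrations only, not part of the ODL API.
import numpy as np


def prox_l2_squared(x, sigma, lam, g):
    """Closed-form prox of F(x) = lam * ||x - g||_2^2."""
    return (x + 2 * sigma * lam * g) / (1 + 2 * sigma * lam)


def prox_l1(x, sigma, lam, g):
    """Soft-shrinkage: returns g where |x - g| <= sigma*lam, shrinks elsewhere."""
    diff = x - g
    return x - diff / np.maximum(np.abs(diff) / (sigma * lam), 1)


x = np.array([-2.0, 0.1, 3.0])
g = np.zeros(3)
print(prox_l1(x, sigma=1.0, lam=0.5, g=g))          # [-1.5  0.   2.5]
print(prox_l2_squared(x, sigma=1.0, lam=0.5, g=g))  # [-1.    0.05  1.5 ]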
from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ] operations = [ migrations.CreateModel( name='Donation', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('year', models.CharField(max_length=20)), ('party', models.CharField(max_length=200)), ('donor', models.CharField(max_length=200)), ('address', models.CharField(max_length=200)), ('party_state', models.CharField(max_length=4)), ('state', models.CharField(max_length=3)), ('postcode', models.CharField(max_length=4)), ('donor_type', models.CharField(max_length=40)), ('amount', models.FloatField()), ], options={ }, bases=(models.Model,), ), ]
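# Hypothetical models.py corresponding to the initial migration above; the
# fields are inferred one-for-one from the CreateModel operation.
from django.db import models


class Donation(models.Model):
    year = models.CharField(max_length=20)
    party = models.CharField(max_length=200)
    donor = models.CharField(max_length=200)
    address = models.CharField(max_length=200)
    party_state = models.CharField(max_length=4)
    state = models.CharField(max_length=3)
    postcode = models.CharField(max_length=4)
    donor_type = models.CharField(max_length=40)
    amount = models.FloatField()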
from logger import logLevel
import json


def validateRequest(debugAllowed, data, logger):
    try:
        request = json.loads(data)
    except ValueError:
        logger(logLevel.DEBUG, "Request is not valid JSON")
        return None

    if not isinstance(request, dict):
        logger(logLevel.DEBUG, "Request is not an object")
        return None

    # Check if this is a debug request
    if debugAllowed:
        debugRequest = validateDebugRequest(request, logger)
        if debugRequest:
            return debugRequest

    if "stacks" not in request:
        logger(logLevel.DEBUG, "Request does not contain 'stacks'")
        return None

    if "memoryMap" not in request:
        logger(logLevel.DEBUG, "Request does not contain 'memoryMap'")
        return None

    if "version" not in request:
        logger(logLevel.DEBUG, "Request does not contain 'version'")
        return None

    version = request["version"]
    stacks = request["stacks"]
    memoryMap = request["memoryMap"]

    if version not in (3, 4):
        logger(logLevel.WARNING,
               "Server currently supports versions 3 and 4 only ({} requested)"
               .format(version))
        return None

    if not isinstance(memoryMap, list):
        logger(logLevel.DEBUG, "The request's memoryMap is not a list")
        return None

    # Use a name distinct from the `data` parameter to avoid shadowing it
    for entry in memoryMap:
        if not isinstance(entry, list):
            logger(logLevel.DEBUG, "An element of the memoryMap is not a list")
            return None
        if len(entry) != 2:
            logger(logLevel.DEBUG,
                   "A memoryMap element does not have exactly two members")
            return None
        if not isinstance(entry[0], basestring):
            logger(logLevel.DEBUG,
                   "The first element of the memoryMap element is not a string")
            return None
        if not isinstance(entry[1], basestring):
            logger(logLevel.DEBUG,
                   "The second element of the memoryMap element is not a string")
            return None

    moduleCount = len(memoryMap)

    if not isinstance(stacks, list):
        logger(logLevel.DEBUG, "The request's stacks are not a list")
        return None

    for stack in stacks:
        if not isinstance(stack, list):
            logger(logLevel.DEBUG, "One of the request's stacks is not a list")
            return None
        for frame in stack:
            if not isinstance(frame, list):
                logger(logLevel.DEBUG,
                       "One of the request's stack frames is not a list")
                return None
            if len(frame) != 2:
                logger(logLevel.DEBUG,
                       "A stack frame does not have exactly two members")
                return None
            if not isinstance(frame[0], (int, long)):
                logger(logLevel.DEBUG,
                       "A stack frame module index is not an integer")
                return None
            # Reject both negative and too-large module indices
            if frame[0] < 0 or frame[0] >= moduleCount:
                logger(logLevel.DEBUG,
                       "A stack frame module index is out of range")
                return None
            if not isinstance(frame[1], (int, long)):
                logger(logLevel.DEBUG,
                       "A stack frame offset is not an integer")
                return None

    return request


def validateDebugRequest(request, logger):
    # Validation for debug requests is a bit less strict, but MUST come from the
    # localhost. If this function is called, it is assumed that the request has
    # been verified as coming from localhost.
    if 'debug' not in request:
        return None
    if request['debug'] is not True:
        return None
    if 'action' not in request:
        return None
    if not isinstance(request['action'], basestring):
        return None
    logger(logLevel.WARNING, "Received debug request")
    return request
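# Illustrative payload accepted by validateRequest above (all values made up):
# `memoryMap` entries are [module_name, debug_id] string pairs, each stack
# frame is a [module_index, offset] integer pair, and `version` must be 3 or 4.
import json

sample = json.dumps({
    "version": 4,
    "memoryMap": [["xul.pdb", "44E4EC8C2F41492B9369D6B9A059577C2"]],
    "stacks": [[[0, 11723767], [0, 65802]]],
})
# validateRequest(False, sample, logger) would return the parsed dict.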
'''ioriodb CLI client to interact with the api from the command line''' from __future__ import print_function import time import json import argparse import iorio def get_arg_parser(): '''build the cli arg parser''' parser = argparse.ArgumentParser(description='Iorio DB CLI') parser.add_argument('--verbose', '-v', action='count') parser.add_argument('-u', '--username', default='admin', help='username used for authentication') parser.add_argument('-p', '--password', default='secret', help='password used for authentication') parser.add_argument('-t', '--token', default=None, help='token from an already authenticated user') parser.add_argument('-H', '--host', default='localhost', help='host where ioriodb is running') parser.add_argument('-P', '--port', default=8080, type=int, help='port where ioriodb is running') parser.add_argument('-c', '--count', default=1, type=int, help='how many times to do the action') parser.add_argument('--human', action='store_true', default=False) parser.add_argument('--pathprefix', default='', help="change the prefix of API path") parser.add_argument('-s', '--secure', action='store_true', default=False, help="use HTTPS") subparsers = parser.add_subparsers() p_post = subparsers.add_parser('post', help='add an event to a stream') p_patch = subparsers.add_parser('patch', help='patch last event from a stream') p_list_buckets = subparsers.add_parser('list-buckets', help='list buckets') p_list_streams = subparsers.add_parser('list-streams', help='list streams') p_get = subparsers.add_parser('get', help='get content from a stream') p_listen = subparsers.add_parser('listen', help='listen to new content from streams') p_stats = subparsers.add_parser('stats', help='get server stats') p_stats.set_defaults(action='stats') #p_admin = subparsers.add_parser('admin', help='admin tasks') p_post.set_defaults(action='post') p_post.add_argument('bucket', help='bucket name') p_post.add_argument('stream', help='stream name') p_post.add_argument('-c', '--content-type', default='application/json', help='content-type for the request') p_post.add_argument('data', help='literal JSON data or if starts with @ ' + 'path to a file with JSON data') p_patch.set_defaults(action='patch') p_patch.add_argument('bucket', help='bucket name') p_patch.add_argument('stream', help='stream name') p_patch.add_argument('-c', '--content-type', default='application/json-patch+json', help='content-type for the request') p_patch.add_argument('data', help='literal JSON data or if starts with @ ' + 'path to a file with JSON data') p_get.set_defaults(action='get') p_get.add_argument('bucket', help='bucket name') p_get.add_argument('stream', help='stream name') p_get.add_argument('-l', '--limit', default=10, type=int, help='amount of items to retrieve') p_get.add_argument('-f', '--from', default=None, type=int, dest='fromsn', help='sequence number to start from') p_list_buckets.set_defaults(action='list-buckets') p_list_streams.set_defaults(action='list-streams') p_list_streams.add_argument('bucket', help='bucket name') p_listen.set_defaults(action='listen') p_listen.add_argument('subscriptions', nargs='+', help="subscription descriptiors (bucket:stream or bucket:stream:from)") return parser def parse_args(): '''parse arguments and return them''' parser = get_arg_parser() args = parser.parse_args() return args def parse_data_from_raw(data_raw): '''parse data from literal, if it starts wit @ parse content from file''' if data_raw.startswith('@'): return json.load(open(data_raw[1:])) else: return json.loads(data_raw) 
def do_when_authenticated(args, fun, conn=None):
    '''if auth works run fun'''
    if conn is None:
        conn = iorio.Connection(args.host, args.port, secure=args.secure,
                                path_prefix=args.pathprefix)

    auth_t1 = time.time()
    auth_ok, auth_resp = conn.authenticate(args.username, args.password)
    auth_t2 = time.time()

    if args.verbose and args.verbose > 1:
        print("Auth request time", (auth_t2 - auth_t1) * 1000, "ms")

    if auth_ok:
        req_t1 = time.time()
        response = fun(conn)
        req_t2 = time.time()

        if args.verbose and args.verbose > 1:
            print("Request time", (req_t2 - req_t1) * 1000, "ms")

        print(response)
    else:
        print("Auth Failed")
        print(auth_resp)


def post_or_patch(args, name):
    '''avoid duplication'''
    bucket = args.bucket
    stream = args.stream
    content_type = args.content_type
    data_raw = args.data
    data = parse_data_from_raw(data_raw)

    def fun(conn):
        '''fun that does the work'''
        function = getattr(conn, name)
        for _ in range(args.count):
            result = function(bucket, stream, data, content_type)

        return result

    do_when_authenticated(args, fun)


def handle_post_event(args):
    '''post a new event'''
    post_or_patch(args, 'send')


def handle_patch_event(args):
    '''patch a new event'''
    post_or_patch(args, 'send_patch')


def handle_get_events(args):
    '''get events'''
    bucket = args.bucket
    stream = args.stream
    limit = args.limit
    fromsn = args.fromsn

    def fun(conn):
        '''fun that does the work'''
        return conn.query(bucket, stream, fromsn, limit)

    do_when_authenticated(args, fun)


def handle_list_streams(args):
    '''list streams in a bucket'''
    bucket = args.bucket

    def fun(conn):
        '''fun that does the work'''
        return conn.list_streams(bucket)

    do_when_authenticated(args, fun)


def handle_list_buckets(args):
    '''list buckets'''

    def fun(conn):
        '''fun that does the work'''
        return conn.list_buckets()

    do_when_authenticated(args, fun)


def diff_keys(dict1, dict2, keys):
    '''calculate the difference between keys of dict2 and dict1'''
    result = {}
    for key in keys:
        val1 = dict1.get(key)
        val2 = dict2.get(key)
        if isinstance(val1, int) and isinstance(val2, int):
            result[key] = val2 - val1

    return result


def handle_stats(args):
    '''get server stats'''

    def fun(conn):
        '''fun that does the work'''
        response = conn.stats()
        stats = response.body
        node_stats = stats['node']
        abs1 = node_stats['abs1']
        abs2 = node_stats['abs2']
        keys = ['error_logger_queue_len', 'memory_atoms', 'memory_bin',
                'memory_ets', 'memory_procs', 'memory_total',
                'process_count', 'run_queue']
        abs_diff = diff_keys(abs1, abs2, keys)
        stats['abs_diff'] = abs_diff
        return response

    do_when_authenticated(args, fun)


def parse_subscription(sub):
    '''parse a subscription in notation bucket:stream[:from]'''
    parts = sub.split(':')
    parts_count = len(parts)
    if parts_count == 2:
        return True, parts + [None]
    elif parts_count == 3:
        try:
            seqnum = int(parts[2])
            return True, [parts[0], parts[1], seqnum]
        except ValueError:
            return (False, "expected subscription to have format " +
                    "bucket:stream:from where from is a number, got %s" % sub)
    else:
        return (False, "expected subscription to have format " +
                "bucket:stream[:from], got %s" % sub)


def handle_listen(args):
    '''listen to events in subscriptions'''
    raw_subs = args.subscriptions
    subs = iorio.Subscriptions()
    for sub in raw_subs:
        ok, result = parse_subscription(sub)
        if not ok:
            print(result)
            return

        bucket, stream, count = result
        subs.add(bucket, stream, count)

    def fun(conn):
        '''fun that does the work'''
        while True:
            current_subs = subs.to_list()
            print('listening', ' '.join(current_subs))
            response = conn.listen(current_subs)
            print(response)
            print()
            if response.status == 200:
                subs.update_seqnums(response.body)
do_when_authenticated(args, fun) HANDLERS = { 'post': handle_post_event, 'patch': handle_patch_event, 'get': handle_get_events, 'listen': handle_listen, 'list-buckets': handle_list_buckets, 'list-streams': handle_list_streams, 'stats': handle_stats } def main(): '''cli entry point''' args = parse_args() handler = HANDLERS[args.action] handler(args) if __name__ == '__main__': main()
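# Illustrative shell invocations of the CLI above (assuming the module is
# saved as cli.py; the credentials shown are just the parser defaults):
#
#   python cli.py -u admin -p secret post mybucket mystream '{"temp": 21.5}'
#   python cli.py get mybucket mystream --limit 5 --from 100
#   python cli.py list-streams mybucket
#   python cli.py listen mybucket:mystream mybucket:other:42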
import os import sys import threading import pprint from pprint import pformat from optparse import OptionParser import json import urllib from urlparse import urlparse, parse_qs from time import time try: import astrodata from astrodata.adutils import jsutil from astrodata import AstroDataType from astrodata.priminspect import PrimInspect from astrodata.ConfigSpace import cs except: print "couldn't import astrodata" raise import socket import fcntl import struct from astrodata.RecipeManager import RecipeLibrary from astrodata.AstroDataType import get_classification_library recipelibrary = RecipeLibrary() classificationlib = get_classification_library() def get_ip_address(ifname): s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) return socket.inet_ntoa(fcntl.ioctl( s.fileno(), 0x8915, # SIOCGIFADDR struct.pack('256s', ifname[:15]) )[20:24]) parser = OptionParser() parser.set_description( "Gemini Observatory Pipeline Development Kit Tool" "(v_1.0 2011)") parser.add_option("-d", "--server", action="store_true", dest="server", default=True, help="start a server for interactive use via browser") parser.add_option("-p", "--port", dest = "port", default = 8888, type = "int", help="this is the port it will provide the ADKTOOL GUI on (http://localhost:<port>)" ) parser.add_option("--nonlocal", action="store_true", dest="nonlocal", help="used to put the server in non-local mode (no editing)") (options, args) = parser.parse_args() server = options.server class EditThread(threading.Thread): fname = None lnum = None def __init__(self, fname, lnum): self.fname = fname self.lnum = lnum threading.Thread.__init__(self) def run(self): from subprocess import call if "PDK_EDITOR" in os.environ: PDK_EDITOR = os.environ["PDK_EDITOR"] else: PDK_EDITOR = "nedit +%(lnum)s %(filename)s" editcmd = PDK_EDITOR % { "lnum":self.lnum, "filename":self.fname } print "pt41:", editcmd call ( editcmd, shell=True) class RCat(): def __init__(self, trd): self.cat = trd def makeCats(self,catlist, curcat = None): if curcat == None: curcat = self.cat if len(catlist) ==0: return curcat if "subcats" not in curcat: curcat.update({"subcats":[]}) subcats = curcat["subcats"] goodcat = None for cat in subcats: if cat["cat_name"] == catlist[0]: goodcat = cat if goodcat == None: goodcat = {"cat_name":catlist[0]} subcats.append(goodcat) subcat = goodcat return self.makeCats(catlist[1:], curcat = subcat) def add(self, nd): path = nd["path"] category = os.path.dirname(path) recipename = os.path.basename(path) cats = category.split("/") # print "adkt105:", cats, nd nd.update({"cat_name":cats[-1]}) if nd["package"] != "*": cats = cats[cats.index(nd["package"])+1:] else: # package does equal "*", the wildcard, set when typeinfo == True for i in range(0,len(cats)): if "astrodata_" in cats[i]: break cats =cats[i+1:] correctcat = self.makeCats(cats) #print "pt66", category #print "pt67:", pprint.pformat(self.cat,4,80) #raw_input() #sys.exit() if "members" not in correctcat: correctcat.update({"members":[]}) members = correctcat["members"] members.append(nd) def printDict(self,pdict = None, indent = ""): if pdict == None: pdict = self.cat print indent,"p-------t" if "name" in pdict: print indent,"name",pdict["name"] if "cat_name" in pdict: print indent,"cat_name", pdict["cat_name"] if "members" in pdict: membs = pdict["members"] memnames = [ mem["name"] for mem in membs] print indent, ", ".join(memnames) if "subcats" in pdict: for cat in pdict["subcats"]: self.printDict(cat, indent=indent+" ") if server: import BaseHTTPServer HOST_NAME = "" 
PORT_NUMBER = options.port class IfaceHandler(BaseHTTPServer.BaseHTTPRequestHandler): pi = None; def address_string(self): host, port = self.client_address[:2] return host def do_HEAD(s): s.send_response(200) s.send_header("Content-type", "text/html") s.end_headers() def do_GET(s): """Respond to a GET request.""" try: myIP = get_ip_address('eth0') except: print "Can't Get Local Host Addr on 'etho0'." myIP = None self = s clientIP = self.client_address[0] print "at159: host", myIP,"client", clientIP if clientIP == myIP or clientIP == "127.0.0.1": localClient = True else: localClient = False print "at165:",localClient,options.nonlocal if localClient == True and options.nonlocal == True: localClient = False self = s # If someone went to "http://something.somewhere.net/foo/bar/", # then s.path equals "/foo/bar/". #s.wfile.write("<p>You accessed path: %s</p>" % s.path) pres = urlparse(s.path) urlparms = parse_qs(pres.query) cmds = pres.path.split("/")[1:] command = scope = subscope = None if len(cmds)>0: command = cmds[0] if len(cmds)>1: scope = cmds[1] if len(cmds)>2: subscope = cmds[2] pparams = pres.query print "186:", command, scope, subscope, "urlparms:", repr(urlparms) if command=="qap" or command=="docs": if ".." in self.path: self.send_response(200) self.send_header('Content-type', 'text/html') self.end_headers() data = "<b>bad path error</b>" self.wfile.write(data) dirname = os.path.dirname(astrodata.__file__) print "lP114:", dirname if command == "qap": joinlist = [dirname, "scripts/adcc_faceplate/"] elif command == "docs": joinlist = [dirname, "doc/docscripts/build/"] # Split out any parameters in the URL self.path = self.path.split("?")[0] print "adk204:", repr(self.path) #append any further directory info. joinlist.append( self.path[len(command)+2:]) # print "ppw790:", repr(joinlist), self.path fname = os.path.join(*joinlist) # print "pt97: QAP IF: trying to open\n\t%s" % fname responsecode = 200 try: f = open(fname, "r") data = f.read() f.close() except IOError: print "lP131: no such resource as",fname data = "<b>NO SUCH RESOURCE AVAILABLE</b>" responsecode = 404 #print "pt100:",data self.send_response(responsecode) if self.path.endswith(".js"): self.send_header('Content-type', 'text/javascript') self.send_header("Expires", "157785000") elif self.path.endswith(".css"): self.send_header("Content-type", "text/css") elif fname.endswith(".png"): self.send_header('Content-type', "image/png") else: self.send_header('Content-type', 'text/html') self.end_headers() self.wfile.write(data) return print "233: at second if:(%s)" % command # preprocess command for typedict arg graphtype = "NICI" #"GMOS_MOS" showprims = None if "showprims" in urlparms: from astrodata.RecipeManager import centralPrimitivesIndex showprims = centralPrimitivesIndex if command == "typedict.py": command = "" if "astrotype" in urlparms: graphtype = urlparms["astrotype"][0] if command == "" or command.isdigit(): s.send_response(200) s.send_header("Content-type", "text/html") s.end_headers() dirname = os.path.dirname(__file__) templname = os.path.join(dirname, "adcc_faceplate/adktool%s.html"% str(command)) try: template = open(templname) except IOError: s.wfile.write("CAN'T FIND TEMPLATE: %s" % templname) return template = template.read() template_args = {} selopts = [] for pkpath in cs.package_paths: packname = os.path.basename(pkpath) path = pkpath link = "/pkinfo/"+packname selopts.append( { "name":packname, "path":path, "url":link }) accordfeature = jsutil.JSAccord() 
template_args.update({"accordian_div":accordfeature.div()}) sel = jsutil.JSPackageSelection(selopts) template_args.update({"select_options": sel.div()}) typdiv = jsutil.JSTypes() template_args.update({"types_div": typdiv.div()}) desdiv = jsutil.JSDescriptors() template_args.update({"descriptors_div": desdiv.div()}) recdiv = jsutil.JSRecipeSystem() template_args.update({"recipes_div": recdiv.div()}) from astrodata.datatypes import get_classification_library classlib = get_classification_library() typeobj = classlib.get_type_obj("GEMINI") print "akdtool281:",pformat(typeobj.json_typetree()) typestree_json = typeobj.json_typetree() # dot = classlib.gviz_doc(astrotype=graphtype, assign_dict = showprims) template_args.update( { "local_client": "true" if localClient else "false", # "types_tree_dot": dot, "typestree_json":typestree_json, }) pagetext = template % template_args s.wfile.write(pagetext) elif command == "old": # main interface s.send_response(200) s.send_header("Content-type", "text/html") s.end_headers() s.wfile.write(""" <html><head><title>Astrodata Package Viewer</title> <link href="/qap/js/jquery-ui/css/ui-lightness/jquery-ui-1.8.20.custom.css" rel= "stylesheet"> <script type="text/javascript" src="/qap/js/jquery-ui/js/jquery.js"></script> <script type="text/javascript" src="/qap/js/jquery-ui/js/jquery-ui.js"></script> <script type="text/javascript"> function ajaxLink(link) { $.ajax({url:link, type:"post", }); } function SERVEReditFile(link,lnum) { if (typeof(lnum) == "undefined") { lnum = 0; } $.ajax({url:"/edit", type:"post", data: { lnum:lnum, filename: link } }); } function aceditFile(link,lnum) { if (typeof(lnum) == "undefined") { lnum = 0; } var ddict = { lnum:lnum, filename:link }; var normal = true; if (normal) { var acewin = window.open("/acedit?lnum="+lnum+"&filename="+encodeURI(link), "_blank", "width=600,height=600"); // acewin.moveTo(window.screenX, window.screenY); acewin.moveTo(200,200); //acewin.resizeTo(600,500); } else { $.ajax({url:"/acedit", type:"get", data: { lnum:lnum, filename: link }, success:function(data) { var acewin = window.open("","","width=600,height=500"); // acewin.moveTo(window.screenX, window.screenY); acewin.moveTo(0,0); ac acewin.document.write(data); acewin.focus(); } }); } } editFile = aceditFile; function empty_pdk_focus(){ $($(".pdk_focus")[0]).slideUp(500, function () { $($(".pdk_focus")[0]).empty(); }); } function getKeys(obj) { var keys = [] for (var key in obj) { keys[keys.length]=key; } keys.sort(); return keys; } localClient = %(local_client)s; </script> </head> """ % {"local_client": "true" if localClient else "false" } ) s.wfile.write("<body>") s.wfile.write("<a href='/docs'>Astrodata Manual</a><br/>") ### selopts = [] for pkpath in cs.package_paths: packname = os.path.basename(pkpath) path = pkpath link = "/pkinfo/"+packname selopts.append( { "name":packname, "path":path, "url":link }) accordfeature = jsutil.JSAccord() s.wfile.write(accordfeature.div()) sel = jsutil.JSPackageSelection(selopts) s.wfile.write(sel.div()) #act = jsutil.JSAce() #s.wfile.write(act.div()) s.wfile.write("""<div class="pdk_focus"> </div> """) typdiv = jsutil.JSTypes() s.wfile.write(typdiv.div()) desdiv = jsutil.JSDescriptors() s.wfile.write(desdiv.div()) recdiv = jsutil.JSRecipeSystem() s.wfile.write(recdiv.div()) #typdiv = jsutil.JSTypes() #s.wfile.write(typdiv.div()) s.wfile.write("</body></html>") elif command == "acedit": st = time() print "got ace edit", st s.send_response(200) print "c1", time() -st, " elapsed" s.send_header("Content-type", 
"text/html") print "c1", time() -st, " elapsed" s.end_headers() print "before open file ace edit", urlparms["filename"], time() -st, " elapsed" sendfile = open(urlparms["filename"][0]) print "before read file ace edit", time() -st, " elapsed" code = sendfile.read() print "read file ace edit", time() -st, " elapsed" sendfile.close() code = code.replace("<","&lt;") code = code.replace(">","&rt;") print "pt353:",localClient if "target" in urlparms: target = urlparms["target"][0] else: target = "unknown" ace = jsutil.JSAce(code, int(urlparms["lnum"][0]), local_client = localClient, filename = urlparms["filename"][0], target = target ); s.wfile.write(ace.page()) # print "pt303:", ace.page() print "returned ace edit", time() -st, " elapsed" return elif command == "pkinfo" or command == "typeinfo": packinfo = False typeinfo = False astrotype = None packname = None if command == "pkinfo": packinfo = True packname = scope if command == "typeinfo": typeinfo = True if "astrotype" in urlparms: astrotype = urlparms["astrotype"] else: astrotype = scope cl = AstroDataType.ClassificationLibrary.get_classification_library() s.send_response(200) s.send_header("Content-type", "application/json") s.end_headers() rd = {"package":scope, "types":[]} retprimsets = rettypes = retdescriptors = retrecipes = retrecipesystem = False if packname: if (subscope == None or subscope == "all"): print "sub", subscope rettypes = True retdescriptors = True retrecipesystem = True retprimsets = True if subscope == "types": rettypes = True if subscope == "descriptors": retdescriptors = True if subscope == "recipesystem": retrecipesystem = True if subscope == "primsets": retrecipesystem = True if astrotype: if (subscope == None or subscope == "all"): print "sub", subscope rettypes = True retdescriptors = True retrecipesystem = True #True retprimsets = True #True if subscope == "types": rettypes = True if subscope == "descriptors": retdescriptors = True if subscope == "recipesystem": retrecipesystem = True if subscope == "primsets": retrecipesystem = True # returning types if rettypes: cns = [] if "type_meta" not in rd: rd.update({"type_meta": {}}) if typeinfo: typenames = [astrotype] else: # packinfo typenames = cl.typesDict.keys() for cn in typenames: co = cl.typesDict[cn] print "lp184:", co.fullpath editlink = "/edit?%s" % urllib.urlencode( {"filename":co.fullpath, "lnum":0}) if typeinfo or ( packname and packname in co.fullpath ): rd["types"].append(cn) rd["type_meta"].update({cn: {"type":cn, "fullpath":co.fullpath, "edit_link": editlink} }) rd["types"].sort() if retdescriptors: from astrodata import Descriptors das = []; if packinfo: for dv in Descriptors.centralCalculatorIndex: mod = Descriptors.centralCalculatorIndex[dv].split(".")[0] exec("import %s" % mod) module = eval(mod) path = module.__file__ # print "173:",scope, path if scope in path: di = {"type":dv, "descriptor_class": Descriptors.centralCalculatorIndex[dv], "path": path } das.append(di) if typeinfo: if astrotype in Descriptors.centralCalculatorIndex.keys(): mod = Descriptors.centralCalculatorIndex[astrotype].split(".")[0] exec ("import %s" % mod) module = eval(mod) path = module.__file__ di = { "type":astrotype, "descriptor_class": Descriptors.centralCalculatorIndex[astrotype], "path":path } das.append(di) das.sort(key= lambda da: da["type"]) rd.update({"descriptors":das}) if retrecipesystem: from astrodata import RecipeManager ri = RecipeManager.centralRecipeIndex recdict = {} if "recipes" not in rd: rd.update({"recipes":{}}) bigrecdict = rd["recipes"] 
recipe_cat = RCat({ "cat_name": scope, }) for rec in ri.keys(): if typeinfo: relevant = ri[rec].endswith(astrotype) elif packinfo: relevant = scope in ri[rec] print "adk625:", relevant, astrotype, ri[rec] #if relevant == True: # print "pt252:", relevant, rec, scope, ri[rec] if packinfo: package = scope elif typeinfo: package = "*" if relevant == True: #print "pt254: TRUE" # print "pt252:", rec, scope, ri[rec] trd = {"name":rec, "path":ri[rec], "package":package} bigrecdict.update({rec: trd}) recipe_cat.add(trd) rd.update({"recipe_cat":recipe_cat.cat}) if retprimsets: from astrodata import RecipeManager from astrodata.RecipeManager import RecipeLibrary, centralPrimitivesCatalog rl = RecipeLibrary() if packinfo: primsetkeys = RecipeManager.centralPrimitivesIndex.keys() else: primsetkeys = [astrotype] print "adkt649:", primsetkeys cpi = RecipeManager.centralPrimitivesIndex primsetkeys.sort() primitive_cat = {} for key in primsetkeys: if key in cpi: for entry in cpi[key]: print "pt345:",key,entry impname = os.path.splitext(entry[0])[0] #[0] #exec("import "+impname) #fname = eval(impname+".__file__") fname = "astrodata_Gemini" #print "fname",fname ps = { "astrotype":key, "module": entry[0], "class":entry[1], } cpc = centralPrimitivesCatalog pmd = cpc.get_primcat_dict(entry) if pmd: ps.update({"index_path":pmd["path"], "package":pmd["package"] }) # print "pt382:", pformat(ps) if scope == pmd["package"] or typeinfo: print "at527:", key primitive_cat.update({key:ps}) rd.update({"primitives_cat":primitive_cat}) # print "lp189:", pprint.pformat(rd["recipes"]) rd["astrotype"] = astrotype s.wfile.write(json.dumps(rd)) return elif command == "calculator_by_type": from astrodata.Descriptors import centralCalculatorIndex, calculatorPackageMap import astrodata.adinspect as adi print "538:" s.send_response(200) s.send_header("Content-type", "application/json") s.end_headers() s.wfile.write(json.dumps(adi.get_descriptors(astrotype=scope), sort_keys=True, indent=4)) return elif command == "primset_by_type": from astrodata.priminspect import PrimInspect from inspect import getsourcelines,getsourcefile import astrodata.adinspect as adi from astrodata.RecipeManager import centralPrimitivesIndex as cpi # print "at529:", pformat(cpi[scope]) primsets = recipelibrary.retrieve_primitive_set(astrotype=scope) # print "at536:", pformat(primsets) # if self.pi == None: # self.pi = PrimInspect() #pi = self.pi #mdict = pi.master_dict[scope] s.send_response(200) s.send_header("Content-type", "application/json") s.end_headers() # dstr = json.dumps(pidict["inheritance"]["GMOS"].keys(), sort_keys=True, indent=4) # psObject = mdict["instance"] # classname = mdict["class"].__name__ parsed = urlparse(self.path) parms = parse_qs(parsed.query) # print "at550:", pformat(parms) pclass = parms["primclass"][0] oprims = [] tprimset = None for primset in primsets: if primset.__class__.__name__ == pclass: tprimset = primset else: oprims.append( { "primclass":pclass, "astrotype":scope, "link_frag": '''also assigned <i><b>%(primclass)s</b></i> (<a href="javascript:void(0)" onclick="showPrimset('%(primclass)s','%(astrotype)s')" >detail</a>) ''' % {"primclass":primset.__class__.__name__, "astrotype":scope } } ) # print "at572:", pformat(oprims) # which object? 
one that has right class psObject = tprimset classname = psObject.__class__.__name__ primslist = adi.get_primitives(psObject) # mdict["primitives"].keys() exec("import "+psObject.__module__) tpath = eval(psObject.__module__+".__file__") if tpath[-4:] == ".pyc": tpath = tpath[:-1] tpath = os.path.splitext(tpath)[0]+".py" # print "405:", tpath pd = {} for prim in primslist: co = eval("psObject."+prim) gsl = getsourcelines(co) fpath = getsourcefile(co) if fpath[-4:] ==".pyc": fpath = fpath[:-1] # print "pt420:", fpath pd.update({prim: {"lnum":gsl[1], "path": fpath, "basename": os.path.basename(fpath), "dirname":os.path.dirname(fpath)}}) retdict = {"class":classname, "path":tpath, "prims":primslist, "prim_info":pd, "other_primsets":oprims } # print "pt549:", pformat(retdict) s.wfile.write(json.dumps(retdict)) return elif command == "primset_info": from astrodata.priminspect import PrimInspect from inspect import getsourcelines if self.pi == None: self.pi = PrimInspect() pi = self.pi # print "388:", pformat(urlparms) # print "400:",pformat(pi.class2instance) classname = urlparms["class"][0] psObject = pi.class2instance[classname] primslist = pi.primsdict[psObject] exec("import "+psObject.__module__) tpath = eval(psObject.__module__+".__file__") # print "405:", tpath s.send_response(200) s.send_header("Content-type", "application/json") s.end_headers() tpath = os.path.splitext(tpath) tpath = tpath[0]+".py" pd = {} for prim in primslist: co = eval("psObject."+prim) gsl = getsourcelines(co) # print "pt420:", pformat(gsl) pd.update({prim: {"lnum":gsl[1]}}) retdict = {"class":classname, "path":tpath, "prims":primslist, "prim_info":pd } # print "pt549:", pformat(retdict) s.wfile.write(json.dumps(retdict)) return def do_POST(s): """Respond to a POST request.""" import cgi self = s try: ctype, pdict = cgi.parse_header(self.headers.getheader('content-type')) if ctype == 'multipart/form-data': postvars = cgi.parse_multipart(self.rfile, pdict) elif ctype == 'application/x-www-form-urlencoded': length = int(self.headers.getheader('content-length')) postvars = cgi.parse_qs(self.rfile.read(length), keep_blank_values=1) else: postvars = {} except: postvars = {} print "350:", pformat(postvars) self = s # If someone went to "http://something.somewhere.net/foo/bar/", # then s.path equals "/foo/bar/". #s.wfile.write("<p>You accessed path: %s</p>" % s.path) pres = urlparse(s.path) urlparms = parse_qs(pres.query) cmds = pres.path.split("/")[1:] command = scope = subscope = None if len(cmds)>0: command = cmds[0] if len(cmds)>1: scope = cmds[1] if len(cmds)>2: subscope = cmds[2] postvars.update(urlparms) if command == "edit": print "pt165:",s.path print "pt169:",postvars["filename"] fn = postvars["filename"][0] lnum = postvars["lnum"][0] ethread = EditThread(fn, lnum) ethread.start() s.send_response(200) s.send_header("Content-type", "application/json") s.end_headers() s.wfile.write(json.dumps({"status":"success"})) return server_class = BaseHTTPServer.HTTPServer httpd = server_class((HOST_NAME, PORT_NUMBER), IfaceHandler) try: print "Starting HTTP Interface at %s:%d" % (HOST_NAME, PORT_NUMBER) print "Opening Window in Default Browser" if False: # this opens a tab/window in the default browser import webbrowser webbrowser.open("http://localhost:%d/" % PORT_NUMBER, 0, True) httpd.serve_forever() except KeyboardInterrupt: pass httpd.server_close()
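# Python 3 port of the get_ip_address helper defined near the top of the
# server module above; 0x8915 is the Linux SIOCGIFADDR ioctl, and encoding
# the interface name to bytes is the only change required from Python 2.
import fcntl
import socket
import struct


def get_ip_address_py3(ifname):
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    packed = struct.pack('256s', ifname[:15].encode('utf-8'))
    return socket.inet_ntoa(
        fcntl.ioctl(s.fileno(), 0x8915, packed)[20:24])  # SIOCGIFADDR


# print(get_ip_address_py3('eth0'))  # e.g. '192.168.1.10' on Linux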
from z3 import Const, IntSort


class ClassNode:
    """
    Class representing a node in a tree where each node represents a class,
    and has references to the base class and subclasses.
    """

    def __init__(self, name, parent_node, type_sort):
        self.name = name
        self.parent_node = parent_node
        self.children = []
        self.type_sort = type_sort

    def find(self, name):
        """
        Looks up a class with the given name in the tree rooted at this node.
        If there is no such class, returns None.
        """
        if name == self.name:
            return self
        for c in self.children:
            res = c.find(name)
            if res:
                return res
        return None

    def __str__(self):
        return str(self.name)

    def all_children(self):
        """
        Returns this node and all transitive child nodes.
        """
        result = [self]
        for c in self.children:
            result += c.all_children()
        return result

    def all_parents(self):
        """
        Returns this node and all transitive parent nodes.
        """
        result = [self]
        if self.parent_node:
            result += self.parent_node.all_parents()
        return result

    def get_literal(self):
        """
        Creates a Z3 expression representing this type.
        If this is a generic type, will use the variables from
        self.quantified() as the type arguments.
        """
        if isinstance(self.name, str):
            return getattr(self.type_sort, self.name)
        else:
            constr = getattr(self.type_sort, self.name[0])
            args = self.quantified()
            return constr(*args)

    def get_literal_with_args(self, var):
        """
        Creates a Z3 expression representing this type.
        If this is a generic type, will use the accessor methods for the type
        arguments and apply them to the given variable argument to get the
        arguments.
        """
        if isinstance(self.name, str):
            return getattr(self.type_sort, self.name)
        else:
            constr = getattr(self.type_sort, self.name[0])
            args = []
            for arg in self.name[1:]:
                args.append(getattr(self.type_sort, arg)(var))
            return constr(*args)

    def quantified(self):
        """
        Returns a list of Z3 variables, one for each parameter of this type.
        """
        res = []
        if isinstance(self.name, tuple):
            for i, arg in enumerate(self.name[1:]):
                sort = self.type_sort if not arg.endswith('defaults_args') else IntSort()
                cur = Const("y" + str(i), sort)
                res.append(cur)
        return res
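# Usage sketch for ClassNode above; type_sort is left as None because only
# the pure tree operations are exercised (no Z3 datatype is needed here).
object_node = ClassNode('object', None, None)
int_node = ClassNode('int', object_node, None)
str_node = ClassNode('str', object_node, None)
object_node.children += [int_node, str_node]

assert object_node.find('str') is str_node
assert [str(n) for n in object_node.all_children()] == ['object', 'int', 'str']
assert [str(n) for n in str_node.all_parents()] == ['str', 'object']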
"""Copyright (c) 2015 TBillTech. This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. """ import json def as_summary(dct): if '__Summary__' in dct: result = Summary() result.test_count = dct['test_count'] result.success_count = dct['success_count'] result.errors = dct['errors'] if result.errors: if result.test_count == result.success_count: result.test_count += 1 return result return dct def get_safe_test_count(summary_info): try: return int(summary_info.test_count) except: return 1 def get_safe_success_count_from_bool(summary_info): if SummaryInfo: return 1 else: return 0 def get_safe_success_count(summary_info): try: return int(summary_info.success_count) except: return get_safe_success_count_from_bool(summary_info) def get_safe_error_list(summary_info): try: return list(summary_info.errors) except: return list() def normal_summary_decode_from_std_strs(stdstrs): (substdout, substderr) = stdstrs js_summary = json.loads(substdout, object_hook=as_summary) js_summary.append_error(substderr) return js_summary def error_summary_decode_from_std_strs(stdstrs): (substdout, substderr) = stdstrs error_summary = Summary() error_summary.increment_fail() error_summary.append_error(substdout) error_summary.append_error(substderr) return error_summary def summary_from_std_strs(stdstrs): try: return normal_summary_decode_from_std_strs(stdstrs) except ValueError: return error_summary_decode_from_std_strs(stdstrs) class Summary(object): def __init__(self, stdstrs=None): self.test_count = 0 self.success_count = 0 self.errors = list() def increment_pass(self): self.test_count += 1 self.success_count += 1 def increment_fail(self): self.test_count += 1 def test(self, assertion_value): assert(assertion_value) self.increment_pass() def merge(self, summary_info): self.test_count += get_safe_test_count(summary_info) self.success_count += get_safe_success_count(summary_info) self.errors += get_safe_error_list(summary_info) def append_error(self, error_str): if error_str != '': self.errors.append(str(error_str)) def get_fail_count(self): return self.test_count - self.success_count def __str__(self): if self.get_fail_count() == 0: return self.success_str() else: return self.failure_str() def success_str(self): return "All %s Tests Succeeded!\n" % (self.success_count) def failure_str(self): resultstr = self.errors_str() resultstr += "%s Failures out of %s tests" % ( self.get_fail_count(), self.test_count) return resultstr def errors_str(self): resultstr = '' for error in self.errors: resultstr += "Error: %s\n" % (error,) return resultstr
from typing import Dict

value: int = int(input())
example: Dict[str, int] = {"a": 0, "b": 1, "c": 2}
key: str = "b"
example[key]: int = value
i: int = example["a"]
print(i)
import logging

import web
from bson.objectid import ObjectId

from inginious.frontend.pages.course_admin.utils import INGIniousSubmissionAdminPage


class CourseDownloadSubmissions(INGIniousSubmissionAdminPage):
    """ Batch operation management """
    _logger = logging.getLogger("inginious.webapp.download")

    def valid_formats(self):
        # Avoid shadowing the dict builtin
        formats = {
            "taskid/username": _("taskid/username"),
            "taskid/aggregation": _("taskid/aggregation"),
            "username/taskid": _("username/taskid"),
            "aggregation/taskid": _("aggregation/taskid")
        }
        return list(formats.keys())

    def POST_AUTH(self, courseid):  # pylint: disable=arguments-differ
        """ POST request: download the selected submissions as an archive """
        course, __ = self.get_course_and_check_rights(courseid)

        user_input = web.input(tasks=[], aggregations=[], users=[])

        if "filter_type" not in user_input or "type" not in user_input or "format" not in user_input or \
                user_input.format not in self.valid_formats():
            raise web.notfound()

        tasks = list(course.get_tasks().keys())
        for i in user_input.tasks:
            if i not in tasks:
                raise web.notfound()

        # Load submissions
        submissions, aggregations = self.get_selected_submissions(course, user_input.filter_type, user_input.tasks,
                                                                  user_input.users, user_input.aggregations,
                                                                  user_input.type)

        self._logger.info("Downloading %d submissions from course %s", len(submissions), courseid)
        web.header('Content-Type', 'application/x-gzip', unique=True)
        web.header('Content-Disposition', 'attachment; filename="submissions.tgz"', unique=True)
        return self.submission_manager.get_submission_archive(submissions,
                                                              list(reversed(user_input.format.split('/'))),
                                                              aggregations)

    def GET_AUTH(self, courseid):  # pylint: disable=arguments-differ
        """ GET request """
        course, __ = self.get_course_and_check_rights(courseid)
        user_input = web.input()

        # First, check for a particular submission
        if "submission" in user_input:
            submission = self.database.submissions.find_one({"_id": ObjectId(user_input.submission),
                                                             "courseid": course.get_id(),
                                                             "status": {"$in": ["done", "error"]}})
            if submission is None:
                raise web.notfound()

            self._logger.info("Downloading submission %s - %s - %s - %s", submission['_id'], submission['courseid'],
                              submission['taskid'], submission['username'])
            web.header('Content-Type', 'application/x-gzip', unique=True)
            web.header('Content-Disposition', 'attachment; filename="submissions.tgz"', unique=True)
            return self.submission_manager.get_submission_archive([submission], [], {})

        # Else, display the complete page
        tasks, user_data, aggregations, tutored_aggregations, \
            tutored_users, checked_tasks, checked_users, show_aggregations = self.show_page_params(course, user_input)

        chosen_format = self.valid_formats()[0]
        if "format" in user_input and user_input.format in self.valid_formats():
            chosen_format = user_input.format

        if "aggregation" in chosen_format:
            show_aggregations = True

        return self.template_helper.get_renderer().course_admin.download(course, tasks, user_data, aggregations,
                                                                         tutored_aggregations, tutored_users,
                                                                         checked_tasks, checked_users,
                                                                         self.valid_formats(), chosen_format,
                                                                         show_aggregations)
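# Worked example of the archive-format handling above: the chosen format
# string is split on '/' and reversed before being passed on to
# get_submission_archive.
fmt = "taskid/username"
print(list(reversed(fmt.split('/'))))  # -> ['username', 'taskid']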
"""Default data initializations for the XBlock, with formatting preserved.""" DEFAULT_PROMPT = """ Censorship in the Libraries 'All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us.' --Katherine Paterson, Author Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading. Read for conciseness, clarity of thought, and form. """ DEFAULT_RUBRIC_CRITERIA = [ { 'name': "Ideas", 'label': "Ideas", 'prompt': "Determine if there is a unifying theme or main idea.", 'order_num': 0, 'feedback': 'optional', 'options': [ { 'order_num': 0, 'points': 0, 'name': 'Poor', 'label': 'Poor', 'explanation': """Difficult for the reader to discern the main idea. Too brief or too repetitive to establish or maintain a focus.""" }, { 'order_num': 1, 'points': 3, 'name': 'Fair', 'label': 'Fair', 'explanation': """Presents a unifying theme or main idea, but may include minor tangents. Stays somewhat focused on topic and task.""" }, { 'order_num': 2, 'points': 5, 'name': 'Good', 'label': 'Good', 'explanation': """Presents a unifying theme or main idea without going off on tangents. Stays completely focused on topic and task.""" }, ], }, { 'name': "Content", 'label': "Content", 'prompt': "Assess the content of the submission", 'order_num': 1, 'options': [ { 'order_num': 0, 'points': 0, 'name': 'Poor', 'label': 'Poor', 'explanation': """Includes little information with few or no details or unrelated details. Unsuccessful in attempts to explore any facets of the topic.""" }, { 'order_num': 1, 'points': 1, 'name': 'Fair', 'label': 'Fair', 'explanation': """Includes little information and few or no details. Explores only one or two facets of the topic.""" }, { 'order_num': 2, 'points': 3, 'name': 'Good', 'label': 'Good', 'explanation': """Includes sufficient information and supporting details. (Details may not be fully developed; ideas may be listed.) Explores some facets of the topic.""" }, { 'order_num': 3, 'points': 3, 'name': 'Excellent', 'label': 'Excellent', 'explanation': """Includes in-depth information and exceptional supporting details that are fully developed. Explores all facets of the topic.""" }, ], }, ] DEFAULT_RUBRIC_FEEDBACK_PROMPT = """ (Optional) What aspects of this response stood out to you? What did it do well? How could it be improved? """ DEFAULT_RUBRIC_FEEDBACK_TEXT = """ I think that this response... """ DEFAULT_EXAMPLE_ANSWER = ( "Replace this text with your own sample response for this assignment. " "Then, under Response Score to the right, select an option for each criterion. " "Learners practice performing peer assessments by assessing this response and comparing " "the options that they select in the rubric with the options that you specified." ) DEFAULT_EXAMPLE_ANSWER_2 = ( "Replace this text with another sample response, " "and then specify the options that you would select for this response." 
) DEFAULT_STUDENT_TRAINING = { "name": "student-training", "start": None, "due": None, "examples": [ { "answer": DEFAULT_EXAMPLE_ANSWER, "options_selected": [ { "criterion": "Ideas", "option": "Fair" }, { "criterion": "Content", "option": "Good" } ] }, { "answer": DEFAULT_EXAMPLE_ANSWER_2, "options_selected": [ { "criterion": "Ideas", "option": "Poor" }, { "criterion": "Content", "option": "Good" } ] } ] } DEFAULT_START = "2001-01-01T00:00" DEFAULT_DUE = "2029-01-01T00:00" DEFAULT_PEER_ASSESSMENT = { "name": "peer-assessment", "start": DEFAULT_START, "due": DEFAULT_DUE, "must_grade": 5, "must_be_graded_by": 3, "track_changes": "", } DEFAULT_SELF_ASSESSMENT = { "name": "self-assessment", "start": DEFAULT_START, "due": DEFAULT_DUE, } DEFAULT_STAFF_ASSESSMENT = { "name": "staff-assessment", "start": DEFAULT_START, "due": DEFAULT_DUE, "required": False, } DEFAULT_ASSESSMENT_MODULES = [ DEFAULT_STUDENT_TRAINING, DEFAULT_PEER_ASSESSMENT, DEFAULT_SELF_ASSESSMENT, DEFAULT_STAFF_ASSESSMENT, ] DEFAULT_EDITOR_ASSESSMENTS_ORDER = [ "student-training", "peer-assessment", "self-assessment", "staff-assessment", ]
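
# --- Hedged sketch: how the defaults above fit together ---
# Each example in DEFAULT_STUDENT_TRAINING selects one option per criterion
# by name. This helper (an illustration only, not part of the XBlock API)
# resolves those selections against DEFAULT_RUBRIC_CRITERIA and sums points.

def _score_example(example, criteria=DEFAULT_RUBRIC_CRITERIA):
    options_by_criterion = {
        c['name']: {o['name']: o['points'] for o in c['options']}
        for c in criteria
    }
    return sum(
        options_by_criterion[sel['criterion']][sel['option']]
        for sel in example['options_selected']
    )

# First training example: Ideas "Fair" (3) + Content "Good" (3) == 6 points.
assert _score_example(DEFAULT_STUDENT_TRAINING["examples"][0]) == 6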
""" Simple utilities to run hazard calculations from within the toolkit """ import sys import numpy as np import multiprocessing from collections import OrderedDict from openquake.hazardlib.calc import hazard_curve from openquake.hazardlib.site import Site, SiteCollection from openquake.hazardlib.gsim import get_available_gsims from openquake.hazardlib.calc import filters from openquake.hazardlib import imt from openquake.hazardlib.geo.point import Point from hmtk.sources.source_model import mtkSourceModel DEFAULT_WORKERS = multiprocessing.cpu_count() GSIM_MAP = get_available_gsims() def _check_supported_imts(imts): """ Checks that all of the IMTs in the list are supported """ output_imts = [] for imtx in imts: if imtx in imt.__all__: output_imts.append(imt.__dict__[imtx]()) elif 'SA' in imtx: output_imts.append(imt.from_string(imtx)) else: raise ValueError('IMT %s not supported in OpenQuake!' % imtx) return output_imts def _check_imts_imls(imts, imls): """ Pre-process IMTS and IMLs, returning a corresponding IMT dictionary """ #imts = _check_supported_imts(imts) n_imts = len(imts) if len(imls) == 1: # Fixed IMLs imts = OrderedDict([(imt, imls[0]) for imt in imts]) elif len(imls) == n_imts: # One set of IMLS per IMT imts = OrderedDict([(imts[iloc], imls[iloc]) for iloc in range(0, n_imts)]) else: raise ValueError('Number of IML sets must be 1 or equal ' 'to number of IMTs') return imts def _preprocess_gmpes(source_model, gmpes): """ :param dict gmpes: Regionalisation of GMPEs {'region_name': 'GMPE Name'} """ model_regions = [src.tectonic_region_type for src in source_model] for key in gmpes.keys(): #if not key in model_regions: # raise ValueError('Region type %s not in source model' % key) if gmpes[key] in GSIM_MAP.keys(): gmpes[key] = GSIM_MAP[gmpes[key]]() else: raise ValueError('GMPE %s not supported!' 
                             % gmpes[key])
    for region in model_regions:
        if region not in gmpes:
            raise ValueError('No GMPE defined for region type %s' % region)
    return gmpes


def site_array_to_collection(site_array):
    """
    Converts a set of sites from a 2D numpy array to an instance of :class:
    openquake.hazardlib.site.SiteCollection

    :param np.ndarray site_array:
        Site parameters as
        [ID, Long, Lat, vs30, vs30measured, z1pt0, z2pt5, backarc]
    """
    site_list = []
    n_sites, n_param = np.shape(site_array)
    if n_param != 8:
        raise ValueError('Site array incorrectly formatted!')
    for iloc in range(0, n_sites):
        site = Site(Point(site_array[iloc, 1], site_array[iloc, 2]),  # Location
                    site_array[iloc, 3],  # Vs30
                    site_array[iloc, 4].astype(bool),  # vs30measured
                    site_array[iloc, 5],  # z1pt0
                    site_array[iloc, 6],  # z2pt5
                    site_array[iloc, 7].astype(bool),  # Backarc
                    site_array[iloc, 0].astype(int))  # ID
        site_list.append(site)
    return SiteCollection(site_list)


class HMTKHazardCurve(object):
    """
    Base Class for calculation of hazard curves according to different
    parallelisation strategies

    :param source_model:
        Source model as list of OpenQuake sources
    :param sites:
        Sites as :class: openquake.hazardlib.site.SiteCollection
    :param gmpes:
        GMPE dictionary with params {'Region', 'GMPEName'}
    :param np.ndarray imls:
        Intensity measure levels (g for PGA, Sa; cm/s for PGV)
    :param imts:
        List of intensity measures as :class: openquake.hazardlib.imt
    :param float truncation_level:
        GMPE truncation level
    :param src_filter:
        Source distance filter
    :param rup_filter:
        Rupture distance filter
    """
    def __init__(self, source_model, sites, gmpes, imls, imts,
                 truncation_level=None, source_integration_dist=None,
                 rupture_integration_dist=None):
        """
        Instantiate and preprocess

        :param float source_integration_dist:
            Integration distance for sources
        :param float rupture_integration_dist:
            Integration distance for ruptures
        """
        self.source_model = source_model
        self.sites = sites
        self.gmpes = gmpes
        self.imls = imls
        self.imts = imts
        self.truncation_level = truncation_level
        if source_integration_dist:
            self.src_filter = filters.source_site_distance_filter(
                source_integration_dist)
        else:
            self.src_filter = filters.source_site_noop_filter
        if rupture_integration_dist:
            self.rup_filter = filters.rupture_site_distance_filter(
                rupture_integration_dist)
        else:
            self.rup_filter = filters.rupture_site_noop_filter
        self.preprocess_inputs()

    def preprocess_inputs(self):
        """
        Perform initial checks to ensure correct inputs to hazard calculation
        """
        if not isinstance(self.sites, SiteCollection):
            raise ValueError('Sites must be input as instance of :class: '
                             'openquake.hazardlib.site.SiteCollection')
        # Preprocess GMPEs
        self.gmpes = _preprocess_gmpes(self.source_model, self.gmpes)
        # Set up IMT dictionary
        self.imts = _check_imts_imls(self.imts, self.imls)

    def _setup_poe_set(self):
        """
        Instantiate the PoE arrays as ones (probabilities of no exceedance,
        inverted to PoEs at the end of the calculation)
        """
        num_sites = self.sites.total_sites
        poe_set = OrderedDict([(imt, np.ones([num_sites, len(self.imts[imt])]))
                               for imt in self.imts])
        return poe_set

    def calculate_hazard(self, num_workers=DEFAULT_WORKERS, num_src_workers=1):
        """
        Calculates the hazard

        :param int num_workers:
            Number of workers for parallel calculation
        :param int num_src_workers:
            Number of elements per worker
        """
        return hazard_curve.calc_hazard_curves(self.source_model,
                                               self.sites,
                                               self.imts,
                                               self.gmpes,
                                               self.truncation_level,
                                               self.src_filter,
                                               self.rup_filter)


def get_hazard_curve_source(input_set):
    """
    From a dictionary input set returns hazard curves
    """
    try:
        for rupture, r_sites in input_set["ruptures_sites"]:
            gsim = input_set["gsims"][rupture.tectonic_region_type]
            sctx, rctx, dctx = gsim.make_contexts(r_sites, rupture)
            for iimt in input_set["imts"]:
                poes = gsim.get_poes(sctx, rctx, dctx,
                                     imt.from_string(iimt),
                                     input_set["imts"][iimt],
                                     input_set["truncation_level"])
                pno = rupture.get_probability_no_exceedance(poes)
                input_set["curves"][iimt] *= r_sites.expand(pno,
                                                            placeholder=1)
    except Exception as err:
        # Swallow per-source errors so one bad source does not abort the
        # whole calculation; the curves computed so far are still returned.
        pass
    for iimt in input_set["imts"]:
        input_set["curves"][iimt] = 1 - input_set["curves"][iimt]
    return input_set["curves"]


class HMTKHazardCurveParallelSource(HMTKHazardCurve):
    """
    Runs the PSHA calculation parallelising by source
    """
    def calculate_hazard(self, num_workers=DEFAULT_WORKERS, num_src_workers=1):
        """
        Executes the hazard calculation, parallelising by source
        """
        p = multiprocessing.Pool(processes=num_workers)
        poe_set = self._setup_poe_set()
        src_counter = 0
        input_set = []
        if len(self.source_model) < (num_workers * num_src_workers):
            gather_limit = len(self.source_model)
        else:
            gather_limit = num_workers * num_src_workers
        for source in self.source_model:
            src_counter += 1
            if src_counter < gather_limit:
                inputs = self._get_calculation_inputs(source)
                if inputs:
                    input_set.append(inputs)
            else:
                inputs = self._get_calculation_inputs(source)
                if inputs:
                    input_set.append(inputs)
                # TODO Theoretically this should use the multiprocessing
                # tools. However, after extensive testing it is shown that
                # the OpenQuake hazardlib cannot be parallelised safely with
                # pythons own multiprocessing tools. The function will be
                # kept in its present form to allow the problem to be
                # resolved in future, but there is currently no performance
                # gain over the non-parallelised version
                poei = map(get_hazard_curve_source, input_set)
                for poe in poei:
                    poe_set_keys = poe_set.keys()
                    poe_keys = poe.keys()
                    for iloc, key in enumerate(poe_set_keys):
                        poe_set[key] *= 1.0 - poe[poe_keys[iloc]]
                input_set = []
                src_counter = 0
        for key in poe_set.keys():
            poe_set[key] = 1.0 - poe_set[key]
        return poe_set

    def _get_calculation_inputs(self, source):
        """
        Returns the calculation inputs, checking if sources are missing
        """
        # The distance filters consume an iterable of (source, sites) pairs
        # and yield the pairs that survive, so wrap the single pair in a
        # list and test for an empty result instead of unpacking the filter
        # object directly (which fails for the generator-based filters).
        filtered = list(self.src_filter([(source, self.sites)]))
        if not filtered:
            return None
        s_source, s_sites = filtered[0]
        inputs = {
            "imts": self.imts,
            "gsims": {source.tectonic_region_type:
                      self.gmpes[source.tectonic_region_type]},
            "truncation_level": self.truncation_level,
            "curves": OrderedDict([(imt, np.ones([len(self.sites),
                                                  len(self.imts[imt])]))
                                   for imt in self.imts])}
        inputs["ruptures_sites"] = [(rupture, s_sites)
                                    for rupture in source.iter_ruptures()]
        return inputs
""" To generate a migration, make changes to this model file and then run: django-admin.py schemamigration submission_queue [migration_name] --auto --settings=xqueue.settings --pythonpath=. """ import json from datetime import datetime, timedelta import pytz from django.conf import settings from django.db import models from django.db.models import Q CHARFIELD_LEN_SMALL = 128 CHARFIELD_LEN_LARGE = 1024 class SubmissionManager(models.Manager): """ Table filter methods for Submissions """ def get_queue_length(self, queue_name): """ How many unretired submissions are available for a queue """ return self.time_filter('pull_time').filter(queue_name=queue_name, retired=False).count() def get_single_unretired_submission(self, queue_name): ''' Retrieve a single unretired queued item, if one exists, for the named queue Returns (success, submission): success: Flag whether retrieval is successful (Boolean) If no unretired item in the queue, return False submission: A single submission from the queue, guaranteed to be unretired ''' submission = self.time_filter('pull_time').filter( queue_name=queue_name, retired=False ).order_by( 'arrival_time' ).first() if submission: return (True, submission) else: return (False, '') def get_single_unpushed_submission(self, queue_name): """ Finds a single submission that hasn't been pushed for SUBMISSION_PROCESSING_DELAY """ return self.time_filter('push_time').filter( queue_name=queue_name, retired=False ).order_by( 'arrival_time' ).first() def time_filter(self, time_field=None): """ filters on push_time or pull_time to limit to submissions that haven't been pushed/pulled or were pushed/pulled SUBMISSION_PROCESSING_DELAY ago return a queryset that has been filtered on the specified time column """ if time_field not in ['push_time', 'pull_time']: raise ValueError(f'time_field must be pull_time or push_time not ({time_field})') previous_update = datetime.now(pytz.utc) - timedelta(minutes=settings.SUBMISSION_PROCESSING_DELAY) if time_field == "push_time": time_filter = Q(push_time__lte=(previous_update)) | Q(push_time__isnull=True) elif time_field == "pull_time": time_filter = Q(pull_time__lte=(previous_update)) | Q(pull_time__isnull=True) return super().get_queryset().filter(time_filter) class Submission(models.Model): ''' Representation of submission request, including metadata information ''' class Meta: # Once we get to Django 1.11 use indexes, it would have allowed a better index name # https://docs.djangoproject.com/en/1.11/ref/models/options/#django.db.models.Options.indexes index_together = [('queue_name', 'retired', 'push_time', 'arrival_time'), ('queue_name', 'retired', 'pull_time', 'arrival_time'), ('lms_callback_url', 'retired')] db_table = 'queue_submission' # Submission requester_id = models.CharField(max_length=CHARFIELD_LEN_SMALL) # ID of LMS lms_callback_url = models.CharField(max_length=CHARFIELD_LEN_SMALL, db_index=True) queue_name = models.CharField(max_length=CHARFIELD_LEN_SMALL, db_index=True) xqueue_header = models.CharField(max_length=CHARFIELD_LEN_LARGE) xqueue_body = models.TextField() # Uploaded files. These are prefixed with `s3_` for historical reasons, and # aliased as `keys` and `urls` to avoid an expensive migration. 
s3_keys = models.CharField(max_length=CHARFIELD_LEN_LARGE) # keys for internal Xqueue use s3_urls = models.CharField(max_length=CHARFIELD_LEN_LARGE) # urls for external access # Timing arrival_time = models.DateTimeField(auto_now_add=True) # Time of arrival from LMS pull_time = models.DateTimeField(null=True, blank=True) # Time of pull request, if pulled from external grader push_time = models.DateTimeField(null=True, blank=True) # Time of push, if xqueue pushed to external grader return_time = models.DateTimeField(null=True, blank=True) # Time of return from external grader # External pull interface grader_id = models.CharField(max_length=CHARFIELD_LEN_SMALL) # ID of external grader pullkey = models.CharField(max_length=CHARFIELD_LEN_SMALL) # Secret key for external pulling interface grader_reply = models.TextField() # Reply from external grader # Status num_failures = models.IntegerField(default=0) # Number of failures in exchange with external grader lms_ack = models.BooleanField(default=False) # True/False on whether LMS acknowledged receipt retired = models.BooleanField(default=False, db_index=True) # True/False on whether Submission is "finished" objects = SubmissionManager() def __str__(self): submission_info = "Submission from %s for queue '%s':\n" % (self.requester_id, self.queue_name) submission_info += " Callback URL: %s\n" % self.lms_callback_url submission_info += " Arrival time: %s\n" % self.arrival_time submission_info += " Pull time: %s\n" % self.pull_time submission_info += " Push time: %s\n" % self.push_time submission_info += " Return time: %s\n" % self.return_time submission_info += " Grader_id: %s\n" % self.grader_id submission_info += " Pullkey: %s\n" % self.pullkey submission_info += " num_failures: %d\n" % self.num_failures submission_info += " lms_ack: %s\n" % self.lms_ack submission_info += " retired: %s\n" % self.retired submission_info += "Original Xqueue header follows:\n" submission_info += json.dumps(json.loads(self.xqueue_header), indent=4) return submission_info @property def keys(self): ''' Alias for `s3_keys` field. ''' return self.s3_keys @property def urls(self): ''' Alias for `s3_urls` field. ''' return self.s3_urls
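
# --- Hedged usage sketch (requires a configured Django environment) ---
# A typical external-grader pull against the manager API above: fetch one
# unretired submission, stamp pull_time, and retire it once the grader
# replies. Field names are the real ones; the control flow and the
# 'certify' queue / grader id values are illustrative only.
#
#     from datetime import datetime
#     import pytz
#
#     success, submission = Submission.objects.get_single_unretired_submission('certify')
#     if success:
#         submission.pull_time = datetime.now(pytz.utc)
#         submission.grader_id = 'grader-01'  # hypothetical grader id
#         submission.save()
#         # ... hand xqueue_body to the external grader ...
#         submission.grader_reply = '{"score": 1}'
#         submission.retired = True
#         submission.save()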
""" Hook Execution. """ import os import fnmatch import logging import tempfile from twisted.internet.defer import ( inlineCallbacks, DeferredQueue, Deferred, DeferredLock, returnValue) from twisted.internet.error import ProcessExitedAlready DEBUG_HOOK_TEMPLATE = r"""#!/bin/bash set -e export JUJU_DEBUG=$(mktemp -d) exec > $JUJU_DEBUG/debug.log >&1 FILTER='^\(LS_COLORS\|LESSOPEN\|LESSCLOSE\|PWD\)=' env | grep -v $FILTER > $JUJU_DEBUG/env.sh sed -i 's/^/export /' $JUJU_DEBUG/env.sh cat > $JUJU_DEBUG/hook.sh <<END . $JUJU_DEBUG/env.sh echo \$\$ > $JUJU_DEBUG/hook.pid exec /bin/bash END chmod +x $JUJU_DEBUG/hook.sh tmux new-session -d -s $JUJU_UNIT_NAME 2>&1 | cat > /dev/null || true tmux new-window -t $JUJU_UNIT_NAME -n {hook_name} "$JUJU_DEBUG/hook.sh" exit_handler() { if [ -f $JUJU_DEBUG/hook.pid ]; then kill -9 $(cat $JUJU_DEBUG/hook.pid) || true fi } trap exit_handler EXIT while [ ! -f $JUJU_DEBUG/hook.pid ]; do sleep 1 done HOOK_PID=$(cat $JUJU_DEBUG/hook.pid) while kill -0 "$HOOK_PID" 2> /dev/null; do sleep 1 done """ class HookExecutor(object): """Executes scheduled hooks. A typical unit agent is subscribed to multiple event streams across unit and relation lifecycles. All of which will attempt to execute hooks in response to events. In order to serialize hook execution and bring observability, a hook executor is utilized across the different components that want to execute hooks. """ STOP = object() def __init__(self): self._running = False self._executions = DeferredQueue() self._observer = None self._log = logging.getLogger("hook.executor") self._run_lock = DeferredLock() # The currently executing hook invoker. None if no hook is executing. self._invoker = None # The currently executing hook's context. None if no hook is executing. self._hook_context = None # The current names of hooks that should be debugged. self._debug_hook_names = None # The path to the last utilized tempfile debug hook. self._debug_hook_file_path = None @property def running(self): """Returns a boolean, denoting if the executor is running.""" return self._running @inlineCallbacks def start(self): """Start the hook executor. After the executor is started, it will continue to serially execute any queued hook executions. """ assert self._running is False, "Already running" self._running = True self._log.debug("started") while self._running: next = yield self._executions.get() # The stop logic here is to allow for two different # scenarios. One is if the executor is currently waiting on # the queue, putting a stop value there will, immediately # wake it up and cause it to stop. # The other distinction is more subtle, if we invoke # start/stop/start on the executioner, and it was # currently executing a slow hook, then when the # executioner finishes with the hook it may now be in the # running state, resulting in two executioners closures # executing hooks. We track stops to ensure that only one # executioner closure is running at a time. if next is self.STOP: continue yield self._run_lock.acquire() if not self._running: self._run_lock.release() continue yield self._run_one(*next) self._run_lock.release() @inlineCallbacks def _run_one(self, invoker, path, exec_deferred): """Run a hook. """ hook_path = self.get_hook_path(path) if not os.path.exists(hook_path): self._log.info( "Hook does not exist, skipping %s", hook_path) exec_deferred.callback(False) if self._observer: self._observer(path) returnValue(None) self._log.debug("Running hook: %s", path) # Store for context for callbacks, execution is serialized. 
        self._invoker = invoker
        self._hook_context = invoker.get_context()

        try:
            yield invoker(hook_path)
        except Exception as e:
            self._invoker = self._hook_context = None
            self._log.debug("Hook error: %s %s", path, e)
            exec_deferred.errback(e)
        else:
            self._invoker = self._hook_context = None
            self._log.debug("Hook complete: %s", path)
            exec_deferred.callback(True)

        if self._observer:
            self._observer(path)

    @inlineCallbacks
    def stop(self):
        """Stop hook executions.

        Returns a deferred that fires when the executor has stopped.
        """
        assert self._running, "Already stopped"
        yield self._run_lock.acquire()
        self._running = False
        self._executions.put(self.STOP)
        self._run_lock.release()
        self._log.debug("stopped")

    @inlineCallbacks
    def run_priority_hook(self, invoker, hook_path):
        """Execute a hook while the executor is stopped.

        Executes a hook immediately, ignoring the existing queued hook
        executions; requires the hook executor to be stopped.
        """
        yield self._run_lock.acquire()
        try:
            assert not self._running, "Executor must not be running"
            exec_deferred = Deferred()
            yield self._run_one(invoker, hook_path, exec_deferred)
        finally:
            self._run_lock.release()
        yield exec_deferred

    def set_observer(self, observer):
        """Set a callback hook execution observer.

        The callback receives a single parameter, the path to the hook,
        and is invoked after the hook has been executed.
        """
        self._observer = observer

    def get_hook_context(self, client_id):
        """Retrieve the context of the currently executing hook.

        This serves as the integration point with the hook api server,
        which utilizes this function to retrieve a hook context for a
        given client. Since we're serializing execution, it's effectively
        a constant lookup to the currently executing hook's context.
        """
        return self._hook_context

    def get_hook_path(self, hook_path):
        """Retrieve a hook path. We use this to enable debugging.

        :param hook_path: The requested hook path to execute. If the
               executor is in debug mode, a path to a debug hook is
               returned.
        """
        hook_name = os.path.basename(hook_path)

        # Cleanup/Release any previous hook debug scripts.
        if self._debug_hook_file_path:
            os.unlink(self._debug_hook_file_path)
            self._debug_hook_file_path = None

        # Check if debug is active, if not use the requested hook.
        if not self._debug_hook_names:
            return hook_path

        # Check if it's a hook we want to debug
        found = False
        for debug_name in self._debug_hook_names:
            if fnmatch.fnmatch(hook_name, debug_name):
                found = True

        if not found:
            return hook_path

        # Setup a debug hook script.
        self._debug_hook_file_path = self._write_debug_hook(hook_name)
        return self._debug_hook_file_path

    def _write_debug_hook(self, hook_name):
        debug_hook = DEBUG_HOOK_TEMPLATE.replace("{hook_name}", hook_name)
        debug_hook_file = tempfile.NamedTemporaryFile(
            suffix="-%s" % hook_name, delete=False)
        debug_hook_file.write(debug_hook)
        debug_hook_file.flush()
        # We have to close the hook file, else linux throws a Text
        # file busy on exec because the file is open for write.
        debug_hook_file.close()
        os.chmod(debug_hook_file.name, 0o700)
        return debug_hook_file.name

    def get_invoker(self, client_id):
        """Retrieve the invoker of the currently executing hook.

        This method enables a lookup point for the hook API.
        """
        return self._invoker

    def set_debug(self, hook_names):
        """Set some hooks to be debugged. Also used to clear debug.

        :param hook_names: A list of hook names to debug; a None value
            disables debugging and ends any debug session underway.
""" if hook_names is not None and not isinstance(hook_names, list): raise AssertionError("Invalid hook names %r" % (hook_names)) # Terminate an existing debug session when the debug ends. if hook_names is None and self._invoker: try: self._invoker.send_signal("HUP") except (ProcessExitedAlready, ValueError): pass self._debug_hook_names = hook_names def __call__(self, invoker, hook_path): """Schedule a hook for execution. Returns a deferred that fires when the hook has been executed. """ exec_deferred = Deferred() self._executions.put( (invoker, hook_path, exec_deferred)) return exec_deferred
from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('pppcemr', '0077_auto_20160218_1243'), ] operations = [ migrations.AlterField( model_name='treatment', name='date', field=models.DateTimeField(null=True, verbose_name='date and time of treatment', blank=True), ), ]
from . import contipaq
from __future__ import unicode_literals
import webnotes

class DocType:
    def __init__(self, d, dl):
        self.doc, self.doclist = d, dl

    def on_submit(self):
        # NB: interpolates values straight into the SQL string; see the
        # parameterized sketch below.
        webnotes.conn.sql("""update `tabTender` set tender_name='%s' where name='%s'""" % (self.doc.name, self.doc.name))
        webnotes.conn.sql('commit')

    def get_opening_date(self):
        from datetime import datetime
        webnotes.errprint(self.doc.opening_date)
        # %Y (four-digit year) matches the YYYY-MM-DD dates stored by webnotes;
        # the original %y expected a two-digit year and would raise ValueError.
        a = datetime.strptime(self.doc.opening_date, "%Y-%m-%d")
        webnotes.errprint(a)
        # b = datetime.strptime(self.doc.submission_date, "%Y-%m-%d")
        # if a < b:
        #     webnotes.errprint(a)
        # else:
        #     webnotes.errprint(b)

    def validate(self):
        self.validate_estimatedcost()

    def validate_estimatedcost(self):
        if self.doc.estimated_cost:
            if self.doc.estimated_cost <= 0:
                webnotes.msgprint("Estimated cost of tender must be greater than zero", raise_exception=1)

@webnotes.whitelist()
def make_quotation(source_name, target_doclist=None):
    from webnotes.model.mapper import get_mapped_doclist

    doclist = get_mapped_doclist("Tender", source_name, {
        "Tender": {
            "doctype": "Quotation",
            "field_map": {
                # "campaign_name": "campaign",
                # "doctype": "enquiry_from",
                # "name": "lead",
                # "lead_name": "contact_display",
                # "company_name": "customer_name",
                # "email_id": "contact_email",
                # "mobile_no": "contact_mobile"
            }
        }}, target_doclist)

    # webnotes.msgprint([d if isinstance(d, dict) else d.fields for d in doclist])
    return [d if isinstance(d, dict) else d.fields for d in doclist]
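
# --- Hedged note on the raw SQL in on_submit() above ---
# The update interpolates self.doc.name directly into the query string. The
# webnotes database layer also accepts placeholder values, which sidesteps
# quoting and injection problems; a sketch of the safer form (assuming the
# standard %s-placeholder support):
#
#     webnotes.conn.sql("""update `tabTender` set tender_name=%s
#         where name=%s""", (self.doc.name, self.doc.name))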
from smashbox.utilities import *
from smashbox.utilities.hash_files import *
from os import listdir
from os.path import isfile, join
from smashbox.utilities.monitoring import push_to_monitoring

import sys,os,os.path,random

username = config.get('reva_tests_username', 'foo')
password = config.get('reva_tests_password', 'bar')
revad_address = config.get('reva_tests_revad_address', 'localhost:9999')
reva_cli = config.get('reva_tests_reva_cli_binary', '/usr/local/bin/reva-cli')
blocksize = int(config.get('reva_tests_revad_blocksize', 3*1024*1024)) # default 3 MiB
remainder = int(config.get('reva_tests_remainder', 1024*3)) # default 3 KiB
revad_target_folder = config.get('reva_tests_revad_target_folder', '/home/')

size_distribution = []
for i in range(0, 10):
    size_distribution.append(i*blocksize)
    size_distribution.append(i*blocksize + remainder)

nfiles = len(size_distribution)
total_size = sum(size_distribution)

ok = os.path.isfile(reva_cli)
if not ok:
    logger.error("reva-cli tool not found at %s" % (reva_cli))
    sys.exit(1)

@add_worker
def main(step):

    t0 = time.time()

    print "Run options: nfiles=%d revad_address=%s username=%s password=%s blocksize=%d" % (nfiles, revad_address, username, "xxxx", blocksize)

    # create work directory
    d = make_workdir()
    e = d + '-download' # work directory for downloads
    os.mkdir(e, 0755)

    remote_folder = join(revad_target_folder, os.path.basename(os.path.dirname(d)))
    print "d=%s e=%s remote_folder=%s" % (d, e, remote_folder)

    # create sample files from the size_distribution
    create_sample_files(d)
    k0 = count_files(d)
    print "Creating sample files succeeded: numfiles=%d" % (k0)

    # get all the hash_files created
    hash_files = get_hash_files(d)

    # authenticate to the revad daemon
    reva_authenticate()

    reva_create_target_folder(remote_folder)

    # upload the hash files to the revad daemon
    reva_upload_sample_files(remote_folder, hash_files)

    # download the hash files to the original location
    reva_download_sample_files(remote_folder, e, hash_files)
    k1 = count_files(e)
    print "Downloading sample files succeeded: numfiles=%d" % (k1)

    error_check(k1 - k0 == 0, 'Expected to have the same number of files: nfiles=%d sampled=%d downloaded=%d' % (nfiles, k0, k1))

    # verify downloaded sample files for corruptions
    ncorrupt = verify_sample_files(e)[2]
    fatal_check(ncorrupt == 0, 'Corrupted files (%d) found' % (ncorrupt))

    # clean local workdir and revad_target_folder
    # assuming that verify_sample_files(d) will abort
    # if corruption happened
    clean_run(remote_folder)

    t1 = time.time()

    push_to_monitoring("cernbox.cboxsls.revad_corruption.nfiles", nfiles)
    push_to_monitoring("cernbox.cboxsls.revad_corruption.total_size", total_size)
    push_to_monitoring("cernbox.cboxsls.revad_corruption.elapsed", t1-t0)
    push_to_monitoring("cernbox.cboxsls.revad_corruption.transfer_rate", total_size/(t1-t0))
    push_to_monitoring("cernbox.cboxsls.revad_corruption.downloaded_files", k1)

def create_sample_files(d):
    print "Creating sample files ..."
# create sample files inside folder for size in size_distribution: print "Creating sample file: size=%d bs=%d" % (size, blocksize) create_hashfile(d, size=size, bs=blocksize) def get_hash_files(d): hash_files = [join(d, f) for f in listdir(d) if isfile(join(d, f))] return hash_files def reva_authenticate(): uri = "tcp://%s:%s@%s" % (username, password, revad_address) cmd = "%s login %s" % (reva_cli, uri) print "REVA auth: cmd=%s" % (cmd) runcmd(cmd) def reva_create_target_folder(target_folder): # missing mkdir on the reva-cli, xrdcopy will create dir for us pass def reva_upload_sample_files(remote_folder, hash_files): print "Uploading sample files ..." # upload using reva-cli to remote server for f in hash_files: upload(remote_folder, f) def upload(remote_folder, fn): target_fn = join(remote_folder, os.path.basename(fn)) cmd = "%s storage upload %s %s" % (reva_cli, target_fn, fn) print "REVA upload: cmd=%s" % (cmd) runcmd(cmd) def reva_download_sample_files(remote_folder, e, hash_files): print "Downloading sample files ..." # download using reva-cli to local disk for f in hash_files: download(remote_folder, e, f) def download(remote_folder, e, fn): remote_fn = join(remote_folder, os.path.basename(fn)) local_fn = join(e, os.path.basename(fn)) cmd = "%s storage download %s %s" % (reva_cli, remote_fn, local_fn) print "REVA download: cmd=%s" % (cmd) runcmd(cmd) def verify_sample_files(d): print "Verifying uploaded files ..." # verify that the checksums of the downloaded files # matches the one of the sample file return analyse_hashfiles(d) def clean_run(remote_folder): cmd = "%s storage delete %s" % (reva_cli, remote_folder) print "REVA clean: cmd=%s" % (cmd) runcmd(cmd)
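
# --- Hedged sketch: what the size distribution above works out to ---
# Ten loop iterations append two sizes each (i*blocksize and
# i*blocksize + remainder), i.e. 20 files totalling
# 90*blocksize + 10*remainder bytes. A standalone check of the arithmetic:

if __name__ == '__main__':
    bs, rem = 3 * 1024 * 1024, 3 * 1024
    sizes = []
    for i in range(0, 10):
        sizes.append(i * bs)
        sizes.append(i * bs + rem)
    assert len(sizes) == 20
    assert sum(sizes) == 90 * bs + 10 * rem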
from django.conf.urls.defaults import * from django.views.generic import ListView from open_municipio.people.models import Office urlpatterns = patterns('', url(r'^$', ListView.as_view( model=Office, template_name='office_list.html', ), name='om_office_list'), )
import patten

class data:
    def __init__(self):
        self.dic = {}
        self.mapper = {}
        self.p = patten.patten()
        self.stop = {}

    def rep(self, l):
        l = l.replace("[p:", "")
        l = l.replace("[a:", "")
        l = l.replace("[s:", "")
        l = l.replace("[n:", "")
        l = l.replace("[m:", "")
        l = l.replace("[f:", "")
        l = l.replace("[t:", "")
        l = l.replace("[k:", "")
        l = l.replace("]", "")
        return l

    def map(self):
        f = open("cat2", 'r')
        cat = f.read().splitlines()
        for line in cat:
            s = line.split(":")
            for a in s[1].split(","):
                self.mapper[self.p.stm(a)] = s[0]

    def process_stop(self):
        f = open("stop", 'r')
        lines = f.read().splitlines()
        for line in lines:
            self.stop[line] = 1

    def score(self, s, ls, old, sc, r, r_old, f=False):
        co = 0
        co = co + sc
        #print "aaa ", r, " ", r_old
        words = s.split()
        for i in range(0, len(words)):
            words[i] = words[i].replace(".", "")
            words[i] = self.p.stm(words[i])
        for word in ls:
            if self.p.stm(word) in words and not self.p.is_adv(word) and self.stop.get(word, 0) == 0:
                if f:
                    print "5 = ", self.p.stm(word)
                co = co + 5
        for word in old:
            # same stop-word filter as above (the original negated it by mistake)
            if self.p.stm(word) in words and not self.p.is_adv(word) and self.stop.get(word, 0) == 0:
                if f:
                    print "1 = ", self.p.stm(word)
                co = co + 1
        fl = True
        for word in words:
            if self.mapper.get(word, '1') == r or self.mapper.get(self.p.stm(word), '1') == r:
                if f:
                    print "r = ", word
                co = co + 25
                fl = False
                break
        if fl:
            for word in words:
                if self.mapper.get(word, '1') == r_old or self.mapper.get(self.p.stm(word), '1') == r_old:
                    co = co + 15
                    break
        max_score = len(ls) * 5 + len(old) + sc
        if r != "":
            max_score = max_score + 25
        if r_old != "":
            max_score = max_score + 15
        print "max score : ", max_score
        return max_score, co

    def train(self):
        f = open("list5", 'r')
        lines = f.read().splitlines()
        for line in lines:
            line = line.lower()
            words = line.split()
            l = len(words)
            # stop at l - 2 so that words[i + 2] stays in range
            for i in range(0, l - 2):
                if self.dic.get(words[i], 0) == 0:
                    self.dic[words[i]] = {}
                if self.dic[words[i]].get(words[i + 1], 0) == 0:
                    self.dic[words[i]][words[i + 1]] = {}
                self.dic[words[i]][words[i + 1]][words[i + 2]] = 1

    def train2(self):
        f = open("list5", 'r')
        lines = f.read().splitlines()
        for line in lines:
            line = line.lower()
            words = line.split()
            l = len(words)
            for i in range(0, l - 3):
                if self.dic.get(words[i], 0) == 0:
                    self.dic[words[i]] = {}
                if self.dic[words[i]].get(words[i + 1], 0) == 0:
                    self.dic[words[i]][words[i + 1]] = {}
                if self.dic[words[i]][words[i + 1]].get(words[i + 2], 0) == 0:
                    self.dic[words[i]][words[i + 1]][words[i + 2]] = {}
                self.dic[words[i]][words[i + 1]][words[i + 2]][words[i + 3]] = 1

    def search(self, lst, s, a, b, ls, old, r, r_old):
        if "." not in b:
            if len(s) > 90:
                return 0
            if self.dic.get(a, 0) != 0:
                if self.dic[a].get(b, 0) != 0:
                    for key in self.dic[a][b]:
                        self.search(lst, s + " " + b, b, key, ls, old, r, r_old)
            return 0
        co = 0
        #print "aaa ", old, " ", r
        for word in ls:
            if self.p.stm(word) in s + " " + b:
                co = co + 5
        for word in old:
            if self.p.stm(word) in s + " " + b:
                co = co + 1
        words = (s + " " + b).split()
        for word in words:
            if self.mapper.get(word, '1') == r:
                co = co + 25
            elif self.mapper.get(word, '1') == r_old:
                co = co + 15
        max_score = len(ls) * 5 + len(old) + 25 + 15
        print "max score : ", max_score
        if co > 2:
            l = s + " " + b
            l = self.rep(l)
            lst.append((co, -len(l), l))

    def search2(self, lst, s, a, b, c, ls, old, r, r_old, sc):
        if "." not in c:
            if len(s.split()) > len(ls) + 5:  # and c != 'and' and c != 'a':
                return 0
            if self.dic.get(a, 0) != 0:
                if self.dic[a].get(b, 0) != 0:
                    if self.dic[a][b].get(c, 0) != 0:
                        for key in self.dic[a][b][c]:
                            self.search2(lst, s + " " + c, b, c, key, ls, old, r, r_old, sc)
            return 0
        if c == 'and' or c == 'a':
            return 0
        max_score, co = self.score(s + " " + c, ls, old, sc, r, r_old)
        if co > max_score / 2:
            l = s + " " + c
            l = self.rep(l)
            lst.append((co, -len(l), l))

    def s2(self, ls, old, r, r_old):
        lst = list()
        for a in ls:
            if self.p.is_adv(a):
                continue
            if self.dic.get(a, 0) != 0:
                for b in self.dic[a]:
                    self.search(lst, a, a, b, ls, old, r, r_old)
        l = self.dic.keys()
        for a in l:
            if self.mapper.get(a, '1') == r:
                for b in self.dic[a]:
                    self.search(lst, a, a, b, ls, old, r, r_old)
        return lst

    def s3(self, ls, old, r, r_old):
        print r, " ", r_old
        lst = list()
        sc = len(ls) + 10
        for a in ls:
            if self.p.is_adv(a) or a == 'like':
                continue
            if self.dic.get(a, 0) != 0:
                for b in self.dic[a]:
                    for c in self.dic[a][b]:
                        self.search2(lst, a + " " + b, a, b, c, ls, old, r, r_old, sc)
            sc = sc - 1
            #if a == 'i':
                #break
        l = self.dic.keys()
        for a in l:
            if self.mapper.get(a, '1') == r:
                for b in self.dic[a]:
                    # iterate the third level explicitly; the original reused
                    # a loop variable `c` leaked from the loop above
                    for c in self.dic[a][b]:
                        self.search2(lst, a + " " + b, a, b, c, ls, old, r, r_old, 0)
        s = "i am an a.i."
        c = 0
        ma, c = self.score(s, ls, old, 0, r, r_old, True)
        print "c = ", c
        return lst
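
# --- Hedged worked example of the ceiling computed in score() ---
# Each current keyword is worth 5, each carried-over ("old") word 1, a
# region hit 25 and an old-region hit 15, plus the sc carry-in. The sample
# words below are made up:

if __name__ == '__main__':
    ls = ['food', 'cat', 'play']        # 3 keywords  -> 15
    old = ['dog', 'run', 'sit', 'eat']  # 4 old words -> 4
    sc = 12                             # carry-in    -> 12
    # with both a region and an old region set: +25 and +15
    max_score = len(ls) * 5 + len(old) + sc + 25 + 15
    assert max_score == 71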
""" Helpers for the student app. """ import json import logging import mimetypes import urllib import urlparse from datetime import datetime from django.conf import settings from django.core.exceptions import PermissionDenied from django.urls import NoReverseMatch, reverse from django.core.validators import ValidationError from django.contrib.auth import load_backend from django.contrib.auth.models import User from django.db import IntegrityError, transaction from django.utils import http from django.utils.translation import ugettext as _ from oauth2_provider.models import AccessToken as dot_access_token from oauth2_provider.models import RefreshToken as dot_refresh_token from provider.oauth2.models import AccessToken as dop_access_token from provider.oauth2.models import RefreshToken as dop_refresh_token from pytz import UTC from six import iteritems, text_type import third_party_auth from course_modes.models import CourseMode from lms.djangoapps.certificates.api import ( get_certificate_url, has_html_certificates_enabled ) from lms.djangoapps.certificates.models import ( CertificateStatuses, certificate_status_for_student ) from lms.djangoapps.grades.course_grade_factory import CourseGradeFactory from lms.djangoapps.verify_student.models import VerificationDeadline from lms.djangoapps.verify_student.services import IDVerificationService from lms.djangoapps.verify_student.utils import is_verification_expiring_soon, verification_for_datetime from openedx.core.djangoapps.certificates.api import certificates_viewable_for_course from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers from openedx.core.djangoapps.theming import helpers as theming_helpers from openedx.core.djangoapps.theming.helpers import get_themes from student.models import ( LinkedInAddToProfileConfiguration, PasswordHistory, Registration, UserAttribute, UserProfile, unique_id_for_user, email_exists_or_retired ) VERIFY_STATUS_NEED_TO_VERIFY = "verify_need_to_verify" VERIFY_STATUS_SUBMITTED = "verify_submitted" VERIFY_STATUS_RESUBMITTED = "re_verify_submitted" VERIFY_STATUS_APPROVED = "verify_approved" VERIFY_STATUS_MISSED_DEADLINE = "verify_missed_deadline" VERIFY_STATUS_NEED_TO_REVERIFY = "verify_need_to_reverify" DISABLE_UNENROLL_CERT_STATES = [ 'generating', 'downloadable', ] USERNAME_EXISTS_MSG_FMT = _("An account with the Public Username '{username}' already exists.") log = logging.getLogger(__name__) def check_verify_status_by_course(user, course_enrollments): """ Determine the per-course verification statuses for a given user. The possible statuses are: * VERIFY_STATUS_NEED_TO_VERIFY: The student has not yet submitted photos for verification. * VERIFY_STATUS_SUBMITTED: The student has submitted photos for verification, but has have not yet been approved. * VERIFY_STATUS_RESUBMITTED: The student has re-submitted photos for re-verification while they still have an active but expiring ID verification * VERIFY_STATUS_APPROVED: The student has been successfully verified. * VERIFY_STATUS_MISSED_DEADLINE: The student did not submit photos within the course's deadline. * VERIFY_STATUS_NEED_TO_REVERIFY: The student has an active verification, but it is set to expire before the verification deadline for the course. It is is also possible that a course does NOT have a verification status if: * The user is not enrolled in a verified mode, meaning that the user didn't pay. * The course does not offer a verified mode. * The user submitted photos but an error occurred while verifying them. 
* The user submitted photos but the verification was denied. In the last two cases, we rely on messages in the sidebar rather than displaying messages for each course. Arguments: user (User): The currently logged-in user. course_enrollments (list[CourseEnrollment]): The courses the user is enrolled in. Returns: dict: Mapping of course keys verification status dictionaries. If no verification status is applicable to a course, it will not be included in the dictionary. The dictionaries have these keys: * status (str): One of the enumerated status codes. * days_until_deadline (int): Number of days until the verification deadline. * verification_good_until (str): Date string for the verification expiration date. """ status_by_course = {} # Retrieve all verifications for the user, sorted in descending # order by submission datetime verifications = IDVerificationService.verifications_for_user(user) # Check whether the user has an active or pending verification attempt has_active_or_pending = IDVerificationService.user_has_valid_or_pending(user) # Retrieve expiration_datetime of most recent approved verification expiration_datetime = IDVerificationService.get_expiration_datetime(user, ['approved']) verification_expiring_soon = is_verification_expiring_soon(expiration_datetime) # Retrieve verification deadlines for the enrolled courses enrolled_course_keys = [enrollment.course_id for enrollment in course_enrollments] course_deadlines = VerificationDeadline.deadlines_for_courses(enrolled_course_keys) recent_verification_datetime = None for enrollment in course_enrollments: # If the user hasn't enrolled as verified, then the course # won't display state related to its verification status. if enrollment.mode in CourseMode.VERIFIED_MODES: # Retrieve the verification deadline associated with the course. # This could be None if the course doesn't have a deadline. deadline = course_deadlines.get(enrollment.course_id) relevant_verification = verification_for_datetime(deadline, verifications) # Picking the max verification datetime on each iteration only with approved status if relevant_verification is not None and relevant_verification.status == "approved": recent_verification_datetime = max( recent_verification_datetime if recent_verification_datetime is not None else relevant_verification.expiration_datetime, relevant_verification.expiration_datetime ) # By default, don't show any status related to verification status = None should_display = True # Check whether the user was approved or is awaiting approval if relevant_verification is not None: should_display = relevant_verification.should_display_status_to_user() if relevant_verification.status == "approved": if verification_expiring_soon: status = VERIFY_STATUS_NEED_TO_REVERIFY else: status = VERIFY_STATUS_APPROVED elif relevant_verification.status == "submitted": if verification_expiring_soon: status = VERIFY_STATUS_RESUBMITTED else: status = VERIFY_STATUS_SUBMITTED # If the user didn't submit at all, then tell them they need to verify # If the deadline has already passed, then tell them they missed it. # If they submitted but something went wrong (error or denied), # then don't show any messaging next to the course, since we already # show messages related to this on the left sidebar. 
submitted = ( relevant_verification is not None and relevant_verification.status not in ["created", "ready"] ) if status is None and not submitted: if deadline is None or deadline > datetime.now(UTC): if IDVerificationService.user_is_verified(user) and verification_expiring_soon: # The user has an active verification, but the verification # is set to expire within "EXPIRING_SOON_WINDOW" days (default is 4 weeks). # Tell the student to reverify. status = VERIFY_STATUS_NEED_TO_REVERIFY elif not IDVerificationService.user_is_verified(user): status = VERIFY_STATUS_NEED_TO_VERIFY else: # If a user currently has an active or pending verification, # then they may have submitted an additional attempt after # the verification deadline passed. This can occur, # for example, when the support team asks a student # to reverify after the deadline so they can receive # a verified certificate. # In this case, we still want to show them as "verified" # on the dashboard. if has_active_or_pending: status = VERIFY_STATUS_APPROVED # Otherwise, the student missed the deadline, so show # them as "honor" (the kind of certificate they will receive). else: status = VERIFY_STATUS_MISSED_DEADLINE # Set the status for the course only if we're displaying some kind of message # Otherwise, leave the course out of the dictionary. if status is not None: days_until_deadline = None now = datetime.now(UTC) if deadline is not None and deadline > now: days_until_deadline = (deadline - now).days status_by_course[enrollment.course_id] = { 'status': status, 'days_until_deadline': days_until_deadline, 'should_display': should_display, } if recent_verification_datetime: for key, value in iteritems(status_by_course): # pylint: disable=unused-variable status_by_course[key]['verification_good_until'] = recent_verification_datetime.strftime("%m/%d/%Y") return status_by_course def auth_pipeline_urls(auth_entry, redirect_url=None): """Retrieve URLs for each enabled third-party auth provider. These URLs are used on the "sign up" and "sign in" buttons on the login/registration forms to allow users to begin authentication with a third-party provider. Optionally, we can redirect the user to an arbitrary url after auth completes successfully. We use this to redirect the user to a page that required login, or to send users to the payment flow when enrolling in a course. Args: auth_entry (string): Either `pipeline.AUTH_ENTRY_LOGIN` or `pipeline.AUTH_ENTRY_REGISTER` Keyword Args: redirect_url (unicode): If provided, send users to this URL after they successfully authenticate. Returns: dict mapping provider IDs to URLs """ if not third_party_auth.is_enabled(): return {} return { provider.provider_id: third_party_auth.pipeline.get_login_url( provider.provider_id, auth_entry, redirect_url=redirect_url ) for provider in third_party_auth.provider.Registry.displayed_for_login() } POST_AUTH_PARAMS = ('course_id', 'enrollment_action', 'course_mode', 'email_opt_in', 'purchase_workflow') def get_next_url_for_login_page(request): """ Determine the URL to redirect to following login/registration/third_party_auth The user is currently on a login or registration page. If 'course_id' is set, or other POST_AUTH_PARAMS, we will need to send the user to the /account/finish_auth/ view following login, which will take care of auto-enrollment in the specified course. Otherwise, we go to the ?next= query param or to the dashboard if nothing else is specified. If THIRD_PARTY_AUTH_HINT is set, then `tpa_hint=<hint>` is added as a query parameter. 
""" redirect_to = get_redirect_to(request) if not redirect_to: try: redirect_to = reverse('dashboard') except NoReverseMatch: redirect_to = reverse('home') if any(param in request.GET for param in POST_AUTH_PARAMS): # Before we redirect to next/dashboard, we need to handle auto-enrollment: params = [(param, request.GET[param]) for param in POST_AUTH_PARAMS if param in request.GET] params.append(('next', redirect_to)) # After auto-enrollment, user will be sent to payment page or to this URL redirect_to = '{}?{}'.format(reverse('finish_auth'), urllib.urlencode(params)) # Note: if we are resuming a third party auth pipeline, then the next URL will already # be saved in the session as part of the pipeline state. That URL will take priority # over this one. # Append a tpa_hint query parameter, if one is configured tpa_hint = configuration_helpers.get_value( "THIRD_PARTY_AUTH_HINT", settings.FEATURES.get("THIRD_PARTY_AUTH_HINT", '') ) if tpa_hint: # Don't add tpa_hint if we're already in the TPA pipeline (prevent infinite loop), # and don't overwrite any existing tpa_hint params (allow tpa_hint override). running_pipeline = third_party_auth.pipeline.get(request) (scheme, netloc, path, query, fragment) = list(urlparse.urlsplit(redirect_to)) if not running_pipeline and 'tpa_hint' not in query: params = urlparse.parse_qs(query) params['tpa_hint'] = [tpa_hint] query = urllib.urlencode(params, doseq=True) redirect_to = urlparse.urlunsplit((scheme, netloc, path, query, fragment)) return redirect_to def get_redirect_to(request): """ Determine the redirect url and return if safe :argument request: request object :returns: redirect url if safe else None """ redirect_to = request.GET.get('next') header_accept = request.META.get('HTTP_ACCEPT', '') # If we get a redirect parameter, make sure it's safe i.e. not redirecting outside our domain. # Also make sure that it is not redirecting to a static asset and redirected page is web page # not a static file. As allowing assets to be pointed to by "next" allows 3rd party sites to # get information about a user on edx.org. In any such case drop the parameter. if redirect_to: mime_type, _ = mimetypes.guess_type(redirect_to, strict=False) if not http.is_safe_url(redirect_to, allowed_hosts={request.get_host()}, require_https=True): log.warning( u'Unsafe redirect parameter detected after login page: %(redirect_to)r', {"redirect_to": redirect_to} ) redirect_to = None elif 'text/html' not in header_accept: log.info( u'Redirect to non html content %(content_type)r detected from %(user_agent)r' u' after login page: %(redirect_to)r', { "redirect_to": redirect_to, "content_type": header_accept, "user_agent": request.META.get('HTTP_USER_AGENT', '') } ) redirect_to = None elif mime_type: log.warning( u'Redirect to url path with specified filed type %(mime_type)r not allowed: %(redirect_to)r', {"redirect_to": redirect_to, "mime_type": mime_type} ) redirect_to = None elif settings.STATIC_URL in redirect_to: log.warning( u'Redirect to static content detected after login page: %(redirect_to)r', {"redirect_to": redirect_to} ) redirect_to = None else: themes = get_themes() next_path = urlparse.urlparse(redirect_to).path for theme in themes: if theme.theme_dir_name in next_path: log.warning( u'Redirect to theme content detected after login page: %(redirect_to)r', {"redirect_to": redirect_to} ) redirect_to = None break return redirect_to def destroy_oauth_tokens(user): """ Destroys ALL OAuth access and refresh tokens for the given user. 
""" dop_access_token.objects.filter(user=user.id).delete() dop_refresh_token.objects.filter(user=user.id).delete() dot_access_token.objects.filter(user=user.id).delete() dot_refresh_token.objects.filter(user=user.id).delete() def generate_activation_email_context(user, registration): """ Constructs a dictionary for use in activation email contexts Arguments: user (User): Currently logged-in user registration (Registration): Registration object for the currently logged-in user """ return { 'name': user.profile.name, 'key': registration.activation_key, 'lms_url': configuration_helpers.get_value('LMS_ROOT_URL', settings.LMS_ROOT_URL), 'platform_name': configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME), 'support_url': configuration_helpers.get_value('SUPPORT_SITE_LINK', settings.SUPPORT_SITE_LINK), 'support_email': configuration_helpers.get_value('CONTACT_EMAIL', settings.CONTACT_EMAIL), } def create_or_set_user_attribute_created_on_site(user, site): """ Create or Set UserAttribute indicating the microsite site the user account was created on. User maybe created on 'courses.edx.org', or a white-label site. Due to the very high traffic on this table we now ignore the default site (eg. 'courses.edx.org') and code which comsumes this attribute should assume a 'created_on_site' which doesn't exist belongs to the default site. """ if site and site.id != settings.SITE_ID: UserAttribute.set_user_attribute(user, 'created_on_site', site.domain) NEW_USER_AUTH_BACKEND = 'django.contrib.auth.backends.AllowAllUsersModelBackend' def authenticate_new_user(request, username, password): """ Immediately after a user creates an account, we log them in. They are only logged in until they close the browser. They can't log in again until they click the activation link from the email. """ backend = load_backend(NEW_USER_AUTH_BACKEND) user = backend.authenticate(request=request, username=username, password=password) user.backend = NEW_USER_AUTH_BACKEND return user class AccountValidationError(Exception): """ Used in account creation views to raise exceptions with details about specific invalid fields """ def __init__(self, message, field): super(AccountValidationError, self).__init__(message) self.field = field def cert_info(user, course_overview): """ Get the certificate info needed to render the dashboard section for the given student and course. Arguments: user (User): A user. course_overview (CourseOverview): A course. Returns: dict: A dictionary with keys: 'status': one of 'generating', 'downloadable', 'notpassing', 'processing', 'restricted', 'unavailable', or 'certificate_earned_but_not_available' 'download_url': url, only present if show_download_url is True 'show_survey_button': bool 'survey_url': url, only if show_survey_button is True 'grade': if status is not 'processing' 'can_unenroll': if status allows for unenrollment """ return _cert_info( user, course_overview, certificate_status_for_student(user, course_overview.id) ) def _cert_info(user, course_overview, cert_status): """ Implements the logic for cert_info -- split out for testing. Arguments: user (User): A user. course_overview (CourseOverview): A course. 
""" # simplify the status for the template using this lookup table template_state = { CertificateStatuses.generating: 'generating', CertificateStatuses.downloadable: 'downloadable', CertificateStatuses.notpassing: 'notpassing', CertificateStatuses.restricted: 'restricted', CertificateStatuses.auditing: 'auditing', CertificateStatuses.audit_passing: 'auditing', CertificateStatuses.audit_notpassing: 'auditing', CertificateStatuses.unverified: 'unverified', } certificate_earned_but_not_available_status = 'certificate_earned_but_not_available' default_status = 'processing' default_info = { 'status': default_status, 'show_survey_button': False, 'can_unenroll': True, } if cert_status is None: return default_info status = template_state.get(cert_status['status'], default_status) is_hidden_status = status in ('unavailable', 'processing', 'generating', 'notpassing', 'auditing') if ( not certificates_viewable_for_course(course_overview) and (status in CertificateStatuses.PASSED_STATUSES) and course_overview.certificate_available_date ): status = certificate_earned_but_not_available_status if ( course_overview.certificates_display_behavior == 'early_no_info' and is_hidden_status ): return default_info status_dict = { 'status': status, 'mode': cert_status.get('mode', None), 'linked_in_url': None, 'can_unenroll': status not in DISABLE_UNENROLL_CERT_STATES, } if status != default_status and course_overview.end_of_course_survey_url is not None: status_dict.update({ 'show_survey_button': True, 'survey_url': process_survey_link(course_overview.end_of_course_survey_url, user)}) else: status_dict['show_survey_button'] = False if status == 'downloadable': # showing the certificate web view button if certificate is downloadable state and feature flags are enabled. if has_html_certificates_enabled(course_overview): if course_overview.has_any_active_web_certificate: status_dict.update({ 'show_cert_web_view': True, 'cert_web_view_url': get_certificate_url(course_id=course_overview.id, uuid=cert_status['uuid']) }) else: # don't show download certificate button if we don't have an active certificate for course status_dict['status'] = 'unavailable' elif 'download_url' not in cert_status: log.warning( u"User %s has a downloadable cert for %s, but no download url", user.username, course_overview.id ) return default_info else: status_dict['download_url'] = cert_status['download_url'] # If enabled, show the LinkedIn "add to profile" button # Clicking this button sends the user to LinkedIn where they # can add the certificate information to their profile. 
linkedin_config = LinkedInAddToProfileConfiguration.current() # posting certificates to LinkedIn is not currently # supported in White Labels if linkedin_config.enabled and not theming_helpers.is_request_in_themed_site(): status_dict['linked_in_url'] = linkedin_config.add_to_profile_url( course_overview.id, course_overview.display_name, cert_status.get('mode'), cert_status['download_url'] ) if status in {'generating', 'downloadable', 'notpassing', 'restricted', 'auditing', 'unverified'}: cert_grade_percent = -1 persisted_grade_percent = -1 persisted_grade = CourseGradeFactory().read(user, course=course_overview, create_if_needed=False) if persisted_grade is not None: persisted_grade_percent = persisted_grade.percent if 'grade' in cert_status: cert_grade_percent = float(cert_status['grade']) if cert_grade_percent == -1 and persisted_grade_percent == -1: # Note: as of 11/20/2012, we know there are students in this state-- cs169.1x, # who need to be regraded (we weren't tracking 'notpassing' at first). # We can add a log.warning here once we think it shouldn't happen. return default_info status_dict['grade'] = text_type(max(cert_grade_percent, persisted_grade_percent)) return status_dict def process_survey_link(survey_link, user): """ If {UNIQUE_ID} appears in the link, replace it with a unique id for the user. Currently, this is sha1(user.username). Otherwise, return survey_link. """ return survey_link.format(UNIQUE_ID=unique_id_for_user(user)) def do_create_account(form, custom_form=None): """ Given cleaned post variables, create the User and UserProfile objects, as well as the registration for this user. Returns a tuple (User, UserProfile, Registration). Note: this function is also used for creating test users. """ # Check if ALLOW_PUBLIC_ACCOUNT_CREATION flag turned off to restrict user account creation if not configuration_helpers.get_value( 'ALLOW_PUBLIC_ACCOUNT_CREATION', settings.FEATURES.get('ALLOW_PUBLIC_ACCOUNT_CREATION', True) ): raise PermissionDenied() errors = {} errors.update(form.errors) if custom_form: errors.update(custom_form.errors) if errors: raise ValidationError(errors) proposed_username = form.cleaned_data["username"] user = User( username=proposed_username, email=form.cleaned_data["email"], is_active=False ) user.set_password(form.cleaned_data["password"]) registration = Registration() # TODO: Rearrange so that if part of the process fails, the whole process fails. # Right now, we can have e.g. no registration e-mail sent out and a zombie account try: with transaction.atomic(): user.save() if custom_form: custom_model = custom_form.save(commit=False) custom_model.user = user custom_model.save() except IntegrityError: # Figure out the cause of the integrity error # TODO duplicate email is already handled by form.errors above as a ValidationError. # The checks for duplicate email/username should occur in the same place with an # AccountValidationError and a consistent user message returned (i.e. both should # return "It looks like {username} belongs to an existing account. 
Try again with a # different username.") if User.objects.filter(username=user.username): raise AccountValidationError( USERNAME_EXISTS_MSG_FMT.format(username=proposed_username), field="username" ) elif email_exists_or_retired(user.email): raise AccountValidationError( _("An account with the Email '{email}' already exists.").format(email=user.email), field="email" ) else: raise # add this account creation to password history # NOTE, this will be a NOP unless the feature has been turned on in configuration password_history_entry = PasswordHistory() password_history_entry.create(user) registration.register(user) profile_fields = [ "name", "level_of_education", "gender", "mailing_address", "city", "country", "goals", "year_of_birth" ] profile = UserProfile( user=user, **{key: form.cleaned_data.get(key) for key in profile_fields} ) extended_profile = form.cleaned_extended_profile if extended_profile: profile.meta = json.dumps(extended_profile) try: profile.save() except Exception: log.exception("UserProfile creation failed for user {id}.".format(id=user.id)) raise return user, profile, registration
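
# --- Hedged sketch of the static-asset guard in get_redirect_to() ---
# A "next" target whose path looks like a file (i.e. mimetypes can guess a
# type for it) is rejected. Standalone illustration of that single check,
# stdlib only (the paths are made up):

if __name__ == '__main__':
    import mimetypes
    for target, allowed in [('/dashboard', True),
                            ('/static/logo.png', False)]:
        guessed, _encoding = mimetypes.guess_type(target, strict=False)
        assert (guessed is None) == allowed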
from django.core.urlresolvers import reverse from taiga.base.utils import json from .. import factories as f import pytest pytestmark = pytest.mark.django_db def test_userstory_custom_attribute_duplicate_name_error_on_create(client): custom_attr_1 = f.UserStoryCustomAttributeFactory() member = f.MembershipFactory(user=custom_attr_1.project.owner, project=custom_attr_1.project, is_admin=True) url = reverse("userstory-custom-attributes-list") data = {"name": custom_attr_1.name, "project": custom_attr_1.project.pk} client.login(member.user) response = client.json.post(url, json.dumps(data)) assert response.status_code == 400 def test_userstory_custom_attribute_duplicate_name_error_on_update(client): custom_attr_1 = f.UserStoryCustomAttributeFactory() custom_attr_2 = f.UserStoryCustomAttributeFactory(project=custom_attr_1.project) member = f.MembershipFactory(user=custom_attr_1.project.owner, project=custom_attr_1.project, is_admin=True) url = reverse("userstory-custom-attributes-detail", kwargs={"pk": custom_attr_2.pk}) data = {"name": custom_attr_1.name} client.login(member.user) response = client.json.patch(url, json.dumps(data)) assert response.status_code == 400 def test_userstory_custom_attribute_duplicate_name_error_on_move_between_projects(client): custom_attr_1 = f.UserStoryCustomAttributeFactory() custom_attr_2 = f.UserStoryCustomAttributeFactory(name=custom_attr_1.name) member = f.MembershipFactory(user=custom_attr_1.project.owner, project=custom_attr_1.project, is_admin=True) f.MembershipFactory(user=custom_attr_1.project.owner, project=custom_attr_2.project, is_admin=True) url = reverse("userstory-custom-attributes-detail", kwargs={"pk": custom_attr_2.pk}) data = {"project": custom_attr_1.project.pk} client.login(member.user) response = client.json.patch(url, json.dumps(data)) assert response.status_code == 400 def test_userstory_custom_attributes_values_when_create_us(client): user_story = f.UserStoryFactory() assert user_story.custom_attributes_values.attributes_values == {} def test_userstory_custom_attributes_values_update(client): user_story = f.UserStoryFactory() member = f.MembershipFactory(user=user_story.project.owner, project=user_story.project, is_admin=True) custom_attr_1 = f.UserStoryCustomAttributeFactory(project=user_story.project) ct1_id = "{}".format(custom_attr_1.id) custom_attr_2 = f.UserStoryCustomAttributeFactory(project=user_story.project) ct2_id = "{}".format(custom_attr_2.id) custom_attrs_val = user_story.custom_attributes_values url = reverse("userstory-custom-attributes-values-detail", args=[user_story.id]) data = { "attributes_values": { ct1_id: "test_1_updated", ct2_id: "test_2_updated" }, "version": custom_attrs_val.version } assert user_story.custom_attributes_values.attributes_values == {} client.login(member.user) response = client.json.patch(url, json.dumps(data)) assert response.status_code == 200 assert response.data["attributes_values"] == data["attributes_values"] user_story = user_story.__class__.objects.get(id=user_story.id) assert user_story.custom_attributes_values.attributes_values == data["attributes_values"] def test_userstory_custom_attributes_values_update_with_error_invalid_key(client): user_story = f.UserStoryFactory() member = f.MembershipFactory(user=user_story.project.owner, project=user_story.project, is_admin=True) custom_attr_1 = f.UserStoryCustomAttributeFactory(project=user_story.project) ct1_id = "{}".format(custom_attr_1.id) custom_attr_2 = f.UserStoryCustomAttributeFactory(project=user_story.project) custom_attrs_val = 
user_story.custom_attributes_values
    url = reverse("userstory-custom-attributes-values-detail", args=[user_story.id])
    data = {
        "attributes_values": {
            ct1_id: "test_1_updated",
            "123456": "test_2_updated"
        },
        "version": custom_attrs_val.version
    }

    assert user_story.custom_attributes_values.attributes_values == {}

    client.login(member.user)
    response = client.json.patch(url, json.dumps(data))
    assert response.status_code == 400


def test_trigger_update_userstorycustomvalues_after_remove_userstorycustomattribute(client):
    user_story = f.UserStoryFactory()
    member = f.MembershipFactory(user=user_story.project.owner,
                                 project=user_story.project,
                                 is_admin=True)
    custom_attr_1 = f.UserStoryCustomAttributeFactory(project=user_story.project)
    ct1_id = "{}".format(custom_attr_1.id)
    custom_attr_2 = f.UserStoryCustomAttributeFactory(project=user_story.project)
    ct2_id = "{}".format(custom_attr_2.id)

    custom_attrs_val = user_story.custom_attributes_values
    custom_attrs_val.attributes_values = {ct1_id: "test_1", ct2_id: "test_2"}
    custom_attrs_val.save()

    assert ct1_id in custom_attrs_val.attributes_values.keys()
    assert ct2_id in custom_attrs_val.attributes_values.keys()

    url = reverse("userstory-custom-attributes-detail", kwargs={"pk": custom_attr_2.pk})

    client.login(member.user)
    response = client.json.delete(url)
    assert response.status_code == 204

    custom_attrs_val = custom_attrs_val.__class__.objects.get(id=custom_attrs_val.id)
    assert ct1_id in custom_attrs_val.attributes_values.keys()
    assert ct2_id not in custom_attrs_val.attributes_values.keys()
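

# Illustrative sketch, not part of the original suite: the PATCH payloads in
# the tests above share one shape -- attribute values keyed by the
# *stringified* custom-attribute id, plus a "version" field used for
# optimistic-concurrency checks. The helper below is hypothetical, plain
# Python, and only makes that shape explicit.
import json as stdlib_json


def _example_patch_payload(values_by_attr_id, version):
    """Build a custom-attributes PATCH body; keys must be str(id)."""
    return stdlib_json.dumps({
        "attributes_values": dict(
            (str(k), v) for k, v in values_by_attr_id.items()),
        "version": version,
    })


assert stdlib_json.loads(_example_patch_payload({1: "x"}, 1)) == {
    "attributes_values": {"1": "x"}, "version": 1}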
import logging

from openerp.osv import fields, orm

_logger = logging.getLogger(__name__)


class SaleOrderLine(orm.Model):
    """ Model name: SaleOrderLine
    """
    _inherit = 'sale.order.line'

    _columns = {
        'mx_net_qty': fields.related(
            'product_id', 'mx_net_qty', type='float',
            string='Total net'),
        'mx_lord_qty': fields.related(
            'product_id', 'mx_lord_qty', type='float',
            string='Total lord'),
    }
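

# Illustrative sketch (plain Python, not the OpenERP ORM): a related field
# such as 'mx_net_qty' above behaves roughly like a read-only property that
# follows the 'product_id' link and returns the product's value. The classes
# below are hypothetical stand-ins used only to show that delegation.
class _ExampleProduct(object):
    def __init__(self, mx_net_qty):
        self.mx_net_qty = mx_net_qty


class _ExampleOrderLine(object):
    def __init__(self, product):
        self.product_id = product

    @property
    def mx_net_qty(self):
        # Follow the relation, as fields.related('product_id', 'mx_net_qty')
        # does when the field is read.
        return self.product_id.mx_net_qty


assert _ExampleOrderLine(_ExampleProduct(2.5)).mx_net_qty == 2.5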
import mmap
import struct
import types

from utils import hash_160_to_pubkey_address, hash_160_to_script_address, public_key_to_pubkey_address, hash_encode,\
    hash_160


class SerializationError(Exception):
    """Thrown when there's a problem deserializing or serializing."""


class BCDataStream(object):
    """Workalike python implementation of Creditbit's CDataStream class."""

    def __init__(self):
        self.input = None
        self.read_cursor = 0

    def clear(self):
        self.input = None
        self.read_cursor = 0

    def write(self, bytes):  # Initialize with string of bytes
        if self.input is None:
            self.input = bytes
        else:
            self.input += bytes

    def map_file(self, file, start):  # Initialize with bytes from file
        self.input = mmap.mmap(file.fileno(), 0, access=mmap.ACCESS_READ)
        self.read_cursor = start

    def seek_file(self, position):
        self.read_cursor = position

    def close_file(self):
        self.input.close()

    def read_string(self):
        # Strings are encoded depending on length:
        # 0 to 252 :                   1-byte-length followed by bytes (if any)
        # 253 to 65,535 :              byte '253' 2-byte-length followed by bytes
        # 65,536 to 4,294,967,295 :    byte '254' 4-byte-length followed by bytes
        # ... and the Creditbit client is coded to understand:
        # greater than 4,294,967,295 : byte '255' 8-byte-length followed by bytes of string
        # ... but I don't think it actually handles any strings that big.
        if self.input is None:
            raise SerializationError("call write(bytes) before trying to deserialize")

        try:
            length = self.read_compact_size()
        except IndexError:
            raise SerializationError("attempt to read past end of buffer")

        return self.read_bytes(length)

    def write_string(self, string):
        # Length-encoded as with read-string
        self.write_compact_size(len(string))
        self.write(string)

    def read_bytes(self, length):
        try:
            result = self.input[self.read_cursor:self.read_cursor+length]
            self.read_cursor += length
            return result
        except IndexError:
            raise SerializationError("attempt to read past end of buffer")

    def read_boolean(self):
        return self.read_bytes(1)[0] != chr(0)

    def read_int16(self):
        return self._read_num('<h')

    def read_uint16(self):
        return self._read_num('<H')

    def read_int32(self):
        return self._read_num('<i')

    def read_uint32(self):
        return self._read_num('<I')

    def read_int64(self):
        return self._read_num('<q')

    def read_uint64(self):
        return self._read_num('<Q')

    def write_boolean(self, val):
        return self.write(chr(1) if val else chr(0))

    def write_int16(self, val):
        return self._write_num('<h', val)

    def write_uint16(self, val):
        return self._write_num('<H', val)

    def write_int32(self, val):
        return self._write_num('<i', val)

    def write_uint32(self, val):
        return self._write_num('<I', val)

    def write_int64(self, val):
        return self._write_num('<q', val)

    def write_uint64(self, val):
        return self._write_num('<Q', val)

    def read_compact_size(self):
        size = ord(self.input[self.read_cursor])
        self.read_cursor += 1
        if size == 253:
            size = self._read_num('<H')
        elif size == 254:
            size = self._read_num('<I')
        elif size == 255:
            size = self._read_num('<Q')
        return size

    def write_compact_size(self, size):
        if size < 0:
            raise SerializationError("attempt to write size < 0")
        elif size < 253:
            self.write(chr(size))
        elif size < 2**16:
            self.write('\xfd')
            self._write_num('<H', size)
        elif size < 2**32:
            self.write('\xfe')
            self._write_num('<I', size)
        elif size < 2**64:
            self.write('\xff')
            self._write_num('<Q', size)

    def _read_num(self, format):
        (i,) = struct.unpack_from(format, self.input, self.read_cursor)
        self.read_cursor += struct.calcsize(format)
        return i

    def _write_num(self, format, num):
        s = struct.pack(format, num)
self.write(s) class EnumException(Exception): pass class Enumeration: """enum-like type From the Python Cookbook, downloaded from http://code.activestate.com/recipes/67107/ """ def __init__(self, name, enumList): self.__doc__ = name lookup = {} reverseLookup = {} i = 0 uniqueNames = [] uniqueValues = [] for x in enumList: if isinstance(x, types.TupleType): x, i = x if not isinstance(x, types.StringType): raise EnumException("enum name is not a string: %r" % x) if not isinstance(i, types.IntType): raise EnumException("enum value is not an integer: %r" % i) if x in uniqueNames: raise EnumException("enum name is not unique: %r" % x) if i in uniqueValues: raise EnumException("enum value is not unique for %r" % x) uniqueNames.append(x) uniqueValues.append(i) lookup[x] = i reverseLookup[i] = x i = i + 1 self.lookup = lookup self.reverseLookup = reverseLookup def __getattr__(self, attr): if attr not in self.lookup: raise AttributeError return self.lookup[attr] def whatis(self, value): return self.reverseLookup[value] def long_hex(bytes): return bytes.encode('hex_codec') def short_hex(bytes): t = bytes.encode('hex_codec') if len(t) < 11: return t return t[0:4]+"..."+t[-4:] def parse_TxIn(vds): d = {} d['prevout_hash'] = hash_encode(vds.read_bytes(32)) d['prevout_n'] = vds.read_uint32() scriptSig = vds.read_bytes(vds.read_compact_size()) d['sequence'] = vds.read_uint32() if scriptSig: pubkeys, signatures, address = get_address_from_input_script(scriptSig) else: pubkeys = [] signatures = [] address = None d['address'] = address d['signatures'] = signatures return d def parse_TxOut(vds, i): d = {} d['value'] = vds.read_int64() scriptPubKey = vds.read_bytes(vds.read_compact_size()) d['address'] = get_address_from_output_script(scriptPubKey) d['raw_output_script'] = scriptPubKey.encode('hex') d['index'] = i return d def parse_Transaction(vds, is_coinbase): d = {} start = vds.read_cursor d['version'] = vds.read_int32() d['time'] = vds.read_int32() n_vin = vds.read_compact_size() d['inputs'] = [] for i in xrange(n_vin): o = parse_TxIn(vds) if not is_coinbase: d['inputs'].append(o) n_vout = vds.read_compact_size() d['outputs'] = [] for i in xrange(n_vout): o = parse_TxOut(vds, i) #if o['address'] == "None" and o['value']==0: # print("skipping strange tx output with zero value") # continue # if o['address'] != "None": d['outputs'].append(o) d['lockTime'] = vds.read_uint32() return d opcodes = Enumeration("Opcodes", [ ("OP_0", 0), ("OP_PUSHDATA1", 76), "OP_PUSHDATA2", "OP_PUSHDATA4", "OP_1NEGATE", "OP_RESERVED", "OP_1", "OP_2", "OP_3", "OP_4", "OP_5", "OP_6", "OP_7", "OP_8", "OP_9", "OP_10", "OP_11", "OP_12", "OP_13", "OP_14", "OP_15", "OP_16", "OP_NOP", "OP_VER", "OP_IF", "OP_NOTIF", "OP_VERIF", "OP_VERNOTIF", "OP_ELSE", "OP_ENDIF", "OP_VERIFY", "OP_RETURN", "OP_TOALTSTACK", "OP_FROMALTSTACK", "OP_2DROP", "OP_2DUP", "OP_3DUP", "OP_2OVER", "OP_2ROT", "OP_2SWAP", "OP_IFDUP", "OP_DEPTH", "OP_DROP", "OP_DUP", "OP_NIP", "OP_OVER", "OP_PICK", "OP_ROLL", "OP_ROT", "OP_SWAP", "OP_TUCK", "OP_CAT", "OP_SUBSTR", "OP_LEFT", "OP_RIGHT", "OP_SIZE", "OP_INVERT", "OP_AND", "OP_OR", "OP_XOR", "OP_EQUAL", "OP_EQUALVERIFY", "OP_RESERVED1", "OP_RESERVED2", "OP_1ADD", "OP_1SUB", "OP_2MUL", "OP_2DIV", "OP_NEGATE", "OP_ABS", "OP_NOT", "OP_0NOTEQUAL", "OP_ADD", "OP_SUB", "OP_MUL", "OP_DIV", "OP_MOD", "OP_LSHIFT", "OP_RSHIFT", "OP_BOOLAND", "OP_BOOLOR", "OP_NUMEQUAL", "OP_NUMEQUALVERIFY", "OP_NUMNOTEQUAL", "OP_LESSTHAN", "OP_GREATERTHAN", "OP_LESSTHANOREQUAL", "OP_GREATERTHANOREQUAL", "OP_MIN", "OP_MAX", "OP_WITHIN", 
"OP_RIPEMD160", "OP_SHA1", "OP_SHA256", "OP_HASH160", "OP_HASH256", "OP_CODESEPARATOR", "OP_CHECKSIG", "OP_CHECKSIGVERIFY", "OP_CHECKMULTISIG", "OP_CHECKMULTISIGVERIFY", "OP_NOP1", "OP_NOP2", "OP_NOP3", "OP_NOP4", "OP_NOP5", "OP_NOP6", "OP_NOP7", "OP_NOP8", "OP_NOP9", "OP_NOP10", ("OP_INVALIDOPCODE", 0xFF), ]) def script_GetOp(bytes): i = 0 while i < len(bytes): vch = None opcode = ord(bytes[i]) i += 1 if opcode <= opcodes.OP_PUSHDATA4: nSize = opcode if opcode == opcodes.OP_PUSHDATA1: nSize = ord(bytes[i]) i += 1 elif opcode == opcodes.OP_PUSHDATA2: (nSize,) = struct.unpack_from('<H', bytes, i) i += 2 elif opcode == opcodes.OP_PUSHDATA4: (nSize,) = struct.unpack_from('<I', bytes, i) i += 4 if i+nSize > len(bytes): vch = "_INVALID_"+bytes[i:] i = len(bytes) else: vch = bytes[i:i+nSize] i += nSize yield (opcode, vch, i) def script_GetOpName(opcode): try: return (opcodes.whatis(opcode)).replace("OP_", "") except KeyError: return "InvalidOp_"+str(opcode) def decode_script(bytes): result = '' for (opcode, vch, i) in script_GetOp(bytes): if len(result) > 0: result += " " if opcode <= opcodes.OP_PUSHDATA4: result += "%d:" % (opcode,) result += short_hex(vch) else: result += script_GetOpName(opcode) return result def match_decoded(decoded, to_match): if len(decoded) != len(to_match): return False for i in range(len(decoded)): if to_match[i] == opcodes.OP_PUSHDATA4 and decoded[i][0] <= opcodes.OP_PUSHDATA4: continue # Opcodes below OP_PUSHDATA4 all just push data onto stack, and are equivalent. if to_match[i] != decoded[i][0]: return False return True def get_address_from_input_script(bytes): try: decoded = [ x for x in script_GetOp(bytes) ] except: # coinbase transactions raise an exception return [], [], None # non-generated TxIn transactions push a signature # (seventy-something bytes) and then their public key # (33 or 65 bytes) onto the stack: match = [ opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4 ] if match_decoded(decoded, match): return None, None, public_key_to_pubkey_address(decoded[1][1]) # p2sh transaction, 2 of n match = [ opcodes.OP_0 ] while len(match) < len(decoded): match.append(opcodes.OP_PUSHDATA4) if match_decoded(decoded, match): redeemScript = decoded[-1][1] num = len(match) - 2 signatures = map(lambda x:x[1].encode('hex'), decoded[1:-1]) dec2 = [ x for x in script_GetOp(redeemScript) ] # 2 of 2 match2 = [ opcodes.OP_2, opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4, opcodes.OP_2, opcodes.OP_CHECKMULTISIG ] if match_decoded(dec2, match2): pubkeys = [ dec2[1][1].encode('hex'), dec2[2][1].encode('hex') ] return pubkeys, signatures, hash_160_to_script_address(hash_160(redeemScript)) # 2 of 3 match2 = [ opcodes.OP_2, opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4, opcodes.OP_3, opcodes.OP_CHECKMULTISIG ] if match_decoded(dec2, match2): pubkeys = [ dec2[1][1].encode('hex'), dec2[2][1].encode('hex'), dec2[3][1].encode('hex') ] return pubkeys, signatures, hash_160_to_script_address(hash_160(redeemScript)) return [], [], None def get_address_from_output_script(bytes): try: decoded = [ x for x in script_GetOp(bytes) ] except: return None # The Genesis Block, self-payments, and pay-by-IP-address payments look like: # 65 BYTES:... CHECKSIG match = [opcodes.OP_PUSHDATA4, opcodes.OP_CHECKSIG] if match_decoded(decoded, match): return public_key_to_pubkey_address(decoded[0][1]) # coins sent to black hole # DUP HASH160 20 BYTES:... 
EQUALVERIFY CHECKSIG match = [opcodes.OP_DUP, opcodes.OP_HASH160, opcodes.OP_0, opcodes.OP_EQUALVERIFY, opcodes.OP_CHECKSIG] if match_decoded(decoded, match): return None # Pay-by-Creditbit-address TxOuts look like: # DUP HASH160 20 BYTES:... EQUALVERIFY CHECKSIG match = [opcodes.OP_DUP, opcodes.OP_HASH160, opcodes.OP_PUSHDATA4, opcodes.OP_EQUALVERIFY, opcodes.OP_CHECKSIG] if match_decoded(decoded, match): return hash_160_to_pubkey_address(decoded[2][1]) # strange tx match = [opcodes.OP_DUP, opcodes.OP_HASH160, opcodes.OP_PUSHDATA4, opcodes.OP_EQUALVERIFY, opcodes.OP_CHECKSIG, opcodes.OP_NOP] if match_decoded(decoded, match): return hash_160_to_pubkey_address(decoded[2][1]) # p2sh match = [ opcodes.OP_HASH160, opcodes.OP_PUSHDATA4, opcodes.OP_EQUAL ] if match_decoded(decoded, match): addr = hash_160_to_script_address(decoded[1][1]) return addr return None
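

# Illustrative sketch (not used by the parser above): the CompactSize rule
# implemented by read_compact_size()/write_compact_size() chooses a prefix
# byte from the magnitude of the value. A minimal standalone encoder for the
# same rule, in this module's Python 2 bytes-as-str style:
def _example_encode_compact_size(n):
    if n < 0:
        raise ValueError("size must be non-negative")
    if n < 253:
        return chr(n)                         # raw single byte
    if n < 2 ** 16:
        return '\xfd' + struct.pack('<H', n)  # 0xfd prefix + uint16
    if n < 2 ** 32:
        return '\xfe' + struct.pack('<I', n)  # 0xfe prefix + uint32
    return '\xff' + struct.pack('<Q', n)      # 0xff prefix + uint64

assert _example_encode_compact_size(252) == chr(252)
assert _example_encode_compact_size(253) == '\xfd\xfd\x00'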
import base64 import glob import os import re import socket import shutil import subprocess import sys import yaml import pwd from itertools import izip, tee from operator import itemgetter from charmhelpers.core.host import pwgen, lsb_release, service_restart from charmhelpers.core.hookenv import ( log, config as config_get, local_unit, relation_set, relation_ids as get_relation_ids, relations_of_type, relations_for_id, relation_id, open_port, close_port, unit_get, ) from charmhelpers.fetch import ( apt_install, add_source, apt_update, apt_cache ) from charmhelpers.contrib.charmsupport import nrpe default_haproxy_config_dir = "/etc/haproxy" default_haproxy_config = "%s/haproxy.cfg" % default_haproxy_config_dir default_haproxy_service_config_dir = "/var/run/haproxy" default_haproxy_lib_dir = "/var/lib/haproxy" metrics_cronjob_path = "/etc/cron.d/haproxy_metrics" metrics_script_path = "/usr/local/bin/haproxy_to_statsd.sh" service_affecting_packages = ['haproxy'] apt_backports_template = ( "deb http://archive.ubuntu.com/ubuntu %(release)s-backports " "main restricted universe multiverse") haproxy_preferences_path = "/etc/apt/preferences.d/haproxy" dupe_options = [ "mode tcp", "option tcplog", "mode http", "option httplog", ] frontend_only_options = [ "acl", "backlog", "bind", "capture cookie", "capture request header", "capture response header", "clitimeout", "default_backend", "http-request", "maxconn", "monitor fail", "monitor-net", "monitor-uri", "option accept-invalid-http-request", "option clitcpka", "option contstats", "option dontlog-normal", "option dontlognull", "option http-use-proxy-header", "option log-separate-errors", "option logasap", "option socket-stats", "option tcp-smart-accept", "rate-limit sessions", "redirect", "tcp-request content accept", "tcp-request content reject", "tcp-request inspect-delay", "timeout client", "timeout clitimeout", "use_backend", ] class InvalidRelationDataError(Exception): """Invalid data has been provided in the relation.""" def comma_split(value): values = value.split(",") return filter(None, (v.strip() for v in values)) def ensure_package_status(packages, status): if status in ['install', 'hold']: selections = ''.join(['{} {}\n'.format(package, status) for package in packages]) dpkg = subprocess.Popen(['dpkg', '--set-selections'], stdin=subprocess.PIPE) dpkg.communicate(input=selections) def render_template(template_name, vars): # deferred import so install hook can install jinja2 from jinja2 import Environment, FileSystemLoader templates_dir = os.path.join(os.environ['CHARM_DIR'], 'templates') template_env = Environment(loader=FileSystemLoader(templates_dir)) template = template_env.get_template(template_name) return template.render(vars) def enable_haproxy(): default_haproxy = "/etc/default/haproxy" with open(default_haproxy) as f: enabled_haproxy = f.read().replace('ENABLED=0', 'ENABLED=1') with open(default_haproxy, 'w') as f: f.write(enabled_haproxy) def create_haproxy_globals(): config_data = config_get() global_log = comma_split(config_data['global_log']) haproxy_globals = [] haproxy_globals.append('global') for global_log_item in global_log: haproxy_globals.append(" log %s" % global_log_item.strip()) haproxy_globals.append(" maxconn %d" % config_data['global_maxconn']) haproxy_globals.append(" user %s" % config_data['global_user']) haproxy_globals.append(" group %s" % config_data['global_group']) if config_data['global_debug'] is True: haproxy_globals.append(" debug") if config_data['global_quiet'] is True: haproxy_globals.append(" 
quiet") haproxy_globals.append(" spread-checks %d" % config_data['global_spread_checks']) if has_ssl_support(): haproxy_globals.append(" tune.ssl.default-dh-param %d" % config_data['global_default_dh_param']) haproxy_globals.append(" ssl-default-bind-ciphers %s" % config_data['global_default_bind_ciphers']) if config_data['global_stats_socket'] is True: sock_path = "/var/run/haproxy/haproxy.sock" haproxy_globals.append(" stats socket %s mode 0600" % sock_path) return '\n'.join(haproxy_globals) def create_haproxy_defaults(): config_data = config_get() default_options = comma_split(config_data['default_options']) default_timeouts = comma_split(config_data['default_timeouts']) haproxy_defaults = [] haproxy_defaults.append("defaults") haproxy_defaults.append(" log %s" % config_data['default_log']) haproxy_defaults.append(" mode %s" % config_data['default_mode']) for option_item in default_options: haproxy_defaults.append(" option %s" % option_item.strip()) haproxy_defaults.append(" retries %d" % config_data['default_retries']) for timeout_item in default_timeouts: haproxy_defaults.append(" timeout %s" % timeout_item.strip()) return '\n'.join(haproxy_defaults) def load_haproxy_config(haproxy_config_file="/etc/haproxy/haproxy.cfg"): if os.path.isfile(haproxy_config_file): return open(haproxy_config_file).read() else: return None def get_monitoring_password(haproxy_config_file="/etc/haproxy/haproxy.cfg"): haproxy_config = load_haproxy_config(haproxy_config_file) if haproxy_config is None: return None m = re.search("stats auth\s+(\w+):(\w+)", haproxy_config) if m is not None: return m.group(2) else: return None def get_service_ports(haproxy_config_file="/etc/haproxy/haproxy.cfg"): stanzas = get_listen_stanzas(haproxy_config_file=haproxy_config_file) return tuple((int(port) for service, addr, port in stanzas)) def get_listen_stanzas(haproxy_config_file="/etc/haproxy/haproxy.cfg"): haproxy_config = load_haproxy_config(haproxy_config_file) if haproxy_config is None: return () listen_stanzas = re.findall( "listen\s+([^\s]+)\s+([^:]+):(.*)", haproxy_config) # Match bind stanzas like: # # bind 1.2.3.5:234 # bind 1.2.3.4:123 ssl crt /foo/bar bind_stanzas = re.findall( "\s+bind\s+([^:]+):(\d+).*\n\s+default_backend\s+([^\s]+)", haproxy_config, re.M) return (tuple(((service, addr, int(port)) for service, addr, port in listen_stanzas)) + tuple(((service, addr, int(port)) for addr, port, service in bind_stanzas))) def update_service_ports(old_service_ports=None, new_service_ports=None): if old_service_ports is None or new_service_ports is None: return None for port in old_service_ports: if port not in new_service_ports: close_port(port) for port in new_service_ports: if port not in old_service_ports: open_port(port) def update_sysctl(config_data): sysctl_dict = yaml.load(config_data.get("sysctl", "{}")) if sysctl_dict: sysctl_file = open("/etc/sysctl.d/50-haproxy.conf", "w") for key in sysctl_dict: sysctl_file.write("{}={}\n".format(key, sysctl_dict[key])) sysctl_file.close() subprocess.call(["sysctl", "-p", "/etc/sysctl.d/50-haproxy.conf"]) def update_ssl_cert(config_data): ssl_cert = config_data.get("ssl_cert") if not ssl_cert: return if ssl_cert == "SELFSIGNED": log("Using self-signed certificate") content = "".join(get_selfsigned_cert()) else: ssl_key = config_data.get("ssl_key") if not ssl_key: log("No ssl_key provided, proceeding without default certificate") return log("Using config-provided certificate") content = base64.b64decode(ssl_cert) content += base64.b64decode(ssl_key) pem_path = 
os.path.join(default_haproxy_lib_dir, "default.pem") write_ssl_pem(pem_path, content) def create_listen_stanza(service_name=None, service_ip=None, service_port=None, service_options=None, server_entries=None, service_errorfiles=None, service_crts=None, service_backends=None): if service_name is None or service_ip is None or service_port is None: return None fe_options = [] be_options = [] if service_options is not None: # For options that should be duplicated in both frontend and backend, # copy them to both. for o in dupe_options: if any(map(o.strip().startswith, service_options)): fe_options.append(o) be_options.append(o) # Filter provided service options into frontend-only and backend-only. results = izip( (fe_options, be_options), (True, False), tee((o, any(map(o.strip().startswith, frontend_only_options))) for o in service_options)) for out, cond, result in results: out.extend(option for option, match in result if match is cond and option not in out) service_config = [] unit_name = os.environ["JUJU_UNIT_NAME"].replace("/", "-") service_config.append("frontend %s-%s" % (unit_name, service_port)) bind_stanza = " bind %s:%s" % (service_ip, service_port) if service_crts: # Enable SSL termination for this frontend, using the given # certificates. bind_stanza += " ssl" for i, crt in enumerate(service_crts): if crt == "DEFAULT": path = os.path.join(default_haproxy_lib_dir, "default.pem") else: path = os.path.join(default_haproxy_lib_dir, "service_%s" % service_name, "%d.pem" % i) # SSLv3 is always off, since it's vulnerable to POODLE attacks bind_stanza += " crt %s no-sslv3" % path service_config.append(bind_stanza) service_config.append(" default_backend %s" % (service_name,)) service_config.extend(" %s" % service_option.strip() for service_option in fe_options) # For now errorfiles are common for all backends, in the future we # might offer support for per-backend error files. backend_errorfiles = [] # List of (status, path) tuples if service_errorfiles is not None: for errorfile in service_errorfiles: path = os.path.join(default_haproxy_lib_dir, "service_%s" % service_name, "%s.http" % errorfile["http_status"]) backend_errorfiles.append((errorfile["http_status"], path)) # Default backend _append_backend( service_config, service_name, be_options, backend_errorfiles, server_entries) # Extra backends if service_backends is not None: for service_backend in service_backends: _append_backend( service_config, service_backend["backend_name"], be_options, backend_errorfiles, service_backend["servers"]) return '\n'.join(service_config) def _append_backend(service_config, name, options, errorfiles, server_entries): """Append a new backend stanza to the given service_config. A backend stanza consists in a 'backend <name>' line followed by option lines, errorfile lines and server line. 
""" service_config.append("") service_config.append("backend %s" % (name,)) service_config.extend(" %s" % option.strip() for option in options) for status, path in errorfiles: service_config.append(" errorfile %s %s" % (status, path)) if isinstance(server_entries, (list, tuple)): for i, (server_name, server_ip, server_port, server_options) in enumerate(server_entries): server_line = " server %s %s:%s" % \ (server_name, server_ip, server_port) if server_options is not None: if isinstance(server_options, basestring): server_line += " " + server_options else: server_line += " " + " ".join(server_options) server_line = server_line.format(i=i) service_config.append(server_line) def create_monitoring_stanza(service_name="haproxy_monitoring"): config_data = config_get() if config_data['enable_monitoring'] is False: return None monitoring_password = get_monitoring_password() if config_data['monitoring_password'] != "changeme": monitoring_password = config_data['monitoring_password'] elif (monitoring_password is None and config_data['monitoring_password'] == "changeme"): monitoring_password = pwgen(length=20) monitoring_config = [] monitoring_config.append("mode http") monitoring_config.append("acl allowed_cidr src %s" % config_data['monitoring_allowed_cidr']) monitoring_config.append("http-request deny unless allowed_cidr") monitoring_config.append("stats enable") monitoring_config.append("stats uri /") monitoring_config.append("stats realm Haproxy\ Statistics") monitoring_config.append("stats auth %s:%s" % (config_data['monitoring_username'], monitoring_password)) monitoring_config.append("stats refresh %d" % config_data['monitoring_stats_refresh']) return create_listen_stanza(service_name, "0.0.0.0", config_data['monitoring_port'], monitoring_config) def get_config_services(): config_data = config_get() services = {} return parse_services_yaml(services, config_data['services']) def parse_services_yaml(services, yaml_data): """ Parse given yaml services data. Add it into the "services" dict. Ensure that you union multiple services "server" entries, as these are the haproxy backends that are contacted. """ yaml_services = yaml.safe_load(yaml_data) if yaml_services is None: return services for service in yaml_services: service_name = service["service_name"] if not services: # 'None' is used as a marker for the first service defined, which # is used as the default service if a proxied server doesn't # specify which service it is bound to. services[None] = {"service_name": service_name} if "service_options" in service: if isinstance(service["service_options"], basestring): service["service_options"] = comma_split( service["service_options"]) if is_proxy(service_name) and ("option forwardfor" not in service["service_options"]): service["service_options"].append("option forwardfor") if (("server_options" in service and isinstance(service["server_options"], basestring))): service["server_options"] = comma_split(service["server_options"]) services[service_name] = merge_service( services.get(service_name, {}), service) return services def _add_items_if_missing(target, additions): """ Append items from `additions` to `target` if they are not present already. Returns a new list. """ result = target[:] for addition in additions: if addition not in result: result.append(addition) return result def merge_service(old_service, new_service): """ Helper function to merge two service entries correctly. 
Everything will get trampled (preferring old_service), except "servers"
    which will be unioned across both entries, stripping strict dups.
    """
    service = new_service.copy()
    service.update(old_service)

    # Merge all 'servers' entries of the default backend.
    if "servers" in old_service and "servers" in new_service:
        service["servers"] = _add_items_if_missing(
            old_service["servers"], new_service["servers"])

    # Merge all 'backends' and their contained "servers".
    if "backends" in old_service and "backends" in new_service:
        backends_by_name = {}
        # Go through backends in old and new configs and add them to
        # backends_by_name, merging 'servers' while at it.
        for backend in service["backends"] + new_service["backends"]:
            backend_name = backend.get("backend_name")
            if backend_name is None:
                raise InvalidRelationDataError(
                    "Each backend must have backend_name.")
            if backend_name in backends_by_name:
                # Merge servers.
                target_backend = backends_by_name[backend_name]
                target_backend["servers"] = _add_items_if_missing(
                    target_backend["servers"], backend["servers"])
            else:
                backends_by_name[backend_name] = backend

        service["backends"] = sorted(
            backends_by_name.values(), key=itemgetter('backend_name'))

    return service


def ensure_service_host_port(services):
    config_data = config_get()
    seen = []
    missing = []
    for service, options in sorted(services.iteritems()):
        if "service_host" not in options:
            missing.append(options)
            continue
        if "service_port" not in options:
            missing.append(options)
            continue
        seen.append((options["service_host"], int(options["service_port"])))
    seen.sort()
    last_port = seen and seen[-1][1] or int(config_data["monitoring_port"])
    for options in missing:
        last_port += 2
        options["service_host"] = "0.0.0.0"
        options["service_port"] = last_port
    return services


def get_config_service(service_name=None):
    return get_config_services().get(service_name, None)


def is_proxy(service_name):
    flag_path = os.path.join(default_haproxy_service_config_dir,
                             "%s.is.proxy" % service_name)
    return os.path.exists(flag_path)


def create_services():
    services_dict = get_config_services()
    config_data = config_get()

    # Augment services_dict with service definitions from relation data.
    relation_data = relations_of_type("reverseproxy")

    # Handle relations which specify their own services clauses
    for relation_info in relation_data:
        if "services" in relation_info:
            services_dict = parse_services_yaml(services_dict,
                                                relation_info['services'])

    if len(services_dict) == 0:
        log("No services configured, exiting.")
        return

    for relation_info in relation_data:
        unit = relation_info['__unit__']

        # Skip entries that specify their own services clauses, this was
        # handled earlier.
        if "services" in relation_info:
            log("Unit '%s' overrides 'services', "
                "skipping further processing." % unit)
            continue

        juju_service_name = unit.rpartition('/')[0]

        relation_ok = True
        for required in ("port", "private-address"):
            if required not in relation_info:
                log("No %s in relation data for '%s', skipping." %
                    (required, unit))
                relation_ok = False
                break

        if not relation_ok:
            continue

        # Mandatory switches ( private-address, port )
        host = relation_info['private-address']
        port = relation_info['port']
        server_name = ("%s-%s" % (unit.replace("/", "-"), port))

        # Optional switches ( service_name, sitenames )
        service_names = set()
        if 'service_name' in relation_info:
            if relation_info['service_name'] in services_dict:
                service_names.add(relation_info['service_name'])
            else:
                log("Service '%s' does not exist."
                    % relation_info['service_name'])
                continue

        if 'sitenames' in relation_info:
            sitenames = relation_info['sitenames'].split()
            for sitename in sitenames:
                if sitename in services_dict:
                    service_names.add(sitename)

        if juju_service_name + "_service" in services_dict:
            service_names.add(juju_service_name + "_service")

        if juju_service_name in services_dict:
            service_names.add(juju_service_name)

        if not service_names:
            service_names.add(services_dict[None]["service_name"])

        for service_name in service_names:
            service = services_dict[service_name]

            # Add the server entries
            servers = service.setdefault("servers", [])
            servers.append((server_name, host, port,
                            services_dict[service_name].get(
                                'server_options', [])))

    has_servers = False
    for service_name, service in services_dict.iteritems():
        if service.get("servers", []):
            has_servers = True
    if not has_servers:
        log("No backend servers, exiting.")
        return

    del services_dict[None]
    services_dict = ensure_service_host_port(services_dict)
    if config_data["peering_mode"] != "active-active":
        services_dict = apply_peer_config(services_dict)
    write_service_config(services_dict)
    return services_dict


def apply_peer_config(services_dict):
    peer_data = relations_of_type("peer")

    peer_services = {}
    for relation_info in peer_data:
        unit_name = relation_info["__unit__"]
        peer_services_data = relation_info.get("all_services")
        if peer_services_data is None:
            continue
        service_data = yaml.safe_load(peer_services_data)
        for service in service_data:
            service_name = service["service_name"]
            if service_name in services_dict:
                peer_service = peer_services.setdefault(service_name, {})
                peer_service["service_name"] = service_name
                peer_service["service_host"] = service["service_host"]
                peer_service["service_port"] = service["service_port"]
                peer_service["service_options"] = ["balance leastconn",
                                                   "mode tcp",
                                                   "option tcplog"]
                servers = peer_service.setdefault("servers", [])
                servers.append((unit_name.replace("/", "-"),
                                relation_info["private-address"],
                                service["service_port"] + 1, ["check"]))

    if not peer_services:
        return services_dict

    unit_name = os.environ["JUJU_UNIT_NAME"].replace("/", "-")
    private_address = unit_get("private-address")
    for service_name, peer_service in peer_services.iteritems():
        original_service = services_dict[service_name]

        # If the original service has timeout settings, copy them over to the
        # peer service.
        for option in original_service.get("service_options", ()):
            if "timeout" in option:
                peer_service["service_options"].append(option)

        servers = peer_service["servers"]

        # Add ourselves to the list of servers for the peer listen stanza.
        servers.append((unit_name, private_address,
                        original_service["service_port"] + 1,
                        ["check"]))

        # Make all but the first server in the peer listen stanza a backup
        # server.
        servers.sort()
        for server in servers[1:]:
            server[3].append("backup")

        # Remap original service port, will now be used by peer listen stanza.
        original_service["service_port"] += 1

        # Remap original service to a new name, stuff peer listen stanza into
        # its place.
be_service = service_name + "_be"
        original_service["service_name"] = be_service
        services_dict[be_service] = original_service
        services_dict[service_name] = peer_service

    return services_dict


def write_service_config(services_dict):
    # Construct the new haproxy.cfg file
    for service_key, service_config in services_dict.items():
        log("Service: %s" % service_key)
        service_name = service_config["service_name"]
        server_entries = service_config.get('servers')
        backends = service_config.get('backends', [])

        errorfiles = service_config.get('errorfiles', [])
        for errorfile in errorfiles:
            path = get_service_lib_path(service_name)
            full_path = os.path.join(
                path, "%s.http" % errorfile["http_status"])
            with open(full_path, 'w') as f:
                f.write(base64.b64decode(errorfile["content"]))

        # Write to disk the content of the given SSL certificates.
        # write_ssl_pem() writes the file with restrictive permissions, so no
        # second plain write is needed afterwards.
        crts = service_config.get('crts', [])
        for i, crt in enumerate(crts):
            if crt == "DEFAULT":
                continue
            content = base64.b64decode(crt)
            path = get_service_lib_path(service_name)
            full_path = os.path.join(path, "%d.pem" % i)
            write_ssl_pem(full_path, content)

        if not os.path.exists(default_haproxy_service_config_dir):
            os.mkdir(default_haproxy_service_config_dir, 0600)
        with open(os.path.join(default_haproxy_service_config_dir,
                               "%s.service" % service_name), 'w') as config:
            config.write(create_listen_stanza(
                service_name,
                service_config['service_host'],
                service_config['service_port'],
                service_config['service_options'],
                server_entries, errorfiles, crts, backends))


def get_service_lib_path(service_name):
    # Get a service-specific lib path
    path = os.path.join(default_haproxy_lib_dir,
                        "service_%s" % service_name)
    if not os.path.exists(path):
        os.makedirs(path)
    return path


def load_services(service_name=None):
    services = ''
    if service_name is not None:
        if os.path.exists("%s/%s.service" % (
                default_haproxy_service_config_dir, service_name)):
            with open("%s/%s.service" % (default_haproxy_service_config_dir,
                                         service_name)) as f:
                services = f.read()
        else:
            services = None
    else:
        for service in glob.glob("%s/*.service" %
                                 default_haproxy_service_config_dir):
            with open(service) as f:
                services += f.read()
                services += "\n\n"
    return services


def remove_services(service_name=None):
    if service_name is not None:
        path = "%s/%s.service" % (default_haproxy_service_config_dir,
                                  service_name)
        if os.path.exists(path):
            try:
                os.remove(path)
            except Exception, e:
                log(str(e))
                return False
        return True
    else:
        for service in glob.glob("%s/*.service" %
                                 default_haproxy_service_config_dir):
            try:
                os.remove(service)
            except Exception, e:
                log(str(e))
                pass
        return True


def construct_haproxy_config(haproxy_globals=None,
                             haproxy_defaults=None,
                             haproxy_monitoring=None,
                             haproxy_services=None):
    if None in (haproxy_globals, haproxy_defaults):
        return
    with open(default_haproxy_config, 'w') as haproxy_config:
        config_string = ''
        for config in (haproxy_globals, haproxy_defaults,
                       haproxy_monitoring, haproxy_services):
            if config is not None:
                config_string += config + '\n\n'
        haproxy_config.write(config_string)


def service_haproxy(action=None, haproxy_config=default_haproxy_config):
    if None in (action, haproxy_config):
        return None
    elif action == "check":
        command = ['/usr/sbin/haproxy', '-f', haproxy_config, '-c']
    else:
        command = ['service', 'haproxy', action]
    return_value = subprocess.call(command)
    return return_value == 0


def install_hook():
    # Run both during initial install and during upgrade-charm.
if not os.path.exists(default_haproxy_service_config_dir): os.mkdir(default_haproxy_service_config_dir, 0600) config_data = config_get() source = config_data.get('source') if source == 'backports': release = lsb_release()['DISTRIB_CODENAME'] source = apt_backports_template % {'release': release} add_backports_preferences(release) add_source(source, config_data.get('key')) apt_update(fatal=True) apt_install(['haproxy', 'python-jinja2'], fatal=True) # Install pyasn1 library and modules for inspecting SSL certificates apt_install(['python-pyasn1', 'python-pyasn1-modules'], fatal=False) ensure_package_status(service_affecting_packages, config_data['package_status']) enable_haproxy() def config_changed(): config_data = config_get() ensure_package_status(service_affecting_packages, config_data['package_status']) old_service_ports = get_service_ports() old_stanzas = get_listen_stanzas() haproxy_globals = create_haproxy_globals() haproxy_defaults = create_haproxy_defaults() if config_data['enable_monitoring'] is True: haproxy_monitoring = create_monitoring_stanza() else: haproxy_monitoring = None remove_services() if config_data.changed("ssl_cert"): # TODO: handle also the case where it's the public-address value # that changes (see also #1444062) _notify_reverseproxy() if not create_services(): sys.exit() haproxy_services = load_services() update_sysctl(config_data) update_ssl_cert(config_data) construct_haproxy_config(haproxy_globals, haproxy_defaults, haproxy_monitoring, haproxy_services) write_metrics_cronjob(metrics_script_path, metrics_cronjob_path) if service_haproxy("check"): update_service_ports(old_service_ports, get_service_ports()) service_haproxy("reload") if not (get_listen_stanzas() == old_stanzas): notify_website() notify_peer() else: # XXX Ideally the config should be restored to a working state if the # check fails, otherwise an inadvertent reload will cause the service # to be broken. log("HAProxy configuration check failed, exiting.") sys.exit(1) if config_data.changed("global_log") or config_data.changed("source"): # restart rsyslog to pickup haproxy rsyslog config # This could be removed once the following bug is fixed in the haproxy # package: # https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=790871 service_restart("rsyslog") def start_hook(): if service_haproxy("status"): return service_haproxy("restart") else: return service_haproxy("start") def stop_hook(): if service_haproxy("status"): return service_haproxy("stop") def reverseproxy_interface(hook_name=None): if hook_name is None: return None if hook_name == "joined": # When we join a new reverseproxy relation we communicate to the # remote unit our public IP and public SSL certificate, since # some applications might need it in order to tell third parties # how to interact with them. _notify_reverseproxy(relation_ids=(relation_id(),)) return if hook_name in ("changed", "departed"): config_changed() def _notify_reverseproxy(relation_ids=None): config_data = config_get() ssl_cert = config_data.get("ssl_cert") if ssl_cert == "SELFSIGNED": ssl_cert = base64.b64encode(get_selfsigned_cert()[0]) relation_settings = { "public-address": unit_get("public-address"), "ssl_cert": ssl_cert, } for rid in relation_ids or get_relation_ids("reverseproxy"): relation_set(relation_id=rid, relation_settings=relation_settings) def website_interface(hook_name=None): if hook_name is None: return None # Notify website relation but only for the current relation in context. 
notify_website(changed=hook_name == "changed",
                   relation_ids=(relation_id(),))


def get_hostname(host=None):
    my_host = socket.gethostname()
    if host is None or host == "0.0.0.0":
        # If the listen ip has been set to 0.0.0.0 then pass back the hostname
        return socket.getfqdn(my_host)
    elif host == "localhost":
        # If the fqdn lookup has returned localhost (lxc setups) then return
        # hostname
        return my_host
    return host


def notify_relation(relation, changed=False, relation_ids=None):
    default_host = get_hostname()
    default_port = 80

    for rid in relation_ids or get_relation_ids(relation):
        service_names = set()
        if rid is None:
            rid = relation_id()
        for relation_data in relations_for_id(rid):
            if 'service_name' in relation_data:
                service_names.add(relation_data['service_name'])

            if changed:
                if 'is-proxy' in relation_data:
                    remote_service = ("%s__%d" % (relation_data['hostname'],
                                                  relation_data['port']))
                    open("%s/%s.is.proxy" % (
                        default_haproxy_service_config_dir,
                        remote_service), 'a').close()

        service_name = None
        if len(service_names) == 1:
            service_name = service_names.pop()
        elif len(service_names) > 1:
            log("Remote units requested more than a single service name. "
                "Falling back to default host/port.")

        if service_name is not None:
            # If a specific service has been asked for then return the ip:port
            # for that service, else pass back the default
            requestedservice = get_config_service(service_name)
            my_host = get_hostname(requestedservice['service_host'])
            my_port = requestedservice['service_port']
        else:
            my_host = default_host
            my_port = default_port

        all_services = ""
        services_dict = create_services()
        if services_dict is not None:
            all_services = yaml.safe_dump(sorted(services_dict.itervalues()))

        relation_set(relation_id=rid, port=str(my_port),
                     hostname=my_host,
                     all_services=all_services)


def notify_website(changed=False, relation_ids=None):
    notify_relation("website", changed=changed, relation_ids=relation_ids)


def notify_peer(changed=False, relation_ids=None):
    notify_relation("peer", changed=changed, relation_ids=relation_ids)


def install_nrpe_scripts():
    scripts_src = os.path.join(os.environ["CHARM_DIR"], "files",
                               "nrpe")
    scripts_dst = "/usr/lib/nagios/plugins"
    if not os.path.exists(scripts_dst):
        os.makedirs(scripts_dst)
    for fname in glob.glob(os.path.join(scripts_src, "*.sh")):
        shutil.copy2(fname,
                     os.path.join(scripts_dst, os.path.basename(fname)))


def update_nrpe_config():
    install_nrpe_scripts()
    nrpe_compat = nrpe.NRPE()
    nrpe_compat.add_check('haproxy', 'Check HAProxy', 'check_haproxy.sh')
    nrpe_compat.add_check('haproxy_queue', 'Check HAProxy queue depth',
                          'check_haproxy_queue_depth.sh')
    nrpe_compat.write()


def delete_metrics_cronjob(cron_path):
    try:
        os.unlink(cron_path)
    except OSError:
        pass


def write_metrics_cronjob(script_path, cron_path):
    config_data = config_get()

    if config_data['enable_monitoring'] is False:
        log("enable_monitoring must be set to true for metrics")
        delete_metrics_cronjob(cron_path)
        return

    # need the following two configs to be valid
    metrics_target = config_data['metrics_target'].strip()
    metrics_sample_interval = config_data['metrics_sample_interval']
    if (not metrics_target
            or ':' not in metrics_target
            or not metrics_sample_interval):
        log("Required config not found or invalid "
            "(metrics_target, metrics_sample_interval), "
            "disabling metrics")
        delete_metrics_cronjob(cron_path)
        return

    charm_dir = os.environ['CHARM_DIR']
    statsd_host, statsd_port = metrics_target.split(':', 1)
    metrics_prefix = config_data['metrics_prefix'].strip()
    metrics_prefix = metrics_prefix.replace(
        "$UNIT", local_unit().replace('.',
'-').replace('/', '-')) haproxy_hostport = ":".join(['localhost', str(config_data['monitoring_port'])]) haproxy_httpauth = ":".join([config_data['monitoring_username'].strip(), get_monitoring_password()]) # ensure script installed shutil.copy2('%s/files/metrics/haproxy_to_statsd.sh' % charm_dir, metrics_script_path) # write the crontab with open(cron_path, 'w') as cronjob: cronjob.write(render_template("metrics_cronjob.template", { 'interval': config_data['metrics_sample_interval'], 'script': script_path, 'metrics_prefix': metrics_prefix, 'metrics_sample_interval': metrics_sample_interval, 'haproxy_hostport': haproxy_hostport, 'haproxy_httpauth': haproxy_httpauth, 'statsd_host': statsd_host, 'statsd_port': statsd_port, })) def add_backports_preferences(release): with open(haproxy_preferences_path, "w") as preferences: preferences.write( "Package: haproxy\n" "Pin: release a=%(release)s-backports\n" "Pin-Priority: 500\n" % {'release': release}) def has_ssl_support(): """Return True if the locally installed haproxy package supports SSL.""" cache = apt_cache() package = cache["haproxy"] return package.current_ver.ver_str.split(".")[0:2] >= ["1", "5"] def get_selfsigned_cert(): """Return the content of the self-signed certificate. If no self-signed certificate is there or the existing one doesn't match our unit data, a new one will be created. @return: A 2-tuple whose first item holds the content of the public certificate and the second item the content of the private key. """ cert_file = os.path.join(default_haproxy_lib_dir, "selfsigned_ca.crt") key_file = os.path.join(default_haproxy_lib_dir, "selfsigned.key") if is_selfsigned_cert_stale(cert_file, key_file): log("Generating self-signed certificate") gen_selfsigned_cert(cert_file, key_file) result = () for content_file in [cert_file, key_file]: with open(content_file, "r") as fd: result += (fd.read(),) return result def is_selfsigned_cert_stale(cert_file, key_file): """ Do we need to generate a new self-signed cert? @param cert_file: destination path of generated certificate @param key_file: destination path of generated private key """ # Basic Existence Checks if not os.path.exists(cert_file): return True if not os.path.exists(key_file): return True # Common Name from OpenSSL import crypto with open(cert_file) as fd: cert = crypto.load_certificate( crypto.FILETYPE_PEM, fd.read()) cn = cert.get_subject().commonName if unit_get('public-address') != cn: return True # Subject Alternate Name -- only trusty+ support this try: from pyasn1.codec.der import decoder from pyasn1_modules import rfc2459 except ImportError: log('Cannot check subjAltName on <= 12.04, skipping.') return False cert_addresses = set() unit_addresses = set( [unit_get('public-address'), unit_get('private-address')]) for i in range(0, cert.get_extension_count()): extension = cert.get_extension(i) try: names = decoder.decode( extension.get_data(), asn1Spec=rfc2459.SubjectAltName())[0] for name in names: cert_addresses.add(str(name.getComponent())) except: pass if cert_addresses != unit_addresses: log('subjAltName: Cert (%s) != Unit (%s), assuming stale' % ( cert_addresses, unit_addresses)) return True return False def gen_selfsigned_cert(cert_file, key_file): """ Create a self-signed certificate. 
@param cert_file: destination path of generated certificate @param key_file: destination path of generated private key """ os.environ['OPENSSL_CN'] = unit_get('public-address') os.environ['OPENSSL_PUBLIC'] = unit_get("public-address") os.environ['OPENSSL_PRIVATE'] = unit_get("private-address") # Set the umask so the child process will inherit it and # the generated files will be readable only by root.. old_mask = os.umask(077) subprocess.call( ['openssl', 'req', '-new', '-x509', '-nodes', '-config', os.path.join(os.environ['CHARM_DIR'], 'data', 'openssl.cnf'), '-keyout', key_file, '-out', cert_file, '-days', '3650'],) os.umask(old_mask) uid = pwd.getpwnam('haproxy').pw_uid os.chown(key_file, uid, -1) os.chown(cert_file, uid, -1) def write_ssl_pem(path, content): """Write an SSL pem file and set permissions on it.""" # Set the umask so the child process will inherit it and we # can make certificate files readable only by the 'haproxy' # user (see below). old_mask = os.umask(077) with open(path, 'w') as f: f.write(content) os.umask(old_mask) uid = pwd.getpwnam('haproxy').pw_uid os.chown(path, uid, -1) def statistics_interface(): config = config_get() enable_monitoring = config['enable_monitoring'] monitoring_port = config['monitoring_port'] monitoring_password = get_monitoring_password() monitoring_username = config['monitoring_username'] for relid in get_relation_ids('statistics'): if not enable_monitoring: relation_set(relation_id=relid, enabled=enable_monitoring) else: relation_set(relation_id=relid, enabled=enable_monitoring, port=monitoring_port, password=monitoring_password, user=monitoring_username) def main(hook_name): if hook_name == "install": install_hook() elif hook_name == "upgrade-charm": install_hook() config_changed() update_nrpe_config() elif hook_name == "config-changed": config_data = config_get() if config_data.changed("source"): install_hook() config_changed() update_nrpe_config() statistics_interface() if config_data.implicit_save: config_data.save() elif hook_name == "start": start_hook() elif hook_name == "stop": stop_hook() elif hook_name == "reverseproxy-relation-broken": config_changed() elif hook_name == "reverseproxy-relation-changed": reverseproxy_interface("changed") elif hook_name == "reverseproxy-relation-departed": reverseproxy_interface("departed") elif hook_name == "reverseproxy-relation-joined": reverseproxy_interface("joined") elif hook_name == "website-relation-joined": website_interface("joined") elif hook_name == "website-relation-changed": website_interface("changed") elif hook_name == "peer-relation-joined": website_interface("joined") elif hook_name == "peer-relation-changed": reverseproxy_interface("changed") elif hook_name in ("nrpe-external-master-relation-joined", "local-monitors-relation-joined"): update_nrpe_config() elif hook_name in ("statistics-relation-joined", "statistics-relation-changed"): statistics_interface() else: print "Unknown hook" sys.exit(1) if __name__ == "__main__": hook_name = os.path.basename(sys.argv[0]) # Also support being invoked directly with hook as argument name. if hook_name == "hooks.py": if len(sys.argv) < 2: sys.exit("Missing required hook name argument.") hook_name = sys.argv[1] main(hook_name)
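

# Illustrative sketch (standalone, not charm code): create_listen_stanza()
# above renders a frontend that binds ip:port and routes to a default
# backend, followed by a backend stanza with one "server" line per entry.
# The hypothetical helper below reproduces only that overall shape, to make
# the generated config concrete; the names and entries are made up.
def _example_render_stanza(name, ip, port, servers):
    lines = ["frontend example-%s" % port,
             "    bind %s:%s" % (ip, port),
             "    default_backend %s" % name,
             "",
             "backend %s" % name]
    for server_name, server_ip, server_port in servers:
        lines.append("    server %s %s:%s check" %
                     (server_name, server_ip, server_port))
    return "\n".join(lines)

# _example_render_stanza("app", "0.0.0.0", 80, [("web-0", "10.0.0.2", 8080)])
# yields a frontend/backend pair in the same shape as the stanzas written to
# the %s.service files above.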
import xml.etree.cElementTree as ET
import re
import os

import dateutil.parser as dparser

from juriscraper.lib.string_utils import titlecase, harmonize, clean_string, CaseNameTweaker

from cl.corpus_importer.court_regexes import state_pairs
from regexes_columbia import SPECIAL_REGEXES
from parse_judges import find_judges


CASE_NAME_TWEAKER = CaseNameTweaker()

SIMPLE_TAGS = [
    "reporter_caption", "citation", "caption", "court", "docket", "posture",
    "date", "hearing_date", "panel", "attorneys"
]

STRIP_REGEX = [r'</?citation.*>', r'</?page_number.*>']

OPINION_TYPES = ['opinion', 'dissent', 'concurrence']


def parse_file(file_path, court_fallback=''):
    """Parses a file, turning it into a correctly formatted dictionary, ready
    to be used by a populate script.

    :param file_path: A path to the file to be parsed.
    :param court_fallback: A string used as a fallback in getting the court
    object. The regexes associated to its value in special_regexes will be
    used.
    """
    raw_info = get_text(file_path)
    info = {}

    # get basic info
    info['unpublished'] = raw_info['unpublished']
    info['file'] = os.path.splitext(os.path.basename(file_path))[0]
    info['docket'] = ''.join(raw_info.get('docket', [])) or None
    info['citations'] = raw_info.get('citation', [])
    info['attorneys'] = ''.join(raw_info.get('attorneys', [])) or None
    info['posture'] = ''.join(raw_info.get('posture', [])) or None
    info['court_id'] = get_court_object(''.join(raw_info.get('court', [])),
                                        court_fallback) or None
    info['panel'] = find_judges(''.join(raw_info.get('panel', []))) or []

    # get dates
    dates = raw_info.get('date', []) + raw_info.get('hearing_date', [])
    info['dates'] = parse_dates(dates)

    # get case names
    info['case_name_full'] = format_case_name(''.join(raw_info.get('caption', []))) or None
    info['case_name'] = format_case_name(''.join(raw_info.get('reporter_caption', []))) or None
    info['case_name_short'] = CASE_NAME_TWEAKER.make_case_name_short(info['case_name']) or None

    # condense opinion texts if there isn't an associated byline
    # print a warning whenever we're appending multiple texts together
    info['opinions'] = []
    for current_type in OPINION_TYPES:
        last_texts = []
        for opinion in raw_info.get('opinions', []):
            if opinion['type'] != current_type:
                continue
            last_texts.append(opinion['opinion'])
            if opinion['byline']:
                if len(last_texts) > 1:
                    print "Combining multiple %s texts in '%s'." % (current_type, file_path)
                # add the opinion and all of the previous texts
                judges = find_judges(opinion['byline'])
                info['opinions'].append({
                    'opinion': '\n'.join(last_texts),
                    'type': current_type,
                    'author': judges[0] if judges else None,
                    'joining': judges[1:] if len(judges) > 0 else []
                })
                last_texts = []
        # if there are remaining texts without bylines, either add them to the
        # last opinion of this type, or if there are none, make a new opinion
        # without an author
        if last_texts:
            relevant_opinions = [o for o in info['opinions'] if o['type'] == current_type]
            if relevant_opinions:
                print "Combining multiple %s texts in '%s'." % (current_type, file_path)
                relevant_opinions[-1]['opinion'] += '\n%s' % '\n'.join(last_texts)
            else:
                info['opinions'].append({
                    'opinion': '\n'.join(last_texts),
                    'type': current_type,
                    'author': None,
                    'joining': []
                })
    return info


def get_text(file_path):
    """Reads a file and returns a dictionary of grabbed text.

    :param file_path: A path to the file to be parsed.
""" with open(file_path, 'r') as f: file_string = f.read() raw_info = {} # used when associating a byline of an opinion with the opinion's text current_byline = { 'type': None ,'name': None } # if this is an unpublished opinion, note this down and remove all <unpublished> tags raw_info['unpublished'] = False if '<opinion unpublished=true>' in file_string: file_string = file_string.replace('<opinion unpublished=true>', '<opinion>') file_string = file_string.replace('<unpublished>', '').replace('</unpublished>', '') raw_info['unpublished'] = True # turn the file into a readable tree try: root = ET.fromstring(file_string) except ET.ParseError: # these seem to be erroneously swapped quite often -- try to fix the misordered tags file_string = file_string.replace('</footnote_body></block_quote>', '</block_quote></footnote_body>') root = ET.fromstring(file_string) for child in root.iter(): # if this child is one of the ones identified by SIMPLE_TAGS, just grab its text if child.tag in SIMPLE_TAGS: # strip unwanted tags and xml formatting text = get_xml_string(child) for r in STRIP_REGEX: text = re.sub(r, '', text) text = re.sub(r'<.*?>', ' ', text).strip() # put into a list associated with its tag raw_info.setdefault(child.tag, []).append(text) continue for opinion_type in OPINION_TYPES: # if this child is a byline, note it down and use it later if child.tag == "%s_byline" % opinion_type: current_byline['type'] = opinion_type current_byline['name'] = get_xml_string(child) break # if this child is an opinion text blob, add it to an incomplete opinion and move into the info dict if child.tag == "%s_text" % opinion_type: # add the full opinion info, possibly associating it to a byline raw_info.setdefault('opinions', []).append({ 'type': opinion_type ,'byline': current_byline['name'] if current_byline['type'] == opinion_type else None ,'opinion': get_xml_string(child) }) current_byline['type'] = current_byline['name'] = None break return raw_info def get_xml_string(e): """Returns a normalized string of the text in <element>. :param e: An XML element. """ inner_string = re.sub(r'(^<%s\b.*?>|</%s\b.*?>$)' % (e.tag, e.tag), '', ET.tostring(e)) return inner_string.decode('utf-8').strip() def parse_dates(raw_dates): """Parses the dates from a list of string. Returns a list of lists of (string, datetime) tuples if there is a string before the date (or None). :param raw_dates: A list of (probably) date-containing strings """ months = re.compile("january|february|march|april|may|june|july|august|september|october|november|december") dates = [] for raw_date in raw_dates: # there can be multiple years in a string, so we split on possible indicators raw_parts = re.split('(?<=[0-9][0-9][0-9][0-9])(\s|.)', raw_date) #index over split line and add dates inner_dates = [] for raw_part in raw_parts: # consider any string without either a month or year not a date no_month = False if re.search(months, raw_part.lower()) is None: no_month = True if re.search('[0-9][0-9][0-9][0-9]', raw_part) is None: continue # try to grab a date from the string using an intelligent library try: date = dparser.parse(raw_part, fuzzy=True).date() except: continue # split on either the month or the first number (e.g. 
for a 1/1/2016 date) to get the text before it if no_month: text = re.compile('(\d+)').split(raw_part.lower())[0].strip() else: text = months.split(raw_part.lower())[0].strip() # if we ended up getting some text, add it, else ignore it if text: inner_dates.append((clean_string(text), date)) else: inner_dates.append((None, date)) dates.append(inner_dates) return dates def format_case_name(n): """Applies standard harmonization methods after normalizing with lowercase.""" return titlecase(harmonize(n.lower())) def get_court_object(raw_court, fallback=''): """Get the court object from a string. Searches through `state_pairs`. :param raw_court: A raw court string, parsed from an XML file. :param fallback: If fail to find one, will apply the regexes associated to this key in `SPECIAL_REGEXES`. """ if '.' in raw_court: j = raw_court.find('.') raw_court = raw_court[:j] if ',' in raw_court: j = raw_court.find(',') raw_court = raw_court[:j] for regex, value in state_pairs: if re.search(regex, raw_court): return value if fallback in SPECIAL_REGEXES: for regex, value in SPECIAL_REGEXES: if re.search(regex, raw_court): return value if __name__ == '__main__': parsed = parse_file('/vagrant/flp/columbia_data/opinions/e6054c371b81a4b7.xml') pass
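

# A minimal sketch (not from the parser above) of the fuzzy date parsing that
# parse_dates() relies on: dateutil ignores surrounding text when fuzzy=True,
# which is why the parser only needs to isolate candidate substrings before
# calling it. The sample strings are made up.
import dateutil.parser as dparser

for raw in ['Argued January 11, 2016.', 'Decided 3/4/2015', 'no date here']:
    try:
        # fuzzy=True skips tokens that do not look like part of a date
        print(dparser.parse(raw, fuzzy=True).date())
    except ValueError:
        # strings without a recoverable date are skipped, as in parse_dates()
        pass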
from south.utils import datetime_utils as datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Changing field 'DataPoint.value' db.alter_column('datapoint', 'value', self.gf('django.db.models.fields.FloatField')(null=True)) # Changing field 'HistoricalDataPointEntry.value' db.alter_column(u'datapoints_historicaldatapointentry', 'value', self.gf('django.db.models.fields.FloatField')(null=True)) def backwards(self, orm): # User chose to not deal with backwards NULL issues for 'DataPoint.value' raise RuntimeError("Cannot reverse this migration. 'DataPoint.value' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Changing field 'DataPoint.value' db.alter_column('datapoint', 'value', self.gf('django.db.models.fields.FloatField')()) # User chose to not deal with backwards NULL issues for 'HistoricalDataPointEntry.value' raise RuntimeError("Cannot reverse this migration. 'HistoricalDataPointEntry.value' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Changing field 'HistoricalDataPointEntry.value' db.alter_column(u'datapoints_historicaldatapointentry', 'value', self.gf('django.db.models.fields.FloatField')()) models = { u'auth.group': { 'Meta': {'object_name': 'Group'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, u'auth.permission': { 'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, u'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}), 
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, u'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'datapoints.aggdatapoint': { 'Meta': {'unique_together': "(('region_id', 'campaign_id', 'indicator_id'),)", 'object_name': 'AggDataPoint', 'db_table': "'agg_datapoint'"}, 'cache_job': ('django.db.models.fields.related.ForeignKey', [], {'default': '-1', 'to': u"orm['datapoints.CacheJob']"}), 'campaign_id': ('django.db.models.fields.IntegerField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'indicator_id': ('django.db.models.fields.IntegerField', [], {}), 'region_id': ('django.db.models.fields.IntegerField', [], {}), 'value': ('django.db.models.fields.FloatField', [], {}) }, u'datapoints.baddata': { 'Meta': {'object_name': 'BadData', 'db_table': "'bad_data'"}, 'cache_job': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.CacheJob']"}), 'datapoint': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.DataPoint']"}), 'document': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['source_data.Document']"}), 'error_type': ('django.db.models.fields.CharField', [], {'max_length': '55'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) }, u'datapoints.cachejob': { 'Meta': {'ordering': "('-date_attempted',)", 'object_name': 'CacheJob', 'db_table': "'cache_job'"}, 'date_attempted': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 5, 29, 0, 0)'}), 'date_completed': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_error': ('django.db.models.fields.BooleanField', [], {}), 'response_msg': ('django.db.models.fields.CharField', [], {'max_length': '255'}) }, u'datapoints.calculatedindicatorcomponent': { 'Meta': {'object_name': 'CalculatedIndicatorComponent', 'db_table': "'calculated_indicator_component'"}, 'calculation': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'indicator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'indicator_master'", 'to': u"orm['datapoints.Indicator']"}), 'indicator_component': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'indicator_component'", 'to': u"orm['datapoints.Indicator']"}) }, u'datapoints.campaign': { 'Meta': {'ordering': "('-start_date',)", 'unique_together': "(('office', 'start_date'),)", 'object_name': 'Campaign', 'db_table': "'campaign'"}, 'campaign_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.CampaignType']"}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'end_date': ('django.db.models.fields.DateField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'office': 
('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Office']"}), 'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': "'get_full_name'", 'unique_with': '()'}), 'start_date': ('django.db.models.fields.DateField', [], {}) }, u'datapoints.campaigntype': { 'Meta': {'object_name': 'CampaignType', 'db_table': "'campaign_type'"}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '55'}) }, u'datapoints.columnattributes': { 'Meta': {'object_name': 'ColumnAttributes', 'db_table': "'column_attributes'"}, 'column_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'display_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'display_on_table_flag': ('django.db.models.fields.BooleanField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'table_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}) }, u'datapoints.datapoint': { 'Meta': {'ordering': "['region', 'campaign']", 'unique_together': "(('indicator', 'region', 'campaign'),)", 'object_name': 'DataPoint', 'db_table': "'datapoint'"}, 'cache_job': ('django.db.models.fields.related.ForeignKey', [], {'default': '-1', 'to': u"orm['datapoints.CacheJob']"}), 'campaign': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Campaign']"}), 'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Indicator']"}), 'note': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Region']"}), 'source_datapoint': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.SourceDataPoint']"}), 'value': ('django.db.models.fields.FloatField', [], {'null': 'True'}) }, u'datapoints.datapointabstracted': { 'Meta': {'unique_together': "(('region', 'campaign'),)", 'object_name': 'DataPointAbstracted', 'db_table': "'datapoint_abstracted'"}, 'cache_job': ('django.db.models.fields.related.ForeignKey', [], {'default': '-1', 'to': u"orm['datapoints.CacheJob']"}), 'campaign': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Campaign']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'indicator_json': ('jsonfield.fields.JSONField', [], {}), 'region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Region']"}) }, u'datapoints.datapointcomputed': { 'Meta': {'unique_together': "(('region_id', 'campaign_id', 'indicator_id'),)", 'object_name': 'DataPointComputed', 'db_table': "'datapoint_with_computed'"}, 'cache_job': ('django.db.models.fields.related.ForeignKey', [], {'default': '-1', 'to': u"orm['datapoints.CacheJob']"}), 'campaign_id': ('django.db.models.fields.IntegerField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'indicator_id': ('django.db.models.fields.IntegerField', [], {}), 'region_id': ('django.db.models.fields.IntegerField', [], {}), 'value': ('django.db.models.fields.FloatField', [], {}) }, u'datapoints.expecteddata': { 'Meta': 
{'unique_together': "(('region', 'campaign'),)", 'object_name': 'ExpectedData', 'db_table': "'expected_data'"}, 'campaign': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Campaign']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'parent_region': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ex_parent_region'", 'to': u"orm['datapoints.Region']"}), 'region': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ex_child_region'", 'to': u"orm['datapoints.Region']"}) }, u'datapoints.historicaldatapointentry': { 'Meta': {'ordering': "(u'-history_date', u'-history_id')", 'object_name': 'HistoricalDataPointEntry'}, 'cache_job_id': ('django.db.models.fields.IntegerField', [], {'default': '-1', 'null': 'True', 'db_index': 'True', 'blank': 'True'}), 'campaign_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}), 'changed_by_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}), u'history_date': ('django.db.models.fields.DateTimeField', [], {}), u'history_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), u'history_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}), u'history_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}), u'id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'blank': 'True'}), 'indicator_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}), 'note': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'region_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}), 'source_datapoint_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}), 'value': ('django.db.models.fields.FloatField', [], {'null': 'True'}) }, u'datapoints.indicator': { 'Meta': {'ordering': "('name',)", 'object_name': 'Indicator', 'db_table': "'indicator'"}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_reported': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), 'short_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), 'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '255', 'populate_from': "'name'", 'unique_with': '()'}), 'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Source']"}) }, u'datapoints.indicatorabstracted': { 'Meta': {'object_name': 'IndicatorAbstracted', 'db_table': "'indicator_abstracted'"}, 'bound_json': ('jsonfield.fields.JSONField', [], {}), 'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'short_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'slug': ('django.db.models.fields.CharField', 
[], {'max_length': '255'}), 'tag_json': ('jsonfield.fields.JSONField', [], {}) }, u'datapoints.indicatorbound': { 'Meta': {'object_name': 'IndicatorBound', 'db_table': "'indicator_bound'"}, 'bound_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'direction': ('django.db.models.fields.IntegerField', [], {'default': '1'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Indicator']"}), 'mn_val': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'mx_val': ('django.db.models.fields.FloatField', [], {'null': 'True'}) }, u'datapoints.indicatorpermission': { 'Meta': {'unique_together': "(('group', 'indicator'),)", 'object_name': 'IndicatorPermission', 'db_table': "'indicator_permission'"}, 'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Indicator']"}) }, u'datapoints.indicatortag': { 'Meta': {'object_name': 'IndicatorTag', 'db_table': "'indicator_tag'"}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'parent_tag': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.IndicatorTag']", 'null': 'True'}), 'tag_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}) }, u'datapoints.indicatortotag': { 'Meta': {'object_name': 'IndicatorToTag', 'db_table': "'indicator_to_tag'"}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Indicator']"}), 'indicator_tag': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.IndicatorTag']"}) }, u'datapoints.office': { 'Meta': {'object_name': 'Office', 'db_table': "'office'"}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '55'}) }, u'datapoints.recondata': { 'Meta': {'unique_together': "(('region', 'campaign', 'indicator'),)", 'object_name': 'ReconData', 'db_table': "'recon_data'"}, 'campaign': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Campaign']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Indicator']"}), 'region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Region']"}), 'target_value': ('django.db.models.fields.FloatField', [], {}) }, u'datapoints.region': { 'Meta': {'unique_together': "(('name', 'region_type', 'office'),)", 'object_name': 'Region', 'db_table': "'region'"}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_high_risk': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), 'office': 
('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Office']"}), 'parent_region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Region']", 'null': 'True'}), 'region_code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), 'region_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.RegionType']"}), 'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '255', 'populate_from': "'name'", 'unique_with': '()'}), 'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Source']"}) }, u'datapoints.regionheirarchy': { 'Meta': {'object_name': 'RegionHeirarchy', 'db_table': "'region_heirarchy_cache'", 'managed': 'False'}, 'contained_by_region_id': ('django.db.models.fields.IntegerField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'region_id': ('django.db.models.fields.IntegerField', [], {}), 'region_type_id': ('django.db.models.fields.IntegerField', [], {}) }, u'datapoints.regionpermission': { 'Meta': {'unique_together': "(('user', 'region', 'read_write'),)", 'object_name': 'RegionPermission', 'db_table': "'region_permission'"}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'read_write': ('django.db.models.fields.CharField', [], {'max_length': '1'}), 'region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Region']"}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}) }, u'datapoints.regionpolygon': { 'Meta': {'object_name': 'RegionPolygon', 'db_table': "'region_polygon'"}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'polygon': ('jsonfield.fields.JSONField', [], {}), 'region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Region']", 'unique': 'True'}), 'shape_area': ('django.db.models.fields.FloatField', [], {}), 'shape_len': ('django.db.models.fields.FloatField', [], {}) }, u'datapoints.regiontype': { 'Meta': {'object_name': 'RegionType', 'db_table': "'region_type'"}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '55'}) }, u'datapoints.responsibility': { 'Meta': {'ordering': "('indicator',)", 'unique_together': "(('user', 'indicator', 'region'),)", 'object_name': 'Responsibility', 'db_table': "'responsibility'"}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Indicator']"}), 'region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Region']"}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}) }, u'datapoints.source': { 'Meta': {'object_name': 'Source', 'db_table': "'source'"}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'source_description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), 'source_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '55'}) }, u'datapoints.userabstracted': { 'Meta': {'object_name': 'UserAbstracted', 'db_table': "'user_abstracted'"}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {}), 'email': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'first_name': 
('django.db.models.fields.CharField', [], {'max_length': '255'}), 'group_json': ('jsonfield.fields.JSONField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {}), 'is_staff': ('django.db.models.fields.BooleanField', [], {}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {}), 'last_login': ('django.db.models.fields.DateTimeField', [], {}), 'last_name': ('django.db.models.fields.BooleanField', [], {}), 'region_permission_json': ('jsonfield.fields.JSONField', [], {}), 'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'max_length': '255'}) }, u'datapoints.usergroup': { 'Meta': {'object_name': 'UserGroup', 'db_table': "'auth_user_groups'", 'managed': 'False'}, 'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}) }, u'source_data.document': { 'Meta': {'ordering': "('-id',)", 'unique_together': "(('docfile', 'doc_text'),)", 'object_name': 'Document'}, 'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}), 'doc_text': ('django.db.models.fields.TextField', [], {'null': 'True'}), 'docfile': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'}), 'guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'master_datapoint_count': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Source']"}), 'source_datapoint_count': ('django.db.models.fields.IntegerField', [], {'null': 'True'}) }, 'source_data.processstatus': { 'Meta': {'object_name': 'ProcessStatus'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'status_description': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'status_text': ('django.db.models.fields.CharField', [], {'max_length': '25'}) }, 'source_data.sourcedatapoint': { 'Meta': {'unique_together': "(('source', 'source_guid', 'indicator_string'),)", 'object_name': 'SourceDataPoint', 'db_table': "'source_datapoint'"}, 'campaign_string': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'cell_value': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 5, 29, 0, 0)'}), 'document': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['source_data.Document']"}), 'guid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'indicator_string': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'region_code': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'row_number': ('django.db.models.fields.IntegerField', [], {}), 'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Source']"}), 'source_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'status': 
('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}) } } complete_apps = ['datapoints']
from oopgrade import DataMigration from addons import get_module_resource def up(cursor, installed_version): if not installed_version: return xml_content = '''<?xml version="1.0" encoding="UTF-8" ?> <openerp> <data noupdate="1"> <record model="product.category" id="categ_inversions"> <field name="name">Inversions</field> </record> <record id="apo_journal" model="account.journal"> <field name="code">APO</field> <field name="user_id" ref="base.user_root"/> <field eval="False" name="centralisation"/> <field eval="False" name="group_invoice_lines"/> <field name="type">cash</field> <field name="default_credit_account_id" model="account.account" search="[('code','=','555000000010')]"/> <field name="default_debit_account_id" model="account.account" search="[('code','=','555000000010')]"/> <field name="view_id" ref="account.account_journal_bank_view"/> <field name="sequence_id" ref="account.sequence_journal"/> <field eval="True" name="active"/> <field eval="True" name="update_posted"/> <field name="name">Factures Liquidació Aportacions</field> <field eval="False" name="refund_journal"/> <field eval="True" name="entry_posted"/> </record> <record id="apo_product_template_ae" model="product.template"> <field name="name">Aportacions</field> </record> <record id="apo_product_ae" model="product.product"> <field name="product_tmpl_id" ref="apo_product_template_ae"/> <field name="default_code">APO_AE</field> </record> <record id="apo_investment_payment_mode" model="payment.mode"> <field name="name">APORTACIONS (Enginyers)</field> <field name="type" model="payment_type" search="[('code', '=', 'RECIBO_CSB')]"/> <field name="journal" ref="apo_journal"/> <field name="bank_id" model="res_partner_bank" search="[('partner_id', '=', 1)]"/> <field name="tipo">sepa19</field> <field name="nombre">Som Energia SCCL</field> <field name="sufijo">000</field> <field name="require_bank_account" eval="True"/> <field name="partner_id">1</field> <field name="sepa_creditor_code">ES24000F55091367</field> </record> </data> </openerp> ''' dm = DataMigration(xml_content, cursor, 'som_generationkwh', { 'payment.mode': ['name'] }) dm.migrate() migrate = up
from openerp import models, fields, api


class AccountConfigSettings(models.TransientModel):
    _inherit = 'account.config.settings'

    _afip_ws_selection = (
        lambda self, *args, **kwargs: self.env[
            'account.journal']._get_afip_ws_selection(*args, **kwargs))

    afip_ws = fields.Selection(
        _afip_ws_selection,
        'AFIP WS',
    )

    @api.multi
    def set_chart_of_accounts(self):
        """ We send this value in the context so that it can be used when
        the journals are created. """
        return super(AccountConfigSettings, self.with_context(
            afip_ws=self.afip_ws,
        )).set_chart_of_accounts()
from CRISPResso.CRISPRessoCountCORE import main if __name__ == '__main__': main()
from __future__ import unicode_literals import django.db.models.deletion from django.db import migrations from django.db import models class Migration(migrations.Migration): dependencies = [ ('meinberlin_plans', '0007_make_plan_description_required'), ] operations = [ migrations.AlterField( model_name='plan', name='district', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='meinberlin_maps.MapPreset', verbose_name='District'), ), migrations.AlterField( model_name='plan', name='participation', field=models.SmallIntegerField(choices=[(1, 'Yes'), (0, 'No'), (2, 'Still undecided')], verbose_name='Participation'), ), migrations.AlterField( model_name='plan', name='project', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='a4projects.Project', verbose_name='Project'), ), ]
from __future__ import unicode_literals

AUTHOR = '2016FALLCADP_AG4'
SITENAME = '2016FALLCADP_AG4報告倉儲'

USE_FOLDER_AS_CATEGORY = False

TIMEZONE = 'Asia/Taipei'

DEFAULT_LANG = 'en'

FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None

LINKS = (
    ('40423106', 'https://40423106.github.io/2016fallcadp_hw'),
    ('40423107', 'https://40423107.github.io/2016fallcadp_hw'),
    ('40423108', 'https://40423108.github.io/2016fallcadp_hw'),
    ('40423121', 'https://40423121.github.io/2016fallcadp_hw'),
    ('40423125', 'https://40423125.github.io/2016fallcadp_hw'),
    ('40423140', 'https://40423140.github.io/2016fallcadp_hw'),
)

DEFAULT_PAGINATION = 10

PLUGIN_PATHS = ['plugin']
PLUGINS = ['liquid_tags.notebook', 'summary', 'tipue_search', 'sitemap']

SITEMAP = {
    'format': 'xml',
    'priorities': {
        'articles': 0.5,
        'indexes': 0.5,
        'pages': 0.5
    },
    'changefreqs': {
        'articles': 'monthly',
        'indexes': 'daily',
        'pages': 'monthly'
    }
}

DIRECT_TEMPLATES = ('index', 'tags', 'categories', 'authors', 'archives', 'search')

DISPLAY_CATEGORIES_ON_SIDEBAR = True
DISPLAY_RECENT_POSTS_ON_SIDEBAR = True
DISPLAY_TAGS_ON_SIDEBAR = True
DISPLAY_TAGS_INLINE = True
TAGS_URL = "tags.html"
CATEGORIES_URL = "categories.html"
""" Author : tharindra galahena (inf0_warri0r) Project: image categarizetion using SOM Blog : http://www.inf0warri0r.blogspot.com Date : 14/05/2013 License: Copyright 2013 Tharindra Galahena This is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. * You should have received a copy of the GNU General Public License along with this. If not, see http://www.gnu.org/licenses/. """ from PySide import QtGui from categorize import Ui_categorizer_window import sys import weights from PIL import Image import som class MyWidget(QtGui.QMainWindow, Ui_categorizer_window): def __init__(self, parent=None): super(MyWidget, self).__init__(parent) self.setupUi(self) self.d = weights.data("weights") self.ow, self.oh, self.inpm, self.inpn, self.w, self.m = self.d.load() if self.ow == -1: QtGui.QMessageBox.about(self, "ERROR", "error in weights file") exit(0) self.s = som.som(self.ow * self.oh * 3, self.inpm + self.inpn, 0.01) self.s.init() self.s.put_weights(self.w, self.m) self.file = "" self.pushButton_2.clicked.connect(self.open) self.pushButton.clicked.connect(self.open_file) self.label.setScaledContents(True) def open_file(self): self.file = QtGui.QFileDialog.getOpenFileName(self, 'Open File')[0] self.lineEdit.setText(self.file) qimage = QtGui.QImage(self.file) pix = QtGui.QPixmap.fromImage(qimage) self.label.setPixmap(pix) def open(self): if self.file == "": return 0 img1 = Image.open(self.file) ls = list() p = img1.load() for y in range(0, self.oh): for x in range(0, self.ow): sm = float(p[x, y][0]) + float(p[x, y][1]) + float(p[x, y][2]) if sm == 0.0: sm = 1 ls.append(float(p[x, y][0]) / sm) ls.append(float(p[x, y][1]) / sm) ls.append(float(p[x, y][2]) / sm) ct = int(self.s.find(ls)) if ct < self.inpm: cat = "forest" else: cat = "beach" QtGui.QMessageBox.about(self, "Category", "Category = " + cat) if __name__ == '__main__': app = QtGui.QApplication(sys.argv) window = MyWidget() window.show() sys.exit(app.exec_())
from django.apps import AppConfig class ICNConfig(AppConfig): name = 'channels.icn' def ready(self): from . import content_flow
import xpensemate.db.proxy.abstract_proxy import xpensemate.db.proxy.factory import xpensemate.db.proxy.postgres
import os
import logging

from sqlalchemy import Column, Integer, ForeignKey
from sqlalchemy.orm import relationship, backref
from sqlalchemy import UniqueConstraint

from tmlib.models.base import ExperimentModel, DateMixIn, IdMixIn

logger = logging.getLogger(__name__)


class Cycle(ExperimentModel, DateMixIn, IdMixIn):

    '''A *cycle* represents an individual image acquisition time point
    of a "multiplexing" experiment.

    Attributes
    ----------
    site_shifts: List[tmlib.models.site.SiteShift]
        shifts belonging to the cycle
    '''

    __tablename__ = 'cycles'

    __table_args__ = (UniqueConstraint('index'), )

    #: int: zero-based index in the acquisition sequence
    index = Column(Integer, index=True)

    #: int: ID of parent experiment
    experiment_id = Column(
        Integer,
        ForeignKey('experiment.id', onupdate='CASCADE', ondelete='CASCADE'),
        index=True
    )

    #: tmlib.models.experiment.Experiment: parent experiment
    experiment = relationship(
        'Experiment',
        backref=backref('cycles', cascade='all, delete-orphan')
    )

    def __init__(self, index, experiment_id):
        '''
        Parameters
        ----------
        index: int
            index of the cycle (based on the order of acquisition)
        experiment_id: int
            ID of the parent
            :class:`Experiment <tmlib.models.experiment.Experiment>`
        '''
        self.index = index
        self.experiment_id = experiment_id

    def __repr__(self):
        return '<Cycle(id=%r, index=%r)>' % (self.id, self.index)
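

# A minimal, self-contained sketch (not part of tmlib) of the pattern above:
# a unique, indexed, zero-based ordering column on an ORM model. The table and
# class names here are made up; tmlib wires Cycle to its own session and
# experiment-specific database in practice.
from sqlalchemy import Column, Integer, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()


class MiniCycle(Base):
    __tablename__ = 'mini_cycles'
    id = Column(Integer, primary_key=True)
    # unique acquisition order, like Cycle.index above
    index = Column(Integer, index=True, unique=True)


engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add_all([MiniCycle(index=i) for i in range(3)])
session.commit()
print(session.query(MiniCycle).count())  # 3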
from __future__ import unicode_literals

from django.utils import timezone
from djcelery_transactions import task
from redis_cache import get_redis_connection
from .models import Schedule
from django.core.exceptions import ObjectDoesNotExist


@task(track_started=True, name='check_schedule_task')  # pragma: no cover
def check_schedule_task(sched_id=None):
    """
    See if any schedules are expired and fire appropriately
    """
    logger = check_schedule_task.get_logger()

    if sched_id:
        schedules = [Schedule.objects.get(pk=sched_id)]
    else:
        schedules = Schedule.objects.filter(status='S', is_active=True, next_fire__lt=timezone.now())

    r = get_redis_connection()

    # fire off all expired schedules
    for sched in schedules:
        try:
            # try to acquire a lock
            key = 'fire_schedule_%d' % sched.pk
            if not r.get(key):
                with r.lock(key, timeout=1800):
                    # re-fetch our schedule, it may have been updated
                    sched = Schedule.objects.get(id=sched.pk, status='S', is_active=True,
                                                 next_fire__lt=timezone.now())

                    if sched and sched.update_schedule():
                        broadcast = sched.get_broadcast()
                        trigger = sched.get_trigger()

                        print "Firing %d" % sched.pk

                        if broadcast:
                            broadcast.fire()
                        elif trigger:
                            trigger.fire()
                        else:
                            print "Schedule had nothing interesting to fire"

                        # if it's a one-time schedule, reset it so it doesn't fire again
                        if sched.repeat_period == 'O':
                            sched.reset()

        except ObjectDoesNotExist:
            # this means the schedule already got fired, which is perfectly ok, ignore
            pass

        except:  # pragma: no cover
            logger.error("Error running schedule: %s" % sched.pk, exc_info=True)
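

# A minimal sketch (outside the task above) of the get-then-lock pattern used
# to stop two workers from firing the same schedule: the cheap GET avoids
# blocking on the lock, and the lock itself makes the check race-safe. It
# needs a reachable redis server; the key name is hypothetical.
from redis import Redis

r = Redis()
key = 'fire_schedule_42'
if not r.get(key):  # skip quickly if someone already holds the lock
    with r.lock(key, timeout=1800):
        pass  # re-fetch the row and do the work while holding the lock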
from sst.actions import ( assert_element, assert_title_contains, click_button, get_element, go_to, wait_for, write_textfield, ) from u1testutils import mail from u1testutils.sso import mail as sso_mail from u1testutils.sst import config from acceptance import helpers, urls config.set_base_url_from_env() NAME = 'Some Name' primary_email = helpers.register_account(displayname=NAME) secondary_email = mail.make_unique_test_email_address() go_to(urls.EMAILS) wait_for(assert_title_contains, "'s email addresses") write_textfield('id_newemail', secondary_email) click_button(get_element(name='continue')) link = sso_mail.get_verification_link_for_address(secondary_email) go_to(link) click_button(get_element(name='continue')) wait_for(assert_element, **{'data-qa-id': 'edit_account'})
import datetime
import logging

from django.contrib.auth.models import User
from django.contrib.gis.geoip2 import GeoIP2
from django.core.files.images import get_image_dimensions
from django.db.models import Count
from django.utils import timezone

logger = logging.getLogger('apps')


def unique_items(list_with_possible_duplicates):
    """
    Given an initial list, returns a list but without duplicates
    """
    try:
        return list(set(list_with_possible_duplicates))
    except TypeError:
        # We have a list which is not flat, use the old way to remove duplicates
        found = []
        for i in list_with_possible_duplicates:
            if i not in found:
                found.append(i)
        return found


ALPHABET = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def base26_encode(num, alphabet=ALPHABET):
    """Encode a number in Base X

    `num`: The number to encode
    `alphabet`: The alphabet to use for encoding
    """
    if num == 0:
        return alphabet[0]

    arr = []
    base = len(alphabet)
    while num:
        rem = num % base
        num = num // base
        arr.append(alphabet[rem])
    arr.reverse()

    return ''.join(arr)


def base26_decode(string, alphabet=ALPHABET):
    """Decode a Base X encoded string into the number

    Arguments:
    - `string`: The encoded string
    - `alphabet`: The alphabet to use for encoding
    """
    base = len(alphabet)
    strlen = len(string)
    num = 0

    idx = 0
    for char in string:
        power = (strlen - (idx + 1))
        num += alphabet.index(char) * (base ** power)
        idx += 1

    return num


def get_client_ip(request):
    x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
    if x_forwarded_for:
        ip = x_forwarded_for.split(',')[0]
    else:
        ip = request.META.get('REMOTE_ADDR')
    return ip


def get_client_country_code(request):
    try:
        DEBUG_COUNTRY = request.GET.get('DEBUG_COUNTRY', None)
        if DEBUG_COUNTRY:
            return DEBUG_COUNTRY
    except AttributeError:
        pass

    geoip2 = GeoIP2()

    try:
        country = geoip2.country_code(get_client_ip(request))
        if country is None:
            country = "UNKNOWN"
        return country
    except Exception:
        return "UNKNOWN"


def get_european_union_country_codes():
    # ISO 3166-1 alpha-2 codes
    return (
        'at', 'be', 'bg', 'cy', 'cz', 'de', 'dk', 'ee', 'es', 'fi', 'fr',
        'gr', 'hr', 'hu', 'ie', 'it', 'lt', 'lu', 'lv', 'mt', 'nl', 'pl',
        'pt', 'ro', 'se', 'si', 'sk',
    )


def inactive_accounts():
    """Gets all the user profiles of users with at least one image, who haven't uploaded in over 2 months"""

    from astrobin.models import Image, UserProfile

    recipient_pks = []
    profiles = UserProfile.objects \
        .annotate(num_images=Count("user__image")) \
        .filter(num_images__gt=0)

    two_months_ago = timezone.now() - datetime.timedelta(days=60)

    for profile in profiles:
        images = Image.objects_including_wip.filter(user=profile.user).order_by("-uploaded")
        if images.count() > 0:
            last_uploaded = images[0].uploaded
            if last_uploaded < two_months_ago \
                    and (profile.inactive_account_reminder_sent is None or
                         profile.inactive_account_reminder_sent < two_months_ago):
                # This user has at least 1 upload but all of them are older than 2 months
                recipient_pks.append(profile.pk)

    return UserProfile.objects.filter(pk__in=recipient_pks)


def never_activated_accounts():
    """Gets all the users who created an account over 2 weeks ago but never activated it."""
    two_weeks_ago = timezone.now() - datetime.timedelta(days=14)
    return User.objects.filter(
        is_active=False,
        date_joined__lt=two_weeks_ago,
        userprofile__never_activated_account_reminder_sent__isnull=True,
        userprofile__deleted__isnull=True,
    )


def never_activated_accounts_to_be_deleted():
    """Gets all the users who created an account over 3 weeks ago but never activated it."""
    three_weeks_ago = timezone.now() - datetime.timedelta(days=21)
    return User.objects.filter(
        is_active=False,
date_joined__lt=three_weeks_ago, userprofile__never_activated_account_reminder_sent__isnull=False, userprofile__deleted__isnull=True, ) def uniq(seq): # Not order preserving keys = {} for e in seq: keys[e] = 1 return list(keys.keys()) def uniq_id_tuple(seq): seen = set() ret = [] for e in seq: id = e[0] if id not in seen: seen.add(id) ret.append(e) return ret def get_image_resolution(image): try: w, h = image.w, image.h if not (w and h): w, h = get_image_dimensions(image.image_file) except (FileNotFoundError, TypeError) as e: # This might happen in unit tests logger.warning("utils.get_image_resolution: unable to get image dimensions for %d: %s" % (image.pk, str(e))) w, h = 0, 0 return w, h def dec_decimal_precision_from_pixel_scale(pixel_scale: float = 0) -> int: if pixel_scale == 0 or pixel_scale > 10: return 0 if pixel_scale > 1: return 1 return 2 def ra_decimal_precision_from_pixel_scale(pixel_scale: float = 0) -> int: return dec_decimal_precision_from_pixel_scale(pixel_scale) + 1 def number_unit_decimals(value, unit, precision): if precision == 0: value = f'{int(round(value))}{unit}' else: decimal_part = ("%s" % round((value - int(value)) * pow(10, precision))).ljust(precision, '0') value = f'{int(value)}{unit}.{decimal_part}' return value def number_unit_decimals_html(value, unit, precision): if precision == 0: value = '%s<span class="symbol">%s</span>' % (("%d" % value).rjust(2, '0'), unit) else: decimal_part = ("%s" % round((value - int(value)) * pow(10, precision))).ljust(precision, '0') value = '%s<span class="symbol">%s</span>.%s' % (("%d" % value).rjust(2, '0'), unit, decimal_part) return value def decimal_to_hours_minutes_seconds(value): value = abs(value) hours = int(value / 15) minutes = int(((value / 15) - hours) * 60) seconds = ((((value / 15) - hours) * 60) - minutes) * 60 return hours, minutes, seconds def decimal_to_hours_minutes_seconds_string(value, hour_symbol="h", minute_symbol="m", second_symbol="s", precision=0): hours, minutes, seconds = decimal_to_hours_minutes_seconds(value) is_positive = value >= 0 seconds = number_unit_decimals(seconds, second_symbol, precision) return f'{"" if is_positive else "-"}{hours}{hour_symbol} {minutes}{minute_symbol} {seconds}' def decimal_to_hours_minutes_seconds_html(value, hour_symbol="h", minute_symbol="m", second_symbol="s", precision=0): hours, minutes, seconds = decimal_to_hours_minutes_seconds(value) is_positive = value >= 0 seconds = number_unit_decimals_html(seconds, second_symbol, precision) hours = '%s%s<span class="symbol">%s</span>' % ("" if is_positive else "-", ("%d"% hours).rjust(2, '0'), hour_symbol) minutes = '%s<span class="symbol">%s</span>' % (("%d" % minutes).rjust(2, '0'), minute_symbol) return f'{hours}{minutes}{seconds}' def decimal_to_degrees_minutes_seconds(value): value = abs(value) minutes, seconds = divmod(value * 3600, 60) degrees, minutes = divmod(minutes, 60) return degrees, minutes, seconds def decimal_to_degrees_minutes_seconds_string(value, degree_symbol="°", minute_symbol="&prime;", second_symbol="&Prime;", precision=0): is_positive = value >= 0 degrees, minutes, seconds = decimal_to_degrees_minutes_seconds(value) seconds = number_unit_decimals(seconds, second_symbol, precision) return f'{"+" if is_positive else "-"}{int(degrees)}{degree_symbol} {int(minutes)}{minute_symbol} {seconds}' def decimal_to_degrees_minutes_seconds_html(value, degree_symbol="°", minute_symbol="′", second_symbol="″", precision=0): is_positive = value >= 0 degrees, minutes, seconds = 
decimal_to_degrees_minutes_seconds(value)
    seconds = number_unit_decimals_html(seconds, second_symbol, precision)
    degrees = '%s%s<span class="symbol">%s</span>' % ("+" if is_positive else "-", ("%d" % degrees).rjust(2, '0'), degree_symbol)
    minutes = '%s<span class="symbol">%s</span>' % (("%d" % minutes).rjust(2, '0'), minute_symbol)

    return f'{degrees}{minutes}{seconds}'


def degrees_minutes_seconds_to_decimal_degrees(degrees, minutes, seconds, direction):
    if seconds is None:
        seconds = 0

    if minutes is None:
        minutes = 0

    if degrees is None:
        degrees = 0

    dd = float(degrees) + float(minutes) / 60 + float(seconds) / (60 * 60)

    # By convention, south and west coordinates are negative.
    if direction == 'W' or direction == 'S':
        dd *= -1

    return dd
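

# A minimal sketch (separate from the helpers above) of the divmod chain
# behind decimal_to_degrees_minutes_seconds(). The sample value is made up
# and chosen to be exactly representable in binary.
value = 12.25
minutes, seconds = divmod(abs(value) * 3600, 60)
degrees, minutes = divmod(minutes, 60)
print(int(degrees), int(minutes), seconds)  # 12 15 0.0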
import argparse
import os
import sys

from openerp.cli import Command
from openerp.modules.module import get_modules

from . import utils


def print_if(flag, text):
    if flag:
        print(text)


class Get(Command):
    """ Get Odoo modules """

    def get_module(self, module, env_root, exclude=None, verbose=False,
                   quiet=False):
        """ Download and activate module and dependencies """
        # Modules already visited are skipped
        if exclude and module in exclude:
            return True
        exclude = exclude if exclude is not None else ['base']
        exclude.append(module)

        # Modules already in the path are skipped
        if module in get_modules():
            print_if(verbose, '. %s is available from addons path.' % module)
            return True

        if module in os.listdir(env_root):
            # Modules in the env root are already active
            module_path = os.path.join(env_root, module)
            print_if(verbose,
                     '. %s already active (at %s)' % (module, module_path))
        else:
            # Modules in the local cache are available to activate
            path = os.path.join(env_root, utils.LOCAL_CACHE)
            module_path = utils.crawl_modules(path).get(module)

            if not module_path:
                # Modules not in the local cache are downloaded
                index = utils.indexed_modules(path)
                if module not in index:
                    print_if(not quiet,
                             '! %s was not found in the index!' % module)
                    return False
                utils.download_repo(path, index[module])
                module_path = utils.crawl_modules(path).get(module)

            if not module_path:
                print('! ERROR: %s not found on the repo!' % module)
                return False

        # Symlink module into current environment
        if module not in os.listdir(env_root):
            target_path = os.path.join(env_root, module)
            os.symlink(module_path, target_path)
            print_if(not quiet,
                     '+ %s activated (from %s)' % (module, module_path))

        # Get dependencies
        try:
            manifest = utils.load_manifest(module_path)
        except IOError:
            manifest = {}
        depends = manifest.get('depends', [])
        for m in depends:
            if not self.get_module(m, env_root, exclude=exclude,
                                   verbose=verbose, quiet=quiet):
                return False
        return True

    def get_modules(self, modules, env_root, exclude=None, verbose=None,
                    quiet=None):
        exclude = exclude or ['base']
        for m in modules:
            self.get_module(m, env_root, exclude, verbose=verbose,
                            quiet=quiet)
        return True

    def run(self, cmdargs):
        parser = argparse.ArgumentParser(
            prog="%s get" % os.path.basename(sys.argv[0]),
            description=self.__doc__)
        parser.add_argument(
            'modules', nargs='+', help="Modules to get")
        parser.add_argument('-q', '--quiet', dest='quiet',
                            action='store_true',
                            help='Suppress information messages')
        parser.add_argument('-v', '--verbose', dest='verbose',
                            action='store_true',
                            help='Verbose messages')

        if not cmdargs:
            sys.exit(parser.print_help())
        args = parser.parse_args(args=cmdargs)

        env_root = os.getcwd()
        self.get_modules(args.modules, env_root, verbose=args.verbose,
                         quiet=args.quiet)
        print('Done.')
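

# A minimal sketch (not part of the command above) of the visited-set
# recursion Get.get_module() uses so each module and its dependencies are
# activated exactly once. The dependency graph here is entirely made up.
DEPENDS = {
    'sale': ['account', 'stock'],
    'account': ['base'],
    'stock': ['base'],
    'base': [],
}


def activate(module, seen):
    if module in seen:  # already visited or explicitly excluded
        return
    seen.add(module)
    print('activating %s' % module)
    for dep in DEPENDS.get(module, []):
        activate(dep, seen)


activate('sale', set(['base']))  # 'base' is pre-excluded, as above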
"""Juju GUI deploy helper.""" from __future__ import print_function import json import logging import os import tempfile from charmhelpers.contrib.charmhelpers import make_charm_config_file from helpers import ( command, get_password, juju, wait_for_unit, ) DEFAULT_SERIES = 'xenial' rsync = command('rsync', '-a', '--exclude', '.git', '--exclude', '.bzr', '--exclude', '/tests') def juju_deploy( charm_name, app_name=None, options=None, force_machine=None, charm_source=None, series=None): """Deploy and expose the charm. Return the first unit's public address. Also wait until the service is exposed and the first unit started. If app_name is None, use the name of the charm. If options are provided, they will be used when deploying the charm. If force_machine is not None, create the unit in the specified machine. If charm_source is None, dynamically retrieve the charm source directory. If series is None, the series specified in the SERIES environment variable is used if found, defaulting to "xenial". """ # Note: this function is used by both the functional tests and # "make deploy": see the "if main" section below. if charm_source is None: # Dynamically retrieve the charm source based on the path of this file. charm_source = os.path.join(os.path.dirname(__file__), '..') if series is None: series = os.getenv('SERIES', '').strip() or DEFAULT_SERIES logging.debug('setting up the charm') path = tempfile.mkdtemp() rsync(charm_source, path) args = ['deploy', '--series', series] if app_name is None: app_name = charm_name if options is not None: config_file = make_charm_config_file({app_name: options}) args.extend(['--config', config_file.name]) if force_machine is not None: args.extend(['--to', str(force_machine)]) args.append(path) args.append(app_name) logging.debug('deploying {} (series: {}) from {}'.format( app_name, series, path)) juju(*args) logging.debug('exposing {}'.format(app_name)) juju('expose', app_name) logging.debug('waiting for the unit to be ready') return wait_for_unit(app_name) if __name__ == '__main__': logging.getLogger().setLevel(logging.DEBUG) unit = juju_deploy('juju-gui') print(json.dumps(unit, indent=2)) print('password: {}'.format(get_password()))
""" Unit tests for video utils. """ from unittest import TestCase from datetime import datetime import ddt import pytz import requests from django.conf import settings from django.core.files.uploadedfile import UploadedFile from django.test.utils import override_settings from edxval.api import ( create_profile, create_video, get_course_video_image_url, update_video_image ) from openedx.core.djangoapps.profile_images.tests.helpers import make_image_file from mock import patch from contentstore.tests.utils import CourseTestCase from contentstore.video_utils import ( download_youtube_video_thumbnail, scrape_youtube_thumbnail, validate_video_image, YOUTUBE_THUMBNAIL_SIZES ) class ValidateVideoImageTestCase(TestCase): """ Tests for `validate_video_image` method. """ def test_invalid_image_file_info(self): """ Test that when no file information is provided to validate_video_image, it gives proper error message. """ error = validate_video_image({}) self.assertEquals(error, 'The image must have name, content type, and size information.') def test_corrupt_image_file(self): """ Test that when corrupt file is provided to validate_video_image, it gives proper error message. """ with open(settings.MEDIA_ROOT + '/test-corrupt-image.png', 'w+') as image_file: uploaded_image_file = UploadedFile( image_file, content_type='image/png', size=settings.VIDEO_IMAGE_SETTINGS['VIDEO_IMAGE_MIN_BYTES'] ) error = validate_video_image(uploaded_image_file) self.assertEquals(error, 'There is a problem with this image file. Try to upload a different file.') @ddt.ddt class ScrapeVideoThumbnailsTestCase(CourseTestCase): """ Test cases for scraping video thumbnails from youtube. """ def setUp(self): super(ScrapeVideoThumbnailsTestCase, self).setUp() course_ids = [unicode(self.course.id)] profiles = ['youtube'] created = datetime.now(pytz.utc) previous_uploads = [ { 'edx_video_id': 'test1', 'client_video_id': 'test1.mp4', 'duration': 42.0, 'status': 'upload', 'courses': course_ids, 'encoded_videos': [], 'created': created }, { 'edx_video_id': 'test-youtube-video-1', 'client_video_id': 'test-youtube-id.mp4', 'duration': 128.0, 'status': 'file_complete', 'courses': course_ids, 'created': created, 'encoded_videos': [ { 'profile': 'youtube', 'url': '3_yD_cEKoCk', 'file_size': 1600, 'bitrate': 100, } ], }, { 'edx_video_id': 'test-youtube-video-2', 'client_video_id': 'test-youtube-id.mp4', 'image': 'image2.jpg', 'duration': 128.0, 'status': 'file_complete', 'courses': course_ids, 'created': created, 'encoded_videos': [ { 'profile': 'youtube', 'url': '3_yD_cEKoCk', 'file_size': 1600, 'bitrate': 100, } ], }, ] for profile in profiles: create_profile(profile) for video in previous_uploads: create_video(video) # Create video images. with make_image_file() as image_file: update_video_image( 'test-youtube-video-2', unicode(self.course.id), image_file, 'image.jpg' ) def mocked_youtube_thumbnail_response( self, mocked_content=None, error_response=False, image_width=settings.VIDEO_IMAGE_MIN_WIDTH, image_height=settings.VIDEO_IMAGE_MIN_HEIGHT ): """ Returns a mocked youtube thumbnail response. 
""" image_content = '' with make_image_file(dimensions=(image_width, image_height), ) as image_file: image_content = image_file.read() if mocked_content or error_response: image_content = mocked_content mocked_response = requests.Response() mocked_response.status_code = requests.codes.ok if image_content else requests.codes.not_found # pylint: disable=no-member mocked_response._content = image_content # pylint: disable=protected-access mocked_response.headers = {'content-type': 'image/jpeg'} return mocked_response @override_settings(AWS_ACCESS_KEY_ID='test_key_id', AWS_SECRET_ACCESS_KEY='test_secret') @patch('requests.get') @ddt.data( ( { 'maxresdefault': 'maxresdefault-result-image-content', 'sddefault': 'sddefault-result-image-content', 'hqdefault': 'hqdefault-result-image-content', 'mqdefault': 'mqdefault-result-image-content', 'default': 'default-result-image-content' }, 'maxresdefault-result-image-content' ), ( { 'maxresdefault': '', 'sddefault': 'sddefault-result-image-content', 'hqdefault': 'hqdefault-result-image-content', 'mqdefault': 'mqdefault-result-image-content', 'default': 'default-result-image-content' }, 'sddefault-result-image-content' ), ( { 'maxresdefault': '', 'sddefault': '', 'hqdefault': 'hqdefault-result-image-content', 'mqdefault': 'mqdefault-result-image-content', 'default': 'default-result-image-content' }, 'hqdefault-result-image-content' ), ( { 'maxresdefault': '', 'sddefault': '', 'hqdefault': '', 'mqdefault': 'mqdefault-result-image-content', 'default': 'default-result-image-content' }, 'mqdefault-result-image-content' ), ( { 'maxresdefault': '', 'sddefault': '', 'hqdefault': '', 'mqdefault': '', 'default': 'default-result-image-content' }, 'default-result-image-content' ), ) @ddt.unpack def test_youtube_video_thumbnail_download( self, thumbnail_content_data, expected_thumbnail_content, mocked_request ): """ Test that we get highest resolution video thumbnail available from youtube. """ # Mock get youtube thumbnail responses. def mocked_youtube_thumbnail_responses(resolutions): """ Returns a list of mocked responses containing youtube thumbnails. """ mocked_responses = [] for resolution in YOUTUBE_THUMBNAIL_SIZES: mocked_content = resolutions.get(resolution, '') error_response = False if mocked_content else True mocked_responses.append(self.mocked_youtube_thumbnail_response(mocked_content, error_response)) return mocked_responses mocked_request.side_effect = mocked_youtube_thumbnail_responses(thumbnail_content_data) thumbnail_content, thumbnail_content_type = download_youtube_video_thumbnail('test-yt-id') # Verify that we get the expected thumbnail content. self.assertEqual(thumbnail_content, expected_thumbnail_content) self.assertEqual(thumbnail_content_type, 'image/jpeg') @override_settings(AWS_ACCESS_KEY_ID='test_key_id', AWS_SECRET_ACCESS_KEY='test_secret') @patch('requests.get') def test_scrape_youtube_thumbnail(self, mocked_request): """ Test that youtube thumbnails are correctly scrapped. """ course_id = unicode(self.course.id) video1_edx_video_id = 'test-youtube-video-1' video2_edx_video_id = 'test-youtube-video-2' # Mock get youtube thumbnail responses. mocked_request.side_effect = [self.mocked_youtube_thumbnail_response()] # Verify that video1 has no image attached. video1_image_url = get_course_video_image_url(course_id=course_id, edx_video_id=video1_edx_video_id) self.assertIsNone(video1_image_url) # Verify that video2 has already image attached. 
video2_image_url = get_course_video_image_url(course_id=course_id, edx_video_id=video2_edx_video_id) self.assertIsNotNone(video2_image_url) # Scrape video thumbnails. scrape_youtube_thumbnail(course_id, video1_edx_video_id, 'test-yt-id') scrape_youtube_thumbnail(course_id, video2_edx_video_id, 'test-yt-id2') # Verify that now video1 image is attached. video1_image_url = get_course_video_image_url(course_id=course_id, edx_video_id=video1_edx_video_id) self.assertIsNotNone(video1_image_url) # Also verify that video2's image is not updated. video2_image_url_latest = get_course_video_image_url(course_id=course_id, edx_video_id=video2_edx_video_id) self.assertEqual(video2_image_url, video2_image_url_latest) @ddt.data( ( 100, 100, False ), ( 640, 360, True ) ) @override_settings(AWS_ACCESS_KEY_ID='test_key_id', AWS_SECRET_ACCESS_KEY='test_secret') @patch('contentstore.video_utils.LOGGER') @patch('requests.get') @ddt.unpack def test_scrape_youtube_thumbnail_logging( self, image_width, image_height, is_success, mocked_request, mock_logger ): """ Test that we get correct logs in case of failure as well as success. """ course_id = unicode(self.course.id) video1_edx_video_id = 'test-youtube-video-1' mocked_request.side_effect = [ self.mocked_youtube_thumbnail_response( image_width=image_width, image_height=image_height ) ] scrape_youtube_thumbnail(course_id, video1_edx_video_id, 'test-yt-id') if is_success: mock_logger.info.assert_called_with( u'VIDEOS: Scraping youtube video thumbnail for edx_video_id [%s] in course [%s]', video1_edx_video_id, course_id ) else: mock_logger.info.assert_called_with( u'VIDEOS: Scraping youtube video thumbnail failed for edx_video_id [%s] in course [%s] with error: %s', video1_edx_video_id, course_id, 'This image file must be larger than 2 KB.' ) @ddt.data( ( None, 'image/jpeg', u'This image file must be larger than {image_min_size}.'.format( image_min_size=settings.VIDEO_IMAGE_MIN_FILE_SIZE_KB ) ), ( 'dummy-content', None, u'This image file type is not supported. Supported file types are {supported_file_formats}.'.format( supported_file_formats=settings.VIDEO_IMAGE_SUPPORTED_FILE_FORMATS.keys() ) ), ( None, None, u'This image file type is not supported. Supported file types are {supported_file_formats}.'.format( supported_file_formats=settings.VIDEO_IMAGE_SUPPORTED_FILE_FORMATS.keys() ) ), ) @patch('contentstore.video_utils.LOGGER') @patch('contentstore.video_utils.download_youtube_video_thumbnail') @ddt.unpack def test_no_video_thumbnail_downloaded( self, image_content, image_content_type, error_message, mock_download_youtube_thumbnail, mock_logger ): """ Test that when no thumbnail is downloaded, video image is not updated. """ mock_download_youtube_thumbnail.return_value = image_content, image_content_type course_id = unicode(self.course.id) video1_edx_video_id = 'test-youtube-video-1' # Verify that video1 has no image attached. video1_image_url = get_course_video_image_url(course_id=course_id, edx_video_id=video1_edx_video_id) self.assertIsNone(video1_image_url) # Scrape video thumbnail. scrape_youtube_thumbnail(course_id, video1_edx_video_id, 'test-yt-id') mock_logger.info.assert_called_with( u'VIDEOS: Scraping youtube video thumbnail failed for edx_video_id [%s] in course [%s] with error: %s', video1_edx_video_id, course_id, error_message ) # Verify that no image is attached to video1. video1_image_url = get_course_video_image_url(course_id=course_id, edx_video_id=video1_edx_video_id) self.assertIsNone(video1_image_url)
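# For reference, the resolution-fallback behaviour exercised by
# test_youtube_video_thumbnail_download above amounts to trying each entry of
# YOUTUBE_THUMBNAIL_SIZES in order (highest resolution first) and keeping the
# first non-empty response. A minimal sketch, assuming a hypothetical
# thumbnail_url(video_id, size) helper (the real logic lives in
# contentstore.video_utils.download_youtube_video_thumbnail):
#
#     for size in YOUTUBE_THUMBNAIL_SIZES:
#         response = requests.get(thumbnail_url('test-yt-id', size))
#         if response.status_code == requests.codes.ok and response.content:
#             return response.content, response.headers['content-type']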
from __future__ import annotations from typing import Iterable, Optional, Set from sqlalchemy.orm import Query as BaseQuery from baseframe import __ from coaster.sqlalchemy import Query, StateManager, auto_init_default, with_roles from coaster.utils import LabeledEnum from . import ( BaseScopedIdNameMixin, Commentset, MarkdownColumn, Project, TimestampMixin, TSVectorType, User, UuidMixin, db, ) from .comment import SET_TYPE from .helpers import add_search_trigger, reopen, visual_field_delimiter __all__ = ['Update'] class UPDATE_STATE(LabeledEnum): # noqa: N801 DRAFT = (0, 'draft', __("Draft")) PUBLISHED = (1, 'published', __("Published")) DELETED = (2, 'deleted', __("Deleted")) class VISIBILITY_STATE(LabeledEnum): # noqa: N801 PUBLIC = (0, 'public', __("Public")) RESTRICTED = (1, 'restricted', __("Restricted")) class Update(UuidMixin, BaseScopedIdNameMixin, TimestampMixin, db.Model): __tablename__ = 'update' _visibility_state = db.Column( 'visibility_state', db.SmallInteger, StateManager.check_constraint('visibility_state', VISIBILITY_STATE), default=VISIBILITY_STATE.PUBLIC, nullable=False, index=True, ) visibility_state = StateManager( '_visibility_state', VISIBILITY_STATE, doc="Visibility state" ) _state = db.Column( 'state', db.SmallInteger, StateManager.check_constraint('state', UPDATE_STATE), default=UPDATE_STATE.DRAFT, nullable=False, index=True, ) state = StateManager('_state', UPDATE_STATE, doc="Update state") user_id = db.Column(None, db.ForeignKey('user.id'), nullable=False, index=True) user = with_roles( db.relationship( User, backref=db.backref('updates', lazy='dynamic'), foreign_keys=[user_id] ), read={'all'}, grants={'creator'}, ) project_id = db.Column( None, db.ForeignKey('project.id'), nullable=False, index=True ) project = with_roles( db.relationship(Project, backref=db.backref('updates', lazy='dynamic')), read={'all'}, datasets={'primary'}, grants_via={ None: { 'editor': {'editor', 'project_editor'}, 'participant': {'reader', 'project_participant'}, 'crew': {'reader', 'project_crew'}, } }, ) parent = db.synonym('project') body = MarkdownColumn('body', nullable=False) #: Update number, for Project updates, assigned when the update is published number = with_roles( db.Column(db.Integer, nullable=True, default=None), read={'all'} ) #: Like pinned tweets. You can keep posting updates, #: but might want to pin an update from a week ago. 
is_pinned = with_roles( db.Column(db.Boolean, default=False, nullable=False), read={'all'} ) published_by_id = db.Column( None, db.ForeignKey('user.id'), nullable=True, index=True ) published_by = with_roles( db.relationship( User, backref=db.backref('published_updates', lazy='dynamic'), foreign_keys=[published_by_id], ), read={'all'}, ) published_at = with_roles( db.Column(db.TIMESTAMP(timezone=True), nullable=True), read={'all'} ) deleted_by_id = db.Column(None, db.ForeignKey('user.id'), nullable=True, index=True) deleted_by = with_roles( db.relationship( User, backref=db.backref('deleted_updates', lazy='dynamic'), foreign_keys=[deleted_by_id], ), read={'reader'}, ) deleted_at = with_roles( db.Column(db.TIMESTAMP(timezone=True), nullable=True), read={'reader'} ) edited_at = with_roles( db.Column(db.TIMESTAMP(timezone=True), nullable=True), read={'all'} ) commentset_id = db.Column(None, db.ForeignKey('commentset.id'), nullable=False) commentset = with_roles( db.relationship( Commentset, uselist=False, lazy='joined', cascade='all', single_parent=True, backref=db.backref('update', uselist=False), ), read={'all'}, ) search_vector = db.deferred( db.Column( TSVectorType( 'name', 'title', 'body_text', weights={'name': 'A', 'title': 'A', 'body_text': 'B'}, regconfig='english', hltext=lambda: db.func.concat_ws( visual_field_delimiter, Update.title, Update.body_html ), ), nullable=False, ) ) __roles__ = { 'all': { 'read': {'name', 'title', 'urls'}, 'call': {'features', 'visibility_state', 'state', 'url_for'}, }, 'reader': {'read': {'body'}}, } __datasets__ = { 'primary': { 'name', 'title', 'number', 'body', 'body_text', 'body_html', 'published_at', 'edited_at', 'user', 'is_pinned', 'is_restricted', 'is_currently_restricted', 'visibility_label', 'state_label', 'urls', 'uuid_b58', }, 'without_parent': { 'name', 'title', 'number', 'body', 'body_text', 'body_html', 'published_at', 'edited_at', 'user', 'is_pinned', 'is_restricted', 'is_currently_restricted', 'visibility_label', 'state_label', 'urls', 'uuid_b58', }, 'related': {'name', 'title', 'urls'}, } def __init__(self, **kwargs) -> None: super().__init__(**kwargs) self.commentset = Commentset(settype=SET_TYPE.UPDATE) def __repr__(self) -> str: """Represent :class:`Update` as a string.""" return '<Update "{title}" {uuid_b58}>'.format( title=self.title, uuid_b58=self.uuid_b58 ) @property def visibility_label(self) -> str: return self.visibility_state.label.title with_roles(visibility_label, read={'all'}) @property def state_label(self) -> str: return self.state.label.title with_roles(state_label, read={'all'}) state.add_conditional_state( 'UNPUBLISHED', state.DRAFT, lambda update: update.published_at is None, lambda update: update.published_at.is_(None), label=('unpublished', __("Unpublished")), ) state.add_conditional_state( 'WITHDRAWN', state.DRAFT, lambda update: update.published_at is not None, lambda update: update.published_at.isnot(None), label=('withdrawn', __("Withdrawn")), ) @with_roles(call={'editor'}) @state.transition(state.DRAFT, state.PUBLISHED) def publish(self, actor: User) -> bool: first_publishing = False self.published_by = actor if self.published_at is None: first_publishing = True self.published_at = db.func.utcnow() if self.number is None: self.number = ( db.select([db.func.coalesce(db.func.max(Update.number), 0) + 1]) .where(Update.project == self.project) .scalar_subquery() ) return first_publishing @with_roles(call={'editor'}) @state.transition(state.PUBLISHED, state.DRAFT) def undo_publish(self) -> None: pass 
@with_roles(call={'creator', 'editor'}) @state.transition(None, state.DELETED) def delete(self, actor: User) -> None: if self.state.UNPUBLISHED: # If it was never published, hard delete it db.session.delete(self) else: # If not, then soft delete self.deleted_by = actor self.deleted_at = db.func.utcnow() @with_roles(call={'editor'}) @state.transition(state.DELETED, state.DRAFT) def undo_delete(self) -> None: self.deleted_by = None self.deleted_at = None @with_roles(call={'editor'}) @visibility_state.transition(visibility_state.RESTRICTED, visibility_state.PUBLIC) def make_public(self) -> None: pass @with_roles(call={'editor'}) @visibility_state.transition(visibility_state.PUBLIC, visibility_state.RESTRICTED) def make_restricted(self) -> None: pass @property def is_restricted(self) -> bool: return bool(self.visibility_state.RESTRICTED) @is_restricted.setter def is_restricted(self, value: bool) -> None: if value and self.visibility_state.PUBLIC: self.make_restricted() elif not value and self.visibility_state.RESTRICTED: self.make_public() with_roles(is_restricted, read={'all'}) @property def is_currently_restricted(self) -> bool: return self.is_restricted and not self.current_roles.reader with_roles(is_currently_restricted, read={'all'}) def roles_for(self, actor: Optional[User], anchors: Iterable = ()) -> Set: roles = super().roles_for(actor, anchors) if not self.visibility_state.RESTRICTED: # Everyone gets reader role when the post is not restricted. # If it is, 'reader' must be mapped from 'participant' in the project, # specified above in the grants_via annotation on project. roles.add('reader') return roles @classmethod def all_published_public(cls) -> Query: return cls.query.join(Project).filter( Project.state.PUBLISHED, cls.state.PUBLISHED, cls.visibility_state.PUBLIC ) @with_roles(read={'all'}) def getnext(self): if self.state.PUBLISHED: return ( Update.query.filter( Update.project == self.project, Update.state.PUBLISHED, Update.number > self.number, ) .order_by(Update.number.asc()) .first() ) @with_roles(read={'all'}) def getprev(self): if self.state.PUBLISHED: return ( Update.query.filter( Update.project == self.project, Update.state.PUBLISHED, Update.number < self.number, ) .order_by(Update.number.desc()) .first() ) add_search_trigger(Update, 'search_vector') auto_init_default(Update._visibility_state) auto_init_default(Update._state) @reopen(Project) class __Project: updates: BaseQuery @property def published_updates(self) -> BaseQuery: return self.updates.filter(Update.state.PUBLISHED).order_by( Update.is_pinned.desc(), Update.published_at.desc() ) with_roles(published_updates, read={'all'}) @property def draft_updates(self) -> BaseQuery: return self.updates.filter(Update.state.DRAFT).order_by(Update.created_at) with_roles(draft_updates, read={'editor'}) @property def pinned_update(self) -> Optional[Update]: return ( self.updates.filter(Update.state.PUBLISHED, Update.is_pinned.is_(True)) .order_by(Update.published_at.desc()) .first() ) with_roles(pinned_update, read={'all'})
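# Illustrative lifecycle sketch (not part of the model; `project` and `editor`
# stand in for objects created elsewhere in the app):
#
#     update = Update(project=project, user=editor, title="Weekly notes",
#                     body="Progress so far")
#     db.session.add(update)
#     update.publish(actor=editor)    # assigns `number`, sets published_at
#     update.is_restricted = True     # flips visibility via make_restricted()
#     update.delete(actor=editor)     # soft-deletes, since it was published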
import re from subprocess import Popen, PIPE from time import time, gmtime, strftime aliases = {"zoidber": "zoidberg", "zoidberg10": "zoidberg", "webmaster": "dhmh", "mast3rranan": "ranan", "ranan2": "ranan"} exclude = ["locale/*", "module/lib/*"] date_format = "%Y-%m-%d" line_re = re.compile(r" (\d+) \**", re.I) def add_exclude_flags(args): for dir in exclude: args.extend(["-X", dir]) def wipe(data, perc=1): s = (sum(data.values()) * perc) / 100 for k, v in data.items(): if v < s: del data[k] return data def de_alias(data): for k, v in aliases.iteritems(): if k not in data: continue alias = aliases[k] if alias in data: data[alias] += data[k] else: data[alias] = data[k] del data[k] return data def output(data): s = float(sum(data.values())) print "Total Lines: %d" % s for k, v in data.iteritems(): print "%15s: %.1f%% | %d" % (k, (v * 100) / s, v) print def file_list(): args = ["hg", "status", "-A"] add_exclude_flags(args) p = Popen(args, stdout=PIPE) out, err = p.communicate() return [x.split()[1] for x in out.splitlines() if x.split()[0] in "CMA"] def hg_annotate(path): args = ["hg", "annotate", "-u", path] p = Popen(args, stdout=PIPE) out, err = p.communicate() data = {} for line in out.splitlines(): author, non, line = line.partition(":") # probably binary file if author == path: return {} author = author.strip().lower() if not line.strip(): continue # don't count blank lines if author in data: data[author] += 1 else: data[author] = 1 return de_alias(data) def hg_churn(days=None): args = ["hg", "churn"] if days: args.append("-d") t = time() - 60 * 60 * 24 * days args.append("%s to %s" % (strftime(date_format, gmtime(t)), strftime(date_format))) add_exclude_flags(args) p = Popen(args, stdout=PIPE) out, err = p.communicate() data = {} for line in out.splitlines(): m = line_re.search(line) author = line.split()[0] lines = int(m.group(1)) if "@" in author: author, n, email = author.partition("@") author = author.strip().lower() if author in data: data[author] += lines else: data[author] = lines return de_alias(data) def complete_annotate(): files = file_list() data = {} for f in files: tmp = hg_annotate(f) for k, v in tmp.iteritems(): if k in data: data[k] += v else: data[k] = v return data if __name__ == "__main__": for d in (30, 90, 180): c = wipe(hg_churn(d)) print "Changes in %d days:" % d output(c) c = wipe(hg_churn()) print "Total changes:" output(c) print "Current source code version:" data = wipe(complete_annotate()) output(data)
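# de_alias merges known aliases into their canonical author name, e.g.
# (illustrative): de_alias({"zoidber": 10, "zoidberg": 5}) == {"zoidberg": 15},
# and wipe() then drops any author below `perc` percent of the total line count.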
import logging from flask import Flask, request, g from flask.ext.restful import fields, reqparse, marshal_with, abort from flask.ext.restful.types import boolean from jormungandr import i_manager from jormungandr.exceptions import RegionNotFound from jormungandr.instance_manager import instances_comparator from jormungandr import authentication from jormungandr.interfaces.v1.fields import DisruptionsField from jormungandr.protobuf_to_dict import protobuf_to_dict from fields import stop_point, stop_area, line, physical_mode, \ commercial_mode, company, network, pagination, place,\ PbField, stop_date_time, enum_type, NonNullList, NonNullNested,\ display_informations_vj, error,\ SectionGeoJson, Co2Emission, PbEnum, feed_publisher from jormungandr.interfaces.parsers import option_value, date_time_format from ResourceUri import ResourceUri, complete_links import datetime from functools import wraps from fields import DateTime from jormungandr.timezone import set_request_timezone from make_links import add_id_links, clean_links, create_external_link, create_internal_link from errors import ManageError from jormungandr.interfaces.argument import ArgumentDoc from jormungandr.interfaces.parsers import depth_argument from operator import itemgetter from datetime import datetime, timedelta import sys from copy import copy from datetime import datetime from collections import defaultdict from navitiacommon import type_pb2, response_pb2 from jormungandr.utils import date_to_timestamp, ResourceUtc from copy import deepcopy from jormungandr.travelers_profile import travelers_profile from jormungandr.interfaces.v1.transform_id import transform_id from jormungandr.interfaces.v1.Calendars import calendar f_datetime = "%Y%m%dT%H%M%S" class SectionLinks(fields.Raw): def output(self, key, obj): links = None try: if obj.HasField("uris"): links = obj.uris.ListFields() except ValueError: return None response = [] if links: for type_, value in links: response.append({"type": type_.name, "id": value}) if obj.HasField('pt_display_informations'): for value in obj.pt_display_informations.notes: response.append({"type": 'notes', "id": value.uri, 'value': value.note}) return response class FareLinks(fields.Raw): def output(self, key, obj): ticket_ids = [] try: for t_id in obj.ticket_id: ticket_ids.append(t_id) except ValueError: return None response = [] for value in ticket_ids: response.append(create_internal_link(_type="ticket", rel="tickets", id=value)) return response class TicketLinks(fields.Raw): def output(self, key, obj): section_ids = [] try: for s_id in obj.section_id: section_ids.append(s_id) except ValueError: return None response = [] for value in section_ids: response.append({"type": "section", "rel": "sections", "internal": True, "templated": False, "id": value}) return response class section_type(enum_type): def if_on_demand_stop_time(self, stop): properties = stop.properties descriptor = properties.DESCRIPTOR enum = descriptor.enum_types_by_name["AdditionalInformation"] for v in properties.additional_informations: if enum.values_by_number[v].name == 'on_demand_transport': return True return False def output(self, key, obj): try: if obj.stop_date_times: first_stop = obj.stop_date_times[0] last_stop = obj.stop_date_times[-1] if self.if_on_demand_stop_time(first_stop): return 'on_demand_transport' elif self.if_on_demand_stop_time(last_stop): return 'on_demand_transport' return 'public_transport' except ValueError: pass return super(section_type, self).output("type", obj) class section_place(PbField): def 
output(self, key, obj): enum_t = obj.DESCRIPTOR.fields_by_name['type'].enum_type.values_by_name if obj.type == enum_t['WAITING'].number: return None else: return super(PbField, self).output(key, obj) section = { "type": section_type(), "id": fields.String(), "mode": enum_type(attribute="street_network.mode"), "duration": fields.Integer(), "from": section_place(place, attribute="origin"), "to": section_place(place, attribute="destination"), "links": SectionLinks(attribute="uris"), "display_informations": PbField(display_informations_vj, attribute='pt_display_informations'), "additional_informations": NonNullList(PbEnum(response_pb2.SectionAdditionalInformationType)), "geojson": SectionGeoJson(), "path": NonNullList(NonNullNested({"length": fields.Integer(), "name": fields.String(), "duration": fields.Integer(), "direction": fields.Integer()}), attribute="street_network.path_items"), "transfer_type": enum_type(), "stop_date_times": NonNullList(NonNullNested(stop_date_time)), "departure_date_time": DateTime(attribute="begin_date_time"), "arrival_date_time": DateTime(attribute="end_date_time"), "co2_emission": Co2Emission(), } cost = { 'value': fields.Float(), 'currency': fields.String(), } fare = { 'total': NonNullNested(cost), 'found': fields.Boolean(), 'links': FareLinks(attribute="ticket_id") } journey = { 'duration': fields.Integer(), 'nb_transfers': fields.Integer(), 'departure_date_time': DateTime(), 'arrival_date_time': DateTime(), 'requested_date_time': DateTime(), 'sections': NonNullList(NonNullNested(section)), 'from': PbField(place, attribute='origin'), 'to': PbField(place, attribute='destination'), 'type': fields.String(), 'fare': NonNullNested(fare), 'tags': fields.List(fields.String), "status": fields.String(attribute="most_serious_disruption_effect"), "calendars": NonNullList(NonNullNested(calendar)), "co2_emission": Co2Emission(), } ticket = { "id": fields.String(), "name": fields.String(), "comment": fields.String(), "found": fields.Boolean(), "cost": NonNullNested(cost), "links": TicketLinks(attribute="section_id") } journeys = { "journeys": NonNullList(NonNullNested(journey)), "error": PbField(error, attribute='error'), "tickets": fields.List(NonNullNested(ticket)), "disruptions": DisruptionsField, "feed_publishers": fields.List(NonNullNested(feed_publisher)), } def dt_represents(value): if value == "arrival": return False elif value == "departure": return True else: raise ValueError("Unable to parse datetime_represents") class add_debug_info(object): """ display info stored in g for the debug must be called after the transformation from protobuff to dict """ def __call__(self, f): @wraps(f) def wrapper(*args, **kwargs): objects = f(*args, **kwargs) response = objects[0] def get_debug(): if not 'debug' in response: response['debug'] = {} return response['debug'] if hasattr(g, 'errors_by_region'): get_debug()['errors_by_region'] = {} for region, er in g.errors_by_region.iteritems(): get_debug()['errors_by_region'][region] = er.message if hasattr(g, 'regions_called'): get_debug()['regions_called'] = g.regions_called return objects return wrapper class add_journey_href(object): def __call__(self, f): @wraps(f) def wrapper(*args, **kwargs): objects = f(*args, **kwargs) if objects[1] != 200: return objects if not "journeys" in objects[0].keys(): return objects if "region" in kwargs.keys(): del kwargs["region"] if "uri" in kwargs.keys(): kwargs["from"] = kwargs["uri"].split("/")[-1] del kwargs["uri"] if "lon" in kwargs.keys() and "lat" in kwargs.keys(): if not "from" in 
kwargs.keys(): kwargs["from"] = kwargs["lon"] + ';' + kwargs["lat"] del kwargs["lon"] del kwargs["lat"] for journey in objects[0]['journeys']: if not "sections" in journey.keys(): kwargs["datetime"] = journey["requested_date_time"] kwargs["to"] = journey["to"]["id"] journey['links'] = [create_external_link("v1.journeys", rel="journeys", **kwargs)] return objects return wrapper class add_journey_pagination(object): def __call__(self, f): @wraps(f) def wrapper(*args, **kwargs): objects = f(*args, **kwargs) if objects[1] != 200: return objects #self is the first parameter, so the resources scenario = g.scenario if scenario and hasattr(scenario, 'extremes') and callable(scenario.extremes): datetime_before, datetime_after = scenario.extremes(objects[0]) else: datetime_before, datetime_after = self.extremes(objects[0]) if datetime_before and datetime_after: if not "links" in objects[0]: objects[0]["links"] = [] args = dict(deepcopy(request.args)) args["datetime"] = datetime_before.strftime(f_datetime) args["datetime_represents"] = "arrival" if "region" in kwargs: args["region"] = kwargs["region"] # Note, it's not the right thing to do, the rel should be 'next' and # the type 'journey' but for compatibility reason we cannot change before the v2 objects[0]["links"].append(create_external_link("v1.journeys", rel='prev', _type='prev', **args)) args["datetime"] = datetime_after.strftime(f_datetime) args["datetime_represents"] = "departure" objects[0]["links"].append(create_external_link("v1.journeys", rel='next', _type='next', **args)) datetime_first, datetime_last = self.first_and_last(objects[0]) if datetime_first and datetime_last: if not "links" in objects[0]: objects[0]["links"] = [] args = dict(deepcopy(request.args)) args["datetime"] = datetime_first.strftime(f_datetime) args["datetime_represents"] = "departure" if "region" in kwargs: args["region"] = kwargs["region"] objects[0]["links"].append(create_external_link("v1.journeys", rel='first', _type='first', **args)) args["datetime"] = datetime_last.strftime(f_datetime) args["datetime_represents"] = "arrival" objects[0]["links"].append(create_external_link("v1.journeys", rel='last', _type='last', **args)) return objects return wrapper def extremes(self, resp): datetime_before = None datetime_after = None if 'journeys' not in resp: return (None, None) section_is_pt = lambda section: section['type'] == "public_transport"\ or section['type'] == "on_demand_transport" filter_journey = lambda journey: 'arrival_date_time' in journey and\ journey['arrival_date_time'] != '' and\ "sections" in journey and\ any(section_is_pt(section) for section in journey['sections']) list_journeys = filter(filter_journey, resp['journeys']) if not list_journeys: return (None, None) prev_journey = min(list_journeys, key=itemgetter('arrival_date_time')) next_journey = max(list_journeys, key=itemgetter('departure_date_time')) f_datetime = "%Y%m%dT%H%M%S" f_departure = datetime.strptime(next_journey['departure_date_time'], f_datetime) f_arrival = datetime.strptime(prev_journey['arrival_date_time'], f_datetime) datetime_after = f_departure + timedelta(minutes=1) datetime_before = f_arrival - timedelta(minutes=1) return (datetime_before, datetime_after) def first_and_last(self, resp): datetime_first = None datetime_last = None try: list_journeys = [journey for journey in resp['journeys'] if 'arrival_date_time' in journey.keys() and journey['arrival_date_time'] != '' and 'departure_date_time' in journey.keys() and journey['departure_date_time'] != ''] asap_min = 
min(list_journeys, key=itemgetter('departure_date_time'))
            asap_max = max(list_journeys, key=itemgetter('arrival_date_time'))
        except (KeyError, ValueError):
            return (None, None)
        if asap_min['departure_date_time'] and asap_max['arrival_date_time']:
            departure = asap_min['departure_date_time']
            departure_date = datetime.strptime(departure, f_datetime)
            midnight = datetime.strptime('0000', '%H%M').time()
            datetime_first = datetime.combine(departure_date, midnight)
            arrival = asap_max['arrival_date_time']
            arrival_date = datetime.strptime(arrival, f_datetime)
            almost_midnight = datetime.strptime('2359', '%H%M').time()
            datetime_last = datetime.combine(arrival_date, almost_midnight)
        return (datetime_first, datetime_last)


class add_fare_links(object):
    def __call__(self, f):
        @wraps(f)
        def wrapper(*args, **kwargs):
            objects = f(*args, **kwargs)
            if objects[1] != 200:
                return objects
            if not "journeys" in objects[0].keys():
                return objects
            ticket_by_section = defaultdict(list)
            if not 'tickets' in objects[0].keys():
                return objects
            for t in objects[0]['tickets']:
                if "links" in t.keys():
                    for s in t['links']:
                        ticket_by_section[s['id']].append(t['id'])
            for j in objects[0]['journeys']:
                if not "sections" in j.keys():
                    continue
                for s in j['sections']:
                    # then we add the links to the different tickets needed
                    for ticket_needed in ticket_by_section[s["id"]]:
                        s['links'].append(create_internal_link(_type="ticket", rel="tickets", id=ticket_needed))
            return objects
        return wrapper


def compute_regions(args):
    """
    Compute the region(s) the journey has to be computed on.
    The complexity comes from the fact that the regions in jormungandr can overlap.

    return the kraken instance key

    rules are easy:
    we fetch the different regions the user can use for 'origin' and 'destination'
    we do the intersection and sort the list
    """
    possible_regions = set()
    from_regions = set()
    to_regions = set()
    if args['origin']:
        from_regions = set(i_manager.get_regions(object_id=args['origin']))
        #Note: if get_regions does not find any region, it raises a RegionNotFoundException
    if args['destination']:
        to_regions = set(i_manager.get_regions(object_id=args['destination']))

    if not from_regions:
        #we didn't get any origin, so the region has to be in the destination's list
        possible_regions = to_regions
    elif not to_regions:
        #we didn't get any destination, so the region has to be in the origin's list
        possible_regions = from_regions
    else:
        #we need the intersection set
        possible_regions = from_regions.intersection(to_regions)

    logging.getLogger(__name__).debug("orig region = {o}, dest region = {d} => set = {p}".
format(o=from_regions, d=to_regions, p=possible_regions)) if not possible_regions: raise RegionNotFound(custom_msg="cannot find a region with {o} and {d} in the same time" .format(o=args['origin'], d=args['destination'])) sorted_regions = list(possible_regions) regions = sorted(sorted_regions, cmp=instances_comparator) return regions class Journeys(ResourceUri, ResourceUtc): def __init__(self): # journeys must have a custom authentication process ResourceUri.__init__(self, authentication=False) ResourceUtc.__init__(self) modes = ["walking", "car", "bike", "bss"] types = { "all": "All types", "best": "The best journey", "rapid": "A good trade off between duration, changes and constraint respect", 'no_train': "Journey without train", 'comfort': "A journey with less changes and walking", 'car': "A journey with car to get to the public transport", 'less_fallback_walk': "A journey with less walking", 'less_fallback_bike': "A journey with less biking", 'less_fallback_bss': "A journey with less bss", 'fastest': "A journey with minimum duration", 'non_pt_walk': "A journey without public transport, only walking", 'non_pt_bike': "A journey without public transport, only biking", 'non_pt_bss': "A journey without public transport, only bike sharing", } self.parsers = {} self.parsers["get"] = reqparse.RequestParser( argument_class=ArgumentDoc) parser_get = self.parsers["get"] parser_get.add_argument("from", type=str, dest="origin") parser_get.add_argument("to", type=str, dest="destination") parser_get.add_argument("datetime", type=date_time_format) parser_get.add_argument("datetime_represents", dest="clockwise", type=dt_represents, default=True) parser_get.add_argument("max_nb_transfers", type=int, dest="max_transfers") parser_get.add_argument("first_section_mode[]", type=option_value(modes), default=["walking"], dest="origin_mode", action="append") parser_get.add_argument("last_section_mode[]", type=option_value(modes), default=["walking"], dest="destination_mode", action="append") parser_get.add_argument("max_duration_to_pt", type=int, description="maximal duration of non public transport in second") parser_get.add_argument("max_walking_duration_to_pt", type=int, description="maximal duration of walking on public transport in second") parser_get.add_argument("max_bike_duration_to_pt", type=int, description="maximal duration of bike on public transport in second") parser_get.add_argument("max_bss_duration_to_pt", type=int, description="maximal duration of bss on public transport in second") parser_get.add_argument("max_car_duration_to_pt", type=int, description="maximal duration of car on public transport in second") parser_get.add_argument("walking_speed", type=float) parser_get.add_argument("bike_speed", type=float) parser_get.add_argument("bss_speed", type=float) parser_get.add_argument("car_speed", type=float) parser_get.add_argument("forbidden_uris[]", type=str, action="append") parser_get.add_argument("count", type=int) parser_get.add_argument("min_nb_journeys", type=int) parser_get.add_argument("max_nb_journeys", type=int) parser_get.add_argument("type", type=option_value(types), default="all") parser_get.add_argument("disruption_active", type=boolean, default=False) parser_get.add_argument("max_duration", type=int, default=3600*24) parser_get.add_argument("wheelchair", type=boolean, default=False) parser_get.add_argument("debug", type=boolean, default=False, hidden=True) # for retrocompatibility purpose, we duplicate (without []): parser_get.add_argument("first_section_mode", 
type=option_value(modes), action="append") parser_get.add_argument("last_section_mode", type=option_value(modes), action="append") parser_get.add_argument("show_codes", type=boolean, default=False, description="show more identification codes") parser_get.add_argument("traveler_type", type=option_value(travelers_profile.keys())) parser_get.add_argument("_override_scenario", type=str, description="debug param to specify a custom scenario") self.method_decorators.append(complete_links(self)) # manage post protocol (n-m calculation) self.parsers["post"] = deepcopy(parser_get) parser_post = self.parsers["post"] parser_post.add_argument("details", type=boolean, default=False, location="json") for index, elem in enumerate(parser_post.args): if elem.name in ["from", "to"]: parser_post.args[index].type = list parser_post.args[index].dest = elem.name parser_post.args[index].location = "json" @add_debug_info() @add_fare_links() @add_journey_pagination() @add_journey_href() @marshal_with(journeys) @ManageError() def get(self, region=None, lon=None, lat=None, uri=None): args = self.parsers['get'].parse_args() if args['traveler_type']: profile = travelers_profile[args['traveler_type']] profile.override_params(args) if args['max_duration_to_pt']: #retrocompatibility: max_duration_to_pt override all individual value by mode args['max_walking_duration_to_pt'] = args['max_duration_to_pt'] args['max_bike_duration_to_pt'] = args['max_duration_to_pt'] args['max_bss_duration_to_pt'] = args['max_duration_to_pt'] args['max_car_duration_to_pt'] = args['max_duration_to_pt'] # TODO : Changer le protobuff pour que ce soit propre if args['destination_mode'] == 'vls': args['destination_mode'] = 'bss' if args['origin_mode'] == 'vls': args['origin_mode'] = 'bss' #count override min_nb_journey or max_nb_journey if 'count' in args and args['count']: args['min_nb_journeys'] = args['count'] args['max_nb_journeys'] = args['count'] # for last and first section mode retrocompatibility if 'first_section_mode' in args and args['first_section_mode']: args['origin_mode'] = args['first_section_mode'] if 'last_section_mode' in args and args['last_section_mode']: args['destination_mode'] = args['last_section_mode'] if region: self.region = i_manager.get_region(region) if uri: objects = uri.split('/') if objects and len(objects) % 2 == 0: args['origin'] = objects[-1] else: abort(503, message="Unable to compute journeys " "from this object") if not args["origin"]: #@vlara really ? I though we could do reverse isochrone ? #shoudl be in my opinion if not args["origin"] and not args["destination"]: abort(400, message="from argument is required") #we transform the origin/destination url to add information if args['origin']: args['origin'] = transform_id(args['origin']) if args['destination']: args['destination'] = transform_id(args['destination']) if not args['datetime']: args['datetime'] = datetime.now() args['datetime'] = args['datetime'].replace(hour=13, minute=37) if not region: #TODO how to handle lon/lat ? don't we have to override args['origin'] ? 
possible_regions = compute_regions(args) else: possible_regions = [region] api = None if args['destination']: api = 'journeys' else: api = 'isochrone' # we save the original datetime for debuging purpose args['original_datetime'] = args['datetime'] #we add the interpreted parameters to the stats self._register_interpreted_parameters(args) logging.getLogger(__name__).debug("We are about to ask journeys on regions : {}" .format(possible_regions)) #we want to store the different errors responses = {} for r in possible_regions: self.region = r #we store the region in the 'g' object, which is local to a request set_request_timezone(self.region) if args['debug']: # In debug we store all queried region if not hasattr(g, 'regions_called'): g.regions_called = [] g.regions_called.append(r) original_datetime = args['original_datetime'] new_datetime = self.convert_to_utc(original_datetime) args['datetime'] = date_to_timestamp(new_datetime) response = i_manager.dispatch(args, api, instance_name=self.region) if response.HasField('error') \ and len(possible_regions) != 1: logging.getLogger(__name__).debug("impossible to find journeys for the region {}," " we'll try the next possible region ".format(r)) if args['debug']: # In debug we store all errors if not hasattr(g, 'errors_by_region'): g.errors_by_region = {} g.errors_by_region[r] = response.error responses[r] = response continue if all(map(lambda j: j.type in ("non_pt_walk", "non_pt_bike", "non_pt_bss", "car"), response.journeys)): responses[r] = response continue return response for response in responses.itervalues(): if not response.HasField("error"): return response # if no response have been found for all the possible regions, we have a problem # if all response had the same error we give it, else we give a generic 'no solution' error first_response = responses.itervalues().next() if all(r.error.id == first_response.error.id for r in responses.values()): return first_response resp = response_pb2.Response() er = resp.error er.id = response_pb2.Error.no_solution er.message = "No journey found" return resp @add_journey_pagination() @add_journey_href() @marshal_with(journeys) @ManageError() def post(self, region=None, lon=None, lat=None, uri=None): args = self.parsers['post'].parse_args() if args['traveler_type']: profile = travelers_profile[args['traveler_type']] profile.override_params(args) #check that we have at least one departure and one arrival if len(args['from']) == 0: abort(400, message="from argument must contain at least one item") if len(args['to']) == 0: abort(400, message="to argument must contain at least one item") # TODO : Changer le protobuff pour que ce soit propre if args['destination_mode'] == 'vls': args['destination_mode'] = 'bss' if args['origin_mode'] == 'vls': args['origin_mode'] = 'bss' if args['max_duration_to_pt']: #retrocompatibility: max_duration_to_pt override all individual value by mode args['max_walking_duration_to_pt'] = args['max_duration_to_pt'] args['max_bike_duration_to_pt'] = args['max_duration_to_pt'] args['max_bss_duration_to_pt'] = args['max_duration_to_pt'] args['max_car_duration_to_pt'] = args['max_duration_to_pt'] #count override min_nb_journey or max_nb_journey if 'count' in args and args['count']: args['min_nb_journeys'] = args['count'] args['max_nb_journeys'] = args['count'] if region: self.region = i_manager.get_region(region) set_request_timezone(self.region) if not region: #TODO how to handle lon/lat ? don't we have to override args['origin'] ? 
self.region = compute_regions(args) #store json data into 4 arrays args['origin'] = [] args['origin_access_duration'] = [] args['destination'] = [] args['destination_access_duration'] = [] for loop in [('from', 'origin', True), ('to', 'destination', False)]: for location in args[loop[0]]: if "access_duration" in location: args[loop[1]+'_access_duration'].append(location["access_duration"]) else: args[loop[1]+'_access_duration'].append(0) stop_uri = location["uri"] stop_uri = transform_id(stop_uri) args[loop[1]].append(stop_uri) #default Date if not "datetime" in args or not args['datetime']: args['datetime'] = datetime.now() args['datetime'] = args['datetime'].replace(hour=13, minute=37) # we save the original datetime for debuging purpose args['original_datetime'] = args['datetime'] original_datetime = args['original_datetime'] #we add the interpreted parameters to the stats self._register_interpreted_parameters(args) new_datetime = self.convert_to_utc(original_datetime) args['datetime'] = date_to_timestamp(new_datetime) api = 'nm_journeys' response = i_manager.dispatch(args, api, instance_name=self.region) return response
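# compute_regions in a nutshell (region names illustrative): if the origin is
# covered by instances {'fr', 'idf'} and the destination only by {'idf'}, the
# intersection {'idf'} is the sole candidate, so the journey is dispatched to
# the 'idf' kraken instance; with no common instance, RegionNotFound is raised,
# and with several, they are ranked with instances_comparator.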
from astrobin_apps_equipment.api.filters.equipment_item_filter import EquipmentItemFilter from astrobin_apps_equipment.models import Sensor class SensorFilter(EquipmentItemFilter): class Meta(EquipmentItemFilter.Meta): model = Sensor
""" Specific overrides to the base prod settings to make development easier. """ from .aws import * # pylint: disable=wildcard-import, unused-wildcard-import del DEFAULT_FILE_STORAGE MEDIA_ROOT = "/edx/var/edxapp/uploads" DEBUG = True USE_I18N = True TEMPLATE_DEBUG = True SITE_NAME = 'localhost:8000' PLATFORM_NAME = ENV_TOKENS.get('PLATFORM_NAME', 'Devstack') CELERY_ALWAYS_EAGER = True import logging for pkg_name in ['track.contexts', 'track.middleware', 'dd.dogapi']: logging.getLogger(pkg_name).setLevel(logging.CRITICAL) EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' FEATURES['ENABLE_INSTRUCTOR_EMAIL'] = True # Enable email for all Studio courses FEATURES['REQUIRE_COURSE_EMAIL_AUTH'] = False # Give all courses email (don't require django-admin perms) ANALYTICS_SERVER_URL = "http://127.0.0.1:9000/" ANALYTICS_API_KEY = "" ANALYTICS_DASHBOARD_URL = None INSTALLED_APPS += ('debug_toolbar', 'debug_toolbar_mongo') MIDDLEWARE_CLASSES += ( 'django_comment_client.utils.QueryCountDebugMiddleware', 'debug_toolbar.middleware.DebugToolbarMiddleware', ) INTERNAL_IPS = ('127.0.0.1',) DEBUG_TOOLBAR_PANELS = ( 'debug_toolbar.panels.versions.VersionsPanel', 'debug_toolbar.panels.timer.TimerPanel', 'debug_toolbar.panels.settings.SettingsPanel', 'debug_toolbar.panels.headers.HeadersPanel', 'debug_toolbar.panels.request.RequestPanel', 'debug_toolbar.panels.sql.SQLPanel', 'debug_toolbar.panels.signals.SignalsPanel', 'debug_toolbar.panels.logging.LoggingPanel', 'debug_toolbar_mongo.panel.MongoDebugPanel', 'debug_toolbar.panels.profiling.ProfilingPanel', ) DEBUG_TOOLBAR_CONFIG = { 'SHOW_TOOLBAR_CALLBACK': 'lms.envs.devstack.should_show_debug_toolbar' } def should_show_debug_toolbar(_): return True # We always want the toolbar on devstack regardless of IP, auth, etc. 
PIPELINE_SASS_ARGUMENTS = '--debug-info --require {proj_dir}/static/sass/bourbon/lib/bourbon.rb'.format(proj_dir=PROJECT_ROOT) FEATURES['AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING'] = True FEATURES['ENABLE_PAYMENT_FAKE'] = True CC_PROCESSOR_NAME = 'CyberSource2' CC_PROCESSOR = { 'CyberSource2': { "PURCHASE_ENDPOINT": '/shoppingcart/payment_fake/', "SECRET_KEY": 'abcd123', "ACCESS_KEY": 'abcd123', "PROFILE_ID": 'edx', } } FEATURES['ENABLE_OAUTH2_PROVIDER'] = True OAUTH_OIDC_ISSUER = 'http://127.0.0.1:8000/oauth2' FEATURES['ENABLE_MOBILE_REST_API'] = True FEATURES['ENABLE_VIDEO_ABSTRACTION_LAYER_API'] = True FEATURES['ENFORCE_PASSWORD_POLICY'] = False FEATURES['ENABLE_MAX_FAILED_LOGIN_ATTEMPTS'] = False FEATURES['SQUELCH_PII_IN_LOGS'] = False FEATURES['PREVENT_CONCURRENT_LOGINS'] = False FEATURES['ADVANCED_SECURITY'] = False PASSWORD_MIN_LENGTH = None PASSWORD_COMPLEXITY = {} FEATURES['MILESTONES_APP'] = True FEATURES['ENTRANCE_EXAMS'] = True FEATURES['LICENSING'] = True FEATURES['ENABLE_COURSEWARE_SEARCH'] = False SEARCH_ENGINE = "search.elastic.ElasticSearchEngine" FEATURES['ENABLE_DASHBOARD_SEARCH'] = True FEATURES['CERTIFICATES_HTML_VIEW'] = True from django.utils.translation import ugettext as _ LANGUAGE_MAP = {'terms': {lang: display for lang, display in ALL_LANGUAGES}, 'name': _('Language')} COURSE_DISCOVERY_MEANINGS = { 'org': { 'name': _('Organization'), }, 'modes': { 'name': _('Course Type'), 'terms': { 'honor': _('Honor'), 'verified': _('Verified'), }, }, 'language': LANGUAGE_MAP, } FEATURES['ENABLE_COURSE_DISCOVERY'] = True FEATURES['COURSES_ARE_BROWSEABLE'] = True HOMEPAGE_COURSE_MAX = 9 FEATURES['ENABLE_SOFTWARE_SECURE_FAKE'] = True VERIFY_STUDENT["SOFTWARE_SECURE"] = { "API_ACCESS_KEY": "BBBBBBBBBBBBBBBBBBBB", "API_SECRET_KEY": "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC", } SEARCH_SKIP_ENROLLMENT_START_DATE_FILTERING = True FEATURES['ENABLE_SHOPPING_CART'] = True FEATURES['STORE_BILLING_INFO'] = True FEATURES['ENABLE_PAID_COURSE_REGISTRATION'] = True FEATURES['ENABLE_COSMETIC_DISPLAY_PRICE'] = True if FEATURES.get('ENABLE_THIRD_PARTY_AUTH') and 'third_party_auth.dummy.DummyBackend' not in AUTHENTICATION_BACKENDS: AUTHENTICATION_BACKENDS = ['third_party_auth.dummy.DummyBackend'] + list(AUTHENTICATION_BACKENDS) try: from .private import * # pylint: disable=wildcard-import except ImportError: pass MODULESTORE = convert_module_store_setting_if_needed(MODULESTORE) SECRET_KEY = '85920908f28904ed733fe576320db18cabd7b6cd'
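# Any of the settings above can be overridden locally by dropping them into
# the optional .private module imported near the end of this file, e.g.
# (illustrative):
#
#     FEATURES['ENABLE_COURSE_DISCOVERY'] = False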
from flask import jsonify

from app.api import api
from app.exceptions import CustomError


@api.app_errorhandler(404)  # this has to be an app-wide handler
def not_found(e):
    response = jsonify({'status': 404, 'error': 'not found', 'success': False,
                        'message': 'invalid resource URI'})
    response.status_code = 404
    return response


@api.errorhandler(405)
def method_not_supported(e):
    response = jsonify({'status': 405, 'error': 'method not supported', 'success': False,
                        'message': 'the method is not supported'})
    response.status_code = 405
    return response


@api.app_errorhandler(422)  # this has to be an app-wide handler
def unprocessable_entity(e):
    data = getattr(e, 'data', {'message': 'unprocessable entity'})
    response = jsonify({'status': 422, 'error': 'unprocessable entity', 'success': False,
                        'message': data['message']})
    response.status_code = 422
    return response


# The same 400 payload serves CustomError, NameError and ValueError.
@api.app_errorhandler(CustomError)
@api.app_errorhandler(NameError)
@api.app_errorhandler(ValueError)
def bad_request(e):
    response = jsonify({'status': 400, 'error': 'custom error', 'success': False,
                        'message': e.args[0]})
    response.status_code = 400
    return response
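# Usage sketch: raising CustomError anywhere under this app produces the 400
# payload defined above, e.g. (message illustrative):
#
#     raise CustomError('quantity must be a positive integer')
#     # -> {"status": 400, "error": "custom error", "success": false,
#     #     "message": "quantity must be a positive integer"}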
from pyramid.httpexceptions import ( HTTPForbidden, HTTPNotFound, ) import pyramid_jsonapi.workflow as wf from . import stages def get_doc(view, stages, query): query = wf.execute_stage( view, stages, 'alter_query', query ) res_obj = wf.loop.get_one_altered_result_object(view, stages, query) results = view.pj_shared.results = wf.Results( view, objects=[res_obj], many=False, is_top=True, not_found_message=view.not_found_message, ) # We have a result but we still need to fill the relationships. # Stage 'alter_result' will run on each related object. wf.loop.fill_result_object_related(res_obj, stages) return results.serialise() def workflow(view, stages): return get_doc(view, stages, view.single_item_query())
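# Stage order for a GET-one request, as wired above:
#   1. 'alter_query'  - registered handlers may modify the SQLAlchemy query;
#   2. 'alter_result' - runs once for the matched object and, via
#      fill_result_object_related(), once per related object;
#   3. results.serialise() - renders the final JSON:API document.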
""" Run tests for the LTI Consumer XBlock """ import os import logging import sys import warnings if __name__ == '__main__': os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'test_settings') try: from django.conf import settings # pylint: disable=wrong-import-position from django.core.management import execute_from_command_line # pylint: disable=wrong-import-position except ImportError: # The above import may fail for some other reason. Ensure that the # issue is really that Django is missing to avoid masking other # exceptions on Python 2. try: import django # pylint: disable=unused-import, wrong-import-position except ImportError: raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?" ) raise settings.INSTALLED_APPS += ('lti_consumer',) # Suppress logging: it just clutters the test output with error logs that are expected. logging.disable(logging.CRITICAL) # Suppress a warning from XBlock: "IdGenerator will be required in the future in order to support XBlockAsides" warnings.filterwarnings("ignore", category=FutureWarning, message=r"IdGenerator will be required.*") arguments = sys.argv[1:] options = [argument for argument in arguments if argument.startswith('-')] paths = [argument for argument in arguments if argument not in options] execute_from_command_line([sys.argv[0], 'test'] + paths + options)
from openquake.hazardlib.gsim.atkinson_macias_2009 import AtkinsonMacias2009
from openquake.hazardlib.tests.gsim.utils import BaseGSIMTestCase


class AtkinsonMacias2009TestCase(BaseGSIMTestCase):
    GSIM_CLASS = AtkinsonMacias2009

    # Verification tables provided by G. M. Atkinson

    def test_mean(self):
        # Due to some minor discrepancies, the tests do not pass with a
        # maximum discrepancy of 0.5 % or lower
        self.check('AM09/ATKINSON_MACIAS_2009_MEAN.csv',
                   max_discrep_percentage=0.6)

    def test_std_total(self):
        self.check('AM09/ATKINSON_MACIAS_2009_STD_TOTAL.csv',
                   max_discrep_percentage=0.1)
""" Automated testing, huzzah This script automates the regression, module, and fuzz tests. TODO: interface to formatter tests as well """ import glob import os import subprocess import sys import time verbose = True tests = [] root_path = sys.path[0] log_path = '{0}/log/'.format(root_path) log_file = '{0}/runtests.out'.format(log_path) old_log_file = '{0}/runtests.out.1'.format(log_path) print_fail_log = False def init(): """ set up the options, read arguments, etc """ global verbose, tests, print_fail_log if not os.path.exists(log_path): os.mkdir(log_path) if (os.path.exists(log_file)): if os.path.exists(old_log_file): os.remove(old_log_file) os.rename(log_file, old_log_file) f = open(log_file, 'w') f.write('# Date: {0}\n'.format(time.strftime('%Y-%m-%d %H:%M:%S'))) f.close() test_defs = { 'fuzz' : fuzz_test, 'unit' : unit_tests, 'regression' : regression_tests, } for arg in sys.argv[1:]: if arg == '--quiet': verbose = False elif arg == '--print-fail-log': print_fail_log = True elif arg.startswith('--') and arg[2:] in test_defs.keys(): tests.append( test_defs[arg[2:]] ) elif arg == '--help': print ''' Usage: {0} [OPTIONS] Valid options: --<test> \t where test may be: {1} --quiet \t Only print failures and warnings --print-fail-log\t Prints a full log at the end if a test fails '''.format(sys.argv[0], ', '.join([name for name in test_defs])) sys.exit() else: print 'Unrecognised argument {0}'.format(arg) if not tests: tests = [func for test,func in test_defs.items()] #uniquify tests = list(set(tests)) def output(text, level=0): """ wrapper to print, checks verbosity before printing """ if level == 0 and not verbose: return print text def feedback(path, retval): """ Generates feedback for a test based on a return value Also applies ansi colour sequences, huzzah! """ retval = max(0, min(retval, 2)) colours = ['\033[92m', '\033[91m', '\033[93m'] texts = ['pass', 'fail', 'warning'] end = '\033[0m' output(' {1}\t\t{0}{2}{3}'.format(colours[retval], path, texts[retval], end), 1 if retval else 0) def test(path, args=''): """ Test a path with the given arguments""" log = open(log_file, 'a') log.write('Begin {0} {1}\n'.format(path, args)) log.flush() ret = subprocess.call(['php', path, args], stdout=log, stderr=log) log.write('\nEnd {0} {1}\n'.format(path, args)) log.close() feedback(path + ' ' + args, ret) return ret == 0 def unit_tests(): """ Runs unit tests, i.e. those in unit/""" output ('Begin unit tests', 1) os.chdir(root_path + '/unit/') ret = 0 for t in glob.iglob('*.php'): if not test(t): ret = 1 return ret def fuzz_test(): """ execute the fuzz tester """ output ('Begin fuzz test (this may take some time)', 1) os.chdir('fuzz') ret = 0 if not test('ifuzz.php'): ret = 1 if not test('fuzz.php'): ret = 1 return ret def regression_tests(): output ('Begin regression tests', 1) os.chdir(root_path + '/regression/') files = glob.glob('*/*') files = filter(lambda s: not s.endswith('.luminous') and not s.startswith('.') and not s.endswith('~'), files) files.sort() ret = 0 for f in files: if not test('test.php', f): ret = 1 return ret if __name__ == '__main__': init() ret = 0 for func in tests: r = func() if r: ret = r os.chdir(root_path) if ret and print_fail_log: with open(log_file) as f: print f.read() sys.exit(ret)
from spack import * class SpectrumMpi(Package): """ IBM MPI implementation from Spectrum MPI. """ homepage = "http://www-03.ibm.com/systems/spectrum-computing/products/mpi" url = "http://www-03.ibm.com/systems/spectrum-computing/products/mpi" provides('mpi') def install(self, spec, prefix): raise InstallError('IBM MPI is not installable; it is vendor supplied') def setup_dependent_package(self, module, dependent_spec): # get the compiler names if '%xl' in dependent_spec or '%xl_r' in dependent_spec: self.spec.mpicc = join_path(self.prefix.bin, 'mpixlc') self.spec.mpicxx = join_path(self.prefix.bin, 'mpixlC') self.spec.mpif77 = join_path(self.prefix.bin, 'mpixlf') self.spec.mpifc = join_path(self.prefix.bin, 'mpixlf') else: self.spec.mpicc = join_path(self.prefix.bin, 'mpicc') self.spec.mpicxx = join_path(self.prefix.bin, 'mpicxx') self.spec.mpif77 = join_path(self.prefix.bin, 'mpif77') self.spec.mpifc = join_path(self.prefix.bin, 'mpif90') def setup_dependent_environment(self, spack_env, run_env, dependent_spec): if '%xl' in dependent_spec or '%xl_r' in dependent_spec: spack_env.set('MPICC', join_path(self.prefix.bin, 'mpixlc')) spack_env.set('MPICXX', join_path(self.prefix.bin, 'mpixlC')) spack_env.set('MPIF77', join_path(self.prefix.bin, 'mpixlf')) spack_env.set('MPIF90', join_path(self.prefix.bin, 'mpixlf')) else: spack_env.set('MPICC', join_path(self.prefix.bin, 'mpicc')) spack_env.set('MPICXX', join_path(self.prefix.bin, 'mpic++')) spack_env.set('MPIF77', join_path(self.prefix.bin, 'mpif77')) spack_env.set('MPIF90', join_path(self.prefix.bin, 'mpif90')) spack_env.set('OMPI_CC', spack_cc) spack_env.set('OMPI_CXX', spack_cxx) spack_env.set('OMPI_FC', spack_fc) spack_env.set('OMPI_F77', spack_f77)
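# Since install() raises InstallError, a site registers its vendor install in
# packages.yaml instead (path and version illustrative, using the `paths`-style
# external declaration of the same Spack era as this package):
#
#     packages:
#       spectrum-mpi:
#         paths:
#           spectrum-mpi@10.1.0: /opt/ibm/spectrum_mpi
#         buildable: False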
import sys from paravistest import datadir, pictureext, get_picture_dir from presentations import CreatePrsForFile, PrsTypeEnum import pvserver as paravis myParavis = paravis.myParavis picturedir = get_picture_dir("MeshPresentation/B0") file = datadir + "carre_en_quad4_seg2.med" print " --------------------------------- " print "file ", file print " --------------------------------- " print "CreatePrsForFile..." CreatePrsForFile(myParavis, file, [PrsTypeEnum.MESH], picturedir, pictureext)
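# The server() function below calls into libSU2 (load_data/save_data/FileLock)
# and instantiates a Project task, but neither is imported in the original
# file; the import locations below are assumptions inferred from the names
# used in this script.
import libSU2
from tasks_project import Project  # assumed home of the Project class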
import os, sys, numpy, time, shutil, glob, traceback pretty_time = lambda: time.asctime( time.localtime(time.time())) def server( config_filename , design_filename , project_filename , transfer_filename , exchange_location , hot_start = False ): # ------------------------------------------------------------------- # Start Up # ------------------------------------------------------------------- # process exchange location if exchange_location: exchange_location = exchange_location.split(':') server_name = exchange_location[0] exchange_folder = ':'.join(exchange_location[1:]) else: server_name = '' exchange_folder = '' # check for existing project file if os.path.exists(project_filename): # load project The_Project = libSU2.load_data(project_filename) The_Project.folder_self = os.getcwd() # or start new project else: # new design data design_init = { 'VARIABLES' : [] , 'OBJECTIVES' : {} , 'GRADIENTS' : {} } libSU2.save_data(design_filename,design_init,append=False) # start project The_Project = Project( config_name = config_filename , design_name = design_filename ) #: if load/start project # make sure to start with waiting if not hot_start: if server_name: os.system('scp -q %s:%s%s ./ ' % (server_name,exchange_folder,transfer_filename) ) Status_set = {'STATUS' : 'WAIT'} with libSU2.FileLock(transfer_filename,timeout=100): libSU2.save_data(transfer_filename,Status_set,append=True) if server_name: os.system('scp -q %s %s:%s' % (transfer_filename,server_name,exchange_folder) ) #: if not hot_start # start log sys.stdout.write( 'Start Server ... \n' ) sys.stdout.write( pretty_time() + '\n' ) sys.stdout.write(' \n') sys.stdout.flush() # ------------------------------------------------------------------- # Listen for Jobs # ------------------------------------------------------------------- # keep on keepin on keepon = True while keepon: # get design data if server_name: os.system('scp -q %s:%s%s ./ ' % (server_name,exchange_folder,transfer_filename) ) # load design data with libSU2.FileLock(transfer_filename,timeout=100): Transfer_Data = libSU2.load_data(transfer_filename) Status = str( Transfer_Data['STATUS'] ) # ------------------------------------------------------------------- # Run Case if Status == 'RUN': # log sys.stdout.write( 'Run Project ... 
\n' ) sys.stdout.write( pretty_time() + '\n' ) sys.stdout.write( ' \n' ) sys.stdout.flush() # setup config deltas config_delta = [] for DV_X in Transfer_Data['VARIABLES']: config_delta.append( {'VARIABLES':DV_X} ) # RUN PROJECT try: # evaluate project Transfer_Data,_,_ = The_Project.evaluate(config_delta) # save project libSU2.save_data(project_filename,The_Project) # save project data libSU2.save_data( design_filename, The_Project.design_current, append=False ) except (KeyboardInterrupt, SystemExit): raise except Exception,err: sys.stdout.write( 'RUN FAILED \n\n' ) print(traceback.format_exc()) sys.stdout.write( '\n' ) #: try SDS.py # finish up sys.stdout.write( pretty_time() + '\n\n' ) sys.stdout.flush() # save new transfer data Transfer_Data['STATUS'] = 'WAIT' with libSU2.FileLock(transfer_filename,timeout=100): libSU2.save_data(transfer_filename,Transfer_Data,append=False) # push design data if server_name: os.system('scp -q %s %s:%s' % (transfer_filename,server_name,exchange_folder) ) # ------------------------------------------------------------------- # Stop Case elif Status == 'STOP': sys.stdout.write( 'Caught Stop Signal \n') sys.stdout.write( pretty_time() + '\n') sys.stdout.write( ' \n') sys.stdout.flush() keepon = False # ------------------------------------------------------------------- # Sleep Case else: time.sleep(10) #: if use case #: while keepon sys.stdout.write( 'DONE \n\n' ) return
import dbus

from saluttest import exec_test
from file_transfer_helper import ReceiveFileTest, SendFileTest
from servicetest import call_async

import constants as cs


class SendFileNoMetadata(SendFileTest):
    # this is basically the equivalent of calling CreateChannel
    # without these two properties
    service_name = ''
    metadata = {}


class ReceiveFileNoMetadata(ReceiveFileTest):
    service_name = ''
    metadata = {}


class SendFileBadProps(SendFileTest):
    metadata = {'FORM_TYPE': 'this shouldnt be allowed'}

    def request_ft_channel(self):
        request = {
            cs.CHANNEL_TYPE: cs.CHANNEL_TYPE_FILE_TRANSFER,
            cs.TARGET_HANDLE_TYPE: cs.HT_CONTACT,
            cs.TARGET_HANDLE: self.handle,
            cs.FT_CONTENT_TYPE: self.file.content_type,
            cs.FT_FILENAME: self.file.name,
            cs.FT_SIZE: self.file.size,
            cs.FT_CONTENT_HASH_TYPE: self.file.hash_type,
            cs.FT_CONTENT_HASH: self.file.hash,
            cs.FT_DESCRIPTION: self.file.description,
            cs.FT_DATE: self.file.date,
            cs.FT_INITIAL_OFFSET: 0,
            cs.FT_SERVICE_NAME: self.service_name,
            cs.FT_METADATA: dbus.Dictionary(self.metadata, signature='sas')}

        call_async(self.q, self.conn.Requests, 'CreateChannel', request)

        # FORM_TYPE is not allowed, soz
        self.q.expect('dbus-error', method='CreateChannel',
                      name=cs.INVALID_ARGUMENT)
        return True


class SendFileBadContact(SendFileTest):
    def announce_contact(self):
        SendFileTest.announce_contact(self, metadata=False)

    def request_ft_channel(self):
        request = {
            cs.CHANNEL_TYPE: cs.CHANNEL_TYPE_FILE_TRANSFER,
            cs.TARGET_HANDLE_TYPE: cs.HT_CONTACT,
            cs.TARGET_HANDLE: self.handle,
            cs.FT_CONTENT_TYPE: self.file.content_type,
            cs.FT_FILENAME: self.file.name,
            cs.FT_SIZE: self.file.size,
            cs.FT_CONTENT_HASH_TYPE: self.file.hash_type,
            cs.FT_CONTENT_HASH: self.file.hash,
            cs.FT_DESCRIPTION: self.file.description,
            cs.FT_DATE: self.file.date,
            cs.FT_INITIAL_OFFSET: 0,
            cs.FT_SERVICE_NAME: self.service_name,
            cs.FT_METADATA: dbus.Dictionary(self.metadata, signature='sas')}

        call_async(self.q, self.conn.Requests, 'CreateChannel', request)

        # no support for metadata, soz
        self.q.expect('dbus-error', method='CreateChannel',
                      name=cs.NOT_CAPABLE)
        return True


if __name__ == '__main__':
    test = SendFileNoMetadata()
    exec_test(test.test)

    test = ReceiveFileNoMetadata()
    exec_test(test.test)

    test = SendFileBadProps()
    exec_test(test.test)

    test = SendFileBadContact()
    exec_test(test.test)
import os
import sys
import re
import locale
import mimetypes
import psutil
import time
import base64

from Crypto.Cipher import AES
from Crypto import Random

from nxdrive.logging_config import get_logger

NUXEO_DRIVE_FOLDER_NAME = 'Nuxeo Drive'

log = get_logger(__name__)

WIN32_SUFFIX = os.path.join('library.zip', 'nxdrive')
OSX_SUFFIX = "Contents/Resources/lib/python2.7/site-packages.zip/nxdrive"

ENCODING = locale.getpreferredencoding()
DEFAULT_ENCODING = 'utf-8'

WIN32_PATCHED_MIME_TYPES = {
    'image/pjpeg': 'image/jpeg',
    'image/x-png': 'image/png',
    'image/bmp': 'image/x-ms-bmp',
    'audio/x-mpg': 'audio/mpeg',
    'video/x-mpeg2a': 'video/mpeg',
    'application/x-javascript': 'application/javascript',
    'application/x-msexcel': 'application/vnd.ms-excel',
    'application/x-mspowerpoint': 'application/vnd.ms-powerpoint',
    'application/x-mspowerpoint.12':
        'application/vnd.openxmlformats-officedocument.presentationml.presentation',
}

DEVICE_DESCRIPTIONS = {
    'linux2': 'Linux Desktop',
    'darwin': 'Mac OSX Desktop',
    'cygwin': 'Windows Desktop',
    'win32': 'Windows Desktop',
}

TOKEN_PERMISSION = 'ReadWrite'


def current_milli_time():
    return int(round(time.time() * 1000))


def is_hexastring(value):
    for c in value:
        if c not in "0123456789ABCDEF":
            return False
    return True


def is_office_temp_file(name):
    if name.startswith('~') and name.endswith(".tmp"):
        return True
    if len(name) == 8 and "." not in name:
        # each char must be in "0123456789ABCDEF"
        return is_hexastring(name)
    elif name.endswith(".tmp") and name.startswith("ppt"):
        # ppt98D2.tmp
        return is_hexastring(name[3:7])
    elif len(name) == 12 and name.endswith(".tmp"):
        # 813DEFA7.tmp file: the hexadecimal part is 8 characters long
        # (the original sliced name[0:7] and missed the last digit)
        return is_hexastring(name[0:8])
    return False


def version_compare(x, y):
    """Compare version numbers using the usual x.y.z pattern.

    For instance, will result in:
        - 5.9.3 > 5.9.2
        - 5.9.3 > 5.8
        - 5.8 > 5.6.0
        - 5.10 > 5.1.2
        - 1.3.0524 > 1.3.0424
        - 1.4 > 1.3.0524
        - ...

    Also handles date-based releases, snapshots and hotfixes:
        - 5.9.4-I20140515_0120 > 5.9.4-I20140415_0120
        - 5.9.4-I20140415_0120 > 5.9.3
        - 5.9.4-I20140415_0120 < 5.9.4
        - 5.9.4-I20140415_0120 < 5.9.5
        - 5.9.4-SNAPSHOT > 5.9.3-SNAPSHOT
        - 5.9.4-SNAPSHOT > 5.9.3
        - 5.9.4-SNAPSHOT < 5.9.4
        - 5.9.4-SNAPSHOT < 5.9.5
        - 5.9.4-I20140415_0120 > 5.9.3-SNAPSHOT
        - 5.9.4-I20140415_0120 < 5.9.5-SNAPSHOT
        - 5.9.4-I20140415_0120 = 5.9.4-SNAPSHOT (can't decide, consider as equal)
        - 5.8.0-HF15 > 5.8
        - 5.8.0-HF15 > 5.7.1-SNAPSHOT
        - 5.8.0-HF15 < 5.9.1
        - 5.8.0-HF15 > 5.8.0-HF14
        - 5.8.0-HF15 > 5.6.0-HF35
        - 5.8.0-HF15 < 5.10.0-HF01
        - 5.8.0-HF15-SNAPSHOT > 5.8
        - 5.8.0-HF15-SNAPSHOT > 5.8.0-HF14-SNAPSHOT
        - 5.8.0-HF15-SNAPSHOT > 5.8.0-HF14
        - 5.8.0-HF15-SNAPSHOT < 5.8.0-HF15
        - 5.8.0-HF15-SNAPSHOT < 5.8.0-HF16-SNAPSHOT
    """
    x_numbers = x.split('.')
    y_numbers = y.split('.')
    while (x_numbers and y_numbers):
        x_number = x_numbers.pop(0)
        y_number = y_numbers.pop(0)
        # Handle hotfixes
        if 'HF' in x_number:
            hf = re.sub(ur'-HF', '.', x_number).split('.', 1)
            x_number = hf[0]
            x_numbers.append(hf[1])
        if 'HF' in y_number:
            hf = re.sub(ur'-HF', '.', y_number).split('.', 1)
            y_number = hf[0]
            y_numbers.append(hf[1])
        # Handle date-based and snapshots
        x_date_based = 'I' in x_number
        y_date_based = 'I' in y_number
        x_snapshot = 'SNAPSHOT' in x_number
        y_snapshot = 'SNAPSHOT' in y_number
        if (not x_date_based and not x_snapshot
                and (y_date_based or y_snapshot)):
            # y is date-based or snapshot, x is not
            x_number = int(x_number)
            y_number = int(re.sub(ur'-(I.*|SNAPSHOT)', '', y_number))
            if y_number <= x_number:
                return 1
            else:
                return -1
        elif (not y_date_based and not y_snapshot
                and (x_date_based or x_snapshot)):
            # x is date-based or snapshot, y is not
            x_number = int(re.sub(ur'-(I.*|SNAPSHOT)', '', x_number))
            y_number = int(y_number)
            if x_number <= y_number:
                return -1
            else:
                return 1
        else:
            if x_date_based and y_date_based:
                # x and y are date-based
                x_number = int(re.sub(ur'(I|-|_)', '', x_number))
                y_number = int(re.sub(ur'(I|-|_)', '', y_number))
            elif x_snapshot and y_snapshot:
                # x and y are snapshots
                x_number = int(re.sub(ur'-SNAPSHOT', '', x_number))
                y_number = int(re.sub(ur'-SNAPSHOT', '', y_number))
            elif x_date_based and y_snapshot:
                # x is date-based, y is snapshot
                x_number = int(re.sub(ur'-I.*', '', x_number))
                y_number = int(re.sub(ur'-SNAPSHOT', '', y_number))
                if x_number == y_number:
                    return 0
            elif x_snapshot and y_date_based:
                # x is snapshot, y is date-based
                x_number = int(re.sub(ur'-SNAPSHOT', '', x_number))
                y_number = int(re.sub(ur'-I.*', '', y_number))
                if x_number == y_number:
                    return 0
            else:
                # x and y are not date-based
                x_number = int(x_number)
                y_number = int(y_number)
            if x_number != y_number:
                diff = x_number - y_number
                if diff > 0:
                    return 1
                else:
                    return -1
    if x_numbers:
        return 1
    if y_numbers:
        return -1
    return 0


def normalized_path(path):
    """Return absolute, normalized file path."""
    if isinstance(path, bytes):
        # Decode path with local encoding when not already decoded explicitly
        # by the caller
        path = path.decode(ENCODING)
    # XXX: we could os.path.normcase as well under Windows but it might be
    # the source of unexpected troubles so not doing it for now.
    return os.path.realpath(
        os.path.normpath(os.path.abspath(os.path.expanduser(path))))


def safe_long_path(path):
    """Utility to prefix path with the long path marker for Windows

    http://msdn.microsoft.com/en-us/library/aa365247.aspx#maxpath
    """
    if sys.platform == 'win32':
        if isinstance(path, bytes):
            # Decode path with local encoding when not already decoded
            # explicitly by the caller
            path = unicode(path.decode(ENCODING))
        path = u"\\\\?\\" + path
    return path


def path_join(parent, child):
    if parent == '/':
        return '/' + child
    return parent + '/' + child


def default_nuxeo_drive_folder():
    # TODO: Factorize with manager.get_default_nuxeo_drive_folder
    """Find a reasonable location for the root Nuxeo Drive folder

    This folder is user specific, typically under the home folder.

    Under Windows, try to locate My Documents as a home folder, using the
    win32com shell API if allowed, else falling back on a manual detection.

    Note that we need to decode the path returned by os.path.expanduser with
    the local encoding because the value of the HOME environment variable is
    read as a byte string. Using os.path.expanduser(u'~') fails if the home
    path contains non ASCII characters since Unicode coercion attempts to
    decode the byte string as an ASCII string.
    """
    if sys.platform == "win32":
        from win32com.shell import shell, shellcon
        try:
            my_documents = shell.SHGetFolderPath(0, shellcon.CSIDL_PERSONAL,
                                                 None, 0)
        except:
            # In some cases (not really sure how this happens) the current
            # user is not allowed to access its 'My Documents' folder path
            # through the win32com shell API, which raises the following
            # error: com_error: (-2147024891, 'Access is denied.', None, None)
            # We noticed that in this case the 'Location' tab is missing in
            # the Properties window of 'My Documents' accessed through the
            # Explorer.
            # So let's fall back on a manual (and poor) detection.
            # WARNING: it's important to check 'Documents' first as under
            # Windows 7 there also exists a 'My Documents' folder invisible
            # in the Explorer and cmd / powershell but visible from Python.
            # First try regular location for documents under Windows 7 and up
            log.debug("Access denied to win32com shell API: SHGetFolderPath,"
                      " falling back on manual detection of My Documents")
            my_documents = os.path.expanduser(r'~\Documents')
            my_documents = unicode(my_documents.decode(ENCODING))
            if not os.path.exists(my_documents):
                # Compatibility for Windows XP
                my_documents = os.path.expanduser(r'~\My Documents')
                my_documents = unicode(my_documents.decode(ENCODING))

        if os.path.exists(my_documents):
            nuxeo_drive_folder = os.path.join(my_documents,
                                              NUXEO_DRIVE_FOLDER_NAME)
            log.debug("Will use '%s' as default Nuxeo Drive folder location"
                      " under Windows", nuxeo_drive_folder)
            return nuxeo_drive_folder

    # Fall back on home folder otherwise
    user_home = os.path.expanduser('~')
    user_home = unicode(user_home.decode(ENCODING))
    nuxeo_drive_folder = os.path.join(user_home, NUXEO_DRIVE_FOLDER_NAME)
    log.debug("Will use '%s' as default Nuxeo Drive folder location",
              nuxeo_drive_folder)
    return nuxeo_drive_folder


def find_resource_dir(directory, default_path):
    """Find the FS path of a directory in various OS binary packages"""
    import nxdrive
    nxdrive_path = os.path.dirname(nxdrive.__file__)
    app_resources = '/Contents/Resources/'
    cxfreeze_suffix = os.path.join('library.zip', 'nxdrive')

    dir_path = default_path
    if app_resources in nxdrive_path:
        # OSX frozen distribution, bundled as an app
        dir_path = re.sub(app_resources + ".*",
                          app_resources + directory, nxdrive_path)
    elif nxdrive_path.endswith(cxfreeze_suffix):
        # cx_Freeze frozen distribution of nxdrive, data is out of the zip
        dir_path = nxdrive_path.replace(cxfreeze_suffix, directory)

    if not os.path.exists(dir_path):
        log.warning("Could not find the resource directory at: %s", dir_path)
        return None
    return dir_path


def force_decode(string, codecs=['utf-8', 'cp1252']):
    if isinstance(string, unicode):
        string = string.encode('utf-8')
    for codec in codecs:
        try:
            return string.decode(codec)
        except:
            pass
    log.debug("Cannot decode string '%s' with any of the given codecs: %r",
              string, codecs)
    return None


def encrypt(plaintext, secret, lazy=True):
    """Symmetric encryption using AES"""
    secret = _lazysecret(secret) if lazy else secret
    iv = Random.new().read(AES.block_size)
    encobj = AES.new(secret, AES.MODE_CFB, iv)
    return base64.b64encode(iv + encobj.encrypt(plaintext))


def decrypt(ciphertext, secret, lazy=True):
    """Symmetric decryption using AES"""
    secret = _lazysecret(secret) if lazy else secret
    ciphertext = base64.b64decode(ciphertext)
    iv = ciphertext[:AES.block_size]
    ciphertext = ciphertext[AES.block_size:]
    # Don't fail on decrypt
    try:
        encobj = AES.new(secret, AES.MODE_CFB, iv)
        return encobj.decrypt(ciphertext)
    except:
        return None


def _lazysecret(secret, blocksize=32, padding='}'):
    """Pad secret if not legal AES block size (16, 24, 32)"""
    if len(secret) > blocksize:
        return secret[:-(len(secret) - blocksize)]
    if not len(secret) in (16, 24, 32):
        return secret + (blocksize - len(secret)) * padding
    return secret


def guess_mime_type(filename):
    mime_type, _ = mimetypes.guess_type(filename)
    if mime_type:
        if sys.platform == 'win32':
            # Patch bad Windows MIME types
            # See https://jira.nuxeo.com/browse/NXP-11660
            # and http://bugs.python.org/issue15207
            mime_type = _patch_win32_mime_type(mime_type)
        log.trace("Guessed mime type '%s' for '%s'", mime_type, filename)
        return mime_type
    else:
        log.trace("Could not guess mime type for '%s', returning"
                  " 'application/octet-stream'", filename)
        return "application/octet-stream"


def guess_digest_algorithm(digest):
    # For now only md5 and sha1 are supported
    if digest is None or len(digest) == 32:
        return 'md5'
    elif len(digest) == 40:
        return 'sha1'
    else:
        raise Exception('Unknown digest algorithm for %s' % digest)


def _patch_win32_mime_type(mime_type):
    patched_mime_type = WIN32_PATCHED_MIME_TYPES.get(mime_type)
    return patched_mime_type if patched_mime_type else mime_type


def deprecated(func):
    """This is a decorator which can be used to mark functions as deprecated.
    It will result in a warning being emitted when the function is used."""
    def new_func(*args, **kwargs):
        log.warning("Call to deprecated function {}.".format(func.__name__))
        return func(*args, **kwargs)
    new_func.__name__ = func.__name__
    new_func.__doc__ = func.__doc__
    new_func.__dict__.update(func.__dict__)
    return new_func


class ServerLoader(object):
    def __init__(self, remote_client, local_client):
        self._remote_client = remote_client
        self._local_client = local_client

    def sync(self, remote_uid, local):
        childs = self._local_client.get_children_info(local)
        rchilds = self._remote_client.get_children_info(remote_uid)
        existing_childs = dict()
        for child in rchilds:
            path = os.path.join(local, child.name)
            existing_childs[path] = child
        for child in childs:
            child_uid = None
            if child.path not in existing_childs:
                if child.folderish:
                    print("Making folder: %s" % child.path)
                    child_uid = self._remote_client.make_folder(remote_uid,
                                                                child.name)
                else:
                    print("Making file: %s" % child.path)
                    self._remote_client.stream_file(
                        remote_uid, self._local_client._abspath(child.path))
            else:
                child_uid = existing_childs[child.path].uid
            if child.folderish:
                self.sync(child_uid, child.path)


class PidLockFile(object):
    """This class handles the pid lock file"""

    def __init__(self, folder, key):
        self.folder = folder
        self.key = key
        self.locked = False

    def _get_sync_pid_filepath(self, process_name=None):
        if process_name is None:
            process_name = self.key
        return os.path.join(self.folder, 'nxdrive_%s.pid' % process_name)

    def unlock(self):
        if not self.locked:
            return
        # Clean pid file
        pid_filepath = self._get_sync_pid_filepath()
        try:
            os.unlink(pid_filepath)
        except Exception as e:
            log.warning("Failed to remove stalled pid file: %s"
                        " for stopped process %d: %r",
                        pid_filepath, os.getpid(), e)

    def check_running(self, process_name=None):
        """Check whether another sync process is already running

        If nxdrive.pid file already exists and the pid points to a running
        nxdrive program then return the pid. Return None otherwise.
        """
        if process_name is None:
            process_name = self.key
        pid_filepath = self._get_sync_pid_filepath(process_name=process_name)
        if os.path.exists(pid_filepath):
            pid = None
            with open(safe_long_path(pid_filepath), 'rb') as f:
                try:
                    pid = int(f.read().strip())
                    p = psutil.Process(pid)
                    # If process has been created after the lock file.
                    # Changed from getctime() to getmtime() because of
                    # Windows' 'file system tunneling'
                    if p.create_time() > os.path.getmtime(pid_filepath):
                        raise ValueError
                    return pid
                except (ValueError, psutil.NoSuchProcess):
                    pass
            # This is a pid file that is empty or pointing to either a
            # stopped process or a non-nxdrive process: let's delete it if
            # possible
            try:
                os.unlink(pid_filepath)
                if pid is None:
                    msg = "Removed old empty pid file: %s" % pid_filepath
                else:
                    msg = ("Removed old pid file: %s for stopped process"
                           " %d" % (pid_filepath, pid))
                log.info(msg)
            except Exception as e:
                if pid is None:
                    msg = ("Failed to remove empty stalled pid file: %s:"
                           " %r" % (pid_filepath, e))
                else:
                    msg = ("Failed to remove stalled pid file: %s for"
                           " stopped process %d: %r"
                           % (pid_filepath, pid, e))
                # log before returning (the original returned first, which
                # made the warning unreachable)
                log.warning(msg)
                return pid
        self.locked = True
        return None

    def lock(self):
        pid = self.check_running(process_name=self.key)
        if pid is not None:
            log.warning("%s process with pid %d already running.",
                        self.key, pid)
            return pid

        # Write the pid of this process
        pid_filepath = self._get_sync_pid_filepath(process_name=self.key)
        pid = os.getpid()
        with open(safe_long_path(pid_filepath), 'wb') as f:
            f.write(str(pid))
        return None
from base64 import urlsafe_b64decode
from datetime import datetime
from email.utils import mktime_tz, parsedate_tz
from gzip import open as gzip_open
from pytz import timezone
from os import getcwd
from os.path import isabs as path_isabs
from os.path import expanduser, realpath, join as path_join
from logging import getLogger

from googleapiclient.errors import HttpError

import re

date_expr = re.compile(r'^Date: (.+?)$', re.DOTALL | re.MULTILINE)
logger = getLogger(__name__)

TIMEZONE = 'Asia/Seoul'


def fetch_structure(service, email, label_id, latest_mid):
    """Fetch message_id, thread_id of message box.

    :param service:
    :param email:
    :param label_id:
    :param latest_mid:
    :return: list of tuples: (message_id, thread_id)
    """
    page_token = ''
    first_loop = True
    output = []
    logger.info(
        'fetch_structure started. email: %s, label_id: %s, latest_mid: %d (0x%x)' % (
            email, label_id, latest_mid, latest_mid
        )
    )
    while page_token or first_loop:
        first_loop = False
        # Expected keys
        #   messages[]
        #   nextPageToken
        #   resultSizeEstimate
        response = service.users().messages().list(
            userId=email,
            labelIds=label_id,
            includeSpamTrash=False,
            pageToken=page_token
        ).execute()

        messages = response['messages'] if 'messages' in response else []
        page_token = response['nextPageToken'] if 'nextPageToken' in response else ''

        for message in messages:
            message_id = int(message['id'], 16)
            thread_id = int(message['threadId'], 16)
            logger.debug('message id: %s, thread id: %s' % (message['id'], message['threadId']))
            if message_id <= latest_mid:
                logger.debug('latest_mid reached.')
                page_token = ''
                break
            output.append((message_id, thread_id))

    logger.info('fetch_structure completed. Total %s items' % len(output))
    return output


def get_default_timezone():
    return timezone(TIMEZONE)


def fetch_mail(service, email, message_id):
    """
    response has below keys:
        id
        threadId
        labelIds[]
        snippet
        historyId
        internalDate
        sizeEstimate
        raw

    :param service:
    :param email:
    :param message_id:
    :return:
    """
    try:
        response = service.users().messages().get(
            id='%x' % message_id, userId=email, format='raw').execute()
        logger.debug('fetch_mail: %s, mid %d (0x%x)' % (email, message_id, message_id))
    except HttpError:
        logger.error('Email address \'%s\', message id: %d (0x%x) not found.'
                     % (email, message_id, message_id))
        response = None
    return response


def extract_diary_dates(service, email, structure):
    logger.info(
        'extract_diary_dates started. email: %s, structure: %d item(s).' % (
            email, len(structure)
        )
    )
    # Please be patient!
    # It may take minutes because every alarm mail in the structure is going
    # to be fetched to extract its date field within.
    output = {}
    for message_id, thread_id in structure:
        if message_id != thread_id:
            continue
        # you have to fetch every single message to get the date header field.
        message = fetch_mail(service, email, message_id)
        raw_message = urlsafe_b64decode(message['raw']).decode('ascii')
        date = None
        searched = date_expr.search(raw_message)
        if searched:
            date_text = searched.group(1)
            date = parsedate_tz(date_text)
            timestamp = mktime_tz(date)
            date = datetime.fromtimestamp(timestamp, get_default_timezone()).date()
        assert date is not None
        logger.debug('Message id %x, diary date %s extracted.' % (message_id, date))
        output[message_id] = date

    logger.info('extract_diary_dates completed. %d date(s).' % len(output))
    return output


def fetch_and_archive(service, email, archive_path, mid_list):
    logger.info(
        'fetch_and_archive started. email: %s, archive_path: %s, mid_list: %d message(s)' % (
            email, archive_path, len(mid_list)
        )
    )
    if path_isabs(archive_path):
        output_dir = realpath(archive_path)
    else:
        output_dir = realpath(expanduser(path_join(getcwd(), archive_path)))

    count = 0
    error = 0
    for mid in mid_list:
        file_name = path_join(output_dir, ('%x.gz' % mid))
        message = fetch_mail(service, email, mid)
        if not message:
            error += 1
            continue
        with gzip_open(file_name, 'wb') as f:
            f.write(urlsafe_b64decode(message['raw']))
            logger.debug('Message id %x gzipped to %s.' % (mid, file_name))
        count += 1

    logger.info('fetch_and_archive completed. Total %d item(s) saved. Error %d item(s).'
                % (count, error))


def get_archive(mid, archive_path):
    if path_isabs(archive_path):
        archive_dir = realpath(archive_path)
    else:
        archive_dir = realpath(expanduser(path_join(getcwd(), archive_path)))
    path = path_join(archive_dir, '%x.gz' % mid)
    with gzip_open(path, 'rb') as f:
        mime = f.read()
        logger.debug('Archive \'%s\' extracted successfully. %d bytes' % (path, len(mime)))
    return mime
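# --- Illustrative usage sketch (not part of the original module) ---
# A rough example of how the helpers above chain together. `service` is an
# authorized Gmail API client; the label id and archive path are assumptions.
def archive_new_mail(service, email, label_id, latest_mid, archive_path):
    # list (message_id, thread_id) pairs newer than latest_mid ...
    structure = fetch_structure(service, email, label_id, latest_mid)
    # ... then download and gzip each message to the archive folder
    mid_list = [mid for mid, _tid in structure]
    fetch_and_archive(service, email, archive_path, mid_list)
    return len(mid_list)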
"""""" from __future__ import annotations import logging from pprint import pformat from flask import current_app from flask.cli import AppGroup logging.basicConfig() logger = logging.getLogger("") config_commands = AppGroup("config") @config_commands.command() def show(only_path=False): """Show the current config.""" logger.setLevel(logging.INFO) infos = ["\n", f'Instance path: "{current_app.instance_path}"'] logger.info("\n ".join(infos)) if not only_path: log_config(current_app.config) def log_config(config): original_level = logger.level logger.setLevel(logging.INFO) try: return _log_config(config) finally: logger.setLevel(original_level) def _log_config(config): lines = ["Application configuration:"] for k, v in sorted(config.items()): prefix = " " indent = len(k) + 3 width = 80 - indent v = pformat(v, width=width).replace("\n", f"\n{' ' * indent}") lines.append(f"{prefix}{k}: {v}") logger.info("\n".join(lines))
from pycopia.aid import Enum

import pycopia.SMI.Basetypes
Range = pycopia.SMI.Basetypes.Range
Ranges = pycopia.SMI.Basetypes.Ranges

from pycopia.SMI.Objects import ColumnObject, MacroObject, NotificationObject, \
    RowObject, ScalarObject, NodeObject, ModuleObject, GroupObject

from SNMPv2_SMI import MODULE_IDENTITY, NOTIFICATION_TYPE, OBJECT_TYPE, Integer32, Counter32
from SNMPv2_CONF import MODULE_COMPLIANCE, OBJECT_GROUP
from CISCO_SMI import ciscoMgmt
from SNMPv2_TC import TEXTUAL_CONVENTION, DisplayString, TimeStamp, TruthValue


class CISCO_SYSLOG_MIB(ModuleObject):
    path = '/usr/share/snmp/mibs/site/CISCO-SYSLOG-MIB'
    conformance = 3
    name = 'CISCO-SYSLOG-MIB'
    language = 2
    description = 'The MIB module to describe and store the system messages\ngenerated by the IOS.'


class ciscoSyslogMIB(NodeObject):
    status = 1
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 41])
    name = 'ciscoSyslogMIB'


class ciscoSyslogMIBObjects(NodeObject):
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 41, 1])
    name = 'ciscoSyslogMIBObjects'


class clogBasic(NodeObject):
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 41, 1, 1])
    name = 'clogBasic'


class clogHistory(NodeObject):
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 41, 1, 2])
    name = 'clogHistory'


class ciscoSyslogMIBNotificationPrefix(NodeObject):
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 41, 2])
    name = 'ciscoSyslogMIBNotificationPrefix'


class ciscoSyslogMIBNotifications(NodeObject):
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 41, 2, 0])
    name = 'ciscoSyslogMIBNotifications'


class ciscoSyslogMIBConformance(NodeObject):
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 41, 3])
    name = 'ciscoSyslogMIBConformance'


class ciscoSyslogMIBCompliances(NodeObject):
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 41, 3, 1])
    name = 'ciscoSyslogMIBCompliances'


class ciscoSyslogMIBGroups(NodeObject):
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 41, 3, 2])
    name = 'ciscoSyslogMIBGroups'


class SyslogSeverity(pycopia.SMI.Basetypes.Enumeration):
    status = 1
    enumerations = [Enum(1, 'emergency'), Enum(2, 'alert'), Enum(3, 'critical'),
                    Enum(4, 'error'), Enum(5, 'warning'), Enum(6, 'notice'),
                    Enum(7, 'info'), Enum(8, 'debug')]


class clogNotificationsSent(ScalarObject):
    status = 1
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 41, 1, 1, 1])
    syntaxobject = pycopia.SMI.Basetypes.Counter32
    access = 4
    units = 'notifications'


class clogNotificationsEnabled(ScalarObject):
    access = 5
    status = 1
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 41, 1, 1, 2])
    syntaxobject = pycopia.SMI.Basetypes.TruthValue


class clogMaxSeverity(ScalarObject):
    access = 5
    status = 1
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 41, 1, 1, 3])
    syntaxobject = SyslogSeverity


class clogMsgIgnores(ScalarObject):
    status = 1
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 41, 1, 1, 4])
    syntaxobject = pycopia.SMI.Basetypes.Counter32
    access = 4
    units = 'messages'


class clogMsgDrops(ScalarObject):
    status = 1
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 41, 1, 1, 5])
    syntaxobject = pycopia.SMI.Basetypes.Counter32
    access = 4
    units = 'messages'


class clogHistTableMaxLength(ScalarObject):
    status = 1
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 41, 1, 2, 1])
    syntaxobject = pycopia.SMI.Basetypes.Integer32
    access = 5
    units = 'entries'


class clogHistMsgsFlushed(ScalarObject):
    status = 1
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 41, 1, 2, 2])
    syntaxobject = pycopia.SMI.Basetypes.Counter32
    access = 4
    units = 'messages'


class clogHistIndex(ColumnObject):
    access = 2
    status = 1
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 41, 1, 2, 3, 1, 1])
    syntaxobject = pycopia.SMI.Basetypes.Integer32


class clogHistFacility(ColumnObject):
    access = 4
    status = 1
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 41, 1, 2, 3, 1, 2])
    syntaxobject = pycopia.SMI.Basetypes.DisplayString


class clogHistSeverity(ColumnObject):
    access = 4
    status = 1
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 41, 1, 2, 3, 1, 3])
    syntaxobject = SyslogSeverity


class clogHistMsgName(ColumnObject):
    access = 4
    status = 1
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 41, 1, 2, 3, 1, 4])
    syntaxobject = pycopia.SMI.Basetypes.DisplayString


class clogHistMsgText(ColumnObject):
    access = 4
    status = 1
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 41, 1, 2, 3, 1, 5])
    syntaxobject = pycopia.SMI.Basetypes.DisplayString


class clogHistTimestamp(ColumnObject):
    access = 4
    status = 1
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 41, 1, 2, 3, 1, 6])
    syntaxobject = pycopia.SMI.Basetypes.TimeStamp


class clogHistoryEntry(RowObject):
    status = 1
    index = pycopia.SMI.Objects.IndexObjects([clogHistIndex], False)
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 41, 1, 2, 3, 1])
    access = 2
    columns = {'clogHistIndex': clogHistIndex,
               'clogHistFacility': clogHistFacility,
               'clogHistSeverity': clogHistSeverity,
               'clogHistMsgName': clogHistMsgName,
               'clogHistMsgText': clogHistMsgText,
               'clogHistTimestamp': clogHistTimestamp}


class clogMessageGenerated(NotificationObject):
    status = 1
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 41, 2, 0, 1])


class ciscoSyslogMIBGroup(GroupObject):
    access = 2
    status = 1
    OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 41, 3, 2, 1])
    group = [clogNotificationsSent, clogNotificationsEnabled, clogMaxSeverity,
             clogMsgIgnores, clogMsgDrops, clogHistTableMaxLength,
             clogHistMsgsFlushed, clogHistFacility, clogHistSeverity,
             clogHistMsgName, clogHistMsgText, clogHistTimestamp]


from pycopia import SMI
SMI.update_oidmap(__name__)
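# --- Illustrative note (not part of the original module) ---
# The generated classes are plain attribute containers, so a MIB object can
# be inspected without an SNMP agent, e.g. (a sketch, output shape may vary):
#
#     print(clogMaxSeverity.OID)           # the 1.3.6.1.4.1.9.9.41.1.1.3 node
#     print(clogMaxSeverity.syntaxobject)  # the SyslogSeverity enumeration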
import datetime as dt
import struct
import os
import logging

from hyo2.soundspeed.formats.readers.abstract import AbstractTextReader
from hyo2.soundspeed.profile.dicts import Dicts
from hyo2.soundspeed.base.callbacks.cli_callbacks import CliCallbacks

logger = logging.getLogger(__name__)


class Mvp(AbstractTextReader):  # TODO: ATYPICAL READER!!!
    """MVP reader"""

    protocols = {
        "NAVO_ISS60": 0,
        "UNDEFINED": 1
    }

    formats = {
        "ASVP": 0,
        "CALC": 1,
        "S12": 2,
        "M1": 3,
        "S05": 4,
        "S52": 5,
        "S10": 6,
    }

    def __init__(self):
        super(Mvp, self).__init__()  # the parent init sets up self._ext below
        self.desc = "MVP"
        self._ext.add('s12')
        self._ext.add('calc')
        self._ext.add('asvp')
        self._ext.add('m1')
        self._ext.add('s05')
        self._ext.add('s52')
        self._ext.add('s10')

        # for listener
        self.file_content = None
        self.header = None
        self.footer = None
        self.protocol = None
        self.format = None

    def init_from_listener(self, header, data_blocks, footer, protocol, fmt):
        self.init_data()  # create a new empty profile list
        self.ssp.append()  # append a new profile

        # initialize probe/sensor type
        self.ssp.cur.meta.sensor_type = Dicts.sensor_types["MVP"]
        self.ssp.cur.meta.probe_type = Dicts.probe_types["MVP"]

        self.file_content = data_blocks
        self.header = header
        self.footer = footer
        self.protocol = protocol
        self.format = fmt

        logger.info("reading ...")
        logger.info("data blocks: %s" % len(self.file_content))

        self.total_data = str()
        self._unify_packets()
        self.lines = self.total_data.splitlines()

        try:
            # log.info("got data:\n%s" % self.total_data)
            self._parse_header()
            self._parse_body()
        except RuntimeError as e:
            logger.error("error in data parsing, did you select the correct data format?")
            raise e

        self.ssp.cur.clone_data_to_proc()
        self.ssp.cur.init_sis()

    def read(self, data_path, settings, callbacks=CliCallbacks(), progress=None):  # UNUSED
        logger.debug('*** %s ***: start' % self.driver)

        self.s = settings
        self.cb = callbacks

        self.init_data()  # create a new empty profile list
        self.ssp.append()  # append a new profile

        _, file_ext = os.path.splitext(data_path)
        file_ext = file_ext.lower()
        if file_ext == ".asvp":
            self.format = self.formats["ASVP"]
        elif file_ext == ".calc":
            self.format = self.formats["CALC"]
        elif file_ext == ".s12":
            self.format = self.formats["S12"]
        elif file_ext == ".m1":
            self.format = self.formats["M1"]
        elif file_ext == ".s05":
            self.format = self.formats["S05"]
        elif file_ext == ".s52":
            self.format = self.formats["S52"]
        elif file_ext == ".s10":
            self.format = self.formats["S10"]
        else:
            raise RuntimeError("unknown format: %s" % self.format)

        # initialize probe/sensor type
        if self.format in [self.formats["CALC"], self.formats["M1"], self.formats["S10"]]:
            self.ssp.cur.meta.sensor_type = Dicts.sensor_types["SVP"]
        else:
            self.ssp.cur.meta.sensor_type = Dicts.sensor_types["MVP"]
        self.ssp.cur.meta.probe_type = Dicts.probe_types["MVP"]
        self.ssp.cur.meta.original_path = data_path

        self._read(data_path=data_path)
        self._parse_header()
        self._parse_body()

        self.fix()
        self.finalize()

        logger.debug('*** %s ***: done' % self.driver)
        return True

    def _parse_header(self):
        logger.info("reading > header")
        if self.format == self.formats["ASVP"]:
            logger.info("parsing header [ASVP]")
            self._parse_asvp_header()
        elif self.format == self.formats["CALC"]:
            logger.info("parsing header [CALC]")
            self._parse_calc_header()
        elif self.format == self.formats["S12"]:
            logger.info("parsing header [S12]")
            self._parse_s12_header()
        elif self.format == self.formats["M1"]:
            logger.info("parsing header [M1]")
            self._parse_m1_header()
        elif self.format == self.formats["S05"]:
            logger.info("parsing header [S05]")
            self._parse_s05_header()
        elif self.format == self.formats["S52"]:
            logger.info("parsing header [S52]")
            self._parse_s05_header()
        elif self.format == self.formats["S10"]:
            logger.info("parsing header [S10]")
            self._parse_s10_header()
        else:
            raise RuntimeError("unknown format: %s" % self.format)

    def _parse_body(self):
        logger.info("reading > body")
        # this assumes that the user configured the correct format.
        if self.format == self.formats["ASVP"]:
            logger.info("parsing body [ASVP]")
            self._parse_asvp_body()
        elif self.format == self.formats["CALC"]:
            logger.info("parsing body [CALC]")
            self._parse_calc_body()
        elif self.format == self.formats["S12"]:
            logger.info("parsing body [S12]")
            self._parse_s12_body()
        elif self.format == self.formats["M1"]:
            logger.info("parsing body [M1]")
            self._parse_m1_body()
        elif self.format == self.formats["S05"]:
            logger.info("parsing body [S05]")
            self._parse_s05_body()
        elif self.format == self.formats["S52"]:
            logger.info("parsing body [S52]")
            self._parse_s05_body()
        elif self.format == self.formats["S10"]:
            logger.info("parsing body [S10]")
            self._parse_s10_body()
        else:
            raise RuntimeError("unknown format: %s" % self.format)

        logger.info("read %s samples" % self.ssp.cur.data.num_samples)

    def _unify_packets(self):
        """unify all the received blocks"""
        for block_count in range(len(self.file_content)):
            logger.info("%s block has length %.1f KB"
                        % (block_count, len(self.file_content[block_count]) / 1024))
            if self.protocol == self.protocols["NAVO_ISS60"]:
                block_header = struct.unpack('4s4sIIII20000s4s4s', self.file_content[block_count])
                packet_number = block_header[2]
                total_num_packets = block_header[3]
                num_bytes = block_header[4]
                total_num_bytes = block_header[5]
                packet_data = self.file_content[block_count][24:24 + num_bytes]
                self.total_data += packet_data.decode()
                logger.info("packet %s/%s [%.1f KB]"
                            % (packet_number + 1, total_num_packets, total_num_bytes / 1024))
            elif self.protocol == self.protocols["UNDEFINED"]:
                self.total_data += self.file_content[block_count].decode()
            else:
                raise RuntimeError("unknown protocol %s" % self.protocol)

    def _parse_asvp_header(self):
        try:
            head_line = self.lines[0]
            fields = head_line.split()
        except (ValueError, IndexError):
            raise RuntimeError("unable to parse header")

        try:
            timestamp = fields[4]
            year = int(timestamp[0:4])
            month = int(timestamp[4:6])
            day = int(timestamp[6:8])
            hour = int(timestamp[8:10])
            minute = int(timestamp[10:12])
            second = 0
            if (year is not None) and (hour is not None):
                self.ssp.cur.meta.utc_time = dt.datetime(year, month, day, hour, minute, second)
                logger.info("date/time: %s" % self.ssp.cur.meta.utc_time)
        except (ValueError, IndexError):
            raise RuntimeError("unable to parse date/time: %s" % fields[4])

        try:
            self.ssp.cur.init_data(int(fields[12]))
            logger.info("number of samples: %s" % self.ssp.cur.data.num_samples)
        except (ValueError, IndexError):
            raise RuntimeError("unable to parse the number of samples")

        try:
            self.ssp.cur.meta.latitude = float(fields[5])
            logger.info("latitude: %s" % self.ssp.cur.meta.latitude)
        except (ValueError, IndexError):
            raise RuntimeError("unable to parse the latitude")

        try:
            self.ssp.cur.meta.longitude = float(fields[6])
            logger.info("longitude: %s" % self.ssp.cur.meta.longitude)
        except (ValueError, IndexError):
            raise RuntimeError("unable to parse the longitude")

        self.samples_offset = len(head_line)
        logger.info("samples offset: %s" % self.samples_offset)

    def _parse_asvp_body(self):
        count = 0
        for line in self.total_data[self.samples_offset:len(self.total_data)].splitlines():
            try:
                self.ssp.cur.data.depth[count], self.ssp.cur.data.speed[count] = line.split()
            except ValueError:
                if not line:
                    logger.info("skipping empty line (count %s)" % count)
                else:
                    logger.error("skipping line: %s" % line)
                continue
            count += 1
        self.ssp.cur.data_resize(count)

    def _parse_calc_header(self):
        try:
            # Date [dd/mm/yyyy]: 09/04/2013
            date_field = self.total_data.splitlines()[-1].split()[-1]
            day = int(date_field.split("/")[0])
            month = int(date_field.split("/")[1])
            year = int(date_field.split("/")[2])
            logger.info("date: %s %s %s" % (year, month, day))
        except (ValueError, IndexError):
            raise RuntimeError("unable to parse the date")

        try:
            # Time [hh:mm:ss.ss]: 13:24:09.39
            time_field = self.total_data.splitlines()[-2].split()[-1]
            hour = int(time_field.split(":")[0])
            minute = int(time_field.split(":")[1])
            second = float(time_field.split(":")[2])
            logger.info("time: %s %s %s" % (hour, minute, second))
        except (ValueError, IndexError):
            raise RuntimeError("unable to parse the time")

        try:
            if (year is not None) and (hour is not None):
                # second truncation applied
                self.ssp.cur.meta.utc_time = dt.datetime(year, month, day, hour, minute, int(second))
                logger.info("datetime: %s" % self.ssp.cur.meta.utc_time)
        except (ValueError, IndexError, TypeError) as e:
            raise RuntimeError("unable to convert to datetime: %s" % e)

        try:
            # LON (dddmm.mmmmmmm,E): 05557.4253510,W
            lon_field = self.total_data.splitlines()[-3].split()[-1].split(",")[0]
            lon_deg = int(lon_field[0:3])
            lon_min = float(lon_field[3:-1])
            lon_hemi = self.total_data.splitlines()[-3].split()[-1].split(",")[-1]
            self.ssp.cur.meta.longitude = lon_deg + lon_min / 60.0
            if lon_hemi == "W" or lon_hemi == "w":
                self.ssp.cur.meta.longitude *= -1
            logger.info("longitude: %s" % self.ssp.cur.meta.longitude)
        except (ValueError, IndexError, TypeError) as e:
            raise RuntimeError("unable to convert to longitude: %s" % e)

        try:
            # LAT ( ddmm.mmmmmmm,N): 4249.4583290,N
            lat_field = self.total_data.splitlines()[-4].split()[-1].split(",")[0]
            lat_deg = int(lat_field[0:2])
            lat_min = float(lat_field[2:-1])
            lat_hemi = self.total_data.splitlines()[-4].split()[-1].split(",")[-1]
            self.ssp.cur.meta.latitude = lat_deg + lat_min / 60.0
            if lat_hemi == "S" or lat_hemi == "s":
                self.ssp.cur.meta.latitude *= -1
            logger.info("latitude: %s" % self.ssp.cur.meta.latitude)
        except (ValueError, IndexError, TypeError) as e:
            # the original error message said "longitude" here by copy-paste
            raise RuntimeError("unable to convert to latitude: %s" % e)

        self.ssp.cur.init_data(len(self.total_data.splitlines()))

    def _parse_calc_body(self):
        count = 0
        for line in self.total_data.splitlines()[5:-9]:
            fields = line.split()
            if len(fields) != 3:
                logger.info("skipping %s row" % count)
                continue
            try:
                self.ssp.cur.data.depth[count] = float(fields[0])
                self.ssp.cur.data.speed[count] = float(fields[1])
                self.ssp.cur.data.temp[count] = float(fields[2])
            except (ValueError, IndexError, TypeError) as e:
                logger.error("skipping %s row: %s" % (count, e))
                continue
            count += 1
        self.ssp.cur.data_resize(count)

    def _parse_m1_header(self):
        lines = self.total_data.splitlines()

        date_token = "Date (dd/mm/yyyy):"
        time_token = "Time (hh|mm|ss.s):"
        lat_token = "LAT ( ddmm.mmmmmmm,N):"
        long_token = "LON (dddmm.mmmmmmm,E):"
        data_token = "<END_OF_HEADER> "

        day = None
        month = None
        year = None
        hour = None
        minute = None
        second = None

        for idx, line in enumerate(lines):

            if line[:len(date_token)] == date_token:
                # logger.debug("date string: %s" % line)
                try:
                    # Date [dd/mm/yyyy]: 09/04/2013
                    date_field = line.split(":")[-1].strip()
                    day = int(date_field.split("/")[0])
                    month = int(date_field.split("/")[1])
                    year = int(date_field.split("/")[2])
                    logger.info("date: %s %s %s" % (year, month, day))
                except (ValueError, IndexError) as e:
                    raise RuntimeError("unable to parse the date: %s" % e)

            elif line[:len(time_token)] == time_token:
                # logger.debug("time string: %s" % line)
                try:
                    # Time [hh:mm:ss.ss]: 13:24:09.39
                    time_field = line.split()[-1].strip()
                    hour = int(time_field.split(":")[0])
                    minute = int(time_field.split(":")[1])
                    second = float(time_field.split(":")[2])
                    logger.info("time: %s %s %s" % (hour, minute, second))
                except (ValueError, IndexError):
                    raise RuntimeError("unable to parse the time")

            elif line[:len(long_token)] == long_token:
                # logger.debug("longitude string: %s" % line)
                try:
                    # LON (dddmm.mmmmmmm,E): 05557.4253510,W
                    lon_field = line.split()[-1].split(",")[0].strip()
                    lon_deg = int(lon_field[0:3])
                    lon_min = float(lon_field[3:-1])
                    lon_hemi = line.split()[-1].split(",")[-1].strip()
                    self.ssp.cur.meta.longitude = lon_deg + lon_min / 60.0
                    if lon_hemi == "W" or lon_hemi == "w":
                        self.ssp.cur.meta.longitude *= -1
                    logger.info("longitude: %s" % self.ssp.cur.meta.longitude)
                except (ValueError, IndexError, TypeError) as e:
                    raise RuntimeError("unable to convert to longitude: %s" % e)

            elif line[:len(lat_token)] == lat_token:
                # logger.debug("latitude string: %s" % line)
                try:
                    # LAT ( ddmm.mmmmmmm,N): 4249.4583290,N
                    lat_field = line.split()[-1].split(",")[0].strip()
                    lat_deg = int(lat_field[0:2])
                    lat_min = float(lat_field[2:-1])
                    lat_hemi = line.split()[-1].split(",")[-1].strip()
                    self.ssp.cur.meta.latitude = lat_deg + lat_min / 60.0
                    if lat_hemi == "S" or lat_hemi == "s":
                        self.ssp.cur.meta.latitude *= -1
                    logger.info("latitude: %s" % self.ssp.cur.meta.latitude)
                except (ValueError, IndexError, TypeError) as e:
                    # the original error message said "longitude" here
                    raise RuntimeError("unable to convert to latitude: %s" % e)

            elif line[:len(data_token)] == data_token:
                self.samples_offset = idx + 17
                logger.debug("data starts at row: %s" % self.samples_offset)
                break

        try:
            if (year is not None) and (hour is not None):
                # second truncation applied
                self.ssp.cur.meta.utc_time = dt.datetime(year, month, day, hour, minute, int(second))
                logger.info("datetime: %s" % self.ssp.cur.meta.utc_time)
        except (ValueError, IndexError, TypeError) as e:
            raise RuntimeError("unable to convert to datetime: %s" % e)

        self.ssp.cur.init_data(len(lines) - self.samples_offset)

    def _parse_m1_body(self):
        lines = self.total_data.splitlines()
        count = 0
        for idx, line in enumerate(lines):

            if idx < self.samples_offset:
                continue
            if idx == self.samples_offset:
                logger.debug("first data row: %s" % line)

            fields = line.split(",")
            if len(fields) == 3:
                try:
                    self.ssp.cur.data.depth[count] = float(fields[1].strip())
                    self.ssp.cur.data.speed[count] = float(fields[2].strip())
                except (ValueError, IndexError, TypeError) as e:
                    logger.error("skipping %s row: %s" % (idx, e))
                    continue
                count += 1
            elif len(fields) >= 6:
                try:
                    self.ssp.cur.data.depth[count] = float(fields[1].strip())
                    self.ssp.cur.data.speed[count] = float(fields[2].strip())
                    self.ssp.cur.data.temp[count] = float(fields[3].strip())
                    self.ssp.cur.data.sal[count] = float(fields[5].strip())
                except (ValueError, IndexError, TypeError) as e:
                    logger.error("skipping %s row: %s" % (idx, e))
                    continue
                count += 1
            else:
                logger.info("skipping %s row" % idx)
                continue

        self.ssp.cur.data_resize(count)

    def _parse_s12_header(self):
        try:
            # $MVS12,00002,0095,132409,09,04,2013,6.75,1514.76,18.795,31.9262,
            header_fields = self.total_data.splitlines()[0].split(",")
        except (ValueError, IndexError, TypeError) as e:
            raise RuntimeError("unable to parse header fields: %s" % e)

        try:
            year = int(header_fields[6])
            month = int(header_fields[5])
            day = int(header_fields[4])
            hour = int(header_fields[3][0:2])
            minute = int(header_fields[3][2:4])
            second = int(header_fields[3][4:6])
            if (year is not None) and (hour is not None):
                self.ssp.cur.meta.utc_time = dt.datetime(year, month, day, hour, minute, second)
                logger.info("date/time: %s" % self.ssp.cur.meta.utc_time)
        except (ValueError, IndexError, TypeError) as e:
            raise RuntimeError("unable to parse header fields: %s" % e)

        try:
            # 4249.46,N,05557.43,W,0.0,AML_uSVPT*15\
            footer_fields = self.total_data.splitlines()[-1].split(",")
        except (ValueError, IndexError, TypeError) as e:
            raise RuntimeError("unable to parse footer_fields: %s" % e)

        try:
            lat_field = footer_fields[0]
            lat_deg = int(lat_field[0:2])
            lat_min = float(lat_field[2:])
            lat_hemi = footer_fields[1]
            logger.debug("lat: %s %s %s" % (lat_deg, lat_min, lat_hemi))
            self.ssp.cur.meta.latitude = lat_deg + lat_min / 60.0
            if lat_hemi == "S" or lat_hemi == "s":
                self.ssp.cur.meta.latitude *= -1
            logger.info("latitude: %s" % self.ssp.cur.meta.latitude)
        except (ValueError, IndexError, TypeError) as e:
            raise RuntimeError("unable to parse latitude: %s" % e)

        try:
            lon_field = footer_fields[2]
            lon_deg = int(lon_field[0:3])
            lon_min = float(lon_field[3:])
            lon_hemi = footer_fields[3]
            logger.debug("lon: %s %s %s" % (lon_deg, lon_min, lon_hemi))
            self.ssp.cur.meta.longitude = lon_deg + lon_min / 60.0
            if lon_hemi == "W" or lon_hemi == "w":
                self.ssp.cur.meta.longitude *= -1
            logger.info("longitude: %s" % self.ssp.cur.meta.longitude)
        except (ValueError, IndexError, TypeError) as e:
            raise RuntimeError("unable to parse longitude: %s" % e)

        try:
            self.ssp.cur.init_data(len(self.total_data.splitlines()))
            logger.info("number of samples: %s" % self.ssp.cur.data.num_samples)
        except (ValueError, IndexError, TypeError) as e:
            raise RuntimeError("unable to parse the number of samples: %s" % e)

        try:
            sensor_field = footer_fields[5].split("*")[0]
            if sensor_field == "AML_SmartSVP":
                self.ssp.cur.meta.sensor_type = Dicts.sensor_types["SVP"]
            elif sensor_field == "AML_uSVPT":
                self.ssp.cur.meta.sensor_type = Dicts.sensor_types["SVP"]
            elif sensor_field == "AML_uCTD":
                self.ssp.cur.meta.sensor_type = Dicts.sensor_types["CTD"]
            else:
                logger.warning("unknown sensor name: %s" % sensor_field)
        except (ValueError, IndexError, TypeError) as e:
            raise RuntimeError("unable to parse the sensor name: %s" % e)

        count = 0
        try:
            self.ssp.cur.data.depth[count] = float(header_fields[-5])
            self.ssp.cur.data.speed[count] = float(header_fields[-4])
            self.ssp.cur.data.temp[count] = float(header_fields[-3])
            self.ssp.cur.data.sal[count] = float(header_fields[-2])
        except (ValueError, IndexError, TypeError) as e:
            logger.error("skipping first line: %s" % e)

    def _parse_s12_body(self):
        count = 1  # since the first sample is in the header
        for line in self.total_data.splitlines()[1:-1]:
            try:
                fields = line.split(",")
                self.ssp.cur.data.depth[count] = float(fields[-5])
                self.ssp.cur.data.speed[count] = float(fields[-4])
                self.ssp.cur.data.temp[count] = float(fields[-3])
                self.ssp.cur.data.sal[count] = float(fields[-2])
            except (ValueError, IndexError, TypeError) as e:
                logger.error("skipping line %s: %s" % (count, e))
                continue
            count += 1
        self.ssp.cur.data_resize(count)

    def _parse_s05_header(self):
        try:
            # $MVS12,00002,0095,132409,09,04,2013,6.75,1514.76,18.795,31.9262,
            header_fields = self.total_data.splitlines()[0].split(",")
        except (ValueError, IndexError, TypeError) as e:
            raise RuntimeError("unable to parse header fields: %s" % e)

        try:
            year = int(header_fields[6])
            month = int(header_fields[5])
            day = int(header_fields[4])
            hour = int(header_fields[3][0:2])
            minute = int(header_fields[3][2:4])
            second = int(header_fields[3][4:6])
            if (year is not None) and (hour is not None):
                self.ssp.cur.meta.utc_time = dt.datetime(year, month, day, hour, minute, second)
                logger.info("date/time: %s" % self.ssp.cur.meta.utc_time)
        except (ValueError, IndexError, TypeError) as e:
            raise RuntimeError("unable to parse header fields: %s" % e)

        try:
            # 4249.46,N,05557.43,W,0.0,AML_uSVPT*15\
            footer_fields = self.total_data.splitlines()[-1].split(",")
        except (ValueError, IndexError, TypeError) as e:
            raise RuntimeError("unable to parse footer_fields: %s" % e)

        try:
            lat_field = footer_fields[0]
            lat_deg = int(lat_field[0:2])
            lat_min = float(lat_field[2:])
            lat_hemi = footer_fields[1]
            logger.debug("lat: %s %s %s" % (lat_deg, lat_min, lat_hemi))
            self.ssp.cur.meta.latitude = lat_deg + lat_min / 60.0
            if lat_hemi == "S" or lat_hemi == "s":
                self.ssp.cur.meta.latitude *= -1
            logger.info("latitude: %s" % self.ssp.cur.meta.latitude)
        except (ValueError, IndexError, TypeError) as e:
            raise RuntimeError("unable to parse latitude: %s" % e)

        try:
            lon_field = footer_fields[2]
            lon_deg = int(lon_field[0:3])
            lon_min = float(lon_field[3:])
            lon_hemi = footer_fields[3]
            logger.debug("lon: %s %s %s" % (lon_deg, lon_min, lon_hemi))
            self.ssp.cur.meta.longitude = lon_deg + lon_min / 60.0
            if lon_hemi == "W" or lon_hemi == "w":
                self.ssp.cur.meta.longitude *= -1
            logger.info("longitude: %s" % self.ssp.cur.meta.longitude)
        except (ValueError, IndexError, TypeError) as e:
            raise RuntimeError("unable to parse longitude: %s" % e)

        try:
            self.ssp.cur.init_data(len(self.total_data.splitlines()))
            logger.info("number of samples: %s" % self.ssp.cur.data.num_samples)
        except (ValueError, IndexError, TypeError) as e:
            raise RuntimeError("unable to parse the number of samples: %s" % e)

        try:
            sensor_field = footer_fields[5].split("*")[0]
            if sensor_field == "AML_SmartSVP":
                self.ssp.cur.meta.sensor_type = Dicts.sensor_types["SVP"]
            elif sensor_field == "AML_uSVPT":
                self.ssp.cur.meta.sensor_type = Dicts.sensor_types["SVP"]
            elif sensor_field == "AML_uCTD":
                self.ssp.cur.meta.sensor_type = Dicts.sensor_types["CTD"]
            else:
                logger.warning("unknown sensor name: %s" % sensor_field)
        except (ValueError, IndexError, TypeError) as e:
            raise RuntimeError("unable to parse the sensor name: %s" % e)

        count = 0
        try:
            self.ssp.cur.data.pressure[count] = float(header_fields[-5])
            self.ssp.cur.data.temp[count] = float(header_fields[-3])
            self.ssp.cur.data.conductivity[count] = float(header_fields[-2])
        except (ValueError, IndexError, TypeError) as e:
            logger.error("skipping first line: %s" % e)

    def _parse_s05_body(self):
        count = 1  # since the first sample is in the header
        for line in self.total_data.splitlines()[1:-1]:
            try:
                fields = line.split(",")
                self.ssp.cur.data.pressure[count] = float(fields[-5])
                self.ssp.cur.data.temp[count] = float(fields[-3])
                self.ssp.cur.data.conductivity[count] = float(fields[-2])
            except (ValueError, IndexError, TypeError) as e:
                logger.error("skipping line %s: %s" % (count, e))
                continue
            count += 1
        self.ssp.cur.data_resize(count)

    def _parse_s10_header(self):
        try:
            # $MVS12,00002,0095,132409,09,04,2013,6.75,1514.76,18.795,31.9262,
            header_fields = self.total_data.splitlines()[0].split(",")
        except (ValueError, IndexError, TypeError) as e:
            raise RuntimeError("unable to parse header fields: %s" % e)

        try:
            year = int(header_fields[6])
            month = int(header_fields[5])
            day = int(header_fields[4])
            hour = int(header_fields[3][0:2])
            minute = int(header_fields[3][2:4])
            second = int(header_fields[3][4:6])
            if (year is not None) and (hour is not None):
                self.ssp.cur.meta.utc_time = dt.datetime(year, month, day, hour, minute, second)
                logger.info("date/time: %s" % self.ssp.cur.meta.utc_time)
        except (ValueError, IndexError, TypeError) as e:
            raise RuntimeError("unable to parse header fields: %s" % e)

        try:
            # 4249.46,N,05557.43,W,0.0,AML_uSVPT*15\
            footer_fields = self.total_data.splitlines()[-1].split(",")
        except (ValueError, IndexError, TypeError) as e:
            raise RuntimeError("unable to parse footer_fields: %s" % e)

        try:
            lat_field = footer_fields[0]
            lat_deg = int(lat_field[0:2])
            lat_min = float(lat_field[2:])
            lat_hemi = footer_fields[1]
            logger.debug("lat: %s %s %s" % (lat_deg, lat_min, lat_hemi))
            self.ssp.cur.meta.latitude = lat_deg + lat_min / 60.0
            if lat_hemi == "S" or lat_hemi == "s":
                self.ssp.cur.meta.latitude *= -1
            logger.info("latitude: %s" % self.ssp.cur.meta.latitude)
        except (ValueError, IndexError, TypeError) as e:
            raise RuntimeError("unable to parse latitude: %s" % e)

        try:
            lon_field = footer_fields[2]
            lon_deg = int(lon_field[0:3])
            lon_min = float(lon_field[3:])
            lon_hemi = footer_fields[3]
            logger.debug("lon: %s %s %s" % (lon_deg, lon_min, lon_hemi))
            self.ssp.cur.meta.longitude = lon_deg + lon_min / 60.0
            if lon_hemi == "W" or lon_hemi == "w":
                self.ssp.cur.meta.longitude *= -1
            logger.info("longitude: %s" % self.ssp.cur.meta.longitude)
        except (ValueError, IndexError, TypeError) as e:
            raise RuntimeError("unable to parse longitude: %s" % e)

        try:
            self.ssp.cur.init_data(len(self.total_data.splitlines()))
            logger.info("number of samples: %s" % self.ssp.cur.data.num_samples)
        except (ValueError, IndexError, TypeError) as e:
            raise RuntimeError("unable to parse the number of samples: %s" % e)

        # ALWAYS SVP
        # try:
        #     sensor_field = footer_fields[5].split("*")[0]
        #     if sensor_field == "AML_SmartSVP":
        #         self.ssp.cur.meta.sensor_type = Dicts.sensor_types["SVP"]
        #     elif sensor_field == "AML_uSVPT":
        #         self.ssp.cur.meta.sensor_type = Dicts.sensor_types["SVP"]
        #     elif sensor_field == "AML_uCTD":
        #         self.ssp.cur.meta.sensor_type = Dicts.sensor_types["CTD"]
        #     else:
        #         logger.warning("unknown sensor name: %s" % sensor_field)
        #
        # except (ValueError, IndexError, TypeError) as e:
        #     raise RuntimeError("unable to parse the sensor name: %s" % e)

        count = 0
        try:
            self.ssp.cur.data.depth[count] = float(header_fields[-5])
            self.ssp.cur.data.speed[count] = float(header_fields[-4])
        except (ValueError, IndexError, TypeError) as e:
            logger.error("skipping first line: %s" % e)

    def _parse_s10_body(self):
        count = 1  # since the first sample is in the header
        for line in self.total_data.splitlines()[1:-1]:
            try:
                fields = line.split(",")
                self.ssp.cur.data.depth[count] = float(fields[-5])
                self.ssp.cur.data.speed[count] = float(fields[-4])
            except (ValueError, IndexError, TypeError) as e:
                logger.error("skipping line %s: %s" % (count, e))
                continue
            count += 1
        self.ssp.cur.data_resize(count)
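# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of reading a cast file from disk. The path is an
# assumption, and settings=None is a placeholder: a real run passes the
# library's settings object, which fix()/finalize() may consult.
if __name__ == '__main__':
    reader = Mvp()
    reader.read(data_path="example_cast.asvp", settings=None)
    print(reader.ssp.cur.data.num_samples)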
import smtplib


def sendemail(to_addr_list, subject, message, smtpserver='smtp.gmail.com:587'):
    try:
        email_text = """\
From: %s
To: %s
Subject: %s

%s
""" % (from_addr, ", ".join(to_addr_list), subject, message)
        server = smtplib.SMTP(smtpserver)
        server.starttls()
        server.login(smtp_login, smtp_password)
        # send the full text with headers; the original passed the bare
        # body, which dropped the From/To/Subject headers
        problems = server.sendmail(from_addr, to_addr_list, email_text)
        print(problems)
        server.quit()
        return True
    except Exception as e:
        print("sending failed: %s" % e)
        return False


# configurazione.txt holds "|"-separated settings; fields 3-5 are the sender
# address, the SMTP login and the SMTP password
with open("configurazione.txt", 'r') as chiavi:
    dati = chiavi.readline()
_, _, from_addr, smtp_login, smtp_password, _, _, _, _ = dati.split("|", 8)

with open("maildump.csv", "r") as email_file:
    email = email_file.readline()
mail = email.split(";")

for indirizzo in mail:
    print(indirizzo)
    # the Italian body tells "Condivisione" users that their account was
    # removed for the new school year and that a new one must be created;
    # the address is wrapped in a list because sendemail joins its recipients
    successo = sendemail(
        [indirizzo],
        "Chiusura account",
        "Gentile utente di Condivisione,\nIn vista dell'inizio di un nuovo anno scolastico, "
        "la sua utenza su Condivisione e' stata rimossa.\nPer tornare ad usufruire dei servizi "
        "di Condivisione, le sara' necessario creare una nuova utenza.\n\n"
        "Grazie per aver utilizzato Condivisione!\n"
        "Questo messaggio e' stato creato automaticamente.")
    print(successo)
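# --- Illustrative alternative (not part of the original script) ---
# Building the message with the stdlib email package avoids hand-rolled
# headers and encodes non-ASCII text (such as the Italian body above)
# correctly. The helper name is hypothetical.
from email.mime.text import MIMEText

def build_message(from_addr, to_addrs, subject, body):
    msg = MIMEText(body, "plain", "utf-8")
    msg["From"] = from_addr
    msg["To"] = ", ".join(to_addrs)
    msg["Subject"] = subject
    return msg.as_string()  # ready to pass to smtplib.SMTP.sendmail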
"""TxORM Property Unit Tests """ import gc import uuid from fractions import Fraction as fraction from decimal import Decimal as decimal from datetime import datetime, date, time, timedelta from twisted.trial import unittest from txorm.compat import b, u from txorm.compiler.state import State from txorm.object_data import get_obj_data from txorm.compiler.plain_sql import SQLRaw from txorm.compiler.expressions import Select from txorm.compiler import Field, txorm_compile from txorm.property.base import SimpleProperty, Property from txorm.exceptions import NoneError, PropertyPathError from txorm.property import ( Int, Bool, Float, Decimal, RawStr, Unicode, DateTime, Date, Time, TimeDelta, Enum, MysqlEnum, UUID, Fraction, PropertyRegistry ) from txorm.variable import ( Variable, BoolVariable, IntVariable, FloatVariable, DecimalVariable, RawStrVariable, UnicodeVariable, DateTimeVariable, DateVariable, TimeVariable, TimeDeltaVariable, EnumVariable, UUIDVariable, FractionVariable ) from .test_expressions import assert_variables class Wrapper(object): def __init__(self, obj): self.obj = obj __object_data__ = property(lambda self: self.obj.__object_data__) class DummyVariable(Variable): """Dummy varaible used for testing purposes """ pass class Custom(SimpleProperty): """Dummy property used for testing purposes """ variable_class = DummyVariable class PropertyTest(unittest.TestCase): def setUp(self): class Dummy(object): """Dummy class for testing purposes """ __database_table__ = 'dummytable' prop1 = Custom('field1', primary=True, size=11, unsigned=True) prop2 = Custom() prop3 = Custom('field3', default=50, allow_none=False) prop4 = Custom( 'field4', index=True, unique=True, auto_increment=True, array={'other_value': 1} ) class SubDummy(Dummy): """SubDummy class for testing purposes """ __database_table__ = 'subdummytable' self.Dummy = Dummy self.SubDummy = SubDummy def test_field(self): self.assertTrue(isinstance(self.Dummy.prop1, Field)) def test_cls(self): self.assertEqual(self.Dummy.prop1.cls, self.Dummy) self.assertEqual(self.Dummy.prop2.cls, self.Dummy) self.assertEqual(self.SubDummy.prop1.cls, self.SubDummy) self.assertEqual(self.SubDummy.prop2.cls, self.SubDummy) def test_name(self): self.assertEqual(self.Dummy.prop1.name, 'field1') def test_automatic_name(self): self.assertEqual(self.Dummy.prop2.name, 'prop2') def test_size(self): self.assertEqual(self.Dummy.prop1.size, 11) def test_unsigned(self): self.assertTrue(self.Dummy.prop1.unsigned) def test_auto_unsigned(self): self.assertFalse(self.Dummy.prop2.unsigned) def test_index(self): self.assertTrue(self.Dummy.prop4.index) def test_auto_index(self): self.assertFalse(self.Dummy.prop2.index) def test_unique(self): self.assertTrue(self.Dummy.prop4.unique) def test_auto_unique(self): self.assertFalse(self.Dummy.prop2.unique) def test_autoincrement(self): self.assertTrue(self.Dummy.prop4.auto_increment) def test_auto_autoincrement(self): self.assertFalse(self.Dummy.prop2.auto_increment) def test_array(self): self.assertEqual(self.Dummy.prop4.array['other_value'], 1) def test_auto_array(self): self.assertIsNone(self.Dummy.prop1.array) def test_auto_table(self): self.assertEqual(self.Dummy.prop1.table, self.Dummy) self.assertEqual(self.Dummy.prop2.table, self.Dummy) def test_auto_table_subclass(self): self.assertEqual(self.Dummy.prop1.table, self.Dummy) self.assertEqual(self.Dummy.prop2.table, self.Dummy) self.assertEqual(self.SubDummy.prop1.table, self.SubDummy) self.assertEqual(self.SubDummy.prop2.table, self.SubDummy) def 
test_variable_factory(self): variable = self.Dummy.prop1.variable_factory() self.assertTrue(isinstance(variable, DummyVariable)) self.assertFalse(variable.is_defined) variable = self.Dummy.prop3.variable_factory() self.assertTrue(isinstance(variable, DummyVariable)) self.assertTrue(variable.is_defined) def test_variable_factory_validator_attribute(self): prop = Custom() class Class1(object): __database_table__ = 'table1' prop1 = prop class Class2(object): __database_table__ = 'table2' prop2 = prop args = [] def validator(obj, attr, value): args.append((obj, attr, value)) variable1 = Class1.prop1.variable_factory(validator=validator) variable2 = Class2.prop2.variable_factory(validator=validator) variable1.set(1) variable2.set(2) self.assertEqual(args, [(None, 'prop1', 1), (None, 'prop2', 2)]) def test_default(self): obj = self.SubDummy() self.assertEqual(obj.prop1, None) self.assertEqual(obj.prop2, None) self.assertEqual(obj.prop3, 50) self.assertEqual(obj.prop4, None) def test_set_get(self): obj = self.Dummy() obj.prop1 = 10 obj.prop2 = 20 obj.prop3 = 30 obj.prop4 = 40 self.assertEqual(obj.prop1, 10) self.assertEqual(obj.prop2, 20) self.assertEqual(obj.prop3, 30) self.assertEqual(obj.prop4, 40) def test_set_get_none(self): obj = self.Dummy() obj.prop1 = None obj.prop2 = None self.assertEqual(obj.prop1, None) self.assertEqual(obj.prop2, None) self.assertRaises(NoneError, setattr, obj, 'prop3', None) def test_set_with_validator(self): args = [] def validator(obj, attr, value): args[:] = obj, attr, value return 42 class Class(object): __database_table__ = 'mytable' prop = Custom('column', primary=True, validator=validator) obj = Class() obj.prop = 21 self.assertEqual(args, [obj, 'prop', 21]) self.assertEqual(obj.prop, 42) def test_set_get_subclass(self): obj = self.SubDummy() obj.prop1 = 10 obj.prop2 = 20 obj.prop3 = 30 obj.prop4 = 40 self.assertEqual(obj.prop1, 10) self.assertEqual(obj.prop2, 20) self.assertEqual(obj.prop3, 30) self.assertEqual(obj.prop4, 40) def test_set_get_explicitly(self): obj = self.Dummy() prop1 = self.Dummy.prop1 prop2 = self.Dummy.prop2 prop3 = self.Dummy.prop3 prop4 = self.Dummy.prop4 prop1.__set__(obj, 10) prop2.__set__(obj, 20) prop3.__set__(obj, 30) prop4.__set__(obj, 40) self.assertEqual(prop1.__get__(obj), 10) self.assertEqual(prop2.__get__(obj), 20) self.assertEqual(prop3.__get__(obj), 30) self.assertEqual(prop4.__get__(obj), 40) def test_set_get_subclass_explicitly(self): obj = self.SubDummy() prop1 = self.Dummy.prop1 prop2 = self.Dummy.prop2 prop3 = self.Dummy.prop3 prop4 = self.Dummy.prop4 prop1.__set__(obj, 10) prop2.__set__(obj, 20) prop3.__set__(obj, 30) prop4.__set__(obj, 40) self.assertEqual(prop1.__get__(obj), 10) self.assertEqual(prop2.__get__(obj), 20) self.assertEqual(prop3.__get__(obj), 30) self.assertEqual(prop4.__get__(obj), 40) def test_delete(self): obj = self.Dummy() obj.prop1 = 10 obj.prop2 = 20 obj.prop3 = 30 obj.prop4 = 40 del obj.prop1 del obj.prop2 del obj.prop3 del obj.prop4 self.assertEqual(obj.prop1, None) self.assertEqual(obj.prop2, None) self.assertEqual(obj.prop3, None) self.assertEqual(obj.prop4, None) def test_delete_subclass(self): obj = self.SubDummy() obj.prop1 = 10 obj.prop2 = 20 obj.prop3 = 30 obj.prop4 = 40 del obj.prop1 del obj.prop2 del obj.prop3 del obj.prop4 self.assertEqual(obj.prop1, None) self.assertEqual(obj.prop2, None) self.assertEqual(obj.prop3, None) self.assertEqual(obj.prop4, None) def test_delete_explicitly(self): obj = self.Dummy() obj.prop1 = 10 obj.prop2 = 20 obj.prop3 = 30 obj.prop4 = 40 
self.Dummy.prop1.__delete__(obj) self.Dummy.prop2.__delete__(obj) self.Dummy.prop3.__delete__(obj) self.Dummy.prop4.__delete__(obj) self.assertEqual(obj.prop1, None) self.assertEqual(obj.prop2, None) self.assertEqual(obj.prop3, None) self.assertEqual(obj.prop4, None) def test_delete_subclass_explicitly(self): obj = self.SubDummy() obj.prop1 = 10 obj.prop2 = 20 obj.prop3 = 30 obj.prop4 = 40 self.Dummy.prop1.__delete__(obj) self.Dummy.prop2.__delete__(obj) self.Dummy.prop3.__delete__(obj) self.Dummy.prop4.__delete__(obj) self.assertEqual(obj.prop1, None) self.assertEqual(obj.prop2, None) self.assertEqual(obj.prop3, None) self.assertEqual(obj.prop4, None) def test_comparable_expression(self): prop1 = self.Dummy.prop1 prop2 = self.Dummy.prop2 prop3 = self.Dummy.prop3 prop4 = self.Dummy.prop4 expression = Select( SQLRaw('*'), (prop1 == 'value1') & (prop2 == 'value2') & (prop3 == 'value3') & (prop4 == 'value4') ) state = State() statement = txorm_compile(expression, state) self.assertEqual( statement, 'SELECT * FROM dummytable WHERE ' 'dummytable.field1 = ? AND ' 'dummytable.prop2 = ? AND ' 'dummytable.field3 = ? AND ' 'dummytable.field4 = ?' ) assert_variables(self, state.parameters, [ DummyVariable('value1'), DummyVariable('value2'), DummyVariable('value3'), DummyVariable('value4') ]) def test_comparable_expression_subclass(self): prop1 = self.SubDummy.prop1 prop2 = self.SubDummy.prop2 prop3 = self.SubDummy.prop3 prop4 = self.SubDummy.prop4 expression = Select( SQLRaw('*'), (prop1 == 'value1') & (prop2 == 'value2') & (prop3 == 'value3') & (prop4 == 'value4') ) state = State() statement = txorm_compile(expression, state) self.assertEqual( statement, 'SELECT * FROM subdummytable WHERE ' 'subdummytable.field1 = ? AND ' 'subdummytable.prop2 = ? AND ' 'subdummytable.field3 = ? AND ' 'subdummytable.field4 = ?' 
) assert_variables(self, state.parameters, [ DummyVariable('value1'), DummyVariable('value2'), DummyVariable('value3'), DummyVariable('value4') ]) def test_set_get_delete_with_wrapper(self): obj = self.Dummy() get_obj_data(obj) # ensure the object data exsts self.Dummy.prop1.__set__(Wrapper(obj), 10) self.assertEqual(self.Dummy.prop1.__get__(Wrapper(obj)), 10) self.Dummy.prop1.__delete__(Wrapper(obj)) self.assertEqual(self.Dummy.prop1.__get__(Wrapper(obj)), None) def test_reuse_of_instance(self): prop = Custom() class Class1(object): __database_table__ = 'table1' prop1 = prop class Class2(object): __database_table__ = 'table2' prop2 = prop self.assertEqual(Class1.prop1.name, 'prop1') self.assertEqual(Class1.prop1.table, Class1) self.assertEqual(Class2.prop2.name, 'prop2') self.assertEqual(Class2.prop2.table, Class2) self.assertEqual(Class1.prop1, Class2.prop2) def test_creattion_counter(self): self.assertTrue( self.Dummy.prop3._creation_order > self.Dummy.prop2._creation_order ) self.assertTrue( self.Dummy.prop2._creation_order > self.Dummy.prop1._creation_order ) self.assertTrue( self.Dummy.prop4._creation_order > self.Dummy.prop3._creation_order ) class PropertyKindsTest(unittest.TestCase): def setup(self, property, *args, **kwargs): prop2_kwargs = kwargs.pop('prop2_kwargs', {}) kwargs['primary'] = True class Class(object): __database_table__ = 'mytable' prop1 = property('field1', *args, **kwargs) prop2 = property(**prop2_kwargs) class SubClass(Class): pass self.Class = Class self.SubClass = SubClass self.obj = SubClass() self.obj_data = get_obj_data(self.obj) self.field1 = self.SubClass.prop1 self.field2 = self.SubClass.prop2 self.variable1 = self.obj_data.variables[self.field1] self.variable2 = self.obj_data.variables[self.field2] def commons(self, variable_type): self.assertTrue(isinstance(self.field1, Field)) self.assertTrue(isinstance(self.field2, Field)) self.assertEqual(self.field1.name, 'field1') self.assertEqual(self.field1.table, self.SubClass) self.assertEqual(self.field2.name, 'prop2') self.assertEqual(self.field2.table, self.SubClass) self.assertTrue(isinstance(self.variable1, variable_type)) self.assertTrue(isinstance(self.variable2, variable_type)) def test_bool(self): self.setup(Bool, default=50, allow_none=False) self.commons(BoolVariable) self.assertEqual(self.obj.prop1, True) self.assertRaises(NoneError, setattr, self.obj, 'prop1', None) self.obj.prop2 = None self.assertEqual(self.obj.prop2, None) self.obj.prop1 = 1 self.assertTrue(self.obj.prop1 is True) self.obj.prop1 = 0 self.assertTrue(self.obj.prop1 is False) def test_int(self): self.setup(Int, default=50, allow_none=False) self.commons(IntVariable) self.assertEqual(self.obj.prop1, 50) self.assertRaises(NoneError, setattr, self.obj, 'prop1', None) self.obj.prop2 = None self.assertEqual(self.obj.prop2, None) self.obj.prop1 = False self.assertTrue(self.obj.prop1 == 0) self.obj.prop1 = True self.assertTrue(self.obj.prop1 == 1) def test_float(self): self.setup(Float, default=50.5, allow_none=False) self.commons(FloatVariable) self.assertEqual(self.obj.prop1, 50.5) self.assertRaises(NoneError, setattr, self.obj, 'prop1', None) self.obj.prop2 = None self.assertEqual(self.obj.prop2, None) self.obj.prop1 = 1 self.assertTrue(isinstance(self.obj.prop1, float)) def test_decimal(self): self.setup(Decimal, default=decimal('50.5'), allow_none=False) self.commons(DecimalVariable) self.assertEqual(self.obj.prop1, decimal('50.5')) self.assertRaises(NoneError, setattr, self.obj, 'prop1', None) self.obj.prop2 = None 
self.assertEqual(self.obj.prop2, None) self.obj.prop1 = 1 self.assertTrue(isinstance(self.obj.prop1, decimal)) def test_str(self): self.setup(RawStr, default=b('def'), allow_none=False) self.commons(RawStrVariable) self.assertEqual(self.obj.prop1, b('def')) self.assertRaises(NoneError, setattr, self.obj, 'prop1', None) self.obj.prop2 = None self.assertEqual(self.obj.prop2, None) self.assertRaises(TypeError, setattr, self.obj, 'prop1', u('unicode')) def test_unicode(self): self.setup(Unicode, default=u('unicode'), allow_none=False) self.commons(UnicodeVariable) self.assertEqual(self.obj.prop1, u('unicode')) self.assertRaises(NoneError, setattr, self.obj, 'prop1', None) self.obj.prop2 = None self.assertEqual(self.obj.prop2, None) self.assertRaises(TypeError, setattr, self.obj, 'prop1', b('str')) def test_datetime(self): self.setup(DateTime, default=0, allow_none=False) self.commons(DateTimeVariable) self.assertEqual(self.obj.prop1, datetime.utcfromtimestamp(0)) self.assertRaises(NoneError, setattr, self.obj, 'prop1', None) self.obj.prop2 = None self.assertEqual(self.obj.prop2, None) self.obj.prop1 = 0.0 self.assertEqual(self.obj.prop1, datetime.utcfromtimestamp(0)) self.obj.prop1 = datetime(2014, 7, 31, 0, 31) self.assertEqual(self.obj.prop1, datetime(2014, 7, 31, 0, 31)) self.assertRaises(TypeError, setattr, self.obj, 'prop1', object()) def test_date(self): self.setup(Date, default=date(2014, 8, 1), allow_none=False) self.commons(DateVariable) self.assertEqual(self.obj.prop1, date(2014, 8, 1)) self.assertRaises(NoneError, setattr, self.obj, 'prop1', None) self.obj.prop2 = None self.assertEqual(self.obj.prop2, None) self.obj.prop1 = datetime(2014, 8, 1, 18, 44, 12) self.assertEqual(self.obj.prop1, date(2014, 8, 1)) self.obj.prop1 = date(2014, 8, 1) self.assertEqual(self.obj.prop1, date(2014, 8, 1)) self.assertRaises(TypeError, setattr, self.obj, 'prop1', object()) def test_time(self): self.setup(Time, default=time(18, 50), allow_none=False) self.commons(TimeVariable) self.assertEqual(self.obj.prop1, time(18, 50)) self.assertRaises(NoneError, setattr, self.obj, 'prop1', None) self.obj.prop2 = None self.assertEqual(self.obj.prop2, None) self.obj.prop1 = datetime(2014, 8, 1, 18, 44, 12) self.assertEqual(self.obj.prop1, time(18, 44, 12)) self.obj.prop1 = time(18, 44, 12) self.assertEqual(self.obj.prop1, time(18, 44, 12)) self.assertRaises(TypeError, setattr, self.obj, 'prop1', object()) def test_timedelta(self): self.setup( TimeDelta, default=timedelta(days=1, seconds=2, microseconds=3), allow_none=False ) self.commons(TimeDeltaVariable) self.assertEqual( self.obj.prop1, timedelta(days=1, seconds=2, microseconds=3) ) self.assertRaises(NoneError, setattr, self.obj, 'prop1', None) self.obj.prop2 = None self.assertEqual(self.obj.prop2, None) self.obj.prop1 = timedelta(days=42, seconds=42, microseconds=42) self.assertEqual( self.obj.prop1, timedelta(days=42, seconds=42, microseconds=42) ) self.assertRaises(TypeError, setattr, self.obj, 'prop1', object()) def test_enum(self): self.setup( Enum, map={'sausage': 1, 'spam': 2}, default='spam', allow_none=False, prop2_kwargs={'map': {'susage': 1, 'spam': 2}} ) self.commons(EnumVariable) self.assertEqual(self.obj.prop1, 'spam') self.assertRaises(NoneError, setattr, self.obj, 'prop1', None) self.obj.prop2 = None self.assertEqual(self.obj.prop2, None) self.obj.prop1 = 'sausage' self.assertEqual(self.obj.prop1, 'sausage') self.obj.prop1 = 'spam' self.assertEqual(self.obj.prop1, 'spam') self.assertRaises(ValueError, setattr, self.obj, 'prop1', 'circus') 
self.assertRaises(ValueError, setattr, self.obj, 'prop1', 1) def test_enum_with_set_map(self): self.setup( Enum, map={'sausage': 1, 'spam': 2}, set_map={'bacon': 1, 'chorizo': 2}, default='chorizo', allow_none=False, prop2_kwargs={'map': {'susage': 1, 'spam': 2}} ) self.commons(EnumVariable) self.assertEqual(self.obj.prop1, 'spam') self.assertRaises(NoneError, setattr, self.obj, 'prop1', None) self.obj.prop2 = None self.assertEqual(self.obj.prop2, None) self.obj.prop1 = 'bacon' self.assertEqual(self.obj.prop1, 'sausage') self.obj.prop1 = 'chorizo' self.assertEqual(self.obj.prop1, 'spam') self.assertRaises(ValueError, setattr, self.obj, 'prop1', 'sausage') self.assertRaises(ValueError, setattr, self.obj, 'prop1', 1) def test_mysql_enum(self): column = MysqlEnum(set={'foo', 'bar'}, default='foo') self.assertEqual(column._variable_kwargs['_set'], set(['foo', 'bar'])) class EnumTest(object): __storm_table__ = 'testtable' prop1 = MysqlEnum(set={'foo', 'bar'}, default='foo', primary=True) obj = EnumTest() self.assertEqual(obj.prop1, 'foo') obj.prop1 = 'bar' self.assertEqual(obj.prop1, 'bar') self.assertRaises(ValueError, setattr, obj, 'prop1', 'baz') self.assertRaises(ValueError, setattr, obj, 'prop1', 1) def test_uuid(self): value1 = uuid.UUID('{b50cb608-450d-469b-9e11-6d18c916d3d0}') value2 = uuid.UUID('{3b01f35c-8368-484e-bc70-406cde3ea693}') self.setup(UUID, default=value1, allow_none=False) self.commons(UUIDVariable) self.assertEqual(self.obj.prop1, value1) self.assertRaises(NoneError, setattr, self.obj, 'prop1', None) self.obj.prop2 = None self.assertEqual(self.obj.prop2, None) self.obj.prop1 = value1 self.assertEqual(self.obj.prop1, value1) self.obj.prop1 = value2 self.assertEqual(self.obj.prop1, value2) self.assertRaises( TypeError, setattr, self.obj, 'prop1', '{b50cb608-450d-469b-9e11-6d18c916d3d0}' ) def test_fraction(self): self.setup(Fraction, default=fraction('8/1'), allow_none=False) self.commons(FractionVariable) self.assertEqual(self.obj.prop1, fraction('8/1')) self.assertRaises(NoneError, setattr, self.obj, 'prop1', None) self.obj.prop2 = None self.assertEqual(self.obj.prop2, None) self.obj.prop1 = fraction('1.20') self.assertEqual(self.obj.prop1, fraction('6/5')) self.obj.prop1 = fraction('1.40') self.assertEqual(self.obj.prop1, fraction('7/5')) self.assertRaises(TypeError, setattr, self.obj, 'prop1', 1) def test_variable_factory_arguments(self): class Class(object): __database_table__ = 'test' id = Int(primary=True) validator_args = [] def validator(obj, attr, value): validator_args[:] = obj, attr, value return value for func, cls, value in [ (Bool, BoolVariable, True), (Int, IntVariable, 1), (Float, FloatVariable, 1.1), (RawStr, RawStrVariable, b('str')), (Unicode, UnicodeVariable, u('unicode')), (DateTime, DateTimeVariable, datetime.now()), (Date, DateVariable, date.today()), (Time, TimeVariable, datetime.now().time())]: # Test no default and allow_none=True. Class.prop = func(name='name') column = Class.prop.__get__(None, Class) self.assertEquals(column.name, 'name') self.assertEquals(column.table, Class) variable = column.variable_factory() self.assertTrue(isinstance(variable, cls)) self.assertEquals(variable.get(), None) variable.set(None) self.assertEquals(variable.get(), None) # Test default and allow_none=False. 
Class.prop = func(name='name', default=value, allow_none=False) column = Class.prop.__get__(None, Class) self.assertEquals(column.name, 'name') self.assertEquals(column.table, Class) variable = column.variable_factory() self.assertTrue(isinstance(variable, cls)) self.assertRaises(NoneError, variable.set, None) self.assertEquals(variable.get(), value) # Test validator. Class.prop = func(name='name', validator=validator, default=value) column = Class.prop.__get__(None, Class) self.assertEquals(column.name, 'name') self.assertEquals(column.table, Class) del validator_args[:] variable = column.variable_factory() self.assertTrue(isinstance(variable, cls)) # Validator is not called on instantiation. self.assertEquals(validator_args, []) # But is when setting the variable. variable.set(value) self.assertEquals(validator_args, [None, 'prop', value]) class TxORMPropertyRegistryTest(unittest.TestCase): def setUp(self): class Class(object): __database_table__ = 'mytable' prop1 = Property('field1', primary=True) prop2 = Property() class SubClass(Class): __database_table__ = 'mysubtable' self.Class = Class self.SubClass = SubClass self.AnotherClass = type('Class', (Class,), {}) self.registry = PropertyRegistry() def test_get_empty(self): self.assertRaises(PropertyPathError, self.registry.get, 'unexistent') def test_get(self): self.registry.add_class(self.Class) prop1 = self.registry.get('prop1') prop2 = self.registry.get('prop2') self.assertTrue(prop1 is self.Class.prop1) self.assertTrue(prop2 is self.Class.prop2) def test_get_with_class_name(self): self.registry.add_class(self.Class) prop1 = self.registry.get('Class.prop1') prop2 = self.registry.get('Class.prop2') self.assertTrue(prop1 is self.Class.prop1) self.assertTrue(prop2 is self.Class.prop2) def test_get_with_two_classes(self): self.registry.add_class(self.Class) self.registry.add_class(self.SubClass) prop1 = self.registry.get('Class.prop1') prop2 = self.registry.get('Class.prop2') self.assertTrue(prop1 is self.Class.prop1) self.assertTrue(prop2 is self.Class.prop2) prop1 = self.registry.get('SubClass.prop1') prop2 = self.registry.get('SubClass.prop2') self.assertTrue(prop1 is self.SubClass.prop1) self.assertTrue(prop2 is self.SubClass.prop2) def test_get_ambiguous(self): self.AnotherClass.__module__ += '.foo' self.registry.add_class(self.Class) self.registry.add_class(self.SubClass) self.registry.add_class(self.AnotherClass) self.assertRaises(PropertyPathError, self.registry.get, 'Class.prop1') self.assertRaises(PropertyPathError, self.registry.get, 'Class.prop2') prop1 = self.registry.get('SubClass.prop1') prop2 = self.registry.get('SubClass.prop2') self.assertTrue(prop1 is self.SubClass.prop1) self.assertTrue(prop2 is self.SubClass.prop2) def test_get_ambiguous_but_different_path(self): self.AnotherClass.__module__ += '.foo' self.registry.add_class(self.Class) self.registry.add_class(self.SubClass) self.registry.add_class(self.AnotherClass) prop1 = self.registry.get('test_property.Class.prop1') prop2 = self.registry.get('test_property.Class.prop2') self.assertTrue(prop1 is self.Class.prop1) self.assertTrue(prop2 is self.Class.prop2) prop1 = self.registry.get('SubClass.prop1') prop2 = self.registry.get('SubClass.prop2') self.assertTrue(prop1 is self.SubClass.prop1) self.assertTrue(prop2 is self.SubClass.prop2) prop1 = self.registry.get('foo.Class.prop1') prop2 = self.registry.get('foo.Class.prop2') self.assertTrue(prop1 is self.AnotherClass.prop1) self.assertTrue(prop2 is self.AnotherClass.prop2) def 
test_get_ambiguous_but_different_path_with_namespace(self):
        self.AnotherClass.__module__ += '.foo'
        self.registry.add_class(self.Class)
        self.registry.add_class(self.SubClass)
        self.registry.add_class(self.AnotherClass)

        prop1 = self.registry.get('Class.prop1', 'test.test_property')
        prop2 = self.registry.get('Class.prop2', 'test.test_property.bar')
        self.assertTrue(prop1 is self.Class.prop1)
        self.assertTrue(prop2 is self.Class.prop2)

        prop1 = self.registry.get(
            'Class.prop1', 'txorm.test.test_property.foo')
        prop2 = self.registry.get(
            'Class.prop2', 'txorm.test.test_property.foo.bar')
        self.assertTrue(prop1 is self.AnotherClass.prop1)
        self.assertTrue(prop2 is self.AnotherClass.prop2)

    def test_class_is_collectable(self):
        self.AnotherClass.__module__ += '.foo'
        self.registry.add_class(self.Class)
        self.registry.add_class(self.AnotherClass)
        del self.AnotherClass
        gc.collect()

        prop1 = self.registry.get('prop1')
        prop2 = self.registry.get('prop2')
        self.assertTrue(prop1 is self.Class.prop1)
        self.assertTrue(prop2 is self.Class.prop2)

    def test_add_property(self):
        self.registry.add_property(self.Class, self.Class.prop1, 'custom_name')
        prop1 = self.registry.get('Class.custom_name')
        self.assertEquals(prop1, self.Class.prop1)
        self.assertRaises(PropertyPathError, self.registry.get, 'Class.prop1')

    def test_storm_compatibility(self):
        class StormClass(self.Class):
            __storm_table__ = 'stormtable'
            prop1 = Property('storm_field1', primary=True)

        self.registry.add_class(StormClass)
        prop1 = self.registry.get('StormClass.prop1')
        self.assertTrue(prop1 is StormClass.prop1)
from ebooklib import epub
import configparser


def ebook_init(config):
    ebook = epub.EpubBook()
    sec = 'toc'
    ebook.set_identifier(config.get(sec, 'id'))
    ebook.set_title(config.get(sec, 'title'))
    ebook.set_language(config.get(sec, 'language'))
    for auth in config.get(sec, 'author').split(','):
        ebook.add_author(auth)
    return ebook


def give_css(config):
    # return the css style sheet
    if (config.has_option('toc', 'css')):
        with open(config.get('toc', 'css'), 'r') as f:
            read_data = f.read()
        return read_data
    return """
@namespace epub "http://www.idpf.org/2007/ops";
body { font-family: Verdana, Helvetica, Arial, sans-serif; }
h1 { text-align: center; }
h2 { text-align: left; text-transform: uppercase; font-weight: 200; }
ol { list-style-type: none; margin: 0; }
ol > li { margin-top: 0.3em; }
ol > li > span { font-weight: bold; }
ol > li > ol { margin-left: 0.5em; }
"""


def give_intro(config):
    # return the Introduction
    if (config.has_option('toc', 'intro')):
        with open(config.get('toc', 'intro'), 'r') as f:
            read_data = f.read()
        return read_data
    return """
<html>
<head>
    <title>Introduction</title>
    <link rel="stylesheet" href="style/main.css" type="text/css" />
</head>
<body>
    <h1>{}</h1>
    <p><b>By: {}</b></p>
    <p>{}</p>
</body>
</html>
""".format(
        config.get('toc', 'title'),
        config.get('toc', 'author'),
        config.get('toc', 'synopsis')
    )


def extract_chapter(chapter):
    from lxml import html
    # open the chapter file, return contents as string
    with open(chapter, 'r') as f:
        read_data = f.read()
    tree = html.fromstring(read_data)
    title = tree.xpath('//title/text()')[0]
    return (title.strip(), read_data)


def main(argv=None):
    from sys import argv as sys_argv
    from os.path import join, abspath, isfile, exists, basename
    from os import remove
    import argparse

    parser = argparse.ArgumentParser(description='Generate epub from Light Novel Chapters')
    parser.add_argument('config', help="specify config file")
    parser.add_argument('--input', '-i', help="specify input directory")

    if argv is None:
        argv = sys_argv
    args = parser.parse_args(argv[1:])

    config = configparser.ConfigParser()
    try:
        config.read(args.config)
    except configparser.Error:
        return 1
    if not config.has_section('toc'):
        return 1
    if not config.has_option('toc', 'order'):
        return 1
    if (config.has_option('DEFAULT', 'chapter-directory')
            and (args.input == "" or args.input is None)):
        args.input = config.get('DEFAULT', 'chapter-directory')

    order = config.get('toc', 'order').split(',')

    ebook = ebook_init(config)
    doc_style = epub.EpubItem(
        uid="doc_style",
        # must match the stylesheet link used in the introduction and chapters
        file_name="style/main.css",
        media_type="text/css",
        content=give_css(config)
    )
    ebook.add_item(doc_style)

    intro_ch = epub.EpubHtml(title="Introduction", file_name="intro.xhtml")
    intro_ch.add_item(doc_style)
    intro_ch.content = give_intro(config)
    ebook.add_item(intro_ch)

    toc = [epub.Link('intro.xhtml', 'Introduction', 'intro')]
    chapters = []
    included_files = set()
    for sec in order:
        sec = sec.strip()
        if not config.has_section(sec):
            continue
        sec_chapters = []
        sec_title = config.get(sec, 'title')
        sec_start = ""
        sec_end = ""
        skip_explicit_chapters = False
        # 'chapter-files' is the option actually read below
        if (config.has_option(sec, 'chapter-files')):
            ch_files = config.get(sec, 'chapter-files').split(',')
            if (config.has_option(sec, 'epub_skip_chapters')):
                skip_explicit_chapters = config.getboolean(sec, 'epub_skip_chapters')
            # Skip processing explicit chapters when requested
            for ch_file in ch_files:
                if skip_explicit_chapters:
                    continue
                ch_file = ch_file.strip()
                filename = join(abspath(args.input), ch_file)
                if (not isfile(filename)):
                    continue
                if filename in included_files:
                    continue
                ch_title, ch_content = extract_chapter(filename)
                ch = epub.EpubHtml(title=ch_title, file_name=ch_file)
                ch.add_item(doc_style)
                ch.content = ch_content
                ebook.add_item(ch)
                sec_chapters.append(ch)
                included_files.add(filename)
        if (config.has_option(sec, 'start')):
            sec_start = config.get(sec, 'start').strip()
        if (config.has_option(sec, 'end')):
            sec_end = config.get(sec, 'end').strip()
        if (sec_start == "" or sec_end == ""):
            if (sec_start == ""):
                sec_start = sec_end
            else:
                sec_end = sec_start
        if (sec_start != "" and sec_end != ""):
            sec_start = int(sec_start)
            sec_end = int(sec_end)
            ch_range = range(sec_start, sec_end + 1)
            vol = 1
            if (config.has_option(sec, 'volume')):
                vol = config.get(sec, 'volume')
            x_file = config.get(sec, 'chapter-file')
            for ch_num in ch_range:
                ch_file = x_file.format(volume=vol, chapter=ch_num)
                filename = join(abspath(args.input), ch_file)
                if (not isfile(filename)):
                    continue
                if filename in included_files:
                    continue
                ch_title, ch_content = extract_chapter(filename)
                ch = epub.EpubHtml(title=ch_title.strip(), file_name=ch_file)
                ch.add_item(doc_style)
                ch.content = ch_content
                ebook.add_item(ch)
                sec_chapters.append(ch)
                included_files.add(filename)
        if (len(sec_chapters) >= 1):
            if sec_title:
                toc.append((epub.Section(sec_title), sec_chapters))
            else:
                toc.extend(sec_chapters)
        #~ elif (len(sec_chapters) == 1):
        #~     toc.append(epub.Link(ch_file, ch_title, basename(ch_file)))
        chapters.extend(sec_chapters)

    ebook.toc = toc
    ebook.add_item(epub.EpubNcx())
    #ebook.add_item(epub.EpubNav())
    nav_page = epub.EpubNav(uid='book_toc', file_name='toc.xhtml')
    nav_page.add_item(doc_style)
    ebook.add_item(nav_page)
    ebook.spine = [intro_ch, nav_page] + chapters
    epub_filename = config.get('toc', 'epub')
    if exists(epub_filename):
        remove(epub_filename)
    epub.write_epub(epub_filename, ebook, {})


if __name__ == '__main__':
    from sys import exit, hexversion, stderr
    if hexversion < 0x03020000:
        stderr.write("ERROR: Requires Python 3.2.0 or newer\n")
        exit(1)
    exit(main())
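# A minimal sketch of a config file this script can read, assembled from the
# section and option names used above; the concrete values are illustrative:
#
#   [DEFAULT]
#   chapter-directory = ./chapters
#
#   [toc]
#   id = sample-novel
#   title = Sample Novel
#   language = en
#   author = Some Author
#   synopsis = A short synopsis.
#   order = vol1
#   epub = sample.epub
#
#   [vol1]
#   title = Volume 1
#   volume = 1
#   start = 1
#   end = 3
#   chapter-file = v{volume}c{chapter}.html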
import unittest import sys from concurrent.futures import Future from wshubsapi.hubs_inspector import HubsInspector from Test.testingUtils import restore_test_resources, create_compiler_uploader_mock, create_sender_mock from libs.Version import Version from libs.WSCommunication.Hubs.VersionsHandlerHub import VersionsHandlerHub import libs.WSCommunication.Hubs from flexmock import flexmock, flexmock_teardown class TestVersionsHandlerHub(unittest.TestCase): def setUp(self): HubsInspector.inspect_implemented_hubs(force_reconstruction=True) Version.read_version_values() self.versionsHandlerHub = HubsInspector.get_hub_instance(VersionsHandlerHub) """ :type : VersionsHandlerHub""" self.libUpdater = self.versionsHandlerHub.lib_updater self.updater = self.versionsHandlerHub.w2b_updater self.sender = create_sender_mock() self.compileUploaderMock, self.CompileUploaderConstructorMock = create_compiler_uploader_mock() self.testLibVersion = "1.1.1" restore_test_resources() def tearDown(self): flexmock_teardown() def test_getVersion_returnsAVersionStringFormat(self): version = self.versionsHandlerHub.get_version() self.assertRegexpMatches(version, '^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$') def test_setLibVersion_doesNotDownloadLibsIfHasRightVersion(self): flexmock(self.libUpdater, is_necessary_to_update=lambda **kwargs: False).should_receive("update").never() self.libUpdater.current_version_info.version = self.testLibVersion self.versionsHandlerHub.set_lib_version(self.testLibVersion) def test_setLibVersion_DownloadsLibsIfHasNotTheRightVersion(self): Version.bitbloq_libs = "0.0.1" self.libUpdater = flexmock(self.libUpdater).should_receive("update").once() self.versionsHandlerHub.set_lib_version(self.testLibVersion) def test_setLibVersion_returnsTrue(self): self.libUpdater = flexmock(self.libUpdater, update=lambda x: None) self.versionsHandlerHub.set_lib_version(self.testLibVersion) def test_setWeb2boardVersion_returnsTrue(self): result = Future() result.set_result(True) flexmock(self.updater).should_receive("download_version").and_return(result).once() self.versionsHandlerHub.set_web2board_version("0.0.1")
import pytest import guitarpro as gp def testHashable(): song = gp.Song() hash(song) anotherSong = gp.Song() assert song == anotherSong assert hash(song) == hash(anotherSong) coda = gp.DirectionSign('Coda') segno = gp.DirectionSign('Segno') assert coda != segno assert hash(coda) != hash(segno) @pytest.mark.parametrize('value', [1, 2, 4, 8, 16, 32, 64]) @pytest.mark.parametrize('isDotted', [False, True]) @pytest.mark.parametrize('tuplet', gp.Tuplet.supportedTuplets) def testDuration(value, isDotted, tuplet): dur = gp.Duration(value, isDotted=isDotted, tuplet=gp.Tuplet(*tuplet)) time = dur.time newDur = gp.Duration.fromTime(time) assert isinstance(newDur.value, int) assert time == newDur.time def testBeatStartInMeasure(): song = gp.Song() measure = song.tracks[0].measures[0] voice = measure.voices[0] beat = gp.Beat(voice, start=measure.start) beat2 = gp.Beat(voice, start=measure.start + beat.duration.time) voice.beats.append(beat) assert beat.startInMeasure == 0 assert beat2.startInMeasure == 960 with pytest.raises(AttributeError): beat2.realStart def testGuitarString(): assert str(gp.GuitarString(number=1, value=0)) == 'C-1' assert str(gp.GuitarString(number=1, value=40)) == 'E2' assert str(gp.GuitarString(number=1, value=64)) == 'E4'
import plumed import numpy as np assert plumed.getNumArgs() == 1 assert plumed.getNumExArgs() == 2 def bias(arg, force, extra): assert plumed.getNumArgs() == 1 assert plumed.getNumExArgs() == 2 assert isinstance(arg, np.ndarray) assert isinstance(force, np.ndarray) assert isinstance(extra, np.ndarray) assert arg.dtype == np.float64 assert force.dtype == np.float64 assert extra.dtype == np.float64 assert arg.shape == (1,) assert force.shape == (1,) assert extra.shape == (2,) assert arg[0] == -1 assert np.isnan(force[0]) assert extra[0] == 0.001 assert extra[1] == 42 force[:] = 0.0 return 0.0
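# For contrast with the pure interface checks above, a minimal sketch of a
# bias callable that does real work: a harmonic restraint on the single
# argument. The contract mirrored here (forces written into `force` in place,
# bias energy returned) is exactly what the test's bias() exercises; the
# center/kappa values and the minus-gradient sign convention are illustrative
# assumptions, not taken from the test.

def harmonic_bias(arg, force, extra, center=0.0, kappa=10.0):
    displacement = arg[0] - center
    force[0] = -kappa * displacement        # assumed convention: minus the gradient
    return 0.5 * kappa * displacement ** 2  # bias energy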
from setuptools import setup, find_packages # To use a consistent encoding from codecs import open from os import path with open('ottemplatepython/__init__.py') as fid: for line in fid: if line.startswith('__version__'): version = line.strip().split()[-1][1:-1] break """ http://python-packaging.readthedocs.org/en/latest/minimal.html """ here = path.abspath(path.dirname(__file__)) with open(path.join(here, 'README.rst'), encoding='utf-8') as f: long_description = f.read() setup( name='ottemplatepython', version=version, packages=find_packages(), # extras_require = { # 'joblib': ["joblib>=0.9.3"], # 'ipyparallel': ["ipyparallel>=5.0.1"], # 'pathos': ["pathos>=0.2.0"] # }, author="First Name", author_email="name@phimeca.com", description="General purpose OpenTURNS python wrapper tools", long_description=long_description, setup_requires=['pytest-runner'], tests_require=['pytest'], zip_safe=False )
"""Pyblish Endpoint Server""" import os import logging import threading import flask import flask.ext.restful import mocking import resource import service as service_mod log = logging.getLogger("endpoint") prefix = "/pyblish/v1" resource_map = { "/state": resource.StateApi, "/client": resource.ClientApi, "/hello": resource.HelloApi, } endpoint_map = { "/state": "state", "/client": "client", "/hello": "hello", } current_server = None current_server_thread = None def create_app(): log.info("Creating app") app = flask.Flask(__name__) app.config["TESTING"] = True api = flask.ext.restful.Api(app) log.info("Mapping URIs") for uri, _resource in resource_map.items(): endpoint = endpoint_map.get(uri) api.add_resource(_resource, prefix + uri, endpoint=endpoint) log.info("App created") return app, api def start_production_server(port, service, **kwargs): """Start production server Arguments: port (int): Port at which to listen for requests service (EndpointService): Service exposed at port. Each host implements its own service. """ # Lessen web-server output log = logging.getLogger("werkzeug") log.setLevel(logging.WARNING) service_mod.register_service(service, force=True) app, api = create_app() app.run(port=port, threaded=True) global current_server current_server = app def start_async_production_server(port, service): """Start production server in a separate thread For arguments, see func:`start_production_server` """ def worker(): start_production_server(port, service, threaded=True) t = threading.Thread(target=worker) t.daemon = True t.start() global current_server_thread current_server_thread = t def start_debug_server(port, delay=0.5, **kwargs): """Start debug server This server uses a mocked up service to fake the actual behaviour and data of a generic host; incuding faked time it takes to perform a task. Arguments: port (int): Port at which to listen for requests """ # Log to console formatter = logging.Formatter("%(levelname)-8s %(message)s") handler = logging.StreamHandler() handler.setFormatter(formatter) log.addHandler(handler) log.setLevel(logging.DEBUG) os.environ["ENDPOINT_PORT"] = str(port) Service = mocking.MockService Service.SLEEP_DURATION = delay service_mod.register_service(Service) # Expose vendor libraries to external Python process # triggered by running Flask in debug-mode. package_dir = os.path.dirname(__file__) vendor_dir = os.path.join(package_dir, "vendor") if not os.environ["PYTHONPATH"]: os.environ["PYTHONPATH"] = "" os.environ["PYTHONPATH"] += os.pathsep + vendor_dir app, api = create_app() app.run(debug=True, port=port, threaded=True) if __name__ == '__main__': import argparse parser = argparse.ArgumentParser() parser.add_argument("--port", type=int, default=6000, help="Port to use") parser.add_argument("--delay", type=float, default=0.5, help="Higher value means slower") args = parser.parse_args() start_debug_server(**args.__dict__)
import os
import sys
sys.path.append('..')
import mupif.pyroutil
import mupif.util
import subprocess
sys.path.append('../examples')
import threading
from exconfig import ExConfig
cfg = ExConfig()
import logging
log = logging.getLogger()

threading.current_thread().setName('Pyro5-NameServer')


def main():
    # Initializing variables
    nshost = cfg.nshost
    nsport = cfg.nsport
    log.warning(f"Starting nameserver on {cfg.nshost}:{cfg.nsport}")
    # os.environ['PYRO_SERIALIZERS_ACCEPTED'] = 'serpent,json,marshal,pickle'
    # os.environ['PYRO_PICKLE_PROTOCOL_VERSION'] = '2'
    # equivalent, does not need subprocess and shell etc
    import Pyro5.configure
    Pyro5.configure.SERIALIZER = 'serpent'
    Pyro5.configure.PYRO_SERVERTYPE = 'multiplex'
    Pyro5.configure.PYRO_SSL = 0
    log.warning(Pyro5.configure.global_config.dump())
    import Pyro5.nameserver
    Pyro5.nameserver.start_ns_loop(nshost, nsport)


if __name__ == '__main__':
    main()
import argparse from migen import * from migen.genlib.resetsync import AsyncResetSynchronizer from migen.genlib.cdc import MultiReg from migen.build.generic_platform import * from migen.build.xilinx.vivado import XilinxVivadoToolchain from migen.build.xilinx.ise import XilinxISEToolchain from misoc.interconnect.csr import * from misoc.cores import gpio from misoc.targets.kc705 import MiniSoC, soc_kc705_args, soc_kc705_argdict from misoc.integration.builder import builder_args, builder_argdict from artiq.gateware.amp import AMPSoC, build_artiq_soc from artiq.gateware import rtio, nist_clock, nist_qc2 from artiq.gateware.rtio.phy import (ttl_simple, ttl_serdes_7series, dds, spi) from artiq import __version__ as artiq_version class _RTIOCRG(Module, AutoCSR): def __init__(self, platform, rtio_internal_clk): self._clock_sel = CSRStorage() self._pll_reset = CSRStorage(reset=1) self._pll_locked = CSRStatus() self.clock_domains.cd_rtio = ClockDomain() self.clock_domains.cd_rtiox4 = ClockDomain(reset_less=True) # 10 MHz when using 125MHz input self.clock_domains.cd_ext_clkout = ClockDomain(reset_less=True) ext_clkout = platform.request("user_sma_gpio_p_33") self.sync.ext_clkout += ext_clkout.eq(~ext_clkout) rtio_external_clk = Signal() user_sma_clock = platform.request("user_sma_clock") platform.add_period_constraint(user_sma_clock.p, 8.0) self.specials += Instance("IBUFDS", i_I=user_sma_clock.p, i_IB=user_sma_clock.n, o_O=rtio_external_clk) pll_locked = Signal() rtio_clk = Signal() rtiox4_clk = Signal() ext_clkout_clk = Signal() self.specials += [ Instance("PLLE2_ADV", p_STARTUP_WAIT="FALSE", o_LOCKED=pll_locked, p_REF_JITTER1=0.01, p_CLKIN1_PERIOD=8.0, p_CLKIN2_PERIOD=8.0, i_CLKIN1=rtio_internal_clk, i_CLKIN2=rtio_external_clk, # Warning: CLKINSEL=0 means CLKIN2 is selected i_CLKINSEL=~self._clock_sel.storage, # VCO @ 1GHz when using 125MHz input p_CLKFBOUT_MULT=8, p_DIVCLK_DIVIDE=1, i_CLKFBIN=self.cd_rtio.clk, i_RST=self._pll_reset.storage, o_CLKFBOUT=rtio_clk, p_CLKOUT0_DIVIDE=2, p_CLKOUT0_PHASE=0.0, o_CLKOUT0=rtiox4_clk, p_CLKOUT1_DIVIDE=50, p_CLKOUT1_PHASE=0.0, o_CLKOUT1=ext_clkout_clk), Instance("BUFG", i_I=rtio_clk, o_O=self.cd_rtio.clk), Instance("BUFG", i_I=rtiox4_clk, o_O=self.cd_rtiox4.clk), Instance("BUFG", i_I=ext_clkout_clk, o_O=self.cd_ext_clkout.clk), AsyncResetSynchronizer(self.cd_rtio, ~pll_locked), MultiReg(pll_locked, self._pll_locked.status) ] _sma33_io = [ ("user_sma_gpio_p_33", 0, Pins("Y23"), IOStandard("LVCMOS33")), ("user_sma_gpio_n_33", 0, Pins("Y24"), IOStandard("LVCMOS33")), ] _ams101_dac = [ ("ams101_dac", 0, Subsignal("ldac", Pins("XADC:GPIO0")), Subsignal("clk", Pins("XADC:GPIO1")), Subsignal("mosi", Pins("XADC:GPIO2")), Subsignal("cs_n", Pins("XADC:GPIO3")), IOStandard("LVTTL") ) ] _sdcard_spi_33 = [ ("sdcard_spi_33", 0, Subsignal("miso", Pins("AC20"), Misc("PULLUP")), Subsignal("clk", Pins("AB23")), Subsignal("mosi", Pins("AB22")), Subsignal("cs_n", Pins("AC21")), IOStandard("LVCMOS33") ) ] class _NIST_Ions(MiniSoC, AMPSoC): mem_map = { "cri_con": 0x10000000, "rtio": 0x20000000, "rtio_dma": 0x30000000, "mailbox": 0x70000000 } mem_map.update(MiniSoC.mem_map) def __init__(self, cpu_type="or1k", **kwargs): MiniSoC.__init__(self, cpu_type=cpu_type, sdram_controller_type="minicon", l2_size=128*1024, ident=artiq_version, ethmac_nrxslots=4, ethmac_ntxslots=4, **kwargs) AMPSoC.__init__(self) if isinstance(self.platform.toolchain, XilinxVivadoToolchain): self.platform.toolchain.bitstream_commands.extend([ "set_property BITSTREAM.GENERAL.COMPRESS True [current_design]", ]) 
if isinstance(self.platform.toolchain, XilinxISEToolchain): self.platform.toolchain.bitgen_opt += " -g compress" self.submodules.leds = gpio.GPIOOut(Cat( self.platform.request("user_led", 0), self.platform.request("user_led", 1))) self.csr_devices.append("leds") self.platform.add_extension(_sma33_io) self.platform.add_extension(_ams101_dac) self.platform.add_extension(_sdcard_spi_33) i2c = self.platform.request("i2c") self.submodules.i2c = gpio.GPIOTristate([i2c.scl, i2c.sda]) self.csr_devices.append("i2c") self.config["I2C_BUS_COUNT"] = 1 self.config["HAS_DDS"] = None def add_rtio(self, rtio_channels): self.submodules.rtio_crg = _RTIOCRG(self.platform, self.crg.cd_sys.clk) self.csr_devices.append("rtio_crg") self.submodules.rtio_core = rtio.Core(rtio_channels) self.csr_devices.append("rtio_core") self.submodules.rtio = rtio.KernelInitiator() self.submodules.rtio_dma = ClockDomainsRenamer("sys_kernel")( rtio.DMA(self.get_native_sdram_if())) self.register_kernel_cpu_csrdevice("rtio") self.register_kernel_cpu_csrdevice("rtio_dma") self.submodules.cri_con = rtio.CRIInterconnectShared( [self.rtio.cri, self.rtio_dma.cri], [self.rtio_core.cri]) self.register_kernel_cpu_csrdevice("cri_con") self.submodules.rtio_moninj = rtio.MonInj(rtio_channels) self.csr_devices.append("rtio_moninj") self.rtio_crg.cd_rtio.clk.attr.add("keep") self.platform.add_period_constraint(self.rtio_crg.cd_rtio.clk, 8.) self.platform.add_false_path_constraints( self.crg.cd_sys.clk, self.rtio_crg.cd_rtio.clk) self.submodules.rtio_analyzer = rtio.Analyzer(self.rtio_core.cri, self.get_native_sdram_if()) self.csr_devices.append("rtio_analyzer") class NIST_CLOCK(_NIST_Ions): """ NIST clock hardware, with old backplane and 11 DDS channels """ def __init__(self, cpu_type="or1k", **kwargs): _NIST_Ions.__init__(self, cpu_type, **kwargs) platform = self.platform platform.add_extension(nist_clock.fmc_adapter_io) rtio_channels = [] for i in range(16): if i % 4 == 3: phy = ttl_serdes_7series.InOut_8X(platform.request("ttl", i)) self.submodules += phy rtio_channels.append(rtio.Channel.from_phy(phy, ififo_depth=512)) else: phy = ttl_serdes_7series.Output_8X(platform.request("ttl", i)) self.submodules += phy rtio_channels.append(rtio.Channel.from_phy(phy)) for i in range(2): phy = ttl_serdes_7series.InOut_8X(platform.request("pmt", i)) self.submodules += phy rtio_channels.append(rtio.Channel.from_phy(phy, ififo_depth=512)) phy = ttl_serdes_7series.InOut_8X(platform.request("user_sma_gpio_n_33")) self.submodules += phy rtio_channels.append(rtio.Channel.from_phy(phy, ififo_depth=512)) phy = ttl_simple.Output(platform.request("user_led", 2)) self.submodules += phy rtio_channels.append(rtio.Channel.from_phy(phy)) ams101_dac = self.platform.request("ams101_dac", 0) phy = ttl_simple.Output(ams101_dac.ldac) self.submodules += phy rtio_channels.append(rtio.Channel.from_phy(phy)) phy = ttl_simple.ClockGen(platform.request("la32_p")) self.submodules += phy rtio_channels.append(rtio.Channel.from_phy(phy)) phy = spi.SPIMaster(ams101_dac) self.submodules += phy rtio_channels.append(rtio.Channel.from_phy( phy, ofifo_depth=4, ififo_depth=4)) for i in range(3): phy = spi.SPIMaster(self.platform.request("spi", i)) self.submodules += phy rtio_channels.append(rtio.Channel.from_phy( phy, ofifo_depth=128, ififo_depth=128)) phy = spi.SPIMaster(platform.request("sdcard_spi_33", 0)) self.submodules += phy rtio_channels.append(rtio.Channel.from_phy( phy, ofifo_depth=4, ififo_depth=4)) phy = dds.AD9914(platform.request("dds"), 11, onehot=True) self.submodules += 
phy rtio_channels.append(rtio.Channel.from_phy(phy, ofifo_depth=512, ififo_depth=4)) self.config["HAS_RTIO_LOG"] = None self.config["RTIO_LOG_CHANNEL"] = len(rtio_channels) rtio_channels.append(rtio.LogChannel()) self.add_rtio(rtio_channels) class NIST_QC2(_NIST_Ions): """ NIST QC2 hardware, as used in Quantum I and Quantum II, with new backplane and 24 DDS channels. Two backplanes are used. """ def __init__(self, cpu_type="or1k", **kwargs): _NIST_Ions.__init__(self, cpu_type, **kwargs) platform = self.platform platform.add_extension(nist_qc2.fmc_adapter_io) rtio_channels = [] clock_generators = [] # All TTL channels are In+Out capable for i in range(40): phy = ttl_serdes_7series.InOut_8X( platform.request("ttl", i)) self.submodules += phy rtio_channels.append(rtio.Channel.from_phy(phy, ififo_depth=512)) # CLK0, CLK1 are for clock generators, on backplane SMP connectors for i in range(2): phy = ttl_simple.ClockGen( platform.request("clkout", i)) self.submodules += phy clock_generators.append(rtio.Channel.from_phy(phy)) # user SMA on KC705 board phy = ttl_serdes_7series.InOut_8X(platform.request("user_sma_gpio_n_33")) self.submodules += phy rtio_channels.append(rtio.Channel.from_phy(phy, ififo_depth=512)) phy = ttl_simple.Output(platform.request("user_led", 2)) self.submodules += phy rtio_channels.append(rtio.Channel.from_phy(phy)) # AMS101 DAC on KC705 XADC header - optional ams101_dac = self.platform.request("ams101_dac", 0) phy = ttl_simple.Output(ams101_dac.ldac) self.submodules += phy rtio_channels.append(rtio.Channel.from_phy(phy)) # add clock generators after TTLs rtio_channels += clock_generators phy = spi.SPIMaster(ams101_dac) self.submodules += phy rtio_channels.append(rtio.Channel.from_phy( phy, ofifo_depth=4, ififo_depth=4)) for i in range(4): phy = spi.SPIMaster(self.platform.request("spi", i)) self.submodules += phy rtio_channels.append(rtio.Channel.from_phy( phy, ofifo_depth=128, ififo_depth=128)) for backplane_offset in range(2): phy = dds.AD9914( platform.request("dds", backplane_offset), 12, onehot=True) self.submodules += phy rtio_channels.append(rtio.Channel.from_phy(phy, ofifo_depth=512, ififo_depth=4)) self.config["HAS_RTIO_LOG"] = None self.config["RTIO_LOG_CHANNEL"] = len(rtio_channels) rtio_channels.append(rtio.LogChannel()) self.add_rtio(rtio_channels) def main(): parser = argparse.ArgumentParser( description="ARTIQ device binary builder / single-FPGA KC705-based " "systems with AD9 DDS (NIST Ions hardware)") builder_args(parser) soc_kc705_args(parser) parser.add_argument("-H", "--hw-adapter", default="nist_clock", help="hardware adapter type: " "nist_clock/nist_qc2 " "(default: %(default)s)") args = parser.parse_args() hw_adapter = args.hw_adapter.lower() if hw_adapter == "nist_clock": cls = NIST_CLOCK elif hw_adapter == "nist_qc2": cls = NIST_QC2 else: raise SystemExit("Invalid hardware adapter string (-H/--hw-adapter)") soc = cls(**soc_kc705_argdict(args)) build_artiq_soc(soc, builder_argdict(args)) if __name__ == "__main__": main()
dc = {'a' : 'a-ele', 'b' : 'b-ele', 'c' : 'c-ele'} dd = {'d' : 'd-x', 'e' : 'e-x'} print "id(dc) = [%d], dc = [%s]" % (id(dc), str(dc)) print "id(dd) = [%d], dd = [%s]" % (id(dd), str(dd)) dc.update(dd) print "id(dc) = [%d], dc = [%s]" % (id(dc), str(dc)) print "id(dd) = [%d], dd = [%s]" % (id(dd), str(dd))
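# Expected behaviour: dict.update() mutates dc in place, so the id printed
# for dc is the same before and after the call; dc gains the 'd' and 'e'
# keys from dd, while dd itself is left untouched.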
""" The code is designed to be a server running on RPi waiting for client to initiate drone launching Creator: Mana Saedan Date Last Edited: 22 June 2017 Development Logs: - Server waiting for client to connect - Data format from client (32-byte-string): mode,height,roll,pitch,yaw,angle,cheksum,... Mode: 1 byte Height: 5 bytes in unit of millimeter Roll, Pitch, Yaw: Each has 3 bytes number from 0-200 Angle: 3 bytes indicate angle if smarphone (front) with respect to north pole Checksum: - Mode byte meaning Bit 0 = Arm/Disarm Bit 1 = Takeoff/Landing Bit 2,3 ==> 00=Hoover, 01=Self frame, 10=Earth fame, 11=Smartphone frame """ import sys import time import socket from QuadCopter import QuadCopter def getch(): fd = sys.stdin.fileno() old_settings = termios.tcgetattr(fd) try: tty.setraw(fd) ch = sys.stdin.read(1) finally: termios.tcsetattr(fd, termios.TCSADRAIN, old_settings) return ch aircraft = QuadCopter() def DecodeTCP(Mode, Height, Roll, Pitch, Yaw, Angle): global aircraft #Check whether the aircraft is armed if (Mode & 0x01): if not(aircraft.isArm()): print "Aricraft starting" aircraft.Start() #Check flight mode for taking off if (Mode & 0x02): aircraft.Takeoff(Height/1000.0) else: aircraft.Landing() else: aircraft.Landing() #Check whether the aircraft is landed while not(aircraft.IsLanded()): time.sleep(0.1) #Stop aircraft and disarm motors aircraft.Stop() def UserKeypress(): global sock global Status print "Press key to exit" getch() # Clean up the connection connection.close() #Disarm aircraft if it is armed DecodeTCP(0,0,0,0,0,0) Status = False if (aircraft.ConnectDevices(False, False) != 0): print "Unable connect aircraft devices" ''' try: thread.start_new_thread(UserKeypress, ()) except: print "Error: User Keypress Thread was not created" ''' sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) server_address = ('0.0.0.0', 10000) print >>sys.stderr, 'Starting up on %s port %s' % server_address sock.bind(server_address) sock.listen(1) Status = True while Status: #Wait for a connection print >>sys.stderr, 'Waiting for a connection' connection, client_address = sock.accept() try: #Display client address print >>sys.stderr, 'Connection from', client_address #Loop indefinite waiting for client while True: #Read 23 bytes from client txt_data = connection.recv(32) #Exit loop when no data receive or client is disconnected if not txt_data: break #Upon receiving 32 bytes, decode text data if len(txt_data)>0: #Remove carrirage and line feed txt_data = txt_data.replace('\n', '').replace('\r', '') #Split data from text data = txt_data.split(',') print len(data) if (len(data) < 7): #Do nothing if received incompleted data package pass else: #Check the package validity, data[6] chksum=0 #Go through data from 'Mode' to 'Angle' for i in range(6): for each_char in data[i]: chksum = chksum + int(each_char) #When checksum is valid process the aircraft command here if (chksum == int(data[6])): DecodeTCP(int(data[0]), int(data[1]), int(data[2]), int(data[3]), int(data[4]), int(data[5])) except: print >>sys.stderr, 'Server closed. Press any key to quit' # Clean up the connection connection.close() #Disarm aircraft if it is armed DecodeTCP(0,0,0,0,0,0) pass finally: print >>sys.stderr, 'Server closed. Press any key to quit' # Clean up the connection connection.close() #Disarm aircraft if it is armed DecodeTCP(0,0,0,0,0,0)
from csmtiser.config import load_config_file
import sys

if len(sys.argv) > 1:
    config = load_config_file(sys.argv[1])
else:
    config = load_config_file()

import os

sys.stdout.write('Deleting old models in the working directory\n')
os.system('rm -rf '+config.working_dir+'/corpus')
os.system('rm -rf '+config.working_dir+'/model')
os.system('rm -rf '+config.working_dir+'/giza.norm-orig')
os.system('rm -rf '+config.working_dir+'/giza.orig-norm')
os.system('rm -rf '+config.working_dir+'/train.log')
os.system('rm -rf '+config.working_dir+'/mert-work/')

lms = config.lms[:]
config.lms.insert(0, config.working_dir+'/train.norm')
for index, pth in enumerate(config.lms):
    sys.stdout.write('Building a LM from '+pth+'\n')
    if index > 0:
        pth2 = config.working_dir+'/lm_'+str(index-1)+'.proc'
    else:
        pth2 = pth
    os.system(config.kenlm+'/lmplz -o '+str(config.lm_order)+' --discount_fallback < '+pth2+' 1> '+pth2+'.arpa 2>> '+config.working_dir+'/train.log')
    os.system(config.kenlm+'/build_binary '+pth2+'.arpa '+pth2+'.blm >> '+config.working_dir+'/train.log 2>&1')

sys.stdout.write('Building the untuned system\n')
sys.stdout.flush()
os.system(config.moses_scripts+'/training/train-model.perl -root-dir '+config.working_dir+' -corpus '+config.working_dir+'/train -f orig -e norm -alignment grow-diag-final-and -lm 0:'+str(config.lm_order)+':'+config.working_dir+'/train.norm.blm:8 -cores '+str(config.num_cores)+' --mgiza -mgiza-cpus '+str(config.num_cores)+' -external-bin-dir '+config.mgiza+' >> '+config.working_dir+'/train.log 2>&1')

sys.stdout.write('Updating the moses.ini file\n')
ini = open(config.working_dir+'/model/moses.ini').read().replace('[distortion-limit]\n6', '[distortion-limit]\n0')
modini = open(config.working_dir+'/model/moses.mod.ini', 'w')
for line in ini.strip().split('\n'):
    if line.startswith('Distortion') or line.startswith('LexicalReordering'):
        continue
    else:
        modini.write(line+'\n')
    if line.startswith('KENLM'):
        for index in range(1, len(config.lms)):
            modini.write('KENLM lazyken=0 name=LM'+str(index)+' factor=0 path='+config.working_dir+'/lm_'+str(index-1)+'.proc.blm order='+str(config.lm_order)+'\n')
for index in range(1, len(config.lms)):
    modini.write('LM'+str(index)+'= 0.5\n')
modini.close()

sys.stdout.write('Tuning the system\n')
sys.stdout.flush()
os.system(config.moses_scripts+'/training/mert-moses.pl '+config.working_dir+'/dev.orig '+config.working_dir+'/dev.norm '+config.moses+'/moses '+config.working_dir+'/model/moses.mod.ini --mertdir '+config.moses+' --working-dir '+config.working_dir+'/mert-work/ --mertargs="--sctype WER" --decoder-flags="-threads '+str(config.num_cores)+'" >> '+config.working_dir+'/train.log 2>&1')

sys.stdout.write('Finished\n')
sys.stdout.flush()
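# The attributes this script expects on the object returned by
# load_config_file(), collected from the uses above; the example values
# are purely illustrative:
#
#   working_dir   = norm                # scratch/output directory
#   lms           = []                  # extra LM corpora besides train.norm
#   lm_order      = 6
#   kenlm         = /opt/kenlm/bin      # location of lmplz and build_binary
#   moses         = /opt/moses/bin      # location of the moses and mert binaries
#   moses_scripts = /opt/moses/scripts
#   mgiza         = /opt/mgiza/bin
#   num_cores     = 4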
from __future__ import print_function from dolfin import * from dolfin_adjoint import * import sys dolfin.set_log_level(ERROR) n = 10 mesh = UnitIntervalMesh(n) V = FunctionSpace(mesh, "CG", 2) ic = project(Expression("sin(2*pi*x[0])", degree=1), V) u = ic.copy(deepcopy=True) def main(nu): u_next = Function(V) v = TestFunction(V) timestep = Constant(1.0/n, name="Timestep") F = ((u_next - u)/timestep*v + u_next*u_next.dx(0)*v + nu*u_next.dx(0)*v.dx(0))*dx bc = DirichletBC(V, 0.0, "on_boundary") t = 0.0 end = 0.1 while (t <= end): solve(F == 0, u_next, bc) u.assign(u_next) t += float(timestep) adj_inc_timestep() def eval_cb(j, m): print("j = %f, m = %f." % (j, float(m))) def derivative_cb(j, dj, m): print("j = %f, dj = %f, m = %f." % (j, dj, float(m))) def replay_cb(var, data, m): #print "Got data for variable %s at m = %f." % (var, float(m)) pass if __name__ == "__main__": nu = Constant(0.0001, name="Nu") # Run the forward model once to have the annotation main(nu) J = Functional(inner(u, u)*dx*dt[FINISH_TIME]) # Run the optimisation reduced_functional = ReducedFunctional(J, ConstantControl("Nu"), eval_cb_post= eval_cb, derivative_cb_post=derivative_cb, replay_cb=replay_cb, scale=2.0) try: nu_opt = minimize(reduced_functional, 'SLSQP') tol = 1e-4 if reduced_functional(nu_opt) > tol: print('Test failed: Optimised functional value exceeds tolerance: ', reduced_functional(nu_opt), ' > ', tol, '.') sys.exit(1) except ImportError: info_red("No suitable scipy version found. Aborting test.")
""" This is the boilerplate default configuration file. Changes and additions to settings should be done in the config module located in the application root rather than this config. """ config = { 'webapp2_extras.sessions' : {'secret_key': '_PUT_KEY_HERE_YOUR_SECRET_KEY_'}, 'webapp2_extras.auth' : {'user_model': 'boilerplate.models.User', 'cookie_name': 'session_name'}, 'webapp2_extras.jinja2' : {'template_path': ['templates','boilerplate/templates', 'admin/templates'], 'environment_args': {'extensions': ['jinja2.ext.i18n']}}, 'app_name' : "The Arky", 'app_lang' : 'en', 'locales' : ['en_US', 'es_ES', 'it_IT', 'zh_CN', 'id_ID', 'fr_FR', 'de_DE', 'ru_RU', 'pt_BR', 'cs_CZ'], 'contact_sender' : "PUT_SENDER_EMAIL_HERE", 'contact_recipient' : "tjunhao.90@gmail.com", 'aes_key' : "12_24_32_BYTES_KEY_FOR_PASSWORDS", 'salt' : "_PUT_SALT_HERE_TO_SHA512_PASSWORDS_", 'twitter_consumer_key' : 'wBtHqd4a3IqZN89J1TCeog', 'twitter_consumer_secret' : 'aH9isB1ZI2zM8gYMWrEhOI2DQaCXhn59PQCdyheMg', 'fb_api_key' : '136496959884393', 'fb_secret' : '70623320f6537fec08eb4a308ddc54b9', 'linkedin_api' : 'xv9iudz1frb8', 'linkedin_secret' : 'lPdAnwbrlOFViozl', 'github_server' : 'github.com', 'github_redirect_uri' : 'http://www.example.com/social_login/github/complete', 'github_client_id' : 'bf270aa784452945c2d9', 'github_client_secret' : '9e80bd5c451605437dbffb03e22af4036d8d645f', 'captcha_public_key' : "6Lf3-uISAAAAAJJmMkUjTP_Pjg7iXCVadduKEbl2", 'captcha_private_key' : "6Lf3-uISAAAAAFxZOHfmSw2Kydxfk6K_vOfXP5rW", 'google_analytics_domain' : "YOUR_PRIMARY_DOMAIN (e.g. google.com)", 'google_analytics_code' : "UA-XXXXX-X", 'error_templates' : { 403: 'errors/default_error.html', 404: 'errors/default_error.html', 500: 'errors/default_error.html', }, 'enable_federated_login' : True, 'base_layout' : 'base.html', 'send_mail_developer' : False, 'developers' : ( ('Tan Jun Hao', 'bb111189@gmail.com') ), 'log_email' : False, 'log_visit' : False, } # end config
""" Parsing for router status entries, the information for individual routers within a network status document. This information is provided from a few sources... * control port via 'GETINFO ns/\*' and 'GETINFO md/\*' queries * router entries in a network status document, like the cached-consensus **Module Overview:** :: RouterStatusEntry - Common parent for router status entries |- RouterStatusEntryV2 - Entry for a network status v2 document |- RouterStatusEntryV3 - Entry for a network status v3 document +- RouterStatusEntryMicroV3 - Entry for a microdescriptor flavored v3 document """ import base64 import binascii import stem.exit_policy import stem.prereq import stem.util.str_tools from stem.descriptor import ( KEYWORD_LINE, Descriptor, _value, _values, _get_descriptor_components, _read_until_keywords, ) def _parse_file(document_file, validate, entry_class, entry_keyword = 'r', start_position = None, end_position = None, section_end_keywords = (), extra_args = ()): """ Reads a range of the document_file containing some number of entry_class instances. We deliminate the entry_class entries by the keyword on their first line (entry_keyword). When finished the document is left at the end_position. Either an end_position or section_end_keywords must be provided. :param file document_file: file with network status document content :param bool validate: checks the validity of the document's contents if **True**, skips these checks otherwise :param class entry_class: class to construct instance for :param str entry_keyword: first keyword for the entry instances :param int start_position: start of the section, default is the current position :param int end_position: end of the section :param tuple section_end_keywords: keyword(s) that deliminate the end of the section if no end_position was provided :param tuple extra_args: extra arguments for the entry_class (after the content and validate flag) :returns: iterator over entry_class instances :raises: * **ValueError** if the contents is malformed and validate is **True** * **IOError** if the file can't be read """ if start_position: document_file.seek(start_position) else: start_position = document_file.tell() # check if we're starting at the end of the section (ie, there's no entries to read) if section_end_keywords: first_keyword = None line_match = KEYWORD_LINE.match(stem.util.str_tools._to_unicode(document_file.readline())) if line_match: first_keyword = line_match.groups()[0] document_file.seek(start_position) if first_keyword in section_end_keywords: return while end_position is None or document_file.tell() < end_position: desc_lines, ending_keyword = _read_until_keywords( (entry_keyword,) + section_end_keywords, document_file, ignore_first = True, end_position = end_position, include_ending_keyword = True ) desc_content = bytes.join(b'', desc_lines) if desc_content: yield entry_class(desc_content, validate, *extra_args) # check if we stopped at the end of the section if ending_keyword in section_end_keywords: break else: break def _parse_r_line(descriptor, entries): # Parses a RouterStatusEntry's 'r' line. They're very nearly identical for # all current entry types (v2, v3, and microdescriptor v3) with one little # wrinkle: only the microdescriptor flavor excludes a 'digest' field. 
  #
  # For v2 and v3 router status entries:
  #   "r" nickname identity digest publication IP ORPort DirPort
  #   example: r mauer BD7xbfsCFku3+tgybEZsg8Yjhvw itcuKQ6PuPLJ7m/Oi928WjO2j8g 2012-06-22 13:19:32 80.101.105.103 9001 0
  #
  # For v3 microdescriptor router status entries:
  #   "r" nickname identity publication IP ORPort DirPort
  #   example: r Konata ARIJF2zbqirB9IwsW0mQznccWww 2012-09-24 13:40:40 69.64.48.168 9001 9030

  value = _value('r', entries)
  include_digest = not isinstance(descriptor, RouterStatusEntryMicroV3)

  r_comp = value.split(' ')

  # inject a None for the digest to normalize the field positioning
  if not include_digest:
    r_comp.insert(2, None)

  if len(r_comp) < 8:
    expected_field_count = 'eight' if include_digest else 'seven'
    raise ValueError("%s 'r' line must have %s values: r %s" % (descriptor._name(), expected_field_count, value))

  if not stem.util.tor_tools.is_valid_nickname(r_comp[0]):
    raise ValueError("%s nickname isn't valid: %s" % (descriptor._name(), r_comp[0]))
  elif not stem.util.connection.is_valid_ipv4_address(r_comp[5]):
    raise ValueError("%s address isn't a valid IPv4 address: %s" % (descriptor._name(), r_comp[5]))
  elif not stem.util.connection.is_valid_port(r_comp[6]):
    raise ValueError('%s ORPort is invalid: %s' % (descriptor._name(), r_comp[6]))
  elif not stem.util.connection.is_valid_port(r_comp[7], allow_zero = True):
    raise ValueError('%s DirPort is invalid: %s' % (descriptor._name(), r_comp[7]))

  descriptor.nickname = r_comp[0]
  descriptor.fingerprint = _base64_to_hex(r_comp[1])

  if include_digest:
    descriptor.digest = _base64_to_hex(r_comp[2])

  descriptor.address = r_comp[5]
  descriptor.or_port = int(r_comp[6])
  descriptor.dir_port = None if r_comp[7] == '0' else int(r_comp[7])

  try:
    published = '%s %s' % (r_comp[3], r_comp[4])
    descriptor.published = stem.util.str_tools._parse_timestamp(published)
  except ValueError:
    raise ValueError("Publication time wasn't parsable: r %s" % value)


def _parse_a_line(descriptor, entries):
  # "a" SP address ":" portlist
  # example: a [2001:888:2133:0:82:94:251:204]:9001

  or_addresses = []

  for value in _values('a', entries):
    if ':' not in value:
      raise ValueError("%s 'a' line must be of the form '[address]:[ports]': a %s" % (descriptor._name(), value))

    address, port = value.rsplit(':', 1)

    if not stem.util.connection.is_valid_ipv4_address(address) and not stem.util.connection.is_valid_ipv6_address(address, allow_brackets = True):
      raise ValueError("%s 'a' line must start with an IPv4 or IPv6 address: a %s" % (descriptor._name(), value))

    if stem.util.connection.is_valid_port(port):
      or_addresses.append((address.lstrip('[').rstrip(']'), int(port), stem.util.connection.is_valid_ipv6_address(address, allow_brackets = True)))
    else:
      raise ValueError("%s 'a' line had an invalid port (%s): a %s" % (descriptor._name(), port, value))

  descriptor.or_addresses = or_addresses


def _parse_s_line(descriptor, entries):
  # "s" Flags
  # example: s Named Running Stable Valid

  value = _value('s', entries)
  flags = [] if value == '' else value.split(' ')
  descriptor.flags = flags

  for flag in flags:
    if flags.count(flag) > 1:
      raise ValueError('%s had duplicate flags: s %s' % (descriptor._name(), value))
    elif flag == '':
      raise ValueError("%s had extra whitespace on its 's' line: s %s" % (descriptor._name(), value))


def _parse_v_line(descriptor, entries):
  # "v" version
  # example: v Tor 0.2.2.35
  #
  # The spec says that if this starts with "Tor " then what follows is a
  # tor version. If not then it has "upgraded to a more sophisticated
  # protocol versioning system".

  value = _value('v', entries)
  descriptor.version_line = value

  if value.startswith('Tor '):
    try:
      descriptor.version = stem.version._get_version(value[4:])
    except ValueError as exc:
      raise ValueError('%s has a malformed tor version (%s): v %s' % (descriptor._name(), exc, value))


def _parse_w_line(descriptor, entries):
  # "w" "Bandwidth=" INT ["Measured=" INT] ["Unmeasured=1"]
  # example: w Bandwidth=7980

  value = _value('w', entries)
  w_comp = value.split(' ')

  if len(w_comp) < 1:
    raise ValueError("%s 'w' line is blank: w %s" % (descriptor._name(), value))
  elif not w_comp[0].startswith('Bandwidth='):
    raise ValueError("%s 'w' line needs to start with a 'Bandwidth=' entry: w %s" % (descriptor._name(), value))

  bandwidth = None
  measured = None
  is_unmeasured = False
  unrecognized_bandwidth_entries = []

  for w_entry in w_comp:
    if '=' in w_entry:
      w_key, w_value = w_entry.split('=', 1)
    else:
      w_key, w_value = w_entry, None

    if w_key == 'Bandwidth':
      if not (w_value and w_value.isdigit()):
        raise ValueError("%s 'Bandwidth=' entry needs to have a numeric value: w %s" % (descriptor._name(), value))

      bandwidth = int(w_value)
    elif w_key == 'Measured':
      if not (w_value and w_value.isdigit()):
        raise ValueError("%s 'Measured=' entry needs to have a numeric value: w %s" % (descriptor._name(), value))

      measured = int(w_value)
    elif w_key == 'Unmeasured':
      if w_value != '1':
        raise ValueError("%s 'Unmeasured=' should only have the value of '1': w %s" % (descriptor._name(), value))

      is_unmeasured = True
    else:
      unrecognized_bandwidth_entries.append(w_entry)

  descriptor.bandwidth = bandwidth
  descriptor.measured = measured
  descriptor.is_unmeasured = is_unmeasured
  descriptor.unrecognized_bandwidth_entries = unrecognized_bandwidth_entries


def _parse_p_line(descriptor, entries):
  # "p" ("accept" / "reject") PortList
  #
  # examples:
  #
  #   p accept 80,110,143,443,993,995,6660-6669,6697,7000-7001
  #   p reject 1-65535

  value = _value('p', entries)

  try:
    descriptor.exit_policy = stem.exit_policy.MicroExitPolicy(value)
  except ValueError as exc:
    raise ValueError('%s exit policy is malformed (%s): p %s' % (descriptor._name(), exc, value))


def _parse_id_line(descriptor, entries):
  # "id" "ed25519" ed25519-identity
  #
  # examples:
  #
  #   id ed25519 none
  #   id ed25519 8RH34kO07Pp+XYwzdoATVyCibIvmbslUjRkAm7J4IA8

  value = _value('id', entries)

  if value:
    if not (descriptor.document and descriptor.document.is_vote):
      vote_status = 'vote' if descriptor.document else '<undefined document>'
      raise ValueError("%s 'id' line should only appear in votes (appeared in a %s): id %s" % (descriptor._name(), vote_status, value))

    value_comp = value.split()

    if len(value_comp) >= 2:
      descriptor.identifier_type = value_comp[0]
      descriptor.identifier = value_comp[1]
    else:
      raise ValueError("'id' lines should contain both the key type and digest: id %s" % value)


def _parse_m_line(descriptor, entries):
  # "m" methods 1*(algorithm "=" digest)
  # example: m 8,9,10,11,12 sha256=g1vx9si329muxV3tquWIXXySNOIwRGMeAESKs/v4DWs

  all_hashes = []

  for value in _values('m', entries):
    m_comp = value.split(' ')

    if not (descriptor.document and descriptor.document.is_vote):
      vote_status = 'vote' if descriptor.document else '<undefined document>'
      raise ValueError("%s 'm' line should only appear in votes (appeared in a %s): m %s" % (descriptor._name(), vote_status, value))
    elif len(m_comp) < 1:
      raise ValueError("%s 'm' line needs to start with a series of methods: m %s" % (descriptor._name(), value))

    try:
      methods = [int(entry) for entry in m_comp[0].split(',')]
    except ValueError:
      raise ValueError('%s microdescriptor methods should be a series of comma separated integers: m %s' % (descriptor._name(), value))

    hashes = {}

    for entry in m_comp[1:]:
      if '=' not in entry:
        raise ValueError("%s can only have a series of 'algorithm=digest' mappings after the methods: m %s" % (descriptor._name(), value))

      hash_name, digest = entry.split('=', 1)
      hashes[hash_name] = digest

    all_hashes.append((methods, hashes))

  descriptor.microdescriptor_hashes = all_hashes


def _parse_microdescriptor_m_line(descriptor, entries):
  # "m" digest
  # example: m aiUklwBrua82obG5AsTX+iEpkjQA2+AQHxZ7GwMfY70

  descriptor.digest = _base64_to_hex(_value('m', entries), check_if_fingerprint = False)


def _base64_to_hex(identity, check_if_fingerprint = True):
  """
  Decodes a base64 value to hex. For example...

  ::

    >>> _base64_to_hex('p1aag7VwarGxqctS7/fS0y5FU+s')
    'A7569A83B5706AB1B1A9CB52EFF7D2D32E4553EB'

  :param str identity: encoded fingerprint from the consensus
  :param bool check_if_fingerprint: asserts that the result is a fingerprint if **True**

  :returns: **str** with the uppercase hex encoding of the relay's fingerprint

  :raises: **ValueError** if the result isn't a valid fingerprint
  """

  # trailing equal signs were stripped from the identity
  missing_padding = len(identity) % 4
  identity += '=' * missing_padding

  try:
    identity_decoded = base64.b64decode(stem.util.str_tools._to_bytes(identity))
  except (TypeError, binascii.Error):
    raise ValueError("Unable to decode identity string '%s'" % identity)

  fingerprint = binascii.b2a_hex(identity_decoded).upper()

  if stem.prereq.is_python_3():
    fingerprint = stem.util.str_tools._to_unicode(fingerprint)

  if check_if_fingerprint:
    if not stem.util.tor_tools.is_valid_fingerprint(fingerprint):
      raise ValueError("Decoded '%s' to be '%s', which isn't a valid fingerprint" % (identity, fingerprint))

  return fingerprint


class RouterStatusEntry(Descriptor):
  """
  Information about an individual router stored within a network status
  document. This is the common parent for concrete status entry types.

  :var stem.descriptor.networkstatus.NetworkStatusDocument document: **\***
    document that this descriptor came from

  :var str nickname: **\*** router's nickname
  :var str fingerprint: **\*** router's fingerprint
  :var datetime published: **\*** router's publication
  :var str address: **\*** router's IP address
  :var int or_port: **\*** router's ORPort
  :var int dir_port: **\*** router's DirPort
  :var list flags: **\*** list of :data:`~stem.Flag` associated with the relay

  :var stem.version.Version version: parsed version of tor, this is **None** if
    the relay's using a new versioning scheme
  :var str version_line: versioning information reported by the relay
  """

  ATTRIBUTES = {
    'nickname': (None, _parse_r_line),
    'fingerprint': (None, _parse_r_line),
    'published': (None, _parse_r_line),
    'address': (None, _parse_r_line),
    'or_port': (None, _parse_r_line),
    'dir_port': (None, _parse_r_line),
    'flags': (None, _parse_s_line),
    'version_line': (None, _parse_v_line),
    'version': (None, _parse_v_line),
  }

  PARSER_FOR_LINE = {
    'r': _parse_r_line,
    's': _parse_s_line,
    'v': _parse_v_line,
  }

  def __init__(self, content, validate = False, document = None):
    """
    Parse a router descriptor in a network status document.

    :param str content: router descriptor content to be parsed
    :param bool validate: checks the validity of the content if **True**,
      skips these checks otherwise
    :param NetworkStatusDocument document: document this descriptor came from

    :raises: **ValueError** if the descriptor data is invalid
    """

    super(RouterStatusEntry, self).__init__(content, lazy_load = not validate)
    self.document = document
    entries = _get_descriptor_components(content, validate)

    if validate:
      for keyword in self._required_fields():
        if keyword not in entries:
          raise ValueError("%s must have a '%s' line:\n%s" % (self._name(True), keyword, str(self)))

      for keyword in self._single_fields():
        if keyword in entries and len(entries[keyword]) > 1:
          raise ValueError("%s can only have a single '%s' line, got %i:\n%s" % (self._name(True), keyword, len(entries[keyword]), str(self)))

      if 'r' != list(entries.keys())[0]:
        raise ValueError("%s are expected to start with a 'r' line:\n%s" % (self._name(True), str(self)))

      self._parse(entries, validate)
    else:
      self._entries = entries

  def _name(self, is_plural = False):
    """
    Name for this descriptor type.
    """

    return 'Router status entries' if is_plural else 'Router status entry'

  def _required_fields(self):
    """
    Provides lines that must appear in the descriptor.
    """

    return ()

  def _single_fields(self):
    """
    Provides lines that can only appear in the descriptor once.
    """

    return ()

  def _compare(self, other, method):
    if not isinstance(other, RouterStatusEntry):
      return False

    return method(str(self).strip(), str(other).strip())

  def __eq__(self, other):
    return self._compare(other, lambda s, o: s == o)

  def __ne__(self, other):
    return not self == other

  def __lt__(self, other):
    return self._compare(other, lambda s, o: s < o)

  def __le__(self, other):
    return self._compare(other, lambda s, o: s <= o)


class RouterStatusEntryV2(RouterStatusEntry):
  """
  Information about an individual router stored within a version 2 network
  status document.

  :var str digest: **\*** router's upper-case hex digest

  **\*** attribute is either required when we're parsed with validation or
  has a default value, others are left as **None** if undefined
  """

  ATTRIBUTES = dict(RouterStatusEntry.ATTRIBUTES, **{
    'digest': (None, _parse_r_line),
  })

  def _name(self, is_plural = False):
    return 'Router status entries (v2)' if is_plural else 'Router status entry (v2)'

  def _required_fields(self):
    return ('r',)  # single-element tuple; a bare ('r') is just the string 'r'

  def _single_fields(self):
    return ('r', 's', 'v')

  def _compare(self, other, method):
    if not isinstance(other, RouterStatusEntryV2):
      return False

    return method(str(self).strip(), str(other).strip())

  def __eq__(self, other):
    return self._compare(other, lambda s, o: s == o)

  def __ne__(self, other):
    return not self == other

  def __lt__(self, other):
    return self._compare(other, lambda s, o: s < o)

  def __le__(self, other):
    return self._compare(other, lambda s, o: s <= o)


class RouterStatusEntryV3(RouterStatusEntry):
  """
  Information about an individual router stored within a version 3 network
  status document.

  :var list or_addresses: **\*** relay's OR addresses, this is a tuple listing
    of the form (address (**str**), port (**int**), is_ipv6 (**bool**))
  :var str identifier_type: identity digest key type
  :var str identifier: base64 encoded identity digest
  :var str digest: **\*** router's upper-case hex digest
  :var int bandwidth: bandwidth claimed by the relay (in kb/s)
  :var int measured: bandwidth measured to be available by the relay, this is
    a unit-less heuristic generated by the bandwidth authorities to weight
    relay selection
  :var bool is_unmeasured: bandwidth measurement isn't based on three or more
    measurements
  :var list unrecognized_bandwidth_entries: **\*** bandwidth weighting
    information that isn't yet recognized
  :var stem.exit_policy.MicroExitPolicy exit_policy: router's exit policy
  :var list microdescriptor_hashes: **\*** tuples of two values, the list of
    consensus methods for generating a set of digests and the 'algorithm =>
    digest' mappings

  **\*** attribute is either required when we're parsed with validation or
  has a default value, others are left as **None** if undefined

  .. versionchanged:: 1.5.0
     Added the identifier and identifier_type attributes.
  """

  ATTRIBUTES = dict(RouterStatusEntry.ATTRIBUTES, **{
    'digest': (None, _parse_r_line),
    'or_addresses': ([], _parse_a_line),
    'identifier_type': (None, _parse_id_line),
    'identifier': (None, _parse_id_line),
    'bandwidth': (None, _parse_w_line),
    'measured': (None, _parse_w_line),
    'is_unmeasured': (False, _parse_w_line),
    'unrecognized_bandwidth_entries': ([], _parse_w_line),
    'exit_policy': (None, _parse_p_line),
    'microdescriptor_hashes': ([], _parse_m_line),
  })

  PARSER_FOR_LINE = dict(RouterStatusEntry.PARSER_FOR_LINE, **{
    'a': _parse_a_line,
    'w': _parse_w_line,
    'p': _parse_p_line,
    'id': _parse_id_line,
    'm': _parse_m_line,
  })

  def _name(self, is_plural = False):
    return 'Router status entries (v3)' if is_plural else 'Router status entry (v3)'

  def _required_fields(self):
    return ('r', 's')

  def _single_fields(self):
    return ('r', 's', 'v', 'w', 'p')

  def _compare(self, other, method):
    if not isinstance(other, RouterStatusEntryV3):
      return False

    return method(str(self).strip(), str(other).strip())

  def __eq__(self, other):
    return self._compare(other, lambda s, o: s == o)

  def __ne__(self, other):
    return not self == other

  def __lt__(self, other):
    return self._compare(other, lambda s, o: s < o)

  def __le__(self, other):
    return self._compare(other, lambda s, o: s <= o)


class RouterStatusEntryMicroV3(RouterStatusEntry):
  """
  Information about an individual router stored within a microdescriptor
  flavored network status document.
:var int bandwidth: bandwidth claimed by the relay (in kb/s) :var int measured: bandwidth measured to be available by the relay :var bool is_unmeasured: bandwidth measurement isn't based on three or more measurements :var list unrecognized_bandwidth_entries: **\*** bandwidth weighting information that isn't yet recognized :var str digest: **\*** router's hex encoded digest of our corresponding microdescriptor **\*** attribute is either required when we're parsed with validation or has a default value, others are left as **None** if undefined """ ATTRIBUTES = dict(RouterStatusEntry.ATTRIBUTES, **{ 'bandwidth': (None, _parse_w_line), 'measured': (None, _parse_w_line), 'is_unmeasured': (False, _parse_w_line), 'unrecognized_bandwidth_entries': ([], _parse_w_line), 'digest': (None, _parse_microdescriptor_m_line), }) PARSER_FOR_LINE = dict(RouterStatusEntry.PARSER_FOR_LINE, **{ 'w': _parse_w_line, 'm': _parse_microdescriptor_m_line, }) def _name(self, is_plural = False): return 'Router status entries (micro v3)' if is_plural else 'Router status entry (micro v3)' def _required_fields(self): return ('r', 's', 'm') def _single_fields(self): return ('r', 's', 'v', 'w', 'm') def _compare(self, other, method): if not isinstance(other, RouterStatusEntryMicroV3): return False return method(str(self).strip(), str(other).strip()) def __eq__(self, other): return self._compare(other, lambda s, o: s == o) def __ne__(self, other): return not self == other def __lt__(self, other): return self._compare(other, lambda s, o: s < o) def __le__(self, other): return self._compare(other, lambda s, o: s <= o)
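

# A minimal usage sketch (an illustrative assumption, not part of the module
# itself): feed a single consensus entry to RouterStatusEntryV3 and read back
# the parsed fields. The 'r'/'s' sample lines are the caerSidi values used in
# stem's own documentation. Requires a stem checkout to run.
if __name__ == '__main__':
  content = b'\n'.join((
    b'r caerSidi p1aag7VwarGxqctS7/fS0y5FU+s oQZFLYe9e4A7bOkWKR7TaNxb0JE 2012-08-18 18:58:42 71.35.150.29 9001 0',
    b's Fast Named Running Stable Valid',
  ))

  entry = RouterStatusEntryV3(content, validate = True)
  print(entry.nickname, entry.address, entry.or_port)  # caerSidi 71.35.150.29 9001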
import sqlite3 def collate_reverse(string1, string2): if string1 == string2: return 0 elif string1 < string2: return 1 else: return -1 con = sqlite3.connect(":memory:") con.create_collation("reverse", collate_reverse) cur = con.cursor() cur.execute("create table test(x)") cur.executemany("insert into test(x) values (?)", [("a",), ("b",)]) cur.execute("select x from test order by x collate reverse") for row in cur: print(row) con.close()
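
# Follow-up sketch: for these plain strings the custom collation above is
# equivalent to a descending sort, and a collation can be dropped again by
# registering None under the same name (a documented sqlite3 feature).
con = sqlite3.connect(":memory:")
con.create_collation("reverse", collate_reverse)
cur = con.cursor()
cur.execute("create table test(x)")
cur.executemany("insert into test(x) values (?)", [("a",), ("b",)])
assert (cur.execute("select x from test order by x collate reverse").fetchall() ==
        cur.execute("select x from test order by x desc").fetchall())
con.create_collation("reverse", None)  # deregister the collation
con.close()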
predict2020_value_list = [
    6.961153333332845, 6.882159999997782, 6.84168666666892, 6.869866666668145,
    7.013566666669362, 7.2024000000003525, 7.2634200000021, 7.31390666666465,
    7.3549733333350105, 7.260779999999784, 7.232080000001247, 7.299973333334833,
]

# int() instead of eval(): eval() would execute arbitrary user input as code
month = int(input("Enter a month [1-12]: "))
assert 1 <= month <= 12, '[Error] [%s] is not a valid month' % month

predicted_rates = predict2020_value_list[:month]
lowest = predicted_rates.index(min(predicted_rates)) + 1
print("Between January 2020 and your departure, the best month to exchange "
      "currency is month {0} (lowest predicted rate: {1:.5f})"
      .format(lowest, predicted_rates[lowest - 1]))
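
# Equivalent lookup sketch using enumerate, for comparison with the
# index-based approach above (1-based month numbering preserved):
best_month, best_rate = min(enumerate(predicted_rates, start=1),
                            key=lambda pair: pair[1])
assert best_month == lowest and best_rate == predicted_rates[lowest - 1]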
{
    'name': 'Project Move Activity Tasks',
    'version': '11.0.1.0.0',
    'author': 'Savoir-faire Linux, Odoo Community Association (OCA)',
    'maintainer': 'Savoir-faire Linux',
    'website': 'http://www.savoirfairelinux.com',
    'license': 'AGPL-3',
    'category': 'Project Management',
    'summary': 'Move activity tasks between projects',
    'depends': [
        'project_event',
    ],
    'external_dependencies': {
        'python': [],
    },
    'data': [
        'wizard/project_move_activity_tasks_wizard.xml',
        'views/project_task_view.xml',
    ],
    'installable': True,
    'application': False,
}
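
# Hypothetical companion sketch (not shipped with the module): the manifest
# above declares a wizard view, so a matching transient model might look
# roughly like this. All names below are illustrative assumptions.
from odoo import fields, models


class ProjectMoveActivityTasksWizard(models.TransientModel):
    _name = 'project.move.activity.tasks.wizard'
    _description = 'Move Activity Tasks'

    # destination project chosen by the user before moving tasks
    project_id = fields.Many2one(
        'project.project',
        string='Destination Project',
        required=True,
    )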