repo_name
stringlengths
5
100
path
stringlengths
4
294
copies
stringclasses
990 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
idrogeno/IdroMips
lib/python/Plugins/SystemPlugins/DeviceManager/ExtraMessageBox.py
19
4039
from enigma import * from Screens.Screen import Screen from Components.ActionMap import ActionMap from Components.Sources.List import List from Tools.Directories import resolveFilename, SCOPE_CURRENT_PLUGIN from Tools.LoadPixmap import LoadPixmap from Components.Label import Label def MessageBoxEntry(name, picture): pixmap = LoadPixmap(cached = True, path = resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/DeviceManager/icons/" + picture)); if not pixmap: pixmap = LoadPixmap(cached = True, path = resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/DeviceManager/icons/empty.png")); return (pixmap, name) class ExtraMessageBox(Screen): skin = """ <screen name="ExtraMessageBox" position="center,center" size="460,430" title=" "> <widget name="message" position="10,10" size="440,25" font="Regular;20" /> <widget source="menu" render="Listbox" position="20,90" size="420,360" scrollbarMode="showOnDemand"> <convert type="TemplatedMultiContent"> {"template": [ MultiContentEntryPixmapAlphaTest(pos = (5, 0), size = (48, 48), png = 0), MultiContentEntryText(pos = (65, 10), size = (425, 38), font=0, flags = RT_HALIGN_LEFT|RT_VALIGN_TOP, text = 1), ], "fonts": [gFont("Regular", 22)], "itemHeight": 48 } </convert> </widget> <applet type="onLayoutFinish"> # this should be factored out into some helper code, but currently demonstrates applets. from enigma import eSize, ePoint orgwidth = self.instance.size().width() orgheight = self.instance.size().height() orgpos = self.instance.position() textsize = self[&quot;message&quot;].getSize() # y size still must be fixed in font stuff... 
if self[&quot;message&quot;].getText() != &quot;&quot;: textsize = (textsize[0] + 80, textsize[1] + 60) else: textsize = (textsize[0] + 80, textsize[1] + 4) count = len(self.list) if count &gt; 7: count = 7 offset = 48 * count wsizex = textsize[0] + 80 wsizey = textsize[1] + offset + 20 if (460 &gt; wsizex): wsizex = 460 wsize = (wsizex, wsizey) # resize self.instance.resize(eSize(*wsize)) # resize label self[&quot;message&quot;].instance.resize(eSize(*textsize)) # move list listsize = (wsizex - 20, 48 * count) self[&quot;menu&quot;].downstream_elements.downstream_elements.instance.move(ePoint(10, textsize[1] + 10)) self[&quot;menu&quot;].downstream_elements.downstream_elements.instance.resize(eSize(*listsize)) # center window newwidth = wsize[0] newheight = wsize[1] self.instance.move(ePoint(orgpos.x() + (orgwidth - newwidth)/2, orgpos.y() + (orgheight - newheight)/2)) </applet> </screen>""" def __init__(self, session, message = "", title = "", menulist = [], type = 0, exitid = -1, default = 0, timeout = 0): # type exist for compability... 
will be ignored Screen.__init__(self, session) self.session = session self.ctitle = title self.exitid = exitid self.default = default self.timeout = timeout self.elapsed = 0 self.list = [] for item in menulist: self.list.append(MessageBoxEntry(item[0], item[1])) self['menu'] = List(self.list) self["menu"].onSelectionChanged.append(self.selectionChanged) self["message"] = Label(message) self["actions"] = ActionMap(["SetupActions"], { "ok": self.ok, "cancel": self.cancel }, -2) self.onLayoutFinish.append(self.layoutFinished) self.timer = eTimer() self.timer.callback.append(self.timeoutStep) if self.timeout > 0: self.timer.start(1000, 1) def selectionChanged(self): self.timer.stop() self.setTitle(self.ctitle) def timeoutStep(self): self.elapsed += 1 if self.elapsed == self.timeout: self.ok() else: self.setTitle("%s - %d" % (self.ctitle, self.timeout - self.elapsed)) self.timer.start(1000, 1) def layoutFinished(self): if self.timeout > 0: self.setTitle("%s - %d" % (self.ctitle, self.timeout)) else: self.setTitle(self.ctitle) self['menu'].setCurrentIndex(self.default) def ok(self): index = self['menu'].getIndex() self.close(index) def cancel(self): if self.exitid > -1: self.close(self.exitid)
gpl-2.0
calcuttj/nuisance
scripts/plotnuismin.py
3
7757
from ROOT import * import os import sys gColorList = [kRed, kGreen, kBlue, kYellow, kOrange, kBlack] def DrawDataMC(keyname, filelist): # Extract Data data = None for readfile in filelist: print keyname data = readfile[0].Get(keyname) if not data: continue break if not data: print "Data not found for : ", keyname sys.exit(-1) # Main Data Formatting data.SetTitle(keyname) data.SetLineColor(kBlack) data.SetLineWidth(2) # Extract MC singlemclist = [] singledatalist = [] for i, mcfile in enumerate(allfiles): print mcfile[0] # Extract individual MC mckey = keyname.replace("_data","_MC") singlemc = mcfile[0].Get(mckey) if singlemc: singlemc = singlemc.Clone(mcfile[1]+"_MC") singlemc.SetLineColor( gColorList[i] ) singlemc.SetLineWidth(2) singlemc.SetTitle( mcfile[1] + " (" + str(singlemc.GetTitle().strip()) + ") " ) singlemclist.append(singlemc.Clone()) del singlemc # Extra individual data (optional) singledata = mcfile[0].Get(keyname) if singledata: singledata = singledata.Clone(mcfile[1] + "_DATA") singledata.SetLineColor( kBlack ) singledata.SetLineWidth(2) singledata.SetTitle( "^-> Saved Data" ) singledatalist.append(singledata.Clone()) del singledata # Assign Ranges miny = 99999.9 maxy = 0.0 for i in range(data.GetNbinsX()): miny = min([data.GetBinContent(i+1) - data.GetBinError(i+1),miny]) maxy = max([data.GetBinContent(i+1) + data.GetBinError(i+1),maxy]) for singlemc in singlemclist: miny = min([singlemc.GetMinimum(),miny]) maxy = max([singlemc.GetMaximum(),maxy]) for singledata in singledatalist: miny = min([singledata.GetMinimum(),miny]) maxy = max([singledata.GetMaximum(),maxy]) widthy = maxy - miny # Assign Ranges to data if "1D" in keyname: data.GetYaxis().SetRangeUser( miny - 0.1*widthy, maxy + 0.3*widthy) elif "2D" in keyname: data.GetZaxis().SetRangeUser( miny - 0.1*widthy, maxy + 0.3*widthy) # Draw Plots 1D if "1D" in keyname: data.Draw("E1") for mc in singlemclist: mc.Draw("SAME HIST") # Draw Plots 2D elif "2D" in keyname: data.Draw("E1") for mc in 
singlemclist: mc.Draw("SAME LEGO") # Build Legend leg = gPad.BuildLegend(0.45,0.65,0.8,0.85) leg.SetFillStyle(0) leg.SetFillColorAlpha(0,0.0) leg.SetBorderSize(0) gStyle.SetOptTitle(1) gPad.SetGridx(0) gPad.SetGridy(0) gPad.Update() singlemclist.append(data) return singlemclist def DrawFitDialsPlot(allfiles): singlemclist = [] singlelimitlist = [] for i, mcfile in enumerate(allfiles): singlemc = mcfile[0].Get("fit_dials") if not singlemc: continue # Setup fit result singlemc = singlemc.Clone(mcfile[1]+"_FIT") singlemc.SetLineColor( gColorList[i] ) singlemc.SetFillColorAlpha( gColorList[i], 0.3 ) singlemc.SetLineWidth(2) singlemc.SetTitle( mcfile[1] ) singlemclist.append(singlemc.Clone()) del singlemc # Setup Limits singlestart = mcfile[0].Get("start_dials") singlemin = mcfile[0].Get("min_dials") singlemax = mcfile[0].Get("max_dials") print singlestart, singlemin, singlemax singlestart.SetLineColor(gColorList[i]) singlestart.SetLineWidth(1) singlestart.SetLineStyle(7) singlelimitlist.append(singlestart.Clone()) singlemin.SetLineColor(gColorList[i]) singlemin.SetLineWidth(2) singlemin.SetLineStyle(5) singlelimitlist.append(singlemin.Clone()) singlemax.SetLineColor(gColorList[i]) singlemax.SetLineWidth(2) singlemax.SetLineStyle(5) singlelimitlist.append(singlemax.Clone()) # Assign Ranges miny = 99999.9 maxy = 0.0 for singlemc in singlemclist: miny = min([singlemc.GetMinimum(),miny]) maxy = max([singlemc.GetMaximum(),maxy]) for singlelimit in singlelimitlist: miny = min([singlelimit.GetMinimum(),miny]) maxy = max([singlelimit.GetMaximum(),maxy]) widthy = maxy - miny # Assign Ranges to data data = singlemclist[0] data.GetYaxis().SetRangeUser( miny - 0.1*widthy, maxy + 0.3*widthy) # Draw our limits for i, mc in enumerate(singlemclist): if i == 0: mc.Draw("E2") else: mc.Draw("SAME E2") leg = gPad.BuildLegend(0.7,0.8,1.0,1.0) for limit in singlelimitlist: limit.Draw("SAME HIST") for mc in singlemclist: mc.Draw("SAME E2") startline = TLine(0.6,0.6,0.8,0.8) limitline = 
TLine(0.6,0.6,0.8,0.8) startline.SetLineStyle(7) limitline.SetLineStyle(5) leg.AddEntry(startline, "Start", "l") leg.AddEntry(limitline, "Limits", "l") linehists = [] for mc in singlemclist: mcline = mc.Clone() mcline.SetFillStyle(0) mcline.Draw("SAME HIST") linehists.append(mcline) leg.Draw("SAME") gPad.Update() return [singlemclist, singlelimitlist, linehists, leg] if __name__=="__main__": c1 = TCanvas("c1","c1",800,600) c1.cd() # Make filelist allfiles = [] for i in xrange(2, len(sys.argv)): print "Reading ", i, sys.argv[i] # split by comma splitname = sys.argv[i].split(",") # Get First if (os.path.isfile(splitname[0])): # Get File newfile = (TFile(splitname[0],"READ")) if not newfile: print "File is not a ROOT file : ", splitname[0] sys.exit() # Set Name name = splitname[0].replace(".root","") if len(splitname) > 1: name = splitname[1] allfiles.append([newfile, name]) print allfiles # Parse Unique Keys uniquekeys = [] for readfile in allfiles: for readkey in readfile[0].GetListOfKeys(): if not (readkey.GetName().endswith("_data")): continue if readkey.GetName() not in uniquekeys: uniquekeys.append(readkey.GetName()) print uniquekeys # Setup First Page leg = TLegend(0.1,0.1,0.9,0.9) for i, readfile in enumerate(allfiles): hist = TH1D(readfile[1],readfile[1],1,0,1) hist.SetLineColor(gColorList[i % len(gColorList)]) hist.SetLineWidth(2) leg.AddEntry(hist, readfile[1], "l") leg.Draw() gPad.Update() outputfile = sys.argv[1] c1.Print(outputfile + "(") plotlist = DrawFitDialsPlot(allfiles) c1.Print(outputfile) # Loop through unique keys for readkey in uniquekeys: # Draw datamclist = DrawDataMC(readkey, allfiles) # Save c1.Print(outputfile) # Now save the legend again to close... leg.Draw() gPad.Update() gPad.Print(outputfile + ")")
gpl-3.0
manoj24rana/MobileIPv6
src/fd-net-device/bindings/modulegen__gcc_LP64.py
17
325365
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers import pybindgen.settings import warnings class ErrorHandler(pybindgen.settings.ErrorHandler): def handle_error(self, wrapper, exception, traceback_): warnings.warn("exception %r in wrapper %s" % (exception, wrapper)) return True pybindgen.settings.error_handler = ErrorHandler() import sys def module_init(): root_module = Module('ns.fd_net_device', cpp_namespace='::ns3') return root_module def register_types(module): root_module = module.get_root() ## address.h (module 'network'): ns3::Address [class] module.add_class('Address', import_from_module='ns.network') ## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration] module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network') ## trace-helper.h (module 'network'): ns3::AsciiTraceHelper [class] module.add_class('AsciiTraceHelper', import_from_module='ns.network') ## trace-helper.h (module 'network'): ns3::AsciiTraceHelperForDevice [class] module.add_class('AsciiTraceHelperForDevice', allow_subclassing=True, import_from_module='ns.network') ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class] module.add_class('AttributeConstructionList', import_from_module='ns.core') ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct] module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList']) ## buffer.h (module 'network'): ns3::Buffer [class] module.add_class('Buffer', import_from_module='ns.network') ## buffer.h (module 'network'): ns3::Buffer::Iterator [class] module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer']) ## packet.h (module 'network'): ns3::ByteTagIterator [class] module.add_class('ByteTagIterator', import_from_module='ns.network') ## packet.h (module 'network'): ns3::ByteTagIterator::Item 
[class] module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator']) ## byte-tag-list.h (module 'network'): ns3::ByteTagList [class] module.add_class('ByteTagList', import_from_module='ns.network') ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator [class] module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList']) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct] module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator']) ## callback.h (module 'core'): ns3::CallbackBase [class] module.add_class('CallbackBase', import_from_module='ns.core') ## system-mutex.h (module 'core'): ns3::CriticalSection [class] module.add_class('CriticalSection', import_from_module='ns.core') ## data-rate.h (module 'network'): ns3::DataRate [class] module.add_class('DataRate', import_from_module='ns.network') ## event-id.h (module 'core'): ns3::EventId [class] module.add_class('EventId', import_from_module='ns.core') ## hash.h (module 'core'): ns3::Hasher [class] module.add_class('Hasher', import_from_module='ns.core') ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class] module.add_class('Ipv4Address', import_from_module='ns.network') ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class] root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address']) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class] module.add_class('Ipv4Mask', import_from_module='ns.network') ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class] module.add_class('Ipv6Address', import_from_module='ns.network') ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class] root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address']) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class] module.add_class('Ipv6Prefix', import_from_module='ns.network') 
## mac48-address.h (module 'network'): ns3::Mac48Address [class] module.add_class('Mac48Address', import_from_module='ns.network') ## mac48-address.h (module 'network'): ns3::Mac48Address [class] root_module['ns3::Mac48Address'].implicitly_converts_to(root_module['ns3::Address']) ## net-device-container.h (module 'network'): ns3::NetDeviceContainer [class] module.add_class('NetDeviceContainer', import_from_module='ns.network') ## node-container.h (module 'network'): ns3::NodeContainer [class] module.add_class('NodeContainer', import_from_module='ns.network') ## object-base.h (module 'core'): ns3::ObjectBase [class] module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core') ## object.h (module 'core'): ns3::ObjectDeleter [struct] module.add_class('ObjectDeleter', import_from_module='ns.core') ## object-factory.h (module 'core'): ns3::ObjectFactory [class] module.add_class('ObjectFactory', import_from_module='ns.core') ## packet-metadata.h (module 'network'): ns3::PacketMetadata [class] module.add_class('PacketMetadata', import_from_module='ns.network') ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [struct] module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata']) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [enumeration] module.add_enum('', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network') ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator [class] module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata']) ## packet.h (module 'network'): ns3::PacketTagIterator [class] module.add_class('PacketTagIterator', import_from_module='ns.network') ## packet.h (module 'network'): ns3::PacketTagIterator::Item [class] module.add_class('Item', import_from_module='ns.network', 
outer_class=root_module['ns3::PacketTagIterator']) ## packet-tag-list.h (module 'network'): ns3::PacketTagList [class] module.add_class('PacketTagList', import_from_module='ns.network') ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData [struct] module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList']) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData_e [enumeration] module.add_enum('TagData_e', ['MAX_SIZE'], outer_class=root_module['ns3::PacketTagList::TagData'], import_from_module='ns.network') ## pcap-file.h (module 'network'): ns3::PcapFile [class] module.add_class('PcapFile', import_from_module='ns.network') ## trace-helper.h (module 'network'): ns3::PcapHelper [class] module.add_class('PcapHelper', import_from_module='ns.network') ## trace-helper.h (module 'network'): ns3::PcapHelper [enumeration] module.add_enum('', ['DLT_NULL', 'DLT_EN10MB', 'DLT_PPP', 'DLT_RAW', 'DLT_IEEE802_11', 'DLT_LINUX_SLL', 'DLT_PRISM_HEADER', 'DLT_IEEE802_11_RADIO', 'DLT_IEEE802_15_4', 'DLT_NETLINK'], outer_class=root_module['ns3::PcapHelper'], import_from_module='ns.network') ## trace-helper.h (module 'network'): ns3::PcapHelperForDevice [class] module.add_class('PcapHelperForDevice', allow_subclassing=True, import_from_module='ns.network') ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simulator.h (module 'core'): ns3::Simulator [class] module.add_class('Simulator', destructor_visibility='private', import_from_module='ns.core') ## system-condition.h (module 'core'): 
ns3::SystemCondition [class] module.add_class('SystemCondition', import_from_module='ns.core') ## system-mutex.h (module 'core'): ns3::SystemMutex [class] module.add_class('SystemMutex', import_from_module='ns.core') ## tag.h (module 'network'): ns3::Tag [class] module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase']) ## tag-buffer.h (module 'network'): ns3::TagBuffer [class] module.add_class('TagBuffer', import_from_module='ns.network') ## nstime.h (module 'core'): ns3::TimeWithUnit [class] module.add_class('TimeWithUnit', import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId [class] module.add_class('TypeId', import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration] module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct] module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId']) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct] module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId']) ## empty.h (module 'core'): ns3::empty [class] module.add_class('empty', import_from_module='ns.core') ## int64x64-double.h (module 'core'): ns3::int64x64_t [class] module.add_class('int64x64_t', import_from_module='ns.core') ## int64x64-double.h (module 'core'): ns3::int64x64_t::impl_type [enumeration] module.add_enum('impl_type', ['int128_impl', 'cairo_impl', 'ld_impl'], outer_class=root_module['ns3::int64x64_t'], import_from_module='ns.core') ## chunk.h (module 'network'): ns3::Chunk [class] module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase']) ## fd-net-device-helper.h (module 'fd-net-device'): ns3::FdNetDeviceHelper [class] 
module.add_class('FdNetDeviceHelper', parent=[root_module['ns3::PcapHelperForDevice'], root_module['ns3::AsciiTraceHelperForDevice']]) ## header.h (module 'network'): ns3::Header [class] module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk']) ## object.h (module 'core'): ns3::Object [class] module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >']) ## object.h (module 'core'): ns3::Object::AggregateIterator [class] module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object']) ## pcap-file-wrapper.h (module 'network'): ns3::PcapFileWrapper [class] module.add_class('PcapFileWrapper', import_from_module='ns.network', parent=root_module['ns3::Object']) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, 
ns3::DefaultDeleter<ns3::AttributeValue> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::EventImpl', 'ns3::empty', 'ns3::DefaultDeleter<ns3::EventImpl>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::FdReader', 'ns3::empty', 'ns3::DefaultDeleter<ns3::FdReader>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): 
ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter<ns3::NetDeviceQueue> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NetDeviceQueue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NetDeviceQueue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::OutputStreamWrapper', 'ns3::empty', 'ns3::DefaultDeleter<ns3::OutputStreamWrapper>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', 
peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::QueueItem', 'ns3::empty', 'ns3::DefaultDeleter<ns3::QueueItem>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::SystemThread', 'ns3::empty', 'ns3::DefaultDeleter<ns3::SystemThread>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', 
decref_method='Unref', peekref_method='GetReferenceCount')) ## system-thread.h (module 'core'): ns3::SystemThread [class] module.add_class('SystemThread', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >']) ## nstime.h (module 'core'): ns3::Time [class] module.add_class('Time', import_from_module='ns.core') ## nstime.h (module 'core'): ns3::Time::Unit [enumeration] module.add_enum('Unit', ['Y', 'D', 'H', 'MIN', 'S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core') ## nstime.h (module 'core'): ns3::Time [class] root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t']) ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class] module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >']) ## trailer.h (module 'network'): ns3::Trailer [class] module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk']) ## attribute.h (module 'core'): ns3::AttributeAccessor [class] module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >']) ## attribute.h (module 'core'): ns3::AttributeChecker [class] module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >']) ## attribute.h (module 'core'): ns3::AttributeValue [class] module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, 
ns3::DefaultDeleter<ns3::AttributeValue> >'])
    # NOTE(review): this file is PyBindGen output (ns-3 fd-net-device Python
    # bindings). Do not hand-edit the registration calls below — regenerate
    # the bindings instead. Registration order matters: a parent type must be
    # registered before any add_class() that names it via root_module[...].
    ## callback.h (module 'core'): ns3::CallbackChecker [class]
    module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## callback.h (module 'core'): ns3::CallbackImplBase [class]
    module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    ## callback.h (module 'core'): ns3::CallbackValue [class]
    module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## data-rate.h (module 'network'): ns3::DataRateChecker [class]
    module.add_class('DataRateChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## data-rate.h (module 'network'): ns3::DataRateValue [class]
    module.add_class('DataRateValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
    module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## emu-fd-net-device-helper.h (module 'fd-net-device'): ns3::EmuFdNetDeviceHelper [class]
    module.add_class('EmuFdNetDeviceHelper', parent=root_module['ns3::FdNetDeviceHelper'])
    ## event-impl.h (module 'core'): ns3::EventImpl [class]
    module.add_class('EventImpl', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
    ## unix-fd-reader.h (module 'core'): ns3::FdReader [class]
    module.add_class('FdReader', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> >'])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker [class]
    module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue [class]
    module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker [class]
    module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue [class]
    module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker [class]
    module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue [class]
    module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker [class]
    module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue [class]
    module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## mac48-address.h (module 'network'): ns3::Mac48AddressChecker [class]
    module.add_class('Mac48AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## mac48-address.h (module 'network'): ns3::Mac48AddressValue [class]
    module.add_class('Mac48AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## net-device.h (module 'network'): ns3::NetDevice [class]
    module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object'])
    ## net-device.h (module 'network'): ns3::NetDevice::PacketType [enumeration]
    module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network')
    ## net-device.h (module 'network'): ns3::NetDeviceQueue [class]
    module.add_class('NetDeviceQueue', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter<ns3::NetDeviceQueue> >'])
    ## net-device.h (module 'network'): ns3::NetDeviceQueueInterface [class]
    module.add_class('NetDeviceQueueInterface', import_from_module='ns.network', parent=root_module['ns3::Object'])
    ## nix-vector.h (module 'network'): ns3::NixVector [class]
    module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
    ## node.h (module 'network'): ns3::Node [class]
    module.add_class('Node', import_from_module='ns.network', parent=root_module['ns3::Object'])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker [class]
    module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryValue [class]
    module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper [class]
    module.add_class('OutputStreamWrapper', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >'])
    ## packet.h (module 'network'): ns3::Packet [class]
    module.add_class('Packet', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
    ## net-device.h (module 'network'): ns3::QueueItem [class]
    module.add_class('QueueItem', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> >'])
    ## tap-fd-net-device-helper.h (module 'fd-net-device'): ns3::TapFdNetDeviceHelper [class]
    module.add_class('TapFdNetDeviceHelper', parent=root_module['ns3::EmuFdNetDeviceHelper'])
    ## nstime.h (module 'core'): ns3::TimeValue [class]
    module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## type-id.h (module 'core'): ns3::TypeIdChecker [class]
    module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## type-id.h (module 'core'): ns3::TypeIdValue [class]
    module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## address.h (module 'network'): ns3::AddressChecker [class]
    module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## address.h (module 'network'): ns3::AddressValue [class]
    module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## fd-net-device.h (module 'fd-net-device'): ns3::FdNetDevice [class]
    module.add_class('FdNetDevice', parent=root_module['ns3::NetDevice'])
    ## fd-net-device.h (module 'fd-net-device'): ns3::FdNetDevice::EncapsulationMode [enumeration]
    module.add_enum('EncapsulationMode', ['DIX', 'LLC', 'DIXPI'], outer_class=root_module['ns3::FdNetDevice'])
    ## fd-net-device.h (module 'fd-net-device'): ns3::FdNetDeviceFdReader [class]
    module.add_class('FdNetDeviceFdReader', parent=root_module['ns3::FdReader'])
    ## Register a nested module for the namespace FatalImpl
    nested_module = module.add_cpp_namespace('FatalImpl')
    register_types_ns3_FatalImpl(nested_module)
    ## Register a nested module for the namespace Hash
    nested_module = module.add_cpp_namespace('Hash')
    register_types_ns3_Hash(nested_module)
    ## Register a nested module for the namespace TracedValueCallback
    nested_module = module.add_cpp_namespace('TracedValueCallback')
    register_types_ns3_TracedValueCallback(nested_module)

def register_types_ns3_FatalImpl(module):
    """Register types for the nested ns3::FatalImpl namespace (none wrapped here)."""
    root_module = module.get_root()

def register_types_ns3_Hash(module):
    """Register types and function-pointer type aliases for the ns3::Hash namespace."""
    root_module = module.get_root()
    ## hash-function.h (module 'core'): ns3::Hash::Implementation [class]
    module.add_class('Implementation', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
    # Aliases for the raw 32/64-bit hash function pointer typedefs (value,
    # pointer, and reference forms of each).
    typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash32Function_ptr')
    typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash32Function_ptr*')
    typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash32Function_ptr&')
    typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash64Function_ptr')
    typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash64Function_ptr*')
    typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash64Function_ptr&')
    ## Register a nested module for the namespace Function
    nested_module = module.add_cpp_namespace('Function')
    register_types_ns3_Hash_Function(nested_module)

def register_types_ns3_Hash_Function(module):
    """Register the concrete hash implementations in ns3::Hash::Function."""
    root_module = module.get_root()
    ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a [class]
    module.add_class('Fnv1a', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32 [class]
    module.add_class('Hash32', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64 [class]
    module.add_class('Hash64', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
    ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3 [class]
    module.add_class('Murmur3', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])

def register_types_ns3_TracedValueCallback(module):
    """Register type aliases for the ns3::TracedValueCallback namespace."""
    root_module = module.get_root()
    typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, ns3::Time ) *', u'ns3::TracedValueCallback::Time')
    typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, ns3::Time ) **', u'ns3::TracedValueCallback::Time*')
    typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, ns3::Time ) *&', u'ns3::TracedValueCallback::Time&')

def register_methods(root_module):
    """Dispatch to one register_Ns3*_methods helper per wrapped C++ type.

    Each helper attaches the constructors/methods/operators of its type to the
    class object previously created in the type-registration pass.
    """
    register_Ns3Address_methods(root_module, root_module['ns3::Address'])
    register_Ns3AsciiTraceHelper_methods(root_module, root_module['ns3::AsciiTraceHelper'])
    register_Ns3AsciiTraceHelperForDevice_methods(root_module, root_module['ns3::AsciiTraceHelperForDevice'])
    register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
    register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
    register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer'])
    register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator'])
    register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator'])
    register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item'])
    register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList'])
    register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator'])
    register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item'])
    register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
    register_Ns3CriticalSection_methods(root_module, root_module['ns3::CriticalSection'])
    register_Ns3DataRate_methods(root_module, root_module['ns3::DataRate'])
    register_Ns3EventId_methods(root_module, root_module['ns3::EventId'])
    register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher'])
    register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address'])
    register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask'])
    register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address'])
    register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix'])
    register_Ns3Mac48Address_methods(root_module, root_module['ns3::Mac48Address'])
    register_Ns3NetDeviceContainer_methods(root_module, root_module['ns3::NetDeviceContainer'])
    register_Ns3NodeContainer_methods(root_module, root_module['ns3::NodeContainer'])
    register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
    register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
    register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
    register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata'])
    register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item'])
    register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator'])
    register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator'])
    register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item'])
    register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList'])
    register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData'])
    register_Ns3PcapFile_methods(root_module, root_module['ns3::PcapFile'])
    register_Ns3PcapHelper_methods(root_module, root_module['ns3::PcapHelper'])
    register_Ns3PcapHelperForDevice_methods(root_module, root_module['ns3::PcapHelperForDevice'])
    register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    register_Ns3Simulator_methods(root_module, root_module['ns3::Simulator'])
    register_Ns3SystemCondition_methods(root_module, root_module['ns3::SystemCondition'])
    register_Ns3SystemMutex_methods(root_module, root_module['ns3::SystemMutex'])
    register_Ns3Tag_methods(root_module, root_module['ns3::Tag'])
    register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
    register_Ns3TimeWithUnit_methods(root_module, root_module['ns3::TimeWithUnit'])
    register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
    register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
    register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
    register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
    register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
    register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk'])
    register_Ns3FdNetDeviceHelper_methods(root_module, root_module['ns3::FdNetDeviceHelper'])
    register_Ns3Header_methods(root_module, root_module['ns3::Header'])
    register_Ns3Object_methods(root_module, root_module['ns3::Object'])
    register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
    register_Ns3PcapFileWrapper_methods(root_module, root_module['ns3::PcapFileWrapper'])
    register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
    register_Ns3SimpleRefCount__Ns3FdReader_Ns3Empty_Ns3DefaultDeleter__lt__ns3FdReader__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> >'])
    register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
    register_Ns3SimpleRefCount__Ns3NetDeviceQueue_Ns3Empty_Ns3DefaultDeleter__lt__ns3NetDeviceQueue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter<ns3::NetDeviceQueue> >'])
    register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
    register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >'])
    register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
    register_Ns3SimpleRefCount__Ns3QueueItem_Ns3Empty_Ns3DefaultDeleter__lt__ns3QueueItem__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> >'])
    register_Ns3SimpleRefCount__Ns3SystemThread_Ns3Empty_Ns3DefaultDeleter__lt__ns3SystemThread__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >'])
    register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    register_Ns3SystemThread_methods(root_module, root_module['ns3::SystemThread'])
    register_Ns3Time_methods(root_module, root_module['ns3::Time'])
    register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
    register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer'])
    register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
    register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
    register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
    register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
    register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
    register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
    register_Ns3DataRateChecker_methods(root_module, root_module['ns3::DataRateChecker'])
    register_Ns3DataRateValue_methods(root_module, root_module['ns3::DataRateValue'])
    register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
    register_Ns3EmuFdNetDeviceHelper_methods(root_module, root_module['ns3::EmuFdNetDeviceHelper'])
    register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl'])
    register_Ns3FdReader_methods(root_module, root_module['ns3::FdReader'])
    register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker'])
    register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue'])
    register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker'])
    register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue'])
    register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker'])
    register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue'])
    register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker'])
    register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue'])
    register_Ns3Mac48AddressChecker_methods(root_module, root_module['ns3::Mac48AddressChecker'])
    register_Ns3Mac48AddressValue_methods(root_module, root_module['ns3::Mac48AddressValue'])
    register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice'])
    register_Ns3NetDeviceQueue_methods(root_module, root_module['ns3::NetDeviceQueue'])
    register_Ns3NetDeviceQueueInterface_methods(root_module, root_module['ns3::NetDeviceQueueInterface'])
    register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector'])
    register_Ns3Node_methods(root_module, root_module['ns3::Node'])
    register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
    register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
    register_Ns3OutputStreamWrapper_methods(root_module, root_module['ns3::OutputStreamWrapper'])
    register_Ns3Packet_methods(root_module, root_module['ns3::Packet'])
    register_Ns3QueueItem_methods(root_module, root_module['ns3::QueueItem'])
    register_Ns3TapFdNetDeviceHelper_methods(root_module, root_module['ns3::TapFdNetDeviceHelper'])
    register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
    register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
    register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
    register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker'])
    register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue'])
    register_Ns3FdNetDevice_methods(root_module, root_module['ns3::FdNetDevice'])
    register_Ns3FdNetDeviceFdReader_methods(root_module, root_module['ns3::FdNetDeviceFdReader'])
    register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation'])
    register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a'])
    register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32'])
    register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64'])
    register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3'])
    return

def register_Ns3Address_methods(root_module, cls):
    """Bind operators, constructors and methods of ns3::Address."""
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## address.h (module 'network'): ns3::Address::Address() [constructor]
    cls.add_constructor([])
    ## address.h (module 'network'): ns3::Address::Address(uint8_t type, uint8_t const * buffer, uint8_t len) [constructor]
    cls.add_constructor([param('uint8_t', 'type'), param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    ## address.h (module 'network'): ns3::Address::Address(ns3::Address const & address) [copy constructor]
    cls.add_constructor([param('ns3::Address const &', 'address')])
    ## address.h (module 'network'): bool ns3::Address::CheckCompatible(uint8_t type, uint8_t len) const [member function]
    cls.add_method('CheckCompatible', 'bool', [param('uint8_t', 'type'), param('uint8_t', 'len')], is_const=True)
    ## address.h (module 'network'): uint32_t ns3::Address::CopyAllFrom(uint8_t const * buffer, uint8_t len) [member function]
    cls.add_method('CopyAllFrom', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    ## address.h (module 'network'): uint32_t ns3::Address::CopyAllTo(uint8_t * buffer, uint8_t len) const [member function]
    cls.add_method('CopyAllTo', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint8_t', 'len')], is_const=True)
    ## address.h (module 'network'): uint32_t ns3::Address::CopyFrom(uint8_t const * buffer, uint8_t len) [member function]
    cls.add_method('CopyFrom', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    ## address.h (module 'network'): uint32_t ns3::Address::CopyTo(uint8_t * buffer) const [member function]
    cls.add_method('CopyTo', 'uint32_t', [param('uint8_t *', 'buffer')], is_const=True)
    ## address.h (module 'network'): void ns3::Address::Deserialize(ns3::TagBuffer buffer) [member function]
    cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'buffer')])
    ## address.h (module 'network'): uint8_t ns3::Address::GetLength() const [member function]
    cls.add_method('GetLength', 'uint8_t', [], is_const=True)
    ## address.h (module 'network'): uint32_t ns3::Address::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    ## address.h (module 'network'): bool ns3::Address::IsInvalid() const [member function]
    cls.add_method('IsInvalid', 'bool', [], is_const=True)
    ## address.h (module 'network'): bool ns3::Address::IsMatchingType(uint8_t type) const [member function]
    cls.add_method('IsMatchingType', 'bool', [param('uint8_t', 'type')], is_const=True)
    ## address.h (module 'network'): static uint8_t ns3::Address::Register() [member function]
    cls.add_method('Register', 'uint8_t', [], is_static=True)
    ## address.h (module 'network'): void ns3::Address::Serialize(ns3::TagBuffer buffer) const [member function]
    cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'buffer')], is_const=True)
    return

def register_Ns3AsciiTraceHelper_methods(root_module, cls):
    """Bind constructors and methods of ns3::AsciiTraceHelper."""
    ## trace-helper.h (module 'network'): ns3::AsciiTraceHelper::AsciiTraceHelper(ns3::AsciiTraceHelper const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AsciiTraceHelper const &', 'arg0')])
    ## trace-helper.h (module 'network'): ns3::AsciiTraceHelper::AsciiTraceHelper() [constructor]
    cls.add_constructor([])
    ## trace-helper.h (module 'network'): ns3::Ptr<ns3::OutputStreamWrapper> ns3::AsciiTraceHelper::CreateFileStream(std::string filename, std::_Ios_Openmode filemode=std::ios_base::out) [member function]
    cls.add_method('CreateFileStream', 'ns3::Ptr< ns3::OutputStreamWrapper >', [param('std::string', 'filename'), param('std::_Ios_Openmode', 'filemode', default_value='std::ios_base::out')])
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultDequeueSinkWithContext(ns3::Ptr<ns3::OutputStreamWrapper> file, std::string context, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('DefaultDequeueSinkWithContext', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('std::string', 'context'), param('ns3::Ptr< ns3::Packet const >', 'p')], is_static=True)
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultDequeueSinkWithoutContext(ns3::Ptr<ns3::OutputStreamWrapper> file, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('DefaultDequeueSinkWithoutContext', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('ns3::Ptr< ns3::Packet const >', 'p')], is_static=True)
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultDropSinkWithContext(ns3::Ptr<ns3::OutputStreamWrapper> file, std::string context, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('DefaultDropSinkWithContext', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('std::string', 'context'), param('ns3::Ptr< ns3::Packet const >', 'p')], is_static=True)
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultDropSinkWithoutContext(ns3::Ptr<ns3::OutputStreamWrapper> file, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('DefaultDropSinkWithoutContext', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('ns3::Ptr< ns3::Packet const >', 'p')], is_static=True)
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultEnqueueSinkWithContext(ns3::Ptr<ns3::OutputStreamWrapper> file, std::string context, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('DefaultEnqueueSinkWithContext', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('std::string', 'context'), param('ns3::Ptr< ns3::Packet const >', 'p')], is_static=True)
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultEnqueueSinkWithoutContext(ns3::Ptr<ns3::OutputStreamWrapper> file, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('DefaultEnqueueSinkWithoutContext', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('ns3::Ptr< ns3::Packet const >', 'p')], is_static=True)
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultReceiveSinkWithContext(ns3::Ptr<ns3::OutputStreamWrapper> file, std::string context, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('DefaultReceiveSinkWithContext', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('std::string', 'context'), param('ns3::Ptr< ns3::Packet const >', 'p')], is_static=True)
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultReceiveSinkWithoutContext(ns3::Ptr<ns3::OutputStreamWrapper> file, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('DefaultReceiveSinkWithoutContext', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('ns3::Ptr< ns3::Packet const >', 'p')], is_static=True)
    ## trace-helper.h (module 'network'): std::string ns3::AsciiTraceHelper::GetFilenameFromDevice(std::string prefix, ns3::Ptr<ns3::NetDevice> device, bool useObjectNames=true) [member function]
    cls.add_method('GetFilenameFromDevice', 'std::string', [param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'useObjectNames', default_value='true')])
    ## trace-helper.h (module 'network'): std::string ns3::AsciiTraceHelper::GetFilenameFromInterfacePair(std::string prefix, ns3::Ptr<ns3::Object> object, uint32_t interface, bool useObjectNames=true) [member function]
    cls.add_method('GetFilenameFromInterfacePair', 'std::string', [param('std::string', 'prefix'), param('ns3::Ptr< ns3::Object >', 'object'), param('uint32_t', 'interface'), param('bool', 'useObjectNames', default_value='true')])
    return

def register_Ns3AsciiTraceHelperForDevice_methods(root_module, cls):
    """Bind the EnableAscii* overload set of ns3::AsciiTraceHelperForDevice."""
    ## trace-helper.h (module 'network'): ns3::AsciiTraceHelperForDevice::AsciiTraceHelperForDevice(ns3::AsciiTraceHelperForDevice const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AsciiTraceHelperForDevice const &', 'arg0')])
    ## trace-helper.h (module 'network'): ns3::AsciiTraceHelperForDevice::AsciiTraceHelperForDevice() [constructor]
    cls.add_constructor([])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool explicitFilename=false) [member function]
    cls.add_method('EnableAscii', 'void', [param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'explicitFilename', default_value='false')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(ns3::Ptr<ns3::OutputStreamWrapper> stream, ns3::Ptr<ns3::NetDevice> nd) [member function]
    cls.add_method('EnableAscii', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::Ptr< ns3::NetDevice >', 'nd')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(std::string prefix, std::string ndName, bool explicitFilename=false) [member function]
    cls.add_method('EnableAscii', 'void', [param('std::string', 'prefix'), param('std::string', 'ndName'), param('bool', 'explicitFilename', default_value='false')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(ns3::Ptr<ns3::OutputStreamWrapper> stream, std::string ndName) [member function]
    cls.add_method('EnableAscii', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('std::string', 'ndName')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(std::string prefix, ns3::NetDeviceContainer d) [member function]
    cls.add_method('EnableAscii', 'void', [param('std::string', 'prefix'), param('ns3::NetDeviceContainer', 'd')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(ns3::Ptr<ns3::OutputStreamWrapper> stream, ns3::NetDeviceContainer d) [member function]
    cls.add_method('EnableAscii', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::NetDeviceContainer', 'd')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(std::string prefix, ns3::NodeContainer n) [member function]
    cls.add_method('EnableAscii', 'void', [param('std::string', 'prefix'), param('ns3::NodeContainer', 'n')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(ns3::Ptr<ns3::OutputStreamWrapper> stream, ns3::NodeContainer n) [member function]
    cls.add_method('EnableAscii', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::NodeContainer', 'n')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(std::string prefix, uint32_t nodeid, uint32_t deviceid, bool explicitFilename) [member function]
    cls.add_method('EnableAscii', 'void', [param('std::string', 'prefix'), param('uint32_t', 'nodeid'), param('uint32_t', 'deviceid'), param('bool', 'explicitFilename')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(ns3::Ptr<ns3::OutputStreamWrapper> stream, uint32_t nodeid, uint32_t deviceid) [member function]
    cls.add_method('EnableAscii', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('uint32_t', 'nodeid'), param('uint32_t', 'deviceid')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAsciiAll(std::string prefix) [member function]
    cls.add_method('EnableAsciiAll', 'void', [param('std::string', 'prefix')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAsciiAll(ns3::Ptr<ns3::OutputStreamWrapper> stream) [member function]
    cls.add_method('EnableAsciiAll', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAsciiInternal(ns3::Ptr<ns3::OutputStreamWrapper> stream, std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool explicitFilename) [member function]
    cls.add_method('EnableAsciiInternal', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'explicitFilename')], is_pure_virtual=True, is_virtual=True)
    return

def register_Ns3AttributeConstructionList_methods(root_module, cls):
    """Bind constructors and methods of ns3::AttributeConstructionList."""
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor]
    cls.add_constructor([])
    ## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::Ptr<ns3::AttributeValue> value) [member function]
    cls.add_method('Add', 'void', [param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
    ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::Begin() const [member function]
    cls.add_method('Begin', 'std::_List_const_iterator< ns3::AttributeConstructionList::Item >', [], is_const=True)
    ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::End() const [member function]
    cls.add_method('End', 'std::_List_const_iterator< ns3::AttributeConstructionList::Item >', [], is_const=True)
    ## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('Find', 'ns3::Ptr< ns3::AttributeValue >', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True)
    return

def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
    """Bind constructors and public attributes of ns3::AttributeConstructionList::Item."""
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor]
    cls.add_constructor([])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable]
    cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable]
    cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
    return

def register_Ns3Buffer_methods(root_module, cls):
    """Bind constructors and methods of ns3::Buffer (continues past this chunk)."""
    ## buffer.h (module 'network'): ns3::Buffer::Buffer() [constructor]
    cls.add_constructor([])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize) [constructor]
    cls.add_constructor([param('uint32_t', 'dataSize')])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize, bool initialize) [constructor]
    cls.add_constructor([param('uint32_t', 'dataSize'), param('bool', 'initialize')])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(ns3::Buffer const & o) [copy constructor]
    cls.add_constructor([param('ns3::Buffer const &', 'o')])
    ## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(uint32_t end) [member function]
    cls.add_method('AddAtEnd', 'void', [param('uint32_t', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(ns3::Buffer const & o) [member function]
    cls.add_method('AddAtEnd', 'void', [param('ns3::Buffer const &', 'o')])
    ## buffer.h (module 'network'): void ns3::Buffer::AddAtStart(uint32_t start) [member function]
    cls.add_method('AddAtStart', 'void', [param('uint32_t', 'start')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::Begin() const [member function]
    cls.add_method('Begin', 'ns3::Buffer::Iterator', [], is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::CopyData(std::ostream * os, uint32_t size) const [member function]
    cls.add_method('CopyData', 'void', [param('std::ostream *', 'os'), param('uint32_t', 'size')], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::CopyData(uint8_t * buffer, uint32_t size) const [member function]
    cls.add_method('CopyData', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')], is_const=True)
    ## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFragment(uint32_t start, uint32_t length) const [member function]
    cls.add_method('CreateFragment', 'ns3::Buffer', [param('uint32_t', 'start'), param('uint32_t', 'length')], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Deserialize(uint8_t const
* buffer, uint32_t size) [member function] cls.add_method('Deserialize', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')]) ## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::End() const [member function] cls.add_method('End', 'ns3::Buffer::Iterator', [], is_const=True) ## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True) ## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSize() const [member function] cls.add_method('GetSize', 'uint32_t', [], is_const=True) ## buffer.h (module 'network'): uint8_t const * ns3::Buffer::PeekData() const [member function] cls.add_method('PeekData', 'uint8_t const *', [], is_const=True) ## buffer.h (module 'network'): void ns3::Buffer::RemoveAtEnd(uint32_t end) [member function] cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'end')]) ## buffer.h (module 'network'): void ns3::Buffer::RemoveAtStart(uint32_t start) [member function] cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'start')]) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function] cls.add_method('Serialize', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True) return def register_Ns3BufferIterator_methods(root_module, cls): ## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator(ns3::Buffer::Iterator const & arg0) [copy constructor] cls.add_constructor([param('ns3::Buffer::Iterator const &', 'arg0')]) ## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator() [constructor] cls.add_constructor([]) ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size) [member function] cls.add_method('CalculateIpChecksum', 'uint16_t', [param('uint16_t', 'size')]) ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t 
size, uint32_t initialChecksum) [member function] cls.add_method('CalculateIpChecksum', 'uint16_t', [param('uint16_t', 'size'), param('uint32_t', 'initialChecksum')]) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetDistanceFrom(ns3::Buffer::Iterator const & o) const [member function] cls.add_method('GetDistanceFrom', 'uint32_t', [param('ns3::Buffer::Iterator const &', 'o')], is_const=True) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetSize() const [member function] cls.add_method('GetSize', 'uint32_t', [], is_const=True) ## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsEnd() const [member function] cls.add_method('IsEnd', 'bool', [], is_const=True) ## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsStart() const [member function] cls.add_method('IsStart', 'bool', [], is_const=True) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next() [member function] cls.add_method('Next', 'void', []) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next(uint32_t delta) [member function] cls.add_method('Next', 'void', [param('uint32_t', 'delta')]) ## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::PeekU8() [member function] cls.add_method('PeekU8', 'uint8_t', []) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev() [member function] cls.add_method('Prev', 'void', []) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev(uint32_t delta) [member function] cls.add_method('Prev', 'void', [param('uint32_t', 'delta')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(uint8_t * buffer, uint32_t size) [member function] cls.add_method('Read', 'void', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(ns3::Buffer::Iterator start, uint32_t size) [member function] cls.add_method('Read', 'void', [param('ns3::Buffer::Iterator', 'start'), param('uint32_t', 'size')]) ## buffer.h 
(module 'network'): uint16_t ns3::Buffer::Iterator::ReadLsbtohU16() [member function] cls.add_method('ReadLsbtohU16', 'uint16_t', []) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadLsbtohU32() [member function] cls.add_method('ReadLsbtohU32', 'uint32_t', []) ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadLsbtohU64() [member function] cls.add_method('ReadLsbtohU64', 'uint64_t', []) ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadNtohU16() [member function] cls.add_method('ReadNtohU16', 'uint16_t', []) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadNtohU32() [member function] cls.add_method('ReadNtohU32', 'uint32_t', []) ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadNtohU64() [member function] cls.add_method('ReadNtohU64', 'uint64_t', []) ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadU16() [member function] cls.add_method('ReadU16', 'uint16_t', []) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadU32() [member function] cls.add_method('ReadU32', 'uint32_t', []) ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadU64() [member function] cls.add_method('ReadU64', 'uint64_t', []) ## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::ReadU8() [member function] cls.add_method('ReadU8', 'uint8_t', []) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(uint8_t const * buffer, uint32_t size) [member function] cls.add_method('Write', 'void', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function] cls.add_method('Write', 'void', [param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU16(uint16_t data) [member function] cls.add_method('WriteHtolsbU16', 
'void', [param('uint16_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU32(uint32_t data) [member function] cls.add_method('WriteHtolsbU32', 'void', [param('uint32_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU64(uint64_t data) [member function] cls.add_method('WriteHtolsbU64', 'void', [param('uint64_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU16(uint16_t data) [member function] cls.add_method('WriteHtonU16', 'void', [param('uint16_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU32(uint32_t data) [member function] cls.add_method('WriteHtonU32', 'void', [param('uint32_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU64(uint64_t data) [member function] cls.add_method('WriteHtonU64', 'void', [param('uint64_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU16(uint16_t data) [member function] cls.add_method('WriteU16', 'void', [param('uint16_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU32(uint32_t data) [member function] cls.add_method('WriteU32', 'void', [param('uint32_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU64(uint64_t data) [member function] cls.add_method('WriteU64', 'void', [param('uint64_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data) [member function] cls.add_method('WriteU8', 'void', [param('uint8_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data, uint32_t len) [member function] cls.add_method('WriteU8', 'void', [param('uint8_t', 'data'), param('uint32_t', 'len')]) return def register_Ns3ByteTagIterator_methods(root_module, cls): ## packet.h (module 'network'): ns3::ByteTagIterator::ByteTagIterator(ns3::ByteTagIterator const & arg0) [copy constructor] 
cls.add_constructor([param('ns3::ByteTagIterator const &', 'arg0')]) ## packet.h (module 'network'): bool ns3::ByteTagIterator::HasNext() const [member function] cls.add_method('HasNext', 'bool', [], is_const=True) ## packet.h (module 'network'): ns3::ByteTagIterator::Item ns3::ByteTagIterator::Next() [member function] cls.add_method('Next', 'ns3::ByteTagIterator::Item', []) return def register_Ns3ByteTagIteratorItem_methods(root_module, cls): ## packet.h (module 'network'): ns3::ByteTagIterator::Item::Item(ns3::ByteTagIterator::Item const & arg0) [copy constructor] cls.add_constructor([param('ns3::ByteTagIterator::Item const &', 'arg0')]) ## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetEnd() const [member function] cls.add_method('GetEnd', 'uint32_t', [], is_const=True) ## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetStart() const [member function] cls.add_method('GetStart', 'uint32_t', [], is_const=True) ## packet.h (module 'network'): void ns3::ByteTagIterator::Item::GetTag(ns3::Tag & tag) const [member function] cls.add_method('GetTag', 'void', [param('ns3::Tag &', 'tag')], is_const=True) ## packet.h (module 'network'): ns3::TypeId ns3::ByteTagIterator::Item::GetTypeId() const [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True) return def register_Ns3ByteTagList_methods(root_module, cls): ## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList() [constructor] cls.add_constructor([]) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList(ns3::ByteTagList const & o) [copy constructor] cls.add_constructor([param('ns3::ByteTagList const &', 'o')]) ## byte-tag-list.h (module 'network'): ns3::TagBuffer ns3::ByteTagList::Add(ns3::TypeId tid, uint32_t bufferSize, int32_t start, int32_t end) [member function] cls.add_method('Add', 'ns3::TagBuffer', [param('ns3::TypeId', 'tid'), param('uint32_t', 'bufferSize'), param('int32_t', 'start'), param('int32_t', 'end')]) ## 
byte-tag-list.h (module 'network'): void ns3::ByteTagList::Add(ns3::ByteTagList const & o) [member function] cls.add_method('Add', 'void', [param('ns3::ByteTagList const &', 'o')]) ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtEnd(int32_t appendOffset) [member function] cls.add_method('AddAtEnd', 'void', [param('int32_t', 'appendOffset')]) ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtStart(int32_t prependOffset) [member function] cls.add_method('AddAtStart', 'void', [param('int32_t', 'prependOffset')]) ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Adjust(int32_t adjustment) [member function] cls.add_method('Adjust', 'void', [param('int32_t', 'adjustment')]) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator ns3::ByteTagList::Begin(int32_t offsetStart, int32_t offsetEnd) const [member function] cls.add_method('Begin', 'ns3::ByteTagList::Iterator', [param('int32_t', 'offsetStart'), param('int32_t', 'offsetEnd')], is_const=True) ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::RemoveAll() [member function] cls.add_method('RemoveAll', 'void', []) return def register_Ns3ByteTagListIterator_methods(root_module, cls): ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Iterator(ns3::ByteTagList::Iterator const & arg0) [copy constructor] cls.add_constructor([param('ns3::ByteTagList::Iterator const &', 'arg0')]) ## byte-tag-list.h (module 'network'): uint32_t ns3::ByteTagList::Iterator::GetOffsetStart() const [member function] cls.add_method('GetOffsetStart', 'uint32_t', [], is_const=True) ## byte-tag-list.h (module 'network'): bool ns3::ByteTagList::Iterator::HasNext() const [member function] cls.add_method('HasNext', 'bool', [], is_const=True) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item ns3::ByteTagList::Iterator::Next() [member function] cls.add_method('Next', 'ns3::ByteTagList::Iterator::Item', []) return def 
register_Ns3ByteTagListIteratorItem_methods(root_module, cls): ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::ByteTagList::Iterator::Item const & arg0) [copy constructor] cls.add_constructor([param('ns3::ByteTagList::Iterator::Item const &', 'arg0')]) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::TagBuffer buf) [constructor] cls.add_constructor([param('ns3::TagBuffer', 'buf')]) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::buf [variable] cls.add_instance_attribute('buf', 'ns3::TagBuffer', is_const=False) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::end [variable] cls.add_instance_attribute('end', 'int32_t', is_const=False) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::size [variable] cls.add_instance_attribute('size', 'uint32_t', is_const=False) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::start [variable] cls.add_instance_attribute('start', 'int32_t', is_const=False) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::tid [variable] cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False) return def register_Ns3CallbackBase_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor] cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')]) ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function] cls.add_method('GetImpl', 'ns3::Ptr< ns3::CallbackImplBase >', [], is_const=True) ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')], visibility='protected') return def 
register_Ns3CriticalSection_methods(root_module, cls): ## system-mutex.h (module 'core'): ns3::CriticalSection::CriticalSection(ns3::CriticalSection const & arg0) [copy constructor] cls.add_constructor([param('ns3::CriticalSection const &', 'arg0')]) ## system-mutex.h (module 'core'): ns3::CriticalSection::CriticalSection(ns3::SystemMutex & mutex) [constructor] cls.add_constructor([param('ns3::SystemMutex &', 'mutex')]) return def register_Ns3DataRate_methods(root_module, cls): cls.add_output_stream_operator() cls.add_binary_comparison_operator('!=') cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('<=') cls.add_binary_comparison_operator('==') cls.add_binary_comparison_operator('>') cls.add_binary_comparison_operator('>=') ## data-rate.h (module 'network'): ns3::DataRate::DataRate(ns3::DataRate const & arg0) [copy constructor] cls.add_constructor([param('ns3::DataRate const &', 'arg0')]) ## data-rate.h (module 'network'): ns3::DataRate::DataRate() [constructor] cls.add_constructor([]) ## data-rate.h (module 'network'): ns3::DataRate::DataRate(uint64_t bps) [constructor] cls.add_constructor([param('uint64_t', 'bps')]) ## data-rate.h (module 'network'): ns3::DataRate::DataRate(std::string rate) [constructor] cls.add_constructor([param('std::string', 'rate')]) ## data-rate.h (module 'network'): ns3::Time ns3::DataRate::CalculateBitsTxTime(uint32_t bits) const [member function] cls.add_method('CalculateBitsTxTime', 'ns3::Time', [param('uint32_t', 'bits')], is_const=True) ## data-rate.h (module 'network'): ns3::Time ns3::DataRate::CalculateBytesTxTime(uint32_t bytes) const [member function] cls.add_method('CalculateBytesTxTime', 'ns3::Time', [param('uint32_t', 'bytes')], is_const=True) ## data-rate.h (module 'network'): double ns3::DataRate::CalculateTxTime(uint32_t bytes) const [member function] cls.add_method('CalculateTxTime', 'double', [param('uint32_t', 'bytes')], deprecated=True, is_const=True) ## data-rate.h (module 'network'): uint64_t 
ns3::DataRate::GetBitRate() const [member function] cls.add_method('GetBitRate', 'uint64_t', [], is_const=True) return def register_Ns3EventId_methods(root_module, cls): cls.add_binary_comparison_operator('!=') cls.add_binary_comparison_operator('==') ## event-id.h (module 'core'): ns3::EventId::EventId(ns3::EventId const & arg0) [copy constructor] cls.add_constructor([param('ns3::EventId const &', 'arg0')]) ## event-id.h (module 'core'): ns3::EventId::EventId() [constructor] cls.add_constructor([]) ## event-id.h (module 'core'): ns3::EventId::EventId(ns3::Ptr<ns3::EventImpl> const & impl, uint64_t ts, uint32_t context, uint32_t uid) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::EventImpl > const &', 'impl'), param('uint64_t', 'ts'), param('uint32_t', 'context'), param('uint32_t', 'uid')]) ## event-id.h (module 'core'): void ns3::EventId::Cancel() [member function] cls.add_method('Cancel', 'void', []) ## event-id.h (module 'core'): uint32_t ns3::EventId::GetContext() const [member function] cls.add_method('GetContext', 'uint32_t', [], is_const=True) ## event-id.h (module 'core'): uint64_t ns3::EventId::GetTs() const [member function] cls.add_method('GetTs', 'uint64_t', [], is_const=True) ## event-id.h (module 'core'): uint32_t ns3::EventId::GetUid() const [member function] cls.add_method('GetUid', 'uint32_t', [], is_const=True) ## event-id.h (module 'core'): bool ns3::EventId::IsExpired() const [member function] cls.add_method('IsExpired', 'bool', [], is_const=True) ## event-id.h (module 'core'): bool ns3::EventId::IsRunning() const [member function] cls.add_method('IsRunning', 'bool', [], is_const=True) ## event-id.h (module 'core'): ns3::EventImpl * ns3::EventId::PeekEventImpl() const [member function] cls.add_method('PeekEventImpl', 'ns3::EventImpl *', [], is_const=True) return def register_Ns3Hasher_methods(root_module, cls): ## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Hasher const & arg0) [copy constructor] 
cls.add_constructor([param('ns3::Hasher const &', 'arg0')]) ## hash.h (module 'core'): ns3::Hasher::Hasher() [constructor] cls.add_constructor([]) ## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Ptr<ns3::Hash::Implementation> hp) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::Hash::Implementation >', 'hp')]) ## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')]) ## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(std::string const s) [member function] cls.add_method('GetHash32', 'uint32_t', [param('std::string const', 's')]) ## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')]) ## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(std::string const s) [member function] cls.add_method('GetHash64', 'uint64_t', [param('std::string const', 's')]) ## hash.h (module 'core'): ns3::Hasher & ns3::Hasher::clear() [member function] cls.add_method('clear', 'ns3::Hasher &', []) return def register_Ns3Ipv4Address_methods(root_module, cls): cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(ns3::Ipv4Address const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4Address const &', 'arg0')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(uint32_t address) [constructor] cls.add_constructor([param('uint32_t', 'address')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(char const * address) 
[constructor] cls.add_constructor([param('char const *', 'address')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::CombineMask(ns3::Ipv4Mask const & mask) const [member function] cls.add_method('CombineMask', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::ConvertFrom(ns3::Address const & address) [member function] cls.add_method('ConvertFrom', 'ns3::Ipv4Address', [param('ns3::Address const &', 'address')], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::Deserialize(uint8_t const * buf) [member function] cls.add_method('Deserialize', 'ns3::Ipv4Address', [param('uint8_t const *', 'buf')], is_static=True) ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Address::Get() const [member function] cls.add_method('Get', 'uint32_t', [], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetAny() [member function] cls.add_method('GetAny', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetBroadcast() [member function] cls.add_method('GetBroadcast', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetLoopback() [member function] cls.add_method('GetLoopback', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::GetSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function] cls.add_method('GetSubnetDirectedBroadcast', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetZero() [member function] cls.add_method('GetZero', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsAny() 
const [member function] cls.add_method('IsAny', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsBroadcast() const [member function] cls.add_method('IsBroadcast', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsEqual(ns3::Ipv4Address const & other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ipv4Address const &', 'other')], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalMulticast() const [member function] cls.add_method('IsLocalMulticast', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalhost() const [member function] cls.add_method('IsLocalhost', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): static bool ns3::Ipv4Address::IsMatchingType(ns3::Address const & address) [member function] cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsMulticast() const [member function] cls.add_method('IsMulticast', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function] cls.add_method('IsSubnetDirectedBroadcast', 'bool', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Serialize(uint8_t * buf) const [member function] cls.add_method('Serialize', 'void', [param('uint8_t *', 'buf')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(uint32_t address) [member function] cls.add_method('Set', 'void', [param('uint32_t', 'address')]) ## ipv4-address.h (module 'network'): void 
ns3::Ipv4Address::Set(char const * address) [member function] cls.add_method('Set', 'void', [param('char const *', 'address')]) return def register_Ns3Ipv4Mask_methods(root_module, cls): cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(ns3::Ipv4Mask const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4Mask const &', 'arg0')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(uint32_t mask) [constructor] cls.add_constructor([param('uint32_t', 'mask')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(char const * mask) [constructor] cls.add_constructor([param('char const *', 'mask')]) ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::Get() const [member function] cls.add_method('Get', 'uint32_t', [], is_const=True) ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::GetInverse() const [member function] cls.add_method('GetInverse', 'uint32_t', [], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetLoopback() [member function] cls.add_method('GetLoopback', 'ns3::Ipv4Mask', [], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetOnes() [member function] cls.add_method('GetOnes', 'ns3::Ipv4Mask', [], is_static=True) ## ipv4-address.h (module 'network'): uint16_t ns3::Ipv4Mask::GetPrefixLength() const [member function] cls.add_method('GetPrefixLength', 'uint16_t', [], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetZero() [member function] cls.add_method('GetZero', 'ns3::Ipv4Mask', [], is_static=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsEqual(ns3::Ipv4Mask other) const [member function] cls.add_method('IsEqual', 'bool', 
[param('ns3::Ipv4Mask', 'other')], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsMatch(ns3::Ipv4Address a, ns3::Ipv4Address b) const [member function] cls.add_method('IsMatch', 'bool', [param('ns3::Ipv4Address', 'a'), param('ns3::Ipv4Address', 'b')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Set(uint32_t mask) [member function] cls.add_method('Set', 'void', [param('uint32_t', 'mask')]) return def register_Ns3Ipv6Address_methods(root_module, cls): cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(char const * address) [constructor] cls.add_constructor([param('char const *', 'address')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(uint8_t * address) [constructor] cls.add_constructor([param('uint8_t *', 'address')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const & addr) [copy constructor] cls.add_constructor([param('ns3::Ipv6Address const &', 'addr')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const * addr) [constructor] cls.add_constructor([param('ns3::Ipv6Address const *', 'addr')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6Address::CombinePrefix(ns3::Ipv6Prefix const & prefix) [member function] cls.add_method('CombinePrefix', 'ns3::Ipv6Address', [param('ns3::Ipv6Prefix const &', 'prefix')]) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::ConvertFrom(ns3::Address const & address) 
[member function] cls.add_method('ConvertFrom', 'ns3::Ipv6Address', [param('ns3::Address const &', 'address')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::Deserialize(uint8_t const * buf) [member function] cls.add_method('Deserialize', 'ns3::Ipv6Address', [param('uint8_t const *', 'buf')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllHostsMulticast() [member function] cls.add_method('GetAllHostsMulticast', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllNodesMulticast() [member function] cls.add_method('GetAllNodesMulticast', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllRoutersMulticast() [member function] cls.add_method('GetAllRoutersMulticast', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAny() [member function] cls.add_method('GetAny', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::GetBytes(uint8_t * buf) const [member function] cls.add_method('GetBytes', 'void', [param('uint8_t *', 'buf')], is_const=True) ## ipv6-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv6Address::GetIpv4MappedAddress() const [member function] cls.add_method('GetIpv4MappedAddress', 'ns3::Ipv4Address', [], is_const=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetLoopback() [member function] cls.add_method('GetLoopback', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetOnes() [member function] cls.add_method('GetOnes', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetZero() [member function] 
cls.add_method('GetZero', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllHostsMulticast() const [member function] cls.add_method('IsAllHostsMulticast', 'bool', [], deprecated=True, is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllNodesMulticast() const [member function] cls.add_method('IsAllNodesMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllRoutersMulticast() const [member function] cls.add_method('IsAllRoutersMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAny() const [member function] cls.add_method('IsAny', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsDocumentation() const [member function] cls.add_method('IsDocumentation', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsEqual(ns3::Ipv6Address const & other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ipv6Address const &', 'other')], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsIpv4MappedAddress() const [member function] cls.add_method('IsIpv4MappedAddress', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocal() const [member function] cls.add_method('IsLinkLocal', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocalMulticast() const [member function] cls.add_method('IsLinkLocalMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLocalhost() const [member function] cls.add_method('IsLocalhost', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): static bool ns3::Ipv6Address::IsMatchingType(ns3::Address const & address) [member function] cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 
'address')], is_static=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsMulticast() const [member function] cls.add_method('IsMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsSolicitedMulticast() const [member function] cls.add_method('IsSolicitedMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac16Address addr, ns3::Ipv6Address prefix) [member function] cls.add_method('MakeAutoconfiguredAddress', 'ns3::Ipv6Address', [param('ns3::Mac16Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac48Address addr, ns3::Ipv6Address prefix) [member function] cls.add_method('MakeAutoconfiguredAddress', 'ns3::Ipv6Address', [param('ns3::Mac48Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac64Address addr, ns3::Ipv6Address prefix) [member function] cls.add_method('MakeAutoconfiguredAddress', 'ns3::Ipv6Address', [param('ns3::Mac64Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac16Address mac) [member function] cls.add_method('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address', [param('ns3::Mac16Address', 'mac')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac48Address mac) [member function] cls.add_method('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address', [param('ns3::Mac48Address', 'mac')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address 
ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac64Address mac) [member function] cls.add_method('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address', [param('ns3::Mac64Address', 'mac')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeIpv4MappedAddress(ns3::Ipv4Address addr) [member function] cls.add_method('MakeIpv4MappedAddress', 'ns3::Ipv6Address', [param('ns3::Ipv4Address', 'addr')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeSolicitedAddress(ns3::Ipv6Address addr) [member function] cls.add_method('MakeSolicitedAddress', 'ns3::Ipv6Address', [param('ns3::Ipv6Address', 'addr')], is_static=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Serialize(uint8_t * buf) const [member function] cls.add_method('Serialize', 'void', [param('uint8_t *', 'buf')], is_const=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(char const * address) [member function] cls.add_method('Set', 'void', [param('char const *', 'address')]) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(uint8_t * address) [member function] cls.add_method('Set', 'void', [param('uint8_t *', 'address')]) return def register_Ns3Ipv6Prefix_methods(root_module, cls): cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t * prefix) [constructor] cls.add_constructor([param('uint8_t *', 'prefix')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(char const * prefix) [constructor] 
cls.add_constructor([param('char const *', 'prefix')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t prefix) [constructor] cls.add_constructor([param('uint8_t', 'prefix')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const & prefix) [copy constructor] cls.add_constructor([param('ns3::Ipv6Prefix const &', 'prefix')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const * prefix) [constructor] cls.add_constructor([param('ns3::Ipv6Prefix const *', 'prefix')]) ## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::GetBytes(uint8_t * buf) const [member function] cls.add_method('GetBytes', 'void', [param('uint8_t *', 'buf')], is_const=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetLoopback() [member function] cls.add_method('GetLoopback', 'ns3::Ipv6Prefix', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetOnes() [member function] cls.add_method('GetOnes', 'ns3::Ipv6Prefix', [], is_static=True) ## ipv6-address.h (module 'network'): uint8_t ns3::Ipv6Prefix::GetPrefixLength() const [member function] cls.add_method('GetPrefixLength', 'uint8_t', [], is_const=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetZero() [member function] cls.add_method('GetZero', 'ns3::Ipv6Prefix', [], is_static=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsEqual(ns3::Ipv6Prefix const & other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ipv6Prefix const &', 'other')], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsMatch(ns3::Ipv6Address a, ns3::Ipv6Address b) const [member function] cls.add_method('IsMatch', 'bool', [param('ns3::Ipv6Address', 'a'), param('ns3::Ipv6Address', 'b')], is_const=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::Print(std::ostream & os) const 
[member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) return def register_Ns3Mac48Address_methods(root_module, cls): cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(ns3::Mac48Address const & arg0) [copy constructor] cls.add_constructor([param('ns3::Mac48Address const &', 'arg0')]) ## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address() [constructor] cls.add_constructor([]) ## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(char const * str) [constructor] cls.add_constructor([param('char const *', 'str')]) ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::Allocate() [member function] cls.add_method('Allocate', 'ns3::Mac48Address', [], is_static=True) ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::ConvertFrom(ns3::Address const & address) [member function] cls.add_method('ConvertFrom', 'ns3::Mac48Address', [param('ns3::Address const &', 'address')], is_static=True) ## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyFrom(uint8_t const * buffer) [member function] cls.add_method('CopyFrom', 'void', [param('uint8_t const *', 'buffer')]) ## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyTo(uint8_t * buffer) const [member function] cls.add_method('CopyTo', 'void', [param('uint8_t *', 'buffer')], is_const=True) ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetBroadcast() [member function] cls.add_method('GetBroadcast', 'ns3::Mac48Address', [], is_static=True) ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv4Address address) [member function] cls.add_method('GetMulticast', 'ns3::Mac48Address', [param('ns3::Ipv4Address', 
'address')], is_static=True) ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv6Address address) [member function] cls.add_method('GetMulticast', 'ns3::Mac48Address', [param('ns3::Ipv6Address', 'address')], is_static=True) ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast6Prefix() [member function] cls.add_method('GetMulticast6Prefix', 'ns3::Mac48Address', [], is_static=True) ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticastPrefix() [member function] cls.add_method('GetMulticastPrefix', 'ns3::Mac48Address', [], is_static=True) ## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsBroadcast() const [member function] cls.add_method('IsBroadcast', 'bool', [], is_const=True) ## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsGroup() const [member function] cls.add_method('IsGroup', 'bool', [], is_const=True) ## mac48-address.h (module 'network'): static bool ns3::Mac48Address::IsMatchingType(ns3::Address const & address) [member function] cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True) return def register_Ns3NetDeviceContainer_methods(root_module, cls): ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::NetDeviceContainer const & arg0) [copy constructor] cls.add_constructor([param('ns3::NetDeviceContainer const &', 'arg0')]) ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer() [constructor] cls.add_constructor([]) ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::Ptr<ns3::NetDevice> dev) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::NetDevice >', 'dev')]) ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(std::string devName) [constructor] 
cls.add_constructor([param('std::string', 'devName')]) ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::NetDeviceContainer const & a, ns3::NetDeviceContainer const & b) [constructor] cls.add_constructor([param('ns3::NetDeviceContainer const &', 'a'), param('ns3::NetDeviceContainer const &', 'b')]) ## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(ns3::NetDeviceContainer other) [member function] cls.add_method('Add', 'void', [param('ns3::NetDeviceContainer', 'other')]) ## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(ns3::Ptr<ns3::NetDevice> device) [member function] cls.add_method('Add', 'void', [param('ns3::Ptr< ns3::NetDevice >', 'device')]) ## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(std::string deviceName) [member function] cls.add_method('Add', 'void', [param('std::string', 'deviceName')]) ## net-device-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::NetDevice>*,std::vector<ns3::Ptr<ns3::NetDevice>, std::allocator<ns3::Ptr<ns3::NetDevice> > > > ns3::NetDeviceContainer::Begin() const [member function] cls.add_method('Begin', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::NetDevice > const, std::vector< ns3::Ptr< ns3::NetDevice > > >', [], is_const=True) ## net-device-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::NetDevice>*,std::vector<ns3::Ptr<ns3::NetDevice>, std::allocator<ns3::Ptr<ns3::NetDevice> > > > ns3::NetDeviceContainer::End() const [member function] cls.add_method('End', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::NetDevice > const, std::vector< ns3::Ptr< ns3::NetDevice > > >', [], is_const=True) ## net-device-container.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::NetDeviceContainer::Get(uint32_t i) const [member function] cls.add_method('Get', 'ns3::Ptr< ns3::NetDevice >', [param('uint32_t', 'i')], is_const=True) ## 
net-device-container.h (module 'network'): uint32_t ns3::NetDeviceContainer::GetN() const [member function] cls.add_method('GetN', 'uint32_t', [], is_const=True) return def register_Ns3NodeContainer_methods(root_module, cls): ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & arg0) [copy constructor] cls.add_constructor([param('ns3::NodeContainer const &', 'arg0')]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer() [constructor] cls.add_constructor([]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::Ptr<ns3::Node> node) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::Node >', 'node')]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(std::string nodeName) [constructor] cls.add_constructor([param('std::string', 'nodeName')]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b) [constructor] cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b')]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c) [constructor] cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c')]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d) [constructor] cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd')]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, 
ns3::NodeContainer const & c, ns3::NodeContainer const & d, ns3::NodeContainer const & e) [constructor] cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd'), param('ns3::NodeContainer const &', 'e')]) ## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::NodeContainer other) [member function] cls.add_method('Add', 'void', [param('ns3::NodeContainer', 'other')]) ## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::Ptr<ns3::Node> node) [member function] cls.add_method('Add', 'void', [param('ns3::Ptr< ns3::Node >', 'node')]) ## node-container.h (module 'network'): void ns3::NodeContainer::Add(std::string nodeName) [member function] cls.add_method('Add', 'void', [param('std::string', 'nodeName')]) ## node-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Node>*,std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeContainer::Begin() const [member function] cls.add_method('Begin', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >', [], is_const=True) ## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n) [member function] cls.add_method('Create', 'void', [param('uint32_t', 'n')]) ## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n, uint32_t systemId) [member function] cls.add_method('Create', 'void', [param('uint32_t', 'n'), param('uint32_t', 'systemId')]) ## node-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Node>*,std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeContainer::End() const [member function] cls.add_method('End', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >', [], is_const=True) ## node-container.h 
(module 'network'): ns3::Ptr<ns3::Node> ns3::NodeContainer::Get(uint32_t i) const [member function] cls.add_method('Get', 'ns3::Ptr< ns3::Node >', [param('uint32_t', 'i')], is_const=True) ## node-container.h (module 'network'): static ns3::NodeContainer ns3::NodeContainer::GetGlobal() [member function] cls.add_method('GetGlobal', 'ns3::NodeContainer', [], is_static=True) ## node-container.h (module 'network'): uint32_t ns3::NodeContainer::GetN() const [member function] cls.add_method('GetN', 'uint32_t', [], is_const=True) return def register_Ns3ObjectBase_methods(root_module, cls): ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor] cls.add_constructor([]) ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')]) ## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function] cls.add_method('GetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')], is_const=True) ## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & value) const [member function] cls.add_method('GetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')], is_const=True) ## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function] cls.add_method('SetAttribute', 'void', [param('std::string', 
'name'), param('ns3::AttributeValue const &', 'value')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function] cls.add_method('SetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceConnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceConnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceDisconnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceDisconnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function] cls.add_method('ConstructSelf', 'void', [param('ns3::AttributeConstructionList const &', 'attributes')], visibility='protected') ## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function] cls.add_method('NotifyConstructionCompleted', 'void', [], visibility='protected', is_virtual=True) return def 
register_Ns3ObjectDeleter_methods(root_module, cls): ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor] cls.add_constructor([]) ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')]) ## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function] cls.add_method('Delete', 'void', [param('ns3::Object *', 'object')], is_static=True) return def register_Ns3ObjectFactory_methods(root_module, cls): cls.add_output_stream_operator() ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(ns3::ObjectFactory const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectFactory const &', 'arg0')]) ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory() [constructor] cls.add_constructor([]) ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(std::string typeId) [constructor] cls.add_constructor([param('std::string', 'typeId')]) ## object-factory.h (module 'core'): ns3::Ptr<ns3::Object> ns3::ObjectFactory::Create() const [member function] cls.add_method('Create', 'ns3::Ptr< ns3::Object >', [], is_const=True) ## object-factory.h (module 'core'): ns3::TypeId ns3::ObjectFactory::GetTypeId() const [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True) ## object-factory.h (module 'core'): void ns3::ObjectFactory::Set(std::string name, ns3::AttributeValue const & value) [member function] cls.add_method('Set', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')]) ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(ns3::TypeId tid) [member function] cls.add_method('SetTypeId', 'void', [param('ns3::TypeId', 'tid')]) ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(char const * tid) [member function] cls.add_method('SetTypeId', 
'void', [param('char const *', 'tid')]) ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(std::string tid) [member function] cls.add_method('SetTypeId', 'void', [param('std::string', 'tid')]) return def register_Ns3PacketMetadata_methods(root_module, cls): ## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(uint64_t uid, uint32_t size) [constructor] cls.add_constructor([param('uint64_t', 'uid'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(ns3::PacketMetadata const & o) [copy constructor] cls.add_constructor([param('ns3::PacketMetadata const &', 'o')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddAtEnd(ns3::PacketMetadata const & o) [member function] cls.add_method('AddAtEnd', 'void', [param('ns3::PacketMetadata const &', 'o')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddHeader(ns3::Header const & header, uint32_t size) [member function] cls.add_method('AddHeader', 'void', [param('ns3::Header const &', 'header'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddPaddingAtEnd(uint32_t end) [member function] cls.add_method('AddPaddingAtEnd', 'void', [param('uint32_t', 'end')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddTrailer(ns3::Trailer const & trailer, uint32_t size) [member function] cls.add_method('AddTrailer', 'void', [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::PacketMetadata::BeginItem(ns3::Buffer buffer) const [member function] cls.add_method('BeginItem', 'ns3::PacketMetadata::ItemIterator', [param('ns3::Buffer', 'buffer')], is_const=True) ## packet-metadata.h (module 'network'): ns3::PacketMetadata ns3::PacketMetadata::CreateFragment(uint32_t start, uint32_t end) const [member function] cls.add_method('CreateFragment', 
'ns3::PacketMetadata', [param('uint32_t', 'start'), param('uint32_t', 'end')], is_const=True) ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Deserialize(uint8_t const * buffer, uint32_t size) [member function] cls.add_method('Deserialize', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::Enable() [member function] cls.add_method('Enable', 'void', [], is_static=True) ## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::EnableChecking() [member function] cls.add_method('EnableChecking', 'void', [], is_static=True) ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True) ## packet-metadata.h (module 'network'): uint64_t ns3::PacketMetadata::GetUid() const [member function] cls.add_method('GetUid', 'uint64_t', [], is_const=True) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtEnd(uint32_t end) [member function] cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'end')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtStart(uint32_t start) [member function] cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'start')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveHeader(ns3::Header const & header, uint32_t size) [member function] cls.add_method('RemoveHeader', 'void', [param('ns3::Header const &', 'header'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveTrailer(ns3::Trailer const & trailer, uint32_t size) [member function] cls.add_method('RemoveTrailer', 'void', [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Serialize(uint8_t * buffer, uint32_t maxSize) const 
[member function] cls.add_method('Serialize', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True) return def register_Ns3PacketMetadataItem_methods(root_module, cls): ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item() [constructor] cls.add_constructor([]) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item(ns3::PacketMetadata::Item const & arg0) [copy constructor] cls.add_constructor([param('ns3::PacketMetadata::Item const &', 'arg0')]) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::current [variable] cls.add_instance_attribute('current', 'ns3::Buffer::Iterator', is_const=False) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentSize [variable] cls.add_instance_attribute('currentSize', 'uint32_t', is_const=False) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromEnd [variable] cls.add_instance_attribute('currentTrimedFromEnd', 'uint32_t', is_const=False) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromStart [variable] cls.add_instance_attribute('currentTrimedFromStart', 'uint32_t', is_const=False) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::isFragment [variable] cls.add_instance_attribute('isFragment', 'bool', is_const=False) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::tid [variable] cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False) return def register_Ns3PacketMetadataItemIterator_methods(root_module, cls): ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata::ItemIterator const & arg0) [copy constructor] cls.add_constructor([param('ns3::PacketMetadata::ItemIterator const &', 'arg0')]) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata const * metadata, ns3::Buffer buffer) [constructor] 
cls.add_constructor([param('ns3::PacketMetadata const *', 'metadata'), param('ns3::Buffer', 'buffer')]) ## packet-metadata.h (module 'network'): bool ns3::PacketMetadata::ItemIterator::HasNext() const [member function] cls.add_method('HasNext', 'bool', [], is_const=True) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item ns3::PacketMetadata::ItemIterator::Next() [member function] cls.add_method('Next', 'ns3::PacketMetadata::Item', []) return def register_Ns3PacketTagIterator_methods(root_module, cls): ## packet.h (module 'network'): ns3::PacketTagIterator::PacketTagIterator(ns3::PacketTagIterator const & arg0) [copy constructor] cls.add_constructor([param('ns3::PacketTagIterator const &', 'arg0')]) ## packet.h (module 'network'): bool ns3::PacketTagIterator::HasNext() const [member function] cls.add_method('HasNext', 'bool', [], is_const=True) ## packet.h (module 'network'): ns3::PacketTagIterator::Item ns3::PacketTagIterator::Next() [member function] cls.add_method('Next', 'ns3::PacketTagIterator::Item', []) return def register_Ns3PacketTagIteratorItem_methods(root_module, cls): ## packet.h (module 'network'): ns3::PacketTagIterator::Item::Item(ns3::PacketTagIterator::Item const & arg0) [copy constructor] cls.add_constructor([param('ns3::PacketTagIterator::Item const &', 'arg0')]) ## packet.h (module 'network'): void ns3::PacketTagIterator::Item::GetTag(ns3::Tag & tag) const [member function] cls.add_method('GetTag', 'void', [param('ns3::Tag &', 'tag')], is_const=True) ## packet.h (module 'network'): ns3::TypeId ns3::PacketTagIterator::Item::GetTypeId() const [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True) return def register_Ns3PacketTagList_methods(root_module, cls): ## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList() [constructor] cls.add_constructor([]) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList(ns3::PacketTagList const & o) [copy constructor] 
cls.add_constructor([param('ns3::PacketTagList const &', 'o')]) ## packet-tag-list.h (module 'network'): void ns3::PacketTagList::Add(ns3::Tag const & tag) const [member function] cls.add_method('Add', 'void', [param('ns3::Tag const &', 'tag')], is_const=True) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData const * ns3::PacketTagList::Head() const [member function] cls.add_method('Head', 'ns3::PacketTagList::TagData const *', [], is_const=True) ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Peek(ns3::Tag & tag) const [member function] cls.add_method('Peek', 'bool', [param('ns3::Tag &', 'tag')], is_const=True) ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Remove(ns3::Tag & tag) [member function] cls.add_method('Remove', 'bool', [param('ns3::Tag &', 'tag')]) ## packet-tag-list.h (module 'network'): void ns3::PacketTagList::RemoveAll() [member function] cls.add_method('RemoveAll', 'void', []) ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Replace(ns3::Tag & tag) [member function] cls.add_method('Replace', 'bool', [param('ns3::Tag &', 'tag')]) return def register_Ns3PacketTagListTagData_methods(root_module, cls): ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData() [constructor] cls.add_constructor([]) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData(ns3::PacketTagList::TagData const & arg0) [copy constructor] cls.add_constructor([param('ns3::PacketTagList::TagData const &', 'arg0')]) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::count [variable] cls.add_instance_attribute('count', 'uint32_t', is_const=False) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::data [variable] cls.add_instance_attribute('data', 'uint8_t [ 21 ]', is_const=False) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::next [variable] cls.add_instance_attribute('next', 'ns3::PacketTagList::TagData *', 
is_const=False) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::tid [variable] cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False) return def register_Ns3PcapFile_methods(root_module, cls): ## pcap-file.h (module 'network'): ns3::PcapFile::PcapFile() [constructor] cls.add_constructor([]) ## pcap-file.h (module 'network'): void ns3::PcapFile::Clear() [member function] cls.add_method('Clear', 'void', []) ## pcap-file.h (module 'network'): void ns3::PcapFile::Close() [member function] cls.add_method('Close', 'void', []) ## pcap-file.h (module 'network'): static bool ns3::PcapFile::Diff(std::string const & f1, std::string const & f2, uint32_t & sec, uint32_t & usec, uint32_t & packets, uint32_t snapLen=ns3::PcapFile::SNAPLEN_DEFAULT) [member function] cls.add_method('Diff', 'bool', [param('std::string const &', 'f1'), param('std::string const &', 'f2'), param('uint32_t &', 'sec'), param('uint32_t &', 'usec'), param('uint32_t &', 'packets'), param('uint32_t', 'snapLen', default_value='ns3::PcapFile::SNAPLEN_DEFAULT')], is_static=True) ## pcap-file.h (module 'network'): bool ns3::PcapFile::Eof() const [member function] cls.add_method('Eof', 'bool', [], is_const=True) ## pcap-file.h (module 'network'): bool ns3::PcapFile::Fail() const [member function] cls.add_method('Fail', 'bool', [], is_const=True) ## pcap-file.h (module 'network'): uint32_t ns3::PcapFile::GetDataLinkType() [member function] cls.add_method('GetDataLinkType', 'uint32_t', []) ## pcap-file.h (module 'network'): uint32_t ns3::PcapFile::GetMagic() [member function] cls.add_method('GetMagic', 'uint32_t', []) ## pcap-file.h (module 'network'): uint32_t ns3::PcapFile::GetSigFigs() [member function] cls.add_method('GetSigFigs', 'uint32_t', []) ## pcap-file.h (module 'network'): uint32_t ns3::PcapFile::GetSnapLen() [member function] cls.add_method('GetSnapLen', 'uint32_t', []) ## pcap-file.h (module 'network'): bool ns3::PcapFile::GetSwapMode() [member function] 
cls.add_method('GetSwapMode', 'bool', []) ## pcap-file.h (module 'network'): int32_t ns3::PcapFile::GetTimeZoneOffset() [member function] cls.add_method('GetTimeZoneOffset', 'int32_t', []) ## pcap-file.h (module 'network'): uint16_t ns3::PcapFile::GetVersionMajor() [member function] cls.add_method('GetVersionMajor', 'uint16_t', []) ## pcap-file.h (module 'network'): uint16_t ns3::PcapFile::GetVersionMinor() [member function] cls.add_method('GetVersionMinor', 'uint16_t', []) ## pcap-file.h (module 'network'): void ns3::PcapFile::Init(uint32_t dataLinkType, uint32_t snapLen=ns3::PcapFile::SNAPLEN_DEFAULT, int32_t timeZoneCorrection=ns3::PcapFile::ZONE_DEFAULT, bool swapMode=false, bool nanosecMode=false) [member function] cls.add_method('Init', 'void', [param('uint32_t', 'dataLinkType'), param('uint32_t', 'snapLen', default_value='ns3::PcapFile::SNAPLEN_DEFAULT'), param('int32_t', 'timeZoneCorrection', default_value='ns3::PcapFile::ZONE_DEFAULT'), param('bool', 'swapMode', default_value='false'), param('bool', 'nanosecMode', default_value='false')]) ## pcap-file.h (module 'network'): bool ns3::PcapFile::IsNanoSecMode() [member function] cls.add_method('IsNanoSecMode', 'bool', []) ## pcap-file.h (module 'network'): void ns3::PcapFile::Open(std::string const & filename, std::_Ios_Openmode mode) [member function] cls.add_method('Open', 'void', [param('std::string const &', 'filename'), param('std::_Ios_Openmode', 'mode')]) ## pcap-file.h (module 'network'): void ns3::PcapFile::Read(uint8_t * const data, uint32_t maxBytes, uint32_t & tsSec, uint32_t & tsUsec, uint32_t & inclLen, uint32_t & origLen, uint32_t & readLen) [member function] cls.add_method('Read', 'void', [param('uint8_t * const', 'data'), param('uint32_t', 'maxBytes'), param('uint32_t &', 'tsSec'), param('uint32_t &', 'tsUsec'), param('uint32_t &', 'inclLen'), param('uint32_t &', 'origLen'), param('uint32_t &', 'readLen')]) ## pcap-file.h (module 'network'): void ns3::PcapFile::Write(uint32_t tsSec, uint32_t 
tsUsec, uint8_t const * const data, uint32_t totalLen) [member function] cls.add_method('Write', 'void', [param('uint32_t', 'tsSec'), param('uint32_t', 'tsUsec'), param('uint8_t const * const', 'data'), param('uint32_t', 'totalLen')]) ## pcap-file.h (module 'network'): void ns3::PcapFile::Write(uint32_t tsSec, uint32_t tsUsec, ns3::Ptr<const ns3::Packet> p) [member function] cls.add_method('Write', 'void', [param('uint32_t', 'tsSec'), param('uint32_t', 'tsUsec'), param('ns3::Ptr< ns3::Packet const >', 'p')]) ## pcap-file.h (module 'network'): void ns3::PcapFile::Write(uint32_t tsSec, uint32_t tsUsec, ns3::Header const & header, ns3::Ptr<const ns3::Packet> p) [member function] cls.add_method('Write', 'void', [param('uint32_t', 'tsSec'), param('uint32_t', 'tsUsec'), param('ns3::Header const &', 'header'), param('ns3::Ptr< ns3::Packet const >', 'p')]) ## pcap-file.h (module 'network'): ns3::PcapFile::SNAPLEN_DEFAULT [variable] cls.add_static_attribute('SNAPLEN_DEFAULT', 'uint32_t const', is_const=True) ## pcap-file.h (module 'network'): ns3::PcapFile::ZONE_DEFAULT [variable] cls.add_static_attribute('ZONE_DEFAULT', 'int32_t const', is_const=True) return def register_Ns3PcapHelper_methods(root_module, cls): ## trace-helper.h (module 'network'): ns3::PcapHelper::PcapHelper(ns3::PcapHelper const & arg0) [copy constructor] cls.add_constructor([param('ns3::PcapHelper const &', 'arg0')]) ## trace-helper.h (module 'network'): ns3::PcapHelper::PcapHelper() [constructor] cls.add_constructor([]) ## trace-helper.h (module 'network'): ns3::Ptr<ns3::PcapFileWrapper> ns3::PcapHelper::CreateFile(std::string filename, std::_Ios_Openmode filemode, uint32_t dataLinkType, uint32_t snapLen=std::numeric_limits<unsigned int>::max(), int32_t tzCorrection=0) [member function] cls.add_method('CreateFile', 'ns3::Ptr< ns3::PcapFileWrapper >', [param('std::string', 'filename'), param('std::_Ios_Openmode', 'filemode'), param('uint32_t', 'dataLinkType'), param('uint32_t', 'snapLen', 
default_value='std::numeric_limits<unsigned int>::max()'), param('int32_t', 'tzCorrection', default_value='0')]) ## trace-helper.h (module 'network'): std::string ns3::PcapHelper::GetFilenameFromDevice(std::string prefix, ns3::Ptr<ns3::NetDevice> device, bool useObjectNames=true) [member function] cls.add_method('GetFilenameFromDevice', 'std::string', [param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'useObjectNames', default_value='true')]) ## trace-helper.h (module 'network'): std::string ns3::PcapHelper::GetFilenameFromInterfacePair(std::string prefix, ns3::Ptr<ns3::Object> object, uint32_t interface, bool useObjectNames=true) [member function] cls.add_method('GetFilenameFromInterfacePair', 'std::string', [param('std::string', 'prefix'), param('ns3::Ptr< ns3::Object >', 'object'), param('uint32_t', 'interface'), param('bool', 'useObjectNames', default_value='true')]) return def register_Ns3PcapHelperForDevice_methods(root_module, cls): ## trace-helper.h (module 'network'): ns3::PcapHelperForDevice::PcapHelperForDevice(ns3::PcapHelperForDevice const & arg0) [copy constructor] cls.add_constructor([param('ns3::PcapHelperForDevice const &', 'arg0')]) ## trace-helper.h (module 'network'): ns3::PcapHelperForDevice::PcapHelperForDevice() [constructor] cls.add_constructor([]) ## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool promiscuous=false, bool explicitFilename=false) [member function] cls.add_method('EnablePcap', 'void', [param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'promiscuous', default_value='false'), param('bool', 'explicitFilename', default_value='false')]) ## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, std::string ndName, bool promiscuous=false, bool explicitFilename=false) [member function] cls.add_method('EnablePcap', 'void', 
[param('std::string', 'prefix'), param('std::string', 'ndName'), param('bool', 'promiscuous', default_value='false'), param('bool', 'explicitFilename', default_value='false')]) ## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, ns3::NetDeviceContainer d, bool promiscuous=false) [member function] cls.add_method('EnablePcap', 'void', [param('std::string', 'prefix'), param('ns3::NetDeviceContainer', 'd'), param('bool', 'promiscuous', default_value='false')]) ## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, ns3::NodeContainer n, bool promiscuous=false) [member function] cls.add_method('EnablePcap', 'void', [param('std::string', 'prefix'), param('ns3::NodeContainer', 'n'), param('bool', 'promiscuous', default_value='false')]) ## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, uint32_t nodeid, uint32_t deviceid, bool promiscuous=false) [member function] cls.add_method('EnablePcap', 'void', [param('std::string', 'prefix'), param('uint32_t', 'nodeid'), param('uint32_t', 'deviceid'), param('bool', 'promiscuous', default_value='false')]) ## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcapAll(std::string prefix, bool promiscuous=false) [member function] cls.add_method('EnablePcapAll', 'void', [param('std::string', 'prefix'), param('bool', 'promiscuous', default_value='false')]) ## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcapInternal(std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool promiscuous, bool explicitFilename) [member function] cls.add_method('EnablePcapInternal', 'void', [param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'promiscuous'), param('bool', 'explicitFilename')], is_pure_virtual=True, is_virtual=True) return def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls): ## 
simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3Simulator_methods(root_module, cls): ## simulator.h (module 'core'): ns3::Simulator::Simulator(ns3::Simulator const & arg0) [copy constructor] cls.add_constructor([param('ns3::Simulator const &', 'arg0')]) ## simulator.h (module 'core'): static void ns3::Simulator::Cancel(ns3::EventId const & id) [member function] cls.add_method('Cancel', 'void', [param('ns3::EventId const &', 'id')], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::Destroy() [member function] cls.add_method('Destroy', 'void', [], is_static=True) ## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetContext() [member function] cls.add_method('GetContext', 'uint32_t', [], is_static=True) ## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetDelayLeft(ns3::EventId const & id) [member function] cls.add_method('GetDelayLeft', 'ns3::Time', [param('ns3::EventId const &', 'id')], is_static=True) ## simulator.h (module 'core'): static ns3::Ptr<ns3::SimulatorImpl> ns3::Simulator::GetImplementation() [member function] cls.add_method('GetImplementation', 'ns3::Ptr< ns3::SimulatorImpl >', [], is_static=True) ## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetMaximumSimulationTime() [member function] 
cls.add_method('GetMaximumSimulationTime', 'ns3::Time', [], is_static=True) ## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetSystemId() [member function] cls.add_method('GetSystemId', 'uint32_t', [], is_static=True) ## simulator.h (module 'core'): static bool ns3::Simulator::IsExpired(ns3::EventId const & id) [member function] cls.add_method('IsExpired', 'bool', [param('ns3::EventId const &', 'id')], is_static=True) ## simulator.h (module 'core'): static bool ns3::Simulator::IsFinished() [member function] cls.add_method('IsFinished', 'bool', [], is_static=True) ## simulator.h (module 'core'): static ns3::Time ns3::Simulator::Now() [member function] cls.add_method('Now', 'ns3::Time', [], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::Remove(ns3::EventId const & id) [member function] cls.add_method('Remove', 'void', [param('ns3::EventId const &', 'id')], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::SetImplementation(ns3::Ptr<ns3::SimulatorImpl> impl) [member function] cls.add_method('SetImplementation', 'void', [param('ns3::Ptr< ns3::SimulatorImpl >', 'impl')], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::SetScheduler(ns3::ObjectFactory schedulerFactory) [member function] cls.add_method('SetScheduler', 'void', [param('ns3::ObjectFactory', 'schedulerFactory')], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::Stop() [member function] cls.add_method('Stop', 'void', [], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::Stop(ns3::Time const & delay) [member function] cls.add_method('Stop', 'void', [param('ns3::Time const &', 'delay')], is_static=True) return def register_Ns3SystemCondition_methods(root_module, cls): ## system-condition.h (module 'core'): ns3::SystemCondition::SystemCondition(ns3::SystemCondition const & arg0) [copy constructor] cls.add_constructor([param('ns3::SystemCondition const &', 'arg0')]) 
## system-condition.h (module 'core'): ns3::SystemCondition::SystemCondition() [constructor] cls.add_constructor([]) ## system-condition.h (module 'core'): void ns3::SystemCondition::Broadcast() [member function] cls.add_method('Broadcast', 'void', []) ## system-condition.h (module 'core'): bool ns3::SystemCondition::GetCondition() [member function] cls.add_method('GetCondition', 'bool', []) ## system-condition.h (module 'core'): void ns3::SystemCondition::SetCondition(bool condition) [member function] cls.add_method('SetCondition', 'void', [param('bool', 'condition')]) ## system-condition.h (module 'core'): void ns3::SystemCondition::Signal() [member function] cls.add_method('Signal', 'void', []) ## system-condition.h (module 'core'): bool ns3::SystemCondition::TimedWait(uint64_t ns) [member function] cls.add_method('TimedWait', 'bool', [param('uint64_t', 'ns')]) ## system-condition.h (module 'core'): void ns3::SystemCondition::Wait() [member function] cls.add_method('Wait', 'void', []) return def register_Ns3SystemMutex_methods(root_module, cls): ## system-mutex.h (module 'core'): ns3::SystemMutex::SystemMutex(ns3::SystemMutex const & arg0) [copy constructor] cls.add_constructor([param('ns3::SystemMutex const &', 'arg0')]) ## system-mutex.h (module 'core'): ns3::SystemMutex::SystemMutex() [constructor] cls.add_constructor([]) ## system-mutex.h (module 'core'): void ns3::SystemMutex::Lock() [member function] cls.add_method('Lock', 'void', []) ## system-mutex.h (module 'core'): void ns3::SystemMutex::Unlock() [member function] cls.add_method('Unlock', 'void', []) return def register_Ns3Tag_methods(root_module, cls): ## tag.h (module 'network'): ns3::Tag::Tag() [constructor] cls.add_constructor([]) ## tag.h (module 'network'): ns3::Tag::Tag(ns3::Tag const & arg0) [copy constructor] cls.add_constructor([param('ns3::Tag const &', 'arg0')]) ## tag.h (module 'network'): void ns3::Tag::Deserialize(ns3::TagBuffer i) [member function] cls.add_method('Deserialize', 'void', 
[param('ns3::TagBuffer', 'i')], is_pure_virtual=True, is_virtual=True) ## tag.h (module 'network'): uint32_t ns3::Tag::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## tag.h (module 'network'): static ns3::TypeId ns3::Tag::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## tag.h (module 'network'): void ns3::Tag::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True) ## tag.h (module 'network'): void ns3::Tag::Serialize(ns3::TagBuffer i) const [member function] cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3TagBuffer_methods(root_module, cls): ## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(ns3::TagBuffer const & arg0) [copy constructor] cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')]) ## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(uint8_t * start, uint8_t * end) [constructor] cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::CopyFrom(ns3::TagBuffer o) [member function] cls.add_method('CopyFrom', 'void', [param('ns3::TagBuffer', 'o')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::Read(uint8_t * buffer, uint32_t size) [member function] cls.add_method('Read', 'void', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')]) ## tag-buffer.h (module 'network'): double ns3::TagBuffer::ReadDouble() [member function] cls.add_method('ReadDouble', 'double', []) ## tag-buffer.h (module 'network'): uint16_t ns3::TagBuffer::ReadU16() [member function] cls.add_method('ReadU16', 'uint16_t', []) ## tag-buffer.h (module 'network'): uint32_t ns3::TagBuffer::ReadU32() [member function] 
cls.add_method('ReadU32', 'uint32_t', []) ## tag-buffer.h (module 'network'): uint64_t ns3::TagBuffer::ReadU64() [member function] cls.add_method('ReadU64', 'uint64_t', []) ## tag-buffer.h (module 'network'): uint8_t ns3::TagBuffer::ReadU8() [member function] cls.add_method('ReadU8', 'uint8_t', []) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::TrimAtEnd(uint32_t trim) [member function] cls.add_method('TrimAtEnd', 'void', [param('uint32_t', 'trim')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::Write(uint8_t const * buffer, uint32_t size) [member function] cls.add_method('Write', 'void', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteDouble(double v) [member function] cls.add_method('WriteDouble', 'void', [param('double', 'v')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU16(uint16_t data) [member function] cls.add_method('WriteU16', 'void', [param('uint16_t', 'data')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU32(uint32_t data) [member function] cls.add_method('WriteU32', 'void', [param('uint32_t', 'data')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU64(uint64_t v) [member function] cls.add_method('WriteU64', 'void', [param('uint64_t', 'v')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU8(uint8_t v) [member function] cls.add_method('WriteU8', 'void', [param('uint8_t', 'v')]) return def register_Ns3TimeWithUnit_methods(root_module, cls): cls.add_output_stream_operator() ## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::TimeWithUnit const & arg0) [copy constructor] cls.add_constructor([param('ns3::TimeWithUnit const &', 'arg0')]) ## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::Time const time, ns3::Time::Unit const unit) [constructor] cls.add_constructor([param('ns3::Time const', 'time'), param('ns3::Time::Unit const', 'unit')]) return def 
register_Ns3TypeId_methods(root_module, cls): cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor] cls.add_constructor([param('char const *', 'name')]) ## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor] cls.add_constructor([param('ns3::TypeId const &', 'o')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('AddAttribute', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('AddAttribute', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function] cls.add_method('AddTraceSource', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), 
param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')], deprecated=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor, std::string callback) [member function] cls.add_method('AddTraceSource', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor'), param('std::string', 'callback')]) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function] cls.add_method('GetAttribute', 'ns3::TypeId::AttributeInformation', [param('uint32_t', 'i')], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function] cls.add_method('GetAttributeFullName', 'std::string', [param('uint32_t', 'i')], is_const=True) ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function] cls.add_method('GetAttributeN', 'uint32_t', [], is_const=True) ## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function] cls.add_method('GetConstructor', 'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', [], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function] cls.add_method('GetGroupName', 'std::string', [], is_const=True) ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetHash() const [member function] cls.add_method('GetHash', 'uint32_t', [], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function] cls.add_method('GetName', 'std::string', [], is_const=True) ## type-id.h (module 'core'): ns3::TypeId 
ns3::TypeId::GetParent() const [member function] cls.add_method('GetParent', 'ns3::TypeId', [], is_const=True) ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function] cls.add_method('GetRegistered', 'ns3::TypeId', [param('uint32_t', 'i')], is_static=True) ## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function] cls.add_method('GetRegisteredN', 'uint32_t', [], is_static=True) ## type-id.h (module 'core'): std::size_t ns3::TypeId::GetSize() const [member function] cls.add_method('GetSize', 'std::size_t', [], is_const=True) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function] cls.add_method('GetTraceSource', 'ns3::TypeId::TraceSourceInformation', [param('uint32_t', 'i')], is_const=True) ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function] cls.add_method('GetTraceSourceN', 'uint32_t', [], is_const=True) ## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function] cls.add_method('GetUid', 'uint16_t', [], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function] cls.add_method('HasConstructor', 'bool', [], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function] cls.add_method('HasParent', 'bool', [], is_const=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function] cls.add_method('HideFromDocumentation', 'ns3::TypeId', []) ## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function] cls.add_method('IsChildOf', 'bool', [param('ns3::TypeId', 'other')], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function] cls.add_method('LookupAttributeByName', 'bool', 
[param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)], is_const=True) ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByHash(uint32_t hash) [member function] cls.add_method('LookupByHash', 'ns3::TypeId', [param('uint32_t', 'hash')], is_static=True) ## type-id.h (module 'core'): static bool ns3::TypeId::LookupByHashFailSafe(uint32_t hash, ns3::TypeId * tid) [member function] cls.add_method('LookupByHashFailSafe', 'bool', [param('uint32_t', 'hash'), param('ns3::TypeId *', 'tid')], is_static=True) ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function] cls.add_method('LookupByName', 'ns3::TypeId', [param('std::string', 'name')], is_static=True) ## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function] cls.add_method('LookupTraceSourceByName', 'ns3::Ptr< ns3::TraceSourceAccessor const >', [param('std::string', 'name')], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function] cls.add_method('MustHideFromDocumentation', 'bool', [], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function] cls.add_method('SetAttributeInitialValue', 'bool', [param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function] cls.add_method('SetGroupName', 'ns3::TypeId', [param('std::string', 'groupName')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function] cls.add_method('SetParent', 'ns3::TypeId', [param('ns3::TypeId', 'tid')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetSize(std::size_t size) [member function] 
# NOTE(review): auto-generated PyBindGen registration code for the ns-3
# Python bindings (fd-net-device module).  This chunk reached review with
# its newlines collapsed; the statements below are token-for-token the
# originals, restored to one statement per line.  Each register_* function
# receives the pybindgen root module and a class wrapper ('cls') and
# attaches the constructors/methods/attributes of one C++ type.  Do not
# hand-edit the registrations: this file is regenerated by the ns-3 API
# scanner.
    # (continuation of register_Ns3TypeId_methods, whose 'def' line is
    # earlier in the file)
    cls.add_method('SetSize', 'ns3::TypeId', [param('std::size_t', 'size')])
    ## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t tid) [member function]
    cls.add_method('SetUid', 'void', [param('uint16_t', 'tid')])
    return

def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable]
    cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable]
    cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable]
    cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable]
    cls.add_instance_attribute('help', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable]
    cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable]
    cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
    return

def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable]
    cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::callback [variable]
    cls.add_instance_attribute('callback', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable]
    cls.add_instance_attribute('help', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    return

def register_Ns3Empty_methods(root_module, cls):
    ## empty.h (module 'core'): ns3::empty::empty() [constructor]
    cls.add_constructor([])
    ## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::empty const &', 'arg0')])
    return

def register_Ns3Int64x64_t_methods(root_module, cls):
    # Arithmetic/comparison operator registrations for the 64.64 fixed-point type.
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
    cls.add_unary_numeric_operator('-')
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('>')
    cls.add_binary_comparison_operator('!=')
    cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', u'right'))
    cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', u'right'))
    cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', u'right'))
    cls.add_inplace_numeric_operator('/=', param('ns3::int64x64_t const &', u'right'))
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('<=')
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('>=')
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t() [constructor]
    cls.add_constructor([])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(double v) [constructor]
    cls.add_constructor([param('double', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long double v) [constructor]
    cls.add_constructor([param('long double', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int v) [constructor]
    cls.add_constructor([param('int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long int v) [constructor]
    cls.add_constructor([param('long int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long int v) [constructor]
    cls.add_constructor([param('long long int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(unsigned int v) [constructor]
    cls.add_constructor([param('unsigned int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long unsigned int v) [constructor]
    cls.add_constructor([param('long unsigned int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long unsigned int v) [constructor]
    cls.add_constructor([param('long long unsigned int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int64_t hi, uint64_t lo) [constructor]
    cls.add_constructor([param('int64_t', 'hi'), param('uint64_t', 'lo')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(ns3::int64x64_t const & o) [copy constructor]
    cls.add_constructor([param('ns3::int64x64_t const &', 'o')])
    ## int64x64-double.h (module 'core'): double ns3::int64x64_t::GetDouble() const [member function]
    cls.add_method('GetDouble', 'double', [], is_const=True)
    ## int64x64-double.h (module 'core'): int64_t ns3::int64x64_t::GetHigh() const [member function]
    cls.add_method('GetHigh', 'int64_t', [], is_const=True)
    ## int64x64-double.h (module 'core'): uint64_t ns3::int64x64_t::GetLow() const [member function]
    cls.add_method('GetLow', 'uint64_t', [], is_const=True)
    ## int64x64-double.h (module 'core'): static ns3::int64x64_t ns3::int64x64_t::Invert(uint64_t v) [member function]
    cls.add_method('Invert', 'ns3::int64x64_t', [param('uint64_t', 'v')], is_static=True)
    ## int64x64-double.h (module 'core'): void ns3::int64x64_t::MulByInvert(ns3::int64x64_t const & o) [member function]
    cls.add_method('MulByInvert', 'void', [param('ns3::int64x64_t const &', 'o')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::implementation [variable]
    cls.add_static_attribute('implementation', 'ns3::int64x64_t::impl_type const', is_const=True)
    return

def register_Ns3Chunk_methods(root_module, cls):
    ## chunk.h (module 'network'): ns3::Chunk::Chunk() [constructor]
    cls.add_constructor([])
    ## chunk.h (module 'network'): ns3::Chunk::Chunk(ns3::Chunk const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Chunk const &', 'arg0')])
    ## chunk.h (module 'network'): uint32_t ns3::Chunk::Deserialize(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_virtual=True)
    ## chunk.h (module 'network'): static ns3::TypeId ns3::Chunk::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## chunk.h (module 'network'): void ns3::Chunk::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3FdNetDeviceHelper_methods(root_module, cls):
    ## fd-net-device-helper.h (module 'fd-net-device'): ns3::FdNetDeviceHelper::FdNetDeviceHelper(ns3::FdNetDeviceHelper const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::FdNetDeviceHelper const &', 'arg0')])
    ## fd-net-device-helper.h (module 'fd-net-device'): ns3::FdNetDeviceHelper::FdNetDeviceHelper() [constructor]
    cls.add_constructor([])
    ## fd-net-device-helper.h (module 'fd-net-device'): ns3::NetDeviceContainer ns3::FdNetDeviceHelper::Install(ns3::Ptr<ns3::Node> node) const [member function]
    cls.add_method('Install', 'ns3::NetDeviceContainer', [param('ns3::Ptr< ns3::Node >', 'node')], is_const=True, is_virtual=True)
    ## fd-net-device-helper.h (module 'fd-net-device'): ns3::NetDeviceContainer ns3::FdNetDeviceHelper::Install(std::string name) const [member function]
    cls.add_method('Install', 'ns3::NetDeviceContainer', [param('std::string', 'name')], is_const=True, is_virtual=True)
    ## fd-net-device-helper.h (module 'fd-net-device'): ns3::NetDeviceContainer ns3::FdNetDeviceHelper::Install(ns3::NodeContainer const & c) const [member function]
    cls.add_method('Install', 'ns3::NetDeviceContainer', [param('ns3::NodeContainer const &', 'c')], is_const=True, is_virtual=True)
    ## fd-net-device-helper.h (module 'fd-net-device'): void ns3::FdNetDeviceHelper::SetAttribute(std::string n1, ns3::AttributeValue const & v1) [member function]
    cls.add_method('SetAttribute', 'void', [param('std::string', 'n1'), param('ns3::AttributeValue const &', 'v1')])
    ## fd-net-device-helper.h (module 'fd-net-device'): ns3::Ptr<ns3::NetDevice> ns3::FdNetDeviceHelper::InstallPriv(ns3::Ptr<ns3::Node> node) const [member function]
    cls.add_method('InstallPriv', 'ns3::Ptr< ns3::NetDevice >', [param('ns3::Ptr< ns3::Node >', 'node')], is_const=True, visibility='protected', is_virtual=True)
    ## fd-net-device-helper.h (module 'fd-net-device'): void ns3::FdNetDeviceHelper::EnableAsciiInternal(ns3::Ptr<ns3::OutputStreamWrapper> stream, std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool explicitFilename) [member function]
    cls.add_method('EnableAsciiInternal', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'explicitFilename')], visibility='private', is_virtual=True)
    ## fd-net-device-helper.h (module 'fd-net-device'): void ns3::FdNetDeviceHelper::EnablePcapInternal(std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool promiscuous, bool explicitFilename) [member function]
    cls.add_method('EnablePcapInternal', 'void', [param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'promiscuous'), param('bool', 'explicitFilename')], visibility='private', is_virtual=True)
    return

def register_Ns3Header_methods(root_module, cls):
    cls.add_output_stream_operator()
    ## header.h (module 'network'): ns3::Header::Header() [constructor]
    cls.add_constructor([])
    ## header.h (module 'network'): ns3::Header::Header(ns3::Header const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Header const &', 'arg0')])
    ## header.h (module 'network'): uint32_t ns3::Header::Deserialize(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_virtual=True)
    ## header.h (module 'network'): uint32_t ns3::Header::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## header.h (module 'network'): static ns3::TypeId ns3::Header::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## header.h (module 'network'): void ns3::Header::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## header.h (module 'network'): void ns3::Header::Serialize(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3Object_methods(root_module, cls):
    ## object.h (module 'core'): ns3::Object::Object() [constructor]
    cls.add_constructor([])
    ## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function]
    cls.add_method('AggregateObject', 'void', [param('ns3::Ptr< ns3::Object >', 'other')])
    ## object.h (module 'core'): void ns3::Object::Dispose() [member function]
    cls.add_method('Dispose', 'void', [])
    ## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function]
    cls.add_method('GetAggregateIterator', 'ns3::Object::AggregateIterator', [], is_const=True)
    ## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    ## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## object.h (module 'core'): void ns3::Object::Initialize() [member function]
    cls.add_method('Initialize', 'void', [])
    ## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [copy constructor]
    cls.add_constructor([param('ns3::Object const &', 'o')], visibility='protected')
    ## object.h (module 'core'): void ns3::Object::DoDispose() [member function]
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    ## object.h (module 'core'): void ns3::Object::DoInitialize() [member function]
    cls.add_method('DoInitialize', 'void', [], visibility='protected', is_virtual=True)
    ## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function]
    cls.add_method('NotifyNewAggregate', 'void', [], visibility='protected', is_virtual=True)
    return

def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
    ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
    ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor]
    cls.add_constructor([])
    ## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function]
    cls.add_method('HasNext', 'bool', [], is_const=True)
    ## object.h (module 'core'): ns3::Ptr<ns3::Object const> ns3::Object::AggregateIterator::Next() [member function]
    cls.add_method('Next', 'ns3::Ptr< ns3::Object const >', [])
    return

def register_Ns3PcapFileWrapper_methods(root_module, cls):
    ## pcap-file-wrapper.h (module 'network'): static ns3::TypeId ns3::PcapFileWrapper::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## pcap-file-wrapper.h (module 'network'): ns3::PcapFileWrapper::PcapFileWrapper() [constructor]
    cls.add_constructor([])
    ## pcap-file-wrapper.h (module 'network'): bool ns3::PcapFileWrapper::Fail() const [member function]
    cls.add_method('Fail', 'bool', [], is_const=True)
    ## pcap-file-wrapper.h (module 'network'): bool ns3::PcapFileWrapper::Eof() const [member function]
    cls.add_method('Eof', 'bool', [], is_const=True)
    ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Clear() [member function]
    cls.add_method('Clear', 'void', [])
    ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Open(std::string const & filename, std::_Ios_Openmode mode) [member function]
    cls.add_method('Open', 'void', [param('std::string const &', 'filename'), param('std::_Ios_Openmode', 'mode')])
    ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Close() [member function]
    cls.add_method('Close', 'void', [])
    ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Init(uint32_t dataLinkType, uint32_t snapLen=std::numeric_limits<unsigned int>::max(), int32_t tzCorrection=ns3::PcapFile::ZONE_DEFAULT) [member function]
    cls.add_method('Init', 'void', [param('uint32_t', 'dataLinkType'), param('uint32_t', 'snapLen', default_value='std::numeric_limits<unsigned int>::max()'), param('int32_t', 'tzCorrection', default_value='ns3::PcapFile::ZONE_DEFAULT')])
    ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Write(ns3::Time t, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('Write', 'void', [param('ns3::Time', 't'), param('ns3::Ptr< ns3::Packet const >', 'p')])
    ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Write(ns3::Time t, ns3::Header const & header, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('Write', 'void', [param('ns3::Time', 't'), param('ns3::Header const &', 'header'), param('ns3::Ptr< ns3::Packet const >', 'p')])
    ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Write(ns3::Time t, uint8_t const * buffer, uint32_t length) [member function]
    cls.add_method('Write', 'void', [param('ns3::Time', 't'), param('uint8_t const *', 'buffer'), param('uint32_t', 'length')])
    ## pcap-file-wrapper.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::PcapFileWrapper::Read(ns3::Time & t) [member function]
    cls.add_method('Read', 'ns3::Ptr< ns3::Packet >', [param('ns3::Time &', 't')])
    ## pcap-file-wrapper.h (module 'network'): uint32_t ns3::PcapFileWrapper::GetMagic() [member function]
    cls.add_method('GetMagic', 'uint32_t', [])
    ## pcap-file-wrapper.h (module 'network'): uint16_t ns3::PcapFileWrapper::GetVersionMajor() [member function]
    cls.add_method('GetVersionMajor', 'uint16_t', [])
    ## pcap-file-wrapper.h (module 'network'): uint16_t ns3::PcapFileWrapper::GetVersionMinor() [member function]
    cls.add_method('GetVersionMinor', 'uint16_t', [])
    ## pcap-file-wrapper.h (module 'network'): int32_t ns3::PcapFileWrapper::GetTimeZoneOffset() [member function]
    cls.add_method('GetTimeZoneOffset', 'int32_t', [])
    ## pcap-file-wrapper.h (module 'network'): uint32_t ns3::PcapFileWrapper::GetSigFigs() [member function]
    cls.add_method('GetSigFigs', 'uint32_t', [])
    ## pcap-file-wrapper.h (module 'network'): uint32_t ns3::PcapFileWrapper::GetSnapLen() [member function]
    cls.add_method('GetSnapLen', 'uint32_t', [])
    ## pcap-file-wrapper.h (module 'network'): uint32_t ns3::PcapFileWrapper::GetDataLinkType() [member function]
    cls.add_method('GetDataLinkType', 'uint32_t', [])
    return

# The fourteen functions below all follow the same generated pattern:
# register the default constructor, copy constructor and static Cleanup()
# of one SimpleRefCount<T, empty, DefaultDeleter<T>> instantiation.

def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount(ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter< ns3::EventImpl > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3FdReader_Ns3Empty_Ns3DefaultDeleter__lt__ns3FdReader__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> >::SimpleRefCount(ns3::SimpleRefCount<ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::FdReader, ns3::empty, ns3::DefaultDeleter< ns3::FdReader > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter< ns3::Hash::Implementation > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3NetDeviceQueue_Ns3Empty_Ns3DefaultDeleter__lt__ns3NetDeviceQueue__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter<ns3::NetDeviceQueue> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter<ns3::NetDeviceQueue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter<ns3::NetDeviceQueue> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter< ns3::NetDeviceQueue > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter<ns3::NetDeviceQueue> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount(ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter< ns3::NixVector > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >::SimpleRefCount(ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter< ns3::OutputStreamWrapper > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter< ns3::Packet > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3QueueItem_Ns3Empty_Ns3DefaultDeleter__lt__ns3QueueItem__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> >::SimpleRefCount(ns3::SimpleRefCount<ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::QueueItem, ns3::empty, ns3::DefaultDeleter< ns3::QueueItem > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3SystemThread_Ns3Empty_Ns3DefaultDeleter__lt__ns3SystemThread__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >::SimpleRefCount(ns3::SimpleRefCount<ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::SystemThread, ns3::empty, ns3::DefaultDeleter< ns3::SystemThread > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SystemThread_methods(root_module, cls):
    ## system-thread.h (module 'core'): ns3::SystemThread::SystemThread(ns3::SystemThread const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::SystemThread const &', 'arg0')])
    ## system-thread.h (module 'core'): ns3::SystemThread::SystemThread(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [constructor]
    cls.add_constructor([param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')])
    ## system-thread.h (module 'core'): static bool ns3::SystemThread::Equals(pthread_t id) [member function]
    cls.add_method('Equals', 'bool', [param('pthread_t', 'id')], is_static=True)
    ## system-thread.h (module 'core'): void ns3::SystemThread::Join() [member function]
    cls.add_method('Join', 'void', [])
    ## system-thread.h (module 'core'): static pthread_t ns3::SystemThread::Self() [member function]
    cls.add_method('Self', 'pthread_t', [], is_static=True)
    ## system-thread.h (module 'core'): void ns3::SystemThread::Start() [member function]
    cls.add_method('Start', 'void', [])
    return

def register_Ns3Time_methods(root_module, cls):
    cls.add_binary_numeric_operator('*', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right'))
    # (this call's remaining arguments continue on the next original line of the chunk)
    cls.add_binary_numeric_operator('-',
root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right')) cls.add_binary_numeric_operator('/', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right')) cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('>') cls.add_binary_comparison_operator('!=') cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', u'right')) cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', u'right')) cls.add_output_stream_operator() cls.add_binary_comparison_operator('<=') cls.add_binary_comparison_operator('==') cls.add_binary_comparison_operator('>=') ## nstime.h (module 'core'): ns3::Time::Time() [constructor] cls.add_constructor([]) ## nstime.h (module 'core'): ns3::Time::Time(ns3::Time const & o) [copy constructor] cls.add_constructor([param('ns3::Time const &', 'o')]) ## nstime.h (module 'core'): ns3::Time::Time(double v) [constructor] cls.add_constructor([param('double', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(int v) [constructor] cls.add_constructor([param('int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long int v) [constructor] cls.add_constructor([param('long int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long long int v) [constructor] cls.add_constructor([param('long long int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(unsigned int v) [constructor] cls.add_constructor([param('unsigned int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long unsigned int v) [constructor] cls.add_constructor([param('long unsigned int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long long unsigned int v) [constructor] cls.add_constructor([param('long long unsigned int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(ns3::int64x64_t const & v) [constructor] cls.add_constructor([param('ns3::int64x64_t const &', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(std::string const & s) [constructor] 
cls.add_constructor([param('std::string const &', 's')]) ## nstime.h (module 'core'): ns3::TimeWithUnit ns3::Time::As(ns3::Time::Unit const unit) const [member function] cls.add_method('As', 'ns3::TimeWithUnit', [param('ns3::Time::Unit const', 'unit')], is_const=True) ## nstime.h (module 'core'): int ns3::Time::Compare(ns3::Time const & o) const [member function] cls.add_method('Compare', 'int', [param('ns3::Time const &', 'o')], is_const=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value) [member function] cls.add_method('From', 'ns3::Time', [param('ns3::int64x64_t const &', 'value')], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value, ns3::Time::Unit unit) [member function] cls.add_method('From', 'ns3::Time', [param('ns3::int64x64_t const &', 'value'), param('ns3::Time::Unit', 'unit')], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromDouble(double value, ns3::Time::Unit unit) [member function] cls.add_method('FromDouble', 'ns3::Time', [param('double', 'value'), param('ns3::Time::Unit', 'unit')], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromInteger(uint64_t value, ns3::Time::Unit unit) [member function] cls.add_method('FromInteger', 'ns3::Time', [param('uint64_t', 'value'), param('ns3::Time::Unit', 'unit')], is_static=True) ## nstime.h (module 'core'): double ns3::Time::GetDays() const [member function] cls.add_method('GetDays', 'double', [], is_const=True) ## nstime.h (module 'core'): double ns3::Time::GetDouble() const [member function] cls.add_method('GetDouble', 'double', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetFemtoSeconds() const [member function] cls.add_method('GetFemtoSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): double ns3::Time::GetHours() const [member function] cls.add_method('GetHours', 'double', [], is_const=True) ## nstime.h (module 
'core'): int64_t ns3::Time::GetInteger() const [member function] cls.add_method('GetInteger', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetMicroSeconds() const [member function] cls.add_method('GetMicroSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetMilliSeconds() const [member function] cls.add_method('GetMilliSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): double ns3::Time::GetMinutes() const [member function] cls.add_method('GetMinutes', 'double', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetNanoSeconds() const [member function] cls.add_method('GetNanoSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetPicoSeconds() const [member function] cls.add_method('GetPicoSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): static ns3::Time::Unit ns3::Time::GetResolution() [member function] cls.add_method('GetResolution', 'ns3::Time::Unit', [], is_static=True) ## nstime.h (module 'core'): double ns3::Time::GetSeconds() const [member function] cls.add_method('GetSeconds', 'double', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetTimeStep() const [member function] cls.add_method('GetTimeStep', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): double ns3::Time::GetYears() const [member function] cls.add_method('GetYears', 'double', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsNegative() const [member function] cls.add_method('IsNegative', 'bool', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsPositive() const [member function] cls.add_method('IsPositive', 'bool', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyNegative() const [member function] cls.add_method('IsStrictlyNegative', 'bool', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyPositive() const [member function] 
cls.add_method('IsStrictlyPositive', 'bool', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsZero() const [member function] cls.add_method('IsZero', 'bool', [], is_const=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::Max() [member function] cls.add_method('Max', 'ns3::Time', [], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::Min() [member function] cls.add_method('Min', 'ns3::Time', [], is_static=True) ## nstime.h (module 'core'): static void ns3::Time::SetResolution(ns3::Time::Unit resolution) [member function] cls.add_method('SetResolution', 'void', [param('ns3::Time::Unit', 'resolution')], is_static=True) ## nstime.h (module 'core'): static bool ns3::Time::StaticInit() [member function] cls.add_method('StaticInit', 'bool', [], is_static=True) ## nstime.h (module 'core'): ns3::int64x64_t ns3::Time::To(ns3::Time::Unit unit) const [member function] cls.add_method('To', 'ns3::int64x64_t', [param('ns3::Time::Unit', 'unit')], is_const=True) ## nstime.h (module 'core'): double ns3::Time::ToDouble(ns3::Time::Unit unit) const [member function] cls.add_method('ToDouble', 'double', [param('ns3::Time::Unit', 'unit')], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::ToInteger(ns3::Time::Unit unit) const [member function] cls.add_method('ToInteger', 'int64_t', [param('ns3::Time::Unit', 'unit')], is_const=True) return def register_Ns3TraceSourceAccessor_methods(root_module, cls): ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [copy constructor] cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')]) ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor] cls.add_constructor([]) ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function] 
cls.add_method('Connect', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True) ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function] cls.add_method('ConnectWithoutContext', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True) ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function] cls.add_method('Disconnect', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True) ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function] cls.add_method('DisconnectWithoutContext', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3Trailer_methods(root_module, cls): cls.add_output_stream_operator() ## trailer.h (module 'network'): ns3::Trailer::Trailer() [constructor] cls.add_constructor([]) ## trailer.h (module 'network'): ns3::Trailer::Trailer(ns3::Trailer const & arg0) [copy constructor] cls.add_constructor([param('ns3::Trailer const &', 'arg0')]) ## trailer.h (module 'network'): uint32_t ns3::Trailer::Deserialize(ns3::Buffer::Iterator end) [member function] cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'end')], is_pure_virtual=True, is_virtual=True) ## 
trailer.h (module 'network'): uint32_t ns3::Trailer::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## trailer.h (module 'network'): static ns3::TypeId ns3::Trailer::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## trailer.h (module 'network'): void ns3::Trailer::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True) ## trailer.h (module 'network'): void ns3::Trailer::Serialize(ns3::Buffer::Iterator start) const [member function] cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3AttributeAccessor_methods(root_module, cls): ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')]) ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function] cls.add_method('Get', 'bool', [param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function] cls.add_method('HasGetter', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function] cls.add_method('HasSetter', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool 
ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function] cls.add_method('Set', 'bool', [param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3AttributeChecker_methods(root_module, cls): ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')]) ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function] cls.add_method('Check', 'bool', [param('ns3::AttributeValue const &', 'value')], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function] cls.add_method('Copy', 'bool', [param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function] cls.add_method('Create', 'ns3::Ptr< ns3::AttributeValue >', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function] cls.add_method('CreateValidValue', 'ns3::Ptr< ns3::AttributeValue >', [param('ns3::AttributeValue const &', 'value')], is_const=True) ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function] cls.add_method('GetUnderlyingTypeInformation', 
'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function] cls.add_method('GetValueTypeName', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function] cls.add_method('HasUnderlyingTypeInformation', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3AttributeValue_methods(root_module, cls): ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')]) ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_pure_virtual=True, is_virtual=True) ## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3CallbackChecker_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): 
ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')]) return def register_Ns3CallbackImplBase_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor] cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')]) ## callback.h (module 'core'): std::string ns3::CallbackImplBase::GetTypeid() const [member function] cls.add_method('GetTypeid', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')], is_pure_virtual=True, is_const=True, is_virtual=True) ## callback.h (module 'core'): static std::string ns3::CallbackImplBase::Demangle(std::string const & mangled) [member function] cls.add_method('Demangle', 'std::string', [param('std::string const &', 'mangled')], is_static=True, visibility='protected') return def register_Ns3CallbackValue_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')]) ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor] cls.add_constructor([param('ns3::CallbackBase const &', 'base')]) ## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, 
is_virtual=True) ## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function] cls.add_method('Set', 'void', [param('ns3::CallbackBase', 'base')]) return def register_Ns3DataRateChecker_methods(root_module, cls): ## data-rate.h (module 'network'): ns3::DataRateChecker::DataRateChecker() [constructor] cls.add_constructor([]) ## data-rate.h (module 'network'): ns3::DataRateChecker::DataRateChecker(ns3::DataRateChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::DataRateChecker const &', 'arg0')]) return def register_Ns3DataRateValue_methods(root_module, cls): ## data-rate.h (module 'network'): ns3::DataRateValue::DataRateValue() [constructor] cls.add_constructor([]) ## data-rate.h (module 'network'): ns3::DataRateValue::DataRateValue(ns3::DataRateValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::DataRateValue const &', 'arg0')]) ## data-rate.h (module 'network'): ns3::DataRateValue::DataRateValue(ns3::DataRate const & value) [constructor] cls.add_constructor([param('ns3::DataRate const &', 'value')]) ## data-rate.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::DataRateValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## data-rate.h (module 'network'): bool 
ns3::DataRateValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## data-rate.h (module 'network'): ns3::DataRate ns3::DataRateValue::Get() const [member function] cls.add_method('Get', 'ns3::DataRate', [], is_const=True) ## data-rate.h (module 'network'): std::string ns3::DataRateValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## data-rate.h (module 'network'): void ns3::DataRateValue::Set(ns3::DataRate const & value) [member function] cls.add_method('Set', 'void', [param('ns3::DataRate const &', 'value')]) return def register_Ns3EmptyAttributeValue_methods(root_module, cls): ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')]) ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, visibility='private', is_virtual=True) ## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], visibility='private', is_virtual=True) ## attribute.h (module 'core'): std::string 
ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, visibility='private', is_virtual=True) return def register_Ns3EmuFdNetDeviceHelper_methods(root_module, cls): ## emu-fd-net-device-helper.h (module 'fd-net-device'): ns3::EmuFdNetDeviceHelper::EmuFdNetDeviceHelper(ns3::EmuFdNetDeviceHelper const & arg0) [copy constructor] cls.add_constructor([param('ns3::EmuFdNetDeviceHelper const &', 'arg0')]) ## emu-fd-net-device-helper.h (module 'fd-net-device'): ns3::EmuFdNetDeviceHelper::EmuFdNetDeviceHelper() [constructor] cls.add_constructor([]) ## emu-fd-net-device-helper.h (module 'fd-net-device'): std::string ns3::EmuFdNetDeviceHelper::GetDeviceName() [member function] cls.add_method('GetDeviceName', 'std::string', []) ## emu-fd-net-device-helper.h (module 'fd-net-device'): void ns3::EmuFdNetDeviceHelper::SetDeviceName(std::string deviceName) [member function] cls.add_method('SetDeviceName', 'void', [param('std::string', 'deviceName')]) ## emu-fd-net-device-helper.h (module 'fd-net-device'): int ns3::EmuFdNetDeviceHelper::CreateFileDescriptor() const [member function] cls.add_method('CreateFileDescriptor', 'int', [], is_const=True, visibility='protected', is_virtual=True) ## emu-fd-net-device-helper.h (module 'fd-net-device'): ns3::Ptr<ns3::NetDevice> ns3::EmuFdNetDeviceHelper::InstallPriv(ns3::Ptr<ns3::Node> node) const [member function] cls.add_method('InstallPriv', 'ns3::Ptr< ns3::NetDevice >', [param('ns3::Ptr< ns3::Node >', 'node')], is_const=True, visibility='protected', is_virtual=True) ## emu-fd-net-device-helper.h (module 'fd-net-device'): void ns3::EmuFdNetDeviceHelper::SetFileDescriptor(ns3::Ptr<ns3::FdNetDevice> device) const [member function] cls.add_method('SetFileDescriptor', 'void', [param('ns3::Ptr< ns3::FdNetDevice >', 'device')], is_const=True, 
visibility='protected', is_virtual=True) return def register_Ns3EventImpl_methods(root_module, cls): ## event-impl.h (module 'core'): ns3::EventImpl::EventImpl(ns3::EventImpl const & arg0) [copy constructor] cls.add_constructor([param('ns3::EventImpl const &', 'arg0')]) ## event-impl.h (module 'core'): ns3::EventImpl::EventImpl() [constructor] cls.add_constructor([]) ## event-impl.h (module 'core'): void ns3::EventImpl::Cancel() [member function] cls.add_method('Cancel', 'void', []) ## event-impl.h (module 'core'): void ns3::EventImpl::Invoke() [member function] cls.add_method('Invoke', 'void', []) ## event-impl.h (module 'core'): bool ns3::EventImpl::IsCancelled() [member function] cls.add_method('IsCancelled', 'bool', []) ## event-impl.h (module 'core'): void ns3::EventImpl::Notify() [member function] cls.add_method('Notify', 'void', [], is_pure_virtual=True, visibility='protected', is_virtual=True) return def register_Ns3FdReader_methods(root_module, cls): ## unix-fd-reader.h (module 'core'): ns3::FdReader::FdReader(ns3::FdReader const & arg0) [copy constructor] cls.add_constructor([param('ns3::FdReader const &', 'arg0')]) ## unix-fd-reader.h (module 'core'): ns3::FdReader::FdReader() [constructor] cls.add_constructor([]) ## unix-fd-reader.h (module 'core'): void ns3::FdReader::Start(int fd, ns3::Callback<void, unsigned char*, long, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> readCallback) [member function] cls.add_method('Start', 'void', [param('int', 'fd'), param('ns3::Callback< void, unsigned char *, long, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'readCallback')]) ## unix-fd-reader.h (module 'core'): void ns3::FdReader::Stop() [member function] cls.add_method('Stop', 'void', []) ## unix-fd-reader.h (module 'core'): ns3::FdReader::Data ns3::FdReader::DoRead() [member function] cls.add_method('DoRead', 'ns3::FdReader::Data', [], is_pure_virtual=True, visibility='protected', 
is_virtual=True) return def register_Ns3Ipv4AddressChecker_methods(root_module, cls): ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker(ns3::Ipv4AddressChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4AddressChecker const &', 'arg0')]) return def register_Ns3Ipv4AddressValue_methods(root_module, cls): ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4AddressValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4AddressValue const &', 'arg0')]) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4Address const & value) [constructor] cls.add_constructor([param('ns3::Ipv4Address const &', 'value')]) ## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4AddressValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4AddressValue::Get() const [member function] cls.add_method('Get', 'ns3::Ipv4Address', [], is_const=True) ## ipv4-address.h (module 'network'): std::string ns3::Ipv4AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], 
                   is_const=True, is_virtual=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4AddressValue::Set(ns3::Ipv4Address const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::Ipv4Address const &', 'value')])
    return

# NOTE(review): the code below appears to be pybindgen auto-generated ns-3 binding
# registrations (each statement mirrors a C++ declaration quoted in the '##' comment
# above it) — presumably it should be regenerated, not hand-edited; confirm with the
# bindings build setup.

def register_Ns3Ipv4MaskChecker_methods(root_module, cls):
    """Register ns3::Ipv4MaskChecker constructor bindings on *cls* (root_module unused here)."""
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker(ns3::Ipv4MaskChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4MaskChecker const &', 'arg0')])
    return

def register_Ns3Ipv4MaskValue_methods(root_module, cls):
    """Register ns3::Ipv4MaskValue constructor/method bindings on *cls* (root_module unused here)."""
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4MaskValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4MaskValue const &', 'arg0')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4Mask const & value) [constructor]
    cls.add_constructor([param('ns3::Ipv4Mask const &', 'value')])
    ## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4MaskValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4MaskValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask ns3::Ipv4MaskValue::Get() const [member function]
    cls.add_method('Get', 'ns3::Ipv4Mask', [], is_const=True)
    ## ipv4-address.h (module 'network'): std::string ns3::Ipv4MaskValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4MaskValue::Set(ns3::Ipv4Mask const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::Ipv4Mask const &', 'value')])
    return

def register_Ns3Ipv6AddressChecker_methods(root_module, cls):
    """Register ns3::Ipv6AddressChecker constructor bindings on *cls* (root_module unused here)."""
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker(ns3::Ipv6AddressChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6AddressChecker const &', 'arg0')])
    return

def register_Ns3Ipv6AddressValue_methods(root_module, cls):
    """Register ns3::Ipv6AddressValue constructor/method bindings on *cls* (root_module unused here)."""
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6AddressValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6AddressValue const &', 'arg0')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6Address const & value) [constructor]
    cls.add_constructor([param('ns3::Ipv6Address const &', 'value')])
    ## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6AddressValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6AddressValue::Get() const [member function]
    cls.add_method('Get', 'ns3::Ipv6Address', [], is_const=True)
    ## ipv6-address.h (module 'network'): std::string ns3::Ipv6AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6AddressValue::Set(ns3::Ipv6Address const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::Ipv6Address const &', 'value')])
    return

def register_Ns3Ipv6PrefixChecker_methods(root_module, cls):
    """Register ns3::Ipv6PrefixChecker constructor bindings on *cls* (root_module unused here)."""
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker(ns3::Ipv6PrefixChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6PrefixChecker const &', 'arg0')])
    return

def register_Ns3Ipv6PrefixValue_methods(root_module, cls):
    """Register ns3::Ipv6PrefixValue constructor/method bindings on *cls* (root_module unused here)."""
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6PrefixValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6PrefixValue const &', 'arg0')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6Prefix const & value) [constructor]
    cls.add_constructor([param('ns3::Ipv6Prefix const &', 'value')])
    ## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6PrefixValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6PrefixValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix ns3::Ipv6PrefixValue::Get() const [member function]
    cls.add_method('Get', 'ns3::Ipv6Prefix', [], is_const=True)
    ## ipv6-address.h (module 'network'): std::string ns3::Ipv6PrefixValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6PrefixValue::Set(ns3::Ipv6Prefix const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::Ipv6Prefix const &', 'value')])
    return

def register_Ns3Mac48AddressChecker_methods(root_module, cls):
    """Register ns3::Mac48AddressChecker constructor bindings on *cls* (root_module unused here)."""
    ## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker() [constructor]
    cls.add_constructor([])
    ## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker(ns3::Mac48AddressChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Mac48AddressChecker const &', 'arg0')])
    return

def register_Ns3Mac48AddressValue_methods(root_module, cls):
    """Register ns3::Mac48AddressValue constructor/method bindings on *cls* (root_module unused here)."""
    ## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue() [constructor]
    cls.add_constructor([])
    ## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48AddressValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Mac48AddressValue const &', 'arg0')])
    ## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48Address const & value) [constructor]
    cls.add_constructor([param('ns3::Mac48Address const &', 'value')])
    ## mac48-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Mac48AddressValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## mac48-address.h (module 'network'): bool ns3::Mac48AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## mac48-address.h (module 'network'): ns3::Mac48Address ns3::Mac48AddressValue::Get() const [member function]
    cls.add_method('Get', 'ns3::Mac48Address', [], is_const=True)
    ## mac48-address.h (module 'network'): std::string ns3::Mac48AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## mac48-address.h (module 'network'): void ns3::Mac48AddressValue::Set(ns3::Mac48Address const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::Mac48Address const &', 'value')])
    return

def register_Ns3NetDevice_methods(root_module, cls):
    """Register ns3::NetDevice (abstract base) constructor/method bindings on *cls* (root_module unused here)."""
    ## net-device.h (module 'network'): ns3::NetDevice::NetDevice() [constructor]
    cls.add_constructor([])
    ## net-device.h (module 'network'): ns3::NetDevice::NetDevice(ns3::NetDevice const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::NetDevice const &', 'arg0')])
    ## net-device.h (module 'network'): void ns3::NetDevice::AddLinkChangeCallback(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function]
    cls.add_method('AddLinkChangeCallback', 'void', [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetAddress() const [member function]
    cls.add_method('GetAddress', 'ns3::Address', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetBroadcast() const [member function]
    cls.add_method('GetBroadcast', 'ns3::Address', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Ptr<ns3::Channel> ns3::NetDevice::GetChannel() const [member function]
    cls.add_method('GetChannel', 'ns3::Ptr< ns3::Channel >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): uint32_t ns3::NetDevice::GetIfIndex() const [member function]
    cls.add_method('GetIfIndex', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): uint16_t ns3::NetDevice::GetMtu() const [member function]
    cls.add_method('GetMtu', 'uint16_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]
    cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv4Address', 'multicastGroup')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]
    cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv6Address', 'addr')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NetDevice::GetNode() const [member function]
    cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): static ns3::TypeId ns3::NetDevice::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsBridge() const [member function]
    cls.add_method('IsBridge', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsBroadcast() const [member function]
    cls.add_method('IsBroadcast', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsLinkUp() const [member function]
    cls.add_method('IsLinkUp', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsMulticast() const [member function]
    cls.add_method('IsMulticast', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsPointToPoint() const [member function]
    cls.add_method('IsPointToPoint', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::NeedsArp() const [member function]
    cls.add_method('NeedsArp', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function]
    cls.add_method('Send', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function]
    cls.add_method('SendFrom', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetAddress(ns3::Address address) [member function]
    cls.add_method('SetAddress', 'void', [param('ns3::Address', 'address')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetIfIndex(uint32_t const index) [member function]
    cls.add_method('SetIfIndex', 'void', [param('uint32_t const', 'index')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::SetMtu(uint16_t const mtu) [member function]
    cls.add_method('SetMtu', 'bool', [param('uint16_t const', 'mtu')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function]
    cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetPromiscReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
    cls.add_method('SetPromiscReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
    cls.add_method('SetReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::SupportsSendFrom() const [member function]
    cls.add_method('SupportsSendFrom', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3NetDeviceQueue_methods(root_module, cls):
    """Register ns3::NetDeviceQueue constructor/method bindings on *cls* (root_module unused here)."""
    ## net-device.h (module 'network'): ns3::NetDeviceQueue::NetDeviceQueue(ns3::NetDeviceQueue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::NetDeviceQueue const &', 'arg0')])
    ## net-device.h (module 'network'): ns3::NetDeviceQueue::NetDeviceQueue() [constructor]
    cls.add_constructor([])
    ## net-device.h (module 'network'): bool ns3::NetDeviceQueue::HasWakeCallbackSet() const [member function]
    cls.add_method('HasWakeCallbackSet', 'bool', [], is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDeviceQueue::IsStopped() const [member function]
    cls.add_method('IsStopped', 'bool', [], is_const=True)
    ## net-device.h (module 'network'): void ns3::NetDeviceQueue::SetWakeCallback(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
    cls.add_method('SetWakeCallback', 'void', [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDeviceQueue::Start() [member function]
    cls.add_method('Start', 'void', [], is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDeviceQueue::Stop() [member function]
    cls.add_method('Stop', 'void', [], is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDeviceQueue::Wake() [member function]
    cls.add_method('Wake', 'void', [], is_virtual=True)
    return

def register_Ns3NetDeviceQueueInterface_methods(root_module, cls):
    """Register ns3::NetDeviceQueueInterface constructor/method bindings on *cls* (root_module unused here)."""
    ## net-device.h (module 'network'): ns3::NetDeviceQueueInterface::NetDeviceQueueInterface(ns3::NetDeviceQueueInterface const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::NetDeviceQueueInterface const &', 'arg0')])
    ## net-device.h (module 'network'): ns3::NetDeviceQueueInterface::NetDeviceQueueInterface() [constructor]
    cls.add_constructor([])
    ## net-device.h (module 'network'): uint8_t ns3::NetDeviceQueueInterface::GetSelectedQueue(ns3::Ptr<ns3::QueueItem> item) const [member function]
    cls.add_method('GetSelectedQueue', 'uint8_t', [param('ns3::Ptr< ns3::QueueItem >', 'item')], is_const=True)
    ## net-device.h (module 'network'): ns3::Ptr<ns3::NetDeviceQueue> ns3::NetDeviceQueueInterface::GetTxQueue(uint8_t i) const [member function]
    cls.add_method('GetTxQueue', 'ns3::Ptr< ns3::NetDeviceQueue >', [param('uint8_t', 'i')], is_const=True)
    ## net-device.h (module 'network'): uint8_t ns3::NetDeviceQueueInterface::GetTxQueuesN() const [member function]
    cls.add_method('GetTxQueuesN', 'uint8_t', [], is_const=True)
    ## net-device.h (module 'network'): static ns3::TypeId ns3::NetDeviceQueueInterface::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## net-device.h (module 'network'): bool ns3::NetDeviceQueueInterface::IsQueueDiscInstalled() const [member function]
    cls.add_method('IsQueueDiscInstalled', 'bool', [], is_const=True)
    ## net-device.h (module 'network'): void ns3::NetDeviceQueueInterface::SetQueueDiscInstalled(bool installed) [member function]
    cls.add_method('SetQueueDiscInstalled', 'void', [param('bool', 'installed')])
    ## net-device.h (module 'network'): void ns3::NetDeviceQueueInterface::SetSelectQueueCallback(ns3::Callback<unsigned char, ns3::Ptr<ns3::QueueItem>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
    cls.add_method('SetSelectQueueCallback', 'void', [param('ns3::Callback< unsigned char, ns3::Ptr< ns3::QueueItem >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')])
    ## net-device.h (module 'network'): void ns3::NetDeviceQueueInterface::SetTxQueuesN(uint8_t numTxQueues) [member function]
    cls.add_method('SetTxQueuesN', 'void', [param('uint8_t', 'numTxQueues')])
    ## net-device.h (module 'network'): void ns3::NetDeviceQueueInterface::DoDispose() [member function]
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    return

def register_Ns3NixVector_methods(root_module, cls):
    """Register ns3::NixVector constructor/method bindings on *cls* (root_module unused here)."""
    cls.add_output_stream_operator()
    ## nix-vector.h (module 'network'): ns3::NixVector::NixVector() [constructor]
    cls.add_constructor([])
    ## nix-vector.h (module 'network'): ns3::NixVector::NixVector(ns3::NixVector const & o) [copy constructor]
    cls.add_constructor([param('ns3::NixVector const &', 'o')])
    ## nix-vector.h (module 'network'): void ns3::NixVector::AddNeighborIndex(uint32_t newBits, uint32_t numberOfBits) [member function]
    cls.add_method('AddNeighborIndex', 'void', [param('uint32_t', 'newBits'), param('uint32_t', 'numberOfBits')])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::BitCount(uint32_t numberOfNeighbors) const [member function]
    cls.add_method('BitCount', 'uint32_t', [param('uint32_t', 'numberOfNeighbors')], is_const=True)
    ## nix-vector.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::NixVector::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::NixVector >', [], is_const=True)
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Deserialize(uint32_t const * buffer, uint32_t size) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('uint32_t const *', 'buffer'), param('uint32_t', 'size')])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::ExtractNeighborIndex(uint32_t numberOfBits) [member function]
    cls.add_method('ExtractNeighborIndex', 'uint32_t', [param('uint32_t', 'numberOfBits')])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetRemainingBits() [member function]
    cls.add_method('GetRemainingBits', 'uint32_t', [])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Serialize(uint32_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize', 'uint32_t', [param('uint32_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True)
    return

def register_Ns3Node_methods(root_module, cls):
    """Register ns3::Node constructor/method bindings on *cls* (root_module unused here)."""
    ## node.h (module 'network'): ns3::Node::Node(ns3::Node const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Node const &', 'arg0')])
    ## node.h (module 'network'): ns3::Node::Node() [constructor]
    cls.add_constructor([])
    ## node.h (module 'network'): ns3::Node::Node(uint32_t systemId) [constructor]
    cls.add_constructor([param('uint32_t', 'systemId')])
    ## node.h (module 'network'): uint32_t ns3::Node::AddApplication(ns3::Ptr<ns3::Application> application) [member function]
    cls.add_method('AddApplication', 'uint32_t', [param('ns3::Ptr< ns3::Application >', 'application')])
    ## node.h (module 'network'): uint32_t ns3::Node::AddDevice(ns3::Ptr<ns3::NetDevice> device) [member function]
    cls.add_method('AddDevice', 'uint32_t', [param('ns3::Ptr< ns3::NetDevice >', 'device')])
    ## node.h (module 'network'): static bool ns3::Node::ChecksumEnabled() [member function]
    cls.add_method('ChecksumEnabled', 'bool', [], is_static=True)
    ## node.h (module 'network'): ns3::Ptr<ns3::Application> ns3::Node::GetApplication(uint32_t index) const [member function]
    cls.add_method('GetApplication', 'ns3::Ptr< ns3::Application >', [param('uint32_t', 'index')], is_const=True)
    ## node.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Node::GetDevice(uint32_t index) const [member function]
    cls.add_method('GetDevice', 'ns3::Ptr< ns3::NetDevice >', [param('uint32_t', 'index')], is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetId() const [member function]
    cls.add_method('GetId', 'uint32_t', [], is_const=True)
    ## node.h (module 'network'): ns3::Time ns3::Node::GetLocalTime() const [member function]
    cls.add_method('GetLocalTime', 'ns3::Time', [], is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetNApplications() const [member function]
    cls.add_method('GetNApplications', 'uint32_t', [], is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetNDevices() const [member function]
    cls.add_method('GetNDevices', 'uint32_t', [], is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetSystemId() const [member function]
    cls.add_method('GetSystemId', 'uint32_t', [], is_const=True)
    ## node.h (module 'network'): static ns3::TypeId ns3::Node::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## node.h (module 'network'): void ns3::Node::RegisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function]
    cls.add_method('RegisterDeviceAdditionListener', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
    ## node.h (module 'network'): void ns3::Node::RegisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler, uint16_t protocolType, ns3::Ptr<ns3::NetDevice> device, bool promiscuous=false) [member function]
    cls.add_method('RegisterProtocolHandler', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler'), param('uint16_t', 'protocolType'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'promiscuous', default_value='false')])
    ## node.h (module 'network'): void ns3::Node::UnregisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function]
    cls.add_method('UnregisterDeviceAdditionListener', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
    ## node.h (module 'network'): void ns3::Node::UnregisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler) [member function]
    cls.add_method('UnregisterProtocolHandler', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler')])
    ## node.h (module 'network'): void ns3::Node::DoDispose() [member function]
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    ## node.h (module 'network'): void ns3::Node::DoInitialize() [member function]
    cls.add_method('DoInitialize', 'void', [], visibility='protected', is_virtual=True)
    return

def register_Ns3ObjectFactoryChecker_methods(root_module, cls):
    """Register ns3::ObjectFactoryChecker constructor bindings on *cls* (root_module unused here)."""
    ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker() [constructor]
    cls.add_constructor([])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker(ns3::ObjectFactoryChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectFactoryChecker const &', 'arg0')])
    return

def register_Ns3ObjectFactoryValue_methods(root_module, cls):
    """Register ns3::ObjectFactoryValue constructor/method bindings on *cls* (root_module unused here)."""
    ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue() [constructor]
    cls.add_constructor([])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactoryValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectFactoryValue const &', 'arg0')])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactory const & value) [constructor]
    cls.add_constructor([param('ns3::ObjectFactory const &', 'value')])
    ## object-factory.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::ObjectFactoryValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## object-factory.h (module 'core'): bool ns3::ObjectFactoryValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## object-factory.h (module 'core'): ns3::ObjectFactory ns3::ObjectFactoryValue::Get() const [member function]
    cls.add_method('Get', 'ns3::ObjectFactory', [], is_const=True)
    ## object-factory.h (module 'core'): std::string ns3::ObjectFactoryValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## object-factory.h (module 'core'): void ns3::ObjectFactoryValue::Set(ns3::ObjectFactory const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::ObjectFactory const &', 'value')])
    return

def register_Ns3OutputStreamWrapper_methods(root_module, cls):
    """Register ns3::OutputStreamWrapper constructor/method bindings on *cls* (root_module unused here)."""
    ## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(ns3::OutputStreamWrapper const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::OutputStreamWrapper const &', 'arg0')])
    ## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(std::string filename, std::_Ios_Openmode filemode) [constructor]
    cls.add_constructor([param('std::string', 'filename'), param('std::_Ios_Openmode', 'filemode')])
    ## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(std::ostream * os) [constructor]
    cls.add_constructor([param('std::ostream *', 'os')])
    ## output-stream-wrapper.h (module 'network'): std::ostream * ns3::OutputStreamWrapper::GetStream() [member function]
    cls.add_method('GetStream', 'std::ostream *', [])
    return

def register_Ns3Packet_methods(root_module, cls):
    """Register ns3::Packet constructor/method bindings on *cls* (root_module unused here)."""
    cls.add_output_stream_operator()
    ## packet.h (module 'network'): ns3::Packet::Packet() [constructor]
    cls.add_constructor([])
    ## packet.h (module 'network'): ns3::Packet::Packet(ns3::Packet const & o) [copy constructor]
    cls.add_constructor([param('ns3::Packet const &', 'o')])
    ## packet.h (module 'network'): ns3::Packet::Packet(uint32_t size) [constructor]
    cls.add_constructor([param('uint32_t', 'size')])
    ## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size, bool magic) [constructor]
    cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size'), param('bool', 'magic')])
    ## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size) [constructor]
    cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## packet.h (module 'network'): void ns3::Packet::AddAtEnd(ns3::Ptr<const ns3::Packet> packet) [member function]
    cls.add_method('AddAtEnd', 'void', [param('ns3::Ptr< ns3::Packet const >', 'packet')])
    ## packet.h (module 'network'): void ns3::Packet::AddByteTag(ns3::Tag const & tag) const [member function]
    cls.add_method('AddByteTag', 'void', [param('ns3::Tag const &', 'tag')], is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::AddHeader(ns3::Header const & header) [member function]
    cls.add_method('AddHeader', 'void', [param('ns3::Header const &', 'header')])
    ## packet.h (module 'network'): void ns3::Packet::AddPacketTag(ns3::Tag const & tag) const [member function]
    cls.add_method('AddPacketTag', 'void', [param('ns3::Tag const &', 'tag')], is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::AddPaddingAtEnd(uint32_t size) [member function]
    cls.add_method('AddPaddingAtEnd', 'void', [param('uint32_t', 'size')])
    ## packet.h (module 'network'): void ns3::Packet::AddTrailer(ns3::Trailer const & trailer) [member function]
    cls.add_method('AddTrailer', 'void', [param('ns3::Trailer const &', 'trailer')])
    ## packet.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::Packet::BeginItem() const [member function]
    cls.add_method('BeginItem', 'ns3::PacketMetadata::ItemIterator', [], is_const=True)
    ## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::Packet >', [], is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::CopyData(uint8_t * buffer, uint32_t size) const [member function]
    cls.add_method('CopyData', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')], is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::CopyData(std::ostream * os, uint32_t size) const [member function]
    cls.add_method('CopyData', 'void', [param('std::ostream *', 'os'), param('uint32_t', 'size')], is_const=True)
    ## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::CreateFragment(uint32_t start, uint32_t length) const [member function]
    cls.add_method('CreateFragment', 'ns3::Ptr< ns3::Packet >', [param('uint32_t', 'start'), param('uint32_t', 'length')], is_const=True)
    ## packet.h (module 'network'): static void ns3::Packet::EnableChecking() [member function]
    cls.add_method('EnableChecking', 'void', [], is_static=True)
    ## packet.h (module 'network'): static void ns3::Packet::EnablePrinting() [member function]
    cls.add_method('EnablePrinting', 'void', [], is_static=True)
    ## packet.h (module 'network'): bool ns3::Packet::FindFirstMatchingByteTag(ns3::Tag & tag) const [member function]
    cls.add_method('FindFirstMatchingByteTag', 'bool', [param('ns3::Tag &', 'tag')], is_const=True)
    ## packet.h (module 'network'): ns3::ByteTagIterator ns3::Packet::GetByteTagIterator() const [member function]
    cls.add_method('GetByteTagIterator', 'ns3::ByteTagIterator', [], is_const=True)
    ## packet.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::Packet::GetNixVector() const [member function]
    cls.add_method('GetNixVector', 'ns3::Ptr< ns3::NixVector >', [], is_const=True)
    ## packet.h (module 'network'): ns3::PacketTagIterator ns3::Packet::GetPacketTagIterator() const [member function]
    cls.add_method('GetPacketTagIterator', 'ns3::PacketTagIterator', [], is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::GetSize() const [member function]
    cls.add_method('GetSize', 'uint32_t', [], is_const=True)
    ## packet.h (module 'network'): uint64_t ns3::Packet::GetUid() const [member function]
    cls.add_method('GetUid', 'uint64_t', [], is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::PeekHeader(ns3::Header & header) const [member function]
    cls.add_method('PeekHeader', 'uint32_t', [param('ns3::Header &', 'header')], is_const=True)
    ## packet.h (module 'network'): bool ns3::Packet::PeekPacketTag(ns3::Tag & tag) const [member function]
    cls.add_method('PeekPacketTag', 'bool', [param('ns3::Tag &', 'tag')], is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::PeekTrailer(ns3::Trailer & trailer) [member function]
    cls.add_method('PeekTrailer', 'uint32_t', [param('ns3::Trailer &', 'trailer')])
    ## packet.h (module 'network'): void ns3::Packet::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::PrintByteTags(std::ostream & os) const [member function]
    cls.add_method('PrintByteTags', 'void', [param('std::ostream &', 'os')], is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::PrintPacketTags(std::ostream & os) const [member function]
    cls.add_method('PrintPacketTags', 'void', [param('std::ostream &', 'os')], is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::RemoveAllByteTags() [member function]
    cls.add_method('RemoveAllByteTags', 'void', [])
    ## packet.h (module 'network'): void ns3::Packet::RemoveAllPacketTags() [member function]
    cls.add_method('RemoveAllPacketTags', 'void', [])
    ## packet.h (module 'network'): void ns3::Packet::RemoveAtEnd(uint32_t size) [member function]
    cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'size')])
    ## packet.h (module 'network'): void ns3::Packet::RemoveAtStart(uint32_t size) [member function]
    cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'size')])
    ## packet.h (module 'network'): uint32_t ns3::Packet::RemoveHeader(ns3::Header & header) [member function]
    cls.add_method('RemoveHeader', 'uint32_t', [param('ns3::Header &', 'header')])
    ## packet.h (module 'network'): bool ns3::Packet::RemovePacketTag(ns3::Tag & tag) [member function]
    cls.add_method('RemovePacketTag', 'bool', [param('ns3::Tag &', 'tag')])
    ## packet.h (module 'network'): uint32_t ns3::Packet::RemoveTrailer(ns3::Trailer & trailer) [member function]
    cls.add_method('RemoveTrailer', 'uint32_t', [param('ns3::Trailer &', 'trailer')])
    ## packet.h (module 'network'): bool ns3::Packet::ReplacePacketTag(ns3::Tag & tag) [member function]
    cls.add_method('ReplacePacketTag', 'bool', [param('ns3::Tag &', 'tag')])
    ## packet.h (module 'network'): uint32_t ns3::Packet::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::SetNixVector(ns3::Ptr<ns3::NixVector> nixVector) [member function]
    cls.add_method('SetNixVector', 'void', [param('ns3::Ptr< ns3::NixVector >', 'nixVector')])
    ## packet.h (module 'network'): std::string ns3::Packet::ToString() const [member function]
    cls.add_method('ToString', 'std::string', [], is_const=True)
    return

def register_Ns3QueueItem_methods(root_module, cls):
    """Register ns3::QueueItem constructor/method bindings on *cls* (root_module unused here)."""
    cls.add_output_stream_operator()
    ## net-device.h (module 'network'): ns3::QueueItem::QueueItem(ns3::Ptr<ns3::Packet> p) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::Packet >', 'p')])
    ## net-device.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::QueueItem::GetPacket() const [member function]
    cls.add_method('GetPacket', 'ns3::Ptr< ns3::Packet >', [], is_const=True)
    ## net-device.h (module 'network'): uint32_t ns3::QueueItem::GetPacketSize() const [member function]
    cls.add_method('GetPacketSize', 'uint32_t', [], is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::QueueItem::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    return

def register_Ns3TapFdNetDeviceHelper_methods(root_module, cls):
    """Register ns3::TapFdNetDeviceHelper constructor/method bindings on *cls* (root_module unused here)."""
    ## tap-fd-net-device-helper.h (module 'fd-net-device'): ns3::TapFdNetDeviceHelper::TapFdNetDeviceHelper(ns3::TapFdNetDeviceHelper const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TapFdNetDeviceHelper const &', 'arg0')])
    ## tap-fd-net-device-helper.h (module 'fd-net-device'): ns3::TapFdNetDeviceHelper::TapFdNetDeviceHelper() [constructor]
    cls.add_constructor([])
    ## tap-fd-net-device-helper.h (module 'fd-net-device'): void ns3::TapFdNetDeviceHelper::SetModePi(bool pi) [member function]
    cls.add_method('SetModePi', 'void', [param('bool', 'pi')])
    ## tap-fd-net-device-helper.h (module 'fd-net-device'): void ns3::TapFdNetDeviceHelper::SetTapIpv4Address(ns3::Ipv4Address address) [member function]
    cls.add_method('SetTapIpv4Address', 'void', [param('ns3::Ipv4Address', 'address')])
    ## tap-fd-net-device-helper.h (module 'fd-net-device'): void ns3::TapFdNetDeviceHelper::SetTapIpv4Mask(ns3::Ipv4Mask mask) [member function]
    cls.add_method('SetTapIpv4Mask', 'void', [param('ns3::Ipv4Mask', 'mask')])
    ## tap-fd-net-device-helper.h (module 'fd-net-device'): void ns3::TapFdNetDeviceHelper::SetTapIpv6Address(ns3::Ipv6Address address) [member function]
    cls.add_method('SetTapIpv6Address', 'void', [param('ns3::Ipv6Address', 'address')])
    ## tap-fd-net-device-helper.h (module 'fd-net-device'): void ns3::TapFdNetDeviceHelper::SetTapIpv6Prefix(int prefix) [member function]
    cls.add_method('SetTapIpv6Prefix', 'void', [param('int', 'prefix')])
    ## tap-fd-net-device-helper.h (module 'fd-net-device'): void ns3::TapFdNetDeviceHelper::SetTapMacAddress(ns3::Mac48Address mac) [member function]
    cls.add_method('SetTapMacAddress', 'void', [param('ns3::Mac48Address', 'mac')])
    ## tap-fd-net-device-helper.h (module 'fd-net-device'): int ns3::TapFdNetDeviceHelper::CreateFileDescriptor() const [member function]
    cls.add_method('CreateFileDescriptor', 'int', [], is_const=True, visibility='protected', is_virtual=True)
    ## tap-fd-net-device-helper.h (module 'fd-net-device'): ns3::Ptr<ns3::NetDevice> ns3::TapFdNetDeviceHelper::InstallPriv(ns3::Ptr<ns3::Node> node) const [member function]
    cls.add_method('InstallPriv', 'ns3::Ptr< ns3::NetDevice >', [param('ns3::Ptr< ns3::Node >', 'node')], is_const=True, visibility='protected', is_virtual=True)
    ## tap-fd-net-device-helper.h (module 'fd-net-device'): void ns3::TapFdNetDeviceHelper::SetFileDescriptor(ns3::Ptr<ns3::FdNetDevice> device) const [member function]
    cls.add_method('SetFileDescriptor', 'void', [param('ns3::Ptr< ns3::FdNetDevice >', 'device')], is_const=True, visibility='protected', is_virtual=True)
    return

def register_Ns3TimeValue_methods(root_module, cls):
    """Register ns3::TimeValue constructor/method bindings on *cls* (continues past this chunk)."""
    ## nstime.h (module 'core'): ns3::TimeValue::TimeValue() [constructor]
    cls.add_constructor([])
    ## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::TimeValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TimeValue const &', 'arg0')])
    ## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::Time const & value) [constructor]
cls.add_constructor([param('ns3::Time const &', 'value')]) ## nstime.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TimeValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## nstime.h (module 'core'): bool ns3::TimeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## nstime.h (module 'core'): ns3::Time ns3::TimeValue::Get() const [member function] cls.add_method('Get', 'ns3::Time', [], is_const=True) ## nstime.h (module 'core'): std::string ns3::TimeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## nstime.h (module 'core'): void ns3::TimeValue::Set(ns3::Time const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Time const &', 'value')]) return def register_Ns3TypeIdChecker_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')]) return def register_Ns3TypeIdValue_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')]) ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor] cls.add_constructor([param('ns3::TypeId 
const &', 'value')]) ## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function] cls.add_method('Get', 'ns3::TypeId', [], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function] cls.add_method('Set', 'void', [param('ns3::TypeId const &', 'value')]) return def register_Ns3AddressChecker_methods(root_module, cls): ## address.h (module 'network'): ns3::AddressChecker::AddressChecker() [constructor] cls.add_constructor([]) ## address.h (module 'network'): ns3::AddressChecker::AddressChecker(ns3::AddressChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::AddressChecker const &', 'arg0')]) return def register_Ns3AddressValue_methods(root_module, cls): ## address.h (module 'network'): ns3::AddressValue::AddressValue() [constructor] cls.add_constructor([]) ## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::AddressValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::AddressValue const &', 'arg0')]) ## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::Address const & value) [constructor] 
cls.add_constructor([param('ns3::Address const &', 'value')]) ## address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::AddressValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## address.h (module 'network'): bool ns3::AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## address.h (module 'network'): ns3::Address ns3::AddressValue::Get() const [member function] cls.add_method('Get', 'ns3::Address', [], is_const=True) ## address.h (module 'network'): std::string ns3::AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## address.h (module 'network'): void ns3::AddressValue::Set(ns3::Address const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Address const &', 'value')]) return def register_Ns3FdNetDevice_methods(root_module, cls): ## fd-net-device.h (module 'fd-net-device'): ns3::FdNetDevice::FdNetDevice() [constructor] cls.add_constructor([]) ## fd-net-device.h (module 'fd-net-device'): void ns3::FdNetDevice::AddLinkChangeCallback(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function] cls.add_method('AddLinkChangeCallback', 'void', [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')], is_virtual=True) ## fd-net-device.h (module 'fd-net-device'): ns3::Address ns3::FdNetDevice::GetAddress() const [member function] 
cls.add_method('GetAddress', 'ns3::Address', [], is_const=True, is_virtual=True) ## fd-net-device.h (module 'fd-net-device'): ns3::Address ns3::FdNetDevice::GetBroadcast() const [member function] cls.add_method('GetBroadcast', 'ns3::Address', [], is_const=True, is_virtual=True) ## fd-net-device.h (module 'fd-net-device'): ns3::Ptr<ns3::Channel> ns3::FdNetDevice::GetChannel() const [member function] cls.add_method('GetChannel', 'ns3::Ptr< ns3::Channel >', [], is_const=True, is_virtual=True) ## fd-net-device.h (module 'fd-net-device'): ns3::FdNetDevice::EncapsulationMode ns3::FdNetDevice::GetEncapsulationMode() const [member function] cls.add_method('GetEncapsulationMode', 'ns3::FdNetDevice::EncapsulationMode', [], is_const=True) ## fd-net-device.h (module 'fd-net-device'): uint32_t ns3::FdNetDevice::GetIfIndex() const [member function] cls.add_method('GetIfIndex', 'uint32_t', [], is_const=True, is_virtual=True) ## fd-net-device.h (module 'fd-net-device'): uint16_t ns3::FdNetDevice::GetMtu() const [member function] cls.add_method('GetMtu', 'uint16_t', [], is_const=True, is_virtual=True) ## fd-net-device.h (module 'fd-net-device'): ns3::Address ns3::FdNetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function] cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv4Address', 'multicastGroup')], is_const=True, is_virtual=True) ## fd-net-device.h (module 'fd-net-device'): ns3::Address ns3::FdNetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function] cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv6Address', 'addr')], is_const=True, is_virtual=True) ## fd-net-device.h (module 'fd-net-device'): ns3::Ptr<ns3::Node> ns3::FdNetDevice::GetNode() const [member function] cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [], is_const=True, is_virtual=True) ## fd-net-device.h (module 'fd-net-device'): static ns3::TypeId ns3::FdNetDevice::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], 
is_static=True) ## fd-net-device.h (module 'fd-net-device'): bool ns3::FdNetDevice::IsBridge() const [member function] cls.add_method('IsBridge', 'bool', [], is_const=True, is_virtual=True) ## fd-net-device.h (module 'fd-net-device'): bool ns3::FdNetDevice::IsBroadcast() const [member function] cls.add_method('IsBroadcast', 'bool', [], is_const=True, is_virtual=True) ## fd-net-device.h (module 'fd-net-device'): bool ns3::FdNetDevice::IsLinkUp() const [member function] cls.add_method('IsLinkUp', 'bool', [], is_const=True, is_virtual=True) ## fd-net-device.h (module 'fd-net-device'): bool ns3::FdNetDevice::IsMulticast() const [member function] cls.add_method('IsMulticast', 'bool', [], is_const=True, is_virtual=True) ## fd-net-device.h (module 'fd-net-device'): bool ns3::FdNetDevice::IsPointToPoint() const [member function] cls.add_method('IsPointToPoint', 'bool', [], is_const=True, is_virtual=True) ## fd-net-device.h (module 'fd-net-device'): bool ns3::FdNetDevice::NeedsArp() const [member function] cls.add_method('NeedsArp', 'bool', [], is_const=True, is_virtual=True) ## fd-net-device.h (module 'fd-net-device'): bool ns3::FdNetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function] cls.add_method('Send', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_virtual=True) ## fd-net-device.h (module 'fd-net-device'): bool ns3::FdNetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function] cls.add_method('SendFrom', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_virtual=True) ## fd-net-device.h (module 'fd-net-device'): void ns3::FdNetDevice::SetAddress(ns3::Address address) [member function] cls.add_method('SetAddress', 
'void', [param('ns3::Address', 'address')], is_virtual=True) ## fd-net-device.h (module 'fd-net-device'): void ns3::FdNetDevice::SetEncapsulationMode(ns3::FdNetDevice::EncapsulationMode mode) [member function] cls.add_method('SetEncapsulationMode', 'void', [param('ns3::FdNetDevice::EncapsulationMode', 'mode')]) ## fd-net-device.h (module 'fd-net-device'): void ns3::FdNetDevice::SetFileDescriptor(int fd) [member function] cls.add_method('SetFileDescriptor', 'void', [param('int', 'fd')]) ## fd-net-device.h (module 'fd-net-device'): void ns3::FdNetDevice::SetIfIndex(uint32_t const index) [member function] cls.add_method('SetIfIndex', 'void', [param('uint32_t const', 'index')], is_virtual=True) ## fd-net-device.h (module 'fd-net-device'): void ns3::FdNetDevice::SetIsBroadcast(bool broadcast) [member function] cls.add_method('SetIsBroadcast', 'void', [param('bool', 'broadcast')], is_virtual=True) ## fd-net-device.h (module 'fd-net-device'): void ns3::FdNetDevice::SetIsMulticast(bool multicast) [member function] cls.add_method('SetIsMulticast', 'void', [param('bool', 'multicast')], is_virtual=True) ## fd-net-device.h (module 'fd-net-device'): bool ns3::FdNetDevice::SetMtu(uint16_t const mtu) [member function] cls.add_method('SetMtu', 'bool', [param('uint16_t const', 'mtu')], is_virtual=True) ## fd-net-device.h (module 'fd-net-device'): void ns3::FdNetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function] cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')], is_virtual=True) ## fd-net-device.h (module 'fd-net-device'): void ns3::FdNetDevice::SetPromiscReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> cb) [member function] cls.add_method('SetPromiscReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, 
ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_virtual=True) ## fd-net-device.h (module 'fd-net-device'): void ns3::FdNetDevice::SetReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function] cls.add_method('SetReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_virtual=True) ## fd-net-device.h (module 'fd-net-device'): void ns3::FdNetDevice::Start(ns3::Time tStart) [member function] cls.add_method('Start', 'void', [param('ns3::Time', 'tStart')]) ## fd-net-device.h (module 'fd-net-device'): void ns3::FdNetDevice::Stop(ns3::Time tStop) [member function] cls.add_method('Stop', 'void', [param('ns3::Time', 'tStop')]) ## fd-net-device.h (module 'fd-net-device'): bool ns3::FdNetDevice::SupportsSendFrom() const [member function] cls.add_method('SupportsSendFrom', 'bool', [], is_const=True, is_virtual=True) ## fd-net-device.h (module 'fd-net-device'): void ns3::FdNetDevice::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True) return def register_Ns3FdNetDeviceFdReader_methods(root_module, cls): ## fd-net-device.h (module 'fd-net-device'): ns3::FdNetDeviceFdReader::FdNetDeviceFdReader(ns3::FdNetDeviceFdReader const & arg0) [copy constructor] cls.add_constructor([param('ns3::FdNetDeviceFdReader const &', 'arg0')]) ## fd-net-device.h (module 'fd-net-device'): ns3::FdNetDeviceFdReader::FdNetDeviceFdReader() [constructor] cls.add_constructor([]) ## fd-net-device.h (module 'fd-net-device'): void ns3::FdNetDeviceFdReader::SetBufferSize(uint32_t bufferSize) [member function] cls.add_method('SetBufferSize', 'void', [param('uint32_t', 
'bufferSize')]) ## fd-net-device.h (module 'fd-net-device'): ns3::FdReader::Data ns3::FdNetDeviceFdReader::DoRead() [member function] cls.add_method('DoRead', 'ns3::FdReader::Data', [], visibility='private', is_virtual=True) return def register_Ns3HashImplementation_methods(root_module, cls): ## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation(ns3::Hash::Implementation const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Implementation const &', 'arg0')]) ## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation() [constructor] cls.add_constructor([]) ## hash-function.h (module 'core'): uint32_t ns3::Hash::Implementation::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_pure_virtual=True, is_virtual=True) ## hash-function.h (module 'core'): uint64_t ns3::Hash::Implementation::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-function.h (module 'core'): void ns3::Hash::Implementation::clear() [member function] cls.add_method('clear', 'void', [], is_pure_virtual=True, is_virtual=True) return def register_Ns3HashFunctionFnv1a_methods(root_module, cls): ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a(ns3::Hash::Function::Fnv1a const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Function::Fnv1a const &', 'arg0')]) ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a() [constructor] cls.add_constructor([]) ## hash-fnv.h (module 'core'): uint32_t ns3::Hash::Function::Fnv1a::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-fnv.h (module 
'core'): uint64_t ns3::Hash::Function::Fnv1a::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-fnv.h (module 'core'): void ns3::Hash::Function::Fnv1a::clear() [member function] cls.add_method('clear', 'void', [], is_virtual=True) return def register_Ns3HashFunctionHash32_methods(root_module, cls): ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Function::Hash32 const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Function::Hash32 const &', 'arg0')]) ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Hash32Function_ptr hp) [constructor] cls.add_constructor([param('ns3::Hash::Hash32Function_ptr', 'hp')]) ## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash32::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-function.h (module 'core'): void ns3::Hash::Function::Hash32::clear() [member function] cls.add_method('clear', 'void', [], is_virtual=True) return def register_Ns3HashFunctionHash64_methods(root_module, cls): ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Function::Hash64 const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Function::Hash64 const &', 'arg0')]) ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Hash64Function_ptr hp) [constructor] cls.add_constructor([param('ns3::Hash::Hash64Function_ptr', 'hp')]) ## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash64::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## 
hash-function.h (module 'core'): uint64_t ns3::Hash::Function::Hash64::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-function.h (module 'core'): void ns3::Hash::Function::Hash64::clear() [member function] cls.add_method('clear', 'void', [], is_virtual=True) return def register_Ns3HashFunctionMurmur3_methods(root_module, cls): ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3(ns3::Hash::Function::Murmur3 const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Function::Murmur3 const &', 'arg0')]) ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3() [constructor] cls.add_constructor([]) ## hash-murmur3.h (module 'core'): uint32_t ns3::Hash::Function::Murmur3::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-murmur3.h (module 'core'): uint64_t ns3::Hash::Function::Murmur3::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-murmur3.h (module 'core'): void ns3::Hash::Function::Murmur3::clear() [member function] cls.add_method('clear', 'void', [], is_virtual=True) return def register_functions(root_module): module = root_module register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module) register_functions_ns3_Hash(module.get_submodule('Hash'), root_module) register_functions_ns3_TracedValueCallback(module.get_submodule('TracedValueCallback'), root_module) return def register_functions_ns3_FatalImpl(module, root_module): return def register_functions_ns3_Hash(module, root_module): 
register_functions_ns3_Hash_Function(module.get_submodule('Function'), root_module) return def register_functions_ns3_Hash_Function(module, root_module): return def register_functions_ns3_TracedValueCallback(module, root_module): return def main(): out = FileCodeSink(sys.stdout) root_module = module_init() register_types(root_module) register_methods(root_module) register_functions(root_module) root_module.generate(out) if __name__ == '__main__': main()
gpl-2.0
gangadhar-kadam/nassimlib
webnotes/utils/datautils.py
34
4907
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt

"""CSV import/export helpers for the webnotes (Frappe) framework.

Reads uploaded/attached CSV files with best-effort encoding detection,
serializes tabular data back to CSV for download, and validates/imports
rows as framework documents.

NOTE: this module is Python 2 code (``unicode``, ``basestring``,
``cStringIO``, ``except E, e`` syntax) and will not run under Python 3.
"""

from __future__ import unicode_literals
import webnotes
from webnotes import msgprint, _
import json
import csv, cStringIO
from webnotes.utils import encode, cstr, cint, flt

def read_csv_content_from_uploaded_file(ignore_encoding=False):
	"""Return the parsed rows of the file uploaded with the current request.

	Prefers ``webnotes.uploaded_file`` (a filesystem path set by the
	framework) when present; otherwise falls back to the file manager's
	in-memory content.  Delegates parsing to :func:`read_csv_content`.
	"""
	if getattr(webnotes, "uploaded_file", None):
		with open(webnotes.uploaded_file, "r") as upfile:
			fcontent = upfile.read()
	else:
		# Local import; presumably avoids a circular import at module
		# load time -- TODO confirm.
		from webnotes.utils.file_manager import get_uploaded_content
		fname, fcontent = get_uploaded_content()
	return read_csv_content(fcontent, ignore_encoding)

def read_csv_content_from_attached_file(doc):
	"""Return the parsed rows of the CSV file attached to *doc*.

	Looks up the "File Data" record attached to the document and reads its
	content.  Raises a bare ``Exception`` (existing behavior) when no file
	is attached or the file cannot be opened.
	"""
	fileid = webnotes.conn.get_value("File Data", {"attached_to_doctype": doc.doctype,
		"attached_to_name": doc.name}, "name")
	if not fileid:
		msgprint("File not attached!")
		raise Exception
	try:
		from webnotes.utils.file_manager import get_file
		# fname is unused; get_file returns a (name, content) pair.
		fname, fcontent = get_file(fileid)
		return read_csv_content(fcontent, webnotes.form_dict.get('ignore_encoding_errors'))
	except Exception, e:
		# NOTE(review): the original exception (and its traceback) is
		# discarded here; a bare Exception is raised in its place.
		webnotes.msgprint("""Unable to open attached file. Please try again.""")
		raise Exception

def read_csv_content(fcontent, ignore_encoding=False):
	"""Decode *fcontent* and parse it as CSV, returning a list of unicode rows.

	Tries utf-8 first, then two common Windows code pages; aborts via
	``msgprint(raise_exception=True)`` if none of them decode the bytes.
	``ignore_encoding`` is accepted for interface compatibility but not
	consulted in this body.
	"""
	rows = []
	decoded = False
	for encoding in ["utf-8", "windows-1250", "windows-1252"]:
		try:
			fcontent = unicode(fcontent, encoding)
			decoded = True
			break
		except UnicodeDecodeError, e:
			continue
	if not decoded:
		webnotes.msgprint(webnotes._("Unknown file encoding. Tried utf-8, windows-1250, windows-1252."),
			raise_exception=True)
	try:
		# csv.reader in Python 2 cannot consume unicode directly, so the
		# text is re-encoded to utf-8 bytes for parsing ...
		reader = csv.reader(fcontent.encode("utf-8").splitlines(True))
		# decode everything
		# ... and each cell is decoded back to unicode and stripped.
		rows = [[unicode(val, "utf-8").strip() for val in row] for row in reader]
		return rows
	except Exception, e:
		webnotes.msgprint("Not a valid Comma Separated Value (CSV File)")
		raise

@webnotes.whitelist()
def send_csv_to_client(args):
	"""Whitelisted endpoint: stage a CSV download in ``webnotes.response``.

	*args* may be a JSON string or a dict with keys ``data`` (rows) and
	``filename``.
	"""
	if isinstance(args, basestring):
		args = json.loads(args)
	args = webnotes._dict(args)
	webnotes.response["result"] = cstr(to_csv(args.data))
	webnotes.response["doctype"] = args.filename
	webnotes.response["type"] = "csv"

def to_csv(data):
	"""Serialize an iterable of rows to a single CSV string."""
	writer = UnicodeWriter()
	for row in data:
		writer.writerow(row)
	return writer.getvalue()

class UnicodeWriter:
	"""Minimal CSV writer that encodes unicode rows before writing.

	Wraps ``csv.writer`` over an in-memory ``cStringIO`` buffer, quoting
	all non-numeric fields.
	"""

	def __init__(self, encoding="utf-8"):
		# Target byte encoding for each cell written.
		self.encoding = encoding
		self.queue = cStringIO.StringIO()
		self.writer = csv.writer(self.queue, quoting=csv.QUOTE_NONNUMERIC)

	def writerow(self, row):
		# encode() (webnotes.utils) converts the row's cells to bytes.
		row = encode(row, self.encoding)
		self.writer.writerow(row)

	def getvalue(self):
		"""Return everything written so far as a byte string."""
		return self.queue.getvalue()

def check_record(d, parenttype=None, doctype_dl=None):
	"""check for mandatory, select options, dates. these should ideally be in doclist

	Validates dict *d* (one imported row) against the doctype's field
	definitions and coerces values in place: Date fields are parsed,
	Int/Check fields go through ``cint`` and Currency/Float through ``flt``.
	Validation failures abort via ``msgprint(raise_exception=1)``.
	"""
	from webnotes.utils.dateutils import parse_date
	if parenttype and not d.get('parent'):
		webnotes.msgprint(_("Parent is required."), raise_exception=1)
	if not doctype_dl:
		# NOTE(review): webnotes.model is not imported in this module; this
		# relies on it having been imported elsewhere -- verify.
		doctype_dl = webnotes.model.doctype.get(d.doctype)
	for key in d:
		docfield = doctype_dl.get_field(key)
		val = d[key]
		if docfield:
			if docfield.reqd and (val=='' or val==None):
				webnotes.msgprint("%s is mandatory." % docfield.label, raise_exception=1)

			if docfield.fieldtype=='Select' and val and docfield.options:
				if docfield.options.startswith('link:'):
					# "link:<Doctype>" options: value must be an existing record.
					link_doctype = docfield.options.split(':')[1]
					if not webnotes.conn.exists(link_doctype, val):
						webnotes.msgprint("%s: %s must be a valid %s" % (docfield.label, val, link_doctype),
							raise_exception=1)
				elif docfield.options == "attach_files:":
					# Attachment selects are not validated here.
					pass
				elif val not in docfield.options.split('\n'):
					webnotes.msgprint("%s must be one of: %s" % (docfield.label,
						", ".join(filter(None, docfield.options.split("\n")))), raise_exception=1)

			if val and docfield.fieldtype=='Date':
				d[key] = parse_date(val)
			elif val and docfield.fieldtype in ["Int", "Check"]:
				d[key] = cint(val)
			elif val and docfield.fieldtype in ["Currency", "Float"]:
				d[key] = flt(val)

def import_doc(d, doctype, overwrite, row_idx, submit=False, ignore_links=False):
	"""import main (non child) document

	Inserts dict *d* as a new *doctype* record, or -- when a record of the
	same name exists and *overwrite* is set -- updates it (using
	``update_after_submit`` for already-submitted docs).  Returns a status
	string containing a link to the affected record; *row_idx* is the
	zero-based source row used for the message.
	"""
	if d.get("name") and webnotes.conn.exists(doctype, d['name']):
		if overwrite:
			bean = webnotes.bean(doctype, d['name'])
			bean.ignore_links = ignore_links
			bean.doc.fields.update(d)
			if d.get("docstatus") == 1:
				bean.update_after_submit()
			else:
				bean.save()
			return 'Updated row (#%d) %s' % (row_idx + 1, getlink(doctype, d['name']))
		else:
			return 'Ignored row (#%d) %s (exists)' % (row_idx + 1, getlink(doctype, d['name']))
	else:
		bean = webnotes.bean([d])
		bean.ignore_links = ignore_links
		bean.insert()
		if submit:
			bean.submit()
		return 'Inserted row (#%d) %s' % (row_idx + 1, getlink(doctype, bean.doc.fields['name']))

def getlink(doctype, name):
	"""Return an HTML anchor linking to the form view of *doctype*/*name*."""
	return '<a href="#Form/%(doctype)s/%(name)s">%(name)s</a>' % locals()
mit
appsembler/django-souvenirs
setup.py
1
1248
from __future__ import absolute_import, unicode_literals from codecs import open import os from setuptools import setup, find_packages import souvenirs as app def read(fname): top = os.path.dirname(__file__) with open(os.path.join(top, fname), encoding='utf-8') as f: return f.read() setup( name="django-souvenirs", version=app.__version__, description='Django app for efficiently measuring usage', long_description=read('README.rst'), license='MIT', platforms=['OS Independent'], classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', ], keywords='django reusable souvenirs metrics usage tracking'.split(), author='Aron Griffis', author_email='aron@scampersand.com', url="https://github.com/appsembler/django-souvenirs", packages=find_packages(), install_requires=['tabulate'], )
mit
vwvww/servo
tests/wpt/mozilla/tests/webgl/conformance-2.0.0/py/lint/lint.py
47
6937
#! /usr/bin/env python
"""Source-tree linter for the WebGL conformance suite.

Walks the repository (or only the files touched by the last commit, or a
given sub-directory), runs filename/permission/content checks on each file,
filters the findings through ``lint.whitelist``, and reports the totals.

NOTE: this script is Python 2 code (``print`` statements, ``iteritems``,
``xrange``, the ``commands`` module) and will not run under Python 3.
"""
import os
import subprocess
import re
import sys
import fnmatch
import commands   # NOTE(review): imported but unused in this file -- verify before removing
from collections import defaultdict
from optparse import OptionParser

# Directory containing this script, and the repository root two levels up.
lint_root = os.path.dirname(os.path.abspath(__file__))
repo_root = os.path.dirname(os.path.dirname(lint_root))

def git(command, *args):
    """Run ``git <command> <args...>`` in repo_root and return its stdout."""
    args = list(args)
    proc_kwargs = {"cwd": repo_root}
    command_line = ["git", command] + args
    try:
        return subprocess.check_output(command_line, **proc_kwargs)
    except subprocess.CalledProcessError:
        # Re-raised unchanged; kept only as an explicit marker that git
        # failures propagate to the caller.
        raise

def iter_files(flag=False, floder=""):
    """Yield repository-relative paths of the files to lint.

    *floder* ("folder"; name kept as-is since it is part of the call
    interface) restricts the walk to that sub-directory.  Otherwise, when
    *flag* is falsy the whole repo is walked; when truthy only files
    changed (and not deleted) in the last commit are yielded via git.
    Temporarily chdirs into repo_root while walking.
    """
    if floder != "" and floder != None:
        os.chdir(repo_root)
        for pardir, subdir, files in os.walk(floder):
            for item in subdir + files:
                if not os.path.isdir(os.path.join(pardir, item)):
                    yield os.path.join(pardir, item)
        os.chdir(lint_root)
    else:
        if not flag:
            os.chdir(repo_root)
            for pardir, subdir, files in os.walk(repo_root):
                for item in subdir + files:
                    if not os.path.isdir(os.path.join(pardir, item)):
                        # Strip the absolute repo_root prefix.
                        yield os.path.join(pardir, item).split(repo_root + "/")[1]
            os.chdir(lint_root)
        else:
            # "--name-status" lines look like "M\tpath"; skip deletions.
            for item in git("diff", "--name-status", "HEAD~1").strip().split("\n"):
                status = item.split("\t")
                if status[0].strip() != "D":
                    yield status[1]

def check_filename_space(path):
    """Return an error tuple if the basename of *path* contains whitespace."""
    bname = os.path.basename(path)
    if re.compile(" ").search(bname):
        return [("FILENAME WHITESPACE", "Filename of %s contains white space" % path, None)]
    return []

def check_permission(path):
    """Return an error tuple if a non-script file is executable."""
    bname = os.path.basename(path)
    # .py and .sh files are allowed to be executable.
    if not re.compile('\.py$|\.sh$').search(bname):
        if os.access(os.path.join(repo_root, path), os.X_OK):
            return [("UNNECESSARY EXECUTABLE PERMISSION", "%s contains unnecessary executable permission" % path, None)]
    return []

def parse_whitelist_file(filename):
    """Parse the whitelist file and return a filter function.

    Each non-comment line is ``ERROR TYPE:glob[:line]``.  The returned
    ``inner(path, errors)`` drops every error matched by the whitelist
    ("*" whitelists all error types for a glob; a missing line number
    whitelists every line).
    """
    data = defaultdict(lambda:defaultdict(set))

    with open(filename) as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith("#"):
                continue
            parts = [item.strip() for item in line.split(":")]
            if len(parts) == 2:
                # No line number given: None means "any line".
                parts.append(None)
            else:
                parts[-1] = int(parts[-1])
            error_type, file_match, line_number = parts
            data[file_match][error_type].add(line_number)

    def inner(path, errors):
        """Return *errors* minus those whitelisted for *path*."""
        whitelisted = [False for item in xrange(len(errors))]
        for file_match, whitelist_errors in data.iteritems():
            if fnmatch.fnmatch(path, file_match):
                for i, (error_type, msg, line) in enumerate(errors):
                    if "*" in whitelist_errors:
                        whitelisted[i] = True
                    elif error_type in whitelist_errors:
                        allowed_lines = whitelist_errors[error_type]
                        if None in allowed_lines or line in allowed_lines:
                            whitelisted[i] = True
        return [item for i, item in enumerate(errors) if not whitelisted[i]]
    return inner

# Lazily-built whitelist filter (see whitelist_errors).
_whitelist_fn = None
def whitelist_errors(path, errors):
    """Filter *errors* through lint.whitelist, parsing it on first use."""
    global _whitelist_fn
    if _whitelist_fn is None:
        _whitelist_fn = parse_whitelist_file(os.path.join(lint_root, "lint.whitelist"))
    return _whitelist_fn(path, errors)

class Regexp(object):
    """Base class for per-line regex checks.

    Subclasses set ``pattern`` (the regex), ``error`` (the report label)
    and optionally ``file_extensions`` to restrict which files apply.
    """
    pattern = None
    file_extensions = None
    error = None
    _re = None

    def __init__(self):
        self._re = re.compile(self.pattern)

    def applies(self, path):
        """Return True if this check applies to *path*'s extension."""
        return (self.file_extensions is None or
                os.path.splitext(path)[1] in self.file_extensions)

    def search(self, line):
        return self._re.search(line)

class TrailingWhitespaceRegexp(Regexp):
    # Flags a space immediately before the end of line.
    pattern = " $"
    error = "TRAILING WHITESPACE"

class TabsRegexp(Regexp):
    # Flags a tab used for indentation.
    pattern = "^\t"
    error = "INDENT TABS"

class CRRegexp(Regexp):
    # Flags DOS-style carriage returns at end of line.
    pattern = "\r$"
    error = "CR AT EOL"

# One instance of each line check, shared by all files.
regexps = [item() for item in [TrailingWhitespaceRegexp, TabsRegexp, CRRegexp]]

def check_regexp_line(path, f):
    """Run every applicable line regex over open file *f*; return errors."""
    errors = []
    applicable_regexps = [regexp for regexp in regexps if regexp.applies(path)]
    for i, line in enumerate(f):
        for regexp in applicable_regexps:
            if regexp.search(line):
                errors.append((regexp.error, "%s line %i" % (path, i+1), i+1))
    return errors

def output_errors(errors):
    """Print one "TYPE: message" line per error."""
    for error_type, error, line_number in errors:
        print "%s: %s" % (error_type, error)

def output_error_count(error_count):
    """Print a summary like "There were N errors (TYPE: n ...)"."""
    if not error_count:
        return
    by_type = " ".join("%s: %d" % item for item in error_count.iteritems())
    count = sum(error_count.values())
    if count == 1:
        print "There was 1 error (%s)" % (by_type,)
    else:
        print "There were %d errors (%s)" % (count, by_type)

def main():
    """Parse options, lint the selected files, and return the error total."""
    global repo_root
    error_count = defaultdict(int)

    parser = OptionParser()
    parser.add_option('-p', '--pull', dest="pull_request", action='store_true', default=False)
    parser.add_option("-d", '--dir', dest="dir", help="specify the checking dir, e.g. tools")
    parser.add_option("-r", '--repo', dest="repo", help="specify the repo, e.g. WebGL")
    options, args = parser.parse_args()
    if options.pull_request == True:
        # Pull-request mode: lint against the sibling "WebGL" checkout and
        # pass the truthy value through to iter_files as the git-diff flag.
        options.pull_request = "WebGL"
        repo_root = repo_root.replace("WebGL/sdk/tests", options.pull_request)
    if options.repo == "" or options.repo == None:
        options.repo = "WebGL/sdk/tests"
    # NOTE(review): applied unconditionally -- substitutes the chosen repo
    # into the default path (a no-op when the default repo is kept).
    repo_root = repo_root.replace("WebGL/sdk/tests", options.repo)

    def run_lint(path, fn, *args):
        # Run one lint function, apply the whitelist, report and tally.
        errors = whitelist_errors(path, fn(path, *args))
        output_errors(errors)
        for error_type, error, line in errors:
            error_count[error_type] += 1

    for path in iter_files(options.pull_request, options.dir):
        abs_path = os.path.join(repo_root, path)
        if not os.path.exists(abs_path):
            continue

        for path_fn in file_path_lints:
            run_lint(path, path_fn)
        for state_fn in file_state_lints:
            run_lint(path, state_fn)

        if not os.path.isdir(abs_path):
            # Only web/shader/script sources get content (per-line) checks.
            if re.compile('\.html$|\.htm$|\.xhtml$|\.xhtm$|\.frag$|\.vert$|\.js$').search(abs_path):
                with open(abs_path) as f:
                    for file_fn in file_content_lints:
                        run_lint(path, file_fn, f)
                        # Rewind so the next content check reads from the top.
                        f.seek(0)

    output_error_count(error_count)
    return sum(error_count.itervalues())

# Registries of lint functions by category: path-based, content-based,
# and filesystem-state-based checks.
file_path_lints = [check_filename_space]
file_content_lints = [check_regexp_line]
file_state_lints = [check_permission]

if __name__ == "__main__":
    error_count = main()
    if error_count > 0:
        sys.exit(1)
mpl-2.0
T-R0D/JustForFun
adventofcode/Day22/wizard_simulator_20xx.py
1
7445
"""Advent of Code Day 22: simulate the wizard fight and find the cheapest win.

Depth-first search over immutable-ish GameState snapshots; each cast spell
produces a new state, and `best` tracks the least mana spent on any win.

FIX: the original affordability checks used `available_mana > cost`, which
wrongly forbade casting a spell with *exactly* enough mana (e.g. 53 mana and
Magic Missile).  All checks now use `>=`, matching the puzzle rules.
"""
import copy


class GameState(object):
    """One snapshot of the fight: turn counter, mana spent, both combatants,
    remaining effect timers, and a human-readable history for debugging."""

    def __init__(self, player, boss):
        self.turn = 0
        self.mana_used = 0
        # Deep-copy combatants so branches of the search don't share hp/mana.
        self.player = copy.deepcopy(player)
        self.boss = copy.deepcopy(boss)
        self.shield_effect = 0
        self.poison_effect = 0
        self.recharge_effect = 0
        self.history = []

    @classmethod
    def from_state(cls, other):
        """Alternate constructor: an independent copy of another state."""
        new_state = cls(other.player, other.boss)
        new_state.turn = other.turn
        new_state.mana_used = other.mana_used
        new_state.shield_effect = other.shield_effect
        new_state.poison_effect = other.poison_effect
        new_state.recharge_effect = other.recharge_effect
        new_state.history = copy.deepcopy(other.history)
        return new_state

    def apply_effects(self):
        """Tick all active effects in place (start-of-turn processing)."""
        if self.shield_effect:
            self.shield_effect -= 1
        if self.poison_effect:
            self.boss.hp -= 3
            self.poison_effect -= 1
            self.history.append("Poison does 3 damage to boss")
        if self.recharge_effect:
            self.player.mana += 101
            self.recharge_effect -= 1
            self.history.append("Recharge gives player 101 mana")

    # Each spell returns a NEW state (cost deducted, effect applied, turn++).

    def magic_missile(self):
        """Cost 53: instantly deal 4 damage."""
        new_state = GameState.from_state(self)
        new_state.mana_used += 53
        new_state.player.mana -= 53
        new_state.boss.hp -= 4
        new_state.turn += 1
        new_state.history.append("Magic Missile does 4 damage to boss")
        return new_state

    def drain(self):
        """Cost 73: deal 2 damage and heal the player 2 hp."""
        new_state = GameState.from_state(self)
        new_state.mana_used += 73
        new_state.player.mana -= 73
        new_state.player.hp += 2
        new_state.boss.hp -= 2
        new_state.turn += 1
        new_state.history.append("Drain transfers 2 hp from boss to player")
        return new_state

    def shield(self):
        """Cost 113: 6-turn armor effect (-7 boss damage while active)."""
        new_state = GameState.from_state(self)
        new_state.mana_used += 113
        new_state.player.mana -= 113
        new_state.shield_effect = 6
        new_state.turn += 1
        new_state.history.append("Player casts Shield")
        return new_state

    def poison(self):
        """Cost 173: 6-turn effect dealing 3 damage per tick."""
        new_state = GameState.from_state(self)
        new_state.mana_used += 173
        new_state.player.mana -= 173
        new_state.poison_effect = 6
        new_state.turn += 1
        new_state.history.append("Player casts Poison")
        return new_state

    def recharge(self):
        """Cost 229: 5-turn effect restoring 101 mana per tick."""
        new_state = GameState.from_state(self)
        new_state.mana_used += 229
        new_state.player.mana -= 229
        new_state.recharge_effect = 5
        new_state.turn += 1
        new_state.history.append("Player casts Recharge")
        return new_state

    def boss_attack(self):
        """Boss turn: attack reduced by 7 while Shield is active."""
        new_state = GameState.from_state(self)
        damage = new_state.boss.attack
        if self.shield_effect:
            damage -= 7
        new_state.player.hp -= damage
        new_state.turn += 1
        new_state.history.append("Boss does {} damage to Player".format(damage))
        return new_state

    def __str__(self):
        # Also serves as the dedupe key for the search's `explored` set.
        return "turn: {} {} - player: {} {} - boss: {} - {} {} {}".format(
            self.turn, self.mana_used, self.player.hp, self.player.mana,
            self.boss.hp,
            self.shield_effect, self.poison_effect, self.recharge_effect)


class Player(object):
    """Player stats; string inputs are coerced to int."""

    def __init__(self, hp, mana, armor):
        self.hp = int(hp)
        self.mana = int(mana)
        self.armor = int(armor)


class Boss(object):
    """Boss stats; string inputs are coerced to int."""

    def __init__(self, hp, attack):
        self.hp = int(hp)
        self.attack = int(attack)


def main():
    boss = None
    with open('input.txt') as input_file:
        # File is "Hit Points: X\nDamage: Y"; collapse it to [label, X, label, Y].
        parts = input_file.read().replace(' ', '').replace('\n', ':').split(':')
        boss = Boss(parts[1], parts[3])
    part_1(boss)
    part_2(boss)


def part_1(boss):
    """DFS for the minimum mana spent to win (normal mode)."""
    initial_state = GameState(Player(50, 500, 0), boss)
    stack = [initial_state]
    best = 9999
    explored = set()
    while stack:
        current_state = stack.pop()
        current_state.history.append(str(current_state))
        # Prune: already-seen state, over budget, or the player is dead.
        if str(current_state) in explored or current_state.mana_used > best or \
                current_state.player.hp <= 0:
            continue
        explored.add(str(current_state))
        if current_state.boss.hp <= 0:
            if current_state.mana_used < best:
                best = current_state.mana_used
        else:
            current_state.apply_effects()
            if current_state.boss.hp <= 0:
                if current_state.mana_used < best:
                    best = current_state.mana_used
            if current_state.turn % 2 == 0:
                # Player's turn: branch on every affordable, castable spell.
                # FIX: ">=" so a spell is castable with exactly enough mana.
                available_mana = current_state.player.mana
                if available_mana >= 53:
                    stack.append(current_state.magic_missile())
                if available_mana >= 73:
                    stack.append(current_state.drain())
                if available_mana >= 113 and current_state.shield_effect == 0:
                    stack.append(current_state.shield())
                if available_mana >= 173 and current_state.poison_effect == 0:
                    stack.append(current_state.poison())
                if available_mana >= 229 and current_state.recharge_effect == 0:
                    stack.append(current_state.recharge())
            else:
                stack.append(current_state.boss_attack())
    print("The boss can be defeated spending {} mana.".format(best))


def part_2(boss):
    """Hard mode: the player additionally loses 1 hp at the start of each of
    the player's turns, before effects tick."""
    initial_state = GameState(Player(50, 500, 0), boss)
    stack = [initial_state]
    best = 999999999999999
    explored = set()
    while stack:
        current_state = stack.pop()
        current_state.history.append(str(current_state))
        if str(current_state) in explored or current_state.mana_used > best or \
                current_state.player.hp <= 0:
            continue
        explored.add(str(current_state))
        if current_state.boss.hp <= 0 and current_state.mana_used < best:
            best = current_state.mana_used
        else:
            if current_state.turn % 2 == 0:
                current_state.player.hp -= 1  # hard-mode bleed
                if current_state.player.hp <= 0:
                    continue
                current_state.apply_effects()
                if current_state.boss.hp <= 0 and current_state.mana_used < best:
                    best = current_state.mana_used
                # FIX: ">=" so a spell is castable with exactly enough mana.
                available_mana = current_state.player.mana
                if available_mana >= 113 and current_state.shield_effect == 0:
                    stack.append(current_state.shield())
                if available_mana >= 229 and current_state.recharge_effect == 0:
                    stack.append(current_state.recharge())
                if available_mana >= 173 and current_state.poison_effect == 0:
                    stack.append(current_state.poison())
                if available_mana >= 73:
                    stack.append(current_state.drain())
                if available_mana >= 53:
                    stack.append(current_state.magic_missile())
            else:
                current_state.apply_effects()
                if current_state.boss.hp <= 0 and current_state.mana_used < best:
                    best = current_state.mana_used
                stack.append(current_state.boss_attack())
    print("The boss can be defeated spending {} mana in hard mode.".format(best))


if __name__ == '__main__':
    main()
gpl-2.0
hottwaj/django
django/db/backends/postgresql/client.py
346
2112
# PostgreSQL database shell client for `manage.py dbshell`.
# Launches psql with connection parameters from the Django settings dict,
# passing the password via a temporary .pgpass file (psql has no password
# flag) exposed through the PGPASSFILE environment variable.
import os
import subprocess

from django.core.files.temp import NamedTemporaryFile
from django.db.backends.base.client import BaseDatabaseClient
from django.utils.six import print_


def _escape_pgpass(txt):
    """
    Escape a fragment of a PostgreSQL .pgpass file.
    """
    # .pgpass uses ':' as its field separator and '\' as its escape char,
    # so both must be backslash-escaped (backslashes first).
    return txt.replace('\\', '\\\\').replace(':', '\\:')


class DatabaseClient(BaseDatabaseClient):
    executable_name = 'psql'

    @classmethod
    def runshell_db(cls, settings_dict):
        """Run psql against the database described by settings_dict,
        blocking until the interactive shell exits."""
        args = [cls.executable_name]

        host = settings_dict.get('HOST', '')
        port = settings_dict.get('PORT', '')
        name = settings_dict.get('NAME', '')
        user = settings_dict.get('USER', '')
        passwd = settings_dict.get('PASSWORD', '')

        if user:
            args += ['-U', user]
        if host:
            args += ['-h', host]
        if port:
            args += ['-p', str(port)]
        args += [name]

        temp_pgpass = None
        try:
            if passwd:
                # Create temporary .pgpass file.
                temp_pgpass = NamedTemporaryFile(mode='w+')
                try:
                    # One .pgpass line: host:port:db:user:password, with '*'
                    # as the wildcard for empty fields. flush=True ensures the
                    # line is on disk before psql reads the file.
                    print_(
                        _escape_pgpass(host) or '*',
                        str(port) or '*',
                        _escape_pgpass(name) or '*',
                        _escape_pgpass(user) or '*',
                        _escape_pgpass(passwd),
                        file=temp_pgpass,
                        sep=':',
                        flush=True,
                    )
                    os.environ['PGPASSFILE'] = temp_pgpass.name
                except UnicodeEncodeError:
                    # If the current locale can't encode the data, we let
                    # the user input the password manually.
                    pass
            subprocess.call(args)
        finally:
            if temp_pgpass:
                # Closing the NamedTemporaryFile also deletes it.
                temp_pgpass.close()
                if 'PGPASSFILE' in os.environ:  # unit tests need cleanup
                    del os.environ['PGPASSFILE']

    def runshell(self):
        DatabaseClient.runshell_db(self.connection.settings_dict)
bsd-3-clause
adrienbrault/home-assistant
homeassistant/components/stiebel_eltron/climate.py
5
5661
"""Support for stiebel_eltron climate platform.""" import logging from homeassistant.components.climate import ClimateEntity from homeassistant.components.climate.const import ( HVAC_MODE_AUTO, HVAC_MODE_HEAT, HVAC_MODE_OFF, PRESET_ECO, SUPPORT_PRESET_MODE, SUPPORT_TARGET_TEMPERATURE, ) from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS from . import DOMAIN as STE_DOMAIN DEPENDENCIES = ["stiebel_eltron"] _LOGGER = logging.getLogger(__name__) PRESET_DAY = "day" PRESET_SETBACK = "setback" PRESET_EMERGENCY = "emergency" SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE SUPPORT_HVAC = [HVAC_MODE_AUTO, HVAC_MODE_HEAT, HVAC_MODE_OFF] SUPPORT_PRESET = [PRESET_ECO, PRESET_DAY, PRESET_EMERGENCY, PRESET_SETBACK] # Mapping STIEBEL ELTRON states to homeassistant states/preset. STE_TO_HA_HVAC = { "AUTOMATIC": HVAC_MODE_AUTO, "MANUAL MODE": HVAC_MODE_HEAT, "STANDBY": HVAC_MODE_AUTO, "DAY MODE": HVAC_MODE_AUTO, "SETBACK MODE": HVAC_MODE_AUTO, "DHW": HVAC_MODE_OFF, "EMERGENCY OPERATION": HVAC_MODE_AUTO, } STE_TO_HA_PRESET = { "STANDBY": PRESET_ECO, "DAY MODE": PRESET_DAY, "SETBACK MODE": PRESET_SETBACK, "EMERGENCY OPERATION": PRESET_EMERGENCY, } HA_TO_STE_HVAC = { HVAC_MODE_AUTO: "AUTOMATIC", HVAC_MODE_HEAT: "MANUAL MODE", HVAC_MODE_OFF: "DHW", } HA_TO_STE_PRESET = {k: i for i, k in STE_TO_HA_PRESET.items()} def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the StiebelEltron platform.""" name = hass.data[STE_DOMAIN]["name"] ste_data = hass.data[STE_DOMAIN]["ste_data"] add_entities([StiebelEltron(name, ste_data)], True) class StiebelEltron(ClimateEntity): """Representation of a STIEBEL ELTRON heat pump.""" def __init__(self, name, ste_data): """Initialize the unit.""" self._name = name self._target_temperature = None self._current_temperature = None self._current_humidity = None self._operation = None self._filter_alarm = None self._force_update = False self._ste_data = ste_data @property def supported_features(self): 
"""Return the list of supported features.""" return SUPPORT_FLAGS def update(self): """Update unit attributes.""" self._ste_data.update(no_throttle=self._force_update) self._force_update = False self._target_temperature = self._ste_data.api.get_target_temp() self._current_temperature = self._ste_data.api.get_current_temp() self._current_humidity = self._ste_data.api.get_current_humidity() self._filter_alarm = self._ste_data.api.get_filter_alarm_status() self._operation = self._ste_data.api.get_operation() _LOGGER.debug( "Update %s, current temp: %s", self._name, self._current_temperature ) @property def extra_state_attributes(self): """Return device specific state attributes.""" return {"filter_alarm": self._filter_alarm} @property def name(self): """Return the name of the climate device.""" return self._name # Handle SUPPORT_TARGET_TEMPERATURE @property def temperature_unit(self): """Return the unit of measurement.""" return TEMP_CELSIUS @property def current_temperature(self): """Return the current temperature.""" return self._current_temperature @property def target_temperature(self): """Return the temperature we try to reach.""" return self._target_temperature @property def target_temperature_step(self): """Return the supported step of target temperature.""" return 0.1 @property def min_temp(self): """Return the minimum temperature.""" return 10.0 @property def max_temp(self): """Return the maximum temperature.""" return 30.0 @property def current_humidity(self): """Return the current humidity.""" return float(f"{self._current_humidity:.1f}") @property def hvac_modes(self): """List of the operation modes.""" return SUPPORT_HVAC @property def hvac_mode(self): """Return current operation ie. 
heat, cool, idle.""" return STE_TO_HA_HVAC.get(self._operation) @property def preset_mode(self): """Return the current preset mode, e.g., home, away, temp.""" return STE_TO_HA_PRESET.get(self._operation) @property def preset_modes(self): """Return a list of available preset modes.""" return SUPPORT_PRESET def set_hvac_mode(self, hvac_mode): """Set new operation mode.""" if self.preset_mode: return new_mode = HA_TO_STE_HVAC.get(hvac_mode) _LOGGER.debug("set_hvac_mode: %s -> %s", self._operation, new_mode) self._ste_data.api.set_operation(new_mode) self._force_update = True def set_temperature(self, **kwargs): """Set new target temperature.""" target_temperature = kwargs.get(ATTR_TEMPERATURE) if target_temperature is not None: _LOGGER.debug("set_temperature: %s", target_temperature) self._ste_data.api.set_target_temp(target_temperature) self._force_update = True def set_preset_mode(self, preset_mode: str): """Set new preset mode.""" new_mode = HA_TO_STE_PRESET.get(preset_mode) _LOGGER.debug("set_hvac_mode: %s -> %s", self._operation, new_mode) self._ste_data.api.set_operation(new_mode) self._force_update = True
mit
bowang/tensorflow
tensorflow/tools/dist_test/python/census_widendeep.py
42
11900
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Distributed training and evaluation of a wide and deep model.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import json import os import sys from six.moves import urllib import tensorflow as tf from tensorflow.contrib.learn.python.learn import learn_runner from tensorflow.contrib.learn.python.learn.estimators import run_config # Constants: Data download URLs TRAIN_DATA_URL = "http://mlr.cs.umass.edu/ml/machine-learning-databases/adult/adult.data" TEST_DATA_URL = "http://mlr.cs.umass.edu/ml/machine-learning-databases/adult/adult.test" # Define features for the model def census_model_config(): """Configuration for the census Wide & Deep model. Returns: columns: Column names to retrieve from the data source label_column: Name of the label column wide_columns: List of wide columns deep_columns: List of deep columns categorical_column_names: Names of the categorical columns continuous_column_names: Names of the continuous columns """ # 1. Categorical base columns. 
gender = tf.contrib.layers.sparse_column_with_keys( column_name="gender", keys=["female", "male"]) race = tf.contrib.layers.sparse_column_with_keys( column_name="race", keys=["Amer-Indian-Eskimo", "Asian-Pac-Islander", "Black", "Other", "White"]) education = tf.contrib.layers.sparse_column_with_hash_bucket( "education", hash_bucket_size=1000) marital_status = tf.contrib.layers.sparse_column_with_hash_bucket( "marital_status", hash_bucket_size=100) relationship = tf.contrib.layers.sparse_column_with_hash_bucket( "relationship", hash_bucket_size=100) workclass = tf.contrib.layers.sparse_column_with_hash_bucket( "workclass", hash_bucket_size=100) occupation = tf.contrib.layers.sparse_column_with_hash_bucket( "occupation", hash_bucket_size=1000) native_country = tf.contrib.layers.sparse_column_with_hash_bucket( "native_country", hash_bucket_size=1000) # 2. Continuous base columns. age = tf.contrib.layers.real_valued_column("age") age_buckets = tf.contrib.layers.bucketized_column( age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65]) education_num = tf.contrib.layers.real_valued_column("education_num") capital_gain = tf.contrib.layers.real_valued_column("capital_gain") capital_loss = tf.contrib.layers.real_valued_column("capital_loss") hours_per_week = tf.contrib.layers.real_valued_column("hours_per_week") wide_columns = [ gender, native_country, education, occupation, workclass, marital_status, relationship, age_buckets, tf.contrib.layers.crossed_column([education, occupation], hash_bucket_size=int(1e4)), tf.contrib.layers.crossed_column([native_country, occupation], hash_bucket_size=int(1e4)), tf.contrib.layers.crossed_column([age_buckets, race, occupation], hash_bucket_size=int(1e6))] deep_columns = [ tf.contrib.layers.embedding_column(workclass, dimension=8), tf.contrib.layers.embedding_column(education, dimension=8), tf.contrib.layers.embedding_column(marital_status, dimension=8), tf.contrib.layers.embedding_column(gender, dimension=8), 
tf.contrib.layers.embedding_column(relationship, dimension=8), tf.contrib.layers.embedding_column(race, dimension=8), tf.contrib.layers.embedding_column(native_country, dimension=8), tf.contrib.layers.embedding_column(occupation, dimension=8), age, education_num, capital_gain, capital_loss, hours_per_week] # Define the column names for the data sets. columns = ["age", "workclass", "fnlwgt", "education", "education_num", "marital_status", "occupation", "relationship", "race", "gender", "capital_gain", "capital_loss", "hours_per_week", "native_country", "income_bracket"] label_column = "label" categorical_columns = ["workclass", "education", "marital_status", "occupation", "relationship", "race", "gender", "native_country"] continuous_columns = ["age", "education_num", "capital_gain", "capital_loss", "hours_per_week"] return (columns, label_column, wide_columns, deep_columns, categorical_columns, continuous_columns) class CensusDataSource(object): """Source of census data.""" def __init__(self, data_dir, train_data_url, test_data_url, columns, label_column, categorical_columns, continuous_columns): """Constructor of CensusDataSource. Args: data_dir: Directory to save/load the data files train_data_url: URL from which the training data can be downloaded test_data_url: URL from which the test data can be downloaded columns: Columns to retrieve from the data files (A list of strings) label_column: Name of the label column categorical_columns: Names of the categorical columns (A list of strings) continuous_columns: Names of the continuous columns (A list of strings) """ # Retrieve data from disk (if available) or download from the web. 
train_file_path = os.path.join(data_dir, "adult.data") if os.path.isfile(train_file_path): print("Loading training data from file: %s" % train_file_path) train_file = open(train_file_path) else: urllib.urlretrieve(train_data_url, train_file_path) test_file_path = os.path.join(data_dir, "adult.test") if os.path.isfile(test_file_path): print("Loading test data from file: %s" % test_file_path) test_file = open(test_file_path) else: test_file = open(test_file_path) urllib.urlretrieve(test_data_url, test_file_path) # Read the training and testing data sets into Pandas DataFrame. import pandas # pylint: disable=g-import-not-at-top self._df_train = pandas.read_csv(train_file, names=columns, skipinitialspace=True) self._df_test = pandas.read_csv(test_file, names=columns, skipinitialspace=True, skiprows=1) # Remove the NaN values in the last rows of the tables self._df_train = self._df_train[:-1] self._df_test = self._df_test[:-1] # Apply the threshold to get the labels. income_thresh = lambda x: ">50K" in x self._df_train[label_column] = ( self._df_train["income_bracket"].apply(income_thresh)).astype(int) self._df_test[label_column] = ( self._df_test["income_bracket"].apply(income_thresh)).astype(int) self.label_column = label_column self.categorical_columns = categorical_columns self.continuous_columns = continuous_columns def input_train_fn(self): return self._input_fn(self._df_train) def input_test_fn(self): return self._input_fn(self._df_test) # TODO(cais): Turn into minibatch feeder def _input_fn(self, df): """Input data function. Creates a dictionary mapping from each continuous feature column name (k) to the values of that column stored in a constant Tensor. Args: df: data feed Returns: feature columns and labels """ continuous_cols = {k: tf.constant(df[k].values) for k in self.continuous_columns} # Creates a dictionary mapping from each categorical feature column name (k) # to the values of that column stored in a tf.SparseTensor. 
categorical_cols = { k: tf.SparseTensor( indices=[[i, 0] for i in range(df[k].size)], values=df[k].values, dense_shape=[df[k].size, 1]) for k in self.categorical_columns} # Merges the two dictionaries into one. feature_cols = dict(continuous_cols.items() + categorical_cols.items()) # Converts the label column into a constant Tensor. label = tf.constant(df[self.label_column].values) # Returns the feature columns and the label. return feature_cols, label def _create_experiment_fn(output_dir): # pylint: disable=unused-argument """Experiment creation function.""" (columns, label_column, wide_columns, deep_columns, categorical_columns, continuous_columns) = census_model_config() census_data_source = CensusDataSource(FLAGS.data_dir, TRAIN_DATA_URL, TEST_DATA_URL, columns, label_column, categorical_columns, continuous_columns) os.environ["TF_CONFIG"] = json.dumps({ "cluster": { tf.contrib.learn.TaskType.PS: ["fake_ps"] * FLAGS.num_parameter_servers }, "task": { "index": FLAGS.worker_index } }) config = run_config.RunConfig(master=FLAGS.master_grpc_url) estimator = tf.contrib.learn.DNNLinearCombinedClassifier( model_dir=FLAGS.model_dir, linear_feature_columns=wide_columns, dnn_feature_columns=deep_columns, dnn_hidden_units=[5], config=config) return tf.contrib.learn.Experiment( estimator=estimator, train_input_fn=census_data_source.input_train_fn, eval_input_fn=census_data_source.input_test_fn, train_steps=FLAGS.train_steps, eval_steps=FLAGS.eval_steps ) def main(unused_argv): print("Worker index: %d" % FLAGS.worker_index) learn_runner.run(experiment_fn=_create_experiment_fn, output_dir=FLAGS.output_dir, schedule=FLAGS.schedule) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.register("type", "bool", lambda v: v.lower() == "true") parser.add_argument( "--data_dir", type=str, default="/tmp/census-data", help="Directory for storing the cesnsus data" ) parser.add_argument( "--model_dir", type=str, default="/tmp/census_wide_and_deep_model", help="Directory 
for storing the model" ) parser.add_argument( "--output_dir", type=str, default="", help="Base output directory." ) parser.add_argument( "--schedule", type=str, default="local_run", help="Schedule to run for this experiment." ) parser.add_argument( "--master_grpc_url", type=str, default="", help="URL to master GRPC tensorflow server, e.g.,grpc://127.0.0.1:2222" ) parser.add_argument( "--num_parameter_servers", type=int, default=0, help="Number of parameter servers" ) parser.add_argument( "--worker_index", type=int, default=0, help="Worker index (>=0)" ) parser.add_argument( "--train_steps", type=int, default=1000, help="Number of training steps" ) parser.add_argument( "--eval_steps", type=int, default=1, help="Number of evaluation steps" ) global FLAGS # pylint:disable=global-at-module-level FLAGS, unparsed = parser.parse_known_args() tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
apache-2.0
cainmatt/django
tests/utils_tests/test_datastructures.py
262
4154
""" Tests for stuff in django.utils.datastructures. """ import copy from django.test import SimpleTestCase from django.utils import six from django.utils.datastructures import ( DictWrapper, ImmutableList, MultiValueDict, MultiValueDictKeyError, OrderedSet, ) class OrderedSetTests(SimpleTestCase): def test_bool(self): # Refs #23664 s = OrderedSet() self.assertFalse(s) s.add(1) self.assertTrue(s) def test_len(self): s = OrderedSet() self.assertEqual(len(s), 0) s.add(1) s.add(2) s.add(2) self.assertEqual(len(s), 2) class MultiValueDictTests(SimpleTestCase): def test_multivaluedict(self): d = MultiValueDict({'name': ['Adrian', 'Simon'], 'position': ['Developer']}) self.assertEqual(d['name'], 'Simon') self.assertEqual(d.get('name'), 'Simon') self.assertEqual(d.getlist('name'), ['Adrian', 'Simon']) self.assertEqual( sorted(six.iteritems(d)), [('name', 'Simon'), ('position', 'Developer')] ) self.assertEqual( sorted(six.iterlists(d)), [('name', ['Adrian', 'Simon']), ('position', ['Developer'])] ) six.assertRaisesRegex(self, MultiValueDictKeyError, 'lastname', d.__getitem__, 'lastname') self.assertEqual(d.get('lastname'), None) self.assertEqual(d.get('lastname', 'nonexistent'), 'nonexistent') self.assertEqual(d.getlist('lastname'), []) self.assertEqual(d.getlist('doesnotexist', ['Adrian', 'Simon']), ['Adrian', 'Simon']) d.setlist('lastname', ['Holovaty', 'Willison']) self.assertEqual(d.getlist('lastname'), ['Holovaty', 'Willison']) self.assertEqual(sorted(six.itervalues(d)), ['Developer', 'Simon', 'Willison']) def test_appendlist(self): d = MultiValueDict() d.appendlist('name', 'Adrian') d.appendlist('name', 'Simon') self.assertEqual(d.getlist('name'), ['Adrian', 'Simon']) def test_copy(self): for copy_func in [copy.copy, lambda d: d.copy()]: d1 = MultiValueDict({ "developers": ["Carl", "Fred"] }) self.assertEqual(d1["developers"], "Fred") d2 = copy_func(d1) d2.update({"developers": "Groucho"}) self.assertEqual(d2["developers"], "Groucho") 
self.assertEqual(d1["developers"], "Fred") d1 = MultiValueDict({ "key": [[]] }) self.assertEqual(d1["key"], []) d2 = copy_func(d1) d2["key"].append("Penguin") self.assertEqual(d1["key"], ["Penguin"]) self.assertEqual(d2["key"], ["Penguin"]) def test_dict_translation(self): mvd = MultiValueDict({ 'devs': ['Bob', 'Joe'], 'pm': ['Rory'], }) d = mvd.dict() self.assertEqual(sorted(six.iterkeys(d)), sorted(six.iterkeys(mvd))) for key in six.iterkeys(mvd): self.assertEqual(d[key], mvd[key]) self.assertEqual({}, MultiValueDict().dict()) class ImmutableListTests(SimpleTestCase): def test_sort(self): d = ImmutableList(range(10)) # AttributeError: ImmutableList object is immutable. self.assertRaisesMessage(AttributeError, 'ImmutableList object is immutable.', d.sort) self.assertEqual(repr(d), '(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)') def test_custom_warning(self): d = ImmutableList(range(10), warning="Object is immutable!") self.assertEqual(d[1], 1) # AttributeError: Object is immutable! self.assertRaisesMessage(AttributeError, 'Object is immutable!', d.__setitem__, 1, 'test') class DictWrapperTests(SimpleTestCase): def test_dictwrapper(self): f = lambda x: "*%s" % x d = DictWrapper({'a': 'a'}, f, 'xx_') self.assertEqual( "Normal: %(a)s. Modified: %(xx_a)s" % d, 'Normal: a. Modified: *a' )
bsd-3-clause
SDHM/vitess
py/vtdb/cursorv3.py
6
4294
# Copyright 2012, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.

# PEP-249-style cursors over a Vitess connection (Python 2: `xrange`,
# iterator `next` method).  Cursor buffers full result sets; StreamCursor
# pulls rows one at a time from the connection.

from vtdb import cursor
from vtdb import dbexceptions


class Cursor(object):
  # Buffered cursor: execute() stores the whole result set in self.results
  # and fetch* methods walk it with self.index.
  _conn = None
  tablet_type = None
  arraysize = 1
  lastrowid = None
  rowcount = 0
  results = None
  description = None
  index = None

  def __init__(self, connection, tablet_type):
    self._conn = connection
    self.tablet_type = tablet_type

  def close(self):
    # Drops the buffered rows; the underlying connection stays open.
    self.results = None

  def commit(self):
    return self._conn.commit()

  def begin(self):
    return self._conn.begin()

  def rollback(self):
    return self._conn.rollback()

  def execute(self, sql, bind_variables):
    """Execute sql; transaction keywords are routed to the connection."""
    self.rowcount = 0
    self.results = None
    self.description = None
    self.lastrowid = None

    # begin/commit/rollback are intercepted rather than sent as queries.
    sql_check = sql.strip().lower()
    if sql_check == 'begin':
      self.begin()
      return
    elif sql_check == 'commit':
      self.commit()
      return
    elif sql_check == 'rollback':
      self.rollback()
      return

    self.results, self.rowcount, self.lastrowid, self.description = self._conn._execute(
        sql, bind_variables, self.tablet_type)
    self.index = 0
    return self.rowcount

  def fetchone(self):
    """Return the next row, or None when the buffer is exhausted."""
    if self.results is None:
      raise dbexceptions.ProgrammingError('fetch called before execute')

    if self.index >= len(self.results):
      return None
    self.index += 1
    return self.results[self.index-1]

  def fetchmany(self, size=None):
    """Return up to `size` rows (default: self.arraysize)."""
    if self.results is None:
      raise dbexceptions.ProgrammingError('fetch called before execute')

    if self.index >= len(self.results):
      return []
    if size is None:
      size = self.arraysize
    res = self.results[self.index:self.index+size]
    self.index += size
    return res

  def fetchall(self):
    """Return all remaining rows."""
    if self.results is None:
      raise dbexceptions.ProgrammingError('fetch called before execute')
    return self.fetchmany(len(self.results)-self.index)

  def callproc(self):
    raise dbexceptions.NotSupportedError

  def executemany(self, *pargs):
    raise dbexceptions.NotSupportedError

  def nextset(self):
    raise dbexceptions.NotSupportedError

  def setinputsizes(self, sizes):
    pass

  def setoutputsize(self, size, column=None):
    pass

  @property
  def rownumber(self):
    return self.index

  def __iter__(self):
    return self

  def next(self):
    val = self.fetchone()
    if val is None:
      raise StopIteration
    return val


class StreamCursor(Cursor):
  # Streaming cursor: rows come from _conn._stream_next() on demand;
  # no result set is buffered, so rowcount stays unknown.
  arraysize = 1
  conversions = None
  connection = None
  description = None
  index = None
  fetchmany_done = False  # set when a fetchmany() batch hit end-of-stream

  def execute(self, sql, bind_variables, **kargs):
    """Start a streaming query; only the description is retained."""
    self.description = None
    x, y, z, self.description = self._conn._stream_execute(
        sql, bind_variables, self.tablet_type)
    self.index = 0
    return 0

  def fetchone(self):
    if self.description is None:
      raise dbexceptions.ProgrammingError('fetch called before execute')

    self.index += 1
    return self._conn._stream_next()

  # fetchmany can be called until it returns no rows. Returning less rows
  # than what we asked for is also an indication we ran out, but the cursor
  # API in PEP249 is silent about that.
  def fetchmany(self, size=None):
    if size is None:
      size = self.arraysize
    result = []
    # A previous short batch already consumed the stream's end; report
    # the empty batch the protocol expects, then reset the flag.
    if self.fetchmany_done:
      self.fetchmany_done = False
      return result
    for i in xrange(size):
      row = self.fetchone()
      if row is None:
        self.fetchmany_done = True
        break
      result.append(row)
    return result

  def fetchall(self):
    result = []
    while True:
      row = self.fetchone()
      if row is None:
        break
      result.append(row)
    return result

  def callproc(self):
    raise dbexceptions.NotSupportedError

  def executemany(self, *pargs):
    raise dbexceptions.NotSupportedError

  def nextset(self):
    raise dbexceptions.NotSupportedError

  def setinputsizes(self, sizes):
    pass

  def setoutputsize(self, size, column=None):
    pass

  @property
  def rownumber(self):
    return self.index

  def __iter__(self):
    return self

  def next(self):
    val = self.fetchone()
    if val is None:
      raise StopIteration
    return val
bsd-3-clause
mralext20/apex-sigma
sigma/plugin.py
1
2143
import warnings class PluginMount(type): def __init__(cls, name, bases, attrs): if not hasattr(cls, 'plugins'): cls.plugins = [] else: cls.plugins.append(cls) class Plugin(object, metaclass=PluginMount): is_global = False def __init__(self, client): self.client = client self.prefix = client.prefix self.db = client.db async def _on_ready(self): await self.on_ready() async def on_ready(self): pass async def _on_message(self, message, pfx): warnings.filterwarnings("ignore", category=ResourceWarning) self.channel = message.channel self.author = message.author async def reply(*args): await self.client.send_message(self.channel, *args) self.reply = reply await self.on_message(message, pfx) async def on_message(self, message, pfx): pass async def on_message_edit(self, before, after): pass async def on_message_delete(self, message): pass async def on_channel_create(self, channel): pass async def on_channel_update(self, before, after): pass async def on_channel_delete(self, channel): pass async def on_member_join(self, member): pass async def on_member_remove(self, member): pass async def on_member_update(self, before, after): pass async def on_server_join(self, server): pass async def on_server_update(self, before, after): pass async def on_server_role_create(self, server, role): pass async def on_server_role_delete(self, server, role): pass async def on_server_role_update(self, server, role): pass async def _on_voice_state_update(self, before, after): await self.on_voice_state_update(before, after) async def on_voice_state_update(self, before, after): pass async def on_member_ban(self, member): pass async def on_member_unban(self, member): pass async def on_typing(self, channel, user, when): pass
mpl-2.0
jgoerzen/pygopherd
pygopherd/handlers/base.py
1
6592
# pygopherd -- Gopher-based protocol server in Python # module: base handler code # Copyright (C) 2002 John Goerzen # <jgoerzen@complete.org> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; version 2 of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA import socketserver import re import os, stat, os.path, mimetypes from pygopherd import protocols, gopherentry rootpath = None class VFS_Real: def __init__(self, config, chain = None): """This implementation does not chain.""" self.config = config def iswritable(self, selector): return 1 def unlink(self, selector): os.unlink(self.getfspath(selector)) def stat(self, selector): return os.stat(self.getfspath(selector)) def isdir(self, selector): return os.path.isdir(self.getfspath(selector)) def isfile(self, selector): return os.path.isfile(self.getfspath(selector)) def exists(self, selector): return os.path.exists(self.getfspath(selector)) def open(self, selector, *args, **kwargs): return open(*(self.getfspath(selector),) + args, **kwargs) def listdir(self, selector): return os.listdir(self.getfspath(selector)) def getrootpath(self): global rootpath if not rootpath: rootpath = self.config.get("pygopherd", "root") return rootpath def getfspath(self, selector): """Gets the filesystem path corresponding to the selector.""" fspath = self.getrootpath() + selector # Strip off trailing slash. 
if fspath[-1] == '/': fspath = fspath[0:-1] return fspath def copyto(self, name, fd): rfile = self.open(name, 'rb') while 1: data = rfile.read(4096) if not len(data): break fd.write(data) rfile.close class BaseHandler: """Skeleton handler -- includes commonly-used routines.""" def __init__(self, selector, searchrequest, protocol, config, statresult, vfs = None): """Parameters are: selector -- requested selector. The selector must always start with a slash and never end with a slash UNLESS it is a one-char selector that contains only a slash. This should be handled by the default protocol. config -- config object.""" self.selector = selector self.searchrequest = searchrequest self.protocol = protocol self.config = config self.statresult = statresult self.fspath = None self.entry = None self.searchrequest = searchrequest if not vfs: self.vfs = VFS_Real(self.config) else: self.vfs = vfs def isrequestforme(self): """Called by multiplexers or other handlers. The default implementation is just: return self.isrequestsecure() and self.canhandlerequest() """ return self.isrequestsecure() and self.canhandlerequest() def isrequestsecure(self): """An auxiliary to canhandlerequest. In order for this handler to be selected for handling a given request, both the securitycheck and the canhandlerequest should be invoked. The securitycheck is intended to be a short, small, quick check -- usually not even looking at the filesystem. Here is a default. Returns true if the request is secure, false if not. By default, we eliminate ./, ../, and // This is split out from canhandlerequest becase it could be too easy to forget about it there.""" return (self.selector.find("./") == -1) and \ (self.selector.find("..") == -1) and \ (self.selector.find("//") == -1) and \ (self.selector.find(".\\") == -1) and \ (self.selector.find("\\\\") == -1) and \ (self.selector.find("\0") == -1) def canhandlerequest(self): """Decides whether or not a given request is valid for this handler. 
Should be overridden by all subclasses.""" return 0 def getentry(self): """Returns an entry object for this request.""" if not self.entry: self.entry = gopherentry.GopherEntry(self.selector, self.config) return self.entry def getfspath(self): if not self.fspath: self.fspath = self.vfs.getfspath(self.getselector()) return self.fspath def getselector(self): """Returns the selector we are handling.""" return self.selector def gethandler(self): """Returns the handler to use to process this request. For all but special cases (rewriting handleres, for instance), this should return self.""" return self ## The next three are the publically-exposed interface -- the ones ## called by things other than handlers. def prepare(self): """Prepares for a write. Ie, opens a file. This is used so that the protocols can try to detect an error before transmitting a result. Must always be called before write.""" pass def isdir(self): """Returns true if this handler is handling a directory; false otherwise. Not valid unless prepare has been called.""" return 0 def write(self, wfile): """Writes out the request if isdir() returns false. You should NOT call write if isdir() returns true! Should be overridden by files.""" if self.isdir(): raise Exception("Attempt to use write for a directory") def getdirlist(self): """Returns a list-like object (list, iterator, tuple, generator, etc) that contains as its elements the gopherentry objects corresponding to each item in the directory. Valid only if self.isdir() returns true.""" if not self.isdir(): raise Exception("Attempt to use getdir for a file.") return []
gpl-2.0
bud4/samba
python/samba/tests/samba_tool/dnscmd.py
3
36946
# Unix SMB/CIFS implementation. # Copyright (C) Andrew Bartlett <abartlet@catalyst.net.nz> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # import os import ldb from samba.auth import system_session from samba.samdb import SamDB from samba.ndr import ndr_unpack, ndr_pack from samba.dcerpc import dnsp from samba.tests.samba_tool.base import SambaToolCmdTest class DnsCmdTestCase(SambaToolCmdTest): def setUp(self): super(DnsCmdTestCase, self).setUp() self.dburl = "ldap://%s" % os.environ["SERVER"] self.creds_string = "-U%s%%%s" % (os.environ["DC_USERNAME"], os.environ["DC_PASSWORD"]) self.samdb = self.getSamDB("-H", self.dburl, self.creds_string) self.config_dn = str(self.samdb.get_config_basedn()) self.testip = "192.168.0.193" self.testip2 = "192.168.0.194" self.addZone() # Note: SOA types don't work (and shouldn't), as we only have one zone per DNS record. 
good_dns = ["SAMDOM.EXAMPLE.COM", "1.EXAMPLE.COM", "%sEXAMPLE.COM" % ("1."*100), "EXAMPLE", "\n.COM", "!@#$%^&*()_", "HIGH\xFFBYTE", "@.EXAMPLE.COM", "."] bad_dns = ["...", ".EXAMPLE.COM", ".EXAMPLE.", "", "SAMDOM..EXAMPLE.COM"] good_mx = ["SAMDOM.EXAMPLE.COM 65530"] bad_mx = ["SAMDOM.EXAMPLE.COM -1", "SAMDOM.EXAMPLE.COM", " ", "SAMDOM.EXAMPLE.COM 1 1", "SAMDOM.EXAMPLE.COM SAMDOM.EXAMPLE.COM"] good_srv = ["SAMDOM.EXAMPLE.COM 65530 65530 65530"] bad_srv = ["SAMDOM.EXAMPLE.COM 0 65536 0", "SAMDOM.EXAMPLE.COM 0 0 65536", "SAMDOM.EXAMPLE.COM 65536 0 0" ] for bad_dn in bad_dns: bad_mx.append("%s 1" % bad_dn) bad_srv.append("%s 0 0 0" % bad_dn) for good_dn in good_dns: good_mx.append("%s 1" % good_dn) good_srv.append("%s 0 0 0" % good_dn) self.good_records = { "A":["192.168.0.1", "255.255.255.255"], "AAAA":["1234:5678:9ABC:DEF0:0000:0000:0000:0000", "0000:0000:0000:0000:0000:0000:0000:0000", "1234:5678:9ABC:DEF0:1234:5678:9ABC:DEF0", "1234:1234:1234::", "1234:5678:9ABC:DEF0::", "0000:0000::0000", "1234::5678:9ABC:0000:0000:0000:0000", "::1", "::", "1:1:1:1:1:1:1:1"], "PTR":good_dns, "CNAME":good_dns, "NS":good_dns, "MX":good_mx, "SRV":good_srv, "TXT":["text", "", "@#!", "\n"] } self.bad_records = { "A":["192.168.0.500", "255.255.255.255/32"], "AAAA":["GGGG:1234:5678:9ABC:0000:0000:0000:0000", "0000:0000:0000:0000:0000:0000:0000:0000/1", "AAAA:AAAA:AAAA:AAAA:G000:0000:0000:1234", "1234:5678:9ABC:DEF0:1234:5678:9ABC:DEF0:1234", "1234:5678:9ABC:DEF0:1234:5678:9ABC", "1111::1111::1111"], "PTR":bad_dns, "CNAME":bad_dns, "NS":bad_dns, "MX":bad_mx, "SRV":bad_srv } def tearDown(self): self.deleteZone() super(DnsCmdTestCase, self).tearDown() def resetZone(self): self.deleteZone() self.addZone() def addZone(self): self.zone = "zone" result, out, err = self.runsubcmd("dns", "zonecreate", os.environ["SERVER"], self.zone, self.creds_string) self.assertCmdSuccess(result, out, err) def deleteZone(self): result, out, err = self.runsubcmd("dns", "zonedelete", os.environ["SERVER"], 
self.zone, self.creds_string) self.assertCmdSuccess(result, out, err) def get_record_from_db(self, zone_name, record_name): zones = self.samdb.search(base="DC=DomainDnsZones,%s" % self.samdb.get_default_basedn(), scope=ldb.SCOPE_SUBTREE, expression="(objectClass=dnsZone)", attrs=["cn"]) for zone in zones: if zone_name in str(zone.dn): zone_dn = zone.dn break records = self.samdb.search(base=zone_dn, scope=ldb.SCOPE_SUBTREE, expression="(objectClass=dnsNode)", attrs=["dnsRecord"]) for old_packed_record in records: if record_name in str(old_packed_record.dn): return (old_packed_record.dn, ndr_unpack(dnsp.DnssrvRpcRecord, old_packed_record["dnsRecord"][0])) def test_rank_none(self): record_str = "192.168.50.50" record_type_str = "A" result, out, err = self.runsubcmd("dns", "add", os.environ["SERVER"], self.zone, "testrecord", record_type_str, record_str, self.creds_string) self.assertCmdSuccess(result, out, err, "Failed to add record '%s' with type %s." % (record_str, record_type_str)) dn, record = self.get_record_from_db(self.zone, "testrecord") record.rank = 0 # DNS_RANK_NONE res = self.samdb.dns_replace_by_dn(dn, [record]) if res is not None: self.fail("Unable to update dns record to have DNS_RANK_NONE.") errors = [] # The record should still exist result, out, err = self.runsubcmd("dns", "query", os.environ["SERVER"], self.zone, "testrecord", record_type_str, self.creds_string) try: self.assertCmdSuccess(result, out, err, "Failed to query for a record" \ "which had DNS_RANK_NONE.") self.assertTrue("testrecord" in out and record_str in out, "Query for a record which had DNS_RANK_NONE" \ "succeeded but produced no resulting records.") except AssertionError, e: # Windows produces no resulting records pass # We should not be able to add a duplicate result, out, err = self.runsubcmd("dns", "add", os.environ["SERVER"], self.zone, "testrecord", record_type_str, record_str, self.creds_string) try: self.assertCmdFail(result, "Successfully added duplicate record" \ "of one 
which had DNS_RANK_NONE.") except AssertionError, e: errors.append(e) # We should be able to delete it result, out, err = self.runsubcmd("dns", "delete", os.environ["SERVER"], self.zone, "testrecord", record_type_str, record_str, self.creds_string) try: self.assertCmdSuccess(result, out, err, "Failed to delete record" \ "which had DNS_RANK_NONE.") except AssertionError, e: errors.append(e) # Now the record should not exist result, out, err = self.runsubcmd("dns", "query", os.environ["SERVER"], self.zone, "testrecord", record_type_str, self.creds_string) try: self.assertCmdFail(result, "Successfully queried for deleted record" \ "which had DNS_RANK_NONE.") except AssertionError, e: errors.append(e) if len(errors) > 0: err_str = "Failed appropriate behaviour with DNS_RANK_NONE:" for error in errors: err_str = err_str + "\n" + str(error) raise AssertionError(err_str) def test_accept_valid_commands(self): """ For all good records, attempt to add, query and delete them. """ num_failures = 0 failure_msgs = [] for dnstype in self.good_records: for record in self.good_records[dnstype]: try: result, out, err = self.runsubcmd("dns", "add", os.environ["SERVER"], self.zone, "testrecord", dnstype, record, self.creds_string) self.assertCmdSuccess(result, out, err, "Failed to add" \ "record %s with type %s." % (record, dnstype)) result, out, err = self.runsubcmd("dns", "query", os.environ["SERVER"], self.zone, "testrecord", dnstype, self.creds_string) self.assertCmdSuccess(result, out, err, "Failed to query" \ "record %s with qualifier %s." % (record, dnstype)) result, out, err = self.runsubcmd("dns", "delete", os.environ["SERVER"], self.zone, "testrecord", dnstype, record, self.creds_string) self.assertCmdSuccess(result, out, err, "Failed to remove" \ "record %s with type %s." % (record, dnstype)) except AssertionError as e: num_failures = num_failures + 1 failure_msgs.append(e) if num_failures > 0: for msg in failure_msgs: print(msg) self.fail("Failed to accept valid commands. 
%d total failures." \ "Errors above." % num_failures) def test_reject_invalid_commands(self): """ For all bad records, attempt to add them and update to them, making sure that both operations fail. """ num_failures = 0 failure_msgs = [] # Add invalid records and make sure they fail to be added for dnstype in self.bad_records: for record in self.bad_records[dnstype]: try: result, out, err = self.runsubcmd("dns", "add", os.environ["SERVER"], self.zone, "testrecord", dnstype, record, self.creds_string) self.assertCmdFail(result, "Successfully added invalid" \ "record '%s' of type '%s'." % (record, dnstype)) except AssertionError as e: num_failures = num_failures + 1 failure_msgs.append(e) self.resetZone() try: result, out, err = self.runsubcmd("dns", "delete", os.environ["SERVER"], self.zone, "testrecord", dnstype, record, self.creds_string) self.assertCmdFail(result, "Successfully deleted invalid" \ "record '%s' of type '%s' which" \ "shouldn't exist." % (record, dnstype)) except AssertionError as e: num_failures = num_failures + 1 failure_msgs.append(e) self.resetZone() # Update valid records to invalid ones and make sure they # fail to be updated for dnstype in self.bad_records: for bad_record in self.bad_records[dnstype]: good_record = self.good_records[dnstype][0] try: result, out, err = self.runsubcmd("dns", "add", os.environ["SERVER"], self.zone, "testrecord", dnstype, good_record, self.creds_string) self.assertCmdSuccess(result, out, err, "Failed to add " \ "record '%s' with type %s." % (record, dnstype)) result, out, err = self.runsubcmd("dns", "update", os.environ["SERVER"], self.zone, "testrecord", dnstype, good_record, bad_record, self.creds_string) self.assertCmdFail(result, "Successfully updated valid " \ "record '%s' of type '%s' to invalid " \ "record '%s' of the same type." 
% (good_record, dnstype, bad_record)) result, out, err = self.runsubcmd("dns", "delete", os.environ["SERVER"], self.zone, "testrecord", dnstype, good_record, self.creds_string) self.assertCmdSuccess(result, out, err, "Could not delete " \ "valid record '%s' of type '%s'." % (good_record, dnstype)) except AssertionError as e: num_failures = num_failures + 1 failure_msgs.append(e) self.resetZone() if num_failures > 0: for msg in failure_msgs: print(msg) self.fail("Failed to reject invalid commands. %d total failures. " \ "Errors above." % num_failures) def test_update_invalid_type(self): """ Make sure that a record can't be updated to one of a different type. """ for dnstype1 in self.good_records: record1 = self.good_records[dnstype1][0] result, out, err = self.runsubcmd("dns", "add", os.environ["SERVER"], self.zone, "testrecord", dnstype1, record1, self.creds_string) self.assertCmdSuccess(result, out, err, "Failed to add " \ "record %s with type %s." % (record1, dnstype1)) for dnstype2 in self.good_records: record2 = self.good_records[dnstype2][0] # Make sure that record2 isn't a valid entry of dnstype1. # For example, any A-type will also be a valid TXT-type. result, out, err = self.runsubcmd("dns", "add", os.environ["SERVER"], self.zone, "testrecord", dnstype1, record2, self.creds_string) try: self.assertCmdFail(result) except AssertionError: continue # Don't check this one, because record2 _is_ a valid entry of dnstype1. # Check both ways: Give the current type and try to update, # and give the new type and try to update. result, out, err = self.runsubcmd("dns", "update", os.environ["SERVER"], self.zone, "testrecord", dnstype1, record1, record2, self.creds_string) self.assertCmdFail(result, "Successfully updated record '%s' " \ "to '%s', even though the latter is of " \ "type '%s' where '%s' was expected." 
% (record1, record2, dnstype2, dnstype1)) result, out, err = self.runsubcmd("dns", "update", os.environ["SERVER"], self.zone, "testrecord", dnstype2, record1, record2, self.creds_string) self.assertCmdFail(result, "Successfully updated record " \ "'%s' to '%s', even though the former " \ "is of type '%s' where '%s' was expected." % (record1, record2, dnstype1, dnstype2)) def test_update_valid_type(self): for dnstype in self.good_records: for record in self.good_records[dnstype]: result, out, err = self.runsubcmd("dns", "add", os.environ["SERVER"], self.zone, "testrecord", dnstype, record, self.creds_string) self.assertCmdSuccess(result, out, err, "Failed to add " \ "record %s with type %s." % (record, dnstype)) # Update the record to be the same. result, out, err = self.runsubcmd("dns", "update", os.environ["SERVER"], self.zone, "testrecord", dnstype, record, record, self.creds_string) self.assertCmdFail(result, "Successfully updated record " \ "'%s' to be exactly the same." % record) result, out, err = self.runsubcmd("dns", "delete", os.environ["SERVER"], self.zone, "testrecord", dnstype, record, self.creds_string) self.assertCmdSuccess(result, out, err, "Could not delete " \ "valid record '%s' of type '%s'." % (record, dnstype)) for record in self.good_records["SRV"]: result, out, err = self.runsubcmd("dns", "add", os.environ["SERVER"], self.zone, "testrecord", "SRV", record, self.creds_string) self.assertCmdSuccess(result, out, err, "Failed to add " \ "record %s with type 'SRV'." % record) split = record.split(' ') new_bit = str(int(split[3]) + 1) new_record = '%s %s %s %s' % (split[0], split[1], split[2], new_bit) result, out, err = self.runsubcmd("dns", "update", os.environ["SERVER"], self.zone, "testrecord", "SRV", record, new_record, self.creds_string) self.assertCmdSuccess(result, out, err, "Failed to update record " \ "'%s' of type '%s' to '%s'." 
% (record, "SRV", new_record)) result, out, err = self.runsubcmd("dns", "query", os.environ["SERVER"], self.zone, "testrecord", "SRV", self.creds_string) self.assertCmdSuccess(result, out, err, "Failed to query for " \ "record '%s' of type '%s'." % (new_record, "SRV")) result, out, err = self.runsubcmd("dns", "delete", os.environ["SERVER"], self.zone, "testrecord", "SRV", new_record, self.creds_string) self.assertCmdSuccess(result, out, err, "Could not delete " \ "valid record '%s' of type '%s'." % (new_record, "SRV")) # Since 'dns update' takes the current value as a parameter, make sure # we can't enter the wrong current value for a given record. for dnstype in self.good_records: if len(self.good_records[dnstype]) < 3: continue # Not enough records of this type to do this test used_record = self.good_records[dnstype][0] unused_record = self.good_records[dnstype][1] new_record = self.good_records[dnstype][2] result, out, err = self.runsubcmd("dns", "add", os.environ["SERVER"], self.zone, "testrecord", dnstype, used_record, self.creds_string) self.assertCmdSuccess(result, out, err, "Failed to add record %s " \ "with type %s." % (used_record, dnstype)) result, out, err = self.runsubcmd("dns", "update", os.environ["SERVER"], self.zone, "testrecord", dnstype, unused_record, new_record, self.creds_string) self.assertCmdFail(result, "Successfully updated record '%s' " \ "from '%s' to '%s', even though the given " \ "source record is incorrect." % (used_record, unused_record, new_record)) def test_invalid_types(self): result, out, err = self.runsubcmd("dns", "add", os.environ["SERVER"], self.zone, "testrecord", "SOA", "test", self.creds_string) self.assertCmdFail(result, "Successfully added record of type SOA, " \ "when this type should not be available.") self.assertTrue("type SOA is not supported" in err, "Invalid error message '%s' when attempting to " \ "add record of type SOA." 
% err) def test_add_overlapping_different_type(self): """ Make sure that we can add an entry with the same name as an existing one but a different type. """ i = 0 for dnstype1 in self.good_records: record1 = self.good_records[dnstype1][0] for dnstype2 in self.good_records: # Only do some subset of dns types, otherwise it takes a long time. i += 1 if i % 4 != 0: continue if dnstype1 == dnstype2: continue record2 = self.good_records[dnstype2][0] result, out, err = self.runsubcmd("dns", "add", os.environ["SERVER"], self.zone, "testrecord", dnstype1, record1, self.creds_string) self.assertCmdSuccess(result, out, err, "Failed to add record " \ "'%s' of type '%s'." % (record1, dnstype1)) result, out, err = self.runsubcmd("dns", "add", os.environ["SERVER"], self.zone, "testrecord", dnstype2, record2, self.creds_string) self.assertCmdSuccess(result, out, err, "Failed to add record " \ "'%s' of type '%s' when a record '%s' " \ "of type '%s' with the same name exists." % (record1, dnstype1, record2, dnstype2)) result, out, err = self.runsubcmd("dns", "query", os.environ["SERVER"], self.zone, "testrecord", dnstype1, self.creds_string) self.assertCmdSuccess(result, out, err, "Failed to query for " \ "record '%s' of type '%s' when a new " \ "record '%s' of type '%s' with the same " \ "name was added." % (record1, dnstype1, record2, dnstype2)) result, out, err = self.runsubcmd("dns", "query", os.environ["SERVER"], self.zone, "testrecord", dnstype2, self.creds_string) self.assertCmdSuccess(result, out, err, "Failed to query " \ "record '%s' of type '%s' which should " \ "have been added with the same name as " \ "record '%s' of type '%s'." % (record2, dnstype2, record1, dnstype1)) result, out, err = self.runsubcmd("dns", "delete", os.environ["SERVER"], self.zone, "testrecord", dnstype1, record1, self.creds_string) self.assertCmdSuccess(result, out, err, "Failed to delete " \ "record '%s' of type '%s'." 
% (record1, dnstype1)) result, out, err = self.runsubcmd("dns", "delete", os.environ["SERVER"], self.zone, "testrecord", dnstype2, record2, self.creds_string) self.assertCmdSuccess(result, out, err, "Failed to delete " \ "record '%s' of type '%s'." % (record2, dnstype2)) def test_query_deleted_record(self): self.runsubcmd("dns", "add", os.environ["SERVER"], self.zone, "testrecord", "A", self.testip, self.creds_string) self.runsubcmd("dns", "delete", os.environ["SERVER"], self.zone, "testrecord", "A", self.testip, self.creds_string) result, out, err = self.runsubcmd("dns", "query", os.environ["SERVER"], self.zone, "testrecord", "A", self.creds_string) self.assertCmdFail(result) def test_add_duplicate_record(self): for record_type in self.good_records: result, out, err = self.runsubcmd("dns", "add", os.environ["SERVER"], self.zone, "testrecord", record_type, self.good_records[record_type][0], self.creds_string) self.assertCmdSuccess(result, out, err) result, out, err = self.runsubcmd("dns", "add", os.environ["SERVER"], self.zone, "testrecord", record_type, self.good_records[record_type][0], self.creds_string) self.assertCmdFail(result) result, out, err = self.runsubcmd("dns", "query", os.environ["SERVER"], self.zone, "testrecord", record_type, self.creds_string) self.assertCmdSuccess(result, out, err) result, out, err = self.runsubcmd("dns", "delete", os.environ["SERVER"], self.zone, "testrecord", record_type, self.good_records[record_type][0], self.creds_string) self.assertCmdSuccess(result, out, err) def test_remove_deleted_record(self): self.runsubcmd("dns", "add", os.environ["SERVER"], self.zone, "testrecord", "A", self.testip, self.creds_string) self.runsubcmd("dns", "delete", os.environ["SERVER"], self.zone, "testrecord", "A", self.testip, self.creds_string) # Attempting to delete a record that has already been deleted or has never existed should fail result, out, err = self.runsubcmd("dns", "delete", os.environ["SERVER"], self.zone, "testrecord", "A", 
self.testip, self.creds_string) self.assertCmdFail(result) result, out, err = self.runsubcmd("dns", "query", os.environ["SERVER"], self.zone, "testrecord", "A", self.creds_string) self.assertCmdFail(result) result, out, err = self.runsubcmd("dns", "delete", os.environ["SERVER"], self.zone, "testrecord2", "A", self.testip, self.creds_string) self.assertCmdFail(result) def test_dns_wildcards(self): """ Ensure that DNS wild card entries can be added deleted and queried """ num_failures = 0 failure_msgs = [] records = [("*.", "MISS", "A", "1.1.1.1"), ("*.SAMDOM", "MISS.SAMDOM", "A", "1.1.1.2")] for (name, miss, dnstype, record) in records: try: result, out, err = self.runsubcmd("dns", "add", os.environ["SERVER"], self.zone, name, dnstype, record, self.creds_string) self.assertCmdSuccess( result, out, err, ("Failed to add record %s (%s) with type %s." % (name, record, dnstype))) result, out, err = self.runsubcmd("dns", "query", os.environ["SERVER"], self.zone, name, dnstype, self.creds_string) self.assertCmdSuccess( result, out, err, ("Failed to query record %s with qualifier %s." % (record, dnstype))) # dns tool does not perform dns wildcard search if the name # does not match result, out, err = self.runsubcmd("dns", "query", os.environ["SERVER"], self.zone, miss, dnstype, self.creds_string) self.assertCmdFail( result, ("Failed to query record %s with qualifier %s." % (record, dnstype))) result, out, err = self.runsubcmd("dns", "delete", os.environ["SERVER"], self.zone, name, dnstype, record, self.creds_string) self.assertCmdSuccess( result, out, err, ("Failed to remove record %s with type %s." % (record, dnstype))) except AssertionError as e: num_failures = num_failures + 1 failure_msgs.append(e) if num_failures > 0: for msg in failure_msgs: print(msg) self.fail("Failed to accept valid commands. %d total failures." "Errors above." % num_failures)
gpl-3.0
aspectron/jsx
build/tools/gyp/test/mac/gyptest-rpath.py
242
1310
#!/usr/bin/env python # Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Verifies that LD_DYLIB_INSTALL_NAME and DYLIB_INSTALL_NAME_BASE are handled correctly. """ import TestGyp import re import subprocess import sys if sys.platform == 'darwin': test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode']) CHDIR = 'rpath' test.run_gyp('test.gyp', chdir=CHDIR) test.build('test.gyp', test.ALL, chdir=CHDIR) def GetRpaths(p): p = test.built_file_path(p, chdir=CHDIR) r = re.compile(r'cmd LC_RPATH.*?path (.*?) \(offset \d+\)', re.DOTALL) proc = subprocess.Popen(['otool', '-l', p], stdout=subprocess.PIPE) o = proc.communicate()[0] assert not proc.returncode return r.findall(o) if (GetRpaths('libdefault_rpath.dylib') != []): test.fail_test() if (GetRpaths('libexplicit_rpath.dylib') != ['@executable_path/.']): test.fail_test() if (GetRpaths('libexplicit_rpaths_escaped.dylib') != ['First rpath', 'Second rpath']): test.fail_test() if (GetRpaths('My Framework.framework/My Framework') != ['@loader_path/.']): test.fail_test() if (GetRpaths('executable') != ['@executable_path/.']): test.fail_test() test.pass_test()
mit
pinax/pinax-notifications
pinax/notifications/backends/base.py
1
1800
from django.contrib.sites.models import Site from django.template.loader import render_to_string from ..conf import settings from ..hooks import hookset class BaseBackend: """ The base backend. """ def __init__(self, medium_id, spam_sensitivity=None): self.medium_id = medium_id if spam_sensitivity is not None: self.spam_sensitivity = spam_sensitivity def can_send(self, user, notice_type, scoping): """ Determines whether this backend is allowed to send a notification to the given user and notice_type. """ return hookset.notice_setting_for_user(user, notice_type, self.medium_id, scoping).send def deliver(self, recipient, sender, notice_type, extra_context): """ Deliver a notification to the given recipient. """ raise NotImplementedError() def get_formatted_messages(self, formats, label, context): """ Returns a dictionary with the format identifier as the key. The values are are fully rendered templates with the given context. """ format_templates = {} for fmt in formats: format_templates[fmt] = render_to_string(( f"pinax/notifications/{label}/{fmt}", f"pinax/notifications/{fmt}"), context) return format_templates def default_context(self): use_ssl = getattr(settings, "PINAX_USE_SSL", False) default_http_protocol = "https" if use_ssl else "http" current_site = Site.objects.get_current() base_url = f"{default_http_protocol}://{current_site.domain}" return { "default_http_protocol": default_http_protocol, "current_site": current_site, "base_url": base_url }
mit
ltilve/ChromiumGStreamerBackend
third_party/protobuf/python/mox.py
603
38237
#!/usr/bin/python2.4 # # Copyright 2008 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This file is used for testing. The original is at: # http://code.google.com/p/pymox/ """Mox, an object-mocking framework for Python. Mox works in the record-replay-verify paradigm. When you first create a mock object, it is in record mode. You then programmatically set the expected behavior of the mock object (what methods are to be called on it, with what parameters, what they should return, and in what order). Once you have set up the expected mock behavior, you put it in replay mode. Now the mock responds to method calls just as you told it to. If an unexpected method (or an expected method with unexpected parameters) is called, then an exception will be raised. Once you are done interacting with the mock, you need to verify that all the expected interactions occured. (Maybe your code exited prematurely without calling some cleanup method!) The verify phase ensures that every expected method was called; otherwise, an exception will be raised. 
Suggested usage / workflow: # Create Mox factory my_mox = Mox() # Create a mock data access object mock_dao = my_mox.CreateMock(DAOClass) # Set up expected behavior mock_dao.RetrievePersonWithIdentifier('1').AndReturn(person) mock_dao.DeletePerson(person) # Put mocks in replay mode my_mox.ReplayAll() # Inject mock object and run test controller.SetDao(mock_dao) controller.DeletePersonById('1') # Verify all methods were called as expected my_mox.VerifyAll() """ from collections import deque import re import types import unittest import stubout class Error(AssertionError): """Base exception for this module.""" pass class ExpectedMethodCallsError(Error): """Raised when Verify() is called before all expected methods have been called """ def __init__(self, expected_methods): """Init exception. Args: # expected_methods: A sequence of MockMethod objects that should have been # called. expected_methods: [MockMethod] Raises: ValueError: if expected_methods contains no methods. """ if not expected_methods: raise ValueError("There must be at least one expected method") Error.__init__(self) self._expected_methods = expected_methods def __str__(self): calls = "\n".join(["%3d. %s" % (i, m) for i, m in enumerate(self._expected_methods)]) return "Verify: Expected methods never called:\n%s" % (calls,) class UnexpectedMethodCallError(Error): """Raised when an unexpected method is called. This can occur if a method is called with incorrect parameters, or out of the specified order. """ def __init__(self, unexpected_method, expected): """Init exception. Args: # unexpected_method: MockMethod that was called but was not at the head of # the expected_method queue. # expected: MockMethod or UnorderedGroup the method should have # been in. unexpected_method: MockMethod expected: MockMethod or UnorderedGroup """ Error.__init__(self) self._unexpected_method = unexpected_method self._expected = expected def __str__(self): return "Unexpected method call: %s. 
Expecting: %s" % \ (self._unexpected_method, self._expected) class UnknownMethodCallError(Error): """Raised if an unknown method is requested of the mock object.""" def __init__(self, unknown_method_name): """Init exception. Args: # unknown_method_name: Method call that is not part of the mocked class's # public interface. unknown_method_name: str """ Error.__init__(self) self._unknown_method_name = unknown_method_name def __str__(self): return "Method called is not a member of the object: %s" % \ self._unknown_method_name class Mox(object): """Mox: a factory for creating mock objects.""" # A list of types that should be stubbed out with MockObjects (as # opposed to MockAnythings). _USE_MOCK_OBJECT = [types.ClassType, types.InstanceType, types.ModuleType, types.ObjectType, types.TypeType] def __init__(self): """Initialize a new Mox.""" self._mock_objects = [] self.stubs = stubout.StubOutForTesting() def CreateMock(self, class_to_mock): """Create a new mock object. Args: # class_to_mock: the class to be mocked class_to_mock: class Returns: MockObject that can be used as the class_to_mock would be. """ new_mock = MockObject(class_to_mock) self._mock_objects.append(new_mock) return new_mock def CreateMockAnything(self): """Create a mock that will accept any method calls. This does not enforce an interface. """ new_mock = MockAnything() self._mock_objects.append(new_mock) return new_mock def ReplayAll(self): """Set all mock objects to replay mode.""" for mock_obj in self._mock_objects: mock_obj._Replay() def VerifyAll(self): """Call verify on all mock objects created.""" for mock_obj in self._mock_objects: mock_obj._Verify() def ResetAll(self): """Call reset on all mock objects. This does not unset stubs.""" for mock_obj in self._mock_objects: mock_obj._Reset() def StubOutWithMock(self, obj, attr_name, use_mock_anything=False): """Replace a method, attribute, etc. with a Mock. 
This will replace a class or module with a MockObject, and everything else (method, function, etc) with a MockAnything. This can be overridden to always use a MockAnything by setting use_mock_anything to True. Args: obj: A Python object (class, module, instance, callable). attr_name: str. The name of the attribute to replace with a mock. use_mock_anything: bool. True if a MockAnything should be used regardless of the type of attribute. """ attr_to_replace = getattr(obj, attr_name) if type(attr_to_replace) in self._USE_MOCK_OBJECT and not use_mock_anything: stub = self.CreateMock(attr_to_replace) else: stub = self.CreateMockAnything() self.stubs.Set(obj, attr_name, stub) def UnsetStubs(self): """Restore stubs to their original state.""" self.stubs.UnsetAll() def Replay(*args): """Put mocks into Replay mode. Args: # args is any number of mocks to put into replay mode. """ for mock in args: mock._Replay() def Verify(*args): """Verify mocks. Args: # args is any number of mocks to be verified. """ for mock in args: mock._Verify() def Reset(*args): """Reset mocks. Args: # args is any number of mocks to be reset. """ for mock in args: mock._Reset() class MockAnything: """A mock that can be used to mock anything. This is helpful for mocking classes that do not provide a public interface. """ def __init__(self): """ """ self._Reset() def __getattr__(self, method_name): """Intercept method calls on this object. A new MockMethod is returned that is aware of the MockAnything's state (record or replay). The call will be recorded or replayed by the MockMethod's __call__. Args: # method name: the name of the method being called. method_name: str Returns: A new MockMethod aware of MockAnything's state (record or replay). """ return self._CreateMockMethod(method_name) def _CreateMockMethod(self, method_name): """Create a new mock method call and return it. Args: # method name: the name of the method being called. 
method_name: str Returns: A new MockMethod aware of MockAnything's state (record or replay). """ return MockMethod(method_name, self._expected_calls_queue, self._replay_mode) def __nonzero__(self): """Return 1 for nonzero so the mock can be used as a conditional.""" return 1 def __eq__(self, rhs): """Provide custom logic to compare objects.""" return (isinstance(rhs, MockAnything) and self._replay_mode == rhs._replay_mode and self._expected_calls_queue == rhs._expected_calls_queue) def __ne__(self, rhs): """Provide custom logic to compare objects.""" return not self == rhs def _Replay(self): """Start replaying expected method calls.""" self._replay_mode = True def _Verify(self): """Verify that all of the expected calls have been made. Raises: ExpectedMethodCallsError: if there are still more method calls in the expected queue. """ # If the list of expected calls is not empty, raise an exception if self._expected_calls_queue: # The last MultipleTimesGroup is not popped from the queue. if (len(self._expected_calls_queue) == 1 and isinstance(self._expected_calls_queue[0], MultipleTimesGroup) and self._expected_calls_queue[0].IsSatisfied()): pass else: raise ExpectedMethodCallsError(self._expected_calls_queue) def _Reset(self): """Reset the state of this mock to record mode with an empty queue.""" # Maintain a list of method calls we are expecting self._expected_calls_queue = deque() # Make sure we are in setup mode, not replay mode self._replay_mode = False class MockObject(MockAnything, object): """A mock object that simulates the public/protected interface of a class.""" def __init__(self, class_to_mock): """Initialize a mock object. This determines the methods and properties of the class and stores them. Args: # class_to_mock: class to be mocked class_to_mock: class """ # This is used to hack around the mixin/inheritance of MockAnything, which # is not a proper object (it can be anything. 
:-) MockAnything.__dict__['__init__'](self) # Get a list of all the public and special methods we should mock. self._known_methods = set() self._known_vars = set() self._class_to_mock = class_to_mock for method in dir(class_to_mock): if callable(getattr(class_to_mock, method)): self._known_methods.add(method) else: self._known_vars.add(method) def __getattr__(self, name): """Intercept attribute request on this object. If the attribute is a public class variable, it will be returned and not recorded as a call. If the attribute is not a variable, it is handled like a method call. The method name is checked against the set of mockable methods, and a new MockMethod is returned that is aware of the MockObject's state (record or replay). The call will be recorded or replayed by the MockMethod's __call__. Args: # name: the name of the attribute being requested. name: str Returns: Either a class variable or a new MockMethod that is aware of the state of the mock (record or replay). Raises: UnknownMethodCallError if the MockObject does not mock the requested method. """ if name in self._known_vars: return getattr(self._class_to_mock, name) if name in self._known_methods: return self._CreateMockMethod(name) raise UnknownMethodCallError(name) def __eq__(self, rhs): """Provide custom logic to compare objects.""" return (isinstance(rhs, MockObject) and self._class_to_mock == rhs._class_to_mock and self._replay_mode == rhs._replay_mode and self._expected_calls_queue == rhs._expected_calls_queue) def __setitem__(self, key, value): """Provide custom logic for mocking classes that support item assignment. Args: key: Key to set the value for. value: Value to set. Returns: Expected return value in replay mode. A MockMethod object for the __setitem__ method that has already been called if not in replay mode. Raises: TypeError if the underlying class does not support item assignment. UnexpectedMethodCallError if the object does not expect the call to __setitem__. 
""" setitem = self._class_to_mock.__dict__.get('__setitem__', None) # Verify the class supports item assignment. if setitem is None: raise TypeError('object does not support item assignment') # If we are in replay mode then simply call the mock __setitem__ method. if self._replay_mode: return MockMethod('__setitem__', self._expected_calls_queue, self._replay_mode)(key, value) # Otherwise, create a mock method __setitem__. return self._CreateMockMethod('__setitem__')(key, value) def __getitem__(self, key): """Provide custom logic for mocking classes that are subscriptable. Args: key: Key to return the value for. Returns: Expected return value in replay mode. A MockMethod object for the __getitem__ method that has already been called if not in replay mode. Raises: TypeError if the underlying class is not subscriptable. UnexpectedMethodCallError if the object does not expect the call to __setitem__. """ getitem = self._class_to_mock.__dict__.get('__getitem__', None) # Verify the class supports item assignment. if getitem is None: raise TypeError('unsubscriptable object') # If we are in replay mode then simply call the mock __getitem__ method. if self._replay_mode: return MockMethod('__getitem__', self._expected_calls_queue, self._replay_mode)(key) # Otherwise, create a mock method __getitem__. 
return self._CreateMockMethod('__getitem__')(key) def __call__(self, *params, **named_params): """Provide custom logic for mocking classes that are callable.""" # Verify the class we are mocking is callable callable = self._class_to_mock.__dict__.get('__call__', None) if callable is None: raise TypeError('Not callable') # Because the call is happening directly on this object instead of a method, # the call on the mock method is made right here mock_method = self._CreateMockMethod('__call__') return mock_method(*params, **named_params) @property def __class__(self): """Return the class that is being mocked.""" return self._class_to_mock class MockMethod(object): """Callable mock method. A MockMethod should act exactly like the method it mocks, accepting parameters and returning a value, or throwing an exception (as specified). When this method is called, it can optionally verify whether the called method (name and signature) matches the expected method. """ def __init__(self, method_name, call_queue, replay_mode): """Construct a new mock method. Args: # method_name: the name of the method # call_queue: deque of calls, verify this call against the head, or add # this call to the queue. # replay_mode: False if we are recording, True if we are verifying calls # against the call queue. method_name: str call_queue: list or deque replay_mode: bool """ self._name = method_name self._call_queue = call_queue if not isinstance(call_queue, deque): self._call_queue = deque(self._call_queue) self._replay_mode = replay_mode self._params = None self._named_params = None self._return_value = None self._exception = None self._side_effects = None def __call__(self, *params, **named_params): """Log parameters and return the specified return value. If the Mock(Anything/Object) associated with this call is in record mode, this MockMethod will be pushed onto the expected call queue. 
If the mock is in replay mode, this will pop a MockMethod off the top of the queue and verify this call is equal to the expected call. Raises: UnexpectedMethodCall if this call is supposed to match an expected method call and it does not. """ self._params = params self._named_params = named_params if not self._replay_mode: self._call_queue.append(self) return self expected_method = self._VerifyMethodCall() if expected_method._side_effects: expected_method._side_effects(*params, **named_params) if expected_method._exception: raise expected_method._exception return expected_method._return_value def __getattr__(self, name): """Raise an AttributeError with a helpful message.""" raise AttributeError('MockMethod has no attribute "%s". ' 'Did you remember to put your mocks in replay mode?' % name) def _PopNextMethod(self): """Pop the next method from our call queue.""" try: return self._call_queue.popleft() except IndexError: raise UnexpectedMethodCallError(self, None) def _VerifyMethodCall(self): """Verify the called method is expected. This can be an ordered method, or part of an unordered set. Returns: The expected mock method. Raises: UnexpectedMethodCall if the method called was not expected. """ expected = self._PopNextMethod() # Loop here, because we might have a MethodGroup followed by another # group. while isinstance(expected, MethodGroup): expected, method = expected.MethodCalled(self) if method is not None: return method # This is a mock method, so just check equality. if expected != self: raise UnexpectedMethodCallError(self, expected) return expected def __str__(self): params = ', '.join( [repr(p) for p in self._params or []] + ['%s=%r' % x for x in sorted((self._named_params or {}).items())]) desc = "%s(%s) -> %r" % (self._name, params, self._return_value) return desc def __eq__(self, rhs): """Test whether this MockMethod is equivalent to another MockMethod. 
Args: # rhs: the right hand side of the test rhs: MockMethod """ return (isinstance(rhs, MockMethod) and self._name == rhs._name and self._params == rhs._params and self._named_params == rhs._named_params) def __ne__(self, rhs): """Test whether this MockMethod is not equivalent to another MockMethod. Args: # rhs: the right hand side of the test rhs: MockMethod """ return not self == rhs def GetPossibleGroup(self): """Returns a possible group from the end of the call queue or None if no other methods are on the stack. """ # Remove this method from the tail of the queue so we can add it to a group. this_method = self._call_queue.pop() assert this_method == self # Determine if the tail of the queue is a group, or just a regular ordered # mock method. group = None try: group = self._call_queue[-1] except IndexError: pass return group def _CheckAndCreateNewGroup(self, group_name, group_class): """Checks if the last method (a possible group) is an instance of our group_class. Adds the current method to this group or creates a new one. Args: group_name: the name of the group. group_class: the class used to create instance of this new group """ group = self.GetPossibleGroup() # If this is a group, and it is the correct group, add the method. if isinstance(group, group_class) and group.group_name() == group_name: group.AddMethod(self) return self # Create a new group and add the method. new_group = group_class(group_name) new_group.AddMethod(self) self._call_queue.append(new_group) return self def InAnyOrder(self, group_name="default"): """Move this method into a group of unordered calls. A group of unordered calls must be defined together, and must be executed in full before the next expected method can be called. There can be multiple groups that are expected serially, if they are given different group names. The same group name can be reused if there is a standard method call, or a group with a different name, spliced between usages. 
Args: group_name: the name of the unordered group. Returns: self """ return self._CheckAndCreateNewGroup(group_name, UnorderedGroup) def MultipleTimes(self, group_name="default"): """Move this method into group of calls which may be called multiple times. A group of repeating calls must be defined together, and must be executed in full before the next expected mehtod can be called. Args: group_name: the name of the unordered group. Returns: self """ return self._CheckAndCreateNewGroup(group_name, MultipleTimesGroup) def AndReturn(self, return_value): """Set the value to return when this method is called. Args: # return_value can be anything. """ self._return_value = return_value return return_value def AndRaise(self, exception): """Set the exception to raise when this method is called. Args: # exception: the exception to raise when this method is called. exception: Exception """ self._exception = exception def WithSideEffects(self, side_effects): """Set the side effects that are simulated when this method is called. Args: side_effects: A callable which modifies the parameters or other relevant state which a given test case depends on. Returns: Self for chaining with AndReturn and AndRaise. """ self._side_effects = side_effects return self class Comparator: """Base class for all Mox comparators. A Comparator can be used as a parameter to a mocked method when the exact value is not known. For example, the code you are testing might build up a long SQL string that is passed to your mock DAO. You're only interested that the IN clause contains the proper primary keys, so you can set your mock up as follows: mock_dao.RunQuery(StrContains('IN (1, 2, 4, 5)')).AndReturn(mock_result) Now whatever query is passed in must contain the string 'IN (1, 2, 4, 5)'. 
A Comparator may replace one or more parameters, for example: # return at most 10 rows mock_dao.RunQuery(StrContains('SELECT'), 10) or # Return some non-deterministic number of rows mock_dao.RunQuery(StrContains('SELECT'), IsA(int)) """ def equals(self, rhs): """Special equals method that all comparators must implement. Args: rhs: any python object """ raise NotImplementedError, 'method must be implemented by a subclass.' def __eq__(self, rhs): return self.equals(rhs) def __ne__(self, rhs): return not self.equals(rhs) class IsA(Comparator): """This class wraps a basic Python type or class. It is used to verify that a parameter is of the given type or class. Example: mock_dao.Connect(IsA(DbConnectInfo)) """ def __init__(self, class_name): """Initialize IsA Args: class_name: basic python type or a class """ self._class_name = class_name def equals(self, rhs): """Check to see if the RHS is an instance of class_name. Args: # rhs: the right hand side of the test rhs: object Returns: bool """ try: return isinstance(rhs, self._class_name) except TypeError: # Check raw types if there was a type error. This is helpful for # things like cStringIO.StringIO. return type(rhs) == type(self._class_name) def __repr__(self): return str(self._class_name) class IsAlmost(Comparator): """Comparison class used to check whether a parameter is nearly equal to a given value. Generally useful for floating point numbers. Example mock_dao.SetTimeout((IsAlmost(3.9))) """ def __init__(self, float_value, places=7): """Initialize IsAlmost. Args: float_value: The value for making the comparison. places: The number of decimal places to round to. """ self._float_value = float_value self._places = places def equals(self, rhs): """Check to see if RHS is almost equal to float_value Args: rhs: the value to compare to float_value Returns: bool """ try: return round(rhs-self._float_value, self._places) == 0 except TypeError: # This is probably because either float_value or rhs is not a number. 
return False def __repr__(self): return str(self._float_value) class StrContains(Comparator): """Comparison class used to check whether a substring exists in a string parameter. This can be useful in mocking a database with SQL passed in as a string parameter, for example. Example: mock_dao.RunQuery(StrContains('IN (1, 2, 4, 5)')).AndReturn(mock_result) """ def __init__(self, search_string): """Initialize. Args: # search_string: the string you are searching for search_string: str """ self._search_string = search_string def equals(self, rhs): """Check to see if the search_string is contained in the rhs string. Args: # rhs: the right hand side of the test rhs: object Returns: bool """ try: return rhs.find(self._search_string) > -1 except Exception: return False def __repr__(self): return '<str containing \'%s\'>' % self._search_string class Regex(Comparator): """Checks if a string matches a regular expression. This uses a given regular expression to determine equality. """ def __init__(self, pattern, flags=0): """Initialize. Args: # pattern is the regular expression to search for pattern: str # flags passed to re.compile function as the second argument flags: int """ self.regex = re.compile(pattern, flags=flags) def equals(self, rhs): """Check to see if rhs matches regular expression pattern. Returns: bool """ return self.regex.search(rhs) is not None def __repr__(self): s = '<regular expression \'%s\'' % self.regex.pattern if self.regex.flags: s += ', flags=%d' % self.regex.flags s += '>' return s class In(Comparator): """Checks whether an item (or key) is in a list (or dict) parameter. Example: mock_dao.GetUsersInfo(In('expectedUserName')).AndReturn(mock_result) """ def __init__(self, key): """Initialize. Args: # key is any thing that could be in a list or a key in a dict """ self._key = key def equals(self, rhs): """Check to see whether key is in rhs. 
Args: rhs: dict Returns: bool """ return self._key in rhs def __repr__(self): return '<sequence or map containing \'%s\'>' % self._key class ContainsKeyValue(Comparator): """Checks whether a key/value pair is in a dict parameter. Example: mock_dao.UpdateUsers(ContainsKeyValue('stevepm', stevepm_user_info)) """ def __init__(self, key, value): """Initialize. Args: # key: a key in a dict # value: the corresponding value """ self._key = key self._value = value def equals(self, rhs): """Check whether the given key/value pair is in the rhs dict. Returns: bool """ try: return rhs[self._key] == self._value except Exception: return False def __repr__(self): return '<map containing the entry \'%s: %s\'>' % (self._key, self._value) class SameElementsAs(Comparator): """Checks whether iterables contain the same elements (ignoring order). Example: mock_dao.ProcessUsers(SameElementsAs('stevepm', 'salomaki')) """ def __init__(self, expected_seq): """Initialize. Args: expected_seq: a sequence """ self._expected_seq = expected_seq def equals(self, actual_seq): """Check to see whether actual_seq has same elements as expected_seq. Args: actual_seq: sequence Returns: bool """ try: expected = dict([(element, None) for element in self._expected_seq]) actual = dict([(element, None) for element in actual_seq]) except TypeError: # Fall back to slower list-compare if any of the objects are unhashable. expected = list(self._expected_seq) actual = list(actual_seq) expected.sort() actual.sort() return expected == actual def __repr__(self): return '<sequence with same elements as \'%s\'>' % self._expected_seq class And(Comparator): """Evaluates one or more Comparators on RHS and returns an AND of the results. """ def __init__(self, *args): """Initialize. Args: *args: One or more Comparator """ self._comparators = args def equals(self, rhs): """Checks whether all Comparators are equal to rhs. 
Args: # rhs: can be anything Returns: bool """ for comparator in self._comparators: if not comparator.equals(rhs): return False return True def __repr__(self): return '<AND %s>' % str(self._comparators) class Or(Comparator): """Evaluates one or more Comparators on RHS and returns an OR of the results. """ def __init__(self, *args): """Initialize. Args: *args: One or more Mox comparators """ self._comparators = args def equals(self, rhs): """Checks whether any Comparator is equal to rhs. Args: # rhs: can be anything Returns: bool """ for comparator in self._comparators: if comparator.equals(rhs): return True return False def __repr__(self): return '<OR %s>' % str(self._comparators) class Func(Comparator): """Call a function that should verify the parameter passed in is correct. You may need the ability to perform more advanced operations on the parameter in order to validate it. You can use this to have a callable validate any parameter. The callable should return either True or False. Example: def myParamValidator(param): # Advanced logic here return True mock_dao.DoSomething(Func(myParamValidator), true) """ def __init__(self, func): """Initialize. Args: func: callable that takes one parameter and returns a bool """ self._func = func def equals(self, rhs): """Test whether rhs passes the function test. rhs is passed into func. Args: rhs: any python object Returns: the result of func(rhs) """ return self._func(rhs) def __repr__(self): return str(self._func) class IgnoreArg(Comparator): """Ignore an argument. This can be used when we don't care about an argument of a method call. Example: # Check if CastMagic is called with 3 as first arg and 'disappear' as third. mymock.CastMagic(3, IgnoreArg(), 'disappear') """ def equals(self, unused_rhs): """Ignores arguments and returns True. 
Args: unused_rhs: any python object Returns: always returns True """ return True def __repr__(self): return '<IgnoreArg>' class MethodGroup(object): """Base class containing common behaviour for MethodGroups.""" def __init__(self, group_name): self._group_name = group_name def group_name(self): return self._group_name def __str__(self): return '<%s "%s">' % (self.__class__.__name__, self._group_name) def AddMethod(self, mock_method): raise NotImplementedError def MethodCalled(self, mock_method): raise NotImplementedError def IsSatisfied(self): raise NotImplementedError class UnorderedGroup(MethodGroup): """UnorderedGroup holds a set of method calls that may occur in any order. This construct is helpful for non-deterministic events, such as iterating over the keys of a dict. """ def __init__(self, group_name): super(UnorderedGroup, self).__init__(group_name) self._methods = [] def AddMethod(self, mock_method): """Add a method to this group. Args: mock_method: A mock method to be added to this group. """ self._methods.append(mock_method) def MethodCalled(self, mock_method): """Remove a method call from the group. If the method is not in the set, an UnexpectedMethodCallError will be raised. Args: mock_method: a mock method that should be equal to a method in the group. Returns: The mock method from the group Raises: UnexpectedMethodCallError if the mock_method was not in the group. """ # Check to see if this method exists, and if so, remove it from the set # and return it. for method in self._methods: if method == mock_method: # Remove the called mock_method instead of the method in the group. # The called method will match any comparators when equality is checked # during removal. The method in the group could pass a comparator to # another comparator during the equality check. self._methods.remove(mock_method) # If this group is not empty, put it back at the head of the queue. 
if not self.IsSatisfied(): mock_method._call_queue.appendleft(self) return self, method raise UnexpectedMethodCallError(mock_method, self) def IsSatisfied(self): """Return True if there are not any methods in this group.""" return len(self._methods) == 0 class MultipleTimesGroup(MethodGroup): """MultipleTimesGroup holds methods that may be called any number of times. Note: Each method must be called at least once. This is helpful, if you don't know or care how many times a method is called. """ def __init__(self, group_name): super(MultipleTimesGroup, self).__init__(group_name) self._methods = set() self._methods_called = set() def AddMethod(self, mock_method): """Add a method to this group. Args: mock_method: A mock method to be added to this group. """ self._methods.add(mock_method) def MethodCalled(self, mock_method): """Remove a method call from the group. If the method is not in the set, an UnexpectedMethodCallError will be raised. Args: mock_method: a mock method that should be equal to a method in the group. Returns: The mock method from the group Raises: UnexpectedMethodCallError if the mock_method was not in the group. """ # Check to see if this method exists, and if so add it to the set of # called methods. for method in self._methods: if method == mock_method: self._methods_called.add(mock_method) # Always put this group back on top of the queue, because we don't know # when we are done. mock_method._call_queue.appendleft(self) return self, method if self.IsSatisfied(): next_method = mock_method._PopNextMethod(); return next_method, None else: raise UnexpectedMethodCallError(mock_method, self) def IsSatisfied(self): """Return True if all methods in this group are called at least once.""" # NOTE(psycho): We can't use the simple set difference here because we want # to match different parameters which are considered the same e.g. IsA(str) # and some string. This solution is O(n^2) but n should be small. 
tmp = self._methods.copy() for called in self._methods_called: for expected in tmp: if called == expected: tmp.remove(expected) if not tmp: return True break return False class MoxMetaTestBase(type): """Metaclass to add mox cleanup and verification to every test. As the mox unit testing class is being constructed (MoxTestBase or a subclass), this metaclass will modify all test functions to call the CleanUpMox method of the test class after they finish. This means that unstubbing and verifying will happen for every test with no additional code, and any failures will result in test failures as opposed to errors. """ def __init__(cls, name, bases, d): type.__init__(cls, name, bases, d) # also get all the attributes from the base classes to account # for a case when test class is not the immediate child of MoxTestBase for base in bases: for attr_name in dir(base): d[attr_name] = getattr(base, attr_name) for func_name, func in d.items(): if func_name.startswith('test') and callable(func): setattr(cls, func_name, MoxMetaTestBase.CleanUpTest(cls, func)) @staticmethod def CleanUpTest(cls, func): """Adds Mox cleanup code to any MoxTestBase method. Always unsets stubs after a test. Will verify all mocks for tests that otherwise pass. Args: cls: MoxTestBase or subclass; the class whose test method we are altering. func: method; the method of the MoxTestBase test class we wish to alter. Returns: The modified method. """ def new_method(self, *args, **kwargs): mox_obj = getattr(self, 'mox', None) cleanup_mox = False if mox_obj and isinstance(mox_obj, Mox): cleanup_mox = True try: func(self, *args, **kwargs) finally: if cleanup_mox: mox_obj.UnsetStubs() if cleanup_mox: mox_obj.VerifyAll() new_method.__name__ = func.__name__ new_method.__doc__ = func.__doc__ new_method.__module__ = func.__module__ return new_method class MoxTestBase(unittest.TestCase): """Convenience test class to make stubbing easier. 
Sets up a "mox" attribute which is an instance of Mox - any mox tests will want this. Also automatically unsets any stubs and verifies that all mock methods have been called at the end of each test, eliminating boilerplate code. """ __metaclass__ = MoxMetaTestBase def setUp(self): self.mox = Mox()
bsd-3-clause
justin-ho/passwd-mng
pycrypto-2.6.1/pycrypto-2.6.1/lib/Crypto/SelfTest/PublicKey/test_DSA.py
118
9861
# -*- coding: utf-8 -*-
#
#  SelfTest/PublicKey/test_DSA.py: Self-test for the DSA primitive
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain.  To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================

"""Self-test suite for Crypto.PublicKey.DSA"""

__revision__ = "$Id$"

import sys
import os
# Compatibility shims: py21compat only on Python 2.1, py3compat always
# (provides the b() byte-literal helper used below).
if sys.version_info[0] == 2 and sys.version_info[1] == 1:
    from Crypto.Util.py21compat import *
from Crypto.Util.py3compat import *

import unittest
from Crypto.SelfTest.st_common import list_test_cases, a2b_hex, b2a_hex


def _sws(s):
    """Remove whitespace from a text or byte string.

    Used to make the multi-line hex test vectors below readable while
    still producing a contiguous hex string.
    """
    if isinstance(s,str):
        return "".join(s.split())
    else:
        return b("").join(s.split())


class DSATest(unittest.TestCase):
    # Test vector from "Appendix 5. Example of the DSA" of
    # "Digital Signature Standard (DSS)",
    # U.S. Department of Commerce/National Institute of Standards and Technology
    # FIPS 186-2 (+Change Notice), 2000 January 27.
    # http://csrc.nist.gov/publications/fips/fips186-2/fips186-2-change1.pdf

    # y = g**x mod p (asserted by _check_private_key below)
    y = _sws("""19131871 d75b1612 a819f29d 78d1b0d7 346f7aa7 7bb62a85
                9bfd6c56 75da9d21 2d3a36ef 1672ef66 0b8c7c25 5cc0ec74
                858fba33 f44c0669 9630a76b 030ee333""")

    g = _sws("""626d0278 39ea0a13 413163a5 5b4cb500 299d5522 956cefcb
                3bff10f3 99ce2c2e 71cb9de5 fa24babf 58e5b795 21925c9c
                c42e9f6f 464b088c c572af53 e6d78802""")

    p = _sws("""8df2a494 492276aa 3d25759b b06869cb eac0d83a fb8d0cf7
                cbb8324f 0d7882e5 d0762fc5 b7210eaf c2e9adac 32ab7aac
                49693dfb f83724c2 ec0736ee 31c80291""")

    q = _sws("""c773218c 737ec8ee 993b4f2d ed30f48e dace915f""")

    x = _sws("""2070b322 3dba372f de1c0ffc 7b2e3b49 8b260614""")

    # Per-signature nonce and its modular inverse from the same appendix.
    k = _sws("""358dad57 1462710f 50e254cf 1a376b2b deaadfbf""")
    k_inverse = _sws("""0d516729 8202e49b 4116ac10 4fc3f415 ae52f917""")

    # Message "abc", and its SHA-1 digest (hex).
    m = b2a_hex(b("abc"))
    m_hash = _sws("""a9993e36 4706816a ba3e2571 7850c26c 9cd0d89d""")

    # Expected (r, s) signature components for m_hash with nonce k.
    r = _sws("""8bac1ab6 6410435c b7181f95 b16ab97c 92b341c0""")
    s = _sws("""41e2345f 1f56df24 58f426d1 55b4ba2d b6dcd8c8""")

    def setUp(self):
        # Imports are deferred to setUp so that a broken Crypto build
        # fails the tests rather than the module import.
        global DSA, Random, bytes_to_long, size
        from Crypto.PublicKey import DSA
        from Crypto import Random
        from Crypto.Util.number import bytes_to_long, inverse, size

        self.dsa = DSA

    def test_generate_1arg(self):
        """DSA (default implementation) generated key (1 argument)"""
        dsaObj = self.dsa.generate(1024)
        self._check_private_key(dsaObj)
        pub = dsaObj.publickey()
        self._check_public_key(pub)

    def test_generate_2arg(self):
        """DSA (default implementation) generated key (2 arguments)"""
        dsaObj = self.dsa.generate(1024, Random.new().read)
        self._check_private_key(dsaObj)
        pub = dsaObj.publickey()
        self._check_public_key(pub)

    def test_construct_4tuple(self):
        """DSA (default implementation) constructed key (4-tuple)"""
        (y, g, p, q) = [bytes_to_long(a2b_hex(param)) for param in
                        (self.y, self.g, self.p, self.q)]
        dsaObj = self.dsa.construct((y, g, p, q))
        self._test_verification(dsaObj)

    def test_construct_5tuple(self):
        """DSA (default implementation) constructed key (5-tuple)"""
        (y, g, p, q, x) = [bytes_to_long(a2b_hex(param)) for param in
                           (self.y, self.g, self.p, self.q, self.x)]
        dsaObj = self.dsa.construct((y, g, p, q, x))
        self._test_signing(dsaObj)
        self._test_verification(dsaObj)

    def _check_private_key(self, dsaObj):
        # Check capabilities
        self.assertEqual(1, dsaObj.has_private())
        self.assertEqual(1, dsaObj.can_sign())
        self.assertEqual(0, dsaObj.can_encrypt())
        self.assertEqual(0, dsaObj.can_blind())

        # Check dsaObj.[ygpqx] -> dsaObj.key.[ygpqx] mapping
        self.assertEqual(dsaObj.y, dsaObj.key.y)
        self.assertEqual(dsaObj.g, dsaObj.key.g)
        self.assertEqual(dsaObj.p, dsaObj.key.p)
        self.assertEqual(dsaObj.q, dsaObj.key.q)
        self.assertEqual(dsaObj.x, dsaObj.key.x)

        # Sanity check key data
        self.assertEqual(1, dsaObj.p > dsaObj.q)    # p > q
        self.assertEqual(160, size(dsaObj.q))       # size(q) == 160 bits
        self.assertEqual(0, (dsaObj.p - 1) % dsaObj.q)  # q is a divisor of p-1
        self.assertEqual(dsaObj.y, pow(dsaObj.g, dsaObj.x, dsaObj.p))  # y == g**x mod p
        self.assertEqual(1, 0 < dsaObj.x < dsaObj.q)    # 0 < x < q

    def _check_public_key(self, dsaObj):
        k = a2b_hex(self.k)
        m_hash = a2b_hex(self.m_hash)

        # Check capabilities
        self.assertEqual(0, dsaObj.has_private())
        self.assertEqual(1, dsaObj.can_sign())
        self.assertEqual(0, dsaObj.can_encrypt())
        self.assertEqual(0, dsaObj.can_blind())

        # Check dsaObj.[ygpq] -> dsaObj.key.[ygpq] mapping
        self.assertEqual(dsaObj.y, dsaObj.key.y)
        self.assertEqual(dsaObj.g, dsaObj.key.g)
        self.assertEqual(dsaObj.p, dsaObj.key.p)
        self.assertEqual(dsaObj.q, dsaObj.key.q)

        # Check that private parameters are all missing
        self.assertEqual(0, hasattr(dsaObj, 'x'))
        self.assertEqual(0, hasattr(dsaObj.key, 'x'))

        # Sanity check key data
        self.assertEqual(1, dsaObj.p > dsaObj.q)    # p > q
        self.assertEqual(160, size(dsaObj.q))       # size(q) == 160 bits
        self.assertEqual(0, (dsaObj.p - 1) % dsaObj.q)  # q is a divisor of p-1

        # Public-only key objects should raise an error when .sign() is called
        self.assertRaises(TypeError, dsaObj.sign, m_hash, k)

        # Check __eq__ and __ne__
        self.assertEqual(dsaObj.publickey() == dsaObj.publickey(),True) # assert_
        self.assertEqual(dsaObj.publickey() != dsaObj.publickey(),False) # failIf

    def _test_signing(self, dsaObj):
        # Sign the known digest with the known nonce and compare against
        # the FIPS 186-2 expected (r, s) pair.
        k = a2b_hex(self.k)
        m_hash = a2b_hex(self.m_hash)
        r = bytes_to_long(a2b_hex(self.r))
        s = bytes_to_long(a2b_hex(self.s))
        (r_out, s_out) = dsaObj.sign(m_hash, k)
        self.assertEqual((r, s), (r_out, s_out))

    def _test_verification(self, dsaObj):
        # The reference signature must verify; a corrupted digest must not.
        m_hash = a2b_hex(self.m_hash)
        r = bytes_to_long(a2b_hex(self.r))
        s = bytes_to_long(a2b_hex(self.s))
        self.assertEqual(1, dsaObj.verify(m_hash, (r, s)))
        self.assertEqual(0, dsaObj.verify(m_hash + b("\0"), (r, s)))


class DSAFastMathTest(DSATest):
    # Same tests as DSATest, but forced onto the _fastmath (gmp/mpir) backend.
    def setUp(self):
        DSATest.setUp(self)
        self.dsa = DSA.DSAImplementation(use_fast_math=True)

    def test_generate_1arg(self):
        """DSA (_fastmath implementation) generated key (1 argument)"""
        DSATest.test_generate_1arg(self)

    def test_generate_2arg(self):
        """DSA (_fastmath implementation) generated key (2 arguments)"""
        DSATest.test_generate_2arg(self)

    def test_construct_4tuple(self):
        """DSA (_fastmath implementation) constructed key (4-tuple)"""
        DSATest.test_construct_4tuple(self)

    def test_construct_5tuple(self):
        """DSA (_fastmath implementation) constructed key (5-tuple)"""
        DSATest.test_construct_5tuple(self)


class DSASlowMathTest(DSATest):
    # Same tests as DSATest, but forced onto the pure-Python backend.
    def setUp(self):
        DSATest.setUp(self)
        self.dsa = DSA.DSAImplementation(use_fast_math=False)

    def test_generate_1arg(self):
        """DSA (_slowmath implementation) generated key (1 argument)"""
        DSATest.test_generate_1arg(self)

    def test_generate_2arg(self):
        """DSA (_slowmath implementation) generated key (2 arguments)"""
        DSATest.test_generate_2arg(self)

    def test_construct_4tuple(self):
        """DSA (_slowmath implementation) constructed key (4-tuple)"""
        DSATest.test_construct_4tuple(self)

    def test_construct_5tuple(self):
        """DSA (_slowmath implementation) constructed key (5-tuple)"""
        DSATest.test_construct_5tuple(self)


def get_tests(config={}):
    tests = []
    tests += list_test_cases(DSATest)
    try:
        from Crypto.PublicKey import _fastmath
        tests += list_test_cases(DSAFastMathTest)
    except ImportError:
        # Distinguish "_fastmath was never built" (fine, skip its tests)
        # from "_fastmath exists but failed to import" (a real error,
        # usually a missing gmp/mpir shared library).
        from distutils.sysconfig import get_config_var
        import inspect
        _fm_path = os.path.normpath(os.path.dirname(os.path.abspath(
            inspect.getfile(inspect.currentframe())))
            +"/../../PublicKey/_fastmath"+get_config_var("SO"))
        if os.path.exists(_fm_path):
            raise ImportError("While the _fastmath module exists, importing "+
                "it failed. This may point to the gmp or mpir shared library "+
                "not being in the path. _fastmath was found at "+_fm_path)
    tests += list_test_cases(DSASlowMathTest)
    return tests

if __name__ == '__main__':
    suite = lambda: unittest.TestSuite(get_tests())
    unittest.main(defaultTest='suite')

# vim:set ts=4 sw=4 sts=4 expandtab:
gpl-3.0
mrquim/repository.mrquim
repo/script.module.xbmcutil/lib/xbmcutil/viewModes.py
5
3683
""" ###################### xbmcutil.viewModes ###################### Copyright: (c) 2013 William Forde (willforde+kodi@gmail.com) License: GPLv3, see LICENSE for more details This file is part of xbmcutil xbmcutil is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. xbmcutil is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ # Call Custom xbmcutil Module from xbmcutil import plugin, storageDB class Selector(object): # Fetch Current Skin ID skin = plugin.xbmc.getSkinDir() viewModes = {} # Main Initializer def __init__(self, mode): # Create List for display showList = [plugin.getstr(571)] self.mode = mode # Load in Skin Codes from Database jsonData = storageDB.SkinCodes() if self.skin in jsonData: # Fetch viewmodes for selected mode self.viewModes = self.filterCodes(jsonData[self.skin], mode) # Append each key of viewModes to show list for i in sorted(self.viewModes.keys()): showList.append(i) # Fetch Current Mode if set and Show to user under Custom Mode ID self.currentMode = currentMode = plugin.getSetting("%s.%s.view" % (self.skin, mode)) if currentMode: showList.append("%s (%s)" % (plugin.getstr(636), currentMode)) else: showList.append(plugin.getstr(636)) # Display List self.display(showList) def filterCodes(self, skinCodes, mode): filterList = {} if mode in skinCodes: self.filterModes(filterList, skinCodes[mode]) if "both" in skinCodes: self.filterModes(filterList, skinCodes["both"]) return filterList def filterModes(self, filterd, modes): # Loop each view and assign to filterd list for 
view in modes: # Fetch Localized String key = plugin.getuni(view["id"]) if view["id"] is not None else "" # If strcomb exists then combine the localized tring to it if "strextra" in view: key = "%s %s" % (key, view["strextra"]) # Assign Modes to Dict filterd[key.strip()] = view["mode"] def display(self, showList): # Bold the already selected view mode orgList = showList[:] if self.currentMode and len(showList) > 2: # Convert current viewmode to an interger currentMode = int(self.currentMode) for key, value in self.viewModes.iteritems(): # Check if current mode is found in viewModes if currentMode == value: # When found find its position in the list for count, i in enumerate(showList): # Check for required key if key == i: # Wen found, Bold and Indent the value showList[count] = "[B]-%s[/B]" % showList[count] break break # Display List to User ret = plugin.dialogSelect(plugin.getAddonData(self.skin, "name"), showList) if ret >= 0: # Take action depending on response response = orgList[ret] if response.startswith(plugin.getstr(636)): self.askForViewID() elif response == plugin.getstr(571): plugin.setSetting("%s.%s.view" % (self.skin, self.mode), "") else: plugin.setSetting("%s.%s.view" % (self.skin, self.mode), str(self.viewModes[str(response)])) def askForViewID(self): # Display Numeric Dialog ret = plugin.dialogNumeric(0, plugin.getstr(611), self.currentMode) if ret: plugin.setSetting("%s.%s.view" % (self.skin, self.mode), str(ret))
gpl-2.0
tangfeixiong/nova
nova/virt/hyperv/ioutils.py
48
2597
# Copyright 2014 Cloudbase Solutions Srl
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import errno
import os

from eventlet import patcher
from oslo_log import log as logging

from nova.i18n import _LE

LOG = logging.getLogger(__name__)

# Use the real (non-monkeypatched) threading module so this worker runs as
# an OS thread rather than an eventlet greenthread.
native_threading = patcher.original('threading')


class IOThread(native_threading.Thread):
    """Daemon thread that copies bytes from a source pipe/file to a log
    file, rotating the log to "<dest>.1" once it reaches max_bytes.
    """

    def __init__(self, src, dest, max_bytes):
        # src: path of the source to read from (here, a vm console pipe)
        # dest: path of the log file to append to
        # max_bytes: rotation threshold for the log file
        super(IOThread, self).__init__()
        self.setDaemon(True)
        self._src = src
        self._dest = dest
        self._dest_archive = dest + '.1'
        self._max_bytes = max_bytes
        # Set either when the copy loop should stop or after it has failed.
        self._stopped = native_threading.Event()

    def run(self):
        try:
            self._copy()
        except IOError as err:
            # Mark the worker as stopped before deciding whether the error
            # is worth logging.
            self._stopped.set()
            # Invalid argument error means that the vm console pipe was closed,
            # probably the vm was stopped. The worker can stop it's execution.
            if err.errno != errno.EINVAL:
                LOG.error(_LE("Error writing vm console log file from "
                              "serial console pipe. Error: %s") % err)

    def _copy(self):
        """Copy loop: append src bytes to dest, rotating at max_bytes.

        The dest handle is opened unbuffered ('ab', 0); on rotation it is
        closed, the file renamed to the archive, and the handle rebound to
        a fresh file.  NOTE(review): the rebound handle escapes the `with`
        manager, and an empty read() (EOF on src) keeps the loop spinning —
        both look intentional for a console pipe, but confirm before reuse.
        """
        with open(self._src, 'rb') as src:
            with open(self._dest, 'ab', 0) as dest:
                # Start counting from the existing log size so rotation
                # thresholds survive restarts.
                dest.seek(0, os.SEEK_END)
                log_size = dest.tell()
                while (not self._stopped.isSet()):
                    # Read one byte at a time to avoid blocking.
                    data = src.read(1)
                    dest.write(data)
                    log_size += len(data)
                    if (log_size >= self._max_bytes):
                        # Rotate: replace any previous archive, move the
                        # current log into its place, then reopen fresh.
                        dest.close()
                        if os.path.exists(self._dest_archive):
                            os.remove(self._dest_archive)
                        os.rename(self._dest, self._dest_archive)
                        dest = open(self._dest, 'ab', 0)
                        log_size = 0

    def join(self):
        # Ask the copy loop to stop before waiting for the thread to exit.
        self._stopped.set()
        super(IOThread, self).join()

    def is_active(self):
        # True while the copy loop is still expected to be running.
        return not self._stopped.isSet()
apache-2.0
kjw0106/boto
boto/s3/acl.py
136
5716
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.

from boto.s3.user import User

CannedACLStrings = ['private', 'public-read',
                    'public-read-write', 'authenticated-read',
                    'bucket-owner-read', 'bucket-owner-full-control',
                    'log-delivery-write']


class Policy(object):
    """An S3 access-control policy: an owner plus an AccessControlList.

    Doubles as a SAX-style handler: startElement/endElement are driven by
    boto's XML parser while a GetObjectAcl-style response is consumed.
    """

    def __init__(self, parent=None):
        self.parent = parent
        self.namespace = None
        self.acl = None

    def __repr__(self):
        descriptions = []
        for grant in self.acl.grants:
            if grant.id == self.owner.id:
                label = "%s (owner)" % grant.display_name
            elif grant.type == 'CanonicalUser':
                label = grant.display_name
            elif grant.type == 'Group':
                label = grant.uri
            else:
                label = grant.email_address
            descriptions.append("%s = %s" % (label, grant.permission))
        return "<Policy: %s>" % ", ".join(descriptions)

    def startElement(self, name, attrs, connection):
        # Delegate parsing of the child elements to dedicated objects:
        # the returned object receives subsequent SAX events.
        if name == 'AccessControlPolicy':
            self.namespace = attrs.get('xmlns', None)
            return None
        if name == 'Owner':
            self.owner = User(self)
            return self.owner
        if name == 'AccessControlList':
            self.acl = ACL(self)
            return self.acl
        return None

    def endElement(self, name, value, connection):
        # Owner/ACL were fully handled by their own objects; every other
        # element becomes a plain attribute on the policy.
        if name not in ('Owner', 'AccessControlList'):
            setattr(self, name, value)

    def to_xml(self):
        """Serialize the policy (owner + ACL) back to S3's XML form."""
        if self.namespace is None:
            opening = '<AccessControlPolicy>'
        else:
            opening = '<AccessControlPolicy xmlns="{0}">'.format(self.namespace)
        return ''.join([opening,
                        self.owner.to_xml(),
                        self.acl.to_xml(),
                        '</AccessControlPolicy>'])


class ACL(object):
    """An ordered collection of Grant objects."""

    def __init__(self, policy=None):
        self.policy = policy
        self.grants = []

    def add_grant(self, grant):
        """Append an already-constructed Grant."""
        self.grants.append(grant)

    def add_email_grant(self, permission, email_address):
        """Grant *permission* to the AWS account owning *email_address*."""
        self.add_grant(Grant(permission=permission,
                             type='AmazonCustomerByEmail',
                             email_address=email_address))

    def add_user_grant(self, permission, user_id, display_name=None):
        """Grant *permission* to the canonical user *user_id*."""
        self.add_grant(Grant(permission=permission, type='CanonicalUser',
                             id=user_id, display_name=display_name))

    def startElement(self, name, attrs, connection):
        if name != 'Grant':
            return None
        # A new Grant handles its own sub-elements; keep parser behavior
        # identical to the original (the ACL is passed positionally).
        grant = Grant(self)
        self.grants.append(grant)
        return grant

    def endElement(self, name, value, connection):
        if name != 'Grant':
            setattr(self, name, value)

    def to_xml(self):
        body = ''.join(grant.to_xml() for grant in self.grants)
        return '<AccessControlList>%s</AccessControlList>' % body


class Grant(object):
    """A single ACL entry: a grantee (user, group, or e-mail) plus a
    permission string.
    """

    # xsi namespace declaration emitted on every <Grantee> element.
    NameSpace = 'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"'

    def __init__(self, permission=None, type=None, id=None,
                 display_name=None, uri=None, email_address=None):
        self.permission = permission
        self.id = id
        self.display_name = display_name
        self.uri = uri
        self.email_address = email_address
        self.type = type

    def startElement(self, name, attrs, connection):
        if name == 'Grantee':
            self.type = attrs['xsi:type']
        return None

    def endElement(self, name, value, connection):
        # Map known XML element names onto our attribute names; anything
        # unrecognized (other than the Grantee wrapper) is stored as-is.
        attr = {'ID': 'id',
                'DisplayName': 'display_name',
                'URI': 'uri',
                'EmailAddress': 'email_address',
                'Permission': 'permission'}.get(name)
        if attr is not None:
            setattr(self, attr, value)
        elif name != 'Grantee':
            setattr(self, name, value)

    def to_xml(self):
        pieces = ['<Grant>',
                  '<Grantee %s xsi:type="%s">' % (self.NameSpace, self.type)]
        if self.type == 'CanonicalUser':
            pieces.append('<ID>%s</ID>' % self.id)
            pieces.append('<DisplayName>%s</DisplayName>' % self.display_name)
        elif self.type == 'Group':
            pieces.append('<URI>%s</URI>' % self.uri)
        else:
            pieces.append('<EmailAddress>%s</EmailAddress>' % self.email_address)
        pieces.append('</Grantee>')
        pieces.append('<Permission>%s</Permission>' % self.permission)
        pieces.append('</Grant>')
        return ''.join(pieces)
mit
mnuthan1/workflow
lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/packages/ssl_match_hostname/_implementation.py
2360
3778
"""The match_hostname() function from Python 3.3.3, essential when using SSL.""" # Note: This file is under the PSF license as the code comes from the python # stdlib. http://docs.python.org/3/license.html import re __version__ = '3.4.0.2' class CertificateError(ValueError): pass def _dnsname_match(dn, hostname, max_wildcards=1): """Matching according to RFC 6125, section 6.4.3 http://tools.ietf.org/html/rfc6125#section-6.4.3 """ pats = [] if not dn: return False # Ported from python3-syntax: # leftmost, *remainder = dn.split(r'.') parts = dn.split(r'.') leftmost = parts[0] remainder = parts[1:] wildcards = leftmost.count('*') if wildcards > max_wildcards: # Issue #17980: avoid denials of service by refusing more # than one wildcard per fragment. A survey of established # policy among SSL implementations showed it to be a # reasonable choice. raise CertificateError( "too many wildcards in certificate DNS name: " + repr(dn)) # speed up common case w/o wildcards if not wildcards: return dn.lower() == hostname.lower() # RFC 6125, section 6.4.3, subitem 1. # The client SHOULD NOT attempt to match a presented identifier in which # the wildcard character comprises a label other than the left-most label. if leftmost == '*': # When '*' is a fragment by itself, it matches a non-empty dotless # fragment. pats.append('[^.]+') elif leftmost.startswith('xn--') or hostname.startswith('xn--'): # RFC 6125, section 6.4.3, subitem 3. # The client SHOULD NOT attempt to match a presented identifier # where the wildcard character is embedded within an A-label or # U-label of an internationalized domain name. pats.append(re.escape(leftmost)) else: # Otherwise, '*' matches any dotless string, e.g. 
www* pats.append(re.escape(leftmost).replace(r'\*', '[^.]*')) # add the remaining fragments, ignore any wildcards for frag in remainder: pats.append(re.escape(frag)) pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE) return pat.match(hostname) def match_hostname(cert, hostname): """Verify that *cert* (in decoded format as returned by SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125 rules are followed, but IP addresses are not accepted for *hostname*. CertificateError is raised on failure. On success, the function returns nothing. """ if not cert: raise ValueError("empty or no certificate") dnsnames = [] san = cert.get('subjectAltName', ()) for key, value in san: if key == 'DNS': if _dnsname_match(value, hostname): return dnsnames.append(value) if not dnsnames: # The subject is only checked when there is no dNSName entry # in subjectAltName for sub in cert.get('subject', ()): for key, value in sub: # XXX according to RFC 2818, the most specific Common Name # must be used. if key == 'commonName': if _dnsname_match(value, hostname): return dnsnames.append(value) if len(dnsnames) > 1: raise CertificateError("hostname %r " "doesn't match either of %s" % (hostname, ', '.join(map(repr, dnsnames)))) elif len(dnsnames) == 1: raise CertificateError("hostname %r " "doesn't match %r" % (hostname, dnsnames[0])) else: raise CertificateError("no appropriate commonName or " "subjectAltName fields were found")
apache-2.0
daigofuji/jekyll-foundation-5-starter
node_modules/node-sass/node_modules/node-gyp/gyp/tools/pretty_sln.py
1831
5099
#!/usr/bin/env python

# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Prints the information in a sln file in a diffable way.

   It first outputs each projects in alphabetical order with their
   dependencies.

   Then it outputs a possible build order.
"""

__author__ = 'nsylvain (Nicolas Sylvain)'

import os
import re
import sys
import pretty_vcproj


def BuildProject(project, built, projects, deps):
  # if all dependencies are done, we can build it, otherwise we try to build the
  # dependency.
  # This is not infinite-recursion proof.
  for dep in deps[project]:
    if dep not in built:
      BuildProject(dep, built, projects, deps)
  print project
  built.append(project)


def ParseSolution(solution_file):
  """Parse a Visual Studio .sln file.

  Returns (projects, dependencies) where projects maps project name ->
  [path-without-_gyp, clsid, original-path] and dependencies maps
  project name -> sorted list of dependency project names.
  """
  # All projects, their clsid and paths.
  projects = dict()

  # A list of dependencies associated with a project.
  dependencies = dict()

  # Regular expressions that matches the SLN format.
  # The first line of a project definition.
  begin_project = re.compile(r'^Project\("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942'
                             r'}"\) = "(.*)", "(.*)", "(.*)"$')
  # The last line of a project definition.
  end_project = re.compile('^EndProject$')
  # The first line of a dependency list.
  begin_dep = re.compile(
      r'ProjectSection\(ProjectDependencies\) = postProject$')
  # The last line of a dependency list.
  end_dep = re.compile('EndProjectSection$')
  # A line describing a dependency.
  dep_line = re.compile(' *({.*}) = ({.*})$')

  in_deps = False
  solution = open(solution_file)
  for line in solution:
    results = begin_project.search(line)
    if results:
      # Hack to remove icu because the diff is too different.
      if results.group(1).find('icu') != -1:
        # NOTE(review): skipping an icu project leaves current_project at
        # its previous value (or unbound on the very first project), so a
        # following dependency section could attach to the wrong project.
        continue
      # We remove "_gyp" from the names because it helps to diff them.
      current_project = results.group(1).replace('_gyp', '')
      projects[current_project] = [results.group(2).replace('_gyp', ''),
                                   results.group(3),
                                   results.group(2)]
      dependencies[current_project] = []
      continue

    results = end_project.search(line)
    if results:
      current_project = None
      continue

    results = begin_dep.search(line)
    if results:
      in_deps = True
      continue

    results = end_dep.search(line)
    if results:
      in_deps = False
      continue

    results = dep_line.search(line)
    if results and in_deps and current_project:
      dependencies[current_project].append(results.group(1))
      continue

  # Change all dependencies clsid to name instead.
  for project in dependencies:
    # For each dependencies in this project
    new_dep_array = []
    for dep in dependencies[project]:
      # Look for the project name matching this cldis
      for project_info in projects:
        if projects[project_info][1] == dep:
          new_dep_array.append(project_info)
    dependencies[project] = sorted(new_dep_array)

  return (projects, dependencies)


def PrintDependencies(projects, deps):
  # Alphabetical listing of each project with its path and dependencies.
  print "---------------------------------------"
  print "Dependencies for all projects"
  print "---------------------------------------"
  print "--                                   --"

  for (project, dep_list) in sorted(deps.items()):
    print "Project : %s" % project
    print "Path : %s" % projects[project][0]
    if dep_list:
      for dep in dep_list:
        print "  - %s" % dep
    print ""

  print "--                                   --"


def PrintBuildOrder(projects, deps):
  # Emit one valid build order (dependencies before dependents).
  print "---------------------------------------"
  print "Build order                            "
  print "---------------------------------------"
  print "--                                   --"

  built = []
  for (project, _) in sorted(deps.items()):
    if project not in built:
      BuildProject(project, built, projects, deps)

  print "--                                   --"


def PrintVCProj(projects):
  # Dump every project's vcproj through pretty_vcproj for diffing.
  for project in projects:
    print "-------------------------------------"
    print "-------------------------------------"
    print project
    print project
    print project
    print "-------------------------------------"
    print "-------------------------------------"

    project_path = os.path.abspath(os.path.join(os.path.dirname(sys.argv[1]),
                                                projects[project][2]))

    pretty = pretty_vcproj
    argv = [ '',
             project_path,
             '$(SolutionDir)=%s\\' % os.path.dirname(sys.argv[1]),
           ]
    argv.extend(sys.argv[3:])
    pretty.main(argv)


def main():
  # check if we have exactly 1 parameter.
  if len(sys.argv) < 2:
    print 'Usage: %s "c:\\path\\to\\project.sln"' % sys.argv[0]
    return 1

  (projects, deps) = ParseSolution(sys.argv[1])
  PrintDependencies(projects, deps)
  PrintBuildOrder(projects, deps)

  if '--recursive' in sys.argv:
    PrintVCProj(projects)
  return 0


if __name__ == '__main__':
  sys.exit(main())
mit
denisff/python-for-android
python-modules/pybluez/examples/simple/rfcomm-client.py
67
1148
# file: rfcomm-client.py # auth: Albert Huang <albert@csail.mit.edu> # desc: simple demonstration of a client application that uses RFCOMM sockets # intended for use with rfcomm-server # # $Id: rfcomm-client.py 424 2006-08-24 03:35:54Z albert $ from bluetooth import * import sys addr = None if len(sys.argv) < 2: print "no device specified. Searching all nearby bluetooth devices for" print "the SampleServer service" else: addr = sys.argv[1] print "Searching for SampleServer on %s" % addr # search for the SampleServer service uuid = "94f39d29-7d6d-437d-973b-fba39e49d4ee" service_matches = find_service( uuid = uuid, address = addr ) if len(service_matches) == 0: print "couldn't find the SampleServer service =(" sys.exit(0) first_match = service_matches[0] port = first_match["port"] name = first_match["name"] host = first_match["host"] print "connecting to \"%s\" on %s" % (name, host) # Create the client socket sock=BluetoothSocket( RFCOMM ) sock.connect((host, port)) print "connected. type stuff" while True: data = raw_input() if len(data) == 0: break sock.send(data) sock.close()
apache-2.0
DavidCorn/LeagueRank
code/top_champion.py
1
7062
import csv import json import os import urllib2 from RiotCrawler import get_tier from config import config class TopChampion: FIELD_NAMES = ['totalSessionsPlayed', 'totalSessionsLost', 'totalSessionsWon', 'totalChampionKills', 'totalDamageDealt', 'totalDamageTaken', 'mostChampionKillsPerSession', 'totalMinionKills', 'totalDoubleKills', 'totalTripleKills', 'totalQuadraKills', 'totalPentaKills', 'totalUnrealKills', 'totalDeathsPerSession', 'totalGoldEarned', 'mostSpellsCast', 'totalTurretsKilled', 'totalPhysicalDamageDealt', 'totalMagicDamageDealt', 'totalFirstBlood', 'totalAssists', 'maxChampionsKilled', 'maxNumDeaths', 'label'] def __init__(self, key, player_id, label, n): self.label = label self.player_id = player_id self.key = key self.n = n self.top_champions = [] pass def get_top_champions(self): self.top_champions[:] = [] data = urllib2.urlopen( 'https://na.api.pvp.net/api/lol/na/v1.3/stats/by-summoner/' + self.player_id + '/ranked?season=SEASON2016&api_key=' + self.key ).read() json_data = json.loads(data) champions = json_data['champions'] champion_stats = [] for champion in champions: champion_stat = champion['stats'] champion_stat['id'] = champion['id'] champion_stat['label'] = self.label champion_stats.append(champion_stat) pass self.top_champions = sorted(champion_stats, key=lambda x: x['totalSessionsPlayed'], reverse=True)[1:self.n + 1] return self.top_champions pass def save_top_champions(self): for champion in self.top_champions: file_name = '../data/{}.csv'.format(champion['id']) if os.path.isfile(file_name): with open(file_name, 'a') as csvfile: writer = csv.DictWriter(csvfile, fieldnames=self.FIELD_NAMES) writer.writerow( { 'totalSessionsPlayed': champion['totalSessionsPlayed'], 'totalSessionsLost': champion['totalSessionsLost'], 'totalSessionsWon': champion['totalSessionsWon'], 'totalChampionKills': champion['totalChampionKills'], 'totalDamageDealt': champion['totalDamageDealt'], 'totalDamageTaken': champion['totalDamageTaken'], 
'mostChampionKillsPerSession': champion['mostChampionKillsPerSession'], 'totalMinionKills': champion['totalMinionKills'], 'totalDoubleKills': champion['totalDoubleKills'], 'totalTripleKills': champion['totalTripleKills'], 'totalQuadraKills': champion['totalQuadraKills'], 'totalPentaKills': champion['totalPentaKills'], 'totalUnrealKills': champion['totalUnrealKills'], 'totalDeathsPerSession': champion['totalDeathsPerSession'], 'totalGoldEarned': champion['totalGoldEarned'], 'mostSpellsCast': champion['mostSpellsCast'], 'totalTurretsKilled': champion['totalTurretsKilled'], 'totalPhysicalDamageDealt': champion['totalPhysicalDamageDealt'], 'totalMagicDamageDealt': champion['totalMagicDamageDealt'], 'totalFirstBlood': champion['totalFirstBlood'], 'totalAssists': champion['totalAssists'], 'maxChampionsKilled': champion['maxChampionsKilled'], 'maxNumDeaths': champion['maxNumDeaths'], 'label': champion['label'] } ) pass pass else: with open(file_name, 'w') as csvfile: writer = csv.DictWriter(csvfile, fieldnames=self.FIELD_NAMES) writer.writeheader() writer.writerow( { 'totalSessionsPlayed': champion['totalSessionsPlayed'], 'totalSessionsLost': champion['totalSessionsLost'], 'totalSessionsWon': champion['totalSessionsWon'], 'totalChampionKills': champion['totalChampionKills'], 'totalDamageDealt': champion['totalDamageDealt'], 'totalDamageTaken': champion['totalDamageTaken'], 'mostChampionKillsPerSession': champion['mostChampionKillsPerSession'], 'totalMinionKills': champion['totalMinionKills'], 'totalDoubleKills': champion['totalDoubleKills'], 'totalTripleKills': champion['totalTripleKills'], 'totalQuadraKills': champion['totalQuadraKills'], 'totalPentaKills': champion['totalPentaKills'], 'totalUnrealKills': champion['totalUnrealKills'], 'totalDeathsPerSession': champion['totalDeathsPerSession'], 'totalGoldEarned': champion['totalGoldEarned'], 'mostSpellsCast': champion['mostSpellsCast'], 'totalTurretsKilled': champion['totalTurretsKilled'], 'totalPhysicalDamageDealt': 
champion['totalPhysicalDamageDealt'], 'totalMagicDamageDealt': champion['totalMagicDamageDealt'], 'totalFirstBlood': champion['totalFirstBlood'], 'totalAssists': champion['totalAssists'], 'maxChampionsKilled': champion['maxChampionsKilled'], 'maxNumDeaths': champion['maxNumDeaths'], 'label': champion['label'] } ) pass pass pass pass pass def main(): import time tiers = get_tier() for tier, rank_dict in tiers.iteritems(): print 'starting tier: {}'.format(tier) for summoner_id in rank_dict: print 'tier: {}, summoner id: {}'.format(tier, summoner_id) top_champion = TopChampion(config['key'], summoner_id, tier, 3) top_champion.get_top_champions() top_champion.save_top_champions() time.sleep(1) print 'end tier: {}'.format(tier) if __name__ == '__main__': main()
apache-2.0
lulandco/SickRage
lib/chardet/euctwfreq.py
53
31621
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Communicator client code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### # EUCTW frequency table # Converted from big5 work # by Taiwan's Mandarin Promotion Council # <http:#www.edu.tw:81/mandr/> # 128 --> 0.42261 # 256 --> 0.57851 # 512 --> 0.74851 # 1024 --> 0.89384 # 2048 --> 0.97583 # # Idea Distribution Ratio = 0.74851/(1-0.74851) =2.98 # Random Distribution Ration = 512/(5401-512)=0.105 # # Typical Distribution Ratio about 25% of Ideal one, still much higher than RDR EUCTW_TYPICAL_DISTRIBUTION_RATIO = 0.75 # Char to FreqOrder table , EUCTW_TABLE_SIZE = 8102 EUCTW_CHAR_TO_FREQ_ORDER = ( 1,1800,1506, 255,1431, 198, 9, 82, 6,7310, 177, 202,3615,1256,2808, 110, # 2742 3735, 33,3241, 261, 76, 44,2113, 16,2931,2184,1176, 659,3868, 26,3404,2643, # 2758 1198,3869,3313,4060, 410,2211, 302, 590, 361,1963, 8, 204, 58,4296,7311,1931, # 2774 63,7312,7313, 317,1614, 75, 222, 
159,4061,2412,1480,7314,3500,3068, 224,2809, # 2790 3616, 3, 10,3870,1471, 29,2774,1135,2852,1939, 873, 130,3242,1123, 312,7315, # 2806 4297,2051, 507, 252, 682,7316, 142,1914, 124, 206,2932, 34,3501,3173, 64, 604, # 2822 7317,2494,1976,1977, 155,1990, 645, 641,1606,7318,3405, 337, 72, 406,7319, 80, # 2838 630, 238,3174,1509, 263, 939,1092,2644, 756,1440,1094,3406, 449, 69,2969, 591, # 2854 179,2095, 471, 115,2034,1843, 60, 50,2970, 134, 806,1868, 734,2035,3407, 180, # 2870 995,1607, 156, 537,2893, 688,7320, 319,1305, 779,2144, 514,2374, 298,4298, 359, # 2886 2495, 90,2707,1338, 663, 11, 906,1099,2545, 20,2436, 182, 532,1716,7321, 732, # 2902 1376,4062,1311,1420,3175, 25,2312,1056, 113, 399, 382,1949, 242,3408,2467, 529, # 2918 3243, 475,1447,3617,7322, 117, 21, 656, 810,1297,2295,2329,3502,7323, 126,4063, # 2934 706, 456, 150, 613,4299, 71,1118,2036,4064, 145,3069, 85, 835, 486,2114,1246, # 2950 1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,7324,2127,2354, 347,3736, 221, # 2966 3503,3110,7325,1955,1153,4065, 83, 296,1199,3070, 192, 624, 93,7326, 822,1897, # 2982 2810,3111, 795,2064, 991,1554,1542,1592, 27, 43,2853, 859, 139,1456, 860,4300, # 2998 437, 712,3871, 164,2392,3112, 695, 211,3017,2096, 195,3872,1608,3504,3505,3618, # 3014 3873, 234, 811,2971,2097,3874,2229,1441,3506,1615,2375, 668,2076,1638, 305, 228, # 3030 1664,4301, 467, 415,7327, 262,2098,1593, 239, 108, 300, 200,1033, 512,1247,2077, # 3046 7328,7329,2173,3176,3619,2673, 593, 845,1062,3244, 88,1723,2037,3875,1950, 212, # 3062 266, 152, 149, 468,1898,4066,4302, 77, 187,7330,3018, 37, 5,2972,7331,3876, # 3078 7332,7333, 39,2517,4303,2894,3177,2078, 55, 148, 74,4304, 545, 483,1474,1029, # 3094 1665, 217,1869,1531,3113,1104,2645,4067, 24, 172,3507, 900,3877,3508,3509,4305, # 3110 32,1408,2811,1312, 329, 487,2355,2247,2708, 784,2674, 4,3019,3314,1427,1788, # 3126 188, 109, 499,7334,3620,1717,1789, 888,1217,3020,4306,7335,3510,7336,3315,1520, # 3142 3621,3878, 196,1034, 775,7337,7338, 929,1815, 249, 
439, 38,7339,1063,7340, 794, # 3158 3879,1435,2296, 46, 178,3245,2065,7341,2376,7342, 214,1709,4307, 804, 35, 707, # 3174 324,3622,1601,2546, 140, 459,4068,7343,7344,1365, 839, 272, 978,2257,2572,3409, # 3190 2128,1363,3623,1423, 697, 100,3071, 48, 70,1231, 495,3114,2193,7345,1294,7346, # 3206 2079, 462, 586,1042,3246, 853, 256, 988, 185,2377,3410,1698, 434,1084,7347,3411, # 3222 314,2615,2775,4308,2330,2331, 569,2280, 637,1816,2518, 757,1162,1878,1616,3412, # 3238 287,1577,2115, 768,4309,1671,2854,3511,2519,1321,3737, 909,2413,7348,4069, 933, # 3254 3738,7349,2052,2356,1222,4310, 765,2414,1322, 786,4311,7350,1919,1462,1677,2895, # 3270 1699,7351,4312,1424,2437,3115,3624,2590,3316,1774,1940,3413,3880,4070, 309,1369, # 3286 1130,2812, 364,2230,1653,1299,3881,3512,3882,3883,2646, 525,1085,3021, 902,2000, # 3302 1475, 964,4313, 421,1844,1415,1057,2281, 940,1364,3116, 376,4314,4315,1381, 7, # 3318 2520, 983,2378, 336,1710,2675,1845, 321,3414, 559,1131,3022,2742,1808,1132,1313, # 3334 265,1481,1857,7352, 352,1203,2813,3247, 167,1089, 420,2814, 776, 792,1724,3513, # 3350 4071,2438,3248,7353,4072,7354, 446, 229, 333,2743, 901,3739,1200,1557,4316,2647, # 3366 1920, 395,2744,2676,3740,4073,1835, 125, 916,3178,2616,4317,7355,7356,3741,7357, # 3382 7358,7359,4318,3117,3625,1133,2547,1757,3415,1510,2313,1409,3514,7360,2145, 438, # 3398 2591,2896,2379,3317,1068, 958,3023, 461, 311,2855,2677,4074,1915,3179,4075,1978, # 3414 383, 750,2745,2617,4076, 274, 539, 385,1278,1442,7361,1154,1964, 384, 561, 210, # 3430 98,1295,2548,3515,7362,1711,2415,1482,3416,3884,2897,1257, 129,7363,3742, 642, # 3446 523,2776,2777,2648,7364, 141,2231,1333, 68, 176, 441, 876, 907,4077, 603,2592, # 3462 710, 171,3417, 404, 549, 18,3118,2393,1410,3626,1666,7365,3516,4319,2898,4320, # 3478 7366,2973, 368,7367, 146, 366, 99, 871,3627,1543, 748, 807,1586,1185, 22,2258, # 3494 379,3743,3180,7368,3181, 505,1941,2618,1991,1382,2314,7369, 380,2357, 218, 702, # 3510 
1817,1248,3418,3024,3517,3318,3249,7370,2974,3628, 930,3250,3744,7371, 59,7372, # 3526 585, 601,4078, 497,3419,1112,1314,4321,1801,7373,1223,1472,2174,7374, 749,1836, # 3542 690,1899,3745,1772,3885,1476, 429,1043,1790,2232,2116, 917,4079, 447,1086,1629, # 3558 7375, 556,7376,7377,2020,1654, 844,1090, 105, 550, 966,1758,2815,1008,1782, 686, # 3574 1095,7378,2282, 793,1602,7379,3518,2593,4322,4080,2933,2297,4323,3746, 980,2496, # 3590 544, 353, 527,4324, 908,2678,2899,7380, 381,2619,1942,1348,7381,1341,1252, 560, # 3606 3072,7382,3420,2856,7383,2053, 973, 886,2080, 143,4325,7384,7385, 157,3886, 496, # 3622 4081, 57, 840, 540,2038,4326,4327,3421,2117,1445, 970,2259,1748,1965,2081,4082, # 3638 3119,1234,1775,3251,2816,3629, 773,1206,2129,1066,2039,1326,3887,1738,1725,4083, # 3654 279,3120, 51,1544,2594, 423,1578,2130,2066, 173,4328,1879,7386,7387,1583, 264, # 3670 610,3630,4329,2439, 280, 154,7388,7389,7390,1739, 338,1282,3073, 693,2857,1411, # 3686 1074,3747,2440,7391,4330,7392,7393,1240, 952,2394,7394,2900,1538,2679, 685,1483, # 3702 4084,2468,1436, 953,4085,2054,4331, 671,2395, 79,4086,2441,3252, 608, 567,2680, # 3718 3422,4087,4088,1691, 393,1261,1791,2396,7395,4332,7396,7397,7398,7399,1383,1672, # 3734 3748,3182,1464, 522,1119, 661,1150, 216, 675,4333,3888,1432,3519, 609,4334,2681, # 3750 2397,7400,7401,7402,4089,3025, 0,7403,2469, 315, 231,2442, 301,3319,4335,2380, # 3766 7404, 233,4090,3631,1818,4336,4337,7405, 96,1776,1315,2082,7406, 257,7407,1809, # 3782 3632,2709,1139,1819,4091,2021,1124,2163,2778,1777,2649,7408,3074, 363,1655,3183, # 3798 7409,2975,7410,7411,7412,3889,1567,3890, 718, 103,3184, 849,1443, 341,3320,2934, # 3814 1484,7413,1712, 127, 67, 339,4092,2398, 679,1412, 821,7414,7415, 834, 738, 351, # 3830 2976,2146, 846, 235,1497,1880, 418,1992,3749,2710, 186,1100,2147,2746,3520,1545, # 3846 1355,2935,2858,1377, 583,3891,4093,2573,2977,7416,1298,3633,1078,2549,3634,2358, # 3862 78,3750,3751, 267,1289,2099,2001,1594,4094, 348, 
369,1274,2194,2175,1837,4338, # 3878 1820,2817,3635,2747,2283,2002,4339,2936,2748, 144,3321, 882,4340,3892,2749,3423, # 3894 4341,2901,7417,4095,1726, 320,7418,3893,3026, 788,2978,7419,2818,1773,1327,2859, # 3910 3894,2819,7420,1306,4342,2003,1700,3752,3521,2359,2650, 787,2022, 506, 824,3636, # 3926 534, 323,4343,1044,3322,2023,1900, 946,3424,7421,1778,1500,1678,7422,1881,4344, # 3942 165, 243,4345,3637,2521, 123, 683,4096, 764,4346, 36,3895,1792, 589,2902, 816, # 3958 626,1667,3027,2233,1639,1555,1622,3753,3896,7423,3897,2860,1370,1228,1932, 891, # 3974 2083,2903, 304,4097,7424, 292,2979,2711,3522, 691,2100,4098,1115,4347, 118, 662, # 3990 7425, 611,1156, 854,2381,1316,2861, 2, 386, 515,2904,7426,7427,3253, 868,2234, # 4006 1486, 855,2651, 785,2212,3028,7428,1040,3185,3523,7429,3121, 448,7430,1525,7431, # 4022 2164,4348,7432,3754,7433,4099,2820,3524,3122, 503, 818,3898,3123,1568, 814, 676, # 4038 1444, 306,1749,7434,3755,1416,1030, 197,1428, 805,2821,1501,4349,7435,7436,7437, # 4054 1993,7438,4350,7439,7440,2195, 13,2779,3638,2980,3124,1229,1916,7441,3756,2131, # 4070 7442,4100,4351,2399,3525,7443,2213,1511,1727,1120,7444,7445, 646,3757,2443, 307, # 4086 7446,7447,1595,3186,7448,7449,7450,3639,1113,1356,3899,1465,2522,2523,7451, 519, # 4102 7452, 128,2132, 92,2284,1979,7453,3900,1512, 342,3125,2196,7454,2780,2214,1980, # 4118 3323,7455, 290,1656,1317, 789, 827,2360,7456,3758,4352, 562, 581,3901,7457, 401, # 4134 4353,2248, 94,4354,1399,2781,7458,1463,2024,4355,3187,1943,7459, 828,1105,4101, # 4150 1262,1394,7460,4102, 605,4356,7461,1783,2862,7462,2822, 819,2101, 578,2197,2937, # 4166 7463,1502, 436,3254,4103,3255,2823,3902,2905,3425,3426,7464,2712,2315,7465,7466, # 4182 2332,2067, 23,4357, 193, 826,3759,2102, 699,1630,4104,3075, 390,1793,1064,3526, # 4198 7467,1579,3076,3077,1400,7468,4105,1838,1640,2863,7469,4358,4359, 137,4106, 598, # 4214 3078,1966, 780, 104, 974,2938,7470, 278, 899, 253, 402, 572, 504, 493,1339,7471, # 4230 
3903,1275,4360,2574,2550,7472,3640,3029,3079,2249, 565,1334,2713, 863, 41,7473, # 4246 7474,4361,7475,1657,2333, 19, 463,2750,4107, 606,7476,2981,3256,1087,2084,1323, # 4262 2652,2982,7477,1631,1623,1750,4108,2682,7478,2864, 791,2714,2653,2334, 232,2416, # 4278 7479,2983,1498,7480,2654,2620, 755,1366,3641,3257,3126,2025,1609, 119,1917,3427, # 4294 862,1026,4109,7481,3904,3760,4362,3905,4363,2260,1951,2470,7482,1125, 817,4110, # 4310 4111,3906,1513,1766,2040,1487,4112,3030,3258,2824,3761,3127,7483,7484,1507,7485, # 4326 2683, 733, 40,1632,1106,2865, 345,4113, 841,2524, 230,4364,2984,1846,3259,3428, # 4342 7486,1263, 986,3429,7487, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562,3907, # 4358 3908,2939, 967,2751,2655,1349, 592,2133,1692,3324,2985,1994,4114,1679,3909,1901, # 4374 2185,7488, 739,3642,2715,1296,1290,7489,4115,2198,2199,1921,1563,2595,2551,1870, # 4390 2752,2986,7490, 435,7491, 343,1108, 596, 17,1751,4365,2235,3430,3643,7492,4366, # 4406 294,3527,2940,1693, 477, 979, 281,2041,3528, 643,2042,3644,2621,2782,2261,1031, # 4422 2335,2134,2298,3529,4367, 367,1249,2552,7493,3530,7494,4368,1283,3325,2004, 240, # 4438 1762,3326,4369,4370, 836,1069,3128, 474,7495,2148,2525, 268,3531,7496,3188,1521, # 4454 1284,7497,1658,1546,4116,7498,3532,3533,7499,4117,3327,2684,1685,4118, 961,1673, # 4470 2622, 190,2005,2200,3762,4371,4372,7500, 570,2497,3645,1490,7501,4373,2623,3260, # 4486 1956,4374, 584,1514, 396,1045,1944,7502,4375,1967,2444,7503,7504,4376,3910, 619, # 4502 7505,3129,3261, 215,2006,2783,2553,3189,4377,3190,4378, 763,4119,3763,4379,7506, # 4518 7507,1957,1767,2941,3328,3646,1174, 452,1477,4380,3329,3130,7508,2825,1253,2382, # 4534 2186,1091,2285,4120, 492,7509, 638,1169,1824,2135,1752,3911, 648, 926,1021,1324, # 4550 4381, 520,4382, 997, 847,1007, 892,4383,3764,2262,1871,3647,7510,2400,1784,4384, # 4566 1952,2942,3080,3191,1728,4121,2043,3648,4385,2007,1701,3131,1551, 30,2263,4122, # 4582 7511,2026,4386,3534,7512, 501,7513,4123, 
594,3431,2165,1821,3535,3432,3536,3192, # 4598 829,2826,4124,7514,1680,3132,1225,4125,7515,3262,4387,4126,3133,2336,7516,4388, # 4614 4127,7517,3912,3913,7518,1847,2383,2596,3330,7519,4389, 374,3914, 652,4128,4129, # 4630 375,1140, 798,7520,7521,7522,2361,4390,2264, 546,1659, 138,3031,2445,4391,7523, # 4646 2250, 612,1848, 910, 796,3765,1740,1371, 825,3766,3767,7524,2906,2554,7525, 692, # 4662 444,3032,2624, 801,4392,4130,7526,1491, 244,1053,3033,4131,4132, 340,7527,3915, # 4678 1041,2987, 293,1168, 87,1357,7528,1539, 959,7529,2236, 721, 694,4133,3768, 219, # 4694 1478, 644,1417,3331,2656,1413,1401,1335,1389,3916,7530,7531,2988,2362,3134,1825, # 4710 730,1515, 184,2827, 66,4393,7532,1660,2943, 246,3332, 378,1457, 226,3433, 975, # 4726 3917,2944,1264,3537, 674, 696,7533, 163,7534,1141,2417,2166, 713,3538,3333,4394, # 4742 3918,7535,7536,1186, 15,7537,1079,1070,7538,1522,3193,3539, 276,1050,2716, 758, # 4758 1126, 653,2945,3263,7539,2337, 889,3540,3919,3081,2989, 903,1250,4395,3920,3434, # 4774 3541,1342,1681,1718, 766,3264, 286, 89,2946,3649,7540,1713,7541,2597,3334,2990, # 4790 7542,2947,2215,3194,2866,7543,4396,2498,2526, 181, 387,1075,3921, 731,2187,3335, # 4806 7544,3265, 310, 313,3435,2299, 770,4134, 54,3034, 189,4397,3082,3769,3922,7545, # 4822 1230,1617,1849, 355,3542,4135,4398,3336, 111,4136,3650,1350,3135,3436,3035,4137, # 4838 2149,3266,3543,7546,2784,3923,3924,2991, 722,2008,7547,1071, 247,1207,2338,2471, # 4854 1378,4399,2009, 864,1437,1214,4400, 373,3770,1142,2216, 667,4401, 442,2753,2555, # 4870 3771,3925,1968,4138,3267,1839, 837, 170,1107, 934,1336,1882,7548,7549,2118,4139, # 4886 2828, 743,1569,7550,4402,4140, 582,2384,1418,3437,7551,1802,7552, 357,1395,1729, # 4902 3651,3268,2418,1564,2237,7553,3083,3772,1633,4403,1114,2085,4141,1532,7554, 482, # 4918 2446,4404,7555,7556,1492, 833,1466,7557,2717,3544,1641,2829,7558,1526,1272,3652, # 4934 4142,1686,1794, 416,2556,1902,1953,1803,7559,3773,2785,3774,1159,2316,7560,2867, # 4950 
4405,1610,1584,3036,2419,2754, 443,3269,1163,3136,7561,7562,3926,7563,4143,2499, # 4966 3037,4406,3927,3137,2103,1647,3545,2010,1872,4144,7564,4145, 431,3438,7565, 250, # 4982 97, 81,4146,7566,1648,1850,1558, 160, 848,7567, 866, 740,1694,7568,2201,2830, # 4998 3195,4147,4407,3653,1687, 950,2472, 426, 469,3196,3654,3655,3928,7569,7570,1188, # 5014 424,1995, 861,3546,4148,3775,2202,2685, 168,1235,3547,4149,7571,2086,1674,4408, # 5030 3337,3270, 220,2557,1009,7572,3776, 670,2992, 332,1208, 717,7573,7574,3548,2447, # 5046 3929,3338,7575, 513,7576,1209,2868,3339,3138,4409,1080,7577,7578,7579,7580,2527, # 5062 3656,3549, 815,1587,3930,3931,7581,3550,3439,3777,1254,4410,1328,3038,1390,3932, # 5078 1741,3933,3778,3934,7582, 236,3779,2448,3271,7583,7584,3657,3780,1273,3781,4411, # 5094 7585, 308,7586,4412, 245,4413,1851,2473,1307,2575, 430, 715,2136,2449,7587, 270, # 5110 199,2869,3935,7588,3551,2718,1753, 761,1754, 725,1661,1840,4414,3440,3658,7589, # 5126 7590, 587, 14,3272, 227,2598, 326, 480,2265, 943,2755,3552, 291, 650,1883,7591, # 5142 1702,1226, 102,1547, 62,3441, 904,4415,3442,1164,4150,7592,7593,1224,1548,2756, # 5158 391, 498,1493,7594,1386,1419,7595,2055,1177,4416, 813, 880,1081,2363, 566,1145, # 5174 4417,2286,1001,1035,2558,2599,2238, 394,1286,7596,7597,2068,7598, 86,1494,1730, # 5190 3936, 491,1588, 745, 897,2948, 843,3340,3937,2757,2870,3273,1768, 998,2217,2069, # 5206 397,1826,1195,1969,3659,2993,3341, 284,7599,3782,2500,2137,2119,1903,7600,3938, # 5222 2150,3939,4151,1036,3443,1904, 114,2559,4152, 209,1527,7601,7602,2949,2831,2625, # 5238 2385,2719,3139, 812,2560,7603,3274,7604,1559, 737,1884,3660,1210, 885, 28,2686, # 5254 3553,3783,7605,4153,1004,1779,4418,7606, 346,1981,2218,2687,4419,3784,1742, 797, # 5270 1642,3940,1933,1072,1384,2151, 896,3941,3275,3661,3197,2871,3554,7607,2561,1958, # 5286 4420,2450,1785,7608,7609,7610,3942,4154,1005,1308,3662,4155,2720,4421,4422,1528, # 5302 2600, 161,1178,4156,1982, 987,4423,1101,4157, 
631,3943,1157,3198,2420,1343,1241, # 5318 1016,2239,2562, 372, 877,2339,2501,1160, 555,1934, 911,3944,7611, 466,1170, 169, # 5334 1051,2907,2688,3663,2474,2994,1182,2011,2563,1251,2626,7612, 992,2340,3444,1540, # 5350 2721,1201,2070,2401,1996,2475,7613,4424, 528,1922,2188,1503,1873,1570,2364,3342, # 5366 3276,7614, 557,1073,7615,1827,3445,2087,2266,3140,3039,3084, 767,3085,2786,4425, # 5382 1006,4158,4426,2341,1267,2176,3664,3199, 778,3945,3200,2722,1597,2657,7616,4427, # 5398 7617,3446,7618,7619,7620,3277,2689,1433,3278, 131, 95,1504,3946, 723,4159,3141, # 5414 1841,3555,2758,2189,3947,2027,2104,3665,7621,2995,3948,1218,7622,3343,3201,3949, # 5430 4160,2576, 248,1634,3785, 912,7623,2832,3666,3040,3786, 654, 53,7624,2996,7625, # 5446 1688,4428, 777,3447,1032,3950,1425,7626, 191, 820,2120,2833, 971,4429, 931,3202, # 5462 135, 664, 783,3787,1997, 772,2908,1935,3951,3788,4430,2909,3203, 282,2723, 640, # 5478 1372,3448,1127, 922, 325,3344,7627,7628, 711,2044,7629,7630,3952,2219,2787,1936, # 5494 3953,3345,2220,2251,3789,2300,7631,4431,3790,1258,3279,3954,3204,2138,2950,3955, # 5510 3956,7632,2221, 258,3205,4432, 101,1227,7633,3280,1755,7634,1391,3281,7635,2910, # 5526 2056, 893,7636,7637,7638,1402,4161,2342,7639,7640,3206,3556,7641,7642, 878,1325, # 5542 1780,2788,4433, 259,1385,2577, 744,1183,2267,4434,7643,3957,2502,7644, 684,1024, # 5558 4162,7645, 472,3557,3449,1165,3282,3958,3959, 322,2152, 881, 455,1695,1152,1340, # 5574 660, 554,2153,4435,1058,4436,4163, 830,1065,3346,3960,4437,1923,7646,1703,1918, # 5590 7647, 932,2268, 122,7648,4438, 947, 677,7649,3791,2627, 297,1905,1924,2269,4439, # 5606 2317,3283,7650,7651,4164,7652,4165, 84,4166, 112, 989,7653, 547,1059,3961, 701, # 5622 3558,1019,7654,4167,7655,3450, 942, 639, 457,2301,2451, 993,2951, 407, 851, 494, # 5638 4440,3347, 927,7656,1237,7657,2421,3348, 573,4168, 680, 921,2911,1279,1874, 285, # 5654 790,1448,1983, 719,2167,7658,7659,4441,3962,3963,1649,7660,1541, 563,7661,1077, # 5670 7662,3349,3041,3451, 
511,2997,3964,3965,3667,3966,1268,2564,3350,3207,4442,4443, # 5686 7663, 535,1048,1276,1189,2912,2028,3142,1438,1373,2834,2952,1134,2012,7664,4169, # 5702 1238,2578,3086,1259,7665, 700,7666,2953,3143,3668,4170,7667,4171,1146,1875,1906, # 5718 4444,2601,3967, 781,2422, 132,1589, 203, 147, 273,2789,2402, 898,1786,2154,3968, # 5734 3969,7668,3792,2790,7669,7670,4445,4446,7671,3208,7672,1635,3793, 965,7673,1804, # 5750 2690,1516,3559,1121,1082,1329,3284,3970,1449,3794, 65,1128,2835,2913,2759,1590, # 5766 3795,7674,7675, 12,2658, 45, 976,2579,3144,4447, 517,2528,1013,1037,3209,7676, # 5782 3796,2836,7677,3797,7678,3452,7679,2602, 614,1998,2318,3798,3087,2724,2628,7680, # 5798 2580,4172, 599,1269,7681,1810,3669,7682,2691,3088, 759,1060, 489,1805,3351,3285, # 5814 1358,7683,7684,2386,1387,1215,2629,2252, 490,7685,7686,4173,1759,2387,2343,7687, # 5830 4448,3799,1907,3971,2630,1806,3210,4449,3453,3286,2760,2344, 874,7688,7689,3454, # 5846 3670,1858, 91,2914,3671,3042,3800,4450,7690,3145,3972,2659,7691,3455,1202,1403, # 5862 3801,2954,2529,1517,2503,4451,3456,2504,7692,4452,7693,2692,1885,1495,1731,3973, # 5878 2365,4453,7694,2029,7695,7696,3974,2693,1216, 237,2581,4174,2319,3975,3802,4454, # 5894 4455,2694,3560,3457, 445,4456,7697,7698,7699,7700,2761, 61,3976,3672,1822,3977, # 5910 7701, 687,2045, 935, 925, 405,2660, 703,1096,1859,2725,4457,3978,1876,1367,2695, # 5926 3352, 918,2105,1781,2476, 334,3287,1611,1093,4458, 564,3146,3458,3673,3353, 945, # 5942 2631,2057,4459,7702,1925, 872,4175,7703,3459,2696,3089, 349,4176,3674,3979,4460, # 5958 3803,4177,3675,2155,3980,4461,4462,4178,4463,2403,2046, 782,3981, 400, 251,4179, # 5974 1624,7704,7705, 277,3676, 299,1265, 476,1191,3804,2121,4180,4181,1109, 205,7706, # 5990 2582,1000,2156,3561,1860,7707,7708,7709,4464,7710,4465,2565, 107,2477,2157,3982, # 6006 3460,3147,7711,1533, 541,1301, 158, 753,4182,2872,3562,7712,1696, 370,1088,4183, # 6022 4466,3563, 579, 327, 440, 162,2240, 269,1937,1374,3461, 968,3043, 56,1396,3090, # 6038 
2106,3288,3354,7713,1926,2158,4467,2998,7714,3564,7715,7716,3677,4468,2478,7717, # 6054 2791,7718,1650,4469,7719,2603,7720,7721,3983,2661,3355,1149,3356,3984,3805,3985, # 6070 7722,1076, 49,7723, 951,3211,3289,3290, 450,2837, 920,7724,1811,2792,2366,4184, # 6086 1908,1138,2367,3806,3462,7725,3212,4470,1909,1147,1518,2423,4471,3807,7726,4472, # 6102 2388,2604, 260,1795,3213,7727,7728,3808,3291, 708,7729,3565,1704,7730,3566,1351, # 6118 1618,3357,2999,1886, 944,4185,3358,4186,3044,3359,4187,7731,3678, 422, 413,1714, # 6134 3292, 500,2058,2345,4188,2479,7732,1344,1910, 954,7733,1668,7734,7735,3986,2404, # 6150 4189,3567,3809,4190,7736,2302,1318,2505,3091, 133,3092,2873,4473, 629, 31,2838, # 6166 2697,3810,4474, 850, 949,4475,3987,2955,1732,2088,4191,1496,1852,7737,3988, 620, # 6182 3214, 981,1242,3679,3360,1619,3680,1643,3293,2139,2452,1970,1719,3463,2168,7738, # 6198 3215,7739,7740,3361,1828,7741,1277,4476,1565,2047,7742,1636,3568,3093,7743, 869, # 6214 2839, 655,3811,3812,3094,3989,3000,3813,1310,3569,4477,7744,7745,7746,1733, 558, # 6230 4478,3681, 335,1549,3045,1756,4192,3682,1945,3464,1829,1291,1192, 470,2726,2107, # 6246 2793, 913,1054,3990,7747,1027,7748,3046,3991,4479, 982,2662,3362,3148,3465,3216, # 6262 3217,1946,2794,7749, 571,4480,7750,1830,7751,3570,2583,1523,2424,7752,2089, 984, # 6278 4481,3683,1959,7753,3684, 852, 923,2795,3466,3685, 969,1519, 999,2048,2320,1705, # 6294 7754,3095, 615,1662, 151, 597,3992,2405,2321,1049, 275,4482,3686,4193, 568,3687, # 6310 3571,2480,4194,3688,7755,2425,2270, 409,3218,7756,1566,2874,3467,1002, 769,2840, # 6326 194,2090,3149,3689,2222,3294,4195, 628,1505,7757,7758,1763,2177,3001,3993, 521, # 6342 1161,2584,1787,2203,2406,4483,3994,1625,4196,4197, 412, 42,3096, 464,7759,2632, # 6358 4484,3363,1760,1571,2875,3468,2530,1219,2204,3814,2633,2140,2368,4485,4486,3295, # 6374 1651,3364,3572,7760,7761,3573,2481,3469,7762,3690,7763,7764,2271,2091, 460,7765, # 6390 4487,7766,3002, 962, 588,3574, 289,3219,2634,1116, 
52,7767,3047,1796,7768,7769, # 6406 7770,1467,7771,1598,1143,3691,4198,1984,1734,1067,4488,1280,3365, 465,4489,1572, # 6422 510,7772,1927,2241,1812,1644,3575,7773,4490,3692,7774,7775,2663,1573,1534,7776, # 6438 7777,4199, 536,1807,1761,3470,3815,3150,2635,7778,7779,7780,4491,3471,2915,1911, # 6454 2796,7781,3296,1122, 377,3220,7782, 360,7783,7784,4200,1529, 551,7785,2059,3693, # 6470 1769,2426,7786,2916,4201,3297,3097,2322,2108,2030,4492,1404, 136,1468,1479, 672, # 6486 1171,3221,2303, 271,3151,7787,2762,7788,2049, 678,2727, 865,1947,4493,7789,2013, # 6502 3995,2956,7790,2728,2223,1397,3048,3694,4494,4495,1735,2917,3366,3576,7791,3816, # 6518 509,2841,2453,2876,3817,7792,7793,3152,3153,4496,4202,2531,4497,2304,1166,1010, # 6534 552, 681,1887,7794,7795,2957,2958,3996,1287,1596,1861,3154, 358, 453, 736, 175, # 6550 478,1117, 905,1167,1097,7796,1853,1530,7797,1706,7798,2178,3472,2287,3695,3473, # 6566 3577,4203,2092,4204,7799,3367,1193,2482,4205,1458,2190,2205,1862,1888,1421,3298, # 6582 2918,3049,2179,3474, 595,2122,7800,3997,7801,7802,4206,1707,2636, 223,3696,1359, # 6598 751,3098, 183,3475,7803,2797,3003, 419,2369, 633, 704,3818,2389, 241,7804,7805, # 6614 7806, 838,3004,3697,2272,2763,2454,3819,1938,2050,3998,1309,3099,2242,1181,7807, # 6630 1136,2206,3820,2370,1446,4207,2305,4498,7808,7809,4208,1055,2605, 484,3698,7810, # 6646 3999, 625,4209,2273,3368,1499,4210,4000,7811,4001,4211,3222,2274,2275,3476,7812, # 6662 7813,2764, 808,2606,3699,3369,4002,4212,3100,2532, 526,3370,3821,4213, 955,7814, # 6678 1620,4214,2637,2427,7815,1429,3700,1669,1831, 994, 928,7816,3578,1260,7817,7818, # 6694 7819,1948,2288, 741,2919,1626,4215,2729,2455, 867,1184, 362,3371,1392,7820,7821, # 6710 4003,4216,1770,1736,3223,2920,4499,4500,1928,2698,1459,1158,7822,3050,3372,2877, # 6726 1292,1929,2506,2842,3701,1985,1187,2071,2014,2607,4217,7823,2566,2507,2169,3702, # 6742 2483,3299,7824,3703,4501,7825,7826, 666,1003,3005,1022,3579,4218,7827,4502,1813, # 6758 2253, 574,3822,1603, 295,1535, 
705,3823,4219, 283, 858, 417,7828,7829,3224,4503, # 6774 4504,3051,1220,1889,1046,2276,2456,4004,1393,1599, 689,2567, 388,4220,7830,2484, # 6790 802,7831,2798,3824,2060,1405,2254,7832,4505,3825,2109,1052,1345,3225,1585,7833, # 6806 809,7834,7835,7836, 575,2730,3477, 956,1552,1469,1144,2323,7837,2324,1560,2457, # 6822 3580,3226,4005, 616,2207,3155,2180,2289,7838,1832,7839,3478,4506,7840,1319,3704, # 6838 3705,1211,3581,1023,3227,1293,2799,7841,7842,7843,3826, 607,2306,3827, 762,2878, # 6854 1439,4221,1360,7844,1485,3052,7845,4507,1038,4222,1450,2061,2638,4223,1379,4508, # 6870 2585,7846,7847,4224,1352,1414,2325,2921,1172,7848,7849,3828,3829,7850,1797,1451, # 6886 7851,7852,7853,7854,2922,4006,4007,2485,2346, 411,4008,4009,3582,3300,3101,4509, # 6902 1561,2664,1452,4010,1375,7855,7856, 47,2959, 316,7857,1406,1591,2923,3156,7858, # 6918 1025,2141,3102,3157, 354,2731, 884,2224,4225,2407, 508,3706, 726,3583, 996,2428, # 6934 3584, 729,7859, 392,2191,1453,4011,4510,3707,7860,7861,2458,3585,2608,1675,2800, # 6950 919,2347,2960,2348,1270,4511,4012, 73,7862,7863, 647,7864,3228,2843,2255,1550, # 6966 1346,3006,7865,1332, 883,3479,7866,7867,7868,7869,3301,2765,7870,1212, 831,1347, # 6982 4226,4512,2326,3830,1863,3053, 720,3831,4513,4514,3832,7871,4227,7872,7873,4515, # 6998 7874,7875,1798,4516,3708,2609,4517,3586,1645,2371,7876,7877,2924, 669,2208,2665, # 7014 2429,7878,2879,7879,7880,1028,3229,7881,4228,2408,7882,2256,1353,7883,7884,4518, # 7030 3158, 518,7885,4013,7886,4229,1960,7887,2142,4230,7888,7889,3007,2349,2350,3833, # 7046 516,1833,1454,4014,2699,4231,4519,2225,2610,1971,1129,3587,7890,2766,7891,2961, # 7062 1422, 577,1470,3008,1524,3373,7892,7893, 432,4232,3054,3480,7894,2586,1455,2508, # 7078 2226,1972,1175,7895,1020,2732,4015,3481,4520,7896,2733,7897,1743,1361,3055,3482, # 7094 2639,4016,4233,4521,2290, 895, 924,4234,2170, 331,2243,3056, 166,1627,3057,1098, # 7110 7898,1232,2880,2227,3374,4522, 657, 403,1196,2372, 542,3709,3375,1600,4235,3483, # 7126 
7899,4523,2767,3230, 576, 530,1362,7900,4524,2533,2666,3710,4017,7901, 842,3834, # 7142 7902,2801,2031,1014,4018, 213,2700,3376, 665, 621,4236,7903,3711,2925,2430,7904, # 7158 2431,3302,3588,3377,7905,4237,2534,4238,4525,3589,1682,4239,3484,1380,7906, 724, # 7174 2277, 600,1670,7907,1337,1233,4526,3103,2244,7908,1621,4527,7909, 651,4240,7910, # 7190 1612,4241,2611,7911,2844,7912,2734,2307,3058,7913, 716,2459,3059, 174,1255,2701, # 7206 4019,3590, 548,1320,1398, 728,4020,1574,7914,1890,1197,3060,4021,7915,3061,3062, # 7222 3712,3591,3713, 747,7916, 635,4242,4528,7917,7918,7919,4243,7920,7921,4529,7922, # 7238 3378,4530,2432, 451,7923,3714,2535,2072,4244,2735,4245,4022,7924,1764,4531,7925, # 7254 4246, 350,7926,2278,2390,2486,7927,4247,4023,2245,1434,4024, 488,4532, 458,4248, # 7270 4025,3715, 771,1330,2391,3835,2568,3159,2159,2409,1553,2667,3160,4249,7928,2487, # 7286 2881,2612,1720,2702,4250,3379,4533,7929,2536,4251,7930,3231,4252,2768,7931,2015, # 7302 2736,7932,1155,1017,3716,3836,7933,3303,2308, 201,1864,4253,1430,7934,4026,7935, # 7318 7936,7937,7938,7939,4254,1604,7940, 414,1865, 371,2587,4534,4535,3485,2016,3104, # 7334 4536,1708, 960,4255, 887, 389,2171,1536,1663,1721,7941,2228,4027,2351,2926,1580, # 7350 7942,7943,7944,1744,7945,2537,4537,4538,7946,4539,7947,2073,7948,7949,3592,3380, # 7366 2882,4256,7950,4257,2640,3381,2802, 673,2703,2460, 709,3486,4028,3593,4258,7951, # 7382 1148, 502, 634,7952,7953,1204,4540,3594,1575,4541,2613,3717,7954,3718,3105, 948, # 7398 3232, 121,1745,3837,1110,7955,4259,3063,2509,3009,4029,3719,1151,1771,3838,1488, # 7414 4030,1986,7956,2433,3487,7957,7958,2093,7959,4260,3839,1213,1407,2803, 531,2737, # 7430 2538,3233,1011,1537,7960,2769,4261,3106,1061,7961,3720,3721,1866,2883,7962,2017, # 7446 120,4262,4263,2062,3595,3234,2309,3840,2668,3382,1954,4542,7963,7964,3488,1047, # 7462 2704,1266,7965,1368,4543,2845, 649,3383,3841,2539,2738,1102,2846,2669,7966,7967, # 7478 
1999,7968,1111,3596,2962,7969,2488,3842,3597,2804,1854,3384,3722,7970,7971,3385, # 7494 2410,2884,3304,3235,3598,7972,2569,7973,3599,2805,4031,1460, 856,7974,3600,7975, # 7510 2885,2963,7976,2886,3843,7977,4264, 632,2510, 875,3844,1697,3845,2291,7978,7979, # 7526 4544,3010,1239, 580,4545,4265,7980, 914, 936,2074,1190,4032,1039,2123,7981,7982, # 7542 7983,3386,1473,7984,1354,4266,3846,7985,2172,3064,4033, 915,3305,4267,4268,3306, # 7558 1605,1834,7986,2739, 398,3601,4269,3847,4034, 328,1912,2847,4035,3848,1331,4270, # 7574 3011, 937,4271,7987,3602,4036,4037,3387,2160,4546,3388, 524, 742, 538,3065,1012, # 7590 7988,7989,3849,2461,7990, 658,1103, 225,3850,7991,7992,4547,7993,4548,7994,3236, # 7606 1243,7995,4038, 963,2246,4549,7996,2705,3603,3161,7997,7998,2588,2327,7999,4550, # 7622 8000,8001,8002,3489,3307, 957,3389,2540,2032,1930,2927,2462, 870,2018,3604,1746, # 7638 2770,2771,2434,2463,8003,3851,8004,3723,3107,3724,3490,3390,3725,8005,1179,3066, # 7654 8006,3162,2373,4272,3726,2541,3163,3108,2740,4039,8007,3391,1556,2542,2292, 977, # 7670 2887,2033,4040,1205,3392,8008,1765,3393,3164,2124,1271,1689, 714,4551,3491,8009, # 7686 2328,3852, 533,4273,3605,2181, 617,8010,2464,3308,3492,2310,8011,8012,3165,8013, # 7702 8014,3853,1987, 618, 427,2641,3493,3394,8015,8016,1244,1690,8017,2806,4274,4552, # 7718 8018,3494,8019,8020,2279,1576, 473,3606,4275,3395, 972,8021,3607,8022,3067,8023, # 7734 8024,4553,4554,8025,3727,4041,4042,8026, 153,4555, 356,8027,1891,2888,4276,2143, # 7750 408, 803,2352,8028,3854,8029,4277,1646,2570,2511,4556,4557,3855,8030,3856,4278, # 7766 8031,2411,3396, 752,8032,8033,1961,2964,8034, 746,3012,2465,8035,4279,3728, 698, # 7782 4558,1892,4280,3608,2543,4559,3609,3857,8036,3166,3397,8037,1823,1302,4043,2706, # 7798 3858,1973,4281,8038,4282,3167, 823,1303,1288,1236,2848,3495,4044,3398, 774,3859, # 7814 8039,1581,4560,1304,2849,3860,4561,8040,2435,2161,1083,3237,4283,4045,4284, 344, # 7830 1173, 288,2311, 
454,1683,8041,8042,1461,4562,4046,2589,8043,8044,4563, 985, 894, # 7846 8045,3399,3168,8046,1913,2928,3729,1988,8047,2110,1974,8048,4047,8049,2571,1194, # 7862 425,8050,4564,3169,1245,3730,4285,8051,8052,2850,8053, 636,4565,1855,3861, 760, # 7878 1799,8054,4286,2209,1508,4566,4048,1893,1684,2293,8055,8056,8057,4287,4288,2210, # 7894 479,8058,8059, 832,8060,4049,2489,8061,2965,2490,3731, 990,3109, 627,1814,2642, # 7910 4289,1582,4290,2125,2111,3496,4567,8062, 799,4291,3170,8063,4568,2112,1737,3013, # 7926 1018, 543, 754,4292,3309,1676,4569,4570,4050,8064,1489,8065,3497,8066,2614,2889, # 7942 4051,8067,8068,2966,8069,8070,8071,8072,3171,4571,4572,2182,1722,8073,3238,3239, # 7958 1842,3610,1715, 481, 365,1975,1856,8074,8075,1962,2491,4573,8076,2126,3611,3240, # 7974 433,1894,2063,2075,8077, 602,2741,8078,8079,8080,8081,8082,3014,1628,3400,8083, # 7990 3172,4574,4052,2890,4575,2512,8084,2544,2772,8085,8086,8087,3310,4576,2891,8088, # 8006 4577,8089,2851,4578,4579,1221,2967,4053,2513,8090,8091,8092,1867,1989,8093,8094, # 8022 8095,1895,8096,8097,4580,1896,4054, 318,8098,2094,4055,4293,8099,8100, 485,8101, # 8038 938,3862, 553,2670, 116,8102,3863,3612,8103,3498,2671,2773,3401,3311,2807,8104, # 8054 3613,2929,4056,1747,2930,2968,8105,8106, 207,8107,8108,2672,4581,2514,8109,3015, # 8070 890,3614,3864,8110,1877,3732,3402,8111,2183,2353,3403,1652,8112,8113,8114, 941, # 8086 2294, 208,3499,4057,2019, 330,4294,3865,2892,2492,3733,4295,8115,8116,8117,8118, # 8102 )
gpl-3.0
Belxjander/Kirito
Python-3.5.0-Amiga/Lib/distutils/tests/test_install_scripts.py
118
2625
"""Tests for distutils.command.install_scripts.""" import os import unittest from distutils.command.install_scripts import install_scripts from distutils.core import Distribution from distutils.tests import support from test.support import run_unittest class InstallScriptsTestCase(support.TempdirManager, support.LoggingSilencer, unittest.TestCase): def test_default_settings(self): dist = Distribution() dist.command_obj["build"] = support.DummyCommand( build_scripts="/foo/bar") dist.command_obj["install"] = support.DummyCommand( install_scripts="/splat/funk", force=1, skip_build=1, ) cmd = install_scripts(dist) self.assertFalse(cmd.force) self.assertFalse(cmd.skip_build) self.assertIsNone(cmd.build_dir) self.assertIsNone(cmd.install_dir) cmd.finalize_options() self.assertTrue(cmd.force) self.assertTrue(cmd.skip_build) self.assertEqual(cmd.build_dir, "/foo/bar") self.assertEqual(cmd.install_dir, "/splat/funk") def test_installation(self): source = self.mkdtemp() expected = [] def write_script(name, text): expected.append(name) f = open(os.path.join(source, name), "w") try: f.write(text) finally: f.close() write_script("script1.py", ("#! /usr/bin/env python2.3\n" "# bogus script w/ Python sh-bang\n" "pass\n")) write_script("script2.py", ("#!/usr/bin/python\n" "# bogus script w/ Python sh-bang\n" "pass\n")) write_script("shell.sh", ("#!/bin/sh\n" "# bogus shell script w/ sh-bang\n" "exit 0\n")) target = self.mkdtemp() dist = Distribution() dist.command_obj["build"] = support.DummyCommand(build_scripts=source) dist.command_obj["install"] = support.DummyCommand( install_scripts=target, force=1, skip_build=1, ) cmd = install_scripts(dist) cmd.finalize_options() cmd.run() installed = os.listdir(target) for name in expected: self.assertIn(name, installed) def test_suite(): return unittest.makeSuite(InstallScriptsTestCase) if __name__ == "__main__": run_unittest(test_suite())
gpl-3.0
Digilent/u-boot-digilent
tools/patman/cros_subprocess.py
11
15719
# Copyright (c) 2012 The Chromium OS Authors. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # # Copyright (c) 2003-2005 by Peter Astrand <astrand@lysator.liu.se> # Licensed to PSF under a Contributor Agreement. # See http://www.python.org/2.4/license for licensing details. """Subprocress execution This module holds a subclass of subprocess.Popen with our own required features, mainly that we get access to the subprocess output while it is running rather than just at the end. This makes it easiler to show progress information and filter output in real time. """ import errno import os import pty import select import subprocess import sys import unittest # Import these here so the caller does not need to import subprocess also. PIPE = subprocess.PIPE STDOUT = subprocess.STDOUT PIPE_PTY = -3 # Pipe output through a pty stay_alive = True class Popen(subprocess.Popen): """Like subprocess.Popen with ptys and incremental output This class deals with running a child process and filtering its output on both stdout and stderr while it is running. We do this so we can monitor progress, and possibly relay the output to the user if requested. The class is similar to subprocess.Popen, the equivalent is something like: Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) But this class has many fewer features, and two enhancement: 1. Rather than getting the output data only at the end, this class sends it to a provided operation as it arrives. 2. We use pseudo terminals so that the child will hopefully flush its output to us as soon as it is produced, rather than waiting for the end of a line. Use CommunicateFilter() to handle output from the subprocess. """ def __init__(self, args, stdin=None, stdout=PIPE_PTY, stderr=PIPE_PTY, shell=False, cwd=None, env=None, **kwargs): """Cut-down constructor Args: args: Program and arguments for subprocess to execute. 
stdin: See subprocess.Popen() stdout: See subprocess.Popen(), except that we support the sentinel value of cros_subprocess.PIPE_PTY. stderr: See subprocess.Popen(), except that we support the sentinel value of cros_subprocess.PIPE_PTY. shell: See subprocess.Popen() cwd: Working directory to change to for subprocess, or None if none. env: Environment to use for this subprocess, or None to inherit parent. kwargs: No other arguments are supported at the moment. Passing other arguments will cause a ValueError to be raised. """ stdout_pty = None stderr_pty = None if stdout == PIPE_PTY: stdout_pty = pty.openpty() stdout = os.fdopen(stdout_pty[1]) if stderr == PIPE_PTY: stderr_pty = pty.openpty() stderr = os.fdopen(stderr_pty[1]) super(Popen, self).__init__(args, stdin=stdin, stdout=stdout, stderr=stderr, shell=shell, cwd=cwd, env=env, **kwargs) # If we're on a PTY, we passed the slave half of the PTY to the subprocess. # We want to use the master half on our end from now on. Setting this here # does make some assumptions about the implementation of subprocess, but # those assumptions are pretty minor. # Note that if stderr is STDOUT, then self.stderr will be set to None by # this constructor. if stdout_pty is not None: self.stdout = os.fdopen(stdout_pty[0]) if stderr_pty is not None: self.stderr = os.fdopen(stderr_pty[0]) # Insist that unit tests exist for other arguments we don't support. if kwargs: raise ValueError("Unit tests do not test extra args - please add tests") def CommunicateFilter(self, output): """Interact with process: Read data from stdout and stderr. This method runs until end-of-file is reached, then waits for the subprocess to terminate. The output function is sent all output from the subprocess and must be defined like this: def Output([self,] stream, data) Args: stream: the stream the output was received on, which will be sys.stdout or sys.stderr. 
data: a string containing the data Note: The data read is buffered in memory, so do not use this method if the data size is large or unlimited. Args: output: Function to call with each fragment of output. Returns: A tuple (stdout, stderr, combined) which is the data received on stdout, stderr and the combined data (interleaved stdout and stderr). Note that the interleaved output will only be sensible if you have set both stdout and stderr to PIPE or PIPE_PTY. Even then it depends on the timing of the output in the subprocess. If a subprocess flips between stdout and stderr quickly in succession, by the time we come to read the output from each we may see several lines in each, and will read all the stdout lines, then all the stderr lines. So the interleaving may not be correct. In this case you might want to pass stderr=cros_subprocess.STDOUT to the constructor. This feature is still useful for subprocesses where stderr is rarely used and indicates an error. Note also that if you set stderr to STDOUT, then stderr will be empty and the combined output will just be the same as stdout. """ read_set = [] write_set = [] stdout = None # Return stderr = None # Return if self.stdin: # Flush stdio buffer. This might block, if the user has # been writing to .stdin in an uncontrolled fashion. self.stdin.flush() if input: write_set.append(self.stdin) else: self.stdin.close() if self.stdout: read_set.append(self.stdout) stdout = [] if self.stderr and self.stderr != self.stdout: read_set.append(self.stderr) stderr = [] combined = [] input_offset = 0 while read_set or write_set: try: rlist, wlist, _ = select.select(read_set, write_set, [], 0.2) except select.error as e: if e.args[0] == errno.EINTR: continue raise if not stay_alive: self.terminate() if self.stdin in wlist: # When select has indicated that the file is writable, # we can write up to PIPE_BUF bytes without risk # blocking. 
POSIX defines PIPE_BUF >= 512 chunk = input[input_offset : input_offset + 512] bytes_written = os.write(self.stdin.fileno(), chunk) input_offset += bytes_written if input_offset >= len(input): self.stdin.close() write_set.remove(self.stdin) if self.stdout in rlist: data = "" # We will get an error on read if the pty is closed try: data = os.read(self.stdout.fileno(), 1024) except OSError: pass if data == "": self.stdout.close() read_set.remove(self.stdout) else: stdout.append(data) combined.append(data) if output: output(sys.stdout, data) if self.stderr in rlist: data = "" # We will get an error on read if the pty is closed try: data = os.read(self.stderr.fileno(), 1024) except OSError: pass if data == "": self.stderr.close() read_set.remove(self.stderr) else: stderr.append(data) combined.append(data) if output: output(sys.stderr, data) # All data exchanged. Translate lists into strings. if stdout is not None: stdout = ''.join(stdout) else: stdout = '' if stderr is not None: stderr = ''.join(stderr) else: stderr = '' combined = ''.join(combined) # Translate newlines, if requested. We cannot let the file # object do the translation: It is based on stdio, which is # impossible to combine with select (unless forcing no # buffering). if self.universal_newlines and hasattr(file, 'newlines'): if stdout: stdout = self._translate_newlines(stdout) if stderr: stderr = self._translate_newlines(stderr) self.wait() return (stdout, stderr, combined) # Just being a unittest.TestCase gives us 14 public methods. Unless we # disable this, we can only have 6 tests in a TestCase. That's not enough. # # pylint: disable=R0904 class TestSubprocess(unittest.TestCase): """Our simple unit test for this module""" class MyOperation: """Provides a operation that we can pass to Popen""" def __init__(self, input_to_send=None): """Constructor to set up the operation and possible input. Args: input_to_send: a text string to send when we first get input. We will add \r\n to the string. 
""" self.stdout_data = '' self.stderr_data = '' self.combined_data = '' self.stdin_pipe = None self._input_to_send = input_to_send if input_to_send: pipe = os.pipe() self.stdin_read_pipe = pipe[0] self._stdin_write_pipe = os.fdopen(pipe[1], 'w') def Output(self, stream, data): """Output handler for Popen. Stores the data for later comparison""" if stream == sys.stdout: self.stdout_data += data if stream == sys.stderr: self.stderr_data += data self.combined_data += data # Output the input string if we have one. if self._input_to_send: self._stdin_write_pipe.write(self._input_to_send + '\r\n') self._stdin_write_pipe.flush() def _BasicCheck(self, plist, oper): """Basic checks that the output looks sane.""" self.assertEqual(plist[0], oper.stdout_data) self.assertEqual(plist[1], oper.stderr_data) self.assertEqual(plist[2], oper.combined_data) # The total length of stdout and stderr should equal the combined length self.assertEqual(len(plist[0]) + len(plist[1]), len(plist[2])) def test_simple(self): """Simple redirection: Get process list""" oper = TestSubprocess.MyOperation() plist = Popen(['ps']).CommunicateFilter(oper.Output) self._BasicCheck(plist, oper) def test_stderr(self): """Check stdout and stderr""" oper = TestSubprocess.MyOperation() cmd = 'echo fred >/dev/stderr && false || echo bad' plist = Popen([cmd], shell=True).CommunicateFilter(oper.Output) self._BasicCheck(plist, oper) self.assertEqual(plist [0], 'bad\r\n') self.assertEqual(plist [1], 'fred\r\n') def test_shell(self): """Check with and without shell works""" oper = TestSubprocess.MyOperation() cmd = 'echo test >/dev/stderr' self.assertRaises(OSError, Popen, [cmd], shell=False) plist = Popen([cmd], shell=True).CommunicateFilter(oper.Output) self._BasicCheck(plist, oper) self.assertEqual(len(plist [0]), 0) self.assertEqual(plist [1], 'test\r\n') def test_list_args(self): """Check with and without shell works using list arguments""" oper = TestSubprocess.MyOperation() cmd = ['echo', 'test', 
'>/dev/stderr'] plist = Popen(cmd, shell=False).CommunicateFilter(oper.Output) self._BasicCheck(plist, oper) self.assertEqual(plist [0], ' '.join(cmd[1:]) + '\r\n') self.assertEqual(len(plist [1]), 0) oper = TestSubprocess.MyOperation() # this should be interpreted as 'echo' with the other args dropped cmd = ['echo', 'test', '>/dev/stderr'] plist = Popen(cmd, shell=True).CommunicateFilter(oper.Output) self._BasicCheck(plist, oper) self.assertEqual(plist [0], '\r\n') def test_cwd(self): """Check we can change directory""" for shell in (False, True): oper = TestSubprocess.MyOperation() plist = Popen('pwd', shell=shell, cwd='/tmp').CommunicateFilter(oper.Output) self._BasicCheck(plist, oper) self.assertEqual(plist [0], '/tmp\r\n') def test_env(self): """Check we can change environment""" for add in (False, True): oper = TestSubprocess.MyOperation() env = os.environ if add: env ['FRED'] = 'fred' cmd = 'echo $FRED' plist = Popen(cmd, shell=True, env=env).CommunicateFilter(oper.Output) self._BasicCheck(plist, oper) self.assertEqual(plist [0], add and 'fred\r\n' or '\r\n') def test_extra_args(self): """Check we can't add extra arguments""" self.assertRaises(ValueError, Popen, 'true', close_fds=False) def test_basic_input(self): """Check that incremental input works We set up a subprocess which will prompt for name. When we see this prompt we send the name as input to the process. It should then print the name properly to stdout. 
""" oper = TestSubprocess.MyOperation('Flash') prompt = 'What is your name?: ' cmd = 'echo -n "%s"; read name; echo Hello $name' % prompt plist = Popen([cmd], stdin=oper.stdin_read_pipe, shell=True).CommunicateFilter(oper.Output) self._BasicCheck(plist, oper) self.assertEqual(len(plist [1]), 0) self.assertEqual(plist [0], prompt + 'Hello Flash\r\r\n') def test_isatty(self): """Check that ptys appear as terminals to the subprocess""" oper = TestSubprocess.MyOperation() cmd = ('if [ -t %d ]; then echo "terminal %d" >&%d; ' 'else echo "not %d" >&%d; fi;') both_cmds = '' for fd in (1, 2): both_cmds += cmd % (fd, fd, fd, fd, fd) plist = Popen(both_cmds, shell=True).CommunicateFilter(oper.Output) self._BasicCheck(plist, oper) self.assertEqual(plist [0], 'terminal 1\r\n') self.assertEqual(plist [1], 'terminal 2\r\n') # Now try with PIPE and make sure it is not a terminal oper = TestSubprocess.MyOperation() plist = Popen(both_cmds, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).CommunicateFilter(oper.Output) self._BasicCheck(plist, oper) self.assertEqual(plist [0], 'not 1\n') self.assertEqual(plist [1], 'not 2\n') if __name__ == '__main__': unittest.main()
gpl-2.0
Burus/fats
fats/examples/hello_agi.py
1
2045
# -*- test-case-name: twisted.fats.examples.test.test_hello_agi -*- # Copyright (c) 2006-2008 Alexander Burtsev # See LICENSE for details """Hello FastAGI application. API Stability: unstable @author: U{Alexander Burtsev<mailto:eburus@gmail.com>} $Id: hello_agi.py 25 2008-02-18 15:34:56Z burus $ """ from zope.interface import implements, Interface from twisted.python import log, components from twisted.application import internet, service from twisted.internet.defer import inlineCallbacks, returnValue from twisted.fats.service import FastAGIFactory, IFastAGIFactory, CallHandler class HelloCallHandler(CallHandler): """My first call handler. """ def startCall(self): log.msg('Hello FastAGI logging system.') # Answer the call. df = self.agi.answer() # Say number and stop call session df.addCallback(lambda _: self.agi.sayNumber(666)) return df class IHelloFastAGIService(Interface): """Example service interface """ def my_method(param): """Example method @param param: parameter """ class ExampleService: implements(IHelloFastAGIService) class HelloFastAGIFactoryFromService(FastAGIFactory): """My factory from service. Implement service method and use them in the factory if it's required. """ implements(IFastAGIFactory) handler = HelloCallHandler def __init__(self, service): self.service = service def my_method(self, param): """Adapt method from the service. """ return self.service.my_method(param) components.registerAdapter(HelloFastAGIFactoryFromService, IHelloFastAGIService, IFastAGIFactory) # create application and TCP server with factory. PORT = 9000 factory = HelloFastAGIFactoryFromService(ExampleService) application = service.Application("HelloFastAGI") fastagi_service = internet.TCPServer(PORT, factory) fastagi_service.setServiceParent(application)
mit
Pennebaker/wagtail
wagtail/wagtailadmin/tests/test_pages_views.py
10
98014
from datetime import timedelta import mock from django.test import TestCase from django.core.urlresolvers import reverse from django.contrib.auth import get_user_model from django.contrib.auth.models import Group, Permission from django.core import mail from django.core.paginator import Paginator from django.db.models.signals import pre_delete, post_delete from django.utils import timezone from wagtail.tests.testapp.models import ( SimplePage, EventPage, EventPageCarouselItem, StandardIndex, StandardChild, BusinessIndex, BusinessChild, BusinessSubIndex, TaggedPage, Advert, AdvertPlacement) from wagtail.tests.utils import WagtailTestUtils from wagtail.wagtailcore.models import Page, PageRevision from wagtail.wagtailcore.signals import page_published, page_unpublished from wagtail.wagtailusers.models import UserProfile def submittable_timestamp(timestamp): """ Helper function to translate a possibly-timezone-aware datetime into the format used in the go_live_at / expire_at form fields - "YYYY-MM-DD hh:mm", with no timezone indicator. This will be interpreted as being in the server's timezone (settings.TIME_ZONE), so we need to pass it through timezone.localtime to ensure that the client and server are in agreement about what the timestamp means. 
""" return str(timezone.localtime(timestamp)).split('.')[0] class TestPageExplorer(TestCase, WagtailTestUtils): def setUp(self): # Find root page self.root_page = Page.objects.get(id=2) # Add child page self.child_page = SimplePage( title="Hello world!", slug="hello-world", ) self.root_page.add_child(instance=self.child_page) # Login self.login() def test_explore(self): response = self.client.get(reverse('wagtailadmin_explore', args=(self.root_page.id, ))) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, 'wagtailadmin/pages/index.html') self.assertEqual(self.root_page, response.context['parent_page']) self.assertTrue(response.context['pages'].paginator.object_list.filter(id=self.child_page.id).exists()) def test_explore_root(self): response = self.client.get(reverse('wagtailadmin_explore_root')) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, 'wagtailadmin/pages/index.html') self.assertEqual(Page.objects.get(id=1), response.context['parent_page']) self.assertTrue(response.context['pages'].paginator.object_list.filter(id=self.root_page.id).exists()) def test_ordering(self): response = self.client.get(reverse('wagtailadmin_explore_root'), {'ordering': 'content_type'}) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, 'wagtailadmin/pages/index.html') self.assertEqual(response.context['ordering'], 'content_type') def test_invalid_ordering(self): response = self.client.get(reverse('wagtailadmin_explore_root'), {'ordering': 'invalid_order'}) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, 'wagtailadmin/pages/index.html') self.assertEqual(response.context['ordering'], '-latest_revision_created_at') def test_reordering(self): response = self.client.get(reverse('wagtailadmin_explore_root'), {'ordering': 'ord'}) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, 'wagtailadmin/pages/index.html') 
self.assertEqual(response.context['ordering'], 'ord') # Pages must not be paginated self.assertNotIsInstance(response.context['pages'], Paginator) def make_pages(self): for i in range(150): self.root_page.add_child(instance=SimplePage( title="Page " + str(i), slug="page-" + str(i), )) def test_pagination(self): self.make_pages() response = self.client.get(reverse('wagtailadmin_explore', args=(self.root_page.id, )), {'p': 2}) # Check response self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, 'wagtailadmin/pages/index.html') # Check that we got the correct page self.assertEqual(response.context['pages'].number, 2) def test_pagination_invalid(self): self.make_pages() response = self.client.get(reverse('wagtailadmin_explore', args=(self.root_page.id, )), {'p': 'Hello World!'}) # Check response self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, 'wagtailadmin/pages/index.html') # Check that we got page one self.assertEqual(response.context['pages'].number, 1) def test_pagination_out_of_range(self): self.make_pages() response = self.client.get(reverse('wagtailadmin_explore', args=(self.root_page.id, )), {'p': 99999}) # Check response self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, 'wagtailadmin/pages/index.html') # Check that we got the last page self.assertEqual(response.context['pages'].number, response.context['pages'].paginator.num_pages) class TestPageExplorerSignposting(TestCase, WagtailTestUtils): fixtures = ['test.json'] def setUp(self): # Find root page self.root_page = Page.objects.get(id=1) # Find page with an associated site self.site_page = Page.objects.get(id=2) # Add another top-level page (which will have no corresponding site record) self.no_site_page = SimplePage( title="Hello world!", slug="hello-world", ) self.root_page.add_child(instance=self.no_site_page) def test_admin_at_root(self): self.client.login(username='superuser', password='password') response = 
self.client.get(reverse('wagtailadmin_explore_root')) self.assertEqual(response.status_code, 200) # Administrator (or user with add_site permission) should get the full message # about configuring sites self.assertContains(response, "The root level is where you can add new sites to your Wagtail installation. Pages created here will not be accessible at any URL until they are associated with a site.") self.assertContains(response, """<a href="/admin/sites/">Configure a site now.</a>""") def test_admin_at_non_site_page(self): self.client.login(username='superuser', password='password') response = self.client.get(reverse('wagtailadmin_explore', args=(self.no_site_page.id, ))) self.assertEqual(response.status_code, 200) # Administrator (or user with add_site permission) should get a warning about # unroutable pages, and be directed to the site config area self.assertContains(response, "There is no site set up for this location. Pages created here will not be accessible at any URL until a site is associated with this location.") self.assertContains(response, """<a href="/admin/sites/">Configure a site now.</a>""") def test_admin_at_site_page(self): self.client.login(username='superuser', password='password') response = self.client.get(reverse('wagtailadmin_explore', args=(self.site_page.id, ))) self.assertEqual(response.status_code, 200) # There should be no warning message here self.assertNotContains(response, "Pages created here will not be accessible") def test_nonadmin_at_root(self): self.client.login(username='siteeditor', password='password') response = self.client.get(reverse('wagtailadmin_explore_root')) self.assertEqual(response.status_code, 200) # Non-admin should get a simple "create pages as children of the homepage" prompt self.assertContains(response, "Pages created here will not be accessible at any URL. 
To add pages to an existing site, create them as children of the homepage.") def test_nonadmin_at_non_site_page(self): self.client.login(username='siteeditor', password='password') response = self.client.get(reverse('wagtailadmin_explore', args=(self.no_site_page.id, ))) self.assertEqual(response.status_code, 200) # Non-admin should get a warning about unroutable pages self.assertContains(response, "There is no site record for this location. Pages created here will not be accessible at any URL.") def test_nonadmin_at_site_page(self): self.client.login(username='siteeditor', password='password') response = self.client.get(reverse('wagtailadmin_explore', args=(self.site_page.id, ))) self.assertEqual(response.status_code, 200) # There should be no warning message here self.assertNotContains(response, "Pages created here will not be accessible") class TestPageCreation(TestCase, WagtailTestUtils): def setUp(self): # Find root page self.root_page = Page.objects.get(id=2) # Login self.user = self.login() def test_add_subpage(self): response = self.client.get(reverse('wagtailadmin_pages:add_subpage', args=(self.root_page.id, ))) self.assertEqual(response.status_code, 200) def test_add_subpage_bad_permissions(self): # Remove privileges from user self.user.is_superuser = False self.user.user_permissions.add( Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin') ) self.user.save() # Get add subpage page response = self.client.get(reverse('wagtailadmin_pages:add_subpage', args=(self.root_page.id, ))) # Check that the user recieved a 403 response self.assertEqual(response.status_code, 403) def test_add_subpage_nonexistantparent(self): response = self.client.get(reverse('wagtailadmin_pages:add_subpage', args=(100000, ))) self.assertEqual(response.status_code, 404) def test_create_simplepage(self): response = self.client.get(reverse('wagtailadmin_pages:add', args=('tests', 'simplepage', self.root_page.id))) self.assertEqual(response.status_code, 
        # NOTE(review): this region was whitespace-mangled (newlines collapsed to
        # spaces). It has been re-laid-out with conventional formatting; every
        # code token is unchanged. The fragment below completes an
        # assertEqual(response.status_code, ...) call begun before this chunk.
        200)
        self.assertContains(response, '<a href="#content" class="active">Content</a>')
        self.assertContains(response, '<a href="#promote" class="">Promote</a>')

    def test_create_page_without_promote_tab(self):
        """
        Test that the Promote tab is not rendered for page classes that define it as empty
        """
        response = self.client.get(reverse('wagtailadmin_pages:add', args=('tests', 'standardindex', self.root_page.id)))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, '<a href="#content" class="active">Content</a>')
        self.assertNotContains(response, '<a href="#promote" class="">Promote</a>')

    def test_create_page_with_custom_tabs(self):
        """
        Test that custom edit handlers are rendered
        """
        response = self.client.get(reverse('wagtailadmin_pages:add', args=('tests', 'standardchild', self.root_page.id)))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, '<a href="#content" class="active">Content</a>')
        self.assertContains(response, '<a href="#promote" class="">Promote</a>')
        self.assertContains(response, '<a href="#dinosaurs" class="">Dinosaurs</a>')

    def test_create_simplepage_bad_permissions(self):
        # Remove privileges from user
        self.user.is_superuser = False
        self.user.user_permissions.add(
            Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
        )
        self.user.save()

        # Get page
        response = self.client.get(reverse('wagtailadmin_pages:add', args=('tests', 'simplepage', self.root_page.id, )))

        # Check that the user recieved a 403 response
        self.assertEqual(response.status_code, 403)

    def test_create_simplepage_post(self):
        post_data = {
            'title': "New page!",
            'content': "Some content",
            'slug': 'hello-world',
        }
        response = self.client.post(reverse('wagtailadmin_pages:add', args=('tests', 'simplepage', self.root_page.id)), post_data)

        # Find the page and check it
        page = Page.objects.get(path__startswith=self.root_page.path, slug='hello-world').specific

        # Should be redirected to edit page
        self.assertRedirects(response, reverse('wagtailadmin_pages:edit', args=(page.id, )))

        self.assertEqual(page.title, post_data['title'])
        self.assertIsInstance(page, SimplePage)
        self.assertFalse(page.live)
        self.assertFalse(page.first_published_at)

        # treebeard should report no consistency problems with the tree
        self.assertFalse(any(Page.find_problems()), 'treebeard found consistency problems')

    def test_create_simplepage_scheduled(self):
        go_live_at = timezone.now() + timedelta(days=1)
        expire_at = timezone.now() + timedelta(days=2)
        post_data = {
            'title': "New page!",
            'content': "Some content",
            'slug': 'hello-world',
            'go_live_at': submittable_timestamp(go_live_at),
            'expire_at': submittable_timestamp(expire_at),
        }
        response = self.client.post(reverse('wagtailadmin_pages:add', args=('tests', 'simplepage', self.root_page.id)), post_data)

        # Should be redirected to explorer page
        self.assertEqual(response.status_code, 302)

        # Find the page and check the scheduled times
        page = Page.objects.get(path__startswith=self.root_page.path, slug='hello-world').specific
        self.assertEqual(page.go_live_at.date(), go_live_at.date())
        self.assertEqual(page.expire_at.date(), expire_at.date())
        self.assertEqual(page.expired, False)
        self.assertTrue(page.status_string, "draft")

        # No revisions with approved_go_live_at
        self.assertFalse(PageRevision.objects.filter(page=page).exclude(approved_go_live_at__isnull=True).exists())

    def test_create_simplepage_scheduled_go_live_before_expiry(self):
        post_data = {
            'title': "New page!",
            'content': "Some content",
            'slug': 'hello-world',
            'go_live_at': submittable_timestamp(timezone.now() + timedelta(days=2)),
            'expire_at': submittable_timestamp(timezone.now() + timedelta(days=1)),
        }
        response = self.client.post(reverse('wagtailadmin_pages:add', args=('tests', 'simplepage', self.root_page.id)), post_data)

        self.assertEqual(response.status_code, 200)

        # Check that a form error was raised
        self.assertFormError(response, 'form', 'go_live_at', "Go live date/time must be before expiry date/time")
        self.assertFormError(response, 'form', 'expire_at', "Go live date/time must be before expiry date/time")

    def test_create_simplepage_scheduled_expire_in_the_past(self):
        post_data = {
            'title': "New page!",
            'content': "Some content",
            'slug': 'hello-world',
            'expire_at': submittable_timestamp(timezone.now() + timedelta(days=-1)),
        }
        response = self.client.post(reverse('wagtailadmin_pages:add', args=('tests', 'simplepage', self.root_page.id)), post_data)

        self.assertEqual(response.status_code, 200)

        # Check that a form error was raised
        self.assertFormError(response, 'form', 'expire_at', "Expiry date/time must be in the future")

    def test_create_simplepage_post_publish(self):
        # Connect a mock signal handler to page_published signal
        mock_handler = mock.MagicMock()
        page_published.connect(mock_handler)

        # Post
        post_data = {
            'title': "New page!",
            'content': "Some content",
            'slug': 'hello-world',
            'action-publish': "Publish",
        }
        response = self.client.post(reverse('wagtailadmin_pages:add', args=('tests', 'simplepage', self.root_page.id)), post_data)

        # Find the page and check it
        page = Page.objects.get(path__startswith=self.root_page.path, slug='hello-world').specific

        # Should be redirected to explorer
        self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))

        self.assertEqual(page.title, post_data['title'])
        self.assertIsInstance(page, SimplePage)
        self.assertTrue(page.live)
        self.assertTrue(page.first_published_at)

        # Check that the page_published signal was fired
        self.assertEqual(mock_handler.call_count, 1)
        mock_call = mock_handler.mock_calls[0][2]
        self.assertEqual(mock_call['sender'], page.specific_class)
        self.assertEqual(mock_call['instance'], page)
        self.assertIsInstance(mock_call['instance'], page.specific_class)

        # treebeard should report no consistency problems with the tree
        self.assertFalse(any(Page.find_problems()), 'treebeard found consistency problems')

    def test_create_simplepage_post_publish_scheduled(self):
        go_live_at = timezone.now() + timedelta(days=1)
        expire_at = timezone.now() + timedelta(days=2)
        post_data = {
            'title': "New page!",
            'content': "Some content",
            'slug': 'hello-world',
            'action-publish': "Publish",
            'go_live_at': submittable_timestamp(go_live_at),
            'expire_at': submittable_timestamp(expire_at),
        }
        response = self.client.post(reverse('wagtailadmin_pages:add', args=('tests', 'simplepage', self.root_page.id)), post_data)

        # Should be redirected to explorer page
        self.assertEqual(response.status_code, 302)

        # Find the page and check it
        page = Page.objects.get(path__startswith=self.root_page.path, slug='hello-world').specific
        self.assertEqual(page.go_live_at.date(), go_live_at.date())
        self.assertEqual(page.expire_at.date(), expire_at.date())
        self.assertEqual(page.expired, False)

        # A revision with approved_go_live_at should exist now
        self.assertTrue(PageRevision.objects.filter(page=page).exclude(approved_go_live_at__isnull=True).exists())

        # But Page won't be live
        self.assertFalse(page.live)
        self.assertFalse(page.first_published_at)
        self.assertTrue(page.status_string, "scheduled")

    def test_create_simplepage_post_submit(self):
        # Create a moderator user for testing email
        get_user_model().objects.create_superuser('moderator', 'moderator@email.com', 'password')

        # Submit
        post_data = {
            'title': "New page!",
            'content': "Some content",
            'slug': 'hello-world',
            'action-submit': "Submit",
        }
        response = self.client.post(reverse('wagtailadmin_pages:add', args=('tests', 'simplepage', self.root_page.id)), post_data)

        # Find the page and check it
        page = Page.objects.get(path__startswith=self.root_page.path, slug='hello-world').specific

        # Should be redirected to explorer
        self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))

        self.assertEqual(page.title, post_data['title'])
        self.assertIsInstance(page, SimplePage)
        self.assertFalse(page.live)
        self.assertFalse(page.first_published_at)

        # The latest revision for the page should now be in moderation
        self.assertTrue(page.get_latest_revision().submitted_for_moderation)

        # Check that the moderator got an email
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].to, ['moderator@email.com'])
        self.assertEqual(mail.outbox[0].subject, 'The page "New page!" has been submitted for moderation')

    def test_create_simplepage_post_existing_slug(self):
        # This tests the existing slug checking on page save

        # Create a page
        self.child_page = SimplePage()
        self.child_page.title = "Hello world!"
        self.child_page.slug = "hello-world"
        self.root_page.add_child(instance=self.child_page)

        # Attempt to create a new one with the same slug
        post_data = {
            'title': "New page!",
            'content': "Some content",
            'slug': 'hello-world',
            'action-publish': "Publish",
        }
        response = self.client.post(reverse('wagtailadmin_pages:add', args=('tests', 'simplepage', self.root_page.id)), post_data)

        # Should not be redirected (as the save should fail)
        self.assertEqual(response.status_code, 200)

        # Check that a form error was raised
        self.assertFormError(response, 'form', 'slug', "This slug is already in use")

    def test_create_nonexistantparent(self):
        response = self.client.get(reverse('wagtailadmin_pages:add', args=('tests', 'simplepage', 100000)))
        self.assertEqual(response.status_code, 404)

    def test_create_nonpagetype(self):
        response = self.client.get(reverse('wagtailadmin_pages:add', args=('wagtailimages', 'image', self.root_page.id)))
        self.assertEqual(response.status_code, 404)

    def test_preview_on_create(self):
        post_data = {
            'title': "New page!",
            'content': "Some content",
            'slug': 'hello-world',
            'action-submit': "Submit",
        }
        response = self.client.post(reverse('wagtailadmin_pages:preview_on_add', args=('tests', 'simplepage', self.root_page.id)), post_data)

        # Check the response
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'tests/simple_page.html')
        self.assertContains(response, "New page!")

        # Check that the treebeard attributes were set correctly on the page object
        self.assertEqual(response.context['self'].depth, self.root_page.depth + 1)
        self.assertTrue(response.context['self'].path.startswith(self.root_page.path))
        self.assertEqual(response.context['self'].get_parent(), self.root_page)

    def test_whitespace_titles(self):
        post_data = {
            'title': " ",  # Single space on purpose
            'content': "Some content",
            'slug': 'hello-world',
            'action-submit': "Submit",
            'seo_title': '\t',
        }
        response = self.client.post(reverse('wagtailadmin_pages:add', args=('tests', 'simplepage', self.root_page.id)), post_data)

        # Check that a form error was raised
        self.assertFormError(response, 'form', 'title', "Value cannot be entirely whitespace characters")
        self.assertFormError(response, 'form', 'seo_title', "Value cannot be entirely whitespace characters")

    def test_long_slug(self):
        post_data = {
            'title': "Hello world",
            'content': "Some content",
            'slug': 'hello-world-hello-world-hello-world-hello-world-hello-world-hello-world-'
                    'hello-world-hello-world-hello-world-hello-world-hello-world-hello-world-'
                    'hello-world-hello-world-hello-world-hello-world-hello-world-hello-world-'
                    'hello-world-hello-world-hello-world-hello-world-hello-world-hello-world',
            'action-submit': "Submit",
        }
        response = self.client.post(reverse('wagtailadmin_pages:add', args=('tests', 'simplepage', self.root_page.id)), post_data)

        # Check that a form error was raised
        self.assertEqual(response.status_code, 200)
        self.assertFormError(response, 'form', 'slug', "Ensure this value has at most 255 characters (it has 287).")


class TestPageEdit(TestCase, WagtailTestUtils):
    # Tests for the wagtailadmin_pages:edit view (saving, publishing,
    # scheduling, moderation submission, locking and previews).

    def setUp(self):
        # Find root page
        self.root_page = Page.objects.get(id=2)

        # Add child page
        child_page = SimplePage(
            title="Hello world!",
            slug="hello-world",
        )
        self.root_page.add_child(instance=child_page)
        child_page.save_revision().publish()
        self.child_page = SimplePage.objects.get(id=child_page.id)

        # Add event page (to test edit handlers)
        self.event_page = EventPage()
        self.event_page.title = "Event page"
        self.event_page.slug = "event-page"
        self.root_page.add_child(instance=self.event_page)

        # Login
        self.user = self.login()

    def test_page_edit(self):
        # Tests that the edit page loads
        response = self.client.get(reverse('wagtailadmin_pages:edit', args=(self.event_page.id, )))
        self.assertEqual(response.status_code, 200)

    def test_page_edit_bad_permissions(self):
        # Remove privileges from user
        self.user.is_superuser = False
        self.user.user_permissions.add(
            Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
        )
        self.user.save()

        # Get edit page
        response = self.client.get(reverse('wagtailadmin_pages:edit', args=(self.child_page.id, )))

        # Check that the user recieved a 403 response
        self.assertEqual(response.status_code, 403)

    def test_page_edit_post(self):
        # Tests simple editing
        post_data = {
            'title': "I've been edited!",
            'content': "Some content",
            'slug': 'hello-world',
        }
        response = self.client.post(reverse('wagtailadmin_pages:edit', args=(self.child_page.id, )), post_data)

        # Should be redirected to edit page
        self.assertRedirects(response, reverse('wagtailadmin_pages:edit', args=(self.child_page.id, )))

        # The page should have "has_unpublished_changes" flag set
        child_page_new = SimplePage.objects.get(id=self.child_page.id)
        self.assertTrue(child_page_new.has_unpublished_changes)

    def test_page_edit_post_when_locked(self):
        # Tests that trying to edit a locked page results in an error

        # Lock the page
        self.child_page.locked = True
        self.child_page.save()

        # Post
        post_data = {
            'title': "I've been edited!",
            'content': "Some content",
            'slug': 'hello-world',
        }
        response = self.client.post(reverse('wagtailadmin_pages:edit', args=(self.child_page.id, )), post_data)

        # Shouldn't be redirected
        self.assertContains(response, "The page could not be saved as it is locked")

        # The page shouldn't have "has_unpublished_changes" flag set
        child_page_new = SimplePage.objects.get(id=self.child_page.id)
        self.assertFalse(child_page_new.has_unpublished_changes)

    def test_edit_post_scheduled(self):
        # put go_live_at and expire_at several days away from the current date, to avoid
        # false matches in content_json__contains tests
        go_live_at = timezone.now() + timedelta(days=10)
        expire_at = timezone.now() + timedelta(days=20)
        post_data = {
            'title': "I've been edited!",
            'content': "Some content",
            'slug': 'hello-world',
            'go_live_at': submittable_timestamp(go_live_at),
            'expire_at': submittable_timestamp(expire_at),
        }
        response = self.client.post(reverse('wagtailadmin_pages:edit', args=(self.child_page.id, )), post_data)

        # Should be redirected to explorer page
        self.assertEqual(response.status_code, 302)

        child_page_new = SimplePage.objects.get(id=self.child_page.id)

        # The page will still be live
        self.assertTrue(child_page_new.live)

        # A revision with approved_go_live_at should not exist
        self.assertFalse(PageRevision.objects.filter(page=child_page_new).exclude(approved_go_live_at__isnull=True).exists())

        # But a revision with go_live_at and expire_at in their content json *should* exist
        self.assertTrue(PageRevision.objects.filter(page=child_page_new, content_json__contains=str(go_live_at.date())).exists())
        self.assertTrue(PageRevision.objects.filter(page=child_page_new, content_json__contains=str(expire_at.date())).exists())

    def test_edit_scheduled_go_live_before_expiry(self):
        post_data = {
            'title': "I've been edited!",
            'content': "Some content",
            'slug': 'hello-world',
            'go_live_at': submittable_timestamp(timezone.now() + timedelta(days=2)),
            'expire_at': submittable_timestamp(timezone.now() + timedelta(days=1)),
        }
        response = self.client.post(reverse('wagtailadmin_pages:edit', args=(self.child_page.id, )), post_data)

        self.assertEqual(response.status_code, 200)

        # Check that a form error was raised
        self.assertFormError(response, 'form', 'go_live_at', "Go live date/time must be before expiry date/time")
        self.assertFormError(response, 'form', 'expire_at', "Go live date/time must be before expiry date/time")

    def test_edit_scheduled_expire_in_the_past(self):
        post_data = {
            'title': "I've been edited!",
            'content': "Some content",
            'slug': 'hello-world',
            'expire_at': submittable_timestamp(timezone.now() + timedelta(days=-1)),
        }
        response = self.client.post(reverse('wagtailadmin_pages:edit', args=(self.child_page.id, )), post_data)

        self.assertEqual(response.status_code, 200)

        # Check that a form error was raised
        self.assertFormError(response, 'form', 'expire_at', "Expiry date/time must be in the future")

    def test_page_edit_post_publish(self):
        # Connect a mock signal handler to page_published signal
        mock_handler = mock.MagicMock()
        page_published.connect(mock_handler)

        # Set has_unpublished_changes=True on the existing record to confirm that the publish action
        # is resetting it (and not just leaving it alone)
        self.child_page.has_unpublished_changes = True
        self.child_page.save()

        # Save current value of first_published_at so we can check that it doesn't change
        first_published_at = SimplePage.objects.get(id=self.child_page.id).first_published_at

        # Tests publish from edit page
        post_data = {
            'title': "I've been edited!",
            'content': "Some content",
            'slug': 'hello-world',
            'action-publish': "Publish",
        }
        response = self.client.post(reverse('wagtailadmin_pages:edit', args=(self.child_page.id, )), post_data)

        # Should be redirected to explorer
        self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))

        # Check that the page was edited
        child_page_new = SimplePage.objects.get(id=self.child_page.id)
        self.assertEqual(child_page_new.title, post_data['title'])

        # Check that the page_published signal was fired
        self.assertEqual(mock_handler.call_count, 1)
        mock_call = mock_handler.mock_calls[0][2]
        self.assertEqual(mock_call['sender'], child_page_new.specific_class)
        self.assertEqual(mock_call['instance'], child_page_new)
        self.assertIsInstance(mock_call['instance'], child_page_new.specific_class)

        # The page shouldn't have "has_unpublished_changes" flag set
        self.assertFalse(child_page_new.has_unpublished_changes)

        # first_published_at should not change as it was already set
        self.assertEqual(first_published_at, child_page_new.first_published_at)

    def test_edit_post_publish_scheduled(self):
        go_live_at = timezone.now() + timedelta(days=1)
        expire_at = timezone.now() + timedelta(days=2)
        post_data = {
            'title': "I've been edited!",
            'content': "Some content",
            'slug': 'hello-world',
            'action-publish': "Publish",
            'go_live_at': submittable_timestamp(go_live_at),
            'expire_at': submittable_timestamp(expire_at),
        }
        response = self.client.post(reverse('wagtailadmin_pages:edit', args=(self.child_page.id, )), post_data)

        # Should be redirected to explorer page
        self.assertEqual(response.status_code, 302)

        child_page_new = SimplePage.objects.get(id=self.child_page.id)

        # The page should not be live anymore
        self.assertFalse(child_page_new.live)

        # Instead a revision with approved_go_live_at should now exist
        self.assertTrue(PageRevision.objects.filter(page=child_page_new).exclude(approved_go_live_at__isnull=True).exists())

        # The page SHOULD have the "has_unpublished_changes" flag set, because the changes are not visible as a live page yet
        self.assertTrue(child_page_new.has_unpublished_changes, "A page scheduled for future publishing should have has_unpublished_changes=True")

    def test_edit_post_publish_now_an_already_scheduled(self):
        # First let's publish a page with a go_live_at in the future
        go_live_at = timezone.now() + timedelta(days=1)
        expire_at = timezone.now() + timedelta(days=2)
        post_data = {
            'title': "I've been edited!",
            'content': "Some content",
            'slug': 'hello-world',
            'action-publish': "Publish",
            'go_live_at': submittable_timestamp(go_live_at),
            'expire_at': submittable_timestamp(expire_at),
        }
        response = self.client.post(reverse('wagtailadmin_pages:edit', args=(self.child_page.id, )), post_data)

        # Should be redirected to edit page
        self.assertEqual(response.status_code, 302)

        child_page_new = SimplePage.objects.get(id=self.child_page.id)

        # The page should not be live anymore
        self.assertFalse(child_page_new.live)

        # Instead a revision with approved_go_live_at should now exist
        self.assertTrue(PageRevision.objects.filter(page=child_page_new).exclude(approved_go_live_at__isnull=True).exists())

        # Now, let's edit it and publish it right now
        go_live_at = timezone.now()
        post_data = {
            'title': "I've been edited!",
            'content': "Some content",
            'slug': 'hello-world',
            'action-publish': "Publish",
            'go_live_at': "",
        }
        response = self.client.post(reverse('wagtailadmin_pages:edit', args=(self.child_page.id, )), post_data)

        # Should be redirected to edit page
        self.assertEqual(response.status_code, 302)

        child_page_new = SimplePage.objects.get(id=self.child_page.id)

        # The page should be live now
        self.assertTrue(child_page_new.live)

        # And a revision with approved_go_live_at should not exist
        self.assertFalse(PageRevision.objects.filter(page=child_page_new).exclude(approved_go_live_at__isnull=True).exists())

    def test_page_edit_post_submit(self):
        # Create a moderator user for testing email
        get_user_model().objects.create_superuser('moderator', 'moderator@email.com', 'password')

        # Tests submitting from edit page
        post_data = {
            'title': "I've been edited!",
            'content': "Some content",
            'slug': 'hello-world',
            'action-submit': "Submit",
        }
        response = self.client.post(reverse('wagtailadmin_pages:edit', args=(self.child_page.id, )), post_data)

        # Should be redirected to explorer
        self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))

        # The page should have "has_unpublished_changes" flag set
        child_page_new = SimplePage.objects.get(id=self.child_page.id)
        self.assertTrue(child_page_new.has_unpublished_changes)

        # The latest revision for the page should now be in moderation
        self.assertTrue(child_page_new.get_latest_revision().submitted_for_moderation)

        # Check that the moderator got an email
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].to, ['moderator@email.com'])
        self.assertEqual(mail.outbox[0].subject, 'The page "Hello world!" has been submitted for moderation')  # Note: should this be "I've been edited!"?

    def test_page_edit_post_existing_slug(self):
        # This tests the existing slug checking on page edit

        # Create a page
        self.child_page = SimplePage()
        self.child_page.title = "Hello world 2"
        self.child_page.slug = "hello-world2"
        self.root_page.add_child(instance=self.child_page)

        # Attempt to change the slug to one thats already in use
        post_data = {
            'title': "Hello world 2",
            'slug': 'hello-world',
            'action-submit': "Submit",
        }
        response = self.client.post(reverse('wagtailadmin_pages:edit', args=(self.child_page.id, )), post_data)

        # Should not be redirected (as the save should fail)
        self.assertEqual(response.status_code, 200)

        # Check that a form error was raised
        self.assertFormError(response, 'form', 'slug', "This slug is already in use")

    def test_preview_on_edit(self):
        post_data = {
            'title': "I've been edited!",
            'content': "Some content",
            'slug': 'hello-world',
            'action-submit': "Submit",
        }
        response = self.client.post(reverse('wagtailadmin_pages:preview_on_edit', args=(self.child_page.id, )), post_data)

        # Check the response
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'tests/simple_page.html')
        self.assertContains(response, "I&#39;ve been edited!")


class TestPageEditReordering(TestCase, WagtailTestUtils):
    # Tests reordering of child objects (carousel items) via the ORDER
    # formset fields on the page edit view.

    def setUp(self):
        # Find root page
        self.root_page = Page.objects.get(id=2)

        # Add event page
        self.event_page = EventPage()
        self.event_page.title = "Event page"
        self.event_page.slug = "event-page"
        self.event_page.carousel_items = [
            EventPageCarouselItem(caption='1234567', sort_order=1),
            EventPageCarouselItem(caption='7654321', sort_order=2),
            EventPageCarouselItem(caption='abcdefg', sort_order=3),
        ]
        self.root_page.add_child(instance=self.event_page)

        # Login
        self.user = self.login()

    def check_order(self, response, expected_order):
        # NOTE(review): children[0].children[9] assumes a fixed position of the
        # carousel InlinePanel within the edit handler — confirm against the
        # EventPage panel definition if this index ever drifts.
        inline_panel = response.context['edit_handler'].children[0].children[9]
        order = [child.form.instance.caption for child in inline_panel.children]
        self.assertEqual(order, expected_order)

    def test_order(self):
        response = self.client.get(reverse('wagtailadmin_pages:edit', args=(self.event_page.id, )))

        self.assertEqual(response.status_code, 200)
        self.check_order(response, ['1234567', '7654321', 'abcdefg'])

    def test_reorder(self):
        post_data = {
            'title': "Event page",
            'slug': 'event-page',

            'date_from': '01/01/2014',
            'cost': '$10',
            'audience': 'public',
            'location': 'somewhere',

            'related_links-INITIAL_FORMS': 0,
            'related_links-MAX_NUM_FORMS': 1000,
            'related_links-TOTAL_FORMS': 0,

            'speakers-INITIAL_FORMS': 0,
            'speakers-MAX_NUM_FORMS': 1000,
            'speakers-TOTAL_FORMS': 0,

            'carousel_items-INITIAL_FORMS': 3,
            'carousel_items-MAX_NUM_FORMS': 1000,
            'carousel_items-TOTAL_FORMS': 3,
            'carousel_items-0-id': self.event_page.carousel_items.all()[0].id,
            'carousel_items-0-caption': self.event_page.carousel_items.all()[0].caption,
            'carousel_items-0-ORDER': 2,
            'carousel_items-1-id': self.event_page.carousel_items.all()[1].id,
            'carousel_items-1-caption': self.event_page.carousel_items.all()[1].caption,
            'carousel_items-1-ORDER': 3,
            'carousel_items-2-id': self.event_page.carousel_items.all()[2].id,
            'carousel_items-2-caption': self.event_page.carousel_items.all()[2].caption,
            'carousel_items-2-ORDER': 1,
        }
        response = self.client.post(reverse('wagtailadmin_pages:edit', args=(self.event_page.id, )), post_data)

        # Should be redirected back to same page
        self.assertRedirects(response, reverse('wagtailadmin_pages:edit', args=(self.event_page.id, )))

        # Check order
        response = self.client.get(reverse('wagtailadmin_pages:edit', args=(self.event_page.id, )))

        self.assertEqual(response.status_code, 200)
        self.check_order(response, ['abcdefg', '1234567', '7654321'])

    def test_reorder_with_validation_error(self):
        post_data = {
            'title': "",  # Validation error
            'slug': 'event-page',

            'date_from': '01/01/2014',
            'cost': '$10',
            'audience': 'public',
            'location': 'somewhere',

            'related_links-INITIAL_FORMS': 0,
            'related_links-MAX_NUM_FORMS': 1000,
            'related_links-TOTAL_FORMS': 0,

            'speakers-INITIAL_FORMS': 0,
            'speakers-MAX_NUM_FORMS': 1000,
            'speakers-TOTAL_FORMS': 0,

            'carousel_items-INITIAL_FORMS': 3,
            'carousel_items-MAX_NUM_FORMS': 1000,
            'carousel_items-TOTAL_FORMS': 3,
            'carousel_items-0-id': self.event_page.carousel_items.all()[0].id,
            'carousel_items-0-caption': self.event_page.carousel_items.all()[0].caption,
            'carousel_items-0-ORDER': 2,
            'carousel_items-1-id': self.event_page.carousel_items.all()[1].id,
            'carousel_items-1-caption': self.event_page.carousel_items.all()[1].caption,
            'carousel_items-1-ORDER': 3,
            'carousel_items-2-id': self.event_page.carousel_items.all()[2].id,
            'carousel_items-2-caption': self.event_page.carousel_items.all()[2].caption,
            'carousel_items-2-ORDER': 1,
        }
        response = self.client.post(reverse('wagtailadmin_pages:edit', args=(self.event_page.id, )), post_data)

        self.assertEqual(response.status_code, 200)
        self.check_order(response, ['abcdefg', '1234567', '7654321'])


class TestPageDelete(TestCase, WagtailTestUtils):
    # Tests for the wagtailadmin_pages:delete view, including signal firing
    # and recursive deletion of subpages.

    def setUp(self):
        # Find root page
        self.root_page = Page.objects.get(id=2)

        # Add child page
        self.child_page = SimplePage()
        self.child_page.title = "Hello world!"
        self.child_page.slug = "hello-world"
        self.root_page.add_child(instance=self.child_page)

        # Add a page with child pages of its own
        self.child_index = StandardIndex(title="Hello index", slug='hello-index')
        self.root_page.add_child(instance=self.child_index)
        self.grandchild_page = StandardChild(title="Hello Kitty", slug='hello-kitty')
        self.child_index.add_child(instance=self.grandchild_page)

        # Login
        self.user = self.login()

    def test_page_delete(self):
        response = self.client.get(reverse('wagtailadmin_pages:delete', args=(self.child_page.id, )))
        self.assertEqual(response.status_code, 200)

        # deletion should not actually happen on GET
        self.assertTrue(SimplePage.objects.filter(id=self.child_page.id).exists())

    def test_page_delete_bad_permissions(self):
        # Remove privileges from user
        self.user.is_superuser = False
        self.user.user_permissions.add(
            Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
        )
        self.user.save()

        # Get delete page
        response = self.client.get(reverse('wagtailadmin_pages:delete', args=(self.child_page.id, )))

        # Check that the user recieved a 403 response
        self.assertEqual(response.status_code, 403)

        # Check that the deletion has not happened
        self.assertTrue(SimplePage.objects.filter(id=self.child_page.id).exists())

    def test_page_delete_post(self):
        # Connect a mock signal handler to page_unpublished signal
        mock_handler = mock.MagicMock()
        page_unpublished.connect(mock_handler)

        # Post
        response = self.client.post(reverse('wagtailadmin_pages:delete', args=(self.child_page.id, )))

        # Should be redirected to explorer page
        self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))

        # treebeard should report no consistency problems with the tree
        self.assertFalse(any(Page.find_problems()), 'treebeard found consistency problems')

        # Check that the page is gone
        self.assertEqual(Page.objects.filter(path__startswith=self.root_page.path, slug='hello-world').count(), 0)

        # Check that the page_unpublished signal was fired
        self.assertEqual(mock_handler.call_count, 1)
        mock_call = mock_handler.mock_calls[0][2]
        self.assertEqual(mock_call['sender'], self.child_page.specific_class)
        self.assertEqual(mock_call['instance'], self.child_page)
        self.assertIsInstance(mock_call['instance'], self.child_page.specific_class)

    def test_page_delete_notlive_post(self):
        # Same as above, but this makes sure the page_unpublished signal is not fired
        # when if the page is not live when it is deleted

        # Unpublish the page
        self.child_page.live = False
        self.child_page.save()

        # Connect a mock signal handler to page_unpublished signal
        mock_handler = mock.MagicMock()
        page_unpublished.connect(mock_handler)

        # Post
        response = self.client.post(reverse('wagtailadmin_pages:delete', args=(self.child_page.id, )))

        # Should be redirected to explorer page
        self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))

        # treebeard should report no consistency problems with the tree
        self.assertFalse(any(Page.find_problems()), 'treebeard found consistency problems')

        # Check that the page is gone
        self.assertEqual(Page.objects.filter(path__startswith=self.root_page.path, slug='hello-world').count(), 0)

        # Check that the page_unpublished signal was not fired
        self.assertEqual(mock_handler.call_count, 0)

    def test_subpage_deletion(self):
        # Connect mock signal handlers to page_unpublished, pre_delete and post_delete signals
        unpublish_signals_received = []
        pre_delete_signals_received = []
        post_delete_signals_received = []

        def page_unpublished_handler(sender, instance, **kwargs):
            unpublish_signals_received.append((sender, instance.id))

        def pre_delete_handler(sender, instance, **kwargs):
            pre_delete_signals_received.append((sender, instance.id))

        def post_delete_handler(sender, instance, **kwargs):
            post_delete_signals_received.append((sender, instance.id))

        page_unpublished.connect(page_unpublished_handler)
        pre_delete.connect(pre_delete_handler)
        post_delete.connect(post_delete_handler)

        # Post
        response = self.client.post(reverse('wagtailadmin_pages:delete', args=(self.child_index.id, )))

        # Should be redirected to explorer page
        self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))

        # treebeard should report no consistency problems with the tree
        self.assertFalse(any(Page.find_problems()), 'treebeard found consistency problems')

        # Check that the page is gone
        self.assertFalse(StandardIndex.objects.filter(id=self.child_index.id).exists())
        self.assertFalse(Page.objects.filter(id=self.child_index.id).exists())

        # Check that the subpage is also gone
        self.assertFalse(StandardChild.objects.filter(id=self.grandchild_page.id).exists())
        self.assertFalse(Page.objects.filter(id=self.grandchild_page.id).exists())

        # Check that the signals were fired for both pages
        self.assertIn((StandardIndex, self.child_index.id), unpublish_signals_received)
        self.assertIn((StandardChild, self.grandchild_page.id), unpublish_signals_received)

        self.assertIn((StandardIndex, self.child_index.id), pre_delete_signals_received)
        self.assertIn((StandardChild, self.grandchild_page.id), pre_delete_signals_received)

        self.assertIn((StandardIndex, self.child_index.id), post_delete_signals_received)
        self.assertIn((StandardChild, self.grandchild_page.id), post_delete_signals_received)


class TestPageSearch(TestCase, WagtailTestUtils):
    # Tests for the wagtailadmin_pages:search view (HTML and AJAX variants).

    def setUp(self):
        # Login
        self.login()

    def get(self, params=None, **extra):
        return self.client.get(reverse('wagtailadmin_pages:search'), params or {}, **extra)

    def test_view(self):
        response = self.get()
        self.assertTemplateUsed(response, 'wagtailadmin/pages/search.html')
        self.assertEqual(response.status_code, 200)

    def test_search(self):
        response = self.get({'q': "Hello"})
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailadmin/pages/search.html')
        self.assertEqual(response.context['query_string'], "Hello")

    def test_ajax(self):
        response = self.get({'q': "Hello"}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(response.status_code, 200)
        self.assertTemplateNotUsed(response, 'wagtailadmin/pages/search.html')
        self.assertTemplateUsed(response, 'wagtailadmin/pages/search_results.html')
        self.assertEqual(response.context['query_string'], "Hello")

    def test_pagination(self):
        # Out-of-range and non-numeric page numbers should not error
        pages = ['0', '1', '-1', '9999', 'Not a page']
        for page in pages:
            response = self.get({'q': "Hello", 'p': page})
            self.assertEqual(response.status_code, 200)
            self.assertTemplateUsed(response, 'wagtailadmin/pages/search.html')

    def test_root_can_appear_in_search_results(self):
        response = self.get({'q': "roo"})
        self.assertEqual(response.status_code, 200)
        # 'pages' list in the response should contain root
        results = response.context['pages']
        self.assertTrue(any([r.slug == 'root' for r in results]))


class TestPageMove(TestCase, WagtailTestUtils):
    # Tests for the page move / move-confirm / set-page-position views.

    def setUp(self):
        # Find root page
        self.root_page = Page.objects.get(id=2)

        # Create two sections
        self.section_a = SimplePage()
        self.section_a.title = "Section A"
        self.section_a.slug = "section-a"
        self.root_page.add_child(instance=self.section_a)

        self.section_b = SimplePage()
        self.section_b.title = "Section B"
        self.section_b.slug = "section-b"
        self.root_page.add_child(instance=self.section_b)

        # Add test page into section A
        self.test_page = SimplePage()
        self.test_page.title = "Hello world!"
        self.test_page.slug = "hello-world"
        self.section_a.add_child(instance=self.test_page)

        # Login
        self.user = self.login()

    def test_page_move(self):
        response = self.client.get(reverse('wagtailadmin_pages:move', args=(self.test_page.id, )))
        self.assertEqual(response.status_code, 200)

    def test_page_move_bad_permissions(self):
        # Remove privileges from user
        self.user.is_superuser = False
        self.user.user_permissions.add(
            Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
        )
        self.user.save()

        # Get move page
        response = self.client.get(reverse('wagtailadmin_pages:move', args=(self.test_page.id, )))

        # Check that the user recieved a 403 response
        self.assertEqual(response.status_code, 403)

    def test_page_move_confirm(self):
        response = self.client.get(reverse('wagtailadmin_pages:move_confirm', args=(self.test_page.id, self.section_b.id)))
        self.assertEqual(response.status_code, 200)

    def test_page_set_page_position(self):
        response = self.client.get(reverse('wagtailadmin_pages:set_page_position', args=(self.test_page.id, )))
        self.assertEqual(response.status_code, 200)


class TestPageCopy(TestCase, WagtailTestUtils):
    # Tests for the wagtailadmin_pages:copy view: single copy, recursive copy
    # of subpages, publishing of copies, and copying into a new parent.

    def setUp(self):
        # Find root page
        self.root_page = Page.objects.get(id=2)

        # Create a page
        self.test_page = self.root_page.add_child(instance=SimplePage(
            title="Hello world!",
            slug='hello-world',
            live=True,
            has_unpublished_changes=False,
        ))

        # Create a couple of child pages
        self.test_child_page = self.test_page.add_child(instance=SimplePage(
            title="Child page",
            slug='child-page',
            live=True,
            has_unpublished_changes=True,
        ))

        self.test_unpublished_child_page = self.test_page.add_child(instance=SimplePage(
            title="Unpublished Child page",
            slug='unpublished-child-page',
            live=False,
            has_unpublished_changes=True,
        ))

        # Login
        self.user = self.login()

    def test_page_copy(self):
        response = self.client.get(reverse('wagtailadmin_pages:copy', args=(self.test_page.id, )))

        # Check response
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailadmin/pages/copy.html')

        # Make sure all fields are in the form
        self.assertContains(response, "New title")
        self.assertContains(response, "New slug")
        self.assertContains(response, "New parent page")
        self.assertContains(response, "Copy subpages")
        self.assertContains(response, "Publish copies")

    def test_page_copy_bad_permissions(self):
        # Remove privileges from user
        self.user.is_superuser = False
        self.user.user_permissions.add(
            Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
        )
        self.user.save()

        # Get copy page
        post_data = {
            'new_title': "Hello world 2",
            'new_slug': 'hello-world',
            'new_parent_page': str(self.test_page.id),
            'copy_subpages': False,
        }
        response = self.client.post(reverse('wagtailadmin_pages:copy', args=(self.test_page.id, )), post_data)

        # Check that the user received a 403 response
        self.assertEqual(response.status_code, 403)

    def test_page_copy_post(self):
        post_data = {
            'new_title': "Hello world 2",
            'new_slug': 'hello-world-2',
            'new_parent_page': str(self.root_page.id),
            'copy_subpages': False,
            'publish_copies': False,
        }
        response = self.client.post(reverse('wagtailadmin_pages:copy', args=(self.test_page.id, )), post_data)

        # Check that the user was redirected to the parents explore page
        self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))

        # Get copy
        page_copy = self.root_page.get_children().filter(slug='hello-world-2').first()

        # Check that the copy exists
        self.assertNotEqual(page_copy, None)

        # Check that the copy is not live
        self.assertFalse(page_copy.live)
        self.assertTrue(page_copy.has_unpublished_changes)

        # Check that the owner of the page is set correctly
        self.assertEqual(page_copy.owner, self.user)

        # Check that the children were not copied
        self.assertEqual(page_copy.get_children().count(), 0)

        # treebeard should report no consistency problems with the tree
        self.assertFalse(any(Page.find_problems()), 'treebeard found consistency problems')

    def test_page_copy_post_copy_subpages(self):
        post_data = {
            'new_title': "Hello world 2",
            'new_slug': 'hello-world-2',
            'new_parent_page': str(self.root_page.id),
            'copy_subpages': True,
            'publish_copies': False,
        }
        response = self.client.post(reverse('wagtailadmin_pages:copy', args=(self.test_page.id, )), post_data)

        # Check that the user was redirected to the parents explore page
        self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))

        # Get copy
        page_copy = self.root_page.get_children().filter(slug='hello-world-2').first()

        # Check that the copy exists
        self.assertNotEqual(page_copy, None)

        # Check that the copy is not live
        self.assertFalse(page_copy.live)
        self.assertTrue(page_copy.has_unpublished_changes)

        # Check that the owner of the page is set correctly
        self.assertEqual(page_copy.owner, self.user)

        # Check that the children were copied
        self.assertEqual(page_copy.get_children().count(), 2)

        # Check the the child pages
        # Neither of them should be live
        child_copy = page_copy.get_children().filter(slug='child-page').first()
        self.assertNotEqual(child_copy, None)
        self.assertFalse(child_copy.live)
        self.assertTrue(child_copy.has_unpublished_changes)

        unpublished_child_copy = page_copy.get_children().filter(slug='unpublished-child-page').first()
        self.assertNotEqual(unpublished_child_copy, None)
        self.assertFalse(unpublished_child_copy.live)
        self.assertTrue(unpublished_child_copy.has_unpublished_changes)

        # treebeard should report no consistency problems with the tree
        self.assertFalse(any(Page.find_problems()), 'treebeard found consistency problems')

    def test_page_copy_post_copy_subpages_publish_copies(self):
        post_data = {
            'new_title': "Hello world 2",
            'new_slug': 'hello-world-2',
            'new_parent_page': str(self.root_page.id),
            'copy_subpages': True,
            'publish_copies': True,
        }
        response = self.client.post(reverse('wagtailadmin_pages:copy', args=(self.test_page.id, )), post_data)

        # Check that the user was redirected to the parents explore page
        self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))

        # Get copy
        page_copy = self.root_page.get_children().filter(slug='hello-world-2').first()

        # Check that the copy exists
        self.assertNotEqual(page_copy, None)

        # Check that the copy is live
        self.assertTrue(page_copy.live)
        self.assertFalse(page_copy.has_unpublished_changes)

        # Check that the owner of the page is set correctly
        self.assertEqual(page_copy.owner, self.user)

        # Check that the children were copied
        self.assertEqual(page_copy.get_children().count(), 2)

        # Check the the child pages
        # The child_copy should be live but the unpublished_child_copy shouldn't
        child_copy = page_copy.get_children().filter(slug='child-page').first()
        self.assertNotEqual(child_copy, None)
        self.assertTrue(child_copy.live)
        self.assertTrue(child_copy.has_unpublished_changes)

        unpublished_child_copy = page_copy.get_children().filter(slug='unpublished-child-page').first()
        self.assertNotEqual(unpublished_child_copy, None)
        self.assertFalse(unpublished_child_copy.live)
        self.assertTrue(unpublished_child_copy.has_unpublished_changes)

        # treebeard should report no consistency problems with the tree
        self.assertFalse(any(Page.find_problems()), 'treebeard found consistency problems')

    def test_page_copy_post_new_parent(self):
        post_data = {
            'new_title': "Hello world 2",
            'new_slug': 'hello-world-2',
            'new_parent_page': str(self.test_child_page.id),
            'copy_subpages': False,
            'publish_copies': False,
        }
        response = self.client.post(reverse('wagtailadmin_pages:copy', args=(self.test_page.id, )), post_data)

        # Check that the user was redirected to the new parents explore page
        self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.test_child_page.id, )))

        # Check that the page was copied to the correct place
        self.assertTrue(Page.objects.filter(slug='hello-world-2').first().get_parent(), self.test_child_page)

        # treebeard should report no consistency problems with the tree
        # (statement continues past this chunk boundary)
        self.assertFalse(any(Page.find_problems()),
'treebeard found consistency problems') def test_page_copy_post_existing_slug_within_same_parent_page(self): # This tests the existing slug checking on page copy when not changing the parent page # Attempt to copy the page but forget to change the slug post_data = { 'new_title': "Hello world 2", 'new_slug': 'hello-world', 'new_parent_page': str(self.root_page.id), 'copy_subpages': False, } response = self.client.post(reverse('wagtailadmin_pages:copy', args=(self.test_page.id, )), post_data) # Should not be redirected (as the save should fail) self.assertEqual(response.status_code, 200) # Check that a form error was raised self.assertFormError(response, 'form', 'new_slug', "This slug is already in use within the context of its parent page \"Welcome to your new Wagtail site!\"") def test_page_copy_post_existing_slug_to_another_parent_page(self): # This tests the existing slug checking on page copy when changing the parent page # Attempt to copy the page and changed the parent page post_data = { 'new_title': "Hello world 2", 'new_slug': 'hello-world', 'new_parent_page': str(self.test_child_page.id), 'copy_subpages': False, } response = self.client.post(reverse('wagtailadmin_pages:copy', args=(self.test_page.id, )), post_data) # Check that the user was redirected to the parents explore page self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.test_child_page.id, ))) def test_page_copy_post_invalid_slug(self): # Attempt to copy the page but set an invalid slug string post_data = { 'new_title': "Hello world 2", 'new_slug': 'hello world!', 'new_parent_page': str(self.root_page.id), 'copy_subpages': False, } response = self.client.post(reverse('wagtailadmin_pages:copy', args=(self.test_page.id, )), post_data) # Should not be redirected (as the save should fail) self.assertEqual(response.status_code, 200) # Check that a form error was raised self.assertFormError(response, 'form', 'new_slug', "Enter a valid 'slug' consisting of letters, numbers, 
underscores or hyphens.") def test_page_copy_no_publish_permission(self): # Turn user into an editor who can add pages but not publish them self.user.is_superuser = False self.user.groups.add( Group.objects.get(name="Editors"), ) self.user.save() # Get copy page response = self.client.get(reverse('wagtailadmin_pages:copy', args=(self.test_page.id, ))) # The user should have access to the copy page self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, 'wagtailadmin/pages/copy.html') # Make sure the "publish copies" field is hidden self.assertNotContains(response, "Publish copies") def test_page_copy_no_publish_permission_post_copy_subpages_publish_copies(self): # This tests that unprivileged users cannot publish copied pages even if they hack their browser # Turn user into an editor who can add pages but not publish them self.user.is_superuser = False self.user.groups.add( Group.objects.get(name="Editors"), ) self.user.save() # Post post_data = { 'new_title': "Hello world 2", 'new_slug': 'hello-world-2', 'new_parent_page': str(self.root_page.id), 'copy_subpages': True, 'publish_copies': True, } response = self.client.post(reverse('wagtailadmin_pages:copy', args=(self.test_page.id, )), post_data) # Check that the user was redirected to the parents explore page self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, ))) # Get copy page_copy = self.root_page.get_children().filter(slug='hello-world-2').first() # Check that the copy exists self.assertNotEqual(page_copy, None) # Check that the copy is not live self.assertFalse(page_copy.live) # Check that the owner of the page is set correctly self.assertEqual(page_copy.owner, self.user) # Check that the children were copied self.assertEqual(page_copy.get_children().count(), 2) # Check the the child pages # Neither of them should be live child_copy = page_copy.get_children().filter(slug='child-page').first() self.assertNotEqual(child_copy, None) 
        self.assertFalse(child_copy.live)

        unpublished_child_copy = page_copy.get_children().filter(slug='unpublished-child-page').first()
        self.assertNotEqual(unpublished_child_copy, None)
        self.assertFalse(unpublished_child_copy.live)

        # treebeard should report no consistency problems with the tree
        self.assertFalse(any(Page.find_problems()), 'treebeard found consistency problems')


class TestPageUnpublish(TestCase, WagtailTestUtils):
    """Tests for the admin unpublish view: rendering, permissions, and the unpublish action."""

    def setUp(self):
        self.user = self.login()

        # Create a page to unpublish
        self.root_page = Page.objects.get(id=2)
        self.page = SimplePage(
            title="Hello world!",
            slug='hello-world',
            live=True,
        )
        self.root_page.add_child(instance=self.page)

    def test_unpublish_view(self):
        """
        This tests that the unpublish view responds with an unpublish confirm page
        """
        # Get unpublish page
        response = self.client.get(reverse('wagtailadmin_pages:unpublish', args=(self.page.id, )))

        # Check that the user received an unpublish confirm page
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailadmin/pages/confirm_unpublish.html')

    def test_unpublish_view_invalid_page_id(self):
        """
        This tests that the unpublish view returns an error if the page id is invalid
        """
        # Get unpublish page
        response = self.client.get(reverse('wagtailadmin_pages:unpublish', args=(12345, )))

        # Check that the user received a 404 response
        self.assertEqual(response.status_code, 404)

    def test_unpublish_view_bad_permissions(self):
        """
        This tests that the unpublish view doesn't allow users without unpublish permissions
        """
        # Remove privileges from user
        self.user.is_superuser = False
        self.user.user_permissions.add(
            Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
        )
        self.user.save()

        # Get unpublish page
        response = self.client.get(reverse('wagtailadmin_pages:unpublish', args=(self.page.id, )))

        # Check that the user received a 403 response
        self.assertEqual(response.status_code, 403)

    def test_unpublish_view_post(self):
        """
        This posts to the unpublish view and checks that the page was unpublished
        """
        # Connect a mock signal handler to page_unpublished signal
        mock_handler = mock.MagicMock()
        page_unpublished.connect(mock_handler)

        # Post to the unpublish page
        response = self.client.post(reverse('wagtailadmin_pages:unpublish', args=(self.page.id, )))

        # Should be redirected to explorer page
        self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))

        # Check that the page was unpublished
        self.assertFalse(SimplePage.objects.get(id=self.page.id).live)

        # Check that the page_unpublished signal was fired
        self.assertEqual(mock_handler.call_count, 1)
        mock_call = mock_handler.mock_calls[0][2]
        self.assertEqual(mock_call['sender'], self.page.specific_class)
        self.assertEqual(mock_call['instance'], self.page)
        self.assertIsInstance(mock_call['instance'], self.page.specific_class)


class TestApproveRejectModeration(TestCase, WagtailTestUtils):
    """Tests for approving/rejecting a revision submitted for moderation."""

    def setUp(self):
        self.submitter = get_user_model().objects.create_superuser(
            username='submitter',
            email='submitter@email.com',
            password='password',
        )

        self.user = self.login()

        # Create a page and submit it for moderation
        root_page = Page.objects.get(id=2)
        self.page = SimplePage(
            title="Hello world!",
            slug='hello-world',
            live=False,
            has_unpublished_changes=True,
        )
        root_page.add_child(instance=self.page)

        self.page.save_revision(user=self.submitter, submitted_for_moderation=True)
        self.revision = self.page.get_latest_revision()

    def test_approve_moderation_view(self):
        """
        This posts to the approve moderation view and checks that the page was approved
        """
        # Connect a mock signal handler to page_published signal
        mock_handler = mock.MagicMock()
        page_published.connect(mock_handler)

        # Post
        response = self.client.post(reverse('wagtailadmin_pages:approve_moderation', args=(self.revision.id, )))

        # Check that the user was redirected to the dashboard
        self.assertRedirects(response, reverse('wagtailadmin_home'))

        page = Page.objects.get(id=self.page.id)
        # Page must be live
        self.assertTrue(page.live, "Approving moderation failed to set live=True")
        # Page should now have no unpublished changes
        self.assertFalse(page.has_unpublished_changes, "Approving moderation failed to set has_unpublished_changes=False")

        # Check that the page_published signal was fired
        self.assertEqual(mock_handler.call_count, 1)
        mock_call = mock_handler.mock_calls[0][2]
        self.assertEqual(mock_call['sender'], self.page.specific_class)
        self.assertEqual(mock_call['instance'], self.page)
        self.assertIsInstance(mock_call['instance'], self.page.specific_class)

    def test_approve_moderation_when_later_revision_exists(self):
        """Approving an older revision publishes its content and keeps unpublished changes flagged."""
        self.page.title = "Goodbye world!"
        self.page.save_revision(user=self.submitter, submitted_for_moderation=False)

        response = self.client.post(reverse('wagtailadmin_pages:approve_moderation', args=(self.revision.id, )))

        # Check that the user was redirected to the dashboard
        self.assertRedirects(response, reverse('wagtailadmin_home'))

        page = Page.objects.get(id=self.page.id)
        # Page must be live
        self.assertTrue(page.live, "Approving moderation failed to set live=True")
        # Page content should be the submitted version, not the published one
        self.assertEqual(page.title, "Hello world!")
        # Page should still have unpublished changes
        self.assertTrue(page.has_unpublished_changes, "has_unpublished_changes incorrectly cleared on approve_moderation when a later revision exists")

    def test_approve_moderation_view_bad_revision_id(self):
        """
        This tests that the approve moderation view handles invalid revision ids correctly
        """
        # Post
        response = self.client.post(reverse('wagtailadmin_pages:approve_moderation', args=(12345, )))

        # Check that the user received a 404 response
        self.assertEqual(response.status_code, 404)

    def test_approve_moderation_view_bad_permissions(self):
        """
        This tests that the approve moderation view doesn't allow users without moderation permissions
        """
        # Remove privileges from user
        self.user.is_superuser = False
        self.user.user_permissions.add(
            Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
        )
        self.user.save()

        # Post
        response = self.client.post(reverse('wagtailadmin_pages:approve_moderation', args=(self.revision.id, )))

        # Check that the user received a 403 response
        self.assertEqual(response.status_code, 403)

    def test_reject_moderation_view(self):
        """
        This posts to the reject moderation view and checks that the page was rejected
        """
        # Post
        response = self.client.post(reverse('wagtailadmin_pages:reject_moderation', args=(self.revision.id, )))

        # Check that the user was redirected to the dashboard
        self.assertRedirects(response, reverse('wagtailadmin_home'))

        # Page must not be live
        self.assertFalse(Page.objects.get(id=self.page.id).live)

        # Revision must no longer be submitted for moderation
        self.assertFalse(PageRevision.objects.get(id=self.revision.id).submitted_for_moderation)

    def test_reject_moderation_view_bad_revision_id(self):
        """
        This tests that the reject moderation view handles invalid revision ids correctly
        """
        # Post
        response = self.client.post(reverse('wagtailadmin_pages:reject_moderation', args=(12345, )))

        # Check that the user received a 404 response
        self.assertEqual(response.status_code, 404)

    def test_reject_moderation_view_bad_permissions(self):
        """
        This tests that the reject moderation view doesn't allow users without moderation permissions
        """
        # Remove privileges from user
        self.user.is_superuser = False
        self.user.user_permissions.add(
            Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
        )
        self.user.save()

        # Post
        response = self.client.post(reverse('wagtailadmin_pages:reject_moderation', args=(self.revision.id, )))

        # Check that the user received a 403 response
        self.assertEqual(response.status_code, 403)

    def test_preview_for_moderation(self):
        """The moderation preview renders the submitted revision."""
        response = self.client.get(reverse('wagtailadmin_pages:preview_for_moderation', args=(self.revision.id, )))

        # Check response
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'tests/simple_page.html')
        self.assertContains(response, "Hello world!")


class TestContentTypeUse(TestCase, WagtailTestUtils):
    """Tests for the content-type usage report view."""

    fixtures = ['test.json']

    def setUp(self):
        self.user = self.login()

    def test_content_type_use(self):
        """The type_use view lists pages of the requested content type."""
        # Get use of event page
        response = self.client.get(reverse('wagtailadmin_pages:type_use', args=('tests', 'eventpage')))

        # Check response
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailadmin/pages/content_type_use.html')
        self.assertContains(response, "Christmas")


class TestSubpageBusinessRules(TestCase, WagtailTestUtils):
    """Tests that subpage_types / parent_page_types restrictions are enforced in the admin."""

    def setUp(self):
        # Find root page
        self.root_page = Page.objects.get(id=2)

        # Add standard page (allows subpages of any type)
        self.standard_index = StandardIndex()
        self.standard_index.title = "Standard Index"
        self.standard_index.slug = "standard-index"
        self.root_page.add_child(instance=self.standard_index)

        # Add business page (allows BusinessChild and BusinessSubIndex as subpages)
        self.business_index = BusinessIndex()
        self.business_index.title = "Business Index"
        self.business_index.slug = "business-index"
        self.root_page.add_child(instance=self.business_index)

        # Add business child (allows no subpages)
        self.business_child = BusinessChild()
        self.business_child.title = "Business Child"
        self.business_child.slug = "business-child"
        self.business_index.add_child(instance=self.business_child)

        # Add business subindex (allows only BusinessChild as subpages)
        self.business_subindex = BusinessSubIndex()
        self.business_subindex.title = "Business Subindex"
        self.business_subindex.slug = "business-subindex"
        self.business_index.add_child(instance=self.business_subindex)

        # Login
        self.login()

    def test_standard_subpage(self):
        add_subpage_url = reverse('wagtailadmin_pages:add_subpage', args=(self.standard_index.id, ))

        # explorer should contain a link to 'add child page'
        response = self.client.get(reverse('wagtailadmin_explore', args=(self.standard_index.id, )))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, add_subpage_url)

        # add_subpage should give us choices of StandardChild, and BusinessIndex.
        # BusinessSubIndex and BusinessChild are not allowed
        response = self.client.get(add_subpage_url)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, StandardChild.get_verbose_name())
        self.assertContains(response, BusinessIndex.get_verbose_name())
        self.assertNotContains(response, BusinessSubIndex.get_verbose_name())
        self.assertNotContains(response, BusinessChild.get_verbose_name())

    def test_business_subpage(self):
        add_subpage_url = reverse('wagtailadmin_pages:add_subpage', args=(self.business_index.id, ))

        # explorer should contain a link to 'add child page'
        response = self.client.get(reverse('wagtailadmin_explore', args=(self.business_index.id, )))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, add_subpage_url)

        # add_subpage should give us a cut-down set of page types to choose
        response = self.client.get(add_subpage_url)
        self.assertEqual(response.status_code, 200)
        self.assertNotContains(response, StandardIndex.get_verbose_name())
        self.assertNotContains(response, StandardChild.get_verbose_name())
        self.assertContains(response, BusinessSubIndex.get_verbose_name())
        self.assertContains(response, BusinessChild.get_verbose_name())

    def test_business_child_subpage(self):
        add_subpage_url = reverse('wagtailadmin_pages:add_subpage', args=(self.business_child.id, ))

        # explorer should not contain a link to 'add child page', as this page doesn't accept subpages
        response = self.client.get(reverse('wagtailadmin_explore', args=(self.business_child.id, )))
        self.assertEqual(response.status_code, 200)
        self.assertNotContains(response, add_subpage_url)

        # this also means that fetching add_subpage is blocked at the permission-check level
        response = self.client.get(reverse('wagtailadmin_pages:add_subpage', args=(self.business_child.id, )))
        self.assertEqual(response.status_code, 403)

    def test_cannot_add_invalid_subpage_type(self):
        # cannot add StandardChild as a child of BusinessIndex, as StandardChild is not present in subpage_types
        response = self.client.get(reverse('wagtailadmin_pages:add', args=('tests', 'standardchild', self.business_index.id)))
        self.assertEqual(response.status_code, 403)

        # likewise for BusinessChild which has an empty subpage_types list
        response = self.client.get(reverse('wagtailadmin_pages:add', args=('tests', 'standardchild', self.business_child.id)))
        self.assertEqual(response.status_code, 403)

        # cannot add BusinessChild to StandardIndex, as BusinessChild restricts its parent page types
        response = self.client.get(reverse('wagtailadmin_pages:add', args=('tests', 'businesschild', self.standard_index.id)))
        self.assertEqual(response.status_code, 403)

        # but we can add a BusinessChild to BusinessIndex
        response = self.client.get(reverse('wagtailadmin_pages:add', args=('tests', 'businesschild', self.business_index.id)))
        self.assertEqual(response.status_code, 200)

    def test_not_prompted_for_page_type_when_only_one_choice(self):
        response = self.client.get(reverse('wagtailadmin_pages:add_subpage', args=(self.business_subindex.id, )))
        # BusinessChild is the only valid subpage type of BusinessSubIndex, so redirect straight there
        self.assertRedirects(response, reverse('wagtailadmin_pages:add', args=('tests', 'businesschild', self.business_subindex.id)))


class TestNotificationPreferences(TestCase, WagtailTestUtils):
    """Tests that moderation emails respect each user's notification preferences."""

    def setUp(self):
        # Find root page
        self.root_page = Page.objects.get(id=2)

        # Login
        self.user = self.login()

        # Create two moderator users for testing 'submitted' email
        User = get_user_model()
        self.moderator = User.objects.create_superuser('moderator', 'moderator@email.com', 'password')
        self.moderator2 = User.objects.create_superuser('moderator2', 'moderator2@email.com', 'password')

        # Create a submitter for testing 'rejected' and 'approved' emails
        self.submitter = User.objects.create_user('submitter', 'submitter@email.com', 'password')

        # User profiles for moderator2 and the submitter
        self.moderator2_profile = UserProfile.get_for_user(self.moderator2)
        self.submitter_profile = UserProfile.get_for_user(self.submitter)

        # Create a page and submit it for moderation
        self.child_page = SimplePage(
            title="Hello world!",
            slug='hello-world',
            live=False,
        )
        self.root_page.add_child(instance=self.child_page)

        # POST data to edit the page
        self.post_data = {
            'title': "I've been edited!",
            'content': "Some content",
            'slug': 'hello-world',
            'action-submit': "Submit",
        }

    def submit(self):
        # Submit the page for moderation through the edit view
        return self.client.post(reverse('wagtailadmin_pages:edit', args=(self.child_page.id, )), self.post_data)

    def silent_submit(self):
        """
        Sets up the child_page as needing moderation, without making a request
        """
        self.child_page.save_revision(user=self.submitter, submitted_for_moderation=True)
        self.revision = self.child_page.get_latest_revision()

    def approve(self):
        # Approve the pending revision (acting as the logged-in moderator)
        return self.client.post(reverse('wagtailadmin_pages:approve_moderation', args=(self.revision.id, )))

    def reject(self):
        # Reject the pending revision (acting as the logged-in moderator)
        return self.client.post(reverse('wagtailadmin_pages:reject_moderation', args=(self.revision.id, )))

    def test_vanilla_profile(self):
        # Check that the vanilla profile has rejected notifications on
        self.assertEqual(self.submitter_profile.rejected_notifications, True)

        # Check that the vanilla profile has approved notifications on
        self.assertEqual(self.submitter_profile.approved_notifications, True)

    def test_submit_notifications_sent(self):
        # Submit
        self.submit()

        # Check that both the moderators got an email, and no others
        self.assertEqual(len(mail.outbox), 1)
        self.assertIn(self.moderator.email, mail.outbox[0].to)
        self.assertIn(self.moderator2.email, mail.outbox[0].to)
        self.assertEqual(len(mail.outbox[0].to), 2)

    def test_submit_notification_preferences_respected(self):
        # moderator2 doesn't want emails
        self.moderator2_profile.submitted_notifications = False
        self.moderator2_profile.save()

        # Submit
        self.submit()

        # Check that only one moderator got an email
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual([self.moderator.email], mail.outbox[0].to)

    def test_approved_notifications(self):
        # Set up the page version
        self.silent_submit()
        # Approve
        self.approve()

        # Submitter must receive an approved email
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].to, ['submitter@email.com'])
        self.assertEqual(mail.outbox[0].subject, 'The page "Hello world!" has been approved')

    def test_approved_notifications_preferences_respected(self):
        # Submitter doesn't want 'approved' emails
        self.submitter_profile.approved_notifications = False
        self.submitter_profile.save()

        # Set up the page version
        self.silent_submit()
        # Approve
        self.approve()

        # No email to send
        self.assertEqual(len(mail.outbox), 0)

    def test_rejected_notifications(self):
        # Set up the page version
        self.silent_submit()
        # Reject
        self.reject()

        # Submitter must receive a rejected email
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].to, ['submitter@email.com'])
        self.assertEqual(mail.outbox[0].subject, 'The page "Hello world!" has been rejected')

    def test_rejected_notification_preferences_respected(self):
        # Submitter doesn't want 'rejected' emails
        self.submitter_profile.rejected_notifications = False
        self.submitter_profile.save()

        # Set up the page version
        self.silent_submit()
        # Reject
        self.reject()

        # No email to send
        self.assertEqual(len(mail.outbox), 0)


class TestLocking(TestCase, WagtailTestUtils):
    """Tests for the page lock/unlock views (POST-only, permission-checked)."""

    def setUp(self):
        # Find root page
        self.root_page = Page.objects.get(id=2)

        # Login
        self.user = self.login()

        # Create a page and submit it for moderation
        self.child_page = SimplePage(
            title="Hello world!",
            slug='hello-world',
            live=False,
        )
        self.root_page.add_child(instance=self.child_page)

    def test_lock_post(self):
        response = self.client.post(reverse('wagtailadmin_pages:lock', args=(self.child_page.id, )))

        # Check response
        self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))

        # Check that the page is locked
        self.assertTrue(Page.objects.get(id=self.child_page.id).locked)

    def test_lock_get(self):
        # GET is not allowed; the lock view only accepts POST
        response = self.client.get(reverse('wagtailadmin_pages:lock', args=(self.child_page.id, )))

        # Check response
        self.assertEqual(response.status_code, 405)

        # Check that the page is still unlocked
        self.assertFalse(Page.objects.get(id=self.child_page.id).locked)

    def test_lock_post_already_locked(self):
        # Lock the page
        self.child_page.locked = True
        self.child_page.save()

        response = self.client.post(reverse('wagtailadmin_pages:lock', args=(self.child_page.id, )))

        # Check response
        self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))

        # Check that the page is still locked
        self.assertTrue(Page.objects.get(id=self.child_page.id).locked)

    def test_lock_post_with_good_redirect(self):
        response = self.client.post(reverse('wagtailadmin_pages:lock', args=(self.child_page.id, )), {
            'next': reverse('wagtailadmin_pages:edit', args=(self.child_page.id, ))
        })

        # Check response
        self.assertRedirects(response, reverse('wagtailadmin_pages:edit', args=(self.child_page.id, )))

        # Check that the page is locked
        self.assertTrue(Page.objects.get(id=self.child_page.id).locked)

    def test_lock_post_with_bad_redirect(self):
        # An off-site 'next' URL must be ignored in favour of the default redirect
        response = self.client.post(reverse('wagtailadmin_pages:lock', args=(self.child_page.id, )), {
            'next': 'http://www.google.co.uk'
        })

        # Check response
        self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))

        # Check that the page is locked
        self.assertTrue(Page.objects.get(id=self.child_page.id).locked)

    def test_lock_post_bad_page(self):
        response = self.client.post(reverse('wagtailadmin_pages:lock', args=(9999, )))

        # Check response
        self.assertEqual(response.status_code, 404)

        # Check that the page is still unlocked
        self.assertFalse(Page.objects.get(id=self.child_page.id).locked)

    def test_lock_post_bad_permissions(self):
        # Remove privileges from user
        self.user.is_superuser = False
        self.user.user_permissions.add(
            Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
        )
        self.user.save()

        response = self.client.post(reverse('wagtailadmin_pages:lock', args=(self.child_page.id, )))

        # Check response
        self.assertEqual(response.status_code, 403)

        # Check that the page is still unlocked
        self.assertFalse(Page.objects.get(id=self.child_page.id).locked)

    def test_unlock_post(self):
        # Lock the page
        self.child_page.locked = True
        self.child_page.save()

        response = self.client.post(reverse('wagtailadmin_pages:unlock', args=(self.child_page.id, )))

        # Check response
        self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))

        # Check that the page is unlocked
        self.assertFalse(Page.objects.get(id=self.child_page.id).locked)

    def test_unlock_get(self):
        # Lock the page
        self.child_page.locked = True
        self.child_page.save()

        # GET is not allowed; the unlock view only accepts POST
        response = self.client.get(reverse('wagtailadmin_pages:unlock', args=(self.child_page.id, )))

        # Check response
        self.assertEqual(response.status_code, 405)

        # Check that the page is still locked
        self.assertTrue(Page.objects.get(id=self.child_page.id).locked)

    def test_unlock_post_already_unlocked(self):
        response = self.client.post(reverse('wagtailadmin_pages:unlock', args=(self.child_page.id, )))

        # Check response
        self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))

        # Check that the page is still unlocked
        self.assertFalse(Page.objects.get(id=self.child_page.id).locked)

    def test_unlock_post_with_good_redirect(self):
        # Lock the page
        self.child_page.locked = True
        self.child_page.save()

        response = self.client.post(reverse('wagtailadmin_pages:unlock', args=(self.child_page.id, )), {
            'next': reverse('wagtailadmin_pages:edit', args=(self.child_page.id, ))
        })

        # Check response
        self.assertRedirects(response, reverse('wagtailadmin_pages:edit', args=(self.child_page.id, )))

        # Check that the page is unlocked
        self.assertFalse(Page.objects.get(id=self.child_page.id).locked)

    def test_unlock_post_with_bad_redirect(self):
        # Lock the page
        self.child_page.locked = True
        self.child_page.save()

        # An off-site 'next' URL must be ignored in favour of the default redirect
        response = self.client.post(reverse('wagtailadmin_pages:unlock', args=(self.child_page.id, )), {
            'next': 'http://www.google.co.uk'
        })

        # Check response
        self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))

        # Check that the page is unlocked
        self.assertFalse(Page.objects.get(id=self.child_page.id).locked)

    def test_unlock_post_bad_page(self):
        # Lock the page
        self.child_page.locked = True
        self.child_page.save()

        response = self.client.post(reverse('wagtailadmin_pages:unlock', args=(9999, )))

        # Check response
        self.assertEqual(response.status_code, 404)

        # Check that the page is still locked
        self.assertTrue(Page.objects.get(id=self.child_page.id).locked)

    def test_unlock_post_bad_permissions(self):
        # Remove privileges from user
        self.user.is_superuser = False
        self.user.user_permissions.add(
            Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
        )
        self.user.save()

        # Lock the page
        self.child_page.locked = True
        self.child_page.save()

        response = self.client.post(reverse('wagtailadmin_pages:unlock', args=(self.child_page.id, )))

        # Check response
        self.assertEqual(response.status_code, 403)

        # Check that the page is still locked
        self.assertTrue(Page.objects.get(id=self.child_page.id).locked)


class TestIssue197(TestCase, WagtailTestUtils):
    """Regression test: publishing via the edit view must save tags added in the same request."""

    def test_issue_197(self):
        # Find root page
        self.root_page = Page.objects.get(id=2)

        # Create a tagged page with no tags
        self.tagged_page = self.root_page.add_child(instance=TaggedPage(
            title="Tagged page",
            slug='tagged-page',
            live=False,
        ))

        # Login
        self.user = self.login()

        # Add some tags and publish using edit view
        post_data = {
            'title': "Tagged page",
            'slug': 'tagged-page',
            'tags': "hello, world",
            'action-publish': "Publish",
        }
        response = self.client.post(reverse('wagtailadmin_pages:edit', args=(self.tagged_page.id, )), post_data)

        # Should be redirected to explorer
        self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))

        # Check that both tags are in the pages tag set
        page = TaggedPage.objects.get(id=self.tagged_page.id)
        self.assertIn('hello', page.tags.slugs())
        self.assertIn('world', page.tags.slugs())


class TestChildRelationsOnSuperclass(TestCase, WagtailTestUtils):
    # In our test models we define AdvertPlacement as a child relation on the Page model.
    # Here we check that this behaves correctly when exposed on the edit form of a Page
    # subclass (StandardIndex here).
    fixtures = ['test.json']

    def setUp(self):
        # Find root page
        self.root_page = Page.objects.get(id=2)
        self.test_advert = Advert.objects.get(id=1)

        # Add child page
        self.index_page = StandardIndex(
            title="My lovely index",
            slug="my-lovely-index",
            advert_placements=[AdvertPlacement(advert=self.test_advert)]
        )
        self.root_page.add_child(instance=self.index_page)

        # Login
        self.login()

    def test_get_create_form(self):
        response = self.client.get(reverse('wagtailadmin_pages:add', args=('tests', 'standardindex', self.root_page.id)))
        self.assertEqual(response.status_code, 200)

        # Response should include an advert_placements formset labelled Adverts
        self.assertContains(response, "Adverts")
        self.assertContains(response, "id_advert_placements-TOTAL_FORMS")

    def test_post_create_form(self):
        post_data = {
            'title': "New index!",
            'slug': 'new-index',
            'advert_placements-TOTAL_FORMS': '1',
            'advert_placements-INITIAL_FORMS': '0',
            'advert_placements-MAX_NUM_FORMS': '1000',
            'advert_placements-0-advert': '1',
            'advert_placements-0-colour': 'yellow',
            'advert_placements-0-id': '',
        }
        response = self.client.post(reverse('wagtailadmin_pages:add', args=('tests', 'standardindex', self.root_page.id)), post_data)

        # Find the page and check it
        page = Page.objects.get(path__startswith=self.root_page.path, slug='new-index').specific

        # Should be redirected to edit page
        self.assertRedirects(response, reverse('wagtailadmin_pages:edit', args=(page.id, )))

        self.assertEqual(page.advert_placements.count(), 1)
        self.assertEqual(page.advert_placements.first().advert.text, 'test_advert')

    def test_get_edit_form(self):
        response = self.client.get(reverse('wagtailadmin_pages:edit', args=(self.index_page.id, )))
        self.assertEqual(response.status_code, 200)

        # Response should include an advert_placements formset labelled Adverts
        self.assertContains(response, "Adverts")
        self.assertContains(response, "id_advert_placements-TOTAL_FORMS")
        # the formset should be populated with an existing form
        self.assertContains(response, "id_advert_placements-0-advert")
        self.assertContains(response, '<option value="1" selected="selected">test_advert</option>')

    def test_post_edit_form(self):
        post_data = {
            'title': "My lovely index",
            'slug': 'my-lovely-index',
            'advert_placements-TOTAL_FORMS': '2',
            'advert_placements-INITIAL_FORMS': '1',
            'advert_placements-MAX_NUM_FORMS': '1000',
            'advert_placements-0-advert': '1',
            'advert_placements-0-colour': 'yellow',
            'advert_placements-0-id': self.index_page.advert_placements.first().id,
            'advert_placements-1-advert': '1',
            'advert_placements-1-colour': 'purple',
            'advert_placements-1-id': '',
            'action-publish': "Publish",
        }
        response = self.client.post(reverse('wagtailadmin_pages:edit', args=(self.index_page.id, )), post_data)

        # Should be redirected to explorer
        self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))

        # Find the page and check it
        page = Page.objects.get(id=self.index_page.id).specific
        self.assertEqual(page.advert_placements.count(), 2)
        self.assertEqual(page.advert_placements.all()[0].advert.text, 'test_advert')
        self.assertEqual(page.advert_placements.all()[1].advert.text, 'test_advert')
bsd-3-clause
hradec/cortex
test/IECoreRI/MotionTest.py
7
2474
########################################################################## # # Copyright (c) 2009-2013, Image Engine Design Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # * Neither the name of Image Engine Design nor the names of any # other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#
##########################################################################

from __future__ import with_statement

import unittest
import IECore
import IECoreRI
import os.path
import os

class MotionTest( IECoreRI.TestCase ) :

	def test( self ) :

		"""Check that a MotionBlock emits matching MotionBegin/MotionEnd
		calls in the generated RIB."""

		r = IECoreRI.Renderer( "test/IECoreRI/output/motionTest.rib" )

		with IECore.WorldBlock( r ) :

			with IECore.MotionBlock( r, [ 1.75, 2.25 ] ) :

				r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 0 ) ) )
				r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 1 ) ) )

		# The original used the python 2 only file() builtin, leaked the
		# handle, and doubled every newline by joining readlines() with
		# "\n". A context-managed read() is equivalent for the substring
		# assertions below and works on python 3 as well.
		with open( "test/IECoreRI/output/motionTest.rib" ) as f :
			rib = f.read()

		# assertTrue replaces the long-deprecated assert_ alias.
		self.assertTrue( "MotionBegin [ 1.75 2.25 ]" in rib )
		self.assertTrue( "MotionEnd" in rib )

if __name__ == "__main__":
	unittest.main()
bsd-3-clause
xindus40223115/w16b_test
static/Brython3.1.1-20150328-091302/Lib/sre_compile.py
630
16898
# # Secret Labs' Regular Expression Engine # # convert template to internal format # # Copyright (c) 1997-2001 by Secret Labs AB. All rights reserved. # # See the sre.py file for information on usage and redistribution. # """Internal support module for sre""" import sys import _sre import sre_parse from sre_constants import * from _sre import MAXREPEAT assert _sre.MAGIC == MAGIC, "SRE module mismatch" if _sre.CODESIZE == 2: MAXCODE = 65535 else: MAXCODE = 0xFFFFFFFF def _identityfunction(x): return x _LITERAL_CODES = set([LITERAL, NOT_LITERAL]) _REPEATING_CODES = set([REPEAT, MIN_REPEAT, MAX_REPEAT]) _SUCCESS_CODES = set([SUCCESS, FAILURE]) _ASSERT_CODES = set([ASSERT, ASSERT_NOT]) def _compile(code, pattern, flags): # internal: compile a (sub)pattern emit = code.append _len = len LITERAL_CODES = _LITERAL_CODES REPEATING_CODES = _REPEATING_CODES SUCCESS_CODES = _SUCCESS_CODES ASSERT_CODES = _ASSERT_CODES for op, av in pattern: #print('sre_compile.py:_compile:42', op, av) #print('sre_compile.py:_compile:42', code) if op in LITERAL_CODES: if flags & SRE_FLAG_IGNORECASE: emit(OPCODES[OP_IGNORE[op]]) emit(_sre.getlower(av, flags)) else: emit(OPCODES[op]) emit(av) elif op is IN: if flags & SRE_FLAG_IGNORECASE: emit(OPCODES[OP_IGNORE[op]]) def fixup(literal, flags=flags): return _sre.getlower(literal, flags) else: emit(OPCODES[op]) fixup = _identityfunction skip = _len(code); emit(0) _compile_charset(av, flags, code, fixup) code[skip] = _len(code) - skip elif op is ANY: if flags & SRE_FLAG_DOTALL: emit(OPCODES[ANY_ALL]) else: emit(OPCODES[ANY]) elif op in REPEATING_CODES: if flags & SRE_FLAG_TEMPLATE: raise error("internal: unsupported template operator") emit(OPCODES[REPEAT]) skip = _len(code); emit(0) emit(av[0]) emit(av[1]) _compile(code, av[2], flags) emit(OPCODES[SUCCESS]) code[skip] = _len(code) - skip elif _simple(av) and op is not REPEAT: if op is MAX_REPEAT: emit(OPCODES[REPEAT_ONE]) else: emit(OPCODES[MIN_REPEAT_ONE]) skip = _len(code); emit(0) emit(av[0]) 
emit(av[1]) _compile(code, av[2], flags) emit(OPCODES[SUCCESS]) code[skip] = _len(code) - skip else: emit(OPCODES[REPEAT]) skip = _len(code); emit(0) emit(av[0]) emit(av[1]) _compile(code, av[2], flags) code[skip] = _len(code) - skip if op is MAX_REPEAT: emit(OPCODES[MAX_UNTIL]) else: emit(OPCODES[MIN_UNTIL]) elif op is SUBPATTERN: if av[0]: emit(OPCODES[MARK]) emit((av[0]-1)*2) # _compile_info(code, av[1], flags) _compile(code, av[1], flags) if av[0]: emit(OPCODES[MARK]) emit((av[0]-1)*2+1) elif op in SUCCESS_CODES: emit(OPCODES[op]) elif op in ASSERT_CODES: emit(OPCODES[op]) skip = _len(code); emit(0) if av[0] >= 0: emit(0) # look ahead else: lo, hi = av[1].getwidth() if lo != hi: raise error("look-behind requires fixed-width pattern") emit(lo) # look behind _compile(code, av[1], flags) emit(OPCODES[SUCCESS]) code[skip] = _len(code) - skip elif op is CALL: emit(OPCODES[op]) skip = _len(code); emit(0) _compile(code, av, flags) emit(OPCODES[SUCCESS]) code[skip] = _len(code) - skip elif op is AT: emit(OPCODES[op]) if flags & SRE_FLAG_MULTILINE: av = AT_MULTILINE.get(av, av) if flags & SRE_FLAG_LOCALE: av = AT_LOCALE.get(av, av) elif flags & SRE_FLAG_UNICODE: av = AT_UNICODE.get(av, av) emit(ATCODES[av]) elif op is BRANCH: emit(OPCODES[op]) tail = [] tailappend = tail.append for av in av[1]: skip = _len(code); emit(0) # _compile_info(code, av, flags) _compile(code, av, flags) emit(OPCODES[JUMP]) tailappend(_len(code)); emit(0) code[skip] = _len(code) - skip emit(0) # end of branch for tail in tail: code[tail] = _len(code) - tail elif op is CATEGORY: emit(OPCODES[op]) if flags & SRE_FLAG_LOCALE: av = CH_LOCALE[av] elif flags & SRE_FLAG_UNICODE: av = CH_UNICODE[av] emit(CHCODES[av]) elif op is GROUPREF: if flags & SRE_FLAG_IGNORECASE: emit(OPCODES[OP_IGNORE[op]]) else: emit(OPCODES[op]) emit(av-1) elif op is GROUPREF_EXISTS: emit(OPCODES[op]) emit(av[0]-1) skipyes = _len(code); emit(0) _compile(code, av[1], flags) if av[2]: emit(OPCODES[JUMP]) skipno = _len(code); 
emit(0) code[skipyes] = _len(code) - skipyes + 1 _compile(code, av[2], flags) code[skipno] = _len(code) - skipno else: code[skipyes] = _len(code) - skipyes + 1 else: raise ValueError("unsupported operand type", op) def _compile_charset(charset, flags, code, fixup=None): # compile charset subprogram emit = code.append if fixup is None: fixup = _identityfunction for op, av in _optimize_charset(charset, fixup): emit(OPCODES[op]) if op is NEGATE: pass elif op is LITERAL: emit(fixup(av)) elif op is RANGE: emit(fixup(av[0])) emit(fixup(av[1])) elif op is CHARSET: code.extend(av) elif op is BIGCHARSET: code.extend(av) elif op is CATEGORY: if flags & SRE_FLAG_LOCALE: emit(CHCODES[CH_LOCALE[av]]) elif flags & SRE_FLAG_UNICODE: emit(CHCODES[CH_UNICODE[av]]) else: emit(CHCODES[av]) else: raise error("internal: unsupported set operator") emit(OPCODES[FAILURE]) def _optimize_charset(charset, fixup): # internal: optimize character set out = [] outappend = out.append charmap = [0]*256 try: for op, av in charset: if op is NEGATE: outappend((op, av)) elif op is LITERAL: charmap[fixup(av)] = 1 elif op is RANGE: for i in range(fixup(av[0]), fixup(av[1])+1): charmap[i] = 1 elif op is CATEGORY: # XXX: could append to charmap tail return charset # cannot compress except IndexError: # character set contains unicode characters return _optimize_unicode(charset, fixup) # compress character map i = p = n = 0 runs = [] runsappend = runs.append for c in charmap: if c: if n == 0: p = i n = n + 1 elif n: runsappend((p, n)) n = 0 i = i + 1 if n: runsappend((p, n)) if len(runs) <= 2: # use literal/range for p, n in runs: if n == 1: outappend((LITERAL, p)) else: outappend((RANGE, (p, p+n-1))) if len(out) < len(charset): return out else: # use bitmap data = _mk_bitmap(charmap) outappend((CHARSET, data)) return out return charset def _mk_bitmap(bits): data = [] dataappend = data.append if _sre.CODESIZE == 2: start = (1, 0) else: start = (1, 0) m, v = start for c in bits: if c: v = v + m m = m + m if 
m > MAXCODE: dataappend(v) m, v = start return data # To represent a big charset, first a bitmap of all characters in the # set is constructed. Then, this bitmap is sliced into chunks of 256 # characters, duplicate chunks are eliminated, and each chunk is # given a number. In the compiled expression, the charset is # represented by a 16-bit word sequence, consisting of one word for # the number of different chunks, a sequence of 256 bytes (128 words) # of chunk numbers indexed by their original chunk position, and a # sequence of chunks (16 words each). # Compression is normally good: in a typical charset, large ranges of # Unicode will be either completely excluded (e.g. if only cyrillic # letters are to be matched), or completely included (e.g. if large # subranges of Kanji match). These ranges will be represented by # chunks of all one-bits or all zero-bits. # Matching can be also done efficiently: the more significant byte of # the Unicode character is an index into the chunk number, and the # less significant byte is a bit index in the chunk (just like the # CHARSET matching). # In UCS-4 mode, the BIGCHARSET opcode still supports only subsets # of the basic multilingual plane; an efficient representation # for all of UTF-16 has not yet been developed. This means, # in particular, that negated charsets cannot be represented as # bigcharsets. 
def _optimize_unicode(charset, fixup): try: import array except ImportError: return charset charmap = [0]*65536 negate = 0 try: for op, av in charset: if op is NEGATE: negate = 1 elif op is LITERAL: charmap[fixup(av)] = 1 elif op is RANGE: for i in range(fixup(av[0]), fixup(av[1])+1): charmap[i] = 1 elif op is CATEGORY: # XXX: could expand category return charset # cannot compress except IndexError: # non-BMP characters; XXX now they should work return charset if negate: if sys.maxunicode != 65535: # XXX: negation does not work with big charsets # XXX2: now they should work, but removing this will make the # charmap 17 times bigger return charset for i in range(65536): charmap[i] = not charmap[i] comps = {} mapping = [0]*256 block = 0 data = [] for i in range(256): chunk = tuple(charmap[i*256:(i+1)*256]) new = comps.setdefault(chunk, block) mapping[i] = new if new == block: block = block + 1 data = data + _mk_bitmap(chunk) header = [block] if _sre.CODESIZE == 2: code = 'H' else: code = 'I' # Convert block indices to byte array of 256 bytes mapping = array.array('b', mapping).tobytes() # Convert byte array to word array mapping = array.array(code, mapping) assert mapping.itemsize == _sre.CODESIZE assert len(mapping) * mapping.itemsize == 256 header = header + mapping.tolist() data[0:0] = header return [(BIGCHARSET, data)] def _simple(av): # check if av is a "simple" operator lo, hi = av[2].getwidth() if lo == 0 and hi == MAXREPEAT: raise error("nothing to repeat") return lo == hi == 1 and av[2][0][0] != SUBPATTERN def _compile_info(code, pattern, flags): # internal: compile an info block. 
in the current version, # this contains min/max pattern width, and an optional literal # prefix or a character map lo, hi = pattern.getwidth() #print('sre_compile.py:_compile_info:370', lo, hi) if lo == 0: return # not worth it # look for a literal prefix prefix = [] prefixappend = prefix.append prefix_skip = 0 charset = [] # not used charsetappend = charset.append if not (flags & SRE_FLAG_IGNORECASE): # look for literal prefix for op, av in pattern.data: #print('sre_compile.py:_code:381',op,av) if op is LITERAL: if len(prefix) == prefix_skip: prefix_skip = prefix_skip + 1 prefixappend(av) elif op is SUBPATTERN and len(av[1]) == 1: op, av = av[1][0] if op is LITERAL: prefixappend(av) else: break else: break # if no prefix, look for charset prefix if not prefix and pattern.data: op, av = pattern.data[0] if op is SUBPATTERN and av[1]: op, av = av[1][0] if op is LITERAL: charsetappend((op, av)) elif op is BRANCH: c = [] cappend = c.append for p in av[1]: if not p: break op, av = p[0] if op is LITERAL: cappend((op, av)) else: break else: charset = c elif op is BRANCH: c = [] cappend = c.append for p in av[1]: if not p: break op, av = p[0] if op is LITERAL: cappend((op, av)) else: break else: charset = c elif op is IN: charset = av #print('sre_compile.py:_code:430', code) ## if prefix: ## print "*** PREFIX", prefix, prefix_skip ## if charset: ## print "*** CHARSET", charset # add an info block emit = code.append emit(OPCODES[INFO]) skip = len(code); emit(0) # literal flag mask = 0 if prefix: mask = SRE_INFO_PREFIX if len(prefix) == prefix_skip == len(pattern.data): mask = mask + SRE_INFO_LITERAL elif charset: mask = mask + SRE_INFO_CHARSET emit(mask) # pattern length if lo < MAXCODE: emit(lo) else: emit(MAXCODE) prefix = prefix[:MAXCODE] if hi < MAXCODE: emit(hi) else: emit(0) # add literal prefix #print('sre_compile.py:_code:457', code) if prefix: emit(len(prefix)) # length emit(prefix_skip) # skip code.extend(prefix) # generate overlap table table = [-1] + 
([0]*len(prefix)) for i in range(len(prefix)): table[i+1] = table[i]+1 while table[i+1] > 0 and prefix[i] != prefix[table[i+1]-1]: table[i+1] = table[table[i+1]-1]+1 code.extend(table[1:]) # don't store first entry elif charset: _compile_charset(charset, flags, code) code[skip] = len(code) - skip def isstring(obj): return isinstance(obj, (str, bytes)) def _code(p, flags): flags = p.pattern.flags | flags code = [] # compile info block _compile_info(code, p, flags) # compile the pattern _compile(code, p.data, flags) code.append(OPCODES[SUCCESS]) return code def compile(p, flags=0): # internal: convert pattern list to internal format #print("sre_compile.py:compile:504:p", p) if isstring(p): pattern = p p = sre_parse.parse(p, flags) else: pattern = None #print('sre_compile.py:498:p', p) code = _code(p, flags) #print('sre_compile.py:501:code', code) # print code # XXX: <fl> get rid of this limitation! if p.pattern.groups > 100: raise AssertionError( "sorry, but this version only supports 100 named groups" ) # map in either direction groupindex = p.pattern.groupdict indexgroup = [None] * p.pattern.groups for k, i in groupindex.items(): indexgroup[i] = k return _sre.compile( pattern, flags | p.pattern.flags, code, p.pattern.groups-1, groupindex, indexgroup )
gpl-3.0
SlugocM/bayesfit
bayesfit/checkParams.py
1
8573
""" ******************************************************* * * checkParams - CHECK USER PROVIDED ESTIMATES FOR PARAMETERS * * License: Apache 2.0 * Written by: Michael Slugocki * Created on: April 14, 2018 * Last updated: September 13, 2018 * ******************************************************* """ ################################################################# # IMPORT MODULES ################################################################# import numpy as np from .psyFunction import psyfunction as _psyfunction from scipy.optimize import curve_fit import warnings ################################################################# # COMPUTE ESTIMATES FOR SCALE/SLOPE PARAMETERS ################################################################# def _param_guess(data, nafc, sigmoid_type): """Generate initial estimate for parameters governing scale and slope of sigmoidal function. Keyword arguments: data -- m x 3 numpy array nafc -- N-alternative forced choice experiment (integer) sigmoid_type -- sigmoid type fit to data (string) """ # Wrapper function to avoid errors with passing sigmoid type defintion # to curve fit function def wrapper_fnc(x,scale,slope): return _psyfunction(x, scale, slope, 1/nafc, 0.001, sigmoid_type) # Fit quick function to estimate scale and slope param_est = curve_fit(wrapper_fnc, data[:,0], data[:,1]/data[:,2]) # If initial estimates fail, try brute force method if param_est[0][0] != param_est[0][0]: warnings.warn('''Initial estimation of parameters failed. Now using brute method to try and compute initial estimates. Sit tight!''') counter = 0 while param_est[0][0] != param_est[0][0]: scale_guess = (data[:,0].min() + data[:,0].max()) / 2 slope_guess = np.linspace(-200,1000,1000) p0 = [scale_guess, slope_guess[counter]] param_est = curve_fit(wrapper_fnc, data[:,0], data[:,1]/data[:,2], p0) counter += 1 if counter == 999: raise Exception('''Initial parameter estimates could not be generated! 
While this error can arise for a number of reasons. Please see API for more details.''') return param_est[0] ################################################################# # CHECK PARAMETERS ESTIMATES PROVIDED ################################################################# def check_params(data, param_ests, nafc, batch, sigmoid_type): """Performs that initial parameter estimates provided by user is in proper format, and assigns default values where necessary. Keyword arguments: data -- m x 3 numpy array param_ests -- list of user specified initial parameter estimates (list) nafc -- N-alternative forced choice experiment (integer) batch -- specifies whether batch fitting data (logical) sigmoid_type -- sigmoid type fit to data (string) """ if param_ests is not None: # If batch, param_ests needs to be set to None to auto generate # values if batch is True: raise Warning('''Warning: User cannot provide estimates for parameters when Batch options set to TRUE!''') # Check whether the variable for the parameter estimates provided is of type list if isinstance(param_ests, list) is False: raise ValueError('''User Error: Please provide a argument of type list for the parameter estimates of the model.''') # Check number of arguments provided if len(param_ests) < 4: raise ValueError('''User Error: Please provide a list of four parameter estimates for the model. 
Set estimate to NoneType if want BayesFit to generate estimate for a parameter.''') elif len(param_ests) > 4: raise ValueError('''User Error: More than 4 estimates for parameters of the model provided!''') # Generate value estimates for initialization to fill in those not # provided param_initial = [0, 0, 0, 0] param_guess = _param_guess(data, nafc, sigmoid_type) scale_guess = param_guess[0] slope_guess = param_guess[1] gamma_guess = 1/nafc lambda_guess = 0.001 param_tmp = [scale_guess, slope_guess, gamma_guess, lambda_guess] # Check whether arguments in list are of type integer or float NoneType = type(None) for i in range(0,4): if isinstance(param_ests[i], (int, float, NoneType)) is False: raise ValueError('''User Error: Please provide numerical values or NoneType for parameter estimates of the model.''') # Assign default value as needed if param_ests[i] is None: param_initial[i] = param_tmp[i] print('Setting parameter estimates for %s to default value!') elif param_ests[i] is not None: param_initial[i] = param_ests[i] elif param_ests is None: # Get initial estimate of scale and slope if batch is False: param_guess = _param_guess(data, nafc, sigmoid_type) scale_guess = param_guess[0] slope_guess = param_guess[1] gamma_guess = 1/nafc lambda_guess = 0.001 elif batch is True: # Iterate over datasets in dictionary and generate scale estimate # for each set scale_guess = dict() slope_guess = dict() gamma_guess = dict() lambda_guess = dict() for n_datasets in data: param_guess = _param_guess(data[n_datasets], nafc, sigmoid_type) scale_guess[n_datasets] = param_guess[0] slope_guess[n_datasets] = param_guess[1] gamma_guess[n_datasets] = 1/nafc lambda_guess[n_datasets] = 0.001 # Set initial parameter estimates param_initial = [scale_guess, slope_guess, gamma_guess, lambda_guess] return param_initial ################################################################# # CHECK WHICH PARAMETERS ARE FREE (1) VERSUS FIXED (0) 
################################################################# def check_constraints(param_constraints): """Checks whether user provided list specifying which parameters are to be free versus fixed is in proper format, and assigns default values where necessary. Keyword arguments: param_constraints -- specifies which parameters to estimate (list) """ if param_constraints is None: # If no parameter constraints provided, set default values to # [alpha = True, beta = True, gamma = False, lambda = True] param_constraints = [True, True, False, False] elif param_constraints is not None: # Check whether the variable for the parameter constraints provided is of type list if isinstance(param_constraints, list) is False: raise ValueError('''User Error: Please provide a argument of type list indicating which parameters of the model are free (i.e., TRUE) versus fixed (i.e., FALSE).''') # Check whether arguments in list are of type boolean for i in param_constraints: if isinstance(i, bool) is False: raise ValueError('''User Error: Please provide boolean arguments for parameter ' constraints (i.e., free vs fixed) of the model.''') # Check number of arguments provided and set the remaining arguments to default values. if len(param_constraints) < 2: raise ValueError('''User Error: Please provide at least two constraints (scale; slope) for parameters of the model.''') elif len(param_constraints) == 2: print('Setting parameter constraints for gamma and lambda to default values (i.e., fixed)!') param_constraints.append(False) param_constraints.append(True) elif len(param_constraints) == 3: print('Setting parameter constraints for lambda to default value (i.e., fixed)!') param_constraints.append(True) elif len(param_constraints) > 4: raise ValueError('''User Error: More than 4 constraints for parameters of the model provided!''') return param_constraints
apache-2.0
invisiblek/python-for-android
python3-alpha/extra_modules/gdata/finance/service.py
261
8974
#!/usr/bin/python
#
# Copyright (C) 2009 Tan Swee Heng
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Classes to interact with the Google Finance server."""

__author__ = 'thesweeheng@gmail.com'

import gdata.service
import gdata.finance
import atom


class PortfolioQuery(gdata.service.Query):
  """A query object for the list of a user's portfolios."""

  def returns(self):
    return self.get('returns', False)

  def set_returns(self, value):
    # BUG FIX: the original compared with "value is 'true'" — identity
    # against a string literal only works by accident of interning.
    if value == 'true' or value is True:
      self['returns'] = 'true'

  returns = property(returns, set_returns,
      doc="The returns query parameter")

  def positions(self):
    return self.get('positions', False)

  def set_positions(self, value):
    if value == 'true' or value is True:
      self['positions'] = 'true'

  positions = property(positions, set_positions,
      doc="The positions query parameter")


class PositionQuery(gdata.service.Query):
  """A query object for the list of a user's positions in a portfolio."""

  def returns(self):
    return self.get('returns', False)

  def set_returns(self, value):
    if value == 'true' or value is True:
      self['returns'] = 'true'

  returns = property(returns, set_returns,
      doc="The returns query parameter")

  def transactions(self):
    return self.get('transactions', False)

  def set_transactions(self, value):
    if value == 'true' or value is True:
      self['transactions'] = 'true'

  transactions = property(transactions, set_transactions,
      doc="The transactions query parameter")


class FinanceService(gdata.service.GDataService):

  def __init__(self, email=None, password=None, source=None,
               server='finance.google.com', **kwargs):
    """Creates a client for the Finance service.

    Args:
      email: string (optional) The user's email address, used for
          authentication.
      password: string (optional) The user's password.
      source: string (optional) The name of the user's application.
      server: string (optional) The name of the server to which a connection
          will be opened. Default value: 'finance.google.com'.
      **kwargs: The other parameters to pass to gdata.service.GDataService
          constructor.
    """
    gdata.service.GDataService.__init__(
        self, email=email, password=password, service='finance',
        source=source, server=server, **kwargs)

  def GetPortfolioFeed(self, query=None):
    uri = '/finance/feeds/default/portfolios'
    if query:
      uri = PortfolioQuery(feed=uri, params=query).ToUri()
    return self.Get(uri, converter=gdata.finance.PortfolioFeedFromString)

  def GetPositionFeed(self, portfolio_entry=None, portfolio_id=None,
      query=None):
    """
    Args:
      portfolio_entry: PortfolioEntry (optional; see Notes)
      portfolio_id: string (optional; see Notes) This may be obtained
          from a PortfolioEntry's portfolio_id attribute.
      query: PortfolioQuery (optional)

    Notes:
      Either a PortfolioEntry OR a portfolio ID must be provided.
    """
    if portfolio_entry:
      uri = portfolio_entry.GetSelfLink().href + '/positions'
    elif portfolio_id:
      uri = '/finance/feeds/default/portfolios/%s/positions' % portfolio_id
    if query:
      uri = PositionQuery(feed=uri, params=query).ToUri()
    return self.Get(uri, converter=gdata.finance.PositionFeedFromString)

  def GetTransactionFeed(self, position_entry=None,
      portfolio_id=None, ticker_id=None):
    """
    Args:
      position_entry: PositionEntry (optional; see Notes)
      portfolio_id: string (optional; see Notes) This may be obtained
          from a PortfolioEntry's portfolio_id attribute.
      ticker_id: string (optional; see Notes) This may be obtained from
          a PositionEntry's ticker_id attribute. Alternatively it can
          be constructed using the security's exchange and symbol,
          e.g. 'NASDAQ:GOOG'

    Notes:
      Either a PositionEntry OR (a portfolio ID AND ticker ID) must
      be provided.
    """
    if position_entry:
      uri = position_entry.GetSelfLink().href + '/transactions'
    elif portfolio_id and ticker_id:
      uri = '/finance/feeds/default/portfolios/%s/positions/%s/transactions' \
          % (portfolio_id, ticker_id)
    return self.Get(uri, converter=gdata.finance.TransactionFeedFromString)

  def GetPortfolio(self, portfolio_id=None, query=None):
    uri = '/finance/feeds/default/portfolios/%s' % portfolio_id
    if query:
      uri = PortfolioQuery(feed=uri, params=query).ToUri()
    return self.Get(uri, converter=gdata.finance.PortfolioEntryFromString)

  def AddPortfolio(self, portfolio_entry=None):
    uri = '/finance/feeds/default/portfolios'
    return self.Post(portfolio_entry, uri,
        converter=gdata.finance.PortfolioEntryFromString)

  def UpdatePortfolio(self, portfolio_entry=None):
    uri = portfolio_entry.GetEditLink().href
    return self.Put(portfolio_entry, uri,
        converter=gdata.finance.PortfolioEntryFromString)

  def DeletePortfolio(self, portfolio_entry=None):
    uri = portfolio_entry.GetEditLink().href
    return self.Delete(uri)

  def GetPosition(self, portfolio_id=None, ticker_id=None, query=None):
    uri = '/finance/feeds/default/portfolios/%s/positions/%s' \
        % (portfolio_id, ticker_id)
    if query:
      uri = PositionQuery(feed=uri, params=query).ToUri()
    return self.Get(uri, converter=gdata.finance.PositionEntryFromString)

  def DeletePosition(self, position_entry=None,
      portfolio_id=None, ticker_id=None, transaction_feed=None):
    """A position is deleted by deleting all its transactions.

    Args:
      position_entry: PositionEntry (optional; see Notes)
      portfolio_id: string (optional; see Notes) This may be obtained
          from a PortfolioEntry's portfolio_id attribute.
      ticker_id: string (optional; see Notes) This may be obtained from
          a PositionEntry's ticker_id attribute. Alternatively it can
          be constructed using the security's exchange and symbol,
          e.g. 'NASDAQ:GOOG'
      transaction_feed: TransactionFeed (optional; see Notes)

    Notes:
      Either a PositionEntry OR (a portfolio ID AND ticker ID) OR a
      TransactionFeed must be provided.
    """
    if transaction_feed:
      feed = transaction_feed
    else:
      if position_entry:
        feed = self.GetTransactionFeed(position_entry=position_entry)
      elif portfolio_id and ticker_id:
        feed = self.GetTransactionFeed(
            portfolio_id=portfolio_id, ticker_id=ticker_id)
    for txn in feed.entry:
      self.DeleteTransaction(txn)
    return True

  def GetTransaction(self, portfolio_id=None, ticker_id=None,
      transaction_id=None):
    uri = '/finance/feeds/default/portfolios/%s/positions/%s/transactions/%s' \
        % (portfolio_id, ticker_id, transaction_id)
    return self.Get(uri, converter=gdata.finance.TransactionEntryFromString)

  def AddTransaction(self, transaction_entry=None, transaction_feed=None,
      position_entry=None, portfolio_id=None, ticker_id=None):
    """
    Args:
      transaction_entry: TransactionEntry (required)
      transaction_feed: TransactionFeed (optional; see Notes)
      position_entry: PositionEntry (optional; see Notes)
      portfolio_id: string (optional; see Notes) This may be obtained
          from a PortfolioEntry's portfolio_id attribute.
      ticker_id: string (optional; see Notes) This may be obtained from
          a PositionEntry's ticker_id attribute. Alternatively it can
          be constructed using the security's exchange and symbol,
          e.g. 'NASDAQ:GOOG'

    Notes:
      Either a TransactionFeed OR a PositionEntry OR (a portfolio ID AND
      ticker ID) must be provided.
    """
    if transaction_feed:
      uri = transaction_feed.GetPostLink().href
    elif position_entry:
      uri = position_entry.GetSelfLink().href + '/transactions'
    elif portfolio_id and ticker_id:
      uri = '/finance/feeds/default/portfolios/%s/positions/%s/transactions' \
          % (portfolio_id, ticker_id)
    return self.Post(transaction_entry, uri,
        converter=gdata.finance.TransactionEntryFromString)

  def UpdateTransaction(self, transaction_entry=None):
    uri = transaction_entry.GetEditLink().href
    return self.Put(transaction_entry, uri,
        converter=gdata.finance.TransactionEntryFromString)

  def DeleteTransaction(self, transaction_entry=None):
    uri = transaction_entry.GetEditLink().href
    return self.Delete(uri)
apache-2.0
deluge-clone/deluge
deluge/ui/web/gen_gettext.py
7
1609
#!/usr/bin/python
"""
Script to go through the javascript files and dynamically generate gettext.js
"""
import os
import re

# File the generated translation bootstrap script is written to.
output_file = "gettext.js"

# Matches translatable strings wrapped in the _('...') marker function.
string_re = re.compile('_\\(\'(.*?)\'\\)')

# Maps each translatable string to the list of (filename, lineno) pairs
# where it occurs, so the locations can be emitted as comments.
strings = {}

gettext_tpl = """## -*- coding: utf-8 -*-
/*
 * Script: gettext.js
 *  A script file that is run through the template renderer in order for
 *  translated strings to be used.
 *
 * Copyright:
 *  (c) 2009 Damien Churchill <damoxc@gmail.com>
 */

GetText = {
    maps: {},
    add: function(string, translation) {
        this.maps[string] = translation;
    },
    get: function(string) {
        if (this.maps[string]) {
            return this.maps[string];
        } else {
            return string;
        }
    }
}

function _(string) {
    return GetText.get(string);
}
"""

# Scan every javascript file under js/deluge-all for _('...') usages.
for root, dnames, files in os.walk('js/deluge-all'):
    for filename in files:
        if filename.startswith('.'):
            continue
        if not filename.endswith('.js'):
            continue
        # BUG FIX: the file handle was previously opened inline and never
        # closed; a context manager releases it deterministically.
        with open(os.path.join(root, filename)) as js_file:
            for lineno, line in enumerate(js_file):
                for match in string_re.finditer(line):
                    string = match.group(1)
                    locations = strings.get(string, [])
                    locations.append((os.path.basename(filename), lineno + 1))
                    strings[string] = locations

# BUG FIX: the original `keys = strings.keys(); keys.sort()` only works on
# Python 2 (dict views have no .sort() on Python 3); sorted() is equivalent
# on both.  The output file is also closed via `with` now.
with open(output_file, 'w') as fp:
    fp.write(gettext_tpl)
    for key in sorted(strings):
        fp.write('// %s\n' % ', '.join(map(lambda x: '%s:%s' % x, strings[key])))
        fp.write("GetText.add('%(key)s', '${escape(_(\"%(key)s\"))}')\n\n"
                 % {'key': key})
gpl-3.0
shishaochen/TensorFlow-0.8-Win
third_party/grpc/src/python/grpcio/grpc/framework/foundation/callable_util.py
21
3960
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Utilities for working with callables."""

import abc
import collections
import enum
import functools
import logging

import six


class Outcome(six.with_metaclass(abc.ABCMeta)):
    """A sum type describing the outcome of some call.

    Attributes:
      kind: One of Kind.RETURNED or Kind.RAISED respectively indicating
        that the call returned a value or raised an exception.
      return_value: The value returned by the call. Must be present if
        kind is Kind.RETURNED.
      exception: The exception raised by the call. Must be present if
        kind is Kind.RAISED.
    """

    @enum.unique
    class Kind(enum.Enum):
        """Identifies the general kind of the outcome of some call."""

        # Distinct sentinel objects: only identity matters for the members.
        RETURNED = object()
        RAISED = object()


# The namedtuple supplies the kind/return_value/exception fields; also
# inheriting from Outcome makes instances satisfy isinstance checks
# against the abstract base class.
class _EasyOutcome(
        collections.namedtuple(
            '_EasyOutcome', ['kind', 'return_value', 'exception']),
        Outcome):
    """A trivial implementation of Outcome."""


def _call_logging_exceptions(behavior, message, *args, **kwargs):
    # Shared implementation for the two public entry points below: invoke
    # the behavior and fold either result (value or exception) into an
    # Outcome instead of letting the exception propagate.
    try:
        return _EasyOutcome(Outcome.Kind.RETURNED, behavior(*args, **kwargs), None)
    except Exception as e:  # pylint: disable=broad-except
        # Deliberately broad: the entire purpose of this helper is to log
        # and capture whatever the wrapped behavior raises.
        logging.exception(message)
        return _EasyOutcome(Outcome.Kind.RAISED, None, e)


def with_exceptions_logged(behavior, message):
    """Wraps a callable in a try-except that logs any exceptions it raises.

    Args:
      behavior: Any callable.
      message: A string to log if the behavior raises an exception.

    Returns:
      A callable that when executed invokes the given behavior. The
        returned callable takes the same arguments as the given behavior
        but returns a future.Outcome describing whether the given
        behavior returned a value or raised an exception.
    """
    @functools.wraps(behavior)
    def wrapped_behavior(*args, **kwargs):
        return _call_logging_exceptions(behavior, message, *args, **kwargs)
    return wrapped_behavior


def call_logging_exceptions(behavior, message, *args, **kwargs):
    """Calls a behavior in a try-except that logs any exceptions it raises.

    Args:
      behavior: Any callable.
      message: A string to log if the behavior raises an exception.
      *args: Positional arguments to pass to the given behavior.
      **kwargs: Keyword arguments to pass to the given behavior.

    Returns:
      An Outcome describing whether the given behavior returned a value
        or raised an exception.
    """
    return _call_logging_exceptions(behavior, message, *args, **kwargs)
apache-2.0
dav94/plastex
plasTeX/Packages/amsmath.py
6
1791
#!/usr/bin/env python from plasTeX import Command, Environment from plasTeX.Base.LaTeX.Arrays import Array from plasTeX.Base.LaTeX.Math import EqnarrayStar, equation, eqnarray #### Imports Added by Tim #### from plasTeX.Base.LaTeX.Math import math class pmatrix(Array): pass class _AMSEquation(eqnarray): pass class _AMSEquationStar(EqnarrayStar): macroName = None class align(_AMSEquation): pass class AlignStar(_AMSEquationStar): macroName = 'align*' class gather(_AMSEquation): pass class GatherStar(_AMSEquationStar): macroName = 'gather*' class falign(_AMSEquation): pass class FAlignStar(_AMSEquationStar): macroName = 'falign*' class multiline(_AMSEquation): pass class MultilineStar(_AMSEquationStar): macroName = 'multiline*' class alignat(_AMSEquation): pass class AlignatStar(_AMSEquationStar): macroName = 'alignat*' class split(_AMSEquation): pass #### Added by Tim #### class EquationStar(_AMSEquationStar): macroName = 'equation*' class aligned(_AMSEquation): pass class cases(_AMSEquation): pass class alignat(_AMSEquation): args = 'column:int' class AlignatStar(_AMSEquationStar): args = 'column:int' macroName = 'alignat*' class flalign(_AMSEquation): pass class FlalignStar(_AMSEquationStar): macroName = 'flalign*' class subequations(_AMSEquation): pass class xalignat(alignat): pass class multline(multiline): pass class MultlineStar(MultilineStar): macroName = 'multline*' class matrix(Array): pass class vmatrix(Array): pass class Vmatrix(Array): pass class bmatrix(Array): pass class Bmatrix(Array): pass #### Inline Math class smallmatrix(math): pass class dddot(math): pass class ddddot(math): pass
mit
esmoyon1/GeonodeV1
geonode/base/management/commands/lib/gn20_to_24.py
11
9206
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################

import re
import datetime

try:
    import json
except ImportError:
    from django.utils import simplejson as json


class DefaultMangler(json.JSONDecoder):
    """Base JSON decoder used to migrate GeoNode 2.0 fixtures to 2.4.

    Subclasses override decode() to rewrite the decoded fixture objects
    (primary keys, renamed/added/removed fields) before loading.
    """

    def __init__(self, *args, **kwargs):
        # Offset added to every primary key so imported rows do not
        # collide with rows already present in the target database.
        self.basepk = kwargs.get('basepk', -1)
        # Fallback owner username for migrated objects.
        self.owner = kwargs.get('owner', 'admin')
        # Target datastore name; empty means "use the layer's own name".
        self.datastore = kwargs.get('datastore', '')
        # Base URL of the destination site, used to rebuild detail URLs.
        self.siteurl = kwargs.get('siteurl', '')

        super(DefaultMangler, self).__init__(*args)

    def default(self, obj):
        # Let the base class default method raise the TypeError
        # NOTE(review): delegates to json.JSONEncoder even though this
        # class derives from JSONDecoder; it exists only to raise.
        return json.JSONEncoder.default(self, obj)

    def decode(self, json_string):
        """
        json_string is basicly string that you give to json.loads method
        """
        default_obj = super(DefaultMangler, self).decode(json_string)

        # manipulate your object any way you want
        # ....
        return default_obj


class ResourceBaseMangler(DefaultMangler):
    """Mangler for base.resourcebase fixtures.

    Shifts primary keys, fills the fields new in 2.4, rewrites the 2.0
    distribution_url into a 2.4 detail_url, and appends one synthetic
    upload session per resource.
    """

    def default(self, obj):
        # Let the base class default method raise the TypeError
        return json.JSONEncoder.default(self, obj)

    def decode(self, json_string):
        """
        json_string is basicly string that you give to json.loads method
        """
        default_obj = super(ResourceBaseMangler, self).decode(json_string)

        # manipulate your object any way you want
        # ....
        upload_sessions = []
        for obj in default_obj:
            obj['pk'] = obj['pk'] + self.basepk

            # Defaults for fields introduced after GeoNode 2.0.
            obj['fields']['featured'] = False
            obj['fields']['rating'] = 0
            obj['fields']['popular_count'] = 0
            obj['fields']['share_count'] = 0
            obj['fields']['is_published'] = True
            obj['fields']['thumbnail_url'] = ''

            if 'distribution_url' in obj['fields']:
                # A 'layers' URL marks a Layer resource; anything else is
                # assumed to be a Map.
                if not obj['fields']['distribution_url'] is None and 'layers' in obj['fields']['distribution_url']:
                    obj['fields']['polymorphic_ctype'] = ["layers", "layer"]
                    try:
                        # Strip the old host/port and re-anchor the path
                        # on the destination site URL.
                        p = '(?P<protocol>http.*://)?(?P<host>[^:/ ]+).?(?P<port>[0-9]*)(?P<details_url>.*)'
                        m = re.search(p, obj['fields']['distribution_url'])
                        if 'http' in m.group('protocol'):
                            obj['fields']['detail_url'] = self.siteurl + m.group('details_url')
                        else:
                            obj['fields']['detail_url'] = self.siteurl + obj['fields']['distribution_url']
                    except:
                        # NOTE(review): bare except — best effort; falls
                        # back to the unmodified URL on any parse failure.
                        obj['fields']['detail_url'] = obj['fields']['distribution_url']
                else:
                    obj['fields']['polymorphic_ctype'] = ["maps", "map"]

            # Drop fields removed in 2.4.  NOTE(review): dict.pop() with a
            # default never raises, so these try/except wrappers are inert.
            try:
                obj['fields'].pop("distribution_description", None)
            except:
                pass

            try:
                obj['fields'].pop("distribution_url", None)
            except:
                pass

            try:
                obj['fields'].pop("thumbnail", None)
            except:
                pass

            upload_sessions.append(self.add_upload_session(obj['pk'], obj['fields']['owner']))

        default_obj.extend(upload_sessions)

        return default_obj

    def add_upload_session(self, pk, owner):
        # Build the layers.uploadsession fixture object required by 2.4,
        # sharing the resource's pk so the two are paired on load.
        obj = dict()
        obj['pk'] = pk
        obj['model'] = 'layers.uploadsession'
        obj['fields'] = dict()
        obj['fields']['user'] = owner
        obj['fields']['traceback'] = None
        obj['fields']['context'] = None
        obj['fields']['error'] = None
        obj['fields']['processed'] = True
        obj['fields']['date'] = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S")
        return obj


class LayerMangler(DefaultMangler):
    """Mangler for layers.layer fixtures: shifts pks, links the upload
    session created by ResourceBaseMangler, and fills 2.4-only fields."""

    def default(self, obj):
        # Let the base class default method raise the TypeError
        return json.JSONEncoder.default(self, obj)

    def decode(self, json_string):
        """
        json_string is basicly string that you give to json.loads method
        """
        default_obj = super(LayerMangler, self).decode(json_string)

        # manipulate your object any way you want
        # ....
        for obj in default_obj:
            obj['pk'] = obj['pk'] + self.basepk

            # Retrieve the ResourceBase associated to this Layer
            # (assumes the resourcebase fixture was loaded first).
            from geonode.base.models import ResourceBase
            resource = ResourceBase.objects.get(pk=obj['pk'])

            obj['fields']['upload_session'] = obj['pk']
            obj['fields']['service'] = None
            obj['fields']['charset'] = "UTF-8"
            obj['fields']['title_en'] = resource.title
            obj['fields']['data_quality_statement_en'] = ""
            obj['fields']['regions'] = []
            obj['fields']['supplemental_information_en'] = "No information provided"
            obj['fields']['abstract_en'] = "No abstract provided"
            obj['fields']['purpose_en'] = ""
            obj['fields']['constraints_other_en'] = ""
            obj['fields']['default_style'] = None

            if self.datastore:
                obj['fields']['store'] = self.datastore
            else:
                obj['fields']['store'] = obj['fields']['name']

            # NOTE(review): pop() with a default never raises; wrappers inert.
            try:
                obj['fields'].pop("popular_count", None)
            except:
                pass

            try:
                obj['fields'].pop("share_count", None)
            except:
                pass

            try:
                obj['fields'].pop("title", None)
            except:
                pass

        return default_obj


class LayerAttributesMangler(DefaultMangler):
    """Mangler for layers.attribute fixtures: shifts both the attribute
    pk and its foreign key to the owning layer by the same offset."""

    def default(self, obj):
        # Let the base class default method raise the TypeError
        return json.JSONEncoder.default(self, obj)

    def decode(self, json_string):
        """
        json_string is basicly string that you give to json.loads method
        """
        default_obj = super(LayerAttributesMangler, self).decode(json_string)

        # manipulate your object any way you want
        # ....
        for obj in default_obj:
            obj['pk'] = obj['pk'] + self.basepk
            obj['fields']['layer'] = obj['fields']['layer'] + self.basepk

        return default_obj


class MapMangler(DefaultMangler):
    """Mangler for maps.map fixtures: shifts pks and fills the
    translation/URL fields introduced in 2.4."""

    def default(self, obj):
        # Let the base class default method raise the TypeError
        return json.JSONEncoder.default(self, obj)

    def decode(self, json_string):
        """
        json_string is basicly string that you give to json.loads method
        """
        default_obj = super(MapMangler, self).decode(json_string)

        # manipulate your object any way you want
        # ....
        for obj in default_obj:
            obj['pk'] = obj['pk'] + self.basepk

            # Retrieve the ResourceBase associated to this Layer
            from geonode.base.models import ResourceBase
            resource = ResourceBase.objects.get(pk=obj['pk'])

            obj['fields']['urlsuffix'] = ""
            obj['fields']['title_en'] = resource.title
            obj['fields']['featuredurl'] = ""
            obj['fields']['data_quality_statement_en'] = None
            obj['fields']['supplemental_information_en'] = "No information provided"
            obj['fields']['abstract_en'] = ""
            obj['fields']['purpose_en'] = None
            obj['fields']['constraints_other_en'] = None

            # NOTE(review): pop() with a default never raises; wrappers inert.
            try:
                obj['fields'].pop("popular_count", None)
            except:
                pass

            try:
                obj['fields'].pop("share_count", None)
            except:
                pass

            try:
                obj['fields'].pop("title", None)
            except:
                pass

        return default_obj


class MapLayersMangler(DefaultMangler):
    """Mangler for maps.maplayer fixtures: shifts both the maplayer pk
    and its foreign key to the owning map by the same offset."""

    def default(self, obj):
        # Let the base class default method raise the TypeError
        return json.JSONEncoder.default(self, obj)

    def decode(self, json_string):
        """
        json_string is basicly string that you give to json.loads method
        """
        default_obj = super(MapLayersMangler, self).decode(json_string)

        # manipulate your object any way you want
        # ....
        for obj in default_obj:
            obj['pk'] = obj['pk'] + self.basepk
            obj['fields']['map'] = obj['fields']['map'] + self.basepk

        return default_obj
gpl-3.0
courtarro/gnuradio
gr-wxgui/python/wxgui/plotter/gltext.py
37
16891
#!/usr/bin/env python # -*- coding: utf-8 # # Provides some text display functions for wx + ogl # Copyright (C) 2007 Christian Brugger, Stefan Hacker # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. import wx from OpenGL.GL import * """ Optimize with psyco if possible, this gains us about 50% speed when creating our textures in trade for about 4MBytes of additional memory usage for psyco. If you don't like loosing the memory you have to turn the lines following "enable psyco" into a comment while uncommenting the line after "Disable psyco". 
""" #Try to enable psyco try: import psyco psyco_optimized = False except ImportError: psyco = None #Disable psyco #psyco = None class TextElement(object): """ A simple class for using system Fonts to display text in an OpenGL scene """ def __init__(self, text = '', font = None, foreground = wx.BLACK, centered = False): """ text (String) - Text font (wx.Font) - Font to draw with (None = System default) foreground (wx.Color) - Color of the text or (wx.Bitmap)- Bitmap to overlay the text with centered (bool) - Center the text Initializes the TextElement """ # save given variables self._text = text self._lines = text.split('\n') self._font = font self._foreground = foreground self._centered = centered # init own variables self._owner_cnt = 0 #refcounter self._texture = None #OpenGL texture ID self._text_size = None #x/y size tuple of the text self._texture_size= None #x/y Texture size tuple # create Texture self.createTexture() #---Internal helpers def _getUpper2Base(self, value): """ Returns the lowest value with the power of 2 greater than 'value' (2^n>value) """ base2 = 1 while base2 < value: base2 *= 2 return base2 #---Functions def draw_text(self, position = wx.Point(0,0), scale = 1.0, rotation = 0): """ position (wx.Point) - x/y Position to draw in scene scale (float) - Scale rotation (int) - Rotation in degree Draws the text to the scene """ #Enable necessary functions glColor(1,1,1,1) glEnable(GL_TEXTURE_2D) glEnable(GL_ALPHA_TEST) #Enable alpha test glAlphaFunc(GL_GREATER, 0) glEnable(GL_BLEND) #Enable blending glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA) #Bind texture glBindTexture(GL_TEXTURE_2D, self._texture) ow, oh = self._text_size w , h = self._texture_size #Perform transformations glPushMatrix() glTranslated(position.x, position.y, 0) glRotate(-rotation, 0, 0, 1) glScaled(scale, scale, scale) if self._centered: glTranslate(-w/2, -oh/2, 0) #Draw vertices glBegin(GL_QUADS) glTexCoord2f(0,0); glVertex2f(0,0) glTexCoord2f(0,1); glVertex2f(0,h) 
glTexCoord2f(1,1); glVertex2f(w,h) glTexCoord2f(1,0); glVertex2f(w,0) glEnd() glPopMatrix() #Disable features glDisable(GL_BLEND) glDisable(GL_ALPHA_TEST) glDisable(GL_TEXTURE_2D) def createTexture(self): """ Creates a texture from the settings saved in TextElement, to be able to use normal system fonts conviently a wx.MemoryDC is used to draw on a wx.Bitmap. As wxwidgets device contexts don't support alpha at all it is necessary to apply a little hack to preserve antialiasing without sticking to a fixed background color: We draw the bmp in b/w mode so we can use its data as a alpha channel for a solid color bitmap which after GL_ALPHA_TEST and GL_BLEND will show a nicely antialiased text on any surface. To access the raw pixel data the bmp gets converted to a wx.Image. Now we just have to merge our foreground color with the alpha data we just created and push it all into a OpenGL texture and we are DONE *inhalesdelpy* DRAWBACK of the whole conversion thing is a really long time for creating the texture. If you see any optimizations that could save time PLEASE CREATE A PATCH!!! """ # get a memory dc dc = wx.MemoryDC() # Select an empty bitmap into the MemoryDC - otherwise the call to # GetMultiLineTextExtent() may fail below dc.SelectObject(wx.EmptyBitmap(1,1)) # set our font dc.SetFont(self._font) # Approximate extend to next power of 2 and create our bitmap # REMARK: You wouldn't believe how much fucking speed this little # sucker gains compared to sizes not of the power of 2. It's like # 500ms --> 0.5ms (on my ATI-GPU powered Notebook). On Sams nvidia # machine there don't seem to occur any losses...bad drivers? 
ow, oh = dc.GetMultiLineTextExtent(self._text)[:2] w, h = self._getUpper2Base(ow), self._getUpper2Base(oh) self._text_size = wx.Size(ow,oh) self._texture_size = wx.Size(w,h) bmp = wx.EmptyBitmap(w,h) #Draw in b/w mode to bmp so we can use it as alpha channel dc.SelectObject(bmp) dc.SetBackground(wx.BLACK_BRUSH) dc.Clear() dc.SetTextForeground(wx.WHITE) x,y = 0,0 centered = self.centered for line in self._lines: if not line: line = ' ' tw, th = dc.GetTextExtent(line) if centered: x = int(round((w-tw)/2)) dc.DrawText(line, x, y) x = 0 y += th #Release the dc dc.SelectObject(wx.NullBitmap) del dc #Generate a correct RGBA data string from our bmp """ NOTE: You could also use wx.AlphaPixelData to access the pixel data in 'bmp' directly, but the iterator given by it is much slower than first converting to an image and using wx.Image.GetData(). """ img = wx.ImageFromBitmap(bmp) alpha = img.GetData() if isinstance(self._foreground, wx.Colour): """ If we have a static color... """ r,g,b = self._foreground.Get() color = "%c%c%c" % (chr(r), chr(g), chr(b)) data = '' for i in xrange(0, len(alpha)-1, 3): data += color + alpha[i] elif isinstance(self._foreground, wx.Bitmap): """ If we have a bitmap... 
""" bg_img = wx.ImageFromBitmap(self._foreground) bg = bg_img.GetData() bg_width = self._foreground.GetWidth() bg_height = self._foreground.GetHeight() data = '' for y in xrange(0, h): for x in xrange(0, w): if (y > (bg_height-1)) or (x > (bg_width-1)): color = "%c%c%c" % (chr(0),chr(0),chr(0)) else: pos = (x+y*bg_width) * 3 color = bg[pos:pos+3] data += color + alpha[(x+y*w)*3] # now convert it to ogl texture self._texture = glGenTextures(1) glBindTexture(GL_TEXTURE_2D, self._texture) glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR) glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR) glPixelStorei(GL_UNPACK_ROW_LENGTH, 0) glPixelStorei(GL_UNPACK_ALIGNMENT, 2) glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, w, h, 0, GL_RGBA, GL_UNSIGNED_BYTE, data) def deleteTexture(self): """ Deletes the OpenGL texture object """ if self._texture: if glIsTexture(self._texture): glDeleteTextures(self._texture) else: self._texture = None def bind(self): """ Increase refcount """ self._owner_cnt += 1 def release(self): """ Decrease refcount """ self._owner_cnt -= 1 def isBound(self): """ Return refcount """ return self._owner_cnt def __del__(self): """ Destructor """ self.deleteTexture() #---Getters/Setters def getText(self): return self._text def getFont(self): return self._font def getForeground(self): return self._foreground def getCentered(self): return self._centered def getTexture(self): return self._texture def getTexture_size(self): return self._texture_size def getOwner_cnt(self): return self._owner_cnt def setOwner_cnt(self, value): self._owner_cnt = value #---Properties text = property(getText, None, None, "Text of the object") font = property(getFont, None, None, "Font of the object") foreground = property(getForeground, None, None, "Color of the text") centered = property(getCentered, None, None, "Is text centered") owner_cnt = property(getOwner_cnt, setOwner_cnt, None, "Owner count") texture = property(getTexture, None, None, "Used texture") 
texture_size = property(getTexture_size, None, None, "Size of the used texture") class Text(object): """ A simple class for using System Fonts to display text in an OpenGL scene. The Text adds a global Cache of already created text elements to TextElement's base functionality so you can save some memory and increase speed """ _texts = [] #Global cache for TextElements def __init__(self, text = 'Text', font = None, font_size = 8, foreground = wx.BLACK, centered = False, bold = False): """ text (string) - displayed text font (wx.Font) - if None, system default font will be used with font_size font_size (int) - font size in points foreground (wx.Color) - Color of the text or (wx.Bitmap) - Bitmap to overlay the text with centered (bool) - should the text drawn centered towards position? Initializes the text object """ #Init/save variables self._aloc_text = None self._text = text self._font_size = font_size self._foreground= foreground self._centered = centered #Check if we are offered a font if not font: #if not use the system default self._font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT) else: #save it self._font = font if bold: self._font.SetWeight(wx.FONTWEIGHT_BOLD) #Bind us to our texture self._initText() #---Internal helpers def _initText(self): """ Initializes/Reinitializes the Text object by binding it to a TextElement suitable for its current settings """ #Check if we already bound to a texture if self._aloc_text: #if so release it self._aloc_text.release() if not self._aloc_text.isBound(): self._texts.remove(self._aloc_text) self._aloc_text = None #Adjust our font self._font.SetPointSize(self._font_size) #Search for existing element in our global buffer for element in self._texts: if element.text == self._text and\ element.font == self._font and\ element.foreground == self._foreground and\ element.centered == self._centered: # We already exist in global buffer ;-) element.bind() self._aloc_text = element break if not self._aloc_text: # We are not in 
the global buffer, let's create ourselves aloc_text = self._aloc_text = TextElement(self._text, self._font, self._foreground, self._centered) aloc_text.bind() self._texts.append(aloc_text) def __del__(self): """ Destructor """ aloc_text = self._aloc_text aloc_text.release() if not aloc_text.isBound(): self._texts.remove(aloc_text) #---Functions def draw_text(self, position = wx.Point(0,0), scale = 1.0, rotation = 0): """ position (wx.Point) - x/y Position to draw in scene scale (float) - Scale rotation (int) - Rotation in degree Draws the text to the scene """ self._aloc_text.draw_text(position, scale, rotation) #---Setter/Getter def getText(self): return self._text def setText(self, value, reinit = True): """ value (bool) - New Text reinit (bool) - Create a new texture Sets a new text """ self._text = value if reinit: self._initText() def getFont(self): return self._font def setFont(self, value, reinit = True): """ value (bool) - New Font reinit (bool) - Create a new texture Sets a new font """ self._font = value if reinit: self._initText() def getFont_size(self): return self._font_size def setFont_size(self, value, reinit = True): """ value (bool) - New font size reinit (bool) - Create a new texture Sets a new font size """ self._font_size = value if reinit: self._initText() def getForeground(self): return self._foreground def setForeground(self, value, reinit = True): """ value (bool) - New centered value reinit (bool) - Create a new texture Sets a new value for 'centered' """ self._foreground = value if reinit: self._initText() def getCentered(self): return self._centered def setCentered(self, value, reinit = True): """ value (bool) - New centered value reinit (bool) - Create a new texture Sets a new value for 'centered' """ self._centered = value if reinit: self._initText() def get_size(self): """ Returns a text size tuple """ return self._aloc_text._text_size def getTexture_size(self): """ Returns a texture size tuple """ return self._aloc_text.texture_size 
def getTextElement(self): """ Returns the text element bound to the Text class """ return self._aloc_text def getTexture(self): """ Returns the texture of the bound TextElement """ return self._aloc_text.texture #---Properties text = property(getText, setText, None, "Text of the object") font = property(getFont, setFont, None, "Font of the object") font_size = property(getFont_size, setFont_size, None, "Font size") foreground = property(getForeground, setForeground, None, "Color/Overlay bitmap of the text") centered = property(getCentered, setCentered, None, "Display the text centered") texture_size = property(getTexture_size, None, None, "Size of the used texture") texture = property(getTexture, None, None, "Texture of bound TextElement") text_element = property(getTextElement,None , None, "TextElement bound to this class") #Optimize critical functions if psyco and not psyco_optimized: psyco.bind(TextElement.createTexture) psyco_optimized = True
gpl-3.0
takahashiminoru/ryu
ryu/lib/packet/stream_parser.py
58
2384
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2013 YAMAMOTO Takashi <yamamoto at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from abc import ABCMeta, abstractmethod

import six


@six.add_metaclass(ABCMeta)
class StreamParser(object):
    """Streaming parser base class.

    An instance of a subclass of this class is used to extract messages
    from a raw byte stream.

    It's designed to be used for data read from a transport which doesn't
    preserve message boundaries.  A typical example of such a transport
    is TCP.
    """

    class TooSmallException(Exception):
        # Raised by try_parse() when the buffered bytes do not yet hold a
        # complete message; parse() treats it as "wait for more data".
        pass

    def __init__(self):
        # Buffer of bytes received but not yet consumed as messages.
        self._q = bytearray()

    def parse(self, data):
        """Tries to extract messages from a raw byte stream.

        The data argument would be python bytes newly read from the input
        stream.

        Returns an ordered list of extracted messages.
        It can be an empty list.

        The rest of data which doesn't produce a complete message is
        kept internally and will be used when more data is come.
        I.e. next time this method is called again.
        """
        # BUG FIX: the original used self._q.append(data), but
        # bytearray.append() accepts only a single integer (one byte) and
        # raises TypeError when handed a bytes chunk; in-place
        # concatenation enqueues the whole chunk correctly.
        self._q += data
        msgs = []
        while True:
            try:
                # try_parse returns (message, remaining bytes); the
                # remainder becomes the new buffer.
                msg, self._q = self.try_parse(self._q)
            except self.TooSmallException:
                break
            msgs.append(msg)
        return msgs

    @abstractmethod
    def try_parse(self, q):
        """Try to extract a message from the given bytes.

        This is an override point for subclasses.

        This method tries to extract a message from bytes given by the
        argument.

        Raises TooSmallException if the given data is not enough to
        extract a complete message but there's still a chance to extract
        a message if more data is come later.
        """
        pass
apache-2.0
Khan/werkzeug
werkzeug/testsuite/debug.py
74
7476
# -*- coding: utf-8 -*- """ werkzeug.testsuite.debug ~~~~~~~~~~~~~~~~~~~~~~~~ Tests some debug utilities. :copyright: (c) 2011 by Armin Ronacher. :license: BSD, see LICENSE for more details. """ import unittest import sys import re from werkzeug.testsuite import WerkzeugTestCase from werkzeug.debug.repr import debug_repr, DebugReprGenerator, \ dump, helper from werkzeug.debug.console import HTMLStringO class DebugReprTestCase(WerkzeugTestCase): def test_basic_repr(self): assert debug_repr([]) == u'[]' assert debug_repr([1, 2]) == \ u'[<span class="number">1</span>, <span class="number">2</span>]' assert debug_repr([1, 'test']) == \ u'[<span class="number">1</span>, <span class="string">\'test\'</span>]' assert debug_repr([None]) == \ u'[<span class="object">None</span>]' def test_sequence_repr(self): assert debug_repr(list(range(20))) == ( u'[<span class="number">0</span>, <span class="number">1</span>, ' u'<span class="number">2</span>, <span class="number">3</span>, ' u'<span class="number">4</span>, <span class="number">5</span>, ' u'<span class="number">6</span>, <span class="number">7</span>, ' u'<span class="extended"><span class="number">8</span>, ' u'<span class="number">9</span>, <span class="number">10</span>, ' u'<span class="number">11</span>, <span class="number">12</span>, ' u'<span class="number">13</span>, <span class="number">14</span>, ' u'<span class="number">15</span>, <span class="number">16</span>, ' u'<span class="number">17</span>, <span class="number">18</span>, ' u'<span class="number">19</span></span>]' ) def test_mapping_repr(self): assert debug_repr({}) == u'{}' assert debug_repr({'foo': 42}) == \ u'{<span class="pair"><span class="key"><span class="string">\'foo\''\ u'</span></span>: <span class="value"><span class="number">42' \ u'</span></span></span>}' assert debug_repr(dict(zip(range(10), [None] * 10))) == \ u'{<span class="pair"><span class="key"><span class="number">0</span></span>: <span class="value"><span 
class="object">None</span></span></span>, <span class="pair"><span class="key"><span class="number">1</span></span>: <span class="value"><span class="object">None</span></span></span>, <span class="pair"><span class="key"><span class="number">2</span></span>: <span class="value"><span class="object">None</span></span></span>, <span class="pair"><span class="key"><span class="number">3</span></span>: <span class="value"><span class="object">None</span></span></span>, <span class="extended"><span class="pair"><span class="key"><span class="number">4</span></span>: <span class="value"><span class="object">None</span></span></span>, <span class="pair"><span class="key"><span class="number">5</span></span>: <span class="value"><span class="object">None</span></span></span>, <span class="pair"><span class="key"><span class="number">6</span></span>: <span class="value"><span class="object">None</span></span></span>, <span class="pair"><span class="key"><span class="number">7</span></span>: <span class="value"><span class="object">None</span></span></span>, <span class="pair"><span class="key"><span class="number">8</span></span>: <span class="value"><span class="object">None</span></span></span>, <span class="pair"><span class="key"><span class="number">9</span></span>: <span class="value"><span class="object">None</span></span></span></span>}' assert debug_repr((1, 'zwei', u'drei')) ==\ u'(<span class="number">1</span>, <span class="string">\'' \ u'zwei\'</span>, <span class="string">u\'drei\'</span>)' def test_custom_repr(self): class Foo(object): def __repr__(self): return '<Foo 42>' assert debug_repr(Foo()) == '<span class="object">&lt;Foo 42&gt;</span>' def test_list_subclass_repr(self): class MyList(list): pass assert debug_repr(MyList([1, 2])) == \ u'<span class="module">werkzeug.testsuite.debug.</span>MyList([' \ u'<span class="number">1</span>, <span class="number">2</span>])' def test_regex_repr(self): assert debug_repr(re.compile(r'foo\d')) == \ 
u're.compile(<span class="string regex">r\'foo\\d\'</span>)' assert debug_repr(re.compile(ur'foo\d')) == \ u're.compile(<span class="string regex">ur\'foo\\d\'</span>)' def test_set_repr(self): assert debug_repr(frozenset('x')) == \ u'frozenset([<span class="string">\'x\'</span>])' assert debug_repr(set('x')) == \ u'set([<span class="string">\'x\'</span>])' def test_recursive_repr(self): a = [1] a.append(a) assert debug_repr(a) == u'[<span class="number">1</span>, [...]]' def test_broken_repr(self): class Foo(object): def __repr__(self): 1/0 assert debug_repr(Foo()) == \ u'<span class="brokenrepr">&lt;broken repr (ZeroDivisionError: ' \ u'integer division or modulo by zero)&gt;</span>' class DebugHelpersTestCase(WerkzeugTestCase): def test_object_dumping(self): class Foo(object): x = 42 y = 23 def __init__(self): self.z = 15 drg = DebugReprGenerator() out = drg.dump_object(Foo()) assert re.search('Details for werkzeug.testsuite.debug.Foo object at', out) assert re.search('<th>x.*<span class="number">42</span>(?s)', out) assert re.search('<th>y.*<span class="number">23</span>(?s)', out) assert re.search('<th>z.*<span class="number">15</span>(?s)', out) out = drg.dump_object({'x': 42, 'y': 23}) assert re.search('Contents of', out) assert re.search('<th>x.*<span class="number">42</span>(?s)', out) assert re.search('<th>y.*<span class="number">23</span>(?s)', out) out = drg.dump_object({'x': 42, 'y': 23, 23: 11}) assert not re.search('Contents of', out) out = drg.dump_locals({'x': 42, 'y': 23}) assert re.search('Local variables in frame', out) assert re.search('<th>x.*<span class="number">42</span>(?s)', out) assert re.search('<th>y.*<span class="number">23</span>(?s)', out) def test_debug_dump(self): old = sys.stdout sys.stdout = HTMLStringO() try: dump([1, 2, 3]) x = sys.stdout.reset() dump() y = sys.stdout.reset() finally: sys.stdout = old assert 'Details for list object at' in x assert '<span class="number">1</span>' in x assert 'Local variables in frame' in y 
assert '<th>x' in y assert '<th>old' in y def test_debug_help(self): old = sys.stdout sys.stdout = HTMLStringO() try: helper([1, 2, 3]) x = sys.stdout.reset() finally: sys.stdout = old assert 'Help on list object' in x assert '__delitem__' in x def suite(): suite = unittest.TestSuite() suite.addTest(unittest.makeSuite(DebugReprTestCase)) suite.addTest(unittest.makeSuite(DebugHelpersTestCase)) return suite
bsd-3-clause
174high/bitcoin
test/functional/txn_clone.py
32
7658
#!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the wallet accounts properly when there are cloned transactions with malleated scriptsigs.""" from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * class TxnMallTest(BitcoinTestFramework): def __init__(self): super().__init__() self.num_nodes = 4 self.setup_clean_chain = False def add_options(self, parser): parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true", help="Test double-spend of 1-confirmed transaction") def setup_network(self): # Start with split network: super(TxnMallTest, self).setup_network() disconnect_nodes(self.nodes[1], 2) disconnect_nodes(self.nodes[2], 1) def run_test(self): # All nodes should start with 1,250 BTC: starting_balance = 1250 for i in range(4): assert_equal(self.nodes[i].getbalance(), starting_balance) self.nodes[i].getnewaddress("") # bug workaround, coins generated assigned to first getnewaddress! 
# Assign coins to foo and bar accounts: self.nodes[0].settxfee(.001) node0_address_foo = self.nodes[0].getnewaddress("foo") fund_foo_txid = self.nodes[0].sendfrom("", node0_address_foo, 1219) fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid) node0_address_bar = self.nodes[0].getnewaddress("bar") fund_bar_txid = self.nodes[0].sendfrom("", node0_address_bar, 29) fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid) assert_equal(self.nodes[0].getbalance(""), starting_balance - 1219 - 29 + fund_foo_tx["fee"] + fund_bar_tx["fee"]) # Coins are sent to node1_address node1_address = self.nodes[1].getnewaddress("from0") # Send tx1, and another transaction tx2 that won't be cloned txid1 = self.nodes[0].sendfrom("foo", node1_address, 40, 0) txid2 = self.nodes[0].sendfrom("bar", node1_address, 20, 0) # Construct a clone of tx1, to be malleated rawtx1 = self.nodes[0].getrawtransaction(txid1,1) clone_inputs = [{"txid":rawtx1["vin"][0]["txid"],"vout":rawtx1["vin"][0]["vout"]}] clone_outputs = {rawtx1["vout"][0]["scriptPubKey"]["addresses"][0]:rawtx1["vout"][0]["value"], rawtx1["vout"][1]["scriptPubKey"]["addresses"][0]:rawtx1["vout"][1]["value"]} clone_locktime = rawtx1["locktime"] clone_raw = self.nodes[0].createrawtransaction(clone_inputs, clone_outputs, clone_locktime) # createrawtransaction randomizes the order of its outputs, so swap them if necessary. 
# output 0 is at version+#inputs+input+sigstub+sequence+#outputs # 40 BTC serialized is 00286bee00000000 pos0 = 2*(4+1+36+1+4+1) hex40 = "00286bee00000000" output_len = 16 + 2 + 2 * int("0x" + clone_raw[pos0 + 16 : pos0 + 16 + 2], 0) if (rawtx1["vout"][0]["value"] == 40 and clone_raw[pos0 : pos0 + 16] != hex40 or rawtx1["vout"][0]["value"] != 40 and clone_raw[pos0 : pos0 + 16] == hex40): output0 = clone_raw[pos0 : pos0 + output_len] output1 = clone_raw[pos0 + output_len : pos0 + 2 * output_len] clone_raw = clone_raw[:pos0] + output1 + output0 + clone_raw[pos0 + 2 * output_len:] # Use a different signature hash type to sign. This creates an equivalent but malleated clone. # Don't send the clone anywhere yet tx1_clone = self.nodes[0].signrawtransaction(clone_raw, None, None, "ALL|ANYONECANPAY") assert_equal(tx1_clone["complete"], True) # Have node0 mine a block, if requested: if (self.options.mine_block): self.nodes[0].generate(1) sync_blocks(self.nodes[0:2]) tx1 = self.nodes[0].gettransaction(txid1) tx2 = self.nodes[0].gettransaction(txid2) # Node0's balance should be starting balance, plus 50BTC for another # matured block, minus tx1 and tx2 amounts, and minus transaction fees: expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"] if self.options.mine_block: expected += 50 expected += tx1["amount"] + tx1["fee"] expected += tx2["amount"] + tx2["fee"] assert_equal(self.nodes[0].getbalance(), expected) # foo and bar accounts should be debited: assert_equal(self.nodes[0].getbalance("foo", 0), 1219 + tx1["amount"] + tx1["fee"]) assert_equal(self.nodes[0].getbalance("bar", 0), 29 + tx2["amount"] + tx2["fee"]) if self.options.mine_block: assert_equal(tx1["confirmations"], 1) assert_equal(tx2["confirmations"], 1) # Node1's "from0" balance should be both transaction amounts: assert_equal(self.nodes[1].getbalance("from0"), -(tx1["amount"] + tx2["amount"])) else: assert_equal(tx1["confirmations"], 0) assert_equal(tx2["confirmations"], 0) # Send clone and its 
parent to miner self.nodes[2].sendrawtransaction(fund_foo_tx["hex"]) txid1_clone = self.nodes[2].sendrawtransaction(tx1_clone["hex"]) # ... mine a block... self.nodes[2].generate(1) # Reconnect the split network, and sync chain: connect_nodes(self.nodes[1], 2) self.nodes[2].sendrawtransaction(fund_bar_tx["hex"]) self.nodes[2].sendrawtransaction(tx2["hex"]) self.nodes[2].generate(1) # Mine another block to make sure we sync sync_blocks(self.nodes) # Re-fetch transaction info: tx1 = self.nodes[0].gettransaction(txid1) tx1_clone = self.nodes[0].gettransaction(txid1_clone) tx2 = self.nodes[0].gettransaction(txid2) # Verify expected confirmations assert_equal(tx1["confirmations"], -2) assert_equal(tx1_clone["confirmations"], 2) assert_equal(tx2["confirmations"], 1) # Check node0's total balance; should be same as before the clone, + 100 BTC for 2 matured, # less possible orphaned matured subsidy expected += 100 if (self.options.mine_block): expected -= 50 assert_equal(self.nodes[0].getbalance(), expected) assert_equal(self.nodes[0].getbalance("*", 0), expected) # Check node0's individual account balances. # "foo" should have been debited by the equivalent clone of tx1 assert_equal(self.nodes[0].getbalance("foo"), 1219 + tx1["amount"] + tx1["fee"]) # "bar" should have been debited by (possibly unconfirmed) tx2 assert_equal(self.nodes[0].getbalance("bar", 0), 29 + tx2["amount"] + tx2["fee"]) # "" should have starting balance, less funding txes, plus subsidies assert_equal(self.nodes[0].getbalance("", 0), starting_balance - 1219 + fund_foo_tx["fee"] - 29 + fund_bar_tx["fee"] + 100) # Node1's "from0" account balance assert_equal(self.nodes[1].getbalance("from0", 0), -(tx1["amount"] + tx2["amount"])) if __name__ == '__main__': TxnMallTest().main()
mit
ifduyue/django
django/contrib/contenttypes/management/__init__.py
53
4857
from django.apps import apps as global_apps from django.db import DEFAULT_DB_ALIAS, migrations, router, transaction from django.db.utils import IntegrityError class RenameContentType(migrations.RunPython): def __init__(self, app_label, old_model, new_model): self.app_label = app_label self.old_model = old_model self.new_model = new_model super().__init__(self.rename_forward, self.rename_backward) def _rename(self, apps, schema_editor, old_model, new_model): ContentType = apps.get_model('contenttypes', 'ContentType') db = schema_editor.connection.alias if not router.allow_migrate_model(db, ContentType): return try: content_type = ContentType.objects.db_manager(db).get_by_natural_key(self.app_label, old_model) except ContentType.DoesNotExist: pass else: content_type.model = new_model try: with transaction.atomic(using=db): content_type.save(update_fields={'model'}) except IntegrityError: # Gracefully fallback if a stale content type causes a # conflict as remove_stale_contenttypes will take care of # asking the user what should be done next. content_type.model = old_model else: # Clear the cache as the `get_by_natual_key()` call will cache # the renamed ContentType instance by its old model name. ContentType.objects.clear_cache() def rename_forward(self, apps, schema_editor): self._rename(apps, schema_editor, self.old_model, self.new_model) def rename_backward(self, apps, schema_editor): self._rename(apps, schema_editor, self.new_model, self.old_model) def inject_rename_contenttypes_operations(plan=None, apps=global_apps, using=DEFAULT_DB_ALIAS, **kwargs): """ Insert a `RenameContentType` operation after every planned `RenameModel` operation. """ if plan is None: return # Determine whether or not the ContentType model is available. 
try: ContentType = apps.get_model('contenttypes', 'ContentType') except LookupError: available = False else: if not router.allow_migrate_model(using, ContentType): return available = True for migration, backward in plan: if (migration.app_label, migration.name) == ('contenttypes', '0001_initial'): # There's no point in going forward if the initial contenttypes # migration is unapplied as the ContentType model will be # unavailable from this point. if backward: break else: available = True continue # The ContentType model is not available yet. if not available: continue inserts = [] for index, operation in enumerate(migration.operations): if isinstance(operation, migrations.RenameModel): operation = RenameContentType( migration.app_label, operation.old_name_lower, operation.new_name_lower ) inserts.append((index + 1, operation)) for inserted, (index, operation) in enumerate(inserts): migration.operations.insert(inserted + index, operation) def get_contenttypes_and_models(app_config, using, ContentType): if not router.allow_migrate_model(using, ContentType): return None, None ContentType.objects.clear_cache() content_types = { ct.model: ct for ct in ContentType.objects.using(using).filter(app_label=app_config.label) } app_models = { model._meta.model_name: model for model in app_config.get_models() } return content_types, app_models def create_contenttypes(app_config, verbosity=2, interactive=True, using=DEFAULT_DB_ALIAS, apps=global_apps, **kwargs): """ Create content types for models in the given app. 
""" if not app_config.models_module: return app_label = app_config.label try: app_config = apps.get_app_config(app_label) ContentType = apps.get_model('contenttypes', 'ContentType') except LookupError: return content_types, app_models = get_contenttypes_and_models(app_config, using, ContentType) if not app_models: return cts = [ ContentType( app_label=app_label, model=model_name, ) for (model_name, model) in app_models.items() if model_name not in content_types ] ContentType.objects.using(using).bulk_create(cts) if verbosity >= 2: for ct in cts: print("Adding content type '%s | %s'" % (ct.app_label, ct.model))
bsd-3-clause
PARINetwork/pari
functional_tests/factory/gallery_home_page_factory.py
1
1374
import factory from core.models import GalleryHomePage from functional_tests.factory import ContentTypeFactory from functional_tests.factory.article_factory import ArticleFactory from functional_tests.factory.album_factory import AlbumFactory from functional_tests.factory.image_factory import ImageFactory class GalleryHomePageFactory(factory.django.DjangoModelFactory): class Meta: model = GalleryHomePage django_get_or_create = ('title',) path = "0001000A" depth = 2 numchild = 0 slug = "gallery" title = "Gallery Home Page" live = True has_unpublished_changes = False seo_title = " " show_in_menus = False search_description = " " go_live_at = '2011-10-24 12:43' expire_at = '2050-12-31 12:43' expired = False content_type = factory.SubFactory(ContentTypeFactory, app_label="core", model="galleryhomepage") locked = False latest_revision_created_at = '2011-10-24 12:43' first_published_at = '2011-10-24 12:43' photo_of_the_week = factory.SubFactory(ImageFactory) photo_title = "Gallery title" photo_link = "http://www.google.com" talking_album = factory.SubFactory(AlbumFactory) photo_album = factory.SubFactory(AlbumFactory) video = factory.SubFactory(ArticleFactory, title="carousel_0", content_type__app_label="article", content_type__model="article")
bsd-3-clause
xenserver/xsconsole
plugins-oem/XSFeatureUpdate.py
1
4561
# Copyright (c) 2008-2009 Citrix Systems Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; version 2 only. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. if __name__ == "__main__": raise Exception("This script is a plugin for xsconsole and cannot run independently") from XSConsoleStandard import * class UpdateDialogue(FileDialogue): def __init__(self): self.custom = { 'title' : Lang("Apply Software Update"), 'searchregexp' : r'.*\.xsoem$', # Type of system update file is .xsoem 'deviceprompt' : Lang("Select the device containing the update"), 'fileprompt' : Lang("Select the update file"), 'confirmprompt' : Lang("Press <F8> to begin the update process"), 'mode' : 'ro' } FileDialogue.__init__(self) # Must fill in self.custom before calling __init__ def DoAction(self): success = False Layout.Inst().PopDialogue() Layout.Inst().PushDialogue(BannerDialogue( Lang("Applying update... This may take several minutes. Press <Ctrl-C> to abort."))) hostEnabled = Data.Inst().host.enabled(False) try: try: Layout.Inst().Refresh() Layout.Inst().DoUpdate() if VMUtils.numLocalResidentVMs() > 0: raise Exception(Lang("One or more Virtual Machines are running on this host. 
Please migrate, shut down or suspend Virtual Machines before continuing.")) Data.Inst().LocalHostDisable() hostRef = Data.Inst().host.uuid(None) if hostRef is None: raise Exception("Internal error 1") filename = self.vdiMount.MountedPath(self.filename) FileUtils.AssertSafePath(filename) command = "/opt/xensource/bin/xe update-upload file-name='"+filename+"' host-uuid="+hostRef status, output = commands.getstatusoutput(command) if status != 0: raise Exception(output) Layout.Inst().PopDialogue() Layout.Inst().PushDialogue(InfoDialogue( Lang("Update Successful"), Lang("Please reboot to use the newly installed software."))) XSLog('Software updated') hostEnabled = False except Exception, e: Layout.Inst().PopDialogue() Layout.Inst().PushDialogue(InfoDialogue( Lang("Software Update Failed"), Lang(e))) finally: try: self.PreExitActions() if hostEnabled: # Dont leave the host disabled if the update has failed Data.Inst().LocalHostEnable() except Exception, e: Layout.Inst().PushDialogue(InfoDialogue( Lang("Software Update Failed"), Lang(e))) class XSFeatureUpdate: @classmethod def StatusUpdateHandler(cls, inPane): data = Data.Inst() inPane.AddTitleField(Lang("Apply Update")) inPane.AddWrappedTextField(Lang( "Press <Enter> to apply a software update.")) inPane.AddKeyHelpField( { Lang("<Enter>") : Lang("Update") } ) @classmethod def ActivateHandler(cls): DialogueUtils.AuthenticatedOnly(lambda: Layout.Inst().PushDialogue(UpdateDialogue())) def Register(self): Importer.RegisterNamedPlugIn( self, 'UPDATE', # Key of this plugin for replacement, etc. { 'menuname' : 'MENU_BUR', 'menupriority' : 100, 'menutext' : Lang('Apply Update') , 'statusupdatehandler' : self.StatusUpdateHandler, 'activatehandler' : self.ActivateHandler } ) # Register this plugin when module is imported XSFeatureUpdate().Register()
gpl-2.0
Eficent/account-invoice-reporting
__unported__/invoice_report_assemble/company.py
10
1268
# -*- coding: utf-8 -*- ############################################################################## # # Author: Yannick Vaucher # Copyright 2013 Camptocamp SA # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import orm, fields class ResCompany(orm.Model): _inherit = 'res.company' _columns = { 'assemble_invoice_report_ids': fields.one2many( 'assembled.report', 'company_id', 'Account Invoice Assemblage Report', domain=[('model', '=', 'account.invoice')]), }
agpl-3.0
702nADOS/sumo
tools/assign/cadytsIterate.py
1
8471
#!/usr/bin/env python # -*- coding: utf-8 -*- """ @file cadytsIterate.py @author Jakob Erdmann @author Yun-Pang Floetteroed @author Daniel Krajzewicz @author Michael Behrisch @date 2010-09-15 @version $Id: cadytsIterate.py 22608 2017-01-17 06:28:54Z behrisch $ Run cadyts to calibrate the simulation with given routes and traffic measurements. Respective traffic zones information has to exist in the given route files. SUMO, Simulation of Urban MObility; see http://sumo.dlr.de/ Copyright (C) 2010-2017 DLR (http://www.dlr.de/) and contributors This file is part of SUMO. SUMO is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. """ from __future__ import absolute_import from __future__ import print_function import os import sys import subprocess import types from datetime import datetime from argparse import ArgumentParser from duaIterate import call, writeSUMOConf, addGenericOptions sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) import sumolib def initOptions(): argParser = ArgumentParser() addGenericOptions(argParser) argParser.add_argument("-r", "--route-alternatives", dest="routes", help="route alternatives from sumo (comma separated list, mandatory)", metavar="FILE") argParser.add_argument("-d", "--detector-values", dest="detvals", help="adapt to the flow on the given edges", metavar="FILE") argParser.add_argument("-c", "--classpath", dest="classpath", default=os.path.join(os.path.dirname( sys.argv[0]), "..", "contributed", "calibration", "cadytsSumoController.jar"), help="classpath for the calibrator [default: %default]") argParser.add_argument("-l", "--last-calibration-step", dest="calibStep", type=int, default=100, help="last step of the calibration [default: %default]") argParser.add_argument("-S", "--demandscale", dest="demandscale", type=float, default=2., 
help="scaled demand [default: %default]") argParser.add_argument("-F", "--freezeit", dest="freezeit", type=int, default=85, help="define the number of iterations for stablizing the results in the DTA-calibration") argParser.add_argument("-V", "--varscale", dest="varscale", type=float, default=1., help="define variance of the measured traffic flows for the DTA-calibration") argParser.add_argument("-P", "--PREPITS", type=int, dest="PREPITS", default=5, help="number of preparatory iterations") argParser.add_argument("-W", "--evaluation-prefix", dest="evalprefix", help="prefix of flow evaluation files; only for the calibration with use of detector data") argParser.add_argument("-Y", "--bruteforce", action="store_true", dest="bruteforce", default=False, help="fit the traffic counts as accurate as possible") argParser.add_argument("-Z", "--mincountstddev", type=float, dest="mincountstddev", default=25., help="minimal traffic count standard deviation") argParser.add_argument("-O", "--overridett", action="store_true", dest="overridett", default=False, help="override depart times according to updated link travel times") argParser.add_argument("-E", "--disable-summary", "--disable-emissions", action="store_true", dest="noSummary", default=False, help="No summaries are written by the simulation") argParser.add_argument("-T", "--disable-tripinfos", action="store_true", dest="noTripinfo", default=False, help="No tripinfos are written by the simulation") argParser.add_argument("-M", "--matrix-prefix", dest="fmaprefix", help="prefix of OD matrix files in visum format") argParser.add_argument("-N", "--clone-postfix", dest="clonepostfix", default='-CLONE', help="postfix attached to clone ids") argParser.add_argument("-X", "--cntfirstlink", action="store_true", dest="cntfirstlink", default=False, help="if entering vehicles are assumed to cross the upstream sensor of their entry link") argParser.add_argument("-K", "--cntlastlink", action="store_true", dest="cntlastlink", 
default=False, help="if exiting vehicles are assumed to cross the upstream sensor of their exit link") argParser.add_argument("remaining_args", nargs='*') return argParser def main(): argParser = initOptions() options = argParser.parse_args() if not options.net or not options.routes or not options.detvals: argParser.error( "--net-file, --routes and --detector-values have to be given!") if options.mesosim: sumoBinary = sumolib.checkBinary("meso", options.path) else: sumoBinary = sumolib.checkBinary("sumo", options.path) calibrator = ["java", "-cp", options.classpath, "-Xmx1G", "cadyts.interfaces.sumo.SumoController"] log = open("cadySumo-log.txt", "w+") # calibration init starttime = datetime.now() evalprefix = None if options.evalprefix: evalprefix = options.evalprefix # begin the calibration if options.fmaprefix: call(calibrator + ["INIT", "-varscale", options.varscale, "-freezeit", options.freezeit, "-measfile", options.detvals, "-binsize", options.aggregation, "-PREPITS", options.PREPITS, "-bruteforce", options.bruteforce, "-demandscale", options.demandscale, "-mincountstddev", options.mincountstddev, "-overridett", options.overridett, "-clonepostfix", options.clonepostfix, "-fmaprefix", options.fmaprefix, "-cntfirstlink", options.cntfirstlink, "-cntlastlink", options.cntlastlink], log) else: call(calibrator + ["INIT", "-varscale", options.varscale, "-freezeit", options.freezeit, "-measfile", options.detvals, "-binsize", options.aggregation, "-PREPITS", options.PREPITS, "-bruteforce", options.bruteforce, "-demandscale", options.demandscale, "-mincountstddev", options.mincountstddev, "-overridett", options.overridett, "-clonepostfix", options.clonepostfix, "-cntfirstlink", options.cntfirstlink, "-cntlastlink", options.cntlastlink], log) for step in range(options.calibStep): print('calibration step:', step) files = [] # calibration choice firstRoute = options.routes.split(",")[0] routname = os.path.basename(firstRoute) if '_' in routname: output = 
"%s_%03i.cal.xml" % (routname[:routname.rfind('_')], step) else: output = "%s_%03i.cal.xml" % (routname[:routname.find('.')], step) call(calibrator + ["CHOICE", "-choicesetfile", options.routes, "-choicefile", "%s" % output], log) files.append(output) # simulation print(">> Running simulation") btime = datetime.now() print(">>> Begin time: %s" % btime) writeSUMOConf(sumoBinary, step, options, [], ",".join(files)) retCode = call( [sumoBinary, "-c", "iteration_%03i.sumocfg" % step], log) etime = datetime.now() print(">>> End time: %s" % etime) print(">>> Duration: %s" % (etime - btime)) print("<<") # calibration update if evalprefix: call(calibrator + ["UPDATE", "-netfile", "dump_%03i_%s.xml" % ( step, options.aggregation), "-flowfile", "%s_%03i.txt" % (evalprefix, step)], log) else: call(calibrator + ["UPDATE", "-netfile", "dump_%03i_%s.xml" % (step, options.aggregation)], log) print("< Step %s ended (duration: %s)" % (step, datetime.now() - btime)) print("------------------\n") log.flush() print("calibration ended (duration: %s)" % (datetime.now() - starttime)) log.close() if __name__ == "__main__": main()
gpl-3.0
jayceechou/capirca
third_party/ply/yacc.py
319
128492
# ----------------------------------------------------------------------------- # ply: yacc.py # # Copyright (C) 2001-2009, # David M. Beazley (Dabeaz LLC) # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # * Neither the name of the David Beazley or Dabeaz LLC may be used to # endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ----------------------------------------------------------------------------- # # This implements an LR parser that is constructed from grammar rules defined # as Python functions. The grammer is specified by supplying the BNF inside # Python documentation strings. The inspiration for this technique was borrowed # from John Aycock's Spark parsing system. 
PLY might be viewed as cross between # Spark and the GNU bison utility. # # The current implementation is only somewhat object-oriented. The # LR parser itself is defined in terms of an object (which allows multiple # parsers to co-exist). However, most of the variables used during table # construction are defined in terms of global variables. Users shouldn't # notice unless they are trying to define multiple parsers at the same # time using threads (in which case they should have their head examined). # # This implementation supports both SLR and LALR(1) parsing. LALR(1) # support was originally implemented by Elias Ioup (ezioup@alumni.uchicago.edu), # using the algorithm found in Aho, Sethi, and Ullman "Compilers: Principles, # Techniques, and Tools" (The Dragon Book). LALR(1) has since been replaced # by the more efficient DeRemer and Pennello algorithm. # # :::::::: WARNING ::::::: # # Construction of LR parsing tables is fairly complicated and expensive. # To make this module run fast, a *LOT* of work has been put into # optimization---often at the expensive of readability and what might # consider to be good Python "coding style." Modify the code at your # own risk! # ---------------------------------------------------------------------------- __version__ = "3.3" __tabversion__ = "3.2" # Table version #----------------------------------------------------------------------------- # === User configurable parameters === # # Change these to modify the default behavior of yacc (if you wish) #----------------------------------------------------------------------------- yaccdebug = 1 # Debugging mode. 
If set, yacc generates a # a 'parser.out' file in the current directory debug_file = 'parser.out' # Default name of the debugging file tab_module = 'parsetab' # Default name of the table module default_lr = 'LALR' # Default LR table generation method error_count = 3 # Number of symbols that must be shifted to leave recovery mode yaccdevel = 0 # Set to True if developing yacc. This turns off optimized # implementations of certain functions. resultlimit = 40 # Size limit of results when running in debug mode. pickle_protocol = 0 # Protocol to use when writing pickle files import re, types, sys, os.path # Compatibility function for python 2.6/3.0 if sys.version_info[0] < 3: def func_code(f): return f.func_code else: def func_code(f): return f.__code__ # Compatibility try: MAXINT = sys.maxint except AttributeError: MAXINT = sys.maxsize # Python 2.x/3.0 compatibility. def load_ply_lex(): if sys.version_info[0] < 3: import lex else: import ply.lex as lex return lex # This object is a stand-in for a logging object created by the # logging module. PLY will use this by default to create things # such as the parser.out file. If a user wants more detailed # information, they can create their own logging object and pass # it into PLY. class PlyLogger(object): def __init__(self,f): self.f = f def debug(self,msg,*args,**kwargs): self.f.write((msg % args) + "\n") info = debug def warning(self,msg,*args,**kwargs): self.f.write("WARNING: "+ (msg % args) + "\n") def error(self,msg,*args,**kwargs): self.f.write("ERROR: " + (msg % args) + "\n") critical = debug # Null logger is used when no output is generated. Does nothing. class NullLogger(object): def __getattribute__(self,name): return self def __call__(self,*args,**kwargs): return self # Exception raised for yacc-related errors class YaccError(Exception): pass # Format the result message that the parser produces when running in debug mode. 
def format_result(r): repr_str = repr(r) if '\n' in repr_str: repr_str = repr(repr_str) if len(repr_str) > resultlimit: repr_str = repr_str[:resultlimit]+" ..." result = "<%s @ 0x%x> (%s)" % (type(r).__name__,id(r),repr_str) return result # Format stack entries when the parser is running in debug mode def format_stack_entry(r): repr_str = repr(r) if '\n' in repr_str: repr_str = repr(repr_str) if len(repr_str) < 16: return repr_str else: return "<%s @ 0x%x>" % (type(r).__name__,id(r)) #----------------------------------------------------------------------------- # === LR Parsing Engine === # # The following classes are used for the LR parser itself. These are not # used during table construction and are independent of the actual LR # table generation algorithm #----------------------------------------------------------------------------- # This class is used to hold non-terminal grammar symbols during parsing. # It normally has the following attributes set: # .type = Grammar symbol type # .value = Symbol value # .lineno = Starting line number # .endlineno = Ending line number (optional, set automatically) # .lexpos = Starting lex position # .endlexpos = Ending lex position (optional, set automatically) class YaccSymbol: def __str__(self): return self.type def __repr__(self): return str(self) # This class is a wrapper around the objects actually passed to each # grammar rule. Index lookup and assignment actually assign the # .value attribute of the underlying YaccSymbol object. # The lineno() method returns the line number of a given # item (or 0 if not defined). The linespan() method returns # a tuple of (startline,endline) representing the range of lines # for a symbol. The lexspan() method returns a tuple (lexpos,endlexpos) # representing the range of positional information for a symbol. 
class YaccProduction: def __init__(self,s,stack=None): self.slice = s self.stack = stack self.lexer = None self.parser= None def __getitem__(self,n): if n >= 0: return self.slice[n].value else: return self.stack[n].value def __setitem__(self,n,v): self.slice[n].value = v def __getslice__(self,i,j): return [s.value for s in self.slice[i:j]] def __len__(self): return len(self.slice) def lineno(self,n): return getattr(self.slice[n],"lineno",0) def set_lineno(self,n,lineno): self.slice[n].lineno = lineno def linespan(self,n): startline = getattr(self.slice[n],"lineno",0) endline = getattr(self.slice[n],"endlineno",startline) return startline,endline def lexpos(self,n): return getattr(self.slice[n],"lexpos",0) def lexspan(self,n): startpos = getattr(self.slice[n],"lexpos",0) endpos = getattr(self.slice[n],"endlexpos",startpos) return startpos,endpos def error(self): raise SyntaxError # ----------------------------------------------------------------------------- # == LRParser == # # The LR Parsing engine. # ----------------------------------------------------------------------------- class LRParser: def __init__(self,lrtab,errorf): self.productions = lrtab.lr_productions self.action = lrtab.lr_action self.goto = lrtab.lr_goto self.errorfunc = errorf def errok(self): self.errorok = 1 def restart(self): del self.statestack[:] del self.symstack[:] sym = YaccSymbol() sym.type = '$end' self.symstack.append(sym) self.statestack.append(0) def parse(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None): if debug or yaccdevel: if isinstance(debug,int): debug = PlyLogger(sys.stderr) return self.parsedebug(input,lexer,debug,tracking,tokenfunc) elif tracking: return self.parseopt(input,lexer,debug,tracking,tokenfunc) else: return self.parseopt_notrack(input,lexer,debug,tracking,tokenfunc) # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # parsedebug(). # # This is the debugging enabled version of parse(). 
All changes made to the # parsing engine should be made here. For the non-debugging version, # copy this code to a method parseopt() and delete all of the sections # enclosed in: # # #--! DEBUG # statements # #--! DEBUG # # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! def parsedebug(self,input=None,lexer=None,debug=None,tracking=0,tokenfunc=None): lookahead = None # Current lookahead symbol lookaheadstack = [ ] # Stack of lookahead symbols actions = self.action # Local reference to action table (to avoid lookup on self.) goto = self.goto # Local reference to goto table (to avoid lookup on self.) prod = self.productions # Local reference to production list (to avoid lookup on self.) pslice = YaccProduction(None) # Production object passed to grammar rules errorcount = 0 # Used during error recovery # --! DEBUG debug.info("PLY: PARSE DEBUG START") # --! DEBUG # If no lexer was given, we will try to use the lex module if not lexer: lex = load_ply_lex() lexer = lex.lexer # Set up the lexer and parser objects on pslice pslice.lexer = lexer pslice.parser = self # If input was supplied, pass to lexer if input is not None: lexer.input(input) if tokenfunc is None: # Tokenize function get_token = lexer.token else: get_token = tokenfunc # Set up the state and symbol stacks statestack = [ ] # Stack of parsing states self.statestack = statestack symstack = [ ] # Stack of grammar symbols self.symstack = symstack pslice.stack = symstack # Put in the production errtoken = None # Err token # The start state is assumed to be (0,$end) statestack.append(0) sym = YaccSymbol() sym.type = "$end" symstack.append(sym) state = 0 while 1: # Get the next symbol on the input. If a lookahead symbol # is already set, we just use that. Otherwise, we'll pull # the next token off of the lookaheadstack or from the lexer # --! DEBUG debug.debug('') debug.debug('State : %s', state) # --! 
DEBUG if not lookahead: if not lookaheadstack: lookahead = get_token() # Get the next token else: lookahead = lookaheadstack.pop() if not lookahead: lookahead = YaccSymbol() lookahead.type = "$end" # --! DEBUG debug.debug('Stack : %s', ("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip()) # --! DEBUG # Check the action table ltype = lookahead.type t = actions[state].get(ltype) if t is not None: if t > 0: # shift a symbol on the stack statestack.append(t) state = t # --! DEBUG debug.debug("Action : Shift and goto state %s", t) # --! DEBUG symstack.append(lookahead) lookahead = None # Decrease error count on successful shift if errorcount: errorcount -=1 continue if t < 0: # reduce a symbol on the stack, emit a production p = prod[-t] pname = p.name plen = p.len # Get production function sym = YaccSymbol() sym.type = pname # Production name sym.value = None # --! DEBUG if plen: debug.info("Action : Reduce rule [%s] with %s and goto state %d", p.str, "["+",".join([format_stack_entry(_v.value) for _v in symstack[-plen:]])+"]",-t) else: debug.info("Action : Reduce rule [%s] with %s and goto state %d", p.str, [],-t) # --! DEBUG if plen: targ = symstack[-plen-1:] targ[0] = sym # --! TRACKING if tracking: t1 = targ[1] sym.lineno = t1.lineno sym.lexpos = t1.lexpos t1 = targ[-1] sym.endlineno = getattr(t1,"endlineno",t1.lineno) sym.endlexpos = getattr(t1,"endlexpos",t1.lexpos) # --! TRACKING # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # The code enclosed in this section is duplicated # below as a performance optimization. Make sure # changes get made in both locations. pslice.slice = targ try: # Call the grammar rule with our special slice object del symstack[-plen:] del statestack[-plen:] p.callable(pslice) # --! DEBUG debug.info("Result : %s", format_result(pslice[0])) # --! DEBUG symstack.append(sym) state = goto[statestack[-1]][pname] statestack.append(state) except SyntaxError: # If an error was set. 
Enter error recovery state lookaheadstack.append(lookahead) symstack.pop() statestack.pop() state = statestack[-1] sym.type = 'error' lookahead = sym errorcount = error_count self.errorok = 0 continue # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! else: # --! TRACKING if tracking: sym.lineno = lexer.lineno sym.lexpos = lexer.lexpos # --! TRACKING targ = [ sym ] # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # The code enclosed in this section is duplicated # above as a performance optimization. Make sure # changes get made in both locations. pslice.slice = targ try: # Call the grammar rule with our special slice object p.callable(pslice) # --! DEBUG debug.info("Result : %s", format_result(pslice[0])) # --! DEBUG symstack.append(sym) state = goto[statestack[-1]][pname] statestack.append(state) except SyntaxError: # If an error was set. Enter error recovery state lookaheadstack.append(lookahead) symstack.pop() statestack.pop() state = statestack[-1] sym.type = 'error' lookahead = sym errorcount = error_count self.errorok = 0 continue # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! if t == 0: n = symstack[-1] result = getattr(n,"value",None) # --! DEBUG debug.info("Done : Returning %s", format_result(result)) debug.info("PLY: PARSE DEBUG END") # --! DEBUG return result if t == None: # --! DEBUG debug.error('Error : %s', ("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip()) # --! DEBUG # We have some kind of parsing error here. To handle # this, we are going to push the current token onto # the tokenstack and replace it with an 'error' token. # If there are any synchronization rules, they may # catch it. # # In addition to pushing the error token, we call call # the user defined p_error() function if this is the # first syntax error. This function is only called if # errorcount == 0. 
if errorcount == 0 or self.errorok: errorcount = error_count self.errorok = 0 errtoken = lookahead if errtoken.type == "$end": errtoken = None # End of file! if self.errorfunc: global errok,token,restart errok = self.errok # Set some special functions available in error recovery token = get_token restart = self.restart if errtoken and not hasattr(errtoken,'lexer'): errtoken.lexer = lexer tok = self.errorfunc(errtoken) del errok, token, restart # Delete special functions if self.errorok: # User must have done some kind of panic # mode recovery on their own. The # returned token is the next lookahead lookahead = tok errtoken = None continue else: if errtoken: if hasattr(errtoken,"lineno"): lineno = lookahead.lineno else: lineno = 0 if lineno: sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type)) else: sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type) else: sys.stderr.write("yacc: Parse error in input. EOF\n") return else: errorcount = error_count # case 1: the statestack only has 1 entry on it. If we're in this state, the # entire parse has been rolled back and we're completely hosed. The token is # discarded and we just keep going. if len(statestack) <= 1 and lookahead.type != "$end": lookahead = None errtoken = None state = 0 # Nuke the pushback stack del lookaheadstack[:] continue # case 2: the statestack has a couple of entries on it, but we're # at the end of the file. nuke the top entry and generate an error token # Start nuking entries on the stack if lookahead.type == "$end": # Whoa. We're really hosed here. Bail out return if lookahead.type != 'error': sym = symstack[-1] if sym.type == 'error': # Hmmm. 
Error is on top of stack, we'll just nuke input # symbol and continue lookahead = None continue t = YaccSymbol() t.type = 'error' if hasattr(lookahead,"lineno"): t.lineno = lookahead.lineno t.value = lookahead lookaheadstack.append(lookahead) lookahead = t else: symstack.pop() statestack.pop() state = statestack[-1] # Potential bug fix continue # Call an error function here raise RuntimeError("yacc: internal parser error!!!\n") # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # parseopt(). # # Optimized version of parse() method. DO NOT EDIT THIS CODE DIRECTLY. # Edit the debug version above, then copy any modifications to the method # below while removing #--! DEBUG sections. # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! def parseopt(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None): lookahead = None # Current lookahead symbol lookaheadstack = [ ] # Stack of lookahead symbols actions = self.action # Local reference to action table (to avoid lookup on self.) goto = self.goto # Local reference to goto table (to avoid lookup on self.) prod = self.productions # Local reference to production list (to avoid lookup on self.) 
pslice = YaccProduction(None) # Production object passed to grammar rules errorcount = 0 # Used during error recovery # If no lexer was given, we will try to use the lex module if not lexer: lex = load_ply_lex() lexer = lex.lexer # Set up the lexer and parser objects on pslice pslice.lexer = lexer pslice.parser = self # If input was supplied, pass to lexer if input is not None: lexer.input(input) if tokenfunc is None: # Tokenize function get_token = lexer.token else: get_token = tokenfunc # Set up the state and symbol stacks statestack = [ ] # Stack of parsing states self.statestack = statestack symstack = [ ] # Stack of grammar symbols self.symstack = symstack pslice.stack = symstack # Put in the production errtoken = None # Err token # The start state is assumed to be (0,$end) statestack.append(0) sym = YaccSymbol() sym.type = '$end' symstack.append(sym) state = 0 while 1: # Get the next symbol on the input. If a lookahead symbol # is already set, we just use that. Otherwise, we'll pull # the next token off of the lookaheadstack or from the lexer if not lookahead: if not lookaheadstack: lookahead = get_token() # Get the next token else: lookahead = lookaheadstack.pop() if not lookahead: lookahead = YaccSymbol() lookahead.type = '$end' # Check the action table ltype = lookahead.type t = actions[state].get(ltype) if t is not None: if t > 0: # shift a symbol on the stack statestack.append(t) state = t symstack.append(lookahead) lookahead = None # Decrease error count on successful shift if errorcount: errorcount -=1 continue if t < 0: # reduce a symbol on the stack, emit a production p = prod[-t] pname = p.name plen = p.len # Get production function sym = YaccSymbol() sym.type = pname # Production name sym.value = None if plen: targ = symstack[-plen-1:] targ[0] = sym # --! 
TRACKING if tracking: t1 = targ[1] sym.lineno = t1.lineno sym.lexpos = t1.lexpos t1 = targ[-1] sym.endlineno = getattr(t1,"endlineno",t1.lineno) sym.endlexpos = getattr(t1,"endlexpos",t1.lexpos) # --! TRACKING # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # The code enclosed in this section is duplicated # below as a performance optimization. Make sure # changes get made in both locations. pslice.slice = targ try: # Call the grammar rule with our special slice object del symstack[-plen:] del statestack[-plen:] p.callable(pslice) symstack.append(sym) state = goto[statestack[-1]][pname] statestack.append(state) except SyntaxError: # If an error was set. Enter error recovery state lookaheadstack.append(lookahead) symstack.pop() statestack.pop() state = statestack[-1] sym.type = 'error' lookahead = sym errorcount = error_count self.errorok = 0 continue # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! else: # --! TRACKING if tracking: sym.lineno = lexer.lineno sym.lexpos = lexer.lexpos # --! TRACKING targ = [ sym ] # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # The code enclosed in this section is duplicated # above as a performance optimization. Make sure # changes get made in both locations. pslice.slice = targ try: # Call the grammar rule with our special slice object p.callable(pslice) symstack.append(sym) state = goto[statestack[-1]][pname] statestack.append(state) except SyntaxError: # If an error was set. Enter error recovery state lookaheadstack.append(lookahead) symstack.pop() statestack.pop() state = statestack[-1] sym.type = 'error' lookahead = sym errorcount = error_count self.errorok = 0 continue # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! if t == 0: n = symstack[-1] return getattr(n,"value",None) if t == None: # We have some kind of parsing error here. To handle # this, we are going to push the current token onto # the tokenstack and replace it with an 'error' token. # If there are any synchronization rules, they may # catch it. 
# # In addition to pushing the error token, we call call # the user defined p_error() function if this is the # first syntax error. This function is only called if # errorcount == 0. if errorcount == 0 or self.errorok: errorcount = error_count self.errorok = 0 errtoken = lookahead if errtoken.type == '$end': errtoken = None # End of file! if self.errorfunc: global errok,token,restart errok = self.errok # Set some special functions available in error recovery token = get_token restart = self.restart if errtoken and not hasattr(errtoken,'lexer'): errtoken.lexer = lexer tok = self.errorfunc(errtoken) del errok, token, restart # Delete special functions if self.errorok: # User must have done some kind of panic # mode recovery on their own. The # returned token is the next lookahead lookahead = tok errtoken = None continue else: if errtoken: if hasattr(errtoken,"lineno"): lineno = lookahead.lineno else: lineno = 0 if lineno: sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type)) else: sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type) else: sys.stderr.write("yacc: Parse error in input. EOF\n") return else: errorcount = error_count # case 1: the statestack only has 1 entry on it. If we're in this state, the # entire parse has been rolled back and we're completely hosed. The token is # discarded and we just keep going. if len(statestack) <= 1 and lookahead.type != '$end': lookahead = None errtoken = None state = 0 # Nuke the pushback stack del lookaheadstack[:] continue # case 2: the statestack has a couple of entries on it, but we're # at the end of the file. nuke the top entry and generate an error token # Start nuking entries on the stack if lookahead.type == '$end': # Whoa. We're really hosed here. Bail out return if lookahead.type != 'error': sym = symstack[-1] if sym.type == 'error': # Hmmm. 
Error is on top of stack, we'll just nuke input # symbol and continue lookahead = None continue t = YaccSymbol() t.type = 'error' if hasattr(lookahead,"lineno"): t.lineno = lookahead.lineno t.value = lookahead lookaheadstack.append(lookahead) lookahead = t else: symstack.pop() statestack.pop() state = statestack[-1] # Potential bug fix continue # Call an error function here raise RuntimeError("yacc: internal parser error!!!\n") # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # parseopt_notrack(). # # Optimized version of parseopt() with line number tracking removed. # DO NOT EDIT THIS CODE DIRECTLY. Copy the optimized version and remove # code in the #--! TRACKING sections # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! def parseopt_notrack(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None): lookahead = None # Current lookahead symbol lookaheadstack = [ ] # Stack of lookahead symbols actions = self.action # Local reference to action table (to avoid lookup on self.) goto = self.goto # Local reference to goto table (to avoid lookup on self.) prod = self.productions # Local reference to production list (to avoid lookup on self.) 
pslice = YaccProduction(None) # Production object passed to grammar rules errorcount = 0 # Used during error recovery # If no lexer was given, we will try to use the lex module if not lexer: lex = load_ply_lex() lexer = lex.lexer # Set up the lexer and parser objects on pslice pslice.lexer = lexer pslice.parser = self # If input was supplied, pass to lexer if input is not None: lexer.input(input) if tokenfunc is None: # Tokenize function get_token = lexer.token else: get_token = tokenfunc # Set up the state and symbol stacks statestack = [ ] # Stack of parsing states self.statestack = statestack symstack = [ ] # Stack of grammar symbols self.symstack = symstack pslice.stack = symstack # Put in the production errtoken = None # Err token # The start state is assumed to be (0,$end) statestack.append(0) sym = YaccSymbol() sym.type = '$end' symstack.append(sym) state = 0 while 1: # Get the next symbol on the input. If a lookahead symbol # is already set, we just use that. Otherwise, we'll pull # the next token off of the lookaheadstack or from the lexer if not lookahead: if not lookaheadstack: lookahead = get_token() # Get the next token else: lookahead = lookaheadstack.pop() if not lookahead: lookahead = YaccSymbol() lookahead.type = '$end' # Check the action table ltype = lookahead.type t = actions[state].get(ltype) if t is not None: if t > 0: # shift a symbol on the stack statestack.append(t) state = t symstack.append(lookahead) lookahead = None # Decrease error count on successful shift if errorcount: errorcount -=1 continue if t < 0: # reduce a symbol on the stack, emit a production p = prod[-t] pname = p.name plen = p.len # Get production function sym = YaccSymbol() sym.type = pname # Production name sym.value = None if plen: targ = symstack[-plen-1:] targ[0] = sym # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # The code enclosed in this section is duplicated # below as a performance optimization. Make sure # changes get made in both locations. 
pslice.slice = targ try: # Call the grammar rule with our special slice object del symstack[-plen:] del statestack[-plen:] p.callable(pslice) symstack.append(sym) state = goto[statestack[-1]][pname] statestack.append(state) except SyntaxError: # If an error was set. Enter error recovery state lookaheadstack.append(lookahead) symstack.pop() statestack.pop() state = statestack[-1] sym.type = 'error' lookahead = sym errorcount = error_count self.errorok = 0 continue # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! else: targ = [ sym ] # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # The code enclosed in this section is duplicated # above as a performance optimization. Make sure # changes get made in both locations. pslice.slice = targ try: # Call the grammar rule with our special slice object p.callable(pslice) symstack.append(sym) state = goto[statestack[-1]][pname] statestack.append(state) except SyntaxError: # If an error was set. Enter error recovery state lookaheadstack.append(lookahead) symstack.pop() statestack.pop() state = statestack[-1] sym.type = 'error' lookahead = sym errorcount = error_count self.errorok = 0 continue # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! if t == 0: n = symstack[-1] return getattr(n,"value",None) if t == None: # We have some kind of parsing error here. To handle # this, we are going to push the current token onto # the tokenstack and replace it with an 'error' token. # If there are any synchronization rules, they may # catch it. # # In addition to pushing the error token, we call call # the user defined p_error() function if this is the # first syntax error. This function is only called if # errorcount == 0. if errorcount == 0 or self.errorok: errorcount = error_count self.errorok = 0 errtoken = lookahead if errtoken.type == '$end': errtoken = None # End of file! 
if self.errorfunc: global errok,token,restart errok = self.errok # Set some special functions available in error recovery token = get_token restart = self.restart if errtoken and not hasattr(errtoken,'lexer'): errtoken.lexer = lexer tok = self.errorfunc(errtoken) del errok, token, restart # Delete special functions if self.errorok: # User must have done some kind of panic # mode recovery on their own. The # returned token is the next lookahead lookahead = tok errtoken = None continue else: if errtoken: if hasattr(errtoken,"lineno"): lineno = lookahead.lineno else: lineno = 0 if lineno: sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type)) else: sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type) else: sys.stderr.write("yacc: Parse error in input. EOF\n") return else: errorcount = error_count # case 1: the statestack only has 1 entry on it. If we're in this state, the # entire parse has been rolled back and we're completely hosed. The token is # discarded and we just keep going. if len(statestack) <= 1 and lookahead.type != '$end': lookahead = None errtoken = None state = 0 # Nuke the pushback stack del lookaheadstack[:] continue # case 2: the statestack has a couple of entries on it, but we're # at the end of the file. nuke the top entry and generate an error token # Start nuking entries on the stack if lookahead.type == '$end': # Whoa. We're really hosed here. Bail out return if lookahead.type != 'error': sym = symstack[-1] if sym.type == 'error': # Hmmm. 
Error is on top of stack, we'll just nuke input # symbol and continue lookahead = None continue t = YaccSymbol() t.type = 'error' if hasattr(lookahead,"lineno"): t.lineno = lookahead.lineno t.value = lookahead lookaheadstack.append(lookahead) lookahead = t else: symstack.pop() statestack.pop() state = statestack[-1] # Potential bug fix continue # Call an error function here raise RuntimeError("yacc: internal parser error!!!\n") # ----------------------------------------------------------------------------- # === Grammar Representation === # # The following functions, classes, and variables are used to represent and # manipulate the rules that make up a grammar. # ----------------------------------------------------------------------------- import re # regex matching identifiers _is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$') # ----------------------------------------------------------------------------- # class Production: # # This class stores the raw information about a single production or grammar rule. # A grammar rule refers to a specification such as this: # # expr : expr PLUS term # # Here are the basic attributes defined on all productions # # name - Name of the production. For example 'expr' # prod - A list of symbols on the right side ['expr','PLUS','term'] # prec - Production precedence level # number - Production number. # func - Function that executes on reduce # file - File where production function is defined # lineno - Line number where production function is defined # # The following attributes are defined or optional. 
# # len - Length of the production (number of symbols on right hand side) # usyms - Set of unique symbols found in the production # ----------------------------------------------------------------------------- class Production(object): reduced = 0 def __init__(self,number,name,prod,precedence=('right',0),func=None,file='',line=0): self.name = name self.prod = tuple(prod) self.number = number self.func = func self.callable = None self.file = file self.line = line self.prec = precedence # Internal settings used during table construction self.len = len(self.prod) # Length of the production # Create a list of unique production symbols used in the production self.usyms = [ ] for s in self.prod: if s not in self.usyms: self.usyms.append(s) # List of all LR items for the production self.lr_items = [] self.lr_next = None # Create a string representation if self.prod: self.str = "%s -> %s" % (self.name," ".join(self.prod)) else: self.str = "%s -> <empty>" % self.name def __str__(self): return self.str def __repr__(self): return "Production("+str(self)+")" def __len__(self): return len(self.prod) def __nonzero__(self): return 1 def __getitem__(self,index): return self.prod[index] # Return the nth lr_item from the production (or None if at the end) def lr_item(self,n): if n > len(self.prod): return None p = LRItem(self,n) # Precompute the list of productions immediately following. Hack. Remove later try: p.lr_after = Prodnames[p.prod[n+1]] except (IndexError,KeyError): p.lr_after = [] try: p.lr_before = p.prod[n-1] except IndexError: p.lr_before = None return p # Bind the production function name to a callable def bind(self,pdict): if self.func: self.callable = pdict[self.func] # This class serves as a minimal standin for Production objects when # reading table data from files. It only contains information # actually used by the LR parsing engine, plus some additional # debugging information. 
class MiniProduction(object):
    """Lightweight stand-in for Production when tables are read from disk.

    Carries only the fields the LR parsing engine consumes, plus a little
    debugging information (source file/line and display string).
    """
    def __init__(self,str,name,len,func,file,line):
        # Parameter names mirror the serialized tuple order (several shadow
        # builtins); kept unchanged for interface compatibility.
        self.name     = name
        self.len      = len
        self.func     = func
        self.callable = None
        self.file     = file
        self.line     = line
        self.str      = str

    def __str__(self):
        return self.str

    def __repr__(self):
        return "MiniProduction(%s)" % self.str

    # Resolve the stored reduction-function name against a dict of callables.
    def bind(self,pdict):
        if not self.func:
            return
        self.callable = pdict[self.func]


# -----------------------------------------------------------------------------
# class LRItem
#
# Represents one stage of parsing a production rule, e.g.:
#
#       expr : expr . PLUS term
#
# where "." marks the current parse position.  Attributes:
#
#       name       - Name of the production, e.g. 'expr'
#       prod       - RHS symbols with the dot inserted, e.g. ('expr','.','PLUS','term')
#       number     - Production number
#       lr_next    - Next LR item (dot advanced one position); set externally
#       lr_index   - Index of the "." within prod
#       lookaheads - LALR lookahead symbols for this item
#       len        - Length of prod (including the dot)
#       lr_after   - Productions that can immediately follow; set externally
#       lr_before  - Grammar symbol immediately before the dot; set externally
# -----------------------------------------------------------------------------

class LRItem(object):
    def __init__(self,p,n):
        # Build the dotted RHS first, then record everything else.
        dotted = list(p.prod)
        dotted.insert(n,".")
        self.name       = p.name
        self.prod       = tuple(dotted)
        self.number     = p.number
        self.lr_index   = n
        self.lookaheads = { }
        self.len        = len(self.prod)
        self.usyms      = p.usyms

    def __str__(self):
        if not self.prod:
            return "%s -> <empty>" % self.name
        return "%s -> %s" % (self.name," ".join(self.prod))

    def __repr__(self):
        return "LRItem("+str(self)+")"

# -----------------------------------------------------------------------------
# rightmost_terminal()
#
# Return the rightmost terminal from a list of symbols.
# Used in add_production()
# -----------------------------------------------------------------------------

def rightmost_terminal(symbols, terminals):
    """Return the rightmost symbol in *symbols* that is a terminal, or None."""
    i = len(symbols) - 1
    while i >= 0:
        if symbols[i] in terminals:
            return symbols[i]
        i -= 1
    return None

# -----------------------------------------------------------------------------
#                           === GRAMMAR CLASS ===
#
# The following class represents the contents of the specified grammar along
# with various computed properties such as first sets, follow sets, LR items, etc.
# This data is used for critical parts of the table generation process later.
# -----------------------------------------------------------------------------

class GrammarError(YaccError): pass

class Grammar(object):
    """Holds all productions, terminals, nonterminals, and precedence rules,
    and computes derived data (FIRST/FOLLOW sets, LR items) used by the
    table generator."""

    def __init__(self,terminals):
        self.Productions  = [None]  # A list of all of the productions.  The first
                                    # entry is always reserved for the purpose of
                                    # building an augmented grammar

        self.Prodnames    = { }     # A dictionary mapping the names of nonterminals to a list of all
                                    # productions of that nonterminal.

        self.Prodmap      = { }     # A dictionary that is only used to detect duplicate
                                    # productions.

        self.Terminals    = { }     # A dictionary mapping the names of terminal symbols to a
                                    # list of the rules where they are used.

        for term in terminals:
            self.Terminals[term] = []

        # 'error' is always a valid terminal (used for error recovery rules)
        self.Terminals['error'] = []

        self.Nonterminals = { }     # A dictionary mapping names of nonterminals to a list
                                    # of rule numbers where they are used.

        self.First        = { }     # A dictionary of precomputed FIRST(x) symbols

        self.Follow       = { }     # A dictionary of precomputed FOLLOW(x) symbols

        self.Precedence   = { }     # Precedence rules for each terminal. Contains tuples of the
                                    # form ('right',level) or ('nonassoc', level) or ('left',level)

        self.UsedPrecedence = { }   # Precedence rules that were actually used by the grammar.
                                    # This is only used to provide error checking and to generate
                                    # a warning about unused precedence rules.

        self.Start = None           # Starting symbol for the grammar

    def __len__(self):
        return len(self.Productions)

    def __getitem__(self,index):
        return self.Productions[index]

    # -----------------------------------------------------------------------------
    # set_precedence()
    #
    # Sets the precedence for a given terminal. assoc is the associativity such as
    # 'left','right', or 'nonassoc'.  level is a numeric level.
    #
    # Must be called before any productions are added.
    # -----------------------------------------------------------------------------

    def set_precedence(self,term,assoc,level):
        assert self.Productions == [None],"Must call set_precedence() before add_production()"
        if term in self.Precedence:
            raise GrammarError("Precedence already specified for terminal '%s'" % term)
        if assoc not in ['left','right','nonassoc']:
            raise GrammarError("Associativity must be one of 'left','right', or 'nonassoc'")
        self.Precedence[term] = (assoc,level)

    # -----------------------------------------------------------------------------
    # add_production()
    #
    # Given an action function, this function assembles a production rule and
    # computes its precedence level.
    #
    # The production rule is supplied as a list of symbols.   For example,
    # a rule such as 'expr : expr PLUS term' has a production name of 'expr' and
    # symbols ['expr','PLUS','term'].
    #
    # Precedence is determined by the precedence of the right-most non-terminal
    # or the precedence of a terminal specified by %prec.
    #
    # A variety of error checks are performed to make sure production symbols
    # are valid and that %prec is used correctly.
    # -----------------------------------------------------------------------------

    def add_production(self,prodname,syms,func=None,file='',line=0):

        # Validate the rule name: must not clash with a token or 'error'
        if prodname in self.Terminals:
            raise GrammarError("%s:%d: Illegal rule name '%s'. Already defined as a token" % (file,line,prodname))
        if prodname == 'error':
            raise GrammarError("%s:%d: Illegal rule name '%s'. error is a reserved word" % (file,line,prodname))
        if not _is_identifier.match(prodname):
            raise GrammarError("%s:%d: Illegal rule name '%s'" % (file,line,prodname))

        # Look for literal tokens such as 'x' or "+" and register them as terminals
        for n,s in enumerate(syms):
            if s[0] in "'\"":
                 try:
                     c = eval(s)
                     if (len(c) > 1):
                          raise GrammarError("%s:%d: Literal token %s in rule '%s' may only be a single character" % (file,line,s, prodname))
                     if not c in self.Terminals:
                          self.Terminals[c] = []
                     syms[n] = c
                     continue
                 except SyntaxError:
                     pass
            if not _is_identifier.match(s) and s != '%prec':
                raise GrammarError("%s:%d: Illegal name '%s' in rule '%s'" % (file,line,s, prodname))

        # Determine the precedence level
        if '%prec' in syms:
            if syms[-1] == '%prec':
                raise GrammarError("%s:%d: Syntax error. Nothing follows %%prec" % (file,line))
            if syms[-2] != '%prec':
                raise GrammarError("%s:%d: Syntax error. %%prec can only appear at the end of a grammar rule" % (file,line))
            precname = syms[-1]
            prodprec = self.Precedence.get(precname,None)
            if not prodprec:
                raise GrammarError("%s:%d: Nothing known about the precedence of '%s'" % (file,line,precname))
            else:
                self.UsedPrecedence[precname] = 1
            del syms[-2:]     # Drop %prec from the rule
        else:
            # If no %prec, precedence is determined by the rightmost terminal symbol
            precname = rightmost_terminal(syms,self.Terminals)
            prodprec = self.Precedence.get(precname,('right',0))

        # See if the rule is already in the rulemap
        map = "%s -> %s" % (prodname,syms)
        if map in self.Prodmap:
            m = self.Prodmap[map]
            raise GrammarError("%s:%d: Duplicate rule %s. " % (file,line, m) +
                               "Previous definition at %s:%d" % (m.file, m.line))

        # From this point on, everything is valid.  Create a new Production instance
        pnumber  = len(self.Productions)
        if not prodname in self.Nonterminals:
            self.Nonterminals[prodname] = [ ]

        # Add the production number to Terminals and Nonterminals
        for t in syms:
            if t in self.Terminals:
                self.Terminals[t].append(pnumber)
            else:
                if not t in self.Nonterminals:
                    self.Nonterminals[t] = [ ]
                self.Nonterminals[t].append(pnumber)

        # Create a production and add it to the list of productions
        p = Production(pnumber,prodname,syms,prodprec,func,file,line)
        self.Productions.append(p)
        self.Prodmap[map] = p

        # Add to the global productions list
        try:
            self.Prodnames[prodname].append(p)
        except KeyError:
            self.Prodnames[prodname] = [ p ]
        return 0

    # -----------------------------------------------------------------------------
    # set_start()
    #
    # Sets the starting symbol and creates the augmented grammar.    Production
    # rule 0 is S' -> start where start is the start symbol.
    # -----------------------------------------------------------------------------

    def set_start(self,start=None):
        # Default start symbol: the first rule that was added
        if not start:
            start = self.Productions[1].name
        if start not in self.Nonterminals:
            raise GrammarError("start symbol %s undefined" % start)
        self.Productions[0] = Production(0,"S'",[start])
        self.Nonterminals[start].append(0)
        self.Start = start

    # -----------------------------------------------------------------------------
    # find_unreachable()
    #
    # Find all of the nonterminal symbols that can't be reached from the starting
    # symbol.  Returns a list of nonterminals that can't be reached.
    # -----------------------------------------------------------------------------

    def find_unreachable(self):

        # Mark all symbols that are reachable from a symbol s (depth-first)
        def mark_reachable_from(s):
            if reachable[s]:
                # We've already reached symbol s.
                return
            reachable[s] = 1
            for p in self.Prodnames.get(s,[]):
                for r in p.prod:
                    mark_reachable_from(r)

        reachable   = { }
        for s in list(self.Terminals) + list(self.Nonterminals):
            reachable[s] = 0

        mark_reachable_from( self.Productions[0].prod[0] )

        return [s for s in list(self.Nonterminals)
                        if not reachable[s]]

    # -----------------------------------------------------------------------------
    # infinite_cycles()
    #
    # This function looks at the various parsing rules and tries to detect
    # infinite recursion cycles (grammar rules where there is no possible way
    # to derive a string of only terminals).
    # -----------------------------------------------------------------------------

    def infinite_cycles(self):
        terminates = {}

        # Terminals: trivially terminate
        for t in self.Terminals:
            terminates[t] = 1

        terminates['$end'] = 1

        # Nonterminals:

        # Initialize to false:
        for n in self.Nonterminals:
            terminates[n] = 0

        # Then propagate termination until no change (fixed-point iteration):
        while 1:
            some_change = 0
            for (n,pl) in self.Prodnames.items():
                # Nonterminal n terminates iff any of its productions terminates.
                for p in pl:
                    # Production p terminates iff all of its rhs symbols terminate.
                    for s in p.prod:
                        if not terminates[s]:
                            # The symbol s does not terminate,
                            # so production p does not terminate.
                            p_terminates = 0
                            break
                    else:
                        # didn't break from the loop,
                        # so every symbol s terminates
                        # so production p terminates.
                        p_terminates = 1

                    if p_terminates:
                        # symbol n terminates!
                        if not terminates[n]:
                            terminates[n] = 1
                            some_change = 1
                        # Don't need to consider any more productions for this n.
                        break

            if not some_change:
                break

        infinite = []
        for (s,term) in terminates.items():
            if not term:
                if not s in self.Prodnames and not s in self.Terminals and s != 'error':
                    # s is used-but-not-defined, and we've already warned of that,
                    # so it would be overkill to say that it's also non-terminating.
                    pass
                else:
                    infinite.append(s)

        return infinite

    # -----------------------------------------------------------------------------
    # undefined_symbols()
    #
    # Find all symbols that were used the grammar, but not defined as tokens or
    # grammar rules.  Returns a list of tuples (sym, prod) where sym in the symbol
    # and prod is the production where the symbol was used.
    # -----------------------------------------------------------------------------
    def undefined_symbols(self):
        result = []
        for p in self.Productions:
            if not p: continue

            for s in p.prod:
                if not s in self.Prodnames and not s in self.Terminals and s != 'error':
                    result.append((s,p))
        return result

    # -----------------------------------------------------------------------------
    # unused_terminals()
    #
    # Find all terminals that were defined, but not used by the grammar.  Returns
    # a list of all symbols.
    # -----------------------------------------------------------------------------
    def unused_terminals(self):
        unused_tok = []
        for s,v in self.Terminals.items():
            if s != 'error' and not v:
                unused_tok.append(s)

        return unused_tok

    # ------------------------------------------------------------------------------
    # unused_rules()
    #
    # Find all grammar rules that were defined,  but not used (maybe not reachable)
    # Returns a list of productions.
    # ------------------------------------------------------------------------------

    def unused_rules(self):
        unused_prod = []
        for s,v in self.Nonterminals.items():
            if not v:
                p = self.Prodnames[s][0]
                unused_prod.append(p)
        return unused_prod

    # -----------------------------------------------------------------------------
    # unused_precedence()
    #
    # Returns a list of tuples (term,precedence) corresponding to precedence
    # rules that were never used by the grammar.  term is the name of the terminal
    # on which precedence was applied and precedence is a string such as 'left' or
    # 'right' corresponding to the type of precedence.
    # -----------------------------------------------------------------------------

    def unused_precedence(self):
        unused = []
        for termname in self.Precedence:
            if not (termname in self.Terminals or termname in self.UsedPrecedence):
                unused.append((termname,self.Precedence[termname][0]))

        return unused

    # -------------------------------------------------------------------------
    # _first()
    #
    # Compute the value of FIRST1(beta) where beta is a tuple of symbols.
    #
    # During execution of compute_first1, the result may be incomplete.
    # Afterward (e.g., when called from compute_follow()), it will be complete.
    # -------------------------------------------------------------------------
    def _first(self,beta):

        # We are computing First(x1,x2,x3,...,xn)
        result = [ ]
        for x in beta:
            x_produces_empty = 0

            # Add all the non-<empty> symbols of First[x] to the result.
            for f in self.First[x]:
                if f == '<empty>':
                    x_produces_empty = 1
                else:
                    if f not in result: result.append(f)

            if x_produces_empty:
                # We have to consider the next x in beta,
                # i.e. stay in the loop.
                pass
            else:
                # We don't have to consider any further symbols in beta.
                break
        else:
            # There was no 'break' from the loop,
            # so x_produces_empty was true for all x in beta,
            # so beta produces empty as well.
            result.append('<empty>')

        return result

    # -------------------------------------------------------------------------
    # compute_first()
    #
    # Compute the value of FIRST1(X) for all symbols
    # -------------------------------------------------------------------------
    def compute_first(self):
        if self.First:
            return self.First

        # Terminals:
        for t in self.Terminals:
            self.First[t] = [t]

        self.First['$end'] = ['$end']

        # Nonterminals:

        # Initialize to the empty set:
        for n in self.Nonterminals:
            self.First[n] = []

        # Then propagate symbols until no change:
        while 1:
            some_change = 0
            for n in self.Nonterminals:
                for p in self.Prodnames[n]:
                    for f in self._first(p.prod):
                        if f not in self.First[n]:
                            self.First[n].append( f )
                            some_change = 1
            if not some_change:
                break

        return self.First

    # ---------------------------------------------------------------------
    # compute_follow()
    #
    # Computes all of the follow sets for every non-terminal symbol.  The
    # follow set is the set of all symbols that might follow a given
    # non-terminal.  See the Dragon book, 2nd Ed. p. 189.
    # ---------------------------------------------------------------------
    def compute_follow(self,start=None):
        # If already computed, return the result
        if self.Follow:
            return self.Follow

        # If first sets not computed yet, do that first.
        if not self.First:
            self.compute_first()

        # Add '$end' to the follow list of the start symbol
        for k in self.Nonterminals:
            self.Follow[k] = [ ]

        if not start:
            start = self.Productions[1].name

        self.Follow[start] = [ '$end' ]

        while 1:
            didadd = 0
            for p in self.Productions[1:]:
                # Here is the production set
                for i in range(len(p.prod)):
                    B = p.prod[i]
                    if B in self.Nonterminals:
                        # Okay. We got a non-terminal in a production
                        fst = self._first(p.prod[i+1:])
                        hasempty = 0
                        for f in fst:
                            if f != '<empty>' and f not in self.Follow[B]:
                                self.Follow[B].append(f)
                                didadd = 1
                            if f == '<empty>':
                                hasempty = 1
                        if hasempty or i == (len(p.prod)-1):
                            # Add elements of follow(a) to follow(b)
                            for f in self.Follow[p.name]:
                                if f not in self.Follow[B]:
                                    self.Follow[B].append(f)
                                    didadd = 1
            if not didadd: break
        return self.Follow

    # -----------------------------------------------------------------------------
    # build_lritems()
    #
    # This function walks the list of productions and builds a complete set of the
    # LR items.  The LR items are stored in two ways:  First, they are uniquely
    # numbered and placed in the list _lritems.  Second, a linked list of LR items
    # is built for each production.  For example:
    #
    #   E -> E PLUS E
    #
    # Creates the list
    #
    #  [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . ]
    # -----------------------------------------------------------------------------

    def build_lritems(self):
        for p in self.Productions:
            lastlri = p
            i = 0
            lr_items = []
            while 1:
                if i > len(p):
                    lri = None
                else:
                    lri = LRItem(p,i)
                    # Precompute the list of productions immediately following
                    try:
                        lri.lr_after = self.Prodnames[lri.prod[i+1]]
                    except (IndexError,KeyError):
                        lri.lr_after = []
                    try:
                        lri.lr_before = lri.prod[i-1]
                    except IndexError:
                        lri.lr_before = None

                lastlri.lr_next = lri
                if not lri: break
                lr_items.append(lri)
                lastlri = lri
                i += 1
            p.lr_items = lr_items

# -----------------------------------------------------------------------------
#                            == Class LRTable ==
#
# This basic class represents a basic table of LR parsing information.
# Methods for generating the tables are not defined here.  They are defined
# in the derived class LRGeneratedTable.
# ----------------------------------------------------------------------------- class VersionError(YaccError): pass class LRTable(object): def __init__(self): self.lr_action = None self.lr_goto = None self.lr_productions = None self.lr_method = None def read_table(self,module): if isinstance(module,types.ModuleType): parsetab = module else: if sys.version_info[0] < 3: exec("import %s as parsetab" % module) else: env = { } exec("import %s as parsetab" % module, env, env) parsetab = env['parsetab'] if parsetab._tabversion != __tabversion__: raise VersionError("yacc table file version is out of date") self.lr_action = parsetab._lr_action self.lr_goto = parsetab._lr_goto self.lr_productions = [] for p in parsetab._lr_productions: self.lr_productions.append(MiniProduction(*p)) self.lr_method = parsetab._lr_method return parsetab._lr_signature def read_pickle(self,filename): try: import cPickle as pickle except ImportError: import pickle in_f = open(filename,"rb") tabversion = pickle.load(in_f) if tabversion != __tabversion__: raise VersionError("yacc table file version is out of date") self.lr_method = pickle.load(in_f) signature = pickle.load(in_f) self.lr_action = pickle.load(in_f) self.lr_goto = pickle.load(in_f) productions = pickle.load(in_f) self.lr_productions = [] for p in productions: self.lr_productions.append(MiniProduction(*p)) in_f.close() return signature # Bind all production function names to callable objects in pdict def bind_callables(self,pdict): for p in self.lr_productions: p.bind(pdict) # ----------------------------------------------------------------------------- # === LR Generator === # # The following classes and functions are used to generate LR parsing tables on # a grammar. 
# ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- # digraph() # traverse() # # The following two functions are used to compute set valued functions # of the form: # # F(x) = F'(x) U U{F(y) | x R y} # # This is used to compute the values of Read() sets as well as FOLLOW sets # in LALR(1) generation. # # Inputs: X - An input set # R - A relation # FP - Set-valued function # ------------------------------------------------------------------------------ def digraph(X,R,FP): N = { } for x in X: N[x] = 0 stack = [] F = { } for x in X: if N[x] == 0: traverse(x,N,stack,F,X,R,FP) return F def traverse(x,N,stack,F,X,R,FP): stack.append(x) d = len(stack) N[x] = d F[x] = FP(x) # F(X) <- F'(x) rel = R(x) # Get y's related to x for y in rel: if N[y] == 0: traverse(y,N,stack,F,X,R,FP) N[x] = min(N[x],N[y]) for a in F.get(y,[]): if a not in F[x]: F[x].append(a) if N[x] == d: N[stack[-1]] = MAXINT F[stack[-1]] = F[x] element = stack.pop() while element != x: N[stack[-1]] = MAXINT F[stack[-1]] = F[x] element = stack.pop() class LALRError(YaccError): pass # ----------------------------------------------------------------------------- # == LRGeneratedTable == # # This class implements the LR table generation algorithm. 
# -----------------------------------------------------------------------------
#                          == LRGeneratedTable ==
#
# This class implements the LR table generation algorithm.  There are no
# public methods except for write()
# -----------------------------------------------------------------------------

class LRGeneratedTable(LRTable):
    """Builds SLR or LALR(1) parse tables from a Grammar object.

    The constructor runs the whole pipeline (LR(0) items, lookaheads,
    action/goto tables).  write_table() and pickle_table() persist the result.
    """

    def __init__(self,grammar,method='LALR',log=None):
        # method: 'SLR' or 'LALR' — anything else is rejected up front.
        if method not in ['SLR','LALR']:
            raise LALRError("Unsupported method %s" % method)

        self.grammar = grammar
        self.lr_method = method

        # Set up the logger
        if not log:
            log = NullLogger()
        self.log = log

        # Internal attributes
        self.lr_action = {}        # Action table
        self.lr_goto = {}          # Goto table
        self.lr_productions = grammar.Productions  # Copy of grammar Production array
        self.lr_goto_cache = {}    # Cache of computed gotos
        self.lr0_cidhash = {}      # Cache of closures
        self._add_count = 0        # Internal counter used to detect cycles

        # Diagnostic information filled in by the table generator
        self.sr_conflict = 0
        self.rr_conflict = 0
        self.conflicts = []        # List of conflicts

        self.sr_conflicts = []
        self.rr_conflicts = []

        # Build the tables
        self.grammar.build_lritems()
        self.grammar.compute_first()
        self.grammar.compute_follow()
        self.lr_parse_table()

    # Compute the LR(0) closure operation on I, where I is a set of LR(0) items.
    def lr0_closure(self,I):
        """Return the LR(0) closure of item set I as a new list."""
        self._add_count += 1

        # Add everything in I to J
        J = I[:]
        didadd = 1
        while didadd:
            didadd = 0
            for j in J:
                for x in j.lr_after:
                    # _add_count tags items already added this call, so a
                    # nonterminal's productions are appended at most once.
                    if getattr(x,"lr0_added",0) == self._add_count:
                        continue
                    # Add B --> .G to J
                    J.append(x.lr_next)
                    x.lr0_added = self._add_count
                    didadd = 1
        return J

    # Compute the LR(0) goto function goto(I,X) where I is a set
    # of LR(0) items and X is a grammar symbol.   This function is written
    # in a way that guarantees uniqueness of the generated goto sets
    # (i.e. the same goto set will never be returned as two different Python
    # objects).  With uniqueness, we can later do fast set comparisons using
    # id(obj) instead of element-wise comparison.
    def lr0_goto(self,I,x):
        # First we look for a previously cached entry
        g = self.lr_goto_cache.get((id(I),x),None)
        if g:
            return g

        # Now we generate the goto set in a way that guarantees uniqueness
        # of the result.  s is a trie keyed by the id() of each kernel item;
        # the '$end' slot of the final trie node holds the closed set.
        s = self.lr_goto_cache.get(x,None)
        if not s:
            s = { }
            self.lr_goto_cache[x] = s

        gs = [ ]
        for p in I:
            n = p.lr_next
            if n and n.lr_before == x:
                s1 = s.get(id(n),None)
                if not s1:
                    s1 = { }
                    s[id(n)] = s1
                gs.append(n)
                s = s1
        g = s.get('$end',None)
        if not g:
            if gs:
                g = self.lr0_closure(gs)
                s['$end'] = g
            else:
                s['$end'] = gs
        self.lr_goto_cache[(id(I),x)] = g
        return g

    # Compute the LR(0) sets of item function
    def lr0_items(self):
        """Return C, the canonical collection of LR(0) item sets (states)."""
        C = [ self.lr0_closure([self.grammar.Productions[0].lr_next]) ]
        i = 0
        for I in C:
            self.lr0_cidhash[id(I)] = i
            i += 1

        # Loop over the items in C and each grammar symbols
        i = 0
        while i < len(C):           # C grows while we iterate; index loop on purpose
            I = C[i]
            i += 1

            # Collect all of the symbols that could possibly be in the goto(I,X) sets
            asyms = { }
            for ii in I:
                for s in ii.usyms:
                    asyms[s] = None

            for x in asyms:
                g = self.lr0_goto(I,x)
                if not g:
                    continue
                if id(g) in self.lr0_cidhash:
                    continue
                self.lr0_cidhash[id(g)] = len(C)
                C.append(g)

        return C

    # -----------------------------------------------------------------------------
    #                       ==== LALR(1) Parsing ====
    #
    # LALR(1) parsing is almost exactly the same as SLR except that instead of
    # relying upon Follow() sets when performing reductions, a more selective
    # lookahead set that incorporates the state of the LR(0) machine is utilized.
    # Thus, we mainly just have to focus on calculating the lookahead sets.
    #
    # The method used here is due to DeRemer and Pennelo (1982).
    #
    # DeRemer, F. L., and T. J. Pennelo: "Efficient Computation of LALR(1)
    # Lookahead Sets", ACM Transactions on Programming Languages and Systems,
    # Vol. 4, No. 4, Oct. 1982, pp. 615-649
    #
    # Further details can also be found in:
    #
    #  J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing",
    #      McGraw-Hill Book Company, (1985).
    # -----------------------------------------------------------------------------

    # -----------------------------------------------------------------------------
    # compute_nullable_nonterminals()
    #
    # Creates a dictionary containing all of the non-terminals that might produce
    # an empty production.
    # -----------------------------------------------------------------------------
    def compute_nullable_nonterminals(self):
        nullable = {}
        num_nullable = 0
        while 1:
            # Iterate to a fixed point: a rule is nullable if empty or if every
            # symbol on its right-hand side is already known nullable.
            for p in self.grammar.Productions[1:]:
                if p.len == 0:
                    nullable[p.name] = 1
                    continue
                for t in p.prod:
                    if not t in nullable:
                        break
                else:
                    nullable[p.name] = 1
            if len(nullable) == num_nullable:
                break
            num_nullable = len(nullable)
        return nullable

    # -----------------------------------------------------------------------------
    # find_nonterminal_trans(C)
    #
    # Given a set of LR(0) items, this functions finds all of the non-terminal
    # transitions.    These are transitions in which a dot appears immediately before
    # a non-terminal.   Returns a list of tuples of the form (state,N) where state
    # is the state number and N is the nonterminal symbol.
    #
    # The input C is the set of LR(0) items.
    # -----------------------------------------------------------------------------
    def find_nonterminal_transitions(self,C):
        trans = []
        for state in range(len(C)):
            for p in C[state]:
                if p.lr_index < p.len - 1:
                    t = (state,p.prod[p.lr_index+1])
                    if t[1] in self.grammar.Nonterminals:
                        if t not in trans:
                            trans.append(t)
            state = state + 1   # NOTE: dead leftover; the for statement rebinds state
        return trans

    # -----------------------------------------------------------------------------
    # dr_relation()
    #
    # Computes the DR(p,A) relationships for non-terminal transitions.  The input
    # is a tuple (state,N) where state is a number and N is a nonterminal symbol.
    #
    # Returns a list of terminals.
    # -----------------------------------------------------------------------------
    def dr_relation(self,C,trans,nullable):
        dr_set = { }    # NOTE: unused; kept from the original implementation
        state,N = trans
        terms = []

        g = self.lr0_goto(C[state],N)
        for p in g:
            if p.lr_index < p.len - 1:
                a = p.prod[p.lr_index+1]
                if a in self.grammar.Terminals:
                    if a not in terms:
                        terms.append(a)

        # This extra bit is to handle the start state
        if state == 0 and N == self.grammar.Productions[0].prod[0]:
            terms.append('$end')

        return terms

    # -----------------------------------------------------------------------------
    # reads_relation()
    #
    # Computes the READS() relation (p,A) READS (t,C).
    # -----------------------------------------------------------------------------
    def reads_relation(self,C, trans, empty):
        # Look for empty transitions
        rel = []
        state, N = trans

        g = self.lr0_goto(C[state],N)
        j = self.lr0_cidhash.get(id(g),-1)
        for p in g:
            if p.lr_index < p.len - 1:
                a = p.prod[p.lr_index + 1]
                if a in empty:
                    rel.append((j,a))

        return rel

    # -----------------------------------------------------------------------------
    # compute_lookback_includes()
    #
    # Determines the lookback and includes relations
    #
    # LOOKBACK:
    #
    # This relation is determined by running the LR(0) state machine forward.
    # For example, starting with a production "N : . A B C", we run it forward
    # to obtain "N : A B C ."   We then build a relationship between this final
    # state and the starting state.   These relationships are stored in a dictionary
    # lookdict.
    #
    # INCLUDES:
    #
    # Computes the INCLUDE() relation (p,A) INCLUDES (p',B).
    #
    # This relation is used to determine non-terminal transitions that occur
    # inside of other non-terminal transition states.   (p,A) INCLUDES (p', B)
    # if the following holds:
    #
    #       B -> LAT, where T -> epsilon and p' -L-> p
    #
    # L is essentially a prefix (which may be empty), T is a suffix that must be
    # able to derive an empty string.  State p' must lead to state p with the string L.
    # -----------------------------------------------------------------------------
    def compute_lookback_includes(self,C,trans,nullable):
        lookdict = {}          # Dictionary of lookback relations
        includedict = {}       # Dictionary of include relations

        # Make a dictionary of non-terminal transitions
        dtrans = {}
        for t in trans:
            dtrans[t] = 1

        # Loop over all transitions and compute lookbacks and includes
        for state,N in trans:
            lookb = []
            includes = []
            for p in C[state]:
                if p.name != N:
                    continue

                # Okay, we have a name match.  We now follow the production all the way
                # through the state machine until we get the . on the right hand side

                lr_index = p.lr_index
                j = state
                while lr_index < p.len - 1:
                    lr_index = lr_index + 1
                    t = p.prod[lr_index]

                    # Check to see if this symbol and state are a non-terminal transition
                    if (j,t) in dtrans:
                        # Yes.  Okay, there is some chance that this is an includes relation
                        # the only way to know for certain is whether the rest of the
                        # production derives empty

                        li = lr_index + 1
                        while li < p.len:
                            if p.prod[li] in self.grammar.Terminals:
                                break      # No forget it
                            if not p.prod[li] in nullable:
                                break
                            li = li + 1
                        else:
                            # Appears to be a relation between (j,t) and (state,N)
                            includes.append((j,t))

                    g = self.lr0_goto(C[j],t)              # Go to next set
                    j = self.lr0_cidhash.get(id(g),-1)     # Go to next state

                # When we get here, j is the final state, now we have to locate the production
                for r in C[j]:
                    if r.name != p.name:
                        continue
                    if r.len != p.len:
                        continue
                    i = 0
                    # This look is comparing a production ". A B C" with "A B C ."
                    while i < r.lr_index:
                        if r.prod[i] != p.prod[i+1]:
                            break
                        i = i + 1
                    else:
                        lookb.append((j,r))
            for i in includes:
                if not i in includedict:
                    includedict[i] = []
                includedict[i].append((state,N))
            lookdict[(state,N)] = lookb

        return lookdict,includedict

    # -----------------------------------------------------------------------------
    # compute_read_sets()
    #
    # Given a set of LR(0) items, this function computes the read sets.
    #
    # Inputs:  C        =  Set of LR(0) items
    #          ntrans   = Set of nonterminal transitions
    #          nullable = Set of empty transitions
    #
    # Returns a set containing the read sets
    # -----------------------------------------------------------------------------
    def compute_read_sets(self,C, ntrans, nullable):
        FP = lambda x: self.dr_relation(C,x,nullable)
        R = lambda x: self.reads_relation(C,x,nullable)
        F = digraph(ntrans,R,FP)
        return F

    # -----------------------------------------------------------------------------
    # compute_follow_sets()
    #
    # Given a set of LR(0) items, a set of non-terminal transitions, a readset,
    # and an include set, this function computes the follow sets
    #
    # Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)}
    #
    # Inputs:
    #            ntrans     = Set of nonterminal transitions
    #            readsets   = Readset (previously computed)
    #            inclsets   = Include sets (previously computed)
    #
    # Returns a set containing the follow sets
    # -----------------------------------------------------------------------------
    def compute_follow_sets(self,ntrans,readsets,inclsets):
        FP = lambda x: readsets[x]
        R = lambda x: inclsets.get(x,[])
        F = digraph(ntrans,R,FP)
        return F

    # -----------------------------------------------------------------------------
    # add_lookaheads()
    #
    # Attaches the lookahead symbols to grammar rules.
    #
    # Inputs:    lookbacks         -  Set of lookback relations
    #            followset         -  Computed follow set
    #
    # This function directly attaches the lookaheads to productions contained
    # in the lookbacks set
    # -----------------------------------------------------------------------------
    def add_lookaheads(self,lookbacks,followset):
        for trans,lb in lookbacks.items():
            # Loop over productions in lookback
            for state,p in lb:
                if not state in p.lookaheads:
                    p.lookaheads[state] = []
                f = followset.get(trans,[])
                for a in f:
                    if a not in p.lookaheads[state]:
                        p.lookaheads[state].append(a)

    # -----------------------------------------------------------------------------
    # add_lalr_lookaheads()
    #
    # This function does all of the work of adding lookahead information for use
    # with LALR parsing
    # -----------------------------------------------------------------------------
    def add_lalr_lookaheads(self,C):
        # Determine all of the nullable nonterminals
        nullable = self.compute_nullable_nonterminals()

        # Find all non-terminal transitions
        trans = self.find_nonterminal_transitions(C)

        # Compute read sets
        readsets = self.compute_read_sets(C,trans,nullable)

        # Compute lookback/includes relations
        lookd, included = self.compute_lookback_includes(C,trans,nullable)

        # Compute LALR FOLLOW sets
        followsets = self.compute_follow_sets(trans,readsets,included)

        # Add all of the lookaheads
        self.add_lookaheads(lookd,followsets)

    # -----------------------------------------------------------------------------
    # lr_parse_table()
    #
    # This function constructs the parse tables for SLR or LALR
    # -----------------------------------------------------------------------------
    def lr_parse_table(self):
        """Fill in self.lr_action / self.lr_goto, recording any conflicts."""
        Productions = self.grammar.Productions
        Precedence = self.grammar.Precedence
        goto = self.lr_goto         # Goto array
        action = self.lr_action     # Action array
        log = self.log              # Logger for output

        actionp = { }               # Action production array (temporary)

        log.info("Parsing method: %s", self.lr_method)

        # Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items
        # This determines the number of states

        C = self.lr0_items()

        if self.lr_method == 'LALR':
            self.add_lalr_lookaheads(C)

        # Build the parser table, state by state
        st = 0
        for I in C:
            # Loop over each production in I
            actlist = [ ]              # List of actions
            st_action = { }
            st_actionp = { }
            st_goto = { }
            log.info("")
            log.info("state %d", st)
            log.info("")
            for p in I:
                log.info(" (%d) %s", p.number, str(p))
            log.info("")

            for p in I:
                if p.len == p.lr_index + 1:
                    if p.name == "S'":
                        # Start symbol. Accept!
                        # Action code 0 means accept in the action table.
                        st_action["$end"] = 0
                        st_actionp["$end"] = p
                    else:
                        # We are at the end of a production.  Reduce!
                        if self.lr_method == 'LALR':
                            laheads = p.lookaheads[st]
                        else:
                            laheads = self.grammar.Follow[p.name]
                        for a in laheads:
                            actlist.append((a,p,"reduce using rule %d (%s)" % (p.number,p)))
                            r = st_action.get(a,None)
                            if r is not None:
                                # Whoa. Have a shift/reduce or reduce/reduce conflict
                                if r > 0:
                                    # Need to decide on shift or reduce here
                                    # By default we favor shifting. Need to add
                                    # some precedence rules here.
                                    sprec,slevel = Productions[st_actionp[a].number].prec
                                    rprec,rlevel = Precedence.get(a,('right',0))
                                    if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')):
                                        # We really need to reduce here.
                                        st_action[a] = -p.number
                                        st_actionp[a] = p
                                        if not slevel and not rlevel:
                                            log.info(" ! shift/reduce conflict for %s resolved as reduce",a)
                                            self.sr_conflicts.append((st,a,'reduce'))
                                        Productions[p.number].reduced += 1
                                    elif (slevel == rlevel) and (rprec == 'nonassoc'):
                                        st_action[a] = None
                                    else:
                                        # Hmmm. Guess we'll keep the shift
                                        if not rlevel:
                                            log.info(" ! shift/reduce conflict for %s resolved as shift",a)
                                            self.sr_conflicts.append((st,a,'shift'))
                                elif r < 0:
                                    # Reduce/reduce conflict.   In this case, we favor the rule
                                    # that was defined first in the grammar file
                                    oldp = Productions[-r]
                                    pp = Productions[p.number]
                                    if oldp.line > pp.line:
                                        st_action[a] = -p.number
                                        st_actionp[a] = p
                                        chosenp,rejectp = pp,oldp
                                        Productions[p.number].reduced += 1
                                        Productions[oldp.number].reduced -= 1
                                    else:
                                        chosenp,rejectp = oldp,pp
                                    self.rr_conflicts.append((st,chosenp,rejectp))
                                    log.info(" ! reduce/reduce conflict for %s resolved using rule %d (%s)", a,st_actionp[a].number, st_actionp[a])
                                else:
                                    raise LALRError("Unknown conflict in state %d" % st)
                            else:
                                st_action[a] = -p.number
                                st_actionp[a] = p
                                Productions[p.number].reduced += 1
                else:
                    i = p.lr_index
                    a = p.prod[i+1]       # Get symbol right after the "."
                    if a in self.grammar.Terminals:
                        g = self.lr0_goto(I,a)
                        j = self.lr0_cidhash.get(id(g),-1)
                        if j >= 0:
                            # We are in a shift state
                            actlist.append((a,p,"shift and go to state %d" % j))
                            r = st_action.get(a,None)
                            if r is not None:
                                # Whoa have a shift/reduce or shift/shift conflict
                                if r > 0:
                                    if r != j:
                                        raise LALRError("Shift/shift conflict in state %d" % st)
                                elif r < 0:
                                    # Do a precedence check.
                                    #   -  if precedence of reduce rule is higher, we reduce.
                                    #   -  if precedence of reduce is same and left assoc, we reduce.
                                    #   -  otherwise we shift
                                    rprec,rlevel = Productions[st_actionp[a].number].prec
                                    sprec,slevel = Precedence.get(a,('right',0))
                                    if (slevel > rlevel) or ((slevel == rlevel) and (rprec == 'right')):
                                        # We decide to shift here... highest precedence to shift
                                        Productions[st_actionp[a].number].reduced -= 1
                                        st_action[a] = j
                                        st_actionp[a] = p
                                        if not rlevel:
                                            log.info(" ! shift/reduce conflict for %s resolved as shift",a)
                                            self.sr_conflicts.append((st,a,'shift'))
                                    elif (slevel == rlevel) and (rprec == 'nonassoc'):
                                        st_action[a] = None
                                    else:
                                        # Hmmm. Guess we'll keep the reduce
                                        if not slevel and not rlevel:
                                            log.info(" ! shift/reduce conflict for %s resolved as reduce",a)
                                            self.sr_conflicts.append((st,a,'reduce'))
                                else:
                                    raise LALRError("Unknown conflict in state %d" % st)
                            else:
                                st_action[a] = j
                                st_actionp[a] = p

            # Print the actions associated with each terminal
            _actprint = { }
            for a,p,m in actlist:
                if a in st_action:
                    if p is st_actionp[a]:
                        log.info(" %-15s %s",a,m)
                        _actprint[(a,m)] = 1
            log.info("")
            # Print the actions that were not used. (debugging)
            not_used = 0
            for a,p,m in actlist:
                if a in st_action:
                    if p is not st_actionp[a]:
                        if not (a,m) in _actprint:
                            log.debug(" ! %-15s [ %s ]",a,m)
                            not_used = 1
                            _actprint[(a,m)] = 1
            if not_used:
                log.debug("")

            # Construct the goto table for this state
            nkeys = { }
            for ii in I:
                for s in ii.usyms:
                    if s in self.grammar.Nonterminals:
                        nkeys[s] = None
            for n in nkeys:
                g = self.lr0_goto(I,n)
                j = self.lr0_cidhash.get(id(g),-1)
                if j >= 0:
                    st_goto[n] = j
                    log.info(" %-30s shift and go to state %d",n,j)

            action[st] = st_action
            actionp[st] = st_actionp
            goto[st] = st_goto
            st += 1

    # -----------------------------------------------------------------------------
    # write()
    #
    # This function writes the LR parsing tables to a file
    # -----------------------------------------------------------------------------
    def write_table(self,modulename,outputdir='',signature=""):
        """Write the tables as an importable Python module (parsetab-style)."""
        basemodulename = modulename.split(".")[-1]
        filename = os.path.join(outputdir,basemodulename) + ".py"
        try:
            f = open(filename,"w")

            f.write("""
# %s
# This file is automatically generated. Do not edit.
_tabversion = %r
_lr_method = %r
_lr_signature = %r
""" % (filename, __tabversion__, self.lr_method, signature))

            # Change smaller to 0 to go back to original tables
            smaller = 1

            # Factor out names to try and make smaller
            if smaller:
                items = { }

                for s,nd in self.lr_action.items():
                    for name,v in nd.items():
                        i = items.get(name)
                        if not i:
                            i = ([],[])
                            items[name] = i
                        i[0].append(s)
                        i[1].append(v)

                f.write("\n_lr_action_items = {")
                for k,v in items.items():
                    f.write("%r:([" % k)
                    for i in v[0]:
                        f.write("%r," % i)
                    f.write("],[")
                    for i in v[1]:
                        f.write("%r," % i)
                    f.write("]),")
                f.write("}\n")

                f.write("""
_lr_action = { }
for _k, _v in _lr_action_items.items():
   for _x,_y in zip(_v[0],_v[1]):
      if not _x in _lr_action:  _lr_action[_x] = { }
      _lr_action[_x][_k] = _y
del _lr_action_items
""")
            else:
                f.write("\n_lr_action = { ");
                for k,v in self.lr_action.items():
                    f.write("(%r,%r):%r," % (k[0],k[1],v))
                f.write("}\n");

            if smaller:
                # Factor out names to try and make smaller
                items = { }

                for s,nd in self.lr_goto.items():
                    for name,v in nd.items():
                        i = items.get(name)
                        if not i:
                            i = ([],[])
                            items[name] = i
                        i[0].append(s)
                        i[1].append(v)

                f.write("\n_lr_goto_items = {")
                for k,v in items.items():
                    f.write("%r:([" % k)
                    for i in v[0]:
                        f.write("%r," % i)
                    f.write("],[")
                    for i in v[1]:
                        f.write("%r," % i)
                    f.write("]),")
                f.write("}\n")

                f.write("""
_lr_goto = { }
for _k, _v in _lr_goto_items.items():
   for _x,_y in zip(_v[0],_v[1]):
       if not _x in _lr_goto: _lr_goto[_x] = { }
       _lr_goto[_x][_k] = _y
del _lr_goto_items
""")
            else:
                f.write("\n_lr_goto = { ");
                for k,v in self.lr_goto.items():
                    f.write("(%r,%r):%r," % (k[0],k[1],v))
                f.write("}\n");

            # Write production table
            f.write("_lr_productions = [\n")
            for p in self.lr_productions:
                if p.func:
                    f.write(" (%r,%r,%d,%r,%r,%d),\n" % (p.str,p.name, p.len, p.func,p.file,p.line))
                else:
                    f.write(" (%r,%r,%d,None,None,None),\n" % (str(p),p.name, p.len))
            f.write("]\n")

            f.close()

        except IOError:
            e = sys.exc_info()[1]
            sys.stderr.write("Unable to create '%s'\n" % filename)
            sys.stderr.write(str(e)+"\n")
            return

    # -----------------------------------------------------------------------------
    # pickle_table()
    #
    # This function pickles the LR parsing tables to a supplied file object
    # -----------------------------------------------------------------------------
    def pickle_table(self,filename,signature=""):
        """Serialize version, method, signature, tables and productions."""
        try:
            import cPickle as pickle   # Python 2 fast pickler; falls back on py3
        except ImportError:
            import pickle
        outf = open(filename,"wb")
        pickle.dump(__tabversion__,outf,pickle_protocol)
        pickle.dump(self.lr_method,outf,pickle_protocol)
        pickle.dump(signature,outf,pickle_protocol)
        pickle.dump(self.lr_action,outf,pickle_protocol)
        pickle.dump(self.lr_goto,outf,pickle_protocol)
        outp = []
        for p in self.lr_productions:
            if p.func:
                outp.append((p.str,p.name, p.len, p.func,p.file,p.line))
            else:
                outp.append((str(p),p.name,p.len,None,None,None))
        pickle.dump(outp,outf,pickle_protocol)
        outf.close()

# -----------------------------------------------------------------------------
#                            === INTROSPECTION ===
#
# The following functions and classes are used to implement the PLY
# introspection features followed by the yacc() function itself.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# Returns a dictionary containing all of the symbols defined within a caller
# further down the call stack.  Used to recover the environment of the
# yacc() call when no explicit module was provided.
# -----------------------------------------------------------------------------

def get_caller_module_dict(levels):
    """Return globals merged with locals of the frame *levels* up the stack."""
    try:
        raise RuntimeError
    except RuntimeError:
        _, _, tb = sys.exc_info()
        frame = tb.tb_frame
        for _ in range(levels):
            frame = frame.f_back
        env = frame.f_globals.copy()
        # At module level globals IS locals; skip the redundant update then.
        if frame.f_globals != frame.f_locals:
            env.update(frame.f_locals)
        return env

# -----------------------------------------------------------------------------
# parse_grammar()
#
# This takes a raw grammar rule string and parses it into production data
# -----------------------------------------------------------------------------

def parse_grammar(doc,file,line):
    """Parse a rule docstring into a list of (file, line, name, syms) tuples.

    *line* is the line number of the docstring's opening line; '|' lines
    continue the most recently named rule.
    """
    rules = []
    current = None      # name of the last explicit rule, for '|' continuations
    lineno = line
    for raw in doc.splitlines():
        lineno += 1
        tokens = raw.split()
        if not tokens:
            continue
        try:
            if tokens[0] == '|':
                # This is a continuation of a previous rule
                if current is None:
                    raise SyntaxError("%s:%d: Misplaced '|'" % (file,lineno))
                name = current
                symbols = tokens[1:]
            else:
                name = tokens[0]
                current = name
                symbols = tokens[2:]
                # tokens[1] raises IndexError on a one-word line, which the
                # generic handler below turns into a SyntaxError.
                if tokens[1] not in (':', '::='):
                    raise SyntaxError("%s:%d: Syntax error. Expected ':'" % (file,lineno))
            rules.append((file, lineno, name, symbols))
        except SyntaxError:
            raise
        except Exception:
            raise SyntaxError("%s:%d: Syntax error in rule '%s'" % (file,lineno,raw.strip()))
    return rules

# -----------------------------------------------------------------------------
# ParserReflect()
#
# This class represents information extracted for building a parser including
# start symbol, error function, tokens, precedence list, action functions,
# etc.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# ParserReflect
# -----------------------------------------------------------------------------

class ParserReflect(object):
    """Extracts and validates parser information from a module dictionary.

    The get_*() methods pull start symbol, p_error, tokens, precedence and
    p_* rule functions out of pdict; the validate_*() methods sanity-check
    them, accumulating problems in self.error.
    """

    def __init__(self,pdict,log=None):
        self.pdict = pdict          # module dictionary being introspected
        self.start = None
        self.error_func = None
        self.tokens = None
        self.files = {}             # files containing rules (for validate_files)
        self.grammar = []
        self.error = 0              # set to 1 when any validation fails

        if log is None:
            self.log = PlyLogger(sys.stderr)
        else:
            self.log = log

    # Get all of the basic information
    def get_all(self):
        self.get_start()
        self.get_error_func()
        self.get_tokens()
        self.get_precedence()
        self.get_pfunctions()

    # Validate all of the information
    def validate_all(self):
        self.validate_start()
        self.validate_error_func()
        self.validate_tokens()
        self.validate_precedence()
        self.validate_pfunctions()
        self.validate_files()
        return self.error

    # Compute a signature over the grammar
    def signature(self):
        """Return an md5 digest of start/precedence/tokens/rule docstrings,
        used to decide whether cached tables are still valid."""
        try:
            from hashlib import md5
        except ImportError:
            from md5 import md5    # ancient Python fallback
        try:
            sig = md5()
            if self.start:
                sig.update(self.start.encode('latin-1'))
            if self.prec:
                sig.update("".join(["".join(p) for p in self.prec]).encode('latin-1'))
            if self.tokens:
                sig.update(" ".join(self.tokens).encode('latin-1'))
            for f in self.pfuncs:
                if f[3]:
                    sig.update(f[3].encode('latin-1'))
        except (TypeError,ValueError):
            pass
        return sig.digest()

    # -----------------------------------------------------------------------------
    # validate_file()
    #
    # This method checks to see if there are duplicated p_rulename() functions
    # in the parser module file.  Without this function, it is really easy for
    # users to make mistakes by cutting and pasting code fragments (and it's a real
    # bugger to try and figure out why the resulting parser doesn't work).  Therefore,
    # we just do a little regular expression pattern matching of def statements
    # to try and detect duplicates.
    # -----------------------------------------------------------------------------
    def validate_files(self):
        # Match def p_funcname(
        fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(')

        for filename in self.files.keys():
            base,ext = os.path.splitext(filename)
            if ext != '.py':
                return 1          # No idea. Assume it's okay.

            try:
                f = open(filename)
                lines = f.readlines()
                f.close()
            except IOError:
                continue          # source not readable; best-effort check only

            counthash = { }
            for linen,l in enumerate(lines):
                linen += 1        # report 1-based line numbers
                m = fre.match(l)
                if m:
                    name = m.group(1)
                    prev = counthash.get(name)
                    if not prev:
                        counthash[name] = linen
                    else:
                        self.log.warning("%s:%d: Function %s redefined. Previously defined on line %d", filename,linen,name,prev)

    # Get the start symbol
    def get_start(self):
        self.start = self.pdict.get('start')

    # Validate the start symbol
    def validate_start(self):
        if self.start is not None:
            if not isinstance(self.start,str):
                self.log.error("'start' must be a string")

    # Look for error handler
    def get_error_func(self):
        self.error_func = self.pdict.get('p_error')

    # Validate the error function
    def validate_error_func(self):
        if self.error_func:
            if isinstance(self.error_func,types.FunctionType):
                ismethod = 0
            elif isinstance(self.error_func, types.MethodType):
                ismethod = 1
            else:
                self.log.error("'p_error' defined, but is not a function or method")
                self.error = 1
                return

            eline = func_code(self.error_func).co_firstlineno
            efile = func_code(self.error_func).co_filename
            self.files[efile] = 1

            # p_error takes exactly one argument (plus self for methods)
            if (func_code(self.error_func).co_argcount != 1+ismethod):
                self.log.error("%s:%d: p_error() requires 1 argument",efile,eline)
                self.error = 1

    # Get the tokens map
    def get_tokens(self):
        tokens = self.pdict.get("tokens",None)
        if not tokens:
            self.log.error("No token list is defined")
            self.error = 1
            return

        if not isinstance(tokens,(list, tuple)):
            self.log.error("tokens must be a list or tuple")
            self.error = 1
            return

        if not tokens:
            self.log.error("tokens is empty")
            self.error = 1
            return

        self.tokens = tokens

    # Validate the tokens
    def validate_tokens(self):
        # Validate the tokens.
        if 'error' in self.tokens:
            self.log.error("Illegal token name 'error'. Is a reserved word")
            self.error = 1
            return

        terminals = {}
        for n in self.tokens:
            if n in terminals:
                self.log.warning("Token '%s' multiply defined", n)
            terminals[n] = 1

    # Get the precedence map (if any)
    def get_precedence(self):
        self.prec = self.pdict.get("precedence",None)

    # Validate and parse the precedence map
    def validate_precedence(self):
        # Flattens precedence into self.preclist as (term, assoc, level) tuples.
        preclist = []
        if self.prec:
            if not isinstance(self.prec,(list,tuple)):
                self.log.error("precedence must be a list or tuple")
                self.error = 1
                return
            for level,p in enumerate(self.prec):
                if not isinstance(p,(list,tuple)):
                    self.log.error("Bad precedence table")
                    self.error = 1
                    return

                if len(p) < 2:
                    self.log.error("Malformed precedence entry %s. Must be (assoc, term, ..., term)",p)
                    self.error = 1
                    return
                assoc = p[0]
                if not isinstance(assoc,str):
                    self.log.error("precedence associativity must be a string")
                    self.error = 1
                    return
                for term in p[1:]:
                    if not isinstance(term,str):
                        self.log.error("precedence items must be strings")
                        self.error = 1
                        return
                    preclist.append((term,assoc,level+1))
        self.preclist = preclist

    # Get all p_functions from the grammar
    def get_pfunctions(self):
        p_functions = []
        for name, item in self.pdict.items():
            if name[:2] != 'p_':
                continue
            if name == 'p_error':
                continue
            if isinstance(item,(types.FunctionType,types.MethodType)):
                line = func_code(item).co_firstlineno
                file = func_code(item).co_filename
                p_functions.append((line,file,name,item.__doc__))

        # Sort all of the actions by line number
        p_functions.sort()
        self.pfuncs = p_functions

    # Validate all of the p_functions
    def validate_pfunctions(self):
        grammar = []
        # Check for non-empty symbols
        if len(self.pfuncs) == 0:
            self.log.error("no rules of the form p_rulename are defined")
            self.error = 1
            return

        for line, file, name, doc in self.pfuncs:
            func = self.pdict[name]
            if isinstance(func, types.MethodType):
                reqargs = 2
            else:
                reqargs = 1
            if func_code(func).co_argcount > reqargs:
                self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,func.__name__)
                self.error = 1
            elif func_code(func).co_argcount < reqargs:
                self.log.error("%s:%d: Rule '%s' requires an argument",file,line,func.__name__)
                self.error = 1
            elif not func.__doc__:
                self.log.warning("%s:%d: No documentation string specified in function '%s' (ignored)",file,line,func.__name__)
            else:
                try:
                    parsed_g = parse_grammar(doc,file,line)
                    for g in parsed_g:
                        grammar.append((name, g))
                except SyntaxError:
                    e = sys.exc_info()[1]
                    self.log.error(str(e))
                    self.error = 1

                # Looks like a valid grammar rule
                # Mark the file in which defined.
                self.files[file] = 1

        # Secondary validation step that looks for p_ definitions that are not functions
        # or functions that look like they might be grammar rules.

        for n,v in self.pdict.items():
            if n[0:2] == 'p_' and isinstance(v, (types.FunctionType, types.MethodType)):
                continue
            if n[0:2] == 't_':
                continue
            if n[0:2] == 'p_' and n != 'p_error':
                self.log.warning("'%s' not defined as a function", n)
            if ((isinstance(v,types.FunctionType) and func_code(v).co_argcount == 1) or
                (isinstance(v,types.MethodType) and func_code(v).co_argcount == 2)):
                try:
                    doc = v.__doc__.split(" ")
                    if doc[1] == ':':
                        self.log.warning("%s:%d: Possible grammar rule '%s' defined without p_ prefix",
                                         func_code(v).co_filename, func_code(v).co_firstlineno,n)
                except Exception:
                    pass

        self.grammar = grammar

# -----------------------------------------------------------------------------
# yacc(module)
#
# Build a parser
# -----------------------------------------------------------------------------

def yacc(method='LALR', debug=yaccdebug, module=None, tabmodule=tab_module, start=None,
         check_recursion=1, optimize=0, write_tables=1, debugfile=debug_file, outputdir='',
         debuglog=None, errorlog = None, picklefile=None):
    """Build and return an LRParser from the caller's (or *module*'s) rules.

    Tries to reuse cached tables (tabmodule or picklefile) when their stored
    signature matches the current grammar; otherwise regenerates them with
    LRGeneratedTable and optionally writes them back out.
    """

    global parse                 # Reference to the parsing method of the last built parser

    # If pickling is enabled, table files are not created
    if picklefile:
        write_tables = 0

    if errorlog is None:
        errorlog = PlyLogger(sys.stderr)

    # Get the module dictionary used for the parser
    if module:
        _items = [(k,getattr(module,k)) for k in dir(module)]
        pdict = dict(_items)
    else:
        pdict = get_caller_module_dict(2)

    # Collect parser information from the dictionary
    pinfo = ParserReflect(pdict,log=errorlog)
    pinfo.get_all()

    if pinfo.error:
        raise YaccError("Unable to build parser")

    # Check signature against table files (if any)
    signature = pinfo.signature()

    # Read the tables
    try:
        lr = LRTable()
        if picklefile:
            read_signature = lr.read_pickle(picklefile)
        else:
            read_signature = lr.read_table(tabmodule)
        if optimize or (read_signature == signature):
            try:
                lr.bind_callables(pinfo.pdict)
                parser = LRParser(lr,pinfo.error_func)
                parse = parser.parse
                return parser
            except Exception:
                e = sys.exc_info()[1]
                errorlog.warning("There was a problem loading the table file: %s", repr(e))
    except VersionError:
        e = sys.exc_info()
        errorlog.warning(str(e))
    except Exception:
        # Any failure to load cached tables just falls through to regeneration.
        pass

    if debuglog is None:
        if debug:
            debuglog = PlyLogger(open(debugfile,"w"))
        else:
            debuglog = NullLogger()

    debuglog.info("Created by PLY version %s (http://www.dabeaz.com/ply)", __version__)

    errors = 0

    # Validate the parser information
    if pinfo.validate_all():
        raise YaccError("Unable to build parser")

    if not pinfo.error_func:
        errorlog.warning("no p_error() function is defined")

    # Create a grammar object
    grammar = Grammar(pinfo.tokens)

    # Set precedence level for terminals
    for term, assoc, level in pinfo.preclist:
        try:
            grammar.set_precedence(term,assoc,level)
        except GrammarError:
            e = sys.exc_info()[1]
            errorlog.warning("%s",str(e))

    # Add productions to the grammar
    for funcname, gram in pinfo.grammar:
        file, line, prodname, syms = gram
        try:
            grammar.add_production(prodname,syms,funcname,file,line)
        except GrammarError:
            e = sys.exc_info()[1]
            errorlog.error("%s",str(e))
            errors = 1

    # Set the grammar start symbols
    try:
        if start is None:
            grammar.set_start(pinfo.start)
        else:
            grammar.set_start(start)
    except GrammarError:
        e = sys.exc_info()[1]
        errorlog.error(str(e))
        errors = 1

    if errors:
        raise YaccError("Unable to build parser")

    # Verify the grammar structure
    undefined_symbols = grammar.undefined_symbols()
    for sym, prod in undefined_symbols:
        errorlog.error("%s:%d: Symbol '%s' used, but not defined as a token or a rule",prod.file,prod.line,sym)
        errors = 1

    unused_terminals = grammar.unused_terminals()
    if unused_terminals:
        debuglog.info("")
        debuglog.info("Unused terminals:")
        debuglog.info("")
        for term in unused_terminals:
            errorlog.warning("Token '%s' defined, but not used", term)
            debuglog.info(" %s", term)

    # Print out all productions to the debug log
    if debug:
        debuglog.info("")
        debuglog.info("Grammar")
        debuglog.info("")
        for n,p in enumerate(grammar.Productions):
            debuglog.info("Rule %-5d %s", n, p)

    # Find unused non-terminals
    unused_rules = grammar.unused_rules()
    for prod in unused_rules:
        errorlog.warning("%s:%d: Rule '%s' defined, but not used", prod.file, prod.line, prod.name)

    if len(unused_terminals) == 1:
        errorlog.warning("There is 1 unused token")
    if len(unused_terminals) > 1:
        errorlog.warning("There are %d unused tokens", len(unused_terminals))

    if len(unused_rules) == 1:
        errorlog.warning("There is 1 unused rule")
    if len(unused_rules) > 1:
        errorlog.warning("There are %d unused rules", len(unused_rules))

    if debug:
        debuglog.info("")
        debuglog.info("Terminals, with rules where they appear")
        debuglog.info("")
        terms = list(grammar.Terminals)
        terms.sort()
        for term in terms:
            debuglog.info("%-20s : %s", term, " ".join([str(s) for s in grammar.Terminals[term]]))

        debuglog.info("")
        debuglog.info("Nonterminals, with rules where they appear")
        debuglog.info("")
        nonterms = list(grammar.Nonterminals)
        nonterms.sort()
        for nonterm in nonterms:
            debuglog.info("%-20s : %s", nonterm, " ".join([str(s) for s in grammar.Nonterminals[nonterm]]))
        debuglog.info("")

    if check_recursion:
        unreachable = grammar.find_unreachable()
        for u in unreachable:
            errorlog.warning("Symbol '%s' is unreachable",u)

        infinite = grammar.infinite_cycles()
        for inf in infinite:
            errorlog.error("Infinite recursion detected for symbol '%s'", inf)
            errors = 1

    unused_prec = grammar.unused_precedence()
    for term, assoc in unused_prec:
        errorlog.error("Precedence rule '%s' defined for unknown symbol '%s'", assoc, term)
        errors = 1

    if errors:
        raise YaccError("Unable to build parser")

    # Run the LRGeneratedTable on the grammar
    if debug:
        errorlog.debug("Generating %s tables", method)

    lr = LRGeneratedTable(grammar,method,debuglog)

    if debug:
        num_sr = len(lr.sr_conflicts)

        # Report shift/reduce and reduce/reduce conflicts
        if num_sr == 1:
            errorlog.warning("1 shift/reduce conflict")
        elif num_sr > 1:
            errorlog.warning("%d shift/reduce conflicts", num_sr)

        num_rr = len(lr.rr_conflicts)
        if num_rr == 1:
            errorlog.warning("1 reduce/reduce conflict")
        elif num_rr > 1:
            errorlog.warning("%d reduce/reduce conflicts", num_rr)

    # Write out conflicts to the output file
    if debug and (lr.sr_conflicts or lr.rr_conflicts):
        debuglog.warning("")
        debuglog.warning("Conflicts:")
        debuglog.warning("")

        for state, tok, resolution in lr.sr_conflicts:
            debuglog.warning("shift/reduce conflict for %s in state %d resolved as %s", tok, state, resolution)

        already_reported = {}
        for state, rule, rejected in lr.rr_conflicts:
            if (state,id(rule),id(rejected)) in already_reported:
                continue
            debuglog.warning("reduce/reduce conflict in state %d resolved using rule (%s)", state, rule)
            debuglog.warning("rejected rule (%s) in state %d", rejected,state)
            errorlog.warning("reduce/reduce conflict in state %d resolved using rule (%s)", state, rule)
            errorlog.warning("rejected rule (%s) in state %d", rejected, state)
            already_reported[state,id(rule),id(rejected)] = 1

        warned_never = []
        for state, rule, rejected in lr.rr_conflicts:
            if not rejected.reduced and (rejected not in warned_never):
                debuglog.warning("Rule (%s) is never reduced", rejected)
                errorlog.warning("Rule (%s) is never reduced", rejected)
                warned_never.append(rejected)

    # Write the table file if requested
    if write_tables:
        lr.write_table(tabmodule,outputdir,signature)

    # Write a pickled version of the tables
    if picklefile:
        lr.pickle_table(picklefile,signature)

    # Build the parser
    lr.bind_callables(pinfo.pdict)
    parser = LRParser(lr,pinfo.error_func)

    parse = parser.parse
    return parser
apache-2.0
EllisV/dotfiles
scripts/spotify.py
1
1952
#!/usr/bin/python # -*- coding: utf-8 -*- import sys import dbus import argparse class SpotifyControl: def __init__(self): self.bus = dbus.SessionBus().get_object('org.mpris.MediaPlayer2.spotify', '/org/mpris/MediaPlayer2') def property(self, name): interface = dbus.Interface(self.bus, 'org.freedesktop.DBus.Properties') return interface.Get('org.mpris.MediaPlayer2.Player', name) def print_current(self): playing = self.property('PlaybackStatus') == 'Playing' if not playing: return metadata = self.property('Metadata') title = metadata['xesam:artist'][0] + ' - ' + metadata['xesam:title'] trimmed_title = (title[:35] + '...') if len(title) > 38 else title print u'#[fg=black,bg=colour35] ♫ #[fg=white,bg=colour236] {} '.format(trimmed_title).encode('utf-8') def send(self, signal): player = dbus.Interface(self.bus, 'org.mpris.MediaPlayer2.Player') getattr(player, signal)() def main(): parser = argparse.ArgumentParser() subparsers = parser.add_subparsers(metavar='action') parser_current = subparsers.add_parser('current', help='Print out a current song') parser_current.set_defaults(func=SpotifyControl.print_current) parser_toggle = subparsers.add_parser('toggle', help='Toggle between Play/Pause') parser_toggle.set_defaults(func=lambda spotify: spotify.send('PlayPause')) parser_next = subparsers.add_parser('next', help='Play a next song') parser_next.set_defaults(func=lambda spotify: spotify.send('Next')) parser_previous = subparsers.add_parser('previous', help='Play a previous song') parser_previous.set_defaults(func=lambda spotify: spotify.send('Previous')) args = parser.parse_args() try: spotify = SpotifyControl() args.func(spotify) except dbus.exceptions.DBusException: sys.exit(1) if __name__ == '__main__': main()
mit
aprefontaine/TMScheduler
userprefs/views.py
1
2273
from django.http import HttpResponseRedirect, HttpResponse
from django.core.urlresolvers import reverse
from django.template import RequestContext
from django.template import Context, loader
from google.appengine.api import users
from userprefs.models import *
from clubs.models import Club
import logging


def index(request):
    """Render the preferences page with the current user's club and phone.

    Anonymous visitors get a login URL; logged-in users get a logout URL
    plus their stored preferences (if any).
    """
    user = users.get_current_user()
    club = None
    phone = ""
    if not user:
        auth_url = users.create_login_url(request.path + '/prefs')
    else:
        auth_url = users.create_logout_url(request.path)
        userPrefs = get_userprefs(user.user_id())
        logging.info('userPrefs index: get_userprefs returns %s' % userPrefs)
        if userPrefs:
            logging.info('userPrefs: %s' % (userPrefs,))
            club = userPrefs.club
            phone = userPrefs.phone
        else:
            # should we ever get here? We should always get a userPref but
            # version of prefs may be 0.
            logging.info('userPrefs: no record yet. Why here??')
    clubList = Club.objects.all().order_by('Number')
    t = loader.get_template('userprefs/index.html')
    c = RequestContext(request, {
        'user': user,
        'auth_url': auth_url,
        'club': club,
        'phone': phone,
        'clubList': clubList
    })
    return HttpResponse(t.render(c))


def update(request):
    """Persist the club/phone preferences POSTed from the index form.

    BUG FIX: the original handler read ``except ():`` -- an empty exception
    tuple matches *no* exception, so the clause was dead code and a form
    submitted without the expected fields crashed with an unhandled
    KeyError (HTTP 500).  We now catch KeyError explicitly and bounce the
    user back to the form instead.
    """
    try:
        user = users.get_current_user()
        clubNumber = request.POST['clubNumber']
        phoneNumber = request.POST['phoneNumber']
    except KeyError:
        # Missing POST field(s): log and re-show the preferences form.
        logging.warning('userPrefs update: missing POST data')
        return HttpResponseRedirect(reverse('userprefs.views.index'))
    else:
        logging.info('Save UserPrefs: club=['+clubNumber+'], phoneNumber=['+phoneNumber+'], gid=['+user.user_id()+']')
        newUserPrefs = UserPrefs(version=1, club=clubNumber, phone=phoneNumber, googleOpenId=user.user_id())
        newUserPrefs.save()
        # Always return an HttpResponseRedirect after successfully dealing
        # with POST data. This prevents data from being posted twice if a
        # user hits the Back button.
        return HttpResponseRedirect(reverse('userprefs.views.index'))
bsd-3-clause
andris210296/andris-projeto
backend/venv/test/lib/python2.7/site-packages/pip/_vendor/html5lib/tokenizer.py
1710
76929
from __future__ import absolute_import, division, unicode_literals try: chr = unichr # flake8: noqa except NameError: pass from collections import deque from .constants import spaceCharacters from .constants import entities from .constants import asciiLetters, asciiUpper2Lower from .constants import digits, hexDigits, EOF from .constants import tokenTypes, tagTokenTypes from .constants import replacementCharacters from .inputstream import HTMLInputStream from .trie import Trie entitiesTrie = Trie(entities) class HTMLTokenizer(object): """ This class takes care of tokenizing HTML. * self.currentToken Holds the token that is currently being processed. * self.state Holds a reference to the method to be invoked... XXX * self.stream Points to HTMLInputStream object. """ def __init__(self, stream, encoding=None, parseMeta=True, useChardet=True, lowercaseElementName=True, lowercaseAttrName=True, parser=None): self.stream = HTMLInputStream(stream, encoding, parseMeta, useChardet) self.parser = parser # Perform case conversions? self.lowercaseElementName = lowercaseElementName self.lowercaseAttrName = lowercaseAttrName # Setup the initial tokenizer state self.escapeFlag = False self.lastFourChars = [] self.state = self.dataState self.escape = False # The current token being created self.currentToken = None super(HTMLTokenizer, self).__init__() def __iter__(self): """ This is where the magic happens. We do our usually processing through the states and when we have a token to return we yield the token which pauses processing until the next token is requested. """ self.tokenQueue = deque([]) # Start processing. When EOF is reached self.state will return False # instead of True and the loop will terminate. 
while self.state(): while self.stream.errors: yield {"type": tokenTypes["ParseError"], "data": self.stream.errors.pop(0)} while self.tokenQueue: yield self.tokenQueue.popleft() def consumeNumberEntity(self, isHex): """This function returns either U+FFFD or the character based on the decimal or hexadecimal representation. It also discards ";" if present. If not present self.tokenQueue.append({"type": tokenTypes["ParseError"]}) is invoked. """ allowed = digits radix = 10 if isHex: allowed = hexDigits radix = 16 charStack = [] # Consume all the characters that are in range while making sure we # don't hit an EOF. c = self.stream.char() while c in allowed and c is not EOF: charStack.append(c) c = self.stream.char() # Convert the set of characters consumed to an int. charAsInt = int("".join(charStack), radix) # Certain characters get replaced with others if charAsInt in replacementCharacters: char = replacementCharacters[charAsInt] self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "illegal-codepoint-for-numeric-entity", "datavars": {"charAsInt": charAsInt}}) elif ((0xD800 <= charAsInt <= 0xDFFF) or (charAsInt > 0x10FFFF)): char = "\uFFFD" self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "illegal-codepoint-for-numeric-entity", "datavars": {"charAsInt": charAsInt}}) else: # Should speed up this check somehow (e.g. 
move the set to a constant) if ((0x0001 <= charAsInt <= 0x0008) or (0x000E <= charAsInt <= 0x001F) or (0x007F <= charAsInt <= 0x009F) or (0xFDD0 <= charAsInt <= 0xFDEF) or charAsInt in frozenset([0x000B, 0xFFFE, 0xFFFF, 0x1FFFE, 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE, 0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, 0x5FFFF, 0x6FFFE, 0x6FFFF, 0x7FFFE, 0x7FFFF, 0x8FFFE, 0x8FFFF, 0x9FFFE, 0x9FFFF, 0xAFFFE, 0xAFFFF, 0xBFFFE, 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE, 0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, 0xFFFFF, 0x10FFFE, 0x10FFFF])): self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "illegal-codepoint-for-numeric-entity", "datavars": {"charAsInt": charAsInt}}) try: # Try/except needed as UCS-2 Python builds' unichar only works # within the BMP. char = chr(charAsInt) except ValueError: v = charAsInt - 0x10000 char = chr(0xD800 | (v >> 10)) + chr(0xDC00 | (v & 0x3FF)) # Discard the ; if present. Otherwise, put it back on the queue and # invoke parseError on parser. if c != ";": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "numeric-entity-without-semicolon"}) self.stream.unget(c) return char def consumeEntity(self, allowedChar=None, fromAttribute=False): # Initialise to the default output for when no entity is matched output = "&" charStack = [self.stream.char()] if (charStack[0] in spaceCharacters or charStack[0] in (EOF, "<", "&") or (allowedChar is not None and allowedChar == charStack[0])): self.stream.unget(charStack[0]) elif charStack[0] == "#": # Read the next character to see if it's hex or decimal hex = False charStack.append(self.stream.char()) if charStack[-1] in ("x", "X"): hex = True charStack.append(self.stream.char()) # charStack[-1] should be the first digit if (hex and charStack[-1] in hexDigits) \ or (not hex and charStack[-1] in digits): # At least one digit found, so consume the whole number self.stream.unget(charStack[-1]) output = self.consumeNumberEntity(hex) else: # No digits found self.tokenQueue.append({"type": 
tokenTypes["ParseError"], "data": "expected-numeric-entity"}) self.stream.unget(charStack.pop()) output = "&" + "".join(charStack) else: # At this point in the process might have named entity. Entities # are stored in the global variable "entities". # # Consume characters and compare to these to a substring of the # entity names in the list until the substring no longer matches. while (charStack[-1] is not EOF): if not entitiesTrie.has_keys_with_prefix("".join(charStack)): break charStack.append(self.stream.char()) # At this point we have a string that starts with some characters # that may match an entity # Try to find the longest entity the string will match to take care # of &noti for instance. try: entityName = entitiesTrie.longest_prefix("".join(charStack[:-1])) entityLength = len(entityName) except KeyError: entityName = None if entityName is not None: if entityName[-1] != ";": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "named-entity-without-semicolon"}) if (entityName[-1] != ";" and fromAttribute and (charStack[entityLength] in asciiLetters or charStack[entityLength] in digits or charStack[entityLength] == "=")): self.stream.unget(charStack.pop()) output = "&" + "".join(charStack) else: output = entities[entityName] self.stream.unget(charStack.pop()) output += "".join(charStack[entityLength:]) else: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-named-entity"}) self.stream.unget(charStack.pop()) output = "&" + "".join(charStack) if fromAttribute: self.currentToken["data"][-1][1] += output else: if output in spaceCharacters: tokenType = "SpaceCharacters" else: tokenType = "Characters" self.tokenQueue.append({"type": tokenTypes[tokenType], "data": output}) def processEntityInAttribute(self, allowedChar): """This method replaces the need for "entityInAttributeValueState". 
""" self.consumeEntity(allowedChar=allowedChar, fromAttribute=True) def emitCurrentToken(self): """This method is a generic handler for emitting the tags. It also sets the state to "data" because that's what's needed after a token has been emitted. """ token = self.currentToken # Add token to the queue to be yielded if (token["type"] in tagTokenTypes): if self.lowercaseElementName: token["name"] = token["name"].translate(asciiUpper2Lower) if token["type"] == tokenTypes["EndTag"]: if token["data"]: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "attributes-in-end-tag"}) if token["selfClosing"]: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "self-closing-flag-on-end-tag"}) self.tokenQueue.append(token) self.state = self.dataState # Below are the various tokenizer states worked out. def dataState(self): data = self.stream.char() if data == "&": self.state = self.entityDataState elif data == "<": self.state = self.tagOpenState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\u0000"}) elif data is EOF: # Tokenization ends. return False elif data in spaceCharacters: # Directly after emitting a token you switch back to the "data # state". At that point spaceCharacters are important so they are # emitted separately. 
self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data": data + self.stream.charsUntil(spaceCharacters, True)}) # No need to update lastFourChars here, since the first space will # have already been appended to lastFourChars and will have broken # any <!-- or --> sequences else: chars = self.stream.charsUntil(("&", "<", "\u0000")) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data + chars}) return True def entityDataState(self): self.consumeEntity() self.state = self.dataState return True def rcdataState(self): data = self.stream.char() if data == "&": self.state = self.characterReferenceInRcdata elif data == "<": self.state = self.rcdataLessThanSignState elif data == EOF: # Tokenization ends. return False elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) elif data in spaceCharacters: # Directly after emitting a token you switch back to the "data # state". At that point spaceCharacters are important so they are # emitted separately. self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data": data + self.stream.charsUntil(spaceCharacters, True)}) # No need to update lastFourChars here, since the first space will # have already been appended to lastFourChars and will have broken # any <!-- or --> sequences else: chars = self.stream.charsUntil(("&", "<", "\u0000")) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data + chars}) return True def characterReferenceInRcdata(self): self.consumeEntity() self.state = self.rcdataState return True def rawtextState(self): data = self.stream.char() if data == "<": self.state = self.rawtextLessThanSignState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) elif data == EOF: # Tokenization ends. 
return False else: chars = self.stream.charsUntil(("<", "\u0000")) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data + chars}) return True def scriptDataState(self): data = self.stream.char() if data == "<": self.state = self.scriptDataLessThanSignState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) elif data == EOF: # Tokenization ends. return False else: chars = self.stream.charsUntil(("<", "\u0000")) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data + chars}) return True def plaintextState(self): data = self.stream.char() if data == EOF: # Tokenization ends. return False elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data + self.stream.charsUntil("\u0000")}) return True def tagOpenState(self): data = self.stream.char() if data == "!": self.state = self.markupDeclarationOpenState elif data == "/": self.state = self.closeTagOpenState elif data in asciiLetters: self.currentToken = {"type": tokenTypes["StartTag"], "name": data, "data": [], "selfClosing": False, "selfClosingAcknowledged": False} self.state = self.tagNameState elif data == ">": # XXX In theory it could be something besides a tag name. But # do we really care? self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-tag-name-but-got-right-bracket"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<>"}) self.state = self.dataState elif data == "?": # XXX In theory it could be something besides a tag name. But # do we really care? 
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-tag-name-but-got-question-mark"}) self.stream.unget(data) self.state = self.bogusCommentState else: # XXX self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-tag-name"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) self.stream.unget(data) self.state = self.dataState return True def closeTagOpenState(self): data = self.stream.char() if data in asciiLetters: self.currentToken = {"type": tokenTypes["EndTag"], "name": data, "data": [], "selfClosing": False} self.state = self.tagNameState elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-closing-tag-but-got-right-bracket"}) self.state = self.dataState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-closing-tag-but-got-eof"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"}) self.state = self.dataState else: # XXX data can be _'_... 
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-closing-tag-but-got-char", "datavars": {"data": data}}) self.stream.unget(data) self.state = self.bogusCommentState return True def tagNameState(self): data = self.stream.char() if data in spaceCharacters: self.state = self.beforeAttributeNameState elif data == ">": self.emitCurrentToken() elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-tag-name"}) self.state = self.dataState elif data == "/": self.state = self.selfClosingStartTagState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["name"] += "\uFFFD" else: self.currentToken["name"] += data # (Don't use charsUntil here, because tag names are # very short and it's faster to not do anything fancy) return True def rcdataLessThanSignState(self): data = self.stream.char() if data == "/": self.temporaryBuffer = "" self.state = self.rcdataEndTagOpenState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) self.stream.unget(data) self.state = self.rcdataState return True def rcdataEndTagOpenState(self): data = self.stream.char() if data in asciiLetters: self.temporaryBuffer += data self.state = self.rcdataEndTagNameState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"}) self.stream.unget(data) self.state = self.rcdataState return True def rcdataEndTagNameState(self): appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower() data = self.stream.char() if data in spaceCharacters and appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.state = self.beforeAttributeNameState elif data == "/" and appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.state = 
self.selfClosingStartTagState elif data == ">" and appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.emitCurrentToken() self.state = self.dataState elif data in asciiLetters: self.temporaryBuffer += data else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</" + self.temporaryBuffer}) self.stream.unget(data) self.state = self.rcdataState return True def rawtextLessThanSignState(self): data = self.stream.char() if data == "/": self.temporaryBuffer = "" self.state = self.rawtextEndTagOpenState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) self.stream.unget(data) self.state = self.rawtextState return True def rawtextEndTagOpenState(self): data = self.stream.char() if data in asciiLetters: self.temporaryBuffer += data self.state = self.rawtextEndTagNameState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"}) self.stream.unget(data) self.state = self.rawtextState return True def rawtextEndTagNameState(self): appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower() data = self.stream.char() if data in spaceCharacters and appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.state = self.beforeAttributeNameState elif data == "/" and appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.state = self.selfClosingStartTagState elif data == ">" and appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.emitCurrentToken() self.state = self.dataState elif data in asciiLetters: self.temporaryBuffer += data else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</" + self.temporaryBuffer}) self.stream.unget(data) self.state = 
self.rawtextState return True def scriptDataLessThanSignState(self): data = self.stream.char() if data == "/": self.temporaryBuffer = "" self.state = self.scriptDataEndTagOpenState elif data == "!": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<!"}) self.state = self.scriptDataEscapeStartState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) self.stream.unget(data) self.state = self.scriptDataState return True def scriptDataEndTagOpenState(self): data = self.stream.char() if data in asciiLetters: self.temporaryBuffer += data self.state = self.scriptDataEndTagNameState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"}) self.stream.unget(data) self.state = self.scriptDataState return True def scriptDataEndTagNameState(self): appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower() data = self.stream.char() if data in spaceCharacters and appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.state = self.beforeAttributeNameState elif data == "/" and appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.state = self.selfClosingStartTagState elif data == ">" and appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.emitCurrentToken() self.state = self.dataState elif data in asciiLetters: self.temporaryBuffer += data else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</" + self.temporaryBuffer}) self.stream.unget(data) self.state = self.scriptDataState return True def scriptDataEscapeStartState(self): data = self.stream.char() if data == "-": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) self.state = self.scriptDataEscapeStartDashState else: self.stream.unget(data) 
self.state = self.scriptDataState return True def scriptDataEscapeStartDashState(self): data = self.stream.char() if data == "-": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) self.state = self.scriptDataEscapedDashDashState else: self.stream.unget(data) self.state = self.scriptDataState return True def scriptDataEscapedState(self): data = self.stream.char() if data == "-": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) self.state = self.scriptDataEscapedDashState elif data == "<": self.state = self.scriptDataEscapedLessThanSignState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) elif data == EOF: self.state = self.dataState else: chars = self.stream.charsUntil(("<", "-", "\u0000")) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data + chars}) return True def scriptDataEscapedDashState(self): data = self.stream.char() if data == "-": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) self.state = self.scriptDataEscapedDashDashState elif data == "<": self.state = self.scriptDataEscapedLessThanSignState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) self.state = self.scriptDataEscapedState elif data == EOF: self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) self.state = self.scriptDataEscapedState return True def scriptDataEscapedDashDashState(self): data = self.stream.char() if data == "-": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) elif data == "<": self.state = self.scriptDataEscapedLessThanSignState elif data == ">": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"}) self.state = 
self.scriptDataState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) self.state = self.scriptDataEscapedState elif data == EOF: self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) self.state = self.scriptDataEscapedState return True def scriptDataEscapedLessThanSignState(self): data = self.stream.char() if data == "/": self.temporaryBuffer = "" self.state = self.scriptDataEscapedEndTagOpenState elif data in asciiLetters: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<" + data}) self.temporaryBuffer = data self.state = self.scriptDataDoubleEscapeStartState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) self.stream.unget(data) self.state = self.scriptDataEscapedState return True def scriptDataEscapedEndTagOpenState(self): data = self.stream.char() if data in asciiLetters: self.temporaryBuffer = data self.state = self.scriptDataEscapedEndTagNameState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"}) self.stream.unget(data) self.state = self.scriptDataEscapedState return True def scriptDataEscapedEndTagNameState(self): appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower() data = self.stream.char() if data in spaceCharacters and appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.state = self.beforeAttributeNameState elif data == "/" and appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.state = self.selfClosingStartTagState elif data == ">" and appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} 
self.emitCurrentToken() self.state = self.dataState elif data in asciiLetters: self.temporaryBuffer += data else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</" + self.temporaryBuffer}) self.stream.unget(data) self.state = self.scriptDataEscapedState return True def scriptDataDoubleEscapeStartState(self): data = self.stream.char() if data in (spaceCharacters | frozenset(("/", ">"))): self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) if self.temporaryBuffer.lower() == "script": self.state = self.scriptDataDoubleEscapedState else: self.state = self.scriptDataEscapedState elif data in asciiLetters: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) self.temporaryBuffer += data else: self.stream.unget(data) self.state = self.scriptDataEscapedState return True def scriptDataDoubleEscapedState(self): data = self.stream.char() if data == "-": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) self.state = self.scriptDataDoubleEscapedDashState elif data == "<": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) self.state = self.scriptDataDoubleEscapedLessThanSignState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) elif data == EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-script-in-script"}) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) return True def scriptDataDoubleEscapedDashState(self): data = self.stream.char() if data == "-": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) self.state = self.scriptDataDoubleEscapedDashDashState elif data == "<": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) self.state = self.scriptDataDoubleEscapedLessThanSignState elif data == "\u0000": 
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) self.state = self.scriptDataDoubleEscapedState elif data == EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-script-in-script"}) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) self.state = self.scriptDataDoubleEscapedState return True def scriptDataDoubleEscapedDashDashState(self): data = self.stream.char() if data == "-": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) elif data == "<": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) self.state = self.scriptDataDoubleEscapedLessThanSignState elif data == ">": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"}) self.state = self.scriptDataState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) self.state = self.scriptDataDoubleEscapedState elif data == EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-script-in-script"}) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) self.state = self.scriptDataDoubleEscapedState return True def scriptDataDoubleEscapedLessThanSignState(self): data = self.stream.char() if data == "/": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "/"}) self.temporaryBuffer = "" self.state = self.scriptDataDoubleEscapeEndState else: self.stream.unget(data) self.state = self.scriptDataDoubleEscapedState return True def scriptDataDoubleEscapeEndState(self): data = self.stream.char() if data in (spaceCharacters | frozenset(("/", ">"))): self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) if self.temporaryBuffer.lower() == 
"script": self.state = self.scriptDataEscapedState else: self.state = self.scriptDataDoubleEscapedState elif data in asciiLetters: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) self.temporaryBuffer += data else: self.stream.unget(data) self.state = self.scriptDataDoubleEscapedState return True def beforeAttributeNameState(self): data = self.stream.char() if data in spaceCharacters: self.stream.charsUntil(spaceCharacters, True) elif data in asciiLetters: self.currentToken["data"].append([data, ""]) self.state = self.attributeNameState elif data == ">": self.emitCurrentToken() elif data == "/": self.state = self.selfClosingStartTagState elif data in ("'", '"', "=", "<"): self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-character-in-attribute-name"}) self.currentToken["data"].append([data, ""]) self.state = self.attributeNameState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"].append(["\uFFFD", ""]) self.state = self.attributeNameState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-attribute-name-but-got-eof"}) self.state = self.dataState else: self.currentToken["data"].append([data, ""]) self.state = self.attributeNameState return True def attributeNameState(self): data = self.stream.char() leavingThisState = True emitToken = False if data == "=": self.state = self.beforeAttributeValueState elif data in asciiLetters: self.currentToken["data"][-1][0] += data +\ self.stream.charsUntil(asciiLetters, True) leavingThisState = False elif data == ">": # XXX If we emit here the attributes are converted to a dict # without being checked and when the code below runs we error # because data is a dict not a list emitToken = True elif data in spaceCharacters: self.state = self.afterAttributeNameState elif data == "/": self.state = self.selfClosingStartTagState elif data == "\u0000": 
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"][-1][0] += "\uFFFD" leavingThisState = False elif data in ("'", '"', "<"): self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-character-in-attribute-name"}) self.currentToken["data"][-1][0] += data leavingThisState = False elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-attribute-name"}) self.state = self.dataState else: self.currentToken["data"][-1][0] += data leavingThisState = False if leavingThisState: # Attributes are not dropped at this stage. That happens when the # start tag token is emitted so values can still be safely appended # to attributes, but we do want to report the parse error in time. if self.lowercaseAttrName: self.currentToken["data"][-1][0] = ( self.currentToken["data"][-1][0].translate(asciiUpper2Lower)) for name, value in self.currentToken["data"][:-1]: if self.currentToken["data"][-1][0] == name: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "duplicate-attribute"}) break # XXX Fix for above XXX if emitToken: self.emitCurrentToken() return True def afterAttributeNameState(self): data = self.stream.char() if data in spaceCharacters: self.stream.charsUntil(spaceCharacters, True) elif data == "=": self.state = self.beforeAttributeValueState elif data == ">": self.emitCurrentToken() elif data in asciiLetters: self.currentToken["data"].append([data, ""]) self.state = self.attributeNameState elif data == "/": self.state = self.selfClosingStartTagState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"].append(["\uFFFD", ""]) self.state = self.attributeNameState elif data in ("'", '"', "<"): self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-character-after-attribute-name"}) self.currentToken["data"].append([data, ""]) self.state = 
self.attributeNameState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-end-of-tag-but-got-eof"}) self.state = self.dataState else: self.currentToken["data"].append([data, ""]) self.state = self.attributeNameState return True def beforeAttributeValueState(self): data = self.stream.char() if data in spaceCharacters: self.stream.charsUntil(spaceCharacters, True) elif data == "\"": self.state = self.attributeValueDoubleQuotedState elif data == "&": self.state = self.attributeValueUnQuotedState self.stream.unget(data) elif data == "'": self.state = self.attributeValueSingleQuotedState elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-attribute-value-but-got-right-bracket"}) self.emitCurrentToken() elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"][-1][1] += "\uFFFD" self.state = self.attributeValueUnQuotedState elif data in ("=", "<", "`"): self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "equals-in-unquoted-attribute-value"}) self.currentToken["data"][-1][1] += data self.state = self.attributeValueUnQuotedState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-attribute-value-but-got-eof"}) self.state = self.dataState else: self.currentToken["data"][-1][1] += data self.state = self.attributeValueUnQuotedState return True def attributeValueDoubleQuotedState(self): data = self.stream.char() if data == "\"": self.state = self.afterAttributeValueState elif data == "&": self.processEntityInAttribute('"') elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"][-1][1] += "\uFFFD" elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-attribute-value-double-quote"}) self.state = self.dataState else: 
self.currentToken["data"][-1][1] += data +\ self.stream.charsUntil(("\"", "&", "\u0000")) return True def attributeValueSingleQuotedState(self): data = self.stream.char() if data == "'": self.state = self.afterAttributeValueState elif data == "&": self.processEntityInAttribute("'") elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"][-1][1] += "\uFFFD" elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-attribute-value-single-quote"}) self.state = self.dataState else: self.currentToken["data"][-1][1] += data +\ self.stream.charsUntil(("'", "&", "\u0000")) return True def attributeValueUnQuotedState(self): data = self.stream.char() if data in spaceCharacters: self.state = self.beforeAttributeNameState elif data == "&": self.processEntityInAttribute(">") elif data == ">": self.emitCurrentToken() elif data in ('"', "'", "=", "<", "`"): self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-character-in-unquoted-attribute-value"}) self.currentToken["data"][-1][1] += data elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"][-1][1] += "\uFFFD" elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-attribute-value-no-quotes"}) self.state = self.dataState else: self.currentToken["data"][-1][1] += data + self.stream.charsUntil( frozenset(("&", ">", '"', "'", "=", "<", "`", "\u0000")) | spaceCharacters) return True def afterAttributeValueState(self): data = self.stream.char() if data in spaceCharacters: self.state = self.beforeAttributeNameState elif data == ">": self.emitCurrentToken() elif data == "/": self.state = self.selfClosingStartTagState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-EOF-after-attribute-value"}) self.stream.unget(data) self.state = 
self.dataState else: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-character-after-attribute-value"}) self.stream.unget(data) self.state = self.beforeAttributeNameState return True def selfClosingStartTagState(self): data = self.stream.char() if data == ">": self.currentToken["selfClosing"] = True self.emitCurrentToken() elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-EOF-after-solidus-in-tag"}) self.stream.unget(data) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-character-after-solidus-in-tag"}) self.stream.unget(data) self.state = self.beforeAttributeNameState return True def bogusCommentState(self): # Make a new comment token and give it as value all the characters # until the first > or EOF (charsUntil checks for EOF automatically) # and emit it. data = self.stream.charsUntil(">") data = data.replace("\u0000", "\uFFFD") self.tokenQueue.append( {"type": tokenTypes["Comment"], "data": data}) # Eat the character directly after the bogus comment which is either a # ">" or an EOF. 
self.stream.char() self.state = self.dataState return True def markupDeclarationOpenState(self): charStack = [self.stream.char()] if charStack[-1] == "-": charStack.append(self.stream.char()) if charStack[-1] == "-": self.currentToken = {"type": tokenTypes["Comment"], "data": ""} self.state = self.commentStartState return True elif charStack[-1] in ('d', 'D'): matched = True for expected in (('o', 'O'), ('c', 'C'), ('t', 'T'), ('y', 'Y'), ('p', 'P'), ('e', 'E')): charStack.append(self.stream.char()) if charStack[-1] not in expected: matched = False break if matched: self.currentToken = {"type": tokenTypes["Doctype"], "name": "", "publicId": None, "systemId": None, "correct": True} self.state = self.doctypeState return True elif (charStack[-1] == "[" and self.parser is not None and self.parser.tree.openElements and self.parser.tree.openElements[-1].namespace != self.parser.tree.defaultNamespace): matched = True for expected in ["C", "D", "A", "T", "A", "["]: charStack.append(self.stream.char()) if charStack[-1] != expected: matched = False break if matched: self.state = self.cdataSectionState return True self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-dashes-or-doctype"}) while charStack: self.stream.unget(charStack.pop()) self.state = self.bogusCommentState return True def commentStartState(self): data = self.stream.char() if data == "-": self.state = self.commentStartDashState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"] += "\uFFFD" elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "incorrect-comment"}) self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-comment"}) self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["data"] += data self.state = self.commentState 
return True def commentStartDashState(self): data = self.stream.char() if data == "-": self.state = self.commentEndState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"] += "-\uFFFD" elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "incorrect-comment"}) self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-comment"}) self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["data"] += "-" + data self.state = self.commentState return True def commentState(self): data = self.stream.char() if data == "-": self.state = self.commentEndDashState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"] += "\uFFFD" elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-comment"}) self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["data"] += data + \ self.stream.charsUntil(("-", "\u0000")) return True def commentEndDashState(self): data = self.stream.char() if data == "-": self.state = self.commentEndState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"] += "-\uFFFD" self.state = self.commentState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-comment-end-dash"}) self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["data"] += "-" + data self.state = self.commentState return True def commentEndState(self): data = self.stream.char() if data == ">": self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data == "\u0000": self.tokenQueue.append({"type": 
tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"] += "--\uFFFD" self.state = self.commentState elif data == "!": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-bang-after-double-dash-in-comment"}) self.state = self.commentEndBangState elif data == "-": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-dash-after-double-dash-in-comment"}) self.currentToken["data"] += data elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-comment-double-dash"}) self.tokenQueue.append(self.currentToken) self.state = self.dataState else: # XXX self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-comment"}) self.currentToken["data"] += "--" + data self.state = self.commentState return True def commentEndBangState(self): data = self.stream.char() if data == ">": self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data == "-": self.currentToken["data"] += "--!" self.state = self.commentEndDashState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"] += "--!\uFFFD" self.state = self.commentState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-comment-end-bang-state"}) self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["data"] += "--!" 
+ data self.state = self.commentState return True def doctypeState(self): data = self.stream.char() if data in spaceCharacters: self.state = self.beforeDoctypeNameState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-doctype-name-but-got-eof"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "need-space-after-doctype"}) self.stream.unget(data) self.state = self.beforeDoctypeNameState return True def beforeDoctypeNameState(self): data = self.stream.char() if data in spaceCharacters: pass elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-doctype-name-but-got-right-bracket"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["name"] = "\uFFFD" self.state = self.doctypeNameState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-doctype-name-but-got-eof"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["name"] = data self.state = self.doctypeNameState return True def doctypeNameState(self): data = self.stream.char() if data in spaceCharacters: self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower) self.state = self.afterDoctypeNameState elif data == ">": self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower) self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["name"] += "\uFFFD" self.state = self.doctypeNameState elif data is EOF: 
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype-name"}) self.currentToken["correct"] = False self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower) self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["name"] += data return True def afterDoctypeNameState(self): data = self.stream.char() if data in spaceCharacters: pass elif data == ">": self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: self.currentToken["correct"] = False self.stream.unget(data) self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.tokenQueue.append(self.currentToken) self.state = self.dataState else: if data in ("p", "P"): matched = True for expected in (("u", "U"), ("b", "B"), ("l", "L"), ("i", "I"), ("c", "C")): data = self.stream.char() if data not in expected: matched = False break if matched: self.state = self.afterDoctypePublicKeywordState return True elif data in ("s", "S"): matched = True for expected in (("y", "Y"), ("s", "S"), ("t", "T"), ("e", "E"), ("m", "M")): data = self.stream.char() if data not in expected: matched = False break if matched: self.state = self.afterDoctypeSystemKeywordState return True # All the characters read before the current 'data' will be # [a-zA-Z], so they're garbage in the bogus doctype and can be # discarded; only the latest character might be '>' or EOF # and needs to be ungetted self.stream.unget(data) self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-space-or-right-bracket-in-doctype", "datavars": {"data": data}}) self.currentToken["correct"] = False self.state = self.bogusDoctypeState return True def afterDoctypePublicKeywordState(self): data = self.stream.char() if data in spaceCharacters: self.state = self.beforeDoctypePublicIdentifierState elif data in ("'", '"'): self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": 
"unexpected-char-in-doctype"}) self.stream.unget(data) self.state = self.beforeDoctypePublicIdentifierState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.stream.unget(data) self.state = self.beforeDoctypePublicIdentifierState return True def beforeDoctypePublicIdentifierState(self): data = self.stream.char() if data in spaceCharacters: pass elif data == "\"": self.currentToken["publicId"] = "" self.state = self.doctypePublicIdentifierDoubleQuotedState elif data == "'": self.currentToken["publicId"] = "" self.state = self.doctypePublicIdentifierSingleQuotedState elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-end-of-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-doctype"}) self.currentToken["correct"] = False self.state = self.bogusDoctypeState return True def doctypePublicIdentifierDoubleQuotedState(self): data = self.stream.char() if data == "\"": self.state = self.afterDoctypePublicIdentifierState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["publicId"] += "\uFFFD" elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-end-of-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) 
self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["publicId"] += data return True def doctypePublicIdentifierSingleQuotedState(self): data = self.stream.char() if data == "'": self.state = self.afterDoctypePublicIdentifierState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["publicId"] += "\uFFFD" elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-end-of-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["publicId"] += data return True def afterDoctypePublicIdentifierState(self): data = self.stream.char() if data in spaceCharacters: self.state = self.betweenDoctypePublicAndSystemIdentifiersState elif data == ">": self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data == '"': self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-doctype"}) self.currentToken["systemId"] = "" self.state = self.doctypeSystemIdentifierDoubleQuotedState elif data == "'": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-doctype"}) self.currentToken["systemId"] = "" self.state = self.doctypeSystemIdentifierSingleQuotedState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-doctype"}) self.currentToken["correct"] = False self.state = 
self.bogusDoctypeState return True def betweenDoctypePublicAndSystemIdentifiersState(self): data = self.stream.char() if data in spaceCharacters: pass elif data == ">": self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data == '"': self.currentToken["systemId"] = "" self.state = self.doctypeSystemIdentifierDoubleQuotedState elif data == "'": self.currentToken["systemId"] = "" self.state = self.doctypeSystemIdentifierSingleQuotedState elif data == EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-doctype"}) self.currentToken["correct"] = False self.state = self.bogusDoctypeState return True def afterDoctypeSystemKeywordState(self): data = self.stream.char() if data in spaceCharacters: self.state = self.beforeDoctypeSystemIdentifierState elif data in ("'", '"'): self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-doctype"}) self.stream.unget(data) self.state = self.beforeDoctypeSystemIdentifierState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.stream.unget(data) self.state = self.beforeDoctypeSystemIdentifierState return True def beforeDoctypeSystemIdentifierState(self): data = self.stream.char() if data in spaceCharacters: pass elif data == "\"": self.currentToken["systemId"] = "" self.state = self.doctypeSystemIdentifierDoubleQuotedState elif data == "'": self.currentToken["systemId"] = "" self.state = self.doctypeSystemIdentifierSingleQuotedState elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-doctype"}) self.currentToken["correct"] = False 
self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-doctype"}) self.currentToken["correct"] = False self.state = self.bogusDoctypeState return True def doctypeSystemIdentifierDoubleQuotedState(self): data = self.stream.char() if data == "\"": self.state = self.afterDoctypeSystemIdentifierState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["systemId"] += "\uFFFD" elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-end-of-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["systemId"] += data return True def doctypeSystemIdentifierSingleQuotedState(self): data = self.stream.char() if data == "'": self.state = self.afterDoctypeSystemIdentifierState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["systemId"] += "\uFFFD" elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-end-of-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: 
self.currentToken["systemId"] += data return True def afterDoctypeSystemIdentifierState(self): data = self.stream.char() if data in spaceCharacters: pass elif data == ">": self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-doctype"}) self.state = self.bogusDoctypeState return True def bogusDoctypeState(self): data = self.stream.char() if data == ">": self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: # XXX EMIT self.stream.unget(data) self.tokenQueue.append(self.currentToken) self.state = self.dataState else: pass return True def cdataSectionState(self): data = [] while True: data.append(self.stream.charsUntil("]")) data.append(self.stream.charsUntil(">")) char = self.stream.char() if char == EOF: break else: assert char == ">" if data[-1][-2:] == "]]": data[-1] = data[-1][:-2] break else: data.append(char) data = "".join(data) # Deal with null here rather than in the parser nullCount = data.count("\u0000") if nullCount > 0: for i in range(nullCount): self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) data = data.replace("\u0000", "\uFFFD") if data: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) self.state = self.dataState return True
mit
davidharrigan/django
tests/auth_tests/test_management.py
37
27025
from __future__ import unicode_literals import locale import sys from datetime import date from django.apps import apps from django.contrib.auth import management, models from django.contrib.auth.checks import check_user_model from django.contrib.auth.management import create_permissions from django.contrib.auth.management.commands import ( changepassword, createsuperuser, ) from django.contrib.auth.models import Group, User from django.contrib.auth.tests.custom_user import CustomUser from django.contrib.contenttypes.models import ContentType from django.core import checks, exceptions from django.core.management import call_command from django.core.management.base import CommandError from django.test import ( SimpleTestCase, TestCase, override_settings, override_system_checks, ) from django.utils import six from django.utils.encoding import force_str from django.utils.translation import ugettext_lazy as _ from .models import ( CustomUserBadRequiredFields, CustomUserNonListRequiredFields, CustomUserNonUniqueUsername, CustomUserWithFK, Email, ) def mock_inputs(inputs): """ Decorator to temporarily replace input/getpass to allow interactive createsuperuser. """ def inner(test_func): def wrapped(*args): class mock_getpass: @staticmethod def getpass(prompt=b'Password: ', stream=None): if six.PY2: # getpass on Windows only supports prompt as bytestring (#19807) assert isinstance(prompt, six.binary_type) if callable(inputs['password']): return inputs['password']() return inputs['password'] def mock_input(prompt): # prompt should be encoded in Python 2. This line will raise an # Exception if prompt contains unencoded non-ASCII on Python 2. 
prompt = str(prompt) assert str('__proxy__') not in prompt response = '' for key, val in inputs.items(): if force_str(key) in prompt.lower(): response = val break return response old_getpass = createsuperuser.getpass old_input = createsuperuser.input createsuperuser.getpass = mock_getpass createsuperuser.input = mock_input try: test_func(*args) finally: createsuperuser.getpass = old_getpass createsuperuser.input = old_input return wrapped return inner class MockTTY(object): """ A fake stdin object that pretends to be a TTY to be used in conjunction with mock_inputs. """ def isatty(self): return True class GetDefaultUsernameTestCase(TestCase): def setUp(self): self.old_get_system_username = management.get_system_username def tearDown(self): management.get_system_username = self.old_get_system_username def test_actual_implementation(self): self.assertIsInstance(management.get_system_username(), six.text_type) def test_simple(self): management.get_system_username = lambda: 'joe' self.assertEqual(management.get_default_username(), 'joe') def test_existing(self): models.User.objects.create(username='joe') management.get_system_username = lambda: 'joe' self.assertEqual(management.get_default_username(), '') self.assertEqual( management.get_default_username(check_db=False), 'joe') def test_i18n(self): # 'Julia' with accented 'u': management.get_system_username = lambda: 'J\xfalia' self.assertEqual(management.get_default_username(), 'julia') @override_settings(AUTH_PASSWORD_VALIDATORS=[ {'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator'}, ]) class ChangepasswordManagementCommandTestCase(TestCase): def setUp(self): self.user = models.User.objects.create_user(username='joe', password='qwerty') self.stdout = six.StringIO() self.stderr = six.StringIO() def tearDown(self): self.stdout.close() self.stderr.close() def test_that_changepassword_command_changes_joes_password(self): "Executing the changepassword management command should change joe's 
password" self.assertTrue(self.user.check_password('qwerty')) command = changepassword.Command() command._get_pass = lambda *args: 'not qwerty' command.execute(username="joe", stdout=self.stdout) command_output = self.stdout.getvalue().strip() self.assertEqual( command_output, "Changing password for user 'joe'\nPassword changed successfully for user 'joe'" ) self.assertTrue(models.User.objects.get(username="joe").check_password("not qwerty")) def test_that_max_tries_exits_1(self): """ A CommandError should be thrown by handle() if the user enters in mismatched passwords three times. """ command = changepassword.Command() command._get_pass = lambda *args: str(args) or 'foo' with self.assertRaises(CommandError): command.execute(username="joe", stdout=self.stdout, stderr=self.stderr) def test_password_validation(self): """ A CommandError should be raised if the user enters in passwords which fail validation three times. """ command = changepassword.Command() command._get_pass = lambda *args: '1234567890' abort_msg = "Aborting password change for user 'joe' after 3 attempts" with self.assertRaisesMessage(CommandError, abort_msg): command.execute(username="joe", stdout=self.stdout, stderr=self.stderr) self.assertIn('This password is entirely numeric.', self.stderr.getvalue()) def test_that_changepassword_command_works_with_nonascii_output(self): """ #21627 -- Executing the changepassword management command should allow non-ASCII characters from the User object representation. 
""" # 'Julia' with accented 'u': models.User.objects.create_user(username='J\xfalia', password='qwerty') command = changepassword.Command() command._get_pass = lambda *args: 'not qwerty' command.execute(username="J\xfalia", stdout=self.stdout) @override_settings( SILENCED_SYSTEM_CHECKS=['fields.W342'], # ForeignKey(unique=True) AUTH_PASSWORD_VALIDATORS=[{'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator'}], ) class CreatesuperuserManagementCommandTestCase(TestCase): def test_basic_usage(self): "Check the operation of the createsuperuser management command" # We can use the management command to create a superuser new_io = six.StringIO() call_command( "createsuperuser", interactive=False, username="joe", email="joe@somewhere.org", stdout=new_io ) command_output = new_io.getvalue().strip() self.assertEqual(command_output, 'Superuser created successfully.') u = User.objects.get(username="joe") self.assertEqual(u.email, 'joe@somewhere.org') # created password should be unusable self.assertFalse(u.has_usable_password()) @mock_inputs({'password': "nopasswd"}) def test_nolocale(self): """ Check that createsuperuser does not break when no locale is set. See ticket #16017. 
""" old_getdefaultlocale = locale.getdefaultlocale try: # Temporarily remove locale information locale.getdefaultlocale = lambda: (None, None) # Call the command in this new environment call_command( "createsuperuser", interactive=True, username="nolocale@somewhere.org", email="nolocale@somewhere.org", verbosity=0, stdin=MockTTY(), ) except TypeError: self.fail("createsuperuser fails if the OS provides no information about the current locale") finally: # Re-apply locale information locale.getdefaultlocale = old_getdefaultlocale # If we were successful, a user should have been created u = User.objects.get(username="nolocale@somewhere.org") self.assertEqual(u.email, 'nolocale@somewhere.org') @mock_inputs({ 'password': "nopasswd", 'u\u017eivatel': 'foo', # username (cz) 'email': 'nolocale@somewhere.org'}) def test_non_ascii_verbose_name(self): username_field = User._meta.get_field('username') old_verbose_name = username_field.verbose_name username_field.verbose_name = _('u\u017eivatel') new_io = six.StringIO() try: call_command( "createsuperuser", interactive=True, stdout=new_io, stdin=MockTTY(), ) finally: username_field.verbose_name = old_verbose_name command_output = new_io.getvalue().strip() self.assertEqual(command_output, 'Superuser created successfully.') def test_verbosity_zero(self): # We can suppress output on the management command new_io = six.StringIO() call_command( "createsuperuser", interactive=False, username="joe2", email="joe2@somewhere.org", verbosity=0, stdout=new_io ) command_output = new_io.getvalue().strip() self.assertEqual(command_output, '') u = User.objects.get(username="joe2") self.assertEqual(u.email, 'joe2@somewhere.org') self.assertFalse(u.has_usable_password()) def test_email_in_username(self): new_io = six.StringIO() call_command( "createsuperuser", interactive=False, username="joe+admin@somewhere.org", email="joe@somewhere.org", stdout=new_io ) u = User._default_manager.get(username="joe+admin@somewhere.org") 
self.assertEqual(u.email, 'joe@somewhere.org') self.assertFalse(u.has_usable_password()) @override_settings(AUTH_USER_MODEL='auth.CustomUser') def test_swappable_user(self): "A superuser can be created when a custom User model is in use" # We can use the management command to create a superuser # We skip validation because the temporary substitution of the # swappable User model messes with validation. new_io = six.StringIO() call_command( "createsuperuser", interactive=False, email="joe@somewhere.org", date_of_birth="1976-04-01", stdout=new_io, ) command_output = new_io.getvalue().strip() self.assertEqual(command_output, 'Superuser created successfully.') u = CustomUser._default_manager.get(email="joe@somewhere.org") self.assertEqual(u.date_of_birth, date(1976, 4, 1)) # created password should be unusable self.assertFalse(u.has_usable_password()) @override_settings(AUTH_USER_MODEL='auth.CustomUser') def test_swappable_user_missing_required_field(self): "A Custom superuser won't be created when a required field isn't provided" # We can use the management command to create a superuser # We skip validation because the temporary substitution of the # swappable User model messes with validation. 
new_io = six.StringIO() with self.assertRaises(CommandError): call_command( "createsuperuser", interactive=False, username="joe@somewhere.org", stdout=new_io, stderr=new_io, ) self.assertEqual(CustomUser._default_manager.count(), 0) @override_settings( AUTH_USER_MODEL='auth.CustomUserNonUniqueUsername', AUTHENTICATION_BACKENDS=['my.custom.backend'], ) def test_swappable_user_username_non_unique(self): @mock_inputs({ 'username': 'joe', 'password': 'nopasswd', }) def createsuperuser(): new_io = six.StringIO() call_command( "createsuperuser", interactive=True, email="joe@somewhere.org", stdout=new_io, stdin=MockTTY(), ) command_output = new_io.getvalue().strip() self.assertEqual(command_output, 'Superuser created successfully.') for i in range(2): createsuperuser() users = CustomUserNonUniqueUsername.objects.filter(username="joe") self.assertEqual(users.count(), 2) def test_skip_if_not_in_TTY(self): """ If the command is not called from a TTY, it should be skipped and a message should be displayed (#7423). """ class FakeStdin(object): """A fake stdin object that has isatty() return False.""" def isatty(self): return False out = six.StringIO() call_command( "createsuperuser", stdin=FakeStdin(), stdout=out, interactive=True, ) self.assertEqual(User._default_manager.count(), 0) self.assertIn("Superuser creation skipped", out.getvalue()) def test_passing_stdin(self): """ You can pass a stdin object as an option and it should be available on self.stdin. If no such option is passed, it defaults to sys.stdin. 
""" sentinel = object() command = createsuperuser.Command() command.check = lambda: [] command.execute( stdin=sentinel, stdout=six.StringIO(), stderr=six.StringIO(), interactive=False, verbosity=0, username='janet', email='janet@example.com', ) self.assertIs(command.stdin, sentinel) command = createsuperuser.Command() command.check = lambda: [] command.execute( stdout=six.StringIO(), stderr=six.StringIO(), interactive=False, verbosity=0, username='joe', email='joe@example.com', ) self.assertIs(command.stdin, sys.stdin) @override_settings(AUTH_USER_MODEL='auth.CustomUserWithFK') def test_fields_with_fk(self): new_io = six.StringIO() group = Group.objects.create(name='mygroup') email = Email.objects.create(email='mymail@gmail.com') call_command( 'createsuperuser', interactive=False, username=email.pk, email=email.email, group=group.pk, stdout=new_io, ) command_output = new_io.getvalue().strip() self.assertEqual(command_output, 'Superuser created successfully.') u = CustomUserWithFK._default_manager.get(email=email) self.assertEqual(u.username, email) self.assertEqual(u.group, group) non_existent_email = 'mymail2@gmail.com' with self.assertRaisesMessage(CommandError, 'email instance with email %r does not exist.' 
% non_existent_email): call_command( 'createsuperuser', interactive=False, username=email.pk, email=non_existent_email, stdout=new_io, ) @override_settings(AUTH_USER_MODEL='auth.CustomUserWithFK') def test_fields_with_fk_interactive(self): new_io = six.StringIO() group = Group.objects.create(name='mygroup') email = Email.objects.create(email='mymail@gmail.com') @mock_inputs({ 'password': 'nopasswd', 'username (email.id)': email.pk, 'email (email.email)': email.email, 'group (group.id)': group.pk, }) def test(self): call_command( 'createsuperuser', interactive=True, stdout=new_io, stdin=MockTTY(), ) command_output = new_io.getvalue().strip() self.assertEqual(command_output, 'Superuser created successfully.') u = CustomUserWithFK._default_manager.get(email=email) self.assertEqual(u.username, email) self.assertEqual(u.group, group) test(self) def test_password_validation(self): """ Creation should fail if the password fails validation. """ new_io = six.StringIO() # Returns '1234567890' the first two times it is called, then # 'password' subsequently. def bad_then_good_password(index=[0]): index[0] += 1 if index[0] <= 2: return '1234567890' return 'password' @mock_inputs({ 'password': bad_then_good_password, 'username': 'joe1234567890', }) def test(self): call_command( "createsuperuser", interactive=True, stdin=MockTTY(), stdout=new_io, stderr=new_io, ) self.assertEqual( new_io.getvalue().strip(), "This password is entirely numeric.\n" "Superuser created successfully." ) test(self) def test_validation_mismatched_passwords(self): """ Creation should fail if the user enters mismatched passwords. """ new_io = six.StringIO() # The first two passwords do not match, but the second two do match and # are valid. 
entered_passwords = ["password", "not password", "password2", "password2"] def mismatched_passwords_then_matched(): return entered_passwords.pop(0) @mock_inputs({ 'password': mismatched_passwords_then_matched, 'username': 'joe1234567890', }) def test(self): call_command( "createsuperuser", interactive=True, stdin=MockTTY(), stdout=new_io, stderr=new_io, ) self.assertEqual( new_io.getvalue().strip(), "Error: Your passwords didn't match.\n" "Superuser created successfully." ) test(self) def test_validation_blank_password_entered(self): """ Creation should fail if the user enters blank passwords. """ new_io = six.StringIO() # The first two passwords are empty strings, but the second two are # valid. entered_passwords = ["", "", "password2", "password2"] def blank_passwords_then_valid(): return entered_passwords.pop(0) @mock_inputs({ 'password': blank_passwords_then_valid, 'username': 'joe1234567890', }) def test(self): call_command( "createsuperuser", interactive=True, stdin=MockTTY(), stdout=new_io, stderr=new_io, ) self.assertEqual( new_io.getvalue().strip(), "Error: Blank passwords aren't allowed.\n" "Superuser created successfully." ) test(self) class CustomUserModelValidationTestCase(SimpleTestCase): @override_settings(AUTH_USER_MODEL='auth.CustomUserNonListRequiredFields') @override_system_checks([check_user_model]) def test_required_fields_is_list(self): "REQUIRED_FIELDS should be a list." errors = checks.run_checks() expected = [ checks.Error( "'REQUIRED_FIELDS' must be a list or tuple.", hint=None, obj=CustomUserNonListRequiredFields, id='auth.E001', ), ] self.assertEqual(errors, expected) @override_settings(AUTH_USER_MODEL='auth.CustomUserBadRequiredFields') @override_system_checks([check_user_model]) def test_username_not_in_required_fields(self): "USERNAME_FIELD should not appear in REQUIRED_FIELDS." 
errors = checks.run_checks() expected = [ checks.Error( ("The field named as the 'USERNAME_FIELD' for a custom user model " "must not be included in 'REQUIRED_FIELDS'."), hint=None, obj=CustomUserBadRequiredFields, id='auth.E002', ), ] self.assertEqual(errors, expected) @override_settings(AUTH_USER_MODEL='auth.CustomUserNonUniqueUsername') @override_system_checks([check_user_model]) def test_username_non_unique(self): "A non-unique USERNAME_FIELD should raise a model validation error." errors = checks.run_checks() expected = [ checks.Error( ("'CustomUserNonUniqueUsername.username' must be " "unique because it is named as the 'USERNAME_FIELD'."), hint=None, obj=CustomUserNonUniqueUsername, id='auth.E003', ), ] self.assertEqual(errors, expected) @override_settings(AUTH_USER_MODEL='auth.CustomUserNonUniqueUsername', AUTHENTICATION_BACKENDS=[ 'my.custom.backend', ]) @override_system_checks([check_user_model]) def test_username_non_unique_with_custom_backend(self): """ A non-unique USERNAME_FIELD should raise an error only if we use the default authentication backend. Otherwise, an warning should be raised. 
""" errors = checks.run_checks() expected = [ checks.Warning( ("'CustomUserNonUniqueUsername.username' is named as " "the 'USERNAME_FIELD', but it is not unique."), hint=('Ensure that your authentication backend(s) can handle ' 'non-unique usernames.'), obj=CustomUserNonUniqueUsername, id='auth.W004', ) ] self.assertEqual(errors, expected) class PermissionTestCase(TestCase): def setUp(self): self._original_permissions = models.Permission._meta.permissions[:] self._original_default_permissions = models.Permission._meta.default_permissions self._original_verbose_name = models.Permission._meta.verbose_name def tearDown(self): models.Permission._meta.permissions = self._original_permissions models.Permission._meta.default_permissions = self._original_default_permissions models.Permission._meta.verbose_name = self._original_verbose_name ContentType.objects.clear_cache() def test_duplicated_permissions(self): """ Test that we show proper error message if we are trying to create duplicate permissions. 
""" auth_app_config = apps.get_app_config('auth') # check duplicated default permission models.Permission._meta.permissions = [ ('change_permission', 'Can edit permission (duplicate)')] six.assertRaisesRegex(self, CommandError, "The permission codename 'change_permission' clashes with a " "builtin permission for model 'auth.Permission'.", create_permissions, auth_app_config, verbosity=0) # check duplicated custom permissions models.Permission._meta.permissions = [ ('my_custom_permission', 'Some permission'), ('other_one', 'Some other permission'), ('my_custom_permission', 'Some permission with duplicate permission code'), ] six.assertRaisesRegex(self, CommandError, "The permission codename 'my_custom_permission' is duplicated for model " "'auth.Permission'.", create_permissions, auth_app_config, verbosity=0) # should not raise anything models.Permission._meta.permissions = [ ('my_custom_permission', 'Some permission'), ('other_one', 'Some other permission'), ] create_permissions(auth_app_config, verbosity=0) def test_default_permissions(self): auth_app_config = apps.get_app_config('auth') permission_content_type = ContentType.objects.get_by_natural_key('auth', 'permission') models.Permission._meta.permissions = [ ('my_custom_permission', 'Some permission'), ] create_permissions(auth_app_config, verbosity=0) # add/change/delete permission by default + custom permission self.assertEqual(models.Permission.objects.filter( content_type=permission_content_type, ).count(), 4) models.Permission.objects.filter(content_type=permission_content_type).delete() models.Permission._meta.default_permissions = [] create_permissions(auth_app_config, verbosity=0) # custom permission only since default permissions is empty self.assertEqual(models.Permission.objects.filter( content_type=permission_content_type, ).count(), 1) def test_verbose_name_length(self): auth_app_config = apps.get_app_config('auth') permission_content_type = ContentType.objects.get_by_natural_key('auth', 
'permission') models.Permission.objects.filter(content_type=permission_content_type).delete() models.Permission._meta.verbose_name = "some ridiculously long verbose name that is out of control" * 5 six.assertRaisesRegex(self, exceptions.ValidationError, "The verbose_name of auth.permission is longer than 244 characters", create_permissions, auth_app_config, verbosity=0) def test_custom_permission_name_length(self): auth_app_config = apps.get_app_config('auth') ContentType.objects.get_by_natural_key('auth', 'permission') custom_perm_name = 'a' * 256 models.Permission._meta.permissions = [ ('my_custom_permission', custom_perm_name), ] try: msg = ( "The permission name %s of auth.permission is longer than " "255 characters" % custom_perm_name ) with self.assertRaisesMessage(exceptions.ValidationError, msg): create_permissions(auth_app_config, verbosity=0) finally: models.Permission._meta.permissions = []
bsd-3-clause
bdang2012/taiga-back
tests/integration/test_hooks_gitlab.py
1
15307
import pytest from unittest import mock from django.core.urlresolvers import reverse from django.core import mail from taiga.base.utils import json from taiga.hooks.gitlab import event_hooks from taiga.hooks.gitlab.api import GitLabViewSet from taiga.hooks.exceptions import ActionSyntaxException from taiga.projects.issues.models import Issue from taiga.projects.tasks.models import Task from taiga.projects.userstories.models import UserStory from taiga.projects.models import Membership from taiga.projects.notifications.choices import NotifyLevel from taiga.projects.notifications.models import NotifyPolicy from taiga.projects import services from .. import factories as f pytestmark = pytest.mark.django_db def test_bad_signature(client): project = f.ProjectFactory() f.ProjectModulesConfigFactory(project=project, config={ "gitlab": { "secret": "tpnIwJDz4e" } }) url = reverse("gitlab-hook-list") url = "{}?project={}&key={}".format(url, project.id, "badbadbad") data = {} response = client.post(url, json.dumps(data), content_type="application/json") response_content = json.loads(response.content.decode("utf-8")) assert response.status_code == 400 assert "Bad signature" in response_content["_error_message"] def test_ok_signature(client): project = f.ProjectFactory() f.ProjectModulesConfigFactory(project=project, config={ "gitlab": { "secret": "tpnIwJDz4e", "valid_origin_ips": ["111.111.111.111"], } }) url = reverse("gitlab-hook-list") url = "{}?project={}&key={}".format(url, project.id, "tpnIwJDz4e") data = {"test:": "data"} response = client.post(url, json.dumps(data), content_type="application/json", REMOTE_ADDR="111.111.111.111") assert response.status_code == 204 def test_invalid_ip(client): project = f.ProjectFactory() f.ProjectModulesConfigFactory(project=project, config={ "gitlab": { "secret": "tpnIwJDz4e", "valid_origin_ips": ["111.111.111.111"], } }) url = reverse("gitlab-hook-list") url = "{}?project={}&key={}".format(url, project.id, "tpnIwJDz4e") data = 
{"test:": "data"} response = client.post(url, json.dumps(data), content_type="application/json", REMOTE_ADDR="111.111.111.112") assert response.status_code == 400 def test_valid_local_network_ip(client): project = f.ProjectFactory() f.ProjectModulesConfigFactory(project=project, config={ "gitlab": { "secret": "tpnIwJDz4e", "valid_origin_ips": ["192.168.1.1"], } }) url = reverse("gitlab-hook-list") url = "{}?project={}&key={}".format(url, project.id, "tpnIwJDz4e") data = {"test:": "data"} response = client.post(url, json.dumps(data), content_type="application/json", REMOTE_ADDR="192.168.1.1") assert response.status_code == 204 def test_not_ip_filter(client): project = f.ProjectFactory() f.ProjectModulesConfigFactory(project=project, config={ "gitlab": { "secret": "tpnIwJDz4e", "valid_origin_ips": [], } }) url = reverse("gitlab-hook-list") url = "{}?project={}&key={}".format(url, project.id, "tpnIwJDz4e") data = {"test:": "data"} response = client.post(url, json.dumps(data), content_type="application/json", REMOTE_ADDR="111.111.111.111") assert response.status_code == 204 def test_push_event_detected(client): project = f.ProjectFactory() url = reverse("gitlab-hook-list") url = "%s?project=%s" % (url, project.id) data = {"commits": [ {"message": "test message"}, ]} GitLabViewSet._validate_signature = mock.Mock(return_value=True) with mock.patch.object(event_hooks.PushEventHook, "process_event") as process_event_mock: response = client.post(url, json.dumps(data), HTTP_X_GITHUB_EVENT="push", content_type="application/json") assert process_event_mock.call_count == 1 assert response.status_code == 204 def test_push_event_issue_processing(client): creation_status = f.IssueStatusFactory() role = f.RoleFactory(project=creation_status.project, permissions=["view_issues"]) f.MembershipFactory(project=creation_status.project, role=role, user=creation_status.project.owner) new_status = f.IssueStatusFactory(project=creation_status.project) issue = 
f.IssueFactory.create(status=creation_status, project=creation_status.project, owner=creation_status.project.owner) payload = {"commits": [ {"message": """test message test TG-%s #%s ok bye! """ % (issue.ref, new_status.slug)}, ]} mail.outbox = [] ev_hook = event_hooks.PushEventHook(issue.project, payload) ev_hook.process_event() issue = Issue.objects.get(id=issue.id) assert issue.status.id == new_status.id assert len(mail.outbox) == 1 def test_push_event_task_processing(client): creation_status = f.TaskStatusFactory() role = f.RoleFactory(project=creation_status.project, permissions=["view_tasks"]) f.MembershipFactory(project=creation_status.project, role=role, user=creation_status.project.owner) new_status = f.TaskStatusFactory(project=creation_status.project) task = f.TaskFactory.create(status=creation_status, project=creation_status.project, owner=creation_status.project.owner) payload = {"commits": [ {"message": """test message test TG-%s #%s ok bye! """ % (task.ref, new_status.slug)}, ]} mail.outbox = [] ev_hook = event_hooks.PushEventHook(task.project, payload) ev_hook.process_event() task = Task.objects.get(id=task.id) assert task.status.id == new_status.id assert len(mail.outbox) == 1 def test_push_event_user_story_processing(client): creation_status = f.UserStoryStatusFactory() role = f.RoleFactory(project=creation_status.project, permissions=["view_us"]) f.MembershipFactory(project=creation_status.project, role=role, user=creation_status.project.owner) new_status = f.UserStoryStatusFactory(project=creation_status.project) user_story = f.UserStoryFactory.create(status=creation_status, project=creation_status.project, owner=creation_status.project.owner) payload = {"commits": [ {"message": """test message test TG-%s #%s ok bye! 
""" % (user_story.ref, new_status.slug)}, ]} mail.outbox = [] ev_hook = event_hooks.PushEventHook(user_story.project, payload) ev_hook.process_event() user_story = UserStory.objects.get(id=user_story.id) assert user_story.status.id == new_status.id assert len(mail.outbox) == 1 def test_push_event_multiple_actions(client): creation_status = f.IssueStatusFactory() role = f.RoleFactory(project=creation_status.project, permissions=["view_issues"]) f.MembershipFactory(project=creation_status.project, role=role, user=creation_status.project.owner) new_status = f.IssueStatusFactory(project=creation_status.project) issue1 = f.IssueFactory.create(status=creation_status, project=creation_status.project, owner=creation_status.project.owner) issue2 = f.IssueFactory.create(status=creation_status, project=creation_status.project, owner=creation_status.project.owner) payload = {"commits": [ {"message": """test message test TG-%s #%s ok test TG-%s #%s ok bye! """ % (issue1.ref, new_status.slug, issue2.ref, new_status.slug)}, ]} mail.outbox = [] ev_hook1 = event_hooks.PushEventHook(issue1.project, payload) ev_hook1.process_event() issue1 = Issue.objects.get(id=issue1.id) issue2 = Issue.objects.get(id=issue2.id) assert issue1.status.id == new_status.id assert issue2.status.id == new_status.id assert len(mail.outbox) == 2 def test_push_event_processing_case_insensitive(client): creation_status = f.TaskStatusFactory() role = f.RoleFactory(project=creation_status.project, permissions=["view_tasks"]) f.MembershipFactory(project=creation_status.project, role=role, user=creation_status.project.owner) new_status = f.TaskStatusFactory(project=creation_status.project) task = f.TaskFactory.create(status=creation_status, project=creation_status.project, owner=creation_status.project.owner) payload = {"commits": [ {"message": """test message test tg-%s #%s ok bye! 
""" % (task.ref, new_status.slug.upper())}, ]} mail.outbox = [] ev_hook = event_hooks.PushEventHook(task.project, payload) ev_hook.process_event() task = Task.objects.get(id=task.id) assert task.status.id == new_status.id assert len(mail.outbox) == 1 def test_push_event_task_bad_processing_non_existing_ref(client): issue_status = f.IssueStatusFactory() payload = {"commits": [ {"message": """test message test TG-6666666 #%s ok bye! """ % (issue_status.slug)}, ]} mail.outbox = [] ev_hook = event_hooks.PushEventHook(issue_status.project, payload) with pytest.raises(ActionSyntaxException) as excinfo: ev_hook.process_event() assert str(excinfo.value) == "The referenced element doesn't exist" assert len(mail.outbox) == 0 def test_push_event_us_bad_processing_non_existing_status(client): user_story = f.UserStoryFactory.create() payload = {"commits": [ {"message": """test message test TG-%s #non-existing-slug ok bye! """ % (user_story.ref)}, ]} mail.outbox = [] ev_hook = event_hooks.PushEventHook(user_story.project, payload) with pytest.raises(ActionSyntaxException) as excinfo: ev_hook.process_event() assert str(excinfo.value) == "The status doesn't exist" assert len(mail.outbox) == 0 def test_push_event_bad_processing_non_existing_status(client): issue = f.IssueFactory.create() payload = {"commits": [ {"message": """test message test TG-%s #non-existing-slug ok bye! 
""" % (issue.ref)}, ]} mail.outbox = [] ev_hook = event_hooks.PushEventHook(issue.project, payload) with pytest.raises(ActionSyntaxException) as excinfo: ev_hook.process_event() assert str(excinfo.value) == "The status doesn't exist" assert len(mail.outbox) == 0 def test_issues_event_opened_issue(client): issue = f.IssueFactory.create() issue.project.default_issue_status = issue.status issue.project.default_issue_type = issue.type issue.project.default_severity = issue.severity issue.project.default_priority = issue.priority issue.project.save() Membership.objects.create(user=issue.owner, project=issue.project, role=f.RoleFactory.create(project=issue.project), is_owner=True) notify_policy = NotifyPolicy.objects.get(user=issue.owner, project=issue.project) notify_policy.notify_level = NotifyLevel.watch notify_policy.save() payload = { "object_kind": "issue", "object_attributes": { "title": "test-title", "description": "test-body", "url": "http://gitlab.com/test/project/issues/11", "action": "open", }, } mail.outbox = [] ev_hook = event_hooks.IssuesEventHook(issue.project, payload) ev_hook.process_event() assert Issue.objects.count() == 2 assert len(mail.outbox) == 1 def test_issues_event_other_than_opened_issue(client): issue = f.IssueFactory.create() issue.project.default_issue_status = issue.status issue.project.default_issue_type = issue.type issue.project.default_severity = issue.severity issue.project.default_priority = issue.priority issue.project.save() payload = { "object_kind": "issue", "object_attributes": { "title": "test-title", "description": "test-body", "url": "http://gitlab.com/test/project/issues/11", "action": "update", }, } mail.outbox = [] ev_hook = event_hooks.IssuesEventHook(issue.project, payload) ev_hook.process_event() assert Issue.objects.count() == 1 assert len(mail.outbox) == 0 def test_issues_event_bad_issue(client): issue = f.IssueFactory.create() issue.project.default_issue_status = issue.status issue.project.default_issue_type = 
issue.type issue.project.default_severity = issue.severity issue.project.default_priority = issue.priority issue.project.save() payload = { "object_kind": "issue", "object_attributes": { "action": "open", }, } mail.outbox = [] ev_hook = event_hooks.IssuesEventHook(issue.project, payload) with pytest.raises(ActionSyntaxException) as excinfo: ev_hook.process_event() assert str(excinfo.value) == "Invalid issue information" assert Issue.objects.count() == 1 assert len(mail.outbox) == 0 def test_api_get_project_modules(client): project = f.create_project() f.MembershipFactory(project=project, user=project.owner, is_owner=True) url = reverse("projects-modules", args=(project.id,)) client.login(project.owner) response = client.get(url) assert response.status_code == 200 content = json.loads(response.content.decode("utf-8")) assert "gitlab" in content assert content["gitlab"]["secret"] != "" assert content["gitlab"]["webhooks_url"] != "" def test_api_patch_project_modules(client): project = f.create_project() f.MembershipFactory(project=project, user=project.owner, is_owner=True) url = reverse("projects-modules", args=(project.id,)) client.login(project.owner) data = { "gitlab": { "secret": "test_secret", "url": "test_url", } } response = client.patch(url, json.dumps(data), content_type="application/json") assert response.status_code == 204 config = services.get_modules_config(project).config assert "gitlab" in config assert config["gitlab"]["secret"] == "test_secret" assert config["gitlab"]["webhooks_url"] != "test_url" def test_replace_gitlab_references(): assert event_hooks.replace_gitlab_references("project-url", "#2") == "[GitLab#2](project-url/issues/2)" assert event_hooks.replace_gitlab_references("project-url", "#2 ") == "[GitLab#2](project-url/issues/2) " assert event_hooks.replace_gitlab_references("project-url", " #2 ") == " [GitLab#2](project-url/issues/2) " assert event_hooks.replace_gitlab_references("project-url", " #2") == " 
[GitLab#2](project-url/issues/2)" assert event_hooks.replace_gitlab_references("project-url", "#test") == "#test" assert event_hooks.replace_gitlab_references("project-url", None) == ""
agpl-3.0
jmartu/testing
venv/lib/python3.6/site-packages/wheel/paths.py
70
1129
""" Installation paths. Map the .data/ subdirectory names to install paths. """ import distutils.command.install as install import distutils.dist as dist import os.path import sys def get_install_command(name): # late binding due to potential monkeypatching d = dist.Distribution({'name': name}) i = install.install(d) i.finalize_options() return i def get_install_paths(name): """ Return the (distutils) install paths for the named dist. A dict with ('purelib', 'platlib', 'headers', 'scripts', 'data') keys. """ paths = {} i = get_install_command(name) for key in install.SCHEME_KEYS: paths[key] = getattr(i, 'install_' + key) # pip uses a similar path as an alternative to the system's (read-only) # include directory: if hasattr(sys, 'real_prefix'): # virtualenv paths['headers'] = os.path.join(sys.prefix, 'include', 'site', 'python' + sys.version[:3], name) return paths
mit
JaneliaSciComp/osgpyplusplus
examples/rough_translated1/osgshaders.py
1
15875
#!/bin/env python # Automatically translated python version of # OpenSceneGraph example program "osgshaders" # !!! This program will need manual tuning before it will work. !!! import sys from osgpypp import osg from osgpypp import osgDB from osgpypp import osgGA from osgpypp import osgUtil from osgpypp import osgViewer # Translated from file 'GL2Scene.cpp' # OpenSceneGraph example, osgshaders. #* #* Permission is hereby granted, free of charge, to any person obtaining a copy #* of this software and associated documentation files (the "Software"), to deal #* in the Software without restriction, including without limitation the rights #* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #* copies of the Software, and to permit persons to whom the Software is #* furnished to do so, subject to the following conditions: #* #* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN #* THE SOFTWARE. # # file: examples/osgshaders/GL2Scene.cpp # * author: Mike Weiblen 2005-05-01 # * # * Compose a scene of several instances of a model, with a different # * OpenGL Shading Language shader applied to each. # * # * See http:#www.3dlabs.com/opengl2/ for more information regarding # * the OpenGL Shading Language. 
# #include <osg/ShapeDrawable> #include <osg/PositionAttitudeTransform> #include <osg/Geode> #include <osg/Node> #include <osg/Material> #include <osg/Notify> #include <osg/Vec3> #include <osg/Texture1D> #include <osg/Texture2D> #include <osg/Texture3D> #include <osgDB/ReadFile> #include <osgDB/FileUtils> #include <osgUtil/Optimizer> #include <osg/Program> #include <osg/Shader> #include <osg/Uniform> #include <osgUtil/PerlinNoise> #include <iostream> #include "GL2Scene.h" #####################################/ static osg.Image* make1DSineImage( int texSize ) PI = 3.1415927 image = osg.Image() image.setImage(texSize, 1, 1, 4, GL_RGBA, GL_UNSIGNED_BYTE, unsigned char[4 * texSize], osg.Image.USE_NEW_DELETE)() ptr = image.data() inc = 2. * PI / (float)texSize for(int i = 0 i < texSize i++) *ptr++ = (GLubyte)((sinf(i * inc) * 0.5 + 0.5) * 255.) *ptr++ = 0 *ptr++ = 0 *ptr++ = 1 return image static osg.Texture1D* make1DSineTexture( int texSize ) sineTexture = osg.Texture1D() sineTexture.setWrap(osg.Texture1D.WRAP_S, osg.Texture1D.REPEAT) sineTexture.setFilter(osg.Texture1D.MIN_FILTER, osg.Texture1D.LINEAR) sineTexture.setFilter(osg.Texture1D.MAG_FILTER, osg.Texture1D.LINEAR) sineTexture.setImage( make1DSineImage(texSize) ) return sineTexture #####################################/ # in-line GLSL source code for the "microshader" example static char *microshaderVertSource = "# microshader - colors a fragment based on its position\n" "varying vec4 color\n" "void main(void)\n" "\n" " color = gl_Vertex\n" " gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex\n" "\n" static char *microshaderFragSource = "varying vec4 color\n" "void main(void)\n" "\n" " gl_FragColor = clamp( color, 0.0, 1.0 )\n" "\n" #####################################/ static osg.Group rootNode # Create some geometry upon which to render GLSL shaders. 
static osg.Geode* CreateModel() geode = osg.Geode() geode.addDrawable(osg.ShapeDrawable(osg.Sphere(osg.Vec3(0.0,0.0,0.0),1.0))) geode.addDrawable(osg.ShapeDrawable(osg.Cone(osg.Vec3(2.2,0.0,-0.4),0.9,1.8))) geode.addDrawable(osg.ShapeDrawable(osg.Cylinder(osg.Vec3(4.4,0.0,0.0),1.0,1.4))) return geode # Add a reference to the masterModel at the specified translation, and # return its StateSet so we can easily attach StateAttributes. static osg.StateSet* ModelInstance() static float zvalue = 0.0 static osg.Node* masterModel = CreateModel() xform = osg.PositionAttitudeTransform() xform.setPosition(osg.Vec3( 0.0, -1.0, zvalue )) zvalue = zvalue + 2.2 xform.addChild(masterModel) rootNode.addChild(xform) return xform.getOrCreateStateSet() # load source from a file. static void LoadShaderSource( osg.Shader* shader, str fileName ) fqFileName = osgDB.findDataFile(fileName) if fqFileName.length() not = 0 : shader.loadShaderSourceFromFile( fqFileName.c_str() ) else: osg.notify(osg.WARN), "File \"", fileName, "\" not found." #####################################/ # rude but convenient globals static osg.Program* BlockyProgram static osg.Shader* BlockyVertObj static osg.Shader* BlockyFragObj static osg.Program* ErodedProgram static osg.Shader* ErodedVertObj static osg.Shader* ErodedFragObj static osg.Program* MarbleProgram static osg.Shader* MarbleVertObj static osg.Shader* MarbleFragObj #####################################/ # for demo simplicity, this one callback animates all the shaders, instancing # for each uniform but with a specific operation each time. class AnimateCallback (osg.Uniform.Callback) : enum Operation OFFSET, SIN, COLOR1, COLOR2 AnimateCallback(Operation op) : _enabled(True),_operation(op) virtual void operator() ( osg.Uniform* uniform, osg.NodeVisitor* nv ) if _enabled : angle = 2.0 * nv.getFrameStamp().getSimulationTime() sine = sinf( angle ) # -1 . 1 v01 = 0.5 * sine + 0.5 # 0 . 1 v10 = 1.0 - v01 # 1 . 
0 switch(_operation) case OFFSET : uniform.set( osg.Vec3(0.505, 0.8*v01, 0.0) ) break case SIN : uniform.set( sine ) break case COLOR1 : uniform.set( osg.Vec3(v10, 0.0, 0.0) ) break case COLOR2 : uniform.set( osg.Vec3(v01, v01, v10) ) break _enabled = bool() _operation = Operation() #####################################/ # Compose a scenegraph with examples of GLSL shaders #define TEXUNIT_SINE 1 #define TEXUNIT_NOISE 2 osg.Group GL2Scene.buildScene() noiseTexture = osgUtil.create3DNoiseTexture( 32 #128 ) sineTexture = make1DSineTexture( 32 #1024 ) # the root of our scenegraph. rootNode = osg.Group() # attach some Uniforms to the root, to be inherited by Programs. OffsetUniform = osg.Uniform( "Offset", osg.Vec3(0.0, 0.0, 0.0) ) SineUniform = osg.Uniform( "Sine", 0.0 ) Color1Uniform = osg.Uniform( "Color1", osg.Vec3(0.0, 0.0, 0.0) ) Color2Uniform = osg.Uniform( "Color2", osg.Vec3(0.0, 0.0, 0.0) ) OffsetUniform.setUpdateCallback(AnimateCallback(AnimateCallback.OFFSET)) SineUniform.setUpdateCallback(AnimateCallback(AnimateCallback.SIN)) Color1Uniform.setUpdateCallback(AnimateCallback(AnimateCallback.COLOR1)) Color2Uniform.setUpdateCallback(AnimateCallback(AnimateCallback.COLOR2)) ss = rootNode.getOrCreateStateSet() ss.addUniform( OffsetUniform ) ss.addUniform( SineUniform ) ss.addUniform( Color1Uniform ) ss.addUniform( Color2Uniform ) # the simple Microshader (its source appears earlier in this file) ss = ModelInstance() program = osg.Program() program.setName( "microshader" ) _programList.push_back( program ) program.addShader( osg.Shader( osg.Shader.VERTEX, microshaderVertSource ) ) program.addShader( osg.Shader( osg.Shader.FRAGMENT, microshaderFragSource ) ) ss.setAttributeAndModes( program, osg.StateAttribute.ON ) # the "blocky" shader, a simple animation test ss = ModelInstance() BlockyProgram = osg.Program() BlockyProgram.setName( "blocky" ) _programList.push_back( BlockyProgram ) BlockyVertObj = osg.Shader( osg.Shader.VERTEX ) BlockyFragObj = osg.Shader( 
osg.Shader.FRAGMENT ) BlockyProgram.addShader( BlockyFragObj ) BlockyProgram.addShader( BlockyVertObj ) ss.setAttributeAndModes(BlockyProgram, osg.StateAttribute.ON) # the "eroded" shader, uses a noise texture to discard fragments ss = ModelInstance() ss.setTextureAttribute(TEXUNIT_NOISE, noiseTexture) ErodedProgram = osg.Program() ErodedProgram.setName( "eroded" ) _programList.push_back( ErodedProgram ) ErodedVertObj = osg.Shader( osg.Shader.VERTEX ) ErodedFragObj = osg.Shader( osg.Shader.FRAGMENT ) ErodedProgram.addShader( ErodedFragObj ) ErodedProgram.addShader( ErodedVertObj ) ss.setAttributeAndModes(ErodedProgram, osg.StateAttribute.ON) ss.addUniform( osg.Uniform("LightPosition", osg.Vec3(0.0, 0.0, 4.0)) ) ss.addUniform( osg.Uniform("Scale", 1.0) ) ss.addUniform( osg.Uniform("sampler3d", TEXUNIT_NOISE) ) # the "marble" shader, uses two textures ss = ModelInstance() ss.setTextureAttribute(TEXUNIT_NOISE, noiseTexture) ss.setTextureAttribute(TEXUNIT_SINE, sineTexture) MarbleProgram = osg.Program() MarbleProgram.setName( "marble" ) _programList.push_back( MarbleProgram ) MarbleVertObj = osg.Shader( osg.Shader.VERTEX ) MarbleFragObj = osg.Shader( osg.Shader.FRAGMENT ) MarbleProgram.addShader( MarbleFragObj ) MarbleProgram.addShader( MarbleVertObj ) ss.setAttributeAndModes(MarbleProgram, osg.StateAttribute.ON) ss.addUniform( osg.Uniform("NoiseTex", TEXUNIT_NOISE) ) ss.addUniform( osg.Uniform("SineTex", TEXUNIT_SINE) ) #ifdef INTERNAL_3DLABS #[ # regular GL 1.x texturing for comparison. 
ss = ModelInstance() tex0 = osg.Texture2D() tex0.setImage( osgDB.readImageFile( "images/3dl-ge100.png" ) ) ss.setTextureAttributeAndModes(0, tex0, osg.StateAttribute.ON) #endif #] reloadShaderSource() #ifdef INTERNAL_3DLABS #[ # add logo overlays rootNode.addChild( osgDB.readNodeFile( "3dl_ogl.logo" ) ) #endif #] return rootNode #####################################/ #####################################/ GL2Scene.GL2Scene() _rootNode = buildScene() _shadersEnabled = True GL2Scene.~GL2Scene() void GL2Scene.reloadShaderSource() osg.notify(osg.INFO), "reloadShaderSource()" LoadShaderSource( BlockyVertObj, "shaders/blocky.vert" ) LoadShaderSource( BlockyFragObj, "shaders/blocky.frag" ) LoadShaderSource( ErodedVertObj, "shaders/eroded.vert" ) LoadShaderSource( ErodedFragObj, "shaders/eroded.frag" ) LoadShaderSource( MarbleVertObj, "shaders/marble.vert" ) LoadShaderSource( MarbleFragObj, "shaders/marble.frag" ) # mew 2003-09-19 : TODO Need to revisit how to better control # osg.Program enable state in OSG core. glProgram are # different enough from other GL state that StateSet.setAttributeAndModes() # doesn't fit well, so came up with a local implementation. void GL2Scene.toggleShaderEnable() _shadersEnabled = not _shadersEnabled osg.notify(osg.WARN), "shader enable = ", "ON" : "OFF"), std: if (((_shadersEnabled)) else endl for( unsigned int i = 0 i < _programList.size() i++ ) #_programList[i].enable( _shadersEnabled ) #EOF # Translated from file 'GL2Scene.h' # -*-c++-*- #* #* OpenSceneGraph example, osgshaders. 
#* #* Permission is hereby granted, free of charge, to any person obtaining a copy #* of this software and associated documentation files (the "Software"), to deal #* in the Software without restriction, including without limitation the rights #* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #* copies of the Software, and to permit persons to whom the Software is #* furnished to do so, subject to the following conditions: #* #* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN #* THE SOFTWARE. # # file: examples/osgglsl/GL2Scene.h # * author: Mike Weiblen 2005-03-30 # * # * See http:#www.3dlabs.com/opengl2/ for more information regarding # * the OpenGL Shading Language. # #include <osg/Node> #include <osg/Referenced> #include <osg/ref_ptr> #include <osg/Program> class GL2Scene (osg.Referenced) : GL2Scene() def getRootNode(): return _rootNode reloadShaderSource = void() toggleShaderEnable = void() ~GL2Scene() #methods buildScene = osg.Group() #data _rootNode = osg.Group() _programList = std.vector< osg.Program >() _shadersEnabled = bool() typedef GL2Scene GL2ScenePtr #EOF # Translated from file 'osgshaders.cpp' # OpenSceneGraph example, osgshaders. 
#* #* Permission is hereby granted, free of charge, to any person obtaining a copy #* of this software and associated documentation files (the "Software"), to deal #* in the Software without restriction, including without limitation the rights #* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #* copies of the Software, and to permit persons to whom the Software is #* furnished to do so, subject to the following conditions: #* #* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN #* THE SOFTWARE. # # file: examples/osgglsl/osgshaders.cpp # * author: Mike Weiblen 2005-04-05 # * # * A demo of the OpenGL Shading Language shaders using core OSG. # * # * See http:#www.3dlabs.com/opengl2/ for more information regarding # * the OpenGL Shading Language. # #include <osg/Notify> #include <osgGA/GUIEventAdapter> #include <osgGA/GUIActionAdapter> #include <osgDB/ReadFile> #include <osgUtil/Optimizer> #include <osgViewer/Viewer> #include "GL2Scene.h" using namespace osg #####################################/ class KeyHandler (osgGA.GUIEventHandler) : KeyHandler( GL2ScenePtr gl2Scene ) : _gl2Scene(gl2Scene) bool handle( osgGA.GUIEventAdapter ea, osgGA.GUIActionAdapter ) if ea.getEventType() not = osgGA.GUIEventAdapter.KEYDOWN : return False switch( ea.getKey() ) case ord("x"): _gl2Scene.reloadShaderSource() return True case ord("y"): _gl2Scene.toggleShaderEnable() return True return False _gl2Scene = GL2ScenePtr() #####################################/ int main(int, char **) # construct the viewer. 
viewer = osgViewer.Viewer() # create the scene gl2Scene = GL2Scene() viewer.setSceneData( gl2Scene.getRootNode() ) viewer.addEventHandler( KeyHandler(gl2Scene) ) return viewer.run() #EOF if __name__ == "__main__": main(sys.argv)
bsd-3-clause
Belxjander/Kirito
Python-3.5.0-main/Lib/test/test_exception_variations.py
21
3948
import unittest class ExceptionTestCase(unittest.TestCase): def test_try_except_else_finally(self): hit_except = False hit_else = False hit_finally = False try: raise Exception('nyaa!') except: hit_except = True else: hit_else = True finally: hit_finally = True self.assertTrue(hit_except) self.assertTrue(hit_finally) self.assertFalse(hit_else) def test_try_except_else_finally_no_exception(self): hit_except = False hit_else = False hit_finally = False try: pass except: hit_except = True else: hit_else = True finally: hit_finally = True self.assertFalse(hit_except) self.assertTrue(hit_finally) self.assertTrue(hit_else) def test_try_except_finally(self): hit_except = False hit_finally = False try: raise Exception('yarr!') except: hit_except = True finally: hit_finally = True self.assertTrue(hit_except) self.assertTrue(hit_finally) def test_try_except_finally_no_exception(self): hit_except = False hit_finally = False try: pass except: hit_except = True finally: hit_finally = True self.assertFalse(hit_except) self.assertTrue(hit_finally) def test_try_except(self): hit_except = False try: raise Exception('ahoy!') except: hit_except = True self.assertTrue(hit_except) def test_try_except_no_exception(self): hit_except = False try: pass except: hit_except = True self.assertFalse(hit_except) def test_try_except_else(self): hit_except = False hit_else = False try: raise Exception('foo!') except: hit_except = True else: hit_else = True self.assertFalse(hit_else) self.assertTrue(hit_except) def test_try_except_else_no_exception(self): hit_except = False hit_else = False try: pass except: hit_except = True else: hit_else = True self.assertFalse(hit_except) self.assertTrue(hit_else) def test_try_finally_no_exception(self): hit_finally = False try: pass finally: hit_finally = True self.assertTrue(hit_finally) def test_nested(self): hit_finally = False hit_inner_except = False hit_inner_finally = False try: try: raise Exception('inner exception') except: hit_inner_except = True 
finally: hit_inner_finally = True finally: hit_finally = True self.assertTrue(hit_inner_except) self.assertTrue(hit_inner_finally) self.assertTrue(hit_finally) def test_nested_else(self): hit_else = False hit_finally = False hit_except = False hit_inner_except = False hit_inner_else = False try: try: pass except: hit_inner_except = True else: hit_inner_else = True raise Exception('outer exception') except: hit_except = True else: hit_else = True finally: hit_finally = True self.assertFalse(hit_inner_except) self.assertTrue(hit_inner_else) self.assertFalse(hit_else) self.assertTrue(hit_finally) self.assertTrue(hit_except) if __name__ == '__main__': unittest.main()
gpl-3.0
onceuponatimeforever/oh-mainline
vendor/packages/Django/tests/modeltests/timezones/tests.py
44
49954
from __future__ import unicode_literals import datetime import os import sys import time import warnings from xml.dom.minidom import parseString try: import pytz except ImportError: pytz = None from django.conf import settings from django.core import serializers from django.core.urlresolvers import reverse from django.db import connection from django.db.models import Min, Max from django.http import HttpRequest from django.template import Context, RequestContext, Template, TemplateSyntaxError from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature from django.test.utils import override_settings from django.utils import six from django.utils import timezone from django.utils.tzinfo import FixedOffset from django.utils.unittest import skipIf, skipUnless from .forms import EventForm, EventSplitForm, EventModelForm from .models import Event, MaybeEvent, Session, SessionEvent, Timestamp, AllDayEvent # These tests use the EAT (Eastern Africa Time) and ICT (Indochina Time) # who don't have Daylight Saving Time, so we can represent them easily # with FixedOffset, and use them directly as tzinfo in the constructors. # settings.TIME_ZONE is forced to EAT. Most tests use a variant of # datetime.datetime(2011, 9, 1, 13, 20, 30), which translates to # 10:20:30 in UTC and 17:20:30 in ICT. UTC = timezone.utc EAT = FixedOffset(180) # Africa/Nairobi ICT = FixedOffset(420) # Asia/Bangkok TZ_SUPPORT = hasattr(time, 'tzset') # On OSes that don't provide tzset (Windows), we can't set the timezone # in which the program runs. As a consequence, we must skip tests that # don't enforce a specific timezone (with timezone.override or equivalent), # or attempt to interpret naive datetimes in the default timezone. 
requires_tz_support = skipUnless(TZ_SUPPORT, "This test relies on the ability to run a program in an arbitrary " "time zone, but your operating system isn't able to do that.") @override_settings(TIME_ZONE='Africa/Nairobi', USE_TZ=False) class LegacyDatabaseTests(TestCase): def test_naive_datetime(self): dt = datetime.datetime(2011, 9, 1, 13, 20, 30) Event.objects.create(dt=dt) event = Event.objects.get() self.assertEqual(event.dt, dt) @skipUnlessDBFeature('supports_microsecond_precision') def test_naive_datetime_with_microsecond(self): dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060) Event.objects.create(dt=dt) event = Event.objects.get() self.assertEqual(event.dt, dt) @skipIfDBFeature('supports_microsecond_precision') def test_naive_datetime_with_microsecond_unsupported(self): dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060) Event.objects.create(dt=dt) event = Event.objects.get() # microseconds are lost during a round-trip in the database self.assertEqual(event.dt, dt.replace(microsecond=0)) @skipUnlessDBFeature('supports_timezones') def test_aware_datetime_in_local_timezone(self): dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT) Event.objects.create(dt=dt) event = Event.objects.get() self.assertIsNone(event.dt.tzinfo) # interpret the naive datetime in local time to get the correct value self.assertEqual(event.dt.replace(tzinfo=EAT), dt) @skipUnlessDBFeature('supports_timezones') @skipUnlessDBFeature('supports_microsecond_precision') def test_aware_datetime_in_local_timezone_with_microsecond(self): dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT) Event.objects.create(dt=dt) event = Event.objects.get() self.assertIsNone(event.dt.tzinfo) # interpret the naive datetime in local time to get the correct value self.assertEqual(event.dt.replace(tzinfo=EAT), dt) # This combination actually never happens. 
@skipUnlessDBFeature('supports_timezones') @skipIfDBFeature('supports_microsecond_precision') def test_aware_datetime_in_local_timezone_with_microsecond_unsupported(self): dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT) Event.objects.create(dt=dt) event = Event.objects.get() self.assertIsNone(event.dt.tzinfo) # interpret the naive datetime in local time to get the correct value # microseconds are lost during a round-trip in the database self.assertEqual(event.dt.replace(tzinfo=EAT), dt.replace(microsecond=0)) @skipUnlessDBFeature('supports_timezones') @skipIfDBFeature('needs_datetime_string_cast') def test_aware_datetime_in_utc(self): dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC) Event.objects.create(dt=dt) event = Event.objects.get() self.assertIsNone(event.dt.tzinfo) # interpret the naive datetime in local time to get the correct value self.assertEqual(event.dt.replace(tzinfo=EAT), dt) # This combination is no longer possible since timezone support # was removed from the SQLite backend -- it didn't work. @skipUnlessDBFeature('supports_timezones') @skipUnlessDBFeature('needs_datetime_string_cast') def test_aware_datetime_in_utc_unsupported(self): dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC) Event.objects.create(dt=dt) event = Event.objects.get() self.assertIsNone(event.dt.tzinfo) # django.db.backend.utils.typecast_dt will just drop the # timezone, so a round-trip in the database alters the data (!) 
# interpret the naive datetime in local time and you get a wrong value self.assertNotEqual(event.dt.replace(tzinfo=EAT), dt) # interpret the naive datetime in original time to get the correct value self.assertEqual(event.dt.replace(tzinfo=UTC), dt) @skipUnlessDBFeature('supports_timezones') @skipIfDBFeature('needs_datetime_string_cast') def test_aware_datetime_in_other_timezone(self): dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT) Event.objects.create(dt=dt) event = Event.objects.get() self.assertIsNone(event.dt.tzinfo) # interpret the naive datetime in local time to get the correct value self.assertEqual(event.dt.replace(tzinfo=EAT), dt) # This combination is no longer possible since timezone support # was removed from the SQLite backend -- it didn't work. @skipUnlessDBFeature('supports_timezones') @skipUnlessDBFeature('needs_datetime_string_cast') def test_aware_datetime_in_other_timezone_unsupported(self): dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT) Event.objects.create(dt=dt) event = Event.objects.get() self.assertIsNone(event.dt.tzinfo) # django.db.backend.utils.typecast_dt will just drop the # timezone, so a round-trip in the database alters the data (!) 
# interpret the naive datetime in local time and you get a wrong value self.assertNotEqual(event.dt.replace(tzinfo=EAT), dt) # interpret the naive datetime in original time to get the correct value self.assertEqual(event.dt.replace(tzinfo=ICT), dt) @skipIfDBFeature('supports_timezones') def test_aware_datetime_unspported(self): dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT) with self.assertRaises(ValueError): Event.objects.create(dt=dt) def test_auto_now_and_auto_now_add(self): now = datetime.datetime.now() past = now - datetime.timedelta(seconds=2) future = now + datetime.timedelta(seconds=2) Timestamp.objects.create() ts = Timestamp.objects.get() self.assertLess(past, ts.created) self.assertLess(past, ts.updated) self.assertGreater(future, ts.updated) self.assertGreater(future, ts.updated) def test_query_filter(self): dt1 = datetime.datetime(2011, 9, 1, 12, 20, 30) dt2 = datetime.datetime(2011, 9, 1, 14, 20, 30) Event.objects.create(dt=dt1) Event.objects.create(dt=dt2) self.assertEqual(Event.objects.filter(dt__gte=dt1).count(), 2) self.assertEqual(Event.objects.filter(dt__gt=dt1).count(), 1) self.assertEqual(Event.objects.filter(dt__gte=dt2).count(), 1) self.assertEqual(Event.objects.filter(dt__gt=dt2).count(), 0) def test_query_date_related_filters(self): Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0)) Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0)) self.assertEqual(Event.objects.filter(dt__year=2011).count(), 2) self.assertEqual(Event.objects.filter(dt__month=1).count(), 2) self.assertEqual(Event.objects.filter(dt__day=1).count(), 2) self.assertEqual(Event.objects.filter(dt__week_day=7).count(), 2) def test_query_aggregation(self): # Only min and max make sense for datetimes. 
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20)) Event.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30)) Event.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40)) result = Event.objects.all().aggregate(Min('dt'), Max('dt')) self.assertEqual(result, { 'dt__min': datetime.datetime(2011, 9, 1, 3, 20, 40), 'dt__max': datetime.datetime(2011, 9, 1, 23, 20, 20), }) def test_query_annotation(self): # Only min and max make sense for datetimes. morning = Session.objects.create(name='morning') afternoon = Session.objects.create(name='afternoon') SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20), session=afternoon) SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30), session=afternoon) SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40), session=morning) morning_min_dt = datetime.datetime(2011, 9, 1, 3, 20, 40) afternoon_min_dt = datetime.datetime(2011, 9, 1, 13, 20, 30) self.assertQuerysetEqual( Session.objects.annotate(dt=Min('events__dt')).order_by('dt'), [morning_min_dt, afternoon_min_dt], transform=lambda d: d.dt) self.assertQuerysetEqual( Session.objects.annotate(dt=Min('events__dt')).filter(dt__lt=afternoon_min_dt), [morning_min_dt], transform=lambda d: d.dt) self.assertQuerysetEqual( Session.objects.annotate(dt=Min('events__dt')).filter(dt__gte=afternoon_min_dt), [afternoon_min_dt], transform=lambda d: d.dt) def test_query_dates(self): Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0)) Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0)) self.assertQuerysetEqual(Event.objects.dates('dt', 'year'), [datetime.datetime(2011, 1, 1)], transform=lambda d: d) self.assertQuerysetEqual(Event.objects.dates('dt', 'month'), [datetime.datetime(2011, 1, 1)], transform=lambda d: d) self.assertQuerysetEqual(Event.objects.dates('dt', 'day'), [datetime.datetime(2011, 1, 1)], transform=lambda d: d) def test_raw_sql(self): # Regression test for #17755 dt = 
datetime.datetime(2011, 9, 1, 13, 20, 30) event = Event.objects.create(dt=dt) self.assertQuerysetEqual( Event.objects.raw('SELECT * FROM timezones_event WHERE dt = %s', [dt]), [event], transform=lambda d: d) def test_filter_date_field_with_aware_datetime(self): # Regression test for #17742 day = datetime.date(2011, 9, 1) event = AllDayEvent.objects.create(day=day) # This is 2011-09-02T01:30:00+03:00 in EAT dt = datetime.datetime(2011, 9, 1, 22, 30, 0, tzinfo=UTC) self.assertTrue(AllDayEvent.objects.filter(day__gte=dt).exists()) @override_settings(TIME_ZONE='Africa/Nairobi', USE_TZ=True) class NewDatabaseTests(TestCase): @requires_tz_support def test_naive_datetime(self): dt = datetime.datetime(2011, 9, 1, 13, 20, 30) with warnings.catch_warnings(record=True) as recorded: warnings.simplefilter('always') Event.objects.create(dt=dt) self.assertEqual(len(recorded), 1) msg = str(recorded[0].message) self.assertTrue(msg.startswith("DateTimeField received a naive datetime")) event = Event.objects.get() # naive datetimes are interpreted in local time self.assertEqual(event.dt, dt.replace(tzinfo=EAT)) @requires_tz_support def test_datetime_from_date(self): dt = datetime.date(2011, 9, 1) with warnings.catch_warnings(record=True) as recorded: warnings.simplefilter('always') Event.objects.create(dt=dt) self.assertEqual(len(recorded), 1) msg = str(recorded[0].message) self.assertTrue(msg.startswith("DateTimeField received a naive datetime")) event = Event.objects.get() self.assertEqual(event.dt, datetime.datetime(2011, 9, 1, tzinfo=EAT)) @requires_tz_support @skipUnlessDBFeature('supports_microsecond_precision') def test_naive_datetime_with_microsecond(self): dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060) with warnings.catch_warnings(record=True) as recorded: warnings.simplefilter('always') Event.objects.create(dt=dt) self.assertEqual(len(recorded), 1) msg = str(recorded[0].message) self.assertTrue(msg.startswith("DateTimeField received a naive datetime")) event = 
Event.objects.get() # naive datetimes are interpreted in local time self.assertEqual(event.dt, dt.replace(tzinfo=EAT)) @requires_tz_support @skipIfDBFeature('supports_microsecond_precision') def test_naive_datetime_with_microsecond_unsupported(self): dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060) with warnings.catch_warnings(record=True) as recorded: warnings.simplefilter('always') Event.objects.create(dt=dt) self.assertEqual(len(recorded), 1) msg = str(recorded[0].message) self.assertTrue(msg.startswith("DateTimeField received a naive datetime")) event = Event.objects.get() # microseconds are lost during a round-trip in the database # naive datetimes are interpreted in local time self.assertEqual(event.dt, dt.replace(microsecond=0, tzinfo=EAT)) def test_aware_datetime_in_local_timezone(self): dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT) Event.objects.create(dt=dt) event = Event.objects.get() self.assertEqual(event.dt, dt) @skipUnlessDBFeature('supports_microsecond_precision') def test_aware_datetime_in_local_timezone_with_microsecond(self): dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT) Event.objects.create(dt=dt) event = Event.objects.get() self.assertEqual(event.dt, dt) @skipIfDBFeature('supports_microsecond_precision') def test_aware_datetime_in_local_timezone_with_microsecond_unsupported(self): dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT) Event.objects.create(dt=dt) event = Event.objects.get() # microseconds are lost during a round-trip in the database self.assertEqual(event.dt, dt.replace(microsecond=0)) def test_aware_datetime_in_utc(self): dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC) Event.objects.create(dt=dt) event = Event.objects.get() self.assertEqual(event.dt, dt) def test_aware_datetime_in_other_timezone(self): dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT) Event.objects.create(dt=dt) event = Event.objects.get() self.assertEqual(event.dt, dt) def 
test_auto_now_and_auto_now_add(self): now = timezone.now() past = now - datetime.timedelta(seconds=2) future = now + datetime.timedelta(seconds=2) Timestamp.objects.create() ts = Timestamp.objects.get() self.assertLess(past, ts.created) self.assertLess(past, ts.updated) self.assertGreater(future, ts.updated) self.assertGreater(future, ts.updated) def test_query_filter(self): dt1 = datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=EAT) dt2 = datetime.datetime(2011, 9, 1, 14, 20, 30, tzinfo=EAT) Event.objects.create(dt=dt1) Event.objects.create(dt=dt2) self.assertEqual(Event.objects.filter(dt__gte=dt1).count(), 2) self.assertEqual(Event.objects.filter(dt__gt=dt1).count(), 1) self.assertEqual(Event.objects.filter(dt__gte=dt2).count(), 1) self.assertEqual(Event.objects.filter(dt__gt=dt2).count(), 0) @skipIf(pytz is None, "this test requires pytz") def test_query_filter_with_pytz_timezones(self): tz = pytz.timezone('Europe/Paris') dt = datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=tz) Event.objects.create(dt=dt) next = dt + datetime.timedelta(seconds=3) prev = dt - datetime.timedelta(seconds=3) self.assertEqual(Event.objects.filter(dt__exact=dt).count(), 1) self.assertEqual(Event.objects.filter(dt__exact=next).count(), 0) self.assertEqual(Event.objects.filter(dt__in=(prev, next)).count(), 0) self.assertEqual(Event.objects.filter(dt__in=(prev, dt, next)).count(), 1) self.assertEqual(Event.objects.filter(dt__range=(prev, next)).count(), 1) @requires_tz_support def test_query_filter_with_naive_datetime(self): dt = datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=EAT) Event.objects.create(dt=dt) dt = dt.replace(tzinfo=None) with warnings.catch_warnings(record=True) as recorded: warnings.simplefilter('always') # naive datetimes are interpreted in local time self.assertEqual(Event.objects.filter(dt__exact=dt).count(), 1) self.assertEqual(Event.objects.filter(dt__lte=dt).count(), 1) self.assertEqual(Event.objects.filter(dt__gt=dt).count(), 0) self.assertEqual(len(recorded), 
3) for warning in recorded: msg = str(warning.message) self.assertTrue(msg.startswith("DateTimeField received a naive datetime")) def test_query_date_related_filters(self): # These two dates fall in the same day in EAT, but in different days, # years and months in UTC, and aggregation is performed in UTC when # time zone support is enabled. This test could be changed if the # implementation is changed to perform the aggregation is local time. Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT)) Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT)) self.assertEqual(Event.objects.filter(dt__year=2011).count(), 1) self.assertEqual(Event.objects.filter(dt__month=1).count(), 1) self.assertEqual(Event.objects.filter(dt__day=1).count(), 1) self.assertEqual(Event.objects.filter(dt__week_day=7).count(), 1) def test_query_aggregation(self): # Only min and max make sense for datetimes. Event.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20, tzinfo=EAT)) Event.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)) Event.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT)) result = Event.objects.all().aggregate(Min('dt'), Max('dt')) self.assertEqual(result, { 'dt__min': datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT), 'dt__max': datetime.datetime(2011, 9, 1, 23, 20, 20, tzinfo=EAT), }) def test_query_annotation(self): # Only min and max make sense for datetimes. 
morning = Session.objects.create(name='morning') afternoon = Session.objects.create(name='afternoon') SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20, tzinfo=EAT), session=afternoon) SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT), session=afternoon) SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT), session=morning) morning_min_dt = datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT) afternoon_min_dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT) self.assertQuerysetEqual( Session.objects.annotate(dt=Min('events__dt')).order_by('dt'), [morning_min_dt, afternoon_min_dt], transform=lambda d: d.dt) self.assertQuerysetEqual( Session.objects.annotate(dt=Min('events__dt')).filter(dt__lt=afternoon_min_dt), [morning_min_dt], transform=lambda d: d.dt) self.assertQuerysetEqual( Session.objects.annotate(dt=Min('events__dt')).filter(dt__gte=afternoon_min_dt), [afternoon_min_dt], transform=lambda d: d.dt) def test_query_dates(self): # Same comment as in test_query_date_related_filters. 
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT)) Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT)) self.assertQuerysetEqual(Event.objects.dates('dt', 'year'), [datetime.datetime(2010, 1, 1, tzinfo=UTC), datetime.datetime(2011, 1, 1, tzinfo=UTC)], transform=lambda d: d) self.assertQuerysetEqual(Event.objects.dates('dt', 'month'), [datetime.datetime(2010, 12, 1, tzinfo=UTC), datetime.datetime(2011, 1, 1, tzinfo=UTC)], transform=lambda d: d) self.assertQuerysetEqual(Event.objects.dates('dt', 'day'), [datetime.datetime(2010, 12, 31, tzinfo=UTC), datetime.datetime(2011, 1, 1, tzinfo=UTC)], transform=lambda d: d) def test_raw_sql(self): # Regression test for #17755 dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT) event = Event.objects.create(dt=dt) self.assertQuerysetEqual( Event.objects.raw('SELECT * FROM timezones_event WHERE dt = %s', [dt]), [event], transform=lambda d: d) @requires_tz_support def test_filter_date_field_with_aware_datetime(self): # Regression test for #17742 day = datetime.date(2011, 9, 1) event = AllDayEvent.objects.create(day=day) # This is 2011-09-02T01:30:00+03:00 in EAT dt = datetime.datetime(2011, 9, 1, 22, 30, 0, tzinfo=UTC) self.assertFalse(AllDayEvent.objects.filter(day__gte=dt).exists()) def test_null_datetime(self): # Regression test for #17294 e = MaybeEvent.objects.create() self.assertEqual(e.dt, None) @override_settings(TIME_ZONE='Africa/Nairobi') class SerializationTests(TestCase): # Backend-specific notes: # - JSON supports only milliseconds, microseconds will be truncated. # - PyYAML dumps the UTC offset correctly for timezone-aware datetimes, # but when it loads this representation, it substracts the offset and # returns a naive datetime object in UTC (http://pyyaml.org/ticket/202). # Tests are adapted to take these quirks into account. 
def assert_python_contains_datetime(self, objects, dt): self.assertEqual(objects[0]['fields']['dt'], dt) def assert_json_contains_datetime(self, json, dt): self.assertIn('"fields": {"dt": "%s"}' % dt, json) def assert_xml_contains_datetime(self, xml, dt): field = parseString(xml).getElementsByTagName('field')[0] self.assertXMLEqual(field.childNodes[0].wholeText, dt) def assert_yaml_contains_datetime(self, yaml, dt): self.assertIn("- fields: {dt: !!timestamp '%s'}" % dt, yaml) def test_naive_datetime(self): dt = datetime.datetime(2011, 9, 1, 13, 20, 30) data = serializers.serialize('python', [Event(dt=dt)]) self.assert_python_contains_datetime(data, dt) obj = next(serializers.deserialize('python', data)).object self.assertEqual(obj.dt, dt) data = serializers.serialize('json', [Event(dt=dt)]) self.assert_json_contains_datetime(data, "2011-09-01T13:20:30") obj = next(serializers.deserialize('json', data)).object self.assertEqual(obj.dt, dt) data = serializers.serialize('xml', [Event(dt=dt)]) self.assert_xml_contains_datetime(data, "2011-09-01T13:20:30") obj = next(serializers.deserialize('xml', data)).object self.assertEqual(obj.dt, dt) if 'yaml' in serializers.get_serializer_formats(): data = serializers.serialize('yaml', [Event(dt=dt)]) self.assert_yaml_contains_datetime(data, "2011-09-01 13:20:30") obj = next(serializers.deserialize('yaml', data)).object self.assertEqual(obj.dt, dt) def test_naive_datetime_with_microsecond(self): dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060) data = serializers.serialize('python', [Event(dt=dt)]) self.assert_python_contains_datetime(data, dt) obj = next(serializers.deserialize('python', data)).object self.assertEqual(obj.dt, dt) data = serializers.serialize('json', [Event(dt=dt)]) self.assert_json_contains_datetime(data, "2011-09-01T13:20:30.405") obj = next(serializers.deserialize('json', data)).object self.assertEqual(obj.dt, dt.replace(microsecond=405000)) data = serializers.serialize('xml', [Event(dt=dt)]) 
self.assert_xml_contains_datetime(data, "2011-09-01T13:20:30.405060") obj = next(serializers.deserialize('xml', data)).object self.assertEqual(obj.dt, dt) if 'yaml' in serializers.get_serializer_formats(): data = serializers.serialize('yaml', [Event(dt=dt)]) self.assert_yaml_contains_datetime(data, "2011-09-01 13:20:30.405060") obj = next(serializers.deserialize('yaml', data)).object self.assertEqual(obj.dt, dt) def test_aware_datetime_with_microsecond(self): dt = datetime.datetime(2011, 9, 1, 17, 20, 30, 405060, tzinfo=ICT) data = serializers.serialize('python', [Event(dt=dt)]) self.assert_python_contains_datetime(data, dt) obj = next(serializers.deserialize('python', data)).object self.assertEqual(obj.dt, dt) data = serializers.serialize('json', [Event(dt=dt)]) self.assert_json_contains_datetime(data, "2011-09-01T17:20:30.405+07:00") obj = next(serializers.deserialize('json', data)).object self.assertEqual(obj.dt, dt.replace(microsecond=405000)) data = serializers.serialize('xml', [Event(dt=dt)]) self.assert_xml_contains_datetime(data, "2011-09-01T17:20:30.405060+07:00") obj = next(serializers.deserialize('xml', data)).object self.assertEqual(obj.dt, dt) if 'yaml' in serializers.get_serializer_formats(): data = serializers.serialize('yaml', [Event(dt=dt)]) self.assert_yaml_contains_datetime(data, "2011-09-01 17:20:30.405060+07:00") obj = next(serializers.deserialize('yaml', data)).object self.assertEqual(obj.dt.replace(tzinfo=UTC), dt) def test_aware_datetime_in_utc(self): dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC) data = serializers.serialize('python', [Event(dt=dt)]) self.assert_python_contains_datetime(data, dt) obj = next(serializers.deserialize('python', data)).object self.assertEqual(obj.dt, dt) data = serializers.serialize('json', [Event(dt=dt)]) self.assert_json_contains_datetime(data, "2011-09-01T10:20:30Z") obj = next(serializers.deserialize('json', data)).object self.assertEqual(obj.dt, dt) data = serializers.serialize('xml', 
[Event(dt=dt)]) self.assert_xml_contains_datetime(data, "2011-09-01T10:20:30+00:00") obj = next(serializers.deserialize('xml', data)).object self.assertEqual(obj.dt, dt) if 'yaml' in serializers.get_serializer_formats(): data = serializers.serialize('yaml', [Event(dt=dt)]) self.assert_yaml_contains_datetime(data, "2011-09-01 10:20:30+00:00") obj = next(serializers.deserialize('yaml', data)).object self.assertEqual(obj.dt.replace(tzinfo=UTC), dt) def test_aware_datetime_in_local_timezone(self): dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT) data = serializers.serialize('python', [Event(dt=dt)]) self.assert_python_contains_datetime(data, dt) obj = next(serializers.deserialize('python', data)).object self.assertEqual(obj.dt, dt) data = serializers.serialize('json', [Event(dt=dt)]) self.assert_json_contains_datetime(data, "2011-09-01T13:20:30+03:00") obj = next(serializers.deserialize('json', data)).object self.assertEqual(obj.dt, dt) data = serializers.serialize('xml', [Event(dt=dt)]) self.assert_xml_contains_datetime(data, "2011-09-01T13:20:30+03:00") obj = next(serializers.deserialize('xml', data)).object self.assertEqual(obj.dt, dt) if 'yaml' in serializers.get_serializer_formats(): data = serializers.serialize('yaml', [Event(dt=dt)]) self.assert_yaml_contains_datetime(data, "2011-09-01 13:20:30+03:00") obj = next(serializers.deserialize('yaml', data)).object self.assertEqual(obj.dt.replace(tzinfo=UTC), dt) def test_aware_datetime_in_other_timezone(self): dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT) data = serializers.serialize('python', [Event(dt=dt)]) self.assert_python_contains_datetime(data, dt) obj = next(serializers.deserialize('python', data)).object self.assertEqual(obj.dt, dt) data = serializers.serialize('json', [Event(dt=dt)]) self.assert_json_contains_datetime(data, "2011-09-01T17:20:30+07:00") obj = next(serializers.deserialize('json', data)).object self.assertEqual(obj.dt, dt) data = serializers.serialize('xml', 
[Event(dt=dt)]) self.assert_xml_contains_datetime(data, "2011-09-01T17:20:30+07:00") obj = next(serializers.deserialize('xml', data)).object self.assertEqual(obj.dt, dt) if 'yaml' in serializers.get_serializer_formats(): data = serializers.serialize('yaml', [Event(dt=dt)]) self.assert_yaml_contains_datetime(data, "2011-09-01 17:20:30+07:00") obj = next(serializers.deserialize('yaml', data)).object self.assertEqual(obj.dt.replace(tzinfo=UTC), dt) @override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=True) class TemplateTests(TestCase): @requires_tz_support def test_localtime_templatetag_and_filters(self): """ Test the {% localtime %} templatetag and related filters. """ datetimes = { 'utc': datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC), 'eat': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT), 'ict': datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT), 'naive': datetime.datetime(2011, 9, 1, 13, 20, 30), } templates = { 'notag': Template("{% load tz %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}"), 'noarg': Template("{% load tz %}{% localtime %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}{% endlocaltime %}"), 'on': Template("{% load tz %}{% localtime on %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}{% endlocaltime %}"), 'off': Template("{% load tz %}{% localtime off %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}{% endlocaltime %}"), } # Transform a list of keys in 'datetimes' to the expected template # output. This makes the definition of 'results' more readable. 
def t(*result): return '|'.join(datetimes[key].isoformat() for key in result) # Results for USE_TZ = True results = { 'utc': { 'notag': t('eat', 'eat', 'utc', 'ict'), 'noarg': t('eat', 'eat', 'utc', 'ict'), 'on': t('eat', 'eat', 'utc', 'ict'), 'off': t('utc', 'eat', 'utc', 'ict'), }, 'eat': { 'notag': t('eat', 'eat', 'utc', 'ict'), 'noarg': t('eat', 'eat', 'utc', 'ict'), 'on': t('eat', 'eat', 'utc', 'ict'), 'off': t('eat', 'eat', 'utc', 'ict'), }, 'ict': { 'notag': t('eat', 'eat', 'utc', 'ict'), 'noarg': t('eat', 'eat', 'utc', 'ict'), 'on': t('eat', 'eat', 'utc', 'ict'), 'off': t('ict', 'eat', 'utc', 'ict'), }, 'naive': { 'notag': t('naive', 'eat', 'utc', 'ict'), 'noarg': t('naive', 'eat', 'utc', 'ict'), 'on': t('naive', 'eat', 'utc', 'ict'), 'off': t('naive', 'eat', 'utc', 'ict'), } } for k1, dt in six.iteritems(datetimes): for k2, tpl in six.iteritems(templates): ctx = Context({'dt': dt, 'ICT': ICT}) actual = tpl.render(ctx) expected = results[k1][k2] self.assertEqual(actual, expected, '%s / %s: %r != %r' % (k1, k2, actual, expected)) # Changes for USE_TZ = False results['utc']['notag'] = t('utc', 'eat', 'utc', 'ict') results['ict']['notag'] = t('ict', 'eat', 'utc', 'ict') with self.settings(USE_TZ=False): for k1, dt in six.iteritems(datetimes): for k2, tpl in six.iteritems(templates): ctx = Context({'dt': dt, 'ICT': ICT}) actual = tpl.render(ctx) expected = results[k1][k2] self.assertEqual(actual, expected, '%s / %s: %r != %r' % (k1, k2, actual, expected)) @skipIf(pytz is None, "this test requires pytz") def test_localtime_filters_with_pytz(self): """ Test the |localtime, |utc, and |timezone filters with pytz. 
""" # Use a pytz timezone as local time tpl = Template("{% load tz %}{{ dt|localtime }}|{{ dt|utc }}") ctx = Context({'dt': datetime.datetime(2011, 9, 1, 12, 20, 30)}) with self.settings(TIME_ZONE='Europe/Paris'): self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00|2011-09-01T10:20:30+00:00") # Use a pytz timezone as argument tpl = Template("{% load tz %}{{ dt|timezone:tz }}") ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30), 'tz': pytz.timezone('Europe/Paris')}) self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00") # Use a pytz timezone name as argument tpl = Template("{% load tz %}{{ dt|timezone:'Europe/Paris' }}") ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30), 'tz': pytz.timezone('Europe/Paris')}) self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00") def test_localtime_templatetag_invalid_argument(self): with self.assertRaises(TemplateSyntaxError): Template("{% load tz %}{% localtime foo %}{% endlocaltime %}").render() def test_localtime_filters_do_not_raise_exceptions(self): """ Test the |localtime, |utc, and |timezone filters on bad inputs. """ tpl = Template("{% load tz %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:tz }}") with self.settings(USE_TZ=True): # bad datetime value ctx = Context({'dt': None, 'tz': ICT}) self.assertEqual(tpl.render(ctx), "None|||") ctx = Context({'dt': 'not a date', 'tz': ICT}) self.assertEqual(tpl.render(ctx), "not a date|||") # bad timezone value tpl = Template("{% load tz %}{{ dt|timezone:tz }}") ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30), 'tz': None}) self.assertEqual(tpl.render(ctx), "") ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30), 'tz': 'not a tz'}) self.assertEqual(tpl.render(ctx), "") @requires_tz_support def test_timezone_templatetag(self): """ Test the {% timezone %} templatetag. 
""" tpl = Template("{% load tz %}" "{{ dt }}|" "{% timezone tz1 %}" "{{ dt }}|" "{% timezone tz2 %}" "{{ dt }}" "{% endtimezone %}" "{% endtimezone %}") ctx = Context({'dt': datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC), 'tz1': ICT, 'tz2': None}) self.assertEqual(tpl.render(ctx), "2011-09-01T13:20:30+03:00|2011-09-01T17:20:30+07:00|2011-09-01T13:20:30+03:00") @skipIf(pytz is None, "this test requires pytz") def test_timezone_templatetag_with_pytz(self): """ Test the {% timezone %} templatetag with pytz. """ tpl = Template("{% load tz %}{% timezone tz %}{{ dt }}{% endtimezone %}") # Use a pytz timezone as argument ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT), 'tz': pytz.timezone('Europe/Paris')}) self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00") # Use a pytz timezone name as argument ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT), 'tz': 'Europe/Paris'}) self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00") def test_timezone_templatetag_invalid_argument(self): with self.assertRaises(TemplateSyntaxError): Template("{% load tz %}{% timezone %}{% endtimezone %}").render() with self.assertRaises(ValueError if pytz is None else pytz.UnknownTimeZoneError): Template("{% load tz %}{% timezone tz %}{% endtimezone %}").render(Context({'tz': 'foobar'})) @skipIf(sys.platform.startswith('win'), "Windows uses non-standard time zone names") def test_get_current_timezone_templatetag(self): """ Test the {% get_current_timezone %} templatetag. 
""" tpl = Template("{% load tz %}{% get_current_timezone as time_zone %}{{ time_zone }}") self.assertEqual(tpl.render(Context()), "Africa/Nairobi" if pytz else "EAT") with timezone.override(UTC): self.assertEqual(tpl.render(Context()), "UTC") tpl = Template("{% load tz %}{% timezone tz %}{% get_current_timezone as time_zone %}{% endtimezone %}{{ time_zone }}") self.assertEqual(tpl.render(Context({'tz': ICT})), "+0700") with timezone.override(UTC): self.assertEqual(tpl.render(Context({'tz': ICT})), "+0700") @skipIf(pytz is None, "this test requires pytz") def test_get_current_timezone_templatetag_with_pytz(self): """ Test the {% get_current_timezone %} templatetag with pytz. """ tpl = Template("{% load tz %}{% get_current_timezone as time_zone %}{{ time_zone }}") with timezone.override(pytz.timezone('Europe/Paris')): self.assertEqual(tpl.render(Context()), "Europe/Paris") tpl = Template("{% load tz %}{% timezone 'Europe/Paris' %}{% get_current_timezone as time_zone %}{% endtimezone %}{{ time_zone }}") self.assertEqual(tpl.render(Context()), "Europe/Paris") def test_get_current_timezone_templatetag_invalid_argument(self): with self.assertRaises(TemplateSyntaxError): Template("{% load tz %}{% get_current_timezone %}").render() @skipIf(sys.platform.startswith('win'), "Windows uses non-standard time zone names") def test_tz_template_context_processor(self): """ Test the django.core.context_processors.tz template context processor. 
""" tpl = Template("{{ TIME_ZONE }}") self.assertEqual(tpl.render(Context()), "") self.assertEqual(tpl.render(RequestContext(HttpRequest())), "Africa/Nairobi" if pytz else "EAT") @requires_tz_support def test_date_and_time_template_filters(self): tpl = Template("{{ dt|date:'Y-m-d' }} at {{ dt|time:'H:i:s' }}") ctx = Context({'dt': datetime.datetime(2011, 9, 1, 20, 20, 20, tzinfo=UTC)}) self.assertEqual(tpl.render(ctx), "2011-09-01 at 23:20:20") with timezone.override(ICT): self.assertEqual(tpl.render(ctx), "2011-09-02 at 03:20:20") def test_date_and_time_template_filters_honor_localtime(self): tpl = Template("{% load tz %}{% localtime off %}{{ dt|date:'Y-m-d' }} at {{ dt|time:'H:i:s' }}{% endlocaltime %}") ctx = Context({'dt': datetime.datetime(2011, 9, 1, 20, 20, 20, tzinfo=UTC)}) self.assertEqual(tpl.render(ctx), "2011-09-01 at 20:20:20") with timezone.override(ICT): self.assertEqual(tpl.render(ctx), "2011-09-01 at 20:20:20") def test_localtime_with_time_zone_setting_set_to_none(self): # Regression for #17274 tpl = Template("{% load tz %}{{ dt }}") ctx = Context({'dt': datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=EAT)}) with self.settings(TIME_ZONE=None): # the actual value depends on the system time zone of the host self.assertTrue(tpl.render(ctx).startswith("2011")) @requires_tz_support def test_now_template_tag_uses_current_time_zone(self): # Regression for #17343 tpl = Template("{% now \"O\" %}") self.assertEqual(tpl.render(Context({})), "+0300") with timezone.override(ICT): self.assertEqual(tpl.render(Context({})), "+0700") @override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=False) class LegacyFormsTests(TestCase): def test_form(self): form = EventForm({'dt': '2011-09-01 13:20:30'}) self.assertTrue(form.is_valid()) self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 13, 20, 30)) @skipIf(pytz is None, "this test requires pytz") def test_form_with_non_existent_time(self): form = EventForm({'dt': 
'2011-03-27 02:30:00'}) with timezone.override(pytz.timezone('Europe/Paris')): # this is obviously a bug self.assertTrue(form.is_valid()) self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 3, 27, 2, 30, 0)) @skipIf(pytz is None, "this test requires pytz") def test_form_with_ambiguous_time(self): form = EventForm({'dt': '2011-10-30 02:30:00'}) with timezone.override(pytz.timezone('Europe/Paris')): # this is obviously a bug self.assertTrue(form.is_valid()) self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 10, 30, 2, 30, 0)) def test_split_form(self): form = EventSplitForm({'dt_0': '2011-09-01', 'dt_1': '13:20:30'}) self.assertTrue(form.is_valid()) self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 13, 20, 30)) def test_model_form(self): EventModelForm({'dt': '2011-09-01 13:20:30'}).save() e = Event.objects.get() self.assertEqual(e.dt, datetime.datetime(2011, 9, 1, 13, 20, 30)) @override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=True) class NewFormsTests(TestCase): @requires_tz_support def test_form(self): form = EventForm({'dt': '2011-09-01 13:20:30'}) self.assertTrue(form.is_valid()) self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)) def test_form_with_other_timezone(self): form = EventForm({'dt': '2011-09-01 17:20:30'}) with timezone.override(ICT): self.assertTrue(form.is_valid()) self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)) @skipIf(pytz is None, "this test requires pytz") def test_form_with_non_existent_time(self): with timezone.override(pytz.timezone('Europe/Paris')): form = EventForm({'dt': '2011-03-27 02:30:00'}) self.assertFalse(form.is_valid()) self.assertEqual(form.errors['dt'], ["2011-03-27 02:30:00 couldn't be interpreted in time zone " "Europe/Paris; it may be ambiguous or it may not exist."]) @skipIf(pytz is None, "this test requires pytz") def 
test_form_with_ambiguous_time(self): with timezone.override(pytz.timezone('Europe/Paris')): form = EventForm({'dt': '2011-10-30 02:30:00'}) self.assertFalse(form.is_valid()) self.assertEqual(form.errors['dt'], ["2011-10-30 02:30:00 couldn't be interpreted in time zone " "Europe/Paris; it may be ambiguous or it may not exist."]) @requires_tz_support def test_split_form(self): form = EventSplitForm({'dt_0': '2011-09-01', 'dt_1': '13:20:30'}) self.assertTrue(form.is_valid()) self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)) @requires_tz_support def test_model_form(self): EventModelForm({'dt': '2011-09-01 13:20:30'}).save() e = Event.objects.get() self.assertEqual(e.dt, datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)) @override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=True, PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',)) class AdminTests(TestCase): urls = 'modeltests.timezones.urls' fixtures = ['tz_users.xml'] def setUp(self): self.client.login(username='super', password='secret') @requires_tz_support def test_changelist(self): e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)) response = self.client.get(reverse('admin:timezones_event_changelist')) self.assertContains(response, e.dt.astimezone(EAT).isoformat()) def test_changelist_in_other_timezone(self): e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)) with timezone.override(ICT): response = self.client.get(reverse('admin:timezones_event_changelist')) self.assertContains(response, e.dt.astimezone(ICT).isoformat()) @requires_tz_support def test_change_editable(self): e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)) response = self.client.get(reverse('admin:timezones_event_change', args=(e.pk,))) self.assertContains(response, e.dt.astimezone(EAT).date().isoformat()) self.assertContains(response, 
e.dt.astimezone(EAT).time().isoformat()) def test_change_editable_in_other_timezone(self): e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)) with timezone.override(ICT): response = self.client.get(reverse('admin:timezones_event_change', args=(e.pk,))) self.assertContains(response, e.dt.astimezone(ICT).date().isoformat()) self.assertContains(response, e.dt.astimezone(ICT).time().isoformat()) @requires_tz_support def test_change_readonly(self): Timestamp.objects.create() # re-fetch the object for backends that lose microseconds (MySQL) t = Timestamp.objects.get() response = self.client.get(reverse('admin:timezones_timestamp_change', args=(t.pk,))) self.assertContains(response, t.created.astimezone(EAT).isoformat()) def test_change_readonly_in_other_timezone(self): Timestamp.objects.create() # re-fetch the object for backends that lose microseconds (MySQL) t = Timestamp.objects.get() with timezone.override(ICT): response = self.client.get(reverse('admin:timezones_timestamp_change', args=(t.pk,))) self.assertContains(response, t.created.astimezone(ICT).isoformat()) @override_settings(TIME_ZONE='Africa/Nairobi') class UtilitiesTests(TestCase): def test_make_aware(self): self.assertEqual( timezone.make_aware(datetime.datetime(2011, 9, 1, 13, 20, 30), EAT), datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT) ) self.assertEqual( timezone.make_aware(datetime.datetime(2011, 9, 1, 10, 20, 30), UTC), datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC) ) def test_make_naive(self): self.assertEqual( timezone.make_naive(datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT), EAT), datetime.datetime(2011, 9, 1, 13, 20, 30) ) self.assertEqual( timezone.make_naive(datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT), UTC), datetime.datetime(2011, 9, 1, 10, 20, 30) ) self.assertEqual( timezone.make_naive(datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC), UTC), datetime.datetime(2011, 9, 1, 10, 20, 30) )
agpl-3.0
pschmitt/home-assistant
homeassistant/components/shopping_list/__init__.py
7
9108
"""Support to manage a shopping list.""" import logging import uuid import voluptuous as vol from homeassistant import config_entries from homeassistant.components import http, websocket_api from homeassistant.components.http.data_validator import RequestDataValidator from homeassistant.const import HTTP_BAD_REQUEST, HTTP_NOT_FOUND from homeassistant.core import callback import homeassistant.helpers.config_validation as cv from homeassistant.util.json import load_json, save_json from .const import DOMAIN ATTR_NAME = "name" _LOGGER = logging.getLogger(__name__) CONFIG_SCHEMA = vol.Schema({DOMAIN: {}}, extra=vol.ALLOW_EXTRA) EVENT = "shopping_list_updated" ITEM_UPDATE_SCHEMA = vol.Schema({"complete": bool, ATTR_NAME: str}) PERSISTENCE = ".shopping_list.json" SERVICE_ADD_ITEM = "add_item" SERVICE_COMPLETE_ITEM = "complete_item" SERVICE_ITEM_SCHEMA = vol.Schema({vol.Required(ATTR_NAME): vol.Any(None, cv.string)}) WS_TYPE_SHOPPING_LIST_ITEMS = "shopping_list/items" WS_TYPE_SHOPPING_LIST_ADD_ITEM = "shopping_list/items/add" WS_TYPE_SHOPPING_LIST_UPDATE_ITEM = "shopping_list/items/update" WS_TYPE_SHOPPING_LIST_CLEAR_ITEMS = "shopping_list/items/clear" SCHEMA_WEBSOCKET_ITEMS = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend( {vol.Required("type"): WS_TYPE_SHOPPING_LIST_ITEMS} ) SCHEMA_WEBSOCKET_ADD_ITEM = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend( {vol.Required("type"): WS_TYPE_SHOPPING_LIST_ADD_ITEM, vol.Required("name"): str} ) SCHEMA_WEBSOCKET_UPDATE_ITEM = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend( { vol.Required("type"): WS_TYPE_SHOPPING_LIST_UPDATE_ITEM, vol.Required("item_id"): str, vol.Optional("name"): str, vol.Optional("complete"): bool, } ) SCHEMA_WEBSOCKET_CLEAR_ITEMS = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend( {vol.Required("type"): WS_TYPE_SHOPPING_LIST_CLEAR_ITEMS} ) async def async_setup(hass, config): """Initialize the shopping list.""" if DOMAIN not in config: return True hass.async_create_task( hass.config_entries.flow.async_init( 
DOMAIN, context={"source": config_entries.SOURCE_IMPORT} ) ) return True async def async_setup_entry(hass, config_entry): """Set up shopping list from config flow.""" async def add_item_service(call): """Add an item with `name`.""" data = hass.data[DOMAIN] name = call.data.get(ATTR_NAME) if name is not None: data.async_add(name) async def complete_item_service(call): """Mark the item provided via `name` as completed.""" data = hass.data[DOMAIN] name = call.data.get(ATTR_NAME) if name is None: return try: item = [item for item in data.items if item["name"] == name][0] except IndexError: _LOGGER.error("Removing of item failed: %s cannot be found", name) else: data.async_update(item["id"], {"name": name, "complete": True}) data = hass.data[DOMAIN] = ShoppingData(hass) await data.async_load() hass.services.async_register( DOMAIN, SERVICE_ADD_ITEM, add_item_service, schema=SERVICE_ITEM_SCHEMA ) hass.services.async_register( DOMAIN, SERVICE_COMPLETE_ITEM, complete_item_service, schema=SERVICE_ITEM_SCHEMA ) hass.http.register_view(ShoppingListView) hass.http.register_view(CreateShoppingListItemView) hass.http.register_view(UpdateShoppingListItemView) hass.http.register_view(ClearCompletedItemsView) hass.components.frontend.async_register_built_in_panel( "shopping-list", "shopping_list", "mdi:cart" ) hass.components.websocket_api.async_register_command( WS_TYPE_SHOPPING_LIST_ITEMS, websocket_handle_items, SCHEMA_WEBSOCKET_ITEMS ) hass.components.websocket_api.async_register_command( WS_TYPE_SHOPPING_LIST_ADD_ITEM, websocket_handle_add, SCHEMA_WEBSOCKET_ADD_ITEM ) hass.components.websocket_api.async_register_command( WS_TYPE_SHOPPING_LIST_UPDATE_ITEM, websocket_handle_update, SCHEMA_WEBSOCKET_UPDATE_ITEM, ) hass.components.websocket_api.async_register_command( WS_TYPE_SHOPPING_LIST_CLEAR_ITEMS, websocket_handle_clear, SCHEMA_WEBSOCKET_CLEAR_ITEMS, ) return True class ShoppingData: """Class to hold shopping list data.""" def __init__(self, hass): """Initialize the shopping 
list.""" self.hass = hass self.items = [] @callback def async_add(self, name): """Add a shopping list item.""" item = {"name": name, "id": uuid.uuid4().hex, "complete": False} self.items.append(item) self.hass.async_add_job(self.save) return item @callback def async_update(self, item_id, info): """Update a shopping list item.""" item = next((itm for itm in self.items if itm["id"] == item_id), None) if item is None: raise KeyError info = ITEM_UPDATE_SCHEMA(info) item.update(info) self.hass.async_add_job(self.save) return item @callback def async_clear_completed(self): """Clear completed items.""" self.items = [itm for itm in self.items if not itm["complete"]] self.hass.async_add_job(self.save) async def async_load(self): """Load items.""" def load(): """Load the items synchronously.""" return load_json(self.hass.config.path(PERSISTENCE), default=[]) self.items = await self.hass.async_add_executor_job(load) def save(self): """Save the items.""" save_json(self.hass.config.path(PERSISTENCE), self.items) class ShoppingListView(http.HomeAssistantView): """View to retrieve shopping list content.""" url = "/api/shopping_list" name = "api:shopping_list" @callback def get(self, request): """Retrieve shopping list items.""" return self.json(request.app["hass"].data[DOMAIN].items) class UpdateShoppingListItemView(http.HomeAssistantView): """View to retrieve shopping list content.""" url = "/api/shopping_list/item/{item_id}" name = "api:shopping_list:item:id" async def post(self, request, item_id): """Update a shopping list item.""" data = await request.json() try: item = request.app["hass"].data[DOMAIN].async_update(item_id, data) request.app["hass"].bus.async_fire(EVENT) return self.json(item) except KeyError: return self.json_message("Item not found", HTTP_NOT_FOUND) except vol.Invalid: return self.json_message("Item not found", HTTP_BAD_REQUEST) class CreateShoppingListItemView(http.HomeAssistantView): """View to retrieve shopping list content.""" url = 
"/api/shopping_list/item" name = "api:shopping_list:item" @RequestDataValidator(vol.Schema({vol.Required("name"): str})) async def post(self, request, data): """Create a new shopping list item.""" item = request.app["hass"].data[DOMAIN].async_add(data["name"]) request.app["hass"].bus.async_fire(EVENT) return self.json(item) class ClearCompletedItemsView(http.HomeAssistantView): """View to retrieve shopping list content.""" url = "/api/shopping_list/clear_completed" name = "api:shopping_list:clear_completed" @callback def post(self, request): """Retrieve if API is running.""" hass = request.app["hass"] hass.data[DOMAIN].async_clear_completed() hass.bus.async_fire(EVENT) return self.json_message("Cleared completed items.") @callback def websocket_handle_items(hass, connection, msg): """Handle get shopping_list items.""" connection.send_message( websocket_api.result_message(msg["id"], hass.data[DOMAIN].items) ) @callback def websocket_handle_add(hass, connection, msg): """Handle add item to shopping_list.""" item = hass.data[DOMAIN].async_add(msg["name"]) hass.bus.async_fire(EVENT, {"action": "add", "item": item}) connection.send_message(websocket_api.result_message(msg["id"], item)) @websocket_api.async_response async def websocket_handle_update(hass, connection, msg): """Handle update shopping_list item.""" msg_id = msg.pop("id") item_id = msg.pop("item_id") msg.pop("type") data = msg try: item = hass.data[DOMAIN].async_update(item_id, data) hass.bus.async_fire(EVENT, {"action": "update", "item": item}) connection.send_message(websocket_api.result_message(msg_id, item)) except KeyError: connection.send_message( websocket_api.error_message(msg_id, "item_not_found", "Item not found") ) @callback def websocket_handle_clear(hass, connection, msg): """Handle clearing shopping_list items.""" hass.data[DOMAIN].async_clear_completed() hass.bus.async_fire(EVENT, {"action": "clear"}) connection.send_message(websocket_api.result_message(msg["id"]))
apache-2.0
mnahm5/django-estore
Lib/site-packages/boto/manage/__init__.py
271
1108
# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. #
mit
DorRosenblum/tf_flstm_f-lm
tmp/tensorflow/mnist/fully_connected_feed.py
33
9650
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Trains and Evaluates the MNIST network using a feed dictionary.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # pylint: disable=missing-docstring import argparse import os import sys import time from six.moves import xrange # pylint: disable=redefined-builtin import tensorflow as tf from tensorflow.examples.tutorials.mnist import input_data from tensorflow.examples.tutorials.mnist import mnist # Basic model parameters as external flags. FLAGS = None def placeholder_inputs(batch_size): """Generate placeholder variables to represent the input tensors. These placeholders are used as inputs by the rest of the model building code and will be fed from the downloaded data in the .run() loop, below. Args: batch_size: The batch size will be baked into both placeholders. Returns: images_placeholder: Images placeholder. labels_placeholder: Labels placeholder. """ # Note that the shapes of the placeholders match the shapes of the full # image and label tensors, except the first dimension is now batch_size # rather than the full size of the train or test data sets. 
images_placeholder = tf.placeholder(tf.float32, shape=(batch_size, mnist.IMAGE_PIXELS)) labels_placeholder = tf.placeholder(tf.int32, shape=(batch_size)) return images_placeholder, labels_placeholder def fill_feed_dict(data_set, images_pl, labels_pl): """Fills the feed_dict for training the given step. A feed_dict takes the form of: feed_dict = { <placeholder>: <tensor of values to be passed for placeholder>, .... } Args: data_set: The set of images and labels, from input_data.read_data_sets() images_pl: The images placeholder, from placeholder_inputs(). labels_pl: The labels placeholder, from placeholder_inputs(). Returns: feed_dict: The feed dictionary mapping from placeholders to values. """ # Create the feed_dict for the placeholders filled with the next # `batch size` examples. images_feed, labels_feed = data_set.next_batch(FLAGS.batch_size, FLAGS.fake_data) feed_dict = { images_pl: images_feed, labels_pl: labels_feed, } return feed_dict def do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_set): """Runs one evaluation against the full epoch of data. Args: sess: The session in which the model has been trained. eval_correct: The Tensor that returns the number of correct predictions. images_placeholder: The images placeholder. labels_placeholder: The labels placeholder. data_set: The set of images and labels to evaluate, from input_data.read_data_sets(). """ # And run one epoch of eval. true_count = 0 # Counts the number of correct predictions. 
steps_per_epoch = data_set.num_examples // FLAGS.batch_size num_examples = steps_per_epoch * FLAGS.batch_size for step in xrange(steps_per_epoch): feed_dict = fill_feed_dict(data_set, images_placeholder, labels_placeholder) true_count += sess.run(eval_correct, feed_dict=feed_dict) precision = float(true_count) / num_examples print(' Num examples: %d Num correct: %d Precision @ 1: %0.04f' % (num_examples, true_count, precision)) def run_training(): """Train MNIST for a number of steps.""" # Get the sets of images and labels for training, validation, and # test on MNIST. data_sets = input_data.read_data_sets(FLAGS.input_data_dir, FLAGS.fake_data) # Tell TensorFlow that the model will be built into the default Graph. with tf.Graph().as_default(): # Generate placeholders for the images and labels. images_placeholder, labels_placeholder = placeholder_inputs( FLAGS.batch_size) # Build a Graph that computes predictions from the inference model. logits = mnist.inference(images_placeholder, FLAGS.hidden1, FLAGS.hidden2) # Add to the Graph the Ops for loss calculation. loss = mnist.loss(logits, labels_placeholder) # Add to the Graph the Ops that calculate and apply gradients. train_op = mnist.training(loss, FLAGS.learning_rate) # Add the Op to compare the logits to the labels during evaluation. eval_correct = mnist.evaluation(logits, labels_placeholder) # Build the summary Tensor based on the TF collection of Summaries. summary = tf.summary.merge_all() # Add the variable initializer Op. init = tf.global_variables_initializer() # Create a saver for writing training checkpoints. saver = tf.train.Saver() # Create a session for running Ops on the Graph. sess = tf.Session() # Instantiate a SummaryWriter to output summaries and the Graph. summary_writer = tf.summary.FileWriter(FLAGS.log_dir, sess.graph) # And then after everything is built: # Run the Op to initialize the variables. sess.run(init) # Start the training loop. 
for step in xrange(FLAGS.max_steps): start_time = time.time() # Fill a feed dictionary with the actual set of images and labels # for this particular training step. feed_dict = fill_feed_dict(data_sets.train, images_placeholder, labels_placeholder) # Run one step of the model. The return values are the activations # from the `train_op` (which is discarded) and the `loss` Op. To # inspect the values of your Ops or variables, you may include them # in the list passed to sess.run() and the value tensors will be # returned in the tuple from the call. _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict) duration = time.time() - start_time # Write the summaries and print an overview fairly often. if step % 100 == 0: # Print status to stdout. print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration)) # Update the events file. summary_str = sess.run(summary, feed_dict=feed_dict) summary_writer.add_summary(summary_str, step) summary_writer.flush() # Save a checkpoint and evaluate the model periodically. if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps: checkpoint_file = os.path.join(FLAGS.log_dir, 'model.ckpt') saver.save(sess, checkpoint_file, global_step=step) # Evaluate against the training set. print('Training Data Eval:') do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_sets.train) # Evaluate against the validation set. print('Validation Data Eval:') do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_sets.validation) # Evaluate against the test set. print('Test Data Eval:') do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_sets.test) def main(_): if tf.gfile.Exists(FLAGS.log_dir): tf.gfile.DeleteRecursively(FLAGS.log_dir) tf.gfile.MakeDirs(FLAGS.log_dir) run_training() if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument( '--learning_rate', type=float, default=0.01, help='Initial learning rate.' 
) parser.add_argument( '--max_steps', type=int, default=2000, help='Number of steps to run trainer.' ) parser.add_argument( '--hidden1', type=int, default=128, help='Number of units in hidden layer 1.' ) parser.add_argument( '--hidden2', type=int, default=32, help='Number of units in hidden layer 2.' ) parser.add_argument( '--batch_size', type=int, default=100, help='Batch size. Must divide evenly into the dataset sizes.' ) parser.add_argument( '--input_data_dir', type=str, default=os.path.join(os.getenv('TEST_TMPDIR', '/tmp'), 'tensorflow/mnist/input_data'), help='Directory to put the input data.' ) parser.add_argument( '--log_dir', type=str, default=os.path.join(os.getenv('TEST_TMPDIR', '/tmp'), 'tensorflow/mnist/logs/fully_connected_feed'), help='Directory to put the log data.' ) parser.add_argument( '--fake_data', default=False, help='If true, uses fake data for unit testing.', action='store_true' ) FLAGS, unparsed = parser.parse_known_args() tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
mit
Ashaba/rms
rmslocalenv/lib/python2.7/site-packages/PIL/Hdf5StubImagePlugin.py
21
1565
# # The Python Imaging Library # $Id$ # # HDF5 stub adapter # # Copyright (c) 2000-2003 by Fredrik Lundh # # See the README file for information on usage and redistribution. # from PIL import Image, ImageFile _handler = None def register_handler(handler): """ Install application-specific HDF5 image handler. :param handler: Handler object. """ global _handler _handler = handler # -------------------------------------------------------------------- # Image adapter def _accept(prefix): return prefix[:8] == b"\x89HDF\r\n\x1a\n" class HDF5StubImageFile(ImageFile.StubImageFile): format = "HDF5" format_description = "HDF5" def _open(self): offset = self.fp.tell() if not _accept(self.fp.read(8)): raise SyntaxError("Not an HDF file") self.fp.seek(offset) # make something up self.mode = "F" self.size = 1, 1 loader = self._load() if loader: loader.open(self) def _load(self): return _handler def _save(im, fp, filename): if _handler is None or not hasattr("_handler", "save"): raise IOError("HDF5 save handler not installed") _handler.save(im, fp, filename) # -------------------------------------------------------------------- # Registry Image.register_open(HDF5StubImageFile.format, HDF5StubImageFile, _accept) Image.register_save(HDF5StubImageFile.format, _save) Image.register_extension(HDF5StubImageFile.format, ".h5") Image.register_extension(HDF5StubImageFile.format, ".hdf")
mit
polimediaupv/edx-platform
lms/djangoapps/mobile_api/social_facebook/friends/tests.py
128
14336
# pylint: disable=E1101 """ Tests for friends """ import json import httpretty from django.core.urlresolvers import reverse from xmodule.modulestore.tests.factories import CourseFactory from ..test_utils import SocialFacebookTestCase class TestFriends(SocialFacebookTestCase): """ Tests for /api/mobile/v0.5/friends/... """ def setUp(self): super(TestFriends, self).setUp() self.course = CourseFactory.create() @httpretty.activate def test_no_friends_enrolled(self): # User 1 set up self.user_create_and_signin(1) # Link user_1's edX account to FB self.link_edx_account_to_social(self.users[1], self.BACKEND, self.USERS[1]['FB_ID']) self.set_sharing_preferences(self.users[1], True) # Set the interceptor self.set_facebook_interceptor_for_friends( { 'data': [ {'name': self.USERS[1]['USERNAME'], 'id': self.USERS[1]['FB_ID']}, {'name': self.USERS[2]['USERNAME'], 'id': self.USERS[2]['FB_ID']}, {'name': self.USERS[3]['USERNAME'], 'id': self.USERS[3]['FB_ID']}, ] } ) course_id = unicode(self.course.id) url = reverse('friends-in-course', kwargs={"course_id": course_id}) response = self.client.get(url, {'format': 'json', 'oauth_token': self._FB_USER_ACCESS_TOKEN}) # Assert that no friends are returned self.assertEqual(response.status_code, 200) self.assertTrue('friends' in response.data and len(response.data['friends']) == 0) @httpretty.activate def test_no_friends_on_facebook(self): # User 1 set up self.user_create_and_signin(1) # Enroll user_1 in the course self.enroll_in_course(self.users[1], self.course) self.set_sharing_preferences(self.users[1], True) # Link user_1's edX account to FB self.link_edx_account_to_social(self.users[1], self.BACKEND, self.USERS[1]['FB_ID']) # Set the interceptor self.set_facebook_interceptor_for_friends({'data': []}) course_id = unicode(self.course.id) url = reverse('friends-in-course', kwargs={"course_id": course_id}) response = self.client.get( url, {'format': 'json', 'oauth_token': self._FB_USER_ACCESS_TOKEN} ) # Assert that no friends are 
returned self.assertEqual(response.status_code, 200) self.assertTrue('friends' in response.data and len(response.data['friends']) == 0) @httpretty.activate def test_no_friends_linked_to_edx(self): # User 1 set up self.user_create_and_signin(1) # Enroll user_1 in the course self.enroll_in_course(self.users[1], self.course) self.set_sharing_preferences(self.users[1], True) # User 2 set up self.user_create_and_signin(2) # Enroll user_2 in the course self.enroll_in_course(self.users[2], self.course) self.set_sharing_preferences(self.users[2], True) # User 3 set up self.user_create_and_signin(3) # Enroll user_3 in the course self.enroll_in_course(self.users[3], self.course) self.set_sharing_preferences(self.users[3], True) # Set the interceptor self.set_facebook_interceptor_for_friends( { 'data': [ {'name': self.USERS[1]['USERNAME'], 'id': self.USERS[1]['FB_ID']}, {'name': self.USERS[2]['USERNAME'], 'id': self.USERS[2]['FB_ID']}, {'name': self.USERS[3]['USERNAME'], 'id': self.USERS[3]['FB_ID']}, ] } ) course_id = unicode(self.course.id) url = reverse('friends-in-course', kwargs={"course_id": course_id}) response = self.client.get( url, {'format': 'json', 'oauth_token': self._FB_USER_ACCESS_TOKEN} ) # Assert that no friends are returned self.assertEqual(response.status_code, 200) self.assertTrue('friends' in response.data and len(response.data['friends']) == 0) @httpretty.activate def test_no_friends_share_settings_false(self): # User 1 set up self.user_create_and_signin(1) self.enroll_in_course(self.users[1], self.course) self.link_edx_account_to_social(self.users[1], self.BACKEND, self.USERS[1]['FB_ID']) self.set_sharing_preferences(self.users[1], False) self.set_facebook_interceptor_for_friends( { 'data': [ {'name': self.USERS[1]['USERNAME'], 'id': self.USERS[1]['FB_ID']}, {'name': self.USERS[2]['USERNAME'], 'id': self.USERS[2]['FB_ID']}, {'name': self.USERS[3]['USERNAME'], 'id': self.USERS[3]['FB_ID']}, ] } ) url = reverse('friends-in-course', kwargs={"course_id": 
unicode(self.course.id)}) response = self.client.get(url, {'format': 'json', 'oauth_token': self._FB_USER_ACCESS_TOKEN}) # Assert that USERNAME_1 is returned self.assertEqual(response.status_code, 200) self.assertTrue('friends' in response.data) self.assertTrue('friends' in response.data and len(response.data['friends']) == 0) @httpretty.activate def test_no_friends_no_oauth_token(self): # User 1 set up self.user_create_and_signin(1) self.enroll_in_course(self.users[1], self.course) self.link_edx_account_to_social(self.users[1], self.BACKEND, self.USERS[1]['FB_ID']) self.set_sharing_preferences(self.users[1], False) self.set_facebook_interceptor_for_friends( { 'data': [ {'name': self.USERS[1]['USERNAME'], 'id': self.USERS[1]['FB_ID']}, {'name': self.USERS[2]['USERNAME'], 'id': self.USERS[2]['FB_ID']}, {'name': self.USERS[3]['USERNAME'], 'id': self.USERS[3]['FB_ID']}, ] } ) url = reverse('friends-in-course', kwargs={"course_id": unicode(self.course.id)}) response = self.client.get(url, {'format': 'json'}) # Assert that USERNAME_1 is returned self.assertEqual(response.status_code, 400) @httpretty.activate def test_one_friend_in_course(self): # User 1 set up self.user_create_and_signin(1) self.enroll_in_course(self.users[1], self.course) self.link_edx_account_to_social(self.users[1], self.BACKEND, self.USERS[1]['FB_ID']) self.set_sharing_preferences(self.users[1], True) self.set_facebook_interceptor_for_friends( { 'data': [ {'name': self.USERS[1]['USERNAME'], 'id': self.USERS[1]['FB_ID']}, {'name': self.USERS[2]['USERNAME'], 'id': self.USERS[2]['FB_ID']}, {'name': self.USERS[3]['USERNAME'], 'id': self.USERS[3]['FB_ID']}, ] } ) url = reverse('friends-in-course', kwargs={"course_id": unicode(self.course.id)}) response = self.client.get(url, {'format': 'json', 'oauth_token': self._FB_USER_ACCESS_TOKEN}) # Assert that USERNAME_1 is returned self.assertEqual(response.status_code, 200) self.assertTrue('friends' in response.data) self.assertTrue('id' in 
response.data['friends'][0]) self.assertTrue(response.data['friends'][0]['id'] == self.USERS[1]['FB_ID']) self.assertTrue('name' in response.data['friends'][0]) self.assertTrue(response.data['friends'][0]['name'] == self.USERS[1]['USERNAME']) @httpretty.activate def test_three_friends_in_course(self): # User 1 set up self.user_create_and_signin(1) self.enroll_in_course(self.users[1], self.course) self.link_edx_account_to_social(self.users[1], self.BACKEND, self.USERS[1]['FB_ID']) self.set_sharing_preferences(self.users[1], True) # User 2 set up self.user_create_and_signin(2) self.enroll_in_course(self.users[2], self.course) self.link_edx_account_to_social(self.users[2], self.BACKEND, self.USERS[2]['FB_ID']) self.set_sharing_preferences(self.users[2], True) # User 3 set up self.user_create_and_signin(3) self.enroll_in_course(self.users[3], self.course) self.link_edx_account_to_social(self.users[3], self.BACKEND, self.USERS[3]['FB_ID']) self.set_sharing_preferences(self.users[3], True) self.set_facebook_interceptor_for_friends( { 'data': [ {'name': self.USERS[1]['USERNAME'], 'id': self.USERS[1]['FB_ID']}, {'name': self.USERS[2]['USERNAME'], 'id': self.USERS[2]['FB_ID']}, {'name': self.USERS[3]['USERNAME'], 'id': self.USERS[3]['FB_ID']}, ] } ) url = reverse('friends-in-course', kwargs={"course_id": unicode(self.course.id)}) response = self.client.get( url, {'format': 'json', 'oauth_token': self._FB_USER_ACCESS_TOKEN} ) self.assertEqual(response.status_code, 200) self.assertTrue('friends' in response.data) # Assert that USERNAME_1 is returned self.assertTrue( 'id' in response.data['friends'][0] and response.data['friends'][0]['id'] == self.USERS[1]['FB_ID'] ) self.assertTrue( 'name' in response.data['friends'][0] and response.data['friends'][0]['name'] == self.USERS[1]['USERNAME'] ) # Assert that USERNAME_2 is returned self.assertTrue( 'id' in response.data['friends'][1] and response.data['friends'][1]['id'] == self.USERS[2]['FB_ID'] ) self.assertTrue( 'name' in 
response.data['friends'][1] and response.data['friends'][1]['name'] == self.USERS[2]['USERNAME'] ) # Assert that USERNAME_3 is returned self.assertTrue( 'id' in response.data['friends'][2] and response.data['friends'][2]['id'] == self.USERS[3]['FB_ID'] ) self.assertTrue( 'name' in response.data['friends'][2] and response.data['friends'][2]['name'] == self.USERS[3]['USERNAME'] ) @httpretty.activate def test_three_friends_in_paged_response(self): # User 1 set up self.user_create_and_signin(1) self.enroll_in_course(self.users[1], self.course) self.link_edx_account_to_social(self.users[1], self.BACKEND, self.USERS[1]['FB_ID']) self.set_sharing_preferences(self.users[1], True) # User 2 set up self.user_create_and_signin(2) self.enroll_in_course(self.users[2], self.course) self.link_edx_account_to_social(self.users[2], self.BACKEND, self.USERS[2]['FB_ID']) self.set_sharing_preferences(self.users[2], True) # User 3 set up self.user_create_and_signin(3) self.enroll_in_course(self.users[3], self.course) self.link_edx_account_to_social(self.users[3], self.BACKEND, self.USERS[3]['FB_ID']) self.set_sharing_preferences(self.users[3], True) self.set_facebook_interceptor_for_friends( { 'data': [{'name': self.USERS[1]['USERNAME'], 'id': self.USERS[1]['FB_ID']}], "paging": {"next": "https://graph.facebook.com/v2.2/me/friends/next_1"}, "summary": {"total_count": 652} } ) # Set the interceptor for the first paged content httpretty.register_uri( httpretty.GET, "https://graph.facebook.com/v2.2/me/friends/next_1", body=json.dumps( { "data": [{'name': self.USERS[2]['USERNAME'], 'id': self.USERS[2]['FB_ID']}], "paging": {"next": "https://graph.facebook.com/v2.2/me/friends/next_2"}, "summary": {"total_count": 652} } ), status=201 ) # Set the interceptor for the last paged content httpretty.register_uri( httpretty.GET, "https://graph.facebook.com/v2.2/me/friends/next_2", body=json.dumps( { "data": [{'name': self.USERS[3]['USERNAME'], 'id': self.USERS[3]['FB_ID']}], "paging": { "previous": 
"https://graph.facebook.com/v2.2/10154805434030300/friends?limit=25&offset=25" }, "summary": {"total_count": 652} } ), status=201 ) url = reverse('friends-in-course', kwargs={"course_id": unicode(self.course.id)}) response = self.client.get(url, {'format': 'json', 'oauth_token': self._FB_USER_ACCESS_TOKEN}) self.assertEqual(response.status_code, 200) self.assertTrue('friends' in response.data) # Assert that USERNAME_1 is returned self.assertTrue('id' in response.data['friends'][0]) self.assertTrue(response.data['friends'][0]['id'] == self.USERS[1]['FB_ID']) self.assertTrue('name' in response.data['friends'][0]) self.assertTrue(response.data['friends'][0]['name'] == self.USERS[1]['USERNAME']) # Assert that USERNAME_2 is returned self.assertTrue('id' in response.data['friends'][1]) self.assertTrue(response.data['friends'][1]['id'] == self.USERS[2]['FB_ID']) self.assertTrue('name' in response.data['friends'][1]) self.assertTrue(response.data['friends'][1]['name'] == self.USERS[2]['USERNAME']) # Assert that USERNAME_3 is returned self.assertTrue('id' in response.data['friends'][2]) self.assertTrue(response.data['friends'][2]['id'] == self.USERS[3]['FB_ID']) self.assertTrue('name' in response.data['friends'][2]) self.assertTrue(response.data['friends'][2]['name'] == self.USERS[3]['USERNAME'])
agpl-3.0
michalliu/OpenWrt-Firefly-Libraries
staging_dir/host/lib/scons-2.3.1/SCons/Tool/packaging/tarbz2.py
8
1821
"""SCons.Tool.Packaging.tarbz2 The tarbz2 SRC packager. """ # # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "src/engine/SCons/Tool/packaging/tarbz2.py 2014/03/02 14:18:15 garyo" from SCons.Tool.packaging import stripinstallbuilder, putintopackageroot def package(env, target, source, PACKAGEROOT, **kw): bld = env['BUILDERS']['Tar'] bld.set_suffix('.tar.gz') target, source = putintopackageroot(target, source, env, PACKAGEROOT) target, source = stripinstallbuilder(target, source, env) return bld(env, target, source, TARFLAGS='-jc') # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
gpl-2.0
rex-xxx/mt6572_x201
external/v8/tools/testrunner/server/work_handler.py
123
5569
# Copyright 2012 the V8 project authors. All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import os import SocketServer import stat import subprocess import threading from . import compression from . import constants from . 
import signatures from ..network import endpoint from ..objects import workpacket class WorkHandler(SocketServer.BaseRequestHandler): def handle(self): rec = compression.Receiver(self.request) while not rec.IsDone(): data = rec.Current() with self.server.job_lock: self._WorkOnWorkPacket(data) rec.Advance() def _WorkOnWorkPacket(self, data): server_root = self.server.daemon.root v8_root = os.path.join(server_root, "v8") os.chdir(v8_root) packet = workpacket.WorkPacket.Unpack(data) self.ctx = packet.context self.ctx.shell_dir = os.path.join("out", "%s.%s" % (self.ctx.arch, self.ctx.mode)) if not os.path.isdir(self.ctx.shell_dir): os.makedirs(self.ctx.shell_dir) for binary in packet.binaries: if not self._UnpackBinary(binary, packet.pubkey_fingerprint): return if not self._CheckoutRevision(packet.base_revision): return if not self._ApplyPatch(packet.patch): return tests = packet.tests endpoint.Execute(v8_root, self.ctx, tests, self.request, self.server.daemon) self._SendResponse() def _SendResponse(self, error_message=None): try: if error_message: compression.Send([[-1, error_message]], self.request) compression.Send(constants.END_OF_STREAM, self.request) return except Exception, e: pass # Peer is gone. There's nothing we can do. # Clean up. 
self._Call("git checkout -f") self._Call("git clean -f -d") self._Call("rm -rf %s" % self.ctx.shell_dir) def _UnpackBinary(self, binary, pubkey_fingerprint): binary_name = binary["name"] if binary_name == "libv8.so": libdir = os.path.join(self.ctx.shell_dir, "lib.target") if not os.path.exists(libdir): os.makedirs(libdir) target = os.path.join(libdir, binary_name) else: target = os.path.join(self.ctx.shell_dir, binary_name) pubkeyfile = "../trusted/%s.pem" % pubkey_fingerprint if not signatures.VerifySignature(target, binary["blob"], binary["sign"], pubkeyfile): self._SendResponse("Signature verification failed") return False os.chmod(target, stat.S_IRWXU) return True def _CheckoutRevision(self, base_svn_revision): get_hash_cmd = ( "git log -1 --format=%%H --remotes --grep='^git-svn-id:.*@%s'" % base_svn_revision) try: base_revision = subprocess.check_output(get_hash_cmd, shell=True) if not base_revision: raise ValueError except: self._Call("git fetch") try: base_revision = subprocess.check_output(get_hash_cmd, shell=True) if not base_revision: raise ValueError except: self._SendResponse("Base revision not found.") return False code = self._Call("git checkout -f %s" % base_revision) if code != 0: self._SendResponse("Error trying to check out base revision.") return False code = self._Call("git clean -f -d") if code != 0: self._SendResponse("Failed to reset checkout") return False return True def _ApplyPatch(self, patch): if not patch: return True # Just skip if the patch is empty. 
patchfilename = "_dtest_incoming_patch.patch" with open(patchfilename, "w") as f: f.write(patch) code = self._Call("git apply %s" % patchfilename) if code != 0: self._SendResponse("Error applying patch.") return False return True def _Call(self, cmd): return subprocess.call(cmd, shell=True) class WorkSocketServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer): def __init__(self, daemon): address = (daemon.ip, constants.PEER_PORT) SocketServer.TCPServer.__init__(self, address, WorkHandler) self.job_lock = threading.Lock() self.daemon = daemon
gpl-2.0
ujfjhz/vnpy
docker/dockerTrader/ctaStrategy/strategy/strategyEmaDemo.py
5
11388
# encoding: UTF-8 """ 这里的Demo是一个最简单的策略实现,并未考虑太多实盘中的交易细节,如: 1. 委托价格超出涨跌停价导致的委托失败 2. 委托未成交,需要撤单后重新委托 3. 断网后恢复交易状态 4. 等等 这些点是作者选择特意忽略不去实现,因此想实盘的朋友请自己多多研究CTA交易的一些细节, 做到了然于胸后再去交易,对自己的money和时间负责。 也希望社区能做出一个解决了以上潜在风险的Demo出来。 """ from ..ctaBase import * from ..ctaTemplate import CtaTemplate ######################################################################## class EmaDemoStrategy(CtaTemplate): """双指数均线策略Demo""" className = 'EmaDemoStrategy' author = u'用Python的交易员' # 策略参数 fastK = 0.9 # 快速EMA参数 slowK = 0.1 # 慢速EMA参数 initDays = 10 # 初始化数据所用的天数 # 策略变量 bar = None barMinute = EMPTY_STRING fastMa = [] # 快速EMA均线数组 fastMa0 = EMPTY_FLOAT # 当前最新的快速EMA fastMa1 = EMPTY_FLOAT # 上一根的快速EMA slowMa = [] # 与上面相同 slowMa0 = EMPTY_FLOAT slowMa1 = EMPTY_FLOAT # 参数列表,保存了参数的名称 paramList = ['name', 'className', 'author', 'vtSymbol', 'fastK', 'slowK'] # 变量列表,保存了变量的名称 varList = ['inited', 'trading', 'pos', 'fastMa0', 'fastMa1', 'slowMa0', 'slowMa1'] #---------------------------------------------------------------------- def __init__(self, ctaEngine, setting): """Constructor""" super(EmaDemoStrategy, self).__init__(ctaEngine, setting) # 注意策略类中的可变对象属性(通常是list和dict等),在策略初始化时需要重新创建, # 否则会出现多个策略实例之间数据共享的情况,有可能导致潜在的策略逻辑错误风险, # 策略类中的这些可变对象属性可以选择不写,全都放在__init__下面,写主要是为了阅读 # 策略时方便(更多是个编程习惯的选择) self.fastMa = [] self.slowMa = [] #---------------------------------------------------------------------- def onInit(self): """初始化策略(必须由用户继承实现)""" self.writeCtaLog(u'双EMA演示策略初始化') initData = self.loadBar(self.initDays) for bar in initData: self.onBar(bar) self.putEvent() #---------------------------------------------------------------------- def onStart(self): """启动策略(必须由用户继承实现)""" self.writeCtaLog(u'双EMA演示策略启动') self.putEvent() #---------------------------------------------------------------------- def onStop(self): """停止策略(必须由用户继承实现)""" self.writeCtaLog(u'双EMA演示策略停止') self.putEvent() #---------------------------------------------------------------------- def onTick(self, tick): """收到行情TICK推送(必须由用户继承实现)""" # 计算K线 
tickMinute = tick.datetime.minute if tickMinute != self.barMinute: if self.bar: self.onBar(self.bar) bar = CtaBarData() bar.vtSymbol = tick.vtSymbol bar.symbol = tick.symbol bar.exchange = tick.exchange bar.open = tick.lastPrice bar.high = tick.lastPrice bar.low = tick.lastPrice bar.close = tick.lastPrice bar.date = tick.date bar.time = tick.time bar.datetime = tick.datetime # K线的时间设为第一个Tick的时间 # 实盘中用不到的数据可以选择不算,从而加快速度 #bar.volume = tick.volume #bar.openInterest = tick.openInterest self.bar = bar # 这种写法为了减少一层访问,加快速度 self.barMinute = tickMinute # 更新当前的分钟 else: # 否则继续累加新的K线 bar = self.bar # 写法同样为了加快速度 bar.high = max(bar.high, tick.lastPrice) bar.low = min(bar.low, tick.lastPrice) bar.close = tick.lastPrice #---------------------------------------------------------------------- def onBar(self, bar): """收到Bar推送(必须由用户继承实现)""" # 计算快慢均线 if not self.fastMa0: self.fastMa0 = bar.close self.fastMa.append(self.fastMa0) else: self.fastMa1 = self.fastMa0 self.fastMa0 = bar.close * self.fastK + self.fastMa0 * (1 - self.fastK) self.fastMa.append(self.fastMa0) if not self.slowMa0: self.slowMa0 = bar.close self.slowMa.append(self.slowMa0) else: self.slowMa1 = self.slowMa0 self.slowMa0 = bar.close * self.slowK + self.slowMa0 * (1 - self.slowK) self.slowMa.append(self.slowMa0) # 判断买卖 crossOver = self.fastMa0>self.slowMa0 and self.fastMa1<self.slowMa1 # 金叉上穿 crossBelow = self.fastMa0<self.slowMa0 and self.fastMa1>self.slowMa1 # 死叉下穿 # 金叉和死叉的条件是互斥 # 所有的委托均以K线收盘价委托(这里有一个实盘中无法成交的风险,考虑添加对模拟市价单类型的支持) if crossOver: # 如果金叉时手头没有持仓,则直接做多 if self.pos == 0: self.buy(bar.close, 1) # 如果有空头持仓,则先平空,再做多 elif self.pos < 0: self.cover(bar.close, 1) self.buy(bar.close, 1) # 死叉和金叉相反 elif crossBelow: if self.pos == 0: self.short(bar.close, 1) elif self.pos > 0: self.sell(bar.close, 1) self.short(bar.close, 1) # 发出状态更新事件 self.putEvent() #---------------------------------------------------------------------- def onOrder(self, order): """收到委托变化推送(必须由用户继承实现)""" # 对于无需做细粒度委托控制的策略,可以忽略onOrder pass 
#---------------------------------------------------------------------- def onTrade(self, trade): """收到成交推送(必须由用户继承实现)""" # 对于无需做细粒度委托控制的策略,可以忽略onOrder pass ######################################################################################## class OrderManagementDemoStrategy(CtaTemplate): """基于tick级别细粒度撤单追单测试demo""" className = 'OrderManagementDemoStrategy' author = u'用Python的交易员' # 策略参数 initDays = 10 # 初始化数据所用的天数 # 策略变量 bar = None barMinute = EMPTY_STRING # 参数列表,保存了参数的名称 paramList = ['name', 'className', 'author', 'vtSymbol'] # 变量列表,保存了变量的名称 varList = ['inited', 'trading', 'pos'] #---------------------------------------------------------------------- def __init__(self, ctaEngine, setting): """Constructor""" super(OrderManagementDemoStrategy, self).__init__(ctaEngine, setting) self.lastOrder = None self.orderType = '' #---------------------------------------------------------------------- def onInit(self): """初始化策略(必须由用户继承实现)""" self.writeCtaLog(u'双EMA演示策略初始化') initData = self.loadBar(self.initDays) for bar in initData: self.onBar(bar) self.putEvent() #---------------------------------------------------------------------- def onStart(self): """启动策略(必须由用户继承实现)""" self.writeCtaLog(u'双EMA演示策略启动') self.putEvent() #---------------------------------------------------------------------- def onStop(self): """停止策略(必须由用户继承实现)""" self.writeCtaLog(u'双EMA演示策略停止') self.putEvent() #---------------------------------------------------------------------- def onTick(self, tick): """收到行情TICK推送(必须由用户继承实现)""" # 建立不成交买单测试单 if self.lastOrder == None: self.buy(tick.lastprice - 10.0, 1) # CTA委托类型映射 if self.lastOrder != None and self.lastOrder.direction == u'多' and self.lastOrder.offset == u'开仓': self.orderType = u'买开' elif self.lastOrder != None and self.lastOrder.direction == u'多' and self.lastOrder.offset == u'平仓': self.orderType = u'买平' elif self.lastOrder != None and self.lastOrder.direction == u'空' and self.lastOrder.offset == u'开仓': self.orderType = u'卖开' elif self.lastOrder != 
None and self.lastOrder.direction == u'空' and self.lastOrder.offset == u'平仓': self.orderType = u'卖平' # 不成交,即撤单,并追单 if self.lastOrder != None and self.lastOrder.status == u'未成交': self.cancelOrder(self.lastOrder.vtOrderID) self.lastOrder = None elif self.lastOrder != None and self.lastOrder.status == u'已撤销': # 追单并设置为不能成交 self.sendOrder(self.orderType, self.tick.lastprice - 10, 1) self.lastOrder = None #---------------------------------------------------------------------- def onBar(self, bar): """收到Bar推送(必须由用户继承实现)""" pass #---------------------------------------------------------------------- def onOrder(self, order): """收到委托变化推送(必须由用户继承实现)""" # 对于无需做细粒度委托控制的策略,可以忽略onOrder self.lastOrder = order #---------------------------------------------------------------------- def onTrade(self, trade): """收到成交推送(必须由用户继承实现)""" # 对于无需做细粒度委托控制的策略,可以忽略onOrder pass
mit
mccheung/kbengine
kbe/src/lib/python/Lib/tkinter/test/support.py
59
3128
import sys import tkinter import unittest from test.support import requires class AbstractTkTest: @classmethod def setUpClass(cls): cls._old_support_default_root = tkinter._support_default_root destroy_default_root() tkinter.NoDefaultRoot() cls.root = tkinter.Tk() cls.wantobjects = cls.root.wantobjects() # De-maximize main window. # Some window managers can maximize new windows. cls.root.wm_state('normal') try: cls.root.wm_attributes('-zoomed', False) except tkinter.TclError: pass @classmethod def tearDownClass(cls): cls.root.destroy() cls.root = None tkinter._default_root = None tkinter._support_default_root = cls._old_support_default_root def setUp(self): self.root.deiconify() def tearDown(self): for w in self.root.winfo_children(): w.destroy() self.root.withdraw() def destroy_default_root(): if getattr(tkinter, '_default_root', None): tkinter._default_root.update_idletasks() tkinter._default_root.destroy() tkinter._default_root = None def simulate_mouse_click(widget, x, y): """Generate proper events to click at the x, y position (tries to act like an X server).""" widget.event_generate('<Enter>', x=0, y=0) widget.event_generate('<Motion>', x=x, y=y) widget.event_generate('<ButtonPress-1>', x=x, y=y) widget.event_generate('<ButtonRelease-1>', x=x, y=y) import _tkinter tcl_version = tuple(map(int, _tkinter.TCL_VERSION.split('.'))) def requires_tcl(*version): return unittest.skipUnless(tcl_version >= version, 'requires Tcl version >= ' + '.'.join(map(str, version))) _tk_patchlevel = None def get_tk_patchlevel(): global _tk_patchlevel if _tk_patchlevel is None: tcl = tkinter.Tcl() patchlevel = [] for x in tcl.call('info', 'patchlevel').split('.'): try: x = int(x, 10) except ValueError: x = -1 patchlevel.append(x) _tk_patchlevel = tuple(patchlevel) return _tk_patchlevel units = { 'c': 72 / 2.54, # centimeters 'i': 72, # inches 'm': 72 / 25.4, # millimeters 'p': 1, # points } def pixels_conv(value): return float(value[:-1]) * units[value[-1:]] def tcl_obj_eq(actual, 
expected): if actual == expected: return True if isinstance(actual, _tkinter.Tcl_Obj): if isinstance(expected, str): return str(actual) == expected if isinstance(actual, tuple): if isinstance(expected, tuple): return (len(actual) == len(expected) and all(tcl_obj_eq(act, exp) for act, exp in zip(actual, expected))) return False def widget_eq(actual, expected): if actual == expected: return True if isinstance(actual, (str, tkinter.Widget)): if isinstance(expected, (str, tkinter.Widget)): return str(actual) == str(expected) return False
lgpl-3.0
hamzehd/edx-platform
cms/djangoapps/contentstore/views/entrance_exam.py
2
11144
""" Entrance Exams view module -- handles all requests related to entrance exam management via Studio Intended to be utilized as an AJAX callback handler, versus a proper view/screen """ from functools import wraps import json import logging from django.contrib.auth.decorators import login_required from django.views.decorators.csrf import ensure_csrf_cookie from django.http import HttpResponse, HttpResponseBadRequest from openedx.core.lib.js_utils import escape_json_dumps from contentstore.views.helpers import create_xblock, remove_entrance_exam_graders from contentstore.views.item import delete_item from models.settings.course_metadata import CourseMetadata from opaque_keys.edx.keys import CourseKey, UsageKey from opaque_keys import InvalidKeyError from student.auth import has_course_author_access from util import milestones_helpers from xmodule.modulestore.django import modulestore from xmodule.modulestore.exceptions import ItemNotFoundError from django.conf import settings from django.utils.translation import ugettext as _ __all__ = ['entrance_exam', ] log = logging.getLogger(__name__) # pylint: disable=invalid-name def _get_default_entrance_exam_minimum_pct(): """ Helper method to return the default value from configuration Converts integer values to decimals, since that what we use internally """ entrance_exam_minimum_score_pct = float(settings.ENTRANCE_EXAM_MIN_SCORE_PCT) if entrance_exam_minimum_score_pct.is_integer(): entrance_exam_minimum_score_pct = entrance_exam_minimum_score_pct / 100 return entrance_exam_minimum_score_pct # pylint: disable=missing-docstring def check_feature_enabled(feature_name): """ Ensure the specified feature is turned on. Return an HTTP 400 code if not. 
""" def _check_feature_enabled(view_func): def _decorator(request, *args, **kwargs): # Deny access if the entrance exam feature is disabled if not settings.FEATURES.get(feature_name, False): return HttpResponseBadRequest() return view_func(request, *args, **kwargs) return wraps(view_func)(_decorator) return _check_feature_enabled @login_required @ensure_csrf_cookie @check_feature_enabled(feature_name='ENTRANCE_EXAMS') def entrance_exam(request, course_key_string): """ The restful handler for entrance exams. It allows retrieval of all the assets (as an HTML page), as well as uploading new assets, deleting assets, and changing the "locked" state of an asset. GET Retrieves the entrance exam module (metadata) for the specified course POST Adds an entrance exam module to the specified course. DELETE Removes the entrance exam from the course """ course_key = CourseKey.from_string(course_key_string) # Deny access if the user is valid, but they lack the proper object access privileges if not has_course_author_access(request.user, course_key): return HttpResponse(status=403) # Retrieve the entrance exam module for the specified course (returns 404 if none found) if request.method == 'GET': return _get_entrance_exam(request, course_key) # Create a new entrance exam for the specified course (returns 201 if created) elif request.method == 'POST': response_format = request.REQUEST.get('format', 'html') http_accept = request.META.get('http_accept') if response_format == 'json' or 'application/json' in http_accept: ee_min_score = request.POST.get('entrance_exam_minimum_score_pct', None) # if request contains empty value or none then save the default one. 
entrance_exam_minimum_score_pct = _get_default_entrance_exam_minimum_pct() if ee_min_score != '' and ee_min_score is not None: entrance_exam_minimum_score_pct = float(ee_min_score) return create_entrance_exam(request, course_key, entrance_exam_minimum_score_pct) return HttpResponse(status=400) # Remove the entrance exam module for the specified course (returns 204 regardless of existence) elif request.method == 'DELETE': return delete_entrance_exam(request, course_key) # No other HTTP verbs/methods are supported at this time else: return HttpResponse(status=405) @check_feature_enabled(feature_name='ENTRANCE_EXAMS') def create_entrance_exam(request, course_key, entrance_exam_minimum_score_pct): """ api method to create an entrance exam. First clean out any old entrance exams. """ _delete_entrance_exam(request, course_key) return _create_entrance_exam( request=request, course_key=course_key, entrance_exam_minimum_score_pct=entrance_exam_minimum_score_pct ) def _create_entrance_exam(request, course_key, entrance_exam_minimum_score_pct=None): """ Internal workflow operation to create an entrance exam """ # Provide a default value for the minimum score percent if nothing specified if entrance_exam_minimum_score_pct is None: entrance_exam_minimum_score_pct = _get_default_entrance_exam_minimum_pct() # Confirm the course exists course = modulestore().get_course(course_key) if course is None: return HttpResponse(status=400) # Create the entrance exam item (currently it's just a chapter) payload = { 'category': "chapter", 'display_name': _("Entrance Exam"), 'parent_locator': unicode(course.location), 'is_entrance_exam': True, 'in_entrance_exam': True, } parent_locator = unicode(course.location) created_block = create_xblock( parent_locator=parent_locator, user=request.user, category='chapter', display_name=_('Entrance Exam'), is_entrance_exam=True ) # Set the entrance exam metadata flags for this course # Reload the course so we don't overwrite the new child reference course 
= modulestore().get_course(course_key) metadata = { 'entrance_exam_enabled': True, 'entrance_exam_minimum_score_pct': unicode(entrance_exam_minimum_score_pct), 'entrance_exam_id': unicode(created_block.location), } CourseMetadata.update_from_dict(metadata, course, request.user) # Create the entrance exam section item. create_xblock( parent_locator=unicode(created_block.location), user=request.user, category='sequential', display_name=_('Entrance Exam - Subsection') ) add_entrance_exam_milestone(course.id, created_block) return HttpResponse(status=201) def _get_entrance_exam(request, course_key): # pylint: disable=W0613 """ Internal workflow operation to retrieve an entrance exam """ course = modulestore().get_course(course_key) if course is None: return HttpResponse(status=400) if not getattr(course, 'entrance_exam_id'): return HttpResponse(status=404) try: exam_key = UsageKey.from_string(course.entrance_exam_id) except InvalidKeyError: return HttpResponse(status=404) try: exam_descriptor = modulestore().get_item(exam_key) return HttpResponse( escape_json_dumps({'locator': unicode(exam_descriptor.location)}), status=200, content_type='application/json') except ItemNotFoundError: return HttpResponse(status=404) @check_feature_enabled(feature_name='ENTRANCE_EXAMS') def update_entrance_exam(request, course_key, exam_data): """ Operation to update course fields pertaining to entrance exams The update operation is not currently exposed directly via the API Because the operation is not exposed directly, we do not return a 200 response But we do return a 400 in the error case because the workflow is executed in a request context """ course = modulestore().get_course(course_key) if course: metadata = exam_data CourseMetadata.update_from_dict(metadata, course, request.user) @check_feature_enabled(feature_name='ENTRANCE_EXAMS') def delete_entrance_exam(request, course_key): """ api method to delete an entrance exam """ return _delete_entrance_exam(request=request, 
course_key=course_key) def _delete_entrance_exam(request, course_key): """ Internal workflow operation to remove an entrance exam """ store = modulestore() course = store.get_course(course_key) if course is None: return HttpResponse(status=400) remove_entrance_exam_milestone_reference(request, course_key) # Reset the entrance exam flags on the course # Reload the course so we have the latest state course = store.get_course(course_key) if getattr(course, 'entrance_exam_id'): metadata = { 'entrance_exam_enabled': False, 'entrance_exam_minimum_score_pct': None, 'entrance_exam_id': None, } CourseMetadata.update_from_dict(metadata, course, request.user) # Clean up any pre-existing entrance exam graders remove_entrance_exam_graders(course_key, request.user) return HttpResponse(status=204) def add_entrance_exam_milestone(course_id, x_block): # Add an entrance exam milestone if one does not already exist for given xBlock # As this is a standalone method for entrance exam, We should check that given xBlock should be an entrance exam. 
if x_block.is_entrance_exam: namespace_choices = milestones_helpers.get_namespace_choices() milestone_namespace = milestones_helpers.generate_milestone_namespace( namespace_choices.get('ENTRANCE_EXAM'), course_id ) milestones = milestones_helpers.get_milestones(milestone_namespace) if len(milestones): milestone = milestones[0] else: description = 'Autogenerated during {} entrance exam creation.'.format(unicode(course_id)) milestone = milestones_helpers.add_milestone({ 'name': _('Completed Course Entrance Exam'), 'namespace': milestone_namespace, 'description': description }) relationship_types = milestones_helpers.get_milestone_relationship_types() milestones_helpers.add_course_milestone( unicode(course_id), relationship_types['REQUIRES'], milestone ) milestones_helpers.add_course_content_milestone( unicode(course_id), unicode(x_block.location), relationship_types['FULFILLS'], milestone ) def remove_entrance_exam_milestone_reference(request, course_key): """ Remove content reference for entrance exam. """ course_children = modulestore().get_items( course_key, qualifiers={'category': 'chapter'} ) for course_child in course_children: if course_child.is_entrance_exam: delete_item(request, course_child.scope_ids.usage_id) milestones_helpers.remove_content_references(unicode(course_child.scope_ids.usage_id))
agpl-3.0
bittlingmayer/Theano-Lights
models/lm_draw.py
11
4829
import theano import theano.tensor as T from theano.sandbox.rng_mrg import MRG_RandomStreams from theano.tensor.nnet.conv import conv2d from theano.tensor.signal.downsample import max_pool_2d from theano.tensor.shared_randomstreams import RandomStreams import numpy as np from toolbox import * from modelbase import * class LM_draw(ModelLMBase): def __init__(self, data, hp): super(LM_draw, self).__init__(self.__class__.__name__, data, hp) self.n_h = 1024 self.n_zpt = 256 self.dropout = 0.0 self.params = Parameters() self.hiddenstates = Parameters() n_tokens = self.data['n_tokens'] gates = 4 with self.hiddenstates: b1_h = shared_zeros((self.hp.batch_size, self.n_h)) b1_c = shared_zeros((self.hp.batch_size, self.n_h)) b2_h = shared_zeros((self.hp.batch_size, self.n_h)) b2_c = shared_zeros((self.hp.batch_size, self.n_h)) if hp.load_model and os.path.isfile(self.filename): self.params.load(self.filename) else: with self.params: W_emb = shared_normal((n_tokens, self.n_h), scale=hp.init_scale) W1 = shared_normal((self.n_h*2, self.n_h*gates), scale=hp.init_scale*1.5) V1 = shared_normal((self.n_h, self.n_h*gates), scale=hp.init_scale*1.5) b1 = shared_zeros((self.n_h*gates,)) Wmu = shared_normal((self.n_h, self.n_zpt), scale=hp.init_scale) Wsi = shared_normal((self.n_h, self.n_zpt), scale=hp.init_scale) bmu = shared_zeros((self.n_zpt,)) bsi = shared_zeros((self.n_zpt,)) W2 = shared_normal((self.n_zpt, self.n_h*gates), scale=hp.init_scale*1.5) V2 = shared_normal((self.n_h, self.n_h*gates), scale=hp.init_scale*1.5) b2 = shared_zeros((self.n_h*gates,)) def lstm(X, h, c, W, U, b): g_on = T.dot(X,W) + T.dot(h,U) + b i_on = T.nnet.sigmoid(g_on[:,:self.n_h]) f_on = T.nnet.sigmoid(g_on[:,self.n_h:2*self.n_h]) o_on = T.nnet.sigmoid(g_on[:,2*self.n_h:3*self.n_h]) c = f_on * c + i_on * T.tanh(g_on[:,3*self.n_h:]) h = o_on * T.tanh(c) return h, c def model(x, p, p_dropout, noise): input_size = x.shape[1] h0 = p.W_emb[x] # (seq_len, batch_size, emb_size) h0 = dropout(h0, p_dropout) cost, 
h1, c1, h2, c2 = [0., b1_h, b1_c, b2_h, b2_c] eps = srnd.normal((self.hp.seq_size, input_size, self.n_zpt), dtype=theano.config.floatX) for t in xrange(0, self.hp.seq_size): if t >= self.hp.warmup_size: pyx = softmax(T.dot(h2, T.transpose(p.W_emb))) cost += T.sum(T.nnet.categorical_crossentropy(pyx, theano_one_hot(x[t], n_tokens))) h_x = concatenate([h0[t], h2], axis=1) h1, c1 = lstm(h_x, h1, c1, p.W1, p.V1, p.b1) h1 = dropout(h1, p_dropout) mu_encoder = T.dot(h1, p.Wmu) + p.bmu if noise: log_sigma_encoder = 0.5*(T.dot(h1, p.Wsi) + p.bsi) cost += -0.5* T.sum(1 + 2*log_sigma_encoder - mu_encoder**2 - T.exp(2*log_sigma_encoder)) * 0.01 z = mu_encoder + eps[t]*T.exp(log_sigma_encoder) else: z = mu_encoder h2, c2 = lstm(z, h2, c2, p.W2, p.V2, p.b2) h2 = dropout(h2, p_dropout) h_updates = [(b1_h, h1), (b1_c, c1), (b2_h, h2), (b2_c, c2)] return cost, h_updates cost, h_updates = model(self.X, self.params, self.dropout, True) te_cost, te_h_updates = model(self.X, self.params, 0.0, False) def generate(seed_idx, p): spx = T.zeros((self.hp.seq_size, n_tokens)) h1, c1, h2, c2 = [T.zeros((self.n_h)), T.zeros((self.n_h)), T.zeros((self.n_h)), T.zeros((self.n_h))] spx = T.set_subtensor(spx[0, seed_idx], 1) #for t in xrange(0, self.hp.seq_size): # if t > 0: # pyx = softmax(T.dot(h2, T.transpose(p.W_emb))) # spx = T.set_subtensor(spx[t,:], srnd.multinomial(pvals=pyx)[0]) # h1, c1 = lstm(p.W_emb[T.cast(spx[t], dtype='int32')], h1, c1, p.W1, p.V1, p.b1) # h2, c2 = lstm(h1, h2, c2, p.W2, p.V2, p.b2) return spx spx = generate(self.seed_idx, self.params) self.compile(cost, te_cost, h_updates, te_h_updates, spx)
mit
sg0/Elemental
include/El/core/Element.py
1
18575
# # Copyright (c) 2009-2015, Jack Poulson # All rights reserved. # # This file is part of Elemental and is under the BSD 2-Clause License, # which can be found in the LICENSE file in the root directory, or at # http://opensource.org/licenses/BSD-2-Clause # from environment import * # Basic element manipulation # ========================== # Return the complex argument of a scalar # --------------------------------------- lib.ElArg_s.argtypes = [sType,POINTER(sType)] lib.ElArg_s.restype = c_uint lib.ElArg_d.argtypes = [dType,POINTER(dType)] lib.ElArg_d.restype = c_uint lib.ElArg_c.argtypes = [cType,POINTER(sType)] lib.ElArg_c.restype = c_uint lib.ElArg_z.argtypes = [zType,POINTER(dType)] lib.ElArg_z.restype = c_uint def Arg(alpha): if type(alpha) is sType: result = sType() lib.ElArg_s(alpha,pointer(result)) return result elif type(alpha) is dType: result = dType() lib.ElArg_d(alpha,pointer(result)) return result elif type(alpha) is cType: result = sType() lib.ElArg_c(alpha,pointer(result)) return result elif type(alpha) is zType: result = dType() lib.ElArg_z(alpha,pointer(result)) return result else: raise Exception('Unsupported datatype') # Construct a complex number from its polar coordinates # ----------------------------------------------------- lib.ElComplexFromPolar_c.argtypes = [sType,sType,POINTER(cType)] lib.ElComplexFromPolar_c.restype = c_uint lib.ElComplexFromPolar_z.argtyped = [dType,dType,POINTER(zType)] lib.ElComplexFromPolar_z.restype = c_uint def ComplexFromPolar(r,theta): if type(r) is sType: result = cType() lib.ElComplexFromPolar_c(r,theta,pointer(result)) return result elif type(r) is dType: result = zType() lib.ElComplexFromPolar_z(r,theta,pointer(result)) return result else: raise Exception('Unsupported datatype') # Magnitude and sign # ================== lib.ElAbs_i.argtypes = [iType,POINTER(iType)] lib.ElAbs_i.restype = c_uint lib.ElAbs_s.argtypes = [sType,POINTER(sType)] lib.ElAbs_s.restype = c_uint lib.ElAbs_d.argtypes = 
[dType,POINTER(dType)] lib.ElAbs_d.restype = c_uint lib.ElAbs_c.argtypes = [cType,POINTER(sType)] lib.ElAbs_c.restype = c_uint lib.ElAbs_z.argtypes = [zType,POINTER(dType)] lib.ElAbs_z.restype = c_uint def Abs(alpha): if type(alpha) is iType: result = iType() lib.ElAbs_i(alpha,pointer(result)) return result elif type(alpha) is sType: result = sType() lib.ElAbs_s(alpha,pointer(result)) return result elif type(alpha) is dType: result = dType() lib.ElAbs_d(alpha,pointer(result)) return result elif type(alpha) is cType: result = sType() lib.ElAbs_c(alpha,pointer(result)) return result elif type(alpha) is zType: result = dType() lib.ElAbs_z(alpha,pointer(result)) return result else: raise Exception('Unsupported datatype') lib.ElSafeAbs_c.argtypes = [cType,POINTER(sType)] lib.ElSafeAbs_c.restype = c_uint lib.ElSafeAbs_z.argtypes = [zType,POINTER(dType)] lib.ElSafeAbs_z.restype = c_uint def SafeAbs(alpha): if type(alpha) is iType: result = iType() lib.ElAbs_i(alpha,pointer(result)) return result elif type(alpha) is sType: result = sType() lib.ElAbs_s(alpha,pointer(result)) return result elif type(alpha) is dType: result = dType() lib.ElAbs_d(alpha,pointer(result)) return result elif type(alpha) is cType: result = sType() lib.ElSafeAbs_c(alpha,pointer(result)) return result elif type(alpha) is zType: result = dType() lib.ElSafeAbs_z(alpha,pointer(result)) return result else: raise Exception('Unsupported datatype') lib.ElFastAbs_c.argtypes = [cType,POINTER(sType)] lib.ElFastAbs_c.restype = c_uint lib.ElFastAbs_z.argtypes = [zType,POINTER(dType)] lib.ElFastAbs_z.restype = c_uint def FastAbs(alpha): if type(alpha) is iType: result = iType() lib.ElAbs_i(alpha,pointer(result)) return result elif type(alpha) is sType: result = sType() lib.ElAbs_s(alpha,pointer(result)) return result elif type(alpha) is dType: result = dType() lib.ElAbs_d(alpha,pointer(result)) return result elif type(alpha) is cType: result = sType() lib.ElFastAbs_c(alpha,pointer(result)) return result elif 
type(alpha) is zType: result = dType() lib.ElFastAbs_z(alpha,pointer(result)) return result else: raise Exception('Unsupported datatype') lib.ElSgn_i.argtypes = [iType,bType,POINTER(iType)] lib.ElSgn_i.restype = c_uint lib.ElSgn_s.argtypes = [sType,bType,POINTER(sType)] lib.ElSgn_s.restype = c_uint lib.ElSgn_d.argtypes = [dType,bType,POINTER(dType)] lib.ElSgn_d.restype = c_uint def Sgn(alpha,symm=True): if type(alpha) is iType: result = iType() lib.ElSgn_i(alpha,symm,pointer(result)) return result elif type(alpha) is sType: result = sType() lib.ElSgn_s(alpha,symm,pointer(result)) return result elif type(alpha) is dType: result = dType() lib.ElSgn_d(alpha,symm,pointer(result)) return result else: raise Exception('Unsupported datatype') # Exponentiation # ============== lib.ElExp_s.argtypes = [sType,POINTER(sType)] lib.ElExp_s.restype = c_uint lib.ElExp_d.argtypes = [dType,POINTER(dType)] lib.ElExp_d.restype = c_uint lib.ElExp_c.argtypes = [cType,POINTER(cType)] lib.ElExp_c.restype = c_uint lib.ElExp_z.argtypes = [zType,POINTER(zType)] lib.ElExp_z.restype = c_uint def Exp(alpha): if type(alpha) is sType: result = sType() lib.ElExp_s(alpha,pointer(result)) return result elif type(alpha) is dType: result = dType() lib.ElExp_d(alpha,pointer(result)) return result elif type(alpha) is cType: result = cType() lib.ElExp_c(alpha,pointer(result)) return result elif type(alpha) is zType: result = zType() lib.ElExp_z(alpha,pointer(result)) return result else: raise Exception('Unsupported datatype') lib.ElPow_s.argtypes = [sType,sType,POINTER(sType)] lib.ElPow_s.restype = c_uint lib.ElPow_d.argtypes = [dType,dType,POINTER(dType)] lib.ElPow_d.restype = c_uint lib.ElPow_c.argtypes = [cType,cType,POINTER(cType)] lib.ElPow_c.restype = c_uint lib.ElPow_z.argtypes = [zType,zType,POINTER(zType)] lib.ElPow_z.restype = c_uint def Pow(alpha,beta): if type(alpha) is sType: result = sType() lib.ElPow_s(alpha,beta,pointer(result)) return result elif type(alpha) is dType: result = dType() 
lib.ElPow_d(alpha,beta,pointer(result)) return result elif type(alpha) is cType: result = cType() lib.ElPow_c(alpha,beta,pointer(result)) return result elif type(alpha) is zType: result = zType() lib.ElPow_z(alpha,beta,pointer(result)) return result else: raise Exception('Unsupported datatype') lib.ElLog_s.argtypes = [sType,POINTER(sType)] lib.ElLog_s.restype = c_uint lib.ElLog_d.argtypes = [dType,POINTER(dType)] lib.ElLog_d.restype = c_uint lib.ElLog_c.argtypes = [cType,POINTER(cType)] lib.ElLog_c.restype = c_uint lib.ElLog_z.argtypes = [zType,POINTER(zType)] lib.ElLog_z.restype = c_uint def Log(alpha): if type(alpha) is sType: result = sType() lib.ElLog_s(alpha,pointer(result)) return result elif type(alpha) is dType: result = dType() lib.ElLog_d(alpha,pointer(result)) return result elif type(alpha) is cType: result = cType() lib.ElLog_c(alpha,pointer(result)) return result elif type(alpha) is zType: result = zType() lib.ElLog_z(alpha,pointer(result)) return result else: raise Exception('Unsupported datatype') lib.ElSqrt_s.argtypes = [sType,POINTER(sType)] lib.ElSqrt_s.restype = c_uint lib.ElSqrt_d.argtypes = [dType,POINTER(dType)] lib.ElSqrt_d.restype = c_uint lib.ElSqrt_c.argtypes = [cType,POINTER(cType)] lib.ElSqrt_c.restype = c_uint lib.ElSqrt_z.argtypes = [zType,POINTER(zType)] lib.ElSqrt_z.restype = c_uint def Sqrt(alpha): if type(alpha) is sType: result = sType() lib.ElSqrt_s(alpha,pointer(result)) return result elif type(alpha) is dType: result = dType() lib.ElSqrt_d(alpha,pointer(result)) return result elif type(alpha) is cType: result = cType() lib.ElSqrt_c(alpha,pointer(result)) return result elif type(alpha) is zType: result = zType() lib.ElSqrt_z(alpha,pointer(result)) return result else: raise Exception('Unsupported datatype') # Trigonometric functions # ======================= lib.ElCos_s.argtypes = [sType,POINTER(sType)] lib.ElCos_s.restype = c_uint lib.ElCos_d.argtypes = [dType,POINTER(dType)] lib.ElCos_d.restype = c_uint lib.ElCos_c.argtypes = 
[cType,POINTER(cType)] lib.ElCos_c.restype = c_uint lib.ElCos_z.argtypes = [zType,POINTER(zType)] lib.ElCos_z.restype = c_uint def Cos(alpha): if type(alpha) is sType: result = sType() lib.ElCos_s(alpha,pointer(result)) return result elif type(alpha) is dType: result = dType() lib.ElCos_d(alpha,pointer(result)) return result elif type(alpha) is cType: result = cType() lib.ElCos_c(alpha,pointer(result)) return result elif type(alpha) is zType: result = zType() lib.ElCos_z(alpha,pointer(result)) return result else: raise Exception('Unsupported datatype') lib.ElSin_s.argtypes = [sType,POINTER(sType)] lib.ElSin_s.restype = c_uint lib.ElSin_d.argtypes = [dType,POINTER(dType)] lib.ElSin_d.restype = c_uint lib.ElSin_c.argtypes = [cType,POINTER(cType)] lib.ElSin_c.restype = c_uint lib.ElSin_z.argtypes = [zType,POINTER(zType)] lib.ElSin_z.restype = c_uint def Sin(alpha): if type(alpha) is sType: result = sType() lib.ElSin_s(alpha,pointer(result)) return result elif type(alpha) is dType: result = dType() lib.ElSin_d(alpha,pointer(result)) return result elif type(alpha) is cType: result = cType() lib.ElSin_c(alpha,pointer(result)) return result elif type(alpha) is zType: result = zType() lib.ElSin_z(alpha,pointer(result)) return result else: raise Exception('Unsupported datatype') lib.ElTan_s.argtypes = [sType,POINTER(sType)] lib.ElTan_s.restype = c_uint lib.ElTan_d.argtypes = [dType,POINTER(dType)] lib.ElTan_d.restype = c_uint lib.ElTan_c.argtypes = [cType,POINTER(cType)] lib.ElTan_c.restype = c_uint lib.ElTan_z.argtypes = [zType,POINTER(zType)] lib.ElTan_z.restype = c_uint def Tan(alpha): if type(alpha) is sType: result = sType() lib.ElTan_s(alpha,pointer(result)) return result elif type(alpha) is dType: result = dType() lib.ElTan_d(alpha,pointer(result)) return result elif type(alpha) is cType: result = cType() lib.ElTan_c(alpha,pointer(result)) return result elif type(alpha) is zType: result = zType() lib.ElTan_z(alpha,pointer(result)) return result else: raise 
Exception('Unsupported datatype') lib.ElAcos_s.argtypes = [sType,POINTER(sType)] lib.ElAcos_s.restype = c_uint lib.ElAcos_d.argtypes = [dType,POINTER(dType)] lib.ElAcos_d.restype = c_uint lib.ElAcos_c.argtypes = [cType,POINTER(cType)] lib.ElAcos_c.restype = c_uint lib.ElAcos_z.argtypes = [zType,POINTER(zType)] lib.ElAcos_z.restype = c_uint def Acos(alpha): if type(alpha) is sType: result = sType() lib.ElAcos_s(alpha,pointer(result)) return result elif type(alpha) is dType: result = dType() lib.ElAcos_d(alpha,pointer(result)) return result elif type(alpha) is cType: result = cType() lib.ElAcos_c(alpha,pointer(result)) return result elif type(alpha) is zType: result = zType() lib.ElAcos_z(alpha,pointer(result)) return result else: raise Exception('Unsupported datatype') lib.ElAsin_s.argtypes = [sType,POINTER(sType)] lib.ElAsin_s.restype = c_uint lib.ElAsin_d.argtypes = [dType,POINTER(dType)] lib.ElAsin_d.restype = c_uint lib.ElAsin_c.argtypes = [cType,POINTER(cType)] lib.ElAsin_c.restype = c_uint lib.ElAsin_z.argtypes = [zType,POINTER(zType)] lib.ElAsin_z.restype = c_uint def Asin(alpha): if type(alpha) is sType: result = sType() lib.ElAsin_s(alpha,pointer(result)) return result elif type(alpha) is dType: result = dType() lib.ElAsin_d(alpha,pointer(result)) return result elif type(alpha) is cType: result = cType() lib.ElAsin_c(alpha,pointer(result)) return result elif type(alpha) is zType: result = zType() lib.ElAsin_z(alpha,pointer(result)) return result else: raise Exception('Unsupported datatype') lib.ElAtan_s.argtypes = [sType,POINTER(sType)] lib.ElAtan_s.restype = c_uint lib.ElAtan_d.argtypes = [dType,POINTER(dType)] lib.ElAtan_d.restype = c_uint lib.ElAtan_c.argtypes = [cType,POINTER(cType)] lib.ElAtan_c.restype = c_uint lib.ElAtan_z.argtypes = [zType,POINTER(zType)] lib.ElAtan_z.restype = c_uint def Atan(alpha): if type(alpha) is sType: result = sType() lib.ElAtan_s(alpha,pointer(result)) return result elif type(alpha) is dType: result = dType() 
lib.ElAtan_d(alpha,pointer(result)) return result elif type(alpha) is cType: result = cType() lib.ElAtan_c(alpha,pointer(result)) return result elif type(alpha) is zType: result = zType() lib.ElAtan_z(alpha,pointer(result)) return result else: raise Exception('Unsupported datatype') lib.ElAtan2_s.argtypes = [sType,sType,POINTER(sType)] lib.ElAtan2_s.restype = c_uint lib.ElAtan2_d.argtypes = [dType,dType,POINTER(dType)] lib.ElAtan2_d.restype = c_uint def Atan2(y,x): if type(y) is sType: result = sType() lib.ElAtan2_s(y,x,pointer(result)) return result elif type(y) is dType: result = dType() lib.ElAtan2_d(y,x,pointer(result)) return result else: raise Exception('Unsupported datatype') # Hyperbolic functions # ==================== lib.ElCosh_s.argtypes = [sType,POINTER(sType)] lib.ElCosh_s.restype = c_uint lib.ElCosh_d.argtypes = [dType,POINTER(dType)] lib.ElCosh_d.restype = c_uint lib.ElCosh_c.argtypes = [cType,POINTER(cType)] lib.ElCosh_c.restype = c_uint lib.ElCosh_z.argtypes = [zType,POINTER(zType)] lib.ElCosh_z.restype = c_uint def Cosh(alpha): if type(alpha) is sType: result = sType() lib.ElCosh_s(alpha,pointer(result)) return result elif type(alpha) is dType: result = dType() lib.ElCosh_d(alpha,pointer(result)) return result elif type(alpha) is cType: result = cType() lib.ElCosh_c(alpha,pointer(result)) return result elif type(alpha) is zType: result = zType() lib.ElCosh_z(alpha,pointer(result)) return result else: raise Exception('Unsupported datatype') lib.ElSinh_s.argtypes = [sType,POINTER(sType)] lib.ElSinh_s.restype = c_uint lib.ElSinh_d.argtypes = [dType,POINTER(dType)] lib.ElSinh_d.restype = c_uint lib.ElSinh_c.argtypes = [cType,POINTER(cType)] lib.ElSinh_c.restype = c_uint lib.ElSinh_z.argtypes = [zType,POINTER(zType)] lib.ElSinh_z.restype = c_uint def Sinh(alpha): if type(alpha) is sType: result = sType() lib.ElSinh_s(alpha,pointer(result)) return result elif type(alpha) is dType: result = dType() lib.ElSinh_d(alpha,pointer(result)) return result elif 
type(alpha) is cType: result = cType() lib.ElSinh_c(alpha,pointer(result)) return result elif type(alpha) is zType: result = zType() lib.ElSinh_z(alpha,pointer(result)) return result else: raise Exception('Unsupported datatype') lib.ElTanh_s.argtypes = [sType,POINTER(sType)] lib.ElTanh_s.restype = c_uint lib.ElTanh_d.argtypes = [dType,POINTER(dType)] lib.ElTanh_d.restype = c_uint lib.ElTanh_c.argtypes = [cType,POINTER(cType)] lib.ElTanh_c.restype = c_uint lib.ElTanh_z.argtypes = [zType,POINTER(zType)] lib.ElTanh_z.restype = c_uint def Tanh(alpha): if type(alpha) is sType: result = sType() lib.ElTanh_s(alpha,pointer(result)) return result elif type(alpha) is dType: result = dType() lib.ElTanh_d(alpha,pointer(result)) return result elif type(alpha) is cType: result = cType() lib.ElTanh_c(alpha,pointer(result)) return result elif type(alpha) is zType: result = zType() lib.ElTanh_z(alpha,pointer(result)) return result else: raise Exception('Unsupported datatype') lib.ElAcosh_s.argtypes = [sType,POINTER(sType)] lib.ElAcosh_s.restype = c_uint lib.ElAcosh_d.argtypes = [dType,POINTER(dType)] lib.ElAcosh_d.restype = c_uint lib.ElAcosh_c.argtypes = [cType,POINTER(cType)] lib.ElAcosh_c.restype = c_uint lib.ElAcosh_z.argtypes = [zType,POINTER(zType)] lib.ElAcosh_z.restype = c_uint def Acosh(alpha): if type(alpha) is sType: result = sType() lib.ElAcosh_s(alpha,pointer(result)) return result elif type(alpha) is dType: result = dType() lib.ElAcosh_d(alpha,pointer(result)) return result elif type(alpha) is cType: result = cType() lib.ElAcosh_c(alpha,pointer(result)) return result elif type(alpha) is zType: result = zType() lib.ElAcosh_z(alpha,pointer(result)) return result else: raise Exception('Unsupported datatype') lib.ElAsinh_s.argtypes = [sType,POINTER(sType)] lib.ElAsinh_s.restype = c_uint lib.ElAsinh_d.argtypes = [dType,POINTER(dType)] lib.ElAsinh_d.restype = c_uint lib.ElAsinh_c.argtypes = [cType,POINTER(cType)] lib.ElAsinh_c.restype = c_uint lib.ElAsinh_z.argtypes = 
[zType,POINTER(zType)] lib.ElAsinh_z.restype = c_uint def Asinh(alpha): if type(alpha) is sType: result = sType() lib.ElAsinh_s(alpha,pointer(result)) return result elif type(alpha) is dType: result = dType() lib.ElAsinh_d(alpha,pointer(result)) return result elif type(alpha) is cType: result = cType() lib.ElAsinh_c(alpha,pointer(result)) return result elif type(alpha) is zType: result = zType() lib.ElAsinh_z(alpha,pointer(result)) return result else: raise Exception('Unsupported datatype') lib.ElAtanh_s.argtypes = [sType,POINTER(sType)] lib.ElAtanh_s.restype = c_uint lib.ElAtanh_d.argtypes = [dType,POINTER(dType)] lib.ElAtanh_d.restype = c_uint lib.ElAtanh_c.argtypes = [cType,POINTER(cType)] lib.ElAtanh_c.restype = c_uint lib.ElAtanh_z.argtypes = [zType,POINTER(zType)] lib.ElAtanh_z.restype = c_uint def Atanh(alpha): if type(alpha) is sType: result = sType() lib.ElAtanh_s(alpha,pointer(result)) return result elif type(alpha) is dType: result = dType() lib.ElAtanh_d(alpha,pointer(result)) return result elif type(alpha) is cType: result = cType() lib.ElAtanh_c(alpha,pointer(result)) return result elif type(alpha) is zType: result = zType() lib.ElAtanh_z(alpha,pointer(result)) return result else: raise Exception('Unsupported datatype')
bsd-3-clause
clemkoa/scikit-learn
sklearn/cluster/affinity_propagation_.py
15
13973
"""Affinity Propagation clustering algorithm.""" # Author: Alexandre Gramfort alexandre.gramfort@inria.fr # Gael Varoquaux gael.varoquaux@normalesup.org # License: BSD 3 clause import numpy as np import warnings from sklearn.exceptions import ConvergenceWarning from ..base import BaseEstimator, ClusterMixin from ..utils import as_float_array, check_array from ..utils.validation import check_is_fitted from ..metrics import euclidean_distances from ..metrics import pairwise_distances_argmin def _equal_similarities_and_preferences(S, preference): def all_equal_preferences(): return np.all(preference == preference.flat[0]) def all_equal_similarities(): # Create mask to ignore diagonal of S mask = np.ones(S.shape, dtype=bool) np.fill_diagonal(mask, 0) return np.all(S[mask].flat == S[mask].flat[0]) return all_equal_preferences() and all_equal_similarities() def affinity_propagation(S, preference=None, convergence_iter=15, max_iter=200, damping=0.5, copy=True, verbose=False, return_n_iter=False): """Perform Affinity Propagation Clustering of data Read more in the :ref:`User Guide <affinity_propagation>`. Parameters ---------- S : array-like, shape (n_samples, n_samples) Matrix of similarities between points preference : array-like, shape (n_samples,) or float, optional Preferences for each point - points with larger values of preferences are more likely to be chosen as exemplars. The number of exemplars, i.e. of clusters, is influenced by the input preferences value. If the preferences are not passed as arguments, they will be set to the median of the input similarities (resulting in a moderate number of clusters). For a smaller amount of clusters, this can be set to the minimum value of the similarities. convergence_iter : int, optional, default: 15 Number of iterations with no change in the number of estimated clusters that stops the convergence. 
max_iter : int, optional, default: 200 Maximum number of iterations damping : float, optional, default: 0.5 Damping factor between 0.5 and 1. copy : boolean, optional, default: True If copy is False, the affinity matrix is modified inplace by the algorithm, for memory efficiency verbose : boolean, optional, default: False The verbosity level return_n_iter : bool, default False Whether or not to return the number of iterations. Returns ------- cluster_centers_indices : array, shape (n_clusters,) index of clusters centers labels : array, shape (n_samples,) cluster labels for each point n_iter : int number of iterations run. Returned only if `return_n_iter` is set to True. Notes ----- For an example, see :ref:`examples/cluster/plot_affinity_propagation.py <sphx_glr_auto_examples_cluster_plot_affinity_propagation.py>`. When the algorithm does not converge, it returns an empty array as ``cluster_center_indices`` and ``-1`` as label for each training sample. When all training samples have equal similarities and equal preferences, the assignment of cluster centers and labels depends on the preference. If the preference is smaller than the similarities, a single cluster center and label ``0`` for every sample will be returned. Otherwise, every training sample becomes its own cluster center and is assigned a unique label. References ---------- Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages Between Data Points", Science Feb. 
2007 """ S = as_float_array(S, copy=copy) n_samples = S.shape[0] if S.shape[0] != S.shape[1]: raise ValueError("S must be a square array (shape=%s)" % repr(S.shape)) if preference is None: preference = np.median(S) if damping < 0.5 or damping >= 1: raise ValueError('damping must be >= 0.5 and < 1') preference = np.array(preference) if (n_samples == 1 or _equal_similarities_and_preferences(S, preference)): # It makes no sense to run the algorithm in this case, so return 1 or # n_samples clusters, depending on preferences warnings.warn("All samples have mutually equal similarities. " "Returning arbitrary cluster center(s).") if preference.flat[0] >= S.flat[n_samples - 1]: return ((np.arange(n_samples), np.arange(n_samples), 0) if return_n_iter else (np.arange(n_samples), np.arange(n_samples))) else: return ((np.array([0]), np.array([0] * n_samples), 0) if return_n_iter else (np.array([0]), np.array([0] * n_samples))) random_state = np.random.RandomState(0) # Place preference on the diagonal of S S.flat[::(n_samples + 1)] = preference A = np.zeros((n_samples, n_samples)) R = np.zeros((n_samples, n_samples)) # Initialize messages # Intermediate results tmp = np.zeros((n_samples, n_samples)) # Remove degeneracies S += ((np.finfo(np.double).eps * S + np.finfo(np.double).tiny * 100) * random_state.randn(n_samples, n_samples)) # Execute parallel affinity propagation updates e = np.zeros((n_samples, convergence_iter)) ind = np.arange(n_samples) for it in range(max_iter): # tmp = A + S; compute responsibilities np.add(A, S, tmp) I = np.argmax(tmp, axis=1) Y = tmp[ind, I] # np.max(A + S, axis=1) tmp[ind, I] = -np.inf Y2 = np.max(tmp, axis=1) # tmp = Rnew np.subtract(S, Y[:, None], tmp) tmp[ind, I] = S[ind, I] - Y2 # Damping tmp *= 1 - damping R *= damping R += tmp # tmp = Rp; compute availabilities np.maximum(R, 0, tmp) tmp.flat[::n_samples + 1] = R.flat[::n_samples + 1] # tmp = -Anew tmp -= np.sum(tmp, axis=0) dA = np.diag(tmp).copy() tmp.clip(0, np.inf, tmp) 
tmp.flat[::n_samples + 1] = dA # Damping tmp *= 1 - damping A *= damping A -= tmp # Check for convergence E = (np.diag(A) + np.diag(R)) > 0 e[:, it % convergence_iter] = E K = np.sum(E, axis=0) if it >= convergence_iter: se = np.sum(e, axis=1) unconverged = (np.sum((se == convergence_iter) + (se == 0)) != n_samples) if (not unconverged and (K > 0)) or (it == max_iter): if verbose: print("Converged after %d iterations." % it) break else: if verbose: print("Did not converge") I = np.flatnonzero(E) K = I.size # Identify exemplars if K > 0: c = np.argmax(S[:, I], axis=1) c[I] = np.arange(K) # Identify clusters # Refine the final set of exemplars and clusters and return results for k in range(K): ii = np.where(c == k)[0] j = np.argmax(np.sum(S[ii[:, np.newaxis], ii], axis=0)) I[k] = ii[j] c = np.argmax(S[:, I], axis=1) c[I] = np.arange(K) labels = I[c] # Reduce labels to a sorted, gapless, list cluster_centers_indices = np.unique(labels) labels = np.searchsorted(cluster_centers_indices, labels) else: warnings.warn("Affinity propagation did not converge, this model " "will not have any cluster centers.", ConvergenceWarning) labels = np.array([-1] * n_samples) cluster_centers_indices = [] if return_n_iter: return cluster_centers_indices, labels, it + 1 else: return cluster_centers_indices, labels ############################################################################### class AffinityPropagation(BaseEstimator, ClusterMixin): """Perform Affinity Propagation Clustering of data. Read more in the :ref:`User Guide <affinity_propagation>`. Parameters ---------- damping : float, optional, default: 0.5 Damping factor (between 0.5 and 1) is the extent to which the current value is maintained relative to incoming values (weighted 1 - damping). This in order to avoid numerical oscillations when updating these values (messages). max_iter : int, optional, default: 200 Maximum number of iterations. 
convergence_iter : int, optional, default: 15 Number of iterations with no change in the number of estimated clusters that stops the convergence. copy : boolean, optional, default: True Make a copy of input data. preference : array-like, shape (n_samples,) or float, optional Preferences for each point - points with larger values of preferences are more likely to be chosen as exemplars. The number of exemplars, ie of clusters, is influenced by the input preferences value. If the preferences are not passed as arguments, they will be set to the median of the input similarities. affinity : string, optional, default=``euclidean`` Which affinity to use. At the moment ``precomputed`` and ``euclidean`` are supported. ``euclidean`` uses the negative squared euclidean distance between points. verbose : boolean, optional, default: False Whether to be verbose. Attributes ---------- cluster_centers_indices_ : array, shape (n_clusters,) Indices of cluster centers cluster_centers_ : array, shape (n_clusters, n_features) Cluster centers (if affinity != ``precomputed``). labels_ : array, shape (n_samples,) Labels of each point affinity_matrix_ : array, shape (n_samples, n_samples) Stores the affinity matrix used in ``fit``. n_iter_ : int Number of iterations taken to converge. Notes ----- For an example, see :ref:`examples/cluster/plot_affinity_propagation.py <sphx_glr_auto_examples_cluster_plot_affinity_propagation.py>`. The algorithmic complexity of affinity propagation is quadratic in the number of points. When ``fit`` does not converge, ``cluster_centers_`` becomes an empty array and all training samples will be labelled as ``-1``. In addition, ``predict`` will then label every sample as ``-1``. When all training samples have equal similarities and equal preferences, the assignment of cluster centers and labels depends on the preference. If the preference is smaller than the similarities, ``fit`` will result in a single cluster center and label ``0`` for every sample. 
Otherwise, every training sample becomes its own cluster center and is assigned a unique label. References ---------- Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages Between Data Points", Science Feb. 2007 """ def __init__(self, damping=.5, max_iter=200, convergence_iter=15, copy=True, preference=None, affinity='euclidean', verbose=False): self.damping = damping self.max_iter = max_iter self.convergence_iter = convergence_iter self.copy = copy self.verbose = verbose self.preference = preference self.affinity = affinity @property def _pairwise(self): return self.affinity == "precomputed" def fit(self, X, y=None): """ Create affinity matrix from negative euclidean distances, then apply affinity propagation clustering. Parameters ---------- X : array-like, shape (n_samples, n_features) or (n_samples, n_samples) Data matrix or, if affinity is ``precomputed``, matrix of similarities / affinities. y : Ignored """ X = check_array(X, accept_sparse='csr') if self.affinity == "precomputed": self.affinity_matrix_ = X elif self.affinity == "euclidean": self.affinity_matrix_ = -euclidean_distances(X, squared=True) else: raise ValueError("Affinity must be 'precomputed' or " "'euclidean'. Got %s instead" % str(self.affinity)) self.cluster_centers_indices_, self.labels_, self.n_iter_ = \ affinity_propagation( self.affinity_matrix_, self.preference, max_iter=self.max_iter, convergence_iter=self.convergence_iter, damping=self.damping, copy=self.copy, verbose=self.verbose, return_n_iter=True) if self.affinity != "precomputed": self.cluster_centers_ = X[self.cluster_centers_indices_].copy() return self def predict(self, X): """Predict the closest cluster each sample in X belongs to. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) New data to predict. Returns ------- labels : array, shape (n_samples,) Index of the cluster each sample belongs to. 
""" check_is_fitted(self, "cluster_centers_indices_") if not hasattr(self, "cluster_centers_"): raise ValueError("Predict method is not supported when " "affinity='precomputed'.") if self.cluster_centers_.size > 0: return pairwise_distances_argmin(X, self.cluster_centers_) else: warnings.warn("This model does not have any cluster centers " "because affinity propagation did not converge. " "Labeling every sample as '-1'.") return np.array([-1] * X.shape[0])
bsd-3-clause
usc-isi/extra-specs
nova/tests/api/openstack/compute/contrib/test_volume_types_extra_specs.py
19
7062
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2011 Zadara Storage Inc. # Copyright (c) 2011 OpenStack LLC. # Copyright 2011 University of Southern California # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from lxml import etree import webob from nova.api.openstack.compute.contrib import volumetypes from nova import test from nova.tests.api.openstack import fakes import nova.wsgi def return_create_volume_type_extra_specs(context, volume_type_id, extra_specs): return stub_volume_type_extra_specs() def return_volume_type_extra_specs(context, volume_type_id): return stub_volume_type_extra_specs() def return_empty_volume_type_extra_specs(context, volume_type_id): return {} def delete_volume_type_extra_specs(context, volume_type_id, key): pass def stub_volume_type_extra_specs(): specs = { "key1": "value1", "key2": "value2", "key3": "value3", "key4": "value4", "key5": "value5"} return specs class VolumeTypesExtraSpecsTest(test.TestCase): def setUp(self): super(VolumeTypesExtraSpecsTest, self).setUp() fakes.stub_out_key_pair_funcs(self.stubs) self.api_path = '/v2/fake/os-volume-types/1/extra_specs' self.controller = volumetypes.VolumeTypeExtraSpecsController() def test_index(self): self.stubs.Set(nova.db, 'volume_type_extra_specs_get', return_volume_type_extra_specs) req = fakes.HTTPRequest.blank(self.api_path) res_dict = self.controller.index(req, 1) self.assertEqual('value1', res_dict['extra_specs']['key1']) def test_index_no_data(self): 
self.stubs.Set(nova.db, 'volume_type_extra_specs_get', return_empty_volume_type_extra_specs) req = fakes.HTTPRequest.blank(self.api_path) res_dict = self.controller.index(req, 1) self.assertEqual(0, len(res_dict['extra_specs'])) def test_show(self): self.stubs.Set(nova.db, 'volume_type_extra_specs_get', return_volume_type_extra_specs) req = fakes.HTTPRequest.blank(self.api_path + '/key5') res_dict = self.controller.show(req, 1, 'key5') self.assertEqual('value5', res_dict['key5']) def test_show_spec_not_found(self): self.stubs.Set(nova.db, 'volume_type_extra_specs_get', return_empty_volume_type_extra_specs) req = fakes.HTTPRequest.blank(self.api_path + '/key6') self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req, 1, 'key6') def test_delete(self): self.stubs.Set(nova.db, 'volume_type_extra_specs_delete', delete_volume_type_extra_specs) req = fakes.HTTPRequest.blank(self.api_path + '/key5') self.controller.delete(req, 1, 'key5') def test_create(self): self.stubs.Set(nova.db, 'volume_type_extra_specs_update_or_create', return_create_volume_type_extra_specs) body = {"extra_specs": {"key1": "value1"}} req = fakes.HTTPRequest.blank(self.api_path) res_dict = self.controller.create(req, 1, body) self.assertEqual('value1', res_dict['extra_specs']['key1']) def test_create_empty_body(self): self.stubs.Set(nova.db, 'volume_type_extra_specs_update_or_create', return_create_volume_type_extra_specs) req = fakes.HTTPRequest.blank(self.api_path) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, 1, '') def test_update_item(self): self.stubs.Set(nova.db, 'volume_type_extra_specs_update_or_create', return_create_volume_type_extra_specs) body = {"key1": "value1"} req = fakes.HTTPRequest.blank(self.api_path + '/key1') res_dict = self.controller.update(req, 1, 'key1', body) self.assertEqual('value1', res_dict['key1']) def test_update_item_empty_body(self): self.stubs.Set(nova.db, 'volume_type_extra_specs_update_or_create', 
return_create_volume_type_extra_specs) req = fakes.HTTPRequest.blank(self.api_path + '/key1') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, 1, 'key1', '') def test_update_item_too_many_keys(self): self.stubs.Set(nova.db, 'volume_type_extra_specs_update_or_create', return_create_volume_type_extra_specs) body = {"key1": "value1", "key2": "value2"} req = fakes.HTTPRequest.blank(self.api_path + '/key1') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, 1, 'key1', body) def test_update_item_body_uri_mismatch(self): self.stubs.Set(nova.db, 'volume_type_extra_specs_update_or_create', return_create_volume_type_extra_specs) body = {"key1": "value1"} req = fakes.HTTPRequest.blank(self.api_path + '/bad') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, 1, 'bad', body) class VolumeTypeExtraSpecsSerializerTest(test.TestCase): def test_index_create_serializer(self): serializer = volumetypes.VolumeTypeExtraSpecsTemplate() # Just getting some input data extra_specs = stub_volume_type_extra_specs() text = serializer.serialize(dict(extra_specs=extra_specs)) print text tree = etree.fromstring(text) self.assertEqual('extra_specs', tree.tag) self.assertEqual(len(extra_specs), len(tree)) seen = set(extra_specs.keys()) for child in tree: self.assertTrue(child.tag in seen) self.assertEqual(extra_specs[child.tag], child.text) seen.remove(child.tag) self.assertEqual(len(seen), 0) def test_update_show_serializer(self): serializer = volumetypes.VolumeTypeExtraSpecTemplate() exemplar = dict(key1='value1') text = serializer.serialize(exemplar) print text tree = etree.fromstring(text) self.assertEqual('key1', tree.tag) self.assertEqual('value1', tree.text) self.assertEqual(0, len(tree))
apache-2.0
Ziqi-Li/bknqgis
pandas/pandas/tests/test_nanops.py
2
42408
# -*- coding: utf-8 -*- from __future__ import division, print_function from functools import partial import pytest import warnings import numpy as np import pandas as pd from pandas import Series, isna, _np_version_under1p9 from pandas.core.dtypes.common import is_integer_dtype import pandas.core.nanops as nanops import pandas.util.testing as tm use_bn = nanops._USE_BOTTLENECK class TestnanopsDataFrame(object): def setup_method(self, method): np.random.seed(11235) nanops._USE_BOTTLENECK = False self.arr_shape = (11, 7, 5) self.arr_float = np.random.randn(*self.arr_shape) self.arr_float1 = np.random.randn(*self.arr_shape) self.arr_complex = self.arr_float + self.arr_float1 * 1j self.arr_int = np.random.randint(-10, 10, self.arr_shape) self.arr_bool = np.random.randint(0, 2, self.arr_shape) == 0 self.arr_str = np.abs(self.arr_float).astype('S') self.arr_utf = np.abs(self.arr_float).astype('U') self.arr_date = np.random.randint(0, 20000, self.arr_shape).astype('M8[ns]') self.arr_tdelta = np.random.randint(0, 20000, self.arr_shape).astype('m8[ns]') self.arr_nan = np.tile(np.nan, self.arr_shape) self.arr_float_nan = np.vstack([self.arr_float, self.arr_nan]) self.arr_float1_nan = np.vstack([self.arr_float1, self.arr_nan]) self.arr_nan_float1 = np.vstack([self.arr_nan, self.arr_float1]) self.arr_nan_nan = np.vstack([self.arr_nan, self.arr_nan]) self.arr_inf = self.arr_float * np.inf self.arr_float_inf = np.vstack([self.arr_float, self.arr_inf]) self.arr_float1_inf = np.vstack([self.arr_float1, self.arr_inf]) self.arr_inf_float1 = np.vstack([self.arr_inf, self.arr_float1]) self.arr_inf_inf = np.vstack([self.arr_inf, self.arr_inf]) self.arr_nan_inf = np.vstack([self.arr_nan, self.arr_inf]) self.arr_float_nan_inf = np.vstack([self.arr_float, self.arr_nan, self.arr_inf]) self.arr_nan_float1_inf = np.vstack([self.arr_float, self.arr_inf, self.arr_nan]) self.arr_nan_nan_inf = np.vstack([self.arr_nan, self.arr_nan, self.arr_inf]) self.arr_obj = np.vstack([self.arr_float.astype( 
'O'), self.arr_int.astype('O'), self.arr_bool.astype( 'O'), self.arr_complex.astype('O'), self.arr_str.astype( 'O'), self.arr_utf.astype('O'), self.arr_date.astype('O'), self.arr_tdelta.astype('O')]) with np.errstate(invalid='ignore'): self.arr_nan_nanj = self.arr_nan + self.arr_nan * 1j self.arr_complex_nan = np.vstack([self.arr_complex, self.arr_nan_nanj]) self.arr_nan_infj = self.arr_inf * 1j self.arr_complex_nan_infj = np.vstack([self.arr_complex, self.arr_nan_infj]) self.arr_float_2d = self.arr_float[:, :, 0] self.arr_float1_2d = self.arr_float1[:, :, 0] self.arr_complex_2d = self.arr_complex[:, :, 0] self.arr_int_2d = self.arr_int[:, :, 0] self.arr_bool_2d = self.arr_bool[:, :, 0] self.arr_str_2d = self.arr_str[:, :, 0] self.arr_utf_2d = self.arr_utf[:, :, 0] self.arr_date_2d = self.arr_date[:, :, 0] self.arr_tdelta_2d = self.arr_tdelta[:, :, 0] self.arr_nan_2d = self.arr_nan[:, :, 0] self.arr_float_nan_2d = self.arr_float_nan[:, :, 0] self.arr_float1_nan_2d = self.arr_float1_nan[:, :, 0] self.arr_nan_float1_2d = self.arr_nan_float1[:, :, 0] self.arr_nan_nan_2d = self.arr_nan_nan[:, :, 0] self.arr_nan_nanj_2d = self.arr_nan_nanj[:, :, 0] self.arr_complex_nan_2d = self.arr_complex_nan[:, :, 0] self.arr_inf_2d = self.arr_inf[:, :, 0] self.arr_float_inf_2d = self.arr_float_inf[:, :, 0] self.arr_nan_inf_2d = self.arr_nan_inf[:, :, 0] self.arr_float_nan_inf_2d = self.arr_float_nan_inf[:, :, 0] self.arr_nan_nan_inf_2d = self.arr_nan_nan_inf[:, :, 0] self.arr_float_1d = self.arr_float[:, 0, 0] self.arr_float1_1d = self.arr_float1[:, 0, 0] self.arr_complex_1d = self.arr_complex[:, 0, 0] self.arr_int_1d = self.arr_int[:, 0, 0] self.arr_bool_1d = self.arr_bool[:, 0, 0] self.arr_str_1d = self.arr_str[:, 0, 0] self.arr_utf_1d = self.arr_utf[:, 0, 0] self.arr_date_1d = self.arr_date[:, 0, 0] self.arr_tdelta_1d = self.arr_tdelta[:, 0, 0] self.arr_nan_1d = self.arr_nan[:, 0, 0] self.arr_float_nan_1d = self.arr_float_nan[:, 0, 0] self.arr_float1_nan_1d = 
self.arr_float1_nan[:, 0, 0] self.arr_nan_float1_1d = self.arr_nan_float1[:, 0, 0] self.arr_nan_nan_1d = self.arr_nan_nan[:, 0, 0] self.arr_nan_nanj_1d = self.arr_nan_nanj[:, 0, 0] self.arr_complex_nan_1d = self.arr_complex_nan[:, 0, 0] self.arr_inf_1d = self.arr_inf.ravel() self.arr_float_inf_1d = self.arr_float_inf[:, 0, 0] self.arr_nan_inf_1d = self.arr_nan_inf[:, 0, 0] self.arr_float_nan_inf_1d = self.arr_float_nan_inf[:, 0, 0] self.arr_nan_nan_inf_1d = self.arr_nan_nan_inf[:, 0, 0] def teardown_method(self, method): nanops._USE_BOTTLENECK = use_bn def check_results(self, targ, res, axis, check_dtype=True): res = getattr(res, 'asm8', res) res = getattr(res, 'values', res) # timedeltas are a beast here def _coerce_tds(targ, res): if hasattr(targ, 'dtype') and targ.dtype == 'm8[ns]': if len(targ) == 1: targ = targ[0].item() res = res.item() else: targ = targ.view('i8') return targ, res try: if axis != 0 and hasattr( targ, 'shape') and targ.ndim and targ.shape != res.shape: res = np.split(res, [targ.shape[0]], axis=0)[0] except: targ, res = _coerce_tds(targ, res) try: tm.assert_almost_equal(targ, res, check_dtype=check_dtype) except: # handle timedelta dtypes if hasattr(targ, 'dtype') and targ.dtype == 'm8[ns]': targ, res = _coerce_tds(targ, res) tm.assert_almost_equal(targ, res, check_dtype=check_dtype) return # There are sometimes rounding errors with # complex and object dtypes. # If it isn't one of those, re-raise the error. 
if not hasattr(res, 'dtype') or res.dtype.kind not in ['c', 'O']: raise # convert object dtypes to something that can be split into # real and imaginary parts if res.dtype.kind == 'O': if targ.dtype.kind != 'O': res = res.astype(targ.dtype) else: try: res = res.astype('c16') except: res = res.astype('f8') try: targ = targ.astype('c16') except: targ = targ.astype('f8') # there should never be a case where numpy returns an object # but nanops doesn't, so make that an exception elif targ.dtype.kind == 'O': raise tm.assert_almost_equal(targ.real, res.real, check_dtype=check_dtype) tm.assert_almost_equal(targ.imag, res.imag, check_dtype=check_dtype) def check_fun_data(self, testfunc, targfunc, testarval, targarval, targarnanval, check_dtype=True, **kwargs): for axis in list(range(targarval.ndim)) + [None]: for skipna in [False, True]: targartempval = targarval if skipna else targarnanval try: targ = targfunc(targartempval, axis=axis, **kwargs) res = testfunc(testarval, axis=axis, skipna=skipna, **kwargs) self.check_results(targ, res, axis, check_dtype=check_dtype) if skipna: res = testfunc(testarval, axis=axis, **kwargs) self.check_results(targ, res, axis, check_dtype=check_dtype) if axis is None: res = testfunc(testarval, skipna=skipna, **kwargs) self.check_results(targ, res, axis, check_dtype=check_dtype) if skipna and axis is None: res = testfunc(testarval, **kwargs) self.check_results(targ, res, axis, check_dtype=check_dtype) except BaseException as exc: exc.args += ('axis: %s of %s' % (axis, testarval.ndim - 1), 'skipna: %s' % skipna, 'kwargs: %s' % kwargs) raise if testarval.ndim <= 1: return try: testarval2 = np.take(testarval, 0, axis=-1) targarval2 = np.take(targarval, 0, axis=-1) targarnanval2 = np.take(targarnanval, 0, axis=-1) except ValueError: return self.check_fun_data(testfunc, targfunc, testarval2, targarval2, targarnanval2, check_dtype=check_dtype, **kwargs) def check_fun(self, testfunc, targfunc, testar, targar=None, targarnan=None, **kwargs): if 
targar is None: targar = testar if targarnan is None: targarnan = testar testarval = getattr(self, testar) targarval = getattr(self, targar) targarnanval = getattr(self, targarnan) try: self.check_fun_data(testfunc, targfunc, testarval, targarval, targarnanval, **kwargs) except BaseException as exc: exc.args += ('testar: %s' % testar, 'targar: %s' % targar, 'targarnan: %s' % targarnan) raise def check_funs(self, testfunc, targfunc, allow_complex=True, allow_all_nan=True, allow_str=True, allow_date=True, allow_tdelta=True, allow_obj=True, **kwargs): self.check_fun(testfunc, targfunc, 'arr_float', **kwargs) self.check_fun(testfunc, targfunc, 'arr_float_nan', 'arr_float', **kwargs) self.check_fun(testfunc, targfunc, 'arr_int', **kwargs) self.check_fun(testfunc, targfunc, 'arr_bool', **kwargs) objs = [self.arr_float.astype('O'), self.arr_int.astype('O'), self.arr_bool.astype('O')] if allow_all_nan: self.check_fun(testfunc, targfunc, 'arr_nan', **kwargs) if allow_complex: self.check_fun(testfunc, targfunc, 'arr_complex', **kwargs) self.check_fun(testfunc, targfunc, 'arr_complex_nan', 'arr_complex', **kwargs) if allow_all_nan: self.check_fun(testfunc, targfunc, 'arr_nan_nanj', **kwargs) objs += [self.arr_complex.astype('O')] if allow_str: self.check_fun(testfunc, targfunc, 'arr_str', **kwargs) self.check_fun(testfunc, targfunc, 'arr_utf', **kwargs) objs += [self.arr_str.astype('O'), self.arr_utf.astype('O')] if allow_date: try: targfunc(self.arr_date) except TypeError: pass else: self.check_fun(testfunc, targfunc, 'arr_date', **kwargs) objs += [self.arr_date.astype('O')] if allow_tdelta: try: targfunc(self.arr_tdelta) except TypeError: pass else: self.check_fun(testfunc, targfunc, 'arr_tdelta', **kwargs) objs += [self.arr_tdelta.astype('O')] if allow_obj: self.arr_obj = np.vstack(objs) # some nanops handle object dtypes better than their numpy # counterparts, so the numpy functions need to be given something # else if allow_obj == 'convert': targfunc = 
partial(self._badobj_wrap, func=targfunc, allow_complex=allow_complex) self.check_fun(testfunc, targfunc, 'arr_obj', **kwargs) def check_funs_ddof(self, testfunc, targfunc, allow_complex=True, allow_all_nan=True, allow_str=True, allow_date=False, allow_tdelta=False, allow_obj=True, ): for ddof in range(3): try: self.check_funs(testfunc, targfunc, allow_complex, allow_all_nan, allow_str, allow_date, allow_tdelta, allow_obj, ddof=ddof) except BaseException as exc: exc.args += ('ddof %s' % ddof, ) raise def _badobj_wrap(self, value, func, allow_complex=True, **kwargs): if value.dtype.kind == 'O': if allow_complex: value = value.astype('c16') else: value = value.astype('f8') return func(value, **kwargs) def test_nanany(self): self.check_funs(nanops.nanany, np.any, allow_all_nan=False, allow_str=False, allow_date=False, allow_tdelta=False) def test_nanall(self): self.check_funs(nanops.nanall, np.all, allow_all_nan=False, allow_str=False, allow_date=False, allow_tdelta=False) def test_nansum(self): self.check_funs(nanops.nansum, np.sum, allow_str=False, allow_date=False, allow_tdelta=True, check_dtype=False) def test_nanmean(self): self.check_funs(nanops.nanmean, np.mean, allow_complex=False, allow_obj=False, allow_str=False, allow_date=False, allow_tdelta=True) def test_nanmean_overflow(self): # GH 10155 # In the previous implementation mean can overflow for int dtypes, it # is now consistent with numpy # numpy < 1.9.0 is not computing this correctly if not _np_version_under1p9: for a in [2 ** 55, -2 ** 55, 20150515061816532]: s = Series(a, index=range(500), dtype=np.int64) result = s.mean() np_result = s.values.mean() assert result == a assert result == np_result assert result.dtype == np.float64 def test_returned_dtype(self): dtypes = [np.int16, np.int32, np.int64, np.float32, np.float64] if hasattr(np, 'float128'): dtypes.append(np.float128) for dtype in dtypes: s = Series(range(10), dtype=dtype) group_a = ['mean', 'std', 'var', 'skew', 'kurt'] group_b = ['min', 
'max'] for method in group_a + group_b: result = getattr(s, method)() if is_integer_dtype(dtype) and method in group_a: assert result.dtype == np.float64 else: assert result.dtype == dtype def test_nanmedian(self): with warnings.catch_warnings(record=True): self.check_funs(nanops.nanmedian, np.median, allow_complex=False, allow_str=False, allow_date=False, allow_tdelta=True, allow_obj='convert') def test_nanvar(self): self.check_funs_ddof(nanops.nanvar, np.var, allow_complex=False, allow_str=False, allow_date=False, allow_tdelta=True, allow_obj='convert') def test_nanstd(self): self.check_funs_ddof(nanops.nanstd, np.std, allow_complex=False, allow_str=False, allow_date=False, allow_tdelta=True, allow_obj='convert') def test_nansem(self): tm.skip_if_no_package('scipy', min_version='0.17.0') from scipy.stats import sem with np.errstate(invalid='ignore'): self.check_funs_ddof(nanops.nansem, sem, allow_complex=False, allow_str=False, allow_date=False, allow_tdelta=False, allow_obj='convert') def _minmax_wrap(self, value, axis=None, func=None): res = func(value, axis) if res.dtype.kind == 'm': res = np.atleast_1d(res) return res def test_nanmin(self): func = partial(self._minmax_wrap, func=np.min) self.check_funs(nanops.nanmin, func, allow_str=False, allow_obj=False) def test_nanmax(self): func = partial(self._minmax_wrap, func=np.max) self.check_funs(nanops.nanmax, func, allow_str=False, allow_obj=False) def _argminmax_wrap(self, value, axis=None, func=None): res = func(value, axis) nans = np.min(value, axis) nullnan = isna(nans) if res.ndim: res[nullnan] = -1 elif (hasattr(nullnan, 'all') and nullnan.all() or not hasattr(nullnan, 'all') and nullnan): res = -1 return res def test_nanargmax(self): func = partial(self._argminmax_wrap, func=np.argmax) self.check_funs(nanops.nanargmax, func, allow_str=False, allow_obj=False, allow_date=True, allow_tdelta=True) def test_nanargmin(self): func = partial(self._argminmax_wrap, func=np.argmin) if tm.sys.version_info[0:2] == (2, 
6): self.check_funs(nanops.nanargmin, func, allow_date=True, allow_tdelta=True, allow_str=False, allow_obj=False) else: self.check_funs(nanops.nanargmin, func, allow_str=False, allow_obj=False) def _skew_kurt_wrap(self, values, axis=None, func=None): if not isinstance(values.dtype.type, np.floating): values = values.astype('f8') result = func(values, axis=axis, bias=False) # fix for handling cases where all elements in an axis are the same if isinstance(result, np.ndarray): result[np.max(values, axis=axis) == np.min(values, axis=axis)] = 0 return result elif np.max(values) == np.min(values): return 0. return result def test_nanskew(self): tm.skip_if_no_package('scipy', min_version='0.17.0') from scipy.stats import skew func = partial(self._skew_kurt_wrap, func=skew) with np.errstate(invalid='ignore'): self.check_funs(nanops.nanskew, func, allow_complex=False, allow_str=False, allow_date=False, allow_tdelta=False) def test_nankurt(self): tm.skip_if_no_package('scipy', min_version='0.17.0') from scipy.stats import kurtosis func1 = partial(kurtosis, fisher=True) func = partial(self._skew_kurt_wrap, func=func1) with np.errstate(invalid='ignore'): self.check_funs(nanops.nankurt, func, allow_complex=False, allow_str=False, allow_date=False, allow_tdelta=False) def test_nanprod(self): self.check_funs(nanops.nanprod, np.prod, allow_str=False, allow_date=False, allow_tdelta=False) def check_nancorr_nancov_2d(self, checkfun, targ0, targ1, **kwargs): res00 = checkfun(self.arr_float_2d, self.arr_float1_2d, **kwargs) res01 = checkfun(self.arr_float_2d, self.arr_float1_2d, min_periods=len(self.arr_float_2d) - 1, **kwargs) tm.assert_almost_equal(targ0, res00) tm.assert_almost_equal(targ0, res01) res10 = checkfun(self.arr_float_nan_2d, self.arr_float1_nan_2d, **kwargs) res11 = checkfun(self.arr_float_nan_2d, self.arr_float1_nan_2d, min_periods=len(self.arr_float_2d) - 1, **kwargs) tm.assert_almost_equal(targ1, res10) tm.assert_almost_equal(targ1, res11) targ2 = np.nan res20 = 
checkfun(self.arr_nan_2d, self.arr_float1_2d, **kwargs) res21 = checkfun(self.arr_float_2d, self.arr_nan_2d, **kwargs) res22 = checkfun(self.arr_nan_2d, self.arr_nan_2d, **kwargs) res23 = checkfun(self.arr_float_nan_2d, self.arr_nan_float1_2d, **kwargs) res24 = checkfun(self.arr_float_nan_2d, self.arr_nan_float1_2d, min_periods=len(self.arr_float_2d) - 1, **kwargs) res25 = checkfun(self.arr_float_2d, self.arr_float1_2d, min_periods=len(self.arr_float_2d) + 1, **kwargs) tm.assert_almost_equal(targ2, res20) tm.assert_almost_equal(targ2, res21) tm.assert_almost_equal(targ2, res22) tm.assert_almost_equal(targ2, res23) tm.assert_almost_equal(targ2, res24) tm.assert_almost_equal(targ2, res25) def check_nancorr_nancov_1d(self, checkfun, targ0, targ1, **kwargs): res00 = checkfun(self.arr_float_1d, self.arr_float1_1d, **kwargs) res01 = checkfun(self.arr_float_1d, self.arr_float1_1d, min_periods=len(self.arr_float_1d) - 1, **kwargs) tm.assert_almost_equal(targ0, res00) tm.assert_almost_equal(targ0, res01) res10 = checkfun(self.arr_float_nan_1d, self.arr_float1_nan_1d, **kwargs) res11 = checkfun(self.arr_float_nan_1d, self.arr_float1_nan_1d, min_periods=len(self.arr_float_1d) - 1, **kwargs) tm.assert_almost_equal(targ1, res10) tm.assert_almost_equal(targ1, res11) targ2 = np.nan res20 = checkfun(self.arr_nan_1d, self.arr_float1_1d, **kwargs) res21 = checkfun(self.arr_float_1d, self.arr_nan_1d, **kwargs) res22 = checkfun(self.arr_nan_1d, self.arr_nan_1d, **kwargs) res23 = checkfun(self.arr_float_nan_1d, self.arr_nan_float1_1d, **kwargs) res24 = checkfun(self.arr_float_nan_1d, self.arr_nan_float1_1d, min_periods=len(self.arr_float_1d) - 1, **kwargs) res25 = checkfun(self.arr_float_1d, self.arr_float1_1d, min_periods=len(self.arr_float_1d) + 1, **kwargs) tm.assert_almost_equal(targ2, res20) tm.assert_almost_equal(targ2, res21) tm.assert_almost_equal(targ2, res22) tm.assert_almost_equal(targ2, res23) tm.assert_almost_equal(targ2, res24) tm.assert_almost_equal(targ2, res25) def 
test_nancorr(self): targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1] targ1 = np.corrcoef(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1] self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1) targ0 = np.corrcoef(self.arr_float_1d, self.arr_float1_1d)[0, 1] targ1 = np.corrcoef(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0, 1] self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method='pearson') def test_nancorr_pearson(self): targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1] targ1 = np.corrcoef(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1] self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1, method='pearson') targ0 = np.corrcoef(self.arr_float_1d, self.arr_float1_1d)[0, 1] targ1 = np.corrcoef(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0, 1] self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method='pearson') def test_nancorr_kendall(self): tm.skip_if_no_package('scipy.stats') from scipy.stats import kendalltau targ0 = kendalltau(self.arr_float_2d, self.arr_float1_2d)[0] targ1 = kendalltau(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0] self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1, method='kendall') targ0 = kendalltau(self.arr_float_1d, self.arr_float1_1d)[0] targ1 = kendalltau(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0] self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method='kendall') def test_nancorr_spearman(self): tm.skip_if_no_package('scipy.stats') from scipy.stats import spearmanr targ0 = spearmanr(self.arr_float_2d, self.arr_float1_2d)[0] targ1 = spearmanr(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0] self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1, method='spearman') targ0 = spearmanr(self.arr_float_1d, self.arr_float1_1d)[0] targ1 = spearmanr(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0] self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method='spearman') def test_nancov(self): targ0 = 
np.cov(self.arr_float_2d, self.arr_float1_2d)[0, 1] targ1 = np.cov(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1] self.check_nancorr_nancov_2d(nanops.nancov, targ0, targ1) targ0 = np.cov(self.arr_float_1d, self.arr_float1_1d)[0, 1] targ1 = np.cov(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0, 1] self.check_nancorr_nancov_1d(nanops.nancov, targ0, targ1) def check_nancomp(self, checkfun, targ0): arr_float = self.arr_float arr_float1 = self.arr_float1 arr_nan = self.arr_nan arr_nan_nan = self.arr_nan_nan arr_float_nan = self.arr_float_nan arr_float1_nan = self.arr_float1_nan arr_nan_float1 = self.arr_nan_float1 while targ0.ndim: try: res0 = checkfun(arr_float, arr_float1) tm.assert_almost_equal(targ0, res0) if targ0.ndim > 1: targ1 = np.vstack([targ0, arr_nan]) else: targ1 = np.hstack([targ0, arr_nan]) res1 = checkfun(arr_float_nan, arr_float1_nan) tm.assert_numpy_array_equal(targ1, res1, check_dtype=False) targ2 = arr_nan_nan res2 = checkfun(arr_float_nan, arr_nan_float1) tm.assert_numpy_array_equal(targ2, res2, check_dtype=False) except Exception as exc: exc.args += ('ndim: %s' % arr_float.ndim, ) raise try: arr_float = np.take(arr_float, 0, axis=-1) arr_float1 = np.take(arr_float1, 0, axis=-1) arr_nan = np.take(arr_nan, 0, axis=-1) arr_nan_nan = np.take(arr_nan_nan, 0, axis=-1) arr_float_nan = np.take(arr_float_nan, 0, axis=-1) arr_float1_nan = np.take(arr_float1_nan, 0, axis=-1) arr_nan_float1 = np.take(arr_nan_float1, 0, axis=-1) targ0 = np.take(targ0, 0, axis=-1) except ValueError: break def test_nangt(self): targ0 = self.arr_float > self.arr_float1 self.check_nancomp(nanops.nangt, targ0) def test_nange(self): targ0 = self.arr_float >= self.arr_float1 self.check_nancomp(nanops.nange, targ0) def test_nanlt(self): targ0 = self.arr_float < self.arr_float1 self.check_nancomp(nanops.nanlt, targ0) def test_nanle(self): targ0 = self.arr_float <= self.arr_float1 self.check_nancomp(nanops.nanle, targ0) def test_naneq(self): targ0 = self.arr_float == 
self.arr_float1 self.check_nancomp(nanops.naneq, targ0) def test_nanne(self): targ0 = self.arr_float != self.arr_float1 self.check_nancomp(nanops.nanne, targ0) def check_bool(self, func, value, correct, *args, **kwargs): while getattr(value, 'ndim', True): try: res0 = func(value, *args, **kwargs) if correct: assert res0 else: assert not res0 except BaseException as exc: exc.args += ('dim: %s' % getattr(value, 'ndim', value), ) raise if not hasattr(value, 'ndim'): break try: value = np.take(value, 0, axis=-1) except ValueError: break def test__has_infs(self): pairs = [('arr_complex', False), ('arr_int', False), ('arr_bool', False), ('arr_str', False), ('arr_utf', False), ('arr_complex', False), ('arr_complex_nan', False), ('arr_nan_nanj', False), ('arr_nan_infj', True), ('arr_complex_nan_infj', True)] pairs_float = [('arr_float', False), ('arr_nan', False), ('arr_float_nan', False), ('arr_nan_nan', False), ('arr_float_inf', True), ('arr_inf', True), ('arr_nan_inf', True), ('arr_float_nan_inf', True), ('arr_nan_nan_inf', True)] for arr, correct in pairs: val = getattr(self, arr) try: self.check_bool(nanops._has_infs, val, correct) except BaseException as exc: exc.args += (arr, ) raise for arr, correct in pairs_float: val = getattr(self, arr) try: self.check_bool(nanops._has_infs, val, correct) self.check_bool(nanops._has_infs, val.astype('f4'), correct) self.check_bool(nanops._has_infs, val.astype('f2'), correct) except BaseException as exc: exc.args += (arr, ) raise def test__isfinite(self): pairs = [('arr_complex', False), ('arr_int', False), ('arr_bool', False), ('arr_str', False), ('arr_utf', False), ('arr_complex', False), ('arr_complex_nan', True), ('arr_nan_nanj', True), ('arr_nan_infj', True), ('arr_complex_nan_infj', True)] pairs_float = [('arr_float', False), ('arr_nan', True), ('arr_float_nan', True), ('arr_nan_nan', True), ('arr_float_inf', True), ('arr_inf', True), ('arr_nan_inf', True), ('arr_float_nan_inf', True), ('arr_nan_nan_inf', True)] func1 = 
lambda x: np.any(nanops._isfinite(x).ravel()) # TODO: unused? # func2 = lambda x: np.any(nanops._isfinite(x).values.ravel()) for arr, correct in pairs: val = getattr(self, arr) try: self.check_bool(func1, val, correct) except BaseException as exc: exc.args += (arr, ) raise for arr, correct in pairs_float: val = getattr(self, arr) try: self.check_bool(func1, val, correct) self.check_bool(func1, val.astype('f4'), correct) self.check_bool(func1, val.astype('f2'), correct) except BaseException as exc: exc.args += (arr, ) raise def test__bn_ok_dtype(self): assert nanops._bn_ok_dtype(self.arr_float.dtype, 'test') assert nanops._bn_ok_dtype(self.arr_complex.dtype, 'test') assert nanops._bn_ok_dtype(self.arr_int.dtype, 'test') assert nanops._bn_ok_dtype(self.arr_bool.dtype, 'test') assert nanops._bn_ok_dtype(self.arr_str.dtype, 'test') assert nanops._bn_ok_dtype(self.arr_utf.dtype, 'test') assert not nanops._bn_ok_dtype(self.arr_date.dtype, 'test') assert not nanops._bn_ok_dtype(self.arr_tdelta.dtype, 'test') assert not nanops._bn_ok_dtype(self.arr_obj.dtype, 'test') class TestEnsureNumeric(object): def test_numeric_values(self): # Test integer assert nanops._ensure_numeric(1) == 1 # Test float assert nanops._ensure_numeric(1.1) == 1.1 # Test complex assert nanops._ensure_numeric(1 + 2j) == 1 + 2j def test_ndarray(self): # Test numeric ndarray values = np.array([1, 2, 3]) assert np.allclose(nanops._ensure_numeric(values), values) # Test object ndarray o_values = values.astype(object) assert np.allclose(nanops._ensure_numeric(o_values), values) # Test convertible string ndarray s_values = np.array(['1', '2', '3'], dtype=object) assert np.allclose(nanops._ensure_numeric(s_values), values) # Test non-convertible string ndarray s_values = np.array(['foo', 'bar', 'baz'], dtype=object) pytest.raises(ValueError, lambda: nanops._ensure_numeric(s_values)) def test_convertable_values(self): assert np.allclose(nanops._ensure_numeric('1'), 1.0) assert 
np.allclose(nanops._ensure_numeric('1.1'), 1.1) assert np.allclose(nanops._ensure_numeric('1+1j'), 1 + 1j) def test_non_convertable_values(self): pytest.raises(TypeError, lambda: nanops._ensure_numeric('foo')) pytest.raises(TypeError, lambda: nanops._ensure_numeric({})) pytest.raises(TypeError, lambda: nanops._ensure_numeric([])) class TestNanvarFixedValues(object): # xref GH10242 def setup_method(self, method): # Samples from a normal distribution. self.variance = variance = 3.0 self.samples = self.prng.normal(scale=variance ** 0.5, size=100000) def test_nanvar_all_finite(self): samples = self.samples actual_variance = nanops.nanvar(samples) tm.assert_almost_equal(actual_variance, self.variance, check_less_precise=2) def test_nanvar_nans(self): samples = np.nan * np.ones(2 * self.samples.shape[0]) samples[::2] = self.samples actual_variance = nanops.nanvar(samples, skipna=True) tm.assert_almost_equal(actual_variance, self.variance, check_less_precise=2) actual_variance = nanops.nanvar(samples, skipna=False) tm.assert_almost_equal(actual_variance, np.nan, check_less_precise=2) def test_nanstd_nans(self): samples = np.nan * np.ones(2 * self.samples.shape[0]) samples[::2] = self.samples actual_std = nanops.nanstd(samples, skipna=True) tm.assert_almost_equal(actual_std, self.variance ** 0.5, check_less_precise=2) actual_std = nanops.nanvar(samples, skipna=False) tm.assert_almost_equal(actual_std, np.nan, check_less_precise=2) def test_nanvar_axis(self): # Generate some sample data. samples_norm = self.samples samples_unif = self.prng.uniform(size=samples_norm.shape[0]) samples = np.vstack([samples_norm, samples_unif]) actual_variance = nanops.nanvar(samples, axis=1) tm.assert_almost_equal(actual_variance, np.array( [self.variance, 1.0 / 12]), check_less_precise=2) def test_nanvar_ddof(self): n = 5 samples = self.prng.uniform(size=(10000, n + 1)) samples[:, -1] = np.nan # Force use of our own algorithm. 
variance_0 = nanops.nanvar(samples, axis=1, skipna=True, ddof=0).mean() variance_1 = nanops.nanvar(samples, axis=1, skipna=True, ddof=1).mean() variance_2 = nanops.nanvar(samples, axis=1, skipna=True, ddof=2).mean() # The unbiased estimate. var = 1.0 / 12 tm.assert_almost_equal(variance_1, var, check_less_precise=2) # The underestimated variance. tm.assert_almost_equal(variance_0, (n - 1.0) / n * var, check_less_precise=2) # The overestimated variance. tm.assert_almost_equal(variance_2, (n - 1.0) / (n - 2.0) * var, check_less_precise=2) def test_ground_truth(self): # Test against values that were precomputed with Numpy. samples = np.empty((4, 4)) samples[:3, :3] = np.array([[0.97303362, 0.21869576, 0.55560287 ], [0.72980153, 0.03109364, 0.99155171], [0.09317602, 0.60078248, 0.15871292]]) samples[3] = samples[:, 3] = np.nan # Actual variances along axis=0, 1 for ddof=0, 1, 2 variance = np.array([[[0.13762259, 0.05619224, 0.11568816 ], [0.20643388, 0.08428837, 0.17353224], [0.41286776, 0.16857673, 0.34706449]], [[0.09519783, 0.16435395, 0.05082054 ], [0.14279674, 0.24653093, 0.07623082], [0.28559348, 0.49306186, 0.15246163]]]) # Test nanvar. for axis in range(2): for ddof in range(3): var = nanops.nanvar(samples, skipna=True, axis=axis, ddof=ddof) tm.assert_almost_equal(var[:3], variance[axis, ddof]) assert np.isnan(var[3]) # Test nanstd. for axis in range(2): for ddof in range(3): std = nanops.nanstd(samples, skipna=True, axis=axis, ddof=ddof) tm.assert_almost_equal(std[:3], variance[axis, ddof] ** 0.5) assert np.isnan(std[3]) def test_nanstd_roundoff(self): # Regression test for GH 10242 (test data taken from GH 10489). Ensure # that variance is stable. 
data = Series(766897346 * np.ones(10)) for ddof in range(3): result = data.std(ddof=ddof) assert result == 0.0 @property def prng(self): return np.random.RandomState(1234) class TestNanskewFixedValues(object): # xref GH 11974 def setup_method(self, method): # Test data + skewness value (computed with scipy.stats.skew) self.samples = np.sin(np.linspace(0, 1, 200)) self.actual_skew = -0.1875895205961754 def test_constant_series(self): # xref GH 11974 for val in [3075.2, 3075.3, 3075.5]: data = val * np.ones(300) skew = nanops.nanskew(data) assert skew == 0.0 def test_all_finite(self): alpha, beta = 0.3, 0.1 left_tailed = self.prng.beta(alpha, beta, size=100) assert nanops.nanskew(left_tailed) < 0 alpha, beta = 0.1, 0.3 right_tailed = self.prng.beta(alpha, beta, size=100) assert nanops.nanskew(right_tailed) > 0 def test_ground_truth(self): skew = nanops.nanskew(self.samples) tm.assert_almost_equal(skew, self.actual_skew) def test_axis(self): samples = np.vstack([self.samples, np.nan * np.ones(len(self.samples))]) skew = nanops.nanskew(samples, axis=1) tm.assert_almost_equal(skew, np.array([self.actual_skew, np.nan])) def test_nans(self): samples = np.hstack([self.samples, np.nan]) skew = nanops.nanskew(samples, skipna=False) assert np.isnan(skew) def test_nans_skipna(self): samples = np.hstack([self.samples, np.nan]) skew = nanops.nanskew(samples, skipna=True) tm.assert_almost_equal(skew, self.actual_skew) @property def prng(self): return np.random.RandomState(1234) class TestNankurtFixedValues(object): # xref GH 11974 def setup_method(self, method): # Test data + kurtosis value (computed with scipy.stats.kurtosis) self.samples = np.sin(np.linspace(0, 1, 200)) self.actual_kurt = -1.2058303433799713 def test_constant_series(self): # xref GH 11974 for val in [3075.2, 3075.3, 3075.5]: data = val * np.ones(300) kurt = nanops.nankurt(data) assert kurt == 0.0 def test_all_finite(self): alpha, beta = 0.3, 0.1 left_tailed = self.prng.beta(alpha, beta, size=100) assert 
nanops.nankurt(left_tailed) < 0 alpha, beta = 0.1, 0.3 right_tailed = self.prng.beta(alpha, beta, size=100) assert nanops.nankurt(right_tailed) > 0 def test_ground_truth(self): kurt = nanops.nankurt(self.samples) tm.assert_almost_equal(kurt, self.actual_kurt) def test_axis(self): samples = np.vstack([self.samples, np.nan * np.ones(len(self.samples))]) kurt = nanops.nankurt(samples, axis=1) tm.assert_almost_equal(kurt, np.array([self.actual_kurt, np.nan])) def test_nans(self): samples = np.hstack([self.samples, np.nan]) kurt = nanops.nankurt(samples, skipna=False) assert np.isnan(kurt) def test_nans_skipna(self): samples = np.hstack([self.samples, np.nan]) kurt = nanops.nankurt(samples, skipna=True) tm.assert_almost_equal(kurt, self.actual_kurt) @property def prng(self): return np.random.RandomState(1234) def test_use_bottleneck(): if nanops._BOTTLENECK_INSTALLED: pd.set_option('use_bottleneck', True) assert pd.get_option('use_bottleneck') pd.set_option('use_bottleneck', False) assert not pd.get_option('use_bottleneck') pd.set_option('use_bottleneck', use_bn)
gpl-2.0
SOM-st/PySOM
src/som/primitives/integer_primitives.py
2
7007
import sys from math import sqrt from rlib.arithmetic import ( ovfcheck, LONG_BIT, bigint_from_int, string_to_int, bigint_from_str, ParseStringOverflowError, ) from rlib.llop import as_32_bit_unsigned_value, unsigned_right_shift from som.primitives.primitives import Primitives from som.vm.globals import nilObject, falseObject from som.vmobjects.array import Array from som.vmobjects.biginteger import BigInteger from som.vmobjects.double import Double from som.vmobjects.integer import Integer from som.vmobjects.primitive import UnaryPrimitive, BinaryPrimitive from som.vmobjects.string import String def _as_string(rcvr): return rcvr.prim_as_string() def _as_double(rcvr): return rcvr.prim_as_double() def _as_32_bit_signed_value(rcvr): return rcvr.prim_as_32_bit_signed_value() def _as_32_bit_unsigned_value(rcvr): val = as_32_bit_unsigned_value(rcvr.get_embedded_integer()) return Integer(val) def _sqrt(rcvr): assert isinstance(rcvr, Integer) res = sqrt(rcvr.get_embedded_integer()) if res == float(int(res)): return Integer(int(res)) return Double(res) def _plus(left, right): return left.prim_add(right) def _minus(left, right): return left.prim_subtract(right) def _multiply(left, right): return left.prim_multiply(right) def _double_div(left, right): return left.prim_double_div(right) def _int_div(left, right): return left.prim_int_div(right) def _mod(left, right): return left.prim_modulo(right) def _remainder(left, right): return left.prim_remainder(right) def _and(left, right): return left.prim_and(right) def _equals_equals(left, right): if isinstance(right, Integer) or isinstance(right, BigInteger): return left.prim_equals(right) return falseObject def _equals(left, right): return left.prim_equals(right) def _unequals(left, right): return left.prim_unequals(right) def _less_than(left, right): return left.prim_less_than(right) def _less_than_or_equal(left, right): return left.prim_less_than_or_equal(right) def _greater_than(left, right): return 
left.prim_greater_than(right) def _left_shift(left, right): assert isinstance(right, Integer) left_val = left.get_embedded_integer() right_val = right.get_embedded_integer() assert isinstance(left_val, int) assert isinstance(right_val, int) try: if not (left_val == 0 or 0 <= right_val < LONG_BIT): raise OverflowError result = ovfcheck(left_val << right_val) return Integer(result) except OverflowError: return BigInteger(bigint_from_int(left_val).lshift(right_val)) def _unsigned_right_shift(left, right): assert isinstance(right, Integer) left_val = left.get_embedded_integer() right_val = right.get_embedded_integer() return Integer(unsigned_right_shift(left_val, right_val)) def _bit_xor(left, right): assert isinstance(right, Integer) result = left.get_embedded_integer() ^ right.get_embedded_integer() return Integer(result) def _abs(rcvr): return rcvr.prim_abs() def _max(left, right): return left.prim_max(right) if sys.version_info.major <= 2: def _to(rcvr, arg): assert isinstance(rcvr, Integer) assert isinstance(arg, Integer) return Array.from_integers( range(rcvr.get_embedded_integer(), arg.get_embedded_integer() + 1) ) else: def _to(rcvr, arg): assert isinstance(rcvr, Integer) assert isinstance(arg, Integer) return Array.from_integers( list(range(rcvr.get_embedded_integer(), arg.get_embedded_integer() + 1)) ) def _from_string(_rcvr, param): if not isinstance(param, String): return nilObject str_val = param.get_embedded_string() try: i = string_to_int(str_val) return Integer(i) except ParseStringOverflowError: bigint = bigint_from_str(str_val) return BigInteger(bigint) class IntegerPrimitivesBase(Primitives): def install_primitives(self): self._install_instance_primitive( UnaryPrimitive("asString", self.universe, _as_string) ) self._install_instance_primitive( UnaryPrimitive("asDouble", self.universe, _as_double) ) self._install_instance_primitive( UnaryPrimitive("as32BitSignedValue", self.universe, _as_32_bit_signed_value) ) self._install_instance_primitive( 
UnaryPrimitive( "as32BitUnsignedValue", self.universe, _as_32_bit_unsigned_value ) ) self._install_instance_primitive(UnaryPrimitive("sqrt", self.universe, _sqrt)) self._install_instance_primitive(BinaryPrimitive("+", self.universe, _plus)) self._install_instance_primitive(BinaryPrimitive("-", self.universe, _minus)) self._install_instance_primitive(BinaryPrimitive("*", self.universe, _multiply)) self._install_instance_primitive( BinaryPrimitive("//", self.universe, _double_div) ) self._install_instance_primitive(BinaryPrimitive("/", self.universe, _int_div)) self._install_instance_primitive(BinaryPrimitive("%", self.universe, _mod)) self._install_instance_primitive( BinaryPrimitive("rem:", self.universe, _remainder) ) self._install_instance_primitive(BinaryPrimitive("&", self.universe, _and)) self._install_instance_primitive( BinaryPrimitive("==", self.universe, _equals_equals) ) self._install_instance_primitive(BinaryPrimitive("=", self.universe, _equals)) self._install_instance_primitive( BinaryPrimitive("<", self.universe, _less_than) ) self._install_instance_primitive( BinaryPrimitive("<=", self.universe, _less_than_or_equal) ) self._install_instance_primitive( BinaryPrimitive(">", self.universe, _greater_than) ) self._install_instance_primitive( BinaryPrimitive("<>", self.universe, _unequals) ) self._install_instance_primitive( BinaryPrimitive("~=", self.universe, _unequals) ) self._install_instance_primitive( BinaryPrimitive("<<", self.universe, _left_shift) ) self._install_instance_primitive( BinaryPrimitive("bitXor:", self.universe, _bit_xor) ) self._install_instance_primitive( BinaryPrimitive(">>>", self.universe, _unsigned_right_shift) ) self._install_instance_primitive(UnaryPrimitive("abs", self.universe, _abs)) self._install_instance_primitive(BinaryPrimitive("max:", self.universe, _max)) self._install_instance_primitive(BinaryPrimitive("to:", self.universe, _to)) self._install_class_primitive( BinaryPrimitive("fromString:", self.universe, 
_from_string) )
mit
JohnDevitt/appengine-django-skeleton-master
lib/django/views/decorators/clickjacking.py
550
1759
from functools import wraps from django.utils.decorators import available_attrs def xframe_options_deny(view_func): """ Modifies a view function so its response has the X-Frame-Options HTTP header set to 'DENY' as long as the response doesn't already have that header set. e.g. @xframe_options_deny def some_view(request): ... """ def wrapped_view(*args, **kwargs): resp = view_func(*args, **kwargs) if resp.get('X-Frame-Options', None) is None: resp['X-Frame-Options'] = 'DENY' return resp return wraps(view_func, assigned=available_attrs(view_func))(wrapped_view) def xframe_options_sameorigin(view_func): """ Modifies a view function so its response has the X-Frame-Options HTTP header set to 'SAMEORIGIN' as long as the response doesn't already have that header set. e.g. @xframe_options_sameorigin def some_view(request): ... """ def wrapped_view(*args, **kwargs): resp = view_func(*args, **kwargs) if resp.get('X-Frame-Options', None) is None: resp['X-Frame-Options'] = 'SAMEORIGIN' return resp return wraps(view_func, assigned=available_attrs(view_func))(wrapped_view) def xframe_options_exempt(view_func): """ Modifies a view function by setting a response variable that instructs XFrameOptionsMiddleware to NOT set the X-Frame-Options HTTP header. e.g. @xframe_options_exempt def some_view(request): ... """ def wrapped_view(*args, **kwargs): resp = view_func(*args, **kwargs) resp.xframe_options_exempt = True return resp return wraps(view_func, assigned=available_attrs(view_func))(wrapped_view)
bsd-3-clause
arenadata/ambari
ambari-server/src/test/python/stacks/2.3/ATLAS/test_metadata_server.py
3
25615
#!/usr/bin/env python ''' Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ''' from mock.mock import MagicMock, call, patch from resource_management.libraries.script.script import Script from resource_management.libraries.functions import get_kinit_path from stacks.utils.RMFTestCase import * import json import sys from only_for_platform import not_for_platform, PLATFORM_WINDOWS @not_for_platform(PLATFORM_WINDOWS) class TestMetadataServer(RMFTestCase): COMMON_SERVICES_PACKAGE_DIR = "ATLAS/0.1.0.2.3/package" STACK_VERSION = "2.3" stack_root = Script.get_stack_root() conf_dir = stack_root + "/current/atlas-server/conf" def configureResourcesCalled(self): # Both server and client self.assertResourceCalled('Directory', self.conf_dir, owner='atlas', group='hadoop', create_parents = True, cd_access='a', mode=0755) # Pid dir self.assertResourceCalled('Directory', '/var/run/atlas', owner='atlas', group='hadoop', create_parents = True, cd_access='a', mode=0755 ) self.assertResourceCalled('Directory', self.conf_dir + "/solr", owner='atlas', group='hadoop', create_parents = True, cd_access='a', mode=0755, recursive_ownership = True ) # Log dir self.assertResourceCalled('Directory', '/var/log/atlas', owner='atlas', group='hadoop', create_parents = True, cd_access='a', mode=0755 ) # Data dir 
self.assertResourceCalled('Directory', self.stack_root+"/current/atlas-server/data", owner='atlas', group='hadoop', create_parents = True, cd_access='a', mode=0644 ) # Expanded war dir self.assertResourceCalled('Directory', self.stack_root+'/current/atlas-server/server/webapp', owner='atlas', group='hadoop', create_parents = True, cd_access='a', mode=0644 ) self.assertResourceCalled('File', self.stack_root+'/current/atlas-server/server/webapp/atlas.war', content = StaticFile(self.stack_root+'/current/atlas-server/server/webapp/atlas.war'), ) host_name = u"c6401.ambari.apache.org" app_props = dict(self.getConfig()['configurations']['application-properties']) app_props['atlas.server.bind.address'] = host_name metadata_protocol = "https" if app_props["atlas.enableTLS"] is True else "http" metadata_port = app_props["atlas.server.https.port"] if metadata_protocol == "https" else app_props["atlas.server.http.port"] app_props["atlas.rest.address"] = u'%s://%s:%s' % (metadata_protocol, host_name, metadata_port) app_props["atlas.server.ids"] = "id1" app_props["atlas.server.address.id1"] = u"%s:%s" % (host_name, metadata_port) app_props["atlas.server.ha.enabled"] = "false" self.assertResourceCalled('File', str(self.conf_dir + "/atlas-log4j.xml"), content=InlineTemplate( self.getConfig()['configurations'][ 'atlas-log4j']['content']), owner='atlas', group='hadoop', mode=0644, ) self.assertResourceCalled('File', str(self.conf_dir + "/atlas-env.sh"), content=InlineTemplate( self.getConfig()['configurations'][ 'atlas-env']['content']), owner='atlas', group='hadoop', mode=0755, ) self.assertResourceCalled('File', str(self.conf_dir + "/solr/solrconfig.xml"), content=InlineTemplate( self.getConfig()['configurations'][ 'atlas-solrconfig']['content']), owner='atlas', group='hadoop', mode=0644, ) # application.properties file self.assertResourceCalled('PropertiesFile',str(self.conf_dir + "/application.properties"), properties=app_props, owner=u'atlas', group=u'hadoop', mode=0644, ) 
self.assertResourceCalled('Directory', '/var/log/ambari-infra-solr-client', create_parents = True, cd_access='a', mode=0755 ) self.assertResourceCalled('Directory', '/usr/lib/ambari-infra-solr-client', create_parents = True, recursive_ownership = True, cd_access='a', mode=0755 ) self.assertResourceCalled('File', '/usr/lib/ambari-infra-solr-client/solrCloudCli.sh', content=StaticFile('/usr/lib/ambari-infra-solr-client/solrCloudCli.sh'), mode=0755, ) self.assertResourceCalled('File', '/usr/lib/ambari-infra-solr-client/log4j.properties', content=self.getConfig()['configurations']['infra-solr-client-log4j']['content'], mode=0644, ) self.assertResourceCalled('File', '/var/log/ambari-infra-solr-client/solr-client.log', mode=0664, content='' ) self.assertResourceCalledRegexp('^Execute$', '^ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181 --znode /infra-solr --check-znode --retry 5 --interval 10') self.assertResourceCalledRegexp('^Execute$', '^ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181/infra-solr --download-config --config-dir /tmp/solr_config_atlas_configs_0.[0-9]* --config-set atlas_configs --retry 30 --interval 5') self.assertResourceCalledRegexp('^File$', '^/tmp/solr_config_atlas_configs_0.[0-9]*', content=InlineTemplate(self.getConfig()['configurations']['atlas-solrconfig']['content']), only_if='test -d /tmp/solr_config_atlas_configs_0.[0-9]*') self.assertResourceCalledRegexp('^Execute$', '^ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181/infra-solr --upload-config --config-dir /tmp/solr_config_atlas_configs_0.[0-9]* --config-set atlas_configs --retry 30 --interval 5', only_if='test -d /tmp/solr_config_atlas_configs_0.[0-9]*') 
self.assertResourceCalledRegexp('^Execute$', '^ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181/infra-solr --upload-config --config-dir {0}/solr --config-set atlas_configs --retry 30 --interval 5'.format(self.conf_dir), not_if='test -d /tmp/solr_config_atlas_configs_0.[0-9]*') self.assertResourceCalledRegexp('^Directory$', '^/tmp/solr_config_atlas_configs_0.[0-9]*', action=['delete'], create_parents=True) self.assertResourceCalledRegexp('^Execute$', '^ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181/infra-solr --create-collection --collection vertex_index --config-set atlas_configs --shards 1 --replication 1 --max-shards 1 --retry 5 --interval 10') self.assertResourceCalledRegexp('^Execute$', '^ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181/infra-solr --create-collection --collection edge_index --config-set atlas_configs --shards 1 --replication 1 --max-shards 1 --retry 5 --interval 10') self.assertResourceCalledRegexp('^Execute$', '^ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181/infra-solr --create-collection --collection fulltext_index --config-set atlas_configs --shards 1 --replication 1 --max-shards 1 --retry 5 --interval 10') def configureResourcesCalledSecure(self): # Both server and client self.assertResourceCalled('Directory', self.conf_dir, owner='atlas', group='hadoop', create_parents = True, cd_access='a', mode=0755 ) # Pid dir self.assertResourceCalled('Directory', '/var/run/atlas', owner='atlas', group='hadoop', create_parents = True, cd_access='a', mode=0755 ) self.assertResourceCalled('Directory', self.conf_dir + "/solr", 
owner='atlas', group='hadoop', create_parents = True, cd_access='a', mode=0755, recursive_ownership = True ) # Log dir self.assertResourceCalled('Directory', '/var/log/atlas', owner='atlas', group='hadoop', create_parents = True, cd_access='a', mode=0755 ) # Data dir self.assertResourceCalled('Directory', self.stack_root+'/current/atlas-server/data', owner='atlas', group='hadoop', create_parents = True, cd_access='a', mode=0644 ) # Expanded war dir self.assertResourceCalled('Directory', self.stack_root+'/current/atlas-server/server/webapp', owner='atlas', group='hadoop', create_parents = True, cd_access='a', mode=0644 ) self.assertResourceCalled('File', self.stack_root+'/current/atlas-server/server/webapp/atlas.war', content = StaticFile(self.stack_root+'/current/atlas-server/server/webapp/atlas.war'), ) host_name = u"c6401.ambari.apache.org" app_props = dict(self.getConfig()['configurations']['application-properties']) app_props['atlas.server.bind.address'] = host_name metadata_protocol = "https" if app_props["atlas.enableTLS"] is True else "http" metadata_port = app_props["atlas.server.https.port"] if metadata_protocol == "https" else app_props["atlas.server.http.port"] app_props["atlas.rest.address"] = u'%s://%s:%s' % (metadata_protocol, host_name, metadata_port) app_props["atlas.server.ids"] = "id1" app_props["atlas.server.address.id1"] = u"%s:%s" % (host_name, metadata_port) app_props["atlas.server.ha.enabled"] = "false" self.assertResourceCalled('File', self.conf_dir + "/atlas-log4j.xml", content=InlineTemplate( self.getConfig()['configurations'][ 'atlas-log4j']['content']), owner='atlas', group='hadoop', mode=0644, ) self.assertResourceCalled('File', self.conf_dir + "/atlas-env.sh", content=InlineTemplate( self.getConfig()['configurations'][ 'atlas-env']['content']), owner='atlas', group='hadoop', mode=0755, ) self.assertResourceCalled('File', self.conf_dir+"/solr/solrconfig.xml", content=InlineTemplate( self.getConfig()['configurations'][ 
'atlas-solrconfig']['content']), owner='atlas', group='hadoop', mode=0644, ) # application.properties file self.assertResourceCalled('PropertiesFile',self.conf_dir + "/application.properties", properties=app_props, owner=u'atlas', group=u'hadoop', mode=0644, ) self.assertResourceCalled('TemplateConfig', self.conf_dir+"/atlas_jaas.conf", owner = 'atlas', ) self.assertResourceCalled('Directory', '/var/log/ambari-infra-solr-client', create_parents = True, cd_access='a', mode=0755 ) self.assertResourceCalled('Directory', '/usr/lib/ambari-infra-solr-client', create_parents = True, recursive_ownership = True, cd_access='a', mode=0755 ) self.assertResourceCalled('File', '/usr/lib/ambari-infra-solr-client/solrCloudCli.sh', content=StaticFile('/usr/lib/ambari-infra-solr-client/solrCloudCli.sh'), mode=0755, ) self.assertResourceCalled('File', '/usr/lib/ambari-infra-solr-client/log4j.properties', content=self.getConfig()['configurations']['infra-solr-client-log4j']['content'], mode=0644, ) self.assertResourceCalled('File', '/var/log/ambari-infra-solr-client/solr-client.log', mode=0664, content='' ) self.assertResourceCalledRegexp('^Execute$', '^ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181 --znode /infra-solr --check-znode --retry 5 --interval 10') self.assertResourceCalledRegexp('^Execute$', '^ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181/infra-solr --download-config --config-dir /tmp/solr_config_atlas_configs_0.[0-9]* --config-set atlas_configs --retry 30 --interval 5') self.assertResourceCalledRegexp('^File$', '^/tmp/solr_config_atlas_configs_0.[0-9]*', content=InlineTemplate(self.getConfig()['configurations']['atlas-solrconfig']['content']), only_if='test -d /tmp/solr_config_atlas_configs_0.[0-9]*') self.assertResourceCalledRegexp('^Execute$', '^ambari-sudo.sh 
JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181/infra-solr --upload-config --config-dir /tmp/solr_config_atlas_configs_0.[0-9]* --config-set atlas_configs --retry 30 --interval 5', only_if='test -d /tmp/solr_config_atlas_configs_0.[0-9]*') self.assertResourceCalledRegexp('^Execute$', '^ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181/infra-solr --upload-config --config-dir {0}/solr --config-set atlas_configs --retry 30 --interval 5'.format(self.conf_dir), not_if='test -d /tmp/solr_config_atlas_configs_0.[0-9]*') self.assertResourceCalledRegexp('^Directory$', '^/tmp/solr_config_atlas_configs_0.[0-9]*', action=['delete'], create_parents=True) kinit_path_local = get_kinit_path() self.assertResourceCalled('Execute', kinit_path_local + " -kt /etc/security/keytabs/ambari-infra-solr.keytab infra-solr/c6401.ambari.apache.org@EXAMPLE.COM; curl -k -s --negotiate -u : http://c6401.ambari.apache.org:8886/solr/admin/authorization | grep authorization.enabled && " + kinit_path_local +" -kt /etc/security/keytabs/ambari-infra-solr.keytab infra-solr/c6401.ambari.apache.org@EXAMPLE.COM; curl -H 'Content-type:application/json' -d '{\"set-user-role\": {\"atlas@EXAMPLE.COM\": [\"atlas_user\", \"ranger_audit_user\", \"dev\"]}}' -s -o /dev/null -w'%{http_code}' --negotiate -u: -k http://c6401.ambari.apache.org:8886/solr/admin/authorization | grep 200", logoutput = True, tries = 30, try_sleep = 10, user='solr') self.assertResourceCalledRegexp('^Execute$', '^ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181/infra-solr --create-collection --collection vertex_index --config-set atlas_configs --shards 1 --replication 1 --max-shards 1 --retry 5 --interval 10') 
self.assertResourceCalledRegexp('^Execute$', '^ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181/infra-solr --create-collection --collection edge_index --config-set atlas_configs --shards 1 --replication 1 --max-shards 1 --retry 5 --interval 10') self.assertResourceCalledRegexp('^Execute$', '^ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181/infra-solr --create-collection --collection fulltext_index --config-set atlas_configs --shards 1 --replication 1 --max-shards 1 --retry 5 --interval 10') self.assertResourceCalled('Execute', "ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181 --znode /infra-solr/configs/atlas_configs --secure-znode --jaas-file /usr/hdp/current/atlas-server/conf/atlas_jaas.conf --sasl-users atlas,infra-solr --retry 5 --interval 10") self.assertResourceCalled('Execute', "ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181 --znode /infra-solr/collections/vertex_index --secure-znode --jaas-file /usr/hdp/current/atlas-server/conf/atlas_jaas.conf --sasl-users atlas,infra-solr --retry 5 --interval 10") self.assertResourceCalled('Execute', "ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181 --znode /infra-solr/collections/edge_index --secure-znode --jaas-file /usr/hdp/current/atlas-server/conf/atlas_jaas.conf --sasl-users atlas,infra-solr --retry 5 --interval 10") self.assertResourceCalled('Execute', "ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string 
c6401.ambari.apache.org:2181 --znode /infra-solr/collections/fulltext_index --secure-znode --jaas-file /usr/hdp/current/atlas-server/conf/atlas_jaas.conf --sasl-users atlas,infra-solr --retry 5 --interval 10") def test_configure_default(self): self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/metadata_server.py", classname = "MetadataServer", command = "configure", config_file="default.json", stack_version = self.STACK_VERSION, target = RMFTestCase.TARGET_COMMON_SERVICES ) self.configureResourcesCalled() self.assertResourceCalled('File', '/tmp/atlas_hbase_setup.rb', owner = "hbase", group = "hadoop", content=Template("atlas_hbase_setup.rb.j2")) self.assertResourceCalled('File', str(self.conf_dir+"/hdfs-site.xml"),action = ['delete'],) self.assertResourceCalled('Directory',self.stack_root + '/current/atlas-server/', owner = 'atlas', group = 'hadoop', recursive_ownership = True, ) self.assertNoMoreResources() def test_configure_secure(self): self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/metadata_server.py", classname = "MetadataServer", command = "configure", config_file="secure.json", stack_version = self.STACK_VERSION, target = RMFTestCase.TARGET_COMMON_SERVICES ) self.configureResourcesCalledSecure() self.assertResourceCalled('File', '/tmp/atlas_hbase_setup.rb', owner = "hbase", group = "hadoop", content=Template("atlas_hbase_setup.rb.j2")) self.assertResourceCalled('File', str(self.conf_dir+"/hdfs-site.xml"),action = ['delete'],) self.assertResourceCalled('Directory',self.stack_root + '/current/atlas-server/', owner = 'atlas', group = 'hadoop', recursive_ownership = True, ) self.assertNoMoreResources() def test_start_default(self): self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/metadata_server.py", classname = "MetadataServer", command = "start", config_file="default.json", stack_version = self.STACK_VERSION, target = RMFTestCase.TARGET_COMMON_SERVICES ) self.configureResourcesCalled() 
self.assertResourceCalled('File', '/tmp/atlas_hbase_setup.rb', owner = "hbase", group = "hadoop", content=Template("atlas_hbase_setup.rb.j2")) self.assertResourceCalled('File', str(self.conf_dir+"/hdfs-site.xml"),action = ['delete'],) self.assertResourceCalled('Directory',self.stack_root + '/current/atlas-server/', owner = 'atlas', group = 'hadoop', recursive_ownership = True, ) self.assertResourceCalled('Execute', 'source {0}/atlas-env.sh ; {1}/current/atlas-server/bin/atlas_start.py'.format(self.conf_dir,self.stack_root), not_if = 'ls /var/run/atlas/atlas.pid >/dev/null 2>&1 && ps -p `cat /var/run/atlas/atlas.pid` >/dev/null 2>&1', user = 'atlas', ) @patch('os.path.isdir') def test_stop_default(self, is_dir_mock): is_dir_mock.return_value = True self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/metadata_server.py", classname = "MetadataServer", command = "stop", config_file="default.json", stack_version = self.STACK_VERSION, target = RMFTestCase.TARGET_COMMON_SERVICES ) self.assertResourceCalled('Execute', 'source {0}/atlas-env.sh; {1}/current/atlas-server/bin/atlas_stop.py'.format(self.conf_dir,self.stack_root), user = 'atlas', ) self.assertResourceCalled('File', '/var/run/atlas/atlas.pid', action = ['delete'], )
apache-2.0
kdwink/intellij-community
python/lib/Lib/site-packages/django/contrib/auth/context_processors.py
158
1452
from django.core.context_processors import PermWrapper from django.utils.functional import lazy, memoize, SimpleLazyObject from django.contrib import messages def auth(request): """ Returns context variables required by apps that use Django's authentication system. If there is no 'user' attribute in the request, uses AnonymousUser (from django.contrib.auth). """ # If we access request.user, request.session is accessed, which results in # 'Vary: Cookie' being sent in every request that uses this context # processor, which can easily be every request on a site if # TEMPLATE_CONTEXT_PROCESSORS has this context processor added. This kills # the ability to cache. So, we carefully ensure these attributes are lazy. # We don't use django.utils.functional.lazy() for User, because that # requires knowing the class of the object we want to proxy, which could # break with custom auth backends. LazyObject is a less complete but more # flexible solution that is a good enough wrapper for 'User'. def get_user(): if hasattr(request, 'user'): return request.user else: from django.contrib.auth.models import AnonymousUser return AnonymousUser() return { 'user': SimpleLazyObject(get_user), 'messages': messages.get_messages(request), 'perms': lazy(lambda: PermWrapper(get_user()), PermWrapper)(), }
apache-2.0
dfdx2/django
django/core/checks/model_checks.py
72
6183
import inspect import types from itertools import chain from django.apps import apps from django.core.checks import Error, Tags, register @register(Tags.models) def check_all_models(app_configs=None, **kwargs): errors = [] if app_configs is None: models = apps.get_models() else: models = chain.from_iterable(app_config.get_models() for app_config in app_configs) for model in models: if not inspect.ismethod(model.check): errors.append( Error( "The '%s.check()' class method is currently overridden by %r." % (model.__name__, model.check), obj=model, id='models.E020' ) ) else: errors.extend(model.check(**kwargs)) return errors def _check_lazy_references(apps, ignore=None): """ Ensure all lazy (i.e. string) model references have been resolved. Lazy references are used in various places throughout Django, primarily in related fields and model signals. Identify those common cases and provide more helpful error messages for them. The ignore parameter is used by StateApps to exclude swappable models from this check. """ pending_models = set(apps._pending_operations) - (ignore or set()) # Short circuit if there aren't any errors. if not pending_models: return [] from django.db.models import signals model_signals = { signal: name for name, signal in vars(signals).items() if isinstance(signal, signals.ModelSignal) } def extract_operation(obj): """ Take a callable found in Apps._pending_operations and identify the original callable passed to Apps.lazy_model_operation(). If that callable was a partial, return the inner, non-partial function and any arguments and keyword arguments that were supplied with it. obj is a callback defined locally in Apps.lazy_model_operation() and annotated there with a `func` attribute so as to imitate a partial. """ operation, args, keywords = obj, [], {} while hasattr(operation, 'func'): # The or clauses are redundant but work around a bug (#25945) in # functools.partial in Python <= 3.5.1. 
args.extend(getattr(operation, 'args', []) or []) keywords.update(getattr(operation, 'keywords', {}) or {}) operation = operation.func return operation, args, keywords def app_model_error(model_key): try: apps.get_app_config(model_key[0]) model_error = "app '%s' doesn't provide model '%s'" % model_key except LookupError: model_error = "app '%s' isn't installed" % model_key[0] return model_error # Here are several functions which return CheckMessage instances for the # most common usages of lazy operations throughout Django. These functions # take the model that was being waited on as an (app_label, modelname) # pair, the original lazy function, and its positional and keyword args as # determined by extract_operation(). def field_error(model_key, func, args, keywords): error_msg = ( "The field %(field)s was declared with a lazy reference " "to '%(model)s', but %(model_error)s." ) params = { 'model': '.'.join(model_key), 'field': keywords['field'], 'model_error': app_model_error(model_key), } return Error(error_msg % params, obj=keywords['field'], id='fields.E307') def signal_connect_error(model_key, func, args, keywords): error_msg = ( "%(receiver)s was connected to the '%(signal)s' signal with a " "lazy reference to the sender '%(model)s', but %(model_error)s." ) receiver = args[0] # The receiver is either a function or an instance of class # defining a `__call__` method. 
if isinstance(receiver, types.FunctionType): description = "The function '%s'" % receiver.__name__ elif isinstance(receiver, types.MethodType): description = "Bound method '%s.%s'" % (receiver.__self__.__class__.__name__, receiver.__name__) else: description = "An instance of class '%s'" % receiver.__class__.__name__ signal_name = model_signals.get(func.__self__, 'unknown') params = { 'model': '.'.join(model_key), 'receiver': description, 'signal': signal_name, 'model_error': app_model_error(model_key), } return Error(error_msg % params, obj=receiver.__module__, id='signals.E001') def default_error(model_key, func, args, keywords): error_msg = "%(op)s contains a lazy reference to %(model)s, but %(model_error)s." params = { 'op': func, 'model': '.'.join(model_key), 'model_error': app_model_error(model_key), } return Error(error_msg % params, obj=func, id='models.E022') # Maps common uses of lazy operations to corresponding error functions # defined above. If a key maps to None, no error will be produced. # default_error() will be used for usages that don't appear in this dict. known_lazy = { ('django.db.models.fields.related', 'resolve_related_class'): field_error, ('django.db.models.fields.related', 'set_managed'): None, ('django.dispatch.dispatcher', 'connect'): signal_connect_error, } def build_error(model_key, func, args, keywords): key = (func.__module__, func.__name__) error_fn = known_lazy.get(key, default_error) return error_fn(model_key, func, args, keywords) if error_fn else None return sorted(filter(None, ( build_error(model_key, *extract_operation(func)) for model_key in pending_models for func in apps._pending_operations[model_key] )), key=lambda error: error.msg) @register(Tags.models) def check_lazy_references(app_configs=None, **kwargs): return _check_lazy_references(apps)
bsd-3-clause
eg-zhang/h2o-2
py/h2o_os_util.py
30
5521
import subprocess import getpass def kill_process_tree(pid, including_parent=True): parent = psutil.Process(pid) for child in parent.get_children(recursive=True): child.kill() if including_parent: parent.kill() def kill_child_processes(): me = os.getpid() kill_process_tree(me, including_parent=False) # since we hang if hosts has bad IP addresses, thought it'd be nice # to have simple obvious feedback to user if he's running with -v # and machines are down or his hosts definition has bad IPs. # FIX! currently not used def ping_host_if_verbose(host): # if (h2o.verbose) if 1==1: username = getpass.getuser() # if username=='jenkins' or username=='kevin' or username=='michal': if username=='jenkins' or username=='kevin': ping = subprocess.Popen( ["ping", "-c", "4", host]) ping.communicate() def check_port_group(base_port): # Only enable if useful for debug if 1==1: username = getpass.getuser() # if username=='jenkins' or username=='kevin' or username=='michal': if username=='jenkins': # assumes you want to know about 3 ports starting at base_port # can't use p, not root command1Split = ['netstat', '-an'] command2Split = ['egrep'] # colon so only match ports. space at end? so no submatches command2Split.append("(%s | %s)" % (base_port, base_port+1) ) command3Split = ['wc','-l'] print "Checking 2 ports starting at ", base_port print ' '.join(command2Split) # use netstat thru subprocess p1 = subprocess.Popen(command1Split, stdout=subprocess.PIPE) p2 = subprocess.Popen(command2Split, stdin=p1.stdout, stdout=subprocess.PIPE) output = p2.communicate()[0] print output # I suppose we should use psutil here. since everyone has it installed? # and it should work on windows? 
def show_h2o_processes(): # Only enable if useful for debug if 1==0: username = getpass.getuser() h2oFound = False users = set() h2oUsers = set() # if username=='jenkins' or username=='kevin' or username=='michal': if username=='jenkins' or username=='kevin': import psutil # print "get_users:", psutil.get_users() print "total physical dram:" , (psutil.TOTAL_PHYMEM+0)/(1024*1024), "GB" print "max cpu threads:", psutil.NUM_CPUS print "\nReporting on h2o" users = set() h2oUsers = set() h2oFound = False for p in psutil.process_iter(): h2oProcess = False # hack. # psutil 2.x needs function reference # psutil 1.x needs object reference if hasattr(p.name, '__call__'): pname = p.name() pcmdline = p.cmdline() # the user name might be uknown here, due to LXC? try: pusername = p.username() except: pusername = "Unknown-maybe-LXC-user" pstatus = p.status() else: pname = p.name pcmdline = p.cmdline try: pusername = p.username except: pusername = "Unknown-maybe-LXC-user" pstatus = p.status if hasattr(p.pid, '__call__'): ppid = p.pid() else: ppid = p.pid if 'java' in pname: users.add(pusername) # now iterate through the cmdline, to see if it's got 'h2o for c in pcmdline: if 'h2o' in c: h2oProcess = True h2oUsers.add(pusername) break if h2oProcess: h2oFound = True print "\n#**********************************************" print p # process could disappear while we're looking? (fast h2o version java process?) try: print "pid:", ppid print "cmdline:", pcmdline # AccessDenied problem? 
# print p.getcwd() print "status:", pstatus print "username:", pusername print "cpu_percent:", p.get_cpu_percent(interval=1.0) print "memory_percent:", p.get_memory_percent() print p.get_memory_info() # AccessDenied problem # print p.get_io_counters() # AccessDenied problem # p.get_open_files() # AccessDenied problem # print p.get_connections() except: pass if h2oFound: print "\n\n#**********************************************************************************************" else: print "No h2o processes found." print "\nusers running java:", list(users) print "users running h2o java:", list(h2oUsers)
apache-2.0
ContinuumIO/datashape
datashape/tests/test_lexer.py
5
6712
""" Test the DataShape lexer. """ from __future__ import absolute_import, division, print_function import unittest import datashape from datashape import lexer class TestDataShapeLexer(unittest.TestCase): def check_isolated_token(self, ds_str, tname, val=None): # The token name should be a property in parser tid = getattr(lexer, tname) # Lexing should produce a single token matching the specification self.assertEqual(list(lexer.lex(ds_str)), [lexer.Token(tid, tname, (0, len(ds_str)), val)]) def check_failing_token(self, ds_str): # Creating the lexer will fail, because the error is # in the first token. self.assertRaises(datashape.DataShapeSyntaxError, list, lexer.lex(ds_str)) def test_isolated_tokens(self): self.check_isolated_token('testing', 'NAME_LOWER', 'testing') self.check_isolated_token('Testing', 'NAME_UPPER', 'Testing') self.check_isolated_token('_testing', 'NAME_OTHER', '_testing') self.check_isolated_token('*', 'ASTERISK') self.check_isolated_token(',', 'COMMA') self.check_isolated_token('=', 'EQUAL') self.check_isolated_token(':', 'COLON') self.check_isolated_token('[', 'LBRACKET') self.check_isolated_token(']', 'RBRACKET') self.check_isolated_token('{', 'LBRACE') self.check_isolated_token('}', 'RBRACE') self.check_isolated_token('(', 'LPAREN') self.check_isolated_token(')', 'RPAREN') self.check_isolated_token('...', 'ELLIPSIS') self.check_isolated_token('->', 'RARROW') self.check_isolated_token('?', 'QUESTIONMARK') self.check_isolated_token('32102', 'INTEGER', 32102) self.check_isolated_token('->', 'RARROW') self.check_isolated_token('"testing"', 'STRING', 'testing') self.check_isolated_token("'testing'", 'STRING', 'testing') def test_integer(self): # Digits self.check_isolated_token('0', 'INTEGER', 0) self.check_isolated_token('1', 'INTEGER', 1) self.check_isolated_token('2', 'INTEGER', 2) self.check_isolated_token('3', 'INTEGER', 3) self.check_isolated_token('4', 'INTEGER', 4) self.check_isolated_token('5', 'INTEGER', 5) 
self.check_isolated_token('6', 'INTEGER', 6) self.check_isolated_token('7', 'INTEGER', 7) self.check_isolated_token('8', 'INTEGER', 8) self.check_isolated_token('9', 'INTEGER', 9) # Various-sized numbers self.check_isolated_token('10', 'INTEGER', 10) self.check_isolated_token('102', 'INTEGER', 102) self.check_isolated_token('1024', 'INTEGER', 1024) self.check_isolated_token('10246', 'INTEGER', 10246) self.check_isolated_token('102468', 'INTEGER', 102468) self.check_isolated_token('1024683', 'INTEGER', 1024683) self.check_isolated_token('10246835', 'INTEGER', 10246835) self.check_isolated_token('102468357', 'INTEGER', 102468357) self.check_isolated_token('1024683579', 'INTEGER', 1024683579) # Leading zeros are not allowed self.check_failing_token('00') self.check_failing_token('01') self.check_failing_token('090') def test_string(self): # Trivial strings self.check_isolated_token('""', 'STRING', '') self.check_isolated_token("''", 'STRING', '') self.check_isolated_token('"test"', 'STRING', 'test') self.check_isolated_token("'test'", 'STRING', 'test') # Valid escaped characters self.check_isolated_token(r'"\"\b\f\n\r\t\ub155"', 'STRING', u'"\b\f\n\r\t\ub155') self.check_isolated_token(r"'\'\b\f\n\r\t\ub155'", 'STRING', u"'\b\f\n\r\t\ub155") # A sampling of invalid escaped characters self.check_failing_token(r'''"\'"''') self.check_failing_token(r"""'\"'""") self.check_failing_token(r"'\a'") self.check_failing_token(r"'\s'") self.check_failing_token(r"'\R'") self.check_failing_token(r"'\N'") self.check_failing_token(r"'\U'") self.check_failing_token(r"'\u123g'") self.check_failing_token(r"'\u123'") # Some unescaped and escapted unicode characters self.check_isolated_token(u'"\uc548\ub155 \\uc548\\ub155"', 'STRING', u'\uc548\ub155 \uc548\ub155') def test_failing_tokens(self): self.check_failing_token('~') self.check_failing_token('`') self.check_failing_token('@') self.check_failing_token('$') self.check_failing_token('%') self.check_failing_token('^') 
self.check_failing_token('&') self.check_failing_token('-') self.check_failing_token('+') self.check_failing_token(';') self.check_failing_token('<') self.check_failing_token('>') self.check_failing_token('.') self.check_failing_token('..') self.check_failing_token('/') self.check_failing_token('|') self.check_failing_token('\\') def test_whitespace(self): expected_idval = [(lexer.COLON, None), (lexer.STRING, 'a'), (lexer.INTEGER, 12345), (lexer.RARROW, None), (lexer.EQUAL, None), (lexer.ASTERISK, None), (lexer.NAME_OTHER, '_b')] # With minimal whitespace toks = list(lexer.lex(':"a"12345->=*_b')) self.assertEqual([(tok.id, tok.val) for tok in toks], expected_idval) # With spaces toks = list(lexer.lex(' : "a" 12345 -> = * _b ')) self.assertEqual([(tok.id, tok.val) for tok in toks], expected_idval) # With tabs toks = list(lexer.lex('\t:\t"a"\t12345\t->\t=\t*\t_b\t')) self.assertEqual([(tok.id, tok.val) for tok in toks], expected_idval) # With newlines toks = list(lexer.lex('\n:\n"a"\n12345\n->\n=\n*\n_b\n')) self.assertEqual([(tok.id, tok.val) for tok in toks], expected_idval) # With spaces, tabs, newlines and comments toks = list(lexer.lex('# comment\n' + ': # X\n' + ' "a" # "b"\t\n' + '\t12345\n\n' + '->\n' + '=\n' + '*\n' + '_b # comment\n' + ' \t # end')) self.assertEqual([(tok.id, tok.val) for tok in toks], expected_idval)
bsd-2-clause
Idematica/django-oscar
sites/demo/apps/shipping/migrations/0001_initial.py
15
4877
# encoding: utf-8 import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'OrderAndItemCharges' db.create_table('shipping_orderanditemcharges', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('code', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=128, db_index=True)), ('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=128)), ('description', self.gf('django.db.models.fields.TextField')(blank=True)), ('price_per_order', self.gf('django.db.models.fields.DecimalField')(default='0.00', max_digits=12, decimal_places=2)), ('price_per_item', self.gf('django.db.models.fields.DecimalField')(default='0.00', max_digits=12, decimal_places=2)), ('free_shipping_threshold', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2, blank=True)), )) db.send_create_signal('shipping', ['OrderAndItemCharges']) # Adding model 'WeightBased' db.create_table('shipping_weightbased', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('code', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=128, db_index=True)), ('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=128)), ('description', self.gf('django.db.models.fields.TextField')(blank=True)), ('upper_charge', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2)), )) db.send_create_signal('shipping', ['WeightBased']) # Adding model 'WeightBand' db.create_table('shipping_weightband', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('method', self.gf('django.db.models.fields.related.ForeignKey')(related_name='bands', to=orm['shipping.WeightBased'])), ('upper_limit', self.gf('django.db.models.fields.FloatField')()), ('charge', 
self.gf('django.db.models.fields.DecimalField')(max_digits=12, decimal_places=2)), )) db.send_create_signal('shipping', ['WeightBand']) def backwards(self, orm): # Deleting model 'OrderAndItemCharges' db.delete_table('shipping_orderanditemcharges') # Deleting model 'WeightBased' db.delete_table('shipping_weightbased') # Deleting model 'WeightBand' db.delete_table('shipping_weightband') models = { 'shipping.orderanditemcharges': { 'Meta': {'object_name': 'OrderAndItemCharges'}, 'code': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'free_shipping_threshold': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}), 'price_per_item': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '12', 'decimal_places': '2'}), 'price_per_order': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '12', 'decimal_places': '2'}) }, 'shipping.weightband': { 'Meta': {'ordering': "['upper_limit']", 'object_name': 'WeightBand'}, 'charge': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'method': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bands'", 'to': "orm['shipping.WeightBased']"}), 'upper_limit': ('django.db.models.fields.FloatField', [], {}) }, 'shipping.weightbased': { 'Meta': {'object_name': 'WeightBased'}, 'code': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'id': 
('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}), 'upper_charge': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2'}) } } complete_apps = ['shipping']
bsd-3-clause
AlanZatarain/python-astm
astm/codec.py
16
10528
# -*- coding: utf-8 -*- # # Copyright (C) 2013 Alexander Shorin # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. # from collections import Iterable from .compat import unicode from .constants import ( STX, ETX, ETB, CR, LF, CRLF, FIELD_SEP, COMPONENT_SEP, RECORD_SEP, REPEAT_SEP, ENCODING ) try: from itertools import izip_longest except ImportError: # Python 3 from itertools import zip_longest as izip_longest def decode(data, encoding=ENCODING): """Common ASTM decoding function that tries to guess which kind of data it handles. If `data` starts with STX character (``0x02``) than probably it is full ASTM message with checksum and other system characters. If `data` starts with digit character (``0-9``) than probably it is frame of records leading by his sequence number. No checksum is expected in this case. Otherwise it counts `data` as regular record structure. Note, that `data` should be bytes, not unicode string even if you know his `encoding`. :param data: ASTM data object. :type data: bytes :param encoding: Data encoding. :type encoding: str :return: List of ASTM records with unicode data. :rtype: list """ if not isinstance(data, bytes): raise TypeError('bytes expected, got %r' % data) if data.startswith(STX): # may be decode message \x02...\x03CS\r\n seq, records, cs = decode_message(data, encoding) return records byte = data[:1].decode() if byte.isdigit(): seq, records = decode_frame(data, encoding) return records return [decode_record(data, encoding)] def decode_message(message, encoding): """Decodes complete ASTM message that is sent or received due communication routines. It should contains checksum that would be additionally verified. :param message: ASTM message. :type message: bytes :param encoding: Data encoding. :type encoding: str :returns: Tuple of three elements: * :class:`int` frame sequence number. * :class:`list` of records with unicode data. 
* :class:`bytes` checksum. :raises: * :exc:`ValueError` if ASTM message is malformed. * :exc:`AssertionError` if checksum verification fails. """ if not isinstance(message, bytes): raise TypeError('bytes expected, got %r' % message) if not (message.startswith(STX) and message.endswith(CRLF)): raise ValueError('Malformed ASTM message. Expected that it will started' ' with %x and followed by %x%x characters. Got: %r' ' ' % (ord(STX), ord(CR), ord(LF), message)) stx, frame_cs = message[0], message[1:-2] frame, cs = frame_cs[:-2], frame_cs[-2:] ccs = make_checksum(frame) assert cs == ccs, 'Checksum failure: expected %r, calculated %r' % (cs, ccs) seq, records = decode_frame(frame, encoding) return seq, records, cs.decode() def decode_frame(frame, encoding): """Decodes ASTM frame: list of records followed by sequence number.""" if not isinstance(frame, bytes): raise TypeError('bytes expected, got %r' % frame) if frame.endswith(CR + ETX): frame = frame[:-2] elif frame.endswith(ETB): frame = frame[:-1] else: raise ValueError('Incomplete frame data %r.' ' Expected trailing <CR><ETX> or <ETB> chars' % frame) seq = frame[:1].decode() if not seq.isdigit(): raise ValueError('Malformed ASTM frame. 
Expected leading seq number %r' '' % frame) seq, records = int(seq), frame[1:] return seq, [decode_record(record, encoding) for record in records.split(RECORD_SEP)] def decode_record(record, encoding): """Decodes ASTM record message.""" fields = [] for item in record.split(FIELD_SEP): if REPEAT_SEP in item: item = decode_repeated_component(item, encoding) elif COMPONENT_SEP in item: item = decode_component(item, encoding) else: item = item.decode(encoding) fields.append([None, item][bool(item)]) return fields def decode_component(field, encoding): """Decodes ASTM field component.""" return [[None, item.decode(encoding)][bool(item)] for item in field.split(COMPONENT_SEP)] def decode_repeated_component(component, encoding): """Decodes ASTM field repeated component.""" return [decode_component(item, encoding) for item in component.split(REPEAT_SEP)] def encode(records, encoding=ENCODING, size=None, seq=1): """Encodes list of records into single ASTM message, also called as "packed" message. If you need to get each record as standalone message use :func:`iter_encode` instead. If the result message is too large (greater than specified `size` if it's not :const:`None`), than it will be split by chunks. :param records: List of ASTM records. :type records: list :param encoding: Data encoding. :type encoding: str :param size: Chunk size in bytes. :type size: int :param seq: Frame start sequence number. :type seq: int :return: List of ASTM message chunks. :rtype: list """ msg = encode_message(seq, records, encoding) if size is not None and len(msg) > size: return list(split(msg, size)) return [msg] def iter_encode(records, encoding=ENCODING, size=None, seq=1): """Encodes and emits each record as separate message. If the result message is too large (greater than specified `size` if it's not :const:`None`), than it will be split by chunks. :yields: ASTM message chunks. 
:rtype: str """ for record in records: msg = encode_message(seq, [record], encoding) if size is not None and len(msg) > size: for chunk in split(msg, size): seq += 1 yield chunk else: seq += 1 yield msg def encode_message(seq, records, encoding): """Encodes ASTM message. :param seq: Frame sequence number. :type seq: int :param records: List of ASTM records. :type records: list :param encoding: Data encoding. :type encoding: str :return: ASTM complete message with checksum and other control characters. :rtype: str """ data = RECORD_SEP.join(encode_record(record, encoding) for record in records) data = b''.join((str(seq % 8).encode(), data, CR, ETX)) return b''.join([STX, data, make_checksum(data), CR, LF]) def encode_record(record, encoding): """Encodes single ASTM record. :param record: ASTM record. Each :class:`str`-typed item counted as field value, one level nested :class:`list` counted as components and second leveled - as repeated components. :type record: list :param encoding: Data encoding. :type encoding: str :returns: Encoded ASTM record. 
:rtype: str """ fields = [] _append = fields.append for field in record: if isinstance(field, bytes): _append(field) elif isinstance(field, unicode): _append(field.encode(encoding)) elif isinstance(field, Iterable): _append(encode_component(field, encoding)) elif field is None: _append(b'') else: _append(unicode(field).encode(encoding)) return FIELD_SEP.join(fields) def encode_component(component, encoding): """Encodes ASTM record field components.""" items = [] _append = items.append for item in component: if isinstance(item, bytes): _append(item) elif isinstance(item, unicode): _append(item.encode(encoding)) elif isinstance(item, Iterable): return encode_repeated_component(component, encoding) elif item is None: _append(b'') else: _append(unicode(item).encode(encoding)) return COMPONENT_SEP.join(items).rstrip(COMPONENT_SEP) def encode_repeated_component(components, encoding): """Encodes repeated components.""" return REPEAT_SEP.join(encode_component(item, encoding) for item in components) def make_checksum(message): """Calculates checksum for specified message. :param message: ASTM message. :type message: bytes :returns: Checksum value that is actually byte sized integer in hex base :rtype: bytes """ if not isinstance(message[0], int): message = map(ord, message) return hex(sum(message) & 0xFF)[2:].upper().zfill(2).encode() def make_chunks(s, n): iter_bytes = (s[i:i + 1] for i in range(len(s))) return [b''.join(item) for item in izip_longest(*[iter_bytes] * n, fillvalue=b'')] def split(msg, size): """Split `msg` into chunks with specified `size`. Chunk `size` value couldn't be less then 7 since each chunk goes with at least 7 special characters: STX, frame number, ETX or ETB, checksum and message terminator. :param msg: ASTM message. :type msg: bytes :param size: Chunk size in bytes. 
:type size: int :yield: `bytes` """ stx, frame, msg, tail = msg[:1], msg[1:2], msg[2:-6], msg[-6:] assert stx == STX assert frame.isdigit() assert tail.endswith(CRLF) assert size is not None and size >= 7 frame = int(frame) chunks = make_chunks(msg, size - 7) chunks, last = chunks[:-1], chunks[-1] idx = 0 for idx, chunk in enumerate(chunks): item = b''.join([str((idx + frame) % 8).encode(), chunk, ETB]) yield b''.join([STX, item, make_checksum(item), CRLF]) item = b''.join([str((idx + frame + 1) % 8).encode(), last, CR, ETX]) yield b''.join([STX, item, make_checksum(item), CRLF]) def join(chunks): """Merges ASTM message `chunks` into single message. :param chunks: List of chunks as `bytes`. :type chunks: iterable """ msg = b'1' + b''.join(c[2:-5] for c in chunks) + ETX return b''.join([STX, msg, make_checksum(msg), CRLF]) def is_chunked_message(message): """Checks plain message for chunked byte.""" length = len(message) if len(message) < 5: return False if ETB not in message: return False return message.index(ETB) == length - 5
bsd-3-clause
saru95/DSA
Python/karatsuba.py
1
1362
# this is karatsuba's multiplication method of 2 n-digit integers
# This is a divide and conquer algorithm implemented in Python
__author__ = "Anirudh Swaminathan"

# Works on both Python 2 and Python 3: bind the interactive prompt once.
try:
    _input = raw_input          # Python 2
except NameError:
    _input = input              # Python 3


def main():
    """Read two integers from stdin and print their Karatsuba product."""
    x = int(_input("Enter 1st integer\n"))
    y = int(_input("Enter 2nd integer\n"))
    x = list(str(x))
    y = list(str(y))
    result = int(kmult(x, y))
    print("The product is " + str(result))


# Recursive function for computation of @param x, and @param y
def kmult(x, y):
    """Multiply two non-negative integers given as lists of digit characters.

    ``x`` and ``y`` are lists like ``['1', '2', '3']``; the shorter operand
    is zero-padded in place so both have equal length.  Returns the product
    as an ``int``.
    """
    n = len(x) if len(x) > len(y) else len(y)
    if n == 1:
        return int(x[0]) * int(y[0])

    # Make the integers to be multiplied equal length
    for _ in range(len(x), n):
        x.insert(0, '0')
    for _ in range(len(y), n):
        y.insert(0, '0')

    # Split each operand: x = a*10^m + b, y = c*10^m + d, where the low
    # halves b and d carry m = ceil(n/2) digits (the high halves a and c
    # carry the remaining floor(n/2) digits).
    half = n // 2
    a = x[:half]
    b = x[half:]
    c = y[:half]
    d = y[half:]

    ac = kmult(a, c)
    bd = kmult(b, d)

    # Karatsuba's trick: (a+b)(c+d) - ac - bd == ad + bc, so the middle
    # term costs only one extra recursive multiplication.
    a = int(''.join(a))
    b = int(''.join(b))
    c = int(''.join(c))
    d = int(''.join(d))
    apb = list(str(a + b))
    cpd = list(str(c + d))
    sums = kmult(apb, cpd)

    # Bug fix: the exponent must be computed in pure integer arithmetic.
    # The original used 10**math.ceil(n/2.0), which is a float on Python 2
    # and silently loses precision once the product exceeds ~2**53.
    m = (n + 1) // 2            # digits in the low halves
    return (10 ** (2 * m)) * ac + bd + (10 ** m) * (sums - ac - bd)


if __name__ == '__main__':
    main()
mit
Kitware/geojs
scripts/make_thumbnails.py
2
6480
#!/usr/bin/env python # NOTE: If this doesn't work, it may be related to a policy in # /etc/ImageMagick-6/policy.xml # Specifically, disable # <policy domain="coder" rights="none" pattern="PS" /> # by removing it or commenting it out. import json import os import psutil import signal import six import subprocess import sys import time OriginalSize = (1200, 900) ExtraSpace = 1 # 1, otherwise we get a black border on the bottom and right NavbarHeight = 60 FinalSize = (800, 600) InitialDelay = 15 # in seconds MaxDelay = 30 # in seconds Quality = 90 OutputFile = 'thumb.jpg' InputList = ["examples", "tutorials"] BrowserCommand = [ 'xvfb-run', '-s', '-ac -screen 0 %dx%dx24' % ( OriginalSize[0] + ExtraSpace, OriginalSize[1] + ExtraSpace + NavbarHeight), 'google-chrome', '--kiosk', '--no-pings', '--device-scale-factor=1', '--incognito', '--start-fullscreen', '--no-default-browser-check', '--user-data-dir=/tmp/chrome_geojs_thumbnails', '--no-first-run', '--disable-default-apps', '--disable-popup-blocking', '--disable-translate', '--disable-background-timer-throttling', '--disable-renderer-backgrounding', '--disable-device-discovery-notifications', '--window-position=0,0', ] BrowserCommandSize = [ '--window-size=%d,%d' % (OriginalSize[0] + ExtraSpace, OriginalSize[1] + ExtraSpace), ] BrowserCommandSizeIgnoreNavbar = [ '--window-size=%d,%d' % ( OriginalSize[0] + ExtraSpace, OriginalSize[1] + ExtraSpace + NavbarHeight), ] BrowserUrl = 'http://127.0.0.1:30100/%s' ImageCommand = ( 'DISPLAY=:99.0 import -window root -crop %dx%d+0+0 +repage - | ' 'convert - -resize %dx%d -quality %d ' % ( OriginalSize[0], OriginalSize[1], FinalSize[0], FinalSize[1], Quality)) ImageCommandIgnoreNavbar = ( 'DISPLAY=:99.0 import -window root -crop %dx%d+0+%d +repage - | ' 'convert - -resize %dx%d -quality %d ' % ( OriginalSize[0], OriginalSize[1], NavbarHeight, FinalSize[0], FinalSize[1], Quality)) def process_item(path, opts): output = (open('/tmp/thumbnail.out', 'ab') if opts.get('verbose', 0) >= 1 
else open(os.devnull, 'w')) data = json.load(open(path)) if data.get('disabled') and not opts.get('all'): return dest = os.path.join(os.path.dirname(path), OutputFile) if os.path.exists(dest) and not opts.get('force'): return originalSize = 0 if os.path.exists(dest): originalSize = os.path.getsize(dest) sys.stdout.write('\r%s %d' % (path, originalSize)) sys.stdout.flush() if opts.get('simulate'): dest = os.path.join('/tmp', os.path.basename(os.path.dirname( os.path.dirname(path))) + '_' + os.path.basename(os.path.dirname( path)) + '_' + OutputFile) if os.path.exists(dest): os.unlink(dest) cmd = list(BrowserCommand) imgcmd = ImageCommand if 'example.json' in path and not data.get('hideNavbar'): cmd.extend(BrowserCommandSizeIgnoreNavbar) imgcmd = ImageCommandIgnoreNavbar else: cmd.extend(BrowserCommandSize) url = BrowserUrl % os.path.dirname(path) if data.get('thumbquery'): url += '?' + data['thumbquery'] cmd.append(url) output.write('--> %r\n' % (cmd, )) output.write(' %s\n' % (' '.join([six.moves.shlex_quote(arg) for arg in cmd]))) proc = subprocess.Popen(cmd, shell=False, stdout=output, stderr=output) delay = opts.get('delay', InitialDelay) startTime = time.time() time.sleep(delay) lastSize = 0 while True: output.write('--> %r\n' % (imgcmd + six.moves.shlex_quote(dest), )) subprocess.Popen( imgcmd + six.moves.shlex_quote(dest), shell=True, stdout=output, stderr=output).wait() newSize = os.path.getsize(dest) if newSize and newSize == lastSize: break if time.time() - startTime > opts.get('maxdelay', MaxDelay): break lastSize = newSize sys.stdout.write('\r%s %d %d ' % (path, originalSize, newSize)) sys.stdout.flush() time.sleep(0.5) for child in psutil.Process(proc.pid).children(recursive=True): try: child.send_signal(signal.SIGINT) except psutil.NoSuchProcess: pass os.kill(proc.pid, signal.SIGINT) proc.wait() sys.stdout.write('\n') if __name__ == '__main__': # noqa opts = {'force': False, 'verbose': 0} for arg in sys.argv[1:]: if arg in ('-a', '--all'): opts['all'] 
= True elif arg.startswith('--delay='): opts['delay'] = float(arg.split('=', 1)[1]) elif arg == '--force': opts['force'] = True elif arg.startswith('--maxdelay='): opts['maxdelay'] = float(arg.split('=', 1)[1]) elif arg.startswith('--only='): opts['only'] = arg.split('=', 1)[1] elif arg in ('-s', '--simulate'): opts['simulate'] = True elif arg in ('-v', '--verbose'): opts['verbose'] += 1 else: opts['help'] = True if opts.get('help'): print(""" Regenerate thumbnails for examples and tutorials. Syntax: make_thumbnails.py --force --simulate --only=(substr) --all --delay=(seconds) --maxdelay=(seconds) Run in the root geojs directory. --all or -a generates thumbnails for disabled examples, too. --delay is the duration after the web browser is started before a thumbnail snapshot might be taken. The thumbnail is only taken after the webpage hasn't changed for a short duration. --force regenerates all thumbnails. Otherwise, only missing thumbnails are created. --maxdelay is the longest to wait before taking the snapshot. This will happen even if the webpage is still changing. --only will only process examples or tutorials whose name contains the specified substring. --simulate or -s determines the size of thumbnails that would be created but doesn't make them. """) sys.exit(0) for inputdir in InputList: for root, dirs, files in os.walk(inputdir): dirs.sort() for dir in dirs: for name in ['example.json', 'tutorial.json']: path = os.path.join(root, dir, name) if opts.get('only') and not opts['only'] in path: continue if os.path.exists(path): process_item(path, opts)
apache-2.0
as110/as110.github.io
node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/formatters/svg.py
362
5867
# -*- coding: utf-8 -*- """ pygments.formatters.svg ~~~~~~~~~~~~~~~~~~~~~~~ Formatter for SVG output. :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.formatter import Formatter from pygments.util import get_bool_opt, get_int_opt __all__ = ['SvgFormatter'] def escape_html(text): """Escape &, <, > as well as single and double quotes for HTML.""" return text.replace('&', '&amp;'). \ replace('<', '&lt;'). \ replace('>', '&gt;'). \ replace('"', '&quot;'). \ replace("'", '&#39;') class2style = {} class SvgFormatter(Formatter): """ Format tokens as an SVG graphics file. This formatter is still experimental. Each line of code is a ``<text>`` element with explicit ``x`` and ``y`` coordinates containing ``<tspan>`` elements with the individual token styles. By default, this formatter outputs a full SVG document including doctype declaration and the ``<svg>`` root element. *New in Pygments 0.9.* Additional options accepted: `nowrap` Don't wrap the SVG ``<text>`` elements in ``<svg><g>`` elements and don't add a XML declaration and a doctype. If true, the `fontfamily` and `fontsize` options are ignored. Defaults to ``False``. `fontfamily` The value to give the wrapping ``<g>`` element's ``font-family`` attribute, defaults to ``"monospace"``. `fontsize` The value to give the wrapping ``<g>`` element's ``font-size`` attribute, defaults to ``"14px"``. `xoffset` Starting offset in X direction, defaults to ``0``. `yoffset` Starting offset in Y direction, defaults to the font size if it is given in pixels, or ``20`` else. (This is necessary since text coordinates refer to the text baseline, not the top edge.) `ystep` Offset to add to the Y coordinate for each subsequent line. This should roughly be the text size plus 5. It defaults to that value if the text size is given in pixels, or ``25`` else. `spacehack` Convert spaces in the source to ``&#160;``, which are non-breaking spaces. 
SVG provides the ``xml:space`` attribute to control how whitespace inside tags is handled, in theory, the ``preserve`` value could be used to keep all whitespace as-is. However, many current SVG viewers don't obey that rule, so this option is provided as a workaround and defaults to ``True``. """ name = 'SVG' aliases = ['svg'] filenames = ['*.svg'] def __init__(self, **options): # XXX outencoding Formatter.__init__(self, **options) self.nowrap = get_bool_opt(options, 'nowrap', False) self.fontfamily = options.get('fontfamily', 'monospace') self.fontsize = options.get('fontsize', '14px') self.xoffset = get_int_opt(options, 'xoffset', 0) fs = self.fontsize.strip() if fs.endswith('px'): fs = fs[:-2].strip() try: int_fs = int(fs) except: int_fs = 20 self.yoffset = get_int_opt(options, 'yoffset', int_fs) self.ystep = get_int_opt(options, 'ystep', int_fs + 5) self.spacehack = get_bool_opt(options, 'spacehack', True) self._stylecache = {} def format_unencoded(self, tokensource, outfile): """ Format ``tokensource``, an iterable of ``(tokentype, tokenstring)`` tuples and write it into ``outfile``. For our implementation we put all lines in their own 'line group'. 
""" x = self.xoffset y = self.yoffset if not self.nowrap: if self.encoding: outfile.write('<?xml version="1.0" encoding="%s"?>\n' % self.encoding) else: outfile.write('<?xml version="1.0"?>\n') outfile.write('<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" ' '"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/' 'svg10.dtd">\n') outfile.write('<svg xmlns="http://www.w3.org/2000/svg">\n') outfile.write('<g font-family="%s" font-size="%s">\n' % (self.fontfamily, self.fontsize)) outfile.write('<text x="%s" y="%s" xml:space="preserve">' % (x, y)) for ttype, value in tokensource: style = self._get_style(ttype) tspan = style and '<tspan' + style + '>' or '' tspanend = tspan and '</tspan>' or '' value = escape_html(value) if self.spacehack: value = value.expandtabs().replace(' ', '&#160;') parts = value.split('\n') for part in parts[:-1]: outfile.write(tspan + part + tspanend) y += self.ystep outfile.write('</text>\n<text x="%s" y="%s" ' 'xml:space="preserve">' % (x, y)) outfile.write(tspan + parts[-1] + tspanend) outfile.write('</text>') if not self.nowrap: outfile.write('</g></svg>\n') def _get_style(self, tokentype): if tokentype in self._stylecache: return self._stylecache[tokentype] otokentype = tokentype while not self.style.styles_token(tokentype): tokentype = tokentype.parent value = self.style.style_for_token(tokentype) result = '' if value['color']: result = ' fill="#' + value['color'] + '"' if value['bold']: result += ' font-weight="bold"' if value['italic']: result += ' font-style="italic"' self._stylecache[otokentype] = result return result
mit
jeffrey4l/nova
nova/tests/unit/api/openstack/compute/contrib/test_createserverext.py
59
11228
# Copyright 2010-2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import base64 from oslo_serialization import jsonutils import webob from nova.compute import api as compute_api from nova import db from nova import exception from nova import test from nova.tests.unit.api.openstack import fakes FAKE_UUID = fakes.FAKE_UUID FAKE_NETWORKS = [('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '10.0.1.12'), ('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', '10.0.2.12')] DUPLICATE_NETWORKS = [('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '10.0.1.12'), ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '10.0.1.12')] INVALID_NETWORKS = [('invalid', 'invalid-ip-address')] def return_security_group_non_existing(context, project_id, group_name): raise exception.SecurityGroupNotFoundForProject(project_id=project_id, security_group_id=group_name) def return_security_group_get_by_name(context, project_id, group_name): return {'id': 1, 'name': group_name} def return_security_group_get(context, security_group_id, session): return {'id': security_group_id} def return_instance_add_security_group(context, instance_id, security_group_id): pass class CreateserverextTest(test.TestCase): def setUp(self): super(CreateserverextTest, self).setUp() self.security_group = None self.injected_files = None self.networks = None self.user_data = None def create(*args, **kwargs): if 'security_group' in kwargs: self.security_group = kwargs['security_group'] else: self.security_group = None if 
'injected_files' in kwargs: self.injected_files = kwargs['injected_files'] else: self.injected_files = None if 'requested_networks' in kwargs: self.networks = kwargs['requested_networks'] else: self.networks = None if 'user_data' in kwargs: self.user_data = kwargs['user_data'] resv_id = None return ([{'id': '1234', 'display_name': 'fakeinstance', 'uuid': FAKE_UUID, 'user_id': 'fake', 'project_id': 'fake', 'created_at': "", 'updated_at': "", 'fixed_ips': [], 'progress': 0}], resv_id) self.stubs.Set(compute_api.API, 'create', create) self.flags( osapi_compute_extension=[ 'nova.api.openstack.compute.contrib.select_extensions'], osapi_compute_ext_list=['Createserverext', 'User_data', 'Security_groups', 'Os_networks']) def _create_security_group_request_dict(self, security_groups): server = {} server['name'] = 'new-server-test' server['imageRef'] = 'cedef40a-ed67-4d10-800e-17455edce175' server['flavorRef'] = 1 if security_groups is not None: sg_list = [] for name in security_groups: sg_list.append({'name': name}) server['security_groups'] = sg_list return {'server': server} def _create_networks_request_dict(self, networks): server = {} server['name'] = 'new-server-test' server['imageRef'] = 'cedef40a-ed67-4d10-800e-17455edce175' server['flavorRef'] = 1 if networks is not None: network_list = [] for uuid, fixed_ip in networks: network_list.append({'uuid': uuid, 'fixed_ip': fixed_ip}) server['networks'] = network_list return {'server': server} def _create_user_data_request_dict(self, user_data): server = {} server['name'] = 'new-server-test' server['imageRef'] = 'cedef40a-ed67-4d10-800e-17455edce175' server['flavorRef'] = 1 server['user_data'] = user_data return {'server': server} def _get_create_request_json(self, body_dict): req = webob.Request.blank('/v2/fake/os-create-server-ext') req.headers['Content-Type'] = 'application/json' req.method = 'POST' req.body = jsonutils.dumps(body_dict) return req def _create_instance_with_networks_json(self, networks): body_dict = 
self._create_networks_request_dict(networks) request = self._get_create_request_json(body_dict) response = request.get_response(fakes.wsgi_app( init_only=('servers', 'os-create-server-ext'))) return request, response, self.networks def _create_instance_with_user_data_json(self, networks): body_dict = self._create_user_data_request_dict(networks) request = self._get_create_request_json(body_dict) response = request.get_response(fakes.wsgi_app( init_only=('servers', 'os-create-server-ext'))) return request, response, self.user_data def test_create_instance_with_no_networks(self): _create_inst = self._create_instance_with_networks_json request, response, networks = _create_inst(networks=None) self.assertEqual(response.status_int, 202) self.assertIsNone(networks) def test_create_instance_with_one_network(self): _create_inst = self._create_instance_with_networks_json request, response, networks = _create_inst([FAKE_NETWORKS[0]]) self.assertEqual(response.status_int, 202) self.assertEqual([FAKE_NETWORKS[0]], networks.as_tuples()) def test_create_instance_with_two_networks(self): _create_inst = self._create_instance_with_networks_json request, response, networks = _create_inst(FAKE_NETWORKS) self.assertEqual(response.status_int, 202) self.assertEqual(FAKE_NETWORKS, networks.as_tuples()) def test_create_instance_with_duplicate_networks(self): _create_inst = self._create_instance_with_networks_json request, response, networks = _create_inst(DUPLICATE_NETWORKS) self.assertEqual(response.status_int, 400) self.assertIsNone(networks) def test_create_instance_with_network_no_id(self): body_dict = self._create_networks_request_dict([FAKE_NETWORKS[0]]) del body_dict['server']['networks'][0]['uuid'] request = self._get_create_request_json(body_dict) response = request.get_response(fakes.wsgi_app( init_only=('servers', 'os-create-server-ext'))) self.assertEqual(response.status_int, 400) self.assertIsNone(self.networks) def test_create_instance_with_network_invalid_id(self): 
_create_inst = self._create_instance_with_networks_json request, response, networks = _create_inst(INVALID_NETWORKS) self.assertEqual(response.status_int, 400) self.assertIsNone(networks) def test_create_instance_with_network_empty_fixed_ip(self): networks = [('1', '')] _create_inst = self._create_instance_with_networks_json request, response, networks = _create_inst(networks) self.assertEqual(response.status_int, 400) self.assertIsNone(networks) def test_create_instance_with_network_non_string_fixed_ip(self): networks = [('1', 12345)] _create_inst = self._create_instance_with_networks_json request, response, networks = _create_inst(networks) self.assertEqual(response.status_int, 400) self.assertIsNone(networks) def test_create_instance_with_network_no_fixed_ip(self): body_dict = self._create_networks_request_dict([FAKE_NETWORKS[0]]) del body_dict['server']['networks'][0]['fixed_ip'] request = self._get_create_request_json(body_dict) response = request.get_response(fakes.wsgi_app( init_only=('servers', 'os-create-server-ext'))) self.assertEqual(response.status_int, 202) self.assertEqual([('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', None)], self.networks.as_tuples()) def test_create_instance_with_userdata(self): user_data_contents = '#!/bin/bash\necho "Oh no!"\n' user_data_contents = base64.b64encode(user_data_contents) _create_inst = self._create_instance_with_user_data_json request, response, user_data = _create_inst(user_data_contents) self.assertEqual(response.status_int, 202) self.assertEqual(user_data, user_data_contents) def test_create_instance_with_userdata_none(self): user_data_contents = None _create_inst = self._create_instance_with_user_data_json request, response, user_data = _create_inst(user_data_contents) self.assertEqual(response.status_int, 202) self.assertEqual(user_data, user_data_contents) def test_create_instance_with_userdata_with_non_b64_content(self): user_data_contents = '#!/bin/bash\necho "Oh no!"\n' _create_inst = 
self._create_instance_with_user_data_json request, response, user_data = _create_inst(user_data_contents) self.assertEqual(response.status_int, 400) self.assertIsNone(user_data) def test_create_instance_with_security_group_json(self): security_groups = ['test', 'test1'] self.stubs.Set(db, 'security_group_get_by_name', return_security_group_get_by_name) self.stubs.Set(db, 'instance_add_security_group', return_instance_add_security_group) body_dict = self._create_security_group_request_dict(security_groups) request = self._get_create_request_json(body_dict) response = request.get_response(fakes.wsgi_app( init_only=('servers', 'os-create-server-ext'))) self.assertEqual(response.status_int, 202) self.assertJsonEqual(self.security_group, security_groups) def test_get_server_by_id_verify_security_groups_json(self): self.stubs.Set(db, 'instance_get', fakes.fake_instance_get()) self.stubs.Set(db, 'instance_get_by_uuid', fakes.fake_instance_get()) req = webob.Request.blank('/v2/fake/os-create-server-ext/1') req.headers['Content-Type'] = 'application/json' response = req.get_response(fakes.wsgi_app( init_only=('os-create-server-ext', 'servers'))) self.assertEqual(response.status_int, 200) res_dict = jsonutils.loads(response.body) expected_security_group = [{"name": "test"}] self.assertEqual(res_dict['server'].get('security_groups'), expected_security_group)
apache-2.0
tfar/RIOT
dist/tools/testrunner/testrunner.py
26
1168
#!/usr/bin/env python3 # Copyright (C) 2016 Kaspar Schleiser <kaspar@schleiser.de> # 2014 Martine Lenders <mlenders@inf.fu-berlin.de> # # This file is subject to the terms and conditions of the GNU Lesser # General Public License v2.1. See the file LICENSE in the top level # directory for more details. import os, signal, sys, subprocess from pexpect import spawnu, TIMEOUT, EOF from traceback import print_tb def run(testfunc, timeout=10, echo=True, traceback=False): env = os.environ.copy() child = spawnu("make term", env=env, timeout=timeout) if echo: child.logfile = sys.stdout try: subprocess.check_output(('make', 'reset'), env=env, stderr=subprocess.PIPE) except subprocess.CalledProcessError: # make reset yields error on some boards even if successful pass try: testfunc(child) except TIMEOUT: print("Timeout in expect script") if traceback: print_tb(sys.exc_info()[2]) return 1 finally: print("") os.killpg(os.getpgid(child.pid), signal.SIGKILL) child.close() return 0
lgpl-2.1
samuelhavron/heroku-buildpack-python
Python-3.4.3/Lib/test/test_bool.py
19
12031
# Test properties of bool promised by PEP 285 import unittest from test import support import os class BoolTest(unittest.TestCase): def test_subclass(self): try: class C(bool): pass except TypeError: pass else: self.fail("bool should not be subclassable") self.assertRaises(TypeError, int.__new__, bool, 0) def test_print(self): try: fo = open(support.TESTFN, "w") print(False, True, file=fo) fo.close() fo = open(support.TESTFN, "r") self.assertEqual(fo.read(), 'False True\n') finally: fo.close() os.remove(support.TESTFN) def test_repr(self): self.assertEqual(repr(False), 'False') self.assertEqual(repr(True), 'True') self.assertEqual(eval(repr(False)), False) self.assertEqual(eval(repr(True)), True) def test_str(self): self.assertEqual(str(False), 'False') self.assertEqual(str(True), 'True') def test_int(self): self.assertEqual(int(False), 0) self.assertIsNot(int(False), False) self.assertEqual(int(True), 1) self.assertIsNot(int(True), True) def test_float(self): self.assertEqual(float(False), 0.0) self.assertIsNot(float(False), False) self.assertEqual(float(True), 1.0) self.assertIsNot(float(True), True) def test_math(self): self.assertEqual(+False, 0) self.assertIsNot(+False, False) self.assertEqual(-False, 0) self.assertIsNot(-False, False) self.assertEqual(abs(False), 0) self.assertIsNot(abs(False), False) self.assertEqual(+True, 1) self.assertIsNot(+True, True) self.assertEqual(-True, -1) self.assertEqual(abs(True), 1) self.assertIsNot(abs(True), True) self.assertEqual(~False, -1) self.assertEqual(~True, -2) self.assertEqual(False+2, 2) self.assertEqual(True+2, 3) self.assertEqual(2+False, 2) self.assertEqual(2+True, 3) self.assertEqual(False+False, 0) self.assertIsNot(False+False, False) self.assertEqual(False+True, 1) self.assertIsNot(False+True, True) self.assertEqual(True+False, 1) self.assertIsNot(True+False, True) self.assertEqual(True+True, 2) self.assertEqual(True-True, 0) self.assertIsNot(True-True, False) self.assertEqual(False-False, 0) 
self.assertIsNot(False-False, False) self.assertEqual(True-False, 1) self.assertIsNot(True-False, True) self.assertEqual(False-True, -1) self.assertEqual(True*1, 1) self.assertEqual(False*1, 0) self.assertIsNot(False*1, False) self.assertEqual(True/1, 1) self.assertIsNot(True/1, True) self.assertEqual(False/1, 0) self.assertIsNot(False/1, False) for b in False, True: for i in 0, 1, 2: self.assertEqual(b**i, int(b)**i) self.assertIsNot(b**i, bool(int(b)**i)) for a in False, True: for b in False, True: self.assertIs(a&b, bool(int(a)&int(b))) self.assertIs(a|b, bool(int(a)|int(b))) self.assertIs(a^b, bool(int(a)^int(b))) self.assertEqual(a&int(b), int(a)&int(b)) self.assertIsNot(a&int(b), bool(int(a)&int(b))) self.assertEqual(a|int(b), int(a)|int(b)) self.assertIsNot(a|int(b), bool(int(a)|int(b))) self.assertEqual(a^int(b), int(a)^int(b)) self.assertIsNot(a^int(b), bool(int(a)^int(b))) self.assertEqual(int(a)&b, int(a)&int(b)) self.assertIsNot(int(a)&b, bool(int(a)&int(b))) self.assertEqual(int(a)|b, int(a)|int(b)) self.assertIsNot(int(a)|b, bool(int(a)|int(b))) self.assertEqual(int(a)^b, int(a)^int(b)) self.assertIsNot(int(a)^b, bool(int(a)^int(b))) self.assertIs(1==1, True) self.assertIs(1==0, False) self.assertIs(0<1, True) self.assertIs(1<0, False) self.assertIs(0<=0, True) self.assertIs(1<=0, False) self.assertIs(1>0, True) self.assertIs(1>1, False) self.assertIs(1>=1, True) self.assertIs(0>=1, False) self.assertIs(0!=1, True) self.assertIs(0!=0, False) x = [1] self.assertIs(x is x, True) self.assertIs(x is not x, False) self.assertIs(1 in x, True) self.assertIs(0 in x, False) self.assertIs(1 not in x, False) self.assertIs(0 not in x, True) x = {1: 2} self.assertIs(x is x, True) self.assertIs(x is not x, False) self.assertIs(1 in x, True) self.assertIs(0 in x, False) self.assertIs(1 not in x, False) self.assertIs(0 not in x, True) self.assertIs(not True, False) self.assertIs(not False, True) def test_convert(self): self.assertRaises(TypeError, bool, 42, 42) 
self.assertIs(bool(10), True) self.assertIs(bool(1), True) self.assertIs(bool(-1), True) self.assertIs(bool(0), False) self.assertIs(bool("hello"), True) self.assertIs(bool(""), False) self.assertIs(bool(), False) def test_format(self): self.assertEqual("%d" % False, "0") self.assertEqual("%d" % True, "1") self.assertEqual("%x" % False, "0") self.assertEqual("%x" % True, "1") def test_hasattr(self): self.assertIs(hasattr([], "append"), True) self.assertIs(hasattr([], "wobble"), False) def test_callable(self): self.assertIs(callable(len), True) self.assertIs(callable(1), False) def test_isinstance(self): self.assertIs(isinstance(True, bool), True) self.assertIs(isinstance(False, bool), True) self.assertIs(isinstance(True, int), True) self.assertIs(isinstance(False, int), True) self.assertIs(isinstance(1, bool), False) self.assertIs(isinstance(0, bool), False) def test_issubclass(self): self.assertIs(issubclass(bool, int), True) self.assertIs(issubclass(int, bool), False) def test_contains(self): self.assertIs(1 in {}, False) self.assertIs(1 in {1:1}, True) def test_string(self): self.assertIs("xyz".endswith("z"), True) self.assertIs("xyz".endswith("x"), False) self.assertIs("xyz0123".isalnum(), True) self.assertIs("@#$%".isalnum(), False) self.assertIs("xyz".isalpha(), True) self.assertIs("@#$%".isalpha(), False) self.assertIs("0123".isdigit(), True) self.assertIs("xyz".isdigit(), False) self.assertIs("xyz".islower(), True) self.assertIs("XYZ".islower(), False) self.assertIs("0123".isdecimal(), True) self.assertIs("xyz".isdecimal(), False) self.assertIs("0123".isnumeric(), True) self.assertIs("xyz".isnumeric(), False) self.assertIs(" ".isspace(), True) self.assertIs("\xa0".isspace(), True) self.assertIs("\u3000".isspace(), True) self.assertIs("XYZ".isspace(), False) self.assertIs("X".istitle(), True) self.assertIs("x".istitle(), False) self.assertIs("XYZ".isupper(), True) self.assertIs("xyz".isupper(), False) self.assertIs("xyz".startswith("x"), True) 
self.assertIs("xyz".startswith("z"), False) def test_boolean(self): self.assertEqual(True & 1, 1) self.assertNotIsInstance(True & 1, bool) self.assertIs(True & True, True) self.assertEqual(True | 1, 1) self.assertNotIsInstance(True | 1, bool) self.assertIs(True | True, True) self.assertEqual(True ^ 1, 0) self.assertNotIsInstance(True ^ 1, bool) self.assertIs(True ^ True, False) def test_fileclosed(self): try: f = open(support.TESTFN, "w") self.assertIs(f.closed, False) f.close() self.assertIs(f.closed, True) finally: os.remove(support.TESTFN) def test_types(self): # types are always true. for t in [bool, complex, dict, float, int, list, object, set, str, tuple, type]: self.assertIs(bool(t), True) def test_operator(self): import operator self.assertIs(operator.truth(0), False) self.assertIs(operator.truth(1), True) self.assertIs(operator.not_(1), False) self.assertIs(operator.not_(0), True) self.assertIs(operator.contains([], 1), False) self.assertIs(operator.contains([1], 1), True) self.assertIs(operator.lt(0, 0), False) self.assertIs(operator.lt(0, 1), True) self.assertIs(operator.is_(True, True), True) self.assertIs(operator.is_(True, False), False) self.assertIs(operator.is_not(True, True), False) self.assertIs(operator.is_not(True, False), True) def test_marshal(self): import marshal self.assertIs(marshal.loads(marshal.dumps(True)), True) self.assertIs(marshal.loads(marshal.dumps(False)), False) def test_pickle(self): import pickle for proto in range(pickle.HIGHEST_PROTOCOL + 1): self.assertIs(pickle.loads(pickle.dumps(True, proto)), True) self.assertIs(pickle.loads(pickle.dumps(False, proto)), False) def test_picklevalues(self): # Test for specific backwards-compatible pickle values import pickle self.assertEqual(pickle.dumps(True, protocol=0), b"I01\n.") self.assertEqual(pickle.dumps(False, protocol=0), b"I00\n.") self.assertEqual(pickle.dumps(True, protocol=1), b"I01\n.") self.assertEqual(pickle.dumps(False, protocol=1), b"I00\n.") 
self.assertEqual(pickle.dumps(True, protocol=2), b'\x80\x02\x88.') self.assertEqual(pickle.dumps(False, protocol=2), b'\x80\x02\x89.') def test_convert_to_bool(self): # Verify that TypeError occurs when bad things are returned # from __bool__(). This isn't really a bool test, but # it's related. check = lambda o: self.assertRaises(TypeError, bool, o) class Foo(object): def __bool__(self): return self check(Foo()) class Bar(object): def __bool__(self): return "Yes" check(Bar()) class Baz(int): def __bool__(self): return self check(Baz()) # __bool__() must return a bool not an int class Spam(int): def __bool__(self): return 1 check(Spam()) class Eggs: def __len__(self): return -1 self.assertRaises(ValueError, bool, Eggs()) def test_sane_len(self): # this test just tests our assumptions about __len__ # this will start failing if __len__ changes assertions for badval in ['illegal', -1, 1 << 32]: class A: def __len__(self): return badval try: bool(A()) except (Exception) as e_bool: try: len(A()) except (Exception) as e_len: self.assertEqual(str(e_bool), str(e_len)) def test_real_and_imag(self): self.assertEqual(True.real, 1) self.assertEqual(True.imag, 0) self.assertIs(type(True.real), int) self.assertIs(type(True.imag), int) self.assertEqual(False.real, 0) self.assertEqual(False.imag, 0) self.assertIs(type(False.real), int) self.assertIs(type(False.imag), int) def test_main(): support.run_unittest(BoolTest) if __name__ == "__main__": test_main()
mit
meletakis/collato
lib/python2.7/site-packages/django/db/models/sql/where.py
99
14563
""" Code to manage the creation and SQL rendering of 'where' constraints. """ from __future__ import absolute_import import datetime from itertools import repeat from django.utils import tree from django.db.models.fields import Field from django.db.models.sql.datastructures import EmptyResultSet from django.db.models.sql.aggregates import Aggregate from django.utils.itercompat import is_iterator from django.utils.six.moves import xrange # Connection types AND = 'AND' OR = 'OR' class EmptyShortCircuit(Exception): """ Internal exception used to indicate that a "matches nothing" node should be added to the where-clause. """ pass class WhereNode(tree.Node): """ Used to represent the SQL where-clause. The class is tied to the Query class that created it (in order to create the correct SQL). The children in this tree are usually either Q-like objects or lists of [table_alias, field_name, db_type, lookup_type, value_annotation, params]. However, a child could also be any class with as_sql() and relabel_aliases() methods. """ default = AND def add(self, data, connector): """ Add a node to the where-tree. If the data is a list or tuple, it is expected to be of the form (obj, lookup_type, value), where obj is a Constraint object, and is then slightly munged before being stored (to avoid storing any reference to field objects). Otherwise, the 'data' is stored unchanged and can be any class with an 'as_sql()' method. """ if not isinstance(data, (list, tuple)): super(WhereNode, self).add(data, connector) return obj, lookup_type, value = data if is_iterator(value): # Consume any generators immediately, so that we can determine # emptiness and transform any non-empty values correctly. value = list(value) # The "value_annotation" parameter is used to pass auxilliary information # about the value(s) to the query construction. Specifically, datetime # and empty values need special handling. 
Other types could be used # here in the future (using Python types is suggested for consistency). if isinstance(value, datetime.datetime): value_annotation = datetime.datetime elif hasattr(value, 'value_annotation'): value_annotation = value.value_annotation else: value_annotation = bool(value) if hasattr(obj, "prepare"): value = obj.prepare(lookup_type, value) super(WhereNode, self).add( (obj, lookup_type, value_annotation, value), connector) def as_sql(self, qn, connection): """ Returns the SQL version of the where clause and the value to be substituted in. Returns '', [] if this node matches everything, None, [] if this node is empty, and raises EmptyResultSet if this node can't match anything. """ # Note that the logic here is made slightly more complex than # necessary because there are two kind of empty nodes: Nodes # containing 0 children, and nodes that are known to match everything. # A match-everything node is different than empty node (which also # technically matches everything) for backwards compatibility reasons. # Refs #5261. result = [] result_params = [] everything_childs, nothing_childs = 0, 0 non_empty_childs = len(self.children) for child in self.children: try: if hasattr(child, 'as_sql'): sql, params = child.as_sql(qn=qn, connection=connection) else: # A leaf node in the tree. sql, params = self.make_atom(child, qn, connection) except EmptyResultSet: nothing_childs += 1 else: if sql: result.append(sql) result_params.extend(params) else: if sql is None: # Skip empty childs totally. non_empty_childs -= 1 continue everything_childs += 1 # Check if this node matches nothing or everything. # First check the amount of full nodes and empty nodes # to make this node empty/full. if self.connector == AND: full_needed, empty_needed = non_empty_childs, 1 else: full_needed, empty_needed = 1, non_empty_childs # Now, check if this node is full/empty using the # counts. 
if empty_needed - nothing_childs <= 0: if self.negated: return '', [] else: raise EmptyResultSet if full_needed - everything_childs <= 0: if self.negated: raise EmptyResultSet else: return '', [] if non_empty_childs == 0: # All the child nodes were empty, so this one is empty, too. return None, [] conn = ' %s ' % self.connector sql_string = conn.join(result) if sql_string: if self.negated: # Some backends (Oracle at least) need parentheses # around the inner SQL in the negated case, even if the # inner SQL contains just a single expression. sql_string = 'NOT (%s)' % sql_string elif len(result) > 1: sql_string = '(%s)' % sql_string return sql_string, result_params def make_atom(self, child, qn, connection): """ Turn a tuple (Constraint(table_alias, column_name, db_type), lookup_type, value_annotation, params) into valid SQL. The first item of the tuple may also be an Aggregate. Returns the string for the SQL fragment and the parameters to use for it. """ lvalue, lookup_type, value_annotation, params_or_value = child if isinstance(lvalue, Constraint): try: lvalue, params = lvalue.process(lookup_type, params_or_value, connection) except EmptyShortCircuit: raise EmptyResultSet elif isinstance(lvalue, Aggregate): params = lvalue.field.get_db_prep_lookup(lookup_type, params_or_value, connection) else: raise TypeError("'make_atom' expects a Constraint or an Aggregate " "as the first item of its 'child' argument.") if isinstance(lvalue, tuple): # A direct database column lookup. field_sql = self.sql_for_columns(lvalue, qn, connection) else: # A smart object with an as_sql() method. 
field_sql = lvalue.as_sql(qn, connection) if value_annotation is datetime.datetime: cast_sql = connection.ops.datetime_cast_sql() else: cast_sql = '%s' if hasattr(params, 'as_sql'): extra, params = params.as_sql(qn, connection) cast_sql = '' else: extra = '' if (len(params) == 1 and params[0] == '' and lookup_type == 'exact' and connection.features.interprets_empty_strings_as_nulls): lookup_type = 'isnull' value_annotation = True if lookup_type in connection.operators: format = "%s %%s %%s" % (connection.ops.lookup_cast(lookup_type),) return (format % (field_sql, connection.operators[lookup_type] % cast_sql, extra), params) if lookup_type == 'in': if not value_annotation: raise EmptyResultSet if extra: return ('%s IN %s' % (field_sql, extra), params) max_in_list_size = connection.ops.max_in_list_size() if max_in_list_size and len(params) > max_in_list_size: # Break up the params list into an OR of manageable chunks. in_clause_elements = ['('] for offset in xrange(0, len(params), max_in_list_size): if offset > 0: in_clause_elements.append(' OR ') in_clause_elements.append('%s IN (' % field_sql) group_size = min(len(params) - offset, max_in_list_size) param_group = ', '.join(repeat('%s', group_size)) in_clause_elements.append(param_group) in_clause_elements.append(')') in_clause_elements.append(')') return ''.join(in_clause_elements), params else: return ('%s IN (%s)' % (field_sql, ', '.join(repeat('%s', len(params)))), params) elif lookup_type in ('range', 'year'): return ('%s BETWEEN %%s and %%s' % field_sql, params) elif lookup_type in ('month', 'day', 'week_day'): return ('%s = %%s' % connection.ops.date_extract_sql(lookup_type, field_sql), params) elif lookup_type == 'isnull': return ('%s IS %sNULL' % (field_sql, (not value_annotation and 'NOT ' or '')), ()) elif lookup_type == 'search': return (connection.ops.fulltext_search_sql(field_sql), params) elif lookup_type in ('regex', 'iregex'): return connection.ops.regex_lookup(lookup_type) % (field_sql, cast_sql), 
params raise TypeError('Invalid lookup_type: %r' % lookup_type) def sql_for_columns(self, data, qn, connection): """ Returns the SQL fragment used for the left-hand side of a column constraint (for example, the "T1.foo" portion in the clause "WHERE ... T1.foo = 6"). """ table_alias, name, db_type = data if table_alias: lhs = '%s.%s' % (qn(table_alias), qn(name)) else: lhs = qn(name) return connection.ops.field_cast_sql(db_type) % lhs def relabel_aliases(self, change_map, node=None): """ Relabels the alias values of any children. 'change_map' is a dictionary mapping old (current) alias values to the new values. """ if not node: node = self for pos, child in enumerate(node.children): if hasattr(child, 'relabel_aliases'): child.relabel_aliases(change_map) elif isinstance(child, tree.Node): self.relabel_aliases(change_map, child) elif isinstance(child, (list, tuple)): if isinstance(child[0], (list, tuple)): elt = list(child[0]) if elt[0] in change_map: elt[0] = change_map[elt[0]] node.children[pos] = (tuple(elt),) + child[1:] else: child[0].relabel_aliases(change_map) # Check if the query value also requires relabelling if hasattr(child[3], 'relabel_aliases'): child[3].relabel_aliases(change_map) class EverythingNode(object): """ A node that matches everything. """ def as_sql(self, qn=None, connection=None): return '', [] def relabel_aliases(self, change_map, node=None): return class NothingNode(object): """ A node that matches nothing. """ def as_sql(self, qn=None, connection=None): raise EmptyResultSet def relabel_aliases(self, change_map, node=None): return class ExtraWhere(object): def __init__(self, sqls, params): self.sqls = sqls self.params = params def as_sql(self, qn=None, connection=None): sqls = ["(%s)" % sql for sql in self.sqls] return " AND ".join(sqls), tuple(self.params or ()) class Constraint(object): """ An object that can be passed to WhereNode.add() and knows how to pre-process itself prior to including in the WhereNode. 
""" def __init__(self, alias, col, field): self.alias, self.col, self.field = alias, col, field def __getstate__(self): """Save the state of the Constraint for pickling. Fields aren't necessarily pickleable, because they can have callable default values. So, instead of pickling the field store a reference so we can restore it manually """ obj_dict = self.__dict__.copy() if self.field: obj_dict['model'] = self.field.model obj_dict['field_name'] = self.field.name del obj_dict['field'] return obj_dict def __setstate__(self, data): """Restore the constraint """ model = data.pop('model', None) field_name = data.pop('field_name', None) self.__dict__.update(data) if model is not None: self.field = model._meta.get_field(field_name) else: self.field = None def prepare(self, lookup_type, value): if self.field: return self.field.get_prep_lookup(lookup_type, value) return value def process(self, lookup_type, value, connection): """ Returns a tuple of data suitable for inclusion in a WhereNode instance. """ # Because of circular imports, we need to import this here. from django.db.models.base import ObjectDoesNotExist try: if self.field: params = self.field.get_db_prep_lookup(lookup_type, value, connection=connection, prepared=True) db_type = self.field.db_type(connection=connection) else: # This branch is used at times when we add a comparison to NULL # (we don't really want to waste time looking up the associated # field object at the calling location). params = Field().get_db_prep_lookup(lookup_type, value, connection=connection, prepared=True) db_type = None except ObjectDoesNotExist: raise EmptyShortCircuit return (self.alias, self.col, db_type), params def relabel_aliases(self, change_map): if self.alias in change_map: self.alias = change_map[self.alias]
gpl-2.0
MaryanMorel/faker
faker/providers/address/sl_SI/__init__.py
15
34150
# coding=utf-8 from __future__ import unicode_literals from .. import Provider as AddressProvider class Provider(AddressProvider): city_formats = ('{{city_name}}', ) street_name_formats = ('{{street_name}}', ) street_address_formats = ('{{street_name}} {{building_number}}', ) address_formats = ('{{street_address}}\n{{postcode}} {{city}}', ) building_number_formats = ('###', '##', '#', '#a', '#b', '#c') postcode_formats = ('####', ) cities = ( "Ajdovščina", "Bled", "Bovec", "Brežice", "Celje", "Cerknica", "Črnomelj", "Domžale", "Dravograd", "Gornja Radgona", "Gornji Grad", "Grosuplje", "Hrastnik", "Idrija", "Ilirska Bistrica", "Izola", "Jesenice", "Kamnik", "Kobarid", "Kočevje", "Koper", "Kostanjevica na Krki", "Kranj", "Krško", "Laško", "Lenart v Slovenskih goricah", "Lendava", "Litija", "Ljubljana", "Ljutomer", "Logatec", "Maribor", "Medvode", "Mengeš", "Metlika", "Mežica", "Murska Sobota", "Nova Gorica", "Novo mesto", "Ormož", "Piran", "Postojna", "Prevalje", "Ptuj", "Radeče", "Radovljica", "Ravne na Koroškem", "Ribnica", "Rogaška Slatina", "Ruše", "Sevnica", "Sežana", "Slovenj Gradec", "Slovenska Bistrica", "Slovenske Konjice", "Šempeter pri Gorici", "Šentjur", "Škofja Loka", "Šoštanj", "Tolmin", "Trbovlje", "Trebnje", "Tržič", "Turnišče", "Velenje", "Vipava", "Vipavski Križ", "Višnja Gora", "Vrhnika", "Zagorje ob Savi", "Žalec", "Železniki", "Žiri", ) streets = ( "Abramova ulica", "Adamičeva ulica", "Adamič-Lundrovo nabrežje", "Ajdovščina", "Aleševa ulica", "Alešovčeva ulica", "Aljaževa ulica", "Ambrožev trg", "Ameriška ulica", "Andrićeva ulica", "Anžurjeva ulica", "Apihova ulica", "Argentinska ulica", "Arharjeva cesta", "Arkova ulica", "Artačeva ulica", "Aškerčeva cesta", "Avčinova ulica", "Avsečeva ulica", "Avstrijska ulica", "Avšičeva cesta", "Ažmanova ulica", "Babičeva ulica", "Badjurova ulica", "Balinarska pot", "Baragova ulica", "Barjanska cesta", "Bavdkova ulica", "Baznikova ulica", "Bazoviška ulica", "Beethovnova ulica", "Belačeva ulica", "Beljaška 
ulica", "Berčičeva ulica", "Berčonova pot", "Berdajsova ulica", "Bernekerjeva ulica", "Bernikova ulica", "Betettova cesta", "Bezenškova ulica", "Bežigrad", "Bičevje", "Bilečanska ulica", "Bitenčeva ulica", "Bizjakova ulica", "Bizjanova ulica", "Bizovški štradon", "Blasnikova ulica", "Blasov breg", "Bleiweisova cesta", "Bobenčkova ulica", "Bobrova ulica", "Bognarjeva pot", "Bohinjčeva ulica", "Bohoričeva ulica", "Boletova ulica", "Bolgarska ulica", "Borovniška ulica", "Borštnikov trg", "Borutova ulica", "Božičeva ulica", "Brankova ulica", "Bratinova ulica", "Bratislavska cesta", "Bratov Jakopičev ulica", "Bratov Kunovarjev ulica", "Bravničarjeva ulica", "Brdnikova ulica", "Breg", "Bregarjeva ulica", "Breznikova ulica", "Brglezov štradon", "Brilejeva ulica", "Brodarjev trg", "Brodska cesta", "Burnikova ulica", "Cankarjev vrh", "Cankarjevo nabrežje", "Carja Dušana ulica", "Celarčeva ulica", "Celjska ulica", "Celovška cesta", "Cerkniška ulica", "Cerutova ulica", "Cesta Andreja Bitenca", "Cesta Ceneta Štuparja", "Cesta Dolomitskega odreda", "Cesta II. grupe odredov", "Cesta Ljubljanske brigade", "Cesta na Bellevue", "Cesta na Bokalce", "Cesta na Brinovec", "Cesta na Brod", "Cesta na Ježah", "Cesta na Kope", "Cesta na Laze", "Cesta na Loko", "Cesta na Mesarico", "Cesta na Ozare", "Cesta na Poljane", "Cesta na Prevoje", "Cesta na Urh", "Cesta na Vrhovce", "Cesta slov. kmečkih uporov", "Cesta Urške Zatlerjeve", "Cesta v Dvor", "Cesta v Gameljne", "Cesta v Hrastje", "Cesta v hrib", "Cesta v Kleče", "Cesta v Kostanj", "Cesta v Legarico", "Cesta v Mestni log", "Cesta v Pečale", "Cesta v Prod", "Cesta v Rožno dolino", "Cesta v Šmartno", "Cesta v Zeleni log", "Cesta v Zgornji log", "Cesta vstaje", "Cesta 24. junija", "Cesta 25 talcev", "Cesta 27. 
aprila", "Chengdujska cesta", "Chopinov prehod", "Cigaletova ulica", "Cilenškova ulica", "Cimermanova ulica", "Cimpermanova ulica", "Cizejeva ulica", "Clevelandska ulica", "Colnarjeva ulica", "Cvetlična pot", "Čampova ulica", "Čanžekova ulica", "Čargova ulica", "Čebelarska ulica", "Čehova ulica", "Čepelnikova ulica", "Čepovanska ulica", "Čerinova ulica", "Černigojeva ulica", "Černivčeva ulica", "Červanova ulica", "Čevljarska ulica", "Čižmanova ulica", "Čopova ulica", "Črna pot", "Črnuška cesta", "Črtomirova ulica", "Čučkova ulica", "Dajnkova ulica", "Dalmatinova ulica", "Danile Kumarjeve ulica", "Dečkova ulica", "Dečmanova ulica", "Delakova ulica", "Demšarjeva cesta", "Derčeva ulica", "Dergančeva ulica", "Dermotova ulica", "Detelova ulica", "Devinska ulica", "Devova ulica", "Divjakova ulica", "Do proge", "Dobrajčeva ulica", "Dobrdobska ulica", "Dolenjska cesta", "Dolgi breg", "Dolgi most", "Dolharjeva ulica", "Dolinarjeva ulica", "Dolinškova ulica", "Dolničarjeva ulica", "Dolomitska ulica", "Drabosnjakova ulica", "Draga", "Draveljska ulica", "Dražgoška ulica", "Drenikov vrh", "Drenikova ulica", "Dunajska cesta", "Dvojna ulica", "Dvorakova ulica", "Dvorni trg", "Eipprova ulica", "Ellerjeva ulica", "Emonska cesta", "Erbežnikova ulica", "Erjavčeva cesta", "Fabianijeva ulica", "Fani Grumove ulica", "Ferberjeva ulica", "Filipičeva ulica", "Flajšmanova ulica", "Flandrova ulica", "Forsterjeva ulica", "Franketova ulica", "Frankopanska ulica", "Frenkova pot", "Friškovec", "Funtkova ulica", "Fužinska cesta", "Gabrov trg", "Gača", "Galičeva ulica", "Galjevica", "Gallusovo nabrežje", "Gasilska cesta", "Gasparijeva ulica", "Gašperšičeva ulica", "Gerbičeva ulica", "Gestrinova ulica", "Glavarjeva ulica", "Gledališka stolba", "Glinška ulica", "Glinškova ploščad", "Glonarjeva ulica", "Gmajnice", "Gobarska pot", "Godeževa ulica", "Gola Loka", "Golarjeva ulica", "Goljarjeva pot", "Golouhova ulica", "Goriška ulica", "Gorjančeva ulica", "Gorjupova ulica", "Gornji Rudnik I", "Gornji 
Rudnik II", "Gornji Rudnik III", "Gornji trg", "Goropečnikova ulica", "Gortanova ulica", "Gospodinjska ulica", "Gosposka ulica", "Gosposvetska cesta", "Govekarjeva ulica", "Gozdna pot", "Grablovičeva ulica", "Gradišče", "Gradnikova ulica", "Grafenauerjeva ulica", "Grajski drevored", "Grajzerjeva ulica", "Gramozna pot", "Grassellijeva ulica", "Gregorčičeva ulica", "Gregorinova ulica", "Grintovška ulica", "Grobeljca", "Grobeljska pot", "Groharjeva cesta", "Groznikova ulica", "Grška ulica", "Grško", "Gruberjevo nabrežje", "Grudnovo nabrežje", "Gubčeva ulica", "Gunceljska cesta", "Gustinčarjeva ulica", "Gustinčičeva ulica", "Hacetova ulica", "Hafnerjeva ulica", "Hajdrihova ulica", "Hauptmanca", "Hladilniška pot", "Hladnikova cesta", "Hlebčeva ulica", "Hotimirova ulica", "Hradeckega cesta", "Hranilniška ulica", "Hribarjevo nabrežje", "Hribernikova ulica", "Hribovska pot", "Hrvaška ulica", "Hrvatski trg", "Hubadova ulica", "Hudourniška pot", "Idrijska ulica", "Igriška ulica", "Ilešičeva ulica", "Ilovški štradon", "Industrijska cesta", "Ingličeva ulica", "Italijanska ulica", "Izletniška ulica", "Ižanska cesta", "Jakčeva ulica", "Jakhljeva ulica", "Jakopičev drevored", "Jakopičevo sprehajališče", "Jakšičeva ulica", "Jalnova ulica", "Jamova cesta", "Janežičeva cesta", "Janova ulica", "Janševa ulica", "Jarčeva ulica", "Jarnikova ulica", "Jarše", "Jarška cesta", "Javorškova ulica", "Jazbečeva pot", "Jelinčičeva ulica", "Jenkova ulica", "Jensenova ulica", "Jerajeva ulica", "Jeranova ulica", "Jesenkova ulica", "Jesihov štradon", "Jezerska ulica", "Ježa", "Ježica", "Joškov štradon", "Jurčičev trg", "Jurčkova cesta", "Juričeva ulica", "Juvanova ulica", "K reaktorju", "Kadilnikova ulica", "Kajuhova ulica", "Kalingerjeva ulica", "Kalinova ulica", "Kaminova ulica", "Kamniška ulica", "Kamnogoriška cesta", "Kančeva ulica", "Kanonijeva cesta", "Kantetova ulica", "Kapusova ulica", "Kardeljeva ploščad", "Karingerjeva ulica", "Karunova ulica", "Kastelčeva ulica", "Kašeljska cesta", 
"Kavadarska cesta", "Kavčičeva ulica", "Kavškova ulica", "Kekčeva ulica", "Kermaunerjeva ulica", "Kernova cesta", "Kerševanova ulica", "Keržičeva ulica", "Kettejeva ulica", "Kladezna ulica", "Klančarjeva ulica", "Kleče", "Klemenova ulica", "Kleparska steza", "Ključavničarska ulica", "Klunova ulica", "Kmečka pot", "Knafljev prehod", "Knezov štradon", "Knezova ulica", "Knobleharjeva ulica", "Koblarjeva ulica", "Kocbekova ulica", "Kocenova ulica", "Kocjanova ulica", "Kočenska ulica", "Kodrova ulica", "Kogojeva ulica", "Kogovškova ulica", "Kokaljeva ulica", "Kolarjeva ulica", "Kolesarska pot", "Koleševa ulica", "Kolinska ulica", "Kolmanova ulica", "Kolodvorska ulica", "Komanova ulica", "Komenskega ulica", "Kongresni trg", "Kopališka ulica", "Kopitarjeva ulica", "Kopna pot", "Koprska ulica", "Koreninova ulica", "Koroška ulica", "Korotanska ulica", "Kosančeva ulica", "Koseskega ulica", "Koseška cesta", "Kosmačeva ulica", "Kosova ulica", "Kosovelova ulica", "Koširjeva ulica", "Kotnikova ulica", "Kovačeva ulica", "Kovaška ulica", "Kovinarska ulica", "Kozakova ulica", "Kozinova ulica", "Kozlarjeva pot", "Koželjeva ulica", "Krakovski nasip", "Kraljeva ulica", "Kranerjeva ulica", "Kraška ulica", "Kratka pot", "Kratka steza", "Kregarjeva ulica", "Kreljeva ulica", "Kremžarjeva ulica", "Krimska ulica", "Krištofova ulica", "Kriva pot", "Krivec", "Križevniška soteska", "Križna ulica", "Krmčeva ulica", "Krmeljeva ulica", "Kropova ulica", "Krošljeva ulica", "Krovska ulica", "Krožna pot", "Kržičeva ulica", "Kudrova ulica", "Kuhljeva cesta", "Kumerdejeva ulica", "Kumerjeve ulica", "Kumrovška ulica", "Kurilniška ulica", "Kurirska ulica", "Kusoldova ulica", "Kuštrinova ulica", "Kuzeletova ulica", "Kuzmičeva ulica", "Lahova pot", "Lajovčeva ulica", "Laknerjeva ulica", "Lakotence", "Lampetova ulica", "Lamutova ulica", "Langusova ulica", "Latinski trg", "Lavrinova ulica", "Layerjeva ulica", "Lazarjeva ulica", "Legatova ulica", "Lemeževa ulica", "Lepi pot", "Lepodvorska ulica", "Leskovičeva 
ulica", "Letališka cesta", "Levarjeva ulica", "Levičnikova ulica", "Levstikov trg", "Levstikova ulica", "Linhartov podhod", "Linhartova cesta", "Lipahova ulica", "Litijska cesta", "Litostrojska cesta", "Livada", "Livarska ulica", "Ločnikarjeva ulica", "Lončarska steza", "Lorenzova cesta", "Lovrenčičeva ulica", "Lovska ulica", "Lovšetova ulica", "Lubejeva ulica", "Luize Pesjakove ulica", "Lunačkova ulica", "Mačja steza", "Mačkov kot", "Mačkova ulica", "Madžarska ulica", "Magistrova ulica", "Maistrova ulica", "Majaronova ulica", "Majde Vrhovnikove ulica", "Majorja Lavriča ulica", "Makucova ulica", "Mala ulica", "Mala vas", "Malejeva ulica", "Malenškova ulica", "Malgajeva ulica", "Mali štradon", "Mali trg", "Malnarjeva ulica", "Marčenkova ulica", "Marentičeva ulica", "Mareška pot", "Marice Kovačeve ulica", "Marincljeva ulica", "Marinovševa cesta", "Maroltova ulica", "Martina Krpana ulica", "Martinčeva ulica", "Martinova ulica", "Marušičeva ulica", "Masarykova cesta", "Matjanova pot", "Matjaževa ulica", "Maurerjeva ulica", "Mazovčeva pot", "Med hmeljniki", "Medarska ulica", "Medenska cesta", "Medveščkova ulica", "Mekinčeva ulica", "Melikova ulica", "Mencingerjeva ulica", "Merčnikova ulica", "Merosodna ulica", "Mesesnelova ulica", "Mestni trg", "Meškova ulica", "Metelkova ulica", "Miheličeva cesta", "Mihov štradon", "Miklavčeva ulica", "Miklošičeva cesta", "Mikuževa ulica", "Milčetova pot", "Mire Lenardičeve ulica", "Mirje", "Mirna pot", "Mislejeva ulica", "Mizarska pot", "Mladinska ulica", "Mlake", "Mlinska pot", "Močnikova ulica", "Mokrška ulica", "Molekova ulica", "Moškričeva ulica", "Mrharjeva ulica", "Mrzelova ulica", "Murkova ulica", "Murnikova ulica", "Murnova ulica", "Muzejska ulica", "Na cvetači", "Na delih", "Na dolih", "Na gaju", "Na gmajni", "Na Herši", "Na jami", "Na klančku", "Na Korošci", "Na Palcah", "Na požaru", "Na produ", "Na Rojah", "Na Stolbi", "Na Straški vrh", "Na Trati", "Na Žalah", "Nade Ovčakove ulica", "Nadgoriška cesta", "Nahlikova ulica", 
"Nahtigalova ulica", "Nanoška ulica", "Nazorjeva ulica", "Nebotičnikov prehod", "Nedohova ulica", "Njegoševa cesta", "Nova ulica", "Novakova pot", "Novakova ulica", "Novi trg", "Novinarska ulica", "Novo naselje", "Novo Polje, cesta I", "Novo Polje, cesta III", "Novo Polje, cesta IV", "Novo Polje, cesta V", "Novo Polje, cesta VI", "Novo Polje, cesta VII", "Novo Polje, cesta X", "Novo Polje, cesta XI", "Novo Polje, cesta XII", "Novo Polje, cesta XIV", "Novo Polje, cesta XIX", "Novo Polje, cesta XVI", "Novo Polje, cesta XVII", "Novo Polje, cesta XXI", "Novo Polje, cesta XXIII", "Novosadska ulica", "Ob daljnovodu", "Ob dolenjski železnici", "Ob Farjevcu", "Ob Ljubljanici", "Ob Mejašu", "Ob potoku", "Ob pristanu", "Ob Savi", "Ob studencu", "Ob zdravstvenem domu", "Ob zeleni jami", "Ob zelenici", "Ob žici", "Obirska ulica", "Obrežna steza", "Obrije", "Ocvirkova ulica", "Ogrinčeva ulica", "Okiškega ulica", "Omahnova ulica", "Omejčeva ulica", "Omersova ulica", "Oražnova ulica", "Orlova ulica", "Osenjakova ulica", "Osojna pot", "Osojna steza", "Osterčeva ulica", "Ovčakova ulica", "Pahorjeva ulica", "Palmejeva ulica", "Papirniška pot", "Park Ajdovščina", "Park Arturo Toscanini", "Parmova ulica", "Parmska cesta", "Partizanska ulica", "Pavlovčeva ulica", "Pavšičeva ulica", "Pečarjeva ulica", "Pečnik", "Pečnikova ulica", "Pegamova ulica", "Perčeva ulica", "Periška cesta", "Perkova ulica", "Peršinova cesta", "Pesarska cesta", "Pestotnikova ulica", "Peščena pot", "Petkova ulica", "Petkovškovo nabrežje", "Petrčeva ulica", "Pilonova ulica", "Pionirska pot", "Pipanova pot", "Pirnatova ulica", "Planinska cesta", "Planinškova ulica", "Plečnikov podhod", "Plemljeva ulica", "Plešičeva ulica", "Pleteršnikova ulica", "Pločanska ulica", "Pod akacijami", "Pod bregom", "Pod bresti", "Pod bukvami", "Pod Debnim vrhom", "Pod gabri", "Pod gozdom", "Pod hrasti", "Pod hribom", "Pod hruško", "Pod jelšami", "Pod jezom", "Pod ježami", "Pod Kamno gorico", "Pod klancem", "Pod lipami", "Pod topoli", 
"Pod Trančo", "Pod turnom", "Pod vrbami", "Podgornikova ulica", "Podgorska cesta", "Podgrajska cesta", "Podjunska ulica", "Podlimbarskega ulica", "Podmilščakova ulica", "Podrožniška pot", "Podsmreška cesta", "Podutiška cesta", "Pogačarjev trg", "Pohlinova ulica", "Poklukarjeva ulica", "Polakova ulica", "Polanškova ulica", "Poljanska cesta", "Polje", "Polje, cesta I", "Polje, cesta II", "Polje, cesta III", "Polje, cesta VI", "Polje, cesta VIII", "Polje, cesta X", "Polje, cesta XIV", "Polje, cesta XL", "Polje, cesta XLII", "Polje, cesta XLVI", "Polje, cesta XVI", "Polje, cesta XVIII", "Polje, cesta XXII", "Polje, cesta XXIV", "Polje, cesta XXVI", "Polje, cesta XXX", "Polje, cesta XXXII", "Polje, cesta XXXIV", "Polje, cesta XXXVIII", "Poljedelska ulica", "Poljska pot", "Porentova ulica", "Posavskega ulica", "Postojnska ulica", "Pot do šole", "Pot Draga Jakopiča", "Pot heroja Trtnika", "Pot k igrišču", "Pot k ribniku", "Pot k Savi", "Pot k sejmišču", "Pot k studencu", "Pot na Breje", "Pot na Drenikov vrh", "Pot na Golovec", "Pot na goro", "Pot na Gradišče", "Pot na Grič", "Pot na Labar", "Pot na mah", "Pot na most", "Pot na Orle", "Pot na Visoko", "Pot na Zduše", "Pot Rdečega križa", "Pot v boršt", "Pot v Čeželj", "Pot v dolino", "Pot v Goričico", "Pot v hribec", "Pot v mejah", "Pot v Mlake", "Pot v Podgorje", "Pot v Zeleni gaj", "Pot za Brdom", "Pot za razori", "Potokarjeva ulica", "Potrčeva ulica", "Povšetova ulica", "Prašnikarjeva ulica", "Praznikova ulica", "Pražakova ulica", "Pred Savljami", "Predjamska cesta", "Predor pod Gradom", "Preglov trg", "Prekmurska ulica", "Prelčeva ulica", "Preloge", "Premrlova ulica", "Preradovićeva ulica", "Preserska ulica", "Prešernov trg", "Prešernova cesta", "Pretnarjeva ulica", "Pri borštu", "Pri brvi", "Pri malem kamnu", "Pri mostiščarjih", "Pribinova ulica", "Prijateljeva ulica", "Primorska ulica", "Prinčičeva ulica", "Prisojna ulica", "Prištinska ulica", "Privoz", "Proletarska cesta", "Prule", "Prušnikova ulica", "Prvomajska 
ulica", "Pšatnik", "Pšatska pot", "Ptujska ulica", "Pučnikova ulica", "Puharjeva ulica", "Puhova ulica", "Puhtejeva ulica", "Puterlejeva ulica", "Putrihova ulica", "Raičeva ulica", "Rakovniška ulica", "Rakuševa ulica", "Ramovševa ulica", "Ravbarjeva ulica", "Ravna pot", "Ravnikova ulica", "Razgledna steza", "Reber", "Reboljeva ulica", "Rečna ulica", "Regentova cesta", "Resljeva cesta", "Reška ulica", "Ribičičeva ulica", "Ribji trg", "Ribniška ulica", "Rimska cesta", "Rjava cesta", "Robbova ulica", "Robičeva ulica", "Rodičeva ulica", "Rojčeva ulica", "Romavhova ulica", "Rosna pot", "Rotarjeva ulica", "Rovšnikova ulica", "Rozmanova ulica", "Rožanska ulica", "Rožičeva ulica", "Rožna dolina, cesta I", "Rožna dolina, cesta III", "Rožna dolina, cesta IV", "Rožna dolina, cesta V", "Rožna dolina, cesta VI", "Rožna dolina, cesta VIII", "Rožna dolina, cesta X", "Rožna dolina, cesta XII", "Rožna dolina, cesta XIII", "Rožna dolina, cesta XV", "Rožna dolina, cesta XVII", "Rožna ulica", "Rudnik I", "Rudnik II", "Rudnik III", "Runkova ulica", "Ruska ulica", "Rutarjeva ulica", "Sadinja vas", "Sajovčeva ulica", "Samova ulica", "Saškova ulica", "Sattnerjeva ulica", "Savinova ulica", "Savinškova ulica", "Savlje", "Savska cesta", "Sedejeva ulica", "Selanov trg", "Selanova ulica", "Setnikarjeva ulica", "Seunigova ulica", "Simončičeva ulica", "Siva pot", "Skapinova ulica", "Sketova ulica", "Skopčeva ulica", "Skrbinškova ulica", "Slape", "Slapnikova ulica", "Slavčja ulica", "Slomškova ulica", "Slovenčeva ulica", "Slovenska cesta", "Smoletova ulica", "Smrekarjeva ulica", "Smrtnikova ulica", "Snebersko nabrežje", "Snežniška ulica", "Snojeva ulica", "Sojerjeva ulica", "Sončna pot", "Sostrska cesta", "Soška ulica", "Soteška pot", "Soussenska ulica", "Sovretova ulica", "Spodnji Rudnik I", "Spodnji Rudnik II", "Spodnji Rudnik III", "Spodnji Rudnik V", "Spomeniška pot", "Srebrničeva ulica", "Srednja pot", "Stadionska ulica", "Staničeva ulica", "Stara Ježica", "Stara slovenska ulica", "Stare 
Črnuče", "Stari trg", "Stegne", "Steletova ulica", "Sternadova ulica", "Stiška ulica", "Stolpniška ulica", "Stoženska ulica", "Stožice", "Stražarjeva ulica", "Streliška ulica", "Stritarjeva ulica", "Strmeckijeva ulica", "Strmi pot", "Strniševa cesta", "Strossmayerjeva ulica", "Strugarska ulica", "Strupijevo nabrežje", "Suhadolčanova ulica", "Sulčja ulica", "Svetčeva ulica", "Šarhova ulica", "Šentjakob", "Šentviška ulica", "Šerkova ulica", "Šestova ulica", "Šibeniška ulica", "Šinkov štradon", "Šišenska cesta", "Šivičeva ulica", "Škerljeva ulica", "Škofova ulica", "Škrabčeva ulica", "Šlandrova ulica", "Šlosarjeva ulica", "Šmarna gora", "Šmartinska cesta", "Šmartno", "Španova pot", "Španska ulica", "Štajerska cesta", "Štebijeva cesta", "Štefančeva ulica", "Štembalova ulica", "Štepanjska cesta", "Štepanjsko nabrežje", "Štirnova ulica", "Štradon čez Prošco", "Štrekljeva ulica", "Študentovska ulica", "Štukljeva cesta", "Štula", "Šturmova ulica", "Šubičeva ulica", "Šumarjeva ulica", "Švabićeva ulica", "Švarova ulica", "Švegljeva cesta", "Tabor", "Tacenska cesta", "Tavčarjeva ulica", "Tbilisijska ulica", "Tesarska ulica", "Teslova ulica", "Tesna ulica", "Tesovnikova ulica", "Tiha ulica", "Tiranova ulica", "Tischlerjeva ulica", "Tivolska cesta", "Tkalska ulica", "Tobačna ulica", "Tolminska ulica", "Tomačevo", "Tomačevska cesta", "Tomažičeva ulica", "Tometova ulica", "Tominškova ulica", "Tomišeljska ulica", "Toplarniška ulica", "Topniška ulica", "Torkarjeva ulica", "Tratnikova ulica", "Travniška ulica", "Trbeže", "Trdinova ulica", "Trebušakova ulica", "Trg francoske revolucije", "Trg mladih", "Trg mladinskih delov. brigad", "Trg narodnih herojev", "Trg prekomorskih brigad", "Trg republike", "Trg 9. 
maja", "Trinkova ulica", "Trnovčeva ulica", "Trnovska ulica", "Trpinčeva ulica", "Trstenjakova ulica", "Trtnikova ulica", "Tržaška cesta", "Tržna ulica", "Tugomerjeva ulica", "Turnerjeva ulica", "Turnsko nabrežje", "Udvančeva ulica", "Ulica aktivistov", "Ulica Alme Sodnik", "Ulica Andreja Kumarja", "Ulica Angelce Ocepkove", "Ulica Angele Ljubičeve", "Ulica borca Petra", "Ulica borcev za severno mejo", "Ulica bratov Bezlajev", "Ulica bratov Blanč", "Ulica bratov Jančar", "Ulica bratov Komel", "Ulica bratov Kraljič", "Ulica bratov Martinec", "Ulica bratov Novak", "Ulica bratov Rozmanov", "Ulica bratov Škofov", "Ulica bratov Učakar", "Ulica bratov Židan", "Ulica Dušana Kraigherja", "Ulica Ernesta Kramerja", "Ulica Franca Nebca", "Ulica Francke Jerasove", "Ulica Franja Novaka", "Ulica gledališča BTC", "Ulica Goce Delčeva", "Ulica Gubčeve brigade", "Ulica Hermana Potočnika", "Ulica Ivana Roba", "Ulica Ivanke Kožuh", "Ulica Ivice Pirjevčeve", "Ulica Janeza Pavla II.", "Ulica Janeza Rožiča", "Ulica Jožeta Jame", "Ulica Jožeta Japlja", "Ulica Jožeta Mirtiča", "Ulica Konrada Babnika", "Ulica Koroškega bataljona", "Ulica Lizike Jančarjeve", "Ulica Lojzeta Spacala", "Ulica Lovre Klemenčiča", "Ulica Malči Beličeve", "Ulica Marije Drakslerjeve", "Ulica Marije Hvaličeve", "Ulica Marje Boršnikove", "Ulica Marka Šlajmerja", "Ulica Milana Majcna", "Ulica Milke Kerinove", "Ulica Minke Bobnar", "Ulica Mirka Jurce", "Ulica Mirka Tomšiča", "Ulica Miroslava Turka", "Ulica Molniške čete", "Ulica na Grad", "Ulica Nade Čamernikove", "Ulica Olge Mohorjeve", "Ulica padlih borcev", "Ulica Pariške komune", "Ulica Pohorskega bataljona", "Ulica Polonce Čude", "Ulica prvoborcev", "Ulica Rezke Dragarjeve", "Ulica Rezke Klopčič", "Ulica Rudolfa Janežiča", "Ulica Staneta Severja", "Ulica Štefke Zbašnikove", "Ulica talcev", "Ulica Tončke Čečeve", "Ulica v Kokovšek", "Ulica Vide Pregarčeve", "Ulica Vladimirja Trampuža", "Ulica Zore Ragancinove", "Ulica Žanke Erjavec", "Ulica 15. aprila", "Ulica 15. 
maja", "Ulica 24. avgusta", "Ulica 4. julija", "Ulica 7. septembra", "Ulica 9. junija", "Uršičev štradon", "Usnjarska ulica", "V Češnjico", "V dolini", "V Karlovce", "V Karlovce", "V Kladeh", "V Murglah", "V Sige", "V Varde", "V Zalar", "Vagajeva ulica", "Valjavčeva ulica", "Valvasorjeva ulica", "Vandotova ulica", "Vaška pot", "Večna pot", "Vegova ulica", "Velebitska ulica", "Veliki štradon", "Velikovška ulica", "Velnarjeva ulica", "Verovškova ulica", "Veršičeva ulica", "Veselova ulica", "Videmska ulica", "Vidergarjeva ulica", "Vidičeva ulica", "Vidovdanska cesta", "Vilharjev podhod", "Vilharjeva cesta", "Vinterca", "Vipavska ulica", "Vipotnikova ulica", "Viška cesta", "Vižmarska pot", "Vodmatska ulica", "Vodmatski trg", "Vodna steza", "Vodnikova cesta", "Vodnikovo naselje", "Vodovodna cesta", "Vogelna ulica", "Vojkova cesta", "Volaričeva ulica", "Vošnjakova ulica", "Vozna pot na Grad", "Vožarski pot", "Vrazov trg", "Vrbovec", "Vrbska ulica", "Vregova ulica", "Vrhovci, cesta I", "Vrhovci, cesta II", "Vrhovci, cesta III", "Vrhovci, cesta IX", "Vrhovci, cesta V", "Vrhovci, cesta VI", "Vrhovci, cesta X", "Vrhovci, cesta XI", "Vrhovci, cesta XII", "Vrhovci, cesta XIV", "Vrhovci, cesta XIX", "Vrhovci, cesta XV", "Vrhovci, cesta XVII", "Vrhovci, cesta XVIII", "Vrhovci, cesta XX", "Vrhovci, cesta XXII", "Vrhovci, cesta XXVI", "Vrhovci, cesta XXVIII", "Vrhovci, cesta XXXII", "Vrhovčeva ulica", "Vrhovnikova ulica", "Vrtača", "Vrtna ulica", "Vrtnarska cesta", "Vulčeva ulica", "Vzajemna ulica", "Windischerjeva ulica", "Wolfova ulica", "Za Garažami", "Za gasilskim domom", "Za Gradom", "Za krajem", "Za opekarno", "Za partizanskim domom", "Za progo", "Za vasjo", "Zadnikarjeva ulica", "Zadobrovška cesta", "Zadružna ulica", "Zajčeva pot", "Zajčevi dvori", "Zakotnikova ulica", "Zalaznikova ulica", "Zaletelova ulica", "Zaloška cesta", "Zarnikova ulica", "Zasavska cesta", "Zatišje", "Zavetiška ulica", "Završje", "Zbašnikova ulica", "Zdešarjeva cesta", "Zelena pot", "Zelenova ulica", 
"Zeljarska ulica", "Zevnikova ulica", "Zidarjev štradon", "Ziherlova ulica", "Zlatek", "Znamenjska ulica", "Zofke Kvedrove ulica", "Zoisova cesta", "Zupanova ulica", "Zvezda", "Zvezdarska ulica", "Zvezna ulica", "Žabarjeva ulica", "Žabjak", "Žalska ulica", "Žaucerjeva ulica", "Žeje", "Železna cesta", "Železnikarjeva ulica", "Žerjalova ulica", "Židankova ulica", "Židovska steza", "Židovska ulica", "Živaličeva ulica", "Živinozdravska ulica", "Žolgerjeva ulica", ) states = ( 'Pomurksa', 'Podravska', 'Koroška', 'Savinjska', 'Zasavska', 'Spodnjeposavska', 'Jugovzhodna Slovenija', 'Osrednjeslovenska', 'Gorenjska', 'Notranjsko - kraška', 'Goriška', 'Obalno - kraška', ) countries = ( "Afganistan", "Islamska republika Afganistan", "Albanija", "Alžirija", "Ljudska demokratična republika Alžirija", "Andora", "Angola", "Republika Angola", "Antigva in Barbuda", "Argentina", "Armenija", "Republika Armenija", "Avstralija", "Avstrija", "Azerbajdžan", "Azerbajdžanska republika", "Bahami", "Zveza Bahami", "Država Bahrajn", "Bangladeš", "Ljudska republika Bangladeš", "Belgija", "Kraljevina Belgija", "Belize", "Belorusija", "Benin", "Republika Benin", "Bocvana", "Republika Bocvana", "Republika Bolgarija", "Bolivija", "Republika Bolivija", "Brazilija", "Federativna republika Brazilija", "Brunej", "Burkina Faso", "Burundi", "Republika Burundi", "Butan", "Ciper", "Republika Ciper", "Čad", "Republika Čad", "Češka", "Čile", "Republika Čile", "Črna gora", "Republika Črna gora", "Kraljevina Danska", "Dominika", "Zveza Dominika", "Džibuti", "Republika Džibuti", "Egipt", "Arabska republika Egipt", "Republika Ekvador", "Ekvatorialna Gvineja", "Eritreja", "Estonija", "Republika Estonija", "Etiopija", "Fidži", "Filipini", "Republika Filipini", "Finska", "Republika Finska", "Francoska republika", "Gabon", "Gabonska republika", "Gambija", "Gana", "Republika Gana", "Grčija", "Helenska republika", "Grenada", "Gvajana", "Republika Gvajana", "Gvatemala", "Republika Gvatemala", "Republika Gvineja", 
"Gvineja Bissau", "Republika Gvineja Bissau", "Republika Haiti", "Honduras", "Republika Honduras", "Hrvaška", "Indija", "Republika Indija", "Indonezija", "Republika Indonezija", "Republika Irak", "Iran", "Islamska republika Iran", "Irska", "Republika Islandija", "Italija", "Italijanska republika", "Izrael", "Jamajka", "Japonska", "Jemen", "Republika Jemen", "Jordanija", "Južna Afrika", "Republika Južna Afrika", "Južna Koreja", "Kambodža", "Kraljevina Kambodža", "Kamerun", "Republika Kamerun", "Katar", "Država Katar", "Kazahstan", "Republika Kazahstan", "Kenija", "Kirgizistan", "Kirgiška republika", "Kiribati", "Kitajska", "Kolumbija", "Republika Kolumbija", "Komori", "Kongo", "Republika Kongo", "Demokratična republika Kongo", "Republika Kostarika", "Kuba", "Republika Kuba", "Kuvajt", "Laos", "Laoška ljudska demokratična republika", "Latvija", "Lesoto", "Kraljevina Lesoto", "Libanon", "Libanonska republika", "Republika Liberija", "Libija", "Libijska arabska džamahirija", "Lihtenštajn", "Kneževina Lihtenštajn", "Litva", "Republika Litva", "Veliko vojvodstvo Luksemburg", "Madagaskar", "Republika Madagaskar", "Republika Madžarska", "Makedonija", "Republika Makedonija", "Malavi", "Maldivi", "Republika Maldivi", "Malezija", "Mali", "Republika Mali", "Republika Malta", "Maroko", "Kraljevina Maroko", "Marshallovi otoki", "Mauritius", "Republika Mauritius", "Mavretanija", "Mehika", "Združene mehiške države", "Mikronezija", "Mjanmar", "Zveza Mjanmar", "Moldavija", "Moldavija, Republika", "Kneževina Monako", "Mongolija", "Mozambik", "Republika Mozambik", "Republika Namibija", "Nauru", "Republika Nauru", "Nemčija", "Nepal", "Kraljevina Nepal", "Niger", "Republika Niger", "Nigerija", "Nikaragva", "Republika Nikaragva", "Nizozemska", "Norveška", "Kraljevina Norveška", "Nova Zelandija", "Oman", "Pakistan", "Islamska republika Pakistan", "Palau", "Republika Palau", "Republika Panama", "Papua Nova Gvineja", "Paragvaj", "Peru", "Republika Peru", "Poljska", "Republika Poljska", 
"Portugalska republika", "Romunija", "Ruanda", "Republika Ruanda", "Ruska federacija", "Saint Kitts in Nevis", "Saint Lucia", "Salomonovi otoki", "Salvador", "Republika Salvador", "San Marino", "Sao Tome in Principe", "Demokratična republika Sao Tome in Principe", "Kraljevina Saudova Arabija", "Sejšeli", "Republika Sejšeli", "Republika Senegal", "Severna Koreja", "Sierra Leone", "Republika Sierra Leone", "Singapur", "Sirija", "Sirska arabska republika", "Slonokoščena obala", "Slovaška", "Slovaška republika", "Slovenija", "Republika Slovenija", "Somalska demokratična republika", "Srbija", "Republika Srbija", "Sudan", "Republika Sudan", "Surinam", "Republika Surinam", "Svazi", "Španija", "Kraljevina Španija", "Šrilanka", "Švedska", "Kraljevina Švedska", "Švica", "Tadžikistan", "Republika Tadžikistan", "Tajska", "Tajvan", "Tajvan, Provinca Kitajske", "Tanzanija", "Togo", "Togoška republika", "Tonga", "Kraljevina Tonga", "Republika Trinidad in Tobago", "Tunizija", "Republika Tunizija", "Republika Turčija", "Turkmenistan", "Tuvalu", "Uganda", "Ukrajina", "Urugvaj", "Vzhodna republika Urugvaj", "Uzbekistan", "Vanuatu", "Republika Vanuatu", "Vatikan", "Velika Britanija", "Združeno kraljestvo", "Venezuela", "Republika Venezuela", "Vietnam", "Vzhodni Timor", "Demokratična republika Vzhodni Timor", "Samoa", "Neodvisna država Zahodna Samoa", "Zambija", "Združene države Amerike", "Združene države", "Združeni arabski emirati", "Zelenortski otoki", ) @classmethod def city_name(cls): return cls.random_element(cls.cities) @classmethod def street_name(cls): return cls.random_element(cls.streets) @classmethod def state(cls): return cls.random_element(cls.states)
mit
shaitan/elliptics
example/remove_keys_from_storage_found_removed_in_eblob.py
8
1267
#!/usr/bin/python
# -*- coding: utf-8 -*-

# This script runs over the index of a given eblob, finds all entries that are
# marked as removed locally, and removes the corresponding keys from the
# remote storage in every configured group.

import sys

from libelliptics_python import *
import eblob


class remover:
    """Connect to an elliptics cluster and purge keys whose local eblob
    entries are flagged as removed.

    Parameters:
        remotes -- iterable of remote address specs ("host:port:family") used
                   to join the cluster and fetch the route table
        groups  -- elliptics group ids the keys are removed from
        log     -- path of the elliptics log file
        mask    -- elliptics log verbosity bitmask
        path    -- path to the blob file; its index must sit next to it with
                   an .index suffix
    """

    # NOTE: defaults changed from mutable lists to tuples; both are only
    # iterated, so callers are unaffected.
    def __init__(self, remotes=(), groups=(), log='/dev/stdout', mask=8, path=''):
        self.log = elliptics_log_file(log, mask)
        self.n = elliptics_node_python(self.log)

        self.n.add_groups(groups)
        self.n.add_remotes(remotes)

        if len(self.n.get_routes()) == 0:
            # Bug fix: the original referenced an undefined name 'group' here,
            # so an empty route table surfaced as a confusing
            # "name 'group' is not defined" NameError instead of this message.
            raise NameError("Route table for groups " + str(list(groups)) + " is empty")

        b = eblob.blob(path)
        # Iterate the index including removed entries; b.removed() reports the
        # removed-flag of the entry produced by the latest iteration step.
        for key in b.iterate(want_removed=True):
            if b.removed():
                for g in groups:
                    eid = elliptics_id(list(bytearray(key)), g, -1)
                    self.n.remove(eid, 0)
                print("%s: flags: 0x%x, position: %d, data_size: %d" %
                      (b.sid(count=64), b.flags, b.position, b.data_size))


if __name__ == '__main__':
    # List of remote addresses to connect to and grab the route table from.
    # Each entry is a "host:port:family" spec (the original wrapped the string
    # in redundant parentheses, which did NOT make it a tuple).
    remotes = ['elisto19f.dev:1025:2']

    # Groups to remove the keys from.
    groups = [1, 2, 3]

    # Path to the blob to read objects from.
    # The index file must be near it with an .index suffix.
    inpath = '/opt/elliptics/eblob.2/data.0'

    try:
        remover(remotes=remotes, groups=groups, path=inpath)
    except NameError as e:
        print("Completed: %s" % e)
lgpl-3.0
heyavery/lopenr
venv/lib/python2.7/site-packages/django/core/management/commands/showmigrations.py
440
4901
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS, connections
from django.db.migrations.loader import MigrationLoader


class Command(BaseCommand):
    """Implements the ``showmigrations`` management command: prints the
    migrations known to the project either as a per-app applied/unapplied
    list (--list, default) or in global apply order (--plan)."""

    help = "Shows all available migrations for the current project"

    def add_arguments(self, parser):
        parser.add_argument('app_labels', nargs='*',
            help='App labels of applications to limit the output to.')
        parser.add_argument('--database', action='store', dest='database', default=DEFAULT_DB_ALIAS,
            help='Nominates a database to synchronize. Defaults to the "default" database.')
        # --list and --plan are mutually exclusive output formats.
        formats = parser.add_mutually_exclusive_group()
        formats.add_argument('--list', '-l', action='store_const', dest='format', const='list',
            help='Shows a list of all migrations and which are applied.')
        formats.add_argument('--plan', '-p', action='store_const', dest='format', const='plan',
            help='Shows all migrations in the order they will be applied.')
        parser.set_defaults(format='list')

    def handle(self, *args, **options):
        # Stored on self because show_plan() reads it for dependency output.
        self.verbosity = options.get('verbosity')

        # Get the database we're operating from
        db = options.get('database')
        connection = connections[db]

        if options['format'] == "plan":
            return self.show_plan(connection)
        else:
            return self.show_list(connection, options['app_labels'])

    def show_list(self, connection, app_names=None):
        """
        Shows a list of all migrations on the system, or only those of
        some named apps.
        """
        # Load migrations from disk/DB
        loader = MigrationLoader(connection, ignore_no_migrations=True)
        graph = loader.graph
        # If we were passed a list of apps, validate it
        if app_names:
            invalid_apps = []
            for app_name in app_names:
                if app_name not in loader.migrated_apps:
                    invalid_apps.append(app_name)
            if invalid_apps:
                raise CommandError("No migrations present for: %s" % (", ".join(invalid_apps)))
        # Otherwise, show all apps in alphabetic order
        else:
            app_names = sorted(loader.migrated_apps)
        # For each app, print its migrations in order from oldest (roots) to
        # newest (leaves).
        for app_name in app_names:
            self.stdout.write(app_name, self.style.MIGRATE_LABEL)
            shown = set()
            for node in graph.leaf_nodes(app_name):
                for plan_node in graph.forwards_plan(node):
                    # Only print each migration once, and only this app's.
                    if plan_node not in shown and plan_node[0] == app_name:
                        # Give it a nice title if it's a squashed one
                        title = plan_node[1]
                        if graph.nodes[plan_node].replaces:
                            title += " (%s squashed migrations)" % len(graph.nodes[plan_node].replaces)
                        # Mark it as applied/unapplied
                        if plan_node in loader.applied_migrations:
                            self.stdout.write(" [X] %s" % title)
                        else:
                            self.stdout.write(" [ ] %s" % title)
                        shown.add(plan_node)
            # If we didn't print anything, then a small message
            if not shown:
                self.stdout.write(" (no migrations)", self.style.MIGRATE_FAILURE)

    def show_plan(self, connection):
        """
        Shows all known migrations in the order they will be applied
        """
        # Load migrations from disk/DB
        loader = MigrationLoader(connection)
        graph = loader.graph
        targets = graph.leaf_nodes()
        plan = []
        seen = set()

        # Generate the plan: a forwards traversal from every leaf, with
        # duplicates (shared ancestors) filtered out via `seen`.
        for target in targets:
            for migration in graph.forwards_plan(target):
                if migration not in seen:
                    plan.append(graph.nodes[migration])
                    seen.add(migration)

        # Output
        def print_deps(migration):
            # Render the dependency list shown at verbosity >= 2, resolving
            # "__first__" placeholders to the dependency app's actual root.
            out = []
            for dep in migration.dependencies:
                if dep[1] == "__first__":
                    roots = graph.root_nodes(dep[0])
                    dep = roots[0] if roots else (dep[0], "__first__")
                out.append("%s.%s" % dep)
            if out:
                return " ... (%s)" % ", ".join(out)
            return ""

        for migration in plan:
            deps = ""
            if self.verbosity >= 2:
                deps = print_deps(migration)
            if (migration.app_label, migration.name) in loader.applied_migrations:
                self.stdout.write("[X] %s%s" % (migration, deps))
            else:
                self.stdout.write("[ ] %s%s" % (migration, deps))
mit
etherkit/OpenBeacon2
client/macos/venv/lib/python3.8/site-packages/pip/_vendor/chardet/langhungarianmodel.py
269
12592
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Communicator client code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### # 255: Control characters that usually does not exist in any text # 254: Carriage/Return # 253: symbol (punctuation) that does not belong to word # 252: 0 - 9 # Character Mapping Table: Latin2_HungarianCharToOrderMap = ( 255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 253, 28, 40, 54, 45, 32, 50, 49, 38, 39, 53, 36, 41, 34, 35, 47, 46, 71, 43, 33, 37, 57, 48, 64, 68, 55, 52,253,253,253,253,253, 253, 2, 18, 26, 17, 1, 27, 12, 20, 9, 22, 7, 6, 13, 4, 8, 23, 67, 10, 5, 3, 21, 19, 65, 62, 16, 11,253,253,253,253,253, 159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174, 
175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190, 191,192,193,194,195,196,197, 75,198,199,200,201,202,203,204,205, 79,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220, 221, 51, 81,222, 78,223,224,225,226, 44,227,228,229, 61,230,231, 232,233,234, 58,235, 66, 59,236,237,238, 60, 69, 63,239,240,241, 82, 14, 74,242, 70, 80,243, 72,244, 15, 83, 77, 84, 30, 76, 85, 245,246,247, 25, 73, 42, 24,248,249,250, 31, 56, 29,251,252,253, ) win1250HungarianCharToOrderMap = ( 255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 253, 28, 40, 54, 45, 32, 50, 49, 38, 39, 53, 36, 41, 34, 35, 47, 46, 72, 43, 33, 37, 57, 48, 64, 68, 55, 52,253,253,253,253,253, 253, 2, 18, 26, 17, 1, 27, 12, 20, 9, 22, 7, 6, 13, 4, 8, 23, 67, 10, 5, 3, 21, 19, 65, 62, 16, 11,253,253,253,253,253, 161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176, 177,178,179,180, 78,181, 69,182,183,184,185,186,187,188,189,190, 191,192,193,194,195,196,197, 76,198,199,200,201,202,203,204,205, 81,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220, 221, 51, 83,222, 80,223,224,225,226, 44,227,228,229, 61,230,231, 232,233,234, 58,235, 66, 59,236,237,238, 60, 70, 63,239,240,241, 84, 14, 75,242, 71, 82,243, 73,244, 15, 85, 79, 86, 30, 77, 87, 245,246,247, 25, 74, 42, 24,248,249,250, 31, 56, 29,251,252,253, ) # Model Table: # total sequences: 100% # first 512 sequences: 94.7368% # first 1024 sequences:5.2623% # rest sequences: 0.8894% # negative sequences: 0.0009% HungarianLangModel = ( 0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3, 3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,2,3,3,1,1,2,2,2,2,2,1,2, 3,2,2,3,3,3,3,3,2,3,3,3,3,3,3,1,2,3,3,3,3,2,3,3,1,1,3,3,0,1,1,1, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0, 
3,2,1,3,3,3,3,3,2,3,3,3,3,3,1,1,2,3,3,3,3,3,3,3,1,1,3,2,0,1,1,1, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0, 3,3,3,3,3,3,3,3,3,3,3,1,1,2,3,3,3,1,3,3,3,3,3,1,3,3,2,2,0,3,2,3, 0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0, 3,3,3,3,3,3,2,3,3,3,2,3,3,2,3,3,3,3,3,2,3,3,2,2,3,2,3,2,0,3,2,2, 0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0, 3,3,3,3,3,3,2,3,3,3,3,3,2,3,3,3,1,2,3,2,2,3,1,2,3,3,2,2,0,3,3,3, 0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0, 3,3,3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,3,2,3,3,3,3,2,3,3,3,3,0,2,3,2, 0,0,0,1,1,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0, 3,3,3,3,3,3,3,3,3,3,3,1,1,1,3,3,2,1,3,2,2,3,2,1,3,2,2,1,0,3,3,1, 0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0, 3,2,2,3,3,3,3,3,1,2,3,3,3,3,1,2,1,3,3,3,3,2,2,3,1,1,3,2,0,1,1,1, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0, 3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,2,1,3,3,3,3,3,2,2,1,3,3,3,0,1,1,2, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0, 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,2,3,3,3,2,0,3,2,3, 0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,1,0, 3,3,3,3,3,3,2,3,3,3,2,3,2,3,3,3,1,3,2,2,2,3,1,1,3,3,1,1,0,3,3,2, 0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0, 3,3,3,3,3,3,3,2,3,3,3,2,3,2,3,3,3,2,3,3,3,3,3,1,2,3,2,2,0,2,2,2, 0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0, 3,3,3,2,2,2,3,1,3,3,2,2,1,3,3,3,1,1,3,1,2,3,2,3,2,2,2,1,0,2,2,2, 0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0, 3,1,1,3,3,3,3,3,1,2,3,3,3,3,1,2,1,3,3,3,2,2,3,2,1,0,3,2,0,1,1,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,1,1,3,3,3,3,3,1,2,3,3,3,3,1,1,0,3,3,3,3,0,2,3,0,0,2,1,0,1,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,3,3,3,2,2,3,3,2,2,2,2,3,3,0,1,2,3,2,3,2,2,3,2,1,2,0,2,2,2, 0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0, 
3,3,3,3,3,3,1,2,3,3,3,2,1,2,3,3,2,2,2,3,2,3,3,1,3,3,1,1,0,2,3,2, 0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0, 3,3,3,1,2,2,2,2,3,3,3,1,1,1,3,3,1,1,3,1,1,3,2,1,2,3,1,1,0,2,2,2, 0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0, 3,3,3,2,1,2,1,1,3,3,1,1,1,1,3,3,1,1,2,2,1,2,1,1,2,2,1,1,0,2,2,1, 0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0, 3,3,3,1,1,2,1,1,3,3,1,0,1,1,3,3,2,0,1,1,2,3,1,0,2,2,1,0,0,1,3,2, 0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0, 3,2,1,3,3,3,3,3,1,2,3,2,3,3,2,1,1,3,2,3,2,1,2,2,0,1,2,1,0,0,1,1, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0, 3,3,3,3,2,2,2,2,3,1,2,2,1,1,3,3,0,3,2,1,2,3,2,1,3,3,1,1,0,2,1,3, 0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0, 3,3,3,2,2,2,3,2,3,3,3,2,1,1,3,3,1,1,1,2,2,3,2,3,2,2,2,1,0,2,2,1, 0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0, 1,0,0,3,3,3,3,3,0,0,3,3,2,3,0,0,0,2,3,3,1,0,1,2,0,0,1,1,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,1,2,3,3,3,3,3,1,2,3,3,2,2,1,1,0,3,3,2,2,1,2,2,1,0,2,2,0,1,1,1, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,2,2,1,3,1,2,3,3,2,2,1,1,2,2,1,1,1,1,3,2,1,1,1,1,2,1,0,1,2,1, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0, 2,3,3,1,1,1,1,1,3,3,3,0,1,1,3,3,1,1,1,1,1,2,2,0,3,1,1,2,0,2,1,1, 0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0, 3,1,0,1,2,1,2,2,0,1,2,3,1,2,0,0,0,2,1,1,1,1,1,2,0,0,1,1,0,0,0,0, 1,2,1,2,2,2,1,2,1,2,0,2,0,2,2,1,1,2,1,1,2,1,1,1,0,1,0,0,0,1,1,0, 1,1,1,2,3,2,3,3,0,1,2,2,3,1,0,1,0,2,1,2,2,0,1,1,0,0,1,1,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 1,0,0,3,3,2,2,1,0,0,3,2,3,2,0,0,0,1,1,3,0,0,1,1,0,0,2,1,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,1,1,2,2,3,3,1,0,1,3,2,3,1,1,1,0,1,1,1,1,1,3,1,0,0,2,2,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
3,1,1,1,2,2,2,1,0,1,2,3,3,2,0,0,0,2,1,1,1,2,1,1,1,0,1,1,1,0,0,0, 1,2,2,2,2,2,1,1,1,2,0,2,1,1,1,1,1,2,1,1,1,1,1,1,0,1,1,1,0,0,1,1, 3,2,2,1,0,0,1,1,2,2,0,3,0,1,2,1,1,0,0,1,1,1,0,1,1,1,1,0,2,1,1,1, 2,2,1,1,1,2,1,2,1,1,1,1,1,1,1,2,1,1,1,2,3,1,1,1,1,1,1,1,1,1,0,1, 2,3,3,0,1,0,0,0,3,3,1,0,0,1,2,2,1,0,0,0,0,2,0,0,1,1,1,0,2,1,1,1, 2,1,1,1,1,1,1,2,1,1,0,1,1,0,1,1,1,0,1,2,1,1,0,1,1,1,1,1,1,1,0,1, 2,3,3,0,1,0,0,0,2,2,0,0,0,0,1,2,2,0,0,0,0,1,0,0,1,1,0,0,2,0,1,0, 2,1,1,1,1,2,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,2,0,1,1,1,1,1,0,1, 3,2,2,0,1,0,1,0,2,3,2,0,0,1,2,2,1,0,0,1,1,1,0,0,2,1,0,1,2,2,1,1, 2,1,1,1,1,1,1,2,1,1,1,1,1,1,0,2,1,0,1,1,0,1,1,1,0,1,1,2,1,1,0,1, 2,2,2,0,0,1,0,0,2,2,1,1,0,0,2,1,1,0,0,0,1,2,0,0,2,1,0,0,2,1,1,1, 2,1,1,1,1,2,1,2,1,1,1,2,2,1,1,2,1,1,1,2,1,1,1,1,1,1,1,1,1,1,0,1, 1,2,3,0,0,0,1,0,3,2,1,0,0,1,2,1,1,0,0,0,0,2,1,0,1,1,0,0,2,1,2,1, 1,1,0,0,0,1,0,1,1,1,1,1,2,0,0,1,0,0,0,2,0,0,1,1,1,1,1,1,1,1,0,1, 3,0,0,2,1,2,2,1,0,0,2,1,2,2,0,0,0,2,1,1,1,0,1,1,0,0,1,1,2,0,0,0, 1,2,1,2,2,1,1,2,1,2,0,1,1,1,1,1,1,1,1,1,2,1,1,0,0,1,1,1,1,0,0,1, 1,3,2,0,0,0,1,0,2,2,2,0,0,0,2,2,1,0,0,0,0,3,1,1,1,1,0,0,2,1,1,1, 2,1,0,1,1,1,0,1,1,1,1,1,1,1,0,2,1,0,0,1,0,1,1,0,1,1,1,1,1,1,0,1, 2,3,2,0,0,0,1,0,2,2,0,0,0,0,2,1,1,0,0,0,0,2,1,0,1,1,0,0,2,1,1,0, 2,1,1,1,1,2,1,2,1,2,0,1,1,1,0,2,1,1,1,2,1,1,1,1,0,1,1,1,1,1,0,1, 3,1,1,2,2,2,3,2,1,1,2,2,1,1,0,1,0,2,2,1,1,1,1,1,0,0,1,1,0,1,1,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,2,2,0,0,0,0,0,2,2,0,0,0,0,2,2,1,0,0,0,1,1,0,0,1,2,0,0,2,1,1,1, 2,2,1,1,1,2,1,2,1,1,0,1,1,1,1,2,1,1,1,2,1,1,1,1,0,1,2,1,1,1,0,1, 1,0,0,1,2,3,2,1,0,0,2,0,1,1,0,0,0,1,1,1,1,0,1,1,0,0,1,0,0,0,0,0, 1,2,1,2,1,2,1,1,1,2,0,2,1,1,1,0,1,2,0,0,1,1,1,0,0,0,0,0,0,0,0,0, 2,3,2,0,0,0,0,0,1,1,2,1,0,0,1,1,1,0,0,0,0,2,0,0,1,1,0,0,2,1,1,1, 2,1,1,1,1,1,1,2,1,0,1,1,1,1,0,2,1,1,1,1,1,1,0,1,0,1,1,1,1,1,0,1, 1,2,2,0,1,1,1,0,2,2,2,0,0,0,3,2,1,0,0,0,1,1,0,0,1,1,0,1,1,1,0,0, 1,1,0,1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,2,1,1,1,0,0,1,1,1,0,1,0,1, 
2,1,0,2,1,1,2,2,1,1,2,1,1,1,0,0,0,1,1,0,1,1,1,1,0,0,1,1,1,0,0,0, 1,2,2,2,2,2,1,1,1,2,0,2,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,0,0,1,0, 1,2,3,0,0,0,1,0,2,2,0,0,0,0,2,2,0,0,0,0,0,1,0,0,1,0,0,0,2,0,1,0, 2,1,1,1,1,1,0,2,0,0,0,1,2,1,1,1,1,0,1,2,0,1,0,1,0,1,1,1,0,1,0,1, 2,2,2,0,0,0,1,0,2,1,2,0,0,0,1,1,2,0,0,0,0,1,0,0,1,1,0,0,2,1,0,1, 2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,0,1,1,1,1,1,0,1, 1,2,2,0,0,0,1,0,2,2,2,0,0,0,1,1,0,0,0,0,0,1,1,0,2,0,0,1,1,1,0,1, 1,0,1,1,1,1,1,1,0,1,1,1,1,0,0,1,0,0,1,1,0,1,0,1,1,1,1,1,0,0,0,1, 1,0,0,1,0,1,2,1,0,0,1,1,1,2,0,0,0,1,1,0,1,0,1,1,0,0,1,0,0,0,0,0, 0,2,1,2,1,1,1,1,1,2,0,2,0,1,1,0,1,2,1,0,1,1,1,0,0,0,0,0,0,1,0,0, 2,1,1,0,1,2,0,0,1,1,1,0,0,0,1,1,0,0,0,0,0,1,0,0,1,0,0,0,2,1,0,1, 2,2,1,1,1,1,1,2,1,1,0,1,1,1,1,2,1,1,1,2,1,1,0,1,0,1,1,1,1,1,0,1, 1,2,2,0,0,0,0,0,1,1,0,0,0,0,2,1,0,0,0,0,0,2,0,0,2,2,0,0,2,0,0,1, 2,1,1,1,1,1,1,1,0,1,1,0,1,1,0,1,0,0,0,1,1,1,1,0,0,1,1,1,1,0,0,1, 1,1,2,0,0,3,1,0,2,1,1,1,0,0,1,1,1,0,0,0,1,1,0,0,0,1,0,0,1,0,1,0, 1,2,1,0,1,1,1,2,1,1,0,1,1,1,1,1,0,0,0,1,1,1,1,1,0,1,0,0,0,1,0,0, 2,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,2,0,0,0, 2,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,2,1,1,0,0,1,1,1,1,1,0,1, 2,1,1,1,2,1,1,1,0,1,1,2,1,0,0,0,0,1,1,1,1,0,1,0,0,0,0,1,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 1,1,0,1,1,1,1,1,0,0,1,1,2,1,0,0,0,1,1,0,0,0,1,1,0,0,1,0,1,0,0,0, 1,2,1,1,1,1,1,1,1,1,0,1,0,1,1,1,1,1,1,0,1,1,1,0,0,0,0,0,0,1,0,0, 2,0,0,0,1,1,1,1,0,0,1,1,0,0,0,0,0,1,1,1,2,0,0,1,0,0,1,0,1,0,0,0, 0,1,1,1,1,1,1,1,1,2,0,1,1,1,1,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0, 1,0,0,1,1,1,1,1,0,0,2,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0, 0,1,1,1,1,1,1,0,1,1,0,1,0,1,1,0,1,1,0,0,1,1,1,0,0,0,0,0,0,0,0,0, 1,0,0,1,1,1,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0, 0,1,1,1,1,1,0,0,1,1,0,1,0,1,0,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0, 0,0,0,1,0,0,0,0,0,0,1,1,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,1,1,1,0,1,0,0,1,1,0,1,0,1,1,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0, 
2,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,0,0,1,0,0,1,0,1,0,1,1,1,0,0,1,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 1,0,0,1,1,1,1,0,0,0,1,1,1,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0, 0,1,1,1,1,1,1,0,1,1,0,1,0,1,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0, ) Latin2HungarianModel = { 'char_to_order_map': Latin2_HungarianCharToOrderMap, 'precedence_matrix': HungarianLangModel, 'typical_positive_ratio': 0.947368, 'keep_english_letter': True, 'charset_name': "ISO-8859-2", 'language': 'Hungarian', } Win1250HungarianModel = { 'char_to_order_map': win1250HungarianCharToOrderMap, 'precedence_matrix': HungarianLangModel, 'typical_positive_ratio': 0.947368, 'keep_english_letter': True, 'charset_name': "windows-1250", 'language': 'Hungarian', }
gpl-3.0
zearom32/SmartBooks
books/goods.py
1
1945
from django.shortcuts import render, render_to_response
from django.http import HttpResponse, HttpResponseRedirect
from django.template import RequestContext, loader
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.core import serializers
from django.forms.models import model_to_dict

from books.models import *
from datetime import *
import re
import random
import json
from myutils import *


def books_of_a_seller(request):
    """Return the ISBNs of every book a given seller offers.

    Request body (JSON): {"seller": <username>}.
    Response (JSON): {"state": 0, "books": [{"isbn": ...}, ...]} on success,
    or state(0) when the seller has no goods.
    """
    body = json.loads(request.body)
    seller = body.get('seller')
    goods = GoodsInfo.objects.filter(seller__username=seller)
    if not goods:
        return JsonReturn(json.dumps(state(0)))
    ans = dict()
    ans['state'] = 0
    book = []
    for g in goods:
        b = g.book
        w = dict()
        w['isbn'] = b.isbn
        book.append(w)
    ans['books'] = book
    return JsonReturn(json.dumps(ans))


def goodsinfo(request):
    """Return the goods record for one (seller, isbn) pair.

    Request body (JSON): {"isbn": ..., "username": ...}.
    Response (JSON): the GoodsInfo fields plus "state": 0, or state(0) when
    either parameter is missing or no matching record exists.
    """
    body = json.loads(request.body)
    isbn = body.get('isbn')
    username = body.get('username')
    if not isbn or not username:
        return JsonReturn(json.dumps(state(0)))
    # Bug fix: the lookup used 'seller__usrname' (typo), which made Django
    # raise FieldError on every request instead of returning the record.
    goods = GoodsInfo.objects.filter(seller__username=username, book__isbn=isbn)
    if not goods:
        return JsonReturn(json.dumps(state(0)))
    goods = goods[0]
    ans = model_to_dict(goods)
    ans['state'] = 0
    return JsonReturn(json.dumps(ans))


def sellers_of_a_book(request):
    """Return the usernames of every seller offering a given book.

    Request body (JSON): {"isbn": ...}.
    Response (JSON): {"state": 0, "sellers": [{"seller": ...}, ...]} on
    success, or state(0) when nobody sells the book.
    """
    body = json.loads(request.body)
    isbn = body.get('isbn')
    goods = GoodsInfo.objects.filter(book__isbn=isbn)
    # Leftover debug prints that dumped every GoodsInfo row to stdout on each
    # request were removed here.
    if not goods:
        return JsonReturn(json.dumps(state(0)))
    ans = dict()
    ans['state'] = 0
    user = []
    for k in goods:
        u = k.seller
        m = dict()
        m['seller'] = u.username
        user.append(m)
    ans['sellers'] = user
    return JsonReturn(json.dumps(ans))
mit
kvar/ansible
lib/ansible/modules/remote_management/ucs/ucs_vlans.py
64
6941
#!/usr/bin/python # -*- coding: utf-8 -*- # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'certified'} DOCUMENTATION = r''' --- module: ucs_vlans short_description: Configures VLANs on Cisco UCS Manager description: - Configures VLANs on Cisco UCS Manager. - Examples can be used with the UCS Platform Emulator U(https://communities.cisco.com/ucspe). extends_documentation_fragment: ucs options: state: description: - If C(present), will verify VLANs are present and will create if needed. - If C(absent), will verify VLANs are absent and will delete if needed. choices: [present, absent] default: present name: description: - The name assigned to the VLAN. - The VLAN name is case sensitive. - This name can be between 1 and 32 alphanumeric characters. - "You cannot use spaces or any special characters other than - (hyphen), \"_\" (underscore), : (colon), and . (period)." - You cannot change this name after the VLAN is created. required: yes multicast_policy: description: - The multicast policy associated with this VLAN. - This option is only valid if the Sharing Type field is set to None or Primary. default: '' fabric: description: - "The fabric configuration of the VLAN. This can be one of the following:" - "common - The VLAN applies to both fabrics and uses the same configuration parameters in both cases." - "A — The VLAN only applies to fabric A." - "B — The VLAN only applies to fabric B." - For upstream disjoint L2 networks, Cisco recommends that you choose common to create VLANs that apply to both fabrics. choices: [common, A, B] default: common id: description: - The unique string identifier assigned to the VLAN. - A VLAN ID can be between '1' and '3967', or between '4048' and '4093'. - You cannot create VLANs with IDs from 4030 to 4047. 
This range of VLAN IDs is reserved. - The VLAN IDs you specify must also be supported on the switch that you are using. - VLANs in the LAN cloud and FCoE VLANs in the SAN cloud must have different IDs. - Optional if state is absent. required: yes sharing: description: - The Sharing Type field. - "Whether this VLAN is subdivided into private or secondary VLANs. This can be one of the following:" - "none - This VLAN does not have any secondary or private VLANs. This is a regular VLAN." - "primary - This VLAN can have one or more secondary VLANs, as shown in the Secondary VLANs area. This VLAN is a primary VLAN in the private VLAN domain." - "isolated - This is a private VLAN associated with a primary VLAN. This VLAN is an Isolated VLAN." - "community - This VLAN can communicate with other ports on the same community VLAN as well as the promiscuous port. This VLAN is a Community VLAN." choices: [none, primary, isolated, community] default: none native: description: - Designates the VLAN as a native VLAN. 
choices: ['yes', 'no'] default: 'no' requirements: - ucsmsdk author: - David Soper (@dsoper2) - CiscoUcs (@CiscoUcs) version_added: '2.5' ''' EXAMPLES = r''' - name: Configure VLAN ucs_vlans: hostname: 172.16.143.150 username: admin password: password name: vlan2 id: '2' native: 'yes' - name: Remove VLAN ucs_vlans: hostname: 172.16.143.150 username: admin password: password name: vlan2 state: absent ''' RETURN = r''' # ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.remote_management.ucs import UCSModule, ucs_argument_spec def main(): argument_spec = ucs_argument_spec argument_spec.update( name=dict(type='str', required=True), multicast_policy=dict(type='str', default=''), fabric=dict(type='str', default='common', choices=['common', 'A', 'B']), id=dict(type='str'), sharing=dict(type='str', default='none', choices=['none', 'primary', 'isolated', 'community']), native=dict(type='str', default='no', choices=['yes', 'no']), state=dict(type='str', default='present', choices=['present', 'absent']), ) module = AnsibleModule( argument_spec, supports_check_mode=True, required_if=[ ['state', 'present', ['id']], ], ) ucs = UCSModule(module) err = False # UCSModule creation above verifies ucsmsdk is present and exits on failure, so additional imports are done below. 
from ucsmsdk.mometa.fabric.FabricVlan import FabricVlan changed = False try: mo_exists = False props_match = False # dn is fabric/lan/net-<name> for common vlans or fabric/lan/[A or B]/net-<name> for A or B dn_base = 'fabric/lan' if module.params['fabric'] != 'common': dn_base += '/' + module.params['fabric'] dn = dn_base + '/net-' + module.params['name'] mo = ucs.login_handle.query_dn(dn) if mo: mo_exists = True if module.params['state'] == 'absent': # mo must exist but all properties do not have to match if mo_exists: if not module.check_mode: ucs.login_handle.remove_mo(mo) ucs.login_handle.commit() changed = True else: if mo_exists: # check top-level mo props kwargs = dict(id=module.params['id']) kwargs['default_net'] = module.params['native'] kwargs['sharing'] = module.params['sharing'] kwargs['mcast_policy_name'] = module.params['multicast_policy'] if (mo.check_prop_match(**kwargs)): props_match = True if not props_match: if not module.check_mode: # create if mo does not already exist mo = FabricVlan( parent_mo_or_dn=dn_base, name=module.params['name'], id=module.params['id'], default_net=module.params['native'], sharing=module.params['sharing'], mcast_policy_name=module.params['multicast_policy'], ) ucs.login_handle.add_mo(mo, True) ucs.login_handle.commit() changed = True except Exception as e: err = True ucs.result['msg'] = "setup error: %s " % str(e) ucs.result['changed'] = changed if err: module.fail_json(**ucs.result) module.exit_json(**ucs.result) if __name__ == '__main__': main()
gpl-3.0
thfield/sf-base-election-data
venv/lib/python3.4/site-packages/setuptools/compat.py
456
2094
# Python 2/3 compatibility shims for setuptools: one module-level name per
# renamed builtin/stdlib object, bound from whichever location the running
# interpreter provides.

import sys
import itertools

# Flags for the running interpreter's major version.
PY3 = sys.version_info >= (3,)
PY2 = not PY3

if PY2:
    basestring = basestring
    import __builtin__ as builtins
    import ConfigParser
    from StringIO import StringIO
    # Python 2's StringIO accepts bytes, so it doubles as BytesIO.
    BytesIO = StringIO
    # Accessors for function internals, renamed to dunders in Python 3.
    func_code = lambda o: o.func_code
    func_globals = lambda o: o.func_globals
    im_func = lambda o: o.im_func
    from htmlentitydefs import name2codepoint
    import httplib
    from BaseHTTPServer import HTTPServer
    from SimpleHTTPServer import SimpleHTTPRequestHandler
    from BaseHTTPServer import BaseHTTPRequestHandler
    iteritems = lambda o: o.iteritems()
    long_type = long
    maxsize = sys.maxint
    unichr = unichr
    unicode = unicode
    bytes = str
    from urllib import url2pathname, splittag, pathname2url
    import urllib2
    from urllib2 import urlopen, HTTPError, URLError, unquote, splituser
    from urlparse import urlparse, urlunparse, urljoin, urlsplit, urlunsplit
    filterfalse = itertools.ifilterfalse

    # The three-argument raise is a syntax error under Python 3, so it is
    # hidden inside exec() to keep this module importable there.
    exec("""def reraise(tp, value, tb=None):
    raise tp, value, tb""")

if PY3:
    basestring = str
    import builtins
    import configparser as ConfigParser
    from io import StringIO, BytesIO
    func_code = lambda o: o.__code__
    func_globals = lambda o: o.__globals__
    im_func = lambda o: o.__func__
    from html.entities import name2codepoint
    import http.client as httplib
    from http.server import HTTPServer, SimpleHTTPRequestHandler
    from http.server import BaseHTTPRequestHandler
    iteritems = lambda o: o.items()
    long_type = int
    maxsize = sys.maxsize
    unichr = chr
    unicode = str
    bytes = bytes
    from urllib.error import HTTPError, URLError
    import urllib.request as urllib2
    from urllib.request import urlopen, url2pathname, pathname2url
    from urllib.parse import (
        urlparse, urlunparse, unquote, splituser, urljoin, urlsplit,
        urlunsplit, splittag,
    )
    filterfalse = itertools.filterfalse

    def reraise(tp, value, tb=None):
        # Re-raise `value` with traceback `tb`, preserving exception chaining.
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value
bsd-3-clause
mingwpy/numpy
tools/swig/test/testArray.py
121
12933
#! /usr/bin/env python from __future__ import division, absolute_import, print_function # System imports from distutils.util import get_platform import os import sys import unittest # Import NumPy import numpy as np major, minor = [ int(d) for d in np.__version__.split(".")[:2] ] if major == 0: BadListError = TypeError else: BadListError = ValueError import Array ###################################################################### class Array1TestCase(unittest.TestCase): def setUp(self): self.length = 5 self.array1 = Array.Array1(self.length) def testConstructor0(self): "Test Array1 default constructor" a = Array.Array1() self.failUnless(isinstance(a, Array.Array1)) self.failUnless(len(a) == 0) def testConstructor1(self): "Test Array1 length constructor" self.failUnless(isinstance(self.array1, Array.Array1)) def testConstructor2(self): "Test Array1 array constructor" na = np.arange(self.length) aa = Array.Array1(na) self.failUnless(isinstance(aa, Array.Array1)) def testConstructor3(self): "Test Array1 copy constructor" for i in range(self.array1.length()): self.array1[i] = i arrayCopy = Array.Array1(self.array1) self.failUnless(arrayCopy == self.array1) def testConstructorBad(self): "Test Array1 length constructor, negative" self.assertRaises(ValueError, Array.Array1, -4) def testLength(self): "Test Array1 length method" self.failUnless(self.array1.length() == self.length) def testLen(self): "Test Array1 __len__ method" self.failUnless(len(self.array1) == self.length) def testResize0(self): "Test Array1 resize method, length" newLen = 2 * self.length self.array1.resize(newLen) self.failUnless(len(self.array1) == newLen) def testResize1(self): "Test Array1 resize method, array" a = np.zeros((2*self.length,), dtype='l') self.array1.resize(a) self.failUnless(len(self.array1) == a.size) def testResizeBad(self): "Test Array1 resize method, negative length" self.assertRaises(ValueError, self.array1.resize, -5) def testSetGet(self): "Test Array1 __setitem__, __getitem__ 
methods" n = self.length for i in range(n): self.array1[i] = i*i for i in range(n): self.failUnless(self.array1[i] == i*i) def testSetBad1(self): "Test Array1 __setitem__ method, negative index" self.assertRaises(IndexError, self.array1.__setitem__, -1, 0) def testSetBad2(self): "Test Array1 __setitem__ method, out-of-range index" self.assertRaises(IndexError, self.array1.__setitem__, self.length+1, 0) def testGetBad1(self): "Test Array1 __getitem__ method, negative index" self.assertRaises(IndexError, self.array1.__getitem__, -1) def testGetBad2(self): "Test Array1 __getitem__ method, out-of-range index" self.assertRaises(IndexError, self.array1.__getitem__, self.length+1) def testAsString(self): "Test Array1 asString method" for i in range(self.array1.length()): self.array1[i] = i+1 self.failUnless(self.array1.asString() == "[ 1, 2, 3, 4, 5 ]") def testStr(self): "Test Array1 __str__ method" for i in range(self.array1.length()): self.array1[i] = i-2 self.failUnless(str(self.array1) == "[ -2, -1, 0, 1, 2 ]") def testView(self): "Test Array1 view method" for i in range(self.array1.length()): self.array1[i] = i+1 a = self.array1.view() self.failUnless(isinstance(a, np.ndarray)) self.failUnless(len(a) == self.length) self.failUnless((a == [1, 2, 3, 4, 5]).all()) ###################################################################### class Array2TestCase(unittest.TestCase): def setUp(self): self.nrows = 5 self.ncols = 4 self.array2 = Array.Array2(self.nrows, self.ncols) def testConstructor0(self): "Test Array2 default constructor" a = Array.Array2() self.failUnless(isinstance(a, Array.Array2)) self.failUnless(len(a) == 0) def testConstructor1(self): "Test Array2 nrows, ncols constructor" self.failUnless(isinstance(self.array2, Array.Array2)) def testConstructor2(self): "Test Array2 array constructor" na = np.zeros((3, 4), dtype="l") aa = Array.Array2(na) self.failUnless(isinstance(aa, Array.Array2)) def testConstructor3(self): "Test Array2 copy constructor" for i in 
range(self.nrows): for j in range(self.ncols): self.array2[i][j] = i * j arrayCopy = Array.Array2(self.array2) self.failUnless(arrayCopy == self.array2) def testConstructorBad1(self): "Test Array2 nrows, ncols constructor, negative nrows" self.assertRaises(ValueError, Array.Array2, -4, 4) def testConstructorBad2(self): "Test Array2 nrows, ncols constructor, negative ncols" self.assertRaises(ValueError, Array.Array2, 4, -4) def testNrows(self): "Test Array2 nrows method" self.failUnless(self.array2.nrows() == self.nrows) def testNcols(self): "Test Array2 ncols method" self.failUnless(self.array2.ncols() == self.ncols) def testLen(self): "Test Array2 __len__ method" self.failUnless(len(self.array2) == self.nrows*self.ncols) def testResize0(self): "Test Array2 resize method, size" newRows = 2 * self.nrows newCols = 2 * self.ncols self.array2.resize(newRows, newCols) self.failUnless(len(self.array2) == newRows * newCols) def testResize1(self): "Test Array2 resize method, array" a = np.zeros((2*self.nrows, 2*self.ncols), dtype='l') self.array2.resize(a) self.failUnless(len(self.array2) == a.size) def testResizeBad1(self): "Test Array2 resize method, negative nrows" self.assertRaises(ValueError, self.array2.resize, -5, 5) def testResizeBad2(self): "Test Array2 resize method, negative ncols" self.assertRaises(ValueError, self.array2.resize, 5, -5) def testSetGet1(self): "Test Array2 __setitem__, __getitem__ methods" m = self.nrows n = self.ncols array1 = [ ] a = np.arange(n, dtype="l") for i in range(m): array1.append(Array.Array1(i*a)) for i in range(m): self.array2[i] = array1[i] for i in range(m): self.failUnless(self.array2[i] == array1[i]) def testSetGet2(self): "Test Array2 chained __setitem__, __getitem__ methods" m = self.nrows n = self.ncols for i in range(m): for j in range(n): self.array2[i][j] = i*j for i in range(m): for j in range(n): self.failUnless(self.array2[i][j] == i*j) def testSetBad1(self): "Test Array2 __setitem__ method, negative index" a = 
Array.Array1(self.ncols) self.assertRaises(IndexError, self.array2.__setitem__, -1, a) def testSetBad2(self): "Test Array2 __setitem__ method, out-of-range index" a = Array.Array1(self.ncols) self.assertRaises(IndexError, self.array2.__setitem__, self.nrows+1, a) def testGetBad1(self): "Test Array2 __getitem__ method, negative index" self.assertRaises(IndexError, self.array2.__getitem__, -1) def testGetBad2(self): "Test Array2 __getitem__ method, out-of-range index" self.assertRaises(IndexError, self.array2.__getitem__, self.nrows+1) def testAsString(self): "Test Array2 asString method" result = """\ [ [ 0, 1, 2, 3 ], [ 1, 2, 3, 4 ], [ 2, 3, 4, 5 ], [ 3, 4, 5, 6 ], [ 4, 5, 6, 7 ] ] """ for i in range(self.nrows): for j in range(self.ncols): self.array2[i][j] = i+j self.failUnless(self.array2.asString() == result) def testStr(self): "Test Array2 __str__ method" result = """\ [ [ 0, -1, -2, -3 ], [ 1, 0, -1, -2 ], [ 2, 1, 0, -1 ], [ 3, 2, 1, 0 ], [ 4, 3, 2, 1 ] ] """ for i in range(self.nrows): for j in range(self.ncols): self.array2[i][j] = i-j self.failUnless(str(self.array2) == result) def testView(self): "Test Array2 view method" a = self.array2.view() self.failUnless(isinstance(a, np.ndarray)) self.failUnless(len(a) == self.nrows) ###################################################################### class ArrayZTestCase(unittest.TestCase): def setUp(self): self.length = 5 self.array3 = Array.ArrayZ(self.length) def testConstructor0(self): "Test ArrayZ default constructor" a = Array.ArrayZ() self.failUnless(isinstance(a, Array.ArrayZ)) self.failUnless(len(a) == 0) def testConstructor1(self): "Test ArrayZ length constructor" self.failUnless(isinstance(self.array3, Array.ArrayZ)) def testConstructor2(self): "Test ArrayZ array constructor" na = np.arange(self.length, dtype=np.complex128) aa = Array.ArrayZ(na) self.failUnless(isinstance(aa, Array.ArrayZ)) def testConstructor3(self): "Test ArrayZ copy constructor" for i in range(self.array3.length()): self.array3[i] 
= complex(i,-i) arrayCopy = Array.ArrayZ(self.array3) self.failUnless(arrayCopy == self.array3) def testConstructorBad(self): "Test ArrayZ length constructor, negative" self.assertRaises(ValueError, Array.ArrayZ, -4) def testLength(self): "Test ArrayZ length method" self.failUnless(self.array3.length() == self.length) def testLen(self): "Test ArrayZ __len__ method" self.failUnless(len(self.array3) == self.length) def testResize0(self): "Test ArrayZ resize method, length" newLen = 2 * self.length self.array3.resize(newLen) self.failUnless(len(self.array3) == newLen) def testResize1(self): "Test ArrayZ resize method, array" a = np.zeros((2*self.length,), dtype=np.complex128) self.array3.resize(a) self.failUnless(len(self.array3) == a.size) def testResizeBad(self): "Test ArrayZ resize method, negative length" self.assertRaises(ValueError, self.array3.resize, -5) def testSetGet(self): "Test ArrayZ __setitem__, __getitem__ methods" n = self.length for i in range(n): self.array3[i] = i*i for i in range(n): self.failUnless(self.array3[i] == i*i) def testSetBad1(self): "Test ArrayZ __setitem__ method, negative index" self.assertRaises(IndexError, self.array3.__setitem__, -1, 0) def testSetBad2(self): "Test ArrayZ __setitem__ method, out-of-range index" self.assertRaises(IndexError, self.array3.__setitem__, self.length+1, 0) def testGetBad1(self): "Test ArrayZ __getitem__ method, negative index" self.assertRaises(IndexError, self.array3.__getitem__, -1) def testGetBad2(self): "Test ArrayZ __getitem__ method, out-of-range index" self.assertRaises(IndexError, self.array3.__getitem__, self.length+1) def testAsString(self): "Test ArrayZ asString method" for i in range(self.array3.length()): self.array3[i] = complex(i+1,-i-1) self.failUnless(self.array3.asString() == "[ (1,-1), (2,-2), (3,-3), (4,-4), (5,-5) ]") def testStr(self): "Test ArrayZ __str__ method" for i in range(self.array3.length()): self.array3[i] = complex(i-2,(i-2)*2) self.failUnless(str(self.array3) == "[ 
(-2,-4), (-1,-2), (0,0), (1,2), (2,4) ]") def testView(self): "Test ArrayZ view method" for i in range(self.array3.length()): self.array3[i] = complex(i+1,i+2) a = self.array3.view() self.failUnless(isinstance(a, np.ndarray)) self.failUnless(len(a) == self.length) self.failUnless((a == [1+2j, 2+3j, 3+4j, 4+5j, 5+6j]).all()) ###################################################################### if __name__ == "__main__": # Build the test suite suite = unittest.TestSuite() suite.addTest(unittest.makeSuite(Array1TestCase)) suite.addTest(unittest.makeSuite(Array2TestCase)) suite.addTest(unittest.makeSuite(ArrayZTestCase)) # Execute the test suite print("Testing Classes of Module Array") print("NumPy version", np.__version__) print() result = unittest.TextTestRunner(verbosity=2).run(suite) sys.exit(bool(result.errors + result.failures))
bsd-3-clause
elky/django
django/contrib/admin/checks.py
12
39259
from itertools import chain from django.apps import apps from django.conf import settings from django.contrib.admin.utils import ( NotRelationField, flatten, get_fields_from_path, ) from django.core import checks from django.core.exceptions import FieldDoesNotExist from django.db import models from django.db.models.constants import LOOKUP_SEP from django.forms.models import ( BaseModelForm, BaseModelFormSet, _get_foreign_key, ) from django.template.engine import Engine def check_admin_app(app_configs, **kwargs): from django.contrib.admin.sites import all_sites errors = [] for site in all_sites: errors.extend(site.check(app_configs)) return errors def check_dependencies(**kwargs): """ Check that the admin's dependencies are correctly installed. """ errors = [] # contrib.contenttypes must be installed. if not apps.is_installed('django.contrib.contenttypes'): missing_app = checks.Error( "'django.contrib.contenttypes' must be in INSTALLED_APPS in order " "to use the admin application.", id="admin.E401", ) errors.append(missing_app) # The auth context processor must be installed if using the default # authentication backend. try: default_template_engine = Engine.get_default() except Exception: # Skip this non-critical check: # 1. if the user has a non-trivial TEMPLATES setting and Django # can't find a default template engine # 2. if anything goes wrong while loading template engines, in # order to avoid raising an exception from a confusing location # Catching ImproperlyConfigured suffices for 1. but 2. requires # catching all exceptions. 
pass else: if ('django.contrib.auth.context_processors.auth' not in default_template_engine.context_processors and 'django.contrib.auth.backends.ModelBackend' in settings.AUTHENTICATION_BACKENDS): missing_template = checks.Error( "'django.contrib.auth.context_processors.auth' must be in " "TEMPLATES in order to use the admin application.", id="admin.E402" ) errors.append(missing_template) return errors class BaseModelAdminChecks: def check(self, admin_obj, **kwargs): errors = [] errors.extend(self._check_raw_id_fields(admin_obj)) errors.extend(self._check_fields(admin_obj)) errors.extend(self._check_fieldsets(admin_obj)) errors.extend(self._check_exclude(admin_obj)) errors.extend(self._check_form(admin_obj)) errors.extend(self._check_filter_vertical(admin_obj)) errors.extend(self._check_filter_horizontal(admin_obj)) errors.extend(self._check_radio_fields(admin_obj)) errors.extend(self._check_prepopulated_fields(admin_obj)) errors.extend(self._check_view_on_site_url(admin_obj)) errors.extend(self._check_ordering(admin_obj)) errors.extend(self._check_readonly_fields(admin_obj)) return errors def _check_raw_id_fields(self, obj): """ Check that `raw_id_fields` only contains field names that are listed on the model. """ if not isinstance(obj.raw_id_fields, (list, tuple)): return must_be('a list or tuple', option='raw_id_fields', obj=obj, id='admin.E001') else: return list(chain.from_iterable( self._check_raw_id_fields_item(obj, obj.model, field_name, 'raw_id_fields[%d]' % index) for index, field_name in enumerate(obj.raw_id_fields) )) def _check_raw_id_fields_item(self, obj, model, field_name, label): """ Check an item of `raw_id_fields`, i.e. check that field named `field_name` exists in model `model` and is a ForeignKey or a ManyToManyField. 
""" try: field = model._meta.get_field(field_name) except FieldDoesNotExist: return refer_to_missing_field(field=field_name, option=label, model=model, obj=obj, id='admin.E002') else: if not field.many_to_many and not isinstance(field, models.ForeignKey): return must_be('a foreign key or a many-to-many field', option=label, obj=obj, id='admin.E003') else: return [] def _check_fields(self, obj): """ Check that `fields` only refer to existing fields, doesn't contain duplicates. Check if at most one of `fields` and `fieldsets` is defined. """ if obj.fields is None: return [] elif not isinstance(obj.fields, (list, tuple)): return must_be('a list or tuple', option='fields', obj=obj, id='admin.E004') elif obj.fieldsets: return [ checks.Error( "Both 'fieldsets' and 'fields' are specified.", obj=obj.__class__, id='admin.E005', ) ] fields = flatten(obj.fields) if len(fields) != len(set(fields)): return [ checks.Error( "The value of 'fields' contains duplicate field(s).", obj=obj.__class__, id='admin.E006', ) ] return list(chain.from_iterable( self._check_field_spec(obj, obj.model, field_name, 'fields') for field_name in obj.fields )) def _check_fieldsets(self, obj): """ Check that fieldsets is properly formatted and doesn't contain duplicates. """ if obj.fieldsets is None: return [] elif not isinstance(obj.fieldsets, (list, tuple)): return must_be('a list or tuple', option='fieldsets', obj=obj, id='admin.E007') else: return list(chain.from_iterable( self._check_fieldsets_item(obj, obj.model, fieldset, 'fieldsets[%d]' % index) for index, fieldset in enumerate(obj.fieldsets) )) def _check_fieldsets_item(self, obj, model, fieldset, label): """ Check an item of `fieldsets`, i.e. check that this is a pair of a set name and a dictionary containing "fields" key. 
""" if not isinstance(fieldset, (list, tuple)): return must_be('a list or tuple', option=label, obj=obj, id='admin.E008') elif len(fieldset) != 2: return must_be('of length 2', option=label, obj=obj, id='admin.E009') elif not isinstance(fieldset[1], dict): return must_be('a dictionary', option='%s[1]' % label, obj=obj, id='admin.E010') elif 'fields' not in fieldset[1]: return [ checks.Error( "The value of '%s[1]' must contain the key 'fields'." % label, obj=obj.__class__, id='admin.E011', ) ] elif not isinstance(fieldset[1]['fields'], (list, tuple)): return must_be('a list or tuple', option="%s[1]['fields']" % label, obj=obj, id='admin.E008') fields = flatten(fieldset[1]['fields']) if len(fields) != len(set(fields)): return [ checks.Error( "There are duplicate field(s) in '%s[1]'." % label, obj=obj.__class__, id='admin.E012', ) ] return list(chain.from_iterable( self._check_field_spec(obj, model, fieldset_fields, '%s[1]["fields"]' % label) for fieldset_fields in fieldset[1]['fields'] )) def _check_field_spec(self, obj, model, fields, label): """ `fields` should be an item of `fields` or an item of fieldset[1]['fields'] for any `fieldset` in `fieldsets`. It should be a field name or a tuple of field names. """ if isinstance(fields, tuple): return list(chain.from_iterable( self._check_field_spec_item(obj, model, field_name, "%s[%d]" % (label, index)) for index, field_name in enumerate(fields) )) else: return self._check_field_spec_item(obj, model, fields, label) def _check_field_spec_item(self, obj, model, field_name, label): if field_name in obj.readonly_fields: # Stuff can be put in fields that isn't actually a model field if # it's in readonly_fields, readonly_fields will handle the # validation of such things. return [] else: try: field = model._meta.get_field(field_name) except FieldDoesNotExist: # If we can't find a field on the model that matches, it could # be an extra field on the form. 
return [] else: if (isinstance(field, models.ManyToManyField) and not field.remote_field.through._meta.auto_created): return [ checks.Error( "The value of '%s' cannot include the ManyToManyField '%s', " "because that field manually specifies a relationship model." % (label, field_name), obj=obj.__class__, id='admin.E013', ) ] else: return [] def _check_exclude(self, obj): """ Check that exclude is a sequence without duplicates. """ if obj.exclude is None: # default value is None return [] elif not isinstance(obj.exclude, (list, tuple)): return must_be('a list or tuple', option='exclude', obj=obj, id='admin.E014') elif len(obj.exclude) > len(set(obj.exclude)): return [ checks.Error( "The value of 'exclude' contains duplicate field(s).", obj=obj.__class__, id='admin.E015', ) ] else: return [] def _check_form(self, obj): """ Check that form subclasses BaseModelForm. """ if not issubclass(obj.form, BaseModelForm): return must_inherit_from(parent='BaseModelForm', option='form', obj=obj, id='admin.E016') else: return [] def _check_filter_vertical(self, obj): """ Check that filter_vertical is a sequence of field names. """ if not isinstance(obj.filter_vertical, (list, tuple)): return must_be('a list or tuple', option='filter_vertical', obj=obj, id='admin.E017') else: return list(chain.from_iterable( self._check_filter_item(obj, obj.model, field_name, "filter_vertical[%d]" % index) for index, field_name in enumerate(obj.filter_vertical) )) def _check_filter_horizontal(self, obj): """ Check that filter_horizontal is a sequence of field names. 
""" if not isinstance(obj.filter_horizontal, (list, tuple)): return must_be('a list or tuple', option='filter_horizontal', obj=obj, id='admin.E018') else: return list(chain.from_iterable( self._check_filter_item(obj, obj.model, field_name, "filter_horizontal[%d]" % index) for index, field_name in enumerate(obj.filter_horizontal) )) def _check_filter_item(self, obj, model, field_name, label): """ Check one item of `filter_vertical` or `filter_horizontal`, i.e. check that given field exists and is a ManyToManyField. """ try: field = model._meta.get_field(field_name) except FieldDoesNotExist: return refer_to_missing_field(field=field_name, option=label, model=model, obj=obj, id='admin.E019') else: if not field.many_to_many: return must_be('a many-to-many field', option=label, obj=obj, id='admin.E020') else: return [] def _check_radio_fields(self, obj): """ Check that `radio_fields` is a dictionary. """ if not isinstance(obj.radio_fields, dict): return must_be('a dictionary', option='radio_fields', obj=obj, id='admin.E021') else: return list(chain.from_iterable( self._check_radio_fields_key(obj, obj.model, field_name, 'radio_fields') + self._check_radio_fields_value(obj, val, 'radio_fields["%s"]' % field_name) for field_name, val in obj.radio_fields.items() )) def _check_radio_fields_key(self, obj, model, field_name, label): """ Check that a key of `radio_fields` dictionary is name of existing field and that the field is a ForeignKey or has `choices` defined. """ try: field = model._meta.get_field(field_name) except FieldDoesNotExist: return refer_to_missing_field(field=field_name, option=label, model=model, obj=obj, id='admin.E022') else: if not (isinstance(field, models.ForeignKey) or field.choices): return [ checks.Error( "The value of '%s' refers to '%s', which is not an " "instance of ForeignKey, and does not have a 'choices' definition." 
% ( label, field_name ), obj=obj.__class__, id='admin.E023', ) ] else: return [] def _check_radio_fields_value(self, obj, val, label): """ Check type of a value of `radio_fields` dictionary. """ from django.contrib.admin.options import HORIZONTAL, VERTICAL if val not in (HORIZONTAL, VERTICAL): return [ checks.Error( "The value of '%s' must be either admin.HORIZONTAL or admin.VERTICAL." % label, obj=obj.__class__, id='admin.E024', ) ] else: return [] def _check_view_on_site_url(self, obj): if not callable(obj.view_on_site) and not isinstance(obj.view_on_site, bool): return [ checks.Error( "The value of 'view_on_site' must be a callable or a boolean value.", obj=obj.__class__, id='admin.E025', ) ] else: return [] def _check_prepopulated_fields(self, obj): """ Check that `prepopulated_fields` is a dictionary containing allowed field types. """ if not isinstance(obj.prepopulated_fields, dict): return must_be('a dictionary', option='prepopulated_fields', obj=obj, id='admin.E026') else: return list(chain.from_iterable( self._check_prepopulated_fields_key(obj, obj.model, field_name, 'prepopulated_fields') + self._check_prepopulated_fields_value(obj, obj.model, val, 'prepopulated_fields["%s"]' % field_name) for field_name, val in obj.prepopulated_fields.items() )) def _check_prepopulated_fields_key(self, obj, model, field_name, label): """ Check a key of `prepopulated_fields` dictionary, i.e. check that it is a name of existing field and the field is one of the allowed types. """ try: field = model._meta.get_field(field_name) except FieldDoesNotExist: return refer_to_missing_field(field=field_name, option=label, model=model, obj=obj, id='admin.E027') else: if isinstance(field, (models.DateTimeField, models.ForeignKey, models.ManyToManyField)): return [ checks.Error( "The value of '%s' refers to '%s', which must not be a DateTimeField, " "a ForeignKey, a OneToOneField, or a ManyToManyField." 
% (label, field_name), obj=obj.__class__, id='admin.E028', ) ] else: return [] def _check_prepopulated_fields_value(self, obj, model, val, label): """ Check a value of `prepopulated_fields` dictionary, i.e. it's an iterable of existing fields. """ if not isinstance(val, (list, tuple)): return must_be('a list or tuple', option=label, obj=obj, id='admin.E029') else: return list(chain.from_iterable( self._check_prepopulated_fields_value_item(obj, model, subfield_name, "%s[%r]" % (label, index)) for index, subfield_name in enumerate(val) )) def _check_prepopulated_fields_value_item(self, obj, model, field_name, label): """ For `prepopulated_fields` equal to {"slug": ("title",)}, `field_name` is "title". """ try: model._meta.get_field(field_name) except FieldDoesNotExist: return refer_to_missing_field(field=field_name, option=label, model=model, obj=obj, id='admin.E030') else: return [] def _check_ordering(self, obj): """ Check that ordering refers to existing fields or is random. """ # ordering = None if obj.ordering is None: # The default value is None return [] elif not isinstance(obj.ordering, (list, tuple)): return must_be('a list or tuple', option='ordering', obj=obj, id='admin.E031') else: return list(chain.from_iterable( self._check_ordering_item(obj, obj.model, field_name, 'ordering[%d]' % index) for index, field_name in enumerate(obj.ordering) )) def _check_ordering_item(self, obj, model, field_name, label): """ Check that `ordering` refers to existing fields. """ if field_name == '?' and len(obj.ordering) != 1: return [ checks.Error( "The value of 'ordering' has the random ordering marker '?', " "but contains other fields as well.", hint='Either remove the "?", or remove the other fields.', obj=obj.__class__, id='admin.E032', ) ] elif field_name == '?': return [] elif LOOKUP_SEP in field_name: # Skip ordering in the format field1__field2 (FIXME: checking # this format would be nice, but it's a little fiddly). 
return [] else: if field_name.startswith('-'): field_name = field_name[1:] if field_name == 'pk': return [] try: model._meta.get_field(field_name) except FieldDoesNotExist: return refer_to_missing_field(field=field_name, option=label, model=model, obj=obj, id='admin.E033') else: return [] def _check_readonly_fields(self, obj): """ Check that readonly_fields refers to proper attribute or field. """ if obj.readonly_fields == (): return [] elif not isinstance(obj.readonly_fields, (list, tuple)): return must_be('a list or tuple', option='readonly_fields', obj=obj, id='admin.E034') else: return list(chain.from_iterable( self._check_readonly_fields_item(obj, obj.model, field_name, "readonly_fields[%d]" % index) for index, field_name in enumerate(obj.readonly_fields) )) def _check_readonly_fields_item(self, obj, model, field_name, label): if callable(field_name): return [] elif hasattr(obj, field_name): return [] elif hasattr(model, field_name): return [] else: try: model._meta.get_field(field_name) except FieldDoesNotExist: return [ checks.Error( "The value of '%s' is not a callable, an attribute of '%s', or an attribute of '%s.%s'." 
% ( label, obj.__class__.__name__, model._meta.app_label, model._meta.object_name ), obj=obj.__class__, id='admin.E035', ) ] else: return [] class ModelAdminChecks(BaseModelAdminChecks): def check(self, admin_obj, **kwargs): errors = super().check(admin_obj) errors.extend(self._check_save_as(admin_obj)) errors.extend(self._check_save_on_top(admin_obj)) errors.extend(self._check_inlines(admin_obj)) errors.extend(self._check_list_display(admin_obj)) errors.extend(self._check_list_display_links(admin_obj)) errors.extend(self._check_list_filter(admin_obj)) errors.extend(self._check_list_select_related(admin_obj)) errors.extend(self._check_list_per_page(admin_obj)) errors.extend(self._check_list_max_show_all(admin_obj)) errors.extend(self._check_list_editable(admin_obj)) errors.extend(self._check_search_fields(admin_obj)) errors.extend(self._check_date_hierarchy(admin_obj)) return errors def _check_save_as(self, obj): """ Check save_as is a boolean. """ if not isinstance(obj.save_as, bool): return must_be('a boolean', option='save_as', obj=obj, id='admin.E101') else: return [] def _check_save_on_top(self, obj): """ Check save_on_top is a boolean. """ if not isinstance(obj.save_on_top, bool): return must_be('a boolean', option='save_on_top', obj=obj, id='admin.E102') else: return [] def _check_inlines(self, obj): """ Check all inline model admin classes. """ if not isinstance(obj.inlines, (list, tuple)): return must_be('a list or tuple', option='inlines', obj=obj, id='admin.E103') else: return list(chain.from_iterable( self._check_inlines_item(obj, obj.model, item, "inlines[%d]" % index) for index, item in enumerate(obj.inlines) )) def _check_inlines_item(self, obj, model, inline, label): """ Check one inline model admin. """ inline_label = inline.__module__ + '.' + inline.__name__ from django.contrib.admin.options import InlineModelAdmin if not issubclass(inline, InlineModelAdmin): return [ checks.Error( "'%s' must inherit from 'InlineModelAdmin'." 
% inline_label, obj=obj.__class__, id='admin.E104', ) ] elif not inline.model: return [ checks.Error( "'%s' must have a 'model' attribute." % inline_label, obj=obj.__class__, id='admin.E105', ) ] elif not issubclass(inline.model, models.Model): return must_be('a Model', option='%s.model' % inline_label, obj=obj, id='admin.E106') else: return inline(model, obj.admin_site).check() def _check_list_display(self, obj): """ Check that list_display only contains fields or usable attributes. """ if not isinstance(obj.list_display, (list, tuple)): return must_be('a list or tuple', option='list_display', obj=obj, id='admin.E107') else: return list(chain.from_iterable( self._check_list_display_item(obj, obj.model, item, "list_display[%d]" % index) for index, item in enumerate(obj.list_display) )) def _check_list_display_item(self, obj, model, item, label): if callable(item): return [] elif hasattr(obj, item): return [] elif hasattr(model, item): # getattr(model, item) could be an X_RelatedObjectsDescriptor try: field = model._meta.get_field(item) except FieldDoesNotExist: try: field = getattr(model, item) except AttributeError: field = None if field is None: return [ checks.Error( "The value of '%s' refers to '%s', which is not a " "callable, an attribute of '%s', or an attribute or method on '%s.%s'." % ( label, item, obj.__class__.__name__, model._meta.app_label, model._meta.object_name ), obj=obj.__class__, id='admin.E108', ) ] elif isinstance(field, models.ManyToManyField): return [ checks.Error( "The value of '%s' must not be a ManyToManyField." % label, obj=obj.__class__, id='admin.E109', ) ] else: return [] else: try: model._meta.get_field(item) except FieldDoesNotExist: return [ # This is a deliberate repeat of E108; there's more than one path # required to test this condition. checks.Error( "The value of '%s' refers to '%s', which is not a callable, " "an attribute of '%s', or an attribute or method on '%s.%s'." 
% ( label, item, obj.__class__.__name__, model._meta.app_label, model._meta.object_name ), obj=obj.__class__, id='admin.E108', ) ] else: return [] def _check_list_display_links(self, obj): """ Check that list_display_links is a unique subset of list_display. """ from django.contrib.admin.options import ModelAdmin if obj.list_display_links is None: return [] elif not isinstance(obj.list_display_links, (list, tuple)): return must_be('a list, a tuple, or None', option='list_display_links', obj=obj, id='admin.E110') # Check only if ModelAdmin.get_list_display() isn't overridden. elif obj.get_list_display.__func__ is ModelAdmin.get_list_display: return list(chain.from_iterable( self._check_list_display_links_item(obj, field_name, "list_display_links[%d]" % index) for index, field_name in enumerate(obj.list_display_links) )) return [] def _check_list_display_links_item(self, obj, field_name, label): if field_name not in obj.list_display: return [ checks.Error( "The value of '%s' refers to '%s', which is not defined in 'list_display'." % ( label, field_name ), obj=obj.__class__, id='admin.E111', ) ] else: return [] def _check_list_filter(self, obj): if not isinstance(obj.list_filter, (list, tuple)): return must_be('a list or tuple', option='list_filter', obj=obj, id='admin.E112') else: return list(chain.from_iterable( self._check_list_filter_item(obj, obj.model, item, "list_filter[%d]" % index) for index, item in enumerate(obj.list_filter) )) def _check_list_filter_item(self, obj, model, item, label): """ Check one item of `list_filter`, i.e. check if it is one of three options: 1. 'field' -- a basic field filter, possibly w/ relationships (e.g. 'field__rel') 2. ('field', SomeFieldListFilter) - a field-based list filter class 3. SomeListFilter - a non-field list filter class """ from django.contrib.admin import ListFilter, FieldListFilter if callable(item) and not isinstance(item, models.Field): # If item is option 3, it should be a ListFilter... 
if not issubclass(item, ListFilter): return must_inherit_from(parent='ListFilter', option=label, obj=obj, id='admin.E113') # ... but not a FieldListFilter. elif issubclass(item, FieldListFilter): return [ checks.Error( "The value of '%s' must not inherit from 'FieldListFilter'." % label, obj=obj.__class__, id='admin.E114', ) ] else: return [] elif isinstance(item, (tuple, list)): # item is option #2 field, list_filter_class = item if not issubclass(list_filter_class, FieldListFilter): return must_inherit_from(parent='FieldListFilter', option='%s[1]' % label, obj=obj, id='admin.E115') else: return [] else: # item is option #1 field = item # Validate the field string try: get_fields_from_path(model, field) except (NotRelationField, FieldDoesNotExist): return [ checks.Error( "The value of '%s' refers to '%s', which does not refer to a Field." % (label, field), obj=obj.__class__, id='admin.E116', ) ] else: return [] def _check_list_select_related(self, obj): """ Check that list_select_related is a boolean, a list or a tuple. """ if not isinstance(obj.list_select_related, (bool, list, tuple)): return must_be('a boolean, tuple or list', option='list_select_related', obj=obj, id='admin.E117') else: return [] def _check_list_per_page(self, obj): """ Check that list_per_page is an integer. """ if not isinstance(obj.list_per_page, int): return must_be('an integer', option='list_per_page', obj=obj, id='admin.E118') else: return [] def _check_list_max_show_all(self, obj): """ Check that list_max_show_all is an integer. """ if not isinstance(obj.list_max_show_all, int): return must_be('an integer', option='list_max_show_all', obj=obj, id='admin.E119') else: return [] def _check_list_editable(self, obj): """ Check that list_editable is a sequence of editable fields from list_display without first element. 
""" if not isinstance(obj.list_editable, (list, tuple)): return must_be('a list or tuple', option='list_editable', obj=obj, id='admin.E120') else: return list(chain.from_iterable( self._check_list_editable_item(obj, obj.model, item, "list_editable[%d]" % index) for index, item in enumerate(obj.list_editable) )) def _check_list_editable_item(self, obj, model, field_name, label): try: field = model._meta.get_field(field_name) except FieldDoesNotExist: return refer_to_missing_field(field=field_name, option=label, model=model, obj=obj, id='admin.E121') else: if field_name not in obj.list_display: return [ checks.Error( "The value of '%s' refers to '%s', which is not " "contained in 'list_display'." % (label, field_name), obj=obj.__class__, id='admin.E122', ) ] elif obj.list_display_links and field_name in obj.list_display_links: return [ checks.Error( "The value of '%s' cannot be in both 'list_editable' and 'list_display_links'." % field_name, obj=obj.__class__, id='admin.E123', ) ] # If list_display[0] is in list_editable, check that # list_display_links is set. See #22792 and #26229 for use cases. elif (obj.list_display[0] == field_name and not obj.list_display_links and obj.list_display_links is not None): return [ checks.Error( "The value of '%s' refers to the first field in 'list_display' ('%s'), " "which cannot be used unless 'list_display_links' is set." % ( label, obj.list_display[0] ), obj=obj.__class__, id='admin.E124', ) ] elif not field.editable: return [ checks.Error( "The value of '%s' refers to '%s', which is not editable through the admin." % ( label, field_name ), obj=obj.__class__, id='admin.E125', ) ] else: return [] def _check_search_fields(self, obj): """ Check search_fields is a sequence. """ if not isinstance(obj.search_fields, (list, tuple)): return must_be('a list or tuple', option='search_fields', obj=obj, id='admin.E126') else: return [] def _check_date_hierarchy(self, obj): """ Check that date_hierarchy refers to DateField or DateTimeField. 
""" if obj.date_hierarchy is None: return [] else: try: field = get_fields_from_path(obj.model, obj.date_hierarchy)[-1] except (NotRelationField, FieldDoesNotExist): return [ checks.Error( "The value of 'date_hierarchy' refers to '%s', which " "does not refer to a Field." % obj.date_hierarchy, obj=obj.__class__, id='admin.E127', ) ] else: if not isinstance(field, (models.DateField, models.DateTimeField)): return must_be('a DateField or DateTimeField', option='date_hierarchy', obj=obj, id='admin.E128') else: return [] class InlineModelAdminChecks(BaseModelAdminChecks): def check(self, inline_obj, **kwargs): errors = super().check(inline_obj) parent_model = inline_obj.parent_model errors.extend(self._check_relation(inline_obj, parent_model)) errors.extend(self._check_exclude_of_parent_model(inline_obj, parent_model)) errors.extend(self._check_extra(inline_obj)) errors.extend(self._check_max_num(inline_obj)) errors.extend(self._check_min_num(inline_obj)) errors.extend(self._check_formset(inline_obj)) return errors def _check_exclude_of_parent_model(self, obj, parent_model): # Do not perform more specific checks if the base checks result in an # error. errors = super()._check_exclude(obj) if errors: return [] # Skip if `fk_name` is invalid. if self._check_relation(obj, parent_model): return [] if obj.exclude is None: return [] fk = _get_foreign_key(parent_model, obj.model, fk_name=obj.fk_name) if fk.name in obj.exclude: return [ checks.Error( "Cannot exclude the field '%s', because it is the foreign key " "to the parent model '%s.%s'." % ( fk.name, parent_model._meta.app_label, parent_model._meta.object_name ), obj=obj.__class__, id='admin.E201', ) ] else: return [] def _check_relation(self, obj, parent_model): try: _get_foreign_key(parent_model, obj.model, fk_name=obj.fk_name) except ValueError as e: return [checks.Error(e.args[0], obj=obj.__class__, id='admin.E202')] else: return [] def _check_extra(self, obj): """ Check that extra is an integer. 
""" if not isinstance(obj.extra, int): return must_be('an integer', option='extra', obj=obj, id='admin.E203') else: return [] def _check_max_num(self, obj): """ Check that max_num is an integer. """ if obj.max_num is None: return [] elif not isinstance(obj.max_num, int): return must_be('an integer', option='max_num', obj=obj, id='admin.E204') else: return [] def _check_min_num(self, obj): """ Check that min_num is an integer. """ if obj.min_num is None: return [] elif not isinstance(obj.min_num, int): return must_be('an integer', option='min_num', obj=obj, id='admin.E205') else: return [] def _check_formset(self, obj): """ Check formset is a subclass of BaseModelFormSet. """ if not issubclass(obj.formset, BaseModelFormSet): return must_inherit_from(parent='BaseModelFormSet', option='formset', obj=obj, id='admin.E206') else: return [] def must_be(type, option, obj, id): return [ checks.Error( "The value of '%s' must be %s." % (option, type), obj=obj.__class__, id=id, ), ] def must_inherit_from(parent, option, obj, id): return [ checks.Error( "The value of '%s' must inherit from '%s'." % (option, parent), obj=obj.__class__, id=id, ), ] def refer_to_missing_field(field, option, model, obj, id): return [ checks.Error( "The value of '%s' refers to '%s', which is not an attribute of '%s.%s'." % ( option, field, model._meta.app_label, model._meta.object_name ), obj=obj.__class__, id=id, ), ]
bsd-3-clause
oswalpalash/remoteusermgmt
RUM/lib/python2.7/site-packages/pip/_vendor/colorama/win32.py
535
5365
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
"""ctypes bindings for the Win32 console API.

On non-Windows platforms (or when ctypes lacks WinDLL) the module still
imports cleanly: `windll` is None and the public entry points degrade to
no-op stubs, so callers may import unconditionally.
"""

# from winbase.h
STDOUT = -11
STDERR = -12

try:
    import ctypes
    from ctypes import LibraryLoader
    windll = LibraryLoader(ctypes.WinDLL)
    from ctypes import wintypes
except (AttributeError, ImportError):
    # Not on Windows: keep the module importable with harmless stubs.
    windll = None
    SetConsoleTextAttribute = lambda *_: None
    winapi_test = lambda *_: None
else:
    from ctypes import byref, Structure, c_char, POINTER

    COORD = wintypes._COORD

    class CONSOLE_SCREEN_BUFFER_INFO(Structure):
        """struct in wincon.h."""
        _fields_ = [
            ("dwSize", COORD),
            ("dwCursorPosition", COORD),
            ("wAttributes", wintypes.WORD),
            ("srWindow", wintypes.SMALL_RECT),
            ("dwMaximumWindowSize", COORD),
        ]

        def __str__(self):
            return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % (
                self.dwSize.Y, self.dwSize.X
                , self.dwCursorPosition.Y, self.dwCursorPosition.X
                , self.wAttributes
                , self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right
                , self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X
            )

    # Prototype the kernel32 functions we call (argtypes/restype) so ctypes
    # marshals arguments correctly instead of guessing.
    _GetStdHandle = windll.kernel32.GetStdHandle
    _GetStdHandle.argtypes = [
        wintypes.DWORD,
    ]
    _GetStdHandle.restype = wintypes.HANDLE

    _GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo
    _GetConsoleScreenBufferInfo.argtypes = [
        wintypes.HANDLE,
        POINTER(CONSOLE_SCREEN_BUFFER_INFO),
    ]
    _GetConsoleScreenBufferInfo.restype = wintypes.BOOL

    _SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute
    _SetConsoleTextAttribute.argtypes = [
        wintypes.HANDLE,
        wintypes.WORD,
    ]
    _SetConsoleTextAttribute.restype = wintypes.BOOL

    _SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition
    _SetConsoleCursorPosition.argtypes = [
        wintypes.HANDLE,
        COORD,
    ]
    _SetConsoleCursorPosition.restype = wintypes.BOOL

    _FillConsoleOutputCharacterA = windll.kernel32.FillConsoleOutputCharacterA
    _FillConsoleOutputCharacterA.argtypes = [
        wintypes.HANDLE,
        c_char,
        wintypes.DWORD,
        COORD,
        POINTER(wintypes.DWORD),
    ]
    _FillConsoleOutputCharacterA.restype = wintypes.BOOL

    _FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute
    _FillConsoleOutputAttribute.argtypes = [
        wintypes.HANDLE,
        wintypes.WORD,
        wintypes.DWORD,
        COORD,
        POINTER(wintypes.DWORD),
    ]
    _FillConsoleOutputAttribute.restype = wintypes.BOOL

    # BUGFIX: this was bound to the ANSI entry point (SetConsoleTitleA) with
    # an LPCSTR argtype, which rejects Python 3 `str` titles.  Bind the
    # wide-character variant the name promises, matching upstream colorama.
    _SetConsoleTitleW = windll.kernel32.SetConsoleTitleW
    _SetConsoleTitleW.argtypes = [
        wintypes.LPCWSTR
    ]
    _SetConsoleTitleW.restype = wintypes.BOOL

    # Std handles are fetched once at import; keyed by our STDOUT/STDERR ids.
    handles = {
        STDOUT: _GetStdHandle(STDOUT),
        STDERR: _GetStdHandle(STDERR),
    }

    def winapi_test():
        """Return True iff the stdout handle is a real console."""
        handle = handles[STDOUT]
        csbi = CONSOLE_SCREEN_BUFFER_INFO()
        success = _GetConsoleScreenBufferInfo(
            handle, byref(csbi))
        return bool(success)

    def GetConsoleScreenBufferInfo(stream_id=STDOUT):
        """Return the CONSOLE_SCREEN_BUFFER_INFO for the given stream."""
        handle = handles[stream_id]
        csbi = CONSOLE_SCREEN_BUFFER_INFO()
        success = _GetConsoleScreenBufferInfo(
            handle, byref(csbi))
        return csbi

    def SetConsoleTextAttribute(stream_id, attrs):
        """Set the foreground/background attributes for subsequent writes."""
        handle = handles[stream_id]
        return _SetConsoleTextAttribute(handle, attrs)

    def SetConsoleCursorPosition(stream_id, position, adjust=True):
        """Move the cursor to a 1-based ANSI-style (row, column) position."""
        position = COORD(*position)
        # Because ANSI coordinates are 1-based, (0, y) / (x, 0) are out of
        # range: do nothing rather than pass a bad COORD to the API.
        if position.Y <= 0 or position.X <= 0:
            return
        # Adjust for Windows' SetConsoleCursorPosition:
        #    1. being 0-based, while ANSI is 1-based.
        #    2. expecting (x,y), while ANSI uses (y,x).
        adjusted_position = COORD(position.Y - 1, position.X - 1)
        if adjust:
            # Adjust for viewport's scroll position
            sr = GetConsoleScreenBufferInfo(STDOUT).srWindow
            adjusted_position.Y += sr.Top
            adjusted_position.X += sr.Left
        # Resume normal processing
        handle = handles[stream_id]
        return _SetConsoleCursorPosition(handle, adjusted_position)

    def FillConsoleOutputCharacter(stream_id, char, length, start):
        """Write `char` `length` times starting at COORD `start`; return count written."""
        handle = handles[stream_id]
        char = c_char(char.encode())
        length = wintypes.DWORD(length)
        num_written = wintypes.DWORD(0)
        # Note that this is hard-coded for ANSI (vs wide) bytes.
        success = _FillConsoleOutputCharacterA(
            handle, char, length, start, byref(num_written))
        return num_written.value

    def FillConsoleOutputAttribute(stream_id, attr, length, start):
        ''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )'''
        handle = handles[stream_id]
        attribute = wintypes.WORD(attr)
        length = wintypes.DWORD(length)
        num_written = wintypes.DWORD(0)
        # Note that this is hard-coded for ANSI (vs wide) bytes.
        return _FillConsoleOutputAttribute(
            handle, attribute, length, start, byref(num_written))

    def SetConsoleTitle(title):
        """Set the console window title (accepts a Python str)."""
        return _SetConsoleTitleW(title)
mit
iRaffnix/googletest
test/gtest_test_utils.py
674
10826
#!/usr/bin/env python # # Copyright 2006, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Unit test utilities for Google C++ Testing Framework.""" __author__ = 'wan@google.com (Zhanyong Wan)' import atexit import os import shutil import sys import tempfile import unittest _test_module = unittest # Suppresses the 'Import not at the top of the file' lint complaint. 
# pylint: disable-msg=C6204
try:
  import subprocess
  _SUBPROCESS_MODULE_AVAILABLE = True
# FIX: was a bare `except:`, which also swallowed SystemExit and
# KeyboardInterrupt.  Only ImportError is expected here.
except ImportError:
  import popen2
  _SUBPROCESS_MODULE_AVAILABLE = False
# pylint: enable-msg=C6204


GTEST_OUTPUT_VAR_NAME = 'GTEST_OUTPUT'

IS_WINDOWS = os.name == 'nt'
IS_CYGWIN = os.name == 'posix' and 'CYGWIN' in os.uname()[0]

# The environment variable for specifying the path to the premature-exit file.
PREMATURE_EXIT_FILE_ENV_VAR = 'TEST_PREMATURE_EXIT_FILE'

# Snapshot of the environment taken at import time; SetEnvVar mutates this
# copy, not os.environ itself.
environ = os.environ.copy()


def SetEnvVar(env_var, value):
  """Sets/unsets an environment variable to a given value.

  Args:
    env_var: name of the variable in the module-level `environ` copy.
    value: new value, or None to remove the variable.
  """

  if value is not None:
    environ[env_var] = value
  elif env_var in environ:
    del environ[env_var]


# Here we expose a class from a particular module, depending on the
# environment. The comment suppresses the 'Invalid variable name' lint
# complaint.
TestCase = _test_module.TestCase  # pylint: disable-msg=C6409

# Initially maps a flag to its default value. After
# _ParseAndStripGTestFlags() is called, maps a flag to its actual value.
_flag_map = {'source_dir': os.path.dirname(sys.argv[0]),
             'build_dir': os.path.dirname(sys.argv[0])}
_gtest_flags_are_parsed = False


def _ParseAndStripGTestFlags(argv):
  """Parses and strips Google Test flags from argv.  This is idempotent."""

  # Suppresses the lint complaint about a global variable since we need it
  # here to maintain module-wide state.
  global _gtest_flags_are_parsed  # pylint: disable-msg=W0603
  if _gtest_flags_are_parsed:
    return

  _gtest_flags_are_parsed = True
  for flag in _flag_map:
    # The environment variable overrides the default value.
    if flag.upper() in os.environ:
      _flag_map[flag] = os.environ[flag.upper()]

    # The command line flag overrides the environment variable.
    i = 1  # Skips the program name.
    while i < len(argv):
      prefix = '--' + flag + '='
      if argv[i].startswith(prefix):
        _flag_map[flag] = argv[i][len(prefix):]
        del argv[i]
        break
      else:
        # We don't increment i in case we just found a --gtest_* flag
        # and removed it from argv.
        i += 1


def GetFlag(flag):
  """Returns the value of the given flag."""

  # In case GetFlag() is called before Main(), we always call
  # _ParseAndStripGTestFlags() here to make sure the --gtest_* flags
  # are parsed.
  _ParseAndStripGTestFlags(sys.argv)

  return _flag_map[flag]


def GetSourceDir():
  """Returns the absolute path of the directory where the .py files are."""

  return os.path.abspath(GetFlag('source_dir'))


def GetBuildDir():
  """Returns the absolute path of the directory where the test binaries are."""

  return os.path.abspath(GetFlag('build_dir'))


_temp_dir = None


def _RemoveTempDir():
  # atexit hook: best-effort cleanup of the lazily-created temp directory.
  if _temp_dir:
    shutil.rmtree(_temp_dir, ignore_errors=True)

atexit.register(_RemoveTempDir)


def GetTempDir():
  """Returns a directory for temporary files."""

  global _temp_dir
  if not _temp_dir:
    _temp_dir = tempfile.mkdtemp()
  return _temp_dir


def GetTestExecutablePath(executable_name, build_dir=None):
  """Returns the absolute path of the test binary given its name.

  The function will print a message and abort the program if the resulting
  file doesn't exist.

  Args:
    executable_name: name of the test binary that the test script runs.
    build_dir:       directory where to look for executables, by default
                     the result of GetBuildDir().

  Returns:
    The absolute path of the test binary.
  """

  path = os.path.abspath(os.path.join(build_dir or GetBuildDir(),
                                      executable_name))
  if (IS_WINDOWS or IS_CYGWIN) and not path.endswith('.exe'):
    path += '.exe'

  if not os.path.exists(path):
    message = (
        'Unable to find the test binary "%s". Please make sure to provide\n'
        'a path to the binary via the --build_dir flag or the BUILD_DIR\n'
        'environment variable.' % path)
    # FIX: was the Python-2-only statement `print >> sys.stderr, message`,
    # a syntax error under Python 3.  sys.stderr.write works on both.
    sys.stderr.write(message + '\n')
    sys.exit(1)

  return path


def GetExitStatus(exit_code):
  """Returns the argument to exit(), or -1 if exit() wasn't called.

  Args:
    exit_code: the result value of os.system(command).
  """

  if os.name == 'nt':
    # On Windows, os.WEXITSTATUS() doesn't work and os.system() returns
    # the argument to exit() directly.
    return exit_code
  else:
    # On Unix, os.WEXITSTATUS() must be used to extract the exit status
    # from the result of os.system().
    if os.WIFEXITED(exit_code):
      return os.WEXITSTATUS(exit_code)
    else:
      return -1


class Subprocess:
  def __init__(self, command, working_dir=None, capture_stderr=True, env=None):
    """Changes into a specified directory, if provided, and executes a command.

    Restores the old directory afterwards.

    Args:
      command:        The command to run, in the form of sys.argv.
      working_dir:    The directory to change into.
      capture_stderr: Determines whether to capture stderr in the output member
                      or to discard it.
      env:            Dictionary with environment to pass to the subprocess.

    Returns:
      An object that represents outcome of the executed process. It has the
      following attributes:
        terminated_by_signal   True iff the child process has been terminated
                               by a signal.
        signal                 Signal that terminated the child process.
        exited                 True iff the child process exited normally.
        exit_code              The code with which the child process exited.
        output                 Child process's stdout and stderr output
                               combined in a string.
    """

    # The subprocess module is the preferrable way of running programs
    # since it is available and behaves consistently on all platforms,
    # including Windows. But it is only available starting in python 2.4.
    # In earlier python versions, we revert to the popen2 module, which is
    # available in python 2.0 and later but doesn't provide required
    # functionality (Popen4) under Windows. This allows us to support Mac
    # OS X 10.4 Tiger, which has python 2.3 installed.
    if _SUBPROCESS_MODULE_AVAILABLE:
      if capture_stderr:
        stderr = subprocess.STDOUT
      else:
        stderr = subprocess.PIPE

      p = subprocess.Popen(command,
                           stdout=subprocess.PIPE, stderr=stderr,
                           cwd=working_dir, universal_newlines=True, env=env)
      # communicate returns a tuple with the file object for the child's
      # output.
      self.output = p.communicate()[0]
      self._return_code = p.returncode
    else:
      old_dir = os.getcwd()

      def _ReplaceEnvDict(dest, src):
        # Changes made by os.environ.clear are not inheritable by child
        # processes until Python 2.6. To produce inheritable changes we have
        # to delete environment items with the del statement.
        # FIX: iterate over a snapshot of the keys; deleting from a dict
        # while iterating its live key view fails on Python 3 (and list()
        # is harmless on Python 2).
        for key in list(dest.keys()):
          del dest[key]
        dest.update(src)

      # When 'env' is not None, backup the environment variables and replace
      # them with the passed 'env'. When 'env' is None, we simply use the
      # current 'os.environ' for compatibility with the subprocess.Popen
      # semantics used above.
      if env is not None:
        old_environ = os.environ.copy()
        _ReplaceEnvDict(os.environ, env)

      try:
        if working_dir is not None:
          os.chdir(working_dir)
        if capture_stderr:
          p = popen2.Popen4(command)
        else:
          p = popen2.Popen3(command)
        p.tochild.close()
        self.output = p.fromchild.read()
        ret_code = p.wait()
      finally:
        os.chdir(old_dir)

        # Restore the old environment variables
        # if they were replaced.
        if env is not None:
          _ReplaceEnvDict(os.environ, old_environ)

      # Converts ret_code to match the semantics of
      # subprocess.Popen.returncode.
      if os.WIFSIGNALED(ret_code):
        self._return_code = -os.WTERMSIG(ret_code)
      else:  # os.WIFEXITED(ret_code) should return True here.
        self._return_code = os.WEXITSTATUS(ret_code)

    if self._return_code < 0:
      self.terminated_by_signal = True
      self.exited = False
      self.signal = -self._return_code
    else:
      self.terminated_by_signal = False
      self.exited = True
      self.exit_code = self._return_code


def Main():
  """Runs the unit test."""

  # We must call _ParseAndStripGTestFlags() before calling
  # unittest.main().  Otherwise the latter will be confused by the
  # --gtest_* flags.
  _ParseAndStripGTestFlags(sys.argv)
  # The tested binaries should not be writing XML output files unless the
  # script explicitly instructs them to.
  # TODO(vladl@google.com): Move this into Subprocess when we implement
  # passing environment into it as a parameter.
  if GTEST_OUTPUT_VAR_NAME in os.environ:
    del os.environ[GTEST_OUTPUT_VAR_NAME]

  _test_module.main()
bsd-3-clause