repo_name
stringlengths
5
100
path
stringlengths
4
375
copies
stringclasses
991 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
0nse/WikiWho
functions/print.py
1
8966
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
@author: Maribel Acosta
@author: Fabian Floeck
@author: Michael Ruster
'''
import csv
import re

import functions.TextPostProcessing as TextPostProcessing
import functions.WarningTemplates as WarningTemplates
import BlockTimeCalculation

from datetime import datetime


def writeAllRevisions(order, revisions, blocks, pageName = None):
    """ Writes the revisions to disk. Don't pass a pageName if you want to
    process deletion discussions. The pageName is used for user warnings to
    determine the admonished user.

    order     -- iterable of (revisionId, vandalism) pairs; revisions flagged
                 as vandalism are skipped.
    revisions -- mapping from revision id to revision object.
    blocks    -- block data (deletion-discussion mode) or empty (warning mode).
    pageName  -- user talk page name (warning mode) or None.
    """
    # Exactly one of pageName/blocks may be set; they select the output mode.
    assert (not pageName) ^ (not blocks), '[E] Illegal configuration. Both parameters pageName and blocks are set. One must be empty.'

    for (revisionId, vandalism) in order:
        if not(vandalism):
            revision = revisions[revisionId]
            text = extractCurrentRevisionsText(revision, blocks)

            if not pageName:
                writeDeletionDiscussion(text, revision, blocks)
            else:
                writeUserWarning(text, revision, pageName)


def extractCurrentRevisionsText(revision, blocks):
    """ Iterates over the revision's text and extracts all text that has
    been introduced in this revision as a list.

    Returns the merged text (see TextPostProcessing.merge).
    """
    textList = []
    for hash_paragraph in revision.ordered_paragraphs:
        para = revision.paragraphs[hash_paragraph]
        paragraph = para[-1]

        for hash_sentence in paragraph.ordered_sentences:
            sentence = paragraph.sentences[hash_sentence][-1]
            # Only keep words first authored in this very revision.
            # NOTE: use value equality here -- comparing ids with "is" only
            # works by accident (e.g. CPython small-int interning).
            textList.extend([word.value for word in sentence.words
                             if word.revision == revision.wikipedia_id])
    return TextPostProcessing.merge(textList)


def writeDeletionDiscussion(text, revision, blocks):
    """ Writes this deletion discussion to disk. Text is this revision's text.
    Said text will be cleaned so that markup is removed. It will append to
    this directory's 'deletionRevisions.csv'. Make sure, this script has
    writing access. The CSV will have the following columns:
        timestamp | contrib ID | contrib name | rev ID | text | seconds to block
    Seconds to block is the time in seconds until the user who is author of
    this revision got blocked.
    Revisions of anonymous users will be ignored as IP addresses are not
    unique. Likewise, bots will be ignored as we are interested in human
    communication. The list of bots can be retrieved from running WikiWho w/o
    the Bot removal and executing the bash command (replacing the space
    delimiter with a tab):
        grep 'Bot	' deletionRevisions.csv | cut -f 3 -d '	' | sort | uniq
    For our dump, we detected 55 bots with SineBot being the most active.
    """
    # we will not process anonymous users or bots:
    if not revision.contributor_id or \
       revision.contributor_name.endswith('Bot'):
        return

    text = TextPostProcessing.clean(text, revision.contributor_name)
    text = removeAfDText(text)

    # only print a line when this revision introduced new text:
    if text.strip():
        secondsToBlock = BlockTimeCalculation.calculateSecondsUntilNextBlock(blocks, revision.contributor_name, revision.timestamp)
        print("[I] Writing authorship for revision %s to disk."
              % revision.wikipedia_id)
        with open('deletionRevisions.csv', 'a', newline='') as csvFile:
            writer = csv.writer(csvFile, delimiter='\t',
                                quotechar='|', quoting=csv.QUOTE_MINIMAL)
            writer.writerow([revision.timestamp, revision.contributor_id,
                             revision.contributor_name, revision.wikipedia_id,
                             text, secondsToBlock])

# calculate the templates once:
templatesRe = WarningTemplates.mergeTemplatesRe(WarningTemplates.vandalism,
                                                WarningTemplates.disruptive,
                                                WarningTemplates.agf,
                                                WarningTemplates.harass,
                                                WarningTemplates.npa)


def writeUserWarning(text, revision, pageName):
    """ Writes user warnings in a block log format into 'userWarnings.csv'.
    The columns look as follows:
        timestamp | blocked user name | rev ID | warning | issuer ID | issuer name
    (The revision id of the warning post is stored between the admonished
    user's name and the matched warning template.)
    """
    assert pageName.startswith('User talk:'), '[E] Revision is not a user page:"%s"' % pageName
    # strip the 'User talk:' prefix to obtain the admonished user's name:
    blockedUserName = pageName[10:]

    for templateRe in templatesRe:
        matchedTemplate = templateRe.search(text)

        if matchedTemplate:
            matchedWarning = matchedTemplate.group(1)
            print('[I] Writing admonished user "%s" with warning "%s" to disk.'
                  % (blockedUserName, matchedWarning))
            with open('userWarnings.csv', 'a', newline='') as csvFile:
                writer = csv.writer(csvFile, delimiter='\t',
                                    quotechar='|', quoting=csv.QUOTE_MINIMAL)
                writer.writerow([revision.timestamp, blockedUserName,
                                 revision.wikipedia_id, matchedWarning,
                                 revision.contributor_id,
                                 revision.contributor_name])
            # only the first matching template is recorded per revision:
            break

#===============================================================================
# The following method uses regular expressions. The expressions are compiled
# before the method definition so that they are compiled only once:
#===============================================================================
# The following list of regular expressions has been build by analysing the most
# frequently made posts in AfDs.
afdTemplates = [
    re.compile('(wikipediadeletionprocess )?(relistingdiscussions )?((wp)?relist )?(this afd is being )?relisted to generate a (clearer consensus|more thorough discussion so (a clearer |that )?(consensus|a decision) may (usefully )?be reached)( br| emsp| please add new discussion below this notice thanks)?'),
    re.compile('(was proposed for deletion )?this page is an archive of (the discussion (about |surrounding ))?the proposed deletion (of the (article below|page entitled( \w)*) )?this page is (no longer live|kept as an historic record)'),
    re.compile('this page is now preserved as an archive of the debate and like (some )?other (delete |vfd )?(sub)?pages is no longer live subsequent comments on the issue the deletion or (on )?the decisionmaking process should be placed on the relevant live pages please do not edit this page'),
    re.compile('this page is an archive of the proposed deletion of the article below further comments should be made on the (appropriate discussion page such as the )?articles talk page (if it exists )?or (on a votes for undeletion nomination|after the end of this archived section)'),
    re.compile('note this debate has been added to the .*?deletion list of .*?deletions( ron)?'),
    re.compile('preceding wikipediasignatures (unsigned|undated)? comment (was )?added( at ?)?( by)?'),
    re.compile('remove this template when closing this afd'),
    re.compile('this afd nomination was incomplete (missing step )?it is listed now'),
    re.compile('this afd nomination was wikipediaarticlesfordeletion howtolistpagesfordeletion orphaned listing now'),
    re.compile('further comments should be made on the articles talk page rather than here so that this page is preserved as an historic record (br )?'),
    re.compile('no further edits should be made to (this )?page'),
    re.compile('the result( of the debate)? was'),
    re.compile('(the (above discussion|following discussion)|this page) is ((now )?preserved as an archive of the debate( and (like other delete pages )?is no longer live)?|an archived debate of the proposed deletion of the article( below)?)'),
    re.compile('(please do not modify it )?subsequent comments (on the issue the deletion or on the decisionmaking process )?should be (made|placed) on the (appropriate|relevant) (discussion|live) page(s)?( such as the articles talk page or (o|i)n an?)?')
]
spacesRe = re.compile(r' {2,}')


def removeAfDText(text):
    """ Although templates can be put into text without them expanding, it is
    advised against doing so. Therefore, templates are not marked as such but
    instead the dumps contain the templates text next to actual content. We
    try our best to remove the most frequently used templates using regular
    expressions. These were build after sorting the AfD posts by frequency.
    """
    for templateRe in afdTemplates:
        text = templateRe.sub("", text)
    # Remove leftover consecutive spaces that could appear after applying res:
    text = spacesRe.sub(' ', text)
    return text
mit
johannmeyer/bepoppy8
start.py
12
16455
#!/usr/bin/env python from __future__ import print_function import pygtk import gtk pygtk.require('2.0') import os import shutil import datetime from fnmatch import fnmatch import subprocess class ConfChooser(object): # General Functions def update_combo(self, combo, clist, active): combo.set_sensitive(False) combo.get_model().clear() current_index = 0 for (i, text) in enumerate(clist): combo.append_text(text) if os.path.join(self.conf_dir, text) == os.path.realpath(active): current_index = i combo.set_active(current_index) combo.set_sensitive(True) def update_conf_label(self): desc = "Current conf: " if not os.path.lexists(self.conf_xml): desc += "does not exist" else: if os.path.islink(self.conf_xml): if os.path.exists(self.conf_xml): desc += "symlink to " else: desc += "broken symlink to " real_conf_path = os.path.realpath(self.conf_xml) desc += os.path.relpath(real_conf_path, self.conf_dir) self.conf_explain.set_text(desc) def update_controlpanel_label(self): desc = "Current control_panel: " if not os.path.lexists(self.controlpanel_xml): desc += "does not exist" else: if os.path.islink(self.controlpanel_xml): if os.path.exists(self.controlpanel_xml): desc += "symlink to " else: desc += "broken symlink to " real_conf_path = os.path.realpath(self.controlpanel_xml) desc += os.path.relpath(real_conf_path, self.conf_dir) self.controlpanel_explain.set_text(desc) # CallBack Functions def find_conf_files(self): conf_files = [] pattern = "*conf[._-]*xml" backup_pattern = "*conf[._-]*xml.20[0-9][0-9]-[01][0-9]-[0-3][0-9]_*" excludes = ["%gconf.xml"] for path, subdirs, files in os.walk(self.conf_dir): for name in files: if self.exclude_backups and fnmatch(name, backup_pattern): continue if fnmatch(name, pattern): filepath = os.path.join(path, name) entry = os.path.relpath(filepath, self.conf_dir) if not os.path.islink(filepath) and entry not in excludes: conf_files.append(entry) conf_files.sort() self.update_combo(self.conf_file_combo, conf_files, self.conf_xml) def 
find_controlpanel_files(self): controlpanel_files = [] pattern = "*control_panel[._-]*xml" backup_pattern = "*control_panel[._-]*xml.20[0-9][0-9]-[01][0-9]-[0-3][0-9]_*" excludes = [] for path, subdirs, files in os.walk(self.conf_dir): for name in files: if self.exclude_backups and fnmatch(name, backup_pattern): continue if fnmatch(name, pattern): filepath = os.path.join(path, name) entry = os.path.relpath(filepath, self.conf_dir) if not os.path.islink(filepath) and entry not in excludes: controlpanel_files.append(entry) controlpanel_files.sort() self.update_combo(self.controlpanel_file_combo, controlpanel_files, self.controlpanel_xml) def about(self, widget): about_d = gtk.AboutDialog() about_d.set_program_name("Paparazzi Configuration Selector") about_d.set_version("1.0") about_d.set_copyright("(c) GPL v2") about_d.set_comments("Select the active configuration") about_d.set_website("http://paparazzi.github.com") about_d.set_logo(gtk.gdk.pixbuf_new_from_file(os.path.join(self.paparazzi_home, "data/pictures/penguin_icon.png"))) about_d.run() about_d.destroy() def sure(self, widget, filename): dialog = gtk.MessageDialog(self.window, gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_QUESTION, gtk.BUTTONS_OK_CANCEL, "Are you sure you want to delete?") dialog.format_secondary_text("File: " + filename) response = dialog.run() ret = False if response == gtk.RESPONSE_OK: ret = True dialog.destroy() return ret def set_backups(self, widget): self.exclude_backups = not widget.get_active() self.find_conf_files() self.find_controlpanel_files() def launch(self, widget): self.accept(widget) args = ["./paparazzi"] if self.btnPythonGUI.get_active(): args += ["-python"] self.pp = subprocess.Popen(args) self.window.destroy() def backupconf(self, use_personal=False): timestr = datetime.datetime.now().strftime("%Y-%m-%d_%H:%M") if os.path.islink(self.conf_xml): if self.verbose: self.print_status("Symlink does not need backup") else: if os.path.exists(self.conf_xml): newname = "conf.xml." 
+ timestr backup_file = os.path.join(self.conf_dir, newname) shutil.copyfile(self.conf_xml, backup_file) self.print_status("Made a backup: " + newname) if use_personal: backup_name = self.conf_personal_name + "." + timestr conf_personal_backup = os.path.join(self.conf_dir, backup_name) if os.path.exists(self.conf_personal): self.print_status("Backup conf.xml.personal to " + backup_name) shutil.copyfile(self.conf_personal, conf_personal_backup) def backupcontrolpanel(self, use_personal=False): timestr = datetime.datetime.now().strftime("%Y-%m-%d_%H:%M") if os.path.islink(self.controlpanel_xml): if self.verbose: self.print_status("Symlink does not need backup") else: if os.path.exists(self.controlpanel_xml): newname = "control_panel.xml." + timestr backup_file = os.path.join(self.conf_dir, newname) shutil.copyfile(self.controlpanel_xml, backup_file) self.print_status("Made a backup: " + newname) if use_personal: backup_name = self.controlpanel_personal_name + "." + timestr controlpanel_personal_backup = os.path.join(self.conf_dir, backup_name) if os.path.exists(self.controlpanel_personal): self.print_status("Backup control_panel.xml.personal to " + backup_name) shutil.copyfile(self.controlpanel_personal, controlpanel_personal_backup) def delete_conf(self, widget): filename = os.path.join(self.conf_dir, self.conf_file_combo.get_active_text()) ret = self.sure(widget, filename) if ret: if os.path.exists(filename): os.remove(filename) self.update_conf_label() self.find_conf_files() self.print_status("Deleted: " + filename) def delete_controlpanel(self, widget): filename = os.path.join(self.conf_dir, self.controlpanel_file_combo.get_active_text()) ret = self.sure(widget, filename) if ret: if os.path.exists(filename): os.remove(filename) self.update_controlpanel_label() self.find_controlpanel_files() self.print_status("Deleted: " + filename) def accept(self, widget): selected = self.conf_file_combo.get_active_text() if selected == "conf.xml": self.print_status("conf.xml is 
not a symlink, maybe you want to copy it to your personal file first?") else: self.backupconf() if os.path.islink(self.conf_xml) or os.path.exists(self.conf_xml): os.remove(self.conf_xml) os.symlink(selected, self.conf_xml) self.update_conf_label() self.find_conf_files() selected = self.controlpanel_file_combo.get_active_text() if selected == "control_panel.xml": self.print_status("control_panel.xml is not a symlink, maybe you want to copy it to your personal file first?") else: self.backupcontrolpanel() if os.path.islink(self.controlpanel_xml) or os.path.exists(self.controlpanel_xml): os.remove(self.controlpanel_xml) os.symlink(selected, self.controlpanel_xml) self.update_controlpanel_label() self.find_controlpanel_files() def personal_conf(self, widget): if os.path.exists(self.conf_personal): self.print_status("Your personal conf file already exists!") else: self.backupconf(True) template_file = os.path.join(self.conf_dir, self.conf_file_combo.get_active_text()) shutil.copyfile(template_file, self.conf_personal) os.remove(self.conf_xml) os.symlink(self.conf_personal_name, self.conf_xml) self.update_conf_label() self.find_conf_files() def personal_controlpanel(self, widget): if os.path.exists(self.controlpanel_personal): self.print_status("Your personal control_panel file already exists!") else: self.backupcontrolpanel(True) template_file = os.path.join(self.conf_dir, self.controlpanel_file_combo.get_active_text()) shutil.copyfile(template_file, self.controlpanel_personal) os.remove(self.controlpanel_xml) os.symlink(self.controlpanel_personal_name, self.controlpanel_xml) self.update_controlpanel_label() self.find_controlpanel_files() def print_status(self, text): self.statusbar.push(self.context_id, text) # Constructor Functions def __init__(self): # paparazzi process self.pp = None self.window = gtk.Window(gtk.WINDOW_TOPLEVEL) self.window.set_title("Paparazzi Configuration Chooser") self.my_vbox = gtk.VBox() # if PAPARAZZI_HOME not set, then assume the tree 
containing this # file is a reasonable substitute self.paparazzi_home = os.getenv("PAPARAZZI_HOME", os.path.dirname(os.path.abspath(__file__))) self.conf_dir = os.path.join(self.paparazzi_home, "conf") self.conf_xml = os.path.join(self.conf_dir, "conf.xml") self.conf_personal_name = "conf_personal.xml" self.conf_personal = os.path.join(self.conf_dir, self.conf_personal_name) self.controlpanel_xml = os.path.join(self.conf_dir, "control_panel.xml") self.controlpanel_personal_name = "control_panel_personal.xml" self.controlpanel_personal = os.path.join(self.conf_dir, self.controlpanel_personal_name) self.exclude_backups = True self.verbose = False # MenuBar mb = gtk.MenuBar() # File filemenu = gtk.Menu() # File Title filem = gtk.MenuItem("File") filem.set_submenu(filemenu) exitm = gtk.MenuItem("Exit") exitm.connect("activate", gtk.main_quit) filemenu.append(exitm) mb.append(filem) # Help helpmenu = gtk.Menu() # Help Title helpm = gtk.MenuItem("Help") helpm.set_submenu(helpmenu) aboutm = gtk.MenuItem("About") aboutm.connect("activate", self.about) helpmenu.append(aboutm) mb.append(helpm) self.my_vbox.pack_start(mb, False) # Combo Bar self.conf_label = gtk.Label("Conf:") self.conf_label.set_size_request(100, 30) self.conf_file_combo = gtk.combo_box_new_text() self.find_conf_files() # self.firmwares_combo.connect("changed", self.parse_list_of_airframes) self.conf_file_combo.set_size_request(550, 30) self.btnDeleteConf = gtk.Button(None, gtk.STOCK_DELETE) self.btnDeleteConf.connect("clicked", self.delete_conf) self.btnDeleteConf.set_tooltip_text("Permanently Delete Conf") self.btnPersonalConf = gtk.Button(None, gtk.STOCK_COPY) self.btnPersonalConf.connect("clicked", self.personal_conf) self.btnPersonalConf.set_tooltip_text("Create Personal Conf Based on Selected and Activate") self.confbar = gtk.HBox() self.confbar.pack_start(self.conf_label) self.confbar.pack_start(self.conf_file_combo) self.confbar.pack_start(self.btnDeleteConf) 
self.confbar.pack_start(self.btnPersonalConf) self.my_vbox.pack_start(self.confbar, False) # Explain current conf config self.conf_explain = gtk.Label("") self.update_conf_label() self.conf_explain.set_size_request(0, 45) self.cfexbar = gtk.HBox() self.cfexbar.pack_start(self.conf_explain) self.my_vbox.pack_start(self.cfexbar, False) # Controlpanel self.controlpanel_label = gtk.Label("Controlpanel:") self.controlpanel_label.set_size_request(100, 30) self.controlpanel_file_combo = gtk.combo_box_new_text() self.find_controlpanel_files() self.controlpanel_file_combo.set_size_request(550, 30) # window self.btnDeleteControl = gtk.Button(None, gtk.STOCK_DELETE) self.btnDeleteControl.connect("clicked", self.delete_controlpanel) self.btnDeleteControl.set_tooltip_text("Permanently Delete") self.btnPersonalControl = gtk.Button(None, gtk.STOCK_COPY) self.btnPersonalControl.connect("clicked", self.personal_controlpanel) self.btnPersonalControl.set_tooltip_text("Create Personal Controlpanel Based on Selected and Activate") self.controlpanelbar = gtk.HBox(False) self.controlpanelbar.pack_start(self.controlpanel_label) self.controlpanelbar.pack_start(self.controlpanel_file_combo) self.controlpanelbar.pack_start(self.btnDeleteControl) self.controlpanelbar.pack_start(self.btnPersonalControl) self.my_vbox.pack_start(self.controlpanelbar, False) # Explain current controlpanel config self.controlpanel_explain = gtk.Label("") self.update_controlpanel_label() self.controlpanel_explain.set_size_request(0, 45) self.ctexbar = gtk.HBox() self.ctexbar.pack_start(self.controlpanel_explain) self.my_vbox.pack_start(self.ctexbar, False) # show backups button self.btnBackups = gtk.CheckButton("show backups") self.btnBackups.connect("toggled", self.set_backups) self.my_vbox.pack_start(self.btnBackups, False) # show gui button self.btnPythonGUI = gtk.CheckButton("new python center (beta)") self.my_vbox.pack_start(self.btnPythonGUI, False) # Buttons self.btnAccept = gtk.Button("Set Active") 
self.btnAccept.connect("clicked", self.accept) self.btnAccept.set_tooltip_text("Set selected Conf/Control_Panel as Active") self.btnLaunch = gtk.Button("Launch Paparazzi with selected configuration") self.btnLaunch.connect("clicked", self.launch) self.btnLaunch.set_tooltip_text("Launch Paparazzi with current conf.xml and control_panel.xml") self.btnExit = gtk.Button("Exit") self.btnExit.connect("clicked", gtk.main_quit) self.btnExit.set_tooltip_text("Close application") self.toolbar = gtk.HBox() self.toolbar.set_size_request(0, 60) self.toolbar.pack_start(self.btnLaunch) self.toolbar.pack_start(self.btnAccept) self.toolbar.pack_start(self.btnExit) self.my_vbox.pack_start(self.toolbar, False) # status bar self.statusbar = gtk.Statusbar() self.context_id = self.statusbar.get_context_id("info") #self.statusbar.push(self.context_id, "Waiting for you to do something...") self.my_vbox.pack_end(self.statusbar, False) # Bottom self.window.add(self.my_vbox) self.window.show_all() self.window.set_position(gtk.WIN_POS_CENTER_ALWAYS) self.window.connect("destroy", gtk.main_quit) def main(self): gtk.main() if self.pp: self.pp.wait() if __name__ == "__main__": import sys if len(sys.argv) > 1: airframe_file = sys.argv[1] gui = ConfChooser() gui.main()
gpl-2.0
Ardesco/selenium
py/test/selenium/webdriver/common/clear_tests.py
8
2547
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

import pytest

from selenium.common.exceptions import InvalidElementStateException
from selenium.webdriver.common.by import By


def testWritableTextInputShouldClear(driver, pages):
    """clear() empties a writable text input."""
    pages.load("readOnlyPage.html")
    field = driver.find_element(By.ID, "writableTextInput")
    field.clear()
    assert field.get_attribute("value") == ""


def testTextInputShouldNotClearWhenDisabled(driver, pages):
    """clear() on a disabled text input raises InvalidElementStateException."""
    pages.load("readOnlyPage.html")
    field = driver.find_element(By.ID, "textInputnotenabled")
    assert not field.is_enabled()
    with pytest.raises(InvalidElementStateException):
        field.clear()


def testTextInputShouldNotClearWhenReadOnly(driver, pages):
    """clear() on a read-only text input raises InvalidElementStateException."""
    pages.load("readOnlyPage.html")
    field = driver.find_element(By.ID, "readOnlyTextInput")
    with pytest.raises(InvalidElementStateException):
        field.clear()


def testWritableTextAreaShouldClear(driver, pages):
    """clear() empties a writable textarea."""
    pages.load("readOnlyPage.html")
    area = driver.find_element(By.ID, "writableTextArea")
    area.clear()
    assert area.get_attribute("value") == ""


def testTextAreaShouldNotClearWhenDisabled(driver, pages):
    """clear() on a disabled textarea raises InvalidElementStateException."""
    pages.load("readOnlyPage.html")
    area = driver.find_element(By.ID, "textAreaNotenabled")
    with pytest.raises(InvalidElementStateException):
        area.clear()


def testTextAreaShouldNotClearWhenReadOnly(driver, pages):
    """clear() on a read-only textarea raises InvalidElementStateException."""
    pages.load("readOnlyPage.html")
    area = driver.find_element(By.ID, "textAreaReadOnly")
    with pytest.raises(InvalidElementStateException):
        area.clear()


def testContentEditableAreaShouldClear(driver, pages):
    """clear() empties a contenteditable element (checked via .text)."""
    pages.load("readOnlyPage.html")
    editable = driver.find_element(By.ID, "content-editable")
    editable.clear()
    assert editable.text == ""
apache-2.0
tpfanco/tpfanco-admin
src/tpfanco_admin/thermometer.py
1
23240
#! /usr/bin/python2.7 # -*- coding: utf-8 -*- # # tpfanco - controls the fan-speed of IBM/Lenovo ThinkPad Notebooks # Copyright (C) 2011-2015 Vladyslav Shtabovenko # Copyright (C) 2007-2009 Sebastian Urban # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # import math import gobject from gtk import gdk import gtk import gtk.glade import pygtk pygtk.require('2.0') class Thermometer(gtk.DrawingArea): my_xml = None max_temp = 90. min_temp = 10. scale_interval = 10. 
animate_interval = 30 animate_step = 0.3 unit = "°C" # maps temperatures to fan levels triggers = {0: 0} shown_temp = min_temp temperature = min_temp hysteresis_temp = None hysteresis_level = None sensor_name = "" sensor_name_x = 0 sensor_name_y = 0 sensor_name_width = 0 sensor_name_height = 0 sensor_id = 0 translate_x = 5 translate_y = 5 Rkreis = 14.0 Rrohr = 6.0 wanted_height = int(2 * Rkreis + 15) draw_temperature_unit = False draw_triggers = True dragging = False mouse_over_temp = None temp_convert_func = None decimals = 1 def __init__(self, tmsettings): self.my_xml = tmsettings['my_xml'] gtk.DrawingArea.__init__(self) self.set_events(gdk.EXPOSURE_MASK | gdk.POINTER_MOTION_MASK | gdk.POINTER_MOTION_HINT_MASK | # @UndefinedVariable gdk.BUTTON_MOTION_MASK | gdk.BUTTON_PRESS_MASK | gdk.BUTTON_RELEASE_MASK) # @UndefinedVariable self.connect("expose_event", self.expose) self.connect("motion_notify_event", self.motion_notify_event) self.connect("button_press_event", self.button_press_event) self.connect("button_release_event", self.button_release_event) self.normal_cursor = gdk.Cursor(gdk.ARROW) # @UndefinedVariable self.move_cursor = gdk.Cursor( # @UndefinedVariable gdk.SB_H_DOUBLE_ARROW) # @UndefinedVariable self.cross_cursor = gdk.Cursor(gdk.CROSS) # @UndefinedVariable self.hand_cursor = gdk.Cursor(gdk.HAND2) # @UndefinedVariable self.trigger_names = {0: _("off"), 2: _("15%"), 3: _("30%"), 4: _("45%"), 5: _("60%"), 6: _("75%"), 7: _("90%"), 8: _("100%"), 255: _("hw-ctrld"), 256: _("full")} # build popup menu self.popup_menu = gtk.Menu() self.trigger_popup_menu_items = {} keys = self.trigger_names.keys() keys.sort() for id in keys: item = gtk.MenuItem(self.trigger_names[id]) self.trigger_popup_menu_items[id] = item self.popup_menu.append(item) item.connect_object("activate", self.popup_menu_event, str(id)) item.show() seperator = gtk.SeparatorMenuItem() self.popup_menu.append(seperator) seperator.show() self.popup_menu_split = gtk.MenuItem(_("Split")) 
self.popup_menu.append(self.popup_menu_split) self.popup_menu_split.connect_object( "activate", self.popup_menu_event, 'split') self.popup_menu_split.show() self.popup_menu_remove = gtk.MenuItem(_("Remove")) self.popup_menu.append(self.popup_menu_remove) self.popup_menu_remove.connect_object( "activate", self.popup_menu_event, 'remove') self.popup_menu_remove.show() self.sensor_name_dialog = self.my_xml.get_widget('sensornameDialog') self.sensor_name_entry = self.my_xml.get_widget('entrySensorName') self.sensor_name_entry_title = self.my_xml.get_widget('labelTitle') self.set_temp_convert_func(lambda T: T, 0) self.set_temperature(50) self.verify_level_order(False) self.set_size_request(100, self.wanted_height) def set_temp_convert_func(self, func, decimals): """sets the function that converts the temperature from celsius""" self.temp_convert_func = lambda T: func(T) self.decimals = decimals self.queue_draw() def set_show_triggers(self, show): """Sets if triggers should be shown""" self.draw_triggers = show self.queue_draw() def set_sensor_name(self, name): """Sets the sensor name to show""" if self.sensor_name != name: self.sensor_name = name self.queue_draw() def get_sensor_name(self): """returns the sensor name""" return self.sensor_name def set_temperature(self, temp): """Sets the temperature to show""" if self.temperature != temp: self.temperature = temp gobject.timeout_add( self.animate_interval, self.animate_temperature) def get_temperature(self): """Returns the currently shown temperature""" return self.temperature def set_hysteresis_temperature(self, temp, level): """Sets the hysteresis turn off temperature and level""" if self.hysteresis_temp != temp or self.hysteresis_level != level: self.hysteresis_temp = temp self.hysteresis_level = level self.queue_draw() def set_triggers(self, trig): """Sets the fan level triggers""" self.triggers = trig self.verify_level_order(False) self.queue_draw() def get_triggers(self): """Gets the fan level triggers""" return 
self.triggers def end_animation(self): """Ends temperature animation""" self.shown_temp = self.temperature def popup_menu_event(self, event): if event == 'split': next_temp, dummy = self.get_key_higher_than( self.triggers, self.current_popup_temperature) if next_temp == None: temp = self.current_popup_temperature + 3 else: temp = round( (self.current_popup_temperature + next_temp) / 2.0) if temp >= self.min_temp and temp <= self.max_temp: self.triggers[temp] = 0 self.verify_level_order(False) elif event == 'remove': if len(self.triggers.keys()) > 2: del self.triggers[self.current_popup_temperature] self.verify_level_order(False) else: level = int(event) from_right = level < self.triggers[self.current_popup_temperature] self.triggers[self.current_popup_temperature] = level self.verify_level_order(from_right) self.queue_draw() self.emit('trigger_changed') def verify_level_order(self, from_right): if 0 not in self.triggers.keys() or self.triggers[0] != 0: self.triggers[0] = 0 self.emit('trigger_changed') if len(self.triggers.keys()) < 2: self.triggers[self.min_temp + 5] = 255 self.emit('trigger_changed') temps = self.triggers.keys() if from_right: temps.sort(cmp=lambda x, y: -cmp(x, y)) min_level = 256 for temp in temps: if self.triggers[temp] <= min_level: min_level = self.triggers[temp] else: self.triggers[temp] = min_level self.emit('trigger_changed') else: temps.sort(cmp=lambda x, y: cmp(x, y)) max_level = 0 for temp in temps: if self.triggers[temp] >= max_level: max_level = self.triggers[temp] else: self.triggers[temp] = max_level self.emit('trigger_changed') def motion_notify_event(self, widget, event): if event.is_hint: x, y, _state = event.window.get_pointer() else: x = event.x y = event.y _state = event.state x -= self.translate_x y -= self.translate_y cursor = self.normal_cursor temp = self.pos_to_temp(x) if y >= self.Rkreis - self.Rrohr and y <= self.Rkreis + self.Rrohr and temp >= self.min_temp and temp <= self.max_temp: if self.dragging: self.drag_temp = 
min( self.drag_max_temp, max(self.drag_min_temp, temp)) cursor = self.move_cursor self.queue_draw() else: if y >= self.Rkreis - self.Rrohr and y <= self.Rkreis + self.Rrohr: self.mouse_over_temp = round(temp) temp, _level = self.get_key_lower_than( self.triggers, self.mouse_over_temp) if self.mouse_over_temp in self.triggers: cursor = self.move_cursor elif temp: cursor = self.hand_cursor else: cursor = self.normal_cursor else: self.mouse_over_temp = 0 if x >= self.sensor_name_x and x <= self.sensor_name_x + self.sensor_name_width and y <= self.sensor_name_y and y >= self.sensor_name_y - self.sensor_name_height: self.mouse_over_sensor_name = True cursor = self.hand_cursor else: self.mouse_over_sensor_name = False self.window.set_cursor(cursor) return True def button_press_event(self, widget, event): if event.button == 1 and self.mouse_over_temp and not self.dragging and self.mouse_over_temp in self.triggers: # start dragging self.dragging = True self.drag_temp = self.mouse_over_temp self.drag_min_temp, dummy = self.get_key_lower_than( self.triggers, self.drag_temp) if self.drag_min_temp == None: self.drag_min_temp = self.min_temp self.drag_max_temp, dummy = self.get_key_higher_than( self.triggers, self.drag_temp) if self.drag_max_temp == None: self.drag_max_temp = self.max_temp self.drag_level = self.triggers[self.drag_temp] del self.triggers[self.drag_temp] elif event.button == 1 and self.mouse_over_sensor_name: # change sensor name self.sensor_name_entry_title.set_text( _("Name for temperature sensor %s") % self.sensor_id) self.sensor_name_entry.set_text(self.sensor_name) self.sensor_name_entry.grab_focus() self.sensor_name_entry.select_region(0, 100) self.sensor_name_dialog.set_transient_for(self.dialog_parent) if self.sensor_name_dialog.run() == 1: # OK was pressed self.sensor_name = self.sensor_name_entry.get_text() self.queue_draw() self.emit('name_changed') self.sensor_name_dialog.hide() elif event.button == 1 and self.mouse_over_temp and not self.dragging: 
temp, level = self.get_key_lower_than( self.triggers, self.mouse_over_temp) if temp: # show popup menu self.current_popup_temperature = temp for trid in self.trigger_popup_menu_items.iterkeys(): self.trigger_popup_menu_items[ trid].set_sensitive(not trid == level) self.popup_menu_remove.set_sensitive(len(self.triggers) > 2) self.popup_menu.popup( None, None, None, event.button, event.get_time()) return True def button_release_event(self, widget, event): if event.button == 1 and self.dragging: self.dragging = False goal_temp = round(self.drag_temp) if goal_temp in self.triggers: if goal_temp == self.drag_min_temp: self.triggers[goal_temp] = self.drag_level else: self.triggers[goal_temp] = self.drag_level self.mouse_over_temp = round(self.drag_temp) self.emit('trigger_changed') return True def animate_temperature(self): if abs(self.shown_temp - self.temperature) < self.animate_step: self.shown_temp = self.temperature self.queue_draw() return False else: if self.shown_temp < self.temperature: self.shown_temp += self.animate_step else: self.shown_temp -= self.animate_step self.queue_draw() return True def expose(self, widget, event): self.context = widget.window.cairo_create() self.context.rectangle(event.area.x, event.area.y, event.area.width, event.area.height) self.context.clip() self.draw(self.context) return False def draw(self, context): space = self.get_allocation() self.Lrohr = space.width - 2 * self.Rkreis - self.Rrohr - 20 Rrohr = self.Rrohr scale_length = 6 scale_space = 2 phi = math.asin(Rrohr / self.Rkreis) phi_empty = math.asin((Rrohr * 2.) 
/ self.Rkreis) context.translate(self.translate_x, self.translate_y) # fill if self.shown_temp >= self.min_temp: context.arc( self.Rkreis, self.Rkreis, self.Rkreis, phi, 2 * math.pi - phi) context.line_to( self.temp_to_pos(self.shown_temp), self.Rkreis - Rrohr) context.line_to( self.temp_to_pos(self.shown_temp), self.Rkreis + Rrohr) else: context.arc( self.Rkreis, self.Rkreis, self.Rkreis, phi_empty, 2 * math.pi - phi_empty) context.set_source_rgb(1, 0, 0) context.fill() # draw temperatures context.new_path() text = ("%." + str(self.decimals) + "f") % self.temp_convert_func(self.temperature) _x_bearing, _y_bearing, text_width, text_height, _x_advance, _y_advance = context.text_extents( text) context.move_to( self.Rkreis - text_width / 2.0, self.Rkreis + text_height / 2.0) context.set_source_rgb(0, 0, 0) context.show_text(text) context.stroke() # draw sensor name context.new_path() text = self.sensor_name _x_bearing, _y_bearing, text_width, text_height, _x_advance, _y_advance = context.text_extents( text) self.sensor_name_x = 2 * self.Rkreis if len(text.strip()) > 0: self.sensor_name_y = self.Rkreis + \ Rrohr + scale_space + text_height self.sensor_name_width = text_width self.sensor_name_height = text_height else: self.sensor_name_y = self.Rkreis + Rrohr + scale_space + 10 self.sensor_name_width = 30 self.sensor_name_height = 10 context.move_to(self.sensor_name_x, self.sensor_name_y) context.set_source_rgb(0, 0, 1) context.show_text(text) context.stroke() context.set_source_rgb(0, 0, 0) # draw unit if self.draw_temperature_unit: context.new_path() text = self.unit _x_bearing, _y_bearing, text_width, text_height, _x_advance, _y_advance = context.text_extents( text) context.move_to(2 * self.Rkreis + self.Lrohr - text_width / 2, self.Rkreis - Rrohr - scale_space) context.show_text(text) context.stroke() # draw scale for i in range(1, int(math.ceil((self.max_temp + 1 - self.min_temp) / self.scale_interval))): n = i * self.scale_interval + self.min_temp 
context.new_path() context.move_to(self.temp_to_pos(n), self.Rkreis + Rrohr) context.rel_line_to(0, +scale_length) context.set_source_rgb(0, 0, 0) context.stroke() text = ( "%." + str(self.decimals) + "f") % self.temp_convert_func(n) _x_bearing, _y_bearing, text_width, text_height, _x_advance, _y_advance = context.text_extents( text) context.move_to(self.temp_to_pos( n) - text_width / 2, self.Rkreis + Rrohr + scale_length + text_height + scale_space) context.show_text(text) context.stroke() if self.draw_triggers: if self.hysteresis_temp != None: # find matching temperature for fan level temps = [ x for x in self.triggers.keys() if self.triggers[x] == self.hysteresis_level] if len(temps) > 0: temp = temps[0] # draw hysteresis trigger context.new_path() context.move_to( self.temp_to_pos(self.hysteresis_temp - 0.5), self.Rkreis - Rrohr) context.line_to( self.temp_to_pos(temp), self.Rkreis - Rrohr) context.line_to( self.temp_to_pos(temp), self.Rkreis + Rrohr) context.line_to( self.temp_to_pos(self.hysteresis_temp - 0.5), self.Rkreis + Rrohr) context.set_source_rgba(0.9, 0.9, 0, 0.5) context.fill() # draw trigger markers draw_triggers = self.triggers.copy() if self.dragging and (round(self.drag_temp) not in draw_triggers or round(self.drag_temp) == self.drag_min_temp): draw_triggers[self.drag_temp] = self.drag_level temp, level = self.get_key_higher_than(draw_triggers, -1) first = True while temp != None: if temp >= self.min_temp: # draw marker context.new_path() context.move_to( self.temp_to_pos(temp), self.Rkreis - Rrohr) context.rel_line_to(0, 2 * Rrohr) context.set_source_rgb(0, 0, 1) context.stroke() # draw marker temperature text = ( "%." 
+ str(self.decimals) + "f") % self.temp_convert_func(round(temp)) _x_bearing, _y_bearing, text_width, text_height, _x_advance, _y_advance = context.text_extents( text) context.move_to( self.temp_to_pos(temp) - text_width / 2, self.Rkreis - Rrohr - scale_space) context.set_source_rgb(0, 0, 0) context.show_text(text) context.stroke() # draw level text new_temp, new_level = self.get_key_higher_than( draw_triggers, temp) if new_temp != None: level_pos = (new_temp + max(temp, self.min_temp)) / 2. avail_width = self.temp_to_pos( new_temp) - self.temp_to_pos(max(temp, self.min_temp)) - 3 else: level_pos = (self.max_temp + max(temp, self.min_temp)) / 2. avail_width = self.temp_to_pos( self.max_temp) - self.temp_to_pos(max(temp, self.min_temp)) - 3 text = self.trigger_names[level] font_size = 9 + 1 text_width = 99999 old_font_matrix = context.get_font_matrix() while font_size > 1 and text_width > avail_width: font_size = font_size - 1 context.set_font_size(font_size) _x_bearing, _y_bearing, text_width, text_height, _x_advance, _y_advance = context.text_extents( text) context.move_to( self.temp_to_pos(level_pos) - text_width / 2, self.Rkreis + text_height / 2) if first: context.set_source_rgb(0, 0, 0) first = False else: context.set_source_rgb(0, 0, 1) context.show_text(text) context.stroke() context.set_font_matrix(old_font_matrix) temp, level = new_temp, new_level # border context.arc( self.Rkreis, self.Rkreis, self.Rkreis, phi, 2 * math.pi - phi) context.line_to(2 * self.Rkreis + self.Lrohr, self.Rkreis - Rrohr) context.arc(2 * self.Rkreis + self.Lrohr, self.Rkreis, Rrohr, 3. / 2. * math.pi, 1. / 2. 
* math.pi) context.close_path() context.set_source_rgb(0, 0, 0) context.stroke() def get_key_lower_than(self, kdict, higher_bound): highest_key = None highest_value = None for key, value in kdict.iteritems(): if key < higher_bound and (highest_key == None or key > highest_key): highest_key, highest_value = key, value return highest_key, highest_value def get_key_higher_than(self, kdict, lower_bound): lowest_key = None lowest_value = None for key, value in kdict.iteritems(): if key > lower_bound and (lowest_key == None or key < lowest_key): lowest_key, lowest_value = key, value return lowest_key, lowest_value def temp_to_pos(self, temp): if temp < self.min_temp: return self.temp_to_pos(self.min_temp) elif temp > self.max_temp: return self.temp_to_pos(self.max_temp) else: return 2 * self.Rkreis + (temp - self.min_temp) * self.Lrohr / (self.max_temp - self.min_temp) def pos_to_temp(self, pos): return (pos - 2 * self.Rkreis) / (self.Lrohr / (self.max_temp - self.min_temp)) + self.min_temp # Register signals gobject.signal_new( # @UndefinedVariable 'trigger_changed', Thermometer, gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()) # @UndefinedVariable gobject.signal_new( # @UndefinedVariable 'name_changed', Thermometer, gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()) # @UndefinedVariable # Test case def main(): window = gtk.Window() widget = Thermometer() window.add(widget) window.resize(600, 200) window.connect("destroy", gtk.main_quit) window.show_all() gtk.main() if __name__ == "__main__": main()
gpl-3.0
tinkerinestudio/Tinkerine-Suite
TinkerineSuite/python/Lib/numpy/polynomial/tests/test_chebyshev.py
24
18772
"""Tests for chebyshev module. """ from __future__ import division import numpy as np import numpy.polynomial.chebyshev as ch from numpy.testing import * def trim(x) : return ch.chebtrim(x, tol=1e-6) T0 = [ 1] T1 = [ 0, 1] T2 = [-1, 0, 2] T3 = [ 0, -3, 0, 4] T4 = [ 1, 0, -8, 0, 8] T5 = [ 0, 5, 0, -20, 0, 16] T6 = [-1, 0, 18, 0, -48, 0, 32] T7 = [ 0, -7, 0, 56, 0, -112, 0, 64] T8 = [ 1, 0, -32, 0, 160, 0, -256, 0, 128] T9 = [ 0, 9, 0, -120, 0, 432, 0, -576, 0, 256] Tlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9] class TestPrivate(TestCase) : def test__cseries_to_zseries(self) : for i in range(5) : inp = np.array([2] + [1]*i, np.double) tgt = np.array([.5]*i + [2] + [.5]*i, np.double) res = ch._cseries_to_zseries(inp) assert_equal(res, tgt) def test__zseries_to_cseries(self) : for i in range(5) : inp = np.array([.5]*i + [2] + [.5]*i, np.double) tgt = np.array([2] + [1]*i, np.double) res = ch._zseries_to_cseries(inp) assert_equal(res, tgt) class TestConstants(TestCase) : def test_chebdomain(self) : assert_equal(ch.chebdomain, [-1, 1]) def test_chebzero(self) : assert_equal(ch.chebzero, [0]) def test_chebone(self) : assert_equal(ch.chebone, [1]) def test_chebx(self) : assert_equal(ch.chebx, [0, 1]) class TestArithmetic(TestCase) : def test_chebadd(self) : for i in range(5) : for j in range(5) : msg = "At i=%d, j=%d" % (i,j) tgt = np.zeros(max(i,j) + 1) tgt[i] += 1 tgt[j] += 1 res = ch.chebadd([0]*i + [1], [0]*j + [1]) assert_equal(trim(res), trim(tgt), err_msg=msg) def test_chebsub(self) : for i in range(5) : for j in range(5) : msg = "At i=%d, j=%d" % (i,j) tgt = np.zeros(max(i,j) + 1) tgt[i] += 1 tgt[j] -= 1 res = ch.chebsub([0]*i + [1], [0]*j + [1]) assert_equal(trim(res), trim(tgt), err_msg=msg) def test_chebmulx(self): assert_equal(ch.chebmulx([0]), [0]) assert_equal(ch.chebmulx([1]), [0,1]) for i in range(1, 5): ser = [0]*i + [1] tgt = [0]*(i - 1) + [.5, 0, .5] assert_equal(ch.chebmulx(ser), tgt) def test_chebmul(self) : for i in range(5) : for j in range(5) : 
msg = "At i=%d, j=%d" % (i,j) tgt = np.zeros(i + j + 1) tgt[i + j] += .5 tgt[abs(i - j)] += .5 res = ch.chebmul([0]*i + [1], [0]*j + [1]) assert_equal(trim(res), trim(tgt), err_msg=msg) def test_chebdiv(self) : for i in range(5) : for j in range(5) : msg = "At i=%d, j=%d" % (i,j) ci = [0]*i + [1] cj = [0]*j + [1] tgt = ch.chebadd(ci, cj) quo, rem = ch.chebdiv(tgt, ci) res = ch.chebadd(ch.chebmul(quo, ci), rem) assert_equal(trim(res), trim(tgt), err_msg=msg) def test_chebval(self) : def f(x) : return x*(x**2 - 1) #check empty input assert_equal(ch.chebval([], [1]).size, 0) #check normal input) for i in range(5) : tgt = 1 res = ch.chebval(1, [0]*i + [1]) assert_almost_equal(res, tgt) tgt = (-1)**i res = ch.chebval(-1, [0]*i + [1]) assert_almost_equal(res, tgt) zeros = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) tgt = 0 res = ch.chebval(zeros, [0]*i + [1]) assert_almost_equal(res, tgt) x = np.linspace(-1,1) tgt = f(x) res = ch.chebval(x, [0, -.25, 0, .25]) assert_almost_equal(res, tgt) #check that shape is preserved for i in range(3) : dims = [2]*i x = np.zeros(dims) assert_equal(ch.chebval(x, [1]).shape, dims) assert_equal(ch.chebval(x, [1,0]).shape, dims) assert_equal(ch.chebval(x, [1,0,0]).shape, dims) class TestCalculus(TestCase) : def test_chebint(self) : # check exceptions assert_raises(ValueError, ch.chebint, [0], .5) assert_raises(ValueError, ch.chebint, [0], -1) assert_raises(ValueError, ch.chebint, [0], 1, [0,0]) # test integration of zero polynomial for i in range(2, 5): k = [0]*(i - 2) + [1] res = ch.chebint([0], m=i, k=k) assert_almost_equal(res, [0, 1]) # check single integration with integration constant for i in range(5) : scl = i + 1 pol = [0]*i + [1] tgt = [i] + [0]*i + [1/scl] chebpol = ch.poly2cheb(pol) chebint = ch.chebint(chebpol, m=1, k=[i]) res = ch.cheb2poly(chebint) assert_almost_equal(trim(res), trim(tgt)) # check single integration with integration constant and lbnd for i in range(5) : scl = i + 1 pol = [0]*i + [1] chebpol = 
ch.poly2cheb(pol) chebint = ch.chebint(chebpol, m=1, k=[i], lbnd=-1) assert_almost_equal(ch.chebval(-1, chebint), i) # check single integration with integration constant and scaling for i in range(5) : scl = i + 1 pol = [0]*i + [1] tgt = [i] + [0]*i + [2/scl] chebpol = ch.poly2cheb(pol) chebint = ch.chebint(chebpol, m=1, k=[i], scl=2) res = ch.cheb2poly(chebint) assert_almost_equal(trim(res), trim(tgt)) # check multiple integrations with default k for i in range(5) : for j in range(2,5) : pol = [0]*i + [1] tgt = pol[:] for k in range(j) : tgt = ch.chebint(tgt, m=1) res = ch.chebint(pol, m=j) assert_almost_equal(trim(res), trim(tgt)) # check multiple integrations with defined k for i in range(5) : for j in range(2,5) : pol = [0]*i + [1] tgt = pol[:] for k in range(j) : tgt = ch.chebint(tgt, m=1, k=[k]) res = ch.chebint(pol, m=j, k=range(j)) assert_almost_equal(trim(res), trim(tgt)) # check multiple integrations with lbnd for i in range(5) : for j in range(2,5) : pol = [0]*i + [1] tgt = pol[:] for k in range(j) : tgt = ch.chebint(tgt, m=1, k=[k], lbnd=-1) res = ch.chebint(pol, m=j, k=range(j), lbnd=-1) assert_almost_equal(trim(res), trim(tgt)) # check multiple integrations with scaling for i in range(5) : for j in range(2,5) : pol = [0]*i + [1] tgt = pol[:] for k in range(j) : tgt = ch.chebint(tgt, m=1, k=[k], scl=2) res = ch.chebint(pol, m=j, k=range(j), scl=2) assert_almost_equal(trim(res), trim(tgt)) def test_chebder(self) : # check exceptions assert_raises(ValueError, ch.chebder, [0], .5) assert_raises(ValueError, ch.chebder, [0], -1) # check that zeroth deriviative does nothing for i in range(5) : tgt = [1] + [0]*i res = ch.chebder(tgt, m=0) assert_equal(trim(res), trim(tgt)) # check that derivation is the inverse of integration for i in range(5) : for j in range(2,5) : tgt = [1] + [0]*i res = ch.chebder(ch.chebint(tgt, m=j), m=j) assert_almost_equal(trim(res), trim(tgt)) # check derivation with scaling for i in range(5) : for j in range(2,5) : tgt = [1] + [0]*i 
res = ch.chebder(ch.chebint(tgt, m=j, scl=2), m=j, scl=.5) assert_almost_equal(trim(res), trim(tgt)) class TestMisc(TestCase) : def test_chebfromroots(self) : res = ch.chebfromroots([]) assert_almost_equal(trim(res), [1]) for i in range(1,5) : roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) tgt = [0]*i + [1] res = ch.chebfromroots(roots)*2**(i-1) assert_almost_equal(trim(res),trim(tgt)) def test_chebroots(self) : assert_almost_equal(ch.chebroots([1]), []) assert_almost_equal(ch.chebroots([1, 2]), [-.5]) for i in range(2,5) : tgt = np.linspace(-1, 1, i) res = ch.chebroots(ch.chebfromroots(tgt)) assert_almost_equal(trim(res), trim(tgt)) def test_chebvander(self) : # check for 1d x x = np.arange(3) v = ch.chebvander(x, 3) assert_(v.shape == (3,4)) for i in range(4) : coef = [0]*i + [1] assert_almost_equal(v[...,i], ch.chebval(x, coef)) # check for 2d x x = np.array([[1,2],[3,4],[5,6]]) v = ch.chebvander(x, 3) assert_(v.shape == (3,2,4)) for i in range(4) : coef = [0]*i + [1] assert_almost_equal(v[...,i], ch.chebval(x, coef)) def test_chebfit(self) : def f(x) : return x*(x - 1)*(x - 2) # Test exceptions assert_raises(ValueError, ch.chebfit, [1], [1], -1) assert_raises(TypeError, ch.chebfit, [[1]], [1], 0) assert_raises(TypeError, ch.chebfit, [], [1], 0) assert_raises(TypeError, ch.chebfit, [1], [[[1]]], 0) assert_raises(TypeError, ch.chebfit, [1, 2], [1], 0) assert_raises(TypeError, ch.chebfit, [1], [1, 2], 0) assert_raises(TypeError, ch.chebfit, [1], [1], 0, w=[[1]]) assert_raises(TypeError, ch.chebfit, [1], [1], 0, w=[1,1]) # Test fit x = np.linspace(0,2) y = f(x) # coef3 = ch.chebfit(x, y, 3) assert_equal(len(coef3), 4) assert_almost_equal(ch.chebval(x, coef3), y) # coef4 = ch.chebfit(x, y, 4) assert_equal(len(coef4), 5) assert_almost_equal(ch.chebval(x, coef4), y) # coef2d = ch.chebfit(x, np.array([y,y]).T, 3) assert_almost_equal(coef2d, np.array([coef3,coef3]).T) # test weighting w = np.zeros_like(x) yw = y.copy() w[1::2] = 1 y[0::2] = 0 wcoef3 = 
ch.chebfit(x, yw, 3, w=w) assert_almost_equal(wcoef3, coef3) # wcoef2d = ch.chebfit(x, np.array([yw,yw]).T, 3, w=w) assert_almost_equal(wcoef2d, np.array([coef3,coef3]).T) def test_chebtrim(self) : coef = [2, -1, 1, 0] # Test exceptions assert_raises(ValueError, ch.chebtrim, coef, -1) # Test results assert_equal(ch.chebtrim(coef), coef[:-1]) assert_equal(ch.chebtrim(coef, 1), coef[:-3]) assert_equal(ch.chebtrim(coef, 2), [0]) def test_chebline(self) : assert_equal(ch.chebline(3,4), [3, 4]) def test_cheb2poly(self) : for i in range(10) : assert_almost_equal(ch.cheb2poly([0]*i + [1]), Tlist[i]) def test_poly2cheb(self) : for i in range(10) : assert_almost_equal(ch.poly2cheb(Tlist[i]), [0]*i + [1]) def test_chebpts1(self): #test exceptions assert_raises(ValueError, ch.chebpts1, 1.5) assert_raises(ValueError, ch.chebpts1, 0) #test points tgt = [0] assert_almost_equal(ch.chebpts1(1), tgt) tgt = [-0.70710678118654746, 0.70710678118654746] assert_almost_equal(ch.chebpts1(2), tgt) tgt = [-0.86602540378443871, 0, 0.86602540378443871] assert_almost_equal(ch.chebpts1(3), tgt) tgt = [-0.9238795325, -0.3826834323, 0.3826834323, 0.9238795325] assert_almost_equal(ch.chebpts1(4), tgt) def test_chebpts2(self): #test exceptions assert_raises(ValueError, ch.chebpts2, 1.5) assert_raises(ValueError, ch.chebpts2, 1) #test points tgt = [-1, 1] assert_almost_equal(ch.chebpts2(2), tgt) tgt = [-1, 0, 1] assert_almost_equal(ch.chebpts2(3), tgt) tgt = [-1, -0.5, .5, 1] assert_almost_equal(ch.chebpts2(4), tgt) tgt = [-1.0, -0.707106781187, 0, 0.707106781187, 1.0] assert_almost_equal(ch.chebpts2(5), tgt) class TestChebyshevClass(TestCase) : p1 = ch.Chebyshev([1,2,3]) p2 = ch.Chebyshev([1,2,3], [0,1]) p3 = ch.Chebyshev([1,2]) p4 = ch.Chebyshev([2,2,3]) p5 = ch.Chebyshev([3,2,3]) def test_equal(self) : assert_(self.p1 == self.p1) assert_(self.p2 == self.p2) assert_(not self.p1 == self.p2) assert_(not self.p1 == self.p3) assert_(not self.p1 == [1,2,3]) def test_not_equal(self) : assert_(not 
self.p1 != self.p1) assert_(not self.p2 != self.p2) assert_(self.p1 != self.p2) assert_(self.p1 != self.p3) assert_(self.p1 != [1,2,3]) def test_add(self) : tgt = ch.Chebyshev([2,4,6]) assert_(self.p1 + self.p1 == tgt) assert_(self.p1 + [1,2,3] == tgt) assert_([1,2,3] + self.p1 == tgt) def test_sub(self) : tgt = ch.Chebyshev([1]) assert_(self.p4 - self.p1 == tgt) assert_(self.p4 - [1,2,3] == tgt) assert_([2,2,3] - self.p1 == tgt) def test_mul(self) : tgt = ch.Chebyshev([7.5, 10., 8., 6., 4.5]) assert_(self.p1 * self.p1 == tgt) assert_(self.p1 * [1,2,3] == tgt) assert_([1,2,3] * self.p1 == tgt) def test_floordiv(self) : tgt = ch.Chebyshev([1]) assert_(self.p4 // self.p1 == tgt) assert_(self.p4 // [1,2,3] == tgt) assert_([2,2,3] // self.p1 == tgt) def test_mod(self) : tgt = ch.Chebyshev([1]) assert_((self.p4 % self.p1) == tgt) assert_((self.p4 % [1,2,3]) == tgt) assert_(([2,2,3] % self.p1) == tgt) def test_divmod(self) : tquo = ch.Chebyshev([1]) trem = ch.Chebyshev([2]) quo, rem = divmod(self.p5, self.p1) assert_(quo == tquo and rem == trem) quo, rem = divmod(self.p5, [1,2,3]) assert_(quo == tquo and rem == trem) quo, rem = divmod([3,2,3], self.p1) assert_(quo == tquo and rem == trem) def test_pow(self) : tgt = ch.Chebyshev([1]) for i in range(5) : res = self.p1**i assert_(res == tgt) tgt *= self.p1 def test_call(self) : # domain = [-1, 1] x = np.linspace(-1, 1) tgt = 3*(2*x**2 - 1) + 2*x + 1 assert_almost_equal(self.p1(x), tgt) # domain = [0, 1] x = np.linspace(0, 1) xx = 2*x - 1 assert_almost_equal(self.p2(x), self.p1(xx)) def test_degree(self) : assert_equal(self.p1.degree(), 2) def test_cutdeg(self) : assert_raises(ValueError, self.p1.cutdeg, .5) assert_raises(ValueError, self.p1.cutdeg, -1) assert_equal(len(self.p1.cutdeg(3)), 3) assert_equal(len(self.p1.cutdeg(2)), 3) assert_equal(len(self.p1.cutdeg(1)), 2) assert_equal(len(self.p1.cutdeg(0)), 1) def test_convert(self) : x = np.linspace(-1,1) p = self.p1.convert(domain=[0,1]) assert_almost_equal(p(x), 
self.p1(x)) def test_mapparms(self) : parms = self.p2.mapparms() assert_almost_equal(parms, [-1, 2]) def test_trim(self) : coef = [1, 1e-6, 1e-12, 0] p = ch.Chebyshev(coef) assert_equal(p.trim().coef, coef[:3]) assert_equal(p.trim(1e-10).coef, coef[:2]) assert_equal(p.trim(1e-5).coef, coef[:1]) def test_truncate(self) : assert_raises(ValueError, self.p1.truncate, .5) assert_raises(ValueError, self.p1.truncate, 0) assert_equal(len(self.p1.truncate(4)), 3) assert_equal(len(self.p1.truncate(3)), 3) assert_equal(len(self.p1.truncate(2)), 2) assert_equal(len(self.p1.truncate(1)), 1) def test_copy(self) : p = self.p1.copy() assert_(self.p1 == p) def test_integ(self) : p = self.p2.integ() assert_almost_equal(p.coef, ch.chebint([1,2,3], 1, 0, scl=.5)) p = self.p2.integ(lbnd=0) assert_almost_equal(p(0), 0) p = self.p2.integ(1, 1) assert_almost_equal(p.coef, ch.chebint([1,2,3], 1, 1, scl=.5)) p = self.p2.integ(2, [1, 2]) assert_almost_equal(p.coef, ch.chebint([1,2,3], 2, [1,2], scl=.5)) def test_deriv(self) : p = self.p2.integ(2, [1, 2]) assert_almost_equal(p.deriv(1).coef, self.p2.integ(1, [1]).coef) assert_almost_equal(p.deriv(2).coef, self.p2.coef) def test_roots(self) : p = ch.Chebyshev(ch.poly2cheb([0, -1, 0, 1]), [0, 1]) res = p.roots() tgt = [0, .5, 1] assert_almost_equal(res, tgt) def test_linspace(self): xdes = np.linspace(0, 1, 20) ydes = self.p2(xdes) xres, yres = self.p2.linspace(20) assert_almost_equal(xres, xdes) assert_almost_equal(yres, ydes) def test_fromroots(self) : roots = [0, .5, 1] p = ch.Chebyshev.fromroots(roots, domain=[0, 1]) res = p.coef tgt = ch.poly2cheb([0, -1, 0, 1]) assert_almost_equal(res, tgt) def test_fit(self) : def f(x) : return x*(x - 1)*(x - 2) x = np.linspace(0,3) y = f(x) # test default value of domain p = ch.Chebyshev.fit(x, y, 3) assert_almost_equal(p.domain, [0,3]) # test that fit works in given domains p = ch.Chebyshev.fit(x, y, 3, None) assert_almost_equal(p(x), y) assert_almost_equal(p.domain, [0,3]) p = ch.Chebyshev.fit(x, y, 
3, []) assert_almost_equal(p(x), y) assert_almost_equal(p.domain, [-1, 1]) # test that fit accepts weights. w = np.zeros_like(x) yw = y.copy() w[1::2] = 1 yw[0::2] = 0 p = ch.Chebyshev.fit(x, yw, 3, w=w) assert_almost_equal(p(x), y) def test_identity(self) : x = np.linspace(0,3) p = ch.Chebyshev.identity() assert_almost_equal(p(x), x) p = ch.Chebyshev.identity([1,3]) assert_almost_equal(p(x), x) # if __name__ == "__main__": run_module_suite()
agpl-3.0
Matt-Deacalion/django
django/db/backends/oracle/schema.py
404
5313
import binascii import copy import datetime import re from django.db.backends.base.schema import BaseDatabaseSchemaEditor from django.db.utils import DatabaseError from django.utils import six from django.utils.text import force_text class DatabaseSchemaEditor(BaseDatabaseSchemaEditor): sql_create_column = "ALTER TABLE %(table)s ADD %(column)s %(definition)s" sql_alter_column_type = "MODIFY %(column)s %(type)s" sql_alter_column_null = "MODIFY %(column)s NULL" sql_alter_column_not_null = "MODIFY %(column)s NOT NULL" sql_alter_column_default = "MODIFY %(column)s DEFAULT %(default)s" sql_alter_column_no_default = "MODIFY %(column)s DEFAULT NULL" sql_delete_column = "ALTER TABLE %(table)s DROP COLUMN %(column)s" sql_delete_table = "DROP TABLE %(table)s CASCADE CONSTRAINTS" def quote_value(self, value): if isinstance(value, (datetime.date, datetime.time, datetime.datetime)): return "'%s'" % value elif isinstance(value, six.string_types): return "'%s'" % six.text_type(value).replace("\'", "\'\'") elif isinstance(value, six.buffer_types): return "'%s'" % force_text(binascii.hexlify(value)) elif isinstance(value, bool): return "1" if value else "0" else: return str(value) def delete_model(self, model): # Run superclass action super(DatabaseSchemaEditor, self).delete_model(model) # Clean up any autoincrement trigger self.execute(""" DECLARE i INTEGER; BEGIN SELECT COUNT(*) INTO i FROM USER_CATALOG WHERE TABLE_NAME = '%(sq_name)s' AND TABLE_TYPE = 'SEQUENCE'; IF i = 1 THEN EXECUTE IMMEDIATE 'DROP SEQUENCE "%(sq_name)s"'; END IF; END; /""" % {'sq_name': self.connection.ops._get_sequence_name(model._meta.db_table)}) def alter_field(self, model, old_field, new_field, strict=False): try: super(DatabaseSchemaEditor, self).alter_field(model, old_field, new_field, strict) except DatabaseError as e: description = str(e) # If we're changing type to an unsupported type we need a # SQLite-ish workaround if 'ORA-22858' in description or 'ORA-22859' in description: 
self._alter_field_type_workaround(model, old_field, new_field) else: raise def _alter_field_type_workaround(self, model, old_field, new_field): """ Oracle refuses to change from some type to other type. What we need to do instead is: - Add a nullable version of the desired field with a temporary name - Update the table to transfer values from old to new - Drop old column - Rename the new column and possibly drop the nullable property """ # Make a new field that's like the new one but with a temporary # column name. new_temp_field = copy.deepcopy(new_field) new_temp_field.null = True new_temp_field.column = self._generate_temp_name(new_field.column) # Add it self.add_field(model, new_temp_field) # Explicit data type conversion # https://docs.oracle.com/cd/B19306_01/server.102/b14200/sql_elements002.htm#sthref340 new_value = self.quote_name(old_field.column) old_type = old_field.db_type(self.connection) if re.match('^N?CLOB', old_type): new_value = "TO_CHAR(%s)" % new_value old_type = 'VARCHAR2' if re.match('^N?VARCHAR2', old_type): new_internal_type = new_field.get_internal_type() if new_internal_type == 'DateField': new_value = "TO_DATE(%s, 'YYYY-MM-DD')" % new_value elif new_internal_type == 'DateTimeField': new_value = "TO_TIMESTAMP(%s, 'YYYY-MM-DD HH24:MI:SS.FF')" % new_value elif new_internal_type == 'TimeField': # TimeField are stored as TIMESTAMP with a 1900-01-01 date part. new_value = "TO_TIMESTAMP(CONCAT('1900-01-01 ', %s), 'YYYY-MM-DD HH24:MI:SS.FF')" % new_value # Transfer values across self.execute("UPDATE %s set %s=%s" % ( self.quote_name(model._meta.db_table), self.quote_name(new_temp_field.column), new_value, )) # Drop the old field self.remove_field(model, old_field) # Rename and possibly make the new field NOT NULL super(DatabaseSchemaEditor, self).alter_field(model, new_temp_field, new_field) def normalize_name(self, name): """ Get the properly shortened and uppercased identifier as returned by quote_name(), but without the actual quotes. 
""" nn = self.quote_name(name) if nn[0] == '"' and nn[-1] == '"': nn = nn[1:-1] return nn def _generate_temp_name(self, for_name): """ Generates temporary names for workarounds that need temp columns """ suffix = hex(hash(for_name)).upper()[1:] return self.normalize_name(for_name + "_" + suffix) def prepare_default(self, value): return self.quote_value(value)
bsd-3-clause
bzero/bitex
libs/coinkit/coinkit/wallet.py
10
1797
# -*- coding: utf-8 -*- """ Coinkit ~~~~~ :copyright: (c) 2014 by Halfmoon Labs :license: MIT, see LICENSE for more details. """ from inspect import isclass from .keypair import * from .passphrase import random_256bit_passphrase, random_160bit_passphrase def is_cryptocurrency_keypair_class(cls): if not isclass(cls): return False if cls.__name__ == 'BitcoinKeypair': return True if len(cls.__bases__) > 0 and cls.__bases__[0].__name__ == 'BitcoinKeypair': return True _messages = { "SHORT_PASSPHRASE": "Warning! Passphrase must be at least %s characters.", "INVALID_KEYPAIR_CLASS": "Class must be a valid currency keypair class.", } class SDWallet(): """ A sequential deterministic wallet. """ def __init__(self, passphrase=None): """ Create wallet from a passphrase input. """ if not passphrase: passphrase = random_160bit_passphrase() self._passphrase = passphrase def passphrase(self): return self._passphrase def keypair(self, i, keypair_class): """ Return the keypair that corresponds to the provided sequence number and keypair class (BitcoinKeypair, etc.). """ # Make sure keypair_class is a valid cryptocurrency keypair if not is_cryptocurrency_keypair_class(keypair_class): raise Exception(_messages["INVALID_KEYPAIR_CLASS"]) currency_name = keypair_class.__name__.lower().replace('keypair', '') k = keypair_class.from_passphrase( self._passphrase + " " + currency_name + str(i)) return k class HDWallet(): """ A hierarchical deterministic wallet in accordance with BIP 32. """ def __init__(self): raise NotImplementedError()
gpl-3.0
Snifer/BurpSuite-Plugins
Sqlmap/tamper/versionedkeywords.py
7
1619
#!/usr/bin/env python """ Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/) See the file 'doc/COPYING' for copying permission """ import os import re from lib.core.common import singleTimeWarnMessage from lib.core.data import kb from lib.core.enums import DBMS from lib.core.enums import PRIORITY __priority__ = PRIORITY.HIGHER def dependencies(): singleTimeWarnMessage("tamper script '%s' is only meant to be run against %s" % (os.path.basename(__file__).split(".")[0], DBMS.MYSQL)) def tamper(payload, **kwargs): """ Encloses each non-function keyword with versioned MySQL comment Requirement: * MySQL Tested against: * MySQL 4.0.18, 5.1.56, 5.5.11 Notes: * Useful to bypass several web application firewalls when the back-end database management system is MySQL >>> tamper('1 UNION ALL SELECT NULL, NULL, CONCAT(CHAR(58,104,116,116,58),IFNULL(CAST(CURRENT_USER() AS CHAR),CHAR(32)),CHAR(58,100,114,117,58))#') '1/*!UNION*//*!ALL*//*!SELECT*//*!NULL*/,/*!NULL*/, CONCAT(CHAR(58,104,116,116,58),IFNULL(CAST(CURRENT_USER()/*!AS*//*!CHAR*/),CHAR(32)),CHAR(58,100,114,117,58))#' """ def process(match): word = match.group('word') if word.upper() in kb.keywords: return match.group().replace(word, "/*!%s*/" % word) else: return match.group() retVal = payload if payload: retVal = re.sub(r"(?<=\W)(?P<word>[A-Za-z_]+)(?=[^\w(]|\Z)", lambda match: process(match), retVal) retVal = retVal.replace(" /*!", "/*!").replace("*/ ", "*/") return retVal
gpl-2.0
alrusdi/lettuce
tests/integration/lib/Django-1.2.5/django/template/debug.py
66
3732
from django.template import Lexer, Parser, tag_re, NodeList, VariableNode, TemplateSyntaxError from django.utils.encoding import force_unicode from django.utils.html import escape from django.utils.safestring import SafeData, EscapeData from django.utils.formats import localize class DebugLexer(Lexer): def __init__(self, template_string, origin): super(DebugLexer, self).__init__(template_string, origin) def tokenize(self): "Return a list of tokens from a given template_string" result, upto = [], 0 for match in tag_re.finditer(self.template_string): start, end = match.span() if start > upto: result.append(self.create_token(self.template_string[upto:start], (upto, start), False)) upto = start result.append(self.create_token(self.template_string[start:end], (start, end), True)) upto = end last_bit = self.template_string[upto:] if last_bit: result.append(self.create_token(last_bit, (upto, upto + len(last_bit)), False)) return result def create_token(self, token_string, source, in_tag): token = super(DebugLexer, self).create_token(token_string, in_tag) token.source = self.origin, source return token class DebugParser(Parser): def __init__(self, lexer): super(DebugParser, self).__init__(lexer) self.command_stack = [] def enter_command(self, command, token): self.command_stack.append( (command, token.source) ) def exit_command(self): self.command_stack.pop() def error(self, token, msg): return self.source_error(token.source, msg) def source_error(self, source,msg): e = TemplateSyntaxError(msg) e.source = source return e def create_nodelist(self): return DebugNodeList() def create_variable_node(self, contents): return DebugVariableNode(contents) def extend_nodelist(self, nodelist, node, token): node.source = token.source super(DebugParser, self).extend_nodelist(nodelist, node, token) def unclosed_block_tag(self, parse_until): command, source = self.command_stack.pop() msg = "Unclosed tag '%s'. 
Looking for one of: %s " % (command, ', '.join(parse_until)) raise self.source_error(source, msg) def compile_function_error(self, token, e): if not hasattr(e, 'source'): e.source = token.source class DebugNodeList(NodeList): def render_node(self, node, context): try: result = node.render(context) except TemplateSyntaxError, e: if not hasattr(e, 'source'): e.source = node.source raise except Exception, e: from sys import exc_info wrapped = TemplateSyntaxError(u'Caught %s while rendering: %s' % (e.__class__.__name__, force_unicode(e, errors='replace'))) wrapped.source = node.source wrapped.exc_info = exc_info() raise wrapped, None, wrapped.exc_info[2] return result class DebugVariableNode(VariableNode): def render(self, context): try: output = self.filter_expression.resolve(context) output = localize(output) output = force_unicode(output) except TemplateSyntaxError, e: if not hasattr(e, 'source'): e.source = self.source raise except UnicodeDecodeError: return '' if (context.autoescape and not isinstance(output, SafeData)) or isinstance(output, EscapeData): return escape(output) else: return output
gpl-3.0
abstract-open-solutions/server-tools
auth_from_http_remote_user/controllers/main.py
41
4132
# -*- coding: utf-8 -*- ############################################################################## # # Author: Laurent Mignon # Copyright 2014 'ACSONE SA/NV' # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp import SUPERUSER_ID import openerp from openerp import http from openerp.http import request from openerp.addons.web.controllers import main from openerp.addons.auth_from_http_remote_user.model import \ AuthFromHttpRemoteUserInstalled from .. 
import utils import random import logging import werkzeug _logger = logging.getLogger(__name__) class Home(main.Home): _REMOTE_USER_ATTRIBUTE = 'HTTP_REMOTE_USER' @http.route('/web', type='http', auth="none") def web_client(self, s_action=None, **kw): main.ensure_db() try: self._bind_http_remote_user(http.request.session.db) except http.AuthenticationError: return werkzeug.exceptions.Unauthorized().get_response() return super(Home, self).web_client(s_action, **kw) def _search_user(self, res_users, login, cr): user_ids = res_users.search(cr, SUPERUSER_ID, [('login', '=', login), ('active', '=', True)]) assert len(user_ids) < 2 if user_ids: return user_ids[0] return None def _bind_http_remote_user(self, db_name): try: registry = openerp.registry(db_name) with registry.cursor() as cr: if AuthFromHttpRemoteUserInstalled._name not in registry: # module not installed in database, # continue usual behavior return headers = http.request.httprequest.headers.environ login = headers.get(self._REMOTE_USER_ATTRIBUTE, None) if not login: # no HTTP_REMOTE_USER header, # continue usual behavior return request_login = request.session.login if request_login: if request_login == login: # already authenticated return else: request.session.logout(keep_db=True) res_users = registry.get('res.users') user_id = self._search_user(res_users, login, cr) if not user_id: # HTTP_REMOTE_USER login not found in database request.session.logout(keep_db=True) raise http.AuthenticationError() # generate a specific key for authentication key = randomString(utils.KEY_LENGTH, '0123456789abcdef') res_users.write(cr, SUPERUSER_ID, [user_id], {'sso_key': key}) request.session.authenticate(db_name, login=login, password=key, uid=user_id) except http.AuthenticationError, e: raise e except Exception, e: _logger.error("Error binding Http Remote User session", exc_info=True) raise e randrange = random.SystemRandom().randrange def randomString(length, chrs): """Produce a string of length random bytes, chosen from 
chrs.""" n = len(chrs) return ''.join([chrs[randrange(n)] for _ in xrange(length)])
agpl-3.0
coderbone/SickRage-alt
lib/github/Installation.py
7
3318
# -*- coding: utf-8 -*- # ########################## Copyrights and license ############################ # # # Copyright 2017 Jannis Gebauer <ja.geb@me.com> # # # # This file is part of PyGithub. # # http://pygithub.github.io/PyGithub/v1/index.html # # # # PyGithub is free software: you can redistribute it and/or modify it under # # the terms of the GNU Lesser General Public License as published by the Free # # Software Foundation, either version 3 of the License, or (at your option) # # any later version. # # # # PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY # # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # # FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more # # details. # # # # You should have received a copy of the GNU Lesser General Public License # # along with PyGithub. If not, see <http://www.gnu.org/licenses/>. # # # # ############################################################################## import github.GithubObject import github.PaginatedList import github.Gist import github.Repository import github.NamedUser import github.Plan import github.Organization import github.UserKey import github.Issue import github.Event import github.Authorization import github.Notification INTEGRATION_PREVIEW_HEADERS = {"Accept": "application/vnd.github.machine-man-preview+json"} class Installation(github.GithubObject.NonCompletableGithubObject): """ This class represents Installations as in https://developer.github.com/v3/integrations/installations """ def __repr__(self): return self.get__repr__({"id": self._id.value}) @property def id(self): return self._id def get_repos(self): """ :calls: `GET /installation/repositories <https://developer.github.com/v3/integrations/installations/#list-repositories>`_ :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Repository.Repository` """ url_parameters = dict() return github.PaginatedList.PaginatedList( 
contentClass=github.Repository.Repository, requester=self._requester, firstUrl="/installation/repositories", firstParams=url_parameters, headers=INTEGRATION_PREVIEW_HEADERS, list_item='repositories' ) def _initAttributes(self): self._id = github.GithubObject.NotSet def _useAttributes(self, attributes): if "id" in attributes: # pragma no branch self._id = self._makeIntAttribute(attributes["id"])
gpl-3.0
jeromecc/doctoctocbot
src/doctocnet/urls.py
1
2933
"""doctocnet URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/2.0/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from django.contrib import admin from django.contrib.auth import views as auth_views from django.urls import include, path, re_path from django.views.generic import TemplateView from django.utils.translation import gettext_lazy as _ from django.conf.urls.i18n import i18n_patterns # wagtail start #from wagtail.admin import urls as wagtailadmin_urls #from wagtail.documents import urls as wagtaildocs_urls #from wagtail.core import urls as wagtail_urls # wagtail stop from ajax_select import urls as ajax_select_urls from doctocnet.views import media_access app_name = 'doctocnet' urlpatterns = i18n_patterns( path('admin/', admin.site.urls), ) urlpatterns += [ re_path(r'^media/(?P<path>.*)', media_access, name='media'), path('silver/', include('silver.urls')), path('optin/', include('optin.urls')), path('moderation/', include('moderation.urls')), path('', include('ip.urls')), path('', include('landing.urls')), path('', include('users.urls')), path('', include('registration.urls')), path('', include('customer.urls')), path('', include('gpgcontact.urls')), path('', include('discourse.urls')), path('', include('invite.urls')), path('accounts/', include('django.contrib.auth.urls') ), path('financement/', include('crowdfunding.urls')), path('display/', include('display.urls', namespace='display')), path('oauth/', include('social_django.urls', namespace='social')), path('community/', 
include('community.urls')), path('conversation/', include('conversation.urls')), path('tagging/', include('tagging.urls')), re_path(r'^ajax_select/', include(ajax_select_urls)), path('crowd/', TemplateView.as_view(template_name='base.html')), path('charts/', include('charts.urls', namespace='charts')), path('invitations/', include('invitations.urls', namespace='invitations')), path('authorize/', include('authorize.urls')), path('cas/', include('mama_cas.urls')), ] # Use static() to add url mappings to serve static files during development (only) from django.conf import settings from django.conf.urls.static import static urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
mpl-2.0
madjelan/scikit-learn
sklearn/preprocessing/data.py
113
56747
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Mathieu Blondel <mathieu@mblondel.org> # Olivier Grisel <olivier.grisel@ensta.org> # Andreas Mueller <amueller@ais.uni-bonn.de> # Eric Martin <eric@ericmart.in> # License: BSD 3 clause from itertools import chain, combinations import numbers import warnings import numpy as np from scipy import sparse from ..base import BaseEstimator, TransformerMixin from ..externals import six from ..utils import check_array from ..utils.extmath import row_norms from ..utils.fixes import combinations_with_replacement as combinations_w_r from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1, inplace_csr_row_normalize_l2) from ..utils.sparsefuncs import (inplace_column_scale, mean_variance_axis, min_max_axis, inplace_row_scale) from ..utils.validation import check_is_fitted, FLOAT_DTYPES zip = six.moves.zip map = six.moves.map range = six.moves.range __all__ = [ 'Binarizer', 'KernelCenterer', 'MinMaxScaler', 'MaxAbsScaler', 'Normalizer', 'OneHotEncoder', 'RobustScaler', 'StandardScaler', 'add_dummy_feature', 'binarize', 'normalize', 'scale', 'robust_scale', 'maxabs_scale', 'minmax_scale', ] def _mean_and_std(X, axis=0, with_mean=True, with_std=True): """Compute mean and std deviation for centering, scaling. Zero valued std components are reset to 1.0 to avoid NaNs when scaling. """ X = np.asarray(X) Xr = np.rollaxis(X, axis) if with_mean: mean_ = Xr.mean(axis=0) else: mean_ = None if with_std: std_ = Xr.std(axis=0) std_ = _handle_zeros_in_scale(std_) else: std_ = None return mean_, std_ def _handle_zeros_in_scale(scale): ''' Makes sure that whenever scale is zero, we handle it correctly. This happens in most scalers when we have constant features.''' # if we are fitting on 1D arrays, scale might be a scalar if np.isscalar(scale): if scale == 0: scale = 1. 
elif isinstance(scale, np.ndarray): scale[scale == 0.0] = 1.0 scale[~np.isfinite(scale)] = 1.0 return scale def scale(X, axis=0, with_mean=True, with_std=True, copy=True): """Standardize a dataset along any axis Center to the mean and component wise scale to unit variance. Read more in the :ref:`User Guide <preprocessing_scaler>`. Parameters ---------- X : array-like or CSR matrix. The data to center and scale. axis : int (0 by default) axis used to compute the means and standard deviations along. If 0, independently standardize each feature, otherwise (if 1) standardize each sample. with_mean : boolean, True by default If True, center the data before scaling. with_std : boolean, True by default If True, scale the data to unit variance (or equivalently, unit standard deviation). copy : boolean, optional, default True set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR matrix and if axis is 1). Notes ----- This implementation will refuse to center scipy.sparse matrices since it would make them non-sparse and would potentially crash the program with memory exhaustion problems. Instead the caller is expected to either set explicitly `with_mean=False` (in that case, only variance scaling will be performed on the features of the CSR matrix) or to call `X.toarray()` if he/she expects the materialized dense array to fit in memory. To avoid memory copy the caller should pass a CSR matrix. See also -------- :class:`sklearn.preprocessing.StandardScaler` to perform centering and scaling using the ``Transformer`` API (e.g. 
as part of a preprocessing :class:`sklearn.pipeline.Pipeline`) """ X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False, warn_on_dtype=True, estimator='the scale function', dtype=FLOAT_DTYPES) if sparse.issparse(X): if with_mean: raise ValueError( "Cannot center sparse matrices: pass `with_mean=False` instead" " See docstring for motivation and alternatives.") if axis != 0: raise ValueError("Can only scale sparse matrix on axis=0, " " got axis=%d" % axis) if not sparse.isspmatrix_csr(X): X = X.tocsr() copy = False if copy: X = X.copy() _, var = mean_variance_axis(X, axis=0) var = _handle_zeros_in_scale(var) inplace_column_scale(X, 1 / np.sqrt(var)) else: X = np.asarray(X) mean_, std_ = _mean_and_std( X, axis, with_mean=with_mean, with_std=with_std) if copy: X = X.copy() # Xr is a view on the original array that enables easy use of # broadcasting on the axis in which we are interested in Xr = np.rollaxis(X, axis) if with_mean: Xr -= mean_ mean_1 = Xr.mean(axis=0) # Verify that mean_1 is 'close to zero'. If X contains very # large values, mean_1 can also be very large, due to a lack of # precision of mean_. In this case, a pre-scaling of the # concerned feature is efficient, for instance by its mean or # maximum. if not np.allclose(mean_1, 0): warnings.warn("Numerical issues were encountered " "when centering the data " "and might not be solved. Dataset may " "contain too large values. You may need " "to prescale your features.") Xr -= mean_1 if with_std: Xr /= std_ if with_mean: mean_2 = Xr.mean(axis=0) # If mean_2 is not 'close to zero', it comes from the fact that # std_ is very small so that mean_2 = mean_1/std_ > 0, even if # mean_1 was close to zero. The problem is thus essentially due # to the lack of precision of mean_. A solution is then to # substract the mean again: if not np.allclose(mean_2, 0): warnings.warn("Numerical issues were encountered " "when scaling the data " "and might not be solved. 
The standard " "deviation of the data is probably " "very close to 0. ") Xr -= mean_2 return X class MinMaxScaler(BaseEstimator, TransformerMixin): """Transforms features by scaling each feature to a given range. This estimator scales and translates each feature individually such that it is in the given range on the training set, i.e. between zero and one. The transformation is given by:: X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0)) X_scaled = X_std * (max - min) + min where min, max = feature_range. This transformation is often used as an alternative to zero mean, unit variance scaling. Read more in the :ref:`User Guide <preprocessing_scaler>`. Parameters ---------- feature_range: tuple (min, max), default=(0, 1) Desired range of transformed data. copy : boolean, optional, default True Set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array). Attributes ---------- min_ : ndarray, shape (n_features,) Per feature adjustment for minimum. scale_ : ndarray, shape (n_features,) Per feature relative scaling of the data. """ def __init__(self, feature_range=(0, 1), copy=True): self.feature_range = feature_range self.copy = copy def fit(self, X, y=None): """Compute the minimum and maximum to be used for later scaling. Parameters ---------- X : array-like, shape [n_samples, n_features] The data used to compute the per-feature minimum and maximum used for later scaling along the features axis. """ X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES) feature_range = self.feature_range if feature_range[0] >= feature_range[1]: raise ValueError("Minimum of desired feature range must be smaller" " than maximum. Got %s." 
% str(feature_range)) data_min = np.min(X, axis=0) data_range = np.max(X, axis=0) - data_min data_range = _handle_zeros_in_scale(data_range) self.scale_ = (feature_range[1] - feature_range[0]) / data_range self.min_ = feature_range[0] - data_min * self.scale_ self.data_range = data_range self.data_min = data_min return self def transform(self, X): """Scaling features of X according to feature_range. Parameters ---------- X : array-like with shape [n_samples, n_features] Input data that will be transformed. """ check_is_fitted(self, 'scale_') X = check_array(X, copy=self.copy, ensure_2d=False) X *= self.scale_ X += self.min_ return X def inverse_transform(self, X): """Undo the scaling of X according to feature_range. Parameters ---------- X : array-like with shape [n_samples, n_features] Input data that will be transformed. """ check_is_fitted(self, 'scale_') X = check_array(X, copy=self.copy, ensure_2d=False) X -= self.min_ X /= self.scale_ return X def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True): """Transforms features by scaling each feature to a given range. This estimator scales and translates each feature individually such that it is in the given range on the training set, i.e. between zero and one. The transformation is given by:: X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0)) X_scaled = X_std * (max - min) + min where min, max = feature_range. This transformation is often used as an alternative to zero mean, unit variance scaling. Read more in the :ref:`User Guide <preprocessing_scaler>`. Parameters ---------- feature_range: tuple (min, max), default=(0, 1) Desired range of transformed data. axis : int (0 by default) axis used to scale along. If 0, independently scale each feature, otherwise (if 1) scale each sample. copy : boolean, optional, default is True Set to False to perform inplace scaling and avoid a copy (if the input is already a numpy array). 
""" s = MinMaxScaler(feature_range=feature_range, copy=copy) if axis == 0: return s.fit_transform(X) else: return s.fit_transform(X.T).T class StandardScaler(BaseEstimator, TransformerMixin): """Standardize features by removing the mean and scaling to unit variance Centering and scaling happen independently on each feature by computing the relevant statistics on the samples in the training set. Mean and standard deviation are then stored to be used on later data using the `transform` method. Standardization of a dataset is a common requirement for many machine learning estimators: they might behave badly if the individual feature do not more or less look like standard normally distributed data (e.g. Gaussian with 0 mean and unit variance). For instance many elements used in the objective function of a learning algorithm (such as the RBF kernel of Support Vector Machines or the L1 and L2 regularizers of linear models) assume that all features are centered around 0 and have variance in the same order. If a feature has a variance that is orders of magnitude larger that others, it might dominate the objective function and make the estimator unable to learn from other features correctly as expected. Read more in the :ref:`User Guide <preprocessing_scaler>`. Parameters ---------- with_mean : boolean, True by default If True, center the data before scaling. This does not work (and will raise an exception) when attempted on sparse matrices, because centering them entails building a dense matrix which in common use cases is likely to be too large to fit in memory. with_std : boolean, True by default If True, scale the data to unit variance (or equivalently, unit standard deviation). copy : boolean, optional, default True If False, try to avoid a copy and do inplace scaling instead. This is not guaranteed to always work inplace; e.g. if the data is not a NumPy array or scipy.sparse CSR matrix, a copy may still be returned. 
Attributes ---------- mean_ : array of floats with shape [n_features] The mean value for each feature in the training set. std_ : array of floats with shape [n_features] The standard deviation for each feature in the training set. Set to one if the standard deviation is zero for a given feature. See also -------- :func:`sklearn.preprocessing.scale` to perform centering and scaling without using the ``Transformer`` object oriented API :class:`sklearn.decomposition.RandomizedPCA` with `whiten=True` to further remove the linear correlation across features. """ def __init__(self, copy=True, with_mean=True, with_std=True): self.with_mean = with_mean self.with_std = with_std self.copy = copy def fit(self, X, y=None): """Compute the mean and std to be used for later scaling. Parameters ---------- X : array-like or CSR matrix with shape [n_samples, n_features] The data used to compute the mean and standard deviation used for later scaling along the features axis. """ X = check_array(X, accept_sparse='csr', copy=self.copy, ensure_2d=False, warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES) if sparse.issparse(X): if self.with_mean: raise ValueError( "Cannot center sparse matrices: pass `with_mean=False` " "instead. See docstring for motivation and alternatives.") self.mean_ = None if self.with_std: var = mean_variance_axis(X, axis=0)[1] self.std_ = np.sqrt(var) self.std_ = _handle_zeros_in_scale(self.std_) else: self.std_ = None return self else: self.mean_, self.std_ = _mean_and_std( X, axis=0, with_mean=self.with_mean, with_std=self.with_std) return self def transform(self, X, y=None, copy=None): """Perform standardization by centering and scaling Parameters ---------- X : array-like with shape [n_samples, n_features] The data used to scale along the features axis. 
""" check_is_fitted(self, 'std_') copy = copy if copy is not None else self.copy X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False, warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES) if sparse.issparse(X): if self.with_mean: raise ValueError( "Cannot center sparse matrices: pass `with_mean=False` " "instead. See docstring for motivation and alternatives.") if self.std_ is not None: inplace_column_scale(X, 1 / self.std_) else: if self.with_mean: X -= self.mean_ if self.with_std: X /= self.std_ return X def inverse_transform(self, X, copy=None): """Scale back the data to the original representation Parameters ---------- X : array-like with shape [n_samples, n_features] The data used to scale along the features axis. """ check_is_fitted(self, 'std_') copy = copy if copy is not None else self.copy if sparse.issparse(X): if self.with_mean: raise ValueError( "Cannot uncenter sparse matrices: pass `with_mean=False` " "instead See docstring for motivation and alternatives.") if not sparse.isspmatrix_csr(X): X = X.tocsr() copy = False if copy: X = X.copy() if self.std_ is not None: inplace_column_scale(X, self.std_) else: X = np.asarray(X) if copy: X = X.copy() if self.with_std: X *= self.std_ if self.with_mean: X += self.mean_ return X class MaxAbsScaler(BaseEstimator, TransformerMixin): """Scale each feature by its maximum absolute value. This estimator scales and translates each feature individually such that the maximal absolute value of each feature in the training set will be 1.0. It does not shift/center the data, and thus does not destroy any sparsity. This scaler can also be applied to sparse CSR or CSC matrices. Parameters ---------- copy : boolean, optional, default is True Set to False to perform inplace scaling and avoid a copy (if the input is already a numpy array). Attributes ---------- scale_ : ndarray, shape (n_features,) Per feature relative scaling of the data. 
""" def __init__(self, copy=True): self.copy = copy def fit(self, X, y=None): """Compute the minimum and maximum to be used for later scaling. Parameters ---------- X : array-like, shape [n_samples, n_features] The data used to compute the per-feature minimum and maximum used for later scaling along the features axis. """ X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES) if sparse.issparse(X): mins, maxs = min_max_axis(X, axis=0) scales = np.maximum(np.abs(mins), np.abs(maxs)) else: scales = np.abs(X).max(axis=0) scales = np.array(scales) scales = scales.reshape(-1) self.scale_ = _handle_zeros_in_scale(scales) return self def transform(self, X, y=None): """Scale the data Parameters ---------- X : array-like or CSR matrix. The data that should be scaled. """ check_is_fitted(self, 'scale_') X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES) if sparse.issparse(X): if X.shape[0] == 1: inplace_row_scale(X, 1.0 / self.scale_) else: inplace_column_scale(X, 1.0 / self.scale_) else: X /= self.scale_ return X def inverse_transform(self, X): """Scale back the data to the original representation Parameters ---------- X : array-like or CSR matrix. The data that should be transformed back. """ check_is_fitted(self, 'scale_') X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES) if sparse.issparse(X): if X.shape[0] == 1: inplace_row_scale(X, self.scale_) else: inplace_column_scale(X, self.scale_) else: X *= self.scale_ return X def maxabs_scale(X, axis=0, copy=True): """Scale each feature to the [-1, 1] range without breaking the sparsity. This estimator scales each feature individually such that the maximal absolute value of each feature in the training set will be 1.0. This scaler can also be applied to sparse CSR or CSC matrices. 
Parameters ---------- axis : int (0 by default) axis used to scale along. If 0, independently scale each feature, otherwise (if 1) scale each sample. copy : boolean, optional, default is True Set to False to perform inplace scaling and avoid a copy (if the input is already a numpy array). """ s = MaxAbsScaler(copy=copy) if axis == 0: return s.fit_transform(X) else: return s.fit_transform(X.T).T class RobustScaler(BaseEstimator, TransformerMixin): """Scale features using statistics that are robust to outliers. This Scaler removes the median and scales the data according to the Interquartile Range (IQR). The IQR is the range between the 1st quartile (25th quantile) and the 3rd quartile (75th quantile). Centering and scaling happen independently on each feature (or each sample, depending on the `axis` argument) by computing the relevant statistics on the samples in the training set. Median and interquartile range are then stored to be used on later data using the `transform` method. Standardization of a dataset is a common requirement for many machine learning estimators. Typically this is done by removing the mean and scaling to unit variance. However, outliers can often influence the sample mean / variance in a negative way. In such cases, the median and the interquartile range often give better results. Read more in the :ref:`User Guide <preprocessing_scaler>`. Parameters ---------- with_centering : boolean, True by default If True, center the data before scaling. This does not work (and will raise an exception) when attempted on sparse matrices, because centering them entails building a dense matrix which in common use cases is likely to be too large to fit in memory. with_scaling : boolean, True by default If True, scale the data to interquartile range. copy : boolean, optional, default is True If False, try to avoid a copy and do inplace scaling instead. This is not guaranteed to always work inplace; e.g. 
if the data is not a NumPy array or scipy.sparse CSR matrix, a copy may still be returned. Attributes ---------- center_ : array of floats The median value for each feature in the training set. scale_ : array of floats The (scaled) interquartile range for each feature in the training set. See also -------- :class:`sklearn.preprocessing.StandardScaler` to perform centering and scaling using mean and variance. :class:`sklearn.decomposition.RandomizedPCA` with `whiten=True` to further remove the linear correlation across features. Notes ----- See examples/preprocessing/plot_robust_scaling.py for an example. http://en.wikipedia.org/wiki/Median_(statistics) http://en.wikipedia.org/wiki/Interquartile_range """ def __init__(self, with_centering=True, with_scaling=True, copy=True): self.with_centering = with_centering self.with_scaling = with_scaling self.copy = copy def _check_array(self, X, copy): """Makes sure centering is not enabled for sparse matrices.""" X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES) if sparse.issparse(X): if self.with_centering: raise ValueError( "Cannot center sparse matrices: use `with_centering=False`" " instead. See docstring for motivation and alternatives.") return X def fit(self, X, y=None): """Compute the median and quantiles to be used for scaling. Parameters ---------- X : array-like with shape [n_samples, n_features] The data used to compute the median and quantiles used for later scaling along the features axis. """ if sparse.issparse(X): raise TypeError("RobustScaler cannot be fitted on sparse inputs") X = self._check_array(X, self.copy) if self.with_centering: self.center_ = np.median(X, axis=0) if self.with_scaling: q = np.percentile(X, (25, 75), axis=0) self.scale_ = (q[1] - q[0]) self.scale_ = _handle_zeros_in_scale(self.scale_) return self def transform(self, X, y=None): """Center and scale the data Parameters ---------- X : array-like or CSR matrix. 
The data used to scale along the specified axis. """ if self.with_centering: check_is_fitted(self, 'center_') if self.with_scaling: check_is_fitted(self, 'scale_') X = self._check_array(X, self.copy) if sparse.issparse(X): if self.with_scaling: if X.shape[0] == 1: inplace_row_scale(X, 1.0 / self.scale_) elif self.axis == 0: inplace_column_scale(X, 1.0 / self.scale_) else: if self.with_centering: X -= self.center_ if self.with_scaling: X /= self.scale_ return X def inverse_transform(self, X): """Scale back the data to the original representation Parameters ---------- X : array-like or CSR matrix. The data used to scale along the specified axis. """ if self.with_centering: check_is_fitted(self, 'center_') if self.with_scaling: check_is_fitted(self, 'scale_') X = self._check_array(X, self.copy) if sparse.issparse(X): if self.with_scaling: if X.shape[0] == 1: inplace_row_scale(X, self.scale_) else: inplace_column_scale(X, self.scale_) else: if self.with_scaling: X *= self.scale_ if self.with_centering: X += self.center_ return X def robust_scale(X, axis=0, with_centering=True, with_scaling=True, copy=True): """Standardize a dataset along any axis Center to the median and component wise scale according to the interquartile range. Read more in the :ref:`User Guide <preprocessing_scaler>`. Parameters ---------- X : array-like. The data to center and scale. axis : int (0 by default) axis used to compute the medians and IQR along. If 0, independently scale each feature, otherwise (if 1) scale each sample. with_centering : boolean, True by default If True, center the data before scaling. with_scaling : boolean, True by default If True, scale the data to unit variance (or equivalently, unit standard deviation). copy : boolean, optional, default is True set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR matrix and if axis is 1). 
Notes ----- This implementation will refuse to center scipy.sparse matrices since it would make them non-sparse and would potentially crash the program with memory exhaustion problems. Instead the caller is expected to either set explicitly `with_centering=False` (in that case, only variance scaling will be performed on the features of the CSR matrix) or to call `X.toarray()` if he/she expects the materialized dense array to fit in memory. To avoid memory copy the caller should pass a CSR matrix. See also -------- :class:`sklearn.preprocessing.RobustScaler` to perform centering and scaling using the ``Transformer`` API (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`) """ s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling, copy=copy) if axis == 0: return s.fit_transform(X) else: return s.fit_transform(X.T).T class PolynomialFeatures(BaseEstimator, TransformerMixin): """Generate polynomial and interaction features. Generate a new feature matrix consisting of all polynomial combinations of the features with degree less than or equal to the specified degree. For example, if an input sample is two dimensional and of the form [a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2]. Parameters ---------- degree : integer The degree of the polynomial features. Default = 2. interaction_only : boolean, default = False If true, only interaction features are produced: features that are products of at most ``degree`` *distinct* input features (so not ``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.). include_bias : boolean If True (default), then include a bias column, the feature in which all polynomial powers are zero (i.e. a column of ones - acts as an intercept term in a linear model). 
Examples -------- >>> X = np.arange(6).reshape(3, 2) >>> X array([[0, 1], [2, 3], [4, 5]]) >>> poly = PolynomialFeatures(2) >>> poly.fit_transform(X) array([[ 1, 0, 1, 0, 0, 1], [ 1, 2, 3, 4, 6, 9], [ 1, 4, 5, 16, 20, 25]]) >>> poly = PolynomialFeatures(interaction_only=True) >>> poly.fit_transform(X) array([[ 1, 0, 1, 0], [ 1, 2, 3, 6], [ 1, 4, 5, 20]]) Attributes ---------- powers_ : array, shape (n_input_features, n_output_features) powers_[i, j] is the exponent of the jth input in the ith output. n_input_features_ : int The total number of input features. n_output_features_ : int The total number of polynomial output features. The number of output features is computed by iterating over all suitably sized combinations of input features. Notes ----- Be aware that the number of features in the output array scales polynomially in the number of features of the input array, and exponentially in the degree. High degrees can cause overfitting. See :ref:`examples/linear_model/plot_polynomial_interpolation.py <example_linear_model_plot_polynomial_interpolation.py>` """ def __init__(self, degree=2, interaction_only=False, include_bias=True): self.degree = degree self.interaction_only = interaction_only self.include_bias = include_bias @staticmethod def _combinations(n_features, degree, interaction_only, include_bias): comb = (combinations if interaction_only else combinations_w_r) start = int(not include_bias) return chain.from_iterable(comb(range(n_features), i) for i in range(start, degree + 1)) @property def powers_(self): check_is_fitted(self, 'n_input_features_') combinations = self._combinations(self.n_input_features_, self.degree, self.interaction_only, self.include_bias) return np.vstack(np.bincount(c, minlength=self.n_input_features_) for c in combinations) def fit(self, X, y=None): """ Compute number of output features. 
""" n_samples, n_features = check_array(X).shape combinations = self._combinations(n_features, self.degree, self.interaction_only, self.include_bias) self.n_input_features_ = n_features self.n_output_features_ = sum(1 for _ in combinations) return self def transform(self, X, y=None): """Transform data to polynomial features Parameters ---------- X : array with shape [n_samples, n_features] The data to transform, row by row. Returns ------- XP : np.ndarray shape [n_samples, NP] The matrix of features, where NP is the number of polynomial features generated from the combination of inputs. """ check_is_fitted(self, ['n_input_features_', 'n_output_features_']) X = check_array(X) n_samples, n_features = X.shape if n_features != self.n_input_features_: raise ValueError("X shape does not match training shape") # allocate output data XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype) combinations = self._combinations(n_features, self.degree, self.interaction_only, self.include_bias) for i, c in enumerate(combinations): XP[:, i] = X[:, c].prod(1) return XP def normalize(X, norm='l2', axis=1, copy=True): """Scale input vectors individually to unit norm (vector length). Read more in the :ref:`User Guide <preprocessing_normalization>`. Parameters ---------- X : array or scipy.sparse matrix with shape [n_samples, n_features] The data to normalize, element by element. scipy.sparse matrices should be in CSR format to avoid an un-necessary copy. norm : 'l1', 'l2', or 'max', optional ('l2' by default) The norm to use to normalize each non zero sample (or each non-zero feature if axis is 0). axis : 0 or 1, optional (1 by default) axis used to normalize the data along. If 1, independently normalize each sample, otherwise (if 0) normalize each feature. copy : boolean, optional, default True set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR matrix and if axis is 1). 
See also -------- :class:`sklearn.preprocessing.Normalizer` to perform normalization using the ``Transformer`` API (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`) """ if norm not in ('l1', 'l2', 'max'): raise ValueError("'%s' is not a supported norm" % norm) if axis == 0: sparse_format = 'csc' elif axis == 1: sparse_format = 'csr' else: raise ValueError("'%d' is not a supported axis" % axis) X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True, estimator='the normalize function', dtype=FLOAT_DTYPES) if axis == 0: X = X.T if sparse.issparse(X): if norm == 'l1': inplace_csr_row_normalize_l1(X) elif norm == 'l2': inplace_csr_row_normalize_l2(X) elif norm == 'max': _, norms = min_max_axis(X, 1) norms = norms.repeat(np.diff(X.indptr)) mask = norms != 0 X.data[mask] /= norms[mask] else: if norm == 'l1': norms = np.abs(X).sum(axis=1) elif norm == 'l2': norms = row_norms(X) elif norm == 'max': norms = np.max(X, axis=1) norms = _handle_zeros_in_scale(norms) X /= norms[:, np.newaxis] if axis == 0: X = X.T return X class Normalizer(BaseEstimator, TransformerMixin): """Normalize samples individually to unit norm. Each sample (i.e. each row of the data matrix) with at least one non zero component is rescaled independently of other samples so that its norm (l1 or l2) equals one. This transformer is able to work both with dense numpy arrays and scipy.sparse matrix (use CSR format if you want to avoid the burden of a copy / conversion). Scaling inputs to unit norms is a common operation for text classification or clustering for instance. For instance the dot product of two l2-normalized TF-IDF vectors is the cosine similarity of the vectors and is the base similarity metric for the Vector Space Model commonly used by the Information Retrieval community. Read more in the :ref:`User Guide <preprocessing_normalization>`. Parameters ---------- norm : 'l1', 'l2', or 'max', optional ('l2' by default) The norm to use to normalize each non zero sample. 
copy : boolean, optional, default True set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR matrix). Notes ----- This estimator is stateless (besides constructor parameters), the fit method does nothing but is useful when used in a pipeline. See also -------- :func:`sklearn.preprocessing.normalize` equivalent function without the object oriented API """ def __init__(self, norm='l2', copy=True): self.norm = norm self.copy = copy def fit(self, X, y=None): """Do nothing and return the estimator unchanged This method is just there to implement the usual API and hence work in pipelines. """ X = check_array(X, accept_sparse='csr') return self def transform(self, X, y=None, copy=None): """Scale each non zero row of X to unit norm Parameters ---------- X : array or scipy.sparse matrix with shape [n_samples, n_features] The data to normalize, row by row. scipy.sparse matrices should be in CSR format to avoid an un-necessary copy. """ copy = copy if copy is not None else self.copy X = check_array(X, accept_sparse='csr') return normalize(X, norm=self.norm, axis=1, copy=copy) def binarize(X, threshold=0.0, copy=True): """Boolean thresholding of array-like or scipy.sparse matrix Read more in the :ref:`User Guide <preprocessing_binarization>`. Parameters ---------- X : array or scipy.sparse matrix with shape [n_samples, n_features] The data to binarize, element by element. scipy.sparse matrices should be in CSR or CSC format to avoid an un-necessary copy. threshold : float, optional (0.0 by default) Feature values below or equal to this are replaced by 0, above it by 1. Threshold may not be less than 0 for operations on sparse matrices. copy : boolean, optional, default True set to False to perform inplace binarization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR / CSC matrix and if axis is 1). 
See also -------- :class:`sklearn.preprocessing.Binarizer` to perform binarization using the ``Transformer`` API (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`) """ X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy) if sparse.issparse(X): if threshold < 0: raise ValueError('Cannot binarize a sparse matrix with threshold ' '< 0') cond = X.data > threshold not_cond = np.logical_not(cond) X.data[cond] = 1 X.data[not_cond] = 0 X.eliminate_zeros() else: cond = X > threshold not_cond = np.logical_not(cond) X[cond] = 1 X[not_cond] = 0 return X class Binarizer(BaseEstimator, TransformerMixin): """Binarize data (set feature values to 0 or 1) according to a threshold Values greater than the threshold map to 1, while values less than or equal to the threshold map to 0. With the default threshold of 0, only positive values map to 1. Binarization is a common operation on text count data where the analyst can decide to only consider the presence or absence of a feature rather than a quantified number of occurrences for instance. It can also be used as a pre-processing step for estimators that consider boolean random variables (e.g. modelled using the Bernoulli distribution in a Bayesian setting). Read more in the :ref:`User Guide <preprocessing_binarization>`. Parameters ---------- threshold : float, optional (0.0 by default) Feature values below or equal to this are replaced by 0, above it by 1. Threshold may not be less than 0 for operations on sparse matrices. copy : boolean, optional, default True set to False to perform inplace binarization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR matrix). Notes ----- If the input is a sparse matrix, only the non-zero values are subject to update by the Binarizer class. This estimator is stateless (besides constructor parameters), the fit method does nothing but is useful when used in a pipeline. 
""" def __init__(self, threshold=0.0, copy=True): self.threshold = threshold self.copy = copy def fit(self, X, y=None): """Do nothing and return the estimator unchanged This method is just there to implement the usual API and hence work in pipelines. """ check_array(X, accept_sparse='csr') return self def transform(self, X, y=None, copy=None): """Binarize each element of X Parameters ---------- X : array or scipy.sparse matrix with shape [n_samples, n_features] The data to binarize, element by element. scipy.sparse matrices should be in CSR format to avoid an un-necessary copy. """ copy = copy if copy is not None else self.copy return binarize(X, threshold=self.threshold, copy=copy) class KernelCenterer(BaseEstimator, TransformerMixin): """Center a kernel matrix Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a function mapping x to a Hilbert space. KernelCenterer centers (i.e., normalize to have zero mean) the data without explicitly computing phi(x). It is equivalent to centering phi(x) with sklearn.preprocessing.StandardScaler(with_std=False). Read more in the :ref:`User Guide <kernel_centering>`. """ def fit(self, K, y=None): """Fit KernelCenterer Parameters ---------- K : numpy array of shape [n_samples, n_samples] Kernel matrix. Returns ------- self : returns an instance of self. """ K = check_array(K) n_samples = K.shape[0] self.K_fit_rows_ = np.sum(K, axis=0) / n_samples self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples return self def transform(self, K, y=None, copy=True): """Center kernel matrix. Parameters ---------- K : numpy array of shape [n_samples1, n_samples2] Kernel matrix. copy : boolean, optional, default True Set to False to perform inplace computation. 
Returns ------- K_new : numpy array of shape [n_samples1, n_samples2] """ check_is_fitted(self, 'K_fit_all_') K = check_array(K) if copy: K = K.copy() K_pred_cols = (np.sum(K, axis=1) / self.K_fit_rows_.shape[0])[:, np.newaxis] K -= self.K_fit_rows_ K -= K_pred_cols K += self.K_fit_all_ return K def add_dummy_feature(X, value=1.0): """Augment dataset with an additional dummy feature. This is useful for fitting an intercept term with implementations which cannot otherwise fit it directly. Parameters ---------- X : array or scipy.sparse matrix with shape [n_samples, n_features] Data. value : float Value to use for the dummy feature. Returns ------- X : array or scipy.sparse matrix with shape [n_samples, n_features + 1] Same data with dummy feature added as first column. Examples -------- >>> from sklearn.preprocessing import add_dummy_feature >>> add_dummy_feature([[0, 1], [1, 0]]) array([[ 1., 0., 1.], [ 1., 1., 0.]]) """ X = check_array(X, accept_sparse=['csc', 'csr', 'coo']) n_samples, n_features = X.shape shape = (n_samples, n_features + 1) if sparse.issparse(X): if sparse.isspmatrix_coo(X): # Shift columns to the right. col = X.col + 1 # Column indices of dummy feature are 0 everywhere. col = np.concatenate((np.zeros(n_samples), col)) # Row indices of dummy feature are 0, ..., n_samples-1. row = np.concatenate((np.arange(n_samples), X.row)) # Prepend the dummy feature n_samples times. data = np.concatenate((np.ones(n_samples) * value, X.data)) return sparse.coo_matrix((data, (row, col)), shape) elif sparse.isspmatrix_csc(X): # Shift index pointers since we need to add n_samples elements. indptr = X.indptr + n_samples # indptr[0] must be 0. indptr = np.concatenate((np.array([0]), indptr)) # Row indices of dummy feature are 0, ..., n_samples-1. indices = np.concatenate((np.arange(n_samples), X.indices)) # Prepend the dummy feature n_samples times. 
data = np.concatenate((np.ones(n_samples) * value, X.data)) return sparse.csc_matrix((data, indices, indptr), shape) else: klass = X.__class__ return klass(add_dummy_feature(X.tocoo(), value)) else: return np.hstack((np.ones((n_samples, 1)) * value, X)) def _transform_selected(X, transform, selected="all", copy=True): """Apply a transform function to portion of selected features Parameters ---------- X : array-like or sparse matrix, shape=(n_samples, n_features) Dense array or sparse matrix. transform : callable A callable transform(X) -> X_transformed copy : boolean, optional Copy X even if it could be avoided. selected: "all" or array of indices or mask Specify which features to apply the transform to. Returns ------- X : array or sparse matrix, shape=(n_samples, n_features_new) """ if selected == "all": return transform(X) X = check_array(X, accept_sparse='csc', copy=copy) if len(selected) == 0: return X n_features = X.shape[1] ind = np.arange(n_features) sel = np.zeros(n_features, dtype=bool) sel[np.asarray(selected)] = True not_sel = np.logical_not(sel) n_selected = np.sum(sel) if n_selected == 0: # No features selected. return X elif n_selected == n_features: # All features selected. return transform(X) else: X_sel = transform(X[:, ind[sel]]) X_not_sel = X[:, ind[not_sel]] if sparse.issparse(X_sel) or sparse.issparse(X_not_sel): return sparse.hstack((X_sel, X_not_sel)) else: return np.hstack((X_sel, X_not_sel)) class OneHotEncoder(BaseEstimator, TransformerMixin): """Encode categorical integer features using a one-hot aka one-of-K scheme. The input to this transformer should be a matrix of integers, denoting the values taken on by categorical (discrete) features. The output will be a sparse matrix where each column corresponds to one possible value of one feature. It is assumed that input features take on values in the range [0, n_values). 
This encoding is needed for feeding categorical data to many scikit-learn estimators, notably linear models and SVMs with the standard kernels. Read more in the :ref:`User Guide <preprocessing_categorical_features>`. Parameters ---------- n_values : 'auto', int or array of ints Number of values per feature. - 'auto' : determine value range from training data. - int : maximum value for all features. - array : maximum value per feature. categorical_features: "all" or array of indices or mask Specify what features are treated as categorical. - 'all' (default): All features are treated as categorical. - array of indices: Array of categorical feature indices. - mask: Array of length n_features and with dtype=bool. Non-categorical features are always stacked to the right of the matrix. dtype : number type, default=np.float Desired dtype of output. sparse : boolean, default=True Will return sparse matrix if set True else will return an array. handle_unknown : str, 'error' or 'ignore' Whether to raise an error or ignore if a unknown categorical feature is present during transform. Attributes ---------- active_features_ : array Indices for active features, meaning values that actually occur in the training set. Only available when n_values is ``'auto'``. feature_indices_ : array of shape (n_features,) Indices to feature ranges. Feature ``i`` in the original data is mapped to features from ``feature_indices_[i]`` to ``feature_indices_[i+1]`` (and then potentially masked by `active_features_` afterwards) n_values_ : array of shape (n_features,) Maximum number of values per feature. Examples -------- Given a dataset with three features and two samples, we let the encoder find the maximum value per feature and transform the data to a binary one-hot encoding. >>> from sklearn.preprocessing import OneHotEncoder >>> enc = OneHotEncoder() >>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \ [1, 0, 2]]) # doctest: +ELLIPSIS OneHotEncoder(categorical_features='all', dtype=<... 
'float'>, handle_unknown='error', n_values='auto', sparse=True) >>> enc.n_values_ array([2, 3, 4]) >>> enc.feature_indices_ array([0, 2, 5, 9]) >>> enc.transform([[0, 1, 1]]).toarray() array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]]) See also -------- sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of dictionary items (also handles string-valued features). sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot encoding of dictionary items or strings. """ def __init__(self, n_values="auto", categorical_features="all", dtype=np.float, sparse=True, handle_unknown='error'): self.n_values = n_values self.categorical_features = categorical_features self.dtype = dtype self.sparse = sparse self.handle_unknown = handle_unknown def fit(self, X, y=None): """Fit OneHotEncoder to X. Parameters ---------- X : array-like, shape=(n_samples, n_feature) Input array of type int. Returns ------- self """ self.fit_transform(X) return self def _fit_transform(self, X): """Assumes X contains only categorical features.""" X = check_array(X, dtype=np.int) if np.any(X < 0): raise ValueError("X needs to contain only non-negative integers.") n_samples, n_features = X.shape if self.n_values == 'auto': n_values = np.max(X, axis=0) + 1 elif isinstance(self.n_values, numbers.Integral): if (np.max(X, axis=0) >= self.n_values).any(): raise ValueError("Feature out of bounds for n_values=%d" % self.n_values) n_values = np.empty(n_features, dtype=np.int) n_values.fill(self.n_values) else: try: n_values = np.asarray(self.n_values, dtype=int) except (ValueError, TypeError): raise TypeError("Wrong type for parameter `n_values`. 
Expected" " 'auto', int or array of ints, got %r" % type(X)) if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]: raise ValueError("Shape mismatch: if n_values is an array," " it has to be of shape (n_features,).") self.n_values_ = n_values n_values = np.hstack([[0], n_values]) indices = np.cumsum(n_values) self.feature_indices_ = indices column_indices = (X + indices[:-1]).ravel() row_indices = np.repeat(np.arange(n_samples, dtype=np.int32), n_features) data = np.ones(n_samples * n_features) out = sparse.coo_matrix((data, (row_indices, column_indices)), shape=(n_samples, indices[-1]), dtype=self.dtype).tocsr() if self.n_values == 'auto': mask = np.array(out.sum(axis=0)).ravel() != 0 active_features = np.where(mask)[0] out = out[:, active_features] self.active_features_ = active_features return out if self.sparse else out.toarray() def fit_transform(self, X, y=None): """Fit OneHotEncoder to X, then transform X. Equivalent to self.fit(X).transform(X), but more convenient and more efficient. See fit for the parameters, transform for the return value. """ return _transform_selected(X, self._fit_transform, self.categorical_features, copy=True) def _transform(self, X): """Assumes X contains only categorical features.""" X = check_array(X, dtype=np.int) if np.any(X < 0): raise ValueError("X needs to contain only non-negative integers.") n_samples, n_features = X.shape indices = self.feature_indices_ if n_features != indices.shape[0] - 1: raise ValueError("X has different shape than during fitting." " Expected %d, got %d." % (indices.shape[0] - 1, n_features)) # We use only those catgorical features of X that are known using fit. # i.e lesser than n_values_ using mask. # This means, if self.handle_unknown is "ignore", the row_indices and # col_indices corresponding to the unknown categorical feature are # ignored. 
mask = (X < self.n_values_).ravel() if np.any(~mask): if self.handle_unknown not in ['error', 'ignore']: raise ValueError("handle_unknown should be either error or " "unknown got %s" % self.handle_unknown) if self.handle_unknown == 'error': raise ValueError("unknown categorical feature present %s " "during transform." % X[~mask]) column_indices = (X + indices[:-1]).ravel()[mask] row_indices = np.repeat(np.arange(n_samples, dtype=np.int32), n_features)[mask] data = np.ones(np.sum(mask)) out = sparse.coo_matrix((data, (row_indices, column_indices)), shape=(n_samples, indices[-1]), dtype=self.dtype).tocsr() if self.n_values == 'auto': out = out[:, self.active_features_] return out if self.sparse else out.toarray() def transform(self, X): """Transform X using one-hot encoding. Parameters ---------- X : array-like, shape=(n_samples, n_features) Input array of type int. Returns ------- X_out : sparse matrix if sparse=True else a 2-d array, dtype=int Transformed input. """ return _transform_selected(X, self._transform, self.categorical_features, copy=True)
bsd-3-clause
dmsimard/ansible
test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/legacy/base.py
47
4455
# -*- coding: utf-8 -*- # Copyright 2019 Red Hat # GNU General Public License v3.0+ # (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) """ The VyOS interfaces fact class It is in this file the configuration is collected from the device for a given resource, parsed, and the facts tree is populated based on the configuration. """ from __future__ import absolute_import, division, print_function __metaclass__ = type import platform import re from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.vyos import ( run_commands, get_capabilities, ) class LegacyFactsBase(object): COMMANDS = frozenset() def __init__(self, module): self.module = module self.facts = dict() self.warnings = list() self.responses = None def populate(self): self.responses = run_commands(self.module, list(self.COMMANDS)) class Default(LegacyFactsBase): COMMANDS = [ "show version", ] def populate(self): super(Default, self).populate() data = self.responses[0] self.facts["serialnum"] = self.parse_serialnum(data) self.facts.update(self.platform_facts()) def parse_serialnum(self, data): match = re.search(r"HW S/N:\s+(\S+)", data) if match: return match.group(1) def platform_facts(self): platform_facts = {} resp = get_capabilities(self.module) device_info = resp["device_info"] platform_facts["system"] = device_info["network_os"] for item in ("model", "image", "version", "platform", "hostname"): val = device_info.get("network_os_%s" % item) if val: platform_facts[item] = val platform_facts["api"] = resp["network_api"] platform_facts["python_version"] = platform.python_version() return platform_facts class Config(LegacyFactsBase): COMMANDS = [ "show configuration commands", "show system commit", ] def populate(self): super(Config, self).populate() self.facts["config"] = self.responses commits = self.responses[1] entries = list() entry = None for line in commits.split("\n"): match = re.match(r"(\d+)\s+(.+)by(.+)via(.+)", line) if match: if entry: entries.append(entry) entry = dict( 
revision=match.group(1), datetime=match.group(2), by=str(match.group(3)).strip(), via=str(match.group(4)).strip(), comment=None, ) else: entry["comment"] = line.strip() self.facts["commits"] = entries class Neighbors(LegacyFactsBase): COMMANDS = [ "show lldp neighbors", "show lldp neighbors detail", ] def populate(self): super(Neighbors, self).populate() all_neighbors = self.responses[0] if "LLDP not configured" not in all_neighbors: neighbors = self.parse(self.responses[1]) self.facts["neighbors"] = self.parse_neighbors(neighbors) def parse(self, data): parsed = list() values = None for line in data.split("\n"): if not line: continue elif line[0] == " ": values += "\n%s" % line elif line.startswith("Interface"): if values: parsed.append(values) values = line if values: parsed.append(values) return parsed def parse_neighbors(self, data): facts = dict() for item in data: interface = self.parse_interface(item) host = self.parse_host(item) port = self.parse_port(item) if interface not in facts: facts[interface] = list() facts[interface].append(dict(host=host, port=port)) return facts def parse_interface(self, data): match = re.search(r"^Interface:\s+(\S+),", data) return match.group(1) def parse_host(self, data): match = re.search(r"SysName:\s+(.+)$", data, re.M) if match: return match.group(1) def parse_port(self, data): match = re.search(r"PortDescr:\s+(.+)$", data, re.M) if match: return match.group(1)
gpl-3.0
hypothesis/via
via/views/index.py
1
1231
from urllib.parse import urlparse from pyramid.httpexceptions import HTTPFound, HTTPNotFound from pyramid.view import view_config, view_defaults from via.views._helpers import url_from_user_input from via.views.exceptions import BadURL @view_defaults(route_name="index") class IndexViews: def __init__(self, request): self.request = request self.enabled = request.registry.settings["enable_front_page"] @view_config(request_method="GET", renderer="via:templates/index.html.jinja2") def get(self): if not self.enabled: return HTTPNotFound() self.request.response.headers["X-Robots-Tag"] = "all" return {} @view_config(request_method="POST") def post(self): if not self.enabled: return HTTPNotFound() url = url_from_user_input(self.request.params.get("url", "")) try: parsed = urlparse(url) except ValueError as ex: raise BadURL(url) from ex url_without_query = parsed._replace(query="", fragment="").geturl() return HTTPFound( self.request.route_url( route_name="proxy", url=url_without_query, _query=parsed.query ) )
mit
pculture/unisubs
apps/auth/forms.py
1
10180
# Amara, universalsubtitles.org # # Copyright (C) 2013 Participatory Culture Foundation # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see # http://www.gnu.org/licenses/agpl-3.0.html. from django import forms from django.contrib.auth.forms import UserCreationForm, AuthenticationForm from django.core.validators import EMPTY_VALUES from django.urls import reverse from django.utils.encoding import force_bytes from django.utils.html import format_html from django.utils.http import urlsafe_base64_encode from captcha.fields import CaptchaField from django.template import loader from django.utils.translation import ugettext_lazy as _ from django.contrib.auth.tokens import default_token_generator from django.conf import settings from auth.models import CustomUser as User from auth.validators import PasswordStrengthValidator class UserField(forms.Field): default_error_messages = { 'invalid': _(u'Invalid user'), } def prepare_value(self, value): if isinstance(value, User): return value.username return value def to_python(self, value): if value in EMPTY_VALUES: return None if isinstance(value, User): return value try: return User.objects.get(username=value) except User.DoesNotExist: raise forms.ValidationError(self.error_messages['invalid']) class CustomUserCreationForm(UserCreationForm): captcha = CaptchaField() class Meta: model = User fields = ("username", "email") def __init__(self, *args, **kwargs): if 
'prefix' not in kwargs: kwargs['prefix'] = 'create' super(CustomUserCreationForm, self).__init__(*args, **kwargs) self.fields['email'].required = True def validate_password(self, password): # remove this post-1.9 when setting is used user_inputs = [self.cleaned_data.get("email"), self.cleaned_data.get("username")] validator = PasswordStrengthValidator() validator.validate(password, user_inputs=user_inputs) def clean_password2(self): # Check that the two password entries match password1 = self.cleaned_data.get("password1") password2 = self.cleaned_data.get("password2") if password1 and password2 and password1 != password2: raise forms.ValidationError("Passwords don't match") return password2 def clean_password1(self): try: self.validate_password(self.cleaned_data.get("password1")) return self.cleaned_data.get("password1") except forms.ValidationError as e: raise e def save(self, commit=True): # Save the provided password in hashed format user = super(UserCreationForm, self).save(commit=False) user.set_password(self.cleaned_data["password1"]) if commit: user.save() return user class ChooseUserForm(forms.Form): """ Used in the login trap mechanism """ username = forms.CharField(max_length=100) def clean_username(self): data = self.cleaned_data['username'] try: data = User.objects.get(username=data) except User.DoesNotExist: raise forms.ValidationError("User doesn't exist.") return data class SecureAuthenticationForm(AuthenticationForm): captcha = CaptchaField() class EmailForm(forms.Form): email = forms.EmailField(label=_("E-mail"), max_length=100) url = forms.URLField(required=False, widget=forms.HiddenInput()) first_name = forms.CharField(max_length=100, required=False, widget=forms.HiddenInput()) last_name = forms.CharField(max_length=100, required=False, widget=forms.HiddenInput()) avatar = forms.URLField(required=False, widget=forms.HiddenInput()) class CustomSetPasswordForm(forms.Form): """ A form that lets a user change set their password without entering the 
old password """ error_messages = { 'invalid_email': _("The email address given doesn't match the user."), 'password_mismatch': _("The two password fields didn't match."), } email_address = forms.EmailField(label=_("Verify email address")) new_password1 = forms.CharField(label=_("New password"), widget=forms.PasswordInput) new_password2 = forms.CharField(label=_("New password confirmation"), widget=forms.PasswordInput) def __init__(self, user, *args, **kwargs): self.user = user super(CustomSetPasswordForm, self).__init__(*args, **kwargs) def validate_password(self, password): # remove this post-1.9 when setting is used user_inputs = [self.user.email, self.user.username] validator = PasswordStrengthValidator() validator.validate(password, user_inputs) def clean_new_password1(self): password = self.cleaned_data.get("new_password1") try: self.validate_password(password) except forms.ValidationError as e: raise e return password def clean_new_password2(self): password1 = self.cleaned_data.get('new_password1') password2 = self.cleaned_data.get('new_password2') if password1 and password2: if password1 != password2: raise forms.ValidationError( self.error_messages['password_mismatch'], code='password_mismatch', ) return password2 def clean_email_address(self): email = self.cleaned_data.get('email_address') if email != self.user.email: raise forms.ValidationError( self.error_messages['invalid_email'], code='invalid_email', ) def save(self, commit=True): self.user.set_password(self.cleaned_data['new_password1']) if commit: self.user.save() return self.user class CustomPasswordResetForm(forms.Form): """ This custom version of the password reset form has two differences with the default one: * It sends an email to every user matching the address, even to the ones where has_usable_password is false so that oauth users can set a password and become a regular amara user * It adds data to context for the templates so that emails and views can describe better what will happen to 
the account if password is reset """ email = forms.EmailField(label=_("E-mail"), max_length=75) def clean_email(self): """ Validates that an active user exists with the given email address. """ email = self.cleaned_data["email"] self.users_cache = User.objects.filter(email__iexact=email, is_active=True) return email def save(self, subject_template_name='registration/password_reset_subject.txt', email_template_name='registration/password_reset_email.html', html_email_template_name=None, use_https=False, token_generator=default_token_generator, from_email=None, request=None, **opts): """ Generates a one-use only link for resetting password and sends to the user. """ from django.core.mail import send_mail for user in self.users_cache: c = { 'email': user.email, 'domain': settings.HOSTNAME, 'site_name': 'Amara', 'uid': urlsafe_base64_encode(force_bytes(user.id)), 'user': user, 'token': token_generator.make_token(user), 'protocol': use_https and 'https' or 'http', 'amara_user': user.has_valid_password(), } subject = loader.render_to_string(subject_template_name, c) # Email subject *must not* contain newlines subject = ''.join(subject.splitlines()) # FIXME: should use html_email_template_name if present email = loader.render_to_string(email_template_name, c) send_mail(subject, email, from_email, [user.email]) class SecureCustomPasswordResetForm(CustomPasswordResetForm): captcha = CaptchaField() class DeleteUserForm(forms.Form): def __init__(self, *args, **kwargs): super(DeleteUserForm, self).__init__(*args, **kwargs) # Can't find a lazy format_html self.fields['password'].help_text = format_html( _('<a href="{link}">Forgot your password?</a>'), link=reverse('password_reset') ) self.fields['delete_videos_and_subtitles'].help_text = format_html( _('This will delete videos that you have added to Amara that no other user has added subtitles to. It will also delete the related subtitles. 
For more details on deactivating or deleting your profile or removing subtitles and videos you\'ve collaborated on with other Amara members please read <a href="{link}">Deactivating your Amara Account</a>'), link="https://support.amara.org/support/solutions/articles/216336-deactivating-your-user-account" ) password = forms.CharField(widget=forms.PasswordInput(), label="Please enter your password to confirm.") delete_account_data = forms.BooleanField(required=False, help_text="This will erase your personal data, including your personal name, photo, and any other data on your user profile.") delete_videos_and_subtitles = forms.BooleanField(required=False)
agpl-3.0
nzavagli/UnrealPy
UnrealPyEmbed/Source/Python/Lib/python27/bdb.py
144
21714
"""Debugger basics""" import fnmatch import sys import os import types __all__ = ["BdbQuit","Bdb","Breakpoint"] class BdbQuit(Exception): """Exception to give up completely""" class Bdb: """Generic Python debugger base class. This class takes care of details of the trace facility; a derived class should implement user interaction. The standard debugger class (pdb.Pdb) is an example. """ def __init__(self, skip=None): self.skip = set(skip) if skip else None self.breaks = {} self.fncache = {} self.frame_returning = None def canonic(self, filename): if filename == "<" + filename[1:-1] + ">": return filename canonic = self.fncache.get(filename) if not canonic: canonic = os.path.abspath(filename) canonic = os.path.normcase(canonic) self.fncache[filename] = canonic return canonic def reset(self): import linecache linecache.checkcache() self.botframe = None self._set_stopinfo(None, None) def trace_dispatch(self, frame, event, arg): if self.quitting: return # None if event == 'line': return self.dispatch_line(frame) if event == 'call': return self.dispatch_call(frame, arg) if event == 'return': return self.dispatch_return(frame, arg) if event == 'exception': return self.dispatch_exception(frame, arg) if event == 'c_call': return self.trace_dispatch if event == 'c_exception': return self.trace_dispatch if event == 'c_return': return self.trace_dispatch print 'bdb.Bdb.dispatch: unknown debugging event:', repr(event) return self.trace_dispatch def dispatch_line(self, frame): if self.stop_here(frame) or self.break_here(frame): self.user_line(frame) if self.quitting: raise BdbQuit return self.trace_dispatch def dispatch_call(self, frame, arg): # XXX 'arg' is no longer used if self.botframe is None: # First call of dispatch since reset() self.botframe = frame.f_back # (CT) Note that this may also be None! 
return self.trace_dispatch if not (self.stop_here(frame) or self.break_anywhere(frame)): # No need to trace this function return # None self.user_call(frame, arg) if self.quitting: raise BdbQuit return self.trace_dispatch def dispatch_return(self, frame, arg): if self.stop_here(frame) or frame == self.returnframe: try: self.frame_returning = frame self.user_return(frame, arg) finally: self.frame_returning = None if self.quitting: raise BdbQuit return self.trace_dispatch def dispatch_exception(self, frame, arg): if self.stop_here(frame): self.user_exception(frame, arg) if self.quitting: raise BdbQuit return self.trace_dispatch # Normally derived classes don't override the following # methods, but they may if they want to redefine the # definition of stopping and breakpoints. def is_skipped_module(self, module_name): for pattern in self.skip: if fnmatch.fnmatch(module_name, pattern): return True return False def stop_here(self, frame): # (CT) stopframe may now also be None, see dispatch_call. # (CT) the former test for None is therefore removed from here. if self.skip and \ self.is_skipped_module(frame.f_globals.get('__name__')): return False if frame is self.stopframe: if self.stoplineno == -1: return False return frame.f_lineno >= self.stoplineno while frame is not None and frame is not self.stopframe: if frame is self.botframe: return True frame = frame.f_back return False def break_here(self, frame): filename = self.canonic(frame.f_code.co_filename) if not filename in self.breaks: return False lineno = frame.f_lineno if not lineno in self.breaks[filename]: # The line itself has no breakpoint, but maybe the line is the # first line of a function with breakpoint set by function name. lineno = frame.f_code.co_firstlineno if not lineno in self.breaks[filename]: return False # flag says ok to delete temp. 
bp (bp, flag) = effective(filename, lineno, frame) if bp: self.currentbp = bp.number if (flag and bp.temporary): self.do_clear(str(bp.number)) return True else: return False def do_clear(self, arg): raise NotImplementedError, "subclass of bdb must implement do_clear()" def break_anywhere(self, frame): return self.canonic(frame.f_code.co_filename) in self.breaks # Derived classes should override the user_* methods # to gain control. def user_call(self, frame, argument_list): """This method is called when there is the remote possibility that we ever need to stop in this function.""" pass def user_line(self, frame): """This method is called when we stop or break at this line.""" pass def user_return(self, frame, return_value): """This method is called when a return trap is set here.""" pass def user_exception(self, frame, exc_info): exc_type, exc_value, exc_traceback = exc_info """This method is called if an exception occurs, but only if we are to stop at or just below this level.""" pass def _set_stopinfo(self, stopframe, returnframe, stoplineno=0): self.stopframe = stopframe self.returnframe = returnframe self.quitting = 0 # stoplineno >= 0 means: stop at line >= the stoplineno # stoplineno -1 means: don't stop at all self.stoplineno = stoplineno # Derived classes and clients can call the following methods # to affect the stepping state. def set_until(self, frame): #the name "until" is borrowed from gdb """Stop when the line with the line no greater than the current one is reached or when returning from current frame""" self._set_stopinfo(frame, frame, frame.f_lineno+1) def set_step(self): """Stop after one line of code.""" # Issue #13183: pdb skips frames after hitting a breakpoint and running # step commands. # Restore the trace function in the caller (that may not have been set # for performance reasons) when returning from the current frame. 
if self.frame_returning: caller_frame = self.frame_returning.f_back if caller_frame and not caller_frame.f_trace: caller_frame.f_trace = self.trace_dispatch self._set_stopinfo(None, None) def set_next(self, frame): """Stop on the next line in or below the given frame.""" self._set_stopinfo(frame, None) def set_return(self, frame): """Stop when returning from the given frame.""" self._set_stopinfo(frame.f_back, frame) def set_trace(self, frame=None): """Start debugging from `frame`. If frame is not specified, debugging starts from caller's frame. """ if frame is None: frame = sys._getframe().f_back self.reset() while frame: frame.f_trace = self.trace_dispatch self.botframe = frame frame = frame.f_back self.set_step() sys.settrace(self.trace_dispatch) def set_continue(self): # Don't stop except at breakpoints or when finished self._set_stopinfo(self.botframe, None, -1) if not self.breaks: # no breakpoints; run without debugger overhead sys.settrace(None) frame = sys._getframe().f_back while frame and frame is not self.botframe: del frame.f_trace frame = frame.f_back def set_quit(self): self.stopframe = self.botframe self.returnframe = None self.quitting = 1 sys.settrace(None) # Derived classes and clients can call the following methods # to manipulate breakpoints. These methods return an # error message is something went wrong, None if all is well. # Set_break prints out the breakpoint line and file:lineno. # Call self.get_*break*() to see the breakpoints or better # for bp in Breakpoint.bpbynumber: if bp: bp.bpprint(). 
def set_break(self, filename, lineno, temporary=0, cond = None, funcname=None): filename = self.canonic(filename) import linecache # Import as late as possible line = linecache.getline(filename, lineno) if not line: return 'Line %s:%d does not exist' % (filename, lineno) if not filename in self.breaks: self.breaks[filename] = [] list = self.breaks[filename] if not lineno in list: list.append(lineno) bp = Breakpoint(filename, lineno, temporary, cond, funcname) def _prune_breaks(self, filename, lineno): if (filename, lineno) not in Breakpoint.bplist: self.breaks[filename].remove(lineno) if not self.breaks[filename]: del self.breaks[filename] def clear_break(self, filename, lineno): filename = self.canonic(filename) if not filename in self.breaks: return 'There are no breakpoints in %s' % filename if lineno not in self.breaks[filename]: return 'There is no breakpoint at %s:%d' % (filename, lineno) # If there's only one bp in the list for that file,line # pair, then remove the breaks entry for bp in Breakpoint.bplist[filename, lineno][:]: bp.deleteMe() self._prune_breaks(filename, lineno) def clear_bpbynumber(self, arg): try: number = int(arg) except: return 'Non-numeric breakpoint number (%s)' % arg try: bp = Breakpoint.bpbynumber[number] except IndexError: return 'Breakpoint number (%d) out of range' % number if not bp: return 'Breakpoint (%d) already deleted' % number bp.deleteMe() self._prune_breaks(bp.file, bp.line) def clear_all_file_breaks(self, filename): filename = self.canonic(filename) if not filename in self.breaks: return 'There are no breakpoints in %s' % filename for line in self.breaks[filename]: blist = Breakpoint.bplist[filename, line] for bp in blist: bp.deleteMe() del self.breaks[filename] def clear_all_breaks(self): if not self.breaks: return 'There are no breakpoints' for bp in Breakpoint.bpbynumber: if bp: bp.deleteMe() self.breaks = {} def get_break(self, filename, lineno): filename = self.canonic(filename) return filename in self.breaks and \ 
lineno in self.breaks[filename] def get_breaks(self, filename, lineno): filename = self.canonic(filename) return filename in self.breaks and \ lineno in self.breaks[filename] and \ Breakpoint.bplist[filename, lineno] or [] def get_file_breaks(self, filename): filename = self.canonic(filename) if filename in self.breaks: return self.breaks[filename] else: return [] def get_all_breaks(self): return self.breaks # Derived classes and clients can call the following method # to get a data structure representing a stack trace. def get_stack(self, f, t): stack = [] if t and t.tb_frame is f: t = t.tb_next while f is not None: stack.append((f, f.f_lineno)) if f is self.botframe: break f = f.f_back stack.reverse() i = max(0, len(stack) - 1) while t is not None: stack.append((t.tb_frame, t.tb_lineno)) t = t.tb_next if f is None: i = max(0, len(stack) - 1) return stack, i # def format_stack_entry(self, frame_lineno, lprefix=': '): import linecache, repr frame, lineno = frame_lineno filename = self.canonic(frame.f_code.co_filename) s = '%s(%r)' % (filename, lineno) if frame.f_code.co_name: s = s + frame.f_code.co_name else: s = s + "<lambda>" if '__args__' in frame.f_locals: args = frame.f_locals['__args__'] else: args = None if args: s = s + repr.repr(args) else: s = s + '()' if '__return__' in frame.f_locals: rv = frame.f_locals['__return__'] s = s + '->' s = s + repr.repr(rv) line = linecache.getline(filename, lineno, frame.f_globals) if line: s = s + lprefix + line.strip() return s # The following two methods can be called by clients to use # a debugger to debug a statement, given as a string. 
def run(self, cmd, globals=None, locals=None): if globals is None: import __main__ globals = __main__.__dict__ if locals is None: locals = globals self.reset() sys.settrace(self.trace_dispatch) if not isinstance(cmd, types.CodeType): cmd = cmd+'\n' try: exec cmd in globals, locals except BdbQuit: pass finally: self.quitting = 1 sys.settrace(None) def runeval(self, expr, globals=None, locals=None): if globals is None: import __main__ globals = __main__.__dict__ if locals is None: locals = globals self.reset() sys.settrace(self.trace_dispatch) if not isinstance(expr, types.CodeType): expr = expr+'\n' try: return eval(expr, globals, locals) except BdbQuit: pass finally: self.quitting = 1 sys.settrace(None) def runctx(self, cmd, globals, locals): # B/W compatibility self.run(cmd, globals, locals) # This method is more useful to debug a single function call. def runcall(self, func, *args, **kwds): self.reset() sys.settrace(self.trace_dispatch) res = None try: res = func(*args, **kwds) except BdbQuit: pass finally: self.quitting = 1 sys.settrace(None) return res def set_trace(): Bdb().set_trace() class Breakpoint: """Breakpoint class Implements temporary breakpoints, ignore counts, disabling and (re)-enabling, and conditionals. Breakpoints are indexed by number through bpbynumber and by the file,line tuple using bplist. The former points to a single instance of class Breakpoint. The latter points to a list of such instances since there may be more than one breakpoint per line. """ # XXX Keeping state in the class is a mistake -- this means # you cannot have more than one active Bdb instance. next = 1 # Next bp to be assigned bplist = {} # indexed by (file, lineno) tuple bpbynumber = [None] # Each entry is None or an instance of Bpt # index 0 is unused, except for marking an # effective break .... see effective() def __init__(self, file, line, temporary=0, cond=None, funcname=None): self.funcname = funcname # Needed if funcname is not None. 
self.func_first_executable_line = None self.file = file # This better be in canonical form! self.line = line self.temporary = temporary self.cond = cond self.enabled = 1 self.ignore = 0 self.hits = 0 self.number = Breakpoint.next Breakpoint.next = Breakpoint.next + 1 # Build the two lists self.bpbynumber.append(self) if (file, line) in self.bplist: self.bplist[file, line].append(self) else: self.bplist[file, line] = [self] def deleteMe(self): index = (self.file, self.line) self.bpbynumber[self.number] = None # No longer in list self.bplist[index].remove(self) if not self.bplist[index]: # No more bp for this f:l combo del self.bplist[index] def enable(self): self.enabled = 1 def disable(self): self.enabled = 0 def bpprint(self, out=None): if out is None: out = sys.stdout if self.temporary: disp = 'del ' else: disp = 'keep ' if self.enabled: disp = disp + 'yes ' else: disp = disp + 'no ' print >>out, '%-4dbreakpoint %s at %s:%d' % (self.number, disp, self.file, self.line) if self.cond: print >>out, '\tstop only if %s' % (self.cond,) if self.ignore: print >>out, '\tignore next %d hits' % (self.ignore) if (self.hits): if (self.hits > 1): ss = 's' else: ss = '' print >>out, ('\tbreakpoint already hit %d time%s' % (self.hits, ss)) # -----------end of Breakpoint class---------- def checkfuncname(b, frame): """Check whether we should break here because of `b.funcname`.""" if not b.funcname: # Breakpoint was set via line number. if b.line != frame.f_lineno: # Breakpoint was set at a line with a def statement and the function # defined is called: don't break. return False return True # Breakpoint set via function name. if frame.f_code.co_name != b.funcname: # It's not a function call, but rather execution of def statement. return False # We are in the right frame. if not b.func_first_executable_line: # The function is entered for the 1st time. 
b.func_first_executable_line = frame.f_lineno if b.func_first_executable_line != frame.f_lineno: # But we are not at the first line number: don't break. return False return True # Determines if there is an effective (active) breakpoint at this # line of code. Returns breakpoint number or 0 if none def effective(file, line, frame): """Determine which breakpoint for this file:line is to be acted upon. Called only if we know there is a bpt at this location. Returns breakpoint that was triggered and a flag that indicates if it is ok to delete a temporary bp. """ possibles = Breakpoint.bplist[file,line] for i in range(0, len(possibles)): b = possibles[i] if b.enabled == 0: continue if not checkfuncname(b, frame): continue # Count every hit when bp is enabled b.hits = b.hits + 1 if not b.cond: # If unconditional, and ignoring, # go on to next, else break if b.ignore > 0: b.ignore = b.ignore -1 continue else: # breakpoint and marker that's ok # to delete if temporary return (b,1) else: # Conditional bp. # Ignore count applies only to those bpt hits where the # condition evaluates to true. try: val = eval(b.cond, frame.f_globals, frame.f_locals) if val: if b.ignore > 0: b.ignore = b.ignore -1 # continue else: return (b,1) # else: # continue except: # if eval fails, most conservative # thing is to stop on breakpoint # regardless of ignore count. # Don't delete temporary, # as another hint to user. return (b,0) return (None, None) # -------------------- testing -------------------- class Tdb(Bdb): def user_call(self, frame, args): name = frame.f_code.co_name if not name: name = '???' print '+++ call', name, args def user_line(self, frame): import linecache name = frame.f_code.co_name if not name: name = '???' 
fn = self.canonic(frame.f_code.co_filename) line = linecache.getline(fn, frame.f_lineno, frame.f_globals) print '+++', fn, frame.f_lineno, name, ':', line.strip() def user_return(self, frame, retval): print '+++ return', retval def user_exception(self, frame, exc_stuff): print '+++ exception', exc_stuff self.set_continue() def foo(n): print 'foo(', n, ')' x = bar(n*10) print 'bar returned', x def bar(a): print 'bar(', a, ')' return a/2 def test(): t = Tdb() t.run('import bdb; bdb.foo(10)') # end
mit
CongLi/avocado-vt
virttest/libvirt_vm.py
2
106547
""" Utility classes and functions to handle Virtual Machine creation using libvirt. :copyright: 2011 Red Hat Inc. """ import time import string import os import logging import fcntl import re import shutil import tempfile import platform import aexpect from avocado.utils import process from avocado.utils import crypto from avocado.core import exceptions from . import error_context from . import utils_misc from . import virt_vm from . import storage from . import remote from . import virsh from . import libvirt_xml from . import data_dir from . import xml_utils from . import utils_selinux def normalize_connect_uri(connect_uri): """ Processes connect_uri Cartesian into something virsh can use :param connect_uri: Cartesian Params setting :return: Normalized connect_uri """ if connect_uri == "default": result = virsh.canonical_uri() else: result = virsh.canonical_uri(uri=connect_uri) if not result: raise ValueError("Normalizing connect_uri '%s' failed, is libvirt " "running?" % connect_uri) return result def complete_uri(ip_address, protocol=None, port=None): """ Return a complete URI with the combination of ip_address and local uri. It is useful when you need to connect remote hypervisor. :param ip_address: an ip address or a hostname :param protocol: protocol for uri eg: tcp, spice etc. :param port: port for the protocol :return: a complete uri """ if protocol and port: complete_uri = "%s://%s:%s" % (protocol, ip_address, port) else: # Allow to raise CmdError if canonical_uri is failed uri = virsh.canonical_uri(ignore_status=False) driver = uri.split(":")[0] # The libvirtd daemon's mode(system or session on qemu) daemon_mode = uri.split("/")[-1] complete_uri = "%s+ssh://%s/%s" % (driver, ip_address, daemon_mode) return complete_uri def get_uri_with_transport(uri_type='qemu', transport="", dest_ip=""): """ Return a URI to connect driver on dest with a specified transport. :param origin_uri: The URI on dest used to connect itself directly. 
:param transport: The transport type connect to dest. :param dest_ip: The ip of destination. """ _type2uri_ = {'qemu': "qemu:///system", 'qemu_system': "qemu:///system", 'qemu_session': "qemu:///session", 'lxc': "lxc:///", 'xen': "xen:///", 'esx': "esx:///"} try: origin_uri = _type2uri_[uri_type] except KeyError: raise ValueError("Param uri_type = %s is not supported." % (uri_type)) # For example: # ("qemu:///system")-->("qemu", "system") # ("lxc:///")-->("lxc", "") origin_uri_elems = origin_uri.split(":///") transport_uri_driver = origin_uri_elems[0] transport_uri_dest = origin_uri_elems[-1] if transport: transport_uri_driver = ("%s+%s" % (transport_uri_driver, transport)) transport_uri_dest = ("://%s/%s" % (dest_ip, transport_uri_dest)) return ("%s%s" % (transport_uri_driver, transport_uri_dest)) class VM(virt_vm.BaseVM): """ This class handles all basic VM operations for libvirt. """ def __init__(self, name, params, root_dir, address_cache, state=None): """ Initialize the object and set a few attributes. 
:param name: The name of the object :param params: A dict containing VM params (see method make_create_command for a full description) :param root_dir: Base directory for relative filenames :param address_cache: A dict that maps MAC addresses to IP addresses :param state: If provided, use this as self.__dict__ """ if state: self.__dict__ = state else: self.process = None self.serial_ports = [] self.serial_console_log = None self.serial_console = None self.redirs = {} self.vnc_port = None self.vnc_autoport = True self.pci_assignable = None self.netdev_id = [] self.device_id = [] self.pci_devices = [] self.uuid = None self.remote_sessions = [] self.spice_port = 8000 self.name = name self.params = params self.root_dir = root_dir self.address_cache = address_cache self.vnclisten = "0.0.0.0" self.connect_uri = normalize_connect_uri(params.get("connect_uri", "default")) self.driver_type = virsh.driver(uri=self.connect_uri) self.params['driver_type_' + self.name] = self.driver_type # virtnet init depends on vm_type/driver_type being set w/in params super(VM, self).__init__(name, params) logging.info("Libvirt VM '%s', driver '%s', uri '%s'", self.name, self.driver_type, self.connect_uri) def is_lxc(self): """ Return True if VM is linux container. """ return (self.connect_uri and self.connect_uri.count("lxc")) def is_qemu(self): """ Return True if VM is a qemu guest. """ return (self.connect_uri and self.connect_uri.count("qemu")) def is_xen(self): """ Return True if VM is a xen guest. """ return (self.connect_uri and self.connect_uri.count("xen")) def is_esx(self): """ Return True if VM is a esx guest. """ return (self.connect_uri and self.connect_uri.count("esx")) def verify_alive(self): """ Make sure the VM is alive. :raise VMDeadError: If the VM is dead """ if not self.is_alive(): raise virt_vm.VMDeadError("Domain %s is inactive" % self.name, self.state()) def is_alive(self): """ Return True if VM is alive. 
""" return virsh.is_alive(self.name, uri=self.connect_uri) def is_dead(self): """ Return True if VM is dead. """ return virsh.is_dead(self.name, uri=self.connect_uri) def is_paused(self): """ Return True if VM is paused. """ return (self.state() == "paused") def is_persistent(self): """ Return True if VM is persistent. """ try: dominfo = (virsh.dominfo(self.name, uri=self.connect_uri).stdout.strip()) return bool(re.search(r"^Persistent:\s+[Yy]es", dominfo, re.MULTILINE)) except process.CmdError: return False def is_autostart(self): """ Return True if VM is autostart. """ try: dominfo = (virsh.dominfo(self.name, uri=self.connect_uri).stdout.strip()) return bool(re.search(r"^Autostart:\s+enable", dominfo, re.MULTILINE)) except process.CmdError: return False def exists(self): """ Return True if VM exists. """ return virsh.domain_exists(self.name, uri=self.connect_uri) def undefine(self, options=None): """ Undefine the VM. """ try: virsh.undefine(self.name, options=options, uri=self.connect_uri, ignore_status=False) except process.CmdError, detail: logging.error("Undefined VM %s failed:\n%s", self.name, detail) return False return True def define(self, xml_file): """ Define the VM. """ if not os.path.exists(xml_file): logging.error("File %s not found." % xml_file) return False try: virsh.define(xml_file, uri=self.connect_uri, ignore_status=False) except process.CmdError, detail: logging.error("Defined VM from %s failed:\n%s", xml_file, detail) return False return True def state(self): """ Return domain state. """ return virsh.domstate(self.name, uri=self.connect_uri).stdout.strip() def get_id(self): """ Return VM's ID. """ return virsh.domid(self.name, uri=self.connect_uri).stdout.strip() def get_xml(self): """ Return VM's xml file. """ return virsh.dumpxml(self.name, uri=self.connect_uri).stdout.strip() def backup_xml(self, active=False): """ Backup the guest's xmlfile. """ # Since backup_xml() is not a function for testing, # we have to handle the exception here. 
try: xml_file = tempfile.mktemp(dir=data_dir.get_tmp_dir()) if active: extra = "" else: extra = "--inactive" virsh.dumpxml(self.name, extra=extra, to_file=xml_file, uri=self.connect_uri) return xml_file except Exception, detail: if os.path.exists(xml_file): os.remove(xml_file) logging.error("Failed to backup xml file:\n%s", detail) return "" def clone(self, name=None, params=None, root_dir=None, address_cache=None, copy_state=False): """ Return a clone of the VM object with optionally modified parameters. The clone is initially not alive and needs to be started using create(). Any parameters not passed to this function are copied from the source VM. :param name: Optional new VM name :param params: Optional new VM creation parameters :param root_dir: Optional new base directory for relative filenames :param address_cache: A dict that maps MAC addresses to IP addresses :param copy_state: If True, copy the original VM's state to the clone. Mainly useful for make_create_command(). """ if name is None: name = self.name if params is None: params = self.params.copy() if root_dir is None: root_dir = self.root_dir if address_cache is None: address_cache = self.address_cache if copy_state: state = self.__dict__.copy() else: state = None return VM(name, params, root_dir, address_cache, state) def make_create_command(self, name=None, params=None, root_dir=None): """ Generate a libvirt command line. All parameters are optional. If a parameter is not supplied, the corresponding value stored in the class attributes is used. 
:param name: The name of the object :param params: A dict containing VM params :param root_dir: Base directory for relative filenames :note: The params dict should contain: mem -- memory size in MBs cdrom -- ISO filename to use with the qemu -cdrom parameter extra_params -- a string to append to the qemu command shell_port -- port of the remote shell daemon on the guest (SSH, Telnet or the home-made Remote Shell Server) shell_client -- client program to use for connecting to the remote shell daemon on the guest (ssh, telnet or nc) x11_display -- if specified, the DISPLAY environment variable will be be set to this value for the qemu process (useful for SDL rendering) images -- a list of image object names, separated by spaces nics -- a list of NIC object names, separated by spaces For each image in images: drive_format -- string to pass as 'if' parameter for this image (e.g. ide, scsi) image_snapshot -- if yes, pass 'snapshot=on' to qemu for this image image_boot -- if yes, pass 'boot=on' to qemu for this image In addition, all parameters required by get_image_filename. For each NIC in nics: nic_model -- string to pass as 'model' parameter for this NIC (e.g. e1000) """ # helper function for command line option wrappers def has_option(help_text, option): return bool(re.search(r"--%s" % option, help_text, re.MULTILINE)) def has_os_variant(os_text, os_variant): return bool(re.search(r"%s" % os_variant, os_text, re.MULTILINE)) def has_sub_option(option, sub_option): option_help_text = process.system_output("%s --%s help" % (virt_install_binary, option), verbose=False) return bool(re.search(r"%s" % sub_option, option_help_text, re.MULTILINE)) # Wrappers for all supported libvirt command line parameters. # This is meant to allow support for multiple libvirt versions. # Each of these functions receives the output of 'libvirt --help' as a # parameter, and should add the requested command line option # accordingly. 
def add_name(help_text, name): return " --name '%s'" % name def add_machine_type(help_text, machine_type): if has_option(help_text, "machine"): return " --machine %s" % machine_type else: return "" def add_hvm_or_pv(help_text, hvm_or_pv): if hvm_or_pv == "hvm": return " --hvm --accelerate" elif hvm_or_pv == "pv": return " --paravirt" else: logging.warning("Unknown virt type hvm_or_pv, using default.") return "" def add_mem(help_text, mem, maxmem=None): if has_option(help_text, "memory"): cmd = " --memory=%s" % mem if maxmem: if not has_sub_option('memory', 'maxmemory'): logging.warning("maxmemory option not supported by " "virt-install") else: cmd += ",maxmemory=%s" % maxmem return cmd else: return " --ram=%s" % mem def add_check_cpu(help_text): if has_option(help_text, "check-cpu"): return " --check-cpu" else: return "" def add_smp(help_text, smp, maxvcpus=None, sockets=None, cores=None, threads=None): cmd = " --vcpu=%s" % smp if maxvcpus: cmd += ",maxvcpus=%s" % maxvcpus if sockets: cmd += ",sockets=%s" % sockets if cores: cmd += ",cores=%s" % cores if threads: cmd += ",threads=%s" % threads return cmd def add_location(help_text, location): if has_option(help_text, "location"): return " --location %s" % location else: return "" def add_cdrom(help_text, filename, index=None): if has_option(help_text, "cdrom"): return " --cdrom %s" % filename else: return "" def add_pxe(help_text): if has_option(help_text, "pxe"): return " --pxe" else: return "" def add_import(help_text): if has_option(help_text, "import"): return " --import" else: return "" def add_controller(model=None): """ Add controller option for virt-install command line. :param model: string, controller model. :return: string, empty or controller option. """ if model == 'virtio-scsi': return " --controller type=scsi,model=virtio-scsi" else: return "" def check_controller(virt_install_cmd_line, controller): """ Check for the controller already available in virt-install command line. 
:param virt_install_cmd_line: string, virt-install command line. :param controller: string, controller model. :return: True if succeed of False if failed. """ found = False output = re.findall( r"controller\stype=(\S+),model=(\S+)", virt_install_cmd_line) for item in output: if controller in item[1]: found = True break return found def add_drive(help_text, filename, pool=None, vol=None, device=None, bus=None, perms=None, size=None, sparse=False, cache=None, fmt=None): cmd = " --disk" if filename: cmd += " path=%s" % filename elif pool: if vol: cmd += " vol=%s/%s" % (pool, vol) else: cmd += " pool=%s" % pool if device: cmd += ",device=%s" % device if bus: cmd += ",bus=%s" % bus if perms: cmd += ",%s" % perms if size: cmd += ",size=%s" % size.rstrip("Gg") if sparse: cmd += ",sparse=false" if fmt: cmd += ",format=%s" % fmt if cache: cmd += ",cache=%s" % cache return cmd def add_floppy(help_text, filename): return " --disk path=%s,device=floppy,ro" % filename def add_vnc(help_text, vnc_port=None): if vnc_port: return " --vnc --vncport=%d" % (vnc_port) else: return " --vnc" def add_vnclisten(help_text, vnclisten): if has_option(help_text, "vnclisten"): return " --vnclisten=%s" % (vnclisten) else: return "" def add_sdl(help_text): if has_option(help_text, "sdl"): return " --sdl" else: return "" def add_nographic(help_text): return " --nographics" def add_video(help_text, video_device): if has_option(help_text, "video"): return " --video=%s" % (video_device) else: return "" def add_uuid(help_text, uuid): if has_option(help_text, "uuid"): return " --uuid %s" % uuid else: return "" def add_os_type(help_text, os_type): if has_option(help_text, "os-type"): return " --os-type %s" % os_type else: return "" def add_os_variant(help_text, os_variant): if has_option(help_text, "os-variant"): return " --os-variant %s" % os_variant else: return "" def add_pcidevice(help_text, pci_device): if has_option(help_text, "host-device"): return " --host-device %s" % pci_device else: return "" 
def add_soundhw(help_text, sound_device): if has_option(help_text, "soundhw"): return " --soundhw %s" % sound_device else: return "" def add_serial(help_text): if has_option(help_text, "serial"): return " --serial pty" else: return "" def add_kernel_cmdline(help_text, cmdline): return " -append %s" % cmdline def add_connect_uri(help_text, uri): if uri and has_option(help_text, "connect"): return " --connect=%s" % uri else: return "" def add_security(help_text, sec_type, sec_label=None, sec_relabel=None): """ Return security options for install command. """ if has_option(help_text, "security"): result = " --security" if sec_type == 'static': if sec_label is None: raise ValueError("Seclabel is not setted for static.") result += " type=static,label=%s" % (sec_label) elif sec_type == 'dynamic': result += " type=dynamic" else: raise ValueError("Security type %s is not supported." % sec_type) if sec_relabel is not None: result += ",relabel=%s" % sec_relabel else: result = "" return result def add_nic(help_text, nic_params): """ Return additional command line params based on dict-like nic_params """ mac = nic_params.get('mac') nettype = nic_params.get('nettype') netdst = nic_params.get('netdst') nic_model = nic_params.get('nic_model') if nettype: result = " --network=%s" % nettype else: result = "" if has_option(help_text, "bridge"): # older libvirt (--network=NATdev --bridge=bridgename # --mac=mac) if nettype != 'user': result += ':%s' % netdst if mac: # possible to specify --mac w/o --network result += " --mac=%s" % mac else: # newer libvirt (--network=mynet,model=virtio,mac=00:11) if nettype != 'user': result += '=%s' % netdst if nettype and nic_model: # only supported along with nettype result += ",model=%s" % nic_model if nettype and mac: result += ',mac=%s' % mac elif mac: # possible to specify --mac w/o --network result += " --mac=%s" % mac logging.debug("vm.make_create_command.add_nic returning: %s", result) return result def add_memballoon(help_text, 
memballoon_model): """ Adding memballoon device to the vm. :param help_text: string, virt-install help text. :param memballon_model: string, memballoon model. :return: string, empty or memballoon model option. """ if has_option(help_text, "memballoon"): result = " --memballoon model=%s" % memballoon_model else: logging.warning("memballoon is not supported") result = "" logging.debug("vm.add_memballoon returning: %s", result) return result def add_kernel(help_text, cmdline, kernel_path=None, initrd_path=None, kernel_args=None): """ Adding Custom kernel option to boot. : param help_text: string, virt-install help text : param cmdline: string, current virt-install cmdline : param kernel_path: string, custom kernel path. : param initrd_path: string, custom initrd path. : param kernel_args: string, custom boot args. """ if has_option(help_text, "boot"): if "--boot" in cmdline: result = "," else: result = " --boot " if has_sub_option("boot", "kernel") and kernel_path: result += "kernel=%s," % kernel_path if has_sub_option("boot", "initrd") and initrd_path: result += "initrd=%s," % initrd_path if has_sub_option("boot", "kernel_args") and kernel_args: result += "kernel_args=%s," % kernel_args else: result = "" logging.warning("boot option is not supported") return result.strip(',') # End of command line option wrappers if name is None: name = self.name if params is None: params = self.params if root_dir is None: root_dir = self.root_dir # Clone this VM using the new params vm = self.clone(name, params, root_dir, copy_state=True) virt_install_binary = utils_misc.get_path( root_dir, params.get("virt_install_binary", "virt-install")) help_text = process.system_output("%s --help" % virt_install_binary, verbose=False) try: os_text = process.system_output("osinfo-query os --fields short-id", verbose=False) except process.CmdError: os_text = process.system_output("%s --os-variant list" % virt_install_binary, verbose=False) # Find all supported machine types, so we can rule out an 
unsupported # machine type option passed in the configuration. hvm_or_pv = params.get("hvm_or_pv", "hvm") # default to 'uname -m' output arch_name = params.get("vm_arch_name", platform.machine()) capabs = libvirt_xml.CapabilityXML() try: support_machine_type = capabs.guest_capabilities[ hvm_or_pv][arch_name]['machine'] except KeyError, detail: if detail.args[0] == hvm_or_pv: raise KeyError("No libvirt support for %s virtualization, " "does system hardware + software support it?" % hvm_or_pv) elif detail.args[0] == arch_name: raise KeyError("No libvirt support for %s virtualization of " "%s, does system hardware + software support " "it?" % (hvm_or_pv, arch_name)) raise logging.debug("Machine types supported for %s/%s: %s", hvm_or_pv, arch_name, support_machine_type) # Start constructing the qemu command virt_install_cmd = "" # Set the X11 display parameter if requested if params.get("x11_display"): virt_install_cmd += "DISPLAY=%s " % params.get("x11_display") # Add the qemu binary virt_install_cmd += virt_install_binary # set connect uri virt_install_cmd += add_connect_uri(help_text, self.connect_uri) # hvm or pv specified by libvirt switch (pv used by Xen only) if hvm_or_pv: virt_install_cmd += add_hvm_or_pv(help_text, hvm_or_pv) # Add the VM's name virt_install_cmd += add_name(help_text, name) machine_type = params.get("machine_type") if machine_type: if machine_type in support_machine_type: virt_install_cmd += add_machine_type(help_text, machine_type) else: raise exceptions.TestSkipError("Unsupported machine type %s." % (machine_type)) mem = params.get("mem") maxmemory = params.get("maxmemory", None) if mem: virt_install_cmd += add_mem(help_text, mem, maxmemory) # TODO: should we do the check before we call ? negative case ? 
check_cpu = params.get("use_check_cpu") if check_cpu: virt_install_cmd += add_check_cpu(help_text) smp = params.get("smp") vcpu_max_cpus = params.get("vcpu_maxcpus") vcpu_sockets = params.get("vcpu_sockets") vcpu_cores = params.get("vcpu_cores") vcpu_threads = params.get("vcpu_threads") if smp: virt_install_cmd += add_smp(help_text, smp, vcpu_max_cpus, vcpu_sockets, vcpu_cores, vcpu_threads) # TODO: directory location for vmlinuz/kernel for cdrom install ? location = None if params.get("medium") == 'url': location = params.get('url') elif params.get("medium") == 'kernel_initrd': # directory location of kernel/initrd pair (directory layout must # be in format libvirt will recognize) location = params.get("image_dir") elif params.get("medium") == 'nfs': location = "nfs:%s:%s" % (params.get("nfs_server"), params.get("nfs_dir")) elif params.get("medium") == 'cdrom': if params.get("use_libvirt_cdrom_switch") == 'yes': virt_install_cmd += add_cdrom( help_text, params.get("cdrom_cd1")) elif params.get("unattended_delivery_method") == "integrated": cdrom_path = os.path.join(data_dir.get_data_dir(), params.get("cdrom_unattended")) virt_install_cmd += add_cdrom(help_text, cdrom_path) else: location = data_dir.get_data_dir() kernel_dir = os.path.dirname(params.get("kernel")) kernel_parent_dir = os.path.dirname(kernel_dir) pxeboot_link = os.path.join(kernel_parent_dir, "pxeboot") if os.path.islink(pxeboot_link): os.unlink(pxeboot_link) if os.path.isdir(pxeboot_link): logging.info("Removed old %s leftover directory", pxeboot_link) shutil.rmtree(pxeboot_link) os.symlink(kernel_dir, pxeboot_link) elif params.get("medium") == "import": virt_install_cmd += add_import(help_text) if location: virt_install_cmd += add_location(help_text, location) # Disable display when vga is disabled (used mainly by machines.cfg) if params.get("vga") == "none": virt_install_cmd += add_nographic(help_text) elif params.get("display") == "vnc": if params.get("vnc_autoport") == "yes": vm.vnc_autoport = 
True else: vm.vnc_autoport = False if not vm.vnc_autoport and params.get("vnc_port"): vm.vnc_port = int(params.get("vnc_port")) virt_install_cmd += add_vnc(help_text, vm.vnc_port) if params.get("vnclisten"): vm.vnclisten = params.get("vnclisten") virt_install_cmd += add_vnclisten(help_text, vm.vnclisten) elif params.get("display") == "sdl": virt_install_cmd += add_sdl(help_text) elif params.get("display") == "nographic": virt_install_cmd += add_nographic(help_text) video_device = params.get("video_device") if video_device: virt_install_cmd += add_video(help_text, video_device) sound_device = params.get("sound_device") if sound_device: virt_install_cmd += add_soundhw(help_text, sound_device) # if none is given a random UUID will be generated by libvirt if params.get("uuid"): virt_install_cmd += add_uuid(help_text, params.get("uuid")) # selectable OS type if params.get("use_os_type") == "yes": virt_install_cmd += add_os_type(help_text, params.get("os_type")) # selectable OS variant if params.get("use_os_variant") == "yes": if not has_os_variant(os_text, params.get("os_variant")): raise exceptions.TestSkipError("Unsupported OS variant: %s.\n" "Supported variants: %s" % (params.get('os_variant'), os_text)) virt_install_cmd += add_os_variant( help_text, params.get("os_variant")) # Add serial console virt_install_cmd += add_serial(help_text) # Add memballoon device memballoon_model = params.get("memballoon_model") if memballoon_model: virt_install_cmd += add_memballoon(help_text, memballoon_model) # If the PCI assignment step went OK, add each one of the PCI assigned # devices to the command line. 
if self.pci_devices: for pci_id in self.pci_devices: virt_install_cmd += add_pcidevice(help_text, pci_id) for image_name in params.objects("images"): image_params = params.object_params(image_name) base_dir = image_params.get("images_base_dir", data_dir.get_data_dir()) filename = storage.get_image_filename(image_params, base_dir) if image_params.get("use_storage_pool") == "yes": filename = None virt_install_cmd += add_drive(help_text, filename, image_params.get("image_pool"), image_params.get("image_vol"), image_params.get("image_device"), image_params.get("image_bus"), image_params.get("image_perms"), image_params.get("image_size"), image_params.get("drive_sparse"), image_params.get("drive_cache"), image_params.get("image_format")) if image_params.get("boot_drive") == "no": continue if filename: libvirt_controller = image_params.get( "libvirt_controller", None) _drive_format = image_params.get("drive_format") if libvirt_controller: if not check_controller(virt_install_cmd, libvirt_controller): virt_install_cmd += add_controller(libvirt_controller) # this will reset the scsi-hd to scsi as we are adding controller # to mention the drive format if 'scsi' in _drive_format: _drive_format = "scsi" virt_install_cmd += add_drive(help_text, filename, None, None, None, _drive_format, None, image_params.get("image_size"), image_params.get("drive_sparse"), image_params.get("drive_cache"), image_params.get("image_format")) unattended_integrated = (params.get('unattended_delivery_method') != 'integrated') xen_pv = self.driver_type == 'xen' and params.get('hvm_or_pv') == 'pv' if unattended_integrated and not xen_pv: for cdrom in params.objects("cdroms"): cdrom_params = params.object_params(cdrom) iso = cdrom_params.get("cdrom") if params.get("use_libvirt_cdrom_switch") == 'yes': # we don't want to skip the winutils iso if not cdrom == 'winutils': logging.debug( "Using --cdrom instead of --disk for install") logging.debug("Skipping CDROM:%s:%s", cdrom, iso) continue if 
params.get("medium") == 'cdrom_no_kernel_initrd': if iso == params.get("cdrom_cd1"): logging.debug("Using cdrom or url for install") logging.debug("Skipping CDROM: %s", iso) continue if iso: iso_path = utils_misc.get_path(root_dir, iso) iso_image_pool = image_params.get("iso_image_pool") iso_image_vol = image_params.get("iso_image_vol") virt_install_cmd += add_drive(help_text, iso_path, iso_image_pool, virt_install_cmd, 'cdrom', None, None, None, None, None, None) # We may want to add {floppy_otps} parameter for -fda # {fat:floppy:}/path/. However vvfat is not usually recommended. # Only support to add the main floppy if you want to add the second # one please modify this part. floppy = params.get("floppy_name") if floppy: floppy = utils_misc.get_path(data_dir.get_data_dir(), floppy) virt_install_cmd += add_drive(help_text, floppy, None, None, 'floppy', None, None, None, None, None, None) # setup networking parameters for nic in vm.virtnet: # make_create_command can be called w/o vm.create() nic = vm.add_nic(**dict(nic)) logging.debug("make_create_command() setting up command for" " nic: %s" % str(nic)) virt_install_cmd += add_nic(help_text, nic) if params.get("use_no_reboot") == "yes": virt_install_cmd += " --noreboot" if params.get("use_autostart") == "yes": virt_install_cmd += " --autostart" if params.get("virt_install_debug") == "yes": virt_install_cmd += " --debug" emulator_path = params.get("emulator_path", None) if emulator_path: if not has_sub_option('boot', 'emulator'): logging.warning("emulator option not supported by virt-install") else: virt_install_cmd += " --boot emulator=%s" % emulator_path kernel = params.get("kernel", None) initrd = params.get("initrd", None) kernel_args = params.get("kernel_args", None) if (kernel or initrd) and kernel_args: virt_install_cmd += add_kernel(help_text, virt_install_cmd, kernel, initrd, kernel_args) # bz still open, not fully functional yet if params.get("use_virt_install_wait") == "yes": virt_install_cmd += (" --wait 
%s" % params.get("virt_install_wait_time")) kernel_params = params.get("kernel_params") if kernel_params: virt_install_cmd += " --extra-args '%s'" % kernel_params virt_install_cmd += " --noautoconsole" sec_type = params.get("sec_type", None) if sec_type: sec_label = params.get("sec_label", None) sec_relabel = params.get("sec_relabel", None) virt_install_cmd += add_security(help_text, sec_type=sec_type, sec_label=sec_label, sec_relabel=sec_relabel) return virt_install_cmd def get_serial_console_filename(self, name): """ Return the serial console filename. :param name: The serial port name. """ return "serial-%s-%s-%s.log" % (name, self.name, utils_misc.generate_random_string(4)) def get_serial_console_filenames(self): """ Return a list of all serial console filenames (as specified in the VM's params). """ return [self.get_serial_console_filename(_) for _ in self.params.objects("serials")] def create_serial_console(self): """ Establish a session with the serial console. The libvirt version uses virsh console to manage it. 
""" if not self.serial_ports: for serial in self.params.objects("serials"): self.serial_ports.append(serial) if self.serial_console is None: try: cmd = 'virsh' if self.connect_uri: cmd += ' -c %s' % self.connect_uri cmd += (" console %s %s" % (self.name, self.serial_ports[0])) except IndexError: raise virt_vm.VMConfigMissingError(self.name, "serial") output_func = utils_misc.log_line # Because qemu-kvm uses this # Because qemu-kvm hard-codes this output_filename = self.get_serial_console_filename(self.serial_ports[0]) output_params = (output_filename,) prompt = self.params.get("shell_prompt", "[\#\$]") self.serial_console = aexpect.ShellSession(command=cmd, auto_close=False, output_func=output_func, output_params=output_params, prompt=prompt) # Cause serial_console.close() to close open log file self.serial_console.set_log_file(output_filename) self.serial_console_log = os.path.join(utils_misc.get_log_file_dir(), output_filename) def set_root_serial_console(self, device, remove=False): """ Allow or ban root to login through serial console. :param device: device to set root login :param allow_root: do remove operation """ try: session = self.login() except (remote.LoginError, virt_vm.VMError), e: logging.debug(e) else: try: securetty_output = session.cmd_output("cat /etc/securetty") devices = str(securetty_output).strip().splitlines() if device not in devices: if not remove: session.sendline("echo %s >> /etc/securetty" % device) else: if remove: session.sendline("sed -i -e /%s/d /etc/securetty" % device) logging.debug("Set root login for %s successfully.", device) return True finally: session.close() logging.debug("Set root login for %s failed.", device) return False def set_kernel_console(self, device, speed=None, remove=False): """ Set kernel parameter for given console device. 
:param device: a console device :param speed: speed of serial console :param remove: do remove operation """ try: session = self.login() except (remote.LoginError, virt_vm.VMError), e: logging.debug(e) else: try: grub = "/boot/grub/grub.conf" if not session.cmd_status("ls /boot/grub2/grub.cfg"): grub = "/boot/grub2/grub.cfg" kernel_params = "console=%s" % device if speed is not None: kernel_params += ",%s" % speed output = session.cmd_output("cat %s" % grub) if not re.search("console=%s" % device, output): if not remove: session.sendline("sed -i -e \'s/vmlinuz-.*/& %s/g\'" " %s; sync" % (kernel_params, grub)) else: if remove: session.sendline("sed -i -e \'s/console=%s\w*\s//g\'" " %s; sync" % (device, grub)) logging.debug("Set kernel params for %s successfully.", device) return True finally: session.close() logging.debug("Set kernel params for %s failed.", device) return False def set_kernel_param(self, parameter, value=None, remove=False): """ Set a specific kernel parameter. :param option: A kernel parameter to set. :param value: The value of the parameter to be set. :param remove: Remove the parameter if True. :return: True if succeed of False if failed. 
""" if self.is_dead(): logging.error("Can't set kernel param on a dead VM.") return False session = self.wait_for_login() try: grub_path = utils_misc.get_bootloader_cfg(session) if not grub_path: return False grub_text = session.cmd_output("cat %s" % grub_path) kernel_lines = [l.strip() for l in grub_text.splitlines() if re.match(r"\s*(linux|kernel).*", l)] if not kernel_lines: logging.error("Can't find any kernel lines in grub " "file %s:\n%s" % (grub_path, grub_text)) return False for line in kernel_lines: line = line.replace('\t', r'\t') if remove: new_string = "" else: if value is None: new_string = parameter else: new_string = "%s=%s" % (parameter, value) patts = [ "\s+(%s=\S*)(\s|$)" % parameter, "\s+(%s)(\s|$)" % parameter, ] old_string = "" for patt in patts: res = re.search(patt, line) if res: old_string = res.group(1) break if old_string: new_line = line.replace(old_string, new_string) else: new_line = " ".join((line, new_string)) line_patt = "\s*".join(line.split()) logging.debug("Substituting grub line '%s' to '%s'." 
% (line, new_line)) stat_sed, output = session.cmd_status_output( "sed -i --follow-symlinks -e \"s@%s@%s@g\" %s" % (line_patt, new_line, grub_path)) if stat_sed: logging.error("Failed to substitute grub file:\n%s" % output) return False if remove: logging.debug("Remove kernel params %s successfully.", parameter) else: logging.debug("Set kernel params %s to %s successfully.", parameter, value) return True finally: session.close() def set_boot_kernel(self, index, debug_kernel=False): """ Set default kernel to the second one or to debug kernel :param index: index of kernel to set to default :param debug_kernel: True if set debug kernel to default :return: default kernel """ if self.is_dead(): logging.error("Can't set kernel param on a dead VM.") return False session = self.wait_for_login() try: grub_path = utils_misc.get_bootloader_cfg(session) if not grub_path: return if "grub2" in grub_path: grub = 2 output = session.cmd("cat %s |grep menuentry" % grub_path) kernel_list = re.findall("menuentry '.*?'", output) else: grub = 1 output = session.cmd("cat %s |grep initramfs" % grub_path) kernel_list = re.findall("-.*", output) if index >= len(kernel_list): logging.error("Index out of kernel list") return logging.debug("kernel list of vm:") logging.debug(kernel_list) if debug_kernel: index = -1 logging.info("Setting debug kernel as default") for i in range(len(kernel_list)): if "debug" in kernel_list[i]: index = i break if index == -1: logging.error("No debug kernel in grub file!") return if grub == 1: cmd_set_grub = "sed -i 's/default=./default=%d/' " % index cmd_set_grub += grub_path boot_kernel = kernel_list[index].strip("-") else: boot_kernel = kernel_list[index].split("'")[1].strip("'") cmd_set_grub = 'grub2-set-default %d' % index session.cmd(cmd_set_grub) return boot_kernel finally: session.close() def has_swap(self): """ Check if there is any active swap partition/file. :return : True if swap is on or False otherwise. 
""" if self.is_dead(): logging.error("Can't check swap on a dead VM.") return False session = self.wait_for_login() try: cmd = "swapon -s" output = session.cmd_output(cmd) if output.strip(): return True return False finally: session.close() def create_swap_partition(self, swap_path=None): """ Make a swap partition and active it. A cleanup_swap() should be call after use to clean up the environment changed. :param swap_path: Swap image path. """ if self.is_dead(): logging.error("Can't create swap on a dead VM.") return False if not swap_path: swap_path = os.path.join(data_dir.get_tmp_dir(), "swap_image") swap_size = self.get_used_mem() process.run("qemu-img create %s %s" % (swap_path, swap_size * 1024)) self.created_swap_path = swap_path device = self.attach_disk(swap_path, extra="--persistent") session = self.wait_for_login() try: dev_path = "/dev/" + device session.cmd_status("mkswap %s" % dev_path) session.cmd_status("swapon %s" % dev_path) self.set_kernel_param("resume", dev_path) return True finally: session.close() logging.error("Failed to create a swap partition.") return False def create_swap_file(self, swapfile='/swapfile'): """ Make a swap file and active it through a session. A cleanup_swap() should be call after use to clean up the environment changed. :param swapfile: Swap file path in VM to be created. """ if self.is_dead(): logging.error("Can't create swap on a dead VM.") return False session = self.wait_for_login() try: # Get memory size. swap_size = self.get_used_mem() / 1024 # Create, change permission, and make a swap file. cmd = ("dd if=/dev/zero of={1} bs=1M count={0} && " "chmod 600 {1} && " "mkswap {1}".format(swap_size, swapfile)) stat_create, output = session.cmd_status_output(cmd) if stat_create: logging.error("Fail to create swap file in guest." "\n%s" % output) return False self.created_swap_file = swapfile # Get physical swap file offset for kernel param resume_offset. 
cmd = "filefrag -v %s" % swapfile output = session.cmd_output(cmd) # For compatibility of different version of filefrag # Sample output of 'filefrag -v /swapfile' # On newer version: # Filesystem type is: 58465342 # File size of /swapfile is 1048576000 (256000 blocks of 4096 bytes) # ext: logical_offset: physical_offset: length: expected: flags: # 0: 0.. 65519: 395320.. 460839: 65520: # ... # On older version: # Filesystem type is: ef53 # File size of /swapfile is 1048576000 (256000 blocks, blocksize 4096) # ext logical physical expected length flags # 0 0 2465792 32768 # ... offset_line = output.splitlines()[3] if '..' in offset_line: offset = offset_line.split()[3].rstrip('..') else: offset = offset_line.split()[2] # Get physical swap file device for kernel param resume. cmd = "df %s" % swapfile output = session.cmd_output(cmd) # Sample output of 'df /swapfile': # Filesystem 1K-blocks Used Available Use% Mounted on # /dev/vdb 52403200 15513848 36889352 30% / device = output.splitlines()[1].split()[0] # Set kernel parameters. self.set_kernel_param("resume", device) self.set_kernel_param("resume_offset", offset) finally: session.close() self.reboot() session = self.wait_for_login() try: # Activate a swap file. cmd = "swapon %s" % swapfile stat_swapon, output = session.cmd_status_output(cmd) if stat_create: logging.error("Fail to activate swap file in guest." "\n%s" % output) return False finally: session.close() if self.has_swap(): logging.debug("Successfully created swapfile %s." % swapfile) return True else: logging.error("Failed to create swap file.") return False def cleanup_swap(self): """ Cleanup environment changed by create_swap_partition() or create_swap_file(). """ if self.is_dead(): logging.error("Can't cleanup swap on a dead VM.") return False # Remove kernel parameters. self.set_kernel_param("resume", remove=True) self.set_kernel_param("resume_offset", remove=True) # Deactivate swap partition/file. 
session = self.wait_for_login() try: session.cmd_status("swapoff -a") if "created_swap_file" in dir(self): session.cmd_status("rm -f %s" % self.created_swap_file) del self.created_swap_file finally: session.close() # Cold unplug attached swap disk if self.shutdown(): if "created_swap_device" in dir(self): self.detach_disk( self.created_swap_device, extra="--persistent") del self.created_swap_device if "created_swap_path" in dir(self): os.remove(self.created_swap_path) del self.created_swap_path def set_console_getty(self, device, getty="mgetty", remove=False): """ Set getty for given console device. :param device: a console device :param getty: getty type: agetty, mgetty and so on. :param remove: do remove operation """ try: session = self.login() except (remote.LoginError, virt_vm.VMError), e: logging.debug(e) else: try: # Only configurate RHEL5 and below regex = "gettys are handled by" # As of RHEL7 systemd message is displayed regex += "|inittab is no longer used when using systemd" output = session.cmd_output("cat /etc/inittab") if re.search(regex, output): logging.debug("Skip setting inittab for %s", device) return True getty_str = "co:2345:respawn:/sbin/%s %s" % (getty, device) matched_str = "respawn:/sbin/*getty %s" % device if not re.search(matched_str, output): if not remove: session.sendline("echo %s >> /etc/inittab" % getty_str) else: if remove: session.sendline("sed -i -e /%s/d " "/etc/inittab" % matched_str) logging.debug("Set inittab for %s successfully.", device) return True finally: session.close() logging.debug("Set inittab for %s failed.", device) return False def cleanup_serial_console(self): """ Close serial console and associated log file """ if self.serial_console is not None: if self.is_lxc(): self.serial_console.sendline("^]") self.serial_console.close() self.serial_console = None self.serial_console_log = None if hasattr(self, "migration_file"): try: os.unlink(self.migration_file) except OSError: pass def wait_for_login(self, nic_index=0, 
timeout=None, internal_timeout=None, serial=False, restart_network=False, username=None, password=None): """ Override the wait_for_login method of virt_vm to support other guest in libvirt. If connect_uri is lxc related, we call wait_for_serial_login() directly, without attempting login it via network. Other connect_uri, call virt_vm.wait_for_login(). """ # Set the default value of parameters if user did not use it. if not timeout: timeout = super(VM, self).LOGIN_WAIT_TIMEOUT if not internal_timeout: internal_timeout = super(VM, self).LOGIN_TIMEOUT if self.is_lxc(): self.cleanup_serial_console() self.create_serial_console() return self.wait_for_serial_login(timeout, internal_timeout, restart_network, username, password) return super(VM, self).wait_for_login(nic_index, timeout, internal_timeout, serial, restart_network, username, password) @error_context.context_aware def create(self, name=None, params=None, root_dir=None, timeout=5.0, migration_mode=None, mac_source=None, autoconsole=True): """ Start the VM by running a qemu command. All parameters are optional. If name, params or root_dir are not supplied, the respective values stored as class attributes are used. :param name: The name of the object :param params: A dict containing VM params :param root_dir: Base directory for relative filenames :param migration_mode: If supplied, start VM for incoming migration using this protocol (either 'tcp', 'unix' or 'exec') :param migration_exec_cmd: Command to embed in '-incoming "exec: ..."' (e.g. 'gzip -c -d filename') if migration_mode is 'exec' :param mac_source: A VM object from which to copy MAC addresses. If not specified, new addresses will be generated. 
:raise VMCreateError: If qemu terminates unexpectedly :raise VMKVMInitError: If KVM initialization fails :raise VMHugePageError: If hugepage initialization fails :raise VMImageMissingError: If a CD image is missing :raise VMHashMismatchError: If a CD image hash has doesn't match the expected hash :raise VMBadPATypeError: If an unsupported PCI assignment type is requested :raise VMPAError: If no PCI assignable devices could be assigned """ error_context.context("creating '%s'" % self.name) self.destroy(free_mac_addresses=False) if name is not None: self.name = name if params is not None: self.params = params if root_dir is not None: self.root_dir = root_dir name = self.name params = self.params root_dir = self.root_dir # Verify the md5sum of the ISO images for cdrom in params.objects("cdroms"): if params.get("medium") == "import": break cdrom_params = params.object_params(cdrom) iso = cdrom_params.get("cdrom") xen_pv = (self.driver_type == 'xen' and params.get('hvm_or_pv') == 'pv') iso_is_ks = os.path.basename(iso) == 'ks.iso' if xen_pv and iso_is_ks: continue if iso: iso = utils_misc.get_path(data_dir.get_data_dir(), iso) if not os.path.exists(iso): raise virt_vm.VMImageMissingError(iso) compare = False if cdrom_params.get("skip_hash", "no") == "yes": logging.debug("Skipping hash comparison") elif cdrom_params.get("md5sum_1m"): logging.debug("Comparing expected MD5 sum with MD5 sum of " "first MB of ISO file...") actual_hash = crypto.hash_file( iso, 1048576, algorithm="md5") expected_hash = cdrom_params.get("md5sum_1m") compare = True elif cdrom_params.get("md5sum"): logging.debug("Comparing expected MD5 sum with MD5 sum of " "ISO file...") actual_hash = crypto.hash_file(iso, algorithm="md5") expected_hash = cdrom_params.get("md5sum") compare = True elif cdrom_params.get("sha1sum"): logging.debug("Comparing expected SHA1 sum with SHA1 sum " "of ISO file...") actual_hash = crypto.hash_file(iso, algorithm="sha1") expected_hash = cdrom_params.get("sha1sum") compare = 
True if compare: if actual_hash == expected_hash: logging.debug("Hashes match") else: raise virt_vm.VMHashMismatchError(actual_hash, expected_hash) # Make sure the following code is not executed by more than one thread # at the same time lockfilename = os.path.join(data_dir.get_tmp_dir(), "libvirt-autotest-vm-create.lock") lockfile = open(lockfilename, "w+") fcntl.lockf(lockfile, fcntl.LOCK_EX) try: # Handle port redirections redir_names = params.objects("redirs") host_ports = utils_misc.find_free_ports( 5000, 6000, len(redir_names)) self.redirs = {} for i in range(len(redir_names)): redir_params = params.object_params(redir_names[i]) guest_port = int(redir_params.get("guest_port")) self.redirs[guest_port] = host_ports[i] # Find available PCI devices self.pci_devices = [] for device in params.objects("pci_devices"): self.pci_devices.append(device) # Find available VNC port, if needed if params.get("display") == "vnc": if params.get("vnc_autoport") == "yes": self.vnc_port = None self.vnc_autoport = True else: self.vnc_port = utils_misc.find_free_port(5900, 6100) self.vnc_autoport = False # Find available spice port, if needed if params.get("spice"): self.spice_port = utils_misc.find_free_port(8000, 8100) # Find random UUID if specified 'uuid = random' in config file if params.get("uuid") == "random": f = open("/proc/sys/kernel/random/uuid") self.uuid = f.read().strip() f.close() # Generate or copy MAC addresses for all NICs for nic in self.virtnet: nic_params = dict(nic) if mac_source is not None: # Will raise exception if source doesn't # have corresponding nic logging.debug("Copying mac for nic %s from VM %s", nic.nic_name, mac_source.name) nic_params['mac'] = mac_source.get_mac_address( nic.nic_name) # make_create_command() calls vm.add_nic (i.e. 
on a copy) nic = self.add_nic(**nic_params) logging.debug('VM.create activating nic %s' % nic) self.activate_nic(nic.nic_name) # Make qemu command install_command = self.make_create_command() logging.info("Running libvirt command (reformatted):") for item in install_command.replace(" -", " \n -").splitlines(): logging.info("%s", item) try: process.run(install_command, verbose=True, shell=True) except process.CmdError, details: stderr = details.result.stderr.strip() # This is a common newcomer mistake, be more helpful... if stderr.count('IDE CDROM must use'): testname = params.get('name', "") if testname.count('unattended_install.cdrom'): if not testname.count('http_ks'): e_msg = ("Install command " "failed:\n%s \n\nNote: " "Older versions of " "libvirt won't work " "properly with kickstart " "on cdrom install. " "Try using the " "unattended_install.cdrom.http_ks method " "instead." % details.result) raise exceptions.TestSkipError(e_msg) if stderr.count('failed to launch bridge helper'): if utils_selinux.is_enforcing(): raise exceptions.TestSkipError("SELinux is enabled " "and preventing the " "bridge helper from " "accessing the bridge. " "Consider running as " "root or placing " "SELinux into " "permissive mode.") # some other problem happened, raise normally raise # Wait for the domain to be created utils_misc.wait_for(func=self.is_alive, timeout=60, text=("waiting for domain %s to start" % self.name)) self.uuid = virsh.domuuid(self.name, uri=self.connect_uri).stdout.strip() # Create isa serial ports. self.create_serial_console() finally: fcntl.lockf(lockfile, fcntl.LOCK_UN) lockfile.close() def migrate(self, dest_uri="", option="--live --timeout 60", extra="", ignore_status=False, debug=False, virsh_opt=""): """ Migrate a VM to a remote host. 
:param dest_uri: Destination libvirt URI :param option: Migration options before <domain> <desturi> :param extra: Migration options after <domain> <desturi> :return: True if command succeeded """ logging.info("Migrating VM %s from %s to %s" % (self.name, self.connect_uri, dest_uri)) result = virsh.migrate(self.name, dest_uri, option, extra, uri=self.connect_uri, ignore_status=ignore_status, debug=debug, virsh_opt=virsh_opt) # Close down serial_console logging process self.cleanup_serial_console() # On successful migration, point to guests new hypervisor. # Since dest_uri could be None, checking it is necessary. if result.exit_status == 0 and dest_uri: self.connect_uri = dest_uri self.create_serial_console() return result def attach_disk(self, source, target=None, prefix="vd", extra="", ignore_status=False, debug=False): """ Attach a disk to VM and return the target device name. :param source: source of disk device :param target: target of disk device, None for automatic assignment. :param prefix: disk device prefix. :param extra: additional arguments to command :return: target device name if succeed, Otherwise None """ # Find the next available target device name. if target is None: disks = self.get_disk_devices() for ch in string.ascii_lowercase: target = prefix + ch if target not in disks: break result = virsh.attach_disk(self.name, source, target, extra, uri=self.connect_uri, ignore_status=ignore_status, debug=debug) if result.exit_status: logging.error("Failed to attach disk %s to VM." "Detail: %s." % (source, result.stderr)) return None return target def detach_disk(self, target, extra="", ignore_status=False, debug=False): """ Detach a disk from VM. :param target: target of disk device need to be detached. 
:param extra: additional arguments to command """ return virsh.detach_disk(self.name, target, extra, uri=self.connect_uri, ignore_status=ignore_status, debug=debug) def attach_interface(self, option="", ignore_status=False, debug=False): """ Attach a NIC to VM. """ return virsh.attach_interface(self.name, option, uri=self.connect_uri, ignore_status=ignore_status, debug=debug) def detach_interface(self, option="", ignore_status=False, debug=False): """ Detach a NIC from VM. """ return virsh.detach_interface(self.name, option, uri=self.connect_uri, ignore_status=ignore_status, debug=debug) def destroy(self, gracefully=True, free_mac_addresses=True): """ Destroy the VM. If gracefully is True, first attempt to shutdown the VM with a shell command. If that fails, send SIGKILL to the qemu process. :param gracefully: If True, an attempt will be made to end the VM using a shell command before trying to end the qemu process with a 'quit' or a kill signal. :param free_mac_addresses: If vm is undefined with libvirt, also release/reset associated mac address """ try: # Is it already dead? if self.is_alive(): logging.debug("Destroying VM") if self.is_paused(): self.resume() if (not self.is_lxc() and gracefully and self.params.get("shutdown_command")): # Try to destroy with shell command logging.debug("Trying to shutdown VM with shell command") try: session = self.login() except (remote.LoginError, virt_vm.VMError), e: logging.debug(e) else: try: # Send the shutdown command session.sendline( self.params.get("shutdown_command")) logging.debug("Shutdown command sent; waiting for VM " "to go down...") if utils_misc.wait_for(self.is_dead, 60, 1, 1): logging.debug("VM is down") return finally: session.close() # Destroy VM directly, as 'ignore_status=True' by default, so destroy # a shutoff domain is also acceptable here. 
destroy_opt = '' if gracefully: destroy_opt = '--graceful' virsh.destroy(self.name, destroy_opt, uri=self.connect_uri) finally: self.cleanup_serial_console() if free_mac_addresses: if self.is_persistent(): logging.warning("Requested MAC address release from " "persistent vm %s. Ignoring." % self.name) else: logging.debug("Releasing MAC addresses for vm %s." % self.name) for nic_name in self.virtnet.nic_name_list(): self.virtnet.free_mac_address(nic_name) def remove(self): self.destroy(gracefully=True, free_mac_addresses=False) # If the current machine contains nvram, we have to set --nvram if self.params.get("vir_domain_undefine_nvram") == "yes": options = "--nvram" else: options = None if not self.undefine(options): raise virt_vm.VMRemoveError("VM '%s' undefine error" % self.name) self.destroy(gracefully=False, free_mac_addresses=True) logging.debug("VM '%s' was removed", self.name) def remove_with_storage(self): """ Virsh undefine provides an option named --remove-all-storage, but it only removes the storage which is managed by libvirt. This method undefines vm and removes the all storages related with this vm, no matter storages are managed by libvirt or not. """ blklist = self.get_disk_devices().values() self.remove() for blk in blklist: path = blk['source'] if os.path.exists(path): os.remove(path) def get_uuid(self): """ Return VM's UUID. """ uuid = virsh.domuuid(self.name, uri=self.connect_uri).stdout.strip() # only overwrite it if it's not set if self.uuid is None: self.uuid = uuid return self.uuid def get_ifname(self, nic_index=0): raise NotImplementedError def get_virsh_mac_address(self, nic_index=0): """ Get the MAC of this VM domain. 
:param nic_index: Index of the NIC :raise VMMACAddressMissingError: If no MAC address is defined for the requested NIC """ cmd_result = virsh.dumpxml(self.name, uri=self.connect_uri) if cmd_result.exit_status: raise exceptions.TestFail("dumpxml %s failed.\n" "Detail: %s.\n" % (self.name, cmd_result)) thexml = cmd_result.stdout.strip() xtf = xml_utils.XMLTreeFile(thexml) interfaces = xtf.find('devices').findall('interface') # Range check try: mac = interfaces[nic_index].find('mac').get('address') if mac is not None: return mac except IndexError: pass # Allow other exceptions through # IndexError (range check) or mac is None raise virt_vm.VMMACAddressMissingError(nic_index) def get_pid(self): """ Return the VM's PID. :return: int with PID. If VM is not alive, returns None. """ if self.is_lxc(): pid_file = "/var/run/libvirt/lxc/%s.pid" % self.name elif self.is_qemu(): pid_file = "/var/run/libvirt/qemu/%s.pid" % self.name elif self.is_esx(): pid_file = "/var/run/libvirt/esx/%s.pid" % self.name # TODO: Add more vm driver type else: raise ValueError("Unsupport connect uri: %s." % self.connect_uri) pid = None if os.path.exists(pid_file): try: pid_file_contents = open(pid_file).read() pid = int(pid_file_contents) except IOError: logging.error("Could not read %s to get PID", pid_file) except TypeError: logging.error("PID file %s has invalid contents: '%s'", pid_file, pid_file_contents) else: logging.debug("PID file %s not present", pid_file) return pid def get_vcpus_pid(self): """ Return the vcpu's pid for a given VM. :return: list of PID of vcpus of a VM. """ output = virsh.qemu_monitor_command(self.name, "info cpus", "--hmp", uri=self.connect_uri) vcpu_pids = re.findall(r'thread_id=(\d+)', output.stdout) return vcpu_pids def get_shell_pid(self): """ Return the PID of the parent shell process. :note: This works under the assumption that ``self.process.get_pid()`` returns the PID of the parent shell process. 
""" return self.process.get_pid() def get_shared_meminfo(self): """ Returns the VM's shared memory information. :return: Shared memory used by VM (MB) """ if self.is_dead(): logging.error("Could not get shared memory info from dead VM.") return None filename = "/proc/%d/statm" % self.get_pid() shm = int(open(filename).read().split()[2]) # statm stores informations in pages, translate it to MB return shm * 4.0 / 1024 def get_cpu_topology_in_cmdline(self): """ Return the VM's cpu topology in VM cmdline. :return: A dirt of cpu topology """ cpu_topology = {} vm_pid = self.get_pid() if vm_pid is None: logging.error("Fail to get VM pid") else: cmdline = open("/proc/%d/cmdline" % vm_pid).read() values = re.findall("sockets=(\d+),cores=(\d+),threads=(\d+)", cmdline)[0] cpu_topology = dict(zip(["sockets", "cores", "threads"], values)) return cpu_topology def get_cpu_topology_in_vm(self): cpu_topology = {} cpu_info = utils_misc.get_cpu_info(self.wait_for_login()) if cpu_info: cpu_topology['sockets'] = cpu_info['Socket(s)'] cpu_topology['cores'] = cpu_info['Core(s) per socket'] cpu_topology['threads'] = cpu_info['Thread(s) per core'] return cpu_topology def activate_nic(self, nic_index_or_name): # TODO: Implement nic hotplugging pass # Just a stub for now def deactivate_nic(self, nic_index_or_name): # TODO: Implement nic hot un-plugging pass # Just a stub for now @error_context.context_aware def reboot(self, session=None, method="shell", nic_index=0, timeout=240, serial=False): """ Reboot the VM and wait for it to come back up by trying to log in until timeout expires. :param session: A shell session object or None. :param method: Reboot method. Can be "shell" (send a shell reboot command). :param nic_index: Index of NIC to access in the VM, when logging in after rebooting. :param timeout: Time to wait for login to succeed (after rebooting). :param serial: Just use to unify api in virt_vm module. :return: A new shell session object. 
""" error_context.base_context("rebooting '%s'" % self.name, logging.info) error_context.context("before reboot") session = session or self.login(timeout=timeout) error_context.context() if method == "shell": session.sendline(self.params.get("reboot_command")) else: raise virt_vm.VMRebootError("Unknown reboot method: %s" % method) error_context.context("waiting for guest to go down", logging.info) if not utils_misc.wait_for(lambda: not session.is_responsive(timeout=30), 120, 0, 1): raise virt_vm.VMRebootError("Guest refuses to go down") session.close() error_context.context("logging in after reboot", logging.info) return self.wait_for_login(nic_index, timeout=timeout) def screendump(self, filename, debug=False): if debug: logging.debug("Requesting screenshot %s" % filename) return virsh.screenshot(self.name, filename, uri=self.connect_uri) def start(self, autoconsole=True): """ Starts this VM. """ self.uuid = virsh.domuuid(self.name, uri=self.connect_uri).stdout.strip() logging.debug("Starting vm '%s'", self.name) result = virsh.start(self.name, uri=self.connect_uri) if not result.exit_status: # Wait for the domain to be created has_started = utils_misc.wait_for(func=self.is_alive, timeout=60, text=("waiting for domain %s " "to start" % self.name)) if has_started is None: raise virt_vm.VMStartError(self.name, "libvirt domain not " "active after start") self.uuid = virsh.domuuid(self.name, uri=self.connect_uri).stdout.strip() # Establish a session with the serial console if autoconsole: self.create_serial_console() else: raise virt_vm.VMStartError(self.name, result.stderr.strip()) # Pull in mac addresses from libvirt guest definition for index, nic in enumerate(self.virtnet): try: mac = self.get_virsh_mac_address(index) if not nic.has_key('mac'): logging.debug("Updating nic %d with mac %s on vm %s" % (index, mac, self.name)) nic.mac = mac elif nic.mac != mac: logging.warning("Requested mac %s doesn't match mac %s " "as defined for vm %s", nic.mac, mac, self.name) # 
TODO: Checkout/Set nic_model, nettype, netdst also except virt_vm.VMMACAddressMissingError: logging.warning("Nic %d requested by test but not defined for" " vm %s" % (index, self.name)) def wait_for_shutdown(self, count=60): """ Return True on successful domain shutdown. Wait for a domain to shutdown, libvirt does not block on domain shutdown so we need to watch for successful completion. :param name: VM name :param name: Optional timeout value """ timeout = count while count > 0: # check every 5 seconds if count % 5 == 0: if virsh.is_dead(self.name, uri=self.connect_uri): logging.debug("Shutdown took %d seconds", timeout - count) return True count -= 1 time.sleep(1) logging.debug("Waiting for guest to shutdown %d", count) return False def shutdown(self): """ Shuts down this VM. """ try: if self.state() != 'shut off': virsh.shutdown(self.name, uri=self.connect_uri) if self.wait_for_shutdown(): logging.debug("VM %s shut down", self.name) self.cleanup_serial_console() return True else: logging.error("VM %s failed to shut down", self.name) return False except process.CmdError: logging.error("VM %s failed to shut down", self.name) return False def pause(self): try: state = self.state() if state != 'paused': virsh.suspend( self.name, uri=self.connect_uri, ignore_statues=False) return True except Exception: logging.error("VM %s failed to suspend", self.name) return False def resume(self): try: virsh.resume(self.name, ignore_status=False, uri=self.connect_uri) if self.is_alive(): logging.debug("Resumed VM %s", self.name) return True else: return False except process.CmdError, detail: logging.error("Resume VM %s failed:\n%s", self.name, detail) return False def save_to_file(self, path): """ Override BaseVM save_to_file method """ if self.is_dead(): raise virt_vm.VMStatusError( "Cannot save a VM that is %s" % self.state()) logging.debug("Saving VM %s to %s" % (self.name, path)) result = virsh.save(self.name, path, uri=self.connect_uri) if result.exit_status: raise 
virt_vm.VMError("Save VM to %s failed.\n" "Detail: %s." % (path, result.stderr)) if self.is_alive(): raise virt_vm.VMStatusError("VM not shut off after save") self.cleanup_serial_console() def restore_from_file(self, path): """ Override BaseVM restore_from_file method """ if self.is_alive(): raise virt_vm.VMStatusError( "Can not restore VM that is %s" % self.state()) logging.debug("Restoring VM from %s" % path) result = virsh.restore(path, uri=self.connect_uri) if result.exit_status: raise virt_vm.VMError("Restore VM from %s failed.\n" "Detail: %s." % (path, result.stderr)) if self.is_dead(): raise virt_vm.VMStatusError( "VM should not be %s after restore." % self.state()) self.create_serial_console() def managedsave(self): """ Managed save of VM's state """ if self.is_dead(): raise virt_vm.VMStatusError( "Cannot save a VM that is %s" % self.state()) logging.debug("Managed saving VM %s" % self.name) result = virsh.managedsave(self.name, uri=self.connect_uri) if result.exit_status: raise virt_vm.VMError("Managed save VM failed.\n" "Detail: %s." % result.stderr) if self.is_alive(): raise virt_vm.VMStatusError("VM not shut off after managed save") self.cleanup_serial_console() def pmsuspend(self, target='mem', duration=0): """ Suspend a domain gracefully using power management functions """ if self.is_dead(): raise virt_vm.VMStatusError( "Cannot pmsuspend a VM that is %s" % self.state()) logging.debug("PM suspending VM %s" % self.name) result = virsh.dompmsuspend(self.name, target=target, duration=duration, uri=self.connect_uri) if result.exit_status: raise virt_vm.VMError("PM suspending VM failed.\n" "Detail: %s." 
% result.stderr) self.cleanup_serial_console() def pmwakeup(self): """ Wakeup a domain from pmsuspended state """ if self.is_dead(): raise virt_vm.VMStatusError( "Cannot pmwakeup a VM that is %s" % self.state()) logging.debug("PM waking up VM %s" % self.name) result = virsh.dompmwakeup(self.name, uri=self.connect_uri) if result.exit_status: raise virt_vm.VMError("PM waking up VM failed.\n" "Detail: %s." % result.stderr) self.create_serial_console() def vcpupin(self, vcpu, cpu_list, options=""): """ To pin vcpu to cpu_list """ result = virsh.vcpupin(self.name, vcpu, cpu_list, options, uri=self.connect_uri) if result.exit_status: raise exceptions.TestFail("Virsh vcpupin command failed.\n" "Detail: %s.\n" % result) def dominfo(self): """ Return a dict include vm's information. """ output = virsh.dominfo(self.name, uri=self.connect_uri).stdout.strip() # Key: word before ':' | value: content after ':' (stripped) dominfo_dict = {} for line in output.splitlines(): key = line.split(':')[0].strip() value = line.split(':')[-1].strip() dominfo_dict[key] = value return dominfo_dict def vcpuinfo(self): """ Return a dict's list include vm's vcpu information. """ output = virsh.vcpuinfo(self.name, uri=self.connect_uri).stdout.strip() # Key: word before ':' | value: content after ':' (stripped) vcpuinfo_list = [] vcpuinfo_dict = {} for line in output.splitlines(): key = line.split(':')[0].strip() value = line.split(':')[-1].strip() vcpuinfo_dict[key] = value if key == "CPU Affinity": vcpuinfo_list.append(vcpuinfo_dict) return vcpuinfo_list def get_used_mem(self): """ Get vm's current memory(kilobytes). """ dominfo_dict = self.dominfo() memory = dominfo_dict['Used memory'].split(' ')[0] # strip off ' kb' return int(memory) def get_blk_devices(self): """ Get vm's block devices. Return a dict include all devices detail info. 
example: {target: {'type': value, 'device': value, 'source': value}} """ domblkdict = {} options = "--details" result = virsh.domblklist(self.name, options, ignore_status=True, uri=self.connect_uri) blklist = result.stdout.strip().splitlines() if result.exit_status != 0: logging.info("Get vm devices failed.") else: blklist = blklist[2:] for line in blklist: linesplit = line.split(None, 4) target = linesplit[2] blk_detail = {'type': linesplit[0], 'device': linesplit[1], 'source': linesplit[3]} domblkdict[target] = blk_detail return domblkdict def get_disk_devices(self): """ Get vm's disk type block devices. """ blk_devices = self.get_blk_devices() disk_devices = {} for target in blk_devices: details = blk_devices[target] if details['device'] == "disk": disk_devices[target] = details return disk_devices def get_first_disk_devices(self): """ Get vm's first disk type block devices. """ disk = {} options = "--details" result = virsh.domblklist(self.name, options, ignore_status=True, uri=self.connect_uri) blklist = result.stdout.strip().splitlines() if result.exit_status != 0: logging.info("Get vm devices failed.") else: blklist = blklist[2:] linesplit = blklist[0].split(None, 4) disk = {'type': linesplit[0], 'device': linesplit[1], 'target': linesplit[2], 'source': linesplit[3]} return disk def get_device_details(self, device_target): device_details = {} result = virsh.domblkinfo(self.name, device_target, uri=self.connect_uri) details = result.stdout.strip().splitlines() if result.exit_status != 0: logging.info("Get vm device details failed.") else: for line in details: attrs = line.split(":") device_details[attrs[0].strip()] = attrs[-1].strip() return device_details def get_device_size(self, device_target): domblkdict = self.get_blk_devices() if device_target not in domblkdict.keys(): return None path = domblkdict[device_target]["source"] size = self.get_device_details(device_target)["Capacity"] return path, size def get_max_mem(self): """ Get vm's maximum 
memory(kilobytes). """ dominfo_dict = self.dominfo() max_mem = dominfo_dict['Max memory'].split(' ')[0] # strip off 'kb' return int(max_mem) def domjobabort(self): """ Abort job for vm. """ result = virsh.domjobabort(self.name, ignore_status=True) if result.exit_status: logging.debug(result) return False return True def dump(self, path, option=""): """ Dump self to path. :raise: exceptions.TestFail if dump fail. """ cmd_result = virsh.dump(self.name, path=path, option=option, uri=self.connect_uri) if cmd_result.exit_status: raise exceptions.TestFail("Failed to dump %s to %s.\n" "Detail: %s." % (self.name, path, cmd_result)) def get_job_type(self): jobresult = virsh.domjobinfo(self.name, uri=self.connect_uri) if not jobresult.exit_status: for line in jobresult.stdout.splitlines(): key = line.split(':')[0] value = line.split(':')[-1] if key.count("type"): return value.strip() else: logging.error(jobresult) return False def get_pci_devices(self, device_str=None): """ Get PCI devices in vm accroding to given device character. :param device_str: a string to identify device. """ session = self.wait_for_login() if device_str is None: cmd = "lspci -D" else: cmd = "lspci -D | grep %s" % device_str lines = session.cmd_output(cmd) session.close() pci_devices = [] for line in lines.splitlines(): pci_devices.append(line.split()[0]) return pci_devices def get_disks(self, diskname=None): """ Get disks in vm. :param diskname: Specify disk to be listed, used for checking given disk. """ cmd = "lsblk --nodeps -n" if diskname: cmd += " | grep %s" % diskname session = self.wait_for_login() lines = session.cmd_output(cmd) session.close() disks = [] for line in lines.splitlines(): if line.count(" disk "): disks.append("/dev/%s" % line.split()[0]) return disks def get_interfaces(self): """ Get available interfaces in vm. 
""" cmd = "cat /proc/net/dev" session = self.wait_for_login() lines = session.cmd_output(cmd) session.close() interfaces = [] for line in lines.splitlines(): if len(line.split(':')) != 2: continue interfaces.append(line.split(':')[0].strip()) return interfaces def get_interface_mac(self, interface): """ Get mac address of interface by given name. """ if interface not in self.get_interfaces(): return None cmd = "cat /sys/class/net/%s/address" % interface session = self.wait_for_login() try: mac = session.cmd_output(cmd) except Exception, detail: session.close() logging.error(str(detail)) return None session.close() return mac.strip() def install_package(self, name): """ Install a package on VM. ToDo: Support multiple package manager. :param name: Name of package to be installed """ session = self.wait_for_login() vm_distro = self.get_distro() try: # distro specific support for package manager if vm_distro.lower() == 'ubuntu': query_cmd = "dpkg -l | grep %s" % name cmd = "apt install -y %s" % name update_cmd = "apt upgrade -y %s" % name else: query_cmd = "rpm -q %s" % name cmd = "yum install -y %s" % name update_cmd = "yum update -y %s" % name if session.cmd_status(query_cmd): # Install the package if it does not exists status, output = session.cmd_status_output(cmd, timeout=300) # Just check status is not enough # It's necessary to check if install successfully if status != 0 or session.cmd_status(query_cmd) != 0: raise virt_vm.VMError("Installation of package %s failed:" "\n%s" % (name, output)) else: # Update the package status, output = session.cmd_status_output(update_cmd, timeout=600) if status: raise virt_vm.VMError("Update of package %s failed:\n%s" % (name, output)) finally: session.close() def remove_package(self, name): """ Remove a package from VM. ToDo: Support multiple package manager. 
:param name: Name of package to be removed """ session = self.wait_for_login() vm_distro = self.get_distro() try: # Remove the package if it exists if vm_distro.lower() == 'ubuntu': cmd = "! (dpkg -l | grep %s | grep ^ii) || apt remove -y %s"\ % (name, name) else: cmd = "! rpm -q %s || rpm -e %s" % (name, name) status, output = session.cmd_status_output(cmd, timeout=300) if status != 0: raise virt_vm.VMError("Removal of package %s failed:\n%s" % (name, output)) finally: session.close() def prepare_guest_agent(self, prepare_xml=True, channel=True, start=True): """ Prepare qemu guest agent on the VM. :param prepare_xml: Whether change VM's XML :param channel: Whether add agent channel in VM. Only valid if prepare_xml is True :param start: Whether install and start the qemu-ga service """ if prepare_xml: vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(self.name) # Check if we need to change XML of VM. if channel != bool(vmxml.get_agent_channels()): if self.is_alive(): self.destroy() if channel: vmxml.set_agent_channel() else: vmxml.remove_agent_channels() vmxml.sync() if not self.is_alive(): self.start() self.install_package('pm-utils') self.install_package('qemu-guest-agent') session = self.wait_for_login() def _is_ga_running(): return (not session.cmd_status("pgrep qemu-ga")) def _is_ga_finished(): return (session.cmd_status("pgrep qemu-ga") == 1) def _start_ga(): if not _is_ga_running(): cmd = "service qemu-guest-agent start" status, output = session.cmd_status_output(cmd) # Sometimes the binary of the guest agent was corrupted on the # filesystem due to the guest being destroyed and cause service # masked, so need to reinstall agent to fix it if status and "is masked" in output: self.remove_package('qemu-guest-agent') self.install_package('qemu-guest-agent') status, output = session.cmd_status_output(cmd) if status and "unrecognized service" in output: cmd = "service qemu-ga start" status, output = session.cmd_status_output(cmd) if status: raise 
virt_vm.VMError("Start qemu-guest-agent failed:" "\n%s" % output) def _stop_ga(): if _is_ga_running(): cmd = "service qemu-guest-agent stop" status, output = session.cmd_status_output(cmd) if status and "unrecognized service" in output: cmd = "service qemu-ga stop" status, output = session.cmd_status_output(cmd) if status: raise virt_vm.VMError("Stop qemu-guest-agent failed:" "\n%s" % output) try: # Start/stop qemu-guest-agent if start: _start_ga() else: _stop_ga() # Check qemu-guest-agent status if start: if not utils_misc.wait_for(_is_ga_running, timeout=60): raise virt_vm.VMError("qemu-guest-agent is not running.") else: if not utils_misc.wait_for(_is_ga_finished, timeout=60): raise virt_vm.VMError("qemu-guest-agent is running") finally: session.close() def getenforce(self): """ Set SELinux mode in the VM. :return: SELinux mode [Enforcing|Permissive|Disabled] """ self.install_package('libselinux-utils') session = self.wait_for_login() try: status, output = session.cmd_status_output("getenforce") if status != 0: raise virt_vm.VMError("Get SELinux mode failed:\n%s" % output) return output.strip() finally: session.close() def setenforce(self, mode): """ Set SELinux mode in the VM. :param mode: SELinux mode [Enforcing|Permissive|1|0] """ self.install_package('selinux-policy') self.install_package('selinux-policy-targeted') self.install_package('libselinux-utils') try: if int(mode) == 1: target_mode = 'Enforcing' elif int(mode) == 0: target_mode = 'Permissive' except ValueError: pass session = self.wait_for_login() try: current_mode = self.getenforce() if current_mode == 'Disabled': logging.warning("VM SELinux disabled. Can't set mode.") return elif current_mode != target_mode: cmd = "setenforce %s" % mode status, output = session.cmd_status_output(cmd) if status != 0: raise virt_vm.VMError( "Set SELinux mode failed:\n%s" % output) else: logging.debug("VM SELinux mode don't need change.") finally: session.close()
gpl-2.0
frank10704/DF_GCS_W
MissionPlanner-master/packages/IronPython.StdLib.2.7.5-beta1/content/Lib/distutils/command/upload.py
54
7196
"""distutils.command.upload Implements the Distutils 'upload' subcommand (upload package to PyPI).""" import os import socket import platform from urllib2 import urlopen, Request, HTTPError from base64 import standard_b64encode import urlparse import cStringIO as StringIO from hashlib import md5 from distutils.errors import DistutilsOptionError from distutils.core import PyPIRCCommand from distutils.spawn import spawn from distutils import log class upload(PyPIRCCommand): description = "upload binary package to PyPI" user_options = PyPIRCCommand.user_options + [ ('sign', 's', 'sign files to upload using gpg'), ('identity=', 'i', 'GPG identity used to sign files'), ] boolean_options = PyPIRCCommand.boolean_options + ['sign'] def initialize_options(self): PyPIRCCommand.initialize_options(self) self.username = '' self.password = '' self.show_response = 0 self.sign = False self.identity = None def finalize_options(self): PyPIRCCommand.finalize_options(self) if self.identity and not self.sign: raise DistutilsOptionError( "Must use --sign for --identity to have meaning" ) config = self._read_pypirc() if config != {}: self.username = config['username'] self.password = config['password'] self.repository = config['repository'] self.realm = config['realm'] # getting the password from the distribution # if previously set by the register command if not self.password and self.distribution.password: self.password = self.distribution.password def run(self): if not self.distribution.dist_files: raise DistutilsOptionError("No dist file created in earlier command") for command, pyversion, filename in self.distribution.dist_files: self.upload_file(command, pyversion, filename) def upload_file(self, command, pyversion, filename): # Makes sure the repository URL is compliant schema, netloc, url, params, query, fragments = \ urlparse.urlparse(self.repository) if params or query or fragments: raise AssertionError("Incompatible url %s" % self.repository) if schema not in ('http', 
'https'): raise AssertionError("unsupported schema " + schema) # Sign if requested if self.sign: gpg_args = ["gpg", "--detach-sign", "-a", filename] if self.identity: gpg_args[2:2] = ["--local-user", self.identity] spawn(gpg_args, dry_run=self.dry_run) # Fill in the data - send all the meta-data in case we need to # register a new release f = open(filename,'rb') try: content = f.read() finally: f.close() meta = self.distribution.metadata data = { # action ':action': 'file_upload', 'protcol_version': '1', # identify release 'name': meta.get_name(), 'version': meta.get_version(), # file content 'content': (os.path.basename(filename),content), 'filetype': command, 'pyversion': pyversion, 'md5_digest': md5(content).hexdigest(), # additional meta-data 'metadata_version' : '1.0', 'summary': meta.get_description(), 'home_page': meta.get_url(), 'author': meta.get_contact(), 'author_email': meta.get_contact_email(), 'license': meta.get_licence(), 'description': meta.get_long_description(), 'keywords': meta.get_keywords(), 'platform': meta.get_platforms(), 'classifiers': meta.get_classifiers(), 'download_url': meta.get_download_url(), # PEP 314 'provides': meta.get_provides(), 'requires': meta.get_requires(), 'obsoletes': meta.get_obsoletes(), } comment = '' if command == 'bdist_rpm': dist, version, id = platform.dist() if dist: comment = 'built for %s %s' % (dist, version) elif command == 'bdist_dumb': comment = 'built for %s' % platform.platform(terse=1) data['comment'] = comment if self.sign: data['gpg_signature'] = (os.path.basename(filename) + ".asc", open(filename+".asc").read()) # set up the authentication auth = "Basic " + standard_b64encode(self.username + ":" + self.password) # Build up the MIME payload for the POST data boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254' sep_boundary = '\n--' + boundary end_boundary = sep_boundary + '--' body = StringIO.StringIO() for key, value in data.items(): # handle multiple entries for the same name if not 
isinstance(value, list): value = [value] for value in value: if isinstance(value, tuple): fn = ';filename="%s"' % value[0] value = value[1] else: fn = "" body.write(sep_boundary) body.write('\nContent-Disposition: form-data; name="%s"'%key) body.write(fn) body.write("\n\n") body.write(value) if value and value[-1] == '\r': body.write('\n') # write an extra newline (lurve Macs) body.write(end_boundary) body.write("\n") body = body.getvalue() self.announce("Submitting %s to %s" % (filename, self.repository), log.INFO) # build the Request headers = {'Content-type': 'multipart/form-data; boundary=%s' % boundary, 'Content-length': str(len(body)), 'Authorization': auth} request = Request(self.repository, data=body, headers=headers) # send the data try: result = urlopen(request) status = result.getcode() reason = result.msg if self.show_response: msg = '\n'.join(('-' * 75, r.read(), '-' * 75)) self.announce(msg, log.INFO) except socket.error, e: self.announce(str(e), log.ERROR) return except HTTPError, e: status = e.code reason = e.msg if status == 200: self.announce('Server response (%s): %s' % (status, reason), log.INFO) else: self.announce('Upload failed (%s): %s' % (status, reason), log.ERROR)
gpl-3.0
dasbruns/netzob
src/netzob/Common/Models/Vocabulary/UnknownSymbol.py
1
4213
#-*- coding: utf-8 -*- #+---------------------------------------------------------------------------+ #| 01001110 01100101 01110100 01111010 01101111 01100010 | #| | #| Netzob : Inferring communication protocols | #+---------------------------------------------------------------------------+ #| Copyright (C) 2011-2014 Georges Bossert and Frédéric Guihéry | #| This program is free software: you can redistribute it and/or modify | #| it under the terms of the GNU General Public License as published by | #| the Free Software Foundation, either version 3 of the License, or | #| (at your option) any later version. | #| | #| This program is distributed in the hope that it will be useful, | #| but WITHOUT ANY WARRANTY; without even the implied warranty of | #| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | #| GNU General Public License for more details. | #| | #| You should have received a copy of the GNU General Public License | #| along with this program. If not, see <http://www.gnu.org/licenses/>. 
| #+---------------------------------------------------------------------------+ #| @url : http://www.netzob.org | #| @contact : contact@netzob.org | #| @sponsors : Amossys, http://www.amossys.fr | #| Supélec, http://www.rennes.supelec.fr/ren/rd/cidre/ | #+---------------------------------------------------------------------------+ #+---------------------------------------------------------------------------+ #| File contributors : | #| - Georges Bossert <georges.bossert (a) supelec.fr> | #| - Frédéric Guihéry <frederic.guihery (a) amossys.fr> | #+---------------------------------------------------------------------------+ #+---------------------------------------------------------------------------+ #| Standard library imports | #+---------------------------------------------------------------------------+ #+---------------------------------------------------------------------------+ #| Related third party imports | #+---------------------------------------------------------------------------+ #+---------------------------------------------------------------------------+ #| Local application imports | #+---------------------------------------------------------------------------+ from netzob.Common.Utils.Decorators import typeCheck from netzob.Common.Models.Vocabulary.Symbol import Symbol from netzob.Common.Models.Vocabulary.Messages.RawMessage import RawMessage from netzob.Common.Models.Vocabulary.Messages.AbstractMessage import AbstractMessage class UnknownSymbol(Symbol): """An unknown symbol is a special type of symbol that is returned to represent a message that cannot be abstracted >>> from netzob.all import * >>> u = UnknownSymbol() >>> print u.name Unknown Symbol >>> msg = RawMessage("hello") >>> u = UnknownSymbol(msg) >>> print u.name Unknown Symbol """ def __init__(self, message=None): self.message = message super(UnknownSymbol, self).__init__(fields=None, name="Unknown Symbol", messages=[self.message]) @property def message(self): """This message 
represents the unknown symbol :type: class:`RawMessage` """ return self.__message @message.setter @typeCheck(AbstractMessage) def message(self, message): if message is None: message = RawMessage() self.__message = message
gpl-3.0
beni55/fuzzywuzzy
fuzzywuzzy/utils.py
11
1841
from __future__ import unicode_literals import sys from fuzzywuzzy.string_processing import StringProcessor PY3 = sys.version_info[0] == 3 def validate_string(s): try: return len(s) > 0 except TypeError: return False bad_chars = str("").join([chr(i) for i in range(128, 256)]) # ascii dammit! if PY3: translation_table = dict((ord(c), None) for c in bad_chars) unicode = str def asciionly(s): if PY3: return s.translate(translation_table) else: return s.translate(None, bad_chars) def asciidammit(s): if type(s) is str: return asciionly(s) elif type(s) is unicode: return asciionly(s.encode('ascii', 'ignore')) else: return asciidammit(unicode(s)) def make_type_consistent(s1, s2): """If both objects aren't either both string or unicode instances force them to unicode""" if isinstance(s1, str) and isinstance(s2, str): return s1, s2 elif isinstance(s1, unicode) and isinstance(s2, unicode): return s1, s2 else: return unicode(s1), unicode(s2) def full_process(s, force_ascii=False): """Process string by -- removing all but letters and numbers -- trim whitespace -- force to lower case if force_ascii == True, force convert to ascii""" if s is None: return "" if force_ascii: s = asciidammit(s) # Keep only Letters and Numbers (see Unicode docs). string_out = StringProcessor.replace_non_letters_non_numbers_with_whitespace(s) # Force into lowercase. string_out = StringProcessor.to_lower_case(string_out) # Remove leading and trailing whitespaces. string_out = StringProcessor.strip(string_out) return string_out def intr(n): '''Returns a correctly rounded integer''' return int(round(n))
mit
ep1cman/workload-automation
wlauto/core/workload.py
5
4009
# Copyright 2014-2015 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ A workload is the unit of execution. It represents a set of activities are are performed and measured together, as well as the necessary setup and teardown procedures. A single execution of a workload produces one :class:`wlauto.core.result.WorkloadResult` that is populated with zero or more :class:`wlauto.core.result.WorkloadMetric`\ s and/or :class:`wlauto.core.result.Artifact`\s by the workload and active instrumentation. """ from wlauto.core.extension import Extension from wlauto.exceptions import WorkloadError class Workload(Extension): """ This is the base class for the workloads executed by the framework. Each of the methods throwing NotImplementedError *must* be implemented by the derived classes. """ supported_devices = [] supported_platforms = [] summary_metrics = [] requires_network = False def __init__(self, device, **kwargs): """ Creates a new Workload. :param device: the Device on which the workload will be executed. 
""" super(Workload, self).__init__(**kwargs) if self.supported_devices and device.name not in self.supported_devices: raise WorkloadError('Workload {} does not support device {}'.format(self.name, device.name)) if self.supported_platforms and device.platform not in self.supported_platforms: raise WorkloadError('Workload {} does not support platform {}'.format(self.name, device.platform)) self.device = device def init_resources(self, context): """ This method may be used to perform early resource discovery and initialization. This is invoked during the initial loading stage and before the device is ready, so cannot be used for any device-dependent initialization. This method is invoked before the workload instance is validated. """ pass def initialize(self, context): """ This method should be used to perform once-per-run initialization of a workload instance, i.e., unlike ``setup()`` it will not be invoked on each iteration. """ pass def setup(self, context): # pylint: disable=unused-argument """ Perform the setup necessary to run the workload, such as copying the necessary files to the device, configuring the environments, etc. This is also the place to perform any on-device checks prior to attempting to execute the workload. """ if self.requires_network: self.check_network_connected() def run(self, context): """Execute the workload. This is the method that performs the actual "work" of the""" pass def update_result(self, context): """ Update the result within the specified execution context with the metrics form this workload iteration. """ pass def teardown(self, context): """ Perform any final clean up for the Workload. """ pass def finalize(self, context): pass def check_network_connected(self): if not self.device.is_network_connected(): message = 'Workload "{}" requires internet. Device "{}" does not appear to be connected to the internet.' 
raise WorkloadError(message.format(self.name, self.device.name)) def __str__(self): return '<Workload {}>'.format(self.name)
apache-2.0
cesc-park/CRCN
entity/grid_adder_to_data.py
1
2515
#import entity_grid from entity_grid import * from parsetree import * import svm as disvm import json import pickle import os import re # from easydict import EDict as edict # args = edict() # args.jobs = 1 # args.mem = 10 args={} args['jobs']=1 args['mem']=10 args['parser']="./stanford-parser/stanford-parser.jar" args['models']="./stanford-parser/stanford-parser-3.5.0-models.jar" args['grammar']="./stanford-parser/englishPCFG.caseless.ser.gz" args['threads']=4 args['max_length']=1000 testgrid_path="./browncoherence/bin64/TestGrid" trainer = disvm.RankSVMTrainer(20) jsonfile = open('../data/example_tree.json', 'r') json_data=jsonfile.read() jsondata=json.loads(json_data) json_imgs=jsondata['images'] contents={} totallen=len(json_imgs) sentence_list=[] for i,json_img in enumerate(json_imgs): #pageurl=os.path.basename(json_img['docpath']).encode('ascii','ignore') #concatstring="" #if contents.has_key(pageurl): # concatstring=contents[pageurl] for j,sentence in enumerate(json_img['sentences']): string=re.sub('\.+','.',sentence['raw'].encode('ascii','ignore')).replace('.','.\n') sentence_list.append(string) # trees=get_parsed_trees_a_document(string,args) # json_img['sentences'][j]['entities']=[] # if trees is not None: # grid=get_grids_a_document(testgrid_path,trees) # if grid and grid.strip()!="": # model=new_entity_grid(grid, syntax=True,max_salience=0, history=2) # json_img['sentences'][j]['entities']=model.entities trees_list=get_parsed_trees_multi_documents(sentence_list,args) trees_key_list=[] for key,trees in enumerate(trees_list): if trees is not None: trees_key_list.append({'trees':trees,'key':str(key)}) grids=get_grids_multi_documents(testgrid_path,trees_key_list,3) #jobs is 3 entities_list=[] for grid, trees_and_key in itertools.izip(grids, trees_key_list): if grid and grid.strip()!="": model=new_entity_grid(grid, syntax=True,max_salience=0, history=2) entities_list.append({'entities':model.entities,'key':trees_and_key['key']}) count =0 entity_idx=0 for 
i,json_img in enumerate(json_imgs): for j,sentence in enumerate(json_img['sentences']): if count==int(entities_list[entity_idx]['key']): json_img['sentences'][j]['entities']=entities_list[entity_idx]['entities'] entity_idx=entity_idx+1 else: json_img['sentences'][j]['entities']=[] count =count+1 json_imgs[i]=json_img jsondata['images']=json_imgs fname = open('../data/example_entities.json', 'w') json.dump(jsondata, fname,ensure_ascii=False) fname.close()
mit
eptmp3/Sick-Beard
lib/enzyme/__init__.py
168
2414
# -*- coding: utf-8 -*- # enzyme - Video metadata parser # Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com> # Copyright 2003-2006 Thomas Schueppel <stain@acm.org> # Copyright 2003-2006 Dirk Meyer <dischi@freevo.org> # # This file is part of enzyme. # # enzyme is free software; you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # enzyme is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with enzyme. If not, see <http://www.gnu.org/licenses/>. import mimetypes import os import sys from exceptions import * PARSERS = [('asf', ['video/asf'], ['asf', 'wmv', 'wma']), ('flv', ['video/flv'], ['flv']), ('mkv', ['video/x-matroska', 'application/mkv'], ['mkv', 'mka', 'webm']), ('mp4', ['video/quicktime', 'video/mp4'], ['mov', 'qt', 'mp4', 'mp4a', '3gp', '3gp2', '3g2', 'mk2']), ('mpeg', ['video/mpeg'], ['mpeg', 'mpg', 'mp4', 'ts']), ('ogm', ['application/ogg'], ['ogm', 'ogg', 'ogv']), ('real', ['video/real'], ['rm', 'ra', 'ram']), ('riff', ['video/avi'], ['wav', 'avi']) ] def parse(path): """Parse metadata of the given video :param string path: path to the video file to parse :return: a parser corresponding to the video's mimetype or extension :rtype: :class:`~enzyme.core.AVContainer` """ if not os.path.isfile(path): raise ValueError('Invalid path') extension = os.path.splitext(path)[1][1:] mimetype = mimetypes.guess_type(path)[0] parser_ext = None parser_mime = None for (parser_name, parser_mimetypes, parser_extensions) in PARSERS: if mimetype in parser_mimetypes: parser_mime = parser_name if extension in parser_extensions: parser_ext = parser_name parser = 
parser_mime or parser_ext if not parser: raise NoParserError() mod = __import__(parser, globals=globals(), locals=locals(), fromlist=[], level=-1) with open(path, 'rb') as f: p = mod.Parser(f) return p
gpl-3.0
GheRivero/ansible
lib/ansible/modules/storage/netapp/na_ontap_snapshot.py
9
11805
#!/usr/bin/python # (c) 2018, NetApp, Inc # GNU General Public License v3.0+ # (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' module: na_ontap_snapshot short_description: Manage NetApp Sanpshots extends_documentation_fragment: - netapp.na_ontap version_added: '2.6' author: - Chris Archibald (carchi@netapp.com), Kevin Hutton (khutton@netapp.com) description: - Create/Modify/Delete Ontap snapshots options: state: description: - If you want to create/modify a snapshot, or delete it. choices: ['present', 'absent'] default: present snapshot: description: Name of the snapshot to be managed. The maximum string length is 256 characters. required: true volume: description: - Name of the volume on which the snapshot is to be created. required: true async_bool: description: - If true, the snapshot is to be created asynchronously. type: bool comment: description: A human readable comment attached with the snapshot. The size of the comment can be at most 255 characters. snapmirror_label: description: A human readable SnapMirror Label attached with the snapshot. Size of the label can be at most 31 characters. ignore_owners: description: - if this field is true, snapshot will be deleted even if some other processes are accessing it. type: bool snapshot_instance_uuid: description: - The 128 bit unique snapshot identifier expressed in the form of UUID. vserver: description: - The Vserver name new_comment: description: A human readable comment attached with the snapshot. The size of the comment can be at most 255 characters. 
This will replace the existing comment ''' EXAMPLES = """ - name: create SnapShot tags: - create na_ontap_snapshot: state=present snapshot={{ snapshot name }} volume={{ vol name }} comment="i am a comment" vserver={{ vserver name }} username={{ netapp username }} password={{ netapp password }} hostname={{ netapp hostname }} - name: delete SnapShot tags: - delete na_ontap_snapshot: state=absent snapshot={{ snapshot name }} volume={{ vol name }} vserver={{ vserver name }} username={{ netapp username }} password={{ netapp password }} hostname={{ netapp hostname }} - name: modify SnapShot tags: - modify na_ontap_snapshot: state=present snapshot={{ snapshot name }} new_comment="New comments are great" volume={{ vol name }} vserver={{ vserver name }} username={{ netapp username }} password={{ netapp password }} hostname={{ netapp hostname }} """ RETURN = """ """ import traceback from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_native import ansible.module_utils.netapp as netapp_utils HAS_NETAPP_LIB = netapp_utils.has_netapp_lib() class NetAppOntapSnapshot(object): """ Creates, modifies, and deletes a Snapshot """ def __init__(self): self.argument_spec = netapp_utils.na_ontap_host_argument_spec() self.argument_spec.update(dict( state=dict(required=False, choices=[ 'present', 'absent'], default='present'), snapshot=dict(required=True, type="str"), volume=dict(required=True, type="str"), async_bool=dict(required=False, type="bool", default=False), comment=dict(required=False, type="str"), snapmirror_label=dict(required=False, type="str"), ignore_owners=dict(required=False, type="bool", default=False), snapshot_instance_uuid=dict(required=False, type="str"), vserver=dict(required=True, type="str"), new_comment=dict(required=False, type="str"), )) self.module = AnsibleModule( argument_spec=self.argument_spec, supports_check_mode=True ) parameters = self.module.params # set up state variables # These are the required variables 
self.state = parameters['state'] self.snapshot = parameters['snapshot'] self.vserver = parameters['vserver'] # these are the optional variables for creating a snapshot self.volume = parameters['volume'] self.async_bool = parameters['async_bool'] self.comment = parameters['comment'] self.snapmirror_label = parameters['snapmirror_label'] # these are the optional variables for deleting a snapshot\ self.ignore_owners = parameters['ignore_owners'] self.snapshot_instance_uuid = parameters['snapshot_instance_uuid'] # These are the optional for Modify. # You can NOT change a snapcenter name self.new_comment = parameters['new_comment'] if HAS_NETAPP_LIB is False: self.module.fail_json( msg="the python NetApp-Lib module is required") else: self.server = netapp_utils.setup_na_ontap_zapi( module=self.module, vserver=self.vserver) return def create_snapshot(self): """ Creates a new snapshot """ snapshot_obj = netapp_utils.zapi.NaElement("snapshot-create") # set up required variables to create a snapshot snapshot_obj.add_new_child("snapshot", self.snapshot) snapshot_obj.add_new_child("volume", self.volume) # Set up optional variables to create a snapshot if self.async_bool: snapshot_obj.add_new_child("async", self.async_bool) if self.comment: snapshot_obj.add_new_child("comment", self.comment) if self.snapmirror_label: snapshot_obj.add_new_child( "snapmirror-label", self.snapmirror_label) try: self.server.invoke_successfully(snapshot_obj, True) except netapp_utils.zapi.NaApiError as error: self.module.fail_json(msg='Error creating snapshot %s: %s' % (self.snapshot, to_native(error)), exception=traceback.format_exc()) def delete_snapshot(self): """ Deletes an existing snapshot """ snapshot_obj = netapp_utils.zapi.NaElement("snapshot-delete") # Set up required variables to delete a snapshot snapshot_obj.add_new_child("snapshot", self.snapshot) snapshot_obj.add_new_child("volume", self.volume) # set up optional variables to delete a snapshot if self.ignore_owners: 
snapshot_obj.add_new_child("ignore-owners", self.ignore_owners) if self.snapshot_instance_uuid: snapshot_obj.add_new_child( "snapshot-instance-uuid", self.snapshot_instance_uuid) try: self.server.invoke_successfully(snapshot_obj, True) except netapp_utils.zapi.NaApiError as error: self.module.fail_json(msg='Error deleting snapshot %s: %s' % (self.snapshot, to_native(error)), exception=traceback.format_exc()) def modify_snapshot(self): """ Modify an existing snapshot :return: """ snapshot_obj = netapp_utils.zapi.NaElement("snapshot-modify-iter") # Create query object, this is the existing object query = netapp_utils.zapi.NaElement("query") snapshot_info_obj = netapp_utils.zapi.NaElement("snapshot-info") snapshot_info_obj.add_new_child("name", self.snapshot) query.add_child_elem(snapshot_info_obj) snapshot_obj.add_child_elem(query) # this is what we want to modify in the snapshot object attributes = netapp_utils.zapi.NaElement("attributes") snapshot_info_obj = netapp_utils.zapi.NaElement("snapshot-info") snapshot_info_obj.add_new_child("name", self.snapshot) snapshot_info_obj.add_new_child("comment", self.new_comment) attributes.add_child_elem(snapshot_info_obj) snapshot_obj.add_child_elem(attributes) try: self.server.invoke_successfully(snapshot_obj, True) except netapp_utils.zapi.NaApiError as error: self.module.fail_json(msg='Error modifying snapshot %s: %s' % (self.snapshot, to_native(error)), exception=traceback.format_exc()) def does_snapshot_exist(self): """ Checks to see if a snapshot exists or not :return: Return True if a snapshot exists, false if it dosn't """ snapshot_obj = netapp_utils.zapi.NaElement("snapshot-get-iter") desired_attr = netapp_utils.zapi.NaElement("desired-attributes") snapshot_info = netapp_utils.zapi.NaElement('snapshot-info') comment = netapp_utils.zapi.NaElement('comment') # add more desired attributes that are allowed to be modified snapshot_info.add_child_elem(comment) desired_attr.add_child_elem(snapshot_info) 
snapshot_obj.add_child_elem(desired_attr) # compose query query = netapp_utils.zapi.NaElement("query") snapshot_info_obj = netapp_utils.zapi.NaElement("snapshot-info") snapshot_info_obj.add_new_child("name", self.snapshot) snapshot_info_obj.add_new_child("volume", self.volume) query.add_child_elem(snapshot_info_obj) snapshot_obj.add_child_elem(query) result = self.server.invoke_successfully(snapshot_obj, True) return_value = None # TODO: Snapshot with the same name will mess this up, # need to fix that later if result.get_child_by_name('num-records') and \ int(result.get_child_content('num-records')) == 1: attributes_list = result.get_child_by_name('attributes-list') snap_info = attributes_list.get_child_by_name('snapshot-info') return_value = {'comment': snap_info.get_child_content('comment')} return return_value def apply(self): """ Check to see which play we should run """ changed = False comment_changed = False netapp_utils.ems_log_event("na_ontap_snapshot", self.server) existing_snapshot = self.does_snapshot_exist() if existing_snapshot is not None: if self.state == 'absent': changed = True elif self.state == 'present' and self.new_comment: if existing_snapshot['comment'] != self.new_comment: comment_changed = True changed = True else: if self.state == 'present': changed = True if changed: if self.module.check_mode: pass else: if self.state == 'present': if not existing_snapshot: self.create_snapshot() elif comment_changed: self.modify_snapshot() elif self.state == 'absent': if existing_snapshot: self.delete_snapshot() self.module.exit_json(changed=changed) def main(): """ Creates, modifies, and deletes a Snapshot """ obj = NetAppOntapSnapshot() obj.apply() if __name__ == '__main__': main()
gpl-3.0
fossoult/odoo
openerp/modules/loading.py
59
21202
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). # Copyright (C) 2010-2014 OpenERP s.a. (<http://openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## """ Modules (also called addons) management. 
""" import itertools import logging import os import sys import threading import time import openerp import openerp.modules.db import openerp.modules.graph import openerp.modules.migration import openerp.modules.registry import openerp.osv as osv import openerp.tools as tools from openerp import SUPERUSER_ID from openerp.tools.translate import _ from openerp.modules.module import initialize_sys_path, \ load_openerp_module, init_module_models, adapt_version from module import runs_post_install _logger = logging.getLogger(__name__) _test_logger = logging.getLogger('openerp.tests') def load_module_graph(cr, graph, status=None, perform_checks=True, skip_modules=None, report=None): """Migrates+Updates or Installs all module nodes from ``graph`` :param graph: graph of module nodes to load :param status: deprecated parameter, unused, left to avoid changing signature in 8.0 :param perform_checks: whether module descriptors should be checked for validity (prints warnings for same cases) :param skip_modules: optional list of module names (packages) which have previously been loaded and can be skipped :return: list of modules that were installed or updated """ def load_test(module_name, idref, mode): cr.commit() try: _load_data(cr, module_name, idref, mode, 'test') return True except Exception: _test_logger.exception( 'module %s: an exception occurred in a test', module_name) return False finally: if tools.config.options['test_commit']: cr.commit() else: cr.rollback() # avoid keeping stale xml_id, etc. 
in cache openerp.modules.registry.RegistryManager.clear_caches(cr.dbname) def _get_files_of_kind(kind): if kind == 'demo': kind = ['demo_xml', 'demo'] elif kind == 'data': kind = ['init_xml', 'update_xml', 'data'] if isinstance(kind, str): kind = [kind] files = [] for k in kind: for f in package.data[k]: files.append(f) if k.endswith('_xml') and not (k == 'init_xml' and not f.endswith('.xml')): # init_xml, update_xml and demo_xml are deprecated except # for the case of init_xml with yaml, csv and sql files as # we can't specify noupdate for those file. correct_key = 'demo' if k.count('demo') else 'data' _logger.warning( "module %s: key '%s' is deprecated in favor of '%s' for file '%s'.", package.name, k, correct_key, f ) return files def _load_data(cr, module_name, idref, mode, kind): """ kind: data, demo, test, init_xml, update_xml, demo_xml. noupdate is False, unless it is demo data or it is csv data in init mode. """ try: if kind in ('demo', 'test'): threading.currentThread().testing = True for filename in _get_files_of_kind(kind): _logger.info("loading %s/%s", module_name, filename) noupdate = False if kind in ('demo', 'demo_xml') or (filename.endswith('.csv') and kind in ('init', 'init_xml')): noupdate = True tools.convert_file(cr, module_name, filename, idref, mode, noupdate, kind, report) finally: if kind in ('demo', 'test'): threading.currentThread().testing = False processed_modules = [] loaded_modules = [] registry = openerp.registry(cr.dbname) migrations = openerp.modules.migration.MigrationManager(cr, graph) _logger.info('loading %d modules...', len(graph)) registry.clear_manual_fields() # register, instantiate and initialize models for each modules t0 = time.time() t0_sql = openerp.sql_db.sql_counter for index, package in enumerate(graph): module_name = package.name module_id = package.id if skip_modules and module_name in skip_modules: continue migrations.migrate_module(package, 'pre') load_openerp_module(package.name) new_install = 
package.installed_version is None if new_install: py_module = sys.modules['openerp.addons.%s' % (module_name,)] pre_init = package.info.get('pre_init_hook') if pre_init: getattr(py_module, pre_init)(cr) models = registry.load(cr, package) loaded_modules.append(package.name) if hasattr(package, 'init') or hasattr(package, 'update') or package.state in ('to install', 'to upgrade'): registry.setup_models(cr, partial=True) init_module_models(cr, package.name, models) idref = {} mode = 'update' if hasattr(package, 'init') or package.state == 'to install': mode = 'init' if hasattr(package, 'init') or hasattr(package, 'update') or package.state in ('to install', 'to upgrade'): # Can't put this line out of the loop: ir.module.module will be # registered by init_module_models() above. modobj = registry['ir.module.module'] if perform_checks: modobj.check(cr, SUPERUSER_ID, [module_id]) if package.state=='to upgrade': # upgrading the module information modobj.write(cr, SUPERUSER_ID, [module_id], modobj.get_values_from_terp(package.data)) _load_data(cr, module_name, idref, mode, kind='data') has_demo = hasattr(package, 'demo') or (package.dbdemo and package.state != 'installed') if has_demo: _load_data(cr, module_name, idref, mode, kind='demo') cr.execute('update ir_module_module set demo=%s where id=%s', (True, module_id)) modobj.invalidate_cache(cr, SUPERUSER_ID, ['demo'], [module_id]) migrations.migrate_module(package, 'post') if new_install: post_init = package.info.get('post_init_hook') if post_init: getattr(py_module, post_init)(cr, registry) registry._init_modules.add(package.name) # validate all the views at a whole registry['ir.ui.view']._validate_module_views(cr, SUPERUSER_ID, module_name) if has_demo: # launch tests only in demo mode, allowing tests to use demo data. 
if tools.config.options['test_enable']: # Yamel test report.record_result(load_test(module_name, idref, mode)) # Python tests ir_http = registry['ir.http'] if hasattr(ir_http, '_routing_map'): # Force routing map to be rebuilt between each module test suite del(ir_http._routing_map) report.record_result(openerp.modules.module.run_unit_tests(module_name, cr.dbname)) processed_modules.append(package.name) ver = adapt_version(package.data['version']) # Set new modules and dependencies modobj.write(cr, SUPERUSER_ID, [module_id], {'state': 'installed', 'latest_version': ver}) # Update translations for all installed languages modobj.update_translations(cr, SUPERUSER_ID, [module_id], None, {'overwrite': openerp.tools.config["overwrite_existing_translations"]}) package.state = 'installed' for kind in ('init', 'demo', 'update'): if hasattr(package, kind): delattr(package, kind) registry._init_modules.add(package.name) cr.commit() _logger.log(25, "%s modules loaded in %.2fs, %s queries", len(graph), time.time() - t0, openerp.sql_db.sql_counter - t0_sql) registry.clear_manual_fields() cr.commit() return loaded_modules, processed_modules def _check_module_names(cr, module_names): mod_names = set(module_names) if 'base' in mod_names: # ignore dummy 'all' module if 'all' in mod_names: mod_names.remove('all') if mod_names: cr.execute("SELECT count(id) AS count FROM ir_module_module WHERE name in %s", (tuple(mod_names),)) if cr.dictfetchone()['count'] != len(mod_names): # find out what module name(s) are incorrect: cr.execute("SELECT name FROM ir_module_module") incorrect_names = mod_names.difference([x['name'] for x in cr.dictfetchall()]) _logger.warning('invalid module names, ignored: %s', ", ".join(incorrect_names)) def load_marked_modules(cr, graph, states, force, progressdict, report, loaded_modules, perform_checks): """Loads modules marked with ``states``, adding them to ``graph`` and ``loaded_modules`` and returns a list of installed/upgraded modules.""" processed_modules = 
[] while True: cr.execute("SELECT name from ir_module_module WHERE state IN %s" ,(tuple(states),)) module_list = [name for (name,) in cr.fetchall() if name not in graph] if not module_list: break graph.add_modules(cr, module_list, force) _logger.debug('Updating graph with %d more modules', len(module_list)) loaded, processed = load_module_graph(cr, graph, progressdict, report=report, skip_modules=loaded_modules, perform_checks=perform_checks) processed_modules.extend(processed) loaded_modules.extend(loaded) if not processed: break return processed_modules def load_modules(db, force_demo=False, status=None, update_module=False): initialize_sys_path() force = [] if force_demo: force.append('demo') cr = db.cursor() try: if not openerp.modules.db.is_initialized(cr): _logger.info("init db") openerp.modules.db.initialize(cr) update_module = True # process auto-installed modules tools.config["init"]["all"] = 1 tools.config['update']['all'] = 1 if not tools.config['without_demo']: tools.config["demo"]['all'] = 1 # This is a brand new registry, just created in # openerp.modules.registry.RegistryManager.new(). registry = openerp.registry(cr.dbname) if 'base' in tools.config['update'] or 'all' in tools.config['update']: cr.execute("update ir_module_module set state=%s where name=%s and state=%s", ('to upgrade', 'base', 'installed')) # STEP 1: LOAD BASE (must be done before module dependencies can be computed for later steps) graph = openerp.modules.graph.Graph() graph.add_module(cr, 'base', force) if not graph: _logger.critical('module base cannot be loaded! (hint: verify addons-path)') raise osv.osv.except_osv(_('Could not load base module'), _('module base cannot be loaded! 
(hint: verify addons-path)')) # processed_modules: for cleanup step after install # loaded_modules: to avoid double loading report = registry._assertion_report loaded_modules, processed_modules = load_module_graph(cr, graph, status, perform_checks=update_module, report=report) if tools.config['load_language'] or update_module: # some base models are used below, so make sure they are set up registry.setup_models(cr, partial=True) if tools.config['load_language']: for lang in tools.config['load_language'].split(','): tools.load_language(cr, lang) # STEP 2: Mark other modules to be loaded/updated if update_module: modobj = registry['ir.module.module'] if ('base' in tools.config['init']) or ('base' in tools.config['update']): _logger.info('updating modules list') modobj.update_list(cr, SUPERUSER_ID) _check_module_names(cr, itertools.chain(tools.config['init'].keys(), tools.config['update'].keys())) mods = [k for k in tools.config['init'] if tools.config['init'][k]] if mods: ids = modobj.search(cr, SUPERUSER_ID, ['&', ('state', '=', 'uninstalled'), ('name', 'in', mods)]) if ids: modobj.button_install(cr, SUPERUSER_ID, ids) mods = [k for k in tools.config['update'] if tools.config['update'][k]] if mods: ids = modobj.search(cr, SUPERUSER_ID, ['&', ('state', '=', 'installed'), ('name', 'in', mods)]) if ids: modobj.button_upgrade(cr, SUPERUSER_ID, ids) cr.execute("update ir_module_module set state=%s where name=%s", ('installed', 'base')) modobj.invalidate_cache(cr, SUPERUSER_ID, ['state']) # STEP 3: Load marked modules (skipping base which was done in STEP 1) # IMPORTANT: this is done in two parts, first loading all installed or # partially installed modules (i.e. installed/to upgrade), to # offer a consistent system to the second part: installing # newly selected modules. # We include the modules 'to remove' in the first step, because # they are part of the "currently installed" modules. They will # be dropped in STEP 6 later, before restarting the loading # process. 
# IMPORTANT 2: We have to loop here until all relevant modules have been # processed, because in some rare cases the dependencies have # changed, and modules that depend on an uninstalled module # will not be processed on the first pass. # It's especially useful for migrations. previously_processed = -1 while previously_processed < len(processed_modules): previously_processed = len(processed_modules) processed_modules += load_marked_modules(cr, graph, ['installed', 'to upgrade', 'to remove'], force, status, report, loaded_modules, update_module) if update_module: processed_modules += load_marked_modules(cr, graph, ['to install'], force, status, report, loaded_modules, update_module) registry.setup_models(cr) # STEP 4: Finish and cleanup installations if processed_modules: cr.execute("""select model,name from ir_model where id NOT IN (select distinct model_id from ir_model_access)""") for (model, name) in cr.fetchall(): if model in registry and not registry[model].is_transient() and not isinstance(registry[model], openerp.osv.orm.AbstractModel): _logger.warning('The model %s has no access rules, consider adding one. E.g. access_%s,access_%s,model_%s,,1,0,0,0', model, model.replace('.', '_'), model.replace('.', '_'), model.replace('.', '_')) # Temporary warning while we remove access rights on osv_memory objects, as they have # been replaced by owner-only access rights cr.execute("""select distinct mod.model, mod.name from ir_model_access acc, ir_model mod where acc.model_id = mod.id""") for (model, name) in cr.fetchall(): if model in registry and registry[model].is_transient(): _logger.warning('The transient model %s (%s) should not have explicit access rules!', model, name) cr.execute("SELECT model from ir_model") for (model,) in cr.fetchall(): if model in registry: registry[model]._check_removed_columns(cr, log=True) else: _logger.warning("Model %s is declared but cannot be loaded! 
(Perhaps a module was partially removed or renamed)", model) # Cleanup orphan records registry['ir.model.data']._process_end(cr, SUPERUSER_ID, processed_modules) for kind in ('init', 'demo', 'update'): tools.config[kind] = {} cr.commit() # STEP 5: Cleanup menus # Remove menu items that are not referenced by any of other # (child) menu item, ir_values, or ir_model_data. # TODO: This code could be a method of ir_ui_menu. Remove menu without actions of children if update_module: while True: cr.execute('''delete from ir_ui_menu where (id not IN (select parent_id from ir_ui_menu where parent_id is not null)) and (id not IN (select res_id from ir_values where model='ir.ui.menu')) and (id not IN (select res_id from ir_model_data where model='ir.ui.menu'))''') cr.commit() if not cr.rowcount: break else: _logger.info('removed %d unused menus', cr.rowcount) # STEP 6: Uninstall modules to remove if update_module: # Remove records referenced from ir_model_data for modules to be # removed (and removed the references from ir_model_data). 
cr.execute("SELECT name, id FROM ir_module_module WHERE state=%s", ('to remove',)) modules_to_remove = dict(cr.fetchall()) if modules_to_remove: pkgs = reversed([p for p in graph if p.name in modules_to_remove]) for pkg in pkgs: uninstall_hook = pkg.info.get('uninstall_hook') if uninstall_hook: py_module = sys.modules['openerp.addons.%s' % (pkg.name,)] getattr(py_module, uninstall_hook)(cr, registry) registry['ir.module.module'].module_uninstall(cr, SUPERUSER_ID, modules_to_remove.values()) # Recursive reload, should only happen once, because there should be no # modules to remove next time cr.commit() _logger.info('Reloading registry once more after uninstalling modules') openerp.api.Environment.reset() return openerp.modules.registry.RegistryManager.new(cr.dbname, force_demo, status, update_module) # STEP 7: verify custom views on every model if update_module: Views = registry['ir.ui.view'] custom_view_test = True for model in registry.models.keys(): if not Views._validate_custom_views(cr, SUPERUSER_ID, model): custom_view_test = False _logger.error('invalid custom view(s) for model %s', model) report.record_result(custom_view_test) if report.failures: _logger.error('At least one test failed when loading the modules.') else: _logger.info('Modules loaded.') # STEP 8: call _register_hook on every model for model in registry.models.values(): model._register_hook(cr) # STEP 9: Run the post-install tests cr.commit() t0 = time.time() t0_sql = openerp.sql_db.sql_counter if openerp.tools.config['test_enable']: cr.execute("SELECT name FROM ir_module_module WHERE state='installed'") for module_name in cr.fetchall(): report.record_result(openerp.modules.module.run_unit_tests(module_name[0], cr.dbname, position=runs_post_install)) _logger.log(25, "All post-tested in %.2fs, %s queries", time.time() - t0, openerp.sql_db.sql_counter - t0_sql) finally: cr.close() # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
bitifirefly/edx-platform
common/djangoapps/third_party_auth/tests/utils.py
112
3037
"""Common utility for testing third party oauth2 features.""" import json import httpretty from provider.constants import PUBLIC from provider.oauth2.models import Client from social.apps.django_app.default.models import UserSocialAuth from student.tests.factories import UserFactory from .testutil import ThirdPartyAuthTestMixin @httpretty.activate class ThirdPartyOAuthTestMixin(ThirdPartyAuthTestMixin): """ Mixin with tests for third party oauth views. A TestCase that includes this must define the following: BACKEND: The name of the backend from python-social-auth USER_URL: The URL of the endpoint that the backend retrieves user data from UID_FIELD: The field in the user data that the backend uses as the user id """ def setUp(self, create_user=True): super(ThirdPartyOAuthTestMixin, self).setUp() self.social_uid = "test_social_uid" self.access_token = "test_access_token" self.client_id = "test_client_id" self.oauth_client = Client.objects.create( client_id=self.client_id, client_type=PUBLIC ) if create_user: self.user = UserFactory() UserSocialAuth.objects.create(user=self.user, provider=self.BACKEND, uid=self.social_uid) if self.BACKEND == 'google-oauth2': self.configure_google_provider(enabled=True) elif self.BACKEND == 'facebook': self.configure_facebook_provider(enabled=True) def _setup_provider_response(self, success=False, email=''): """ Register a mock response for the third party user information endpoint; success indicates whether the response status code should be 200 or 400 """ if success: status = 200 response = {self.UID_FIELD: self.social_uid} if email: response.update({'email': email}) body = json.dumps(response) else: status = 400 body = json.dumps({}) self._setup_provider_response_with_body(status, body) def _setup_provider_response_with_body(self, status, body): """ Register a mock response for the third party user information endpoint with given status and body. 
""" httpretty.register_uri( httpretty.GET, self.USER_URL, body=body, status=status, content_type="application/json" ) class ThirdPartyOAuthTestMixinFacebook(object): """Tests oauth with the Facebook backend""" BACKEND = "facebook" USER_URL = "https://graph.facebook.com/v2.3/me" # In facebook responses, the "id" field is used as the user's identifier UID_FIELD = "id" class ThirdPartyOAuthTestMixinGoogle(object): """Tests oauth with the Google backend""" BACKEND = "google-oauth2" USER_URL = "https://www.googleapis.com/plus/v1/people/me" # In google-oauth2 responses, the "email" field is used as the user's identifier UID_FIELD = "email"
agpl-3.0
HiSPARC/station-software
user/python/Lib/test/test_rfc822.py
23
9461
import unittest from test import test_support rfc822 = test_support.import_module("rfc822", deprecated=True) try: from cStringIO import StringIO except ImportError: from StringIO import StringIO class MessageTestCase(unittest.TestCase): def create_message(self, msg): return rfc822.Message(StringIO(msg)) def test_get(self): msg = self.create_message( 'To: "last, first" <userid@foo.net>\n\ntest\n') self.assertTrue(msg.get("to") == '"last, first" <userid@foo.net>') self.assertTrue(msg.get("TO") == '"last, first" <userid@foo.net>') self.assertTrue(msg.get("No-Such-Header") is None) self.assertTrue(msg.get("No-Such-Header", "No-Such-Value") == "No-Such-Value") def test_setdefault(self): msg = self.create_message( 'To: "last, first" <userid@foo.net>\n\ntest\n') self.assertTrue(not msg.has_key("New-Header")) self.assertTrue(msg.setdefault("New-Header", "New-Value") == "New-Value") self.assertTrue(msg.setdefault("New-Header", "Different-Value") == "New-Value") self.assertTrue(msg["new-header"] == "New-Value") self.assertTrue(msg.setdefault("Another-Header") == "") self.assertTrue(msg["another-header"] == "") def check(self, msg, results): """Check addresses and the date.""" m = self.create_message(msg) i = 0 for n, a in m.getaddrlist('to') + m.getaddrlist('cc'): try: mn, ma = results[i][0], results[i][1] except IndexError: print 'extra parsed address:', repr(n), repr(a) continue i = i + 1 self.assertEqual(mn, n, "Un-expected name: %r != %r" % (mn, n)) self.assertEqual(ma, a, "Un-expected address: %r != %r" % (ma, a)) if mn == n and ma == a: pass else: print 'not found:', repr(n), repr(a) out = m.getdate('date') if out: self.assertEqual(out, (1999, 1, 13, 23, 57, 35, 0, 1, 0), "date conversion failed") # Note: all test cases must have the same date (in various formats), # or no date! 
def test_basic(self): self.check( 'Date: Wed, 13 Jan 1999 23:57:35 -0500\n' 'From: Guido van Rossum <guido@CNRI.Reston.VA.US>\n' 'To: "Guido van\n' '\t : Rossum" <guido@python.org>\n' 'Subject: test2\n' '\n' 'test2\n', [('Guido van\n\t : Rossum', 'guido@python.org')]) self.check( 'From: Barry <bwarsaw@python.org\n' 'To: guido@python.org (Guido: the Barbarian)\n' 'Subject: nonsense\n' 'Date: Wednesday, January 13 1999 23:57:35 -0500\n' '\n' 'test', [('Guido: the Barbarian', 'guido@python.org')]) self.check( 'From: Barry <bwarsaw@python.org\n' 'To: guido@python.org (Guido: the Barbarian)\n' 'Cc: "Guido: the Madman" <guido@python.org>\n' 'Date: 13-Jan-1999 23:57:35 EST\n' '\n' 'test', [('Guido: the Barbarian', 'guido@python.org'), ('Guido: the Madman', 'guido@python.org') ]) self.check( 'To: "The monster with\n' ' the very long name: Guido" <guido@python.org>\n' 'Date: Wed, 13 Jan 1999 23:57:35 -0500\n' '\n' 'test', [('The monster with\n the very long name: Guido', 'guido@python.org')]) self.check( 'To: "Amit J. Patel" <amitp@Theory.Stanford.EDU>\n' 'CC: Mike Fletcher <mfletch@vrtelecom.com>,\n' ' "\'string-sig@python.org\'" <string-sig@python.org>\n' 'Cc: fooz@bat.com, bart@toof.com\n' 'Cc: goit@lip.com\n' 'Date: Wed, 13 Jan 1999 23:57:35 -0500\n' '\n' 'test', [('Amit J. Patel', 'amitp@Theory.Stanford.EDU'), ('Mike Fletcher', 'mfletch@vrtelecom.com'), ("'string-sig@python.org'", 'string-sig@python.org'), ('', 'fooz@bat.com'), ('', 'bart@toof.com'), ('', 'goit@lip.com'), ]) self.check( 'To: Some One <someone@dom.ain>\n' 'From: Anudder Persin <subuddy.else@dom.ain>\n' 'Date:\n' '\n' 'test', [('Some One', 'someone@dom.ain')]) self.check( 'To: person@dom.ain (User J. Person)\n\n', [('User J. Person', 'person@dom.ain')]) def test_doublecomment(self): # The RFC allows comments within comments in an email addr self.check( 'To: person@dom.ain ((User J. Person)), John Doe <foo@bar.com>\n\n', [('User J. 
Person', 'person@dom.ain'), ('John Doe', 'foo@bar.com')]) def test_twisted(self): # This one is just twisted. I don't know what the proper # result should be, but it shouldn't be to infloop, which is # what used to happen! self.check( 'To: <[smtp:dd47@mail.xxx.edu]_at_hmhq@hdq-mdm1-imgout.companay.com>\n' 'Date: Wed, 13 Jan 1999 23:57:35 -0500\n' '\n' 'test', [('', ''), ('', 'dd47@mail.xxx.edu'), ('', '_at_hmhq@hdq-mdm1-imgout.companay.com'), ]) def test_commas_in_full_name(self): # This exercises the old commas-in-a-full-name bug, which # should be doing the right thing in recent versions of the # module. self.check( 'To: "last, first" <userid@foo.net>\n' '\n' 'test', [('last, first', 'userid@foo.net')]) def test_quoted_name(self): self.check( 'To: (Comment stuff) "Quoted name"@somewhere.com\n' '\n' 'test', [('Comment stuff', '"Quoted name"@somewhere.com')]) def test_bogus_to_header(self): self.check( 'To: :\n' 'Cc: goit@lip.com\n' 'Date: Wed, 13 Jan 1999 23:57:35 -0500\n' '\n' 'test', [('', 'goit@lip.com')]) def test_addr_ipquad(self): self.check( 'To: guido@[132.151.1.21]\n' '\n' 'foo', [('', 'guido@[132.151.1.21]')]) def test_iter(self): m = rfc822.Message(StringIO( 'Date: Wed, 13 Jan 1999 23:57:35 -0500\n' 'From: Guido van Rossum <guido@CNRI.Reston.VA.US>\n' 'To: "Guido van\n' '\t : Rossum" <guido@python.org>\n' 'Subject: test2\n' '\n' 'test2\n' )) self.assertEqual(sorted(m), ['date', 'from', 'subject', 'to']) def test_rfc2822_phrases(self): # RFC 2822 (the update to RFC 822) specifies that dots in phrases are # obsolete syntax, which conforming programs MUST recognize but NEVER # generate (see $4.1 Miscellaneous obsolete tokens). This is a # departure from RFC 822 which did not allow dots in non-quoted # phrases. self.check('To: User J. Person <person@dom.ain>\n\n', [('User J. 
Person', 'person@dom.ain')]) # This takes too long to add to the test suite ## def test_an_excrutiatingly_long_address_field(self): ## OBSCENELY_LONG_HEADER_MULTIPLIER = 10000 ## oneaddr = ('Person' * 10) + '@' + ('.'.join(['dom']*10)) + '.com' ## addr = ', '.join([oneaddr] * OBSCENELY_LONG_HEADER_MULTIPLIER) ## lst = rfc822.AddrlistClass(addr).getaddrlist() ## self.assertEqual(len(lst), OBSCENELY_LONG_HEADER_MULTIPLIER) def test_2getaddrlist(self): eq = self.assertEqual msg = self.create_message("""\ To: aperson@dom.ain Cc: bperson@dom.ain Cc: cperson@dom.ain Cc: dperson@dom.ain A test message. """) ccs = [('', a) for a in ['bperson@dom.ain', 'cperson@dom.ain', 'dperson@dom.ain']] addrs = msg.getaddrlist('cc') addrs.sort() eq(addrs, ccs) # Try again, this one used to fail addrs = msg.getaddrlist('cc') addrs.sort() eq(addrs, ccs) def test_parseaddr(self): eq = self.assertEqual eq(rfc822.parseaddr('<>'), ('', '')) eq(rfc822.parseaddr('aperson@dom.ain'), ('', 'aperson@dom.ain')) eq(rfc822.parseaddr('bperson@dom.ain (Bea A. Person)'), ('Bea A. Person', 'bperson@dom.ain')) eq(rfc822.parseaddr('Cynthia Person <cperson@dom.ain>'), ('Cynthia Person', 'cperson@dom.ain')) def test_quote_unquote(self): eq = self.assertEqual eq(rfc822.quote('foo\\wacky"name'), 'foo\\\\wacky\\"name') eq(rfc822.unquote('"foo\\\\wacky\\"name"'), 'foo\\wacky"name') def test_invalid_headers(self): eq = self.assertEqual msg = self.create_message("First: val\n: otherval\nSecond: val2\n") eq(msg.getheader('First'), 'val') eq(msg.getheader('Second'), 'val2') def test_main(): test_support.run_unittest(MessageTestCase) if __name__ == "__main__": test_main()
gpl-3.0
Carmezim/tensorflow
tensorflow/contrib/learn/python/learn/estimators/estimator.py
5
55283
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Base Estimator class.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import copy import os import tempfile import numpy as np import six from tensorflow.contrib import framework as contrib_framework from tensorflow.contrib import layers from tensorflow.contrib import metrics as metrics_lib from tensorflow.contrib.framework import deprecated from tensorflow.contrib.framework import deprecated_args from tensorflow.contrib.framework import list_variables from tensorflow.contrib.framework import load_variable from tensorflow.contrib.framework.python.ops import variables as contrib_variables from tensorflow.contrib.learn.python.learn import evaluable from tensorflow.contrib.learn.python.learn import metric_spec from tensorflow.contrib.learn.python.learn import monitors as monitor_lib from tensorflow.contrib.learn.python.learn import trainable from tensorflow.contrib.learn.python.learn.estimators import _sklearn as sklearn from tensorflow.contrib.learn.python.learn.estimators import constants from tensorflow.contrib.learn.python.learn.estimators import metric_key from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib from tensorflow.contrib.learn.python.learn.estimators import run_config 
from tensorflow.contrib.learn.python.learn.estimators import tensor_signature from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError from tensorflow.contrib.learn.python.learn.learn_io import data_feeder from tensorflow.contrib.learn.python.learn.utils import export from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils from tensorflow.contrib.training.python.training import evaluation from tensorflow.core.framework import summary_pb2 from tensorflow.core.protobuf import config_pb2 from tensorflow.python.client import session as tf_session from tensorflow.python.framework import ops from tensorflow.python.framework import random_seed from tensorflow.python.framework import sparse_tensor from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import lookup_ops from tensorflow.python.ops import resources from tensorflow.python.ops import variables from tensorflow.python.platform import gfile from tensorflow.python.platform import tf_logging as logging from tensorflow.python.saved_model import builder as saved_model_builder from tensorflow.python.saved_model import tag_constants from tensorflow.python.training import basic_session_run_hooks from tensorflow.python.training import device_setter from tensorflow.python.training import monitored_session from tensorflow.python.training import saver from tensorflow.python.training import summary_io from tensorflow.python.util import compat from tensorflow.python.util import tf_decorator from tensorflow.python.util import tf_inspect AS_ITERABLE_DATE = '2016-09-15' AS_ITERABLE_INSTRUCTIONS = ( 'The default behavior of predict() is changing. The default value for\n' 'as_iterable will change to True, and then the flag will be removed\n' 'altogether. 
The behavior of this flag is described below.') SCIKIT_DECOUPLE_DATE = '2016-12-01' SCIKIT_DECOUPLE_INSTRUCTIONS = ( 'Estimator is decoupled from Scikit Learn interface by moving into\n' 'separate class SKCompat. Arguments x, y and batch_size are only\n' 'available in the SKCompat class, Estimator will only accept input_fn.\n' 'Example conversion:\n' ' est = Estimator(...) -> est = SKCompat(Estimator(...))') def _verify_input_args(x, y, input_fn, feed_fn, batch_size): """Verifies validity of co-existance of input arguments.""" if input_fn is None: if x is None: raise ValueError('Either x or input_fn must be provided.') if contrib_framework.is_tensor(x) or (y is not None and contrib_framework.is_tensor(y)): raise ValueError('Inputs cannot be tensors. Please provide input_fn.') if feed_fn is not None: raise ValueError('Can not provide both feed_fn and x or y.') else: if (x is not None) or (y is not None): raise ValueError('Can not provide both input_fn and x or y.') if batch_size is not None: raise ValueError('Can not provide both input_fn and batch_size.') def _get_input_fn(x, y, input_fn, feed_fn, batch_size, shuffle=False, epochs=1): """Make inputs into input and feed functions. Args: x: Numpy, Pandas or Dask matrix or iterable. y: Numpy, Pandas or Dask matrix or iterable. input_fn: Pre-defined input function for training data. feed_fn: Pre-defined data feeder function. batch_size: Size to split data into parts. Must be >= 1. shuffle: Whether to shuffle the inputs. epochs: Number of epochs to run. Returns: Data input and feeder function based on training data. Raises: ValueError: Only one of `(x & y)` or `input_fn` must be provided. 
""" _verify_input_args(x, y, input_fn, feed_fn, batch_size) if input_fn is not None: return input_fn, feed_fn df = data_feeder.setup_train_data_feeder( x, y, n_classes=None, batch_size=batch_size, shuffle=shuffle, epochs=epochs) return df.input_builder, df.get_feed_dict_fn() def infer_real_valued_columns_from_input_fn(input_fn): """Creates `FeatureColumn` objects for inputs defined by `input_fn`. This interprets all inputs as dense, fixed-length float values. This creates a local graph in which it calls `input_fn` to build the tensors, then discards it. Args: input_fn: Input function returning a tuple of: features - Dictionary of string feature name to `Tensor` or `Tensor`. labels - `Tensor` of label values. Returns: List of `FeatureColumn` objects. """ with ops.Graph().as_default(): features, _ = input_fn() return layers.infer_real_valued_columns(features) def infer_real_valued_columns_from_input(x): """Creates `FeatureColumn` objects for inputs defined by input `x`. This interprets all inputs as dense, fixed-length float values. Args: x: Real-valued matrix of shape [n_samples, n_features...]. Can be iterator that returns arrays of features. Returns: List of `FeatureColumn` objects. """ input_fn, _ = _get_input_fn( x=x, y=None, input_fn=None, feed_fn=None, batch_size=None) return infer_real_valued_columns_from_input_fn(input_fn) def _model_fn_args(fn): """Get argument names for function-like object. Args: fn: Function, or function-like object (e.g., result of `functools.partial`). Returns: `tuple` of string argument names. Raises: ValueError: if partial function has positionally bound arguments """ _, fn = tf_decorator.unwrap(fn) if hasattr(fn, 'func') and hasattr(fn, 'keywords') and hasattr(fn, 'args'): # Handle functools.partial and similar objects. return tuple([ arg for arg in tf_inspect.getargspec(fn.func).args[len(fn.args):] if arg not in set(fn.keywords.keys()) ]) # Handle function. 
return tuple(tf_inspect.getargspec(fn).args) def _get_replica_device_setter(config): """Creates a replica device setter if required. Args: config: A RunConfig instance. Returns: A replica device setter, or None. """ ps_ops = [ 'Variable', 'VariableV2', 'AutoReloadVariable', 'MutableHashTable', 'MutableHashTableOfTensors', 'MutableDenseHashTable' ] if config.task_type: worker_device = '/job:%s/task:%d' % (config.task_type, config.task_id) else: worker_device = '/job:worker' if config.num_ps_replicas > 0: return device_setter.replica_device_setter( ps_tasks=config.num_ps_replicas, worker_device=worker_device, merge_devices=True, ps_ops=ps_ops, cluster=config.cluster_spec) else: return None def _make_metrics_ops(metrics, features, labels, predictions): """Add metrics based on `features`, `labels`, and `predictions`. `metrics` contains a specification for how to run metrics. It is a dict mapping friendly names to either `MetricSpec` objects, or directly to a metric function (assuming that `predictions` and `labels` are single tensors), or to `(pred_name, metric)` `tuple`, which passes `predictions[pred_name]` and `labels` to `metric` (assuming `labels` is a single tensor). Users are encouraged to use `MetricSpec` objects, which are more flexible and cleaner. They also lead to clearer errors. Args: metrics: A dict mapping names to metrics specification, for example `MetricSpec` objects. features: A dict of tensors returned from an input_fn as features/inputs. labels: A single tensor or a dict of tensors returned from an input_fn as labels. predictions: A single tensor or a dict of tensors output from a model as predictions. Returns: A dict mapping the friendly given in `metrics` to the result of calling the given metric function. Raises: ValueError: If metrics specifications do not work with the type of `features`, `labels`, or `predictions` provided. Mostly, a dict is given but no pred_name specified. 
""" metrics = metrics or {} # If labels is a dict with a single key, unpack into a single tensor. labels_tensor_or_dict = labels if isinstance(labels, dict) and len(labels) == 1: labels_tensor_or_dict = labels[list(labels.keys())[0]] result = {} # Iterate in lexicographic order, so the graph is identical among runs. for name, metric in sorted(six.iteritems(metrics)): if isinstance(metric, metric_spec.MetricSpec): result[name] = metric.create_metric_ops(features, labels, predictions) continue # TODO(b/31229024): Remove the rest of this loop logging.warning('Please specify metrics using MetricSpec. Using bare ' 'functions or (key, fn) tuples is deprecated and support ' 'for it will be removed on Oct 1, 2016.') if isinstance(name, tuple): # Multi-head metrics. if len(name) != 2: raise ValueError('Invalid metric for {}. It returned a tuple with ' 'len {}, expected 2.'.format(name, len(name))) if not isinstance(predictions, dict): raise ValueError( 'Metrics passed provide (name, prediction), ' 'but predictions are not dict. ' 'Metrics: %s, Predictions: %s.' % (metrics, predictions)) # Here are two options: labels are single Tensor or a dict. if isinstance(labels, dict) and name[1] in labels: # If labels are dict and the prediction name is in it, apply metric. result[name[0]] = metric(predictions[name[1]], labels[name[1]]) else: # Otherwise pass the labels to the metric. result[name[0]] = metric(predictions[name[1]], labels_tensor_or_dict) else: # Single head metrics. if isinstance(predictions, dict): raise ValueError( 'Metrics passed provide only name, no prediction, ' 'but predictions are dict. ' 'Metrics: %s, Labels: %s.' % (metrics, labels_tensor_or_dict)) result[name] = metric(predictions, labels_tensor_or_dict) return result def _dict_to_str(dictionary): """Get a `str` representation of a `dict`. Args: dictionary: The `dict` to be represented as `str`. Returns: A `str` representing the `dictionary`. 
""" return ', '.join('%s = %s' % (k, v) for k, v in sorted(dictionary.items())) def _write_dict_to_summary(output_dir, dictionary, current_global_step): """Writes a `dict` into summary file in given output directory. Args: output_dir: `str`, directory to write the summary file in. dictionary: the `dict` to be written to summary file. current_global_step: `int`, the current global step. """ logging.info('Saving dict for global step %d: %s', current_global_step, _dict_to_str(dictionary)) summary_writer = summary_io.SummaryWriterCache.get(output_dir) summary_proto = summary_pb2.Summary() for key in dictionary: if dictionary[key] is None: continue value = summary_proto.value.add() value.tag = key if (isinstance(dictionary[key], np.float32) or isinstance(dictionary[key], float)): value.simple_value = float(dictionary[key]) else: logging.warn('Skipping summary for %s, must be a float or np.float32.', key) summary_writer.add_summary(summary_proto, current_global_step) summary_writer.flush() class BaseEstimator( sklearn.BaseEstimator, evaluable.Evaluable, trainable.Trainable): """Abstract BaseEstimator class to train and evaluate TensorFlow models. Users should not instantiate or subclass this class. Instead, use `Estimator`. """ __metaclass__ = abc.ABCMeta # Note that for Google users, this is overriden with # learn_runner.EstimatorConfig. # TODO(wicke): Remove this once launcher takes over config functionality _Config = run_config.RunConfig # pylint: disable=invalid-name def __init__(self, model_dir=None, config=None): """Initializes a BaseEstimator instance. Args: model_dir: Directory to save model parameters, graph and etc. This can also be used to load checkpoints from the directory into a estimator to continue training a previously saved model. If `None`, the model_dir in `config` will be used if set. If both are set, they must be same. config: A RunConfig instance. """ # Create a run configuration. 
if config is None: self._config = BaseEstimator._Config() logging.info('Using default config.') else: self._config = config if self._config.session_config is None: self._session_config = config_pb2.ConfigProto(allow_soft_placement=True) else: self._session_config = self._config.session_config # Model directory. if (model_dir is not None) and (self._config.model_dir is not None): if model_dir != self._config.model_dir: # TODO(b/9965722): remove this suppression after it is no longer # necessary. # pylint: disable=g-doc-exception raise ValueError( "model_dir are set both in constructor and RunConfig, but with " "different values. In constructor: '{}', in RunConfig: " "'{}' ".format(model_dir, self._config.model_dir)) self._model_dir = model_dir or self._config.model_dir if self._model_dir is None: self._model_dir = tempfile.mkdtemp() logging.warning('Using temporary folder as model directory: %s', self._model_dir) if self._config.model_dir is None: self._config = self._config.replace(model_dir=self._model_dir) logging.info('Using config: %s', str(vars(self._config))) # Set device function depending if there are replicas or not. self._device_fn = _get_replica_device_setter(self._config) # Features and labels TensorSignature objects. # TODO(wicke): Rename these to something more descriptive self._features_info = None self._labels_info = None self._graph = None @property def config(self): # TODO(wicke): make RunConfig immutable, and then return it without a copy. return copy.deepcopy(self._config) @deprecated_args( SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None), ('y', None), ('batch_size', None) ) def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None, monitors=None, max_steps=None): # pylint: disable=g-doc-args,g-doc-return-or-yield """See `Trainable`. Raises: ValueError: If `x` or `y` are not `None` while `input_fn` is not `None`. ValueError: If both `steps` and `max_steps` are not `None`. 
""" if (steps is not None) and (max_steps is not None): raise ValueError('Can not provide both steps and max_steps.') _verify_input_args(x, y, input_fn, None, batch_size) if x is not None: SKCompat(self).fit(x, y, batch_size, steps, max_steps, monitors) return self if max_steps is not None: try: start_step = load_variable(self._model_dir, ops.GraphKeys.GLOBAL_STEP) if max_steps <= start_step: logging.info('Skipping training since max_steps has already saved.') return self except: # pylint: disable=bare-except pass hooks = monitor_lib.replace_monitors_with_hooks(monitors, self) if steps is not None or max_steps is not None: hooks.append(basic_session_run_hooks.StopAtStepHook(steps, max_steps)) loss = self._train_model(input_fn=input_fn, hooks=hooks) logging.info('Loss for final step: %s.', loss) return self @deprecated_args( SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None), ('y', None), ('batch_size', None) ) def partial_fit( self, x=None, y=None, input_fn=None, steps=1, batch_size=None, monitors=None): """Incremental fit on a batch of samples. This method is expected to be called several times consecutively on different or the same chunks of the dataset. This either can implement iterative training or out-of-core/online training. This is especially useful when the whole dataset is too big to fit in memory at the same time. Or when model is taking long time to converge, and you want to split up training into subparts. Args: x: Matrix of shape [n_samples, n_features...]. Can be iterator that returns arrays of features. The training input samples for fitting the model. If set, `input_fn` must be `None`. y: Vector or matrix [n_samples] or [n_samples, n_outputs]. Can be iterator that returns array of labels. The training label values (class labels in classification, real numbers in regression). If set, `input_fn` must be `None`. input_fn: Input function. If set, `x`, `y`, and `batch_size` must be `None`. steps: Number of steps for which to train model. 
If `None`, train forever. batch_size: minibatch size to use on the input, defaults to first dimension of `x`. Must be `None` if `input_fn` is provided. monitors: List of `BaseMonitor` subclass instances. Used for callbacks inside the training loop. Returns: `self`, for chaining. Raises: ValueError: If at least one of `x` and `y` is provided, and `input_fn` is provided. """ logging.warning('The current implementation of partial_fit is not optimized' ' for use in a loop. Consider using fit() instead.') return self.fit(x=x, y=y, input_fn=input_fn, steps=steps, batch_size=batch_size, monitors=monitors) @deprecated_args( SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None), ('y', None), ('batch_size', None) ) def evaluate(self, x=None, y=None, input_fn=None, feed_fn=None, batch_size=None, steps=None, metrics=None, name=None, checkpoint_path=None, hooks=None, log_progress=True): # pylint: disable=g-doc-args,g-doc-return-or-yield """See `Evaluable`. Raises: ValueError: If at least one of `x` or `y` is provided, and at least one of `input_fn` or `feed_fn` is provided. Or if `metrics` is not `None` or `dict`. """ _verify_input_args(x, y, input_fn, feed_fn, batch_size) if x is not None: return SKCompat(self).score(x, y, batch_size, steps, metrics) if metrics is not None and not isinstance(metrics, dict): raise ValueError('Metrics argument should be None or dict. ' 'Got %s.' % metrics) eval_results, global_step = self._evaluate_model( input_fn=input_fn, feed_fn=feed_fn, steps=steps, metrics=metrics, name=name, checkpoint_path=checkpoint_path, hooks=hooks, log_progress=log_progress) if eval_results is not None: eval_results.update({'global_step': global_step}) return eval_results @deprecated_args( SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None), ('batch_size', None), ('as_iterable', True) ) def predict( self, x=None, input_fn=None, batch_size=None, outputs=None, as_iterable=True): """Returns predictions for given features. 
Args: x: Matrix of shape [n_samples, n_features...]. Can be iterator that returns arrays of features. The training input samples for fitting the model. If set, `input_fn` must be `None`. input_fn: Input function. If set, `x` and 'batch_size' must be `None`. batch_size: Override default batch size. If set, 'input_fn' must be 'None'. outputs: list of `str`, name of the output to predict. If `None`, returns all. as_iterable: If True, return an iterable which keeps yielding predictions for each example until inputs are exhausted. Note: The inputs must terminate if you want the iterable to terminate (e.g. be sure to pass num_epochs=1 if you are using something like read_batch_features). Returns: A numpy array of predicted classes or regression values if the constructor's `model_fn` returns a `Tensor` for `predictions` or a `dict` of numpy arrays if `model_fn` returns a `dict`. Returns an iterable of predictions if as_iterable is True. Raises: ValueError: If x and input_fn are both provided or both `None`. """ _verify_input_args(x, None, input_fn, None, batch_size) if x is not None and not as_iterable: return SKCompat(self).predict(x, batch_size) input_fn, feed_fn = _get_input_fn(x, None, input_fn, None, batch_size) return self._infer_model( input_fn=input_fn, feed_fn=feed_fn, outputs=outputs, as_iterable=as_iterable) def get_variable_value(self, name): """Returns value of the variable given by name. Args: name: string, name of the tensor. Returns: Numpy array - value of the tensor. """ return load_variable(self.model_dir, name) def get_variable_names(self): """Returns list of all variable names in this model. Returns: List of names. 
""" return [name for name, _ in list_variables(self.model_dir)] @property def model_dir(self): return self._model_dir @deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.') def export(self, export_dir, input_fn=export._default_input_fn, # pylint: disable=protected-access input_feature_key=None, use_deprecated_input_fn=True, signature_fn=None, prediction_key=None, default_batch_size=1, exports_to_keep=None, checkpoint_path=None): """Exports inference graph into given dir. Args: export_dir: A string containing a directory to write the exported graph and checkpoints. input_fn: If `use_deprecated_input_fn` is true, then a function that given `Tensor` of `Example` strings, parses it into features that are then passed to the model. Otherwise, a function that takes no argument and returns a tuple of (features, labels), where features is a dict of string key to `Tensor` and labels is a `Tensor` that's currently not used (and so can be `None`). input_feature_key: Only used if `use_deprecated_input_fn` is false. String key into the features dict returned by `input_fn` that corresponds to a the raw `Example` strings `Tensor` that the exported model will take as input. Can only be `None` if you're using a custom `signature_fn` that does not use the first arg (examples). use_deprecated_input_fn: Determines the signature format of `input_fn`. signature_fn: Function that returns a default signature and a named signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s for features and `Tensor` or `dict` of `Tensor`s for predictions. prediction_key: The key for a tensor in the `predictions` dict (output from the `model_fn`) to use as the `predictions` input to the `signature_fn`. Optional. If `None`, predictions will pass to `signature_fn` without filtering. default_batch_size: Default batch size of the `Example` placeholder. exports_to_keep: Number of exports to keep. checkpoint_path: the checkpoint path of the model to be exported. 
If it is `None` (which is default), will use the latest checkpoint in export_dir. Returns: The string path to the exported directory. NB: this functionality was added ca. 2016/09/25; clients that depend on the return value may need to handle the case where this function returns None because subclasses are not returning a value. """ # pylint: disable=protected-access return export._export_estimator( estimator=self, export_dir=export_dir, signature_fn=signature_fn, prediction_key=prediction_key, input_fn=input_fn, input_feature_key=input_feature_key, use_deprecated_input_fn=use_deprecated_input_fn, default_batch_size=default_batch_size, exports_to_keep=exports_to_keep, checkpoint_path=checkpoint_path) @abc.abstractproperty def _get_train_ops(self, features, labels): """Method that builds model graph and returns trainer ops. Expected to be overridden by sub-classes that require custom support. Args: features: `Tensor` or `dict` of `Tensor` objects. labels: `Tensor` or `dict` of `Tensor` objects. Returns: A `ModelFnOps` object. """ pass @abc.abstractproperty def _get_predict_ops(self, features): """Method that builds model graph and returns prediction ops. Args: features: `Tensor` or `dict` of `Tensor` objects. Returns: A `ModelFnOps` object. """ pass def _get_eval_ops(self, features, labels, metrics): """Method that builds model graph and returns evaluation ops. Expected to be overriden by sub-classes that require custom support. Args: features: `Tensor` or `dict` of `Tensor` objects. labels: `Tensor` or `dict` of `Tensor` objects. metrics: Dict of metrics to run. If None, the default metric functions are used; if {}, no metrics are used. Otherwise, `metrics` should map friendly names for the metric to a `MetricSpec` object defining which model outputs to evaluate against which labels with which metric function. Metric ops should support streaming, e.g., returning update_op and value tensors. 
See more details in `../../../../metrics/python/metrics/ops/streaming_metrics.py` and `../metric_spec.py`. Returns: A `ModelFnOps` object. """ raise NotImplementedError('_get_eval_ops not implemented in BaseEstimator') @deprecated( '2016-09-23', 'The signature of the input_fn accepted by export is changing to be ' 'consistent with what\'s used by tf.Learn Estimator\'s train/evaluate, ' 'which makes this function useless. This will be removed after the ' 'deprecation date.') def _get_feature_ops_from_example(self, examples_batch): """Returns feature parser for given example batch using features info. This function requires `fit()` has been called. Args: examples_batch: batch of tf.Example Returns: features: `Tensor` or `dict` of `Tensor` objects. Raises: ValueError: If `_features_info` attribute is not available (usually because `fit()` has not been called). """ if self._features_info is None: raise ValueError('Features information missing, was fit() ever called?') return tensor_signature.create_example_parser_from_signatures( self._features_info, examples_batch) def _check_inputs(self, features, labels): if self._features_info is not None: logging.debug('Given features: %s, required signatures: %s.', str(features), str(self._features_info)) if not tensor_signature.tensors_compatible(features, self._features_info): raise ValueError('Features are incompatible with given information. ' 'Given features: %s, required signatures: %s.' % (str(features), str(self._features_info))) else: self._features_info = tensor_signature.create_signatures(features) logging.debug('Setting feature info to %s.', str(self._features_info)) if labels is not None: if self._labels_info is not None: logging.debug('Given labels: %s, required signatures: %s.', str(labels), str(self._labels_info)) if not tensor_signature.tensors_compatible(labels, self._labels_info): raise ValueError('Labels are incompatible with given information. ' 'Given labels: %s, required signatures: %s.' 
% (str(labels), str(self._labels_info))) else: self._labels_info = tensor_signature.create_signatures(labels) logging.debug('Setting labels info to %s', str(self._labels_info)) def _extract_metric_update_ops(self, eval_dict): """Separate update operations from metric value operations.""" update_ops = [] value_ops = {} for name, metric_ops in six.iteritems(eval_dict): if isinstance(metric_ops, (list, tuple)): if len(metric_ops) == 2: value_ops[name] = metric_ops[0] update_ops.append(metric_ops[1]) else: logging.warning( 'Ignoring metric {}. It returned a list|tuple with len {}, ' 'expected 2'.format(name, len(metric_ops))) value_ops[name] = metric_ops else: value_ops[name] = metric_ops if update_ops: update_ops = control_flow_ops.group(*update_ops) else: update_ops = None return update_ops, value_ops def _evaluate_model(self, input_fn, steps, feed_fn=None, metrics=None, name='', checkpoint_path=None, hooks=None, log_progress=True): # TODO(wicke): Remove this once Model and associated code are gone. if (hasattr(self._config, 'execution_mode') and self._config.execution_mode not in ('all', 'evaluate', 'eval_evalset')): return None, None # Check that model has been trained (if nothing has been set explicitly). if not checkpoint_path: latest_path = saver.latest_checkpoint(self._model_dir) if not latest_path: raise NotFittedError("Couldn't find trained model at %s." % self._model_dir) checkpoint_path = latest_path # Setup output directory. eval_dir = os.path.join(self._model_dir, 'eval' if not name else 'eval_' + name) with ops.Graph().as_default() as g: random_seed.set_random_seed(self._config.tf_random_seed) global_step = contrib_framework.create_global_step(g) features, labels = input_fn() self._check_inputs(features, labels) model_fn_results = self._get_eval_ops(features, labels, metrics) eval_dict = model_fn_results.eval_metric_ops update_op, eval_dict = self._extract_metric_update_ops(eval_dict) # We need to copy the hook array as we modify it, thus [:]. 
hooks = hooks[:] if hooks else [] if feed_fn: hooks.append(basic_session_run_hooks.FeedFnHook(feed_fn)) if steps: hooks.append( evaluation.StopAfterNEvalsHook( steps, log_progress=log_progress)) global_step_key = 'global_step' while global_step_key in eval_dict: global_step_key = '_' + global_step_key eval_dict[global_step_key] = global_step eval_results = evaluation.evaluate_once( checkpoint_path=checkpoint_path, master=self._config.evaluation_master, scaffold=model_fn_results.scaffold, eval_ops=update_op, final_ops=eval_dict, hooks=hooks, config=self._session_config) current_global_step = eval_results[global_step_key] _write_dict_to_summary(eval_dir, eval_results, current_global_step) return eval_results, current_global_step def _get_features_from_input_fn(self, input_fn): result = input_fn() if isinstance(result, (list, tuple)): return result[0] return result def _infer_model(self, input_fn, feed_fn=None, outputs=None, as_iterable=True, iterate_batches=False): # Check that model has been trained. checkpoint_path = saver.latest_checkpoint(self._model_dir) if not checkpoint_path: raise NotFittedError("Couldn't find trained model at %s." 
% self._model_dir) with ops.Graph().as_default() as g: random_seed.set_random_seed(self._config.tf_random_seed) contrib_framework.create_global_step(g) features = self._get_features_from_input_fn(input_fn) infer_ops = self._get_predict_ops(features) predictions = self._filter_predictions(infer_ops.predictions, outputs) mon_sess = monitored_session.MonitoredSession( session_creator=monitored_session.ChiefSessionCreator( checkpoint_filename_with_path=checkpoint_path, scaffold=infer_ops.scaffold, config=self._session_config)) if not as_iterable: with mon_sess: if not mon_sess.should_stop(): return mon_sess.run(predictions, feed_fn() if feed_fn else None) else: return self._predict_generator(mon_sess, predictions, feed_fn, iterate_batches) def _predict_generator(self, mon_sess, predictions, feed_fn, iterate_batches): with mon_sess: while not mon_sess.should_stop(): preds = mon_sess.run(predictions, feed_fn() if feed_fn else None) if iterate_batches: yield preds elif not isinstance(predictions, dict): for pred in preds: yield pred else: first_tensor = list(preds.values())[0] if isinstance(first_tensor, sparse_tensor.SparseTensorValue): batch_length = first_tensor.dense_shape[0] else: batch_length = first_tensor.shape[0] for i in range(batch_length): yield {key: value[i] for key, value in six.iteritems(preds)} if self._is_input_constant(feed_fn, mon_sess.graph): return def _is_input_constant(self, feed_fn, graph): # If there are no queue_runners, the input `predictions` is a # constant, and we should stop after the first epoch. If, # instead, there are queue_runners, eventually they should throw # an `OutOfRangeError`. if graph.get_collection(ops.GraphKeys.QUEUE_RUNNERS): return False # data_feeder uses feed_fn to generate `OutOfRangeError`. 
if feed_fn is not None: return False return True def _filter_predictions(self, predictions, outputs): if not outputs: return predictions if not isinstance(predictions, dict): raise ValueError( 'outputs argument is not valid in case of non-dict predictions.') existing_keys = predictions.keys() predictions = { key: value for key, value in six.iteritems(predictions) if key in outputs } if not predictions: raise ValueError('Expected to run at least one output from %s, ' 'provided %s.' % (existing_keys, outputs)) return predictions def _train_model(self, input_fn, hooks): all_hooks = [] self._graph = ops.Graph() with self._graph.as_default() as g, g.device(self._device_fn): random_seed.set_random_seed(self._config.tf_random_seed) global_step = contrib_framework.create_global_step(g) features, labels = input_fn() self._check_inputs(features, labels) model_fn_ops = self._get_train_ops(features, labels) ops.add_to_collection(ops.GraphKeys.LOSSES, model_fn_ops.loss) all_hooks.extend([ basic_session_run_hooks.NanTensorHook(model_fn_ops.loss), basic_session_run_hooks.LoggingTensorHook( { 'loss': model_fn_ops.loss, 'step': global_step }, every_n_iter=100) ]) all_hooks.extend(hooks) scaffold = model_fn_ops.scaffold or monitored_session.Scaffold() if not (scaffold.saver or ops.get_collection(ops.GraphKeys.SAVERS)): ops.add_to_collection( ops.GraphKeys.SAVERS, saver.Saver( sharded=True, max_to_keep=self._config.keep_checkpoint_max, defer_build=True, save_relative_paths=True)) chief_hooks = [] if (self._config.save_checkpoints_secs or self._config.save_checkpoints_steps): saver_hook_exists = any([ isinstance(h, basic_session_run_hooks.CheckpointSaverHook) for h in (all_hooks + model_fn_ops.training_hooks + chief_hooks + model_fn_ops.training_chief_hooks) ]) if not saver_hook_exists: chief_hooks = [ basic_session_run_hooks.CheckpointSaverHook( self._model_dir, save_secs=self._config.save_checkpoints_secs, save_steps=self._config.save_checkpoints_steps, scaffold=scaffold) ] with 
monitored_session.MonitoredTrainingSession( master=self._config.master, is_chief=self._config.is_chief, checkpoint_dir=self._model_dir, scaffold=scaffold, hooks=all_hooks + model_fn_ops.training_hooks, chief_only_hooks=chief_hooks + model_fn_ops.training_chief_hooks, save_checkpoint_secs=0, # Saving is handled by a hook. save_summaries_steps=self._config.save_summary_steps, config=self._session_config ) as mon_sess: loss = None while not mon_sess.should_stop(): _, loss = mon_sess.run([model_fn_ops.train_op, model_fn_ops.loss]) summary_io.SummaryWriterCache.clear() return loss def _identity_feature_engineering_fn(features, labels): return features, labels class Estimator(BaseEstimator): """Estimator class is the basic TensorFlow model trainer/evaluator. """ def __init__(self, model_fn=None, model_dir=None, config=None, params=None, feature_engineering_fn=None): """Constructs an `Estimator` instance. Args: model_fn: Model function. Follows the signature: * Args: * `features`: single `Tensor` or `dict` of `Tensor`s (depending on data passed to `fit`), * `labels`: `Tensor` or `dict` of `Tensor`s (for multi-head models). If mode is `ModeKeys.INFER`, `labels=None` will be passed. If the `model_fn`'s signature does not accept `mode`, the `model_fn` must still be able to handle `labels=None`. * `mode`: Optional. Specifies if this training, evaluation or prediction. See `ModeKeys`. * `params`: Optional `dict` of hyperparameters. Will receive what is passed to Estimator in `params` parameter. This allows to configure Estimators from hyper parameter tuning. * `config`: Optional configuration object. Will receive what is passed to Estimator in `config` parameter, or the default `config`. Allows updating things in your model_fn based on configuration such as `num_ps_replicas`. * `model_dir`: Optional directory where model parameters, graph etc are saved. Will receive what is passed to Estimator in `model_dir` parameter, or the default `model_dir`. 
Allows updating things in your model_fn that expect model_dir, such as training hooks. * Returns: `ModelFnOps` Also supports a legacy signature which returns tuple of: * predictions: `Tensor`, `SparseTensor` or dictionary of same. Can also be any type that is convertible to a `Tensor` or `SparseTensor`, or dictionary of same. * loss: Scalar loss `Tensor`. * train_op: Training update `Tensor` or `Operation`. Supports next three signatures for the function: * `(features, labels) -> (predictions, loss, train_op)` * `(features, labels, mode) -> (predictions, loss, train_op)` * `(features, labels, mode, params) -> (predictions, loss, train_op)` * `(features, labels, mode, params, config) -> (predictions, loss, train_op)` * `(features, labels, mode, params, config, model_dir) -> (predictions, loss, train_op)` model_dir: Directory to save model parameters, graph and etc. This can also be used to load checkpoints from the directory into a estimator to continue training a previously saved model. config: Configuration object. params: `dict` of hyper parameters that will be passed into `model_fn`. Keys are names of parameters, values are basic python types. feature_engineering_fn: Feature engineering function. Takes features and labels which are the output of `input_fn` and returns features and labels which will be fed into `model_fn`. Please check `model_fn` for a definition of features and labels. Raises: ValueError: parameters of `model_fn` don't match `params`. """ super(Estimator, self).__init__(model_dir=model_dir, config=config) if model_fn is not None: # Check number of arguments of the given function matches requirements. model_fn_args = _model_fn_args(model_fn) if params is not None and 'params' not in model_fn_args: raise ValueError('Estimator\'s model_fn (%s) has less than 4 ' 'arguments, but not None params (%s) are passed.' 
% (model_fn, params)) if params is None and 'params' in model_fn_args: logging.warning('Estimator\'s model_fn (%s) includes params ' 'argument, but params are not passed to Estimator.', model_fn) self._model_fn = model_fn self.params = params self._feature_engineering_fn = ( feature_engineering_fn or _identity_feature_engineering_fn) def _call_model_fn(self, features, labels, mode): """Calls model function with support of 2, 3 or 4 arguments. Args: features: features dict. labels: labels dict. mode: ModeKeys Returns: A `ModelFnOps` object. If model_fn returns a tuple, wraps them up in a `ModelFnOps` object. Raises: ValueError: if model_fn returns invalid objects. """ features, labels = self._feature_engineering_fn(features, labels) model_fn_args = _model_fn_args(self._model_fn) kwargs = {} if 'mode' in model_fn_args: kwargs['mode'] = mode if 'params' in model_fn_args: kwargs['params'] = self.params if 'config' in model_fn_args: kwargs['config'] = self.config if 'model_dir' in model_fn_args: kwargs['model_dir'] = self.model_dir model_fn_results = self._model_fn(features, labels, **kwargs) if isinstance(model_fn_results, model_fn_lib.ModelFnOps): return model_fn_results # Here model_fn_results should be a tuple with 3 elements. if len(model_fn_results) != 3: raise ValueError('Unrecognized value returned by model_fn, ' 'please return ModelFnOps.') return model_fn_lib.ModelFnOps( mode=mode, predictions=model_fn_results[0], loss=model_fn_results[1], train_op=model_fn_results[2]) def _get_train_ops(self, features, labels): """Method that builds model graph and returns trainer ops. Expected to be overriden by sub-classes that require custom support. This implementation uses `model_fn` passed as parameter to constructor to build model. Args: features: `Tensor` or `dict` of `Tensor` objects. labels: `Tensor` or `dict` of `Tensor` objects. Returns: `ModelFnOps` object. 
""" return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.TRAIN) def _get_eval_ops(self, features, labels, metrics): """Method that builds model graph and returns evaluation ops. Expected to be overriden by sub-classes that require custom support. This implementation uses `model_fn` passed as parameter to constructor to build model. Args: features: `Tensor` or `dict` of `Tensor` objects. labels: `Tensor` or `dict` of `Tensor` objects. metrics: Dict of metrics to run. If None, the default metric functions are used; if {}, no metrics are used. Otherwise, `metrics` should map friendly names for the metric to a `MetricSpec` object defining which model outputs to evaluate against which labels with which metric function. Metric ops should support streaming, e.g., returning update_op and value tensors. See more details in `../../../../metrics/python/metrics/ops/streaming_metrics.py` and `../metric_spec.py`. Returns: `ModelFnOps` object. Raises: ValueError: if `metrics` don't match `labels`. """ model_fn_ops = self._call_model_fn( features, labels, model_fn_lib.ModeKeys.EVAL) features, labels = self._feature_engineering_fn(features, labels) # Custom metrics should overwrite defaults. if metrics: model_fn_ops.eval_metric_ops.update(_make_metrics_ops( metrics, features, labels, model_fn_ops.predictions)) if metric_key.MetricKey.LOSS not in model_fn_ops.eval_metric_ops: model_fn_ops.eval_metric_ops[metric_key.MetricKey.LOSS] = ( metrics_lib.streaming_mean(model_fn_ops.loss)) return model_fn_ops def _get_predict_ops(self, features): """Method that builds model graph and returns prediction ops. Expected to be overriden by sub-classes that require custom support. This implementation uses `model_fn` passed as parameter to constructor to build model. Args: features: `Tensor` or `dict` of `Tensor` objects. Returns: `ModelFnOps` object. 
""" labels = tensor_signature.create_placeholders_from_signatures( self._labels_info) return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.INFER) def export_savedmodel( self, export_dir_base, serving_input_fn, default_output_alternative_key=None, assets_extra=None, as_text=False, checkpoint_path=None): """Exports inference graph as a SavedModel into given dir. Args: export_dir_base: A string containing a directory to write the exported graph and checkpoints. serving_input_fn: A function that takes no argument and returns an `InputFnOps`. default_output_alternative_key: the name of the head to serve when none is specified. Not needed for single-headed models. assets_extra: A dict specifying how to populate the assets.extra directory within the exported SavedModel. Each key should give the destination path (including the filename) relative to the assets.extra directory. The corresponding value gives the full path of the source file to be copied. For example, the simple case of copying a single file without renaming it is specified as `{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`. as_text: whether to write the SavedModel proto in text format. checkpoint_path: The checkpoint path to export. If None (the default), the most recent checkpoint found within the model directory is chosen. Returns: The string path to the exported directory. Raises: ValueError: if an unrecognized export_type is requested. """ if serving_input_fn is None: raise ValueError('serving_input_fn must be defined.') with ops.Graph().as_default() as g: contrib_variables.create_global_step(g) # Call the serving_input_fn and collect the input alternatives. input_ops = serving_input_fn() input_alternatives, features = ( saved_model_export_utils.get_input_alternatives(input_ops)) # TODO(b/34388557) This is a stopgap, pending recording model provenance. # Record which features are expected at serving time. It is assumed that # these are the features that were used in training. 
for feature_key in input_ops.features.keys(): ops.add_to_collection( constants.COLLECTION_DEF_KEY_FOR_INPUT_FEATURE_KEYS, feature_key) # Call the model_fn and collect the output alternatives. model_fn_ops = self._call_model_fn(features, None, model_fn_lib.ModeKeys.INFER) output_alternatives, actual_default_output_alternative_key = ( saved_model_export_utils.get_output_alternatives( model_fn_ops, default_output_alternative_key)) # Build the SignatureDefs from all pairs of input and output alternatives signature_def_map = saved_model_export_utils.build_all_signature_defs( input_alternatives, output_alternatives, actual_default_output_alternative_key) if not checkpoint_path: # Locate the latest checkpoint checkpoint_path = saver.latest_checkpoint(self._model_dir) if not checkpoint_path: raise NotFittedError("Couldn't find trained model at %s." % self._model_dir) export_dir = saved_model_export_utils.get_timestamped_export_dir( export_dir_base) if (model_fn_ops.scaffold is not None and model_fn_ops.scaffold.saver is not None): saver_for_restore = model_fn_ops.scaffold.saver else: saver_for_restore = saver.Saver(sharded=True) with tf_session.Session('') as session: saver_for_restore.restore(session, checkpoint_path) init_op = control_flow_ops.group( variables.local_variables_initializer(), resources.initialize_resources(resources.shared_resources()), lookup_ops.tables_initializer()) # Perform the export builder = saved_model_builder.SavedModelBuilder(export_dir) builder.add_meta_graph_and_variables( session, [tag_constants.SERVING], signature_def_map=signature_def_map, assets_collection=ops.get_collection( ops.GraphKeys.ASSET_FILEPATHS), legacy_init_op=init_op) builder.save(as_text) # Add the extra assets if assets_extra: assets_extra_path = os.path.join(compat.as_bytes(export_dir), compat.as_bytes('assets.extra')) for dest_relative, source in assets_extra.items(): dest_absolute = os.path.join(compat.as_bytes(assets_extra_path), compat.as_bytes(dest_relative)) dest_path 
= os.path.dirname(dest_absolute) gfile.MakeDirs(dest_path) gfile.Copy(source, dest_absolute) return export_dir # For time of deprecation x,y from Estimator allow direct access. # pylint: disable=protected-access class SKCompat(sklearn.BaseEstimator): """Scikit learn wrapper for TensorFlow Learn Estimator.""" def __init__(self, estimator): self._estimator = estimator def fit(self, x, y, batch_size=128, steps=None, max_steps=None, monitors=None): input_fn, feed_fn = _get_input_fn(x, y, input_fn=None, feed_fn=None, batch_size=batch_size, shuffle=True, epochs=None) all_monitors = [] if feed_fn: all_monitors = [basic_session_run_hooks.FeedFnHook(feed_fn)] if monitors: all_monitors.extend(monitors) self._estimator.fit(input_fn=input_fn, steps=steps, max_steps=max_steps, monitors=all_monitors) return self def score(self, x, y, batch_size=128, steps=None, metrics=None): input_fn, feed_fn = _get_input_fn(x, y, input_fn=None, feed_fn=None, batch_size=batch_size, shuffle=False, epochs=1) if metrics is not None and not isinstance(metrics, dict): raise ValueError('Metrics argument should be None or dict. ' 'Got %s.' % metrics) eval_results, global_step = self._estimator._evaluate_model( input_fn=input_fn, feed_fn=feed_fn, steps=steps, metrics=metrics, name='score') if eval_results is not None: eval_results.update({'global_step': global_step}) return eval_results def predict(self, x, batch_size=128, outputs=None): input_fn, feed_fn = _get_input_fn( x, None, input_fn=None, feed_fn=None, batch_size=batch_size, shuffle=False, epochs=1) results = list( self._estimator._infer_model( input_fn=input_fn, feed_fn=feed_fn, outputs=outputs, as_iterable=True, iterate_batches=True)) if not isinstance(results[0], dict): return np.concatenate([output for output in results], axis=0) return { key: np.concatenate( [output[key] for output in results], axis=0) for key in results[0] }
apache-2.0
Carmezim/tensorflow
tensorflow/contrib/learn/python/learn/estimators/constants.py
79
1657
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Constants regarding Estimators. This file is obsoleted in the move of Estimator to core. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function class ProblemType(object): """Enum-like values for the type of problem that the model solves. These values are used when exporting the model to produce the appropriate signature function for serving. The following values are supported: UNSPECIFIED: Produces a predict signature_fn. CLASSIFICATION: Produces a classify signature_fn. LINEAR_REGRESSION: Produces a regression signature_fn. LOGISTIC_REGRESSION: Produces a classify signature_fn. """ UNSPECIFIED = 0 CLASSIFICATION = 1 LINEAR_REGRESSION = 2 LOGISTIC_REGRESSION = 3 # CollectionDef key for the input feature keys. # TODO(b/34388557): This is a stopgap; please follow the bug to learn of changes COLLECTION_DEF_KEY_FOR_INPUT_FEATURE_KEYS = "input_feature_keys"
apache-2.0
VinceZK/phantomjs
src/breakpad/src/third_party/protobuf/protobuf/python/google/protobuf/internal/wire_format.py
561
8431
# Protocol Buffers - Google's data interchange format # Copyright 2008 Google Inc. All rights reserved. # http://code.google.com/p/protobuf/ # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Constants and static functions to support protocol buffer wire format.""" __author__ = 'robinson@google.com (Will Robinson)' import struct from google.protobuf import descriptor from google.protobuf import message TAG_TYPE_BITS = 3 # Number of bits used to hold type info in a proto tag. 
TAG_TYPE_MASK = (1 << TAG_TYPE_BITS) - 1 # 0x7 # These numbers identify the wire type of a protocol buffer value. # We use the least-significant TAG_TYPE_BITS bits of the varint-encoded # tag-and-type to store one of these WIRETYPE_* constants. # These values must match WireType enum in google/protobuf/wire_format.h. WIRETYPE_VARINT = 0 WIRETYPE_FIXED64 = 1 WIRETYPE_LENGTH_DELIMITED = 2 WIRETYPE_START_GROUP = 3 WIRETYPE_END_GROUP = 4 WIRETYPE_FIXED32 = 5 _WIRETYPE_MAX = 5 # Bounds for various integer types. INT32_MAX = int((1 << 31) - 1) INT32_MIN = int(-(1 << 31)) UINT32_MAX = (1 << 32) - 1 INT64_MAX = (1 << 63) - 1 INT64_MIN = -(1 << 63) UINT64_MAX = (1 << 64) - 1 # "struct" format strings that will encode/decode the specified formats. FORMAT_UINT32_LITTLE_ENDIAN = '<I' FORMAT_UINT64_LITTLE_ENDIAN = '<Q' FORMAT_FLOAT_LITTLE_ENDIAN = '<f' FORMAT_DOUBLE_LITTLE_ENDIAN = '<d' # We'll have to provide alternate implementations of AppendLittleEndian*() on # any architectures where these checks fail. if struct.calcsize(FORMAT_UINT32_LITTLE_ENDIAN) != 4: raise AssertionError('Format "I" is not a 32-bit number.') if struct.calcsize(FORMAT_UINT64_LITTLE_ENDIAN) != 8: raise AssertionError('Format "Q" is not a 64-bit number.') def PackTag(field_number, wire_type): """Returns an unsigned 32-bit integer that encodes the field number and wire type information in standard protocol message wire format. Args: field_number: Expected to be an integer in the range [1, 1 << 29) wire_type: One of the WIRETYPE_* constants. """ if not 0 <= wire_type <= _WIRETYPE_MAX: raise message.EncodeError('Unknown wire type: %d' % wire_type) return (field_number << TAG_TYPE_BITS) | wire_type def UnpackTag(tag): """The inverse of PackTag(). Given an unsigned 32-bit number, returns a (field_number, wire_type) tuple. """ return (tag >> TAG_TYPE_BITS), (tag & TAG_TYPE_MASK) def ZigZagEncode(value): """ZigZag Transform: Encodes signed integers so that they can be effectively used with varint encoding. 
See wire_format.h for more details. """ if value >= 0: return value << 1 return (value << 1) ^ (~0) def ZigZagDecode(value): """Inverse of ZigZagEncode().""" if not value & 0x1: return value >> 1 return (value >> 1) ^ (~0) # The *ByteSize() functions below return the number of bytes required to # serialize "field number + type" information and then serialize the value. def Int32ByteSize(field_number, int32): return Int64ByteSize(field_number, int32) def Int32ByteSizeNoTag(int32): return _VarUInt64ByteSizeNoTag(0xffffffffffffffff & int32) def Int64ByteSize(field_number, int64): # Have to convert to uint before calling UInt64ByteSize(). return UInt64ByteSize(field_number, 0xffffffffffffffff & int64) def UInt32ByteSize(field_number, uint32): return UInt64ByteSize(field_number, uint32) def UInt64ByteSize(field_number, uint64): return TagByteSize(field_number) + _VarUInt64ByteSizeNoTag(uint64) def SInt32ByteSize(field_number, int32): return UInt32ByteSize(field_number, ZigZagEncode(int32)) def SInt64ByteSize(field_number, int64): return UInt64ByteSize(field_number, ZigZagEncode(int64)) def Fixed32ByteSize(field_number, fixed32): return TagByteSize(field_number) + 4 def Fixed64ByteSize(field_number, fixed64): return TagByteSize(field_number) + 8 def SFixed32ByteSize(field_number, sfixed32): return TagByteSize(field_number) + 4 def SFixed64ByteSize(field_number, sfixed64): return TagByteSize(field_number) + 8 def FloatByteSize(field_number, flt): return TagByteSize(field_number) + 4 def DoubleByteSize(field_number, double): return TagByteSize(field_number) + 8 def BoolByteSize(field_number, b): return TagByteSize(field_number) + 1 def EnumByteSize(field_number, enum): return UInt32ByteSize(field_number, enum) def StringByteSize(field_number, string): return BytesByteSize(field_number, string.encode('utf-8')) def BytesByteSize(field_number, b): return (TagByteSize(field_number) + _VarUInt64ByteSizeNoTag(len(b)) + len(b)) def GroupByteSize(field_number, message): return (2 
* TagByteSize(field_number) # START and END group. + message.ByteSize()) def MessageByteSize(field_number, message): return (TagByteSize(field_number) + _VarUInt64ByteSizeNoTag(message.ByteSize()) + message.ByteSize()) def MessageSetItemByteSize(field_number, msg): # First compute the sizes of the tags. # There are 2 tags for the beginning and ending of the repeated group, that # is field number 1, one with field number 2 (type_id) and one with field # number 3 (message). total_size = (2 * TagByteSize(1) + TagByteSize(2) + TagByteSize(3)) # Add the number of bytes for type_id. total_size += _VarUInt64ByteSizeNoTag(field_number) message_size = msg.ByteSize() # The number of bytes for encoding the length of the message. total_size += _VarUInt64ByteSizeNoTag(message_size) # The size of the message. total_size += message_size return total_size def TagByteSize(field_number): """Returns the bytes required to serialize a tag with this field number.""" # Just pass in type 0, since the type won't affect the tag+type size. return _VarUInt64ByteSizeNoTag(PackTag(field_number, 0)) # Private helper function for the *ByteSize() functions above. def _VarUInt64ByteSizeNoTag(uint64): """Returns the number of bytes required to serialize a single varint using boundary value comparisons. (unrolled loop optimization -WPierce) uint64 must be unsigned. 
""" if uint64 <= 0x7f: return 1 if uint64 <= 0x3fff: return 2 if uint64 <= 0x1fffff: return 3 if uint64 <= 0xfffffff: return 4 if uint64 <= 0x7ffffffff: return 5 if uint64 <= 0x3ffffffffff: return 6 if uint64 <= 0x1ffffffffffff: return 7 if uint64 <= 0xffffffffffffff: return 8 if uint64 <= 0x7fffffffffffffff: return 9 if uint64 > UINT64_MAX: raise message.EncodeError('Value out of range: %d' % uint64) return 10 NON_PACKABLE_TYPES = ( descriptor.FieldDescriptor.TYPE_STRING, descriptor.FieldDescriptor.TYPE_GROUP, descriptor.FieldDescriptor.TYPE_MESSAGE, descriptor.FieldDescriptor.TYPE_BYTES ) def IsTypePackable(field_type): """Return true iff packable = true is valid for fields of this type. Args: field_type: a FieldDescriptor::Type value. Returns: True iff fields of this type are packable. """ return field_type not in NON_PACKABLE_TYPES
bsd-3-clause
CSC301H-Fall2013/JuakStore
site-packages/build/lib/django/test/testcases.py
31
46782
from __future__ import unicode_literals import difflib import json import os import re import sys from copy import copy from functools import wraps try: from urllib.parse import urlsplit, urlunsplit except ImportError: # Python 2 from urlparse import urlsplit, urlunsplit import select import socket import threading import errno from django.conf import settings from django.contrib.staticfiles.handlers import StaticFilesHandler from django.core import mail from django.core.exceptions import ValidationError, ImproperlyConfigured from django.core.handlers.wsgi import WSGIHandler from django.core.management import call_command from django.core.management.color import no_style from django.core.signals import request_started from django.core.servers.basehttp import (WSGIRequestHandler, WSGIServer, WSGIServerException) from django.core.urlresolvers import clear_url_caches from django.core.validators import EMPTY_VALUES from django.db import (transaction, connection, connections, DEFAULT_DB_ALIAS, reset_queries) from django.forms.fields import CharField from django.http import QueryDict from django.test import _doctest as doctest from django.test.client import Client from django.test.html import HTMLParseError, parse_html from django.test.signals import template_rendered from django.test.utils import (get_warnings_state, restore_warnings_state, override_settings, compare_xml, strip_quotes) from django.test.utils import ContextList from django.utils import unittest as ut2 from django.utils.encoding import force_text from django.utils import six from django.utils.unittest.util import safe_repr from django.utils.unittest import skipIf from django.views.static import serve __all__ = ('DocTestRunner', 'OutputChecker', 'TestCase', 'TransactionTestCase', 'SimpleTestCase', 'skipIfDBFeature', 'skipUnlessDBFeature') normalize_long_ints = lambda s: re.sub(r'(?<![\w])(\d+)L(?![\w])', '\\1', s) normalize_decimals = lambda s: re.sub(r"Decimal\('(\d+(\.\d*)?)'\)", lambda m: 
"Decimal(\"%s\")" % m.groups()[0], s) def to_list(value): """ Puts value into a list if it's not already one. Returns an empty list if value is None. """ if value is None: value = [] elif not isinstance(value, list): value = [value] return value real_commit = transaction.commit real_rollback = transaction.rollback real_enter_transaction_management = transaction.enter_transaction_management real_leave_transaction_management = transaction.leave_transaction_management real_managed = transaction.managed real_abort = transaction.abort def nop(*args, **kwargs): return def disable_transaction_methods(): transaction.commit = nop transaction.rollback = nop transaction.enter_transaction_management = nop transaction.leave_transaction_management = nop transaction.managed = nop transaction.abort = nop def restore_transaction_methods(): transaction.commit = real_commit transaction.rollback = real_rollback transaction.enter_transaction_management = real_enter_transaction_management transaction.leave_transaction_management = real_leave_transaction_management transaction.managed = real_managed transaction.abort = real_abort def assert_and_parse_html(self, html, user_msg, msg): try: dom = parse_html(html) except HTMLParseError as e: standardMsg = '%s\n%s' % (msg, e.msg) self.fail(self._formatMessage(user_msg, standardMsg)) return dom class OutputChecker(doctest.OutputChecker): def check_output(self, want, got, optionflags): """ The entry method for doctest output checking. 
Defers to a sequence of child checkers """ checks = (self.check_output_default, self.check_output_numeric, self.check_output_xml, self.check_output_json) for check in checks: if check(want, got, optionflags): return True return False def check_output_default(self, want, got, optionflags): """ The default comparator provided by doctest - not perfect, but good for most purposes """ return doctest.OutputChecker.check_output(self, want, got, optionflags) def check_output_numeric(self, want, got, optionflags): """Doctest does an exact string comparison of output, which means that some numerically equivalent values aren't equal. This check normalizes * long integers (22L) so that they equal normal integers. (22) * Decimals so that they are comparable, regardless of the change made to __repr__ in Python 2.6. """ return doctest.OutputChecker.check_output(self, normalize_decimals(normalize_long_ints(want)), normalize_decimals(normalize_long_ints(got)), optionflags) def check_output_xml(self, want, got, optionsflags): try: return compare_xml(want, got) except Exception: return False def check_output_json(self, want, got, optionsflags): """ Tries to compare want and got as if they were JSON-encoded data """ want, got = strip_quotes(want, got) try: want_json = json.loads(want) got_json = json.loads(got) except Exception: return False return want_json == got_json class DocTestRunner(doctest.DocTestRunner): def __init__(self, *args, **kwargs): doctest.DocTestRunner.__init__(self, *args, **kwargs) self.optionflags = doctest.ELLIPSIS def report_unexpected_exception(self, out, test, example, exc_info): doctest.DocTestRunner.report_unexpected_exception(self, out, test, example, exc_info) # Rollback, in case of database errors. Otherwise they'd have # side effects on other tests. 
for conn in connections: transaction.rollback_unless_managed(using=conn) class _AssertNumQueriesContext(object): def __init__(self, test_case, num, connection): self.test_case = test_case self.num = num self.connection = connection def __enter__(self): self.old_debug_cursor = self.connection.use_debug_cursor self.connection.use_debug_cursor = True self.starting_queries = len(self.connection.queries) request_started.disconnect(reset_queries) return self def __exit__(self, exc_type, exc_value, traceback): self.connection.use_debug_cursor = self.old_debug_cursor request_started.connect(reset_queries) if exc_type is not None: return final_queries = len(self.connection.queries) executed = final_queries - self.starting_queries self.test_case.assertEqual( executed, self.num, "%d queries executed, %d expected" % ( executed, self.num ) ) class _AssertTemplateUsedContext(object): def __init__(self, test_case, template_name): self.test_case = test_case self.template_name = template_name self.rendered_templates = [] self.rendered_template_names = [] self.context = ContextList() def on_template_render(self, sender, signal, template, context, **kwargs): self.rendered_templates.append(template) self.rendered_template_names.append(template.name) self.context.append(copy(context)) def test(self): return self.template_name in self.rendered_template_names def message(self): return '%s was not rendered.' % self.template_name def __enter__(self): template_rendered.connect(self.on_template_render) return self def __exit__(self, exc_type, exc_value, traceback): template_rendered.disconnect(self.on_template_render) if exc_type is not None: return if not self.test(): message = self.message() if len(self.rendered_templates) == 0: message += ' No template was rendered.' 
else: message += ' Following templates were rendered: %s' % ( ', '.join(self.rendered_template_names)) self.test_case.fail(message) class _AssertTemplateNotUsedContext(_AssertTemplateUsedContext): def test(self): return self.template_name not in self.rendered_template_names def message(self): return '%s was rendered.' % self.template_name class SimpleTestCase(ut2.TestCase): def __call__(self, result=None): """ Wrapper around default __call__ method to perform common Django test set up. This means that user-defined Test Cases aren't required to include a call to super().setUp(). """ testMethod = getattr(self, self._testMethodName) skipped = (getattr(self.__class__, "__unittest_skip__", False) or getattr(testMethod, "__unittest_skip__", False)) if not skipped: try: self._pre_setup() except (KeyboardInterrupt, SystemExit): raise except Exception: result.addError(self, sys.exc_info()) return super(SimpleTestCase, self).__call__(result) if not skipped: try: self._post_teardown() except (KeyboardInterrupt, SystemExit): raise except Exception: result.addError(self, sys.exc_info()) return def _pre_setup(self): pass def _post_teardown(self): pass def save_warnings_state(self): """ Saves the state of the warnings module """ self._warnings_state = get_warnings_state() def restore_warnings_state(self): """ Restores the state of the warnings module to the state saved by save_warnings_state() """ restore_warnings_state(self._warnings_state) def settings(self, **kwargs): """ A context manager that temporarily sets a setting and reverts back to the original value when exiting the context. """ return override_settings(**kwargs) def assertRaisesMessage(self, expected_exception, expected_message, callable_obj=None, *args, **kwargs): """ Asserts that the message in a raised exception matches the passed value. Args: expected_exception: Exception class expected to be raised. expected_message: expected error message string value. callable_obj: Function to be called. args: Extra args. 
kwargs: Extra kwargs. """ return six.assertRaisesRegex(self, expected_exception, re.escape(expected_message), callable_obj, *args, **kwargs) def assertFieldOutput(self, fieldclass, valid, invalid, field_args=None, field_kwargs=None, empty_value=''): """ Asserts that a form field behaves correctly with various inputs. Args: fieldclass: the class of the field to be tested. valid: a dictionary mapping valid inputs to their expected cleaned values. invalid: a dictionary mapping invalid inputs to one or more raised error messages. field_args: the args passed to instantiate the field field_kwargs: the kwargs passed to instantiate the field empty_value: the expected clean output for inputs in EMPTY_VALUES """ if field_args is None: field_args = [] if field_kwargs is None: field_kwargs = {} required = fieldclass(*field_args, **field_kwargs) optional = fieldclass(*field_args, **dict(field_kwargs, required=False)) # test valid inputs for input, output in valid.items(): self.assertEqual(required.clean(input), output) self.assertEqual(optional.clean(input), output) # test invalid inputs for input, errors in invalid.items(): with self.assertRaises(ValidationError) as context_manager: required.clean(input) self.assertEqual(context_manager.exception.messages, errors) with self.assertRaises(ValidationError) as context_manager: optional.clean(input) self.assertEqual(context_manager.exception.messages, errors) # test required inputs error_required = [force_text(required.error_messages['required'])] for e in EMPTY_VALUES: with self.assertRaises(ValidationError) as context_manager: required.clean(e) self.assertEqual(context_manager.exception.messages, error_required) self.assertEqual(optional.clean(e), empty_value) # test that max_length and min_length are always accepted if issubclass(fieldclass, CharField): field_kwargs.update({'min_length':2, 'max_length':20}) self.assertTrue(isinstance(fieldclass(*field_args, **field_kwargs), fieldclass)) def assertHTMLEqual(self, html1, html2, 
msg=None): """ Asserts that two HTML snippets are semantically the same. Whitespace in most cases is ignored, and attribute ordering is not significant. The passed-in arguments must be valid HTML. """ dom1 = assert_and_parse_html(self, html1, msg, 'First argument is not valid HTML:') dom2 = assert_and_parse_html(self, html2, msg, 'Second argument is not valid HTML:') if dom1 != dom2: standardMsg = '%s != %s' % ( safe_repr(dom1, True), safe_repr(dom2, True)) diff = ('\n' + '\n'.join(difflib.ndiff( six.text_type(dom1).splitlines(), six.text_type(dom2).splitlines()))) standardMsg = self._truncateMessage(standardMsg, diff) self.fail(self._formatMessage(msg, standardMsg)) def assertHTMLNotEqual(self, html1, html2, msg=None): """Asserts that two HTML snippets are not semantically equivalent.""" dom1 = assert_and_parse_html(self, html1, msg, 'First argument is not valid HTML:') dom2 = assert_and_parse_html(self, html2, msg, 'Second argument is not valid HTML:') if dom1 == dom2: standardMsg = '%s == %s' % ( safe_repr(dom1, True), safe_repr(dom2, True)) self.fail(self._formatMessage(msg, standardMsg)) def assertInHTML(self, needle, haystack, count = None, msg_prefix=''): needle = assert_and_parse_html(self, needle, None, 'First argument is not valid HTML:') haystack = assert_and_parse_html(self, haystack, None, 'Second argument is not valid HTML:') real_count = haystack.count(needle) if count is not None: self.assertEqual(real_count, count, msg_prefix + "Found %d instances of '%s' in response" " (expected %d)" % (real_count, needle, count)) else: self.assertTrue(real_count != 0, msg_prefix + "Couldn't find '%s' in response" % needle) def assertJSONEqual(self, raw, expected_data, msg=None): try: data = json.loads(raw) except ValueError: self.fail("First argument is not valid JSON: %r" % raw) if isinstance(expected_data, six.string_types): try: expected_data = json.loads(expected_data) except ValueError: self.fail("Second argument is not valid JSON: %r" % expected_data) 
self.assertEqual(data, expected_data, msg=msg) def assertXMLEqual(self, xml1, xml2, msg=None): """ Asserts that two XML snippets are semantically the same. Whitespace in most cases is ignored, and attribute ordering is not significant. The passed-in arguments must be valid XML. """ try: result = compare_xml(xml1, xml2) except Exception as e: standardMsg = 'First or second argument is not valid XML\n%s' % e self.fail(self._formatMessage(msg, standardMsg)) else: if not result: standardMsg = '%s != %s' % (safe_repr(xml1, True), safe_repr(xml2, True)) self.fail(self._formatMessage(msg, standardMsg)) def assertXMLNotEqual(self, xml1, xml2, msg=None): """ Asserts that two XML snippets are not semantically equivalent. Whitespace in most cases is ignored, and attribute ordering is not significant. The passed-in arguments must be valid XML. """ try: result = compare_xml(xml1, xml2) except Exception as e: standardMsg = 'First or second argument is not valid XML\n%s' % e self.fail(self._formatMessage(msg, standardMsg)) else: if result: standardMsg = '%s == %s' % (safe_repr(xml1, True), safe_repr(xml2, True)) self.fail(self._formatMessage(msg, standardMsg)) class TransactionTestCase(SimpleTestCase): # The class we'll use for the test client self.client. # Can be overridden in derived classes. client_class = Client # Subclasses can ask for resetting of auto increment sequence before each # test case reset_sequences = False def _pre_setup(self): """Performs any pre-test setup. This includes: * Flushing the database. * If the Test Case class has a 'fixtures' member, installing the named fixtures. * If the Test Case class has a 'urls' member, replace the ROOT_URLCONF with it. * Clearing the mail test outbox. """ self.client = self.client_class() self._fixture_setup() self._urlconf_setup() mail.outbox = [] def _databases_names(self, include_mirrors=True): # If the test case has a multi_db=True flag, act on all databases, # including mirrors or not. 
Otherwise, just on the default DB. if getattr(self, 'multi_db', False): return [alias for alias in connections if include_mirrors or not connections[alias].settings_dict['TEST_MIRROR']] else: return [DEFAULT_DB_ALIAS] def _reset_sequences(self, db_name): conn = connections[db_name] if conn.features.supports_sequence_reset: sql_list = \ conn.ops.sequence_reset_by_name_sql(no_style(), conn.introspection.sequence_list()) if sql_list: try: cursor = conn.cursor() for sql in sql_list: cursor.execute(sql) except Exception: transaction.rollback_unless_managed(using=db_name) raise transaction.commit_unless_managed(using=db_name) def _fixture_setup(self): for db_name in self._databases_names(include_mirrors=False): # Reset sequences if self.reset_sequences: self._reset_sequences(db_name) if hasattr(self, 'fixtures'): # We have to use this slightly awkward syntax due to the fact # that we're using *args and **kwargs together. call_command('loaddata', *self.fixtures, **{'verbosity': 0, 'database': db_name, 'skip_validation': True}) def _urlconf_setup(self): if hasattr(self, 'urls'): self._old_root_urlconf = settings.ROOT_URLCONF settings.ROOT_URLCONF = self.urls clear_url_caches() def _post_teardown(self): """ Performs any post-test things. This includes: * Putting back the original ROOT_URLCONF if it was changed. * Force closing the connection, so that the next test gets a clean cursor. """ self._fixture_teardown() self._urlconf_teardown() # Some DB cursors include SQL statements as part of cursor # creation. If you have a test that does rollback, the effect # of these statements is lost, which can effect the operation # of tests (e.g., losing a timezone setting causing objects to # be created with the wrong time). # To make sure this doesn't happen, get a clean connection at the # start of every test. 
for conn in connections.all(): conn.close() def _fixture_teardown(self): # Roll back any pending transactions in order to avoid a deadlock # during flush when TEST_MIRROR is used (#18984). for conn in connections.all(): conn.rollback_unless_managed() for db in self._databases_names(include_mirrors=False): call_command('flush', verbosity=0, interactive=False, database=db, skip_validation=True, reset_sequences=False) def _urlconf_teardown(self): if hasattr(self, '_old_root_urlconf'): settings.ROOT_URLCONF = self._old_root_urlconf clear_url_caches() def assertRedirects(self, response, expected_url, status_code=302, target_status_code=200, host=None, msg_prefix=''): """Asserts that a response redirected to a specific URL, and that the redirect URL can be loaded. Note that assertRedirects won't work for external links since it uses TestClient to do a request. """ if msg_prefix: msg_prefix += ": " if hasattr(response, 'redirect_chain'): # The request was a followed redirect self.assertTrue(len(response.redirect_chain) > 0, msg_prefix + "Response didn't redirect as expected: Response" " code was %d (expected %d)" % (response.status_code, status_code)) self.assertEqual(response.redirect_chain[0][1], status_code, msg_prefix + "Initial response didn't redirect as expected:" " Response code was %d (expected %d)" % (response.redirect_chain[0][1], status_code)) url, status_code = response.redirect_chain[-1] self.assertEqual(response.status_code, target_status_code, msg_prefix + "Response didn't redirect as expected: Final" " Response code was %d (expected %d)" % (response.status_code, target_status_code)) else: # Not a followed redirect self.assertEqual(response.status_code, status_code, msg_prefix + "Response didn't redirect as expected: Response" " code was %d (expected %d)" % (response.status_code, status_code)) url = response['Location'] scheme, netloc, path, query, fragment = urlsplit(url) redirect_response = response.client.get(path, QueryDict(query)) # Get the 
redirection page, using the same client that was used # to obtain the original response. self.assertEqual(redirect_response.status_code, target_status_code, msg_prefix + "Couldn't retrieve redirection page '%s':" " response code was %d (expected %d)" % (path, redirect_response.status_code, target_status_code)) e_scheme, e_netloc, e_path, e_query, e_fragment = urlsplit( expected_url) if not (e_scheme or e_netloc): expected_url = urlunsplit(('http', host or 'testserver', e_path, e_query, e_fragment)) self.assertEqual(url, expected_url, msg_prefix + "Response redirected to '%s', expected '%s'" % (url, expected_url)) def assertContains(self, response, text, count=None, status_code=200, msg_prefix='', html=False): """ Asserts that a response indicates that some content was retrieved successfully, (i.e., the HTTP status code was as expected), and that ``text`` occurs ``count`` times in the content of the response. If ``count`` is None, the count doesn't matter - the assertion is true if the text occurs at least once in the response. """ # If the response supports deferred rendering and hasn't been rendered # yet, then ensure that it does get rendered before proceeding further. 
if (hasattr(response, 'render') and callable(response.render) and not response.is_rendered): response.render() if msg_prefix: msg_prefix += ": " self.assertEqual(response.status_code, status_code, msg_prefix + "Couldn't retrieve content: Response code was %d" " (expected %d)" % (response.status_code, status_code)) if response.streaming: content = b''.join(response.streaming_content) else: content = response.content if not isinstance(text, bytes) or html: text = force_text(text, encoding=response._charset) content = content.decode(response._charset) text_repr = "'%s'" % text else: text_repr = repr(text) if html: content = assert_and_parse_html(self, content, None, "Response's content is not valid HTML:") text = assert_and_parse_html(self, text, None, "Second argument is not valid HTML:") real_count = content.count(text) if count is not None: self.assertEqual(real_count, count, msg_prefix + "Found %d instances of %s in response" " (expected %d)" % (real_count, text_repr, count)) else: self.assertTrue(real_count != 0, msg_prefix + "Couldn't find %s in response" % text_repr) def assertNotContains(self, response, text, status_code=200, msg_prefix='', html=False): """ Asserts that a response indicates that some content was retrieved successfully, (i.e., the HTTP status code was as expected), and that ``text`` doesn't occurs in the content of the response. """ # If the response supports deferred rendering and hasn't been rendered # yet, then ensure that it does get rendered before proceeding further. 
if (hasattr(response, 'render') and callable(response.render) and not response.is_rendered): response.render() if msg_prefix: msg_prefix += ": " self.assertEqual(response.status_code, status_code, msg_prefix + "Couldn't retrieve content: Response code was %d" " (expected %d)" % (response.status_code, status_code)) content = response.content if not isinstance(text, bytes) or html: text = force_text(text, encoding=response._charset) content = content.decode(response._charset) text_repr = "'%s'" % text else: text_repr = repr(text) if html: content = assert_and_parse_html(self, content, None, 'Response\'s content is not valid HTML:') text = assert_and_parse_html(self, text, None, 'Second argument is not valid HTML:') self.assertEqual(content.count(text), 0, msg_prefix + "Response should not contain %s" % text_repr) def assertFormError(self, response, form, field, errors, msg_prefix=''): """ Asserts that a form used to render the response has a specific field error. """ if msg_prefix: msg_prefix += ": " # Put context(s) into a list to simplify processing. contexts = to_list(response.context) if not contexts: self.fail(msg_prefix + "Response did not use any contexts to " "render the response") # Put error(s) into a list to simplify processing. errors = to_list(errors) # Search all contexts for the error. 
found_form = False for i,context in enumerate(contexts): if form not in context: continue found_form = True for err in errors: if field: if field in context[form].errors: field_errors = context[form].errors[field] self.assertTrue(err in field_errors, msg_prefix + "The field '%s' on form '%s' in" " context %d does not contain the error '%s'" " (actual errors: %s)" % (field, form, i, err, repr(field_errors))) elif field in context[form].fields: self.fail(msg_prefix + "The field '%s' on form '%s'" " in context %d contains no errors" % (field, form, i)) else: self.fail(msg_prefix + "The form '%s' in context %d" " does not contain the field '%s'" % (form, i, field)) else: non_field_errors = context[form].non_field_errors() self.assertTrue(err in non_field_errors, msg_prefix + "The form '%s' in context %d does not" " contain the non-field error '%s'" " (actual errors: %s)" % (form, i, err, non_field_errors)) if not found_form: self.fail(msg_prefix + "The form '%s' was not used to render the" " response" % form) def assertTemplateUsed(self, response=None, template_name=None, msg_prefix=''): """ Asserts that the template with the provided name was used in rendering the response. Also usable as context manager. """ if response is None and template_name is None: raise TypeError('response and/or template_name argument must be provided') if msg_prefix: msg_prefix += ": " # Use assertTemplateUsed as context manager. if not hasattr(response, 'templates') or (response is None and template_name): if response: template_name = response response = None context = _AssertTemplateUsedContext(self, template_name) return context template_names = [t.name for t in response.templates] if not template_names: self.fail(msg_prefix + "No templates used to render the response") self.assertTrue(template_name in template_names, msg_prefix + "Template '%s' was not a template used to render" " the response. 
Actual template(s) used: %s" % (template_name, ', '.join(template_names))) def assertTemplateNotUsed(self, response=None, template_name=None, msg_prefix=''): """ Asserts that the template with the provided name was NOT used in rendering the response. Also usable as context manager. """ if response is None and template_name is None: raise TypeError('response and/or template_name argument must be provided') if msg_prefix: msg_prefix += ": " # Use assertTemplateUsed as context manager. if not hasattr(response, 'templates') or (response is None and template_name): if response: template_name = response response = None context = _AssertTemplateNotUsedContext(self, template_name) return context template_names = [t.name for t in response.templates] self.assertFalse(template_name in template_names, msg_prefix + "Template '%s' was used unexpectedly in rendering" " the response" % template_name) def assertQuerysetEqual(self, qs, values, transform=repr, ordered=True): items = six.moves.map(transform, qs) if not ordered: return self.assertEqual(set(items), set(values)) return self.assertEqual(list(items), values) def assertNumQueries(self, num, func=None, *args, **kwargs): using = kwargs.pop("using", DEFAULT_DB_ALIAS) conn = connections[using] context = _AssertNumQueriesContext(self, num, conn) if func is None: return context with context: func(*args, **kwargs) def connections_support_transactions(): """ Returns True if all connections support transactions. """ return all(conn.features.supports_transactions for conn in connections.all()) class TestCase(TransactionTestCase): """ Does basically the same as TransactionTestCase, but surrounds every test with a transaction, monkey-patches the real transaction management routines to do nothing, and rollsback the test transaction at the end of the test. You have to use TransactionTestCase, if you need transaction management inside a test. 
""" def _fixture_setup(self): if not connections_support_transactions(): return super(TestCase, self)._fixture_setup() assert not self.reset_sequences, 'reset_sequences cannot be used on TestCase instances' for db_name in self._databases_names(): transaction.enter_transaction_management(using=db_name) transaction.managed(True, using=db_name) disable_transaction_methods() from django.contrib.sites.models import Site Site.objects.clear_cache() for db in self._databases_names(include_mirrors=False): if hasattr(self, 'fixtures'): call_command('loaddata', *self.fixtures, **{ 'verbosity': 0, 'commit': False, 'database': db, 'skip_validation': True, }) def _fixture_teardown(self): if not connections_support_transactions(): return super(TestCase, self)._fixture_teardown() restore_transaction_methods() for db in self._databases_names(): transaction.rollback(using=db) transaction.leave_transaction_management(using=db) def _deferredSkip(condition, reason): def decorator(test_func): if not (isinstance(test_func, type) and issubclass(test_func, TestCase)): @wraps(test_func) def skip_wrapper(*args, **kwargs): if condition(): raise ut2.SkipTest(reason) return test_func(*args, **kwargs) test_item = skip_wrapper else: test_item = test_func test_item.__unittest_skip_why__ = reason return test_item return decorator def skipIfDBFeature(feature): """ Skip a test if a database has the named feature """ return _deferredSkip(lambda: getattr(connection.features, feature), "Database has feature %s" % feature) def skipUnlessDBFeature(feature): """ Skip a test unless a database has the named feature """ return _deferredSkip(lambda: not getattr(connection.features, feature), "Database doesn't support feature %s" % feature) class QuietWSGIRequestHandler(WSGIRequestHandler): """ Just a regular WSGIRequestHandler except it doesn't log to the standard output any of the requests received, so as to not clutter the output for the tests' results. 
""" def log_message(*args): pass if sys.version_info >= (3, 3, 0): _ImprovedEvent = threading.Event elif sys.version_info >= (2, 7, 0): _ImprovedEvent = threading._Event else: class _ImprovedEvent(threading._Event): """ Does the same as `threading.Event` except it overrides the wait() method with some code borrowed from Python 2.7 to return the set state of the event (see: http://hg.python.org/cpython/rev/b5aa8aa78c0f/). This allows to know whether the wait() method exited normally or because of the timeout. This class can be removed when Django supports only Python >= 2.7. """ def wait(self, timeout=None): self._Event__cond.acquire() try: if not self._Event__flag: self._Event__cond.wait(timeout) return self._Event__flag finally: self._Event__cond.release() class StoppableWSGIServer(WSGIServer): """ The code in this class is borrowed from the `SocketServer.BaseServer` class in Python 2.6. The important functionality here is that the server is non- blocking and that it can be shut down at any moment. This is made possible by the server regularly polling the socket and checking if it has been asked to stop. Note for the future: Once Django stops supporting Python 2.6, this class can be removed as `WSGIServer` will have this ability to shutdown on demand and will not require the use of the _ImprovedEvent class whose code is borrowed from Python 2.7. """ def __init__(self, *args, **kwargs): super(StoppableWSGIServer, self).__init__(*args, **kwargs) self.__is_shut_down = _ImprovedEvent() self.__serving = False def serve_forever(self, poll_interval=0.5): """ Handle one request at a time until shutdown. Polls for shutdown every poll_interval seconds. """ self.__serving = True self.__is_shut_down.clear() while self.__serving: r, w, e = select.select([self], [], [], poll_interval) if r: self._handle_request_noblock() self.__is_shut_down.set() def shutdown(self): """ Stops the serve_forever loop. Blocks until the loop has finished. 
This must be called while serve_forever() is running in another thread, or it will deadlock. """ self.__serving = False if not self.__is_shut_down.wait(2): raise RuntimeError( "Failed to shutdown the live test server in 2 seconds. The " "server might be stuck or generating a slow response.") def handle_request(self): """Handle one request, possibly blocking. """ fd_sets = select.select([self], [], [], None) if not fd_sets[0]: return self._handle_request_noblock() def _handle_request_noblock(self): """ Handle one request, without blocking. I assume that select.select has returned that the socket is readable before this function was called, so there should be no risk of blocking in get_request(). """ try: request, client_address = self.get_request() except socket.error: return if self.verify_request(request, client_address): try: self.process_request(request, client_address) except Exception: self.handle_error(request, client_address) self.close_request(request) class _MediaFilesHandler(StaticFilesHandler): """ Handler for serving the media files. This is a private class that is meant to be used solely as a convenience by LiveServerThread. """ def get_base_dir(self): return settings.MEDIA_ROOT def get_base_url(self): return settings.MEDIA_URL def serve(self, request): relative_url = request.path[len(self.base_url[2]):] return serve(request, relative_url, document_root=self.get_base_dir()) class LiveServerThread(threading.Thread): """ Thread for running a live http server while the tests are running. """ def __init__(self, host, possible_ports, connections_override=None): self.host = host self.port = None self.possible_ports = possible_ports self.is_ready = threading.Event() self.error = None self.connections_override = connections_override super(LiveServerThread, self).__init__() def run(self): """ Sets up the live server and databases, and then loops over handling http requests. 
""" if self.connections_override: from django.db import connections # Override this thread's database connections with the ones # provided by the main thread. for alias, conn in self.connections_override.items(): connections[alias] = conn try: # Create the handler for serving static and media files handler = StaticFilesHandler(_MediaFilesHandler(WSGIHandler())) # Go through the list of possible ports, hoping that we can find # one that is free to use for the WSGI server. for index, port in enumerate(self.possible_ports): try: self.httpd = StoppableWSGIServer( (self.host, port), QuietWSGIRequestHandler) except WSGIServerException as e: if (index + 1 < len(self.possible_ports) and hasattr(e.args[0], 'errno') and e.args[0].errno == errno.EADDRINUSE): # This port is already in use, so we go on and try with # the next one in the list. continue else: # Either none of the given ports are free or the error # is something else than "Address already in use". So # we let that error bubble up to the main thread. raise else: # A free port was found. self.port = port break self.httpd.set_app(handler) self.is_ready.set() self.httpd.serve_forever() except Exception as e: self.error = e self.is_ready.set() def join(self, timeout=None): if hasattr(self, 'httpd'): # Stop the WSGI server self.httpd.shutdown() self.httpd.server_close() super(LiveServerThread, self).join(timeout) class LiveServerTestCase(TransactionTestCase): """ Does basically the same as TransactionTestCase but also launches a live http server in a separate thread so that the tests may use another testing framework, such as Selenium for example, instead of the built-in dummy client. Note that it inherits from TransactionTestCase instead of TestCase because the threads do not share the same transactions (unless if using in-memory sqlite) and each thread needs to commit all their transactions so that the other thread can see the changes. 
""" @property def live_server_url(self): return 'http://%s:%s' % ( self.server_thread.host, self.server_thread.port) @classmethod def setUpClass(cls): connections_override = {} for conn in connections.all(): # If using in-memory sqlite databases, pass the connections to # the server thread. if (conn.settings_dict['ENGINE'].rsplit('.', 1)[-1] in ('sqlite3', 'spatialite') and conn.settings_dict['NAME'] == ':memory:'): # Explicitly enable thread-shareability for this connection conn.allow_thread_sharing = True connections_override[conn.alias] = conn # Launch the live server's thread specified_address = os.environ.get( 'DJANGO_LIVE_TEST_SERVER_ADDRESS', 'localhost:8081') # The specified ports may be of the form '8000-8010,8080,9200-9300' # i.e. a comma-separated list of ports or ranges of ports, so we break # it down into a detailed list of all possible ports. possible_ports = [] try: host, port_ranges = specified_address.split(':') for port_range in port_ranges.split(','): # A port range can be of either form: '8000' or '8000-8010'. extremes = list(map(int, port_range.split('-'))) assert len(extremes) in [1, 2] if len(extremes) == 1: # Port range of the form '8000' possible_ports.append(extremes[0]) else: # Port range of the form '8000-8010' for port in range(extremes[0], extremes[1] + 1): possible_ports.append(port) except Exception: raise ImproperlyConfigured('Invalid address ("%s") for live ' 'server.' % specified_address) cls.server_thread = LiveServerThread( host, possible_ports, connections_override) cls.server_thread.daemon = True cls.server_thread.start() # Wait for the live server to be ready cls.server_thread.is_ready.wait() if cls.server_thread.error: raise cls.server_thread.error super(LiveServerTestCase, cls).setUpClass() @classmethod def tearDownClass(cls): # There may not be a 'server_thread' attribute if setUpClass() for some # reasons has raised an exception. 
if hasattr(cls, 'server_thread'): # Terminate the live server's thread cls.server_thread.join() # Restore sqlite connections' non-sharability for conn in connections.all(): if (conn.settings_dict['ENGINE'].rsplit('.', 1)[-1] in ('sqlite3', 'spatialite') and conn.settings_dict['NAME'] == ':memory:'): conn.allow_thread_sharing = False super(LiveServerTestCase, cls).tearDownClass()
mit
heeraj123/oh-mainline
mysite/search/migrations/0023_re_run_icon_scaling.py
17
3768
# This file is part of OpenHatch. # Copyright (C) 2009 OpenHatch, Inc. # Copyright (C) 2010 John Stumpo # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from south.db import db from django.db import models from mysite.search.models import * class Migration: no_dry_run = True def forwards(self, orm): "Write your forwards migration here" def backwards(self, orm): "Write your backwards migration here" models = { 'search.bug': { 'bize_size_tag_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'canonical_bug_link': ('django.db.models.fields.URLField', [], {'max_length': '200'}), 'date_reported': ('django.db.models.fields.DateTimeField', [], {}), 'description': ('django.db.models.fields.TextField', [], {}), 'good_for_newcomers': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'importance': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'last_polled': ('django.db.models.fields.DateTimeField', [], {}), 'last_touched': ('django.db.models.fields.DateTimeField', [], {}), 'looks_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'people_involved': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': 
"orm['search.Project']"}), 'status': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'submitter_realname': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}), 'submitter_username': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}) }, 'search.project': { 'date_icon_was_fetched_from_ohloh': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}), 'icon_for_profile': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}), 'icon_for_search_result': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}), 'icon_raw': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}), 'icon_smaller_for_badge': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}), 'icon_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'language': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}) } } complete_apps = ['search']
agpl-3.0
beni55/django
django/contrib/gis/db/backends/base/features.py
93
3542
from functools import partial from django.contrib.gis.db.models import aggregates class BaseSpatialFeatures(object): gis_enabled = True # Does the database contain a SpatialRefSys model to store SRID information? has_spatialrefsys_table = True # Does the backend support the django.contrib.gis.utils.add_srs_entry() utility? supports_add_srs_entry = True # Does the backend introspect GeometryField to its subtypes? supports_geometry_field_introspection = True # Reference implementation of 3D functions is: # http://postgis.net/docs/PostGIS_Special_Functions_Index.html#PostGIS_3D_Functions supports_3d_functions = False # Does the database support SRID transform operations? supports_transform = True # Do geometric relationship operations operate on real shapes (or only on bounding boxes)? supports_real_shape_operations = True # Can geometry fields be null? supports_null_geometries = True # Can the `distance` GeoQuerySet method be applied on geodetic coordinate systems? supports_distance_geodetic = True # Is the database able to count vertices on polygons (with `num_points`)? supports_num_points_poly = True # The following properties indicate if the database backend support # certain lookups (dwithin, left and right, relate, ...) supports_distances_lookups = True supports_left_right_lookups = False @property def supports_bbcontains_lookup(self): return 'bbcontains' in self.connection.ops.gis_operators @property def supports_contained_lookup(self): return 'contained' in self.connection.ops.gis_operators @property def supports_crosses_lookup(self): return 'crosses' in self.connection.ops.gis_operators @property def supports_dwithin_lookup(self): return 'dwithin' in self.connection.ops.gis_operators @property def supports_relate_lookup(self): return 'relate' in self.connection.ops.gis_operators # For each of those methods, the class will have a property named # `has_<name>_method` (defined in __init__) which accesses connection.ops # to determine GIS method availability. 
geoqueryset_methods = ( 'area', 'centroid', 'difference', 'distance', 'distance_spheroid', 'envelope', 'force_rhr', 'geohash', 'gml', 'intersection', 'kml', 'length', 'num_geom', 'perimeter', 'point_on_surface', 'reverse', 'scale', 'snap_to_grid', 'svg', 'sym_difference', 'transform', 'translate', 'union', 'unionagg', ) # Specifies whether the Collect and Extent aggregates are supported by the database @property def supports_collect_aggr(self): return aggregates.Collect not in self.connection.ops.disallowed_aggregates @property def supports_extent_aggr(self): return aggregates.Extent not in self.connection.ops.disallowed_aggregates @property def supports_make_line_aggr(self): return aggregates.MakeLine not in self.connection.ops.disallowed_aggregates def __init__(self, *args): super(BaseSpatialFeatures, self).__init__(*args) for method in self.geoqueryset_methods: # Add dynamically properties for each GQS method, e.g. has_force_rhr_method, etc. setattr(self.__class__, 'has_%s_method' % method, property(partial(BaseSpatialFeatures.has_ops_method, method=method))) def has_ops_method(self, method): return getattr(self.connection.ops, method, False)
bsd-3-clause
NullSoldier/django
tests/app_loading/tests.py
288
3113
from __future__ import unicode_literals import os from django.apps import apps from django.test import SimpleTestCase from django.test.utils import extend_sys_path from django.utils import six from django.utils._os import upath class EggLoadingTest(SimpleTestCase): def setUp(self): self.egg_dir = '%s/eggs' % os.path.dirname(upath(__file__)) def tearDown(self): apps.clear_cache() def test_egg1(self): """Models module can be loaded from an app in an egg""" egg_name = '%s/modelapp.egg' % self.egg_dir with extend_sys_path(egg_name): with self.settings(INSTALLED_APPS=['app_with_models']): models_module = apps.get_app_config('app_with_models').models_module self.assertIsNotNone(models_module) del apps.all_models['app_with_models'] def test_egg2(self): """Loading an app from an egg that has no models returns no models (and no error)""" egg_name = '%s/nomodelapp.egg' % self.egg_dir with extend_sys_path(egg_name): with self.settings(INSTALLED_APPS=['app_no_models']): models_module = apps.get_app_config('app_no_models').models_module self.assertIsNone(models_module) del apps.all_models['app_no_models'] def test_egg3(self): """Models module can be loaded from an app located under an egg's top-level package""" egg_name = '%s/omelet.egg' % self.egg_dir with extend_sys_path(egg_name): with self.settings(INSTALLED_APPS=['omelet.app_with_models']): models_module = apps.get_app_config('app_with_models').models_module self.assertIsNotNone(models_module) del apps.all_models['app_with_models'] def test_egg4(self): """Loading an app with no models from under the top-level egg package generates no error""" egg_name = '%s/omelet.egg' % self.egg_dir with extend_sys_path(egg_name): with self.settings(INSTALLED_APPS=['omelet.app_no_models']): models_module = apps.get_app_config('app_no_models').models_module self.assertIsNone(models_module) del apps.all_models['app_no_models'] def test_egg5(self): """Loading an app from an egg that has an import error in its models module raises that 
error""" egg_name = '%s/brokenapp.egg' % self.egg_dir with extend_sys_path(egg_name): with six.assertRaisesRegex(self, ImportError, 'modelz'): with self.settings(INSTALLED_APPS=['broken_app']): pass class GetModelsTest(SimpleTestCase): def setUp(self): from .not_installed import models self.not_installed_module = models def test_get_model_only_returns_installed_models(self): with self.assertRaises(LookupError): apps.get_model("not_installed", "NotInstalledModel") def test_get_models_only_returns_installed_models(self): self.assertNotIn( "NotInstalledModel", [m.__name__ for m in apps.get_models()])
bsd-3-clause
dcosentino/edx-platform
lms/djangoapps/instructor/features/bulk_email.py
116
6813
""" Define steps for bulk email acceptance test. """ # pylint: disable=missing-docstring # pylint: disable=redefined-outer-name from lettuce import world, step from lettuce.django import mail from nose.tools import assert_in, assert_equal # pylint: disable=no-name-in-module from django.core.management import call_command from django.conf import settings from courseware.tests.factories import StaffFactory, InstructorFactory @step(u'Given there is a course with a staff, instructor and student') def make_populated_course(step): # pylint: disable=unused-argument ## This is different than the function defined in common.py because it enrolls ## a staff, instructor, and student member regardless of what `role` is, then ## logs `role` in. This is to ensure we have 3 class participants to email. # Clear existing courses to avoid conflicts world.clear_courses() # Create a new course course = world.CourseFactory.create( org='edx', number='888', display_name='Bulk Email Test Course' ) world.bulk_email_course_key = course.id try: # See if we've defined the instructor & staff user yet world.bulk_email_instructor except AttributeError: # Make & register an instructor for the course world.bulk_email_instructor = InstructorFactory(course_key=world.bulk_email_course_key) world.enroll_user(world.bulk_email_instructor, world.bulk_email_course_key) # Make & register a staff member world.bulk_email_staff = StaffFactory(course_key=course.id) world.enroll_user(world.bulk_email_staff, world.bulk_email_course_key) # Make & register a student world.register_by_course_key( course.id, username='student', password='test', is_staff=False ) # Store the expected recipients # given each "send to" option staff_emails = [world.bulk_email_staff.email, world.bulk_email_instructor.email] world.expected_addresses = { 'course staff': staff_emails, 'students, staff, and instructors': staff_emails + ['student@edx.org'] } # Dictionary mapping a description of the email recipient # to the corresponding 
<option> value in the UI. SEND_TO_OPTIONS = { 'myself': 'myself', 'course staff': 'staff', 'students, staff, and instructors': 'all' } @step(u'I am logged in to the course as "([^"]*)"') def log_into_the_course(step, role): # pylint: disable=unused-argument # Store the role assert_in(role, ['instructor', 'staff']) # Log in as the an instructor or staff for the course my_email = world.bulk_email_instructor.email if role == 'instructor': world.log_in( username=world.bulk_email_instructor.username, password='test', email=my_email, name=world.bulk_email_instructor.profile.name ) else: my_email = world.bulk_email_staff.email world.log_in( username=world.bulk_email_staff.username, password='test', email=my_email, name=world.bulk_email_staff.profile.name ) # Store the "myself" send to option world.expected_addresses['myself'] = [my_email] @step(u'I send email to "([^"]*)"') def when_i_send_an_email(step, recipient): # pylint: disable=unused-argument # Check that the recipient is valid assert_in( recipient, SEND_TO_OPTIONS, msg="Invalid recipient: {}".format(recipient) ) # Clear the queue of existing emails while not mail.queue.empty(): # pylint: disable=no-member mail.queue.get() # pylint: disable=no-member # Because we flush the database before each run, # we need to ensure that the email template fixture # is re-loaded into the database call_command('loaddata', 'course_email_template.json') # Go to the email section of the instructor dash url = '/courses/{}'.format(world.bulk_email_course_key) world.visit(url) world.css_click('a[href="{}/instructor"]'.format(url)) world.css_click('a[data-section="send_email"]') # Select the recipient world.select_option('send_to', SEND_TO_OPTIONS[recipient]) # Enter subject and message world.css_fill('input#id_subject', 'Hello') with world.browser.get_iframe('mce_0_ifr') as iframe: editor = iframe.find_by_id('tinymce')[0] editor.fill('test message') # Click send world.css_click('input[name="send"]', dismiss_alert=True) # Expect to see a 
message that the email was sent expected_msg = "Your email was successfully queued for sending." world.wait_for_visible('#request-response') assert_in( expected_msg, world.css_text('#request-response'), msg="Could not find email success message." ) UNSUBSCRIBE_MSG = 'To stop receiving email like this' @step(u'Email is sent to "([^"]*)"') def then_the_email_is_sent(step, recipient): # pylint: disable=unused-argument # Check that the recipient is valid assert_in( recipient, SEND_TO_OPTIONS, msg="Invalid recipient: {}".format(recipient) ) # Retrieve messages. Because we are using celery in "always eager" # mode, we expect all messages to be sent by this point. messages = [] while not mail.queue.empty(): # pylint: disable=no-member messages.append(mail.queue.get()) # pylint: disable=no-member # Check that we got the right number of messages assert_equal( len(messages), len(world.expected_addresses[recipient]), msg="Received {0} instead of {1} messages for {2}".format( len(messages), len(world.expected_addresses[recipient]), recipient ) ) # Check that the message properties were correct recipients = [] for msg in messages: assert_in('Hello', msg.subject) assert_in(settings.BULK_EMAIL_DEFAULT_FROM_EMAIL, msg.from_email) # Message body should have the message we sent # and an unsubscribe message assert_in('test message', msg.body) assert_in(UNSUBSCRIBE_MSG, msg.body) # Should have alternative HTML form assert_equal(len(msg.alternatives), 1) content, mime_type = msg.alternatives[0] assert_equal(mime_type, 'text/html') assert_in('test message', content) assert_in(UNSUBSCRIBE_MSG, content) # Store the recipient address so we can verify later recipients.extend(msg.recipients()) # Check that the messages were sent to the right people # Because "myself" can vary based on who sent the message, # we use the world.expected_addresses dict we configured # in an earlier step. for addr in world.expected_addresses[recipient]: assert_in(addr, recipients)
agpl-3.0
arenadata/ambari
ambari-server/src/main/resources/stacks/ADH/1.0/services/HBASE/package/scripts/hbase_thrift_server.py
2
1815
""" Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from resource_management.libraries.functions import conf_select from resource_management.libraries.script import Script from hbase_thrift import hbase_thrift_server from hbase import hbase class HBaseThriftServer(Script): def configure(self, env): import params env.set_params(params) hbase(name='hbasethriftserver') def start(self, env, upgrade_type=None): import params env.set_params(params) self.configure(env) hbase_thrift_server('start') def stop(self, env, upgrade_type=None): import params env.set_params(params) hbase_thrift_server('stop') def pre_upgrade_restart(self, env, upgrade_type=None): import params env.set_params(params) def status(self, env): import status_params env.set_params(status_params) hbase_thrift_server('status') def security_status(self, env): check_process_status(status_params.spark_thrift_server_pid_file) self.put_structured_out({"securityState": "UNSECURED"}) if __name__ == "__main__": HBaseThriftServer().execute()
apache-2.0
archyufa/CloudFerry
condensation/group.py
9
3406
# Copyright (c) 2014 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the License); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an AS IS BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and# # limitations under the License. class Group(object): @classmethod def from_dict(cls, data, vm_dict): """ This classmethod creates tree of groups recursively """ groups_list = [] for group_name, group_data in data.items(): group = cls(group_name) groups_list.append(group) # We assume that our group_data is either "dict" or iterable if isinstance(group_data, dict): # if group_data is "dict" group.add_groups(cls.from_dict(group_data, vm_dict)) else: # if group_data is iterable for i in group_data: vm_obj = vm_dict.get(i) if vm_obj: group.add_vms({i: vm_obj}) return sorted(groups_list, key=lambda a: a.capacity, reverse=True) def __init__(self, name="null"): self.name = name self.children = [] self.parent = None self.vms = {} def add_groups(self, groups_list): """ This method adds children to self """ self.children.extend(groups_list) for group in groups_list: group.parent = self def add_vms(self, vms_dict): """ This method adds vms to self """ self.vms.update(vms_dict) def get_all_vms(self): """ This method gets vms from all children recursively """ result = self.vms.values() for child in self.children: result.extend(child.get_all_vms()) # make list distinct return list(set(result)) @property def capacity(self): """ This method calculates number of ram, cores required by all vms of this group """ flavors = [vm.flavor for vm in self.get_all_vms()] return sum(fl.ram for fl in flavors), sum(fl.core for fl in flavors) def parent_count(self, 
count=0): """ This method helps us to build pretty tree """ if self.parent: return self.parent.parent_count(count + 1) return count def __str__(self): """ Print tree of this group and underlying ones """ lines = [] status_line = ("GROUP -> %s" % self.name) status_line += ("\tRAM -> %d\t CORE -> %f" % self.capacity) lines.append(status_line) if self.vms: lines.append("GROUP_VMS:") lines.extend([" -" + vm for vm in self.vms.keys()]) prefix = "\t" * self.parent_count() info = "\n".join([prefix + line for line in lines]) children_info = "\n".join([str(i) for i in self.children]) if children_info: return info + "\n" + children_info else: return info
apache-2.0
maggienj/ActiveData
mo_kwargs/__init__.py
2
5140
# encoding: utf-8 # # # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this file, # You can obtain one at http://mozilla.org/MPL/2.0/. # # Author: Kyle Lahnakoski (kyle@lahnakoski.com) # from __future__ import absolute_import from __future__ import division from __future__ import unicode_literals from collections import Mapping from mo_dots import zip as dict_zip, get_logger, wrap from mo_logs import Except def override(func): """ THIS DECORATOR WILL PUT ALL PARAMETERS INTO THE `kwargs` PARAMETER AND PUT ALL `kwargs` PARAMETERS INTO THE FUNCTION PARAMETERS. THIS HAS BOTH THE BENEFIT OF HAVING ALL PARAMETERS IN ONE PLACE (kwargs) AND ALL PARAMETERS ARE EXPLICIT FOR CLARITY. OF COURSE, THIS MEANS PARAMETER ASSIGNMENT MAY NOT BE UNIQUE: VALUES CAN COME FROM EXPLICIT CALL PARAMETERS, OR FROM THE kwargs PARAMETER. IN THESE CASES, PARAMETER VALUES ARE CHOSEN IN THE FOLLOWING ORDER: 1) EXPLICT CALL PARAMETERS 2) PARAMETERS FOUND IN kwargs 3) DEFAULT VALUES ASSIGNED IN FUNCTION DEFINITION """ params = func.func_code.co_varnames[:func.func_code.co_argcount] if not func.func_defaults: defaults = {} else: defaults = {k: v for k, v in zip(reversed(params), reversed(func.func_defaults))} if "kwargs" not in params: # WE ASSUME WE ARE ONLY ADDING A kwargs PARAMETER TO SOME REGULAR METHOD def w_settings(*args, **kwargs): settings = kwargs.get("kwargs") params = func.func_code.co_varnames[:func.func_code.co_argcount] if not func.func_defaults: defaults = {} else: defaults = {k: v for k, v in zip(reversed(params), reversed(func.func_defaults))} ordered_params = dict(zip(params, args)) return func(**params_pack(params, ordered_params, kwargs, settings, defaults)) return w_settings def wrapper(*args, **kwargs): try: if func.func_name in ("__init__", "__new__") and "kwargs" in kwargs: packed = params_pack(params, kwargs, dict_zip(params[1:], args[1:]), kwargs["kwargs"], defaults) return func(args[0], 
**packed) elif func.func_name in ("__init__", "__new__") and len(args) == 2 and len(kwargs) == 0 and isinstance(args[1], Mapping): # ASSUME SECOND UNNAMED PARAM IS kwargs packed = params_pack(params, args[1], defaults) return func(args[0], **packed) elif func.func_name in ("__init__", "__new__"): # DO NOT INCLUDE self IN kwargs packed = params_pack(params, kwargs, dict_zip(params[1:], args[1:]), defaults) return func(args[0], **packed) elif params[0] == "self" and "kwargs" in kwargs: packed = params_pack(params, kwargs, dict_zip(params[1:], args[1:]), kwargs["kwargs"], defaults) return func(args[0], **packed) elif params[0] == "self" and len(args) == 2 and len(kwargs) == 0 and isinstance(args[1], Mapping): # ASSUME SECOND UNNAMED PARAM IS kwargs packed = params_pack(params, args[1], defaults) return func(args[0], **packed) elif params[0] == "self": packed = params_pack(params, kwargs, dict_zip(params[1:], args[1:]), defaults) return func(args[0], **packed) elif len(args) == 1 and len(kwargs) == 0 and isinstance(args[0], Mapping): # ASSUME SINGLE PARAMETER IS A SETTING packed = params_pack(params, args[0], defaults) return func(**packed) elif "kwargs" in kwargs and isinstance(kwargs["kwargs"], Mapping): # PUT args INTO kwargs packed = params_pack(params, kwargs, dict_zip(params, args), kwargs["kwargs"], defaults) return func(**packed) else: # PULL kwargs OUT INTO PARAMS packed = params_pack(params, kwargs, dict_zip(params, args), defaults) return func(**packed) except TypeError as e: e = Except.wrap(e) if e.message.startswith(func.func_name) and "takes at least" in e: missing = [p for p in params if str(p) not in packed] get_logger().error( "Problem calling {{func_name}}: Expecting parameter {{missing}}", func_name=func.func_name, missing=missing, stack_depth=1 ) get_logger().error("Error dispatching call", e) return wrapper def params_pack(params, *args): settings = {} for a in args: if a == None: continue for k, v in a.items(): k = unicode(k) if k in settings: 
continue settings[k] = v settings["kwargs"] = settings output = wrap({str(k): settings[k] for k in params if k in settings}) return output
mpl-2.0
aisipos/django
tests/forms_tests/field_tests/test_filepathfield.py
22
5261
from __future__ import unicode_literals import os.path from django.forms import FilePathField, ValidationError, forms from django.test import SimpleTestCase from django.utils import six from django.utils._os import upath def fix_os_paths(x): if isinstance(x, six.string_types): return x.replace('\\', '/') elif isinstance(x, tuple): return tuple(fix_os_paths(list(x))) elif isinstance(x, list): return [fix_os_paths(y) for y in x] else: return x class FilePathFieldTest(SimpleTestCase): def test_filepathfield_1(self): path = os.path.abspath(upath(forms.__file__)) path = os.path.dirname(path) + '/' self.assertTrue(fix_os_paths(path).endswith('/django/forms/')) def test_filepathfield_2(self): path = upath(forms.__file__) path = os.path.dirname(os.path.abspath(path)) + '/' f = FilePathField(path=path) f.choices = [p for p in f.choices if p[0].endswith('.py')] f.choices.sort() expected = [ ('/django/forms/__init__.py', '__init__.py'), ('/django/forms/boundfield.py', 'boundfield.py'), ('/django/forms/fields.py', 'fields.py'), ('/django/forms/forms.py', 'forms.py'), ('/django/forms/formsets.py', 'formsets.py'), ('/django/forms/models.py', 'models.py'), ('/django/forms/utils.py', 'utils.py'), ('/django/forms/widgets.py', 'widgets.py') ] for exp, got in zip(expected, fix_os_paths(f.choices)): self.assertEqual(exp[1], got[1]) self.assertTrue(got[0].endswith(exp[0])) msg = "'Select a valid choice. 
fields.py is not one of the available choices.'" with self.assertRaisesMessage(ValidationError, msg): f.clean('fields.py') self.assertTrue(fix_os_paths(f.clean(path + 'fields.py')).endswith('/django/forms/fields.py')) def test_filepathfield_3(self): path = upath(forms.__file__) path = os.path.dirname(os.path.abspath(path)) + '/' f = FilePathField(path=path, match='^.*?\.py$') f.choices.sort() expected = [ ('/django/forms/__init__.py', '__init__.py'), ('/django/forms/boundfield.py', 'boundfield.py'), ('/django/forms/fields.py', 'fields.py'), ('/django/forms/forms.py', 'forms.py'), ('/django/forms/formsets.py', 'formsets.py'), ('/django/forms/models.py', 'models.py'), ('/django/forms/utils.py', 'utils.py'), ('/django/forms/widgets.py', 'widgets.py') ] for exp, got in zip(expected, fix_os_paths(f.choices)): self.assertEqual(exp[1], got[1]) self.assertTrue(got[0].endswith(exp[0])) def test_filepathfield_4(self): path = os.path.abspath(upath(forms.__file__)) path = os.path.dirname(path) + '/' f = FilePathField(path=path, recursive=True, match='^.*?\.py$') f.choices.sort() expected = [ ('/django/forms/__init__.py', '__init__.py'), ('/django/forms/boundfield.py', 'boundfield.py'), ('/django/forms/extras/__init__.py', 'extras/__init__.py'), ('/django/forms/extras/widgets.py', 'extras/widgets.py'), ('/django/forms/fields.py', 'fields.py'), ('/django/forms/forms.py', 'forms.py'), ('/django/forms/formsets.py', 'formsets.py'), ('/django/forms/models.py', 'models.py'), ('/django/forms/utils.py', 'utils.py'), ('/django/forms/widgets.py', 'widgets.py') ] for exp, got in zip(expected, fix_os_paths(f.choices)): self.assertEqual(exp[1], got[1]) self.assertTrue(got[0].endswith(exp[0])) def test_filepathfield_folders(self): path = os.path.abspath(os.path.join(upath(__file__), '..', '..')) + '/tests/filepath_test_files/' f = FilePathField(path=path, allow_folders=True, allow_files=False) f.choices.sort() expected = [ ('/forms_tests/tests/filepath_test_files/directory', 'directory'), ] 
actual = fix_os_paths(f.choices) self.assertEqual(len(expected), len(actual)) for exp, got in zip(expected, actual): self.assertEqual(exp[1], got[1]) self.assertTrue(got[0].endswith(exp[0])) f = FilePathField(path=path, allow_folders=True, allow_files=True) f.choices.sort() expected = [ ('/forms_tests/tests/filepath_test_files/.dot-file', '.dot-file'), ('/forms_tests/tests/filepath_test_files/1x1.bmp', '1x1.bmp'), ('/forms_tests/tests/filepath_test_files/1x1.png', '1x1.png'), ('/forms_tests/tests/filepath_test_files/directory', 'directory'), ('/forms_tests/tests/filepath_test_files/fake-image.jpg', 'fake-image.jpg'), ('/forms_tests/tests/filepath_test_files/real-text-file.txt', 'real-text-file.txt'), ] actual = fix_os_paths(f.choices) self.assertEqual(len(expected), len(actual)) for exp, got in zip(expected, actual): self.assertEqual(exp[1], got[1]) self.assertTrue(got[0].endswith(exp[0]))
bsd-3-clause
huchoi/edx-platform
common/lib/xmodule/xmodule/modulestore/__init__.py
2
25982
""" This module provides an abstraction for working with XModuleDescriptors that are stored in a database an accessible using their Location as an identifier """ import logging import re import json import datetime from uuid import uuid4 from collections import namedtuple, defaultdict import collections from contextlib import contextmanager from abc import ABCMeta, abstractmethod from xblock.plugin import default_select from .exceptions import InvalidLocationError, InsufficientSpecificationError from xmodule.errortracker import make_error_tracker from opaque_keys.edx.keys import CourseKey, UsageKey from opaque_keys.edx.locations import Location # For import backwards compatibility from opaque_keys import InvalidKeyError from opaque_keys.edx.locations import SlashSeparatedCourseKey from xblock.runtime import Mixologist from xblock.core import XBlock log = logging.getLogger('edx.modulestore') class ModuleStoreEnum(object): """ A class to encapsulate common constants that are used with the various modulestores. 
""" class Type(object): """ The various types of modulestores provided """ split = 'split' mongo = 'mongo' xml = 'xml' class RevisionOption(object): """ Revision constants to use for Module Store operations Note: These values are passed into store APIs and only used at run time """ # both DRAFT and PUBLISHED versions are queried, with preference to DRAFT versions draft_preferred = 'rev-opt-draft-preferred' # only DRAFT versions are queried and no PUBLISHED versions draft_only = 'rev-opt-draft-only' # # only PUBLISHED versions are queried and no DRAFT versions published_only = 'rev-opt-published-only' # all revisions are queried all = 'rev-opt-all' class Branch(object): """ Branch constants to use for stores, such as Mongo, that have only 2 branches: DRAFT and PUBLISHED Note: These values are taken from server configuration settings, so should not be changed without alerting DevOps """ draft_preferred = 'draft-preferred' published_only = 'published-only' class BranchName(object): """ Branch constants to use for stores, such as Split, that have named branches """ draft = 'draft-branch' published = 'published-branch' class UserID(object): """ Values for user ID defaults """ # Note: we use negative values here to (try to) not collide # with user identifiers provided by actual user services. # user ID to use for all management commands mgmt_command = -1 # user ID to use for primitive commands primitive_command = -2 # user ID to use for tests that do not have a django user available test = -3 class PublishState(object): """ The publish state for a given xblock-- either 'draft', 'private', or 'public'. Currently in CMS, an xblock can only be in 'draft' or 'private' if it is at or below the Unit level. 
""" draft = 'draft' private = 'private' public = 'public' class ModuleStoreRead(object): """ An abstract interface for a database backend that stores XModuleDescriptor instances and extends read-only functionality """ __metaclass__ = ABCMeta @abstractmethod def has_item(self, usage_key): """ Returns True if usage_key exists in this ModuleStore. """ pass @abstractmethod def get_item(self, usage_key, depth=0): """ Returns an XModuleDescriptor instance for the item at location. If any segment of the location is None except revision, raises xmodule.modulestore.exceptions.InsufficientSpecificationError If no object is found at that location, raises xmodule.modulestore.exceptions.ItemNotFoundError usage_key: A :class:`.UsageKey` subclass instance depth (int): An argument that some module stores may use to prefetch descendents of the queried modules for more efficient results later in the request. The depth is counted in the number of calls to get_children() to cache. None indicates to cache all descendents """ pass @abstractmethod def get_course_errors(self, course_key): """ Return a list of (msg, exception-or-None) errors that the modulestore encountered when loading the course at course_id. Raises the same exceptions as get_item if the location isn't found or isn't fully specified. Args: course_key (:class:`.CourseKey`): The course to check for errors """ pass @abstractmethod def get_items(self, location, course_id=None, depth=0, qualifiers=None): """ Returns a list of XModuleDescriptor instances for the items that match location. Any element of location that is None is treated as a wildcard that matches any value location: Something that can be passed to Location depth: An argument that some module stores may use to prefetch descendents of the queried modules for more efficient results later in the request. The depth is counted in the number of calls to get_children() to cache. 
None indicates to cache all descendents """ pass def _block_matches(self, fields_or_xblock, qualifiers): ''' Return True or False depending on whether the field value (block contents) matches the qualifiers as per get_items. Note, only finds directly set not inherited nor default value matches. For substring matching pass a regex object. for arbitrary function comparison such as date time comparison, pass the function as in start=lambda x: x < datetime.datetime(2014, 1, 1, 0, tzinfo=pytz.UTC) Args: fields_or_xblock (dict or XBlock): either the json blob (from the db or get_explicitly_set_fields) or the xblock.fields() value or the XBlock from which to get those values qualifiers (dict): field: searchvalue pairs. ''' if isinstance(fields_or_xblock, XBlock): fields = fields_or_xblock.fields xblock = fields_or_xblock is_xblock = True else: fields = fields_or_xblock is_xblock = False def _is_set_on(key): """ Is this key set in fields? (return tuple of boolean and value). A helper which can handle fields either being the json doc or xblock fields. Is inner function to restrict use and to access local vars. """ if key not in fields: return False, None field = fields[key] if is_xblock: return field.is_set_on(fields_or_xblock), getattr(xblock, key) else: return True, field for key, criteria in qualifiers.iteritems(): is_set, value = _is_set_on(key) if not is_set: return False if not self._value_matches(value, criteria): return False return True def _value_matches(self, target, criteria): ''' helper for _block_matches: does the target (field value) match the criteria? If target is a list, do any of the list elements meet the criteria If the criteria is a regex, does the target match it? If the criteria is a function, does invoking it on the target yield something truthy? 
Otherwise, is the target == criteria ''' if isinstance(target, list): return any(self._value_matches(ele, criteria) for ele in target) elif isinstance(criteria, re._pattern_type): return criteria.search(target) is not None elif callable(criteria): return criteria(target) else: return criteria == target @abstractmethod def get_courses(self): ''' Returns a list containing the top level XModuleDescriptors of the courses in this modulestore. ''' pass @abstractmethod def get_course(self, course_id, depth=0): ''' Look for a specific course by its id (:class:`CourseKey`). Returns the course descriptor, or None if not found. ''' pass @abstractmethod def has_course(self, course_id, ignore_case=False): ''' Look for a specific course id. Returns whether it exists. Args: course_id (CourseKey): ignore_case (boolean): some modulestores are case-insensitive. Use this flag to search for whether a potentially conflicting course exists in that case. ''' pass @abstractmethod def get_parent_location(self, location, **kwargs): '''Find the location that is the parent of this location in this course. Needed for path_to_location(). ''' pass @abstractmethod def get_orphans(self, course_key): """ Get all of the xblocks in the given course which have no parents and are not of types which are usually orphaned. NOTE: may include xblocks which still have references via xblocks which don't use children to point to their dependents. """ pass @abstractmethod def get_errored_courses(self): """ Return a dictionary of course_dir -> [(msg, exception_str)], for each course_dir where course loading failed. """ pass @abstractmethod def get_modulestore_type(self, course_id): """ Returns a type which identifies which modulestore is servicing the given course_id. 
The return can be either "xml" (for XML based courses) or "mongo" for MongoDB backed courses """ pass @abstractmethod def get_courses_for_wiki(self, wiki_slug): """ Return the list of courses which use this wiki_slug :param wiki_slug: the course wiki root slug :return: list of course keys """ pass @abstractmethod def compute_publish_state(self, xblock): """ Returns whether this xblock is draft, public, or private. Returns: PublishState.draft - content is in the process of being edited, but still has a previous version deployed to LMS PublishState.public - content is locked and deployed to LMS PublishState.private - content is editable and not deployed to LMS """ pass @abstractmethod def close_connections(self): """ Closes any open connections to the underlying databases """ pass class ModuleStoreWrite(ModuleStoreRead): """ An abstract interface for a database backend that stores XModuleDescriptor instances and extends both read and write functionality """ __metaclass__ = ABCMeta @abstractmethod def update_item(self, xblock, user_id, allow_not_found=False, force=False): """ Update the given xblock's persisted repr. Pass the user's unique id which the persistent store should save with the update if it has that ability. :param allow_not_found: whether this method should raise an exception if the given xblock has not been persisted before. :param force: fork the structure and don't update the course draftVersion if there's a version conflict (only applicable to version tracking and conflict detecting persistence stores) :raises VersionConflictError: if org, course, run, and version_guid given and the current version head != version_guid and force is not True. (only applicable to version tracking stores) """ pass @abstractmethod def delete_item(self, location, user_id, **kwargs): """ Delete an item and its subtree from persistence. 
Remove the item from any parents (Note, does not affect parents from other branches or logical branches; thus, in old mongo, deleting something whose parent cannot be draft, deletes it from both but deleting a component under a draft vertical only deletes it from the draft. Pass the user's unique id which the persistent store should save with the update if it has that ability. :param force: fork the structure and don't update the course draftVersion if there's a version conflict (only applicable to version tracking and conflict detecting persistence stores) :raises VersionConflictError: if org, course, run, and version_guid given and the current version head != version_guid and force is not True. (only applicable to version tracking stores) """ pass @abstractmethod def create_course(self, org, course, run, user_id, fields=None, **kwargs): """ Creates and returns the course. Args: org (str): the organization that owns the course course (str): the name of the course run (str): the name of the run user_id: id of the user creating the course fields (dict): Fields to set on the course at initialization kwargs: Any optional arguments understood by a subset of modulestores to customize instantiation Returns: a CourseDescriptor """ pass @abstractmethod def create_item(self, user_id, course_key, block_type, block_id=None, fields=None, **kwargs): """ Creates and saves a new item in a course. Returns the newly created item. Args: user_id: ID of the user creating and saving the xmodule course_key: A :class:`~opaque_keys.edx.CourseKey` identifying which course to create this item in block_type: The type of block to create block_id: a unique identifier for the new item. 
If not supplied, a new identifier will be generated fields (dict): A dictionary specifying initial values for some or all fields in the newly created block """ pass @abstractmethod def clone_course(self, source_course_id, dest_course_id, user_id): """ Sets up source_course_id to point a course with the same content as the desct_course_id. This operation may be cheap or expensive. It may have to copy all assets and all xblock content or merely setup new pointers. Backward compatibility: this method used to require in some modulestores that dest_course_id pointed to an empty but already created course. Implementers should support this or should enable creating the course from scratch. Raises: ItemNotFoundError: if the source course doesn't exist (or any of its xblocks aren't found) DuplicateItemError: if the destination course already exists (with content in some cases) """ pass @abstractmethod def delete_course(self, course_key, user_id): """ Deletes the course. It may be a soft or hard delete. It may or may not remove the xblock definitions depending on the persistence layer and how tightly bound the xblocks are to the course. Args: course_key (CourseKey): which course to delete user_id: id of the user deleting the course """ pass @abstractmethod def _drop_database(self): """ A destructive operation to drop the underlying database and close all connections. Intended to be used by test code for cleanup. """ pass class ModuleStoreReadBase(ModuleStoreRead): ''' Implement interface functionality that can be shared. ''' # pylint: disable=W0613 def __init__( self, contentstore=None, doc_store_config=None, # ignore if passed up metadata_inheritance_cache_subsystem=None, request_cache=None, xblock_mixins=(), xblock_select=None, # temporary parms to enable backward compatibility. 
remove once all envs migrated db=None, collection=None, host=None, port=None, tz_aware=True, user=None, password=None, # allow lower level init args to pass harmlessly ** kwargs ): ''' Set up the error-tracking logic. ''' self._course_errors = defaultdict(make_error_tracker) # location -> ErrorLog self.metadata_inheritance_cache_subsystem = metadata_inheritance_cache_subsystem self.request_cache = request_cache self.xblock_mixins = xblock_mixins self.xblock_select = xblock_select self.contentstore = contentstore def get_course_errors(self, course_key): """ Return list of errors for this :class:`.CourseKey`, if any. Raise the same errors as get_item if course_key isn't present. """ # check that item is present and raise the promised exceptions if needed # TODO (vshnayder): post-launch, make errors properties of items # self.get_item(location) assert(isinstance(course_key, CourseKey)) return self._course_errors[course_key].errors def get_errored_courses(self): """ Returns an empty dict. It is up to subclasses to extend this method if the concept of errored courses makes sense for their implementation. """ return {} def get_course(self, course_id, depth=0): """ See ModuleStoreRead.get_course Default impl--linear search through course list """ assert(isinstance(course_id, CourseKey)) for course in self.get_courses(): if course.id == course_id: return course return None def has_course(self, course_id, ignore_case=False): """ Returns the course_id of the course if it was found, else None Args: course_id (CourseKey): ignore_case (boolean): some modulestores are case-insensitive. Use this flag to search for whether a potentially conflicting course exists in that case. 
""" # linear search through list assert(isinstance(course_id, CourseKey)) if ignore_case: return next( ( c.id for c in self.get_courses() if c.id.org.lower() == course_id.org.lower() and c.id.course.lower() == course_id.course.lower() and c.id.run.lower() == course_id.run.lower() ), None ) else: return next( (c.id for c in self.get_courses() if c.id == course_id), None ) def compute_publish_state(self, xblock): """ Returns PublishState.public since this is a read-only store. """ return PublishState.public def heartbeat(self): """ Is this modulestore ready? """ # default is to say yes by not raising an exception return {'default_impl': True} def close_connections(self): """ Closes any open connections to the underlying databases """ if self.contentstore: self.contentstore.close_connections() super(ModuleStoreReadBase, self).close_connections() @contextmanager def default_store(self, store_type): """ A context manager for temporarily changing the default store """ if self.get_modulestore_type(None) != store_type: raise ValueError(u"Cannot set default store to type {}".format(store_type)) yield class ModuleStoreWriteBase(ModuleStoreReadBase, ModuleStoreWrite): ''' Implement interface functionality that can be shared. 
''' def __init__(self, contentstore, **kwargs): super(ModuleStoreWriteBase, self).__init__(contentstore=contentstore, **kwargs) # TODO: Don't have a runtime just to generate the appropriate mixin classes (cpennington) # This is only used by partition_fields_by_scope, which is only needed because # the split mongo store is used for item creation as well as item persistence self.mixologist = Mixologist(self.xblock_mixins) def partition_fields_by_scope(self, category, fields): """ Return dictionary of {scope: {field1: val, ..}..} for the fields of this potential xblock :param category: the xblock category :param fields: the dictionary of {fieldname: value} """ if fields is None: return {} cls = self.mixologist.mix(XBlock.load_class(category, select=prefer_xmodules)) result = collections.defaultdict(dict) for field_name, value in fields.iteritems(): field = getattr(cls, field_name) result[field.scope][field_name] = value return result def clone_course(self, source_course_id, dest_course_id, user_id): """ This base method just copies the assets. The lower level impls must do the actual cloning of content. """ # copy the assets if self.contentstore: self.contentstore.copy_all_course_assets(source_course_id, dest_course_id) super(ModuleStoreWriteBase, self).clone_course(source_course_id, dest_course_id, user_id) return dest_course_id def delete_course(self, course_key, user_id): """ This base method just deletes the assets. The lower level impls must do the actual deleting of content. """ # delete the assets if self.contentstore: self.contentstore.delete_all_course_assets(course_key) super(ModuleStoreWriteBase, self).delete_course(course_key, user_id) def _drop_database(self): """ A destructive operation to drop the underlying database and close all connections. Intended to be used by test code for cleanup. 
""" if self.contentstore: self.contentstore._drop_database() # pylint: disable=protected-access super(ModuleStoreWriteBase, self)._drop_database() # pylint: disable=protected-access def create_child(self, user_id, parent_usage_key, block_type, block_id=None, fields=None, **kwargs): """ Creates and saves a new xblock that as a child of the specified block Returns the newly created item. Args: user_id: ID of the user creating and saving the xmodule parent_usage_key: a :class:`~opaque_key.edx.UsageKey` identifing the block that this item should be parented under block_type: The type of block to create block_id: a unique identifier for the new item. If not supplied, a new identifier will be generated fields (dict): A dictionary specifying initial values for some or all fields in the newly created block """ item = self.create_item(user_id, parent_usage_key.course_key, block_type, block_id=block_id, fields=fields, **kwargs) parent = self.get_item(parent_usage_key) parent.children.append(item.location) self.update_item(parent, user_id) @contextmanager def bulk_write_operations(self, course_id): """ A context manager for notifying the store of bulk write events. In the case of Mongo, it temporarily disables refreshing the metadata inheritance tree until the bulk operation is completed. """ # TODO # Make this multi-process-safe if future operations need it. # Right now, only Import Course, Clone Course, and Delete Course use this, so # it's ok if the cached metadata in the memcache is invalid when another # request comes in for the same course. 
try: if hasattr(self, '_begin_bulk_write_operation'): self._begin_bulk_write_operation(course_id) yield finally: # check for the begin method here, # since it's an error if an end method is not defined when a begin method is if hasattr(self, '_begin_bulk_write_operation'): self._end_bulk_write_operation(course_id) def only_xmodules(identifier, entry_points): """Only use entry_points that are supplied by the xmodule package""" from_xmodule = [entry_point for entry_point in entry_points if entry_point.dist.key == 'xmodule'] return default_select(identifier, from_xmodule) def prefer_xmodules(identifier, entry_points): """Prefer entry_points from the xmodule package""" from_xmodule = [entry_point for entry_point in entry_points if entry_point.dist.key == 'xmodule'] if from_xmodule: return default_select(identifier, from_xmodule) else: return default_select(identifier, entry_points) class EdxJSONEncoder(json.JSONEncoder): """ Custom JSONEncoder that handles `Location` and `datetime.datetime` objects. `Location`s are encoded as their url string form, and `datetime`s as ISO date strings """ def default(self, obj): if isinstance(obj, Location): return obj.to_deprecated_string() elif isinstance(obj, datetime.datetime): if obj.tzinfo is not None: if obj.utcoffset() is None: return obj.isoformat() + 'Z' else: return obj.isoformat() else: return obj.isoformat() else: return super(EdxJSONEncoder, self).default(obj)
agpl-3.0
cloudbau/cinder
cinder/tests/api/contrib/test_services.py
5
11428
# Copyright 2012 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder.api.contrib import services from cinder import context from cinder import db from cinder import exception from cinder.openstack.common import timeutils from cinder import policy from cinder import test from cinder.tests.api import fakes from datetime import datetime fake_services_list = [{'binary': 'cinder-scheduler', 'host': 'host1', 'availability_zone': 'cinder', 'id': 1, 'disabled': True, 'updated_at': datetime(2012, 10, 29, 13, 42, 2), 'created_at': datetime(2012, 9, 18, 2, 46, 27)}, {'binary': 'cinder-volume', 'host': 'host1', 'availability_zone': 'cinder', 'id': 2, 'disabled': True, 'updated_at': datetime(2012, 10, 29, 13, 42, 5), 'created_at': datetime(2012, 9, 18, 2, 46, 27)}, {'binary': 'cinder-scheduler', 'host': 'host2', 'availability_zone': 'cinder', 'id': 3, 'disabled': False, 'updated_at': datetime(2012, 9, 19, 6, 55, 34), 'created_at': datetime(2012, 9, 18, 2, 46, 28)}, {'binary': 'cinder-volume', 'host': 'host2', 'availability_zone': 'cinder', 'id': 4, 'disabled': True, 'updated_at': datetime(2012, 9, 18, 8, 3, 38), 'created_at': datetime(2012, 9, 18, 2, 46, 28)}, ] class FakeRequest(object): environ = {"cinder.context": context.get_admin_context()} GET = {} # NOTE(uni): deprecating service request key, binary takes precedence # Still keeping service key here for API compability sake. 
class FakeRequestWithService(object): environ = {"cinder.context": context.get_admin_context()} GET = {"service": "cinder-volume"} class FakeRequestWithBinary(object): environ = {"cinder.context": context.get_admin_context()} GET = {"binary": "cinder-volume"} class FakeRequestWithHost(object): environ = {"cinder.context": context.get_admin_context()} GET = {"host": "host1"} # NOTE(uni): deprecating service request key, binary takes precedence # Still keeping service key here for API compability sake. class FakeRequestWithHostService(object): environ = {"cinder.context": context.get_admin_context()} GET = {"host": "host1", "service": "cinder-volume"} class FakeRequestWithHostBinary(object): environ = {"cinder.context": context.get_admin_context()} GET = {"host": "host1", "binary": "cinder-volume"} def fake_service_get_all(context): return fake_services_list def fake_service_get_by_host_binary(context, host, binary): for service in fake_services_list: if service['host'] == host and service['binary'] == binary: return service return None def fake_service_get_by_id(value): for service in fake_services_list: if service['id'] == value: return service return None def fake_service_update(context, service_id, values): service = fake_service_get_by_id(service_id) if service is None: raise exception.ServiceNotFound(service_id=service_id) else: {'host': 'host1', 'service': 'cinder-volume', 'disabled': values['disabled']} def fake_policy_enforce(context, action, target): pass def fake_utcnow(): return datetime(2012, 10, 29, 13, 42, 11) class ServicesTest(test.TestCase): def setUp(self): super(ServicesTest, self).setUp() self.stubs.Set(db, "service_get_all", fake_service_get_all) self.stubs.Set(timeutils, "utcnow", fake_utcnow) self.stubs.Set(db, "service_get_by_args", fake_service_get_by_host_binary) self.stubs.Set(db, "service_update", fake_service_update) self.stubs.Set(policy, "enforce", fake_policy_enforce) self.context = context.get_admin_context() self.controller = 
services.ServiceController() def tearDown(self): super(ServicesTest, self).tearDown() def test_services_list(self): req = FakeRequest() res_dict = self.controller.index(req) response = {'services': [{'binary': 'cinder-scheduler', 'host': 'host1', 'zone': 'cinder', 'status': 'disabled', 'state': 'up', 'updated_at': datetime(2012, 10, 29, 13, 42, 2)}, {'binary': 'cinder-volume', 'host': 'host1', 'zone': 'cinder', 'status': 'disabled', 'state': 'up', 'updated_at': datetime(2012, 10, 29, 13, 42, 5)}, {'binary': 'cinder-scheduler', 'host': 'host2', 'zone': 'cinder', 'status': 'enabled', 'state': 'down', 'updated_at': datetime(2012, 9, 19, 6, 55, 34)}, {'binary': 'cinder-volume', 'host': 'host2', 'zone': 'cinder', 'status': 'disabled', 'state': 'down', 'updated_at': datetime(2012, 9, 18, 8, 3, 38)}]} self.assertEqual(res_dict, response) def test_services_list_with_host(self): req = FakeRequestWithHost() res_dict = self.controller.index(req) response = {'services': [{'binary': 'cinder-scheduler', 'host': 'host1', 'zone': 'cinder', 'status': 'disabled', 'state': 'up', 'updated_at': datetime(2012, 10, 29, 13, 42, 2)}, {'binary': 'cinder-volume', 'host': 'host1', 'zone': 'cinder', 'status': 'disabled', 'state': 'up', 'updated_at': datetime(2012, 10, 29, 13, 42, 5)}]} self.assertEqual(res_dict, response) def test_services_list_with_service(self): req = FakeRequestWithService() res_dict = self.controller.index(req) response = {'services': [{'binary': 'cinder-volume', 'host': 'host1', 'zone': 'cinder', 'status': 'disabled', 'state': 'up', 'updated_at': datetime(2012, 10, 29, 13, 42, 5)}, {'binary': 'cinder-volume', 'host': 'host2', 'zone': 'cinder', 'status': 'disabled', 'state': 'down', 'updated_at': datetime(2012, 9, 18, 8, 3, 38)}]} self.assertEqual(res_dict, response) def test_services_list_with_binary(self): req = FakeRequestWithBinary() res_dict = self.controller.index(req) response = {'services': [{'binary': 'cinder-volume', 'host': 'host1', 'zone': 'cinder', 'status': 
'disabled', 'state': 'up', 'updated_at': datetime(2012, 10, 29, 13, 42, 5)}, {'binary': 'cinder-volume', 'host': 'host2', 'zone': 'cinder', 'status': 'disabled', 'state': 'down', 'updated_at': datetime(2012, 9, 18, 8, 3, 38)}]} self.assertEqual(res_dict, response) def test_services_list_with_host_service(self): req = FakeRequestWithHostService() res_dict = self.controller.index(req) response = {'services': [{'binary': 'cinder-volume', 'host': 'host1', 'zone': 'cinder', 'status': 'disabled', 'state': 'up', 'updated_at': datetime(2012, 10, 29, 13, 42, 5)}]} self.assertEqual(res_dict, response) def test_services_list_with_host_binary(self): req = FakeRequestWithHostBinary() res_dict = self.controller.index(req) response = {'services': [{'binary': 'cinder-volume', 'host': 'host1', 'zone': 'cinder', 'status': 'disabled', 'state': 'up', 'updated_at': datetime(2012, 10, 29, 13, 42, 5)}]} self.assertEqual(res_dict, response) def test_services_enable_with_service_key(self): body = {'host': 'host1', 'service': 'cinder-volume'} req = fakes.HTTPRequest.blank('/v1/fake/os-services/enable') res_dict = self.controller.update(req, "enable", body) self.assertEqual(res_dict['status'], 'enabled') def test_services_enable_with_binary_key(self): body = {'host': 'host1', 'binary': 'cinder-volume'} req = fakes.HTTPRequest.blank('/v1/fake/os-services/enable') res_dict = self.controller.update(req, "enable", body) self.assertEqual(res_dict['status'], 'enabled') def test_services_disable_with_service_key(self): req = fakes.HTTPRequest.blank('/v1/fake/os-services/disable') body = {'host': 'host1', 'service': 'cinder-volume'} res_dict = self.controller.update(req, "disable", body) self.assertEqual(res_dict['status'], 'disabled') def test_services_disable_with_binary_key(self): req = fakes.HTTPRequest.blank('/v1/fake/os-services/disable') body = {'host': 'host1', 'binary': 'cinder-volume'} res_dict = self.controller.update(req, "disable", body) self.assertEqual(res_dict['status'], 'disabled')
apache-2.0
ToonTownInfiniteRepo/ToontownInfinite
Panda3D-1.9.0/python/Lib/idlelib/ToolTip.py
149
2736
# general purpose 'tooltip' routines - currently unused in idlefork # (although the 'calltips' extension is partly based on this code) # may be useful for some purposes in (or almost in ;) the current project scope # Ideas gleaned from PySol from Tkinter import * class ToolTipBase: def __init__(self, button): self.button = button self.tipwindow = None self.id = None self.x = self.y = 0 self._id1 = self.button.bind("<Enter>", self.enter) self._id2 = self.button.bind("<Leave>", self.leave) self._id3 = self.button.bind("<ButtonPress>", self.leave) def enter(self, event=None): self.schedule() def leave(self, event=None): self.unschedule() self.hidetip() def schedule(self): self.unschedule() self.id = self.button.after(1500, self.showtip) def unschedule(self): id = self.id self.id = None if id: self.button.after_cancel(id) def showtip(self): if self.tipwindow: return # The tip window must be completely outside the button; # otherwise when the mouse enters the tip window we get # a leave event and it disappears, and then we get an enter # event and it reappears, and so on forever :-( x = self.button.winfo_rootx() + 20 y = self.button.winfo_rooty() + self.button.winfo_height() + 1 self.tipwindow = tw = Toplevel(self.button) tw.wm_overrideredirect(1) tw.wm_geometry("+%d+%d" % (x, y)) self.showcontents() def showcontents(self, text="Your text here"): # Override this in derived class label = Label(self.tipwindow, text=text, justify=LEFT, background="#ffffe0", relief=SOLID, borderwidth=1) label.pack() def hidetip(self): tw = self.tipwindow self.tipwindow = None if tw: tw.destroy() class ToolTip(ToolTipBase): def __init__(self, button, text): ToolTipBase.__init__(self, button) self.text = text def showcontents(self): ToolTipBase.showcontents(self, self.text) class ListboxToolTip(ToolTipBase): def __init__(self, button, items): ToolTipBase.__init__(self, button) self.items = items def showcontents(self): listbox = Listbox(self.tipwindow, background="#ffffe0") listbox.pack() for 
item in self.items: listbox.insert(END, item) def main(): # Test code root = Tk() b = Button(root, text="Hello", command=root.destroy) b.pack() root.update() tip = ListboxToolTip(b, ["Hello", "world"]) root.mainloop() if __name__ == '__main__': main()
mit
finfish/scrapy
scrapy/extensions/telnet.py
3
4136
""" Scrapy Telnet Console extension See documentation in docs/topics/telnetconsole.rst """ import pprint import logging import traceback import binascii import os from twisted.internet import protocol try: from twisted.conch import manhole, telnet from twisted.conch.insults import insults TWISTED_CONCH_AVAILABLE = True except (ImportError, SyntaxError): _TWISTED_CONCH_TRACEBACK = traceback.format_exc() TWISTED_CONCH_AVAILABLE = False from scrapy.exceptions import NotConfigured from scrapy import signals from scrapy.utils.trackref import print_live_refs from scrapy.utils.engine import print_engine_status from scrapy.utils.reactor import listen_tcp from scrapy.utils.decorators import defers try: import guppy hpy = guppy.hpy() except ImportError: hpy = None logger = logging.getLogger(__name__) # signal to update telnet variables # args: telnet_vars update_telnet_vars = object() class TelnetConsole(protocol.ServerFactory): def __init__(self, crawler): if not crawler.settings.getbool('TELNETCONSOLE_ENABLED'): raise NotConfigured if not TWISTED_CONCH_AVAILABLE: raise NotConfigured( 'TELNETCONSOLE_ENABLED setting is True but required twisted ' 'modules failed to import:\n' + _TWISTED_CONCH_TRACEBACK) self.crawler = crawler self.noisy = False self.portrange = [int(x) for x in crawler.settings.getlist('TELNETCONSOLE_PORT')] self.host = crawler.settings['TELNETCONSOLE_HOST'] self.username = crawler.settings['TELNETCONSOLE_USERNAME'] self.password = crawler.settings['TELNETCONSOLE_PASSWORD'] if not self.password: self.password = binascii.hexlify(os.urandom(8)).decode('utf8') logger.info('Telnet Password: %s', self.password) self.crawler.signals.connect(self.start_listening, signals.engine_started) self.crawler.signals.connect(self.stop_listening, signals.engine_stopped) @classmethod def from_crawler(cls, crawler): return cls(crawler) def start_listening(self): self.port = listen_tcp(self.portrange, self.host, self) h = self.port.getHost() logger.info("Telnet console listening 
on %(host)s:%(port)d", {'host': h.host, 'port': h.port}, extra={'crawler': self.crawler}) def stop_listening(self): self.port.stopListening() def protocol(self): class Portal: """An implementation of IPortal""" @defers def login(self_, credentials, mind, *interfaces): if not (credentials.username == self.username.encode('utf8') and credentials.checkPassword(self.password.encode('utf8'))): raise ValueError("Invalid credentials") protocol = telnet.TelnetBootstrapProtocol( insults.ServerProtocol, manhole.Manhole, self._get_telnet_vars() ) return (interfaces[0], protocol, lambda: None) return telnet.TelnetTransport( telnet.AuthenticatingTelnetProtocol, Portal() ) def _get_telnet_vars(self): # Note: if you add entries here also update topics/telnetconsole.rst telnet_vars = { 'engine': self.crawler.engine, 'spider': self.crawler.engine.spider, 'slot': self.crawler.engine.slot, 'crawler': self.crawler, 'extensions': self.crawler.extensions, 'stats': self.crawler.stats, 'settings': self.crawler.settings, 'est': lambda: print_engine_status(self.crawler.engine), 'p': pprint.pprint, 'prefs': print_live_refs, 'hpy': hpy, 'help': "This is Scrapy telnet console. For more info see: " "https://docs.scrapy.org/en/latest/topics/telnetconsole.html", } self.crawler.signals.send_catch_log(update_telnet_vars, telnet_vars=telnet_vars) return telnet_vars
bsd-3-clause
jcftang/ansible
lib/ansible/modules/packaging/os/opkg.py
37
5435
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2013, Patrick Pelletier <pp.pelletier@gmail.com> # Based on pacman (Afterburn) and pkgin (Shaun Zinck) modules # # This module is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this software. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.0'} DOCUMENTATION = ''' --- module: opkg author: "Patrick Pelletier (@skinp)" short_description: Package manager for OpenWrt description: - Manages OpenWrt packages version_added: "1.1" options: name: description: - name of package to install/remove required: true state: description: - state of the package choices: [ 'present', 'absent' ] required: false default: present force: description: - opkg --force parameter used choices: ["", "depends", "maintainer", "reinstall", "overwrite", "downgrade", "space", "postinstall", "remove", "checksum", "removal-of-dependent-packages"] required: false default: absent version_added: "2.0" update_cache: description: - update the package db first required: false default: "no" choices: [ "yes", "no" ] notes: [] ''' EXAMPLES = ''' - opkg: name: foo state: present - opkg: name: foo state: present update_cache: yes - opkg: name: foo state: absent - opkg: name: foo,bar state: absent - opkg: name: foo state: present force: overwrite ''' import pipes def update_package_db(module, opkg_path): """ Updates packages list. 
""" rc, out, err = module.run_command("%s update" % opkg_path) if rc != 0: module.fail_json(msg="could not update package db") def query_package(module, opkg_path, name, state="present"): """ Returns whether a package is installed or not. """ if state == "present": rc, out, err = module.run_command("%s list-installed | grep -q \"^%s \"" % (pipes.quote(opkg_path), pipes.quote(name)), use_unsafe_shell=True) if rc == 0: return True return False def remove_packages(module, opkg_path, packages): """ Uninstalls one or more packages if installed. """ p = module.params force = p["force"] if force: force = "--force-%s" % force remove_c = 0 # Using a for loop in case of error, we can report the package that failed for package in packages: # Query the package first, to see if we even need to remove if not query_package(module, opkg_path, package): continue rc, out, err = module.run_command("%s remove %s %s" % (opkg_path, force, package)) if query_package(module, opkg_path, package): module.fail_json(msg="failed to remove %s: %s" % (package, out)) remove_c += 1 if remove_c > 0: module.exit_json(changed=True, msg="removed %s package(s)" % remove_c) module.exit_json(changed=False, msg="package(s) already absent") def install_packages(module, opkg_path, packages): """ Installs one or more packages if not already installed. 
""" p = module.params force = p["force"] if force: force = "--force-%s" % force install_c = 0 for package in packages: if query_package(module, opkg_path, package): continue rc, out, err = module.run_command("%s install %s %s" % (opkg_path, force, package)) if not query_package(module, opkg_path, package): module.fail_json(msg="failed to install %s: %s" % (package, out)) install_c += 1 if install_c > 0: module.exit_json(changed=True, msg="installed %s package(s)" % (install_c)) module.exit_json(changed=False, msg="package(s) already present") def main(): module = AnsibleModule( argument_spec = dict( name = dict(aliases=["pkg"], required=True), state = dict(default="present", choices=["present", "installed", "absent", "removed"]), force = dict(default="", choices=["", "depends", "maintainer", "reinstall", "overwrite", "downgrade", "space", "postinstall", "remove", "checksum", "removal-of-dependent-packages"]), update_cache = dict(default="no", aliases=["update-cache"], type='bool') ) ) opkg_path = module.get_bin_path('opkg', True, ['/bin']) p = module.params if p["update_cache"]: update_package_db(module, opkg_path) pkgs = p["name"].split(",") if p["state"] in ["present", "installed"]: install_packages(module, opkg_path, pkgs) elif p["state"] in ["absent", "removed"]: remove_packages(module, opkg_path, pkgs) # import module snippets from ansible.module_utils.basic import * if __name__ == '__main__': main()
gpl-3.0
glenn-edgar/local_controller_3
__backup__/python_2_ref/moisture_monitoring.py
1
11106
# external control import datetime import time import string #import urllib2 import math import redis import base64 import json import py_cf.cf_interpreter import os import copy import rabbit_cloud_status_publish import io_control class Moisture_Control(object): def __init__(self, redis_handle , graph_management,io_control_class, status_queue_classes, moisture_app_classes, moisture_remote_classes, remote_classes): self.redis_handle = redis_handle self.gm = graph_management self.status_queue_class = status_queue_classes self.moisture_app_classes = moisture_app_classes self.moisture_remote_classes = moisture_remote_classes self.remote_classes = remote_classes self.io_control_class = io_control_class self.unpack_graph() self.clear_update_flag() def unpack_graph( self ): self.update_flag = self.gm.match_relationship("MOISTURE_MANUAL_UPDATE_FLAG" )[0] self.web_moisture_trigger_key = self.update_flag["name"] #print self.web_moisture_trigger_key self.store_data_list = {} self.store_air_list = {} self.rollover_list = {} for i in moisture_app_classes: self.store_data_list[i["name"] ] = self.gm.get_data(self.gm.qc.match_label_property("MOISTURE_DATA","name", "moisture_1" )[0]) self.store_air_list[i["name"]] = self.gm.get_data(self.gm.qc.match_label_property("MOISTURE_AIR_TEMP_LIST","name", "moisture_1" )[0]) self.rollover_list[i["name"]] = self.gm.get_data(self.gm.qc.match_label_property("MOISTURE_ROLLOVER","name","moisture_1")[0]) def clear_update_flag( self ): list = self.update_flag["name"] self.redis_handle.delete(list) def find_driver( self, port ): for i in self.moisture_remote_classes: if i["modbus_address"] == port: return i raise ValueError("Cannot find device at specified port") def update_moisture_readings( self,chainFlowHandle, chainOjb, parameters, event ): list_data = [] for value in self.moisture_app_classes: modbus_address = value["slave_controller_address"] driver = self.find_driver( modbus_address ) self.update_a_reading( value, driver, list_data ) def 
update_a_reading(self, value, driver, list_data ): properties = copy.deepcopy( value) modbus_address = driver["modbus_address"] measurement_properties = self.make_measurements( int(modbus_address), driver,list_data ) if measurement_properties["measurement_status"] == 0: return properties["measurements"] = measurement_properties properties["namespace"] = self.gm.convert_namespace(properties["namespace"]) name = properties["name"] #print measurement_properties.keys() redis_key = self.store_data_list[name]["queue_name"] redis_length = self.store_data_list[name]["list_length"] self.redis_handle.lpush(redis_key,json.dumps(properties)) self.redis_handle.ltrim(redis_key,0,redis_length) self.status_queue_class.queue_message("moisture_measurement", properties ) def make_measurements( self, modbus_address, io_wrapper , list_data): type = io_wrapper["type"] driver_class = remote_classes.find_class(type) measure_properties = {} time_stamp = time.strftime( "%b %d %Y %H:%M:%S",time.localtime(time.time())) measure_properties["time_stamp"] = time_stamp try: item = {} driver_class.make_soil_temperature( modbus_address ) time.sleep(1.0) driver_class.make_air_temp_humidity( modbus_address ) time.sleep(1.0) temp = driver_class.check_status( modbus_address ) time.sleep(1.0) driver_class.force_moisture_reading(modbus_address) time.sleep(1.5) temp = driver_class.read_moisture_control( modbus_address ) item["humidity"] = temp["AIR_HUMIDITY_FLOAT"] item["temperature"] =temp["AIR_TEMP_FLOAT"] list_data.append(item) measure_properties["air_humidity"] = temp["AIR_HUMIDITY_FLOAT"] measure_properties["soil_temperature"] = temp["MOISTURE_SOIL_TEMP_FLOAT"] measure_properties["air_temperature"] = temp["AIR_TEMP_FLOAT"] measure_properties["sensor_configuration"] = driver_class.read_moisture_configuration( modbus_address ) measure_properties["sensor_data"] = driver_class.read_moisture_data( modbus_address ) measure_properties["resistive_data"] = driver_class.read_moisture_resistive_data( 
modbus_address ) measure_properties["read_status"] = "Communication was successful at "+time_stamp measure_properties["measurement_status"] = 1 except: #raise print ("exception handler") measure_properties["read_status"] = "Communications problems with moisture plc at "+time_stamp measure_properties["measurement_status"] = 0 return measure_properties def check_update_flag( self,chainFlowHandle, chainOjb, parameters, event ): if self.redis_handle.llen( self.web_moisture_trigger_key ) > 0: key = self.redis_handle.rpop(self.web_moisture_trigger_key) if key != None: self.update_moisture_readings( None, None, None, None ) return "DISABLE" def hour_update( self,chainFlowHandle, chainOjb, parameters, event ): #print "hour tick" for i in self.moisture_app_classes: name = i["name"] redis_key = self.store_data_list[name]["queue_name"] data_json = redis_handle.lindex( redis_key, 0) data = json.loads(data_json) temp = {"air_temperature": data["measurements"]["air_temperature"],"air_humidity": data["measurements"]["air_humidity"]} redis_key = self.store_air_list[name]["queue_name"] redis_length = self.store_air_list[name]["list_length"] self.redis_handle.lpush(redis_key,json.dumps(temp)) self.redis_handle.ltrim(redis_key,0,redis_length) return "DISABLE" def day_update( self,chainFlowHandle, chainOjb, parameters, event ): for i in self.moisture_app_classes: name = i["name"] hour_redis_key = self.store_air_list[name]["queue_name"] #print "hour_redis_key",hour_redis_key #print "hour_redis_key",self.redis_handle.llen(hour_redis_key) rollover_redis_key = self.rollover_list[name]["queue_name"] #print "rollover",self.rollover_list[name]["name"] #print "--->", self.redis_handle.llen(rollover_redis_key) if self.redis_handle.llen(rollover_redis_key) > 0: #print "---" self.redis_handle.delete(rollover_redis_key) #print "++++",self.redis_handle.llen(hour_redis_key) self.redis_handle.rename( hour_redis_key , rollover_redis_key) #print "len",self.redis_handle.llen(rollover_redis_key) return 
"DISABLE" if __name__ == "__main__": import time import construct_graph import io_control import io_control.new_instrument import io_control.io_controller_class import io_control.construct_classes graph_management = construct_graph.Graph_Management("PI_1","main_remote","LaCima_DataStore") moisture_stations = graph_management.find_remotes_by_function("moisture") data_store_nodes = graph_management.find_data_stores() io_server_nodes = graph_management.find_io_servers() # find ip and port for redis data store data_server_ip = data_store_nodes[0]["ip"] data_server_port = data_store_nodes[0]["port"] io_server_ip = io_server_nodes[0]["ip"] io_server_port = io_server_nodes[0]["port"] # find ip and port for ip server instrument = io_control.new_instrument.Modbus_Instrument() instrument.set_ip(ip= io_server_ip, port = int(io_server_port)) io_control_class = io_control.io_controller_class.Build_Controller_Classes(instrument) remote_classes = io_control.construct_classes.Construct_Access_Classes(instrument) moisture_app_classes = graph_management.match_relationship("MOISTURE_CTR") moisture_remote_classes = graph_management.find_remotes_by_function("moisture") if len(moisture_app_classes) > len(moisture_remote_classes): raise ValueError("Imbalance in setup graph") redis_handle = redis.StrictRedis( host = data_server_ip, port=data_server_port, db = 12 ) status_stores = graph_management.match_relationship("CLOUD_STATUS_STORE") queue_name = status_stores[0]["queue_name"] status_queue_class = rabbit_cloud_status_publish.Status_Queue(redis_handle, queue_name ) moisture_class = Moisture_Control( redis_handle , graph_management, io_control_class, status_queue_class, moisture_app_classes, moisture_remote_classes , remote_classes ) moisture_class.update_moisture_readings(None,None,None, None ) #populate data # # Adding chains # cf = py_cf.cf_interpreter.CF_Interpreter() #cf.define_chain("test",True) #cf.insert_link( "link_1", "SendEvent", [ "HOUR_TICK",1 ] ) #cf.insert_link( "link_2", 
"WaitEvent", ["TIME_TICK"]) #cf.insert_link( "link_3", "SendEvent", [ "DAY_TICK", 1] ) cf.define_chain("update_moisture_readings",True) cf.insert_link( "link_1", "WaitEventCount", [ "MINUTE_TICK",15,0 ] ) cf.insert_link( "link_2", "One_Step", [ moisture_class.update_moisture_readings ] ) cf.insert_link( "link_3", "Reset", [] ) cf.define_chain("check_for_moisture_update",True) cf.insert_link( "link_1", "WaitEvent", [ "TIME_TICK" ] ) cf.insert_link( "link_2", "One_Step", [ moisture_class.check_update_flag ] ) cf.insert_link( "link_4", "Reset", [] ) cf.define_chain("update_hour_readings",True) cf.insert_link( "link_1", "WaitEvent", [ "HOUR_TICK" ] ) cf.insert_link( "link_2", "One_Step", [ moisture_class.hour_update ] ) cf.insert_link( "link_4", "Reset", [] ) cf.define_chain("update_day_readings",True) cf.insert_link( "link_1", "WaitEvent", [ "DAY_TICK" ] ) cf.insert_link( "link_2", "One_Step", [ moisture_class.day_update ] ) cf.insert_link( "link_4", "Reset", [] ) # cf.define_chain("watch_dog_thread",True) # cf.insert_link( "link_1","WaitTod",["*","*","*",30 ]) # cf.insert_link( "link_2","One_Step",[ wc.pat_wd ]) # cf.insert_link( "link_3","WaitTod",["*","*","*",55 ]) # cf.insert_link( "link_4","Reset",[]) # # Executing chains # cf_environ = py_cf.cf_interpreter.Execute_Cf_Environment( cf ) cf_environ.execute()
mit
pbrod/scipy
scipy/io/netcdf.py
15
37687
""" NetCDF reader/writer module. This module is used to read and create NetCDF files. NetCDF files are accessed through the `netcdf_file` object. Data written to and from NetCDF files are contained in `netcdf_variable` objects. Attributes are given as member variables of the `netcdf_file` and `netcdf_variable` objects. This module implements the Scientific.IO.NetCDF API to read and create NetCDF files. The same API is also used in the PyNIO and pynetcdf modules, allowing these modules to be used interchangeably when working with NetCDF files. Only NetCDF3 is supported here; for NetCDF4 see `netCDF4-python <http://unidata.github.io/netcdf4-python/>`__, which has a similar API. """ from __future__ import division, print_function, absolute_import # TODO: # * properly implement ``_FillValue``. # * fix character variables. # * implement PAGESIZE for Python 2.6? # The Scientific.IO.NetCDF API allows attributes to be added directly to # instances of ``netcdf_file`` and ``netcdf_variable``. To differentiate # between user-set attributes and instance attributes, user-set attributes # are automatically stored in the ``_attributes`` attribute by overloading #``__setattr__``. This is the reason why the code sometimes uses #``obj.__dict__['key'] = value``, instead of simply ``obj.key = value``; # otherwise the key would be inserted into userspace attributes. 
__all__ = ['netcdf_file'] import warnings import weakref from operator import mul from collections import OrderedDict import mmap as mm import numpy as np from numpy.compat import asbytes, asstr from numpy import fromstring, dtype, empty, array, asarray from numpy import little_endian as LITTLE_ENDIAN from functools import reduce from scipy._lib.six import integer_types, text_type, binary_type ABSENT = b'\x00\x00\x00\x00\x00\x00\x00\x00' ZERO = b'\x00\x00\x00\x00' NC_BYTE = b'\x00\x00\x00\x01' NC_CHAR = b'\x00\x00\x00\x02' NC_SHORT = b'\x00\x00\x00\x03' NC_INT = b'\x00\x00\x00\x04' NC_FLOAT = b'\x00\x00\x00\x05' NC_DOUBLE = b'\x00\x00\x00\x06' NC_DIMENSION = b'\x00\x00\x00\n' NC_VARIABLE = b'\x00\x00\x00\x0b' NC_ATTRIBUTE = b'\x00\x00\x00\x0c' TYPEMAP = {NC_BYTE: ('b', 1), NC_CHAR: ('c', 1), NC_SHORT: ('h', 2), NC_INT: ('i', 4), NC_FLOAT: ('f', 4), NC_DOUBLE: ('d', 8)} REVERSE = {('b', 1): NC_BYTE, ('B', 1): NC_CHAR, ('c', 1): NC_CHAR, ('h', 2): NC_SHORT, ('i', 4): NC_INT, ('f', 4): NC_FLOAT, ('d', 8): NC_DOUBLE, # these come from asarray(1).dtype.char and asarray('foo').dtype.char, # used when getting the types from generic attributes. ('l', 4): NC_INT, ('S', 1): NC_CHAR} class netcdf_file(object): """ A file object for NetCDF data. A `netcdf_file` object has two standard attributes: `dimensions` and `variables`. The values of both are dictionaries, mapping dimension names to their associated lengths and variable names to variables, respectively. Application programs should never modify these dictionaries. All other attributes correspond to global attributes defined in the NetCDF file. Global file attributes are created by assigning to an attribute of the `netcdf_file` object. Parameters ---------- filename : string or file-like string -> filename mode : {'r', 'w', 'a'}, optional read-write-append mode, default is 'r' mmap : None or bool, optional Whether to mmap `filename` when reading. 
Default is True when `filename` is a file name, False when `filename` is a file-like object. Note that when mmap is in use, data arrays returned refer directly to the mmapped data on disk, and the file cannot be closed as long as references to it exist. version : {1, 2}, optional version of netcdf to read / write, where 1 means *Classic format* and 2 means *64-bit offset format*. Default is 1. See `here <http://www.unidata.ucar.edu/software/netcdf/docs/netcdf/Which-Format.html>`__ for more info. maskandscale : bool, optional Whether to automatically scale and/or mask data based on attributes. Default is False. Notes ----- The major advantage of this module over other modules is that it doesn't require the code to be linked to the NetCDF libraries. This module is derived from `pupynere <https://bitbucket.org/robertodealmeida/pupynere/>`_. NetCDF files are a self-describing binary data format. The file contains metadata that describes the dimensions and variables in the file. More details about NetCDF files can be found `here <http://www.unidata.ucar.edu/software/netcdf/docs/netcdf.html>`__. There are three main sections to a NetCDF data structure: 1. Dimensions 2. Variables 3. Attributes The dimensions section records the name and length of each dimension used by the variables. The variables would then indicate which dimensions it uses and any attributes such as data units, along with containing the data values for the variable. It is good practice to include a variable that is the same name as a dimension to provide the values for that axes. Lastly, the attributes section would contain additional information such as the name of the file creator or the instrument used to collect the data. When writing data to a NetCDF file, there is often the need to indicate the 'record dimension'. A record dimension is the unbounded dimension for a variable. For example, a temperature variable may have dimensions of latitude, longitude and time. 
If one wants to add more temperature data to the NetCDF file as time progresses, then the temperature variable should have the time dimension flagged as the record dimension. In addition, the NetCDF file header contains the position of the data in the file, so access can be done in an efficient manner without loading unnecessary data into memory. It uses the ``mmap`` module to create Numpy arrays mapped to the data on disk, for the same purpose. Note that when `netcdf_file` is used to open a file with mmap=True (default for read-only), arrays returned by it refer to data directly on the disk. The file should not be closed, and cannot be cleanly closed when asked, if such arrays are alive. You may want to copy data arrays obtained from mmapped Netcdf file if they are to be processed after the file is closed, see the example below. Examples -------- To create a NetCDF file: >>> from scipy.io import netcdf >>> f = netcdf.netcdf_file('simple.nc', 'w') >>> f.history = 'Created for a test' >>> f.createDimension('time', 10) >>> time = f.createVariable('time', 'i', ('time',)) >>> time[:] = np.arange(10) >>> time.units = 'days since 2008-01-01' >>> f.close() Note the assignment of ``range(10)`` to ``time[:]``. Exposing the slice of the time variable allows for the data to be set in the object, rather than letting ``range(10)`` overwrite the ``time`` variable. 
To read the NetCDF file we just created: >>> from scipy.io import netcdf >>> f = netcdf.netcdf_file('simple.nc', 'r') >>> print(f.history) Created for a test >>> time = f.variables['time'] >>> print(time.units) days since 2008-01-01 >>> print(time.shape) (10,) >>> print(time[-1]) 9 NetCDF files, when opened read-only, return arrays that refer directly to memory-mapped data on disk: >>> data = time[:] >>> data.base.base <mmap.mmap object at 0x7fe753763180> If the data is to be processed after the file is closed, it needs to be copied to main memory: >>> data = time[:].copy() >>> f.close() >>> data.mean() 4.5 A NetCDF file can also be used as context manager: >>> from scipy.io import netcdf >>> with netcdf.netcdf_file('simple.nc', 'r') as f: ... print(f.history) Created for a test """ def __init__(self, filename, mode='r', mmap=None, version=1, maskandscale=False): """Initialize netcdf_file from fileobj (str or file-like).""" if mode not in 'rwa': raise ValueError("Mode must be either 'r', 'w' or 'a'.") if hasattr(filename, 'seek'): # file-like self.fp = filename self.filename = 'None' if mmap is None: mmap = False elif mmap and not hasattr(filename, 'fileno'): raise ValueError('Cannot use file object for mmap') else: # maybe it's a string self.filename = filename omode = 'r+' if mode == 'a' else mode self.fp = open(self.filename, '%sb' % omode) if mmap is None: mmap = True if mode != 'r': # Cannot read write-only files mmap = False self.use_mmap = mmap self.mode = mode self.version_byte = version self.maskandscale = maskandscale self.dimensions = OrderedDict() self.variables = OrderedDict() self._dims = [] self._recs = 0 self._recsize = 0 self._mm = None self._mm_buf = None if self.use_mmap: self._mm = mm.mmap(self.fp.fileno(), 0, access=mm.ACCESS_READ) self._mm_buf = np.frombuffer(self._mm, dtype=np.int8) self._attributes = OrderedDict() if mode in 'ra': self._read() def __setattr__(self, attr, value): # Store user defined attributes in a separate dict, # so we can 
save them to file later. try: self._attributes[attr] = value except AttributeError: pass self.__dict__[attr] = value def close(self): """Closes the NetCDF file.""" if not self.fp.closed: try: self.flush() finally: self.variables = OrderedDict() if self._mm_buf is not None: ref = weakref.ref(self._mm_buf) self._mm_buf = None if ref() is None: # self._mm_buf is gc'd, and we can close the mmap self._mm.close() else: # we cannot close self._mm, since self._mm_buf is # alive and there may still be arrays referring to it warnings.warn(( "Cannot close a netcdf_file opened with mmap=True, when " "netcdf_variables or arrays referring to its data still exist. " "All data arrays obtained from such files refer directly to " "data on disk, and must be copied before the file can be cleanly " "closed. (See netcdf_file docstring for more information on mmap.)" ), category=RuntimeWarning) self._mm = None self.fp.close() __del__ = close def __enter__(self): return self def __exit__(self, type, value, traceback): self.close() def createDimension(self, name, length): """ Adds a dimension to the Dimension section of the NetCDF data structure. Note that this function merely adds a new dimension that the variables can reference. The values for the dimension, if desired, should be added as a variable using `createVariable`, referring to this dimension. Parameters ---------- name : str Name of the dimension (Eg, 'lat' or 'time'). length : int Length of the dimension. See Also -------- createVariable """ if length is None and self._dims: raise ValueError("Only first dimension may be unlimited!") self.dimensions[name] = length self._dims.append(name) def createVariable(self, name, type, dimensions): """ Create an empty variable for the `netcdf_file` object, specifying its data type and the dimensions it uses. Parameters ---------- name : str Name of the new variable. type : dtype or str Data type of the variable. 
dimensions : sequence of str List of the dimension names used by the variable, in the desired order. Returns ------- variable : netcdf_variable The newly created ``netcdf_variable`` object. This object has also been added to the `netcdf_file` object as well. See Also -------- createDimension Notes ----- Any dimensions to be used by the variable should already exist in the NetCDF data structure or should be created by `createDimension` prior to creating the NetCDF variable. """ shape = tuple([self.dimensions[dim] for dim in dimensions]) shape_ = tuple([dim or 0 for dim in shape]) # replace None with 0 for numpy type = dtype(type) typecode, size = type.char, type.itemsize if (typecode, size) not in REVERSE: raise ValueError("NetCDF 3 does not support type %s" % type) data = empty(shape_, dtype=type.newbyteorder("B")) # convert to big endian always for NetCDF 3 self.variables[name] = netcdf_variable( data, typecode, size, shape, dimensions, maskandscale=self.maskandscale) return self.variables[name] def flush(self): """ Perform a sync-to-disk flush if the `netcdf_file` object is in write mode. See Also -------- sync : Identical function """ if hasattr(self, 'mode') and self.mode in 'wa': self._write() sync = flush def _write(self): self.fp.seek(0) self.fp.write(b'CDF') self.fp.write(array(self.version_byte, '>b').tostring()) # Write headers and data. self._write_numrecs() self._write_dim_array() self._write_gatt_array() self._write_var_array() def _write_numrecs(self): # Get highest record count from all record variables. 
for var in self.variables.values(): if var.isrec and len(var.data) > self._recs: self.__dict__['_recs'] = len(var.data) self._pack_int(self._recs) def _write_dim_array(self): if self.dimensions: self.fp.write(NC_DIMENSION) self._pack_int(len(self.dimensions)) for name in self._dims: self._pack_string(name) length = self.dimensions[name] self._pack_int(length or 0) # replace None with 0 for record dimension else: self.fp.write(ABSENT) def _write_gatt_array(self): self._write_att_array(self._attributes) def _write_att_array(self, attributes): if attributes: self.fp.write(NC_ATTRIBUTE) self._pack_int(len(attributes)) for name, values in attributes.items(): self._pack_string(name) self._write_values(values) else: self.fp.write(ABSENT) def _write_var_array(self): if self.variables: self.fp.write(NC_VARIABLE) self._pack_int(len(self.variables)) # Sort variable names non-recs first, then recs. def sortkey(n): v = self.variables[n] if v.isrec: return (-1,) return v._shape variables = sorted(self.variables, key=sortkey, reverse=True) # Set the metadata for all variables. for name in variables: self._write_var_metadata(name) # Now that we have the metadata, we know the vsize of # each record variable, so we can calculate recsize. self.__dict__['_recsize'] = sum([ var._vsize for var in self.variables.values() if var.isrec]) # Set the data for all variables. 
for name in variables: self._write_var_data(name) else: self.fp.write(ABSENT) def _write_var_metadata(self, name): var = self.variables[name] self._pack_string(name) self._pack_int(len(var.dimensions)) for dimname in var.dimensions: dimid = self._dims.index(dimname) self._pack_int(dimid) self._write_att_array(var._attributes) nc_type = REVERSE[var.typecode(), var.itemsize()] self.fp.write(asbytes(nc_type)) if not var.isrec: vsize = var.data.size * var.data.itemsize vsize += -vsize % 4 else: # record variable try: vsize = var.data[0].size * var.data.itemsize except IndexError: vsize = 0 rec_vars = len([v for v in self.variables.values() if v.isrec]) if rec_vars > 1: vsize += -vsize % 4 self.variables[name].__dict__['_vsize'] = vsize self._pack_int(vsize) # Pack a bogus begin, and set the real value later. self.variables[name].__dict__['_begin'] = self.fp.tell() self._pack_begin(0) def _write_var_data(self, name): var = self.variables[name] # Set begin in file header. the_beguine = self.fp.tell() self.fp.seek(var._begin) self._pack_begin(the_beguine) self.fp.seek(the_beguine) # Write data. if not var.isrec: self.fp.write(var.data.tostring()) count = var.data.size * var.data.itemsize self.fp.write(b'0' * (var._vsize - count)) else: # record variable # Handle rec vars with shape[0] < nrecs. if self._recs > len(var.data): shape = (self._recs,) + var.data.shape[1:] # Resize in-place does not always work since # the array might not be single-segment try: var.data.resize(shape) except ValueError: var.__dict__['data'] = np.resize(var.data, shape).astype(var.data.dtype) pos0 = pos = self.fp.tell() for rec in var.data: # Apparently scalars cannot be converted to big endian. If we # try to convert a ``=i4`` scalar to, say, '>i4' the dtype # will remain as ``=i4``. 
if not rec.shape and (rec.dtype.byteorder == '<' or (rec.dtype.byteorder == '=' and LITTLE_ENDIAN)): rec = rec.byteswap() self.fp.write(rec.tostring()) # Padding count = rec.size * rec.itemsize self.fp.write(b'0' * (var._vsize - count)) pos += self._recsize self.fp.seek(pos) self.fp.seek(pos0 + var._vsize) def _write_values(self, values): if hasattr(values, 'dtype'): nc_type = REVERSE[values.dtype.char, values.dtype.itemsize] else: types = [(t, NC_INT) for t in integer_types] types += [ (float, NC_FLOAT), (str, NC_CHAR) ] # bytes index into scalars in py3k. Check for "string" types if isinstance(values, text_type) or isinstance(values, binary_type): sample = values else: try: sample = values[0] # subscriptable? except TypeError: sample = values # scalar for class_, nc_type in types: if isinstance(sample, class_): break typecode, size = TYPEMAP[nc_type] dtype_ = '>%s' % typecode # asarray() dies with bytes and '>c' in py3k. Change to 'S' dtype_ = 'S' if dtype_ == '>c' else dtype_ values = asarray(values, dtype=dtype_) self.fp.write(asbytes(nc_type)) if values.dtype.char == 'S': nelems = values.itemsize else: nelems = values.size self._pack_int(nelems) if not values.shape and (values.dtype.byteorder == '<' or (values.dtype.byteorder == '=' and LITTLE_ENDIAN)): values = values.byteswap() self.fp.write(values.tostring()) count = values.size * values.itemsize self.fp.write(b'0' * (-count % 4)) # pad def _read(self): # Check magic bytes and version magic = self.fp.read(3) if not magic == b'CDF': raise TypeError("Error: %s is not a valid NetCDF 3 file" % self.filename) self.__dict__['version_byte'] = fromstring(self.fp.read(1), '>b')[0] # Read file headers and set data. 
self._read_numrecs() self._read_dim_array() self._read_gatt_array() self._read_var_array() def _read_numrecs(self): self.__dict__['_recs'] = self._unpack_int() def _read_dim_array(self): header = self.fp.read(4) if header not in [ZERO, NC_DIMENSION]: raise ValueError("Unexpected header.") count = self._unpack_int() for dim in range(count): name = asstr(self._unpack_string()) length = self._unpack_int() or None # None for record dimension self.dimensions[name] = length self._dims.append(name) # preserve order def _read_gatt_array(self): for k, v in self._read_att_array().items(): self.__setattr__(k, v) def _read_att_array(self): header = self.fp.read(4) if header not in [ZERO, NC_ATTRIBUTE]: raise ValueError("Unexpected header.") count = self._unpack_int() attributes = OrderedDict() for attr in range(count): name = asstr(self._unpack_string()) attributes[name] = self._read_values() return attributes def _read_var_array(self): header = self.fp.read(4) if header not in [ZERO, NC_VARIABLE]: raise ValueError("Unexpected header.") begin = 0 dtypes = {'names': [], 'formats': []} rec_vars = [] count = self._unpack_int() for var in range(count): (name, dimensions, shape, attributes, typecode, size, dtype_, begin_, vsize) = self._read_var() # http://www.unidata.ucar.edu/software/netcdf/docs/netcdf.html # Note that vsize is the product of the dimension lengths # (omitting the record dimension) and the number of bytes # per value (determined from the type), increased to the # next multiple of 4, for each variable. If a record # variable, this is the amount of space per record. The # netCDF "record size" is calculated as the sum of the # vsize's of all the record variables. # # The vsize field is actually redundant, because its value # may be computed from other information in the header. The # 32-bit vsize field is not large enough to contain the size # of variables that require more than 2^32 - 4 bytes, so # 2^32 - 1 is used in the vsize field for such variables. 
if shape and shape[0] is None: # record variable rec_vars.append(name) # The netCDF "record size" is calculated as the sum of # the vsize's of all the record variables. self.__dict__['_recsize'] += vsize if begin == 0: begin = begin_ dtypes['names'].append(name) dtypes['formats'].append(str(shape[1:]) + dtype_) # Handle padding with a virtual variable. if typecode in 'bch': actual_size = reduce(mul, (1,) + shape[1:]) * size padding = -actual_size % 4 if padding: dtypes['names'].append('_padding_%d' % var) dtypes['formats'].append('(%d,)>b' % padding) # Data will be set later. data = None else: # not a record variable # Calculate size to avoid problems with vsize (above) a_size = reduce(mul, shape, 1) * size if self.use_mmap: data = self._mm_buf[begin_:begin_+a_size].view(dtype=dtype_) data.shape = shape else: pos = self.fp.tell() self.fp.seek(begin_) data = fromstring(self.fp.read(a_size), dtype=dtype_) data.shape = shape self.fp.seek(pos) # Add variable. self.variables[name] = netcdf_variable( data, typecode, size, shape, dimensions, attributes, maskandscale=self.maskandscale) if rec_vars: # Remove padding when only one record variable. if len(rec_vars) == 1: dtypes['names'] = dtypes['names'][:1] dtypes['formats'] = dtypes['formats'][:1] # Build rec array. 
if self.use_mmap: rec_array = self._mm_buf[begin:begin+self._recs*self._recsize].view(dtype=dtypes) rec_array.shape = (self._recs,) else: pos = self.fp.tell() self.fp.seek(begin) rec_array = fromstring(self.fp.read(self._recs*self._recsize), dtype=dtypes) rec_array.shape = (self._recs,) self.fp.seek(pos) for var in rec_vars: self.variables[var].__dict__['data'] = rec_array[var] def _read_var(self): name = asstr(self._unpack_string()) dimensions = [] shape = [] dims = self._unpack_int() for i in range(dims): dimid = self._unpack_int() dimname = self._dims[dimid] dimensions.append(dimname) dim = self.dimensions[dimname] shape.append(dim) dimensions = tuple(dimensions) shape = tuple(shape) attributes = self._read_att_array() nc_type = self.fp.read(4) vsize = self._unpack_int() begin = [self._unpack_int, self._unpack_int64][self.version_byte-1]() typecode, size = TYPEMAP[nc_type] dtype_ = '>%s' % typecode return name, dimensions, shape, attributes, typecode, size, dtype_, begin, vsize def _read_values(self): nc_type = self.fp.read(4) n = self._unpack_int() typecode, size = TYPEMAP[nc_type] count = n*size values = self.fp.read(int(count)) self.fp.read(-count % 4) # read padding if typecode is not 'c': values = fromstring(values, dtype='>%s' % typecode) if values.shape == (1,): values = values[0] else: values = values.rstrip(b'\x00') return values def _pack_begin(self, begin): if self.version_byte == 1: self._pack_int(begin) elif self.version_byte == 2: self._pack_int64(begin) def _pack_int(self, value): self.fp.write(array(value, '>i').tostring()) _pack_int32 = _pack_int def _unpack_int(self): return int(fromstring(self.fp.read(4), '>i')[0]) _unpack_int32 = _unpack_int def _pack_int64(self, value): self.fp.write(array(value, '>q').tostring()) def _unpack_int64(self): return fromstring(self.fp.read(8), '>q')[0] def _pack_string(self, s): count = len(s) self._pack_int(count) self.fp.write(asbytes(s)) self.fp.write(b'0' * (-count % 4)) # pad def _unpack_string(self): count 
= self._unpack_int() s = self.fp.read(count).rstrip(b'\x00') self.fp.read(-count % 4) # read padding return s class netcdf_variable(object): """ A data object for the `netcdf` module. `netcdf_variable` objects are constructed by calling the method `netcdf_file.createVariable` on the `netcdf_file` object. `netcdf_variable` objects behave much like array objects defined in numpy, except that their data resides in a file. Data is read by indexing and written by assigning to an indexed subset; the entire array can be accessed by the index ``[:]`` or (for scalars) by using the methods `getValue` and `assignValue`. `netcdf_variable` objects also have attribute `shape` with the same meaning as for arrays, but the shape cannot be modified. There is another read-only attribute `dimensions`, whose value is the tuple of dimension names. All other attributes correspond to variable attributes defined in the NetCDF file. Variable attributes are created by assigning to an attribute of the `netcdf_variable` object. Parameters ---------- data : array_like The data array that holds the values for the variable. Typically, this is initialized as empty, but with the proper shape. typecode : dtype character code Desired data-type for the data array. size : int Desired element size for the data array. shape : sequence of ints The shape of the array. This should match the lengths of the variable's dimensions. dimensions : sequence of strings The names of the dimensions used by the variable. Must be in the same order of the dimension lengths given by `shape`. attributes : dict, optional Attribute values (any type) keyed by string names. These attributes become attributes for the netcdf_variable object. maskandscale : bool, optional Whether to automatically scale and/or mask data based on attributes. Default is False. Attributes ---------- dimensions : list of str List of names of dimensions used by the variable object. 
isrec, shape Properties See also -------- isrec, shape """ def __init__(self, data, typecode, size, shape, dimensions, attributes=None, maskandscale=False): self.data = data self._typecode = typecode self._size = size self._shape = shape self.dimensions = dimensions self.maskandscale = maskandscale self._attributes = attributes or OrderedDict() for k, v in self._attributes.items(): self.__dict__[k] = v def __setattr__(self, attr, value): # Store user defined attributes in a separate dict, # so we can save them to file later. try: self._attributes[attr] = value except AttributeError: pass self.__dict__[attr] = value def isrec(self): """Returns whether the variable has a record dimension or not. A record dimension is a dimension along which additional data could be easily appended in the netcdf data structure without much rewriting of the data file. This attribute is a read-only property of the `netcdf_variable`. """ return bool(self.data.shape) and not self._shape[0] isrec = property(isrec) def shape(self): """Returns the shape tuple of the data variable. This is a read-only attribute and can not be modified in the same manner of other numpy arrays. """ return self.data.shape shape = property(shape) def getValue(self): """ Retrieve a scalar value from a `netcdf_variable` of length one. Raises ------ ValueError If the netcdf variable is an array of length greater than one, this exception will be raised. """ return self.data.item() def assignValue(self, value): """ Assign a scalar value to a `netcdf_variable` of length one. Parameters ---------- value : scalar Scalar value (of compatible type) to assign to a length-one netcdf variable. This value will be written to file. Raises ------ ValueError If the input is not a scalar, or if the destination is not a length-one netcdf variable. """ if not self.data.flags.writeable: # Work-around for a bug in NumPy. Calling itemset() on a read-only # memory-mapped array causes a seg. fault. 
# See NumPy ticket #1622, and SciPy ticket #1202. # This check for `writeable` can be removed when the oldest version # of numpy still supported by scipy contains the fix for #1622. raise RuntimeError("variable is not writeable") self.data.itemset(value) def typecode(self): """ Return the typecode of the variable. Returns ------- typecode : char The character typecode of the variable (eg, 'i' for int). """ return self._typecode def itemsize(self): """ Return the itemsize of the variable. Returns ------- itemsize : int The element size of the variable (eg, 8 for float64). """ return self._size def __getitem__(self, index): if not self.maskandscale: return self.data[index] data = self.data[index].copy() missing_value = self._get_missing_value() data = self._apply_missing_value(data, missing_value) scale_factor = self._attributes.get('scale_factor') add_offset = self._attributes.get('add_offset') if add_offset is not None or scale_factor is not None: data = data.astype(np.float64) if scale_factor is not None: data = data * scale_factor if add_offset is not None: data += add_offset return data def __setitem__(self, index, data): if self.maskandscale: missing_value = ( self._get_missing_value() or getattr(data, 'fill_value', 999999)) self._attributes.setdefault('missing_value', missing_value) self._attributes.setdefault('_FillValue', missing_value) data = ((data - self._attributes.get('add_offset', 0.0)) / self._attributes.get('scale_factor', 1.0)) data = np.ma.asarray(data).filled(missing_value) if self._typecode not in 'fd' and data.dtype.kind == 'f': data = np.round(data) # Expand data for record vars? 
if self.isrec: if isinstance(index, tuple): rec_index = index[0] else: rec_index = index if isinstance(rec_index, slice): recs = (rec_index.start or 0) + len(data) else: recs = rec_index + 1 if recs > len(self.data): shape = (recs,) + self._shape[1:] # Resize in-place does not always work since # the array might not be single-segment try: self.data.resize(shape) except ValueError: self.__dict__['data'] = np.resize(self.data, shape).astype(self.data.dtype) self.data[index] = data def _get_missing_value(self): """ Returns the value denoting "no data" for this variable. If this variable does not have a missing/fill value, returns None. If both _FillValue and missing_value are given, give precedence to _FillValue. The netCDF standard gives special meaning to _FillValue; missing_value is just used for compatibility with old datasets. """ if '_FillValue' in self._attributes: missing_value = self._attributes['_FillValue'] elif 'missing_value' in self._attributes: missing_value = self._attributes['missing_value'] else: missing_value = None return missing_value @staticmethod def _apply_missing_value(data, missing_value): """ Applies the given missing value to the data array. Returns a numpy.ma array, with any value equal to missing_value masked out (unless missing_value is None, in which case the original array is returned). """ if missing_value is None: newdata = data else: try: missing_value_isnan = np.isnan(missing_value) except (TypeError, NotImplementedError): # some data types (e.g., characters) cannot be tested for NaN missing_value_isnan = False if missing_value_isnan: mymask = np.isnan(data) else: mymask = (data == missing_value) newdata = np.ma.masked_where(mymask, data) return newdata NetCDFFile = netcdf_file NetCDFVariable = netcdf_variable
bsd-3-clause
BT-ojossen/odoo
addons/payment_paypal/tests/test_paypal.py
378
11126
# -*- coding: utf-8 -*- from openerp.addons.payment.models.payment_acquirer import ValidationError from openerp.addons.payment.tests.common import PaymentAcquirerCommon from openerp.addons.payment_paypal.controllers.main import PaypalController from openerp.tools import mute_logger from lxml import objectify import urlparse class PaypalCommon(PaymentAcquirerCommon): def setUp(self): super(PaypalCommon, self).setUp() cr, uid = self.cr, self.uid self.base_url = self.registry('ir.config_parameter').get_param(cr, uid, 'web.base.url') # get the paypal account model, self.paypal_id = self.registry('ir.model.data').get_object_reference(cr, uid, 'payment_paypal', 'payment_acquirer_paypal') # tde+seller@openerp.com - tde+buyer@openerp.com - tde+buyer-it@openerp.com # some CC self.amex = (('378282246310005', '123'), ('371449635398431', '123')) self.amex_corporate = (('378734493671000', '123')) self.autralian_bankcard = (('5610591081018250', '123')) self.dinersclub = (('30569309025904', '123'), ('38520000023237', '123')) self.discover = (('6011111111111117', '123'), ('6011000990139424', '123')) self.jcb = (('3530111333300000', '123'), ('3566002020360505', '123')) self.mastercard = (('5555555555554444', '123'), ('5105105105105100', '123')) self.visa = (('4111111111111111', '123'), ('4012888888881881', '123'), ('4222222222222', '123')) self.dankord_pbs = (('76009244561', '123'), ('5019717010103742', '123')) self.switch_polo = (('6331101999990016', '123')) class PaypalServer2Server(PaypalCommon): def test_00_tx_management(self): cr, uid, context = self.cr, self.uid, {} # be sure not to do stupid things paypal = self.payment_acquirer.browse(self.cr, self.uid, self.paypal_id, None) self.assertEqual(paypal.environment, 'test', 'test without test environment') res = self.payment_acquirer._paypal_s2s_get_access_token(cr, uid, [self.paypal_id], context=context) self.assertTrue(res[self.paypal_id] is not False, 'paypal: did not generate access token') tx_id = 
self.payment_transaction.s2s_create( cr, uid, { 'amount': 0.01, 'acquirer_id': self.paypal_id, 'currency_id': self.currency_euro_id, 'reference': 'test_reference', 'partner_id': self.buyer_id, }, { 'number': self.visa[0][0], 'cvc': self.visa[0][1], 'brand': 'visa', 'expiry_mm': 9, 'expiry_yy': 2015, }, context=context ) tx = self.payment_transaction.browse(cr, uid, tx_id, context=context) self.assertTrue(tx.paypal_txn_id is not False, 'paypal: txn_id should have been set after s2s request') self.payment_transaction.write(cr, uid, tx_id, {'paypal_txn_id': False}, context=context) class PaypalForm(PaypalCommon): def test_10_paypal_form_render(self): cr, uid, context = self.cr, self.uid, {} # be sure not to do stupid things self.payment_acquirer.write(cr, uid, self.paypal_id, {'fees_active': False}, context) paypal = self.payment_acquirer.browse(cr, uid, self.paypal_id, context) self.assertEqual(paypal.environment, 'test', 'test without test environment') # ---------------------------------------- # Test: button direct rendering # ---------------------------------------- # render the button res = self.payment_acquirer.render( cr, uid, self.paypal_id, 'test_ref0', 0.01, self.currency_euro_id, partner_id=None, partner_values=self.buyer_values, context=context) form_values = { 'cmd': '_xclick', 'business': 'tde+paypal-facilitator@openerp.com', 'item_name': 'test_ref0', 'item_number': 'test_ref0', 'first_name': 'Buyer', 'last_name': 'Norbert', 'amount': '0.01', 'currency_code': 'EUR', 'address1': 'Huge Street 2/543', 'city': 'Sin City', 'zip': '1000', 'country': 'Belgium', 'email': 'norbert.buyer@example.com', 'return': '%s' % urlparse.urljoin(self.base_url, PaypalController._return_url), 'notify_url': '%s' % urlparse.urljoin(self.base_url, PaypalController._notify_url), 'cancel_return': '%s' % urlparse.urljoin(self.base_url, PaypalController._cancel_url), } # check form result tree = objectify.fromstring(res) self.assertEqual(tree.get('action'), 
'https://www.sandbox.paypal.com/cgi-bin/webscr', 'paypal: wrong form POST url') for form_input in tree.input: if form_input.get('name') in ['submit']: continue self.assertEqual( form_input.get('value'), form_values[form_input.get('name')], 'paypal: wrong value for input %s: received %s instead of %s' % (form_input.get('name'), form_input.get('value'), form_values[form_input.get('name')]) ) def test_11_paypal_form_with_fees(self): cr, uid, context = self.cr, self.uid, {} # be sure not to do stupid things paypal = self.payment_acquirer.browse(self.cr, self.uid, self.paypal_id, None) self.assertEqual(paypal.environment, 'test', 'test without test environment') # update acquirer: compute fees self.payment_acquirer.write(cr, uid, self.paypal_id, { 'fees_active': True, 'fees_dom_fixed': 1.0, 'fees_dom_var': 0.35, 'fees_int_fixed': 1.5, 'fees_int_var': 0.50, }, context) # render the button res = self.payment_acquirer.render( cr, uid, self.paypal_id, 'test_ref0', 12.50, self.currency_euro, partner_id=None, partner_values=self.buyer_values, context=context) # check form result handling_found = False tree = objectify.fromstring(res) self.assertEqual(tree.get('action'), 'https://www.sandbox.paypal.com/cgi-bin/webscr', 'paypal: wrong form POST url') for form_input in tree.input: if form_input.get('name') in ['handling']: handling_found = True self.assertEqual(form_input.get('value'), '1.57', 'paypal: wrong computed fees') self.assertTrue(handling_found, 'paypal: fees_active did not add handling input in rendered form') @mute_logger('openerp.addons.payment_paypal.models.paypal', 'ValidationError') def test_20_paypal_form_management(self): cr, uid, context = self.cr, self.uid, {} # be sure not to do stupid things paypal = self.payment_acquirer.browse(cr, uid, self.paypal_id, context) self.assertEqual(paypal.environment, 'test', 'test without test environment') # typical data posted by paypal after client has successfully paid paypal_post_data = { 'protection_eligibility': 
u'Ineligible', 'last_name': u'Poilu', 'txn_id': u'08D73520KX778924N', 'receiver_email': u'tde+paypal-facilitator@openerp.com', 'payment_status': u'Pending', 'payment_gross': u'', 'tax': u'0.00', 'residence_country': u'FR', 'address_state': u'Alsace', 'payer_status': u'verified', 'txn_type': u'web_accept', 'address_street': u'Av. de la Pelouse, 87648672 Mayet', 'handling_amount': u'0.00', 'payment_date': u'03:21:19 Nov 18, 2013 PST', 'first_name': u'Norbert', 'item_name': u'test_ref_2', 'address_country': u'France', 'charset': u'windows-1252', 'custom': u'', 'notify_version': u'3.7', 'address_name': u'Norbert Poilu', 'pending_reason': u'multi_currency', 'item_number': u'test_ref_2', 'receiver_id': u'DEG7Z7MYGT6QA', 'transaction_subject': u'', 'business': u'tde+paypal-facilitator@openerp.com', 'test_ipn': u'1', 'payer_id': u'VTDKRZQSAHYPS', 'verify_sign': u'An5ns1Kso7MWUdW4ErQKJJJ4qi4-AVoiUf-3478q3vrSmqh08IouiYpM', 'address_zip': u'75002', 'address_country_code': u'FR', 'address_city': u'Paris', 'address_status': u'unconfirmed', 'mc_currency': u'EUR', 'shipping': u'0.00', 'payer_email': u'tde+buyer@openerp.com', 'payment_type': u'instant', 'mc_gross': u'1.95', 'ipn_track_id': u'866df2ccd444b', 'quantity': u'1' } # should raise error about unknown tx with self.assertRaises(ValidationError): self.payment_transaction.form_feedback(cr, uid, paypal_post_data, 'paypal', context=context) # create tx tx_id = self.payment_transaction.create( cr, uid, { 'amount': 1.95, 'acquirer_id': self.paypal_id, 'currency_id': self.currency_euro_id, 'reference': 'test_ref_2', 'partner_name': 'Norbert Buyer', 'partner_country_id': self.country_france_id, }, context=context ) # validate it self.payment_transaction.form_feedback(cr, uid, paypal_post_data, 'paypal', context=context) # check tx = self.payment_transaction.browse(cr, uid, tx_id, context=context) self.assertEqual(tx.state, 'pending', 'paypal: wrong state after receiving a valid pending notification') 
self.assertEqual(tx.state_message, 'multi_currency', 'paypal: wrong state message after receiving a valid pending notification') self.assertEqual(tx.paypal_txn_id, '08D73520KX778924N', 'paypal: wrong txn_id after receiving a valid pending notification') self.assertFalse(tx.date_validate, 'paypal: validation date should not be updated whenr receiving pending notification') # update tx self.payment_transaction.write(cr, uid, [tx_id], { 'state': 'draft', 'paypal_txn_id': False, }, context=context) # update notification from paypal paypal_post_data['payment_status'] = 'Completed' # validate it self.payment_transaction.form_feedback(cr, uid, paypal_post_data, 'paypal', context=context) # check tx = self.payment_transaction.browse(cr, uid, tx_id, context=context) self.assertEqual(tx.state, 'done', 'paypal: wrong state after receiving a valid pending notification') self.assertEqual(tx.paypal_txn_id, '08D73520KX778924N', 'paypal: wrong txn_id after receiving a valid pending notification') self.assertEqual(tx.date_validate, '2013-11-18 03:21:19', 'paypal: wrong validation date')
agpl-3.0
silky/autojump
bin/autojump_utils.py
18
4357
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import print_function import errno from itertools import islice import os import platform import re import shutil import sys import unicodedata if sys.version_info[0] == 3: imap = map os.getcwdu = os.getcwd else: from itertools import imap def create_dir(path): """Creates a directory atomically.""" try: os.makedirs(path) except OSError as exception: if exception.errno != errno.EEXIST: raise def encode_local(string): """Converts string into user's preferred encoding.""" return string.encode(sys.getfilesystemencoding() or 'utf-8') def first(xs): it = iter(xs) try: if is_python3(): return it.__next__() return it.next() except StopIteration: return None def get_tab_entry_info(entry, separator): """ Given a tab entry in the following format return needle, index, and path: [needle]__[index]__[path] """ needle, index, path = None, None, None match_needle = re.search(r'(.*?)' + separator, entry) match_index = re.search(separator + r'([0-9]{1})', entry) match_path = re.search( separator + r'[0-9]{1}' + separator + r'(.*)', entry) if match_needle: needle = match_needle.group(1) if match_index: index = int(match_index.group(1)) if match_path: path = match_path.group(1) return needle, index, path def get_pwd(): try: return os.getcwdu() except OSError: print("Current directory no longer exists.", file=sys.stderr) raise def has_uppercase(string): if is_python3(): return any(ch.isupper() for ch in string) return any(unicodedata.category(c) == 'Lu' for c in unicode(string)) def in_bash(): return 'bash' in os.getenv('SHELL') def is_python2(): return sys.version_info[0] == 2 def is_python3(): return sys.version_info[0] == 3 def is_linux(): return platform.system() == 'Linux' def is_osx(): return platform.system() == 'Darwin' def is_windows(): return platform.system() == 'Windows' def last(xs): it = iter(xs) tmp = None try: if is_python3(): while True: tmp = it.__next__() else: while True: tmp = it.next() except 
StopIteration: return tmp def move_file(src, dst): """ Atomically move file. Windows does not allow for atomic file renaming (which is used by os.rename / shutil.move) so destination paths must first be deleted. """ if is_windows() and os.path.exists(dst): # raises exception if file is in use on Windows os.remove(dst) shutil.move(src, dst) def print_entry(entry): print_local("%.1f:\t%s" % (entry.weight, entry.path)) def print_local(string): print(encode_local(string)) def print_tab_menu(needle, tab_entries, separator): """ Prints the tab completion menu according to the following format: [needle]__[index]__[possible_match] The needle (search pattern) and index are necessary to recreate the results on subsequent calls. """ for i, entry in enumerate(tab_entries): print_local( '%s%s%d%s%s' % ( needle, separator, i + 1, separator, entry.path)) def sanitize(directories): # edge case to allow '/' as a valid path clean = lambda x: unico(x) if x == os.sep else unico(x).rstrip(os.sep) return list(imap(clean, directories)) def second(xs): it = iter(xs) try: it.next() return it.next() except StopIteration: return None def surround_quotes(string): """ Bash has problems dealing with certain paths so we're surrounding all path outputs with quotes. """ if in_bash() and string: # Python 2.6 requres field numbers return '"{0}"'.format(string) return string def take(n, iterable): """Return first n items of an iterable.""" return islice(iterable, n) def unico(string): """Converts into Unicode string.""" if is_python2() and not isinstance(string, unicode): return unicode(string, encoding='utf-8', errors='replace') return string
gpl-3.0
makacodewalker/etsgh
django/utils/unittest/loader.py
353
13437
"""Loading unittests.""" import os import re import sys import traceback import types import unittest from fnmatch import fnmatch from django.utils.unittest import case, suite try: from os.path import relpath except ImportError: from django.utils.unittest.compatibility import relpath __unittest = True def _CmpToKey(mycmp): 'Convert a cmp= function into a key= function' class K(object): def __init__(self, obj): self.obj = obj def __lt__(self, other): return mycmp(self.obj, other.obj) == -1 return K # what about .pyc or .pyo (etc) # we would need to avoid loading the same tests multiple times # from '.py', '.pyc' *and* '.pyo' VALID_MODULE_NAME = re.compile(r'[_a-z]\w*\.py$', re.IGNORECASE) def _make_failed_import_test(name, suiteClass): message = 'Failed to import test module: %s' % name if hasattr(traceback, 'format_exc'): # Python 2.3 compatibility # format_exc returns two frames of discover.py as well message += '\n%s' % traceback.format_exc() return _make_failed_test('ModuleImportFailure', name, ImportError(message), suiteClass) def _make_failed_load_tests(name, exception, suiteClass): return _make_failed_test('LoadTestsFailure', name, exception, suiteClass) def _make_failed_test(classname, methodname, exception, suiteClass): def testFailure(self): raise exception attrs = {methodname: testFailure} TestClass = type(classname, (case.TestCase,), attrs) return suiteClass((TestClass(methodname),)) class TestLoader(unittest.TestLoader): """ This class is responsible for loading tests according to various criteria and returning them wrapped in a TestSuite """ testMethodPrefix = 'test' sortTestMethodsUsing = cmp suiteClass = suite.TestSuite _top_level_dir = None def loadTestsFromTestCase(self, testCaseClass): """Return a suite of all tests cases contained in testCaseClass""" if issubclass(testCaseClass, suite.TestSuite): raise TypeError("Test cases should not be derived from TestSuite." 
" Maybe you meant to derive from TestCase?") testCaseNames = self.getTestCaseNames(testCaseClass) if not testCaseNames and hasattr(testCaseClass, 'runTest'): testCaseNames = ['runTest'] loaded_suite = self.suiteClass(map(testCaseClass, testCaseNames)) return loaded_suite def loadTestsFromModule(self, module, use_load_tests=True): """Return a suite of all tests cases contained in the given module""" tests = [] for name in dir(module): obj = getattr(module, name) if isinstance(obj, type) and issubclass(obj, unittest.TestCase): tests.append(self.loadTestsFromTestCase(obj)) load_tests = getattr(module, 'load_tests', None) tests = self.suiteClass(tests) if use_load_tests and load_tests is not None: try: return load_tests(self, tests, None) except Exception, e: return _make_failed_load_tests(module.__name__, e, self.suiteClass) return tests def loadTestsFromName(self, name, module=None): """Return a suite of all tests cases given a string specifier. The name may resolve either to a module, a test case class, a test method within a test case class, or a callable object which returns a TestCase or TestSuite instance. The method optionally resolves the names relative to a given module. 
""" parts = name.split('.') if module is None: parts_copy = parts[:] while parts_copy: try: module = __import__('.'.join(parts_copy)) break except ImportError: del parts_copy[-1] if not parts_copy: raise parts = parts[1:] obj = module for part in parts: parent, obj = obj, getattr(obj, part) if isinstance(obj, types.ModuleType): return self.loadTestsFromModule(obj) elif isinstance(obj, type) and issubclass(obj, unittest.TestCase): return self.loadTestsFromTestCase(obj) elif (isinstance(obj, types.UnboundMethodType) and isinstance(parent, type) and issubclass(parent, case.TestCase)): return self.suiteClass([parent(obj.__name__)]) elif isinstance(obj, unittest.TestSuite): return obj elif hasattr(obj, '__call__'): test = obj() if isinstance(test, unittest.TestSuite): return test elif isinstance(test, unittest.TestCase): return self.suiteClass([test]) else: raise TypeError("calling %s returned %s, not a test" % (obj, test)) else: raise TypeError("don't know how to make test from: %s" % obj) def loadTestsFromNames(self, names, module=None): """Return a suite of all tests cases found using the given sequence of string specifiers. See 'loadTestsFromName()'. """ suites = [self.loadTestsFromName(name, module) for name in names] return self.suiteClass(suites) def getTestCaseNames(self, testCaseClass): """Return a sorted sequence of method names found within testCaseClass """ def isTestMethod(attrname, testCaseClass=testCaseClass, prefix=self.testMethodPrefix): return attrname.startswith(prefix) and \ hasattr(getattr(testCaseClass, attrname), '__call__') testFnNames = filter(isTestMethod, dir(testCaseClass)) if self.sortTestMethodsUsing: testFnNames.sort(key=_CmpToKey(self.sortTestMethodsUsing)) return testFnNames def discover(self, start_dir, pattern='test*.py', top_level_dir=None): """Find and return all test modules from the specified start directory, recursing into subdirectories to find them. Only test files that match the pattern will be loaded. 
(Using shell style pattern matching.) All test modules must be importable from the top level of the project. If the start directory is not the top level directory then the top level directory must be specified separately. If a test package name (directory with '__init__.py') matches the pattern then the package will be checked for a 'load_tests' function. If this exists then it will be called with loader, tests, pattern. If load_tests exists then discovery does *not* recurse into the package, load_tests is responsible for loading all tests in the package. The pattern is deliberately not stored as a loader attribute so that packages can continue discovery themselves. top_level_dir is stored so load_tests does not need to pass this argument in to loader.discover(). """ set_implicit_top = False if top_level_dir is None and self._top_level_dir is not None: # make top_level_dir optional if called from load_tests in a package top_level_dir = self._top_level_dir elif top_level_dir is None: set_implicit_top = True top_level_dir = start_dir top_level_dir = os.path.abspath(top_level_dir) if not top_level_dir in sys.path: # all test modules must be importable from the top level directory # should we *unconditionally* put the start directory in first # in sys.path to minimise likelihood of conflicts between installed # modules and development versions? 
sys.path.insert(0, top_level_dir) self._top_level_dir = top_level_dir is_not_importable = False if os.path.isdir(os.path.abspath(start_dir)): start_dir = os.path.abspath(start_dir) if start_dir != top_level_dir: is_not_importable = not os.path.isfile(os.path.join(start_dir, '__init__.py')) else: # support for discovery from dotted module names try: __import__(start_dir) except ImportError: is_not_importable = True else: the_module = sys.modules[start_dir] top_part = start_dir.split('.')[0] start_dir = os.path.abspath(os.path.dirname((the_module.__file__))) if set_implicit_top: self._top_level_dir = os.path.abspath(os.path.dirname(os.path.dirname(sys.modules[top_part].__file__))) sys.path.remove(top_level_dir) if is_not_importable: raise ImportError('Start directory is not importable: %r' % start_dir) tests = list(self._find_tests(start_dir, pattern)) return self.suiteClass(tests) def _get_name_from_path(self, path): path = os.path.splitext(os.path.normpath(path))[0] _relpath = relpath(path, self._top_level_dir) assert not os.path.isabs(_relpath), "Path must be within the project" assert not _relpath.startswith('..'), "Path must be within the project" name = _relpath.replace(os.path.sep, '.') return name def _get_module_from_name(self, name): __import__(name) return sys.modules[name] def _match_path(self, path, full_path, pattern): # override this method to use alternative matching strategy return fnmatch(path, pattern) def _find_tests(self, start_dir, pattern): """Used by discovery. 
Yields test suites it loads.""" paths = os.listdir(start_dir) for path in paths: full_path = os.path.join(start_dir, path) if os.path.isfile(full_path): if not VALID_MODULE_NAME.match(path): # valid Python identifiers only continue if not self._match_path(path, full_path, pattern): continue # if the test file matches, load it name = self._get_name_from_path(full_path) try: module = self._get_module_from_name(name) except: yield _make_failed_import_test(name, self.suiteClass) else: mod_file = os.path.abspath(getattr(module, '__file__', full_path)) realpath = os.path.splitext(mod_file)[0] fullpath_noext = os.path.splitext(full_path)[0] if realpath.lower() != fullpath_noext.lower(): module_dir = os.path.dirname(realpath) mod_name = os.path.splitext(os.path.basename(full_path))[0] expected_dir = os.path.dirname(full_path) msg = ("%r module incorrectly imported from %r. Expected %r. " "Is this module globally installed?") raise ImportError(msg % (mod_name, module_dir, expected_dir)) yield self.loadTestsFromModule(module) elif os.path.isdir(full_path): if not os.path.isfile(os.path.join(full_path, '__init__.py')): continue load_tests = None tests = None if fnmatch(path, pattern): # only check load_tests if the package directory itself matches the filter name = self._get_name_from_path(full_path) package = self._get_module_from_name(name) load_tests = getattr(package, 'load_tests', None) tests = self.loadTestsFromModule(package, use_load_tests=False) if load_tests is None: if tests is not None: # tests loaded from package file yield tests # recurse into the package for test in self._find_tests(full_path, pattern): yield test else: try: yield load_tests(self, tests, pattern) except Exception, e: yield _make_failed_load_tests(package.__name__, e, self.suiteClass) defaultTestLoader = TestLoader() def _makeLoader(prefix, sortUsing, suiteClass=None): loader = TestLoader() loader.sortTestMethodsUsing = sortUsing loader.testMethodPrefix = prefix if suiteClass: loader.suiteClass 
= suiteClass return loader def getTestCaseNames(testCaseClass, prefix, sortUsing=cmp): return _makeLoader(prefix, sortUsing).getTestCaseNames(testCaseClass) def makeSuite(testCaseClass, prefix='test', sortUsing=cmp, suiteClass=suite.TestSuite): return _makeLoader(prefix, sortUsing, suiteClass).loadTestsFromTestCase(testCaseClass) def findTestCases(module, prefix='test', sortUsing=cmp, suiteClass=suite.TestSuite): return _makeLoader(prefix, sortUsing, suiteClass).loadTestsFromModule(module)
bsd-3-clause
dymkowsk/mantid
Framework/PythonInterface/plugins/algorithms/SaveYDA.py
2
9618
from __future__ import absolute_import, division, print_function from mantid.api import PythonAlgorithm, AlgorithmFactory, MatrixWorkspaceProperty, WorkspaceUnitValidator, \ InstrumentValidator, FileProperty, FileAction from mantid.kernel import Direction, CompositeValidator from mantid.dataobjects import Workspace2D import yaml from yaml import Dumper from collections import OrderedDict import math class SaveYDA(PythonAlgorithm): """ Save data in yaml/frida 2.0 format from a Workspace2D. """ def category(self): """Return category """ return "DataHandling\\Text" def name(self): """Return name """ return "SaveYDA" def summary(self): """Return summary """ return "Save Workspace to a Frida 2.0 yaml format" def PyInit(self): """Declare properties """ wsValidators = CompositeValidator() # X axis must be a NumericAxis in energy transfer units. wsValidators.add(WorkspaceUnitValidator("DeltaE")) # Workspace must have an Instrument wsValidators.add(InstrumentValidator()) self.declareProperty(MatrixWorkspaceProperty(name="InputWorkspace", defaultValue="", direction=Direction.Input, validator=wsValidators), doc="Workspace name for input") self.declareProperty(FileProperty(name="Filename", defaultValue="", action=FileAction.Save, extensions=""), doc="The name to use when writing the file") def validateInputs(self): """Basic validation for inputs. :return: issues with not valid Inputs in dictionary """ issues = dict() # Only MomentumTransfer is allowed allowUn = "MomentumTransfer" ws = self.getProperty("InputWorkspace").value # Y axis must be either a SpectrumAxis or a NumericAxis in q units. 
# workspace must be a Workspace2D if ws: ax = ws.getAxis(1) if not ax.isSpectra() and ax.getUnit().unitID() != allowUn: issues["InputWorkspace"] = "Y axis is not 'Spectrum Axis' or 'Momentum Transfer'" if not isinstance(ws, Workspace2D): issues["InputWorkspace"] = "Input Workspace is not a Workspace2D" return issues def PyExec(self): """ Main execution body """ # Properties ws = self.getProperty("InputWorkspace").value filename = self.getProperty("Filename").value run = ws.getRun() ax = ws.getAxis(1) nHist = ws.getNumberHistograms() # check sample logs exists if len(run.getLogData()) == 0: raise NotImplementedError("No sample log data exist in workspace: " + self.getPropertyValue("InputWorkspace")) # save sample log data in lists, commented sequences an commented maps # commented sequences and maps are used to keep Data in the order they get inserted # if a log does not exist a warning is written on the log and the data is not saved in the file metadata = OrderedDict() metadata["format"] = "yaml/frida 2.0" metadata["type"] = "generic tabular data" hist = [] if run.hasProperty("proposal_number"): propn = "Proposal number " + run.getLogData("proposal_number").value hist.append(propn) else: self.log().warning("no proposal number found") if run.hasProperty("proposal_title"): propt = run.getLogData("proposal_title").value hist.append(propt) else: self.log().warning("no proposal title found") if run.hasProperty("experiment_team"): expt = run.getLogData("experiment_team").value hist.append(expt) else: self.log().warning("no experiment team found") hist.append("data reduced with mantid") rpar = [] if run.hasProperty("temperature"): temperature = float(run.getLogData("temperature").value) temp = OrderedDict() temp["name"] = "T" temp["unit"] = "K" temp["val"] = round(temperature, 14) temp["stdv"] = 0 rpar.append(temp) else: self.log().warning("no temperature found") if run.hasProperty("Ei"): eimeV = float(run.getLogData("Ei").value) ei = OrderedDict() ei["name"] = "Ei" 
ei["unit"] = "meV" ei["val"] = round(eimeV, 14) ei["stdv"] = 0 rpar.append(ei) else: self.log().warning("no Ei found") coord = OrderedDict() x = FlowOrderedDict() x["name"] = "w" x["unit"] = "meV" coord["x"] = x y = FlowOrderedDict() y["name"] = "S(q,w)" y["unit"] = "meV-1" coord["y"] = y z = FlowOrderedDict() if ax.isSpectra(): zname = "2th" zunit = "deg" else: zname = "q" zunit = "A-1" z["name"] = zname z["unit"] = zunit coord["z"] = FlowList() coord["z"].append(z) slices = [] bin = [] # if y axis is SpectrumAxis if ax.isSpectra: samplePos = ws.getInstrument().getSample().getPos() sourcePos = ws.getInstrument().getSource().getPos() beamPos = samplePos - sourcePos for i in range(nHist): detector = ws.getDetector(i) # convert radians to degrees twoTheta = detector.getTwoTheta(samplePos, beamPos)*180/math.pi twoTheta = round(twoTheta, 14) bin.append(twoTheta) elif ax.length() == nHist: # if y axis contains bin centers for i in range(ax.length()): xval = round(ax.getValue(), 14) bin.append(xval) else: # get the bin centers not the bin edges bin = self._get_bin_centers(ax) for i in range(nHist): slicethis = OrderedDict() # add j to slices, j = counts slicethis["j"] = i # save in list and commented Map to keep format val = FlowOrderedDict() val["val"] = bin[i] # z is bin centers of y axis, SpectrumAxis or NumericAxis in q units slicethis["z"] = FlowList() slicethis["z"].append(val) xax = ws.readX(i) # get the bin centers not the bin edges xcenters = self._get_bin_centers(xax) # x axis is NumericAxis in energy transfer units xx = [float(j) for j in xcenters] slicethis["x"] = FlowList(xx) ys = ws.dataY(i) # y is dataY of the workspace yy = [float(round(j, 14)) for j in ys] slicethis["y"] = FlowList(yy) slices.append(slicethis) data = OrderedDict() data["Meta"] = metadata data["History"] = hist data["Coord"] = coord data["RPar"] = rpar data["Slices"] = slices data["Slices"] = slices # create yaml file try: with open(filename, "w") as outfile: yaml.dump(data, outfile, 
default_flow_style=False, canonical=False, Dumper=MyDumper) outfile.close() except: raise RuntimeError("Can't write in File" + filename) def _get_bin_centers(self, ax): """ calculates the bin centers from the bin edges :param ax: bin center axis :return: list of bin centers """ bin = [] for i in range(1, ax.size): axval = round((ax[i]+ax[i-1])/2, 14) bin.append(axval) return bin class MyDumper(Dumper): """ regulates the indent for yaml Dumper """ def increase_indent(self, flow=False, indentless=False): return super(MyDumper, self).increase_indent(flow, False) class FlowOrderedDict(OrderedDict): """ Helper class to switch between flow style and no flow style Equal to OrderedDict class but other yaml representer """ pass class FlowList(list): """ Helper class to switch between flow style and no flow style Equal to list class but other yaml representer """ pass def _flow_list_rep(dumper, data): """Yaml representer for list in flow style """ return dumper.represent_sequence(u'tag:yaml.org,2002:seq', data, flow_style=True) def _flow_ord_dic_rep(dumper, data): """Yaml representer for OrderedDict in flow style """ return dumper.represent_mapping(u'tag:yaml.org,2002:map', data, flow_style=True) def _represent_ordered_dict(dumper, data): """Yaml representer for OrderedDict regulates dumping for class OrderedDict """ value = [] for item_key, item_value in data.items(): node_key = dumper.represent_data(item_key) node_value = dumper.represent_data(item_value) value.append((node_key, node_value)) return yaml.nodes.MappingNode(u'tag:yaml.org,2002:map', value) # Adding representers to yaml yaml.add_representer(OrderedDict, _represent_ordered_dict) yaml.add_representer(FlowList, _flow_list_rep) yaml.add_representer(FlowOrderedDict, _flow_ord_dic_rep) #---------------------------------------------------------------------------------------------------------------------# AlgorithmFactory.subscribe(SaveYDA)
gpl-3.0
qubell/contrib-python-qubell-client
qubell/tests/testing/test_values_decorator.py
5
2834
import unittest from qubell.api.private.testing import values __author__ = 'dmakhno' # noinspection PyUnresolvedReferences class ValuesDecoratorTests(unittest.TestCase): class FakeInstance(object): def __init__(self, return_value): self.returnValues = return_value rv = {"str": "some string", "empty-str": '', "none": None, "i": 10, "f": 2.3, "b": True, "complex": {"complex-val": "some value", "complex-list-single": ["single"], "complex-list-multi": ["l1", "l2", "l3"]}} mapping = {"str": "str", "complex-val": "some_val", "complex-list-single": "single", "complex-list-multi": "multi", "complex": "complex", "empty-str": "empty", "none": "none", "i": "i", "f": "f", "b": "b"} instace = FakeInstance(rv) @values(mapping) def decorated_method(self, instance, str=None, complex=None, some_val=None, single=None, multi=None, empty=False, none=False, i=None, f=None, b=None, callback=None): callback.str = str callback.complex = complex callback.some_val = some_val callback.single = single callback.multi = multi callback.empty = empty callback.none = none callback.i = i callback.f = f callback.b = b def setUp(self): class dummy(object): pass self.obj = dummy() self.decorated_method(instance=self.instace, callback=self.obj) def test_json_string(self): assert self.obj.str == "some string" def test_json_empty(self): assert self.obj.empty == '' def test_json_none(self): assert self.obj.none == None def test_json_bool(self): assert self.obj.b == True def test_json_int(self): assert self.obj.i == 10 def test_json_float(self): assert self.obj.f == 2.3 def test_json_inside(self): assert self.obj.some_val == "some value" def test_list_with_one_element(self): assert self.obj.single == "single" def test_list_with_many_elemetns(self): self.assertEqual(self.obj.multi, self.rv['complex']['complex-list-multi']) def test_string(self): self.assertEqual(self.obj.complex, self.rv['complex']) @values({"x-name": "x", "y-name": "y"}) def missing_params_method(self, instance, a, b): pass def 
test_missing_params(self): with self.assertRaises(AttributeError) as context: self.missing_params_method(instance=self.instace) self.assertItemsEqual(context.exception.args[1], ["x-name", "y-name"]) @values({"empty-str": "empty"}) def exceeded_params_methos(self, instance): pass def test_exceeded_params(self): with self.assertRaises(TypeError) as context: self.exceeded_params_methos(instance=self.instace)
apache-2.0
hioa-cs/IncludeOS
test/stress/test.py
4
9043
#!/usr/bin/env python3 from __future__ import division from __future__ import print_function from builtins import str from builtins import range from past.utils import old_div import sys import socket import time import subprocess import os from vmrunner import vmrunner from vmrunner.prettify import color test_name="Stresstest" name_tag = "<" + test_name + ">" # We assume malloc will increase / decrease heap pagewise PAGE_SIZE = 4096 BURST_SIZE = 1000 BURST_COUNT = 10 BURST_INTERVAL = 0.1 HOST = "10.0.0.42" PORT_FLOOD = 4242 PORT_MEM = 4243 memuse_at_start = 0 sock_timeout = 20 # Boot the VM, taking a timeout as parameter thread_timeout = BURST_COUNT * 30 # It's to be expected that the VM allocates more room during the running of tests # e.g. for containers, packets etc. These should all be freed after a run. acceptable_increase = 12 * PAGE_SIZE # A persistent connection to the VM for getting memory info # TODO: This should be expanded to check more vital signs, such as time of day, # connection / packet statistics (>= what we have sent) etc. 
sock_mem = socket.socket sock_mem = socket.socket(socket.AF_INET, socket.SOCK_STREAM) heap_verified = False def get_mem(): name_tag = "<" + test_name + "::get_mem>" try: # We expect this socket to allready be opened time.sleep(1) sock_mem.send("memsize\n".encode()) received = sock_mem.recv(1000).rstrip() except Exception as e: print(color.FAIL(name_tag), "Python socket failed while getting memsize: ", e) return False print(color.INFO(name_tag),"Current VM memory usage reported as ", received) return int(received) def get_mem_start(): global memuse_at_start if memuse_at_start == 0: memuse_at_start = get_mem() return memuse_at_start def memory_increase(lead_time, expected_memuse = memuse_at_start): name_tag = "<" + test_name + "::memory_increase>" if lead_time: print(color.INFO(name_tag),"Checking for memory increase after a lead time of ",lead_time,"s.") # Give the VM a chance to free up resources before asking time.sleep(lead_time) use = get_mem() increase = use - expected_memuse percent = 0.0; if (increase): percent = float(increase) / expected_memuse if increase > acceptable_increase: print(color.WARNING(name_tag), "Memory increased by ", percent, "%.") print("(" , expected_memuse, "->", use, ",", increase,"b increase, but no increase expected.)") else: print(color.OK(name_tag + "Memory constant, no leak detected")) return increase # Fire a single burst of UDP packets def UDP_burst(burst_size = BURST_SIZE, burst_interval = BURST_INTERVAL): global memuse_at_start sock = socket.socket # SOCK_DGRAM is the socket type to use for UDP sockets sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) sock.settimeout(sock_timeout) # This is a stress-test, so we don't want to spend time checking the output # (especially not with python) # We just want to make sure the VM survives. data = "UDP is working as it's supposed to." 
try: for i in range(0, burst_size): sock.sendto(data, (HOST, PORT_FLOOD)) except Exception as e: print(color.WARNING("<Test.py> Python socket timed out while sending. ")) return False sock.close() time.sleep(burst_interval) return get_mem() # Fire a single burst of ICMP packets def ICMP_flood(burst_size = BURST_SIZE, burst_interval = BURST_INTERVAL): # Note: Ping-flooding requires sudo for optimal speed res = subprocess.check_call(["sudo","ping","-f", HOST, "-c", str(burst_size)], timeout=thread_timeout); time.sleep(burst_interval) return get_mem() # Fire a single burst of HTTP requests def httperf(burst_size = BURST_SIZE, burst_interval = BURST_INTERVAL): res = subprocess.check_call(["httperf","--hog", "--server", HOST, "--num-conn", str(burst_size)], timeout=thread_timeout); time.sleep(burst_interval) return get_mem() # Fire a single burst of ARP requests def ARP_burst(burst_size = BURST_SIZE, burst_interval = BURST_INTERVAL): # Note: Arping requires sudo, and we expect the bridge 'bridge43' to be present command = ["sudo", "arping", "-q","-W", str(0.0001), "-I", "bridge43", "-c", str(burst_size * 10), HOST] print(color.DATA(" ".join(command))) time.sleep(0.5) res = subprocess.check_call(command, timeout=thread_timeout); time.sleep(burst_interval) return get_mem() def heap_ok(line): global heap_verified heap_verified = True print(color.INFO("Stresstest::heap_ok"), "VM reports heap is increasing and decreasing as expected") def crash_test(string): print(color.INFO("Opening persistent TCP connection for diagnostics")) sock_mem.connect((HOST, PORT_MEM)) mem_before = get_mem_start() if mem_before <= 0: print(color.FAIL("Initial memory reported as " + str(mem_before))) return False if not heap_verified: print(color.FAIL("Heap behavior was not verified as expected. 
")) return False print(color.HEADER("Initial crash test")) burst_size = BURST_SIZE * 10 ARP_burst(burst_size, 0) UDP_burst(burst_size, 0) ICMP_flood(burst_size, 0) httperf(burst_size, 0) time.sleep(BURST_INTERVAL) mem_after = get_mem() print(color.INFO("Crash test complete. Memory in use: "), mem_after) return mem_after >= memuse_at_start # Fire several bursts, e.g. trigger a function that fires bursts, several times def fire_bursts(func, sub_test_name, lead_out = 3): name_tag = "<" + sub_test_name + ">" print(color.HEADER(test_name + " initiating "+sub_test_name)) membase_start = get_mem() mem_base = membase_start # Track heap behavior increases = 0 decreases = 0 constant = 0 for i in range(0,BURST_COUNT): print(color.INFO(name_tag), " Run ", i+1) memi = func() if memi > mem_base: memincrease = memi - mem_base increases += 1 elif memi == mem_base: memincrease = 0 constant += 1 else: memincrease = 0 decreases += 1 # We want to know how much each burst increases memory relative to the last burst mem_base = memi if memincrease > acceptable_increase: print(color.WARNING(name_tag), "Memory increased by ",memincrease,"b, ",float(memincrease) / BURST_SIZE, "pr. packet \n") else: print(color.OK(name_tag), "Memory increase ",memincrease,"b \n") # Memory can decrease, we don't care about that # if memincrease > 0: # mem_base += memincrease print(color.INFO(name_tag),"Heap behavior: ", "+",increases, ", -",decreases, ", ==", constant) print(color.INFO(name_tag),"Done. 
Checking for liveliness") if memory_increase(lead_out, membase_start) > acceptable_increase: print(color.FAIL(sub_test_name + " failed ")) return False print(color.PASS(sub_test_name + " succeeded ")) return True # Trigger several UDP bursts def ARP(string): return fire_bursts(ARP_burst, "ARP bombardment") # Trigger several UDP bursts def UDP(string): return fire_bursts(UDP_burst, "UDP bombardment") # Trigger several ICMP bursts def ICMP(string): return fire_bursts(ICMP_flood, "Ping-flooding"); # Trigger several HTTP-brusts def TCP(string): return fire_bursts(httperf, "HTTP bombardment") # Get an auto-created VM from the vmrunner vm = vmrunner.vms[0] # Check for vital signs after all the bombardment is done def check_vitals(string): print(color.INFO("Checking vital signs")) mem = get_mem() diff = mem - memuse_at_start pages = old_div(diff, PAGE_SIZE) print(color.INFO("Memory use at test end:"), mem, "bytes") print(color.INFO("Memory difference from test start:"), memuse_at_start, "bytes (Diff:",diff, "b == ",pages, "pages)") sock_mem.close() vm.stop() wait_for_tw() return True # Wait for sockets to exit TIME_WAIT status def wait_for_tw(): print(color.INFO("Waiting for sockets to clear TIME_WAIT stage")) socket_limit = 11500 time_wait_proc = 30000 while time_wait_proc > socket_limit: output = subprocess.check_output(('netstat', '-anlt')).decode("utf-8") output = output.split('\n') time_wait_proc = 0 for line in output: if "TIME_WAIT" in line: time_wait_proc += 1 print(color.INFO("There are {0} sockets in use, waiting for value to drop below {1}".format(time_wait_proc, socket_limit))) time.sleep(7) # Add custom event-handlers vm.on_output("Heap functioning as expected", heap_ok) vm.on_output("Ready to start", crash_test) vm.on_output("Ready for ARP", ARP) vm.on_output("Ready for UDP", UDP) vm.on_output("Ready for ICMP", ICMP) vm.on_output("Ready for TCP", TCP) vm.on_output("Ready to end", check_vitals) if len(sys.argv) > 1: thread_timeout = int(sys.argv[1]) if 
len(sys.argv) > 3: BURST_COUNT = int(sys.argv[2]) BURST_SIZE = int(sys.argv[3]) print(color.HEADER(test_name + " initializing")) print(color.INFO(name_tag),"configured for ", BURST_COUNT,"bursts of", BURST_SIZE, "packets each") if len(sys.argv) > 4: vm.boot(timeout=thread_timeout, image_name=str(sys.argv[4])) else: vm.cmake().boot(thread_timeout).clean()
apache-2.0
hdinsight/hue
desktop/core/ext-py/Django-1.6.10/django/conf/locale/lt/formats.py
118
1833
# -*- encoding: utf-8 -*- # This file is distributed under the same license as the Django package. # from __future__ import unicode_literals # The *_FORMAT strings use the Django date format syntax, # see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date DATE_FORMAT = r'Y \m. E j \d.' TIME_FORMAT = 'H:i:s' DATETIME_FORMAT = r'Y \m. E j \d., H:i:s' YEAR_MONTH_FORMAT = r'Y \m. F' MONTH_DAY_FORMAT = r'E j \d.' SHORT_DATE_FORMAT = 'Y-m-d' SHORT_DATETIME_FORMAT = 'Y-m-d H:i' FIRST_DAY_OF_WEEK = 1 # Monday # The *_INPUT_FORMATS strings use the Python strftime format syntax, # see http://docs.python.org/library/datetime.html#strftime-strptime-behavior DATE_INPUT_FORMATS = ( '%Y-%m-%d', '%d.%m.%Y', '%d.%m.%y', # '2006-10-25', '25.10.2006', '25.10.06' ) TIME_INPUT_FORMATS = ( '%H:%M:%S', # '14:30:59' '%H:%M:%S.%f', # '14:30:59.000200' '%H:%M', # '14:30' '%H.%M.%S', # '14.30.59' '%H.%M.%S.%f', # '14.30.59.000200' '%H.%M', # '14.30' ) DATETIME_INPUT_FORMATS = ( '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59' '%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200' '%Y-%m-%d %H:%M', # '2006-10-25 14:30' '%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59' '%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200' '%d.%m.%Y %H:%M', # '25.10.2006 14:30' '%d.%m.%Y', # '25.10.2006' '%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59' '%d.%m.%y %H:%M:%S.%f', # '25.10.06 14:30:59.000200' '%d.%m.%y %H:%M', # '25.10.06 14:30' '%d.%m.%y %H.%M.%S', # '25.10.06 14.30.59' '%d.%m.%y %H.%M.%S.%f', # '25.10.06 14.30.59.000200' '%d.%m.%y %H.%M', # '25.10.06 14.30' '%d.%m.%y', # '25.10.06' ) DECIMAL_SEPARATOR = ',' THOUSAND_SEPARATOR = '.' NUMBER_GROUPING = 3
apache-2.0
rallylee/gem5
tests/configs/pc-simple-timing.py
13
2346
# Copyright (c) 2012 ARM Limited # All rights reserved. # # The license below extends only to copyright in the software and shall # not be construed as granting a license to any other intellectual # property including but not limited to intellectual property relating # to a hardware implementation of the functionality of the software # licensed hereunder. You may use the software subject to the license # terms below provided that you ensure that this notice is replicated # unmodified and in its entirety in all distributions of the software, # modified or unmodified, in source code or in binary form. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # Authors: Andreas Sandberg from m5.objects import * from x86_generic import * root = LinuxX86FSSystemUniprocessor(mem_mode='timing', mem_class=DDR3_1600_8x8, cpu_class=TimingSimpleCPU).create_root()
bsd-3-clause
ZhangXinNan/tensorflow
tensorflow/contrib/distributions/python/kernel_tests/bijectors/reshape_test.py
14
13148
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Reshape Bijector.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.contrib.distributions.python.ops.bijectors.reshape import Reshape from tensorflow.python.framework import dtypes from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops.distributions.bijector_test_util import assert_bijective_and_finite from tensorflow.python.platform import test class _ReshapeBijectorTest(object): """Base class for testing the reshape transformation. Methods defined in this class call a method self.build_shapes() that is implemented by subclasses defined below, returning respectively ReshapeBijectorTestStatic: static shapes, ReshapeBijectorTestDynamic: shape placeholders of known ndims, and ReshapeBijectorTestDynamicNdims: shape placeholders of unspecified ndims, so that each test in this base class is automatically run over all three cases. The subclasses also implement assertRaisesError to test for either Python exceptions (in the case of static shapes) or TensorFlow op errors (dynamic shapes). 
""" def setUp(self): self._rng = np.random.RandomState(42) def testBijector(self): """Do a basic sanity check of forward, inverse, jacobian.""" expected_x = np.random.randn(4, 3, 2) expected_y = np.reshape(expected_x, [4, 6]) with self.test_session() as sess: shape_in, shape_out, feed_dict = self.build_shapes([3, 2], [6,]) bijector = Reshape( event_shape_out=shape_out, event_shape_in=shape_in, validate_args=True) (x_, y_, fldj_, ildj_) = sess.run(( bijector.inverse(expected_y), bijector.forward(expected_x), bijector.forward_log_det_jacobian(expected_x, event_ndims=2), bijector.inverse_log_det_jacobian(expected_y, event_ndims=2), ), feed_dict=feed_dict) self.assertEqual("reshape", bijector.name) self.assertAllClose(expected_y, y_, rtol=1e-6, atol=0) self.assertAllClose(expected_x, x_, rtol=1e-6, atol=0) self.assertAllClose(0., fldj_, rtol=1e-6, atol=0) self.assertAllClose(0., ildj_, rtol=1e-6, atol=0) def testEventShapeTensor(self): """Test event_shape_tensor methods when even ndims may be dynamic.""" shape_in_static = [2, 3] shape_out_static = [6,] shape_in, shape_out, feed_dict = self.build_shapes(shape_in_static, shape_out_static) bijector = Reshape( event_shape_out=shape_out, event_shape_in=shape_in, validate_args=True) # using the _tensor methods, we should always get a fully-specified # result since these are evaluated at graph runtime. 
with self.test_session() as sess: (shape_out_, shape_in_) = sess.run(( bijector.forward_event_shape_tensor(shape_in), bijector.inverse_event_shape_tensor(shape_out), ), feed_dict=feed_dict) self.assertAllEqual(shape_out_static, shape_out_) self.assertAllEqual(shape_in_static, shape_in_) def testScalarReshape(self): """Test reshaping to and from a scalar shape ().""" expected_x = np.random.randn(4, 3, 1) expected_y = np.reshape(expected_x, [4, 3]) expected_x_scalar = np.random.randn(1,) expected_y_scalar = expected_x_scalar[0] shape_in, shape_out, feed_dict = self.build_shapes([], [1,]) with self.test_session() as sess: bijector = Reshape( event_shape_out=shape_in, event_shape_in=shape_out, validate_args=True) (x_, y_, x_scalar_, y_scalar_ ) = sess.run(( bijector.inverse(expected_y), bijector.forward(expected_x), bijector.inverse(expected_y_scalar), bijector.forward(expected_x_scalar), ), feed_dict=feed_dict) self.assertAllClose(expected_y, y_, rtol=1e-6, atol=0) self.assertAllClose(expected_x, x_, rtol=1e-6, atol=0) self.assertAllClose(expected_y_scalar, y_scalar_, rtol=1e-6, atol=0) self.assertAllClose(expected_x_scalar, x_scalar_, rtol=1e-6, atol=0) def testMultipleUnspecifiedDimensionsOpError(self): with self.test_session() as sess: shape_in, shape_out, feed_dict = self.build_shapes([2, 3], [4, -1, -1,]) bijector = Reshape( event_shape_out=shape_out, event_shape_in=shape_in, validate_args=True) with self.assertRaisesError( "elements must have at most one `-1`."): sess.run(bijector.forward_event_shape_tensor(shape_in), feed_dict=feed_dict) # pylint: disable=invalid-name def _testInvalidDimensionsOpError(self, expected_error_message): with self.test_session() as sess: shape_in, shape_out, feed_dict = self.build_shapes([2, 3], [1, 2, -2,]) bijector = Reshape( event_shape_out=shape_out, event_shape_in=shape_in, validate_args=True) with self.assertRaisesError(expected_error_message): sess.run(bijector.forward_event_shape_tensor(shape_in), feed_dict=feed_dict) # 
pylint: enable=invalid-name def testValidButNonMatchingInputOpError(self): x = np.random.randn(4, 3, 2) with self.test_session() as sess: shape_in, shape_out, feed_dict = self.build_shapes([2, 3], [1, 6, 1,]) bijector = Reshape( event_shape_out=shape_out, event_shape_in=shape_in, validate_args=True) # Here we pass in a tensor (x) whose shape is compatible with # the output shape, so tf.reshape will throw no error, but # doesn't match the expected input shape. with self.assertRaisesError( "Input `event_shape` does not match `event_shape_in`."): sess.run(bijector.forward(x), feed_dict=feed_dict) def testValidButNonMatchingInputPartiallySpecifiedOpError(self): x = np.random.randn(4, 3, 2) with self.test_session() as sess: shape_in, shape_out, feed_dict = self.build_shapes([2, -1], [1, 6, 1,]) bijector = Reshape( event_shape_out=shape_out, event_shape_in=shape_in, validate_args=True) with self.assertRaisesError( "Input `event_shape` does not match `event_shape_in`."): sess.run(bijector.forward(x), feed_dict=feed_dict) # pylint: disable=invalid-name def _testInputOutputMismatchOpError(self, expected_error_message): x1 = np.random.randn(4, 2, 3) x2 = np.random.randn(4, 1, 1, 5) with self.test_session() as sess: shape_in, shape_out, fd_mismatched = self.build_shapes([2, 3], [1, 1, 5]) bijector = Reshape( event_shape_out=shape_out, event_shape_in=shape_in, validate_args=True) with self.assertRaisesError(expected_error_message): sess.run(bijector.forward(x1), feed_dict=fd_mismatched) with self.assertRaisesError(expected_error_message): sess.run(bijector.inverse(x2), feed_dict=fd_mismatched) # pylint: enable=invalid-name def testOneShapePartiallySpecified(self): expected_x = np.random.randn(4, 6) expected_y = np.reshape(expected_x, [4, 2, 3]) with self.test_session() as sess: # one of input/output shapes is partially specified shape_in, shape_out, feed_dict = self.build_shapes([-1,], [2, 3]) bijector = Reshape( event_shape_out=shape_out, event_shape_in=shape_in, 
validate_args=True) (x_, y_, ) = sess.run(( bijector.inverse(expected_y), bijector.forward(expected_x), ), feed_dict=feed_dict) self.assertAllClose(expected_y, y_, rtol=1e-6, atol=0) self.assertAllClose(expected_x, x_, rtol=1e-6, atol=0) def testBothShapesPartiallySpecified(self): expected_x = np.random.randn(4, 2, 3) expected_y = np.reshape(expected_x, [4, 3, 2]) with self.test_session() as sess: shape_in, shape_out, feed_dict = self.build_shapes([-1, 3], [-1, 2]) bijector = Reshape( event_shape_out=shape_out, event_shape_in=shape_in, validate_args=True) (x_, y_, ) = sess.run(( bijector.inverse(expected_y), bijector.forward(expected_x), ), feed_dict=feed_dict) self.assertAllClose(expected_y, y_, rtol=1e-6, atol=0) self.assertAllClose(expected_x, x_, rtol=1e-6, atol=0) def testDefaultVectorShape(self): expected_x = np.random.randn(4, 4) expected_y = np.reshape(expected_x, [4, 2, 2]) with self.test_session() as sess: _, shape_out, feed_dict = self.build_shapes([-1,], [-1, 2]) bijector = Reshape(shape_out, validate_args=True) (x_, y_, ) = sess.run(( bijector.inverse(expected_y), bijector.forward(expected_x), ), feed_dict=feed_dict) self.assertAllClose(expected_y, y_, rtol=1e-6, atol=0) self.assertAllClose(expected_x, x_, rtol=1e-6, atol=0) def build_shapes(self, *args, **kwargs): raise NotImplementedError("Subclass failed to implement `build_shapes`.") class ReshapeBijectorTestStatic(test.TestCase, _ReshapeBijectorTest): def build_shapes(self, shape_in, shape_out): shape_in_static = shape_in shape_out_static = shape_out feed_dict = {} return shape_in_static, shape_out_static, feed_dict def assertRaisesError(self, msg): return self.assertRaisesRegexp(Exception, msg) def testEventShape(self): shape_in_static = tensor_shape.TensorShape([2, 3]) shape_out_static = tensor_shape.TensorShape([6,]) bijector = Reshape( event_shape_out=shape_out_static, event_shape_in=shape_in_static, validate_args=True) # test that forward_ and inverse_event_shape do sensible things # when 
shapes are statically known. self.assertEqual( bijector.forward_event_shape(shape_in_static), shape_out_static) self.assertEqual( bijector.inverse_event_shape(shape_out_static), shape_in_static) def testBijectiveAndFinite(self): x = np.random.randn(4, 2, 3) y = np.reshape(x, [4, 1, 2, 3]) with self.test_session(): bijector = Reshape( event_shape_in=[2, 3], event_shape_out=[1, 2, 3], validate_args=True) assert_bijective_and_finite( bijector, x, y, event_ndims=2, rtol=1e-6, atol=0) def testInvalidDimensionsOpError(self): self._testInvalidDimensionsOpError( "Invalid value in tensor used for shape: -2") def testInputOutputMismatchOpError(self): self._testInputOutputMismatchOpError("Cannot reshape a tensor with") class ReshapeBijectorTestDynamic(test.TestCase, _ReshapeBijectorTest): def build_shapes(self, shape_in, shape_out): shape_in_ph = array_ops.placeholder(shape=(len(shape_in),), dtype=dtypes.int32) shape_out_ph = array_ops.placeholder(shape=(len(shape_out),), dtype=dtypes.int32) feed_dict = {shape_in_ph: shape_in, shape_out_ph: shape_out} return shape_in_ph, shape_out_ph, feed_dict def assertRaisesError(self, msg): return self.assertRaisesOpError(msg) def testInvalidDimensionsOpError(self): self._testInvalidDimensionsOpError( "elements must be either positive integers or `-1`.") def testInputOutputMismatchOpError(self): self._testInputOutputMismatchOpError("Input to reshape is a tensor with") class ReshapeBijectorTestDynamicNdims(test.TestCase, _ReshapeBijectorTest): def build_shapes(self, shape_in, shape_out): shape_in_ph = array_ops.placeholder(shape=None, dtype=dtypes.int32) shape_out_ph = array_ops.placeholder(shape=None, dtype=dtypes.int32) feed_dict = {shape_in_ph: shape_in, shape_out_ph: shape_out} return shape_in_ph, shape_out_ph, feed_dict def assertRaisesError(self, msg): return self.assertRaisesOpError(msg) def testInvalidDimensionsOpError(self): self._testInvalidDimensionsOpError( "elements must be either positive integers or `-1`.") def 
testInputOutputMismatchOpError(self): self._testInputOutputMismatchOpError("Input to reshape is a tensor with") if __name__ == "__main__": test.main()
apache-2.0
Ayub-Khan/edx-platform
lms/djangoapps/lti_provider/users.py
63
5258
""" LTI user management functionality. This module reconciles the two identities that an individual has in the campus LMS platform and on edX. """ import string import random import uuid from django.conf import settings from django.contrib.auth import authenticate, login from django.contrib.auth.models import User from django.core.exceptions import PermissionDenied from django.db import IntegrityError, transaction from lti_provider.models import LtiUser from student.models import UserProfile def authenticate_lti_user(request, lti_user_id, lti_consumer): """ Determine whether the user specified by the LTI launch has an existing account. If not, create a new Django User model and associate it with an LtiUser object. If the currently logged-in user does not match the user specified by the LTI launch, log out the old user and log in the LTI identity. """ try: lti_user = LtiUser.objects.get( lti_user_id=lti_user_id, lti_consumer=lti_consumer ) except LtiUser.DoesNotExist: # This is the first time that the user has been here. Create an account. lti_user = create_lti_user(lti_user_id, lti_consumer) if not (request.user.is_authenticated() and request.user == lti_user.edx_user): # The user is not authenticated, or is logged in as somebody else. # Switch them to the LTI user switch_user(request, lti_user, lti_consumer) def create_lti_user(lti_user_id, lti_consumer): """ Generate a new user on the edX platform with a random username and password, and associates that account with the LTI identity. """ edx_password = str(uuid.uuid4()) created = False while not created: try: edx_user_id = generate_random_edx_username() edx_email = "{}@{}".format(edx_user_id, settings.LTI_USER_EMAIL_DOMAIN) with transaction.atomic(): edx_user = User.objects.create_user( username=edx_user_id, password=edx_password, email=edx_email, ) # A profile is required if PREVENT_CONCURRENT_LOGINS flag is set. 
# TODO: We could populate user information from the LTI launch here, # but it's not necessary for our current uses. edx_user_profile = UserProfile(user=edx_user) edx_user_profile.save() created = True except IntegrityError: # The random edx_user_id wasn't unique. Since 'created' is still # False, we will retry with a different random ID. pass lti_user = LtiUser( lti_consumer=lti_consumer, lti_user_id=lti_user_id, edx_user=edx_user ) lti_user.save() return lti_user def switch_user(request, lti_user, lti_consumer): """ Log out the current user, and log in using the edX identity associated with the LTI ID. """ edx_user = authenticate( username=lti_user.edx_user.username, lti_user_id=lti_user.lti_user_id, lti_consumer=lti_consumer ) if not edx_user: # This shouldn't happen, since we've created edX accounts for any LTI # users by this point, but just in case we can return a 403. raise PermissionDenied() login(request, edx_user) def generate_random_edx_username(): """ Create a valid random edX user ID. An ID is at most 30 characters long, and can contain upper and lowercase letters and numbers. :return: """ allowable_chars = string.ascii_letters + string.digits username = '' for _index in range(30): username = username + random.SystemRandom().choice(allowable_chars) return username class LtiBackend(object): """ A Django authentication backend that authenticates users via LTI. This backend will only return a User object if it is associated with an LTI identity (i.e. the user was created by the create_lti_user method above). """ def authenticate(self, username=None, lti_user_id=None, lti_consumer=None): """ Try to authenticate a user. This method will return a Django user object if a user with the corresponding username exists in the database, and if a record that links that user with an LTI user_id field exists in the LtiUser collection. If such a user is not found, the method returns None (in line with the authentication backend specification). 
""" try: edx_user = User.objects.get(username=username) except User.DoesNotExist: return None try: LtiUser.objects.get( edx_user_id=edx_user.id, lti_user_id=lti_user_id, lti_consumer=lti_consumer ) except LtiUser.DoesNotExist: return None return edx_user def get_user(self, user_id): """ Return the User object for a user that has already been authenticated by this backend. """ try: return User.objects.get(id=user_id) except User.DoesNotExist: return None
agpl-3.0
rghe/ansible
lib/ansible/modules/cloud/amazon/cloudfront_invalidation.py
49
10365
#!/usr/bin/python # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: cloudfront_invalidation short_description: create invalidations for aws cloudfront distributions description: - Allows for invalidation of a batch of paths for a CloudFront distribution. requirements: - boto3 >= 1.0.0 - python >= 2.6 version_added: "2.5" author: Willem van Ketwich (@wilvk) extends_documentation_fragment: - aws - ec2 options: distribution_id: description: - The id of the cloudfront distribution to invalidate paths for. Can be specified insted of the alias. required: false alias: description: - The alias of the cloudfront distribution to invalidate paths for. Can be specified instead of distribution_id. required: false caller_reference: description: - A unique reference identifier for the invalidation paths. required: false default: current datetime stamp target_paths: description: - A list of paths on the distribution to invalidate. Each path should begin with '/'. Wildcards are allowed. eg. '/foo/bar/*' required: true notes: - does not support check mode ''' EXAMPLES = ''' - name: create a batch of invalidations using a distribution_id for a reference cloudfront_invalidation: distribution_id: E15BU8SDCGSG57 caller_reference: testing 123 target_paths: - /testpathone/test1.css - /testpathtwo/test2.js - /testpaththree/test3.ss - name: create a batch of invalidations using an alias as a reference and one path using a wildcard match cloudfront_invalidation: alias: alias.test.com caller_reference: testing 123 target_paths: - /testpathone/test4.css - /testpathtwo/test5.js - /testpaththree/* ''' RETURN = ''' invalidation: description: The invalidation's information. 
returned: always type: complex contains: create_time: description: The date and time the invalidation request was first made. returned: always type: string sample: '2018-02-01T15:50:41.159000+00:00' id: description: The identifier for the invalidation request. returned: always type: string sample: I2G9MOWJZFV612 invalidation_batch: description: The current invalidation information for the batch request. returned: always type: complex contains: caller_reference: description: The value used to uniquely identify an invalidation request. returned: always type: string sample: testing 123 paths: description: A dict that contains information about the objects that you want to invalidate. returned: always type: complex contains: items: description: A list of the paths that you want to invalidate. returned: always type: list sample: - /testpathtwo/test2.js - /testpathone/test1.css - /testpaththree/test3.ss quantity: description: The number of objects that you want to invalidate. returned: always type: int sample: 3 status: description: The status of the invalidation request. returned: always type: string sample: Completed location: description: The fully qualified URI of the distribution and invalidation batch request. 
returned: always type: string sample: https://cloudfront.amazonaws.com/2017-03-25/distribution/E1ZID6KZJECZY7/invalidation/I2G9MOWJZFV622 ''' from ansible.module_utils.ec2 import get_aws_connection_info from ansible.module_utils.ec2 import ec2_argument_spec, boto3_conn from ansible.module_utils.ec2 import snake_dict_to_camel_dict from ansible.module_utils.ec2 import camel_dict_to_snake_dict from ansible.module_utils.aws.core import AnsibleAWSModule from ansible.module_utils.aws.cloudfront_facts import CloudFrontFactsServiceManager import datetime try: from botocore.exceptions import ClientError, BotoCoreError except ImportError: pass # caught by imported AnsibleAWSModule class CloudFrontInvalidationServiceManager(object): """ Handles CloudFront service calls to AWS for invalidations """ def __init__(self, module): self.module = module self.create_client('cloudfront') def create_client(self, resource): region, ec2_url, aws_connect_kwargs = get_aws_connection_info(self.module, boto3=True) self.client = boto3_conn(self.module, conn_type='client', resource=resource, region=region, endpoint=ec2_url, **aws_connect_kwargs) def create_invalidation(self, distribution_id, invalidation_batch): current_invalidation_response = self.get_invalidation(distribution_id, invalidation_batch['CallerReference']) try: response = self.client.create_invalidation(DistributionId=distribution_id, InvalidationBatch=invalidation_batch) response.pop('ResponseMetadata', None) if current_invalidation_response: return response, False else: return response, True except BotoCoreError as e: self.module.fail_json_aws(e, msg="Error creating CloudFront invalidations.") except ClientError as e: if ('Your request contains a caller reference that was used for a previous invalidation batch ' 'for the same distribution.' in e.response['Error']['Message']): self.module.warn("InvalidationBatch target paths are not modifiable. 
" "To make a new invalidation please update caller_reference.") return current_invalidation_response, False else: self.module.fail_json_aws(e, msg="Error creating CloudFront invalidations.") def get_invalidation(self, distribution_id, caller_reference): current_invalidation = {} # find all invalidations for the distribution try: paginator = self.client.get_paginator('list_invalidations') invalidations = paginator.paginate(DistributionId=distribution_id).build_full_result().get('InvalidationList', {}).get('Items', []) invalidation_ids = [inv['Id'] for inv in invalidations] except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e, msg="Error listing CloudFront invalidations.") # check if there is an invalidation with the same caller reference for inv_id in invalidation_ids: try: invalidation = self.client.get_invalidation(DistributionId=distribution_id, Id=inv_id)['Invalidation'] caller_ref = invalidation.get('InvalidationBatch', {}).get('CallerReference') except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e, msg="Error getting Cloudfront invalidation {0}".format(inv_id)) if caller_ref == caller_reference: current_invalidation = invalidation break current_invalidation.pop('ResponseMetadata', None) return current_invalidation class CloudFrontInvalidationValidationManager(object): """ Manages Cloudfront validations for invalidation batches """ def __init__(self, module): self.module = module self.__cloudfront_facts_mgr = CloudFrontFactsServiceManager(module) def validate_distribution_id(self, distribution_id, alias): try: if distribution_id is None and alias is None: self.module.fail_json(msg="distribution_id or alias must be specified") if distribution_id is None: distribution_id = self.__cloudfront_facts_mgr.get_distribution_id_from_domain_name(alias) return distribution_id except (ClientError, BotoCoreError) as e: self.module.fail_json_aws(e, msg="Error validating parameters.") def create_aws_list(self, invalidation_batch): aws_list = 
{} aws_list["Quantity"] = len(invalidation_batch) aws_list["Items"] = invalidation_batch return aws_list def validate_invalidation_batch(self, invalidation_batch, caller_reference): try: if caller_reference is not None: valid_caller_reference = caller_reference else: valid_caller_reference = datetime.datetime.now().isoformat() valid_invalidation_batch = { 'paths': self.create_aws_list(invalidation_batch), 'caller_reference': valid_caller_reference } return valid_invalidation_batch except (ClientError, BotoCoreError) as e: self.module.fail_json_aws(e, msg="Error validating invalidation batch.") def main(): argument_spec = ec2_argument_spec() argument_spec.update(dict( caller_reference=dict(), distribution_id=dict(), alias=dict(), target_paths=dict(required=True, type='list') )) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=False, mutually_exclusive=[['distribution_id', 'alias']]) validation_mgr = CloudFrontInvalidationValidationManager(module) service_mgr = CloudFrontInvalidationServiceManager(module) caller_reference = module.params.get('caller_reference') distribution_id = module.params.get('distribution_id') alias = module.params.get('alias') target_paths = module.params.get('target_paths') result = {} distribution_id = validation_mgr.validate_distribution_id(distribution_id, alias) valid_target_paths = validation_mgr.validate_invalidation_batch(target_paths, caller_reference) valid_pascal_target_paths = snake_dict_to_camel_dict(valid_target_paths, True) result, changed = service_mgr.create_invalidation(distribution_id, valid_pascal_target_paths) module.exit_json(changed=changed, **camel_dict_to_snake_dict(result)) if __name__ == '__main__': main()
gpl-3.0
chauhanhardik/populo
lms/djangoapps/shoppingcart/tests/test_views.py
3
96238
""" Tests for Shopping Cart views """ from collections import OrderedDict import pytz from urlparse import urlparse from decimal import Decimal import json from django.http import HttpRequest from django.conf import settings from django.test import TestCase from django.test.utils import override_settings from django.core.urlresolvers import reverse from django.contrib.admin.sites import AdminSite from django.contrib.auth.models import Group, User from django.contrib.messages.storage.fallback import FallbackStorage from django.core import mail from django.core.cache import cache from pytz import UTC from freezegun import freeze_time from datetime import datetime, timedelta from mock import patch, Mock import ddt from common.test.utils import XssTestMixin from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase, ModuleStoreTestCase from xmodule.modulestore.tests.factories import CourseFactory from student.roles import CourseSalesAdminRole from util.date_utils import get_default_time_display from util.testing import UrlResetMixin from shoppingcart.views import _can_download_report, _get_date_from_str from shoppingcart.models import ( Order, CertificateItem, PaidCourseRegistration, CourseRegCodeItem, Coupon, CourseRegistrationCode, RegistrationCodeRedemption, DonationConfiguration, CouponRedemption) from student.tests.factories import UserFactory, AdminFactory, CourseModeFactory from courseware.tests.factories import InstructorFactory from student.models import CourseEnrollment from course_modes.models import CourseMode from edxmako.shortcuts import render_to_response from embargo.test_utils import restrict_course from shoppingcart.processors import render_purchase_form_html from shoppingcart.admin import SoftDeleteCouponAdmin from shoppingcart.views import initialize_report from shoppingcart.tests.payment_fake import PaymentFakeView from shoppingcart.processors.CyberSource2 import sign def mock_render_purchase_form_html(*args, **kwargs): return 
render_purchase_form_html(*args, **kwargs) form_mock = Mock(side_effect=mock_render_purchase_form_html) def mock_render_to_response(*args, **kwargs): return render_to_response(*args, **kwargs) render_mock = Mock(side_effect=mock_render_to_response) postpay_mock = Mock() @patch.dict('django.conf.settings.FEATURES', {'ENABLE_PAID_COURSE_REGISTRATION': True}) @ddt.ddt class ShoppingCartViewsTests(SharedModuleStoreTestCase, XssTestMixin): @classmethod def setUpClass(cls): super(ShoppingCartViewsTests, cls).setUpClass() cls.course = CourseFactory.create(org='MITx', number='999', display_name='Robot Super Course') cls.course_key = cls.course.id verified_course = CourseFactory.create(org='org', number='test', display_name='Test Course') cls.verified_course_key = verified_course.id xss_course = CourseFactory.create(org='xssorg', number='test', display_name='<script>alert("XSS")</script>') cls.xss_course_key = xss_course.id cls.testing_course = CourseFactory.create(org='edX', number='888', display_name='Testing Super Course') def setUp(self): super(ShoppingCartViewsTests, self).setUp() patcher = patch('student.models.tracker') self.mock_tracker = patcher.start() self.user = UserFactory.create() self.user.set_password('password') self.user.save() self.instructor = AdminFactory.create() self.cost = 40 self.coupon_code = 'abcde' self.reg_code = 'qwerty' self.percentage_discount = 10 self.course_mode = CourseMode(course_id=self.course_key, mode_slug="honor", mode_display_name="honor cert", min_price=self.cost) self.course_mode.save() # Saving another testing course mode self.testing_cost = 20 self.testing_course_mode = CourseMode(course_id=self.testing_course.id, mode_slug="honor", mode_display_name="testing honor cert", min_price=self.testing_cost) self.testing_course_mode.save() self.cart = Order.get_cart_for_user(self.user) self.addCleanup(patcher.stop) self.now = datetime.now(pytz.UTC) self.yesterday = self.now - timedelta(days=1) self.tomorrow = self.now + 
timedelta(days=1) def get_discount(self, cost): """ This method simple return the discounted amount """ val = Decimal("{0:.2f}".format(Decimal(self.percentage_discount / 100.00) * cost)) return cost - val def add_coupon(self, course_key, is_active, code): """ add dummy coupon into models """ coupon = Coupon(code=code, description='testing code', course_id=course_key, percentage_discount=self.percentage_discount, created_by=self.user, is_active=is_active) coupon.save() def add_reg_code(self, course_key, mode_slug='honor', is_valid=True): """ add dummy registration code into models """ course_reg_code = CourseRegistrationCode( code=self.reg_code, course_id=course_key, created_by=self.user, mode_slug=mode_slug, is_valid=is_valid ) course_reg_code.save() def _add_course_mode(self, min_price=50, mode_slug='honor', expiration_date=None): """ Adds a course mode to the test course. """ mode = CourseModeFactory.create() mode.course_id = self.course.id mode.min_price = min_price mode.mode_slug = mode_slug mode.expiration_date = expiration_date mode.save() return mode def add_course_to_user_cart(self, course_key): """ adding course to user cart """ self.login_user() reg_item = PaidCourseRegistration.add_to_order(self.cart, course_key) return reg_item def login_user(self): self.client.login(username=self.user.username, password="password") def test_add_course_to_cart_anon(self): resp = self.client.post(reverse('shoppingcart.views.add_course_to_cart', args=[self.course_key.to_deprecated_string()])) self.assertEqual(resp.status_code, 403) @patch('shoppingcart.views.render_to_response', render_mock) def test_billing_details(self): billing_url = reverse('billing_details') self.login_user() # page not found error because order_type is not business resp = self.client.get(billing_url) self.assertEqual(resp.status_code, 404) #chagne the order_type to business self.cart.order_type = 'business' self.cart.save() resp = self.client.get(billing_url) self.assertEqual(resp.status_code, 200) 
((template, context), _) = render_mock.call_args # pylint: disable=redefined-outer-name self.assertEqual(template, 'shoppingcart/billing_details.html') # check for the default currency in the context self.assertEqual(context['currency'], 'usd') self.assertEqual(context['currency_symbol'], '$') data = {'company_name': 'Test Company', 'company_contact_name': 'JohnDoe', 'company_contact_email': 'john@est.com', 'recipient_name': 'Mocker', 'recipient_email': 'mock@germ.com', 'company_address_line_1': 'DC Street # 1', 'company_address_line_2': '', 'company_city': 'DC', 'company_state': 'NY', 'company_zip': '22003', 'company_country': 'US', 'customer_reference_number': 'PO#23'} resp = self.client.post(billing_url, data) self.assertEqual(resp.status_code, 200) @patch('shoppingcart.views.render_to_response', render_mock) @override_settings(PAID_COURSE_REGISTRATION_CURRENCY=['PKR', 'Rs']) def test_billing_details_with_override_currency_settings(self): billing_url = reverse('billing_details') self.login_user() #chagne the order_type to business self.cart.order_type = 'business' self.cart.save() resp = self.client.get(billing_url) self.assertEqual(resp.status_code, 200) ((template, context), __) = render_mock.call_args # pylint: disable=redefined-outer-name self.assertEqual(template, 'shoppingcart/billing_details.html') # check for the override currency settings in the context self.assertEqual(context['currency'], 'PKR') self.assertEqual(context['currency_symbol'], 'Rs') def test_same_coupon_code_applied_on_multiple_items_in_the_cart(self): """ test to check that that the same coupon code applied on multiple items in the cart. 
""" self.login_user() # add first course to user cart resp = self.client.post(reverse('shoppingcart.views.add_course_to_cart', args=[self.course_key.to_deprecated_string()])) self.assertEqual(resp.status_code, 200) # add and apply the coupon code to course in the cart self.add_coupon(self.course_key, True, self.coupon_code) resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code}) self.assertEqual(resp.status_code, 200) # now add the same coupon code to the second course(testing_course) self.add_coupon(self.testing_course.id, True, self.coupon_code) #now add the second course to cart, the coupon code should be # applied when adding the second course to the cart resp = self.client.post(reverse('shoppingcart.views.add_course_to_cart', args=[self.testing_course.id.to_deprecated_string()])) self.assertEqual(resp.status_code, 200) #now check the user cart and see that the discount has been applied on both the courses resp = self.client.get(reverse('shoppingcart.views.show_cart', args=[])) self.assertEqual(resp.status_code, 200) #first course price is 40$ and the second course price is 20$ # after 10% discount on both the courses the total price will be 18+36 = 54 self.assertIn('54.00', resp.content) def test_add_course_to_cart_already_in_cart(self): PaidCourseRegistration.add_to_order(self.cart, self.course_key) self.login_user() resp = self.client.post(reverse('shoppingcart.views.add_course_to_cart', args=[self.course_key.to_deprecated_string()])) self.assertEqual(resp.status_code, 400) self.assertIn('The course {0} is already in your cart.'.format(self.course_key.to_deprecated_string()), resp.content) def test_course_discount_invalid_coupon(self): self.add_coupon(self.course_key, True, self.coupon_code) self.add_course_to_user_cart(self.course_key) non_existing_code = "non_existing_code" resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': non_existing_code}) self.assertEqual(resp.status_code, 404) 
self.assertIn("Discount does not exist against code '{0}'.".format(non_existing_code), resp.content) def test_valid_qty_greater_then_one_and_purchase_type_should_business(self): qty = 2 item = self.add_course_to_user_cart(self.course_key) resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': item.id, 'qty': qty}) self.assertEqual(resp.status_code, 200) data = json.loads(resp.content) self.assertEqual(data['total_cost'], item.unit_cost * qty) cart = Order.get_cart_for_user(self.user) self.assertEqual(cart.order_type, 'business') def test_in_valid_qty_case(self): # invalid quantity, Quantity must be between 1 and 1000. qty = 0 item = self.add_course_to_user_cart(self.course_key) resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': item.id, 'qty': qty}) self.assertEqual(resp.status_code, 400) self.assertIn("Quantity must be between 1 and 1000.", resp.content) # invalid quantity, Quantity must be an integer. qty = 'abcde' resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': item.id, 'qty': qty}) self.assertEqual(resp.status_code, 400) self.assertIn("Quantity must be an integer.", resp.content) # invalid quantity, Quantity is not present in request resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': item.id}) self.assertEqual(resp.status_code, 400) self.assertIn("Quantity must be between 1 and 1000.", resp.content) def test_valid_qty_but_item_not_found(self): qty = 2 item_id = '-1' self.login_user() resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': item_id, 'qty': qty}) self.assertEqual(resp.status_code, 404) self.assertEqual('Order item does not exist.', resp.content) # now testing the case if item id not found in request, resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'qty': qty}) self.assertEqual(resp.status_code, 400) self.assertEqual('Order item not found in request.', resp.content) def 
test_purchase_type_should_be_personal_when_qty_is_one(self): qty = 1 item = self.add_course_to_user_cart(self.course_key) resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': item.id, 'qty': qty}) self.assertEqual(resp.status_code, 200) data = json.loads(resp.content) self.assertEqual(data['total_cost'], item.unit_cost * 1) cart = Order.get_cart_for_user(self.user) self.assertEqual(cart.order_type, 'personal') def test_purchase_type_on_removing_item_and_cart_has_item_with_qty_one(self): qty = 5 self.add_course_to_user_cart(self.course_key) item2 = self.add_course_to_user_cart(self.testing_course.id) resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': item2.id, 'qty': qty}) self.assertEqual(resp.status_code, 200) cart = Order.get_cart_for_user(self.user) cart_items = cart.orderitem_set.all() test_flag = False for cartitem in cart_items: if cartitem.qty == 5: test_flag = True resp = self.client.post(reverse('shoppingcart.views.remove_item', args=[]), {'id': cartitem.id}) self.assertEqual(resp.status_code, 200) self.assertTrue(test_flag) cart = Order.get_cart_for_user(self.user) self.assertEqual(cart.order_type, 'personal') def test_billing_details_btn_in_cart_when_qty_is_greater_than_one(self): qty = 5 item = self.add_course_to_user_cart(self.course_key) resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': item.id, 'qty': qty}) self.assertEqual(resp.status_code, 200) resp = self.client.get(reverse('shoppingcart.views.show_cart', args=[])) self.assertIn("Billing Details", resp.content) def test_purchase_type_should_be_personal_when_remove_all_items_from_cart(self): item1 = self.add_course_to_user_cart(self.course_key) resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': item1.id, 'qty': 2}) self.assertEqual(resp.status_code, 200) item2 = self.add_course_to_user_cart(self.testing_course.id) resp = 
self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': item2.id, 'qty': 5}) self.assertEqual(resp.status_code, 200) cart = Order.get_cart_for_user(self.user) cart_items = cart.orderitem_set.all() test_flag = False for cartitem in cart_items: test_flag = True resp = self.client.post(reverse('shoppingcart.views.remove_item', args=[]), {'id': cartitem.id}) self.assertEqual(resp.status_code, 200) self.assertTrue(test_flag) cart = Order.get_cart_for_user(self.user) self.assertEqual(cart.order_type, 'personal') def test_use_valid_coupon_code_and_qty_is_greater_than_one(self): qty = 5 item = self.add_course_to_user_cart(self.course_key) resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': item.id, 'qty': qty}) self.assertEqual(resp.status_code, 200) data = json.loads(resp.content) self.assertEqual(data['total_cost'], item.unit_cost * qty) # use coupon code self.add_coupon(self.course_key, True, self.coupon_code) resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code}) item = self.cart.orderitem_set.all().select_subclasses()[0] self.assertEquals(item.unit_cost * qty, 180) def test_course_discount_invalid_reg_code(self): self.add_reg_code(self.course_key) self.add_course_to_user_cart(self.course_key) non_existing_code = "non_existing_code" resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': non_existing_code}) self.assertEqual(resp.status_code, 404) self.assertIn("Discount does not exist against code '{0}'.".format(non_existing_code), resp.content) def test_course_discount_inactive_coupon(self): self.add_coupon(self.course_key, False, self.coupon_code) self.add_course_to_user_cart(self.course_key) resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code}) self.assertEqual(resp.status_code, 404) self.assertIn("Discount does not exist against code '{0}'.".format(self.coupon_code), resp.content) def 
test_course_does_not_exist_in_cart_against_valid_coupon(self): course_key = self.course_key.to_deprecated_string() + 'testing' self.add_coupon(course_key, True, self.coupon_code) self.add_course_to_user_cart(self.course_key) resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code}) self.assertEqual(resp.status_code, 404) self.assertIn("Discount does not exist against code '{0}'.".format(self.coupon_code), resp.content) def test_inactive_registration_code_returns_error(self): """ test to redeem inactive registration code and it returns an error. """ course_key = self.course_key.to_deprecated_string() self.add_reg_code(course_key, is_valid=False) self.add_course_to_user_cart(self.course_key) # now apply the inactive registration code # it will raise an exception resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.reg_code}) self.assertEqual(resp.status_code, 400) self.assertIn( "This enrollment code ({enrollment_code}) is no longer valid.".format( enrollment_code=self.reg_code), resp.content) def test_course_does_not_exist_in_cart_against_valid_reg_code(self): course_key = self.course_key.to_deprecated_string() + 'testing' self.add_reg_code(course_key) self.add_course_to_user_cart(self.course_key) resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.reg_code}) self.assertEqual(resp.status_code, 404) self.assertIn("Code '{0}' is not valid for any course in the shopping cart.".format(self.reg_code), resp.content) def test_cart_item_qty_greater_than_1_against_valid_reg_code(self): course_key = self.course_key.to_deprecated_string() self.add_reg_code(course_key) item = self.add_course_to_user_cart(self.course_key) resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': item.id, 'qty': 4}) self.assertEqual(resp.status_code, 200) # now update the cart item quantity and then apply the registration code # it will raise an exception resp = 
self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.reg_code}) self.assertEqual(resp.status_code, 404) self.assertIn("Cart item quantity should not be greater than 1 when applying activation code", resp.content) @ddt.data(True, False) def test_reg_code_uses_associated_mode(self, expired_mode): """Tests the use of reg codes on verified courses, expired or active. """ course_key = self.course_key.to_deprecated_string() expiration_date = self.yesterday if expired_mode else self.tomorrow self._add_course_mode(mode_slug='verified', expiration_date=expiration_date) self.add_reg_code(course_key, mode_slug='verified') self.add_course_to_user_cart(self.course_key) resp = self.client.post(reverse('register_code_redemption', args=[self.reg_code]), HTTP_HOST='localhost') self.assertEqual(resp.status_code, 200) self.assertIn(self.course.display_name, resp.content) @ddt.data(True, False) def test_reg_code_uses_unknown_mode(self, expired_mode): """Tests the use of reg codes on verified courses, expired or active. 
""" course_key = self.course_key.to_deprecated_string() expiration_date = self.yesterday if expired_mode else self.tomorrow self._add_course_mode(mode_slug='verified', expiration_date=expiration_date) self.add_reg_code(course_key, mode_slug='bananas') self.add_course_to_user_cart(self.course_key) resp = self.client.post(reverse('register_code_redemption', args=[self.reg_code]), HTTP_HOST='localhost') self.assertEqual(resp.status_code, 200) self.assertIn(self.course.display_name, resp.content) self.assertIn("error processing your redeem code", resp.content) def test_course_discount_for_valid_active_coupon_code(self): self.add_coupon(self.course_key, True, self.coupon_code) self.add_course_to_user_cart(self.course_key) resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code}) self.assertEqual(resp.status_code, 200) # unit price should be updated for that course item = self.cart.orderitem_set.all().select_subclasses()[0] self.assertEquals(item.unit_cost, self.get_discount(self.cost)) # after getting 10 percent discount self.assertEqual(self.cart.total_cost, self.get_discount(self.cost)) # now using the same coupon code against the same order. # Only one coupon redemption should be allowed per order. resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code}) self.assertEqual(resp.status_code, 400) self.assertIn("Only one coupon redemption is allowed against an order", resp.content) def test_course_discount_against_two_distinct_coupon_codes(self): self.add_coupon(self.course_key, True, self.coupon_code) self.add_course_to_user_cart(self.course_key) resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code}) self.assertEqual(resp.status_code, 200) # unit price should be updated for that course item = self.cart.orderitem_set.all().select_subclasses()[0] self.assertEquals(item.unit_cost, self.get_discount(self.cost)) # now using another valid active coupon code. 
# Only one coupon redemption should be allowed per order. self.add_coupon(self.course_key, True, 'abxyz') resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': 'abxyz'}) self.assertEqual(resp.status_code, 400) self.assertIn("Only one coupon redemption is allowed against an order", resp.content) def test_same_coupons_code_on_multiple_courses(self): # add two same coupon codes on two different courses self.add_coupon(self.course_key, True, self.coupon_code) self.add_coupon(self.testing_course.id, True, self.coupon_code) self.add_course_to_user_cart(self.course_key) self.add_course_to_user_cart(self.testing_course.id) resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code}) self.assertEqual(resp.status_code, 200) # unit price should be updated for that course item = self.cart.orderitem_set.all().select_subclasses()[0] self.assertEquals(item.unit_cost, self.get_discount(self.cost)) item = self.cart.orderitem_set.all().select_subclasses()[1] self.assertEquals(item.unit_cost, self.get_discount(self.testing_cost)) def test_soft_delete_coupon(self): self.add_coupon(self.course_key, True, self.coupon_code) coupon = Coupon(code='TestCode', description='testing', course_id=self.course_key, percentage_discount=12, created_by=self.user, is_active=True) coupon.save() self.assertEquals(coupon.__unicode__(), '[Coupon] code: TestCode course: MITx/999/Robot_Super_Course') admin = User.objects.create_user('Mark', 'admin+courses@edx.org', 'foo') admin.is_staff = True get_coupon = Coupon.objects.get(id=1) request = HttpRequest() request.user = admin setattr(request, 'session', 'session') messages = FallbackStorage(request) setattr(request, '_messages', messages) coupon_admin = SoftDeleteCouponAdmin(Coupon, AdminSite()) test_query_set = coupon_admin.queryset(request) test_actions = coupon_admin.get_actions(request) self.assertIn('really_delete_selected', test_actions['really_delete_selected']) self.assertEqual(get_coupon.is_active, 
True) coupon_admin.really_delete_selected(request, test_query_set) for coupon in test_query_set: self.assertEqual(coupon.is_active, False) coupon_admin.delete_model(request, get_coupon) self.assertEqual(get_coupon.is_active, False) coupon = Coupon(code='TestCode123', description='testing123', course_id=self.course_key, percentage_discount=22, created_by=self.user, is_active=True) coupon.save() test_query_set = coupon_admin.queryset(request) coupon_admin.really_delete_selected(request, test_query_set) for coupon in test_query_set: self.assertEqual(coupon.is_active, False) def test_course_free_discount_for_valid_active_reg_code(self): self.add_reg_code(self.course_key) self.add_course_to_user_cart(self.course_key) resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.reg_code}) self.assertEqual(resp.status_code, 200) redeem_url = reverse('register_code_redemption', args=[self.reg_code]) response = self.client.get(redeem_url) self.assertEquals(response.status_code, 200) # check button text self.assertTrue('Activate Course Enrollment' in response.content) #now activate the user by enrolling him/her to the course response = self.client.post(redeem_url) self.assertEquals(response.status_code, 200) # now testing registration code already used scenario, reusing the same code # the item has been removed when using the registration code for the first time resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.reg_code}) self.assertEqual(resp.status_code, 400) self.assertIn("This enrollment code ({enrollment_code}) is not valid.".format( enrollment_code=self.reg_code ), resp.content) def test_upgrade_from_valid_reg_code(self): """Use a valid registration code to upgrade from honor to verified mode. 
""" # Ensure the course has a verified mode course_key = self.course_key.to_deprecated_string() self._add_course_mode(mode_slug='verified') self.add_reg_code(course_key, mode_slug='verified') # Enroll as honor in the course with the current user. CourseEnrollment.enroll(self.user, self.course_key) self.login_user() current_enrollment, __ = CourseEnrollment.enrollment_mode_for_user(self.user, self.course_key) self.assertEquals('honor', current_enrollment) redeem_url = reverse('register_code_redemption', args=[self.reg_code]) response = self.client.get(redeem_url) self.assertEquals(response.status_code, 200) # check button text self.assertTrue('Activate Course Enrollment' in response.content) #now activate the user by enrolling him/her to the course response = self.client.post(redeem_url) self.assertEquals(response.status_code, 200) # Once upgraded, should be "verified" current_enrollment, __ = CourseEnrollment.enrollment_mode_for_user(self.user, self.course_key) self.assertEquals('verified', current_enrollment) @patch('shoppingcart.views.log.debug') def test_non_existing_coupon_redemption_on_removing_item(self, debug_log): reg_item = self.add_course_to_user_cart(self.course_key) resp = self.client.post(reverse('shoppingcart.views.remove_item', args=[]), {'id': reg_item.id}) debug_log.assert_called_with( 'Code redemption does not exist for order item id=%s.', str(reg_item.id) ) self.assertEqual(resp.status_code, 200) self.assertEquals(self.cart.orderitem_set.count(), 0) @patch('shoppingcart.views.log.info') def test_existing_coupon_redemption_on_removing_item(self, info_log): self.add_coupon(self.course_key, True, self.coupon_code) reg_item = self.add_course_to_user_cart(self.course_key) resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code}) self.assertEqual(resp.status_code, 200) resp = self.client.post(reverse('shoppingcart.views.remove_item', args=[]), {'id': reg_item.id}) self.assertEqual(resp.status_code, 200) 
self.assertEquals(self.cart.orderitem_set.count(), 0) info_log.assert_called_with( 'Coupon "%s" redemption entry removed for user "%s" for order item "%s"', self.coupon_code, self.user, str(reg_item.id) ) @patch('shoppingcart.views.log.info') def test_reset_redemption_for_coupon(self, info_log): self.add_coupon(self.course_key, True, self.coupon_code) reg_item = self.add_course_to_user_cart(self.course_key) resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code}) self.assertEqual(resp.status_code, 200) resp = self.client.post(reverse('shoppingcart.views.reset_code_redemption', args=[])) self.assertEqual(resp.status_code, 200) info_log.assert_called_with( 'Coupon redemption entry removed for user %s for order %s', self.user, reg_item.id ) @patch('shoppingcart.views.log.info') def test_coupon_discount_for_multiple_courses_in_cart(self, info_log): reg_item = self.add_course_to_user_cart(self.course_key) self.add_coupon(self.course_key, True, self.coupon_code) cert_item = CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'honor') self.assertEquals(self.cart.orderitem_set.count(), 2) resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code}) self.assertEqual(resp.status_code, 200) # unit_cost should be updated for that particular course for which coupon code is registered items = self.cart.orderitem_set.all().select_subclasses() for item in items: if item.id == reg_item.id: self.assertEquals(item.unit_cost, self.get_discount(self.cost)) self.assertEquals(item.list_price, self.cost) elif item.id == cert_item.id: self.assertEquals(item.list_price, self.cost) self.assertEquals(item.unit_cost, self.cost) # Delete the discounted item, corresponding coupon redemption should # be removed for that particular discounted item resp = self.client.post(reverse('shoppingcart.views.remove_item', args=[]), {'id': reg_item.id}) self.assertEqual(resp.status_code, 200) 
self.assertEquals(self.cart.orderitem_set.count(), 1) info_log.assert_called_with( 'Coupon "%s" redemption entry removed for user "%s" for order item "%s"', self.coupon_code, self.user, str(reg_item.id) ) @patch('shoppingcart.views.log.info') def test_delete_certificate_item(self, info_log): self.add_course_to_user_cart(self.course_key) cert_item = CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'honor') self.assertEquals(self.cart.orderitem_set.count(), 2) # Delete the discounted item, corresponding coupon redemption should be removed for that particular discounted item resp = self.client.post(reverse('shoppingcart.views.remove_item', args=[]), {'id': cert_item.id}) self.assertEqual(resp.status_code, 200) self.assertEquals(self.cart.orderitem_set.count(), 1) info_log.assert_called_with("order item %s removed for user %s", str(cert_item.id), self.user) @patch('shoppingcart.views.log.info') def test_remove_coupon_redemption_on_clear_cart(self, info_log): reg_item = self.add_course_to_user_cart(self.course_key) CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'honor') self.assertEquals(self.cart.orderitem_set.count(), 2) self.add_coupon(self.course_key, True, self.coupon_code) resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code}) self.assertEqual(resp.status_code, 200) resp = self.client.post(reverse('shoppingcart.views.clear_cart', args=[])) self.assertEqual(resp.status_code, 200) self.assertEquals(self.cart.orderitem_set.count(), 0) info_log.assert_called_with( 'Coupon redemption entry removed for user %s for order %s', self.user, reg_item.id ) def test_add_course_to_cart_already_registered(self): CourseEnrollment.enroll(self.user, self.course_key) self.login_user() resp = self.client.post(reverse('shoppingcart.views.add_course_to_cart', args=[self.course_key.to_deprecated_string()])) self.assertEqual(resp.status_code, 400) self.assertIn('You are already registered in 
course {0}.'.format(self.course_key.to_deprecated_string()), resp.content) def test_add_nonexistent_course_to_cart(self): self.login_user() resp = self.client.post(reverse('shoppingcart.views.add_course_to_cart', args=['non/existent/course'])) self.assertEqual(resp.status_code, 404) self.assertIn("The course you requested does not exist.", resp.content) def test_add_course_to_cart_success(self): self.login_user() reverse('shoppingcart.views.add_course_to_cart', args=[self.course_key.to_deprecated_string()]) resp = self.client.post(reverse('shoppingcart.views.add_course_to_cart', args=[self.course_key.to_deprecated_string()])) self.assertEqual(resp.status_code, 200) self.assertTrue(PaidCourseRegistration.contained_in_order(self.cart, self.course_key)) @patch('shoppingcart.views.render_purchase_form_html', form_mock) @patch('shoppingcart.views.render_to_response', render_mock) def test_show_cart(self): self.login_user() reg_item = PaidCourseRegistration.add_to_order(self.cart, self.course_key) cert_item = CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'honor') resp = self.client.get(reverse('shoppingcart.views.show_cart', args=[])) self.assertEqual(resp.status_code, 200) ((purchase_form_arg_cart,), _) = form_mock.call_args # pylint: disable=redefined-outer-name purchase_form_arg_cart_items = purchase_form_arg_cart.orderitem_set.all().select_subclasses() self.assertIn(reg_item, purchase_form_arg_cart_items) self.assertIn(cert_item, purchase_form_arg_cart_items) self.assertEqual(len(purchase_form_arg_cart_items), 2) ((template, context), _) = render_mock.call_args self.assertEqual(template, 'shoppingcart/shopping_cart.html') self.assertEqual(len(context['shoppingcart_items']), 2) self.assertEqual(context['amount'], 80) self.assertIn("80.00", context['form_html']) # check for the default currency in the context self.assertEqual(context['currency'], 'usd') self.assertEqual(context['currency_symbol'], '$') 
    @patch('shoppingcart.views.render_purchase_form_html', form_mock)
    @patch('shoppingcart.views.render_to_response', render_mock)
    @override_settings(PAID_COURSE_REGISTRATION_CURRENCY=['PKR', 'Rs'])
    def test_show_cart_with_override_currency_settings(self):
        """Cart page honors PAID_COURSE_REGISTRATION_CURRENCY overrides."""
        self.login_user()
        reg_item = PaidCourseRegistration.add_to_order(self.cart, self.course_key)
        resp = self.client.get(reverse('shoppingcart.views.show_cart', args=[]))
        self.assertEqual(resp.status_code, 200)

        ((purchase_form_arg_cart,), _) = form_mock.call_args  # pylint: disable=redefined-outer-name
        purchase_form_arg_cart_items = purchase_form_arg_cart.orderitem_set.all().select_subclasses()
        self.assertIn(reg_item, purchase_form_arg_cart_items)

        ((template, context), _) = render_mock.call_args
        self.assertEqual(template, 'shoppingcart/shopping_cart.html')
        # check for the override currency settings in the context
        self.assertEqual(context['currency'], 'PKR')
        self.assertEqual(context['currency_symbol'], 'Rs')

    def test_clear_cart(self):
        """`clear_cart` empties the user's order entirely."""
        self.login_user()
        PaidCourseRegistration.add_to_order(self.cart, self.course_key)
        CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'honor')
        self.assertEquals(self.cart.orderitem_set.count(), 2)
        resp = self.client.post(reverse('shoppingcart.views.clear_cart', args=[]))
        self.assertEqual(resp.status_code, 200)
        self.assertEquals(self.cart.orderitem_set.count(), 0)

    @patch('shoppingcart.views.log.exception')
    def test_remove_item(self, exception_log):
        """Removing items: success, already-purchased, and nonexistent id all return 200;
        the latter two only log an exception instead of failing."""
        self.login_user()
        reg_item = PaidCourseRegistration.add_to_order(self.cart, self.course_key)
        cert_item = CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'honor')
        self.assertEquals(self.cart.orderitem_set.count(), 2)

        # Normal removal succeeds and drops the item from the cart.
        resp = self.client.post(reverse('shoppingcart.views.remove_item', args=[]),
                                {'id': reg_item.id})
        self.assertEqual(resp.status_code, 200)
        self.assertEquals(self.cart.orderitem_set.count(), 1)
        self.assertNotIn(reg_item, self.cart.orderitem_set.all().select_subclasses())

        # After purchase, the remaining item can no longer be removed — only logged.
        self.cart.purchase()
        resp2 = self.client.post(reverse('shoppingcart.views.remove_item', args=[]),
                                 {'id': cert_item.id})
        self.assertEqual(resp2.status_code, 200)
        exception_log.assert_called_with(
            'Cannot remove cart OrderItem id=%s. DoesNotExist or item is already purchased',
            str(cert_item.id)
        )

        # A nonexistent id takes the same silent-log path.
        resp3 = self.client.post(
            reverse('shoppingcart.views.remove_item', args=[]),
            {'id': -1}
        )
        self.assertEqual(resp3.status_code, 200)
        exception_log.assert_called_with(
            'Cannot remove cart OrderItem id=%s. DoesNotExist or item is already purchased',
            '-1'
        )

    @patch('shoppingcart.views.process_postpay_callback', postpay_mock)
    def test_postpay_callback_success(self):
        """A successful postpay callback redirects (302) to the receipt page."""
        postpay_mock.return_value = {'success': True, 'order': self.cart}
        self.login_user()
        resp = self.client.post(reverse('shoppingcart.views.postpay_callback', args=[]))
        self.assertEqual(resp.status_code, 302)
        self.assertEqual(urlparse(resp.__getitem__('location')).path,
                         reverse('shoppingcart.views.show_receipt', args=[self.cart.id]))

    @patch('shoppingcart.views.process_postpay_callback', postpay_mock)
    @patch('shoppingcart.views.render_to_response', render_mock)
    def test_postpay_callback_failure(self):
        """A failed postpay callback renders the error template with the processor's HTML."""
        postpay_mock.return_value = {'success': False, 'order': self.cart, 'error_html': 'ERROR_TEST!!!'}
        self.login_user()
        resp = self.client.post(reverse('shoppingcart.views.postpay_callback', args=[]))
        self.assertEqual(resp.status_code, 200)
        self.assertIn('ERROR_TEST!!!', resp.content)

        ((template, context), _) = render_mock.call_args
        self.assertEqual(template, 'shoppingcart/error.html')
        self.assertEqual(context['order'], self.cart)
        self.assertEqual(context['error_html'], 'ERROR_TEST!!!')

    @ddt.data(0, 1)
    def test_show_receipt_json(self, num_items):
        """JSON receipt reflects order status, billing info, and each line item (0 or 1 items via ddt)."""
        # Create the correct number of items in the order
        for __ in range(num_items):
            CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'honor')
        self.cart.purchase()

        self.login_user()
        url = reverse('shoppingcart.views.show_receipt', args=[self.cart.id])
        resp = self.client.get(url, HTTP_ACCEPT="application/json")

        # Should have gotten a successful response
        self.assertEqual(resp.status_code, 200)

        # Parse the response as JSON and check the contents
        json_resp = json.loads(resp.content)
        self.assertEqual(json_resp.get('currency'), self.cart.currency)
        self.assertEqual(json_resp.get('purchase_datetime'), get_default_time_display(self.cart.purchase_time))
        self.assertEqual(json_resp.get('total_cost'), self.cart.total_cost)
        self.assertEqual(json_resp.get('status'), "purchased")
        self.assertEqual(json_resp.get('billed_to'), {
            'first_name': self.cart.bill_to_first,
            'last_name': self.cart.bill_to_last,
            'street1': self.cart.bill_to_street1,
            'street2': self.cart.bill_to_street2,
            'city': self.cart.bill_to_city,
            'state': self.cart.bill_to_state,
            'postal_code': self.cart.bill_to_postalcode,
            'country': self.cart.bill_to_country
        })

        self.assertEqual(len(json_resp.get('items')), num_items)
        for item in json_resp.get('items'):
            self.assertEqual(item, {
                'unit_cost': 40,
                'quantity': 1,
                'line_cost': 40,
                'line_desc': 'Honor Code Certificate for course Test Course',
                'course_key': unicode(self.verified_course_key)
            })

    def test_show_receipt_xss(self):
        """Receipt page escapes a course title containing a script tag."""
        CertificateItem.add_to_order(self.cart, self.xss_course_key, self.cost, 'honor')
        self.cart.purchase()

        self.login_user()
        url = reverse('shoppingcart.views.show_receipt', args=[self.cart.id])
        resp = self.client.get(url)
        self.assert_xss(resp, '<script>alert("XSS")</script>')

    @patch('shoppingcart.views.render_to_response', render_mock)
    def test_reg_code_xss(self):
        """Registration-code redemption page escapes a malicious course title."""
        self.add_reg_code(self.xss_course_key)

        # One courses in user shopping cart
        self.add_course_to_user_cart(self.xss_course_key)
        self.assertEquals(self.cart.orderitem_set.count(), 1)

        post_response = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.reg_code})
        self.assertEqual(post_response.status_code, 200)

        redeem_url = reverse('register_code_redemption', args=[self.reg_code])
        redeem_response = self.client.get(redeem_url)
        # Closing assertion of test_reg_code_xss (method starts in the previous chunk):
        # the redemption page must not echo the raw script tag.
        self.assert_xss(redeem_response, '<script>alert("XSS")</script>')

    def test_show_receipt_json_multiple_items(self):
        """JSON receipt lists both line items of a mixed-type order with correct costs."""
        # Two different item types
        PaidCourseRegistration.add_to_order(self.cart, self.course_key)
        CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'honor')
        self.cart.purchase()

        self.login_user()
        url = reverse('shoppingcart.views.show_receipt', args=[self.cart.id])
        resp = self.client.get(url, HTTP_ACCEPT="application/json")

        # Should have gotten a successful response
        self.assertEqual(resp.status_code, 200)

        # Parse the response as JSON and check the contents
        json_resp = json.loads(resp.content)
        self.assertEqual(json_resp.get('total_cost'), self.cart.total_cost)

        items = json_resp.get('items')
        self.assertEqual(len(items), 2)
        self.assertEqual(items[0], {
            'unit_cost': 40,
            'quantity': 1,
            'line_cost': 40,
            'line_desc': 'Registration for Course: Robot Super Course',
            'course_key': unicode(self.course_key)
        })
        self.assertEqual(items[1], {
            'unit_cost': 40,
            'quantity': 1,
            'line_cost': 40,
            'line_desc': 'Honor Code Certificate for course Test Course',
            'course_key': unicode(self.verified_course_key)
        })

    def test_receipt_json_refunded(self):
        """After a certificate refund, the JSON receipt reports status 'refunded'."""
        # Fake a refundable enrollment so refund_cert_callback accepts the refund.
        mock_enrollment = Mock()
        mock_enrollment.refundable.side_effect = lambda: True
        mock_enrollment.course_id = self.verified_course_key
        mock_enrollment.user = self.user

        CourseMode.objects.create(
            course_id=self.verified_course_key,
            mode_slug="verified",
            mode_display_name="verified cert",
            min_price=self.cost
        )

        cert = CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'verified')
        self.cart.purchase()
        cert.refund_cert_callback(course_enrollment=mock_enrollment)

        self.login_user()
        url = reverse('shoppingcart.views.show_receipt', args=[self.cart.id])
        resp = self.client.get(url, HTTP_ACCEPT="application/json")
        self.assertEqual(resp.status_code, 200)
        json_resp = json.loads(resp.content)
        self.assertEqual(json_resp.get('status'), 'refunded')

    def test_show_receipt_404s(self):
        """A user cannot view another user's receipt, nor a nonexistent order id."""
        PaidCourseRegistration.add_to_order(self.cart, self.course_key)
        CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'honor')
        self.cart.purchase()

        # A second user's purchased cart, which self.user must not be able to see.
        user2 = UserFactory.create()
        cart2 = Order.get_cart_for_user(user2)
        PaidCourseRegistration.add_to_order(cart2, self.course_key)
        cart2.purchase()

        self.login_user()
        resp = self.client.get(reverse('shoppingcart.views.show_receipt', args=[cart2.id]))
        self.assertEqual(resp.status_code, 404)

        resp2 = self.client.get(reverse('shoppingcart.views.show_receipt', args=[1000]))
        self.assertEqual(resp2.status_code, 404)

    def test_total_amount_of_purchased_course(self):
        """Aggregated revenue for a course sums discounted and full-price purchases (36 + 40 = 76)."""
        self.add_course_to_user_cart(self.course_key)
        self.assertEquals(self.cart.orderitem_set.count(), 1)
        self.add_coupon(self.course_key, True, self.coupon_code)
        resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code})
        self.assertEqual(resp.status_code, 200)

        self.cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')
        # Total amount of a particular course that is purchased by different users
        total_amount = PaidCourseRegistration.get_total_amount_of_purchased_item(self.course_key)
        self.assertEqual(total_amount, 36)

        # Second buyer (instructor) pays full price.
        self.client.login(username=self.instructor.username, password="test")
        cart = Order.get_cart_for_user(self.instructor)
        PaidCourseRegistration.add_to_order(cart, self.course_key)
        cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')
        total_amount = PaidCourseRegistration.get_total_amount_of_purchased_item(self.course_key)
        self.assertEqual(total_amount, 76)

    @patch('shoppingcart.views.render_to_response', render_mock)
    def test_show_receipt_success_with_valid_coupon_code(self):
        """Receipt shows the discounted amount when a coupon was redeemed (asserts continue below)."""
        self.add_course_to_user_cart(self.course_key)
        self.add_coupon(self.course_key, True, self.coupon_code)
        resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code})
        self.assertEqual(resp.status_code, 200)
        self.cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')
        # Closing assertions of test_show_receipt_success_with_valid_coupon_code
        # (method starts in the previous chunk): receipt shows buyer and discount.
        resp = self.client.get(reverse('shoppingcart.views.show_receipt', args=[self.cart.id]))
        self.assertEqual(resp.status_code, 200)
        self.assertIn('FirstNameTesting123', resp.content)
        self.assertIn(str(self.get_discount(self.cost)), resp.content)

    @patch('shoppingcart.views.render_to_response', render_mock)
    def test_reg_code_and_course_registration_scenario(self):
        """Redeeming a registration code for a carted course shows and accepts activation."""
        self.add_reg_code(self.course_key)

        # One courses in user shopping cart
        self.add_course_to_user_cart(self.course_key)
        self.assertEquals(self.cart.orderitem_set.count(), 1)

        resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.reg_code})
        self.assertEqual(resp.status_code, 200)

        redeem_url = reverse('register_code_redemption', args=[self.reg_code])
        response = self.client.get(redeem_url)
        self.assertEquals(response.status_code, 200)
        # check button text
        self.assertTrue('Activate Course Enrollment' in response.content)

        # now activate the user by enrolling him/her to the course
        response = self.client.post(redeem_url)
        self.assertEquals(response.status_code, 200)

    @patch('shoppingcart.views.render_to_response', render_mock)
    def test_reg_code_with_multiple_courses_and_checkout_scenario(self):
        """With two carted courses, one code enrolls one course and checkout covers the other."""
        self.add_reg_code(self.course_key)

        # Two courses in user shopping cart
        self.login_user()
        PaidCourseRegistration.add_to_order(self.cart, self.course_key)
        item2 = PaidCourseRegistration.add_to_order(self.cart, self.testing_course.id)
        self.assertEquals(self.cart.orderitem_set.count(), 2)

        resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.reg_code})
        self.assertEqual(resp.status_code, 200)

        redeem_url = reverse('register_code_redemption', args=[self.reg_code])
        resp = self.client.get(redeem_url)
        self.assertEquals(resp.status_code, 200)
        # check button text
        self.assertTrue('Activate Course Enrollment' in resp.content)

        # now activate the user by enrolling him/her to the course
        resp = self.client.post(redeem_url)
        self.assertEquals(resp.status_code, 200)

        # The remaining (non-code) course still needs payment.
        resp = self.client.get(reverse('shoppingcart.views.show_cart', args=[]))
        self.assertIn('Payment', resp.content)
        self.cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')
        resp = self.client.get(reverse('shoppingcart.views.show_receipt', args=[self.cart.id]))
        self.assertEqual(resp.status_code, 200)

        ((template, context), _) = render_mock.call_args  # pylint: disable=redefined-outer-name
        self.assertEqual(template, 'shoppingcart/receipt.html')
        self.assertEqual(context['order'], self.cart)
        self.assertEqual(context['order'].total_cost, self.testing_cost)

        course_enrollment = CourseEnrollment.objects.filter(user=self.user)
        self.assertEqual(course_enrollment.count(), 2)

        # make sure the enrollment_ids were stored in the PaidCourseRegistration items
        # refetch them first since they are updated
        # item1 has been deleted from the the cart.
        # User has been enrolled for the item1
        item2 = PaidCourseRegistration.objects.get(id=item2.id)
        self.assertIsNotNone(item2.course_enrollment)
        self.assertEqual(item2.course_enrollment.course_id, self.testing_course.id)

    @patch('shoppingcart.views.render_to_response', render_mock)
    def test_show_receipt_success_with_valid_reg_code(self):
        """A fully code-covered order produces a zero-amount receipt."""
        self.add_course_to_user_cart(self.course_key)
        self.add_reg_code(self.course_key)

        resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.reg_code})
        self.assertEqual(resp.status_code, 200)
        self.cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')

        resp = self.client.get(reverse('shoppingcart.views.show_receipt', args=[self.cart.id]))
        self.assertEqual(resp.status_code, 200)
        self.assertIn('0.00', resp.content)

    @patch('shoppingcart.views.render_to_response', render_mock)
    def test_show_receipt_success(self):
        """Standard receipt rendering for a purchased two-item order (asserts continue below)."""
        reg_item = PaidCourseRegistration.add_to_order(self.cart, self.course_key)
        cert_item = CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'honor')
        self.cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')

        self.login_user()
        # Closing assertions of test_show_receipt_success (method starts in the
        # previous chunk): receipt template, both items, no refunds, default currency.
        resp = self.client.get(reverse('shoppingcart.views.show_receipt', args=[self.cart.id]))
        self.assertEqual(resp.status_code, 200)
        self.assertIn('FirstNameTesting123', resp.content)
        self.assertIn('80.00', resp.content)

        ((template, context), _) = render_mock.call_args  # pylint: disable=redefined-outer-name
        self.assertEqual(template, 'shoppingcart/receipt.html')
        self.assertEqual(context['order'], self.cart)
        self.assertIn(reg_item, context['shoppingcart_items'][0])
        self.assertIn(cert_item, context['shoppingcart_items'][1])
        self.assertFalse(context['any_refunds'])
        # check for the default currency settings in the context
        self.assertEqual(context['currency_symbol'], '$')
        self.assertEqual(context['currency'], 'usd')

    @override_settings(PAID_COURSE_REGISTRATION_CURRENCY=['PKR', 'Rs'])
    @patch('shoppingcart.views.render_to_response', render_mock)
    def test_show_receipt_success_with_override_currency_settings(self):
        """Receipt page honors PAID_COURSE_REGISTRATION_CURRENCY overrides."""
        reg_item = PaidCourseRegistration.add_to_order(self.cart, self.course_key)
        cert_item = CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'honor')
        self.cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')

        self.login_user()
        resp = self.client.get(reverse('shoppingcart.views.show_receipt', args=[self.cart.id]))
        self.assertEqual(resp.status_code, 200)

        ((template, context), _) = render_mock.call_args  # pylint: disable=redefined-outer-name
        self.assertEqual(template, 'shoppingcart/receipt.html')
        self.assertIn(reg_item, context['shoppingcart_items'][0])
        self.assertIn(cert_item, context['shoppingcart_items'][1])
        # check for the override currency settings in the context
        self.assertEqual(context['currency_symbol'], 'Rs')
        self.assertEqual(context['currency'], 'PKR')

    @patch('shoppingcart.views.render_to_response', render_mock)
    def test_courseregcode_item_total_price(self):
        """A quantity-2 CourseRegCodeItem purchase counts as 2 * cost in aggregated revenue."""
        self.cart.order_type = 'business'
        self.cart.save()
        CourseRegCodeItem.add_to_order(self.cart, self.course_key, 2)
        self.cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')
        self.assertEquals(CourseRegCodeItem.get_total_amount_of_purchased_item(self.course_key), 80)

    @patch('shoppingcart.views.render_to_response', render_mock)
    def test_show_receipt_success_with_order_type_business(self):
        """Business orders email invoices, skip direct enrollment, and show registration
        codes on the receipt (assertions continue in the next chunk)."""
        self.cart.order_type = 'business'
        self.cart.save()
        reg_item = CourseRegCodeItem.add_to_order(self.cart, self.course_key, 2)
        self.cart.add_billing_details(company_name='T1Omega', company_contact_name='C1',
                                      company_contact_email='test@t1.com', recipient_email='test@t2.com')
        self.cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')

        # mail is sent to these emails recipient_email, company_contact_email, order.user.email
        self.assertEquals(len(mail.outbox), 3)

        self.login_user()
        resp = self.client.get(reverse('shoppingcart.views.show_receipt', args=[self.cart.id]))
        self.assertEqual(resp.status_code, 200)

        # when order_type = 'business' the user is not enrolled in the
        # course but presented with the enrollment links
        self.assertFalse(CourseEnrollment.is_enrolled(self.cart.user, self.course_key))
        self.assertIn('FirstNameTesting123', resp.content)
        self.assertIn('80.00', resp.content)
        # check for the enrollment codes content
        self.assertIn('Please send each professional one of these unique registration codes to enroll into the course.', resp.content)

        # fetch the newly generated registration codes
        course_registration_codes = CourseRegistrationCode.objects.filter(order=self.cart)

        ((template, context), _) = render_mock.call_args  # pylint: disable=redefined-outer-name
        self.assertEqual(template, 'shoppingcart/receipt.html')
        self.assertEqual(context['order'], self.cart)
        self.assertIn(reg_item, context['shoppingcart_items'][0])
        # now check for all the registration codes in the receipt
        # and all the codes should be unused at this point
        self.assertIn(course_registration_codes[0].code, context['reg_code_info_list'][0]['code'])
        self.assertIn(course_registration_codes[1].code, context['reg_code_info_list'][1]['code'])
        # Continuation of test_show_receipt_success_with_order_type_business
        # (method starts in the previous chunk): codes unredeemed, invoice details shown.
        self.assertFalse(context['reg_code_info_list'][0]['is_redeemed'])
        self.assertFalse(context['reg_code_info_list'][1]['is_redeemed'])

        self.assertIn(self.cart.purchase_time.strftime("%B %d, %Y"), resp.content)
        self.assertIn(self.cart.company_name, resp.content)
        self.assertIn(self.cart.company_contact_name, resp.content)
        self.assertIn(self.cart.company_contact_email, resp.content)
        self.assertIn(self.cart.recipient_email, resp.content)
        self.assertIn("Invoice #{order_id}".format(order_id=self.cart.id), resp.content)
        self.assertIn('You have successfully purchased <b>{total_registration_codes} course registration codes'
                      .format(total_registration_codes=context['total_registration_codes']), resp.content)

        # now redeem one of registration code from the previous order
        redeem_url = reverse('register_code_redemption', args=[context['reg_code_info_list'][0]['code']])

        # now activate the user by enrolling him/her to the course
        response = self.client.post(redeem_url)
        self.assertEquals(response.status_code, 200)
        self.assertTrue('View Dashboard' in response.content)

        # now view the receipt page again to see if any registration codes
        # has been expired or not
        resp = self.client.get(reverse('shoppingcart.views.show_receipt', args=[self.cart.id]))
        self.assertEqual(resp.status_code, 200)
        ((template, context), _) = render_mock.call_args  # pylint: disable=redefined-outer-name
        self.assertEqual(template, 'shoppingcart/receipt.html')
        # now check for all the registration codes in the receipt
        # and one of code should be used at this point
        self.assertTrue(context['reg_code_info_list'][0]['is_redeemed'])
        self.assertFalse(context['reg_code_info_list'][1]['is_redeemed'])

    @patch('shoppingcart.views.render_to_response', render_mock)
    def test_show_receipt_success_with_upgrade(self):
        """Receipt still renders correctly after coming from the upgrade flow."""
        reg_item = PaidCourseRegistration.add_to_order(self.cart, self.course_key)
        cert_item = CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'honor')
        self.cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')

        self.login_user()
        self.mock_tracker.emit.reset_mock()  # pylint: disable=maybe-no-member
        resp = self.client.get(reverse('shoppingcart.views.show_receipt', args=[self.cart.id]))

        self.assertEqual(resp.status_code, 200)
        self.assertIn('FirstNameTesting123', resp.content)
        self.assertIn('80.00', resp.content)

        ((template, context), _) = render_mock.call_args

        # When we come from the upgrade flow, we get these context variables
        self.assertEqual(template, 'shoppingcart/receipt.html')
        self.assertEqual(context['order'], self.cart)
        self.assertIn(reg_item, context['shoppingcart_items'][0])
        self.assertIn(cert_item, context['shoppingcart_items'][1])
        self.assertFalse(context['any_refunds'])

    @patch('shoppingcart.views.render_to_response', render_mock)
    def test_show_receipt_success_refund(self):
        """A refunded item lowers the shown total and flips the any_refunds flag."""
        reg_item = PaidCourseRegistration.add_to_order(self.cart, self.course_key)
        cert_item = CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'honor')
        self.cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')
        cert_item.status = "refunded"
        cert_item.save()
        self.assertEqual(self.cart.total_cost, 40)

        self.login_user()
        resp = self.client.get(reverse('shoppingcart.views.show_receipt', args=[self.cart.id]))
        self.assertEqual(resp.status_code, 200)
        self.assertIn('40.00', resp.content)

        ((template, context), _tmp) = render_mock.call_args
        self.assertEqual(template, 'shoppingcart/receipt.html')
        self.assertEqual(context['order'], self.cart)
        self.assertIn(reg_item, context['shoppingcart_items'][0])
        self.assertIn(cert_item, context['shoppingcart_items'][1])
        self.assertTrue(context['any_refunds'])

    @patch('shoppingcart.views.render_to_response', render_mock)
    def test_show_receipt_success_custom_receipt_page(self):
        """A single-item order uses that item's custom receipt template."""
        cert_item = CertificateItem.add_to_order(self.cart, self.course_key, self.cost, 'honor')
        self.cart.purchase()
        self.login_user()
        receipt_url = reverse('shoppingcart.views.show_receipt', args=[self.cart.id])
        resp = self.client.get(receipt_url)
        self.assertEqual(resp.status_code, 200)
        ((template, _context), _tmp) = render_mock.call_args
        self.assertEqual(template, cert_item.single_item_receipt_template)

    def _assert_404(self, url, use_post=False):
        """
        Helper method to assert that a given url will return a 404 status code
        """
        if use_post:
            response = self.client.post(url)
        else:
            response = self.client.get(url)
        self.assertEquals(response.status_code, 404)

    @patch.dict('django.conf.settings.FEATURES', {'ENABLE_PAID_COURSE_REGISTRATION': False})
    def test_disabled_paid_courses(self):
        """
        Assert that the pages that require ENABLE_PAID_COURSE_REGISTRATION=True
        return a HTTP 404 status code when we have this flag turned off
        """
        self.login_user()
        self._assert_404(reverse('shoppingcart.views.show_cart', args=[]))
        self._assert_404(reverse('shoppingcart.views.clear_cart', args=[]))
        self._assert_404(reverse('shoppingcart.views.remove_item', args=[]), use_post=True)
        self._assert_404(reverse('shoppingcart.views.register_code_redemption', args=["testing"]))
        self._assert_404(reverse('shoppingcart.views.use_code', args=[]), use_post=True)
        self._assert_404(reverse('shoppingcart.views.update_user_cart', args=[]))
        self._assert_404(reverse('shoppingcart.views.reset_code_redemption', args=[]), use_post=True)
        self._assert_404(reverse('shoppingcart.views.billing_details', args=[]))

    def test_upgrade_postpay_callback_emits_ga_event(self):
        """An honor->verified upgrade purchase emits the upgrade-succeeded tracking event
        (the callback itself is driven in the next chunk)."""
        # Enroll as honor in the course with the current user.
        CourseEnrollment.enroll(self.user, self.course_key)

        # add verified mode
        CourseMode.objects.create(
            course_id=self.verified_course_key,
            mode_slug="verified",
            mode_display_name="verified cert",
            min_price=self.cost
        )

        # Purchase a verified certificate
        self.cart = Order.get_cart_for_user(self.user)
        CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'verified')
        self.cart.start_purchase()
        self.login_user()

        # setting the attempting upgrade session value.
        session = self.client.session
        session['attempting_upgrade'] = True
        session.save()

        # Build a signed fake CyberSource-style response for the postpay callback.
        ordered_params = OrderedDict([
            ('amount', self.cost),
            ('currency', 'usd'),
            ('transaction_type', 'sale'),
            ('orderNumber', str(self.cart.id)),
            ('access_key', '123456789'),
            ('merchantID', 'edx'),
            ('djch', '012345678912'),
            ('orderPage_version', 2),
            ('orderPage_serialNumber', '1234567890'),
            ('profile_id', "00000001"),
            ('reference_number', str(self.cart.id)),
            ('locale', 'en'),
            ('signed_date_time', '2014-08-18T13:59:31Z'),
        ])
        resp_params = PaymentFakeView.response_post_params(sign(ordered_params))

        # The callback must clear the 'attempting_upgrade' session flag...
        self.assertTrue(self.client.session.get('attempting_upgrade'))
        url = reverse('shoppingcart.views.postpay_callback')
        self.client.post(url, resp_params, follow=True)
        self.assertFalse(self.client.session.get('attempting_upgrade'))

        # ...and emit the analytics event for the successful upgrade.
        self.mock_tracker.emit.assert_any_call(  # pylint: disable=maybe-no-member
            'edx.course.enrollment.upgrade.succeeded',
            {
                'user_id': self.user.id,
                'course_id': self.verified_course_key.to_deprecated_string(),
                'mode': 'verified'
            }
        )


class ReceiptRedirectTest(SharedModuleStoreTestCase):
    """Test special-case redirect from the receipt page.
    """
    COST = 40              # price of the verified mode used by every test
    PASSWORD = 'password'  # shared login password for the test user

    @classmethod
    def setUpClass(cls):
        super(ReceiptRedirectTest, cls).setUpClass()
        cls.course = CourseFactory.create()
        cls.course_key = cls.course.id

    def setUp(self):
        """Create a logged-in user with an empty cart and a verified course mode."""
        super(ReceiptRedirectTest, self).setUp()
        self.user = UserFactory.create()
        self.user.set_password(self.PASSWORD)
        self.user.save()
        self.course_mode = CourseMode(
            course_id=self.course_key,
            mode_slug="verified",
            mode_display_name="verified cert",
            min_price=self.COST
        )
        self.course_mode.save()
        self.cart = Order.get_cart_for_user(self.user)

        self.client.login(
            username=self.user.username,
            password=self.PASSWORD
        )

    def test_postpay_callback_redirect_to_verify_student(self):
        """A verified-cert purchase redirects to verify_student's payment confirmation."""
        # Create other carts first
        # This ensures that the order ID and order item IDs do not match
        Order.get_cart_for_user(self.user).start_purchase()
        Order.get_cart_for_user(self.user).start_purchase()
        Order.get_cart_for_user(self.user).start_purchase()

        # Purchase a verified certificate
        self.cart = Order.get_cart_for_user(self.user)
        CertificateItem.add_to_order(
            self.cart,
            self.course_key,
            self.COST,
            'verified'
        )
        self.cart.start_purchase()

        # Simulate hitting the post-pay callback
        with patch('shoppingcart.views.process_postpay_callback') as mock_process:
            mock_process.return_value = {'success': True, 'order': self.cart}
            url = reverse('shoppingcart.views.postpay_callback')
            resp = self.client.post(url, follow=True)

        # Expect to be redirected to the payment confirmation
        # page in the verify_student app
        redirect_url = reverse(
            'verify_student_payment_confirmation',
            kwargs={'course_id': unicode(self.course_key)}
        )
        redirect_url += '?payment-order-num={order_num}'.format(
            order_num=self.cart.id
        )

        self.assertIn(redirect_url, resp.redirect_chain[0][0])


@patch.dict('django.conf.settings.FEATURES', {'ENABLE_PAID_COURSE_REGISTRATION': True})
class ShoppingcartViewsClosedEnrollment(ModuleStoreTestCase):
    """
    Test suite for ShoppingcartViews Course Enrollments Closed or not
    """
    def setUp(self):
        """Create a user, two honor-mode courses and an empty cart, plus date helpers
        used to push one course's enrollment window into the future."""
        super(ShoppingcartViewsClosedEnrollment, self).setUp()

        self.user = UserFactory.create()
        self.user.set_password('password')
        self.user.save()
        self.instructor = AdminFactory.create()
        self.cost = 40
        self.course = CourseFactory.create(org='MITx', number='999', display_name='Robot Super Course')
        self.course_key = self.course.id
        self.course_mode = CourseMode(course_id=self.course_key,
                                      mode_slug="honor",
                                      mode_display_name="honor cert",
                                      min_price=self.cost)
        self.course_mode.save()
        self.testing_course = CourseFactory.create(
            org='Edx',
            number='999',
            display_name='Testing Super Course',
            metadata={"invitation_only": False}
        )
        self.percentage_discount = 20.0
        self.coupon_code = 'asdsad'
        self.course_mode = CourseMode(course_id=self.testing_course.id,
                                      mode_slug="honor",
                                      mode_display_name="honor cert",
                                      min_price=self.cost)
        self.course_mode.save()
        self.cart = Order.get_cart_for_user(self.user)
        self.now = datetime.now(pytz.UTC)
        self.tomorrow = self.now + timedelta(days=1)
        self.nextday = self.tomorrow + timedelta(days=1)

    def add_coupon(self, course_key, is_active, code):
        """
        add dummy coupon into models
        """
        coupon = Coupon(code=code, description='testing code', course_id=course_key,
                        percentage_discount=self.percentage_discount, created_by=self.user, is_active=is_active)
        coupon.save()

    def login_user(self):
        """
        Helper fn to login self.user
        """
        self.client.login(username=self.user.username, password="password")

    @patch('shoppingcart.views.render_to_response', render_mock)
    def test_to_check_that_cart_item_enrollment_is_closed(self):
        """An enrollment-closed course is dropped from the cart and its coupon redemption deleted."""
        self.login_user()
        reg_item1 = PaidCourseRegistration.add_to_order(self.cart, self.course_key)
        expired_course_item = PaidCourseRegistration.add_to_order(self.cart, self.testing_course.id)

        # update the testing_course enrollment dates
        self.testing_course.enrollment_start = self.tomorrow
        self.testing_course.enrollment_end = self.nextday
        self.testing_course = self.update_course(self.testing_course, self.user.id)

        # now add the same coupon code to the second course(testing_course)
        self.add_coupon(self.testing_course.id, True, self.coupon_code)
        resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code})
        self.assertEqual(resp.status_code, 200)

        coupon_redemption = CouponRedemption.objects.filter(coupon__course_id=getattr(expired_course_item, 'course_id'),
                                                            order=expired_course_item.order_id)
        self.assertEqual(coupon_redemption.count(), 1)

        # testing_course enrollment is closed but the course is in the cart
        # so we delete that item from the cart and display the message in the cart
        # coupon redemption entry should also be deleted when the item is expired.
        resp = self.client.get(reverse('shoppingcart.views.show_cart', args=[]))
        self.assertEqual(resp.status_code, 200)
        self.assertIn("{course_name} has been removed because the enrollment period has closed.".format(course_name=self.testing_course.display_name), resp.content)

        # now the redemption entry should be deleted from the table.
        coupon_redemption = CouponRedemption.objects.filter(coupon__course_id=getattr(expired_course_item, 'course_id'),
                                                            order=expired_course_item.order_id)
        self.assertEqual(coupon_redemption.count(), 0)

        ((template, context), _tmp) = render_mock.call_args
        self.assertEqual(template, 'shoppingcart/shopping_cart.html')
        self.assertEqual(context['order'], self.cart)
        self.assertIn(reg_item1, context['shoppingcart_items'][0])
        self.assertEqual(1, len(context['shoppingcart_items']))
        self.assertEqual(True, context['is_course_enrollment_closed'])
        self.assertIn(self.testing_course.display_name, context['expired_course_names'])

    def test_to_check_that_cart_item_enrollment_is_closed_when_clicking_the_payment_button(self):
        """`verify_cart` flags closed enrollment and the cart view removes the expired item."""
        self.login_user()
        PaidCourseRegistration.add_to_order(self.cart, self.course_key)
        PaidCourseRegistration.add_to_order(self.cart, self.testing_course.id)

        # update the testing_course enrollment dates
        self.testing_course.enrollment_start = self.tomorrow
        self.testing_course.enrollment_end = self.nextday
        self.testing_course = self.update_course(self.testing_course, self.user.id)

        # testing_course enrollment is closed but the course is in the cart
        # so we delete that item from the cart and display the message in the cart
        resp = self.client.get(reverse('shoppingcart.views.verify_cart'))
        self.assertEqual(resp.status_code, 200)
        self.assertTrue(json.loads(resp.content)['is_course_enrollment_closed'])

        resp = self.client.get(reverse('shoppingcart.views.show_cart', args=[]))
        self.assertEqual(resp.status_code, 200)
        self.assertIn("{course_name} has been removed because the enrollment period has closed.".format(course_name=self.testing_course.display_name), resp.content)
        self.assertIn('40.00', resp.content)

    def test_is_enrollment_closed_when_order_type_is_business(self):
        """Business orders get the same closed-enrollment handling via `billing_details`."""
        self.login_user()
        self.cart.order_type = 'business'
        self.cart.save()
        PaidCourseRegistration.add_to_order(self.cart, self.course_key)
        CourseRegCodeItem.add_to_order(self.cart, self.testing_course.id, 2)

        # update the testing_course enrollment dates
        self.testing_course.enrollment_start = self.tomorrow
        self.testing_course.enrollment_end = self.nextday
        self.testing_course = self.update_course(self.testing_course, self.user.id)

        resp = self.client.post(reverse('shoppingcart.views.billing_details'))
        self.assertEqual(resp.status_code, 200)
        self.assertTrue(json.loads(resp.content)['is_course_enrollment_closed'])

        # testing_course enrollment is closed but the course is in the cart
        # so we delete that item from the cart and display the message in the cart
        resp = self.client.get(reverse('shoppingcart.views.show_cart', args=[]))
        self.assertEqual(resp.status_code, 200)
        self.assertIn("{course_name} has been removed because the enrollment period has closed.".format(course_name=self.testing_course.display_name), resp.content)
        self.assertIn('40.00', resp.content)


@patch.dict('django.conf.settings.FEATURES', {'ENABLE_PAID_COURSE_REGISTRATION': True})
class RegistrationCodeRedemptionCourseEnrollment(SharedModuleStoreTestCase):
    """
    Test suite for RegistrationCodeRedemption Course Enrollments
    """
    @classmethod
    def setUpClass(cls):
        super(RegistrationCodeRedemptionCourseEnrollment, cls).setUpClass()
        cls.course = CourseFactory.create(org='MITx', number='999', display_name='Robot Super Course')
        cls.course_key = cls.course.id

    def setUp(self, **kwargs):
        """Create a user and an honor course mode for the shared course."""
        super(RegistrationCodeRedemptionCourseEnrollment, self).setUp()

        self.user = UserFactory.create()
        self.user.set_password('password')
        self.user.save()
        self.cost = 40
        self.course_mode = CourseMode(course_id=self.course_key,
                                      mode_slug="honor",
                                      mode_display_name="honor cert",
                                      min_price=self.cost)
        self.course_mode.save()

    def login_user(self):
        """
        Helper fn to login self.user
        """
        self.client.login(username=self.user.username, password="password")

    def test_registration_redemption_post_request_ratelimited(self):
        """
        Try (and fail) registration code redemption 30 times
        in a row on an non-existing registration code post request
        """
        cache.clear()
        url = reverse('register_code_redemption', args=['asdasd'])
        self.login_user()
        for i in xrange(30):  # pylint: disable=unused-variable
            response = self.client.post(url)
            self.assertEquals(response.status_code, 404)

        # then the rate limiter should kick in and give a HttpForbidden response
        response = self.client.post(url)
        self.assertEquals(response.status_code, 403)

        # now reset the time to 5 mins from now in future in order to unblock
        reset_time = datetime.now(UTC) + timedelta(seconds=300)
        with freeze_time(reset_time):
            response = self.client.post(url)
            self.assertEquals(response.status_code, 404)

        cache.clear()

    def test_registration_redemption_get_request_ratelimited(self):
        """
        Try (and fail) registration code redemption 30 times
        in a row on an non-existing registration code get request
        """
        cache.clear()
        url = reverse('register_code_redemption', args=['asdasd'])
        self.login_user()
        for i in xrange(30):  # pylint: disable=unused-variable
            response = self.client.get(url)
            self.assertEquals(response.status_code, 404)

        # then the rate limiter should kick
in and give a HttpForbidden response response = self.client.get(url) self.assertEquals(response.status_code, 403) # now reset the time to 5 mins from now in future in order to unblock reset_time = datetime.now(UTC) + timedelta(seconds=300) with freeze_time(reset_time): response = self.client.get(url) self.assertEquals(response.status_code, 404) cache.clear() def test_course_enrollment_active_registration_code_redemption(self): """ Test for active registration code course enrollment """ cache.clear() instructor = InstructorFactory(course_key=self.course_key) self.client.login(username=instructor.username, password='test') # Registration Code Generation only available to Sales Admins. CourseSalesAdminRole(self.course.id).add_users(instructor) url = reverse('generate_registration_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}) data = { 'total_registration_codes': 12, 'company_name': 'Test Group', 'company_contact_name': 'Test@company.com', 'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123', 'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '', 'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '', 'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': '' } response = self.client.post(url, data) self.assertEquals(response.status_code, 200) # get the first registration from the newly created registration codes registration_code = CourseRegistrationCode.objects.all()[0].code redeem_url = reverse('register_code_redemption', args=[registration_code]) self.login_user() response = self.client.get(redeem_url) self.assertEquals(response.status_code, 200) # check button text self.assertIn('Activate Course Enrollment', response.content) #now activate the user by enrolling him/her to the course response = self.client.post(redeem_url) self.assertEquals(response.status_code, 200) self.assertIn('View Dashboard', response.content) #now check 
that the registration code has already been redeemed and user is already registered in the course RegistrationCodeRedemption.objects.filter(registration_code__code=registration_code) response = self.client.get(redeem_url) self.assertEquals(len(RegistrationCodeRedemption.objects.filter(registration_code__code=registration_code)), 1) self.assertIn("You've clicked a link for an enrollment code that has already been used.", response.content) #now check that the registration code has already been redeemed response = self.client.post(redeem_url) self.assertIn("You've clicked a link for an enrollment code that has already been used.", response.content) #now check the response of the dashboard page dashboard_url = reverse('dashboard') response = self.client.get(dashboard_url) self.assertEquals(response.status_code, 200) self.assertIn(self.course.display_name, response.content) @ddt.ddt class RedeemCodeEmbargoTests(UrlResetMixin, ModuleStoreTestCase): """Test blocking redeem code redemption based on country access rules. 
""" USERNAME = 'bob' PASSWORD = 'test' @patch.dict(settings.FEATURES, {'EMBARGO': True}) def setUp(self): super(RedeemCodeEmbargoTests, self).setUp('embargo') self.course = CourseFactory.create() self.user = UserFactory.create(username=self.USERNAME, password=self.PASSWORD) result = self.client.login(username=self.user.username, password=self.PASSWORD) self.assertTrue(result, msg="Could not log in") @ddt.data('get', 'post') @patch.dict(settings.FEATURES, {'EMBARGO': True}) def test_registration_code_redemption_embargo(self, method): # Create a valid registration code reg_code = CourseRegistrationCode.objects.create( code="abcd1234", course_id=self.course.id, created_by=self.user ) # Try to redeem the code from a restricted country with restrict_course(self.course.id) as redirect_url: url = reverse( 'register_code_redemption', kwargs={'registration_code': 'abcd1234'} ) response = getattr(self.client, method)(url) self.assertRedirects(response, redirect_url) # The registration code should NOT be redeemed is_redeemed = RegistrationCodeRedemption.objects.filter( registration_code=reg_code ).exists() self.assertFalse(is_redeemed) # The user should NOT be enrolled is_enrolled = CourseEnrollment.is_enrolled(self.user, self.course.id) self.assertFalse(is_enrolled) @ddt.ddt class DonationViewTest(SharedModuleStoreTestCase): """Tests for making a donation. These tests cover both the single-item purchase flow, as well as the receipt page for donation items. """ DONATION_AMOUNT = "23.45" PASSWORD = "password" @classmethod def setUpClass(cls): super(DonationViewTest, cls).setUpClass() cls.course = CourseFactory.create(display_name="Test Course") def setUp(self): """Create a test user and order. 
""" super(DonationViewTest, self).setUp() # Create and login a user self.user = UserFactory.create() self.user.set_password(self.PASSWORD) self.user.save() result = self.client.login(username=self.user.username, password=self.PASSWORD) self.assertTrue(result) # Enable donations config = DonationConfiguration.current() config.enabled = True config.save() def test_donation_for_org(self): self._donate(self.DONATION_AMOUNT) self._assert_receipt_contains("tax purposes") def test_donation_for_course_receipt(self): # Donate to our course self._donate(self.DONATION_AMOUNT, course_id=self.course.id) # Verify the receipt page self._assert_receipt_contains("tax purposes") self._assert_receipt_contains(self.course.display_name) def test_smallest_possible_donation(self): self._donate("0.01") self._assert_receipt_contains("0.01") @ddt.data( {}, {"amount": "abcd"}, {"amount": "-1.00"}, {"amount": "0.00"}, {"amount": "0.001"}, {"amount": "0"}, {"amount": "23.45", "course_id": "invalid"} ) def test_donation_bad_request(self, bad_params): response = self.client.post(reverse('donation'), bad_params) self.assertEqual(response.status_code, 400) def test_donation_requires_login(self): self.client.logout() response = self.client.post(reverse('donation'), {'amount': self.DONATION_AMOUNT}) self.assertEqual(response.status_code, 302) def test_no_such_course(self): response = self.client.post( reverse("donation"), {"amount": self.DONATION_AMOUNT, "course_id": "edx/DemoX/Demo"} ) self.assertEqual(response.status_code, 400) @ddt.data("get", "put", "head", "options", "delete") def test_donation_requires_post(self, invalid_method): response = getattr(self.client, invalid_method)( reverse("donation"), {"amount": self.DONATION_AMOUNT} ) self.assertEqual(response.status_code, 405) def test_donations_disabled(self): config = DonationConfiguration.current() config.enabled = False config.save() # Logged in -- should be a 404 response = self.client.post(reverse('donation')) 
self.assertEqual(response.status_code, 404) # Logged out -- should still be a 404 self.client.logout() response = self.client.post(reverse('donation')) self.assertEqual(response.status_code, 404) def _donate(self, donation_amount, course_id=None): """Simulate a donation to a course. This covers the entire payment flow, except for the external payment processor, which is simulated. Arguments: donation_amount (unicode): The amount the user is donating. Keyword Arguments: course_id (CourseKey): If provided, make a donation to the specific course. Raises: AssertionError """ # Purchase a single donation item # Optionally specify a particular course for the donation params = {'amount': donation_amount} if course_id is not None: params['course_id'] = course_id url = reverse('donation') response = self.client.post(url, params) self.assertEqual(response.status_code, 200) # Use the fake payment implementation to simulate the parameters # we would receive from the payment processor. payment_info = json.loads(response.content) self.assertEqual(payment_info["payment_url"], "/shoppingcart/payment_fake") # If this is a per-course donation, verify that we're sending # the course ID to the payment processor. 
if course_id is not None: self.assertEqual( payment_info["payment_params"]["merchant_defined_data1"], unicode(course_id) ) self.assertEqual( payment_info["payment_params"]["merchant_defined_data2"], "donation_course" ) else: self.assertEqual(payment_info["payment_params"]["merchant_defined_data1"], "") self.assertEqual( payment_info["payment_params"]["merchant_defined_data2"], "donation_general" ) processor_response_params = PaymentFakeView.response_post_params(payment_info["payment_params"]) # Use the response parameters to simulate a successful payment url = reverse('shoppingcart.views.postpay_callback') response = self.client.post(url, processor_response_params) self.assertRedirects(response, self._receipt_url) def _assert_receipt_contains(self, expected_text): """Load the receipt page and verify that it contains the expected text.""" resp = self.client.get(self._receipt_url) self.assertContains(resp, expected_text) @property def _receipt_url(self): order_id = Order.objects.get(user=self.user, status="purchased").id return reverse("shoppingcart.views.show_receipt", kwargs={"ordernum": order_id}) class CSVReportViewsTest(SharedModuleStoreTestCase): """ Test suite for CSV Purchase Reporting """ @classmethod def setUpClass(cls): super(CSVReportViewsTest, cls).setUpClass() cls.course = CourseFactory.create(org='MITx', number='999', display_name='Robot Super Course') cls.course_key = cls.course.id verified_course = CourseFactory.create(org='org', number='test', display_name='Test Course') cls.verified_course_key = verified_course.id def setUp(self): super(CSVReportViewsTest, self).setUp() self.user = UserFactory.create() self.user.set_password('password') self.user.save() self.cost = 40 self.course_mode = CourseMode(course_id=self.course_key, mode_slug="honor", mode_display_name="honor cert", min_price=self.cost) self.course_mode.save() self.course_mode2 = CourseMode(course_id=self.course_key, mode_slug="verified", mode_display_name="verified cert", 
min_price=self.cost) self.course_mode2.save() self.cart = Order.get_cart_for_user(self.user) self.dl_grp = Group(name=settings.PAYMENT_REPORT_GENERATOR_GROUP) self.dl_grp.save() def login_user(self): """ Helper fn to login self.user """ self.client.login(username=self.user.username, password="password") def add_to_download_group(self, user): """ Helper fn to add self.user to group that's allowed to download report CSV """ user.groups.add(self.dl_grp) def test_report_csv_no_access(self): self.login_user() response = self.client.get(reverse('payment_csv_report')) self.assertEqual(response.status_code, 403) def test_report_csv_bad_method(self): self.login_user() self.add_to_download_group(self.user) response = self.client.put(reverse('payment_csv_report')) self.assertEqual(response.status_code, 400) @patch('shoppingcart.views.render_to_response', render_mock) def test_report_csv_get(self): self.login_user() self.add_to_download_group(self.user) response = self.client.get(reverse('payment_csv_report')) ((template, context), unused_kwargs) = render_mock.call_args self.assertEqual(template, 'shoppingcart/download_report.html') self.assertFalse(context['total_count_error']) self.assertFalse(context['date_fmt_error']) self.assertIn("Download CSV Reports", response.content.decode('UTF-8')) @patch('shoppingcart.views.render_to_response', render_mock) def test_report_csv_bad_date(self): self.login_user() self.add_to_download_group(self.user) response = self.client.post(reverse('payment_csv_report'), {'start_date': 'BAD', 'end_date': 'BAD', 'requested_report': 'itemized_purchase_report'}) ((template, context), unused_kwargs) = render_mock.call_args self.assertEqual(template, 'shoppingcart/download_report.html') self.assertFalse(context['total_count_error']) self.assertTrue(context['date_fmt_error']) self.assertIn("There was an error in your date input. 
It should be formatted as YYYY-MM-DD", response.content.decode('UTF-8')) CORRECT_CSV_NO_DATE_ITEMIZED_PURCHASE = ",1,purchased,1,40,40,usd,Registration for Course: Robot Super Course," def test_report_csv_itemized(self): report_type = 'itemized_purchase_report' start_date = '1970-01-01' end_date = '2100-01-01' PaidCourseRegistration.add_to_order(self.cart, self.course_key) self.cart.purchase() self.login_user() self.add_to_download_group(self.user) response = self.client.post(reverse('payment_csv_report'), {'start_date': start_date, 'end_date': end_date, 'requested_report': report_type}) self.assertEqual(response['Content-Type'], 'text/csv') report = initialize_report(report_type, start_date, end_date) self.assertIn(",".join(report.header()), response.content) self.assertIn(self.CORRECT_CSV_NO_DATE_ITEMIZED_PURCHASE, response.content) def test_report_csv_university_revenue_share(self): report_type = 'university_revenue_share' start_date = '1970-01-01' end_date = '2100-01-01' start_letter = 'A' end_letter = 'Z' self.login_user() self.add_to_download_group(self.user) response = self.client.post(reverse('payment_csv_report'), {'start_date': start_date, 'end_date': end_date, 'start_letter': start_letter, 'end_letter': end_letter, 'requested_report': report_type}) self.assertEqual(response['Content-Type'], 'text/csv') report = initialize_report(report_type, start_date, end_date, start_letter, end_letter) self.assertIn(",".join(report.header()), response.content) class UtilFnsTest(TestCase): """ Tests for utility functions in views.py """ def setUp(self): super(UtilFnsTest, self).setUp() self.user = UserFactory.create() def test_can_download_report_no_group(self): """ Group controlling perms is not present """ self.assertFalse(_can_download_report(self.user)) def test_can_download_report_not_member(self): """ User is not part of group controlling perms """ Group(name=settings.PAYMENT_REPORT_GENERATOR_GROUP).save() self.assertFalse(_can_download_report(self.user)) def 
test_can_download_report(self): """ User is part of group controlling perms """ grp = Group(name=settings.PAYMENT_REPORT_GENERATOR_GROUP) grp.save() self.user.groups.add(grp) self.assertTrue(_can_download_report(self.user)) def test_get_date_from_str(self): test_str = "2013-10-01" date = _get_date_from_str(test_str) self.assertEqual(2013, date.year) self.assertEqual(10, date.month) self.assertEqual(1, date.day)
agpl-3.0
molebot/brython
www/src/Lib/test/test_bufio.py
112
2599
import unittest from test import support import io # C implementation. import _pyio as pyio # Python implementation. # Simple test to ensure that optimizations in the IO library deliver the # expected results. For best testing, run this under a debug-build Python too # (to exercise asserts in the C code). lengths = list(range(1, 257)) + [512, 1000, 1024, 2048, 4096, 8192, 10000, 16384, 32768, 65536, 1000000] class BufferSizeTest: def try_one(self, s): # Write s + "\n" + s to file, then open it and ensure that successive # .readline()s deliver what we wrote. # Ensure we can open TESTFN for writing. support.unlink(support.TESTFN) # Since C doesn't guarantee we can write/read arbitrary bytes in text # files, use binary mode. f = self.open(support.TESTFN, "wb") try: # write once with \n and once without f.write(s) f.write(b"\n") f.write(s) f.close() f = open(support.TESTFN, "rb") line = f.readline() self.assertEqual(line, s + b"\n") line = f.readline() self.assertEqual(line, s) line = f.readline() self.assertTrue(not line) # Must be at EOF f.close() finally: support.unlink(support.TESTFN) def drive_one(self, pattern): for length in lengths: # Repeat string 'pattern' as often as needed to reach total length # 'length'. Then call try_one with that string, a string one larger # than that, and a string one smaller than that. Try this with all # small sizes and various powers of 2, so we exercise all likely # stdio buffer sizes, and "off by one" errors on both sides. q, r = divmod(length, len(pattern)) teststring = pattern * q + pattern[:r] self.assertEqual(len(teststring), length) self.try_one(teststring) self.try_one(teststring + b"x") self.try_one(teststring[:-1]) def test_primepat(self): # A pattern with prime length, to avoid simple relationships with # stdio buffer sizes. 
self.drive_one(b"1234567890\00\01\02\03\04\05\06") def test_nullpat(self): self.drive_one(bytes(1000)) class CBufferSizeTest(BufferSizeTest, unittest.TestCase): open = io.open class PyBufferSizeTest(BufferSizeTest, unittest.TestCase): open = staticmethod(pyio.open) if __name__ == "__main__": unittest.main()
bsd-3-clause
Anonymouslemming/ansible
lib/ansible/modules/cloud/amazon/route53.py
24
22578
#!/usr/bin/python # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['stableinterface'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: route53 version_added: "1.3" short_description: add or delete entries in Amazons Route53 DNS service description: - Creates and deletes DNS records in Amazons Route53 service options: state: description: - Specifies the state of the resource record. As of Ansible 2.4, the I(command) option has been changed to I(state) as default and the choices 'present' and 'absent' have been added, but I(command) still works as well. required: true aliases: [ 'command' ] choices: [ 'present', 'absent', 'get', 'create', 'delete' ] zone: description: - The DNS zone to modify required: true hosted_zone_id: description: - The Hosted Zone ID of the DNS zone to modify required: false version_added: "2.0" default: null record: description: - The full DNS record to create or delete required: true ttl: description: - The TTL to give the new record required: false default: 3600 (one hour) type: description: - The type of DNS record to create required: true choices: [ 'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'NS', 'SOA' ] alias: description: - Indicates if this is an alias record. 
required: false version_added: "1.9" default: False choices: [ 'True', 'False' ] alias_hosted_zone_id: description: - The hosted zone identifier. required: false version_added: "1.9" default: null alias_evaluate_target_health: description: - Whether or not to evaluate an alias target health. Useful for aliases to Elastic Load Balancers. required: false version_added: "2.1" default: false value: description: - The new value when creating a DNS record. YAML lists or multiple comma-spaced values are allowed for non-alias records. - When deleting a record all values for the record must be specified or Route53 will not delete it. required: false default: null overwrite: description: - Whether an existing record should be overwritten on create if values do not match required: false default: null retry_interval: description: - In the case that route53 is still servicing a prior request, this module will wait and try again after this many seconds. If you have many domain names, the default of 500 seconds may be too long. required: false default: 500 private_zone: description: - If set to true, the private zone matching the requested name within the domain will be used if there are both public and private zones. The default is to use the public zone. required: false default: false version_added: "1.9" identifier: description: - Have to be specified for Weighted, latency-based and failover resource record sets only. An identifier that differentiates among multiple resource record sets that have the same combination of DNS name and type. required: false default: null version_added: "2.0" weight: description: - Weighted resource record sets only. Among resource record sets that have the same combination of DNS name and type, a value that determines what portion of traffic for the current resource record set is routed to the associated location. 
required: false default: null version_added: "2.0" region: description: - Latency-based resource record sets only Among resource record sets that have the same combination of DNS name and type, a value that determines which region this should be associated with for the latency-based routing required: false default: null version_added: "2.0" health_check: description: - Health check to associate with this record required: false default: null version_added: "2.0" failover: description: - Failover resource record sets only. Whether this is the primary or secondary resource record set. Allowed values are PRIMARY and SECONDARY required: false default: null version_added: "2.0" vpc_id: description: - "When used in conjunction with private_zone: true, this will only modify records in the private hosted zone attached to this VPC." - This allows you to have multiple private hosted zones, all with the same name, attached to different VPCs. required: false default: null version_added: "2.0" wait: description: - Wait until the changes have been replicated to all Amazon Route 53 DNS servers. required: false default: no version_added: "2.1" wait_timeout: description: - How long to wait for the changes to be replicated, in seconds. 
required: false default: 300 version_added: "2.1" author: - "Bruce Pennypacker (@bpennypacker)" - "Mike Buzzetti <mike.buzzetti@gmail.com>" extends_documentation_fragment: aws ''' EXAMPLES = ''' # Add new.foo.com as an A record with 3 IPs and wait until the changes have been replicated - route53: state: present zone: foo.com record: new.foo.com type: A ttl: 7200 value: 1.1.1.1,2.2.2.2,3.3.3.3 wait: yes # Update new.foo.com as an A record with a list of 3 IPs and wait until the changes have been replicated - route53: state: present zone: foo.com record: new.foo.com type: A ttl: 7200 value: - 1.1.1.1 - 2.2.2.2 - 3.3.3.3 wait: yes # Retrieve the details for new.foo.com - route53: state: get zone: foo.com record: new.foo.com type: A register: rec # Delete new.foo.com A record using the results from the get command - route53: state: absent zone: foo.com record: "{{ rec.set.record }}" ttl: "{{ rec.set.ttl }}" type: "{{ rec.set.type }}" value: "{{ rec.set.value }}" # Add an AAAA record. Note that because there are colons in the value # that the IPv6 address must be quoted. Also shows using the old form command=create. - route53: command: create zone: foo.com record: localhost.foo.com type: AAAA ttl: 7200 value: "::1" # Add a SRV record with multiple fields for a service on port 22222 # For more information on SRV records see: # https://en.wikipedia.org/wiki/SRV_record - route53: state: present zone: foo.com record: "_example-service._tcp.foo.com" type: SRV value: "0 0 22222 host1.foo.com,0 0 22222 host2.foo.com" # Add a TXT record. 
Note that TXT and SPF records must be surrounded # by quotes when sent to Route 53: - route53: state: present zone: foo.com record: localhost.foo.com type: TXT ttl: 7200 value: '"bar"' # Add an alias record that points to an Amazon ELB: - route53: state: present zone: foo.com record: elb.foo.com type: A value: "{{ elb_dns_name }}" alias: True alias_hosted_zone_id: "{{ elb_zone_id }}" # Retrieve the details for elb.foo.com - route53: state: get zone: foo.com record: elb.foo.com type: A register: rec # Delete an alias record using the results from the get command - route53: state: absent zone: foo.com record: "{{ rec.set.record }}" ttl: "{{ rec.set.ttl }}" type: "{{ rec.set.type }}" value: "{{ rec.set.value }}" alias: True alias_hosted_zone_id: "{{ rec.set.alias_hosted_zone_id }}" # Add an alias record that points to an Amazon ELB and evaluates it health: - route53: state: present zone: foo.com record: elb.foo.com type: A value: "{{ elb_dns_name }}" alias: True alias_hosted_zone_id: "{{ elb_zone_id }}" alias_evaluate_target_health: True # Add an AAAA record with Hosted Zone ID. 
- route53: state: present zone: foo.com hosted_zone_id: Z2AABBCCDDEEFF record: localhost.foo.com type: AAAA ttl: 7200 value: "::1" # Use a routing policy to distribute traffic: - route53: state: present zone: foo.com record: www.foo.com type: CNAME value: host1.foo.com ttl: 30 # Routing policy identifier: "host1@www" weight: 100 health_check: "d994b780-3150-49fd-9205-356abdd42e75" ''' MINIMUM_BOTO_VERSION = '2.28.0' WAIT_RETRY_SLEEP = 5 # how many seconds to wait between propagation status polls import time import distutils.version # import module snippets from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info try: import boto import boto.ec2 from boto import route53 from boto.route53 import Route53Connection from boto.route53.record import Record, ResourceRecordSets from boto.route53.status import Status HAS_BOTO = True except ImportError: HAS_BOTO = False class TimeoutError(Exception): pass def get_zone_by_name(conn, module, zone_name, want_private, zone_id, want_vpc_id): """Finds a zone by name or zone_id""" for zone in invoke_with_throttling_retries(conn.get_zones): # only save this zone id if the private status of the zone matches # the private_zone_in boolean specified in the params private_zone = module.boolean(zone.config.get('PrivateZone', False)) if private_zone == want_private and ((zone.name == zone_name and zone_id is None) or zone.id.replace('/hostedzone/', '') == zone_id): if want_vpc_id: # NOTE: These details aren't available in other boto methods, hence the necessary # extra API call hosted_zone = invoke_with_throttling_retries(conn.get_hosted_zone, zone.id) zone_details = hosted_zone['GetHostedZoneResponse'] # this is to deal with this boto bug: https://github.com/boto/boto/pull/2882 if isinstance(zone_details['VPCs'], dict): if zone_details['VPCs']['VPC']['VPCId'] == want_vpc_id: return zone else: # Forward compatibility for when boto fixes that bug if want_vpc_id in 
# NOTE(review): this chunk begins mid-way through get_zone_by_name(), whose
# `def` line is above this view; the lines below are its tail (the VPC-id
# membership check), reproduced unchanged.
                    [v['VPCId'] for v in zone_details['VPCs']]:
                        return zone
                else:
                    return zone
    return None


def commit(changes, retry_interval, wait, wait_timeout):
    """Commit changes, but retry PriorRequestNotComplete errors."""
    result = None
    retry = 10
    while True:
        try:
            retry -= 1
            result = changes.commit()
            break
        except boto.route53.exception.DNSServerError as e:
            # Route53 reports the error code inside an XML body; pull out the
            # text between <Code> and </Code>.
            code = e.body.split("<Code>")[1]
            code = code.split("</Code>")[0]
            # Only PriorRequestNotComplete is worth retrying; give up once the
            # retry budget (10 attempts) is exhausted.
            if code != 'PriorRequestNotComplete' or retry < 0:
                raise e
            time.sleep(float(retry_interval))
    if wait:
        # Optionally poll the change set until it is fully replicated
        # (status INSYNC) or the caller-supplied timeout elapses.
        timeout_time = time.time() + wait_timeout
        connection = changes.connection
        change = result['ChangeResourceRecordSetsResponse']['ChangeInfo']
        status = Status(connection, change)
        while status.status != 'INSYNC' and time.time() < timeout_time:
            time.sleep(WAIT_RETRY_SLEEP)
            status.update()
        if time.time() >= timeout_time:
            raise TimeoutError()
    return result


# Shamelessly copied over from https://git.io/vgmDG
IGNORE_CODE = 'Throttling'
MAX_RETRIES = 5


def invoke_with_throttling_retries(function_ref, *argv, **kwargs):
    """Call function_ref(*argv, **kwargs), retrying AWS 'Throttling' errors
    with exponential backoff (5s, 10s, 20s, ...) up to MAX_RETRIES times."""
    retries = 0
    while True:
        try:
            retval = function_ref(*argv, **kwargs)
            return retval
        except boto.exception.BotoServerError as e:
            # Re-raise anything that is not throttling, or when out of retries.
            if e.code != IGNORE_CODE or retries == MAX_RETRIES:
                raise e
            time.sleep(5 * (2**retries))
            retries += 1


def main():
    """Ansible module entry point: create, delete or fetch a Route53 record."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state=dict(aliases=['command'], choices=['present', 'absent', 'get', 'create', 'delete'], required=True),
        zone=dict(required=True),
        hosted_zone_id=dict(required=False, default=None),
        record=dict(required=True),
        ttl=dict(required=False, type='int', default=3600),
        type=dict(choices=['A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'NS', 'SOA'], required=True),
        alias=dict(required=False, type='bool'),
        alias_hosted_zone_id=dict(required=False),
        alias_evaluate_target_health=dict(required=False, type='bool', default=False),
        value=dict(required=False, type='list', default=[]),
        overwrite=dict(required=False, type='bool'),
        retry_interval=dict(required=False, default=500),
        private_zone=dict(required=False, type='bool', default=False),
        identifier=dict(required=False, default=None),
        weight=dict(required=False, type='int'),
        region=dict(required=False),
        health_check=dict(required=False),
        failover=dict(required=False, choices=['PRIMARY', 'SECONDARY']),
        vpc_id=dict(required=False),
        wait=dict(required=False, type='bool', default=False),
        wait_timeout=dict(required=False, type='int', default=300),
    ))

    # state=present, absent, create, delete THEN value is required
    required_if = [('state', 'present', ['value']), ('state', 'create', ['value'])]
    required_if.extend([('state', 'absent', ['value']), ('state', 'delete', ['value'])])

    # If alias is True then you must specify alias_hosted_zone as well
    required_together = [['alias', 'alias_hosted_zone_id']]

    # failover, region, and weight are mutually exclusive
    mutually_exclusive = [('failover', 'region', 'weight')]

    module = AnsibleModule(argument_spec=argument_spec, required_together=required_together, required_if=required_if, mutually_exclusive=mutually_exclusive)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    if distutils.version.StrictVersion(boto.__version__) < distutils.version.StrictVersion(MINIMUM_BOTO_VERSION):
        module.fail_json(msg='Found boto in version %s, but >= %s is required' % (boto.__version__, MINIMUM_BOTO_VERSION))

    # Normalize the five state aliases down to three internal commands.
    if module.params['state'] in ('present', 'create'):
        command_in = 'create'
    elif module.params['state'] in ('absent', 'delete'):
        command_in = 'delete'
    elif module.params['state'] == 'get':
        command_in = 'get'

    zone_in = module.params.get('zone').lower()
    hosted_zone_id_in = module.params.get('hosted_zone_id')
    ttl_in = module.params.get('ttl')
    record_in = module.params.get('record').lower()
    type_in = module.params.get('type')
    value_in = module.params.get('value')
    alias_in = module.params.get('alias')
    alias_hosted_zone_id_in = module.params.get('alias_hosted_zone_id')
    alias_evaluate_target_health_in = module.params.get('alias_evaluate_target_health')
    retry_interval_in = module.params.get('retry_interval')

    # Supplying a vpc_id implies a private zone regardless of private_zone.
    if module.params['vpc_id'] is not None:
        private_zone_in = True
    else:
        private_zone_in = module.params.get('private_zone')

    identifier_in = module.params.get('identifier')
    weight_in = module.params.get('weight')
    region_in = module.params.get('region')
    health_check_in = module.params.get('health_check')
    failover_in = module.params.get('failover')
    vpc_id_in = module.params.get('vpc_id')
    wait_in = module.params.get('wait')
    wait_timeout_in = module.params.get('wait_timeout')

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)

    # Route53 names are always fully qualified; append the trailing dot.
    if zone_in[-1:] != '.':
        zone_in += "."

    if record_in[-1:] != '.':
        record_in += "."

    if command_in == 'create' or command_in == 'delete':
        if alias_in and len(value_in) != 1:
            module.fail_json(msg="parameter 'value' must contain a single dns name for alias records")
        if (weight_in is not None or region_in is not None or failover_in is not None) and identifier_in is None:
            module.fail_json(msg="If you specify failover, region or weight you must also specify identifier")
        if (weight_in is None and region_in is None and failover_in is None) and identifier_in is not None:
            module.fail_json(msg="You have specified identifier which makes sense only if you specify one of: weight, region or failover.")

    # connect to the route53 endpoint
    try:
        conn = Route53Connection(**aws_connect_kwargs)
    except boto.exception.BotoServerError as e:
        module.fail_json(msg=e.error_message)

    # Find the named zone ID
    zone = get_zone_by_name(conn, module, zone_in, private_zone_in, hosted_zone_id_in, vpc_id_in)

    # Verify that the requested zone is already defined in Route53
    if zone is None:
        errmsg = "Zone %s does not exist in Route53" % zone_in
        module.fail_json(msg=errmsg)

    record = {}
    found_record = False
    wanted_rset = Record(name=record_in, type=type_in, ttl=ttl_in, identifier=identifier_in, weight=weight_in, region=region_in, health_check=health_check_in, failover=failover_in)

    for v in value_in:
        if alias_in:
            wanted_rset.set_alias(alias_hosted_zone_id_in, v, alias_evaluate_target_health_in)
        else:
            wanted_rset.add_value(v)

    sets = invoke_with_throttling_retries(conn.get_all_rrsets, zone.id, name=record_in, type=type_in, identifier=identifier_in)
    sets_iter = iter(sets)
    while True:
        try:
            rset = invoke_with_throttling_retries(next, sets_iter)
        except StopIteration:
            break
        # Due to a bug in either AWS or Boto, "special" characters are returned as octals, preventing round
        # tripping of things like * and @.
        decoded_name = rset.name.replace(r'\052', '*')
        decoded_name = decoded_name.replace(r'\100', '@')
        # Need to save this changes in rset, because of comparing rset.to_xml() == wanted_rset.to_xml() in next block
        rset.name = decoded_name

        if identifier_in is not None:
            identifier_in = str(identifier_in)

        if rset.type == type_in and decoded_name.lower() == record_in.lower() and rset.identifier == identifier_in:
            found_record = True
            record['zone'] = zone_in
            record['type'] = rset.type
            record['record'] = decoded_name
            record['ttl'] = rset.ttl
            record['value'] = ','.join(sorted(rset.resource_records))
            record['values'] = sorted(rset.resource_records)
            if hosted_zone_id_in:
                record['hosted_zone_id'] = hosted_zone_id_in
            record['identifier'] = rset.identifier
            record['weight'] = rset.weight
            record['region'] = rset.region
            record['failover'] = rset.failover
            record['health_check'] = rset.health_check
            if hosted_zone_id_in:
                record['hosted_zone_id'] = hosted_zone_id_in
            if rset.alias_dns_name:
                record['alias'] = True
                record['value'] = rset.alias_dns_name
                record['values'] = [rset.alias_dns_name]
                record['alias_hosted_zone_id'] = rset.alias_hosted_zone_id
                record['alias_evaluate_target_health'] = rset.alias_evaluate_target_health
            else:
                record['alias'] = False
                record['value'] = ','.join(sorted(rset.resource_records))
                record['values'] = sorted(rset.resource_records)
            if command_in == 'create' and rset.to_xml() == wanted_rset.to_xml():
                # The record already exists with exactly the wanted content.
                module.exit_json(changed=False)

        # We need to look only at the first rrset returned by the above call,
        # so break here. The returned elements begin with the one matching our
        # requested name, type, and identifier, if such an element exists,
        # followed by all others that come after it in alphabetical order.
        # Therefore, if the first set does not match, no subsequent set will
        # match either.
        break

    if command_in == 'get':
        if type_in == 'NS':
            ns = record.get('values', [])
        else:
            # Retrieve name servers associated to the zone.
            z = invoke_with_throttling_retries(conn.get_zone, zone_in)
            ns = invoke_with_throttling_retries(z.get_nameservers)

        module.exit_json(changed=False, set=record, nameservers=ns)

    if command_in == 'delete' and not found_record:
        module.exit_json(changed=False)

    changes = ResourceRecordSets(conn, zone.id)

    if command_in == 'create' or command_in == 'delete':
        if command_in == 'create' and found_record:
            # Creating over an existing record requires explicit overwrite;
            # Route53 models this as an UPSERT change.
            if not module.params['overwrite']:
                module.fail_json(msg="Record already exists with different value. Set 'overwrite' to replace it")
            command = 'UPSERT'
        else:
            command = command_in.upper()
        changes.add_change_record(command, wanted_rset)

    try:
        result = invoke_with_throttling_retries(commit, changes, retry_interval_in, wait_in, wait_timeout_in)
    except boto.route53.exception.DNSServerError as e:
        # Extract the human-readable <Message> from the XML error body.
        txt = e.body.split("<Message>")[1]
        txt = txt.split("</Message>")[0]
        if "but it already exists" in txt:
            module.exit_json(changed=False)
        else:
            module.fail_json(msg=txt)
    except TimeoutError:
        module.fail_json(msg='Timeout waiting for changes to replicate')

    module.exit_json(changed=True)


if __name__ == '__main__':
    main()
gpl-3.0
moxon6/chemlab
build/lib.win32-3.4/chemlab/graphics/renderers/atom.py
6
3236
import numpy as np

from .. import colors
from ...db import ChemlabDB

from .base import AbstractRenderer
from .sphere import SphereRenderer
from .sphere_imp import SphereImpostorRenderer
from .point import PointRenderer

# Van der Waals radii keyed by atomic symbol (the db provides an 'Xx' entry
# that serves as the fallback for unknown atom types).
vdw_dict = ChemlabDB().get("data", 'vdwdict')


class AtomRenderer(AbstractRenderer):
    """Render atoms by using different rendering methods.

    **Parameters**

    widget:
        The parent QChemlabWidget
    r_array: np.ndarray((NATOMS, 3), dtype=float)
        The atomic coordinate array
    type_array: np.ndarray((NATOMS, 3), dtype=object)
        An array containing all the atomic symbols like `Ar`, `H`, `O`.
        If the atomic type is unknown, use the `Xx` symbol.
    backend: "impostors" | "polygons" | "points"
        You can choose the rendering method between the sphere impostors,
        polygonal sphere and points.

        .. seealso: :py:class:`~chemlab.graphics.renderers.SphereRenderer`
                    :py:class:`~chemlab.graphics.renderers.SphereImpostorRenderer`
                    :py:class:`~chemlab.graphics.renderers.PointRenderer`
    color_scheme: dict, should contain the 'Xx' key,value pair
        A dictionary mapping atom types to colors. By default it is the
        color scheme provided by `chemlab.graphics.colors.default_atom_map`.
        The 'Xx' symbol value is taken as the default color.
    radii_map: dict, should contain the 'Xx' key,value pair.
        A dictionary mapping atom types to radii. The default is the
        mapping contained in `chemlab.db.vdw.vdw_dict`. The 'Xx' value
        is used for atom types missing from the map.
    shading: "phong" | other shading model supported by the backend
        Forwarded to the sphere backends.
    """

    def __init__(self, widget, r_array, type_array, backend='impostors',
                 color_scheme=colors.default_atom_map, radii_map=vdw_dict,
                 shading='phong'):
        radii = []
        colorlist = []

        for atom_type in type_array:
            # BUGFIX: previously an unknown atom type raised KeyError here even
            # though both maps document an 'Xx' fallback. Resolve radii the
            # same way colors are resolved: fall back to the 'Xx' entry.
            if atom_type in radii_map:
                radii.append(radii_map[atom_type])
            else:
                radii.append(radii_map['Xx'])
            colorlist.append(color_scheme.get(atom_type, color_scheme['Xx']))

        self.radii = radii
        self.colors = np.array(colorlist, dtype='uint8')

        # Delegate the actual drawing to the chosen backend renderer.
        if backend == 'polygons':
            self.sr = SphereRenderer(widget, r_array, radii, colorlist, shading=shading)
        elif backend == 'impostors':
            self.sr = SphereImpostorRenderer(widget, r_array, radii, colorlist, shading=shading)
        elif backend == 'points':
            self.sr = PointRenderer(widget, r_array, colorlist)
        else:
            raise Exception("No backend %s available. Choose between polygons, impostors or points" % backend)

    def draw(self):
        """Draw all atoms using the selected backend renderer."""
        self.sr.draw()

    def update_positions(self, r_array):
        """Update the atomic positions
        """
        self.sr.update_positions(r_array)

    def update_colors(self, cols):
        """Update the atom colors."""
        self.sr.update_colors(cols)

    def update_radii(self, radii):
        """Update the atom radii."""
        self.sr.update_radii(radii)

    def hide(self, mask):
        """Hide the atoms selected by the boolean *mask*."""
        self.sr.hide(mask)

    def change_shading(self, shd):
        """Switch the shading model used by the backend renderer."""
        self.sr.change_shading(shd)
gpl-3.0
texcaltech/windmilltownhomes-old
django/template/context.py
15
5405
from django.core.exceptions import ImproperlyConfigured
from django.utils.importlib import import_module

# Cache of actual callables.
_standard_context_processors = None
# We need the CSRF processor no matter what the user has in their settings,
# because otherwise it is a security vulnerability, and we can't afford to leave
# this to human error or failure to read migration instructions.
_builtin_context_processors = ('django.core.context_processors.csrf',)

class ContextPopException(Exception):
    "pop() has been called more times than push()"
    pass

class BaseContext(object):
    # A stack of dicts; lookups scan from the most recently pushed dict
    # downward, so later pushes shadow earlier values.
    def __init__(self, dict_=None):
        dict_ = dict_ or {}
        self.dicts = [dict_]

    def __repr__(self):
        return repr(self.dicts)

    def __iter__(self):
        # Yield dicts in lookup order: most recently pushed first.
        for d in reversed(self.dicts):
            yield d

    def push(self):
        # Push a fresh, empty scope and return it so callers can populate it.
        d = {}
        self.dicts.append(d)
        return d

    def pop(self):
        # The initial dict is never popped; one extra pop is a caller bug.
        if len(self.dicts) == 1:
            raise ContextPopException
        return self.dicts.pop()

    def __setitem__(self, key, value):
        "Set a variable in the current context"
        self.dicts[-1][key] = value

    def __getitem__(self, key):
        "Get a variable's value, starting at the current context and going upward"
        for d in reversed(self.dicts):
            if key in d:
                return d[key]
        raise KeyError(key)

    def __delitem__(self, key):
        "Delete a variable from the current context"
        del self.dicts[-1][key]

    def has_key(self, key):
        # Membership across all scopes, irrespective of shadowing.
        for d in self.dicts:
            if key in d:
                return True
        return False

    def __contains__(self, key):
        return self.has_key(key)

    def get(self, key, otherwise=None):
        # Like __getitem__ but returns `otherwise` instead of raising KeyError.
        for d in reversed(self.dicts):
            if key in d:
                return d[key]
        return otherwise

class Context(BaseContext):
    "A stack container for variable context"
    def __init__(self, dict_=None, autoescape=True, current_app=None, use_l10n=None):
        self.autoescape = autoescape
        self.use_l10n = use_l10n
        self.current_app = current_app
        self.render_context = RenderContext()
        super(Context, self).__init__(dict_)

    def update(self, other_dict):
        "Like dict.update(). Pushes an entire dictionary's keys and values onto the context."
        if not hasattr(other_dict, '__getitem__'):
            raise TypeError('other_dict must be a mapping (dictionary-like) object.')
        self.dicts.append(other_dict)
        return other_dict

class RenderContext(BaseContext):
    """
    A stack container for storing Template state.

    RenderContext simplifies the implementation of template Nodes by providing a
    safe place to store state between invocations of a node's `render` method.

    The RenderContext also provides scoping rules that are more sensible for
    'template local' variables. The render context stack is pushed before each
    template is rendered, creating a fresh scope with nothing in it. Name
    resolution fails if a variable is not found at the top of the RequestContext
    stack. Thus, variables are local to a specific template and don't affect the
    rendering of other templates as they would if they were stored in the normal
    template context.
    """
    def __iter__(self):
        # Unlike BaseContext, only the top-most (current template's) scope
        # is visible; iterate its keys.
        for d in self.dicts[-1]:
            yield d

    def has_key(self, key):
        return key in self.dicts[-1]

    def get(self, key, otherwise=None):
        # Lookup is restricted to the top-most scope (see class docstring).
        d = self.dicts[-1]
        if key in d:
            return d[key]
        return otherwise

# This is a function rather than module-level procedural code because we only
# want it to execute if somebody uses RequestContext.
def get_standard_processors():
    # Build (once) and return the tuple of context-processor callables from
    # the builtin processors plus settings.TEMPLATE_CONTEXT_PROCESSORS.
    from django.conf import settings
    global _standard_context_processors
    if _standard_context_processors is None:
        processors = []
        collect = []
        collect.extend(_builtin_context_processors)
        collect.extend(settings.TEMPLATE_CONTEXT_PROCESSORS)
        for path in collect:
            # Split "package.module.attr" into module path and callable name.
            i = path.rfind('.')
            module, attr = path[:i], path[i+1:]
            try:
                mod = import_module(module)
            except ImportError, e:
                raise ImproperlyConfigured('Error importing request processor module %s: "%s"' % (module, e))
            try:
                func = getattr(mod, attr)
            except AttributeError:
                raise ImproperlyConfigured('Module "%s" does not define a "%s" callable request processor' % (module, attr))
            processors.append(func)
        # Cache as an immutable tuple so later calls skip the import work.
        _standard_context_processors = tuple(processors)
    return _standard_context_processors

class RequestContext(Context):
    """
    This subclass of template.Context automatically populates itself using
    the processors defined in TEMPLATE_CONTEXT_PROCESSORS.
    Additional processors can be specified as a list of callables
    using the "processors" keyword argument.
    """
    def __init__(self, request, dict=None, processors=None, current_app=None, use_l10n=None):
        Context.__init__(self, dict, current_app=current_app, use_l10n=use_l10n)
        if processors is None:
            processors = ()
        else:
            processors = tuple(processors)
        # Each processor returns a dict that is pushed onto the context stack.
        for processor in get_standard_processors() + processors:
            self.update(processor(request))
bsd-3-clause
mzdaniel/oh-mainline
vendor/packages/twisted/twisted/test/test_udp.py
18
25186
# -*- test-case-name: twisted.test.test_udp -*- # Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. """ Tests for implementations of L{IReactorUDP} and L{IReactorMulticast}. """ from twisted.trial import unittest, util from twisted.internet.defer import Deferred, gatherResults, maybeDeferred from twisted.internet import protocol, reactor, error, defer, interfaces, udp from twisted.python import runtime class Mixin: started = 0 stopped = 0 startedDeferred = None def __init__(self): self.packets = [] def startProtocol(self): self.started = 1 if self.startedDeferred is not None: d, self.startedDeferred = self.startedDeferred, None d.callback(None) def stopProtocol(self): self.stopped = 1 class Server(Mixin, protocol.DatagramProtocol): packetReceived = None refused = 0 def datagramReceived(self, data, addr): self.packets.append((data, addr)) if self.packetReceived is not None: d, self.packetReceived = self.packetReceived, None d.callback(None) class Client(Mixin, protocol.ConnectedDatagramProtocol): packetReceived = None refused = 0 def datagramReceived(self, data): self.packets.append(data) if self.packetReceived is not None: d, self.packetReceived = self.packetReceived, None d.callback(None) def connectionFailed(self, failure): if self.startedDeferred is not None: d, self.startedDeferred = self.startedDeferred, None d.errback(failure) self.failure = failure def connectionRefused(self): if self.startedDeferred is not None: d, self.startedDeferred = self.startedDeferred, None d.errback(error.ConnectionRefusedError("yup")) self.refused = 1 class GoodClient(Server): def connectionRefused(self): if self.startedDeferred is not None: d, self.startedDeferred = self.startedDeferred, None d.errback(error.ConnectionRefusedError("yup")) self.refused = 1 class BadClientError(Exception): """ Raised by BadClient at the end of every datagramReceived call to try and screw stuff up. 
""" class BadClient(protocol.DatagramProtocol): """ A DatagramProtocol which always raises an exception from datagramReceived. Used to test error handling behavior in the reactor for that method. """ d = None def setDeferred(self, d): """ Set the Deferred which will be called back when datagramReceived is called. """ self.d = d def datagramReceived(self, bytes, addr): if self.d is not None: d, self.d = self.d, None d.callback(bytes) raise BadClientError("Application code is very buggy!") class UDPTestCase(unittest.TestCase): def test_oldAddress(self): """ The C{type} of the host address of a listening L{DatagramProtocol}'s transport is C{"UDP"}. """ server = Server() d = server.startedDeferred = defer.Deferred() p = reactor.listenUDP(0, server, interface="127.0.0.1") def cbStarted(ignored): addr = p.getHost() self.assertEquals(addr.type, 'UDP') return p.stopListening() return d.addCallback(cbStarted) def test_startStop(self): """ The L{DatagramProtocol}'s C{startProtocol} and C{stopProtocol} methods are called when its transports starts and stops listening, respectively. """ server = Server() d = server.startedDeferred = defer.Deferred() port1 = reactor.listenUDP(0, server, interface="127.0.0.1") def cbStarted(ignored): self.assertEquals(server.started, 1) self.assertEquals(server.stopped, 0) return port1.stopListening() def cbStopped(ignored): self.assertEquals(server.stopped, 1) return d.addCallback(cbStarted).addCallback(cbStopped) def test_rebind(self): """ Re-listening with the same L{DatagramProtocol} re-invokes the C{startProtocol} callback. 
""" server = Server() d = server.startedDeferred = defer.Deferred() p = reactor.listenUDP(0, server, interface="127.0.0.1") def cbStarted(ignored, port): return port.stopListening() def cbStopped(ignored): d = server.startedDeferred = defer.Deferred() p = reactor.listenUDP(0, server, interface="127.0.0.1") return d.addCallback(cbStarted, p) return d.addCallback(cbStarted, p) def test_bindError(self): """ A L{CannotListenError} exception is raised when attempting to bind a second protocol instance to an already bound port """ server = Server() d = server.startedDeferred = defer.Deferred() port = reactor.listenUDP(0, server, interface='127.0.0.1') def cbStarted(ignored): self.assertEquals(port.getHost(), server.transport.getHost()) server2 = Server() self.assertRaises( error.CannotListenError, reactor.listenUDP, port.getHost().port, server2, interface='127.0.0.1') d.addCallback(cbStarted) def cbFinished(ignored): return port.stopListening() d.addCallback(cbFinished) return d def test_sendPackets(self): """ Datagrams can be sent with the transport's C{write} method and received via the C{datagramReceived} callback method. 
""" server = Server() serverStarted = server.startedDeferred = defer.Deferred() port1 = reactor.listenUDP(0, server, interface="127.0.0.1") client = GoodClient() clientStarted = client.startedDeferred = defer.Deferred() def cbServerStarted(ignored): self.port2 = reactor.listenUDP(0, client, interface="127.0.0.1") return clientStarted d = serverStarted.addCallback(cbServerStarted) def cbClientStarted(ignored): client.transport.connect("127.0.0.1", server.transport.getHost().port) cAddr = client.transport.getHost() sAddr = server.transport.getHost() serverSend = client.packetReceived = defer.Deferred() server.transport.write("hello", (cAddr.host, cAddr.port)) clientWrites = [ ("a",), ("b", None), ("c", (sAddr.host, sAddr.port))] def cbClientSend(ignored): if clientWrites: nextClientWrite = server.packetReceived = defer.Deferred() nextClientWrite.addCallback(cbClientSend) client.transport.write(*clientWrites.pop(0)) return nextClientWrite # No one will ever call .errback on either of these Deferreds, # but there is a non-trivial amount of test code which might # cause them to fail somehow. So fireOnOneErrback=True. return defer.DeferredList([ cbClientSend(None), serverSend], fireOnOneErrback=True) d.addCallback(cbClientStarted) def cbSendsFinished(ignored): cAddr = client.transport.getHost() sAddr = server.transport.getHost() self.assertEquals( client.packets, [("hello", (sAddr.host, sAddr.port))]) clientAddr = (cAddr.host, cAddr.port) self.assertEquals( server.packets, [("a", clientAddr), ("b", clientAddr), ("c", clientAddr)]) d.addCallback(cbSendsFinished) def cbFinished(ignored): return defer.DeferredList([ defer.maybeDeferred(port1.stopListening), defer.maybeDeferred(self.port2.stopListening)], fireOnOneErrback=True) d.addCallback(cbFinished) return d def test_connectionRefused(self): """ A L{ConnectionRefusedError} exception is raised when a connection attempt is actively refused by the other end. Note: This test assumes no one is listening on port 80 UDP. 
""" client = GoodClient() clientStarted = client.startedDeferred = defer.Deferred() port = reactor.listenUDP(0, client, interface="127.0.0.1") server = Server() serverStarted = server.startedDeferred = defer.Deferred() port2 = reactor.listenUDP(0, server, interface="127.0.0.1") d = defer.DeferredList( [clientStarted, serverStarted], fireOnOneErrback=True) def cbStarted(ignored): connectionRefused = client.startedDeferred = defer.Deferred() client.transport.connect("127.0.0.1", 80) for i in range(10): client.transport.write(str(i)) server.transport.write(str(i), ("127.0.0.1", 80)) return self.assertFailure( connectionRefused, error.ConnectionRefusedError) d.addCallback(cbStarted) def cbFinished(ignored): return defer.DeferredList([ defer.maybeDeferred(port.stopListening), defer.maybeDeferred(port2.stopListening)], fireOnOneErrback=True) d.addCallback(cbFinished) return d def test_badConnect(self): """ A call to the transport's connect method fails with a L{ValueError} when a non-IP address is passed as the host value. A call to a transport's connect method fails with a L{RuntimeError} when the transport is already connected. """ client = GoodClient() port = reactor.listenUDP(0, client, interface="127.0.0.1") self.assertRaises(ValueError, client.transport.connect, "localhost", 80) client.transport.connect("127.0.0.1", 80) self.assertRaises(RuntimeError, client.transport.connect, "127.0.0.1", 80) return port.stopListening() def test_datagramReceivedError(self): """ When datagramReceived raises an exception it is logged but the port is not disconnected. """ finalDeferred = defer.Deferred() def cbCompleted(ign): """ Flush the exceptions which the reactor should have logged and make sure they're actually there. 
""" errs = self.flushLoggedErrors(BadClientError) self.assertEquals(len(errs), 2, "Incorrectly found %d errors, expected 2" % (len(errs),)) finalDeferred.addCallback(cbCompleted) client = BadClient() port = reactor.listenUDP(0, client, interface='127.0.0.1') def cbCleanup(result): """ Disconnect the port we started and pass on whatever was given to us in case it was a Failure. """ return defer.maybeDeferred(port.stopListening).addBoth(lambda ign: result) finalDeferred.addBoth(cbCleanup) addr = port.getHost() # UDP is not reliable. Try to send as many as 60 packets before giving # up. Conceivably, all sixty could be lost, but they probably won't be # unless all UDP traffic is being dropped, and then the rest of these # UDP tests will likely fail as well. Ideally, this test (and probably # others) wouldn't even use actual UDP traffic: instead, they would # stub out the socket with a fake one which could be made to behave in # whatever way the test desires. Unfortunately, this is hard because # of differences in various reactor implementations. attempts = range(60) succeededAttempts = [] def makeAttempt(): """ Send one packet to the listening BadClient. Set up a 0.1 second timeout to do re-transmits in case the packet is dropped. When two packets have been received by the BadClient, stop sending and let the finalDeferred's callbacks do some assertions. """ if not attempts: try: self.fail("Not enough packets received") except: finalDeferred.errback() self.failIfIdentical(client.transport, None, "UDP Protocol lost its transport") packet = str(attempts.pop(0)) packetDeferred = defer.Deferred() client.setDeferred(packetDeferred) client.transport.write(packet, (addr.host, addr.port)) def cbPacketReceived(packet): """ A packet arrived. Cancel the timeout for it, record it, and maybe finish the test. 
""" timeoutCall.cancel() succeededAttempts.append(packet) if len(succeededAttempts) == 2: # The second error has not yet been logged, since the # exception which causes it hasn't even been raised yet. # Give the datagramReceived call a chance to finish, then # let the test finish asserting things. reactor.callLater(0, finalDeferred.callback, None) else: makeAttempt() def ebPacketTimeout(err): """ The packet wasn't received quickly enough. Try sending another one. It doesn't matter if the packet for which this was the timeout eventually arrives: makeAttempt throws away the Deferred on which this function is the errback, so when datagramReceived callbacks, so it won't be on this Deferred, so it won't raise an AlreadyCalledError. """ makeAttempt() packetDeferred.addCallbacks(cbPacketReceived, ebPacketTimeout) packetDeferred.addErrback(finalDeferred.errback) timeoutCall = reactor.callLater( 0.1, packetDeferred.errback, error.TimeoutError( "Timed out in testDatagramReceivedError")) makeAttempt() return finalDeferred def test_portRepr(self): """ The port number being listened on can be found in the string returned from calling repr() on L{twisted.internet.udp.Port}. """ client = GoodClient() p = reactor.listenUDP(0, client) portNo = str(p.getHost().port) self.failIf(repr(p).find(portNo) == -1) def stoppedListening(ign): self.failIf(repr(p).find(portNo) != -1) d = defer.maybeDeferred(p.stopListening) d.addCallback(stoppedListening) return d def test_NoWarningOnBroadcast(self): """ C{'<broadcast>'} is an alternative way to say C{'255.255.255.255'} ({socket.gethostbyname("<broadcast>")} returns C{'255.255.255.255'}), so because it becomes a valid IP address, no deprecation warning about passing hostnames to L{twisted.internet.udp.Port.write} needs to be emitted by C{write()} in this case. 
""" class fakeSocket: def sendto(self, foo, bar): pass p = udp.Port(0, Server()) p.socket = fakeSocket() p.write("test", ("<broadcast>", 1234)) warnings = self.flushWarnings([self.test_NoWarningOnBroadcast]) self.assertEquals(len(warnings), 0) class ReactorShutdownInteraction(unittest.TestCase): """Test reactor shutdown interaction""" def setUp(self): """Start a UDP port""" self.server = Server() self.port = reactor.listenUDP(0, self.server, interface='127.0.0.1') def tearDown(self): """Stop the UDP port""" return self.port.stopListening() def testShutdownFromDatagramReceived(self): """Test reactor shutdown while in a recvfrom() loop""" # udp.Port's doRead calls recvfrom() in a loop, as an optimization. # It is important this loop terminate under various conditions. # Previously, if datagramReceived synchronously invoked # reactor.stop(), under certain reactors, the Port's socket would # synchronously disappear, causing an AttributeError inside that # loop. This was mishandled, causing the loop to spin forever. # This test is primarily to ensure that the loop never spins # forever. finished = defer.Deferred() pr = self.server.packetReceived = defer.Deferred() def pktRece(ignored): # Simulate reactor.stop() behavior :( self.server.transport.connectionLost() # Then delay this Deferred chain until the protocol has been # disconnected, as the reactor should do in an error condition # such as we are inducing. This is very much a whitebox test. reactor.callLater(0, finished.callback, None) pr.addCallback(pktRece) def flushErrors(ignored): # We are breaking abstraction and calling private APIs, any # number of horrible errors might occur. As long as the reactor # doesn't hang, this test is satisfied. (There may be room for # another, stricter test.) 
self.flushLoggedErrors() finished.addCallback(flushErrors) self.server.transport.write('\0' * 64, ('127.0.0.1', self.server.transport.getHost().port)) return finished class MulticastTestCase(unittest.TestCase): def setUp(self): self.server = Server() self.client = Client() # multicast won't work if we listen over loopback, apparently self.port1 = reactor.listenMulticast(0, self.server) self.port2 = reactor.listenMulticast(0, self.client) self.client.transport.connect( "127.0.0.1", self.server.transport.getHost().port) def tearDown(self): return gatherResults([ maybeDeferred(self.port1.stopListening), maybeDeferred(self.port2.stopListening)]) def testTTL(self): for o in self.client, self.server: self.assertEquals(o.transport.getTTL(), 1) o.transport.setTTL(2) self.assertEquals(o.transport.getTTL(), 2) def test_loopback(self): """ Test that after loopback mode has been set, multicast packets are delivered to their sender. """ self.assertEquals(self.server.transport.getLoopbackMode(), 1) addr = self.server.transport.getHost() joined = self.server.transport.joinGroup("225.0.0.250") def cbJoined(ignored): d = self.server.packetReceived = Deferred() self.server.transport.write("hello", ("225.0.0.250", addr.port)) return d joined.addCallback(cbJoined) def cbPacket(ignored): self.assertEqual(len(self.server.packets), 1) self.server.transport.setLoopbackMode(0) self.assertEquals(self.server.transport.getLoopbackMode(), 0) self.server.transport.write("hello", ("225.0.0.250", addr.port)) # This is fairly lame. d = Deferred() reactor.callLater(0, d.callback, None) return d joined.addCallback(cbPacket) def cbNoPacket(ignored): self.assertEqual(len(self.server.packets), 1) joined.addCallback(cbNoPacket) return joined def test_interface(self): """ Test C{getOutgoingInterface} and C{setOutgoingInterface}. 
""" self.assertEqual( self.client.transport.getOutgoingInterface(), "0.0.0.0") self.assertEqual( self.server.transport.getOutgoingInterface(), "0.0.0.0") d1 = self.client.transport.setOutgoingInterface("127.0.0.1") d2 = self.server.transport.setOutgoingInterface("127.0.0.1") result = gatherResults([d1, d2]) def cbInterfaces(ignored): self.assertEqual( self.client.transport.getOutgoingInterface(), "127.0.0.1") self.assertEqual( self.server.transport.getOutgoingInterface(), "127.0.0.1") result.addCallback(cbInterfaces) return result def test_joinLeave(self): """ Test that multicast a group can be joined and left. """ d = self.client.transport.joinGroup("225.0.0.250") def clientJoined(ignored): return self.client.transport.leaveGroup("225.0.0.250") d.addCallback(clientJoined) def clientLeft(ignored): return self.server.transport.joinGroup("225.0.0.250") d.addCallback(clientLeft) def serverJoined(ignored): return self.server.transport.leaveGroup("225.0.0.250") d.addCallback(serverJoined) return d def test_joinFailure(self): """ Test that an attempt to join an address which is not a multicast address fails with L{error.MulticastJoinError}. """ # 127.0.0.1 is not a multicast address, so joining it should fail. return self.assertFailure( self.client.transport.joinGroup("127.0.0.1"), error.MulticastJoinError) if runtime.platform.isWindows() and not runtime.platform.isVista(): test_joinFailure.todo = "Windows' multicast is wonky" def test_multicast(self): """ Test that a multicast group can be joined and messages sent to and received from it. 
""" c = Server() p = reactor.listenMulticast(0, c) addr = self.server.transport.getHost() joined = self.server.transport.joinGroup("225.0.0.250") def cbJoined(ignored): d = self.server.packetReceived = Deferred() c.transport.write("hello world", ("225.0.0.250", addr.port)) return d joined.addCallback(cbJoined) def cbPacket(ignored): self.assertEquals(self.server.packets[0][0], "hello world") joined.addCallback(cbPacket) def cleanup(passthrough): result = maybeDeferred(p.stopListening) result.addCallback(lambda ign: passthrough) return result joined.addCallback(cleanup) return joined def test_multiListen(self): """ Test that multiple sockets can listen on the same multicast port and that they both receive multicast messages directed to that address. """ firstClient = Server() firstPort = reactor.listenMulticast( 0, firstClient, listenMultiple=True) portno = firstPort.getHost().port secondClient = Server() secondPort = reactor.listenMulticast( portno, secondClient, listenMultiple=True) joined = self.server.transport.joinGroup("225.0.0.250") def serverJoined(ignored): d1 = firstClient.packetReceived = Deferred() d2 = secondClient.packetReceived = Deferred() firstClient.transport.write("hello world", ("225.0.0.250", portno)) return gatherResults([d1, d2]) joined.addCallback(serverJoined) def gotPackets(ignored): self.assertEquals(firstClient.packets[0][0], "hello world") self.assertEquals(secondClient.packets[0][0], "hello world") joined.addCallback(gotPackets) def cleanup(passthrough): result = gatherResults([ maybeDeferred(firstPort.stopListening), maybeDeferred(secondPort.stopListening)]) result.addCallback(lambda ign: passthrough) return result joined.addBoth(cleanup) return joined if runtime.platform.isWindows(): test_multiListen.skip = ("on non-linux platforms it appears multiple " "processes can listen, but not multiple sockets " "in same process?") if not interfaces.IReactorUDP(reactor, None): UDPTestCase.skip = "This reactor does not support UDP" 
ReactorShutdownInteraction.skip = "This reactor does not support UDP" if not interfaces.IReactorMulticast(reactor, None): MulticastTestCase.skip = "This reactor does not support multicast" def checkForLinux22(): import os if os.path.exists("/proc/version"): s = open("/proc/version").read() if s.startswith("Linux version"): s = s.split()[2] if s.split(".")[:2] == ["2", "2"]: f = MulticastTestCase.testInterface.im_func f.todo = "figure out why this fails in linux 2.2" checkForLinux22()
agpl-3.0
wwright2/dcim3-angstrom1
sources/openembedded-core/meta/lib/oeqa/selftest/sstate.py
7
2453
import datetime import unittest import os import re import shutil import oeqa.utils.ftools as ftools from oeqa.selftest.base import oeSelfTest from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_test_layer class SStateBase(oeSelfTest): def setUpLocal(self): self.temp_sstate_location = None self.sstate_path = get_bb_var('SSTATE_DIR') self.distro = get_bb_var('NATIVELSBSTRING') self.distro_specific_sstate = os.path.join(self.sstate_path, self.distro) # Creates a special sstate configuration with the option to add sstate mirrors def config_sstate(self, temp_sstate_location=False, add_local_mirrors=[]): self.temp_sstate_location = temp_sstate_location if self.temp_sstate_location: temp_sstate_path = os.path.join(self.builddir, "temp_sstate_%s" % datetime.datetime.now().strftime('%Y%m%d%H%M%S')) config_temp_sstate = "SSTATE_DIR = \"%s\"" % temp_sstate_path self.append_config(config_temp_sstate) self.track_for_cleanup(temp_sstate_path) self.sstate_path = get_bb_var('SSTATE_DIR') self.distro = get_bb_var('NATIVELSBSTRING') self.distro_specific_sstate = os.path.join(self.sstate_path, self.distro) if add_local_mirrors: config_set_sstate_if_not_set = 'SSTATE_MIRRORS ?= ""' self.append_config(config_set_sstate_if_not_set) for local_mirror in add_local_mirrors: self.assertFalse(os.path.join(local_mirror) == os.path.join(self.sstate_path), msg='Cannot add the current sstate path as a sstate mirror') config_sstate_mirror = "SSTATE_MIRRORS += \"file://.* file:///%s/PATH\"" % local_mirror self.append_config(config_sstate_mirror) # Returns a list containing sstate files def search_sstate(self, filename_regex, distro_specific=True, distro_nonspecific=True): result = [] for root, dirs, files in os.walk(self.sstate_path): if distro_specific and re.search("%s/[a-z0-9]{2}$" % self.distro, root): for f in files: if re.search(filename_regex, f): result.append(f) if distro_nonspecific and re.search("%s/[a-z0-9]{2}$" % self.sstate_path, root): for f in files: if 
re.search(filename_regex, f): result.append(f) return result
mit
aarony/decaf-platform
scripts/qapi-types.py
47
6007
# # QAPI types generator # # Copyright IBM, Corp. 2011 # # Authors: # Anthony Liguori <aliguori@us.ibm.com> # # This work is licensed under the terms of the GNU GPLv2. # See the COPYING.LIB file in the top-level directory. from ordereddict import OrderedDict from qapi import * import sys import os import getopt import errno def generate_fwd_struct(name, members): return mcgen(''' typedef struct %(name)s %(name)s; typedef struct %(name)sList { %(name)s *value; struct %(name)sList *next; } %(name)sList; ''', name=name) def generate_struct(structname, fieldname, members): ret = mcgen(''' struct %(name)s { ''', name=structname) for argname, argentry, optional, structured in parse_args(members): if optional: ret += mcgen(''' bool has_%(c_name)s; ''', c_name=c_var(argname)) if structured: push_indent() ret += generate_struct("", argname, argentry) pop_indent() else: ret += mcgen(''' %(c_type)s %(c_name)s; ''', c_type=c_type(argentry), c_name=c_var(argname)) if len(fieldname): fieldname = " " + fieldname ret += mcgen(''' }%(field)s; ''', field=fieldname) return ret def generate_enum_lookup(name, values): ret = mcgen(''' const char *%(name)s_lookup[] = { ''', name=name) i = 0 for value in values: ret += mcgen(''' "%(value)s", ''', value=value.lower()) ret += mcgen(''' NULL, }; ''') return ret def generate_enum(name, values): lookup_decl = mcgen(''' extern const char *%(name)s_lookup[]; ''', name=name) enum_decl = mcgen(''' typedef enum %(name)s { ''', name=name) # append automatically generated _MAX value enum_values = values + [ 'MAX' ] i = 0 for value in enum_values: enum_decl += mcgen(''' %(abbrev)s_%(value)s = %(i)d, ''', abbrev=de_camel_case(name).upper(), value=c_var(value).upper(), i=i) i += 1 enum_decl += mcgen(''' } %(name)s; ''', name=name) return lookup_decl + enum_decl def generate_union(name, typeinfo): ret = mcgen(''' struct %(name)s { %(name)sKind kind; union { ''', name=name) for key in typeinfo: ret += mcgen(''' %(c_type)s %(c_name)s; ''', 
c_type=c_type(typeinfo[key]), c_name=c_var(key)) ret += mcgen(''' }; }; ''') return ret def generate_type_cleanup_decl(name): ret = mcgen(''' void qapi_free_%(type)s(%(c_type)s obj); ''', c_type=c_type(name),type=name) return ret def generate_type_cleanup(name): ret = mcgen(''' void qapi_free_%(type)s(%(c_type)s obj) { QapiDeallocVisitor *md; Visitor *v; if (!obj) { return; } md = qapi_dealloc_visitor_new(); v = qapi_dealloc_get_visitor(md); visit_type_%(type)s(v, &obj, NULL, NULL); qapi_dealloc_visitor_cleanup(md); } ''', c_type=c_type(name),type=name) return ret try: opts, args = getopt.gnu_getopt(sys.argv[1:], "p:o:", ["prefix=", "output-dir="]) except getopt.GetoptError, err: print str(err) sys.exit(1) output_dir = "" prefix = "" c_file = 'qapi-types.c' h_file = 'qapi-types.h' for o, a in opts: if o in ("-p", "--prefix"): prefix = a elif o in ("-o", "--output-dir"): output_dir = a + "/" c_file = output_dir + prefix + c_file h_file = output_dir + prefix + h_file try: os.makedirs(output_dir) except os.error, e: if e.errno != errno.EEXIST: raise fdef = open(c_file, 'w') fdecl = open(h_file, 'w') fdef.write(mcgen(''' /* AUTOMATICALLY GENERATED, DO NOT MODIFY */ /* * deallocation functions for schema-defined QAPI types * * Copyright IBM, Corp. 2011 * * Authors: * Anthony Liguori <aliguori@us.ibm.com> * Michael Roth <mdroth@linux.vnet.ibm.com> * * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. * See the COPYING.LIB file in the top-level directory. * */ #include "qapi/qapi-dealloc-visitor.h" #include "%(prefix)sqapi-types.h" #include "%(prefix)sqapi-visit.h" ''', prefix=prefix)) fdecl.write(mcgen(''' /* AUTOMATICALLY GENERATED, DO NOT MODIFY */ /* * schema-defined QAPI types * * Copyright IBM, Corp. 2011 * * Authors: * Anthony Liguori <aliguori@us.ibm.com> * * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. * See the COPYING.LIB file in the top-level directory. 
* */ #ifndef %(guard)s #define %(guard)s #include "qapi/qapi-types-core.h" ''', guard=guardname(h_file))) exprs = parse_schema(sys.stdin) for expr in exprs: ret = "\n" if expr.has_key('type'): ret += generate_fwd_struct(expr['type'], expr['data']) elif expr.has_key('enum'): ret += generate_enum(expr['enum'], expr['data']) fdef.write(generate_enum_lookup(expr['enum'], expr['data'])) elif expr.has_key('union'): ret += generate_fwd_struct(expr['union'], expr['data']) + "\n" ret += generate_enum('%sKind' % expr['union'], expr['data'].keys()) else: continue fdecl.write(ret) for expr in exprs: ret = "\n" if expr.has_key('type'): ret += generate_struct(expr['type'], "", expr['data']) + "\n" ret += generate_type_cleanup_decl(expr['type'] + "List") fdef.write(generate_type_cleanup(expr['type'] + "List") + "\n") ret += generate_type_cleanup_decl(expr['type']) fdef.write(generate_type_cleanup(expr['type']) + "\n") elif expr.has_key('union'): ret += generate_union(expr['union'], expr['data']) else: continue fdecl.write(ret) fdecl.write(''' #endif ''') fdecl.flush() fdecl.close() fdef.flush() fdef.close()
gpl-2.0
eadgarchen/tensorflow
tensorflow/python/debug/cli/curses_widgets.py
157
6418
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Widgets for Curses-based CLI.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.debug.cli import debugger_cli_common RL = debugger_cli_common.RichLine class NavigationHistoryItem(object): """Individual item in navigation history.""" def __init__(self, command, screen_output, scroll_position): """Constructor of NavigationHistoryItem. Args: command: (`str`) the command line text. screen_output: the screen output of the command. scroll_position: (`int`) scroll position in the screen output. """ self.command = command self.screen_output = screen_output self.scroll_position = scroll_position class CursesNavigationHistory(object): """Navigation history containing commands, outputs and scroll info.""" BACK_ARROW_TEXT = "<--" FORWARD_ARROW_TEXT = "-->" def __init__(self, capacity): """Constructor of CursesNavigationHistory. Args: capacity: (`int`) How many items this object can hold. Each item consists of a command stirng, an output RichTextLines object and a scroll position. Raises: ValueError: If capacity is not a positive number. 
""" if capacity <= 0: raise ValueError("In valid capacity value: %d" % capacity) self._capacity = capacity self._items = [] self._pointer = -1 def add_item(self, command, screen_output, scroll_position): """Add an item to the navigation histoyr. Args: command: command line text. screen_output: screen output produced for the command. scroll_position: (`int`) scroll position in the screen output. """ if self._pointer + 1 < len(self._items): self._items = self._items[:self._pointer + 1] self._items.append( NavigationHistoryItem(command, screen_output, scroll_position)) if len(self._items) > self._capacity: self._items = self._items[-self._capacity:] self._pointer = len(self._items) - 1 def update_scroll_position(self, new_scroll_position): """Update the scroll position of the currently-pointed-to history item. Args: new_scroll_position: (`int`) new scroll-position value. Raises: ValueError: If the history is empty. """ if not self._items: raise ValueError("Empty navigation history") self._items[self._pointer].scroll_position = new_scroll_position def size(self): return len(self._items) def pointer(self): return self._pointer def go_back(self): """Go back one place in the history, if possible. Decrease the pointer value by 1, if possible. Otherwise, the pointer value will be unchanged. Returns: The updated pointer value. Raises: ValueError: If history is empty. """ if not self._items: raise ValueError("Empty navigation history") if self.can_go_back(): self._pointer -= 1 return self._items[self._pointer] def go_forward(self): """Go forward one place in the history, if possible. Increase the pointer value by 1, if possible. Otherwise, the pointer value will be unchanged. Returns: The updated pointer value. Raises: ValueError: If history is empty. """ if not self._items: raise ValueError("Empty navigation history") if self.can_go_forward(): self._pointer += 1 return self._items[self._pointer] def can_go_back(self): """Test whether client can go back one place. 
Returns: (`bool`) Whether going back one place is possible. """ return self._pointer >= 1 def can_go_forward(self): """Test whether client can go forward one place. Returns: (`bool`) Whether going back one place is possible. """ return self._pointer + 1 < len(self._items) def render(self, max_length, backward_command, forward_command, latest_command_attribute="black_on_white", old_command_attribute="magenta_on_white"): """Render the rich text content of the single-line navigation bar. Args: max_length: (`int`) Maximum length of the navigation bar, in characters. backward_command: (`str`) command for going backward. Used to construct the shortcut menu item. forward_command: (`str`) command for going forward. Used to construct the shortcut menu item. latest_command_attribute: font attribute for lastest command. old_command_attribute: font attribute for old (non-latest) command. Returns: (`debugger_cli_common.RichTextLines`) the navigation bar text with attributes. """ output = RL("| ") output += RL( self.BACK_ARROW_TEXT, (debugger_cli_common.MenuItem(None, backward_command) if self.can_go_back() else None)) output += RL(" ") output += RL( self.FORWARD_ARROW_TEXT, (debugger_cli_common.MenuItem(None, forward_command) if self.can_go_forward() else None)) if self._items: command_attribute = (latest_command_attribute if (self._pointer == (len(self._items) - 1)) else old_command_attribute) output += RL(" | ") if self._pointer != len(self._items) - 1: output += RL("(-%d) " % (len(self._items) - 1 - self._pointer), command_attribute) if len(output) < max_length: maybe_truncated_command = self._items[self._pointer].command[ :(max_length - len(output))] output += RL(maybe_truncated_command, command_attribute) return debugger_cli_common.rich_text_lines_from_rich_line_list([output])
apache-2.0
gangadhar-kadam/sapphire_app
selling/doctype/cgi_fun/cgi_fun.py
1
3899
#For license information, please see license.txt from __future__ import unicode_literals import webnotes sql = webnotes.conn.sql from webnotes.model.doc import Document, addchild from webnotes.utils import cstr, cint, flt, comma_or from datetime import date,timedelta import datetime from webnotes.model.code import get_obj class DocType: def __init__(self, d, dl): self.doc, self.doclist = d, dl def create_customer(self,company,store): re="select name from tabCustomer where name='"+store+"'" res=sql(re) if res: return "Customer already exist:- "+cstr(res[0][0]) else: qry="insert into tabCustomer (name,customer_name,customer_group,customer_type,territory,company) values ('"+store+"','"+store+"','Default Customer Group','Individual','Default','"+company+"')" sql(qry) return "created customer :-"+store def create_so(self,customer,delivery_date,currency,customer_group,territory,price_list_name,price_list_currency,company,fiscal_year,plc_conversion_rate,po_no,items,status1,tot): ss=Document('Sales Order') ss.naming_series='SO' ss.customer=customer from datetime import datetime ss.delivery_date=datetime.strptime(delivery_date,"%Y-%m-%d").date() ss.currency=currency ss.customer_group=customer_group ss.selling_price_list='Operator MTN' ss.price_list_currency=price_list_currency ss.company=company ss.territory='Lagos' ss.fiscal_year='2013' ss.plc_conversion_rate=plc_conversion_rate ss.po_no=po_no ss.save(new=1) p='' for i in items: ssi=Document('Sales Order Item') ssi.item_code=i['item_code'] ssi.item_name=i['item_name'] ssi.description=i['description'] ssi.parent=ss.name ssi.qty=i['qty'] ssi.stock_uom=i['stock_uom'] #ssi.ref_rate=i['ref_rate'] ssi.adj_rate=i['adj_rate'] ssi.export_rate=i['export_rate'] ssi.export_amount=i['export_amount'] p=i['date'] ssi.reserved_warehouse='Auxano Warehouse' ssi.save(new=1) ss.save() return ss.name def get_item(self): items=sql("select name,item_name,description,stock_uom,standard_rate,default_warehouse from tabItem") import json itemlist=[] 
for (name,item_name,description,stock_uom,standard_rate,default_warehouse) in items: item={} item["name"] = name item["item_name"] = item_name item["description"] = description item["stock_uom"] = stock_uom item["standard_rate"] = standard_rate item["default_warehouse"] = 'Default Warehouse' itemlist.append(item) parentobj={} parentobj['items']=itemlist return json.dumps(parentobj) def price_list(self): qry="select price_list,item_code,ref_rate,buying_or_selling from `tabItem Price` where price_list='Operator MTN'" prices=sql(qry) import json pricelist=[] for (price_list,item_code,ref_rate,buying_or_selling) in prices: price={} price["item_code"] = item_code price["ref_rate"] = ref_rate price["buying_or_selling"] = buying_or_selling pricelist.append(price) parntobj={} parntobj['prices']=pricelist return json.dumps(parntobj)
agpl-3.0
2013Commons/hue
desktop/core/ext-py/Django-1.4.5/django/contrib/gis/geos/libgeos.py
87
5581
""" This module houses the ctypes initialization procedures, as well as the notice and error handler function callbacks (get called when an error occurs in GEOS). This module also houses GEOS Pointer utilities, including get_pointer_arr(), and GEOM_PTR. """ import os import re import sys from ctypes import c_char_p, Structure, CDLL, CFUNCTYPE, POINTER from ctypes.util import find_library from django.contrib.gis.geos.error import GEOSException # Custom library path set? try: from django.conf import settings lib_path = settings.GEOS_LIBRARY_PATH except (AttributeError, EnvironmentError, ImportError): lib_path = None # Setting the appropriate names for the GEOS-C library. if lib_path: lib_names = None elif os.name == 'nt': # Windows NT libraries lib_names = ['geos_c', 'libgeos_c-1'] elif os.name == 'posix': # *NIX libraries lib_names = ['geos_c', 'GEOS'] else: raise ImportError('Unsupported OS "%s"' % os.name) # Using the ctypes `find_library` utility to find the path to the GEOS # shared library. This is better than manually specifiying each library name # and extension (e.g., libgeos_c.[so|so.1|dylib].). if lib_names: for lib_name in lib_names: lib_path = find_library(lib_name) if not lib_path is None: break # No GEOS library could be found. if lib_path is None: raise ImportError('Could not find the GEOS library (tried "%s"). ' 'Try setting GEOS_LIBRARY_PATH in your settings.' % '", "'.join(lib_names)) # Getting the GEOS C library. The C interface (CDLL) is used for # both *NIX and Windows. # See the GEOS C API source code for more details on the library function calls: # http://geos.refractions.net/ro/doxygen_docs/html/geos__c_8h-source.html lgeos = CDLL(lib_path) # The notice and error handler C function callback definitions. 
# Supposed to mimic the GEOS message handler (C below): # typedef void (*GEOSMessageHandler)(const char *fmt, ...); NOTICEFUNC = CFUNCTYPE(None, c_char_p, c_char_p) def notice_h(fmt, lst, output_h=sys.stdout): try: warn_msg = fmt % lst except: warn_msg = fmt output_h.write('GEOS_NOTICE: %s\n' % warn_msg) notice_h = NOTICEFUNC(notice_h) ERRORFUNC = CFUNCTYPE(None, c_char_p, c_char_p) def error_h(fmt, lst, output_h=sys.stderr): try: err_msg = fmt % lst except: err_msg = fmt output_h.write('GEOS_ERROR: %s\n' % err_msg) error_h = ERRORFUNC(error_h) #### GEOS Geometry C data structures, and utility functions. #### # Opaque GEOS geometry structures, used for GEOM_PTR and CS_PTR class GEOSGeom_t(Structure): pass class GEOSPrepGeom_t(Structure): pass class GEOSCoordSeq_t(Structure): pass class GEOSContextHandle_t(Structure): pass # Pointers to opaque GEOS geometry structures. GEOM_PTR = POINTER(GEOSGeom_t) PREPGEOM_PTR = POINTER(GEOSPrepGeom_t) CS_PTR = POINTER(GEOSCoordSeq_t) CONTEXT_PTR = POINTER(GEOSContextHandle_t) # Used specifically by the GEOSGeom_createPolygon and GEOSGeom_createCollection # GEOS routines def get_pointer_arr(n): "Gets a ctypes pointer array (of length `n`) for GEOSGeom_t opaque pointer." GeomArr = GEOM_PTR * n return GeomArr() # Returns the string version of the GEOS library. Have to set the restype # explicitly to c_char_p to ensure compatibility accross 32 and 64-bit platforms. 
geos_version = lgeos.GEOSversion geos_version.argtypes = None geos_version.restype = c_char_p # Regular expression should be able to parse version strings such as # '3.0.0rc4-CAPI-1.3.3', '3.0.0-CAPI-1.4.1' or '3.4.0dev-CAPI-1.8.0' version_regex = re.compile(r'^(?P<version>(?P<major>\d+)\.(?P<minor>\d+)\.(?P<subminor>\d+))((rc(?P<release_candidate>\d+))|dev)?-CAPI-(?P<capi_version>\d+\.\d+\.\d+)$') def geos_version_info(): """ Returns a dictionary containing the various version metadata parsed from the GEOS version string, including the version number, whether the version is a release candidate (and what number release candidate), and the C API version. """ ver = geos_version() m = version_regex.match(ver) if not m: raise GEOSException('Could not parse version info string "%s"' % ver) return dict((key, m.group(key)) for key in ('version', 'release_candidate', 'capi_version', 'major', 'minor', 'subminor')) # Version numbers and whether or not prepared geometry support is available. _verinfo = geos_version_info() GEOS_MAJOR_VERSION = int(_verinfo['major']) GEOS_MINOR_VERSION = int(_verinfo['minor']) GEOS_SUBMINOR_VERSION = int(_verinfo['subminor']) del _verinfo GEOS_VERSION = (GEOS_MAJOR_VERSION, GEOS_MINOR_VERSION, GEOS_SUBMINOR_VERSION) GEOS_PREPARE = GEOS_VERSION >= (3, 1, 0) if GEOS_PREPARE: # Here we set up the prototypes for the initGEOS_r and finishGEOS_r # routines. These functions aren't actually called until they are # attached to a GEOS context handle -- this actually occurs in # geos/prototypes/threadsafe.py. lgeos.initGEOS_r.restype = CONTEXT_PTR lgeos.finishGEOS_r.argtypes = [CONTEXT_PTR] else: # When thread-safety isn't available, the initGEOS routine must be called # first. This function takes the notice and error functions, defined # as Python callbacks above, as parameters. 
Here is the C code that is # wrapped: # extern void GEOS_DLL initGEOS(GEOSMessageHandler notice_function, GEOSMessageHandler error_function); lgeos.initGEOS(notice_h, error_h) # Calling finishGEOS() upon exit of the interpreter. import atexit atexit.register(lgeos.finishGEOS)
apache-2.0
hanicker/odoo
addons/base_import_module/controllers/main.py
354
1518
# -*- coding: utf-8 -*- import functools import openerp from openerp.http import Controller, route, request, Response def webservice(f): @functools.wraps(f) def wrap(*args, **kw): try: return f(*args, **kw) except Exception, e: return Response(response=str(e), status=500) return wrap class ImportModule(Controller): def check_user(self, uid=None): if uid is None: uid = request.uid is_admin = request.registry['res.users'].has_group(request.cr, uid, 'base.group_erp_manager') if not is_admin: raise openerp.exceptions.AccessError("Only administrators can upload a module") @route('/base_import_module/login', type='http', auth='none', methods=['POST']) @webservice def login(self, login, password, db=None): if db and db != request.db: raise Exception("Could not select database '%s'" % db) uid = request.session.authenticate(request.db, login, password) if not uid: return Response(response="Wrong login/password", status=401) self.check_user(uid) return "ok" @route('/base_import_module/upload', type='http', auth='user', methods=['POST']) @webservice def upload(self, mod_file=None, force='', **kw): self.check_user() force = True if force == '1' else False return request.registry['ir.module.module'].import_zipfile(request.cr, request.uid, mod_file, force=force, context=request.context)[0]
agpl-3.0
cg31/tensorflow
tensorflow/contrib/layers/python/layers/feature_column_ops.py
5
36498
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities related to FeatureColumn.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.contrib.framework.python.framework import checkpoint_utils from tensorflow.contrib.framework.python.framework import experimental from tensorflow.contrib.framework.python.ops import variables as contrib_variables from tensorflow.contrib.layers.python.layers import embedding_ops from tensorflow.contrib.layers.python.layers import feature_column as fc from tensorflow.contrib.layers.python.layers import layers from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn_ops from tensorflow.python.ops import parsing_ops from tensorflow.python.ops import sparse_ops from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables from tensorflow.python.platform import tf_logging as logging def _embeddings_from_arguments(column, args, weight_collections, trainable, output_rank=2): """Returns embeddings for a column based on the computed arguments. Args: column: the column name. 
args: the _DeepEmbeddingLookupArguments for this column. weight_collections: collections to store weights in. trainable: whether these embeddings should be trainable. output_rank: the desired rank of the returned `Tensor`. Inner dimensions will be combined to produce the desired rank. Returns: the embeddings. Raises: ValueError: if not possible to create. """ # pylint: disable=protected-access input_tensor = layers._inner_flatten(args.input_tensor, output_rank) weight_tensor = None if args.weight_tensor is not None: weight_tensor = layers._inner_flatten(args.weight_tensor, output_rank) # pylint: enable=protected-access if args.hashed: embeddings = contrib_variables.model_variable( name='weights', shape=[args.vocab_size], dtype=dtypes.float32, initializer=args.initializer, trainable=trainable, collections=weight_collections) return embedding_ops.hashed_embedding_lookup_sparse( embeddings, input_tensor, args.dimension, combiner=args.combiner, name='lookup') if args.shared_embedding_name is not None: shared_embedding_collection_name = ( 'SHARED_EMBEDDING_COLLECTION_' + args.shared_embedding_name.upper()) graph = ops.get_default_graph() shared_embedding_collection = ( graph.get_collection_ref(shared_embedding_collection_name)) shape = [args.vocab_size, args.dimension] if shared_embedding_collection: if len(shared_embedding_collection) > 1: raise ValueError('Collection %s can only contain one ' '(partitioned) variable.' % shared_embedding_collection_name) else: embeddings = shared_embedding_collection[0] if embeddings.get_shape() != shape: raise ValueError('The embedding variable with name {} already ' 'exists, but its shape does not match required ' 'embedding shape here. 
Please make sure to use ' 'different shared_embedding_name for different ' 'shared embeddings.'.format( args.shared_embedding_name)) else: embeddings = contrib_variables.model_variable( name=args.shared_embedding_name, shape=shape, dtype=dtypes.float32, initializer=args.initializer, trainable=trainable, collections=weight_collections) graph.add_to_collection(shared_embedding_collection_name, embeddings) else: embeddings = contrib_variables.model_variable( name='weights', shape=[args.vocab_size, args.dimension], dtype=dtypes.float32, initializer=args.initializer, trainable=trainable, collections=weight_collections) if isinstance(embeddings, variables.Variable): embeddings = [embeddings] else: embeddings = embeddings._get_variable_list() # pylint: disable=protected-access # pylint: disable=protected-access _maybe_restore_from_checkpoint( column._checkpoint_path(), embeddings) return embedding_ops.safe_embedding_lookup_sparse( embeddings, input_tensor, sparse_weights=weight_tensor, combiner=args.combiner, name=column.name + 'weights') def _input_from_feature_columns(columns_to_tensors, feature_columns, weight_collections, trainable, scope, output_rank, default_name): """Implementation of `input_from(_sequence)_feature_columns`.""" check_feature_columns(feature_columns) with variable_scope.variable_scope(scope, default_name=default_name, values=columns_to_tensors.values()): output_tensors = [] transformer = _Transformer(columns_to_tensors) if weight_collections: weight_collections = list(set(list(weight_collections) + [ops.GraphKeys.VARIABLES])) for column in sorted(set(feature_columns), key=lambda x: x.key): with variable_scope.variable_scope(None, default_name=column.name, values=columns_to_tensors.values()): transformed_tensor = transformer.transform(column) try: # pylint: disable=protected-access arguments = column._deep_embedding_lookup_arguments( transformed_tensor) output_tensors.append(_embeddings_from_arguments( column, arguments, weight_collections, 
trainable, output_rank=output_rank)) except NotImplementedError as ee: try: # pylint: disable=protected-access output_tensors.append(column._to_dnn_input_layer( transformed_tensor, weight_collections, trainable, output_rank=output_rank)) except ValueError as e: raise ValueError('Error creating input layer for column: {}.\n' '{}, {}'.format(column.name, e, ee)) return array_ops.concat(output_rank - 1, output_tensors) def input_from_feature_columns(columns_to_tensors, feature_columns, weight_collections=None, trainable=True, scope=None): """A tf.contrib.layer style input layer builder based on FeatureColumns. Generally a single example in training data is described with feature columns. At the first layer of the model, this column oriented data should be converted to a single tensor. Each feature column needs a different kind of operation during this conversion. For example sparse features need a totally different handling than continuous features. An example usage of input_from_feature_columns is as follows: # Building model for training columns_to_tensor = tf.parse_example(...) first_layer = input_from_feature_columns( columns_to_tensors=columns_to_tensor, feature_columns=feature_columns) second_layer = fully_connected(first_layer, ...) ... where feature_columns can be defined as follows: occupation = sparse_column_with_hash_bucket(column_name="occupation", hash_bucket_size=1000) occupation_emb = embedding_column(sparse_id_column=occupation, dimension=16, combiner="sum") age = real_valued_column("age") age_buckets = bucketized_column( source_column=age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65]) feature_columns=[occupation_emb, age_buckets] Args: columns_to_tensors: A mapping from feature column to tensors. 'string' key means a base feature (not-transformed). It can have FeatureColumn as a key too. That means that FeatureColumn is already transformed by input pipeline. For example, `inflow` may have handled transformations. 
feature_columns: A set containing all the feature columns. All items in the set should be instances of classes derived by FeatureColumn. weight_collections: List of graph collections to which weights are added. trainable: If `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable). scope: Optional scope for variable_scope. Returns: A Tensor which can be consumed by hidden layers in the neural network. Raises: ValueError: if FeatureColumn cannot be consumed by a neural network. """ return _input_from_feature_columns(columns_to_tensors, feature_columns, weight_collections, trainable, scope, output_rank=2, default_name='input_from_feature_columns') @experimental def sequence_input_from_feature_columns(columns_to_tensors, feature_columns, weight_collections=None, trainable=True, scope=None): """Builds inputs for sequence models from `FeatureColumn`s. See documentation for `input_from_feature_columns`. The following types of `FeatureColumn` are permitted in `feature_columns`: `_OneHotColumn`, `_EmbeddingColumn`, `_HashedEmbeddingColumn`, `_RealValuedColumn`, `_DataFrameColumn`. In addition, columns in `feature_columns` may not be constructed using any of the following: `HashedEmbeddingColumn`, `BucketizedColumn`, `CrossedColumn`. Args: columns_to_tensors: A mapping from feature column to tensors. 'string' key means a base feature (not-transformed). It can have FeatureColumn as a key too. That means that FeatureColumn is already transformed by input pipeline. For example, `inflow` may have handled transformations. feature_columns: A set containing all the feature columns. All items in the set should be instances of classes derived by FeatureColumn. weight_collections: List of graph collections to which weights are added. trainable: If `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable). scope: Optional scope for variable_scope. 
Returns: A Tensor which can be consumed by hidden layers in the neural network. Raises: ValueError: if FeatureColumn cannot be consumed by a neural network. """ _check_supported_sequence_columns(feature_columns) _check_forbidden_sequence_columns(feature_columns) return _input_from_feature_columns( columns_to_tensors, feature_columns, weight_collections, trainable, scope, output_rank=3, default_name='sequence_input_from_feature_columns') def _create_embedding_lookup(column, columns_to_tensors, embedding_lookup_arguments, num_outputs, trainable, weight_collections): """Creates variables and returns predictions for linear weights in a model. Args: column: the column we're working on. columns_to_tensors: a map from column name to tensors. embedding_lookup_arguments: arguments for embedding lookup. num_outputs: how many outputs. trainable: whether the variable we create is trainable. weight_collections: weights will be placed here. Returns: variables: the created embeddings. predictions: the computed predictions. 
""" with variable_scope.variable_scope( None, default_name=column.name, values=columns_to_tensors.values()): variable = contrib_variables.model_variable( name='weights', shape=[embedding_lookup_arguments.vocab_size, num_outputs], dtype=dtypes.float32, initializer=embedding_lookup_arguments.initializer, trainable=trainable, collections=weight_collections) if isinstance(variable, variables.Variable): variable = [variable] else: variable = variable._get_variable_list() # pylint: disable=protected-access predictions = embedding_ops.safe_embedding_lookup_sparse( variable, embedding_lookup_arguments.input_tensor, sparse_weights=embedding_lookup_arguments.weight_tensor, combiner=embedding_lookup_arguments.combiner, name=column.name + '_weights') return variable, predictions def _maybe_restore_from_checkpoint(checkpoint_path, variable): if checkpoint_path is not None: path, tensor_name = checkpoint_path weights_to_restore = variable if len(variable) == 1: weights_to_restore = variable[0] checkpoint_utils.init_from_checkpoint(path, {tensor_name: weights_to_restore}) def _create_joint_embedding_lookup(columns_to_tensors, embedding_lookup_arguments, num_outputs, trainable, weight_collections): """Creates an embedding lookup for all columns sharing a single weight.""" for arg in embedding_lookup_arguments: assert arg.weight_tensor is None, ( 'Joint sums for weighted sparse columns are not supported. ' 'Please use weighted_sum_from_feature_columns instead.') assert arg.combiner == 'sum', ( 'Combiners other than sum are not supported for joint sums. 
' 'Please use weighted_sum_from_feature_columns instead.') assert len(embedding_lookup_arguments) >= 1, ( 'At least one column must be in the model.') prev_size = 0 sparse_tensors = [] for a in embedding_lookup_arguments: t = a.input_tensor values = t.values + prev_size prev_size += a.vocab_size sparse_tensors.append( ops.SparseTensor(t.indices, values, t.shape)) sparse_tensor = sparse_ops.sparse_concat(1, sparse_tensors) with variable_scope.variable_scope( None, default_name='linear_weights', values=columns_to_tensors.values()): variable = contrib_variables.model_variable( name='weights', shape=[prev_size, num_outputs], dtype=dtypes.float32, initializer=init_ops.zeros_initializer, trainable=trainable, collections=weight_collections) if isinstance(variable, variables.Variable): variable = [variable] else: variable = variable._get_variable_list() # pylint: disable=protected-access predictions = embedding_ops.safe_embedding_lookup_sparse( variable, sparse_tensor, sparse_weights=None, combiner='sum', name='_weights') return variable, predictions def joint_weighted_sum_from_feature_columns(columns_to_tensors, feature_columns, num_outputs, weight_collections=None, trainable=True, scope=None): """A restricted linear prediction builder based on FeatureColumns. As long as all feature columns are unweighted sparse columns this computes the prediction of a linear model which stores all weights in a single variable. Args: columns_to_tensors: A mapping from feature column to tensors. 'string' key means a base feature (not-transformed). It can have FeatureColumn as a key too. That means that FeatureColumn is already transformed by input pipeline. For example, `inflow` may have handled transformations. feature_columns: A set containing all the feature columns. All items in the set should be instances of classes derived from FeatureColumn. num_outputs: An integer specifying number of outputs. Default value is 1. 
weight_collections: List of graph collections to which weights are added. trainable: If `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable). scope: Optional scope for variable_scope. Returns: A tuple of followings: * A Tensor which represents predictions of a linear model. * A list of Variables storing the weights. * A Variable which is used for bias. Raises: ValueError: if FeatureColumn cannot be used for linear predictions. """ check_feature_columns(feature_columns) with variable_scope.variable_scope( scope, default_name='joint_weighted_sum_from_feature_columns', values=columns_to_tensors.values()): transformer = _Transformer(columns_to_tensors) embedding_lookup_arguments = [] for column in sorted(set(feature_columns), key=lambda x: x.key): transformed_tensor = transformer.transform(column) try: embedding_lookup_arguments.append( column._wide_embedding_lookup_arguments(transformed_tensor)) # pylint: disable=protected-access except NotImplementedError: raise NotImplementedError('Real-valued columns are not supported. ' 'Use weighted_sum_from_feature_columns ' 'instead, or bucketize these columns.') variable, predictions_no_bias = _create_joint_embedding_lookup( columns_to_tensors, embedding_lookup_arguments, num_outputs, trainable, weight_collections) bias = contrib_variables.model_variable( 'bias_weight', shape=[num_outputs], initializer=init_ops.zeros_initializer, collections=_add_variable_collection(weight_collections)) _log_variable(bias) predictions = nn_ops.bias_add(predictions_no_bias, bias) return predictions, variable, bias def weighted_sum_from_feature_columns(columns_to_tensors, feature_columns, num_outputs, weight_collections=None, trainable=True, scope=None): """A tf.contrib.layer style linear prediction builder based on FeatureColumns. Generally a single example in training data is described with feature columns. This function generates weighted sum for each num_outputs. 
Weighted sum refers to logits in classification problems. It refers to prediction itself for linear regression problems. An example usage of weighted_sum_from_feature_columns is as follows: # Building model for training columns_to_tensor = tf.parse_example(...) logits = weighted_sum_from_feature_columns( columns_to_tensors=columns_to_tensor, feature_columns=feature_columns, num_outputs=1) loss = tf.nn.sigmoid_cross_entropy_with_logits(logits, labels) where feature_columns can be defined as follows: occupation = sparse_column_with_hash_bucket(column_name="occupation", hash_bucket_size=1000) age = real_valued_column("age") age_buckets = bucketized_column( source_column=age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65]) occupation_x_age = crossed_column(columns=[occupation, age_buckets], hash_bucket_size=10000) feature_columns=[age_buckets, occupation, occupation_x_age] Args: columns_to_tensors: A mapping from feature column to tensors. 'string' key means a base feature (not-transformed). It can have FeatureColumn as a key too. That means that FeatureColumn is already transformed by input pipeline. For example, `inflow` may have handled transformations. feature_columns: A set containing all the feature columns. All items in the set should be instances of classes derived from FeatureColumn. num_outputs: An integer specifying number of outputs. Default value is 1. weight_collections: List of graph collections to which weights are added. trainable: If `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable). scope: Optional scope for variable_scope. Returns: A tuple of followings: * A Tensor which represents predictions of a linear model. * A dictionary which maps feature_column to corresponding Variable. * A Variable which is used for bias. Raises: ValueError: if FeatureColumn cannot be used for linear predictions. 
""" check_feature_columns(feature_columns) with variable_scope.variable_scope( scope, default_name='weighted_sum_from_feature_columns', values=columns_to_tensors.values()): output_tensors = [] column_to_variable = dict() transformer = _Transformer(columns_to_tensors) # pylint: disable=protected-access for column in sorted(set(feature_columns), key=lambda x: x.key): transformed_tensor = transformer.transform(column) try: embedding_lookup_arguments = column._wide_embedding_lookup_arguments( transformed_tensor) variable, predictions = _create_embedding_lookup( column, columns_to_tensors, embedding_lookup_arguments, num_outputs, trainable, weight_collections) except NotImplementedError: with variable_scope.variable_scope( None, default_name=column.name, values=columns_to_tensors.values()): tensor = column._to_dense_tensor(transformed_tensor) tensor = fc._reshape_real_valued_tensor(tensor, 2, column.name) variable = [contrib_variables.model_variable( name='weight', shape=[tensor.get_shape()[1], num_outputs], initializer=init_ops.zeros_initializer, collections=weight_collections)] predictions = math_ops.matmul(tensor, variable[0], name='matmul') except ValueError as ee: raise ValueError('Error creating weighted sum for column: {}.\n' '{}'.format(column.name, ee)) output_tensors.append(predictions) column_to_variable[column] = variable _log_variable(variable) _maybe_restore_from_checkpoint(column._checkpoint_path(), variable) # pylint: enable=protected-access predictions_no_bias = math_ops.add_n(output_tensors) bias = contrib_variables.model_variable( 'bias_weight', shape=[num_outputs], initializer=init_ops.zeros_initializer, collections=_add_variable_collection(weight_collections)) _log_variable(bias) predictions = nn_ops.bias_add(predictions_no_bias, bias) return predictions, column_to_variable, bias def parse_feature_columns_from_examples(serialized, feature_columns, name=None, example_names=None): """Parses tf.Examples to extract tensors for given feature_columns. 
This is a wrapper of 'tf.parse_example'. A typical usage is as follows: ```python columns_to_tensor = parse_feature_columns_from_examples( serialized=my_data, feature_columns=my_features) # Where my_features are: # Define features and transformations country = sparse_column_with_keys(column_name="native_country", keys=["US", "BRA", ...]) country_emb = embedding_column(sparse_id_column=country, dimension=3, combiner="sum") occupation = sparse_column_with_hash_bucket(column_name="occupation", hash_bucket_size=1000) occupation_emb = embedding_column(sparse_id_column=occupation, dimension=16, combiner="sum") occupation_x_country = crossed_column(columns=[occupation, country], hash_bucket_size=10000) age = real_valued_column("age") age_buckets = bucketized_column( source_column=age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65]) my_features = [occupation_emb, age_buckets, country_emb] ``` Args: serialized: A vector (1-D Tensor) of strings, a batch of binary serialized `Example` protos. feature_columns: An iterable containing all the feature columns. All items should be instances of classes derived from _FeatureColumn. name: A name for this operation (optional). example_names: A vector (1-D Tensor) of strings (optional), the names of the serialized protos in the batch. Returns: A `dict` mapping FeatureColumn to `Tensor` and `SparseTensor` values. """ check_feature_columns(feature_columns) columns_to_tensors = parsing_ops.parse_example( serialized=serialized, features=fc.create_feature_spec_for_parsing(feature_columns), name=name, example_names=example_names) transformer = _Transformer(columns_to_tensors) for column in sorted(set(feature_columns), key=lambda x: x.key): transformer.transform(column) return columns_to_tensors def parse_feature_columns_from_sequence_examples( serialized, context_feature_columns, sequence_feature_columns, name=None, example_name=None): """Parses tf.SequenceExamples to extract tensors for given `FeatureColumn`s. 
Args: serialized: A scalar (0-D Tensor) of type string, a single serialized `SequenceExample` proto. context_feature_columns: An iterable containing the feature columns for context features. All items should be instances of classes derived from `_FeatureColumn`. Can be `None`. sequence_feature_columns: An iterable containing the feature columns for sequence features. All items should be instances of classes derived from `_FeatureColumn`. Can be `None`. name: A name for this operation (optional). example_name: A scalar (0-D Tensor) of type string (optional), the names of the serialized proto. Returns: A tuple consisting of: context_features: a dict mapping `FeatureColumns` from `context_feature_columns` to their parsed `Tensors`/`SparseTensor`s. sequence_features: a dict mapping `FeatureColumns` from `sequence_feature_columns` to their parsed `Tensors`/`SparseTensor`s. """ # Sequence example parsing requires a single (scalar) example. try: serialized = array_ops.reshape(serialized, []) except ValueError as e: raise ValueError( 'serialized must contain as single sequence example. Batching must be ' 'done after parsing for sequence examples. 
Error: {}'.format(e)) if context_feature_columns is None: context_feature_columns = [] if sequence_feature_columns is None: sequence_feature_columns = [] check_feature_columns(context_feature_columns) context_feature_spec = fc.create_feature_spec_for_parsing( context_feature_columns) check_feature_columns(sequence_feature_columns) sequence_feature_spec = fc._create_sequence_feature_spec_for_parsing( # pylint: disable=protected-access sequence_feature_columns, allow_missing_by_default=False) return parsing_ops.parse_single_sequence_example(serialized, context_feature_spec, sequence_feature_spec, example_name, name) def _log_variable(variable): if isinstance(variable, list): for var in variable: if isinstance(variable, variables.Variable): logging.info('Created variable %s, with device=%s', var.name, var.device) elif isinstance(variable, variables.Variable): logging.info('Created variable %s, with device=%s', variable.name, variable.device) def _infer_real_valued_column_for_tensor(name, tensor): """Creates a real_valued_column for given tensor and name.""" if isinstance(tensor, ops.SparseTensor): raise ValueError( 'SparseTensor is not supported for auto detection. Please define ' 'corresponding FeatureColumn for tensor {} {}.', name, tensor) if not (tensor.dtype.is_integer or tensor.dtype.is_floating): raise ValueError( 'Non integer or non floating types are not supported for auto detection' '. 
Please define corresponding FeatureColumn for tensor {} {}.', name, tensor) shape = tensor.get_shape().as_list() dimension = 1 for i in range(1, len(shape)): dimension *= shape[i] return fc.real_valued_column(name, dimension=dimension, dtype=tensor.dtype) def infer_real_valued_columns(features): if not isinstance(features, dict): return [_infer_real_valued_column_for_tensor('', features)] feature_columns = [] for key, value in features.items(): feature_columns.append(_infer_real_valued_column_for_tensor(key, value)) return feature_columns def check_feature_columns(feature_columns): """Checks the validity of the set of FeatureColumns. Args: feature_columns: A set of instances or subclasses of FeatureColumn. Raises: ValueError: If there are duplicate feature column keys. """ seen_keys = set() for f in feature_columns: key = f.key if key in seen_keys: raise ValueError('Duplicate feature column key found for column: {}. ' 'This usually means that the column is almost identical ' 'to another column, and one must be discarded.'.format( f.name)) seen_keys.add(key) class _Transformer(object): """Handles all the transformations defined by FeatureColumn if needed. FeatureColumn specifies how to digest an input column to the network. Some feature columns require data transformations. This class handles those transformations if they are not handled already. Some features may be used in more than one places. For example one can use a bucketized feature by itself and a cross with it. In that case Transformer should create only one bucketization op instead of multiple ops for each feature column. To handle re-use of transformed columns, Transformer keeps all previously transformed columns. 
An example usage of Transformer is as follows: occupation = sparse_column_with_hash_bucket(column_name="occupation", hash_bucket_size=1000) age = real_valued_column("age") age_buckets = bucketized_column( source_column=age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65]) occupation_x_age = crossed_column(columns=[occupation, age_buckets], hash_bucket_size=10000) columns_to_tensor = tf.parse_example(...) transformer = Transformer(columns_to_tensor) occupation_x_age_tensor = transformer.transform(occupation_x_age) occupation_tensor = transformer.transform(occupation) age_buckets_tensor = transformer.transform(age_buckets) """ def __init__(self, columns_to_tensors): """Initializes transfomer. Args: columns_to_tensors: A mapping from feature columns to tensors. 'string' key means a base feature (not-transformed). It can have FeatureColumn as a key too. That means that FeatureColumn is already transformed by input pipeline. For example, `inflow` may have handled transformations. Transformed features are inserted in columns_to_tensors. """ self._columns_to_tensors = columns_to_tensors def transform(self, feature_column): """Returns a Tensor which represents given feature_column. Args: feature_column: An instance of FeatureColumn. Returns: A Tensor which represents given feature_column. It may create a new Tensor or re-use an existing one. Raises: ValueError: if FeatureColumn cannot be handled by this Transformer. """ logging.debug('Transforming feature_column %s', feature_column) if feature_column in self._columns_to_tensors: # Feature_column is already transformed. 
return self._columns_to_tensors[feature_column] feature_column.insert_transformed_feature(self._columns_to_tensors) if feature_column not in self._columns_to_tensors: raise ValueError('Column {} is not supported.'.format( feature_column.name)) return self._columns_to_tensors[feature_column] def _add_variable_collection(weight_collections): if weight_collections: weight_collections = list( set(list(weight_collections) + [ops.GraphKeys.VARIABLES])) return weight_collections # TODO(jamieas): remove the following logic once all FeatureColumn types are # supported for sequences. # pylint: disable=protected-access _SUPPORTED_SEQUENCE_COLUMNS = (fc._OneHotColumn, fc._EmbeddingColumn, fc._RealValuedColumn) _FORBIDDEN_SEQUENCE_COLUMNS = (fc._HashedEmbeddingColumn, fc._BucketizedColumn, fc._CrossedColumn) def _check_supported_sequence_columns(feature_columns): """Asserts `feature_columns` are in `_SUPPORTED_SEQUENCE_COLUMNS`.""" for col in feature_columns: if not isinstance(col, _SUPPORTED_SEQUENCE_COLUMNS): raise ValueError( 'FeatureColumn type {} is not currently supported for sequence data.'. 
format(type(col).__name__)) def _get_parent_columns(feature_column): """Returns the tuple of `FeatureColumn`s that `feature_column` depends on.""" if isinstance(feature_column, (fc._WeightedSparseColumn, fc._OneHotColumn, fc._EmbeddingColumn,)): return (feature_column.sparse_id_column,) if isinstance(feature_column, (fc._BucketizedColumn,)): return (feature_column.source_column,) if isinstance(feature_column, (fc._CrossedColumn)): return tuple(feature_column.columns) return tuple() def _gather_feature_columns(feature_columns): """Returns a list of all ancestor `FeatureColumns` of `feature_columns`.""" gathered = list(feature_columns) i = 0 while i < len(gathered): for column in _get_parent_columns(gathered[i]): if column not in gathered: gathered.append(column) i += 1 return gathered def _check_forbidden_sequence_columns(feature_columns): """Recursively cecks `feature_columns` for `_FORBIDDEN_SEQUENCE_COLUMNS`.""" all_feature_columns = _gather_feature_columns(feature_columns) for feature_column in all_feature_columns: if isinstance(feature_column, _FORBIDDEN_SEQUENCE_COLUMNS): raise ValueError( 'Column {} is of type {}, which is not currently supported for ' 'sequences.'.format(feature_column.name, type(feature_column).__name__))
apache-2.0
WuPei/cv_reconstructor
main.py
1
4437
import fileManager as fm import camera import numpy as np import cv2 import shader as pts_shader import os import timeit from texture import Mesh from SortBuilding import SortBuilding as sb import math def sortBasedOnZ(mylist,refer_list): return [x for (y,x) in sorted(zip(refer_list,mylist),key = lambda pair:pair[0],reverse = False)] def sortMeshes(points,rgb_values,camera_pos): #STORE EACH FOUR PONITS INTO A MESH print "-----start sorting the meshes for frame:"+str(t)+"------" meshlist = [] meshz = [] for i in range(0, len(points), 4): mesh = Mesh([points[i], points[i+1], points[i+2], points[i+3]], [rgb_values[i], rgb_values[i+1], rgb_values[i+2], rgb_values[i+3]]) meshlist.append(mesh) meshz.append(mesh.getZ(camera_pos)) #Sort Meshes meshlist = sortBasedOnZ(meshlist, meshz) #Mesh back to points points = [] rgb_values = [] for i in range(len(meshlist)): p1, p2, p3, p4 = meshlist[i].getPoints() c1, c2, c3, c4 = meshlist[i].getColors() points.append(p1) points.append(p2) points.append(p3) points.append(p4) rgb_values.append(c1) rgb_values.append(c2) rgb_values.append(c3) rgb_values.append(c4) return points,rgb_values def generate1stPath(frame_num): cam_pos = [camera_pos for x in range(frame_num)] cam_ori = [camera_ori for x in range(frame_num)] for each_angle in range(frame_num): cam = camera.Camera(points, camera_pos, camera_ori, 1) cam.rotateCamera(y_axis,-37.2+each_angle*1*37.2/56) theta = each_angle*1.0/180*np.pi alpha = math.atan(200.0/300) beta= alpha + theta R = 300.0/math.cos(alpha) cam_ori[each_angle] = cam.ori_mat cam_pos[each_angle] = [0,300-R*math.cos(beta),0,-R*math.sin(beta)] #print "x,z",300-R*math.cos(beta),-R*math.sin(beta) return cam_pos,cam_ori def generate2ndPath(frame_num): camera_pos = [0, 300, 0, -200] cam_pos = [camera_pos for x in range(frame_num)] cam_ori = [camera_ori for x in range(frame_num)] for each_angle in range(frame_num): cam = camera.Camera(points, camera_pos, camera_ori, 1) cam.rotateCamera(x_axis,-each_angle*1*60.0/90) theta = 
each_angle*2.0/180*np.pi R = 200.0 cam_ori[each_angle] = cam.ori_mat cam_pos[each_angle] = [0,300,(200-R*math.cos(theta)),-200-R*math.sin(theta)] return cam_pos,cam_ori width = 1632 height = 1224 y_axis = [0, 1, 0] x_axis = [1, 0, 0] file = fm.FileManager() dir = 'Models/' outDir = "output/" skyDir = "sky.png" frame_num = 90 #90 for 2nd path #112 for 1st path previous_img = [cv2.imread(skyDir,cv2.CV_LOAD_IMAGE_COLOR) for i in range(frame_num)] camera_pos = [0, 0, 0, -200] I = [[1, 0, 0], [0, 1, 0], [0, 0, 1]] camera_ori = np.matrix(I) points = [] sort_build = sb() #cam_pos,cam_ori = generate1stPath(frame_num) cam_pos,cam_ori = generate2ndPath(frame_num) mode = 1 init = 0 end = frame_num print "initial:",init,"end:",end for t in range(init,end): result = sort_build.SortBuildings(cam_pos[t]) sorted_models = [x for [x,y] in result] print "sorted models:",sorted_models for index in range(len(sorted_models)): fileindex = sorted_models[index] filename = os.path.join(dir,"model_"+str(fileindex)+".dat") print "----------get projected points ",filename,"-------------" points, rgb_values = file.importPointsWithRGB(filename) start = timeit.default_timer() #sort the points based on meshes points, rgb_values = sortMeshes(points,rgb_values,cam_pos[t]) #create the camera cam = camera.Camera(points, cam_pos[t], cam_ori[t], 1) print "----------get projected points-------------" x_cords, y_cords ,z_cords = cam.getProjectedPts(height, width) #shading the projected points shader = pts_shader.Shader(width, height,previous_img[t]) print "----projected poitns generated-----" out_img = shader.shading(x_cords,y_cords,rgb_values,mode) print "Processing Time:", timeit.default_timer()-start,"s" print "-----points shaded------------------" #write all the middle frames cv2.imwrite(os.path.join(outDir,"frame_"+str(t)+"_img_"+str(index)+".png"),out_img) print "---img_"+str(t)+"_frame_"+str(index)+".png created!" 
previous_img[t] = out_img #this main file is used to create the middle frames, for creating a video, please use makingVideoMain for i in range(init,end): cv2.imwrite(os.path.join(outDir,"result_"+str(i)+".png"),previous_img[i]) print "------------Main Phase Finished------------" print "--All Results has been output, please use makingVideoMain.py to generate a video--"
mit
neilhan/tensorflow
tensorflow/tensorboard/tensorboard.py
7
5713
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Serve TensorFlow summary data to a web frontend. This is a simple web server to proxy data from the event_loader to the web, and serve static web files. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import socket from tensorflow.python.platform import app from tensorflow.python.platform import flags from tensorflow.python.platform import resource_loader from tensorflow.python.platform import status_bar from tensorflow.python.platform import tf_logging as logging from tensorflow.python.summary import event_file_inspector as efi from tensorflow.python.summary import event_multiplexer from tensorflow.tensorboard.backend import server flags.DEFINE_string('logdir', '', """logdir specifies the directory where TensorBoard will look to find TensorFlow event files that it can display. TensorBoard will recursively walk the directory structure rooted at logdir, looking for .*tfevents.* files. You may also pass a comma separated list of log directories, and TensorBoard will watch each directory. 
You can also assign names to individual log directories by putting a colon between the name and the path, as in tensorboard --logdir=name1:/path/to/logs/1,name2:/path/to/logs/2 """) flags.DEFINE_boolean('debug', False, 'Whether to run the app in debug mode. ' 'This increases log verbosity to DEBUG.') flags.DEFINE_string('host', '0.0.0.0', 'What host to listen to. Defaults to ' 'serving on 0.0.0.0, set to 127.0.0.1 (localhost) to' 'disable remote access (also quiets security warnings).') flags.DEFINE_boolean('inspect', False, """Use this flag to print out a digest of your event files to the command line, when no data is shown on TensorBoard or the data shown looks weird. Example usages: tensorboard --inspect --event_file=myevents.out tensorboard --inspect --event_file=myevents.out --tag=loss tensorboard --inspect --logdir=mylogdir tensorboard --inspect --logdir=mylogdir --tag=loss See tensorflow/python/summary/event_file_inspector.py for more info and detailed usage. """) flags.DEFINE_string( 'tag', '', 'The particular tag to query for. Only used if --inspect is present') flags.DEFINE_string( 'event_file', '', 'The particular event file to query for. Only used if --inspect is present ' 'and --logdir is not specified.') flags.DEFINE_integer('port', 6006, 'What port to serve TensorBoard on.') flags.DEFINE_boolean('purge_orphaned_data', True, 'Whether to purge data that ' 'may have been orphaned due to TensorBoard restarts. 
' 'Disabling purge_orphaned_data can be used to debug data ' 'disappearance.') flags.DEFINE_integer('reload_interval', 60, 'How often the backend should load ' 'more data.') FLAGS = flags.FLAGS def main(unused_argv=None): logdir = os.path.expanduser(FLAGS.logdir) event_file = os.path.expanduser(FLAGS.event_file) if FLAGS.debug: logging.set_verbosity(logging.DEBUG) logging.info('TensorBoard is in debug mode.') if FLAGS.inspect: logging.info('Not bringing up TensorBoard, but inspecting event files.') efi.inspect(logdir, event_file, FLAGS.tag) return 0 if not logdir: msg = ('A logdir must be specified. Run `tensorboard --help` for ' 'details and examples.') logging.error(msg) print(msg) return -1 logging.info('Starting TensorBoard in directory %s', os.getcwd()) path_to_run = server.ParseEventFilesSpec(logdir) logging.info('TensorBoard path_to_run is: %s', path_to_run) multiplexer = event_multiplexer.EventMultiplexer( size_guidance=server.TENSORBOARD_SIZE_GUIDANCE, purge_orphaned_data=FLAGS.purge_orphaned_data) server.StartMultiplexerReloadingThread(multiplexer, path_to_run, FLAGS.reload_interval) try: tb_server = server.BuildServer(multiplexer, FLAGS.host, FLAGS.port) except socket.error: if FLAGS.port == 0: msg = 'Unable to find any open ports.' logging.error(msg) print(msg) return -2 else: msg = 'Tried to connect to port %d, but address is in use.' 
% FLAGS.port logging.error(msg) print(msg) return -3 try: tag = resource_loader.load_resource('tensorboard/TAG').strip() logging.info('TensorBoard is tag: %s', tag) except IOError: logging.info('Unable to read TensorBoard tag') tag = '' status_bar.SetupStatusBarInsideGoogle('TensorBoard %s' % tag, FLAGS.port) print('Starting TensorBoard %s on port %d' % (tag, FLAGS.port)) if FLAGS.host == "0.0.0.0": try: host = socket.gethostbyname(socket.gethostname()) print('(You can navigate to http://%s:%d)' % (host, FLAGS.port)) except socket.gaierror: pass else: print('(You can navigate to http://%s:%d)' % (FLAGS.host, FLAGS.port)) tb_server.serve_forever() if __name__ == '__main__': app.run()
apache-2.0
librasungirl/openthread
tools/harness-automation/cases/border_7_1_3.py
9
1876
#!/usr/bin/env python # # Copyright (c) 2019, The OpenThread Authors. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of the copyright holder nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # from autothreadharness.harness_case import HarnessCase import unittest class Border_7_1_3(HarnessCase): role = HarnessCase.ROLE_BORDER case = '7 1 3' golden_devices_required = 3 def on_dialog(self, dialog, title): pass if __name__ == '__main__': unittest.main()
bsd-3-clause
wolfv/uberwriter
uberwriter/config.py
1
2377
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# Copyright (C) 2019, Wolf Vollprecht <w.vollprecht@gmail.com>
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE.  See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program.  If not, see <http://www.gnu.org/licenses/>.
### END LICENSE

### DO NOT EDIT THIS FILE ###

# FIX: the original __all__ exported 'project_path_not_found', a name that
# does not exist in this module, so `from uberwriter.config import *` raised
# AttributeError.  The exception class is the intended export.
__all__ = [
    'ProjectPathNotFound',
    'get_data_file',
    'get_data_path',
]

# Where your project will look for your data (for instance, images and ui
# files). By default, this is ../data, relative your trunk layout
__uberwriter_data_directory__ = '../data/'
__license__ = 'GPL-3'
__version__ = 'VERSION'

import os


class ProjectPathNotFound(Exception):
    """Raised when we can't find the project directory."""


def get_data_file(*path_segments):
    """Get the full path to a data file.

    Returns the path to a file underneath the data directory (as defined by
    `get_data_path`). Equivalent to os.path.join(get_data_path(),
    *path_segments).
    """
    return os.path.join(get_data_path(), *path_segments)


def get_data_path():
    """Retrieve uberwriter data path.

    This path is by default <uberwriter_path>/../data/ in trunk and
    /opt/uberwriter/data in an installed version, but this path is specified
    at installation time.

    Raises:
        ProjectPathNotFound: if neither the local trunk data folder nor the
            system installation path exists.
    """
    # Flatpak sandboxes expose this marker file; data lives under /app there.
    if os.path.isfile("/.flatpak-info"):
        return '/app/share/uberwriter/'

    # Get pathname absolute or relative.
    path = os.path.join(
        os.path.dirname(__file__), __uberwriter_data_directory__)

    # We try first if the data exists in the local folder and then
    # in the system installation path.
    abs_data_path = os.path.abspath(path)
    if not os.path.exists(abs_data_path):
        abs_data_path = '/usr/share/uberwriter/'
        # BUG FIX: the original guarded this with `elif` on the *same*
        # condition as the `if` above, so the system fallback was never
        # checked and ProjectPathNotFound was unreachable.  Validate the
        # fallback path explicitly and raise when it is missing too.
        if not os.path.exists(abs_data_path):
            raise ProjectPathNotFound

    return abs_data_path


def get_version():
    """Return the version string substituted at installation time."""
    return __version__
gpl-3.0
cloudbase/nova
nova/api/openstack/compute/suspend_server.py
4
3292
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from webob import exc

from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
from nova.policies import suspend_server as ss_policies

# Extension alias used in API URLs / discovery.
ALIAS = "os-suspend-server"


class SuspendServerController(wsgi.Controller):
    """Controller implementing the 'suspend' and 'resume' server actions.

    Each action translates nova-internal exceptions into the appropriate
    HTTP error (404 for unknown cell, 409 for locked / invalid-state).
    """

    def __init__(self, *args, **kwargs):
        super(SuspendServerController, self).__init__(*args, **kwargs)
        self.compute_api = compute.API()

    @wsgi.response(202)
    @extensions.expected_errors((404, 409))
    @wsgi.action('suspend')
    def _suspend(self, req, id, body):
        """Permit admins to suspend the server."""
        context = req.environ['nova.context']
        try:
            server = common.get_instance(self.compute_api, context, id)
            # Policy check is scoped to the instance's owner so per-user /
            # per-project rules can be enforced.
            context.can(ss_policies.POLICY_ROOT % 'suspend',
                        target={'user_id': server.user_id,
                                'project_id': server.project_id})
            self.compute_api.suspend(context, server)
        except exception.InstanceUnknownCell as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        except exception.InstanceIsLocked as e:
            raise exc.HTTPConflict(explanation=e.format_message())
        except exception.InstanceInvalidState as state_error:
            # Maps the invalid-state error onto a 409 with a descriptive body.
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                    'suspend', id)

    @wsgi.response(202)
    @extensions.expected_errors((404, 409))
    @wsgi.action('resume')
    def _resume(self, req, id, body):
        """Permit admins to resume the server from suspend."""
        context = req.environ['nova.context']
        # NOTE(review): unlike _suspend, the policy check here is performed
        # before the instance lookup and without an owner target — assumed
        # intentional; confirm against the suspend_server policy definitions.
        context.can(ss_policies.POLICY_ROOT % 'resume')
        try:
            server = common.get_instance(self.compute_api, context, id)
            self.compute_api.resume(context, server)
        except exception.InstanceUnknownCell as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        except exception.InstanceIsLocked as e:
            raise exc.HTTPConflict(explanation=e.format_message())
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                    'resume', id)


class SuspendServer(extensions.V21APIExtensionBase):
    """Enable suspend/resume server actions."""

    name = "SuspendServer"
    alias = ALIAS
    version = 1

    def get_controller_extensions(self):
        """Attach the suspend/resume controller to the 'servers' resource."""
        controller = SuspendServerController()
        extension = extensions.ControllerExtension(self, 'servers', controller)
        return [extension]

    def get_resources(self):
        """No standalone resources; this extension only adds server actions."""
        return []
apache-2.0
ctag/cpe453
JMRI/jython/javaone/signalstop.py
21
1157
# stop on signals def setStopBlock(block, signal,dir) : s = signals.getSignalHead(signal) b = jmri.jmrit.tracker.StoppingBlock(block) b.addSignal(s,dir) return b def setStopBlock2(block, signal1, signal2, dir) : s1 = signals.getSignalHead(signal1) s2 = signals.getSignalHead(signal2) b = jmri.jmrit.tracker.StoppingBlock(block) b.addSignal(s1, s2, dir) return b SB157 = setStopBlock2(IB157,"200 Facing Upper", "200 Facing Lower", CCW) SB158 = setStopBlock(IB158,"200 Main",CW) SB159 = setStopBlock(IB159,"200 Siding",CW) SB150 = setStopBlock2(IB150,"201 Facing Upper", "201 Facing Lower", CW) SB160 = setStopBlock(IB160,"201 Main",CCW) SB161 = setStopBlock(IB161,"201 Siding",CCW) SB156 = setStopBlock2(IB156,"202 Facing Upper", "202 Facing Lower", CW) SB154 = setStopBlock(IB154,"202 Main",CCW) SB155 = setStopBlock(IB155,"202 Siding",CCW) SB151 = setStopBlock2(IB151,"203 Facing Upper", "203 Facing Lower", CCW) SB152 = setStopBlock(IB152,"203 Main",CW) SB153 = setStopBlock(IB153,"203 Siding",CW) stopblocks = [SB157, SB158, SB159, SB150, SB160, SB161, SB156, SB154, SB155, SB151, SB152, SB153]
gpl-2.0
danimaribeiro/PyTrustNFe
tests/test_nfse_paulistana_tipo_cpfcnpj.py
1
1842
# coding=utf-8
import copy
import os.path
import unittest

from pytrustnfe.xml import render_xml, sanitize_response
from tests.const import DEFAULT_RPS, NFSE

template_path = 'pytrustnfe/nfse/paulistana/templates'


def _get_nfse(tipo_cpfcnpj):
    """Build an NFSE payload whose RPS entries use the given tomador doc type.

    FIX: the original mutated the imported module-level constants NFSE and
    DEFAULT_RPS in place, so each test leaked its tipo_cpfcnpj into the
    shared fixtures and later tests depended on execution order.  Deep
    copies keep every call independent.
    """
    nfse = copy.deepcopy(NFSE)
    lista_rps = copy.deepcopy(DEFAULT_RPS)
    for rps in lista_rps:
        rps['tomador']['tipo_cpfcnpj'] = tipo_cpfcnpj
        rps['tomador']['cpf_cnpj'] = '12345678923256'
    nfse['lista_rps'] = lista_rps
    return nfse


def get_objects(nfse):
    """Render both the single-RPS and lote templates and parse the results.

    Returns a tuple (obj_rps, obj_lote_rps) of sanitized response objects.
    """
    xml_rps = render_xml(template_path, 'EnvioRPS.xml', False, nfse=nfse)
    _, obj_rps = sanitize_response(xml_rps)

    xml_lote_rps = render_xml(template_path, 'EnvioLoteRPS.xml', False,
                              nfse=nfse)
    _, obj_lote_rps = sanitize_response(xml_lote_rps)
    return obj_rps, obj_lote_rps


class test_nfse_paulistana_tipo_cpfcnpj(unittest.TestCase):
    """Check that the tomador document tag depends on tipo_cpfcnpj."""

    def test_tipo_cpfcnpj_1(self):
        # Type 1 -> CPF element must be rendered inside CPFCNPJTomador.
        nfse = _get_nfse(tipo_cpfcnpj=1)
        obj_rps, obj_lote_rps = get_objects(nfse)

        self.assertTrue(hasattr(obj_rps.RPS, 'CPFCNPJTomador'))
        self.assertTrue(hasattr(obj_rps.RPS.CPFCNPJTomador, 'CPF'))
        self.assertTrue(hasattr(obj_lote_rps.RPS, 'CPFCNPJTomador'))
        self.assertTrue(hasattr(obj_lote_rps.RPS.CPFCNPJTomador, 'CPF'))

    def test_tipo_cpfcnpj_2(self):
        # Type 2 -> CNPJ element must be rendered inside CPFCNPJTomador.
        nfse = _get_nfse(tipo_cpfcnpj=2)
        obj_rps, obj_lote_rps = get_objects(nfse)

        self.assertTrue(hasattr(obj_rps.RPS, 'CPFCNPJTomador'))
        self.assertTrue(hasattr(obj_rps.RPS.CPFCNPJTomador, 'CNPJ'))
        self.assertTrue(hasattr(obj_lote_rps.RPS, 'CPFCNPJTomador'))
        self.assertTrue(hasattr(obj_lote_rps.RPS.CPFCNPJTomador, 'CNPJ'))

    def test_tipo_cpfcnpj_3(self):
        # Type 3 -> no CPFCNPJTomador element at all.
        nfse = _get_nfse(tipo_cpfcnpj=3)
        obj_rps, obj_lote_rps = get_objects(nfse)

        self.assertFalse(hasattr(obj_rps.RPS, 'CPFCNPJTomador'))
agpl-3.0
snowzjx/ns3-buffer-management
src/nix-vector-routing/bindings/modulegen__gcc_ILP32.py
34
388692
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers import pybindgen.settings import warnings class ErrorHandler(pybindgen.settings.ErrorHandler): def handle_error(self, wrapper, exception, traceback_): warnings.warn("exception %r in wrapper %s" % (exception, wrapper)) return True pybindgen.settings.error_handler = ErrorHandler() import sys def module_init(): root_module = Module('ns.nix_vector_routing', cpp_namespace='::ns3') return root_module def register_types(module): root_module = module.get_root() ## address.h (module 'network'): ns3::Address [class] module.add_class('Address', import_from_module='ns.network') ## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration] module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network') ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class] module.add_class('AttributeConstructionList', import_from_module='ns.core') ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct] module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList']) ## buffer.h (module 'network'): ns3::Buffer [class] module.add_class('Buffer', import_from_module='ns.network') ## buffer.h (module 'network'): ns3::Buffer::Iterator [class] module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer']) ## packet.h (module 'network'): ns3::ByteTagIterator [class] module.add_class('ByteTagIterator', import_from_module='ns.network') ## packet.h (module 'network'): ns3::ByteTagIterator::Item [class] module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator']) ## byte-tag-list.h (module 'network'): ns3::ByteTagList [class] module.add_class('ByteTagList', import_from_module='ns.network') ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator 
[class] module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList']) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct] module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator']) ## callback.h (module 'core'): ns3::CallbackBase [class] module.add_class('CallbackBase', import_from_module='ns.core') ## event-id.h (module 'core'): ns3::EventId [class] module.add_class('EventId', import_from_module='ns.core') ## hash.h (module 'core'): ns3::Hasher [class] module.add_class('Hasher', import_from_module='ns.core') ## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress [class] module.add_class('Inet6SocketAddress', import_from_module='ns.network') ## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress [class] root_module['ns3::Inet6SocketAddress'].implicitly_converts_to(root_module['ns3::Address']) ## inet-socket-address.h (module 'network'): ns3::InetSocketAddress [class] module.add_class('InetSocketAddress', import_from_module='ns.network') ## inet-socket-address.h (module 'network'): ns3::InetSocketAddress [class] root_module['ns3::InetSocketAddress'].implicitly_converts_to(root_module['ns3::Address']) ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class] module.add_class('Ipv4Address', import_from_module='ns.network') ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class] root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address']) ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress [class] module.add_class('Ipv4InterfaceAddress', import_from_module='ns.internet') ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e [enumeration] module.add_enum('InterfaceAddressScope_e', ['HOST', 'LINK', 'GLOBAL'], outer_class=root_module['ns3::Ipv4InterfaceAddress'], import_from_module='ns.internet') ## ipv4-address.h 
(module 'network'): ns3::Ipv4Mask [class] module.add_class('Ipv4Mask', import_from_module='ns.network') ## ipv4-routing-helper.h (module 'internet'): ns3::Ipv4RoutingHelper [class] module.add_class('Ipv4RoutingHelper', allow_subclassing=True, import_from_module='ns.internet') ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class] module.add_class('Ipv6Address', import_from_module='ns.network') ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class] root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address']) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class] module.add_class('Ipv6Prefix', import_from_module='ns.network') ## mac48-address.h (module 'network'): ns3::Mac48Address [class] module.add_class('Mac48Address', import_from_module='ns.network') ## mac48-address.h (module 'network'): ns3::Mac48Address [class] root_module['ns3::Mac48Address'].implicitly_converts_to(root_module['ns3::Address']) ## net-device-container.h (module 'network'): ns3::NetDeviceContainer [class] module.add_class('NetDeviceContainer', import_from_module='ns.network') ## node-container.h (module 'network'): ns3::NodeContainer [class] module.add_class('NodeContainer', import_from_module='ns.network') ## node-list.h (module 'network'): ns3::NodeList [class] module.add_class('NodeList', import_from_module='ns.network') ## object-base.h (module 'core'): ns3::ObjectBase [class] module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core') ## object.h (module 'core'): ns3::ObjectDeleter [struct] module.add_class('ObjectDeleter', import_from_module='ns.core') ## object-factory.h (module 'core'): ns3::ObjectFactory [class] module.add_class('ObjectFactory', import_from_module='ns.core') ## packet-metadata.h (module 'network'): ns3::PacketMetadata [class] module.add_class('PacketMetadata', import_from_module='ns.network') ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [struct] module.add_class('Item', 
import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata']) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [enumeration] module.add_enum('', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network') ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator [class] module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata']) ## packet.h (module 'network'): ns3::PacketTagIterator [class] module.add_class('PacketTagIterator', import_from_module='ns.network') ## packet.h (module 'network'): ns3::PacketTagIterator::Item [class] module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator']) ## packet-tag-list.h (module 'network'): ns3::PacketTagList [class] module.add_class('PacketTagList', import_from_module='ns.network') ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData [struct] module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList']) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData_e [enumeration] module.add_enum('TagData_e', ['MAX_SIZE'], outer_class=root_module['ns3::PacketTagList::TagData'], import_from_module='ns.network') ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simulator.h (module 'core'): ns3::Simulator [class] module.add_class('Simulator', destructor_visibility='private', import_from_module='ns.core') ## tag.h (module 
'network'): ns3::Tag [class] module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase']) ## tag-buffer.h (module 'network'): ns3::TagBuffer [class] module.add_class('TagBuffer', import_from_module='ns.network') ## nstime.h (module 'core'): ns3::TimeWithUnit [class] module.add_class('TimeWithUnit', import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId [class] module.add_class('TypeId', import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration] module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct] module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId']) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct] module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId']) ## empty.h (module 'core'): ns3::empty [class] module.add_class('empty', import_from_module='ns.core') ## int64x64-double.h (module 'core'): ns3::int64x64_t [class] module.add_class('int64x64_t', import_from_module='ns.core') ## int64x64-double.h (module 'core'): ns3::int64x64_t::impl_type [enumeration] module.add_enum('impl_type', ['int128_impl', 'cairo_impl', 'ld_impl'], outer_class=root_module['ns3::int64x64_t'], import_from_module='ns.core') ## chunk.h (module 'network'): ns3::Chunk [class] module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase']) ## header.h (module 'network'): ns3::Header [class] module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk']) ## ipv4-header.h (module 'internet'): ns3::Ipv4Header [class] module.add_class('Ipv4Header', import_from_module='ns.internet', parent=root_module['ns3::Header']) ## ipv4-header.h 
(module 'internet'): ns3::Ipv4Header::DscpType [enumeration] module.add_enum('DscpType', ['DscpDefault', 'DSCP_CS1', 'DSCP_AF11', 'DSCP_AF12', 'DSCP_AF13', 'DSCP_CS2', 'DSCP_AF21', 'DSCP_AF22', 'DSCP_AF23', 'DSCP_CS3', 'DSCP_AF31', 'DSCP_AF32', 'DSCP_AF33', 'DSCP_CS4', 'DSCP_AF41', 'DSCP_AF42', 'DSCP_AF43', 'DSCP_CS5', 'DSCP_EF', 'DSCP_CS6', 'DSCP_CS7'], outer_class=root_module['ns3::Ipv4Header'], import_from_module='ns.internet') ## ipv4-header.h (module 'internet'): ns3::Ipv4Header::EcnType [enumeration] module.add_enum('EcnType', ['ECN_NotECT', 'ECN_ECT1', 'ECN_ECT0', 'ECN_CE'], outer_class=root_module['ns3::Ipv4Header'], import_from_module='ns.internet') ## ipv4-nix-vector-helper.h (module 'nix-vector-routing'): ns3::Ipv4NixVectorHelper [class] module.add_class('Ipv4NixVectorHelper', parent=root_module['ns3::Ipv4RoutingHelper']) ## object.h (module 'core'): ns3::Object [class] module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >']) ## object.h (module 'core'): ns3::Object::AggregateIterator [class] module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object']) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', 
template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::EventImpl', 'ns3::empty', 'ns3::DefaultDeleter<ns3::EventImpl>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > [class] module.add_class('SimpleRefCount', 
automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Ipv4MulticastRoute', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Ipv4MulticastRoute>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Ipv4Route', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Ipv4Route>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter<ns3::NetDeviceQueue> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NetDeviceQueue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NetDeviceQueue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > 
[class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::OutputStreamWrapper', 'ns3::empty', 'ns3::DefaultDeleter<ns3::OutputStreamWrapper>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::QueueItem', 'ns3::empty', 'ns3::DefaultDeleter<ns3::QueueItem>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, 
ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## socket.h (module 'network'): ns3::Socket [class] module.add_class('Socket', import_from_module='ns.network', parent=root_module['ns3::Object']) ## socket.h (module 'network'): ns3::Socket::SocketErrno [enumeration] module.add_enum('SocketErrno', ['ERROR_NOTERROR', 'ERROR_ISCONN', 'ERROR_NOTCONN', 'ERROR_MSGSIZE', 'ERROR_AGAIN', 'ERROR_SHUTDOWN', 'ERROR_OPNOTSUPP', 'ERROR_AFNOSUPPORT', 'ERROR_INVAL', 'ERROR_BADF', 'ERROR_NOROUTETOHOST', 'ERROR_NODEV', 'ERROR_ADDRNOTAVAIL', 'ERROR_ADDRINUSE', 'SOCKET_ERRNO_LAST'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network') ## socket.h (module 'network'): ns3::Socket::SocketType [enumeration] module.add_enum('SocketType', ['NS3_SOCK_STREAM', 'NS3_SOCK_SEQPACKET', 'NS3_SOCK_DGRAM', 'NS3_SOCK_RAW'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network') ## socket.h (module 'network'): ns3::Socket::Ipv6MulticastFilterMode [enumeration] module.add_enum('Ipv6MulticastFilterMode', ['INCLUDE', 'EXCLUDE'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network') ## socket.h (module 'network'): ns3::SocketAddressTag [class] module.add_class('SocketAddressTag', import_from_module='ns.network', parent=root_module['ns3::Tag']) ## socket.h (module 'network'): ns3::SocketIpTosTag [class] module.add_class('SocketIpTosTag', import_from_module='ns.network', parent=root_module['ns3::Tag']) ## socket.h (module 'network'): ns3::SocketIpTtlTag [class] module.add_class('SocketIpTtlTag', import_from_module='ns.network', parent=root_module['ns3::Tag']) ## socket.h 
(module 'network'): ns3::SocketIpv6HopLimitTag [class] module.add_class('SocketIpv6HopLimitTag', import_from_module='ns.network', parent=root_module['ns3::Tag']) ## socket.h (module 'network'): ns3::SocketIpv6TclassTag [class] module.add_class('SocketIpv6TclassTag', import_from_module='ns.network', parent=root_module['ns3::Tag']) ## socket.h (module 'network'): ns3::SocketSetDontFragmentTag [class] module.add_class('SocketSetDontFragmentTag', import_from_module='ns.network', parent=root_module['ns3::Tag']) ## nstime.h (module 'core'): ns3::Time [class] module.add_class('Time', import_from_module='ns.core') ## nstime.h (module 'core'): ns3::Time::Unit [enumeration] module.add_enum('Unit', ['Y', 'D', 'H', 'MIN', 'S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core') ## nstime.h (module 'core'): ns3::Time [class] root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t']) ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class] module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >']) ## trailer.h (module 'network'): ns3::Trailer [class] module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk']) ## attribute.h (module 'core'): ns3::AttributeAccessor [class] module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >']) ## attribute.h (module 'core'): ns3::AttributeChecker [class] module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >']) ## attribute.h (module 'core'): 
ns3::AttributeValue [class] module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >']) ## callback.h (module 'core'): ns3::CallbackChecker [class] module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## callback.h (module 'core'): ns3::CallbackImplBase [class] module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >']) ## callback.h (module 'core'): ns3::CallbackValue [class] module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## channel.h (module 'network'): ns3::Channel [class] module.add_class('Channel', import_from_module='ns.network', parent=root_module['ns3::Object']) ## attribute.h (module 'core'): ns3::EmptyAttributeValue [class] module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## event-impl.h (module 'core'): ns3::EventImpl [class] module.add_class('EventImpl', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >']) ## ipv4.h (module 'internet'): ns3::Ipv4 [class] module.add_class('Ipv4', import_from_module='ns.internet', parent=root_module['ns3::Object']) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker [class] module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue [class] module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker [class] 
module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue [class] module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## ipv4-route.h (module 'internet'): ns3::Ipv4MulticastRoute [class] module.add_class('Ipv4MulticastRoute', import_from_module='ns.internet', parent=root_module['ns3::SimpleRefCount< ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >']) ## ipv4-route.h (module 'internet'): ns3::Ipv4Route [class] module.add_class('Ipv4Route', import_from_module='ns.internet', parent=root_module['ns3::SimpleRefCount< ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >']) ## ipv4-routing-protocol.h (module 'internet'): ns3::Ipv4RoutingProtocol [class] module.add_class('Ipv4RoutingProtocol', import_from_module='ns.internet', parent=root_module['ns3::Object']) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker [class] module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue [class] module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker [class] module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue [class] module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## mac48-address.h (module 'network'): ns3::Mac48AddressChecker [class] module.add_class('Mac48AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## mac48-address.h (module 'network'): ns3::Mac48AddressValue [class] 
module.add_class('Mac48AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## net-device.h (module 'network'): ns3::NetDevice [class] module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object']) ## net-device.h (module 'network'): ns3::NetDevice::PacketType [enumeration] module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network') ## net-device.h (module 'network'): ns3::NetDeviceQueue [class] module.add_class('NetDeviceQueue', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter<ns3::NetDeviceQueue> >']) ## net-device.h (module 'network'): ns3::NetDeviceQueueInterface [class] module.add_class('NetDeviceQueueInterface', import_from_module='ns.network', parent=root_module['ns3::Object']) ## nix-vector.h (module 'network'): ns3::NixVector [class] module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >']) ## node.h (module 'network'): ns3::Node [class] module.add_class('Node', import_from_module='ns.network', parent=root_module['ns3::Object']) ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker [class] module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## object-factory.h (module 'core'): ns3::ObjectFactoryValue [class] module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper [class] module.add_class('OutputStreamWrapper', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< 
ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >']) ## packet.h (module 'network'): ns3::Packet [class] module.add_class('Packet', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >']) ## net-device.h (module 'network'): ns3::QueueItem [class] module.add_class('QueueItem', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> >']) ## nstime.h (module 'core'): ns3::TimeValue [class] module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## type-id.h (module 'core'): ns3::TypeIdChecker [class] module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## type-id.h (module 'core'): ns3::TypeIdValue [class] module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## address.h (module 'network'): ns3::AddressChecker [class] module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## address.h (module 'network'): ns3::AddressValue [class] module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## bridge-channel.h (module 'bridge'): ns3::BridgeChannel [class] module.add_class('BridgeChannel', import_from_module='ns.bridge', parent=root_module['ns3::Channel']) ## bridge-net-device.h (module 'bridge'): ns3::BridgeNetDevice [class] module.add_class('BridgeNetDevice', import_from_module='ns.bridge', parent=root_module['ns3::NetDevice']) ## ipv4-list-routing.h (module 'internet'): ns3::Ipv4ListRouting [class] module.add_class('Ipv4ListRouting', import_from_module='ns.internet', parent=root_module['ns3::Ipv4RoutingProtocol']) ## ipv4-nix-vector-routing.h (module 'nix-vector-routing'): ns3::Ipv4NixVectorRouting [class] 
module.add_class('Ipv4NixVectorRouting', parent=root_module['ns3::Ipv4RoutingProtocol']) module.add_container('std::vector< ns3::Ipv6Address >', 'ns3::Ipv6Address', container_type=u'vector') module.add_container('std::map< unsigned int, unsigned int >', ('unsigned int', 'unsigned int'), container_type=u'map') typehandlers.add_type_alias(u'std::map< ns3::Ipv4Address, ns3::Ptr< ns3::NixVector >, std::less< ns3::Ipv4Address >, std::allocator< std::pair< ns3::Ipv4Address const, ns3::Ptr< ns3::NixVector > > > >', u'ns3::NixMap_t') typehandlers.add_type_alias(u'std::map< ns3::Ipv4Address, ns3::Ptr< ns3::NixVector >, std::less< ns3::Ipv4Address >, std::allocator< std::pair< ns3::Ipv4Address const, ns3::Ptr< ns3::NixVector > > > >*', u'ns3::NixMap_t*') typehandlers.add_type_alias(u'std::map< ns3::Ipv4Address, ns3::Ptr< ns3::NixVector >, std::less< ns3::Ipv4Address >, std::allocator< std::pair< ns3::Ipv4Address const, ns3::Ptr< ns3::NixVector > > > >&', u'ns3::NixMap_t&') typehandlers.add_type_alias(u'std::map< ns3::Ipv4Address, ns3::Ptr< ns3::Ipv4Route >, std::less< ns3::Ipv4Address >, std::allocator< std::pair< ns3::Ipv4Address const, ns3::Ptr< ns3::Ipv4Route > > > >', u'ns3::Ipv4RouteMap_t') typehandlers.add_type_alias(u'std::map< ns3::Ipv4Address, ns3::Ptr< ns3::Ipv4Route >, std::less< ns3::Ipv4Address >, std::allocator< std::pair< ns3::Ipv4Address const, ns3::Ptr< ns3::Ipv4Route > > > >*', u'ns3::Ipv4RouteMap_t*') typehandlers.add_type_alias(u'std::map< ns3::Ipv4Address, ns3::Ptr< ns3::Ipv4Route >, std::less< ns3::Ipv4Address >, std::allocator< std::pair< ns3::Ipv4Address const, ns3::Ptr< ns3::Ipv4Route > > > >&', u'ns3::Ipv4RouteMap_t&') ## Register a nested module for the namespace FatalImpl nested_module = module.add_cpp_namespace('FatalImpl') register_types_ns3_FatalImpl(nested_module) ## Register a nested module for the namespace Hash nested_module = module.add_cpp_namespace('Hash') register_types_ns3_Hash(nested_module) ## Register a nested module for the 
## namespace TracedValueCallback (tail of the enclosing register_types function, continued from above)
    nested_module = module.add_cpp_namespace('TracedValueCallback')
    register_types_ns3_TracedValueCallback(nested_module)

def register_types_ns3_FatalImpl(module):
    ## NOTE(review): file appears to be PyBindGen-generated ns-3 binding code —
    ## presumably regenerated by the bindings scanner; confirm before hand-editing.
    ## This namespace currently registers no types; only the generator's
    ## boilerplate root-module lookup remains (root_module is unused).
    root_module = module.get_root()

def register_types_ns3_Hash(module):
    ## Register the types of the ns3::Hash namespace: the Implementation base
    ## class, the Hash32/Hash64 function-pointer aliases, and the nested
    ## ns3::Hash::Function namespace.
    root_module = module.get_root()
    ## hash-function.h (module 'core'): ns3::Hash::Implementation [class]
    module.add_class('Implementation', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
    ## Pointer / pointer-pointer / pointer-reference aliases for 32-bit hash functions.
    typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash32Function_ptr')
    typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash32Function_ptr*')
    typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash32Function_ptr&')
    ## Same three alias forms for 64-bit hash functions.
    typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash64Function_ptr')
    typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash64Function_ptr*')
    typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash64Function_ptr&')
    ## Register a nested module for the namespace Function
    nested_module = module.add_cpp_namespace('Function')
    register_types_ns3_Hash_Function(nested_module)

def register_types_ns3_Hash_Function(module):
    ## Register the concrete hash implementations of ns3::Hash::Function,
    ## all derived from ns3::Hash::Implementation.
    root_module = module.get_root()
    ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a [class]
    module.add_class('Fnv1a', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32 [class]
    module.add_class('Hash32', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64 [class]
    module.add_class('Hash64', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
    ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3 [class]
    module.add_class('Murmur3', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])

def register_types_ns3_TracedValueCallback(module):
    ## Register the TracedValueCallback::Time callback typedef (pointer,
    ## pointer-pointer, and pointer-reference alias forms).
    root_module = module.get_root()
    typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, ns3::Time ) *', u'ns3::TracedValueCallback::Time')
    typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, ns3::Time ) **', u'ns3::TracedValueCallback::Time*')
    typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, ns3::Time ) *&', u'ns3::TracedValueCallback::Time&')

def register_methods(root_module):
    ## Dispatch to one generated register_* helper per bound class.
    ## NOTE(review): the call order mirrors the type-registration order above;
    ## presumably significant for base-before-derived registration — confirm
    ## against the PyBindGen generator before reordering.
    register_Ns3Address_methods(root_module, root_module['ns3::Address'])
    register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
    register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
    register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer'])
    register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator'])
    register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator'])
    register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item'])
    register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList'])
    register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator'])
    register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item'])
    register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
    register_Ns3EventId_methods(root_module, root_module['ns3::EventId'])
    register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher'])
    register_Ns3Inet6SocketAddress_methods(root_module, root_module['ns3::Inet6SocketAddress'])
    register_Ns3InetSocketAddress_methods(root_module, root_module['ns3::InetSocketAddress'])
    register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address'])
## (interior of register_methods, continued) — per-class method registration calls.
    register_Ns3Ipv4InterfaceAddress_methods(root_module, root_module['ns3::Ipv4InterfaceAddress'])
    register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask'])
    register_Ns3Ipv4RoutingHelper_methods(root_module, root_module['ns3::Ipv4RoutingHelper'])
    register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address'])
    register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix'])
    register_Ns3Mac48Address_methods(root_module, root_module['ns3::Mac48Address'])
    register_Ns3NetDeviceContainer_methods(root_module, root_module['ns3::NetDeviceContainer'])
    register_Ns3NodeContainer_methods(root_module, root_module['ns3::NodeContainer'])
    register_Ns3NodeList_methods(root_module, root_module['ns3::NodeList'])
    register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
    register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
    register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
    register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata'])
    register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item'])
    register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator'])
    register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator'])
    register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item'])
    register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList'])
    register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData'])
    ## SimpleRefCount template instantiations: the mangled helper name encodes
    ## the full template argument list of the registered C++ type.
    register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    register_Ns3Simulator_methods(root_module, root_module['ns3::Simulator'])
    register_Ns3Tag_methods(root_module, root_module['ns3::Tag'])
    register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
    register_Ns3TimeWithUnit_methods(root_module, root_module['ns3::TimeWithUnit'])
    register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
    register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
    register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
    register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
    register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
    register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk'])
    register_Ns3Header_methods(root_module, root_module['ns3::Header'])
    register_Ns3Ipv4Header_methods(root_module, root_module['ns3::Ipv4Header'])
    register_Ns3Ipv4NixVectorHelper_methods(root_module, root_module['ns3::Ipv4NixVectorHelper'])
    register_Ns3Object_methods(root_module, root_module['ns3::Object'])
    register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
    register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
## (interior of register_methods, continued) — remaining SimpleRefCount
## instantiations, then the Socket/Time/attribute-value class families.
    register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
    register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
    register_Ns3SimpleRefCount__Ns3Ipv4MulticastRoute_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4MulticastRoute__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >'])
    register_Ns3SimpleRefCount__Ns3Ipv4Route_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4Route__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >'])
    register_Ns3SimpleRefCount__Ns3NetDeviceQueue_Ns3Empty_Ns3DefaultDeleter__lt__ns3NetDeviceQueue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter<ns3::NetDeviceQueue> >'])
    register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
    register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >'])
    register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
    register_Ns3SimpleRefCount__Ns3QueueItem_Ns3Empty_Ns3DefaultDeleter__lt__ns3QueueItem__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> >'])
    register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    register_Ns3Socket_methods(root_module, root_module['ns3::Socket'])
    register_Ns3SocketAddressTag_methods(root_module, root_module['ns3::SocketAddressTag'])
    register_Ns3SocketIpTosTag_methods(root_module, root_module['ns3::SocketIpTosTag'])
    register_Ns3SocketIpTtlTag_methods(root_module, root_module['ns3::SocketIpTtlTag'])
    register_Ns3SocketIpv6HopLimitTag_methods(root_module, root_module['ns3::SocketIpv6HopLimitTag'])
    register_Ns3SocketIpv6TclassTag_methods(root_module, root_module['ns3::SocketIpv6TclassTag'])
    register_Ns3SocketSetDontFragmentTag_methods(root_module, root_module['ns3::SocketSetDontFragmentTag'])
    register_Ns3Time_methods(root_module, root_module['ns3::Time'])
    register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
    register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer'])
    register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
    register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
    register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
    register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
    register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
    register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
    register_Ns3Channel_methods(root_module, root_module['ns3::Channel'])
    register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
    register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl'])
    register_Ns3Ipv4_methods(root_module, root_module['ns3::Ipv4'])
    register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker'])
    register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue'])
    register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker'])
    register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue'])
    register_Ns3Ipv4MulticastRoute_methods(root_module, root_module['ns3::Ipv4MulticastRoute'])
    register_Ns3Ipv4Route_methods(root_module, root_module['ns3::Ipv4Route'])
    register_Ns3Ipv4RoutingProtocol_methods(root_module, root_module['ns3::Ipv4RoutingProtocol'])
    register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker'])
    register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue'])
    register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker'])
    register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue'])
    register_Ns3Mac48AddressChecker_methods(root_module, root_module['ns3::Mac48AddressChecker'])
    register_Ns3Mac48AddressValue_methods(root_module, root_module['ns3::Mac48AddressValue'])
    register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice'])
    register_Ns3NetDeviceQueue_methods(root_module, root_module['ns3::NetDeviceQueue'])
    register_Ns3NetDeviceQueueInterface_methods(root_module, root_module['ns3::NetDeviceQueueInterface'])
    register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector'])
    register_Ns3Node_methods(root_module, root_module['ns3::Node'])
    register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
    register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
    register_Ns3OutputStreamWrapper_methods(root_module, root_module['ns3::OutputStreamWrapper'])
    register_Ns3Packet_methods(root_module, root_module['ns3::Packet'])
    register_Ns3QueueItem_methods(root_module, root_module['ns3::QueueItem'])
## (tail of register_methods) — value/checker classes, bridge and
## nix-vector-routing classes, and the Hash function implementations.
    register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
    register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
    register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
    register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker'])
    register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue'])
    register_Ns3BridgeChannel_methods(root_module, root_module['ns3::BridgeChannel'])
    register_Ns3BridgeNetDevice_methods(root_module, root_module['ns3::BridgeNetDevice'])
    register_Ns3Ipv4ListRouting_methods(root_module, root_module['ns3::Ipv4ListRouting'])
    register_Ns3Ipv4NixVectorRouting_methods(root_module, root_module['ns3::Ipv4NixVectorRouting'])
    register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation'])
    register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a'])
    register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32'])
    register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64'])
    register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3'])
    return

def register_Ns3Address_methods(root_module, cls):
    ## Bind the ns3::Address API: comparison/stream operators, constructors,
    ## and the copy/serialize accessors declared in address.h.
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## address.h (module 'network'): ns3::Address::Address() [constructor]
    cls.add_constructor([])
    ## address.h (module 'network'): ns3::Address::Address(uint8_t type, uint8_t const * buffer, uint8_t len) [constructor]
    cls.add_constructor([param('uint8_t', 'type'), param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    ## address.h (module 'network'): ns3::Address::Address(ns3::Address const & address) [copy constructor]
    cls.add_constructor([param('ns3::Address const &', 'address')])
    ## address.h (module 'network'): bool ns3::Address::CheckCompatible(uint8_t type, uint8_t len) const [member function]
    cls.add_method('CheckCompatible', 'bool', [param('uint8_t', 'type'), param('uint8_t', 'len')], is_const=True)
    ## address.h (module 'network'): uint32_t ns3::Address::CopyAllFrom(uint8_t const * buffer, uint8_t len) [member function]
    cls.add_method('CopyAllFrom', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    ## address.h (module 'network'): uint32_t ns3::Address::CopyAllTo(uint8_t * buffer, uint8_t len) const [member function]
    cls.add_method('CopyAllTo', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint8_t', 'len')], is_const=True)
    ## address.h (module 'network'): uint32_t ns3::Address::CopyFrom(uint8_t const * buffer, uint8_t len) [member function]
    cls.add_method('CopyFrom', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    ## address.h (module 'network'): uint32_t ns3::Address::CopyTo(uint8_t * buffer) const [member function]
    cls.add_method('CopyTo', 'uint32_t', [param('uint8_t *', 'buffer')], is_const=True)
    ## address.h (module 'network'): void ns3::Address::Deserialize(ns3::TagBuffer buffer) [member function]
    cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'buffer')])
    ## address.h (module 'network'): uint8_t ns3::Address::GetLength() const [member function]
    cls.add_method('GetLength', 'uint8_t', [], is_const=True)
    ## address.h (module 'network'): uint32_t ns3::Address::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    ## address.h (module 'network'): bool ns3::Address::IsInvalid() const [member function]
    cls.add_method('IsInvalid', 'bool', [], is_const=True)
    ## address.h (module 'network'): bool ns3::Address::IsMatchingType(uint8_t type) const [member function]
    cls.add_method('IsMatchingType', 'bool', [param('uint8_t', 'type')], is_const=True)
    ## address.h (module 'network'): static uint8_t ns3::Address::Register() [member function]
    cls.add_method('Register', 'uint8_t', [], is_static=True)
    ## address.h (module 'network'): void ns3::Address::Serialize(ns3::TagBuffer buffer) const [member function]
    cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'buffer')], is_const=True)
    return

def register_Ns3AttributeConstructionList_methods(root_module, cls):
    ## Bind ns3::AttributeConstructionList: constructors, Add, and the
    ## Begin/End/Find accessors over the underlying std::list of Items.
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor]
    cls.add_constructor([])
    ## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::Ptr<ns3::AttributeValue> value) [member function]
    cls.add_method('Add', 'void', [param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
    ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::Begin() const [member function]
    cls.add_method('Begin', 'std::_List_const_iterator< ns3::AttributeConstructionList::Item >', [], is_const=True)
    ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::End() const [member function]
    cls.add_method('End', 'std::_List_const_iterator< ns3::AttributeConstructionList::Item >', [], is_const=True)
    ## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('Find', 'ns3::Ptr< ns3::AttributeValue >', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True)
    return

def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
    ## Bind the plain-struct Item: constructors plus its three public fields.
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor]
    cls.add_constructor([])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable]
    cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable]
    cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
    return

def register_Ns3Buffer_methods(root_module, cls):
    ## Bind ns3::Buffer: constructors and the add/remove/copy/(de)serialize API.
    ## buffer.h (module 'network'): ns3::Buffer::Buffer() [constructor]
    cls.add_constructor([])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize) [constructor]
    cls.add_constructor([param('uint32_t', 'dataSize')])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize, bool initialize) [constructor]
    cls.add_constructor([param('uint32_t', 'dataSize'), param('bool', 'initialize')])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(ns3::Buffer const & o) [copy constructor]
    cls.add_constructor([param('ns3::Buffer const &', 'o')])
    ## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(uint32_t end) [member function]
    cls.add_method('AddAtEnd', 'void', [param('uint32_t', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(ns3::Buffer const & o) [member function]
    cls.add_method('AddAtEnd', 'void', [param('ns3::Buffer const &', 'o')])
    ## buffer.h (module 'network'): void ns3::Buffer::AddAtStart(uint32_t start) [member function]
    cls.add_method('AddAtStart', 'void', [param('uint32_t', 'start')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::Begin() const [member function]
    cls.add_method('Begin', 'ns3::Buffer::Iterator', [], is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::CopyData(std::ostream * os, uint32_t size) const [member function]
    cls.add_method('CopyData', 'void', [param('std::ostream *', 'os'), param('uint32_t', 'size')], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::CopyData(uint8_t * buffer, uint32_t size) const [member function]
    cls.add_method('CopyData', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')], is_const=True)
    ## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFragment(uint32_t start, uint32_t length) const [member function]
    cls.add_method('CreateFragment', 'ns3::Buffer', [param('uint32_t', 'start'), param('uint32_t', 'length')], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::End() const [member function]
    cls.add_method('End', 'ns3::Buffer::Iterator', [], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSize() const [member function]
    cls.add_method('GetSize', 'uint32_t', [], is_const=True)
    ## buffer.h (module 'network'): uint8_t const * ns3::Buffer::PeekData() const [member function]
    cls.add_method('PeekData', 'uint8_t const *', [], is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::RemoveAtEnd(uint32_t end) [member function]
    cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::RemoveAtStart(uint32_t start) [member function]
    cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'start')])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True)
    return

def register_Ns3BufferIterator_methods(root_module, cls):
    ## Bind ns3::Buffer::Iterator (definition continues past this chunk).
    ## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator(ns3::Buffer::Iterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Buffer::Iterator const &', 'arg0')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator() [constructor]
    cls.add_constructor([])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size) [member function]
    cls.add_method('CalculateIpChecksum', 'uint16_t', [param('uint16_t', 'size')])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size, uint32_t initialChecksum) [member function]
    cls.add_method('CalculateIpChecksum', 'uint16_t', [param('uint16_t', 'size'), param('uint32_t', 'initialChecksum')])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetDistanceFrom(ns3::Buffer::Iterator const & o) const [member function]
    cls.add_method('GetDistanceFrom', 'uint32_t', [param('ns3::Buffer::Iterator const &', 'o')], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetSize() const [member function]
    cls.add_method('GetSize', 'uint32_t', [], is_const=True)
    ## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsEnd() const [member function]
    cls.add_method('IsEnd', 'bool', [], is_const=True)
    ## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsStart() const [member function]
    cls.add_method('IsStart', 'bool', [], is_const=True)
    ## buffer.h (module
'network'): void ns3::Buffer::Iterator::Next() [member function] cls.add_method('Next', 'void', []) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next(uint32_t delta) [member function] cls.add_method('Next', 'void', [param('uint32_t', 'delta')]) ## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::PeekU8() [member function] cls.add_method('PeekU8', 'uint8_t', []) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev() [member function] cls.add_method('Prev', 'void', []) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev(uint32_t delta) [member function] cls.add_method('Prev', 'void', [param('uint32_t', 'delta')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(uint8_t * buffer, uint32_t size) [member function] cls.add_method('Read', 'void', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(ns3::Buffer::Iterator start, uint32_t size) [member function] cls.add_method('Read', 'void', [param('ns3::Buffer::Iterator', 'start'), param('uint32_t', 'size')]) ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadLsbtohU16() [member function] cls.add_method('ReadLsbtohU16', 'uint16_t', []) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadLsbtohU32() [member function] cls.add_method('ReadLsbtohU32', 'uint32_t', []) ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadLsbtohU64() [member function] cls.add_method('ReadLsbtohU64', 'uint64_t', []) ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadNtohU16() [member function] cls.add_method('ReadNtohU16', 'uint16_t', []) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadNtohU32() [member function] cls.add_method('ReadNtohU32', 'uint32_t', []) ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadNtohU64() [member function] cls.add_method('ReadNtohU64', 'uint64_t', []) ## buffer.h (module 'network'): 
uint16_t ns3::Buffer::Iterator::ReadU16() [member function] cls.add_method('ReadU16', 'uint16_t', []) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadU32() [member function] cls.add_method('ReadU32', 'uint32_t', []) ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadU64() [member function] cls.add_method('ReadU64', 'uint64_t', []) ## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::ReadU8() [member function] cls.add_method('ReadU8', 'uint8_t', []) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(uint8_t const * buffer, uint32_t size) [member function] cls.add_method('Write', 'void', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function] cls.add_method('Write', 'void', [param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU16(uint16_t data) [member function] cls.add_method('WriteHtolsbU16', 'void', [param('uint16_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU32(uint32_t data) [member function] cls.add_method('WriteHtolsbU32', 'void', [param('uint32_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU64(uint64_t data) [member function] cls.add_method('WriteHtolsbU64', 'void', [param('uint64_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU16(uint16_t data) [member function] cls.add_method('WriteHtonU16', 'void', [param('uint16_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU32(uint32_t data) [member function] cls.add_method('WriteHtonU32', 'void', [param('uint32_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU64(uint64_t data) [member function] cls.add_method('WriteHtonU64', 
'void', [param('uint64_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU16(uint16_t data) [member function] cls.add_method('WriteU16', 'void', [param('uint16_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU32(uint32_t data) [member function] cls.add_method('WriteU32', 'void', [param('uint32_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU64(uint64_t data) [member function] cls.add_method('WriteU64', 'void', [param('uint64_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data) [member function] cls.add_method('WriteU8', 'void', [param('uint8_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data, uint32_t len) [member function] cls.add_method('WriteU8', 'void', [param('uint8_t', 'data'), param('uint32_t', 'len')]) return def register_Ns3ByteTagIterator_methods(root_module, cls): ## packet.h (module 'network'): ns3::ByteTagIterator::ByteTagIterator(ns3::ByteTagIterator const & arg0) [copy constructor] cls.add_constructor([param('ns3::ByteTagIterator const &', 'arg0')]) ## packet.h (module 'network'): bool ns3::ByteTagIterator::HasNext() const [member function] cls.add_method('HasNext', 'bool', [], is_const=True) ## packet.h (module 'network'): ns3::ByteTagIterator::Item ns3::ByteTagIterator::Next() [member function] cls.add_method('Next', 'ns3::ByteTagIterator::Item', []) return def register_Ns3ByteTagIteratorItem_methods(root_module, cls): ## packet.h (module 'network'): ns3::ByteTagIterator::Item::Item(ns3::ByteTagIterator::Item const & arg0) [copy constructor] cls.add_constructor([param('ns3::ByteTagIterator::Item const &', 'arg0')]) ## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetEnd() const [member function] cls.add_method('GetEnd', 'uint32_t', [], is_const=True) ## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetStart() const [member function] 
cls.add_method('GetStart', 'uint32_t', [], is_const=True) ## packet.h (module 'network'): void ns3::ByteTagIterator::Item::GetTag(ns3::Tag & tag) const [member function] cls.add_method('GetTag', 'void', [param('ns3::Tag &', 'tag')], is_const=True) ## packet.h (module 'network'): ns3::TypeId ns3::ByteTagIterator::Item::GetTypeId() const [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True) return def register_Ns3ByteTagList_methods(root_module, cls): ## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList() [constructor] cls.add_constructor([]) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList(ns3::ByteTagList const & o) [copy constructor] cls.add_constructor([param('ns3::ByteTagList const &', 'o')]) ## byte-tag-list.h (module 'network'): ns3::TagBuffer ns3::ByteTagList::Add(ns3::TypeId tid, uint32_t bufferSize, int32_t start, int32_t end) [member function] cls.add_method('Add', 'ns3::TagBuffer', [param('ns3::TypeId', 'tid'), param('uint32_t', 'bufferSize'), param('int32_t', 'start'), param('int32_t', 'end')]) ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Add(ns3::ByteTagList const & o) [member function] cls.add_method('Add', 'void', [param('ns3::ByteTagList const &', 'o')]) ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtEnd(int32_t appendOffset) [member function] cls.add_method('AddAtEnd', 'void', [param('int32_t', 'appendOffset')]) ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtStart(int32_t prependOffset) [member function] cls.add_method('AddAtStart', 'void', [param('int32_t', 'prependOffset')]) ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Adjust(int32_t adjustment) [member function] cls.add_method('Adjust', 'void', [param('int32_t', 'adjustment')]) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator ns3::ByteTagList::Begin(int32_t offsetStart, int32_t offsetEnd) const [member function] cls.add_method('Begin', 
'ns3::ByteTagList::Iterator', [param('int32_t', 'offsetStart'), param('int32_t', 'offsetEnd')], is_const=True) ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::RemoveAll() [member function] cls.add_method('RemoveAll', 'void', []) return def register_Ns3ByteTagListIterator_methods(root_module, cls): ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Iterator(ns3::ByteTagList::Iterator const & arg0) [copy constructor] cls.add_constructor([param('ns3::ByteTagList::Iterator const &', 'arg0')]) ## byte-tag-list.h (module 'network'): uint32_t ns3::ByteTagList::Iterator::GetOffsetStart() const [member function] cls.add_method('GetOffsetStart', 'uint32_t', [], is_const=True) ## byte-tag-list.h (module 'network'): bool ns3::ByteTagList::Iterator::HasNext() const [member function] cls.add_method('HasNext', 'bool', [], is_const=True) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item ns3::ByteTagList::Iterator::Next() [member function] cls.add_method('Next', 'ns3::ByteTagList::Iterator::Item', []) return def register_Ns3ByteTagListIteratorItem_methods(root_module, cls): ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::ByteTagList::Iterator::Item const & arg0) [copy constructor] cls.add_constructor([param('ns3::ByteTagList::Iterator::Item const &', 'arg0')]) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::TagBuffer buf) [constructor] cls.add_constructor([param('ns3::TagBuffer', 'buf')]) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::buf [variable] cls.add_instance_attribute('buf', 'ns3::TagBuffer', is_const=False) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::end [variable] cls.add_instance_attribute('end', 'int32_t', is_const=False) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::size [variable] cls.add_instance_attribute('size', 'uint32_t', is_const=False) ## byte-tag-list.h 
(module 'network'): ns3::ByteTagList::Iterator::Item::start [variable] cls.add_instance_attribute('start', 'int32_t', is_const=False) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::tid [variable] cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False) return def register_Ns3CallbackBase_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor] cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')]) ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function] cls.add_method('GetImpl', 'ns3::Ptr< ns3::CallbackImplBase >', [], is_const=True) ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')], visibility='protected') return def register_Ns3EventId_methods(root_module, cls): cls.add_binary_comparison_operator('!=') cls.add_binary_comparison_operator('==') ## event-id.h (module 'core'): ns3::EventId::EventId(ns3::EventId const & arg0) [copy constructor] cls.add_constructor([param('ns3::EventId const &', 'arg0')]) ## event-id.h (module 'core'): ns3::EventId::EventId() [constructor] cls.add_constructor([]) ## event-id.h (module 'core'): ns3::EventId::EventId(ns3::Ptr<ns3::EventImpl> const & impl, uint64_t ts, uint32_t context, uint32_t uid) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::EventImpl > const &', 'impl'), param('uint64_t', 'ts'), param('uint32_t', 'context'), param('uint32_t', 'uid')]) ## event-id.h (module 'core'): void ns3::EventId::Cancel() [member function] cls.add_method('Cancel', 'void', []) ## event-id.h (module 'core'): uint32_t ns3::EventId::GetContext() const [member function] cls.add_method('GetContext', 'uint32_t', [], 
is_const=True) ## event-id.h (module 'core'): uint64_t ns3::EventId::GetTs() const [member function] cls.add_method('GetTs', 'uint64_t', [], is_const=True) ## event-id.h (module 'core'): uint32_t ns3::EventId::GetUid() const [member function] cls.add_method('GetUid', 'uint32_t', [], is_const=True) ## event-id.h (module 'core'): bool ns3::EventId::IsExpired() const [member function] cls.add_method('IsExpired', 'bool', [], is_const=True) ## event-id.h (module 'core'): bool ns3::EventId::IsRunning() const [member function] cls.add_method('IsRunning', 'bool', [], is_const=True) ## event-id.h (module 'core'): ns3::EventImpl * ns3::EventId::PeekEventImpl() const [member function] cls.add_method('PeekEventImpl', 'ns3::EventImpl *', [], is_const=True) return def register_Ns3Hasher_methods(root_module, cls): ## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Hasher const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hasher const &', 'arg0')]) ## hash.h (module 'core'): ns3::Hasher::Hasher() [constructor] cls.add_constructor([]) ## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Ptr<ns3::Hash::Implementation> hp) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::Hash::Implementation >', 'hp')]) ## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')]) ## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(std::string const s) [member function] cls.add_method('GetHash32', 'uint32_t', [param('std::string const', 's')]) ## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')]) ## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(std::string const s) [member function] cls.add_method('GetHash64', 'uint64_t', 
[param('std::string const', 's')]) ## hash.h (module 'core'): ns3::Hasher & ns3::Hasher::clear() [member function] cls.add_method('clear', 'ns3::Hasher &', []) return def register_Ns3Inet6SocketAddress_methods(root_module, cls): ## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress::Inet6SocketAddress(ns3::Inet6SocketAddress const & arg0) [copy constructor] cls.add_constructor([param('ns3::Inet6SocketAddress const &', 'arg0')]) ## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress::Inet6SocketAddress(ns3::Ipv6Address ipv6, uint16_t port) [constructor] cls.add_constructor([param('ns3::Ipv6Address', 'ipv6'), param('uint16_t', 'port')]) ## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress::Inet6SocketAddress(ns3::Ipv6Address ipv6) [constructor] cls.add_constructor([param('ns3::Ipv6Address', 'ipv6')]) ## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress::Inet6SocketAddress(uint16_t port) [constructor] cls.add_constructor([param('uint16_t', 'port')]) ## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress::Inet6SocketAddress(char const * ipv6, uint16_t port) [constructor] cls.add_constructor([param('char const *', 'ipv6'), param('uint16_t', 'port')]) ## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress::Inet6SocketAddress(char const * ipv6) [constructor] cls.add_constructor([param('char const *', 'ipv6')]) ## inet6-socket-address.h (module 'network'): static ns3::Inet6SocketAddress ns3::Inet6SocketAddress::ConvertFrom(ns3::Address const & addr) [member function] cls.add_method('ConvertFrom', 'ns3::Inet6SocketAddress', [param('ns3::Address const &', 'addr')], is_static=True) ## inet6-socket-address.h (module 'network'): ns3::Ipv6Address ns3::Inet6SocketAddress::GetIpv6() const [member function] cls.add_method('GetIpv6', 'ns3::Ipv6Address', [], is_const=True) ## inet6-socket-address.h (module 'network'): uint16_t ns3::Inet6SocketAddress::GetPort() const [member function] 
cls.add_method('GetPort', 'uint16_t', [], is_const=True) ## inet6-socket-address.h (module 'network'): static bool ns3::Inet6SocketAddress::IsMatchingType(ns3::Address const & addr) [member function] cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'addr')], is_static=True) ## inet6-socket-address.h (module 'network'): void ns3::Inet6SocketAddress::SetIpv6(ns3::Ipv6Address ipv6) [member function] cls.add_method('SetIpv6', 'void', [param('ns3::Ipv6Address', 'ipv6')]) ## inet6-socket-address.h (module 'network'): void ns3::Inet6SocketAddress::SetPort(uint16_t port) [member function] cls.add_method('SetPort', 'void', [param('uint16_t', 'port')]) return def register_Ns3InetSocketAddress_methods(root_module, cls): ## inet-socket-address.h (module 'network'): ns3::InetSocketAddress::InetSocketAddress(ns3::InetSocketAddress const & arg0) [copy constructor] cls.add_constructor([param('ns3::InetSocketAddress const &', 'arg0')]) ## inet-socket-address.h (module 'network'): ns3::InetSocketAddress::InetSocketAddress(ns3::Ipv4Address ipv4, uint16_t port) [constructor] cls.add_constructor([param('ns3::Ipv4Address', 'ipv4'), param('uint16_t', 'port')]) ## inet-socket-address.h (module 'network'): ns3::InetSocketAddress::InetSocketAddress(ns3::Ipv4Address ipv4) [constructor] cls.add_constructor([param('ns3::Ipv4Address', 'ipv4')]) ## inet-socket-address.h (module 'network'): ns3::InetSocketAddress::InetSocketAddress(uint16_t port) [constructor] cls.add_constructor([param('uint16_t', 'port')]) ## inet-socket-address.h (module 'network'): ns3::InetSocketAddress::InetSocketAddress(char const * ipv4, uint16_t port) [constructor] cls.add_constructor([param('char const *', 'ipv4'), param('uint16_t', 'port')]) ## inet-socket-address.h (module 'network'): ns3::InetSocketAddress::InetSocketAddress(char const * ipv4) [constructor] cls.add_constructor([param('char const *', 'ipv4')]) ## inet-socket-address.h (module 'network'): static ns3::InetSocketAddress 
ns3::InetSocketAddress::ConvertFrom(ns3::Address const & address) [member function] cls.add_method('ConvertFrom', 'ns3::InetSocketAddress', [param('ns3::Address const &', 'address')], is_static=True) ## inet-socket-address.h (module 'network'): ns3::Ipv4Address ns3::InetSocketAddress::GetIpv4() const [member function] cls.add_method('GetIpv4', 'ns3::Ipv4Address', [], is_const=True) ## inet-socket-address.h (module 'network'): uint16_t ns3::InetSocketAddress::GetPort() const [member function] cls.add_method('GetPort', 'uint16_t', [], is_const=True) ## inet-socket-address.h (module 'network'): static bool ns3::InetSocketAddress::IsMatchingType(ns3::Address const & address) [member function] cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True) ## inet-socket-address.h (module 'network'): void ns3::InetSocketAddress::SetIpv4(ns3::Ipv4Address address) [member function] cls.add_method('SetIpv4', 'void', [param('ns3::Ipv4Address', 'address')]) ## inet-socket-address.h (module 'network'): void ns3::InetSocketAddress::SetPort(uint16_t port) [member function] cls.add_method('SetPort', 'void', [param('uint16_t', 'port')]) return def register_Ns3Ipv4Address_methods(root_module, cls): cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(ns3::Ipv4Address const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4Address const &', 'arg0')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(uint32_t address) [constructor] cls.add_constructor([param('uint32_t', 'address')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(char const * address) [constructor] cls.add_constructor([param('char const *', 'address')]) ## 
ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::CombineMask(ns3::Ipv4Mask const & mask) const [member function] cls.add_method('CombineMask', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::ConvertFrom(ns3::Address const & address) [member function] cls.add_method('ConvertFrom', 'ns3::Ipv4Address', [param('ns3::Address const &', 'address')], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::Deserialize(uint8_t const * buf) [member function] cls.add_method('Deserialize', 'ns3::Ipv4Address', [param('uint8_t const *', 'buf')], is_static=True) ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Address::Get() const [member function] cls.add_method('Get', 'uint32_t', [], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetAny() [member function] cls.add_method('GetAny', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetBroadcast() [member function] cls.add_method('GetBroadcast', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetLoopback() [member function] cls.add_method('GetLoopback', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::GetSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function] cls.add_method('GetSubnetDirectedBroadcast', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetZero() [member function] cls.add_method('GetZero', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsAny() const [member function] cls.add_method('IsAny', 'bool', [], 
is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsBroadcast() const [member function] cls.add_method('IsBroadcast', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsEqual(ns3::Ipv4Address const & other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ipv4Address const &', 'other')], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalMulticast() const [member function] cls.add_method('IsLocalMulticast', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalhost() const [member function] cls.add_method('IsLocalhost', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): static bool ns3::Ipv4Address::IsMatchingType(ns3::Address const & address) [member function] cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsMulticast() const [member function] cls.add_method('IsMulticast', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function] cls.add_method('IsSubnetDirectedBroadcast', 'bool', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Serialize(uint8_t * buf) const [member function] cls.add_method('Serialize', 'void', [param('uint8_t *', 'buf')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(uint32_t address) [member function] cls.add_method('Set', 'void', [param('uint32_t', 'address')]) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(char const * address) [member function] 
cls.add_method('Set', 'void', [param('char const *', 'address')]) return def register_Ns3Ipv4InterfaceAddress_methods(root_module, cls): cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::Ipv4InterfaceAddress() [constructor] cls.add_constructor([]) ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::Ipv4InterfaceAddress(ns3::Ipv4Address local, ns3::Ipv4Mask mask) [constructor] cls.add_constructor([param('ns3::Ipv4Address', 'local'), param('ns3::Ipv4Mask', 'mask')]) ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::Ipv4InterfaceAddress(ns3::Ipv4InterfaceAddress const & o) [copy constructor] cls.add_constructor([param('ns3::Ipv4InterfaceAddress const &', 'o')]) ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4InterfaceAddress::GetBroadcast() const [member function] cls.add_method('GetBroadcast', 'ns3::Ipv4Address', [], is_const=True) ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4InterfaceAddress::GetLocal() const [member function] cls.add_method('GetLocal', 'ns3::Ipv4Address', [], is_const=True) ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4Mask ns3::Ipv4InterfaceAddress::GetMask() const [member function] cls.add_method('GetMask', 'ns3::Ipv4Mask', [], is_const=True) ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e ns3::Ipv4InterfaceAddress::GetScope() const [member function] cls.add_method('GetScope', 'ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e', [], is_const=True) ## ipv4-interface-address.h (module 'internet'): bool ns3::Ipv4InterfaceAddress::IsSecondary() const [member function] cls.add_method('IsSecondary', 'bool', [], is_const=True) ## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetBroadcast(ns3::Ipv4Address broadcast) 
[member function] cls.add_method('SetBroadcast', 'void', [param('ns3::Ipv4Address', 'broadcast')]) ## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetLocal(ns3::Ipv4Address local) [member function] cls.add_method('SetLocal', 'void', [param('ns3::Ipv4Address', 'local')]) ## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetMask(ns3::Ipv4Mask mask) [member function] cls.add_method('SetMask', 'void', [param('ns3::Ipv4Mask', 'mask')]) ## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetPrimary() [member function] cls.add_method('SetPrimary', 'void', []) ## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetScope(ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e scope) [member function] cls.add_method('SetScope', 'void', [param('ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e', 'scope')]) ## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetSecondary() [member function] cls.add_method('SetSecondary', 'void', []) return def register_Ns3Ipv4Mask_methods(root_module, cls): cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(ns3::Ipv4Mask const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4Mask const &', 'arg0')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(uint32_t mask) [constructor] cls.add_constructor([param('uint32_t', 'mask')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(char const * mask) [constructor] cls.add_constructor([param('char const *', 'mask')]) ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::Get() const [member function] cls.add_method('Get', 'uint32_t', [], is_const=True) ## ipv4-address.h (module 'network'): 
uint32_t ns3::Ipv4Mask::GetInverse() const [member function] cls.add_method('GetInverse', 'uint32_t', [], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetLoopback() [member function] cls.add_method('GetLoopback', 'ns3::Ipv4Mask', [], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetOnes() [member function] cls.add_method('GetOnes', 'ns3::Ipv4Mask', [], is_static=True) ## ipv4-address.h (module 'network'): uint16_t ns3::Ipv4Mask::GetPrefixLength() const [member function] cls.add_method('GetPrefixLength', 'uint16_t', [], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetZero() [member function] cls.add_method('GetZero', 'ns3::Ipv4Mask', [], is_static=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsEqual(ns3::Ipv4Mask other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ipv4Mask', 'other')], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsMatch(ns3::Ipv4Address a, ns3::Ipv4Address b) const [member function] cls.add_method('IsMatch', 'bool', [param('ns3::Ipv4Address', 'a'), param('ns3::Ipv4Address', 'b')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Set(uint32_t mask) [member function] cls.add_method('Set', 'void', [param('uint32_t', 'mask')]) return def register_Ns3Ipv4RoutingHelper_methods(root_module, cls): ## ipv4-routing-helper.h (module 'internet'): ns3::Ipv4RoutingHelper::Ipv4RoutingHelper() [constructor] cls.add_constructor([]) ## ipv4-routing-helper.h (module 'internet'): ns3::Ipv4RoutingHelper::Ipv4RoutingHelper(ns3::Ipv4RoutingHelper const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4RoutingHelper const &', 'arg0')]) ## 
ipv4-routing-helper.h (module 'internet'): ns3::Ipv4RoutingHelper * ns3::Ipv4RoutingHelper::Copy() const [member function] cls.add_method('Copy', 'ns3::Ipv4RoutingHelper *', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## ipv4-routing-helper.h (module 'internet'): ns3::Ptr<ns3::Ipv4RoutingProtocol> ns3::Ipv4RoutingHelper::Create(ns3::Ptr<ns3::Node> node) const [member function] cls.add_method('Create', 'ns3::Ptr< ns3::Ipv4RoutingProtocol >', [param('ns3::Ptr< ns3::Node >', 'node')], is_pure_virtual=True, is_const=True, is_virtual=True) ## ipv4-routing-helper.h (module 'internet'): static void ns3::Ipv4RoutingHelper::PrintNeighborCacheAllAt(ns3::Time printTime, ns3::Ptr<ns3::OutputStreamWrapper> stream) [member function] cls.add_method('PrintNeighborCacheAllAt', 'void', [param('ns3::Time', 'printTime'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')], is_static=True) ## ipv4-routing-helper.h (module 'internet'): static void ns3::Ipv4RoutingHelper::PrintNeighborCacheAllEvery(ns3::Time printInterval, ns3::Ptr<ns3::OutputStreamWrapper> stream) [member function] cls.add_method('PrintNeighborCacheAllEvery', 'void', [param('ns3::Time', 'printInterval'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')], is_static=True) ## ipv4-routing-helper.h (module 'internet'): static void ns3::Ipv4RoutingHelper::PrintNeighborCacheAt(ns3::Time printTime, ns3::Ptr<ns3::Node> node, ns3::Ptr<ns3::OutputStreamWrapper> stream) [member function] cls.add_method('PrintNeighborCacheAt', 'void', [param('ns3::Time', 'printTime'), param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')], is_static=True) ## ipv4-routing-helper.h (module 'internet'): static void ns3::Ipv4RoutingHelper::PrintNeighborCacheEvery(ns3::Time printInterval, ns3::Ptr<ns3::Node> node, ns3::Ptr<ns3::OutputStreamWrapper> stream) [member function] cls.add_method('PrintNeighborCacheEvery', 'void', [param('ns3::Time', 'printInterval'), param('ns3::Ptr< ns3::Node 
>', 'node'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')], is_static=True)
    # NOTE(review): this file looks like PyBindGen auto-generated output
    # (ns-3 Python API scan); hand edits are normally overwritten on
    # regeneration — TODO confirm before relying on manual changes here.
    # --- tail of register_Ns3Ipv4RoutingHelper_methods (def line is above this chunk) ---
    # ipv4-routing-helper.h (module 'internet'): static routing-table printers.
    cls.add_method('PrintRoutingTableAllAt', 'void', [param('ns3::Time', 'printTime'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')], is_static=True)
    cls.add_method('PrintRoutingTableAllEvery', 'void', [param('ns3::Time', 'printInterval'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')], is_static=True)
    cls.add_method('PrintRoutingTableAt', 'void', [param('ns3::Time', 'printTime'), param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')], is_static=True)
    cls.add_method('PrintRoutingTableEvery', 'void', [param('ns3::Time', 'printInterval'), param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')], is_static=True)
    return

def register_Ns3Ipv6Address_methods(root_module, cls):
    """Register Python bindings for ns3::Ipv6Address.

    Declared in ipv6-address.h (module 'network'). Adds comparison and
    stream operators, constructors, and the member/static methods of the
    C++ class to the binding description in ``cls``.
    """
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # Constructors (default, from string/raw bytes, copy, from pointer).
    cls.add_constructor([])
    cls.add_constructor([param('char const *', 'address')])
    cls.add_constructor([param('uint8_t *', 'address')])
    cls.add_constructor([param('ns3::Ipv6Address const &', 'addr')])
    cls.add_constructor([param('ns3::Ipv6Address const *', 'addr')])
    cls.add_method('CombinePrefix', 'ns3::Ipv6Address', [param('ns3::Ipv6Prefix const &', 'prefix')])
    cls.add_method('ConvertFrom', 'ns3::Ipv6Address', [param('ns3::Address const &', 'address')], is_static=True)
    cls.add_method('Deserialize', 'ns3::Ipv6Address', [param('uint8_t const *', 'buf')], is_static=True)
    # Well-known address accessors (static).
    cls.add_method('GetAllHostsMulticast', 'ns3::Ipv6Address', [], is_static=True)
    cls.add_method('GetAllNodesMulticast', 'ns3::Ipv6Address', [], is_static=True)
    cls.add_method('GetAllRoutersMulticast', 'ns3::Ipv6Address', [], is_static=True)
    cls.add_method('GetAny', 'ns3::Ipv6Address', [], is_static=True)
    cls.add_method('GetBytes', 'void', [param('uint8_t *', 'buf')], is_const=True)
    cls.add_method('GetIpv4MappedAddress', 'ns3::Ipv4Address', [], is_const=True)
    cls.add_method('GetLoopback', 'ns3::Ipv6Address', [], is_static=True)
    cls.add_method('GetOnes', 'ns3::Ipv6Address', [], is_static=True)
    cls.add_method('GetZero', 'ns3::Ipv6Address', [], is_static=True)
    # Predicates; IsAllHostsMulticast is marked deprecated in the C++ API.
    cls.add_method('IsAllHostsMulticast', 'bool', [], deprecated=True, is_const=True)
    cls.add_method('IsAllNodesMulticast', 'bool', [], is_const=True)
    cls.add_method('IsAllRoutersMulticast', 'bool', [], is_const=True)
    cls.add_method('IsAny', 'bool', [], is_const=True)
    cls.add_method('IsDocumentation', 'bool', [], is_const=True)
    cls.add_method('IsEqual', 'bool', [param('ns3::Ipv6Address const &', 'other')], is_const=True)
    cls.add_method('IsIpv4MappedAddress', 'bool', [], is_const=True)
    cls.add_method('IsLinkLocal', 'bool', [], is_const=True)
    cls.add_method('IsLinkLocalMulticast', 'bool', [], is_const=True)
    cls.add_method('IsLocalhost', 'bool', [], is_const=True)
    cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True)
    cls.add_method('IsMulticast', 'bool', [], is_const=True)
    cls.add_method('IsSolicitedMulticast', 'bool', [], is_const=True)
    # Autoconfiguration helpers: one overload per MAC address width.
    cls.add_method('MakeAutoconfiguredAddress', 'ns3::Ipv6Address', [param('ns3::Mac16Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], is_static=True)
    cls.add_method('MakeAutoconfiguredAddress', 'ns3::Ipv6Address', [param('ns3::Mac48Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], is_static=True)
    cls.add_method('MakeAutoconfiguredAddress', 'ns3::Ipv6Address', [param('ns3::Mac64Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], is_static=True)
    cls.add_method('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address', [param('ns3::Mac16Address', 'mac')], is_static=True)
    cls.add_method('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address', [param('ns3::Mac48Address', 'mac')], is_static=True)
    cls.add_method('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address', [param('ns3::Mac64Address', 'mac')], is_static=True)
    cls.add_method('MakeIpv4MappedAddress', 'ns3::Ipv6Address', [param('ns3::Ipv4Address', 'addr')], is_static=True)
    cls.add_method('MakeSolicitedAddress', 'ns3::Ipv6Address', [param('ns3::Ipv6Address', 'addr')], is_static=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
    cls.add_method('Serialize', 'void', [param('uint8_t *', 'buf')], is_const=True)
    cls.add_method('Set', 'void', [param('char const *', 'address')])
    cls.add_method('Set', 'void', [param('uint8_t *', 'address')])
    return

def register_Ns3Ipv6Prefix_methods(root_module, cls):
    """Register Python bindings for ns3::Ipv6Prefix.

    Declared in ipv6-address.h (module 'network').
    """
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    cls.add_constructor([])
    cls.add_constructor([param('uint8_t *', 'prefix')])
    cls.add_constructor([param('char const *', 'prefix')])
    cls.add_constructor([param('uint8_t', 'prefix')])
    cls.add_constructor([param('ns3::Ipv6Prefix const &', 'prefix')])
    cls.add_constructor([param('ns3::Ipv6Prefix const *', 'prefix')])
    cls.add_method('GetBytes', 'void', [param('uint8_t *', 'buf')], is_const=True)
    cls.add_method('GetLoopback', 'ns3::Ipv6Prefix', [], is_static=True)
    cls.add_method('GetOnes', 'ns3::Ipv6Prefix', [], is_static=True)
    cls.add_method('GetPrefixLength', 'uint8_t', [], is_const=True)
    cls.add_method('GetZero', 'ns3::Ipv6Prefix', [], is_static=True)
    cls.add_method('IsEqual', 'bool', [param('ns3::Ipv6Prefix const &', 'other')], is_const=True)
    cls.add_method('IsMatch', 'bool', [param('ns3::Ipv6Address', 'a'), param('ns3::Ipv6Address', 'b')], is_const=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
    return

def register_Ns3Mac48Address_methods(root_module, cls):
    """Register Python bindings for ns3::Mac48Address.

    Declared in mac48-address.h (module 'network').
    """
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    cls.add_constructor([param('ns3::Mac48Address const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('char const *', 'str')])
    cls.add_method('Allocate', 'ns3::Mac48Address', [], is_static=True)
    cls.add_method('ConvertFrom', 'ns3::Mac48Address', [param('ns3::Address const &', 'address')], is_static=True)
    cls.add_method('CopyFrom', 'void', [param('uint8_t const *', 'buffer')])
    cls.add_method('CopyTo', 'void', [param('uint8_t *', 'buffer')], is_const=True)
    cls.add_method('GetBroadcast', 'ns3::Mac48Address', [], is_static=True)
    # GetMulticast is overloaded for IPv4 and IPv6 group addresses.
    cls.add_method('GetMulticast', 'ns3::Mac48Address', [param('ns3::Ipv4Address', 'address')], is_static=True)
    cls.add_method('GetMulticast', 'ns3::Mac48Address', [param('ns3::Ipv6Address', 'address')], is_static=True)
    cls.add_method('GetMulticast6Prefix', 'ns3::Mac48Address', [], is_static=True)
    cls.add_method('GetMulticastPrefix', 'ns3::Mac48Address', [], is_static=True)
    cls.add_method('IsBroadcast', 'bool', [], is_const=True)
    cls.add_method('IsGroup', 'bool', [], is_const=True)
    cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True)
    return

def register_Ns3NetDeviceContainer_methods(root_module, cls):
    """Register Python bindings for ns3::NetDeviceContainer.

    Declared in net-device-container.h (module 'network').
    """
    cls.add_constructor([param('ns3::NetDeviceContainer const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ptr< ns3::NetDevice >', 'dev')])
    cls.add_constructor([param('std::string', 'devName')])
    cls.add_constructor([param('ns3::NetDeviceContainer const &', 'a'), param('ns3::NetDeviceContainer const &', 'b')])
    cls.add_method('Add', 'void', [param('ns3::NetDeviceContainer', 'other')])
    cls.add_method('Add', 'void', [param('ns3::Ptr< ns3::NetDevice >', 'device')])
    cls.add_method('Add', 'void', [param('std::string', 'deviceName')])
    # Begin/End expose the underlying std::vector iterators.
    cls.add_method('Begin', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::NetDevice > const, std::vector< ns3::Ptr< ns3::NetDevice > > >', [], is_const=True)
    cls.add_method('End', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::NetDevice > const, std::vector< ns3::Ptr< ns3::NetDevice > > >', [], is_const=True)
    cls.add_method('Get', 'ns3::Ptr< ns3::NetDevice >', [param('uint32_t', 'i')], is_const=True)
    cls.add_method('GetN', 'uint32_t', [], is_const=True)
    return

def register_Ns3NodeContainer_methods(root_module, cls):
    """Register Python bindings for ns3::NodeContainer.

    Declared in node-container.h (module 'network').
    """
    cls.add_constructor([param('ns3::NodeContainer const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ptr< ns3::Node >', 'node')])
    cls.add_constructor([param('std::string', 'nodeName')])
    # Concatenating constructors for 2..5 source containers.
    cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b')])
    cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c')])
    cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd')])
    cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd'), param('ns3::NodeContainer const &', 'e')])
    cls.add_method('Add', 'void', [param('ns3::NodeContainer', 'other')])
    cls.add_method('Add', 'void', [param('ns3::Ptr< ns3::Node >', 'node')])
    cls.add_method('Add', 'void', [param('std::string', 'nodeName')])
    cls.add_method('Begin', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >', [], is_const=True)
    cls.add_method('Create', 'void', [param('uint32_t', 'n')])
    cls.add_method('Create', 'void', [param('uint32_t', 'n'), param('uint32_t', 'systemId')])
    cls.add_method('End', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >', [], is_const=True)
    cls.add_method('Get', 'ns3::Ptr< ns3::Node >', [param('uint32_t', 'i')], is_const=True)
    cls.add_method('GetGlobal', 'ns3::NodeContainer', [], is_static=True)
    cls.add_method('GetN', 'uint32_t', [], is_const=True)
    return

def register_Ns3NodeList_methods(root_module, cls):
    """Register Python bindings for ns3::NodeList (static node registry).

    Declared in node-list.h (module 'network'); all methods are static.
    """
    cls.add_constructor([])
    cls.add_constructor([param('ns3::NodeList const &', 'arg0')])
    cls.add_method('Add', 'uint32_t', [param('ns3::Ptr< ns3::Node >', 'node')], is_static=True)
    cls.add_method('Begin', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >', [], is_static=True)
    cls.add_method('End', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >', [], is_static=True)
    cls.add_method('GetNNodes', 'uint32_t', [], is_static=True)
    cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [param('uint32_t', 'n')], is_static=True)
    return

def register_Ns3ObjectBase_methods(root_module, cls):
    """Register Python bindings for ns3::ObjectBase.

    Declared in object-base.h (module 'core'): attribute get/set and
    trace connect/disconnect API, plus the protected construction hooks.
    """
    cls.add_constructor([])
    cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
    cls.add_method('GetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')], is_const=True)
    cls.add_method('GetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')], is_const=True)
    # Pure virtual in C++; subclasses must override.
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('SetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    cls.add_method('SetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    cls.add_method('TraceConnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    cls.add_method('TraceConnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    cls.add_method('TraceDisconnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    cls.add_method('TraceDisconnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    # Protected members.
    cls.add_method('ConstructSelf', 'void', [param('ns3::AttributeConstructionList const &', 'attributes')], visibility='protected')
    cls.add_method('NotifyConstructionCompleted', 'void', [], visibility='protected', is_virtual=True)
    return

def register_Ns3ObjectDeleter_methods(root_module, cls):
    """Register Python bindings for ns3::ObjectDeleter.

    Declared in object.h (module 'core').
    """
    cls.add_constructor([])
    cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
    cls.add_method('Delete', 'void', [param('ns3::Object *', 'object')], is_static=True)
    return

def register_Ns3ObjectFactory_methods(root_module, cls):
    """Register Python bindings for ns3::ObjectFactory.

    Declared in object-factory.h (module 'core').
    """
    cls.add_output_stream_operator()
    cls.add_constructor([param('ns3::ObjectFactory const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('std::string', 'typeId')])
    cls.add_method('Create', 'ns3::Ptr< ns3::Object >', [], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True)
    cls.add_method('Set', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    # SetTypeId overloads: TypeId object, C string, std::string.
    cls.add_method('SetTypeId', 'void', [param('ns3::TypeId', 'tid')])
    cls.add_method('SetTypeId', 'void', [param('char const *', 'tid')])
    cls.add_method('SetTypeId', 'void', [param('std::string', 'tid')])
    return

def register_Ns3PacketMetadata_methods(root_module, cls):
    """Register Python bindings for ns3::PacketMetadata.

    Declared in packet-metadata.h (module 'network'): header/trailer
    bookkeeping, fragmentation, and (de)serialization of metadata.
    """
    cls.add_constructor([param('uint64_t', 'uid'), param('uint32_t', 'size')])
    cls.add_constructor([param('ns3::PacketMetadata const &', 'o')])
    cls.add_method('AddAtEnd', 'void', [param('ns3::PacketMetadata const &', 'o')])
    cls.add_method('AddHeader', 'void', [param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
    cls.add_method('AddPaddingAtEnd', 'void', [param('uint32_t', 'end')])
    cls.add_method('AddTrailer', 'void', [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
    cls.add_method('BeginItem', 'ns3::PacketMetadata::ItemIterator', [param('ns3::Buffer', 'buffer')], is_const=True)
    cls.add_method('CreateFragment', 'ns3::PacketMetadata', [param('uint32_t', 'start'), param('uint32_t', 'end')], is_const=True)
    cls.add_method('Deserialize', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    cls.add_method('Enable', 'void', [], is_static=True)
    cls.add_method('EnableChecking', 'void', [], is_static=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    cls.add_method('GetUid', 'uint64_t', [], is_const=True)
    cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'end')])
    cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'start')])
    cls.add_method('RemoveHeader', 'void', [param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
    cls.add_method('RemoveTrailer', 'void', [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
    cls.add_method('Serialize', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True)
    return

def register_Ns3PacketMetadataItem_methods(root_module, cls):
    """Register Python bindings for ns3::PacketMetadata::Item.

    Declared in packet-metadata.h (module 'network'); mostly plain
    mutable instance attributes.
    """
    cls.add_constructor([])
    cls.add_constructor([param('ns3::PacketMetadata::Item const &', 'arg0')])
    cls.add_instance_attribute('current', 'ns3::Buffer::Iterator', is_const=False)
    cls.add_instance_attribute('currentSize', 'uint32_t', is_const=False)
    # "Trimed" spelling matches the upstream C++ member names.
    cls.add_instance_attribute('currentTrimedFromEnd', 'uint32_t', is_const=False)
    cls.add_instance_attribute('currentTrimedFromStart', 'uint32_t', is_const=False)
    cls.add_instance_attribute('isFragment', 'bool', is_const=False)
    cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
    return

def register_Ns3PacketMetadataItemIterator_methods(root_module, cls):
    """Register Python bindings for ns3::PacketMetadata::ItemIterator.

    Declared in packet-metadata.h (module 'network').
    """
    cls.add_constructor([param('ns3::PacketMetadata::ItemIterator const &', 'arg0')])
    cls.add_constructor([param('ns3::PacketMetadata const *', 'metadata'), param('ns3::Buffer', 'buffer')])
    cls.add_method('HasNext', 'bool', [], is_const=True)
    cls.add_method('Next', 'ns3::PacketMetadata::Item', [])
    return

def register_Ns3PacketTagIterator_methods(root_module, cls):
    """Register Python bindings for ns3::PacketTagIterator.

    Declared in packet.h (module 'network').
    """
    cls.add_constructor([param('ns3::PacketTagIterator const &', 'arg0')])
    cls.add_method('HasNext', 'bool', [], is_const=True)
    cls.add_method('Next', 'ns3::PacketTagIterator::Item', [])
    return

def register_Ns3PacketTagIteratorItem_methods(root_module, cls):
    """Register Python bindings for ns3::PacketTagIterator::Item.

    Declared in packet.h (module 'network').
    """
    cls.add_constructor([param('ns3::PacketTagIterator::Item const &', 'arg0')])
    cls.add_method('GetTag', 'void', [param('ns3::Tag &', 'tag')], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True)
    return

def register_Ns3PacketTagList_methods(root_module, cls):
    """Register Python bindings for ns3::PacketTagList.

    Declared in packet-tag-list.h (module 'network').
    """
    cls.add_constructor([])
    cls.add_constructor([param('ns3::PacketTagList const &', 'o')])
    cls.add_method('Add', 'void', [param('ns3::Tag const &', 'tag')], is_const=True)
    cls.add_method('Head', 'ns3::PacketTagList::TagData const *', [], is_const=True)
    cls.add_method('Peek', 'bool', [param('ns3::Tag &', 'tag')], is_const=True)
    cls.add_method('Remove', 'bool', [param('ns3::Tag &', 'tag')])
    cls.add_method('RemoveAll', 'void', [])
    cls.add_method('Replace', 'bool', [param('ns3::Tag &', 'tag')])
    return

def register_Ns3PacketTagListTagData_methods(root_module, cls):
    # --- head of register_Ns3PacketTagListTagData_methods; the rest of this
    # function continues beyond this chunk ---
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData() [constructor]
    cls.add_constructor([])
    ## packet-tag-list.h (module 'network'):
ns3::PacketTagList::TagData::TagData(ns3::PacketTagList::TagData const & arg0) [copy constructor] cls.add_constructor([param('ns3::PacketTagList::TagData const &', 'arg0')]) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::count [variable] cls.add_instance_attribute('count', 'uint32_t', is_const=False) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::data [variable] cls.add_instance_attribute('data', 'uint8_t [ 21 ]', is_const=False) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::next [variable] cls.add_instance_attribute('next', 'ns3::PacketTagList::TagData *', is_const=False) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::tid [variable] cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False) return def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3Simulator_methods(root_module, cls): ## simulator.h (module 'core'): ns3::Simulator::Simulator(ns3::Simulator const & arg0) [copy constructor] cls.add_constructor([param('ns3::Simulator const &', 'arg0')]) ## simulator.h (module 'core'): static void ns3::Simulator::Cancel(ns3::EventId const & id) [member function] cls.add_method('Cancel', 'void', 
[param('ns3::EventId const &', 'id')], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::Destroy() [member function] cls.add_method('Destroy', 'void', [], is_static=True) ## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetContext() [member function] cls.add_method('GetContext', 'uint32_t', [], is_static=True) ## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetDelayLeft(ns3::EventId const & id) [member function] cls.add_method('GetDelayLeft', 'ns3::Time', [param('ns3::EventId const &', 'id')], is_static=True) ## simulator.h (module 'core'): static ns3::Ptr<ns3::SimulatorImpl> ns3::Simulator::GetImplementation() [member function] cls.add_method('GetImplementation', 'ns3::Ptr< ns3::SimulatorImpl >', [], is_static=True) ## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetMaximumSimulationTime() [member function] cls.add_method('GetMaximumSimulationTime', 'ns3::Time', [], is_static=True) ## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetSystemId() [member function] cls.add_method('GetSystemId', 'uint32_t', [], is_static=True) ## simulator.h (module 'core'): static bool ns3::Simulator::IsExpired(ns3::EventId const & id) [member function] cls.add_method('IsExpired', 'bool', [param('ns3::EventId const &', 'id')], is_static=True) ## simulator.h (module 'core'): static bool ns3::Simulator::IsFinished() [member function] cls.add_method('IsFinished', 'bool', [], is_static=True) ## simulator.h (module 'core'): static ns3::Time ns3::Simulator::Now() [member function] cls.add_method('Now', 'ns3::Time', [], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::Remove(ns3::EventId const & id) [member function] cls.add_method('Remove', 'void', [param('ns3::EventId const &', 'id')], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::SetImplementation(ns3::Ptr<ns3::SimulatorImpl> impl) [member function] cls.add_method('SetImplementation', 'void', 
[param('ns3::Ptr< ns3::SimulatorImpl >', 'impl')], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::SetScheduler(ns3::ObjectFactory schedulerFactory) [member function] cls.add_method('SetScheduler', 'void', [param('ns3::ObjectFactory', 'schedulerFactory')], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::Stop() [member function] cls.add_method('Stop', 'void', [], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::Stop(ns3::Time const & delay) [member function] cls.add_method('Stop', 'void', [param('ns3::Time const &', 'delay')], is_static=True) return def register_Ns3Tag_methods(root_module, cls): ## tag.h (module 'network'): ns3::Tag::Tag() [constructor] cls.add_constructor([]) ## tag.h (module 'network'): ns3::Tag::Tag(ns3::Tag const & arg0) [copy constructor] cls.add_constructor([param('ns3::Tag const &', 'arg0')]) ## tag.h (module 'network'): void ns3::Tag::Deserialize(ns3::TagBuffer i) [member function] cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_pure_virtual=True, is_virtual=True) ## tag.h (module 'network'): uint32_t ns3::Tag::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## tag.h (module 'network'): static ns3::TypeId ns3::Tag::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## tag.h (module 'network'): void ns3::Tag::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True) ## tag.h (module 'network'): void ns3::Tag::Serialize(ns3::TagBuffer i) const [member function] cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3TagBuffer_methods(root_module, cls): ## tag-buffer.h (module 'network'): 
ns3::TagBuffer::TagBuffer(ns3::TagBuffer const & arg0) [copy constructor] cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')]) ## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(uint8_t * start, uint8_t * end) [constructor] cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::CopyFrom(ns3::TagBuffer o) [member function] cls.add_method('CopyFrom', 'void', [param('ns3::TagBuffer', 'o')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::Read(uint8_t * buffer, uint32_t size) [member function] cls.add_method('Read', 'void', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')]) ## tag-buffer.h (module 'network'): double ns3::TagBuffer::ReadDouble() [member function] cls.add_method('ReadDouble', 'double', []) ## tag-buffer.h (module 'network'): uint16_t ns3::TagBuffer::ReadU16() [member function] cls.add_method('ReadU16', 'uint16_t', []) ## tag-buffer.h (module 'network'): uint32_t ns3::TagBuffer::ReadU32() [member function] cls.add_method('ReadU32', 'uint32_t', []) ## tag-buffer.h (module 'network'): uint64_t ns3::TagBuffer::ReadU64() [member function] cls.add_method('ReadU64', 'uint64_t', []) ## tag-buffer.h (module 'network'): uint8_t ns3::TagBuffer::ReadU8() [member function] cls.add_method('ReadU8', 'uint8_t', []) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::TrimAtEnd(uint32_t trim) [member function] cls.add_method('TrimAtEnd', 'void', [param('uint32_t', 'trim')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::Write(uint8_t const * buffer, uint32_t size) [member function] cls.add_method('Write', 'void', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteDouble(double v) [member function] cls.add_method('WriteDouble', 'void', [param('double', 'v')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU16(uint16_t data) [member function] 
cls.add_method('WriteU16', 'void', [param('uint16_t', 'data')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU32(uint32_t data) [member function] cls.add_method('WriteU32', 'void', [param('uint32_t', 'data')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU64(uint64_t v) [member function] cls.add_method('WriteU64', 'void', [param('uint64_t', 'v')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU8(uint8_t v) [member function] cls.add_method('WriteU8', 'void', [param('uint8_t', 'v')]) return def register_Ns3TimeWithUnit_methods(root_module, cls): cls.add_output_stream_operator() ## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::TimeWithUnit const & arg0) [copy constructor] cls.add_constructor([param('ns3::TimeWithUnit const &', 'arg0')]) ## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::Time const time, ns3::Time::Unit const unit) [constructor] cls.add_constructor([param('ns3::Time const', 'time'), param('ns3::Time::Unit const', 'unit')]) return def register_Ns3TypeId_methods(root_module, cls): cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor] cls.add_constructor([param('char const *', 'name')]) ## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor] cls.add_constructor([param('ns3::TypeId const &', 'o')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('AddAttribute', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), 
param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('AddAttribute', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function] cls.add_method('AddTraceSource', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')], deprecated=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor, std::string callback) [member function] cls.add_method('AddTraceSource', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor'), param('std::string', 'callback')]) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function] cls.add_method('GetAttribute', 'ns3::TypeId::AttributeInformation', [param('uint32_t', 'i')], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function] cls.add_method('GetAttributeFullName', 'std::string', [param('uint32_t', 'i')], is_const=True) ## type-id.h 
(module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function] cls.add_method('GetAttributeN', 'uint32_t', [], is_const=True) ## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function] cls.add_method('GetConstructor', 'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', [], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function] cls.add_method('GetGroupName', 'std::string', [], is_const=True) ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetHash() const [member function] cls.add_method('GetHash', 'uint32_t', [], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function] cls.add_method('GetName', 'std::string', [], is_const=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function] cls.add_method('GetParent', 'ns3::TypeId', [], is_const=True) ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function] cls.add_method('GetRegistered', 'ns3::TypeId', [param('uint32_t', 'i')], is_static=True) ## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function] cls.add_method('GetRegisteredN', 'uint32_t', [], is_static=True) ## type-id.h (module 'core'): std::size_t ns3::TypeId::GetSize() const [member function] cls.add_method('GetSize', 'std::size_t', [], is_const=True) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function] cls.add_method('GetTraceSource', 'ns3::TypeId::TraceSourceInformation', [param('uint32_t', 'i')], is_const=True) ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function] 
cls.add_method('GetTraceSourceN', 'uint32_t', [], is_const=True) ## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function] cls.add_method('GetUid', 'uint16_t', [], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function] cls.add_method('HasConstructor', 'bool', [], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function] cls.add_method('HasParent', 'bool', [], is_const=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function] cls.add_method('HideFromDocumentation', 'ns3::TypeId', []) ## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function] cls.add_method('IsChildOf', 'bool', [param('ns3::TypeId', 'other')], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function] cls.add_method('LookupAttributeByName', 'bool', [param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)], is_const=True) ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByHash(uint32_t hash) [member function] cls.add_method('LookupByHash', 'ns3::TypeId', [param('uint32_t', 'hash')], is_static=True) ## type-id.h (module 'core'): static bool ns3::TypeId::LookupByHashFailSafe(uint32_t hash, ns3::TypeId * tid) [member function] cls.add_method('LookupByHashFailSafe', 'bool', [param('uint32_t', 'hash'), param('ns3::TypeId *', 'tid')], is_static=True) ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function] cls.add_method('LookupByName', 'ns3::TypeId', [param('std::string', 'name')], is_static=True) ## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function] 
cls.add_method('LookupTraceSourceByName', 'ns3::Ptr< ns3::TraceSourceAccessor const >', [param('std::string', 'name')], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function] cls.add_method('MustHideFromDocumentation', 'bool', [], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function] cls.add_method('SetAttributeInitialValue', 'bool', [param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function] cls.add_method('SetGroupName', 'ns3::TypeId', [param('std::string', 'groupName')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function] cls.add_method('SetParent', 'ns3::TypeId', [param('ns3::TypeId', 'tid')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetSize(std::size_t size) [member function] cls.add_method('SetSize', 'ns3::TypeId', [param('std::size_t', 'size')]) ## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t tid) [member function] cls.add_method('SetUid', 'void', [param('uint16_t', 'tid')]) return def register_Ns3TypeIdAttributeInformation_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')]) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable] cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable] 
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable] cls.add_instance_attribute('flags', 'uint32_t', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable] cls.add_instance_attribute('help', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable] cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable] cls.add_instance_attribute('name', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable] cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False) return def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')]) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable] cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::callback [variable] cls.add_instance_attribute('callback', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable] cls.add_instance_attribute('help', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable] cls.add_instance_attribute('name', 
'std::string', is_const=False) return def register_Ns3Empty_methods(root_module, cls): ## empty.h (module 'core'): ns3::empty::empty() [constructor] cls.add_constructor([]) ## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor] cls.add_constructor([param('ns3::empty const &', 'arg0')]) return def register_Ns3Int64x64_t_methods(root_module, cls): cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right')) cls.add_unary_numeric_operator('-') cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right')) cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('>') cls.add_binary_comparison_operator('!=') cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', u'right')) cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', u'right')) cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', u'right')) cls.add_inplace_numeric_operator('/=', param('ns3::int64x64_t const &', u'right')) cls.add_output_stream_operator() cls.add_binary_comparison_operator('<=') cls.add_binary_comparison_operator('==') cls.add_binary_comparison_operator('>=') ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t() [constructor] cls.add_constructor([]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(double v) [constructor] cls.add_constructor([param('double', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long double v) [constructor] cls.add_constructor([param('long double', 'v')]) ## 
int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int v) [constructor] cls.add_constructor([param('int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long int v) [constructor] cls.add_constructor([param('long int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long int v) [constructor] cls.add_constructor([param('long long int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(unsigned int v) [constructor] cls.add_constructor([param('unsigned int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long unsigned int v) [constructor] cls.add_constructor([param('long unsigned int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long unsigned int v) [constructor] cls.add_constructor([param('long long unsigned int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int64_t hi, uint64_t lo) [constructor] cls.add_constructor([param('int64_t', 'hi'), param('uint64_t', 'lo')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(ns3::int64x64_t const & o) [copy constructor] cls.add_constructor([param('ns3::int64x64_t const &', 'o')]) ## int64x64-double.h (module 'core'): double ns3::int64x64_t::GetDouble() const [member function] cls.add_method('GetDouble', 'double', [], is_const=True) ## int64x64-double.h (module 'core'): int64_t ns3::int64x64_t::GetHigh() const [member function] cls.add_method('GetHigh', 'int64_t', [], is_const=True) ## int64x64-double.h (module 'core'): uint64_t ns3::int64x64_t::GetLow() const [member function] cls.add_method('GetLow', 'uint64_t', [], is_const=True) ## int64x64-double.h (module 'core'): static ns3::int64x64_t ns3::int64x64_t::Invert(uint64_t v) [member function] cls.add_method('Invert', 'ns3::int64x64_t', [param('uint64_t', 'v')], is_static=True) ## int64x64-double.h (module 'core'): void ns3::int64x64_t::MulByInvert(ns3::int64x64_t const & o) [member 
function] cls.add_method('MulByInvert', 'void', [param('ns3::int64x64_t const &', 'o')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::implementation [variable] cls.add_static_attribute('implementation', 'ns3::int64x64_t::impl_type const', is_const=True) return def register_Ns3Chunk_methods(root_module, cls): ## chunk.h (module 'network'): ns3::Chunk::Chunk() [constructor] cls.add_constructor([]) ## chunk.h (module 'network'): ns3::Chunk::Chunk(ns3::Chunk const & arg0) [copy constructor] cls.add_constructor([param('ns3::Chunk const &', 'arg0')]) ## chunk.h (module 'network'): uint32_t ns3::Chunk::Deserialize(ns3::Buffer::Iterator start) [member function] cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_virtual=True) ## chunk.h (module 'network'): static ns3::TypeId ns3::Chunk::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## chunk.h (module 'network'): void ns3::Chunk::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3Header_methods(root_module, cls): cls.add_output_stream_operator() ## header.h (module 'network'): ns3::Header::Header() [constructor] cls.add_constructor([]) ## header.h (module 'network'): ns3::Header::Header(ns3::Header const & arg0) [copy constructor] cls.add_constructor([param('ns3::Header const &', 'arg0')]) ## header.h (module 'network'): uint32_t ns3::Header::Deserialize(ns3::Buffer::Iterator start) [member function] cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_virtual=True) ## header.h (module 'network'): uint32_t ns3::Header::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## header.h (module 'network'): static ns3::TypeId 
ns3::Header::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## header.h (module 'network'): void ns3::Header::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True) ## header.h (module 'network'): void ns3::Header::Serialize(ns3::Buffer::Iterator start) const [member function] cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3Ipv4Header_methods(root_module, cls): ## ipv4-header.h (module 'internet'): ns3::Ipv4Header::Ipv4Header(ns3::Ipv4Header const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4Header const &', 'arg0')]) ## ipv4-header.h (module 'internet'): ns3::Ipv4Header::Ipv4Header() [constructor] cls.add_constructor([]) ## ipv4-header.h (module 'internet'): uint32_t ns3::Ipv4Header::Deserialize(ns3::Buffer::Iterator start) [member function] cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True) ## ipv4-header.h (module 'internet'): std::string ns3::Ipv4Header::DscpTypeToString(ns3::Ipv4Header::DscpType dscp) const [member function] cls.add_method('DscpTypeToString', 'std::string', [param('ns3::Ipv4Header::DscpType', 'dscp')], is_const=True) ## ipv4-header.h (module 'internet'): std::string ns3::Ipv4Header::EcnTypeToString(ns3::Ipv4Header::EcnType ecn) const [member function] cls.add_method('EcnTypeToString', 'std::string', [param('ns3::Ipv4Header::EcnType', 'ecn')], is_const=True) ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::EnableChecksum() [member function] cls.add_method('EnableChecksum', 'void', []) ## ipv4-header.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4Header::GetDestination() const [member function] cls.add_method('GetDestination', 'ns3::Ipv4Address', [], is_const=True) ## ipv4-header.h (module 'internet'): 
ns3::Ipv4Header::DscpType ns3::Ipv4Header::GetDscp() const [member function] cls.add_method('GetDscp', 'ns3::Ipv4Header::DscpType', [], is_const=True) ## ipv4-header.h (module 'internet'): ns3::Ipv4Header::EcnType ns3::Ipv4Header::GetEcn() const [member function] cls.add_method('GetEcn', 'ns3::Ipv4Header::EcnType', [], is_const=True) ## ipv4-header.h (module 'internet'): uint16_t ns3::Ipv4Header::GetFragmentOffset() const [member function] cls.add_method('GetFragmentOffset', 'uint16_t', [], is_const=True) ## ipv4-header.h (module 'internet'): uint16_t ns3::Ipv4Header::GetIdentification() const [member function] cls.add_method('GetIdentification', 'uint16_t', [], is_const=True) ## ipv4-header.h (module 'internet'): ns3::TypeId ns3::Ipv4Header::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True) ## ipv4-header.h (module 'internet'): uint16_t ns3::Ipv4Header::GetPayloadSize() const [member function] cls.add_method('GetPayloadSize', 'uint16_t', [], is_const=True) ## ipv4-header.h (module 'internet'): uint8_t ns3::Ipv4Header::GetProtocol() const [member function] cls.add_method('GetProtocol', 'uint8_t', [], is_const=True) ## ipv4-header.h (module 'internet'): uint32_t ns3::Ipv4Header::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True) ## ipv4-header.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4Header::GetSource() const [member function] cls.add_method('GetSource', 'ns3::Ipv4Address', [], is_const=True) ## ipv4-header.h (module 'internet'): uint8_t ns3::Ipv4Header::GetTos() const [member function] cls.add_method('GetTos', 'uint8_t', [], is_const=True) ## ipv4-header.h (module 'internet'): uint8_t ns3::Ipv4Header::GetTtl() const [member function] cls.add_method('GetTtl', 'uint8_t', [], is_const=True) ## ipv4-header.h (module 'internet'): static ns3::TypeId ns3::Ipv4Header::GetTypeId() [member function] 
cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## ipv4-header.h (module 'internet'): bool ns3::Ipv4Header::IsChecksumOk() const [member function] cls.add_method('IsChecksumOk', 'bool', [], is_const=True) ## ipv4-header.h (module 'internet'): bool ns3::Ipv4Header::IsDontFragment() const [member function] cls.add_method('IsDontFragment', 'bool', [], is_const=True) ## ipv4-header.h (module 'internet'): bool ns3::Ipv4Header::IsLastFragment() const [member function] cls.add_method('IsLastFragment', 'bool', [], is_const=True) ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True) ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::Serialize(ns3::Buffer::Iterator start) const [member function] cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True) ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetDestination(ns3::Ipv4Address destination) [member function] cls.add_method('SetDestination', 'void', [param('ns3::Ipv4Address', 'destination')]) ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetDontFragment() [member function] cls.add_method('SetDontFragment', 'void', []) ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetDscp(ns3::Ipv4Header::DscpType dscp) [member function] cls.add_method('SetDscp', 'void', [param('ns3::Ipv4Header::DscpType', 'dscp')]) ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetEcn(ns3::Ipv4Header::EcnType ecn) [member function] cls.add_method('SetEcn', 'void', [param('ns3::Ipv4Header::EcnType', 'ecn')]) ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetFragmentOffset(uint16_t offsetBytes) [member function] cls.add_method('SetFragmentOffset', 'void', [param('uint16_t', 'offsetBytes')]) ## ipv4-header.h (module 'internet'): void 
ns3::Ipv4Header::SetIdentification(uint16_t identification) [member function] cls.add_method('SetIdentification', 'void', [param('uint16_t', 'identification')]) ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetLastFragment() [member function] cls.add_method('SetLastFragment', 'void', []) ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetMayFragment() [member function] cls.add_method('SetMayFragment', 'void', []) ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetMoreFragments() [member function] cls.add_method('SetMoreFragments', 'void', []) ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetPayloadSize(uint16_t size) [member function] cls.add_method('SetPayloadSize', 'void', [param('uint16_t', 'size')]) ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetProtocol(uint8_t num) [member function] cls.add_method('SetProtocol', 'void', [param('uint8_t', 'num')]) ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetSource(ns3::Ipv4Address source) [member function] cls.add_method('SetSource', 'void', [param('ns3::Ipv4Address', 'source')]) ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetTos(uint8_t tos) [member function] cls.add_method('SetTos', 'void', [param('uint8_t', 'tos')]) ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetTtl(uint8_t ttl) [member function] cls.add_method('SetTtl', 'void', [param('uint8_t', 'ttl')]) return def register_Ns3Ipv4NixVectorHelper_methods(root_module, cls): ## ipv4-nix-vector-helper.h (module 'nix-vector-routing'): ns3::Ipv4NixVectorHelper::Ipv4NixVectorHelper() [constructor] cls.add_constructor([]) ## ipv4-nix-vector-helper.h (module 'nix-vector-routing'): ns3::Ipv4NixVectorHelper::Ipv4NixVectorHelper(ns3::Ipv4NixVectorHelper const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4NixVectorHelper const &', 'arg0')]) ## ipv4-nix-vector-helper.h (module 'nix-vector-routing'): ns3::Ipv4NixVectorHelper * 
ns3::Ipv4NixVectorHelper::Copy() const [member function] cls.add_method('Copy', 'ns3::Ipv4NixVectorHelper *', [], is_const=True, is_virtual=True) ## ipv4-nix-vector-helper.h (module 'nix-vector-routing'): ns3::Ptr<ns3::Ipv4RoutingProtocol> ns3::Ipv4NixVectorHelper::Create(ns3::Ptr<ns3::Node> node) const [member function] cls.add_method('Create', 'ns3::Ptr< ns3::Ipv4RoutingProtocol >', [param('ns3::Ptr< ns3::Node >', 'node')], is_const=True, is_virtual=True) return def register_Ns3Object_methods(root_module, cls): ## object.h (module 'core'): ns3::Object::Object() [constructor] cls.add_constructor([]) ## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function] cls.add_method('AggregateObject', 'void', [param('ns3::Ptr< ns3::Object >', 'other')]) ## object.h (module 'core'): void ns3::Object::Dispose() [member function] cls.add_method('Dispose', 'void', []) ## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function] cls.add_method('GetAggregateIterator', 'ns3::Object::AggregateIterator', [], is_const=True) ## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True) ## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## object.h (module 'core'): void ns3::Object::Initialize() [member function] cls.add_method('Initialize', 'void', []) ## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [copy constructor] cls.add_constructor([param('ns3::Object const &', 'o')], visibility='protected') ## object.h (module 'core'): void ns3::Object::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True) ## object.h (module 'core'): void ns3::Object::DoInitialize() [member function] 
cls.add_method('DoInitialize', 'void', [], visibility='protected', is_virtual=True) ## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function] cls.add_method('NotifyNewAggregate', 'void', [], visibility='protected', is_virtual=True) return def register_Ns3ObjectAggregateIterator_methods(root_module, cls): ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [copy constructor] cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')]) ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor] cls.add_constructor([]) ## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function] cls.add_method('HasNext', 'bool', [], is_const=True) ## object.h (module 'core'): ns3::Ptr<ns3::Object const> ns3::Object::AggregateIterator::Next() [member function] cls.add_method('Next', 'ns3::Ptr< ns3::Object const >', []) return def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', 
[], is_static=True) return def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], 
is_static=True) return def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount(ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter< ns3::EventImpl > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def 
register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter< ns3::Hash::Implementation > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3Ipv4MulticastRoute_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4MulticastRoute__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter< ns3::Ipv4MulticastRoute > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >::Cleanup() 
[member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3Ipv4Route_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4Route__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter< ns3::Ipv4Route > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3NetDeviceQueue_Ns3Empty_Ns3DefaultDeleter__lt__ns3NetDeviceQueue__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter<ns3::NetDeviceQueue> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter<ns3::NetDeviceQueue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter<ns3::NetDeviceQueue> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter< ns3::NetDeviceQueue > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter<ns3::NetDeviceQueue> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def 
register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount(ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter< ns3::NixVector > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >::SimpleRefCount(ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter< ns3::OutputStreamWrapper > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def 
register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter< ns3::Packet > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3QueueItem_Ns3Empty_Ns3DefaultDeleter__lt__ns3QueueItem__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> >::SimpleRefCount(ns3::SimpleRefCount<ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::QueueItem, ns3::empty, ns3::DefaultDeleter< ns3::QueueItem > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): 
ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3Socket_methods(root_module, cls): ## socket.h (module 'network'): ns3::Socket::Socket(ns3::Socket const & arg0) [copy constructor] cls.add_constructor([param('ns3::Socket const &', 'arg0')]) ## socket.h (module 'network'): ns3::Socket::Socket() [constructor] cls.add_constructor([]) ## socket.h (module 'network'): int ns3::Socket::Bind(ns3::Address const & address) [member function] cls.add_method('Bind', 'int', [param('ns3::Address const &', 'address')], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): int ns3::Socket::Bind() [member function] cls.add_method('Bind', 'int', [], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): int ns3::Socket::Bind6() [member function] cls.add_method('Bind6', 'int', [], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): void ns3::Socket::BindToNetDevice(ns3::Ptr<ns3::NetDevice> netdevice) [member function] cls.add_method('BindToNetDevice', 'void', [param('ns3::Ptr< ns3::NetDevice >', 'netdevice')], is_virtual=True) ## socket.h (module 'network'): int ns3::Socket::Close() [member function] cls.add_method('Close', 
'int', [], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): int ns3::Socket::Connect(ns3::Address const & address) [member function] cls.add_method('Connect', 'int', [param('ns3::Address const &', 'address')], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): static ns3::Ptr<ns3::Socket> ns3::Socket::CreateSocket(ns3::Ptr<ns3::Node> node, ns3::TypeId tid) [member function] cls.add_method('CreateSocket', 'ns3::Ptr< ns3::Socket >', [param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::TypeId', 'tid')], is_static=True) ## socket.h (module 'network'): bool ns3::Socket::GetAllowBroadcast() const [member function] cls.add_method('GetAllowBroadcast', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## socket.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Socket::GetBoundNetDevice() [member function] cls.add_method('GetBoundNetDevice', 'ns3::Ptr< ns3::NetDevice >', []) ## socket.h (module 'network'): ns3::Socket::SocketErrno ns3::Socket::GetErrno() const [member function] cls.add_method('GetErrno', 'ns3::Socket::SocketErrno', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## socket.h (module 'network'): uint8_t ns3::Socket::GetIpTos() const [member function] cls.add_method('GetIpTos', 'uint8_t', [], is_const=True) ## socket.h (module 'network'): uint8_t ns3::Socket::GetIpTtl() const [member function] cls.add_method('GetIpTtl', 'uint8_t', [], is_const=True, is_virtual=True) ## socket.h (module 'network'): uint8_t ns3::Socket::GetIpv6HopLimit() const [member function] cls.add_method('GetIpv6HopLimit', 'uint8_t', [], is_const=True, is_virtual=True) ## socket.h (module 'network'): uint8_t ns3::Socket::GetIpv6Tclass() const [member function] cls.add_method('GetIpv6Tclass', 'uint8_t', [], is_const=True) ## socket.h (module 'network'): ns3::Ptr<ns3::Node> ns3::Socket::GetNode() const [member function] cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [], is_pure_virtual=True, is_const=True, 
is_virtual=True) ## socket.h (module 'network'): int ns3::Socket::GetPeerName(ns3::Address & address) const [member function] cls.add_method('GetPeerName', 'int', [param('ns3::Address &', 'address')], is_pure_virtual=True, is_const=True, is_virtual=True) ## socket.h (module 'network'): uint32_t ns3::Socket::GetRxAvailable() const [member function] cls.add_method('GetRxAvailable', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## socket.h (module 'network'): int ns3::Socket::GetSockName(ns3::Address & address) const [member function] cls.add_method('GetSockName', 'int', [param('ns3::Address &', 'address')], is_pure_virtual=True, is_const=True, is_virtual=True) ## socket.h (module 'network'): ns3::Socket::SocketType ns3::Socket::GetSocketType() const [member function] cls.add_method('GetSocketType', 'ns3::Socket::SocketType', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## socket.h (module 'network'): uint32_t ns3::Socket::GetTxAvailable() const [member function] cls.add_method('GetTxAvailable', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## socket.h (module 'network'): static ns3::TypeId ns3::Socket::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## socket.h (module 'network'): void ns3::Socket::Ipv6JoinGroup(ns3::Ipv6Address address, ns3::Socket::Ipv6MulticastFilterMode filterMode, std::vector<ns3::Ipv6Address,std::allocator<ns3::Ipv6Address> > sourceAddresses) [member function] cls.add_method('Ipv6JoinGroup', 'void', [param('ns3::Ipv6Address', 'address'), param('ns3::Socket::Ipv6MulticastFilterMode', 'filterMode'), param('std::vector< ns3::Ipv6Address >', 'sourceAddresses')], is_virtual=True) ## socket.h (module 'network'): void ns3::Socket::Ipv6JoinGroup(ns3::Ipv6Address address) [member function] cls.add_method('Ipv6JoinGroup', 'void', [param('ns3::Ipv6Address', 'address')], is_virtual=True) ## socket.h (module 'network'): void 
ns3::Socket::Ipv6LeaveGroup() [member function] cls.add_method('Ipv6LeaveGroup', 'void', [], is_virtual=True) ## socket.h (module 'network'): bool ns3::Socket::IsIpRecvTos() const [member function] cls.add_method('IsIpRecvTos', 'bool', [], is_const=True) ## socket.h (module 'network'): bool ns3::Socket::IsIpRecvTtl() const [member function] cls.add_method('IsIpRecvTtl', 'bool', [], is_const=True) ## socket.h (module 'network'): bool ns3::Socket::IsIpv6RecvHopLimit() const [member function] cls.add_method('IsIpv6RecvHopLimit', 'bool', [], is_const=True) ## socket.h (module 'network'): bool ns3::Socket::IsIpv6RecvTclass() const [member function] cls.add_method('IsIpv6RecvTclass', 'bool', [], is_const=True) ## socket.h (module 'network'): bool ns3::Socket::IsRecvPktInfo() const [member function] cls.add_method('IsRecvPktInfo', 'bool', [], is_const=True) ## socket.h (module 'network'): int ns3::Socket::Listen() [member function] cls.add_method('Listen', 'int', [], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Socket::Recv(uint32_t maxSize, uint32_t flags) [member function] cls.add_method('Recv', 'ns3::Ptr< ns3::Packet >', [param('uint32_t', 'maxSize'), param('uint32_t', 'flags')], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Socket::Recv() [member function] cls.add_method('Recv', 'ns3::Ptr< ns3::Packet >', []) ## socket.h (module 'network'): int ns3::Socket::Recv(uint8_t * buf, uint32_t size, uint32_t flags) [member function] cls.add_method('Recv', 'int', [param('uint8_t *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags')]) ## socket.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Socket::RecvFrom(uint32_t maxSize, uint32_t flags, ns3::Address & fromAddress) [member function] cls.add_method('RecvFrom', 'ns3::Ptr< ns3::Packet >', [param('uint32_t', 'maxSize'), param('uint32_t', 'flags'), param('ns3::Address &', 'fromAddress')], is_pure_virtual=True, 
is_virtual=True) ## socket.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Socket::RecvFrom(ns3::Address & fromAddress) [member function] cls.add_method('RecvFrom', 'ns3::Ptr< ns3::Packet >', [param('ns3::Address &', 'fromAddress')]) ## socket.h (module 'network'): int ns3::Socket::RecvFrom(uint8_t * buf, uint32_t size, uint32_t flags, ns3::Address & fromAddress) [member function] cls.add_method('RecvFrom', 'int', [param('uint8_t *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags'), param('ns3::Address &', 'fromAddress')]) ## socket.h (module 'network'): int ns3::Socket::Send(ns3::Ptr<ns3::Packet> p, uint32_t flags) [member function] cls.add_method('Send', 'int', [param('ns3::Ptr< ns3::Packet >', 'p'), param('uint32_t', 'flags')], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): int ns3::Socket::Send(ns3::Ptr<ns3::Packet> p) [member function] cls.add_method('Send', 'int', [param('ns3::Ptr< ns3::Packet >', 'p')]) ## socket.h (module 'network'): int ns3::Socket::Send(uint8_t const * buf, uint32_t size, uint32_t flags) [member function] cls.add_method('Send', 'int', [param('uint8_t const *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags')]) ## socket.h (module 'network'): int ns3::Socket::SendTo(ns3::Ptr<ns3::Packet> p, uint32_t flags, ns3::Address const & toAddress) [member function] cls.add_method('SendTo', 'int', [param('ns3::Ptr< ns3::Packet >', 'p'), param('uint32_t', 'flags'), param('ns3::Address const &', 'toAddress')], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): int ns3::Socket::SendTo(uint8_t const * buf, uint32_t size, uint32_t flags, ns3::Address const & address) [member function] cls.add_method('SendTo', 'int', [param('uint8_t const *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags'), param('ns3::Address const &', 'address')]) ## socket.h (module 'network'): void ns3::Socket::SetAcceptCallback(ns3::Callback<bool, ns3::Ptr<ns3::Socket>, ns3::Address const&, 
ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> connectionRequest, ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> newConnectionCreated) [member function] cls.add_method('SetAcceptCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::Socket >, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'connectionRequest'), param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'newConnectionCreated')]) ## socket.h (module 'network'): bool ns3::Socket::SetAllowBroadcast(bool allowBroadcast) [member function] cls.add_method('SetAllowBroadcast', 'bool', [param('bool', 'allowBroadcast')], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): void ns3::Socket::SetCloseCallbacks(ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> normalClose, ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> errorClose) [member function] cls.add_method('SetCloseCallbacks', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'normalClose'), param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'errorClose')]) ## socket.h (module 'network'): void ns3::Socket::SetConnectCallback(ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> connectionSucceeded, ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, 
ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> connectionFailed) [member function] cls.add_method('SetConnectCallback', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'connectionSucceeded'), param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'connectionFailed')]) ## socket.h (module 'network'): void ns3::Socket::SetDataSentCallback(ns3::Callback<void, ns3::Ptr<ns3::Socket>, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> dataSent) [member function] cls.add_method('SetDataSentCallback', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'dataSent')]) ## socket.h (module 'network'): void ns3::Socket::SetIpRecvTos(bool ipv4RecvTos) [member function] cls.add_method('SetIpRecvTos', 'void', [param('bool', 'ipv4RecvTos')]) ## socket.h (module 'network'): void ns3::Socket::SetIpRecvTtl(bool ipv4RecvTtl) [member function] cls.add_method('SetIpRecvTtl', 'void', [param('bool', 'ipv4RecvTtl')]) ## socket.h (module 'network'): void ns3::Socket::SetIpTos(uint8_t ipTos) [member function] cls.add_method('SetIpTos', 'void', [param('uint8_t', 'ipTos')]) ## socket.h (module 'network'): void ns3::Socket::SetIpTtl(uint8_t ipTtl) [member function] cls.add_method('SetIpTtl', 'void', [param('uint8_t', 'ipTtl')], is_virtual=True) ## socket.h (module 'network'): void ns3::Socket::SetIpv6HopLimit(uint8_t ipHopLimit) [member function] cls.add_method('SetIpv6HopLimit', 'void', [param('uint8_t', 'ipHopLimit')], is_virtual=True) ## socket.h (module 'network'): void ns3::Socket::SetIpv6RecvHopLimit(bool ipv6RecvHopLimit) [member function] cls.add_method('SetIpv6RecvHopLimit', 'void', [param('bool', 
'ipv6RecvHopLimit')]) ## socket.h (module 'network'): void ns3::Socket::SetIpv6RecvTclass(bool ipv6RecvTclass) [member function] cls.add_method('SetIpv6RecvTclass', 'void', [param('bool', 'ipv6RecvTclass')]) ## socket.h (module 'network'): void ns3::Socket::SetIpv6Tclass(int ipTclass) [member function] cls.add_method('SetIpv6Tclass', 'void', [param('int', 'ipTclass')]) ## socket.h (module 'network'): void ns3::Socket::SetRecvCallback(ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> arg0) [member function] cls.add_method('SetRecvCallback', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'arg0')]) ## socket.h (module 'network'): void ns3::Socket::SetRecvPktInfo(bool flag) [member function] cls.add_method('SetRecvPktInfo', 'void', [param('bool', 'flag')]) ## socket.h (module 'network'): void ns3::Socket::SetSendCallback(ns3::Callback<void, ns3::Ptr<ns3::Socket>, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> sendCb) [member function] cls.add_method('SetSendCallback', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'sendCb')]) ## socket.h (module 'network'): int ns3::Socket::ShutdownRecv() [member function] cls.add_method('ShutdownRecv', 'int', [], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): int ns3::Socket::ShutdownSend() [member function] cls.add_method('ShutdownSend', 'int', [], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): void ns3::Socket::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True) ## socket.h (module 'network'): bool ns3::Socket::IsManualIpTos() const [member function] 
cls.add_method('IsManualIpTos', 'bool', [], is_const=True, visibility='protected') ## socket.h (module 'network'): bool ns3::Socket::IsManualIpTtl() const [member function] cls.add_method('IsManualIpTtl', 'bool', [], is_const=True, visibility='protected') ## socket.h (module 'network'): bool ns3::Socket::IsManualIpv6HopLimit() const [member function] cls.add_method('IsManualIpv6HopLimit', 'bool', [], is_const=True, visibility='protected') ## socket.h (module 'network'): bool ns3::Socket::IsManualIpv6Tclass() const [member function] cls.add_method('IsManualIpv6Tclass', 'bool', [], is_const=True, visibility='protected') ## socket.h (module 'network'): void ns3::Socket::NotifyConnectionFailed() [member function] cls.add_method('NotifyConnectionFailed', 'void', [], visibility='protected') ## socket.h (module 'network'): bool ns3::Socket::NotifyConnectionRequest(ns3::Address const & from) [member function] cls.add_method('NotifyConnectionRequest', 'bool', [param('ns3::Address const &', 'from')], visibility='protected') ## socket.h (module 'network'): void ns3::Socket::NotifyConnectionSucceeded() [member function] cls.add_method('NotifyConnectionSucceeded', 'void', [], visibility='protected') ## socket.h (module 'network'): void ns3::Socket::NotifyDataRecv() [member function] cls.add_method('NotifyDataRecv', 'void', [], visibility='protected') ## socket.h (module 'network'): void ns3::Socket::NotifyDataSent(uint32_t size) [member function] cls.add_method('NotifyDataSent', 'void', [param('uint32_t', 'size')], visibility='protected') ## socket.h (module 'network'): void ns3::Socket::NotifyErrorClose() [member function] cls.add_method('NotifyErrorClose', 'void', [], visibility='protected') ## socket.h (module 'network'): void ns3::Socket::NotifyNewConnectionCreated(ns3::Ptr<ns3::Socket> socket, ns3::Address const & from) [member function] cls.add_method('NotifyNewConnectionCreated', 'void', [param('ns3::Ptr< ns3::Socket >', 'socket'), param('ns3::Address const &', 'from')], 
visibility='protected') ## socket.h (module 'network'): void ns3::Socket::NotifyNormalClose() [member function] cls.add_method('NotifyNormalClose', 'void', [], visibility='protected') ## socket.h (module 'network'): void ns3::Socket::NotifySend(uint32_t spaceAvailable) [member function] cls.add_method('NotifySend', 'void', [param('uint32_t', 'spaceAvailable')], visibility='protected') return def register_Ns3SocketAddressTag_methods(root_module, cls): ## socket.h (module 'network'): ns3::SocketAddressTag::SocketAddressTag(ns3::SocketAddressTag const & arg0) [copy constructor] cls.add_constructor([param('ns3::SocketAddressTag const &', 'arg0')]) ## socket.h (module 'network'): ns3::SocketAddressTag::SocketAddressTag() [constructor] cls.add_constructor([]) ## socket.h (module 'network'): void ns3::SocketAddressTag::Deserialize(ns3::TagBuffer i) [member function] cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_virtual=True) ## socket.h (module 'network'): ns3::Address ns3::SocketAddressTag::GetAddress() const [member function] cls.add_method('GetAddress', 'ns3::Address', [], is_const=True) ## socket.h (module 'network'): ns3::TypeId ns3::SocketAddressTag::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True) ## socket.h (module 'network'): uint32_t ns3::SocketAddressTag::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True) ## socket.h (module 'network'): static ns3::TypeId ns3::SocketAddressTag::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## socket.h (module 'network'): void ns3::SocketAddressTag::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True) ## socket.h (module 'network'): void ns3::SocketAddressTag::Serialize(ns3::TagBuffer i) const [member 
function] cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_const=True, is_virtual=True) ## socket.h (module 'network'): void ns3::SocketAddressTag::SetAddress(ns3::Address addr) [member function] cls.add_method('SetAddress', 'void', [param('ns3::Address', 'addr')]) return def register_Ns3SocketIpTosTag_methods(root_module, cls): ## socket.h (module 'network'): ns3::SocketIpTosTag::SocketIpTosTag(ns3::SocketIpTosTag const & arg0) [copy constructor] cls.add_constructor([param('ns3::SocketIpTosTag const &', 'arg0')]) ## socket.h (module 'network'): ns3::SocketIpTosTag::SocketIpTosTag() [constructor] cls.add_constructor([]) ## socket.h (module 'network'): void ns3::SocketIpTosTag::Deserialize(ns3::TagBuffer i) [member function] cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_virtual=True) ## socket.h (module 'network'): ns3::TypeId ns3::SocketIpTosTag::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True) ## socket.h (module 'network'): uint32_t ns3::SocketIpTosTag::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True) ## socket.h (module 'network'): uint8_t ns3::SocketIpTosTag::GetTos() const [member function] cls.add_method('GetTos', 'uint8_t', [], is_const=True) ## socket.h (module 'network'): static ns3::TypeId ns3::SocketIpTosTag::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## socket.h (module 'network'): void ns3::SocketIpTosTag::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True) ## socket.h (module 'network'): void ns3::SocketIpTosTag::Serialize(ns3::TagBuffer i) const [member function] cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_const=True, is_virtual=True) ## socket.h (module 'network'): void 
ns3::SocketIpTosTag::SetTos(uint8_t tos) [member function] cls.add_method('SetTos', 'void', [param('uint8_t', 'tos')]) return def register_Ns3SocketIpTtlTag_methods(root_module, cls): ## socket.h (module 'network'): ns3::SocketIpTtlTag::SocketIpTtlTag(ns3::SocketIpTtlTag const & arg0) [copy constructor] cls.add_constructor([param('ns3::SocketIpTtlTag const &', 'arg0')]) ## socket.h (module 'network'): ns3::SocketIpTtlTag::SocketIpTtlTag() [constructor] cls.add_constructor([]) ## socket.h (module 'network'): void ns3::SocketIpTtlTag::Deserialize(ns3::TagBuffer i) [member function] cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_virtual=True) ## socket.h (module 'network'): ns3::TypeId ns3::SocketIpTtlTag::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True) ## socket.h (module 'network'): uint32_t ns3::SocketIpTtlTag::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True) ## socket.h (module 'network'): uint8_t ns3::SocketIpTtlTag::GetTtl() const [member function] cls.add_method('GetTtl', 'uint8_t', [], is_const=True) ## socket.h (module 'network'): static ns3::TypeId ns3::SocketIpTtlTag::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## socket.h (module 'network'): void ns3::SocketIpTtlTag::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True) ## socket.h (module 'network'): void ns3::SocketIpTtlTag::Serialize(ns3::TagBuffer i) const [member function] cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_const=True, is_virtual=True) ## socket.h (module 'network'): void ns3::SocketIpTtlTag::SetTtl(uint8_t ttl) [member function] cls.add_method('SetTtl', 'void', [param('uint8_t', 'ttl')]) return def 
register_Ns3SocketIpv6HopLimitTag_methods(root_module, cls): ## socket.h (module 'network'): ns3::SocketIpv6HopLimitTag::SocketIpv6HopLimitTag(ns3::SocketIpv6HopLimitTag const & arg0) [copy constructor] cls.add_constructor([param('ns3::SocketIpv6HopLimitTag const &', 'arg0')]) ## socket.h (module 'network'): ns3::SocketIpv6HopLimitTag::SocketIpv6HopLimitTag() [constructor] cls.add_constructor([]) ## socket.h (module 'network'): void ns3::SocketIpv6HopLimitTag::Deserialize(ns3::TagBuffer i) [member function] cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_virtual=True) ## socket.h (module 'network'): uint8_t ns3::SocketIpv6HopLimitTag::GetHopLimit() const [member function] cls.add_method('GetHopLimit', 'uint8_t', [], is_const=True) ## socket.h (module 'network'): ns3::TypeId ns3::SocketIpv6HopLimitTag::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True) ## socket.h (module 'network'): uint32_t ns3::SocketIpv6HopLimitTag::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True) ## socket.h (module 'network'): static ns3::TypeId ns3::SocketIpv6HopLimitTag::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## socket.h (module 'network'): void ns3::SocketIpv6HopLimitTag::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True) ## socket.h (module 'network'): void ns3::SocketIpv6HopLimitTag::Serialize(ns3::TagBuffer i) const [member function] cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_const=True, is_virtual=True) ## socket.h (module 'network'): void ns3::SocketIpv6HopLimitTag::SetHopLimit(uint8_t hopLimit) [member function] cls.add_method('SetHopLimit', 'void', [param('uint8_t', 'hopLimit')]) return def 
register_Ns3SocketIpv6TclassTag_methods(root_module, cls): ## socket.h (module 'network'): ns3::SocketIpv6TclassTag::SocketIpv6TclassTag(ns3::SocketIpv6TclassTag const & arg0) [copy constructor] cls.add_constructor([param('ns3::SocketIpv6TclassTag const &', 'arg0')]) ## socket.h (module 'network'): ns3::SocketIpv6TclassTag::SocketIpv6TclassTag() [constructor] cls.add_constructor([]) ## socket.h (module 'network'): void ns3::SocketIpv6TclassTag::Deserialize(ns3::TagBuffer i) [member function] cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_virtual=True) ## socket.h (module 'network'): ns3::TypeId ns3::SocketIpv6TclassTag::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True) ## socket.h (module 'network'): uint32_t ns3::SocketIpv6TclassTag::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True) ## socket.h (module 'network'): uint8_t ns3::SocketIpv6TclassTag::GetTclass() const [member function] cls.add_method('GetTclass', 'uint8_t', [], is_const=True) ## socket.h (module 'network'): static ns3::TypeId ns3::SocketIpv6TclassTag::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## socket.h (module 'network'): void ns3::SocketIpv6TclassTag::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True) ## socket.h (module 'network'): void ns3::SocketIpv6TclassTag::Serialize(ns3::TagBuffer i) const [member function] cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_const=True, is_virtual=True) ## socket.h (module 'network'): void ns3::SocketIpv6TclassTag::SetTclass(uint8_t tclass) [member function] cls.add_method('SetTclass', 'void', [param('uint8_t', 'tclass')]) return def register_Ns3SocketSetDontFragmentTag_methods(root_module, cls): ## socket.h 
(module 'network'): ns3::SocketSetDontFragmentTag::SocketSetDontFragmentTag(ns3::SocketSetDontFragmentTag const & arg0) [copy constructor] cls.add_constructor([param('ns3::SocketSetDontFragmentTag const &', 'arg0')]) ## socket.h (module 'network'): ns3::SocketSetDontFragmentTag::SocketSetDontFragmentTag() [constructor] cls.add_constructor([]) ## socket.h (module 'network'): void ns3::SocketSetDontFragmentTag::Deserialize(ns3::TagBuffer i) [member function] cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_virtual=True) ## socket.h (module 'network'): void ns3::SocketSetDontFragmentTag::Disable() [member function] cls.add_method('Disable', 'void', []) ## socket.h (module 'network'): void ns3::SocketSetDontFragmentTag::Enable() [member function] cls.add_method('Enable', 'void', []) ## socket.h (module 'network'): ns3::TypeId ns3::SocketSetDontFragmentTag::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True) ## socket.h (module 'network'): uint32_t ns3::SocketSetDontFragmentTag::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True) ## socket.h (module 'network'): static ns3::TypeId ns3::SocketSetDontFragmentTag::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## socket.h (module 'network'): bool ns3::SocketSetDontFragmentTag::IsEnabled() const [member function] cls.add_method('IsEnabled', 'bool', [], is_const=True) ## socket.h (module 'network'): void ns3::SocketSetDontFragmentTag::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True) ## socket.h (module 'network'): void ns3::SocketSetDontFragmentTag::Serialize(ns3::TagBuffer i) const [member function] cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_const=True, is_virtual=True) return def 
register_Ns3Time_methods(root_module, cls): cls.add_binary_numeric_operator('*', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right')) cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right')) cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right')) cls.add_binary_numeric_operator('/', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right')) cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('>') cls.add_binary_comparison_operator('!=') cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', u'right')) cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', u'right')) cls.add_output_stream_operator() cls.add_binary_comparison_operator('<=') cls.add_binary_comparison_operator('==') cls.add_binary_comparison_operator('>=') ## nstime.h (module 'core'): ns3::Time::Time() [constructor] cls.add_constructor([]) ## nstime.h (module 'core'): ns3::Time::Time(ns3::Time const & o) [copy constructor] cls.add_constructor([param('ns3::Time const &', 'o')]) ## nstime.h (module 'core'): ns3::Time::Time(double v) [constructor] cls.add_constructor([param('double', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(int v) [constructor] cls.add_constructor([param('int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long int v) [constructor] cls.add_constructor([param('long int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long long int v) [constructor] cls.add_constructor([param('long long int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(unsigned int v) [constructor] cls.add_constructor([param('unsigned int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long unsigned int v) [constructor] cls.add_constructor([param('long unsigned int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long long unsigned int v) 
[constructor] cls.add_constructor([param('long long unsigned int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(ns3::int64x64_t const & v) [constructor] cls.add_constructor([param('ns3::int64x64_t const &', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(std::string const & s) [constructor] cls.add_constructor([param('std::string const &', 's')]) ## nstime.h (module 'core'): ns3::TimeWithUnit ns3::Time::As(ns3::Time::Unit const unit) const [member function] cls.add_method('As', 'ns3::TimeWithUnit', [param('ns3::Time::Unit const', 'unit')], is_const=True) ## nstime.h (module 'core'): int ns3::Time::Compare(ns3::Time const & o) const [member function] cls.add_method('Compare', 'int', [param('ns3::Time const &', 'o')], is_const=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value) [member function] cls.add_method('From', 'ns3::Time', [param('ns3::int64x64_t const &', 'value')], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value, ns3::Time::Unit unit) [member function] cls.add_method('From', 'ns3::Time', [param('ns3::int64x64_t const &', 'value'), param('ns3::Time::Unit', 'unit')], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromDouble(double value, ns3::Time::Unit unit) [member function] cls.add_method('FromDouble', 'ns3::Time', [param('double', 'value'), param('ns3::Time::Unit', 'unit')], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromInteger(uint64_t value, ns3::Time::Unit unit) [member function] cls.add_method('FromInteger', 'ns3::Time', [param('uint64_t', 'value'), param('ns3::Time::Unit', 'unit')], is_static=True) ## nstime.h (module 'core'): double ns3::Time::GetDays() const [member function] cls.add_method('GetDays', 'double', [], is_const=True) ## nstime.h (module 'core'): double ns3::Time::GetDouble() const [member function] cls.add_method('GetDouble', 'double', [], is_const=True) ## 
nstime.h (module 'core'): int64_t ns3::Time::GetFemtoSeconds() const [member function] cls.add_method('GetFemtoSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): double ns3::Time::GetHours() const [member function] cls.add_method('GetHours', 'double', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetInteger() const [member function] cls.add_method('GetInteger', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetMicroSeconds() const [member function] cls.add_method('GetMicroSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetMilliSeconds() const [member function] cls.add_method('GetMilliSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): double ns3::Time::GetMinutes() const [member function] cls.add_method('GetMinutes', 'double', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetNanoSeconds() const [member function] cls.add_method('GetNanoSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetPicoSeconds() const [member function] cls.add_method('GetPicoSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): static ns3::Time::Unit ns3::Time::GetResolution() [member function] cls.add_method('GetResolution', 'ns3::Time::Unit', [], is_static=True) ## nstime.h (module 'core'): double ns3::Time::GetSeconds() const [member function] cls.add_method('GetSeconds', 'double', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetTimeStep() const [member function] cls.add_method('GetTimeStep', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): double ns3::Time::GetYears() const [member function] cls.add_method('GetYears', 'double', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsNegative() const [member function] cls.add_method('IsNegative', 'bool', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsPositive() const [member 
function] cls.add_method('IsPositive', 'bool', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyNegative() const [member function] cls.add_method('IsStrictlyNegative', 'bool', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyPositive() const [member function] cls.add_method('IsStrictlyPositive', 'bool', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsZero() const [member function] cls.add_method('IsZero', 'bool', [], is_const=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::Max() [member function] cls.add_method('Max', 'ns3::Time', [], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::Min() [member function] cls.add_method('Min', 'ns3::Time', [], is_static=True) ## nstime.h (module 'core'): static void ns3::Time::SetResolution(ns3::Time::Unit resolution) [member function] cls.add_method('SetResolution', 'void', [param('ns3::Time::Unit', 'resolution')], is_static=True) ## nstime.h (module 'core'): static bool ns3::Time::StaticInit() [member function] cls.add_method('StaticInit', 'bool', [], is_static=True) ## nstime.h (module 'core'): ns3::int64x64_t ns3::Time::To(ns3::Time::Unit unit) const [member function] cls.add_method('To', 'ns3::int64x64_t', [param('ns3::Time::Unit', 'unit')], is_const=True) ## nstime.h (module 'core'): double ns3::Time::ToDouble(ns3::Time::Unit unit) const [member function] cls.add_method('ToDouble', 'double', [param('ns3::Time::Unit', 'unit')], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::ToInteger(ns3::Time::Unit unit) const [member function] cls.add_method('ToInteger', 'int64_t', [param('ns3::Time::Unit', 'unit')], is_const=True) return def register_Ns3TraceSourceAccessor_methods(root_module, cls): ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [copy constructor] cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')]) ## 
trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor] cls.add_constructor([]) ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function] cls.add_method('Connect', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True) ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function] cls.add_method('ConnectWithoutContext', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True) ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function] cls.add_method('Disconnect', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True) ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function] cls.add_method('DisconnectWithoutContext', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3Trailer_methods(root_module, cls): cls.add_output_stream_operator() ## trailer.h (module 'network'): ns3::Trailer::Trailer() [constructor] cls.add_constructor([]) ## trailer.h (module 'network'): ns3::Trailer::Trailer(ns3::Trailer const & arg0) [copy 
constructor] cls.add_constructor([param('ns3::Trailer const &', 'arg0')]) ## trailer.h (module 'network'): uint32_t ns3::Trailer::Deserialize(ns3::Buffer::Iterator end) [member function] cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'end')], is_pure_virtual=True, is_virtual=True) ## trailer.h (module 'network'): uint32_t ns3::Trailer::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## trailer.h (module 'network'): static ns3::TypeId ns3::Trailer::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## trailer.h (module 'network'): void ns3::Trailer::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True) ## trailer.h (module 'network'): void ns3::Trailer::Serialize(ns3::Buffer::Iterator start) const [member function] cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3AttributeAccessor_methods(root_module, cls): ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')]) ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function] cls.add_method('Get', 'bool', [param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function] cls.add_method('HasGetter', 'bool', [], 
is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function] cls.add_method('HasSetter', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function] cls.add_method('Set', 'bool', [param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3AttributeChecker_methods(root_module, cls): ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')]) ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function] cls.add_method('Check', 'bool', [param('ns3::AttributeValue const &', 'value')], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function] cls.add_method('Copy', 'bool', [param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function] cls.add_method('Create', 'ns3::Ptr< ns3::AttributeValue >', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function] 
cls.add_method('CreateValidValue', 'ns3::Ptr< ns3::AttributeValue >', [param('ns3::AttributeValue const &', 'value')], is_const=True) ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function] cls.add_method('GetUnderlyingTypeInformation', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function] cls.add_method('GetValueTypeName', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function] cls.add_method('HasUnderlyingTypeInformation', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3AttributeValue_methods(root_module, cls): ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')]) ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_pure_virtual=True, is_virtual=True) ## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< 
ns3::AttributeChecker const >', 'checker')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

# NOTE(review): auto-generated pybindgen registrations for the ns-3 Python
# bindings. Each `##` comment is the C++ signature being bound; do not
# hand-edit these calls — regenerate the bindings instead.

def register_Ns3CallbackChecker_methods(root_module, cls):
    """Register ns3::CallbackChecker constructors on the pybindgen class wrapper."""
    ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
    return

def register_Ns3CallbackImplBase_methods(root_module, cls):
    """Register ns3::CallbackImplBase constructors and member functions."""
    ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
    ## callback.h (module 'core'): std::string ns3::CallbackImplBase::GetTypeid() const [member function]
    cls.add_method('GetTypeid', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function]
    cls.add_method('IsEqual', 'bool', [param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## callback.h (module 'core'): static std::string ns3::CallbackImplBase::Demangle(std::string const & mangled) [member function]
    cls.add_method('Demangle', 'std::string', [param('std::string const &', 'mangled')], is_static=True, visibility='protected')
    return

def register_Ns3CallbackValue_methods(root_module, cls):
    """Register ns3::CallbackValue constructors and member functions."""
    ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
    ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor]
    cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
    ## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function]
    cls.add_method('Set', 'void', [param('ns3::CallbackBase', 'base')])
    return

def register_Ns3Channel_methods(root_module, cls):
    """Register ns3::Channel constructors and member functions."""
    ## channel.h (module 'network'): ns3::Channel::Channel(ns3::Channel const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Channel const &', 'arg0')])
    ## channel.h (module 'network'): ns3::Channel::Channel() [constructor]
    cls.add_constructor([])
    ## channel.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Channel::GetDevice(uint32_t i) const [member function]
    cls.add_method('GetDevice', 'ns3::Ptr< ns3::NetDevice >', [param('uint32_t', 'i')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## channel.h (module 'network'): uint32_t ns3::Channel::GetId() const [member function]
    cls.add_method('GetId', 'uint32_t', [], is_const=True)
    ## channel.h (module 'network'): uint32_t ns3::Channel::GetNDevices() const [member function]
    cls.add_method('GetNDevices', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## channel.h (module 'network'): static ns3::TypeId ns3::Channel::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    return

def register_Ns3EmptyAttributeValue_methods(root_module, cls):
    """Register ns3::EmptyAttributeValue constructors and member functions."""
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, visibility='private', is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], visibility='private', is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, visibility='private', is_virtual=True)
    return

def register_Ns3EventImpl_methods(root_module, cls):
    """Register ns3::EventImpl constructors and member functions."""
    ## event-impl.h (module 'core'): ns3::EventImpl::EventImpl(ns3::EventImpl const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EventImpl const &', 'arg0')])
    ## event-impl.h (module 'core'): ns3::EventImpl::EventImpl() [constructor]
    cls.add_constructor([])
    ## event-impl.h (module 'core'): void ns3::EventImpl::Cancel() [member function]
    cls.add_method('Cancel', 'void', [])
    ## event-impl.h (module 'core'): void ns3::EventImpl::Invoke() [member function]
    cls.add_method('Invoke', 'void', [])
    ## event-impl.h (module 'core'): bool ns3::EventImpl::IsCancelled() [member function]
    cls.add_method('IsCancelled', 'bool', [])
    ## event-impl.h (module 'core'): void ns3::EventImpl::Notify() [member function]
    cls.add_method('Notify', 'void', [], is_pure_virtual=True, visibility='protected', is_virtual=True)
    return

def register_Ns3Ipv4_methods(root_module, cls):
    """Register the abstract ns3::Ipv4 interface (constructors and member functions)."""
    ## ipv4.h (module 'internet'): ns3::Ipv4::Ipv4(ns3::Ipv4 const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4 const &', 'arg0')])
    ## ipv4.h (module 'internet'): ns3::Ipv4::Ipv4() [constructor]
    cls.add_constructor([])
    ## ipv4.h (module 'internet'): bool ns3::Ipv4::AddAddress(uint32_t interface, ns3::Ipv4InterfaceAddress address) [member function]
    cls.add_method('AddAddress', 'bool', [param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')], is_pure_virtual=True, is_virtual=True)
    ## ipv4.h (module 'internet'): uint32_t ns3::Ipv4::AddInterface(ns3::Ptr<ns3::NetDevice> device) [member function]
    cls.add_method('AddInterface', 'uint32_t', [param('ns3::Ptr< ns3::NetDevice >', 'device')], is_pure_virtual=True, is_virtual=True)
    ## ipv4.h (module 'internet'): ns3::Ptr<ns3::Socket> ns3::Ipv4::CreateRawSocket() [member function]
    cls.add_method('CreateRawSocket', 'ns3::Ptr< ns3::Socket >', [], is_pure_virtual=True, is_virtual=True)
    ## ipv4.h (module 'internet'): void ns3::Ipv4::DeleteRawSocket(ns3::Ptr<ns3::Socket> socket) [member function]
    cls.add_method('DeleteRawSocket', 'void', [param('ns3::Ptr< ns3::Socket >', 'socket')], is_pure_virtual=True, is_virtual=True)
    ## ipv4.h (module 'internet'): ns3::Ipv4InterfaceAddress ns3::Ipv4::GetAddress(uint32_t interface, uint32_t addressIndex) const [member function]
    cls.add_method('GetAddress', 'ns3::Ipv4InterfaceAddress', [param('uint32_t', 'interface'), param('uint32_t', 'addressIndex')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## ipv4.h (module 'internet'): int32_t ns3::Ipv4::GetInterfaceForAddress(ns3::Ipv4Address address) const [member function]
    cls.add_method('GetInterfaceForAddress', 'int32_t', [param('ns3::Ipv4Address', 'address')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## ipv4.h (module 'internet'): int32_t ns3::Ipv4::GetInterfaceForDevice(ns3::Ptr<const ns3::NetDevice> device) const [member function]
    cls.add_method('GetInterfaceForDevice', 'int32_t', [param('ns3::Ptr< ns3::NetDevice const >', 'device')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## ipv4.h (module 'internet'): int32_t ns3::Ipv4::GetInterfaceForPrefix(ns3::Ipv4Address address, ns3::Ipv4Mask mask) const [member function]
    cls.add_method('GetInterfaceForPrefix', 'int32_t', [param('ns3::Ipv4Address', 'address'), param('ns3::Ipv4Mask', 'mask')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## ipv4.h (module 'internet'): uint16_t ns3::Ipv4::GetMetric(uint32_t interface) const [member function]
    cls.add_method('GetMetric', 'uint16_t', [param('uint32_t', 'interface')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## ipv4.h (module 'internet'): uint16_t ns3::Ipv4::GetMtu(uint32_t interface) const [member function]
    cls.add_method('GetMtu', 'uint16_t', [param('uint32_t', 'interface')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## ipv4.h (module 'internet'): uint32_t ns3::Ipv4::GetNAddresses(uint32_t interface) const [member function]
    cls.add_method('GetNAddresses', 'uint32_t', [param('uint32_t', 'interface')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## ipv4.h (module 'internet'): uint32_t ns3::Ipv4::GetNInterfaces() const [member function]
    cls.add_method('GetNInterfaces', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## ipv4.h (module 'internet'): ns3::Ptr<ns3::NetDevice> ns3::Ipv4::GetNetDevice(uint32_t interface) [member function]
    cls.add_method('GetNetDevice', 'ns3::Ptr< ns3::NetDevice >', [param('uint32_t', 'interface')], is_pure_virtual=True, is_virtual=True)
    ## ipv4.h (module 'internet'): ns3::Ptr<ns3::IpL4Protocol> ns3::Ipv4::GetProtocol(int protocolNumber) const [member function]
    cls.add_method('GetProtocol', 'ns3::Ptr< ns3::IpL4Protocol >', [param('int', 'protocolNumber')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## ipv4.h (module 'internet'): ns3::Ptr<ns3::IpL4Protocol> ns3::Ipv4::GetProtocol(int protocolNumber, int32_t interfaceIndex) const [member function]
    cls.add_method('GetProtocol', 'ns3::Ptr< ns3::IpL4Protocol >', [param('int', 'protocolNumber'), param('int32_t', 'interfaceIndex')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## ipv4.h (module 'internet'): ns3::Ptr<ns3::Ipv4RoutingProtocol> ns3::Ipv4::GetRoutingProtocol() const [member function]
    cls.add_method('GetRoutingProtocol', 'ns3::Ptr< ns3::Ipv4RoutingProtocol >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## ipv4.h (module 'internet'): static ns3::TypeId ns3::Ipv4::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## ipv4.h (module 'internet'): void ns3::Ipv4::Insert(ns3::Ptr<ns3::IpL4Protocol> protocol) [member function]
    cls.add_method('Insert', 'void', [param('ns3::Ptr< ns3::IpL4Protocol >', 'protocol')], is_pure_virtual=True, is_virtual=True)
    ## ipv4.h (module 'internet'): void ns3::Ipv4::Insert(ns3::Ptr<ns3::IpL4Protocol> protocol, uint32_t interfaceIndex) [member function]
    cls.add_method('Insert', 'void', [param('ns3::Ptr< ns3::IpL4Protocol >', 'protocol'), param('uint32_t', 'interfaceIndex')], is_pure_virtual=True, is_virtual=True)
    ## ipv4.h (module 'internet'): bool ns3::Ipv4::IsDestinationAddress(ns3::Ipv4Address address, uint32_t iif) const [member function]
    cls.add_method('IsDestinationAddress', 'bool', [param('ns3::Ipv4Address', 'address'), param('uint32_t', 'iif')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## ipv4.h (module 'internet'): bool ns3::Ipv4::IsForwarding(uint32_t interface) const [member function]
    cls.add_method('IsForwarding', 'bool', [param('uint32_t', 'interface')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## ipv4.h (module 'internet'): bool ns3::Ipv4::IsUp(uint32_t interface) const [member function]
    cls.add_method('IsUp', 'bool', [param('uint32_t', 'interface')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## ipv4.h (module 'internet'): void ns3::Ipv4::Remove(ns3::Ptr<ns3::IpL4Protocol> protocol) [member function]
    cls.add_method('Remove', 'void', [param('ns3::Ptr< ns3::IpL4Protocol >', 'protocol')], is_pure_virtual=True, is_virtual=True)
    ## ipv4.h (module 'internet'): void ns3::Ipv4::Remove(ns3::Ptr<ns3::IpL4Protocol> protocol, uint32_t interfaceIndex) [member function]
    cls.add_method('Remove', 'void', [param('ns3::Ptr< ns3::IpL4Protocol >', 'protocol'), param('uint32_t', 'interfaceIndex')], is_pure_virtual=True, is_virtual=True)
    ## ipv4.h (module 'internet'): bool ns3::Ipv4::RemoveAddress(uint32_t interface, uint32_t addressIndex) [member function]
    cls.add_method('RemoveAddress', 'bool', [param('uint32_t', 'interface'), param('uint32_t', 'addressIndex')], is_pure_virtual=True, is_virtual=True)
    ## ipv4.h (module 'internet'): bool ns3::Ipv4::RemoveAddress(uint32_t interface, ns3::Ipv4Address address) [member function]
    cls.add_method('RemoveAddress', 'bool', [param('uint32_t', 'interface'), param('ns3::Ipv4Address', 'address')], is_pure_virtual=True, is_virtual=True)
    ## ipv4.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4::SelectSourceAddress(ns3::Ptr<const ns3::NetDevice> device, ns3::Ipv4Address dst, ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e scope) [member function]
    cls.add_method('SelectSourceAddress', 'ns3::Ipv4Address', [param('ns3::Ptr< ns3::NetDevice const >', 'device'), param('ns3::Ipv4Address', 'dst'), param('ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e', 'scope')], is_pure_virtual=True, is_virtual=True)
    ## ipv4.h (module 'internet'): void ns3::Ipv4::Send(ns3::Ptr<ns3::Packet> packet, ns3::Ipv4Address source, ns3::Ipv4Address destination, uint8_t protocol, ns3::Ptr<ns3::Ipv4Route> route) [member function]
    cls.add_method('Send', 'void', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Ipv4Address', 'source'), param('ns3::Ipv4Address', 'destination'), param('uint8_t', 'protocol'), param('ns3::Ptr< ns3::Ipv4Route >', 'route')], is_pure_virtual=True, is_virtual=True)
    ## ipv4.h (module 'internet'): void ns3::Ipv4::SendWithHeader(ns3::Ptr<ns3::Packet> packet, ns3::Ipv4Header ipHeader, ns3::Ptr<ns3::Ipv4Route> route) [member function]
    cls.add_method('SendWithHeader', 'void', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Ipv4Header', 'ipHeader'), param('ns3::Ptr< ns3::Ipv4Route >', 'route')], is_pure_virtual=True, is_virtual=True)
    ## ipv4.h (module 'internet'): void ns3::Ipv4::SetDown(uint32_t interface) [member function]
    cls.add_method('SetDown', 'void', [param('uint32_t', 'interface')], is_pure_virtual=True, is_virtual=True)
    ## ipv4.h (module 'internet'): void ns3::Ipv4::SetForwarding(uint32_t interface, bool val) [member function]
    cls.add_method('SetForwarding', 'void', [param('uint32_t', 'interface'), param('bool', 'val')], is_pure_virtual=True, is_virtual=True)
    ## ipv4.h (module 'internet'): void ns3::Ipv4::SetMetric(uint32_t interface, uint16_t metric) [member function]
    cls.add_method('SetMetric', 'void', [param('uint32_t', 'interface'), param('uint16_t', 'metric')], is_pure_virtual=True, is_virtual=True)
    ## ipv4.h (module 'internet'): void ns3::Ipv4::SetRoutingProtocol(ns3::Ptr<ns3::Ipv4RoutingProtocol> routingProtocol) [member function]
    cls.add_method('SetRoutingProtocol', 'void', [param('ns3::Ptr< ns3::Ipv4RoutingProtocol >', 'routingProtocol')], is_pure_virtual=True, is_virtual=True)
    ## ipv4.h (module 'internet'): void ns3::Ipv4::SetUp(uint32_t interface) [member function]
    cls.add_method('SetUp', 'void', [param('uint32_t', 'interface')], is_pure_virtual=True, is_virtual=True)
    ## ipv4.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4::SourceAddressSelection(uint32_t interface, ns3::Ipv4Address dest) [member function]
    cls.add_method('SourceAddressSelection', 'ns3::Ipv4Address', [param('uint32_t', 'interface'), param('ns3::Ipv4Address', 'dest')], is_pure_virtual=True, is_virtual=True)
    ## ipv4.h (module 'internet'): ns3::Ipv4::IF_ANY [variable]
    cls.add_static_attribute('IF_ANY', 'uint32_t const', is_const=True)
    ## ipv4.h (module 'internet'): bool ns3::Ipv4::GetIpForward() const [member function]
    cls.add_method('GetIpForward', 'bool', [], is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
    ## ipv4.h (module 'internet'): bool ns3::Ipv4::GetWeakEsModel() const [member function]
    cls.add_method('GetWeakEsModel', 'bool', [], is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
    ## ipv4.h (module 'internet'): void ns3::Ipv4::SetIpForward(bool forward) [member function]
    cls.add_method('SetIpForward', 'void', [param('bool', 'forward')], is_pure_virtual=True, visibility='private', is_virtual=True)
    ## ipv4.h (module 'internet'): void ns3::Ipv4::SetWeakEsModel(bool model) [member function]
    cls.add_method('SetWeakEsModel', 'void', [param('bool', 'model')], is_pure_virtual=True, visibility='private', is_virtual=True)
    return

def register_Ns3Ipv4AddressChecker_methods(root_module, cls):
    """Register ns3::Ipv4AddressChecker constructors."""
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker(ns3::Ipv4AddressChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4AddressChecker const &', 'arg0')])
    return

def register_Ns3Ipv4AddressValue_methods(root_module, cls):
    """Register ns3::Ipv4AddressValue constructors and member functions."""
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4AddressValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4AddressValue const &', 'arg0')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4Address const & value) [constructor]
    cls.add_constructor([param('ns3::Ipv4Address const &', 'value')])
    ## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4AddressValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4AddressValue::Get() const [member function]
    cls.add_method('Get', 'ns3::Ipv4Address', [], is_const=True)
    ## ipv4-address.h (module 'network'): std::string ns3::Ipv4AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4AddressValue::Set(ns3::Ipv4Address const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::Ipv4Address const &', 'value')])
    return

def register_Ns3Ipv4MaskChecker_methods(root_module, cls):
    """Register ns3::Ipv4MaskChecker constructors."""
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker(ns3::Ipv4MaskChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4MaskChecker const &', 'arg0')])
    return

def register_Ns3Ipv4MaskValue_methods(root_module, cls):
    """Register ns3::Ipv4MaskValue constructors and member functions."""
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4MaskValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4MaskValue const &', 'arg0')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4Mask const & value) [constructor]
    cls.add_constructor([param('ns3::Ipv4Mask const &', 'value')])
    ## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4MaskValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4MaskValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask ns3::Ipv4MaskValue::Get() const [member function]
    cls.add_method('Get', 'ns3::Ipv4Mask', [], is_const=True)
    ## ipv4-address.h (module 'network'): std::string ns3::Ipv4MaskValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4MaskValue::Set(ns3::Ipv4Mask const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::Ipv4Mask const &', 'value')])
    return

def register_Ns3Ipv4MulticastRoute_methods(root_module, cls):
    """Register ns3::Ipv4MulticastRoute constructors, member functions and constants."""
    ## ipv4-route.h (module 'internet'): ns3::Ipv4MulticastRoute::Ipv4MulticastRoute(ns3::Ipv4MulticastRoute const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4MulticastRoute const &', 'arg0')])
    ## ipv4-route.h (module 'internet'): ns3::Ipv4MulticastRoute::Ipv4MulticastRoute() [constructor]
    cls.add_constructor([])
    ## ipv4-route.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4MulticastRoute::GetGroup() const [member function]
    cls.add_method('GetGroup', 'ns3::Ipv4Address', [], is_const=True)
    ## ipv4-route.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4MulticastRoute::GetOrigin() const [member function]
    cls.add_method('GetOrigin', 'ns3::Ipv4Address', [], is_const=True)
    ## ipv4-route.h (module 'internet'): std::map<unsigned int, unsigned int, std::less<unsigned int>, std::allocator<std::pair<unsigned int const, unsigned int> > > ns3::Ipv4MulticastRoute::GetOutputTtlMap() const [member function]
    cls.add_method('GetOutputTtlMap', 'std::map< unsigned int, unsigned int >', [], is_const=True)
    ## ipv4-route.h (module 'internet'): uint32_t ns3::Ipv4MulticastRoute::GetParent() const [member function]
    cls.add_method('GetParent', 'uint32_t', [], is_const=True)
    ## ipv4-route.h (module 'internet'): void ns3::Ipv4MulticastRoute::SetGroup(ns3::Ipv4Address const group) [member function]
    cls.add_method('SetGroup', 'void', [param('ns3::Ipv4Address const', 'group')])
    ## ipv4-route.h (module 'internet'): void ns3::Ipv4MulticastRoute::SetOrigin(ns3::Ipv4Address const origin) [member function]
    cls.add_method('SetOrigin', 'void', [param('ns3::Ipv4Address const', 'origin')])
    ## ipv4-route.h (module 'internet'): void ns3::Ipv4MulticastRoute::SetOutputTtl(uint32_t oif, uint32_t ttl) [member function]
    cls.add_method('SetOutputTtl', 'void', [param('uint32_t', 'oif'), param('uint32_t', 'ttl')])
    ## ipv4-route.h (module 'internet'): void ns3::Ipv4MulticastRoute::SetParent(uint32_t iif) [member function]
    cls.add_method('SetParent', 'void', [param('uint32_t', 'iif')])
    ## ipv4-route.h (module 'internet'): ns3::Ipv4MulticastRoute::MAX_INTERFACES [variable]
    cls.add_static_attribute('MAX_INTERFACES', 'uint32_t const', is_const=True)
    ## ipv4-route.h (module 'internet'): ns3::Ipv4MulticastRoute::MAX_TTL [variable]
    cls.add_static_attribute('MAX_TTL', 'uint32_t const', is_const=True)
    return

def register_Ns3Ipv4Route_methods(root_module, cls):
    """Register ns3::Ipv4Route operators, constructors and member functions."""
    cls.add_output_stream_operator()
    ## ipv4-route.h (module 'internet'): ns3::Ipv4Route::Ipv4Route(ns3::Ipv4Route const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4Route const &', 'arg0')])
    ## ipv4-route.h (module 'internet'): ns3::Ipv4Route::Ipv4Route() [constructor]
    cls.add_constructor([])
    ## ipv4-route.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4Route::GetDestination() const [member function]
    cls.add_method('GetDestination', 'ns3::Ipv4Address', [], is_const=True)
    ## ipv4-route.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4Route::GetGateway() const [member function]
    cls.add_method('GetGateway', 'ns3::Ipv4Address', [], is_const=True)
    ## ipv4-route.h (module 'internet'): ns3::Ptr<ns3::NetDevice> ns3::Ipv4Route::GetOutputDevice() const [member function]
    cls.add_method('GetOutputDevice', 'ns3::Ptr< ns3::NetDevice >', [], is_const=True)
    ## ipv4-route.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4Route::GetSource() const [member function]
    cls.add_method('GetSource', 'ns3::Ipv4Address', [], is_const=True)
    ## ipv4-route.h (module 'internet'): void ns3::Ipv4Route::SetDestination(ns3::Ipv4Address dest) [member function]
    cls.add_method('SetDestination', 'void', [param('ns3::Ipv4Address', 'dest')])
    ## ipv4-route.h (module 'internet'): void ns3::Ipv4Route::SetGateway(ns3::Ipv4Address gw) [member function]
    cls.add_method('SetGateway', 'void', [param('ns3::Ipv4Address', 'gw')])
    ## ipv4-route.h (module 'internet'): void ns3::Ipv4Route::SetOutputDevice(ns3::Ptr<ns3::NetDevice> outputDevice) [member function]
    cls.add_method('SetOutputDevice', 'void', [param('ns3::Ptr< ns3::NetDevice >', 'outputDevice')])
    ## ipv4-route.h (module 'internet'): void ns3::Ipv4Route::SetSource(ns3::Ipv4Address src) [member function]
    cls.add_method('SetSource', 'void', [param('ns3::Ipv4Address', 'src')])
    return

def register_Ns3Ipv4RoutingProtocol_methods(root_module, cls):
    """Register the abstract ns3::Ipv4RoutingProtocol interface."""
    ## ipv4-routing-protocol.h (module 'internet'): ns3::Ipv4RoutingProtocol::Ipv4RoutingProtocol() [constructor]
    cls.add_constructor([])
    ## ipv4-routing-protocol.h (module 'internet'): ns3::Ipv4RoutingProtocol::Ipv4RoutingProtocol(ns3::Ipv4RoutingProtocol const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4RoutingProtocol const &', 'arg0')])
    ## ipv4-routing-protocol.h (module 'internet'): static ns3::TypeId ns3::Ipv4RoutingProtocol::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## ipv4-routing-protocol.h (module 'internet'): void ns3::Ipv4RoutingProtocol::NotifyAddAddress(uint32_t interface, ns3::Ipv4InterfaceAddress address) [member function]
    cls.add_method('NotifyAddAddress', 'void', [param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')], is_pure_virtual=True, is_virtual=True)
    ## ipv4-routing-protocol.h (module 'internet'): void ns3::Ipv4RoutingProtocol::NotifyInterfaceDown(uint32_t interface) [member function]
    cls.add_method('NotifyInterfaceDown', 'void', [param('uint32_t', 'interface')], is_pure_virtual=True, is_virtual=True)
    ## ipv4-routing-protocol.h (module 'internet'): void ns3::Ipv4RoutingProtocol::NotifyInterfaceUp(uint32_t interface) [member function]
    cls.add_method('NotifyInterfaceUp', 'void', [param('uint32_t', 'interface')], is_pure_virtual=True, is_virtual=True)
    ## ipv4-routing-protocol.h (module 'internet'): void ns3::Ipv4RoutingProtocol::NotifyRemoveAddress(uint32_t interface, ns3::Ipv4InterfaceAddress address) [member function]
    cls.add_method('NotifyRemoveAddress', 'void', [param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')], is_pure_virtual=True, is_virtual=True)
    ## ipv4-routing-protocol.h (module 'internet'): void ns3::Ipv4RoutingProtocol::PrintRoutingTable(ns3::Ptr<ns3::OutputStreamWrapper> stream) const [member function]
    cls.add_method('PrintRoutingTable', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## ipv4-routing-protocol.h (module 'internet'): bool ns3::Ipv4RoutingProtocol::RouteInput(ns3::Ptr<const ns3::Packet> p, ns3::Ipv4Header const & header, ns3::Ptr<const ns3::NetDevice> idev, ns3::Callback<void,ns3::Ptr<ns3::Ipv4Route>,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ucb, ns3::Callback<void,ns3::Ptr<ns3::Ipv4MulticastRoute>,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> mcb, ns3::Callback<void,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,unsigned int,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> lcb, ns3::Callback<void,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,ns3::Socket::SocketErrno,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ecb) [member function]
    cls.add_method('RouteInput', 'bool', [param('ns3::Ptr< ns3::Packet const >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('ns3::Ptr< ns3::NetDevice const >', 'idev'), param('ns3::Callback< void, ns3::Ptr< ns3::Ipv4Route >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ucb'), param('ns3::Callback< void, ns3::Ptr< ns3::Ipv4MulticastRoute >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'mcb'), param('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'lcb'), param('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ecb')], is_pure_virtual=True, is_virtual=True)
    ## ipv4-routing-protocol.h (module 'internet'): ns3::Ptr<ns3::Ipv4Route> ns3::Ipv4RoutingProtocol::RouteOutput(ns3::Ptr<ns3::Packet> p, ns3::Ipv4Header const & header, ns3::Ptr<ns3::NetDevice> oif, ns3::Socket::SocketErrno & sockerr) [member function]
    cls.add_method('RouteOutput', 'ns3::Ptr< ns3::Ipv4Route >', [param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('ns3::Ptr< ns3::NetDevice >', 'oif'), param('ns3::Socket::SocketErrno &', 'sockerr')], is_pure_virtual=True, is_virtual=True)
    ## ipv4-routing-protocol.h (module 'internet'): void ns3::Ipv4RoutingProtocol::SetIpv4(ns3::Ptr<ns3::Ipv4> ipv4) [member function]
    cls.add_method('SetIpv4', 'void', [param('ns3::Ptr< ns3::Ipv4 >', 'ipv4')], is_pure_virtual=True, is_virtual=True)
    return

def register_Ns3Ipv6AddressChecker_methods(root_module, cls):
    """Register ns3::Ipv6AddressChecker constructors."""
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker(ns3::Ipv6AddressChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6AddressChecker const &', 'arg0')])
    return

def register_Ns3Ipv6AddressValue_methods(root_module, cls):
    """Register ns3::Ipv6AddressValue constructors and member functions."""
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6AddressValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6AddressValue const &', 'arg0')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6Address const & value) [constructor]
    cls.add_constructor([param('ns3::Ipv6Address const &', 'value')])
    ## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6AddressValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6AddressValue::Get() const [member function]
    cls.add_method('Get', 'ns3::Ipv6Address', [], is_const=True)
    ## ipv6-address.h (module 'network'): std::string ns3::Ipv6AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6AddressValue::Set(ns3::Ipv6Address const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::Ipv6Address const &', 'value')])
    return

def register_Ns3Ipv6PrefixChecker_methods(root_module, cls):
    """Register ns3::Ipv6PrefixChecker constructors."""
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker(ns3::Ipv6PrefixChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6PrefixChecker const &', 'arg0')])
    return

def register_Ns3Ipv6PrefixValue_methods(root_module, cls):
    """Register ns3::Ipv6PrefixValue constructors and member functions."""
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6PrefixValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6PrefixValue const &', 'arg0')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6Prefix const & value) [constructor]
    cls.add_constructor([param('ns3::Ipv6Prefix const &', 'value')])
    ## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6PrefixValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6PrefixValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix ns3::Ipv6PrefixValue::Get() const [member function]
    cls.add_method('Get', 'ns3::Ipv6Prefix', [], is_const=True)
    ## ipv6-address.h (module 'network'): std::string ns3::Ipv6PrefixValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6PrefixValue::Set(ns3::Ipv6Prefix const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::Ipv6Prefix const &', 'value')])
    return

def register_Ns3Mac48AddressChecker_methods(root_module, cls):
    """Register ns3::Mac48AddressChecker constructors."""
    ## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker() [constructor]
    cls.add_constructor([])
    ## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker(ns3::Mac48AddressChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Mac48AddressChecker const &', 'arg0')])
    return

def register_Ns3Mac48AddressValue_methods(root_module, cls):
    """Register ns3::Mac48AddressValue constructors and member functions."""
    ## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue() [constructor]
    cls.add_constructor([])
    ## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48AddressValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Mac48AddressValue const &', 'arg0')])
    ## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48Address const & value) [constructor]
    cls.add_constructor([param('ns3::Mac48Address const &', 'value')])
    ## mac48-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Mac48AddressValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## mac48-address.h (module 'network'): bool ns3::Mac48AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## mac48-address.h (module 'network'): ns3::Mac48Address ns3::Mac48AddressValue::Get() const [member function]
    cls.add_method('Get', 'ns3::Mac48Address', [], is_const=True)
    ## mac48-address.h (module 'network'): std::string ns3::Mac48AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## mac48-address.h (module 'network'): void ns3::Mac48AddressValue::Set(ns3::Mac48Address const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::Mac48Address const &', 'value')])
    return

def register_Ns3NetDevice_methods(root_module, cls):
    """Register the abstract ns3::NetDevice interface (continues past this chunk)."""
    ## net-device.h (module 'network'): ns3::NetDevice::NetDevice() [constructor]
    cls.add_constructor([])
    ## net-device.h (module 'network'): ns3::NetDevice::NetDevice(ns3::NetDevice const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::NetDevice const &', 'arg0')])
    ## net-device.h (module 'network'): void ns3::NetDevice::AddLinkChangeCallback(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function]
    cls.add_method('AddLinkChangeCallback', 'void', [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetAddress() const [member function]
    cls.add_method('GetAddress', 'ns3::Address', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetBroadcast() const [member function]
    cls.add_method('GetBroadcast', 'ns3::Address', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Ptr<ns3::Channel> ns3::NetDevice::GetChannel() const [member function]
    cls.add_method('GetChannel', 'ns3::Ptr< ns3::Channel >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): uint32_t ns3::NetDevice::GetIfIndex() const [member function]
    cls.add_method('GetIfIndex', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): uint16_t ns3::NetDevice::GetMtu() const [member function]
    cls.add_method('GetMtu', 'uint16_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]
    cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv4Address', 'multicastGroup')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]
    cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv6Address', 'addr')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NetDevice::GetNode() const [member function]
    cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): static ns3::TypeId ns3::NetDevice::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsBridge() const [member function]
    cls.add_method('IsBridge', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsBroadcast() const [member function]
    cls.add_method('IsBroadcast', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsLinkUp() const [member function]
    cls.add_method('IsLinkUp', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsMulticast() const [member function]
    cls.add_method('IsMulticast', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsPointToPoint() const [member function]
    cls.add_method('IsPointToPoint', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::NeedsArp() const [member function]
    cls.add_method('NeedsArp', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function]
    cls.add_method('Send', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function]
    cls.add_method('SendFrom', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_pure_virtual=True, is_virtual=True)
    ##
net-device.h (module 'network'): void ns3::NetDevice::SetAddress(ns3::Address address) [member function] cls.add_method('SetAddress', 'void', [param('ns3::Address', 'address')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): void ns3::NetDevice::SetIfIndex(uint32_t const index) [member function] cls.add_method('SetIfIndex', 'void', [param('uint32_t const', 'index')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::SetMtu(uint16_t const mtu) [member function] cls.add_method('SetMtu', 'bool', [param('uint16_t const', 'mtu')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): void ns3::NetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function] cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): void ns3::NetDevice::SetPromiscReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> cb) [member function] cls.add_method('SetPromiscReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): void ns3::NetDevice::SetReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function] cls.add_method('SetReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], 
is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::SupportsSendFrom() const [member function] cls.add_method('SupportsSendFrom', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3NetDeviceQueue_methods(root_module, cls): ## net-device.h (module 'network'): ns3::NetDeviceQueue::NetDeviceQueue(ns3::NetDeviceQueue const & arg0) [copy constructor] cls.add_constructor([param('ns3::NetDeviceQueue const &', 'arg0')]) ## net-device.h (module 'network'): ns3::NetDeviceQueue::NetDeviceQueue() [constructor] cls.add_constructor([]) ## net-device.h (module 'network'): bool ns3::NetDeviceQueue::HasWakeCallbackSet() const [member function] cls.add_method('HasWakeCallbackSet', 'bool', [], is_const=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDeviceQueue::IsStopped() const [member function] cls.add_method('IsStopped', 'bool', [], is_const=True) ## net-device.h (module 'network'): void ns3::NetDeviceQueue::SetWakeCallback(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function] cls.add_method('SetWakeCallback', 'void', [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_virtual=True) ## net-device.h (module 'network'): void ns3::NetDeviceQueue::Start() [member function] cls.add_method('Start', 'void', [], is_virtual=True) ## net-device.h (module 'network'): void ns3::NetDeviceQueue::Stop() [member function] cls.add_method('Stop', 'void', [], is_virtual=True) ## net-device.h (module 'network'): void ns3::NetDeviceQueue::Wake() [member function] cls.add_method('Wake', 'void', [], is_virtual=True) return def register_Ns3NetDeviceQueueInterface_methods(root_module, cls): ## net-device.h (module 'network'): ns3::NetDeviceQueueInterface::NetDeviceQueueInterface(ns3::NetDeviceQueueInterface 
const & arg0) [copy constructor] cls.add_constructor([param('ns3::NetDeviceQueueInterface const &', 'arg0')]) ## net-device.h (module 'network'): ns3::NetDeviceQueueInterface::NetDeviceQueueInterface() [constructor] cls.add_constructor([]) ## net-device.h (module 'network'): uint8_t ns3::NetDeviceQueueInterface::GetSelectedQueue(ns3::Ptr<ns3::QueueItem> item) const [member function] cls.add_method('GetSelectedQueue', 'uint8_t', [param('ns3::Ptr< ns3::QueueItem >', 'item')], is_const=True) ## net-device.h (module 'network'): ns3::Ptr<ns3::NetDeviceQueue> ns3::NetDeviceQueueInterface::GetTxQueue(uint8_t i) const [member function] cls.add_method('GetTxQueue', 'ns3::Ptr< ns3::NetDeviceQueue >', [param('uint8_t', 'i')], is_const=True) ## net-device.h (module 'network'): uint8_t ns3::NetDeviceQueueInterface::GetTxQueuesN() const [member function] cls.add_method('GetTxQueuesN', 'uint8_t', [], is_const=True) ## net-device.h (module 'network'): static ns3::TypeId ns3::NetDeviceQueueInterface::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## net-device.h (module 'network'): bool ns3::NetDeviceQueueInterface::IsQueueDiscInstalled() const [member function] cls.add_method('IsQueueDiscInstalled', 'bool', [], is_const=True) ## net-device.h (module 'network'): void ns3::NetDeviceQueueInterface::SetQueueDiscInstalled(bool installed) [member function] cls.add_method('SetQueueDiscInstalled', 'void', [param('bool', 'installed')]) ## net-device.h (module 'network'): void ns3::NetDeviceQueueInterface::SetSelectQueueCallback(ns3::Callback<unsigned char, ns3::Ptr<ns3::QueueItem>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function] cls.add_method('SetSelectQueueCallback', 'void', [param('ns3::Callback< unsigned char, ns3::Ptr< ns3::QueueItem >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')]) ## net-device.h (module 
'network'): void ns3::NetDeviceQueueInterface::SetTxQueuesN(uint8_t numTxQueues) [member function] cls.add_method('SetTxQueuesN', 'void', [param('uint8_t', 'numTxQueues')]) ## net-device.h (module 'network'): void ns3::NetDeviceQueueInterface::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True) return def register_Ns3NixVector_methods(root_module, cls): cls.add_output_stream_operator() ## nix-vector.h (module 'network'): ns3::NixVector::NixVector() [constructor] cls.add_constructor([]) ## nix-vector.h (module 'network'): ns3::NixVector::NixVector(ns3::NixVector const & o) [copy constructor] cls.add_constructor([param('ns3::NixVector const &', 'o')]) ## nix-vector.h (module 'network'): void ns3::NixVector::AddNeighborIndex(uint32_t newBits, uint32_t numberOfBits) [member function] cls.add_method('AddNeighborIndex', 'void', [param('uint32_t', 'newBits'), param('uint32_t', 'numberOfBits')]) ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::BitCount(uint32_t numberOfNeighbors) const [member function] cls.add_method('BitCount', 'uint32_t', [param('uint32_t', 'numberOfNeighbors')], is_const=True) ## nix-vector.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::NixVector::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::NixVector >', [], is_const=True) ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Deserialize(uint32_t const * buffer, uint32_t size) [member function] cls.add_method('Deserialize', 'uint32_t', [param('uint32_t const *', 'buffer'), param('uint32_t', 'size')]) ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::ExtractNeighborIndex(uint32_t numberOfBits) [member function] cls.add_method('ExtractNeighborIndex', 'uint32_t', [param('uint32_t', 'numberOfBits')]) ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetRemainingBits() [member function] cls.add_method('GetRemainingBits', 'uint32_t', []) ## nix-vector.h (module 'network'): 
uint32_t ns3::NixVector::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True) ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Serialize(uint32_t * buffer, uint32_t maxSize) const [member function] cls.add_method('Serialize', 'uint32_t', [param('uint32_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True) return def register_Ns3Node_methods(root_module, cls): ## node.h (module 'network'): ns3::Node::Node(ns3::Node const & arg0) [copy constructor] cls.add_constructor([param('ns3::Node const &', 'arg0')]) ## node.h (module 'network'): ns3::Node::Node() [constructor] cls.add_constructor([]) ## node.h (module 'network'): ns3::Node::Node(uint32_t systemId) [constructor] cls.add_constructor([param('uint32_t', 'systemId')]) ## node.h (module 'network'): uint32_t ns3::Node::AddApplication(ns3::Ptr<ns3::Application> application) [member function] cls.add_method('AddApplication', 'uint32_t', [param('ns3::Ptr< ns3::Application >', 'application')]) ## node.h (module 'network'): uint32_t ns3::Node::AddDevice(ns3::Ptr<ns3::NetDevice> device) [member function] cls.add_method('AddDevice', 'uint32_t', [param('ns3::Ptr< ns3::NetDevice >', 'device')]) ## node.h (module 'network'): static bool ns3::Node::ChecksumEnabled() [member function] cls.add_method('ChecksumEnabled', 'bool', [], is_static=True) ## node.h (module 'network'): ns3::Ptr<ns3::Application> ns3::Node::GetApplication(uint32_t index) const [member function] cls.add_method('GetApplication', 'ns3::Ptr< ns3::Application >', [param('uint32_t', 'index')], is_const=True) ## node.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Node::GetDevice(uint32_t index) const [member function] cls.add_method('GetDevice', 'ns3::Ptr< ns3::NetDevice >', [param('uint32_t', 'index')], is_const=True) ## node.h (module 'network'): uint32_t ns3::Node::GetId() const [member function] cls.add_method('GetId', 'uint32_t', [], is_const=True) ## node.h (module 'network'): 
ns3::Time ns3::Node::GetLocalTime() const [member function] cls.add_method('GetLocalTime', 'ns3::Time', [], is_const=True) ## node.h (module 'network'): uint32_t ns3::Node::GetNApplications() const [member function] cls.add_method('GetNApplications', 'uint32_t', [], is_const=True) ## node.h (module 'network'): uint32_t ns3::Node::GetNDevices() const [member function] cls.add_method('GetNDevices', 'uint32_t', [], is_const=True) ## node.h (module 'network'): uint32_t ns3::Node::GetSystemId() const [member function] cls.add_method('GetSystemId', 'uint32_t', [], is_const=True) ## node.h (module 'network'): static ns3::TypeId ns3::Node::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## node.h (module 'network'): void ns3::Node::RegisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function] cls.add_method('RegisterDeviceAdditionListener', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')]) ## node.h (module 'network'): void ns3::Node::RegisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler, uint16_t protocolType, ns3::Ptr<ns3::NetDevice> device, bool promiscuous=false) [member function] cls.add_method('RegisterProtocolHandler', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler'), param('uint16_t', 'protocolType'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'promiscuous', default_value='false')]) ## node.h 
(module 'network'): void ns3::Node::UnregisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function] cls.add_method('UnregisterDeviceAdditionListener', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')]) ## node.h (module 'network'): void ns3::Node::UnregisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler) [member function] cls.add_method('UnregisterProtocolHandler', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler')]) ## node.h (module 'network'): void ns3::Node::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True) ## node.h (module 'network'): void ns3::Node::DoInitialize() [member function] cls.add_method('DoInitialize', 'void', [], visibility='protected', is_virtual=True) return def register_Ns3ObjectFactoryChecker_methods(root_module, cls): ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker() [constructor] cls.add_constructor([]) ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker(ns3::ObjectFactoryChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectFactoryChecker const &', 'arg0')]) return def register_Ns3ObjectFactoryValue_methods(root_module, cls): ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue() [constructor] cls.add_constructor([]) ## object-factory.h (module 'core'): 
ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactoryValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectFactoryValue const &', 'arg0')]) ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactory const & value) [constructor] cls.add_constructor([param('ns3::ObjectFactory const &', 'value')]) ## object-factory.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::ObjectFactoryValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## object-factory.h (module 'core'): bool ns3::ObjectFactoryValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## object-factory.h (module 'core'): ns3::ObjectFactory ns3::ObjectFactoryValue::Get() const [member function] cls.add_method('Get', 'ns3::ObjectFactory', [], is_const=True) ## object-factory.h (module 'core'): std::string ns3::ObjectFactoryValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## object-factory.h (module 'core'): void ns3::ObjectFactoryValue::Set(ns3::ObjectFactory const & value) [member function] cls.add_method('Set', 'void', [param('ns3::ObjectFactory const &', 'value')]) return def register_Ns3OutputStreamWrapper_methods(root_module, cls): ## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(ns3::OutputStreamWrapper const & arg0) [copy constructor] cls.add_constructor([param('ns3::OutputStreamWrapper const &', 'arg0')]) ## output-stream-wrapper.h (module 'network'): 
ns3::OutputStreamWrapper::OutputStreamWrapper(std::string filename, std::_Ios_Openmode filemode) [constructor] cls.add_constructor([param('std::string', 'filename'), param('std::_Ios_Openmode', 'filemode')]) ## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(std::ostream * os) [constructor] cls.add_constructor([param('std::ostream *', 'os')]) ## output-stream-wrapper.h (module 'network'): std::ostream * ns3::OutputStreamWrapper::GetStream() [member function] cls.add_method('GetStream', 'std::ostream *', []) return def register_Ns3Packet_methods(root_module, cls): cls.add_output_stream_operator() ## packet.h (module 'network'): ns3::Packet::Packet() [constructor] cls.add_constructor([]) ## packet.h (module 'network'): ns3::Packet::Packet(ns3::Packet const & o) [copy constructor] cls.add_constructor([param('ns3::Packet const &', 'o')]) ## packet.h (module 'network'): ns3::Packet::Packet(uint32_t size) [constructor] cls.add_constructor([param('uint32_t', 'size')]) ## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size, bool magic) [constructor] cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size'), param('bool', 'magic')]) ## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size) [constructor] cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size')]) ## packet.h (module 'network'): void ns3::Packet::AddAtEnd(ns3::Ptr<const ns3::Packet> packet) [member function] cls.add_method('AddAtEnd', 'void', [param('ns3::Ptr< ns3::Packet const >', 'packet')]) ## packet.h (module 'network'): void ns3::Packet::AddByteTag(ns3::Tag const & tag) const [member function] cls.add_method('AddByteTag', 'void', [param('ns3::Tag const &', 'tag')], is_const=True) ## packet.h (module 'network'): void ns3::Packet::AddHeader(ns3::Header const & header) [member function] cls.add_method('AddHeader', 'void', [param('ns3::Header const 
&', 'header')]) ## packet.h (module 'network'): void ns3::Packet::AddPacketTag(ns3::Tag const & tag) const [member function] cls.add_method('AddPacketTag', 'void', [param('ns3::Tag const &', 'tag')], is_const=True) ## packet.h (module 'network'): void ns3::Packet::AddPaddingAtEnd(uint32_t size) [member function] cls.add_method('AddPaddingAtEnd', 'void', [param('uint32_t', 'size')]) ## packet.h (module 'network'): void ns3::Packet::AddTrailer(ns3::Trailer const & trailer) [member function] cls.add_method('AddTrailer', 'void', [param('ns3::Trailer const &', 'trailer')]) ## packet.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::Packet::BeginItem() const [member function] cls.add_method('BeginItem', 'ns3::PacketMetadata::ItemIterator', [], is_const=True) ## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::Packet >', [], is_const=True) ## packet.h (module 'network'): uint32_t ns3::Packet::CopyData(uint8_t * buffer, uint32_t size) const [member function] cls.add_method('CopyData', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')], is_const=True) ## packet.h (module 'network'): void ns3::Packet::CopyData(std::ostream * os, uint32_t size) const [member function] cls.add_method('CopyData', 'void', [param('std::ostream *', 'os'), param('uint32_t', 'size')], is_const=True) ## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::CreateFragment(uint32_t start, uint32_t length) const [member function] cls.add_method('CreateFragment', 'ns3::Ptr< ns3::Packet >', [param('uint32_t', 'start'), param('uint32_t', 'length')], is_const=True) ## packet.h (module 'network'): static void ns3::Packet::EnableChecking() [member function] cls.add_method('EnableChecking', 'void', [], is_static=True) ## packet.h (module 'network'): static void ns3::Packet::EnablePrinting() [member function] cls.add_method('EnablePrinting', 'void', [], is_static=True) ## packet.h (module 
'network'): bool ns3::Packet::FindFirstMatchingByteTag(ns3::Tag & tag) const [member function] cls.add_method('FindFirstMatchingByteTag', 'bool', [param('ns3::Tag &', 'tag')], is_const=True) ## packet.h (module 'network'): ns3::ByteTagIterator ns3::Packet::GetByteTagIterator() const [member function] cls.add_method('GetByteTagIterator', 'ns3::ByteTagIterator', [], is_const=True) ## packet.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::Packet::GetNixVector() const [member function] cls.add_method('GetNixVector', 'ns3::Ptr< ns3::NixVector >', [], is_const=True) ## packet.h (module 'network'): ns3::PacketTagIterator ns3::Packet::GetPacketTagIterator() const [member function] cls.add_method('GetPacketTagIterator', 'ns3::PacketTagIterator', [], is_const=True) ## packet.h (module 'network'): uint32_t ns3::Packet::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True) ## packet.h (module 'network'): uint32_t ns3::Packet::GetSize() const [member function] cls.add_method('GetSize', 'uint32_t', [], is_const=True) ## packet.h (module 'network'): uint64_t ns3::Packet::GetUid() const [member function] cls.add_method('GetUid', 'uint64_t', [], is_const=True) ## packet.h (module 'network'): uint32_t ns3::Packet::PeekHeader(ns3::Header & header) const [member function] cls.add_method('PeekHeader', 'uint32_t', [param('ns3::Header &', 'header')], is_const=True) ## packet.h (module 'network'): bool ns3::Packet::PeekPacketTag(ns3::Tag & tag) const [member function] cls.add_method('PeekPacketTag', 'bool', [param('ns3::Tag &', 'tag')], is_const=True) ## packet.h (module 'network'): uint32_t ns3::Packet::PeekTrailer(ns3::Trailer & trailer) [member function] cls.add_method('PeekTrailer', 'uint32_t', [param('ns3::Trailer &', 'trailer')]) ## packet.h (module 'network'): void ns3::Packet::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) ## packet.h 
(module 'network'): void ns3::Packet::PrintByteTags(std::ostream & os) const [member function] cls.add_method('PrintByteTags', 'void', [param('std::ostream &', 'os')], is_const=True) ## packet.h (module 'network'): void ns3::Packet::PrintPacketTags(std::ostream & os) const [member function] cls.add_method('PrintPacketTags', 'void', [param('std::ostream &', 'os')], is_const=True) ## packet.h (module 'network'): void ns3::Packet::RemoveAllByteTags() [member function] cls.add_method('RemoveAllByteTags', 'void', []) ## packet.h (module 'network'): void ns3::Packet::RemoveAllPacketTags() [member function] cls.add_method('RemoveAllPacketTags', 'void', []) ## packet.h (module 'network'): void ns3::Packet::RemoveAtEnd(uint32_t size) [member function] cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'size')]) ## packet.h (module 'network'): void ns3::Packet::RemoveAtStart(uint32_t size) [member function] cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'size')]) ## packet.h (module 'network'): uint32_t ns3::Packet::RemoveHeader(ns3::Header & header) [member function] cls.add_method('RemoveHeader', 'uint32_t', [param('ns3::Header &', 'header')]) ## packet.h (module 'network'): bool ns3::Packet::RemovePacketTag(ns3::Tag & tag) [member function] cls.add_method('RemovePacketTag', 'bool', [param('ns3::Tag &', 'tag')]) ## packet.h (module 'network'): uint32_t ns3::Packet::RemoveTrailer(ns3::Trailer & trailer) [member function] cls.add_method('RemoveTrailer', 'uint32_t', [param('ns3::Trailer &', 'trailer')]) ## packet.h (module 'network'): bool ns3::Packet::ReplacePacketTag(ns3::Tag & tag) [member function] cls.add_method('ReplacePacketTag', 'bool', [param('ns3::Tag &', 'tag')]) ## packet.h (module 'network'): uint32_t ns3::Packet::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function] cls.add_method('Serialize', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True) ## packet.h (module 'network'): void 
ns3::Packet::SetNixVector(ns3::Ptr<ns3::NixVector> nixVector) [member function] cls.add_method('SetNixVector', 'void', [param('ns3::Ptr< ns3::NixVector >', 'nixVector')]) ## packet.h (module 'network'): std::string ns3::Packet::ToString() const [member function] cls.add_method('ToString', 'std::string', [], is_const=True) return def register_Ns3QueueItem_methods(root_module, cls): cls.add_output_stream_operator() ## net-device.h (module 'network'): ns3::QueueItem::QueueItem(ns3::Ptr<ns3::Packet> p) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::Packet >', 'p')]) ## net-device.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::QueueItem::GetPacket() const [member function] cls.add_method('GetPacket', 'ns3::Ptr< ns3::Packet >', [], is_const=True) ## net-device.h (module 'network'): uint32_t ns3::QueueItem::GetPacketSize() const [member function] cls.add_method('GetPacketSize', 'uint32_t', [], is_const=True, is_virtual=True) ## net-device.h (module 'network'): void ns3::QueueItem::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True) return def register_Ns3TimeValue_methods(root_module, cls): ## nstime.h (module 'core'): ns3::TimeValue::TimeValue() [constructor] cls.add_constructor([]) ## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::TimeValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::TimeValue const &', 'arg0')]) ## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::Time const & value) [constructor] cls.add_constructor([param('ns3::Time const &', 'value')]) ## nstime.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TimeValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## nstime.h (module 'core'): bool ns3::TimeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] 
cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## nstime.h (module 'core'): ns3::Time ns3::TimeValue::Get() const [member function] cls.add_method('Get', 'ns3::Time', [], is_const=True) ## nstime.h (module 'core'): std::string ns3::TimeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## nstime.h (module 'core'): void ns3::TimeValue::Set(ns3::Time const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Time const &', 'value')]) return def register_Ns3TypeIdChecker_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')]) return def register_Ns3TypeIdValue_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')]) ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor] cls.add_constructor([param('ns3::TypeId const &', 'value')]) ## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] 
cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function] cls.add_method('Get', 'ns3::TypeId', [], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function] cls.add_method('Set', 'void', [param('ns3::TypeId const &', 'value')]) return def register_Ns3AddressChecker_methods(root_module, cls): ## address.h (module 'network'): ns3::AddressChecker::AddressChecker() [constructor] cls.add_constructor([]) ## address.h (module 'network'): ns3::AddressChecker::AddressChecker(ns3::AddressChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::AddressChecker const &', 'arg0')]) return def register_Ns3AddressValue_methods(root_module, cls): ## address.h (module 'network'): ns3::AddressValue::AddressValue() [constructor] cls.add_constructor([]) ## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::AddressValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::AddressValue const &', 'arg0')]) ## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::Address const & value) [constructor] cls.add_constructor([param('ns3::Address const &', 'value')]) ## address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::AddressValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## address.h (module 'network'): bool ns3::AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> 
checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## address.h (module 'network'): ns3::Address ns3::AddressValue::Get() const [member function] cls.add_method('Get', 'ns3::Address', [], is_const=True) ## address.h (module 'network'): std::string ns3::AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## address.h (module 'network'): void ns3::AddressValue::Set(ns3::Address const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Address const &', 'value')]) return def register_Ns3BridgeChannel_methods(root_module, cls): ## bridge-channel.h (module 'bridge'): static ns3::TypeId ns3::BridgeChannel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## bridge-channel.h (module 'bridge'): ns3::BridgeChannel::BridgeChannel() [constructor] cls.add_constructor([]) ## bridge-channel.h (module 'bridge'): void ns3::BridgeChannel::AddChannel(ns3::Ptr<ns3::Channel> bridgedChannel) [member function] cls.add_method('AddChannel', 'void', [param('ns3::Ptr< ns3::Channel >', 'bridgedChannel')]) ## bridge-channel.h (module 'bridge'): uint32_t ns3::BridgeChannel::GetNDevices() const [member function] cls.add_method('GetNDevices', 'uint32_t', [], is_const=True, is_virtual=True) ## bridge-channel.h (module 'bridge'): ns3::Ptr<ns3::NetDevice> ns3::BridgeChannel::GetDevice(uint32_t i) const [member function] cls.add_method('GetDevice', 'ns3::Ptr< ns3::NetDevice >', [param('uint32_t', 'i')], is_const=True, is_virtual=True) return def register_Ns3BridgeNetDevice_methods(root_module, cls): ## bridge-net-device.h (module 'bridge'): ns3::BridgeNetDevice::BridgeNetDevice() [constructor] 
cls.add_constructor([]) ## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::AddBridgePort(ns3::Ptr<ns3::NetDevice> bridgePort) [member function] cls.add_method('AddBridgePort', 'void', [param('ns3::Ptr< ns3::NetDevice >', 'bridgePort')]) ## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::AddLinkChangeCallback(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function] cls.add_method('AddLinkChangeCallback', 'void', [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')], is_virtual=True) ## bridge-net-device.h (module 'bridge'): ns3::Address ns3::BridgeNetDevice::GetAddress() const [member function] cls.add_method('GetAddress', 'ns3::Address', [], is_const=True, is_virtual=True) ## bridge-net-device.h (module 'bridge'): ns3::Ptr<ns3::NetDevice> ns3::BridgeNetDevice::GetBridgePort(uint32_t n) const [member function] cls.add_method('GetBridgePort', 'ns3::Ptr< ns3::NetDevice >', [param('uint32_t', 'n')], is_const=True) ## bridge-net-device.h (module 'bridge'): ns3::Address ns3::BridgeNetDevice::GetBroadcast() const [member function] cls.add_method('GetBroadcast', 'ns3::Address', [], is_const=True, is_virtual=True) ## bridge-net-device.h (module 'bridge'): ns3::Ptr<ns3::Channel> ns3::BridgeNetDevice::GetChannel() const [member function] cls.add_method('GetChannel', 'ns3::Ptr< ns3::Channel >', [], is_const=True, is_virtual=True) ## bridge-net-device.h (module 'bridge'): uint32_t ns3::BridgeNetDevice::GetIfIndex() const [member function] cls.add_method('GetIfIndex', 'uint32_t', [], is_const=True, is_virtual=True) ## bridge-net-device.h (module 'bridge'): uint16_t ns3::BridgeNetDevice::GetMtu() const [member function] cls.add_method('GetMtu', 'uint16_t', [], is_const=True, is_virtual=True) ## bridge-net-device.h (module 'bridge'): ns3::Address 
ns3::BridgeNetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function] cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv4Address', 'multicastGroup')], is_const=True, is_virtual=True) ## bridge-net-device.h (module 'bridge'): ns3::Address ns3::BridgeNetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function] cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv6Address', 'addr')], is_const=True, is_virtual=True) ## bridge-net-device.h (module 'bridge'): uint32_t ns3::BridgeNetDevice::GetNBridgePorts() const [member function] cls.add_method('GetNBridgePorts', 'uint32_t', [], is_const=True) ## bridge-net-device.h (module 'bridge'): ns3::Ptr<ns3::Node> ns3::BridgeNetDevice::GetNode() const [member function] cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [], is_const=True, is_virtual=True) ## bridge-net-device.h (module 'bridge'): static ns3::TypeId ns3::BridgeNetDevice::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::IsBridge() const [member function] cls.add_method('IsBridge', 'bool', [], is_const=True, is_virtual=True) ## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::IsBroadcast() const [member function] cls.add_method('IsBroadcast', 'bool', [], is_const=True, is_virtual=True) ## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::IsLinkUp() const [member function] cls.add_method('IsLinkUp', 'bool', [], is_const=True, is_virtual=True) ## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::IsMulticast() const [member function] cls.add_method('IsMulticast', 'bool', [], is_const=True, is_virtual=True) ## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::IsPointToPoint() const [member function] cls.add_method('IsPointToPoint', 'bool', [], is_const=True, is_virtual=True) ## bridge-net-device.h (module 'bridge'): bool 
ns3::BridgeNetDevice::NeedsArp() const [member function] cls.add_method('NeedsArp', 'bool', [], is_const=True, is_virtual=True) ## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function] cls.add_method('Send', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_virtual=True) ## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function] cls.add_method('SendFrom', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_virtual=True) ## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::SetAddress(ns3::Address address) [member function] cls.add_method('SetAddress', 'void', [param('ns3::Address', 'address')], is_virtual=True) ## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::SetIfIndex(uint32_t const index) [member function] cls.add_method('SetIfIndex', 'void', [param('uint32_t const', 'index')], is_virtual=True) ## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::SetMtu(uint16_t const mtu) [member function] cls.add_method('SetMtu', 'bool', [param('uint16_t const', 'mtu')], is_virtual=True) ## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function] cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')], is_virtual=True) ## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::SetPromiscReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, 
ns3::empty, ns3::empty> cb) [member function] cls.add_method('SetPromiscReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_virtual=True) ## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::SetReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function] cls.add_method('SetReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_virtual=True) ## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::SupportsSendFrom() const [member function] cls.add_method('SupportsSendFrom', 'bool', [], is_const=True, is_virtual=True) ## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True) ## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::ForwardBroadcast(ns3::Ptr<ns3::NetDevice> incomingPort, ns3::Ptr<const ns3::Packet> packet, uint16_t protocol, ns3::Mac48Address src, ns3::Mac48Address dst) [member function] cls.add_method('ForwardBroadcast', 'void', [param('ns3::Ptr< ns3::NetDevice >', 'incomingPort'), param('ns3::Ptr< ns3::Packet const >', 'packet'), param('uint16_t', 'protocol'), param('ns3::Mac48Address', 'src'), param('ns3::Mac48Address', 'dst')], visibility='protected') ## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::ForwardUnicast(ns3::Ptr<ns3::NetDevice> incomingPort, ns3::Ptr<const ns3::Packet> packet, uint16_t protocol, ns3::Mac48Address src, ns3::Mac48Address dst) [member 
function] cls.add_method('ForwardUnicast', 'void', [param('ns3::Ptr< ns3::NetDevice >', 'incomingPort'), param('ns3::Ptr< ns3::Packet const >', 'packet'), param('uint16_t', 'protocol'), param('ns3::Mac48Address', 'src'), param('ns3::Mac48Address', 'dst')], visibility='protected') ## bridge-net-device.h (module 'bridge'): ns3::Ptr<ns3::NetDevice> ns3::BridgeNetDevice::GetLearnedState(ns3::Mac48Address source) [member function] cls.add_method('GetLearnedState', 'ns3::Ptr< ns3::NetDevice >', [param('ns3::Mac48Address', 'source')], visibility='protected') ## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::Learn(ns3::Mac48Address source, ns3::Ptr<ns3::NetDevice> port) [member function] cls.add_method('Learn', 'void', [param('ns3::Mac48Address', 'source'), param('ns3::Ptr< ns3::NetDevice >', 'port')], visibility='protected') ## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::ReceiveFromDevice(ns3::Ptr<ns3::NetDevice> device, ns3::Ptr<const ns3::Packet> packet, uint16_t protocol, ns3::Address const & source, ns3::Address const & destination, ns3::NetDevice::PacketType packetType) [member function] cls.add_method('ReceiveFromDevice', 'void', [param('ns3::Ptr< ns3::NetDevice >', 'device'), param('ns3::Ptr< ns3::Packet const >', 'packet'), param('uint16_t', 'protocol'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'destination'), param('ns3::NetDevice::PacketType', 'packetType')], visibility='protected') return def register_Ns3Ipv4ListRouting_methods(root_module, cls): ## ipv4-list-routing.h (module 'internet'): ns3::Ipv4ListRouting::Ipv4ListRouting(ns3::Ipv4ListRouting const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4ListRouting const &', 'arg0')]) ## ipv4-list-routing.h (module 'internet'): ns3::Ipv4ListRouting::Ipv4ListRouting() [constructor] cls.add_constructor([]) ## ipv4-list-routing.h (module 'internet'): void ns3::Ipv4ListRouting::AddRoutingProtocol(ns3::Ptr<ns3::Ipv4RoutingProtocol> 
routingProtocol, int16_t priority) [member function] cls.add_method('AddRoutingProtocol', 'void', [param('ns3::Ptr< ns3::Ipv4RoutingProtocol >', 'routingProtocol'), param('int16_t', 'priority')], is_virtual=True) ## ipv4-list-routing.h (module 'internet'): uint32_t ns3::Ipv4ListRouting::GetNRoutingProtocols() const [member function] cls.add_method('GetNRoutingProtocols', 'uint32_t', [], is_const=True, is_virtual=True) ## ipv4-list-routing.h (module 'internet'): ns3::Ptr<ns3::Ipv4RoutingProtocol> ns3::Ipv4ListRouting::GetRoutingProtocol(uint32_t index, int16_t & priority) const [member function] cls.add_method('GetRoutingProtocol', 'ns3::Ptr< ns3::Ipv4RoutingProtocol >', [param('uint32_t', 'index'), param('int16_t &', 'priority', direction=2)], is_const=True, is_virtual=True) ## ipv4-list-routing.h (module 'internet'): static ns3::TypeId ns3::Ipv4ListRouting::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## ipv4-list-routing.h (module 'internet'): void ns3::Ipv4ListRouting::NotifyAddAddress(uint32_t interface, ns3::Ipv4InterfaceAddress address) [member function] cls.add_method('NotifyAddAddress', 'void', [param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')], is_virtual=True) ## ipv4-list-routing.h (module 'internet'): void ns3::Ipv4ListRouting::NotifyInterfaceDown(uint32_t interface) [member function] cls.add_method('NotifyInterfaceDown', 'void', [param('uint32_t', 'interface')], is_virtual=True) ## ipv4-list-routing.h (module 'internet'): void ns3::Ipv4ListRouting::NotifyInterfaceUp(uint32_t interface) [member function] cls.add_method('NotifyInterfaceUp', 'void', [param('uint32_t', 'interface')], is_virtual=True) ## ipv4-list-routing.h (module 'internet'): void ns3::Ipv4ListRouting::NotifyRemoveAddress(uint32_t interface, ns3::Ipv4InterfaceAddress address) [member function] cls.add_method('NotifyRemoveAddress', 'void', [param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 
'address')], is_virtual=True) ## ipv4-list-routing.h (module 'internet'): void ns3::Ipv4ListRouting::PrintRoutingTable(ns3::Ptr<ns3::OutputStreamWrapper> stream) const [member function] cls.add_method('PrintRoutingTable', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')], is_const=True, is_virtual=True) ## ipv4-list-routing.h (module 'internet'): bool ns3::Ipv4ListRouting::RouteInput(ns3::Ptr<const ns3::Packet> p, ns3::Ipv4Header const & header, ns3::Ptr<const ns3::NetDevice> idev, ns3::Callback<void,ns3::Ptr<ns3::Ipv4Route>,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ucb, ns3::Callback<void,ns3::Ptr<ns3::Ipv4MulticastRoute>,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> mcb, ns3::Callback<void,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,unsigned int,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> lcb, ns3::Callback<void,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,ns3::Socket::SocketErrno,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ecb) [member function] cls.add_method('RouteInput', 'bool', [param('ns3::Ptr< ns3::Packet const >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('ns3::Ptr< ns3::NetDevice const >', 'idev'), param('ns3::Callback< void, ns3::Ptr< ns3::Ipv4Route >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ucb'), param('ns3::Callback< void, ns3::Ptr< ns3::Ipv4MulticastRoute >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'mcb'), param('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'lcb'), param('ns3::Callback< void, ns3::Ptr< ns3::Packet const 
>, ns3::Ipv4Header const &, ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ecb')], is_virtual=True) ## ipv4-list-routing.h (module 'internet'): ns3::Ptr<ns3::Ipv4Route> ns3::Ipv4ListRouting::RouteOutput(ns3::Ptr<ns3::Packet> p, ns3::Ipv4Header const & header, ns3::Ptr<ns3::NetDevice> oif, ns3::Socket::SocketErrno & sockerr) [member function] cls.add_method('RouteOutput', 'ns3::Ptr< ns3::Ipv4Route >', [param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('ns3::Ptr< ns3::NetDevice >', 'oif'), param('ns3::Socket::SocketErrno &', 'sockerr')], is_virtual=True) ## ipv4-list-routing.h (module 'internet'): void ns3::Ipv4ListRouting::SetIpv4(ns3::Ptr<ns3::Ipv4> ipv4) [member function] cls.add_method('SetIpv4', 'void', [param('ns3::Ptr< ns3::Ipv4 >', 'ipv4')], is_virtual=True) ## ipv4-list-routing.h (module 'internet'): void ns3::Ipv4ListRouting::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True) ## ipv4-list-routing.h (module 'internet'): void ns3::Ipv4ListRouting::DoInitialize() [member function] cls.add_method('DoInitialize', 'void', [], visibility='protected', is_virtual=True) return def register_Ns3Ipv4NixVectorRouting_methods(root_module, cls): ## ipv4-nix-vector-routing.h (module 'nix-vector-routing'): ns3::Ipv4NixVectorRouting::Ipv4NixVectorRouting(ns3::Ipv4NixVectorRouting const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4NixVectorRouting const &', 'arg0')]) ## ipv4-nix-vector-routing.h (module 'nix-vector-routing'): ns3::Ipv4NixVectorRouting::Ipv4NixVectorRouting() [constructor] cls.add_constructor([]) ## ipv4-nix-vector-routing.h (module 'nix-vector-routing'): void ns3::Ipv4NixVectorRouting::FlushGlobalNixRoutingCache() const [member function] cls.add_method('FlushGlobalNixRoutingCache', 'void', [], is_const=True) ## ipv4-nix-vector-routing.h (module 'nix-vector-routing'): static ns3::TypeId 
ns3::Ipv4NixVectorRouting::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## ipv4-nix-vector-routing.h (module 'nix-vector-routing'): void ns3::Ipv4NixVectorRouting::SetNode(ns3::Ptr<ns3::Node> node) [member function] cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')]) ## ipv4-nix-vector-routing.h (module 'nix-vector-routing'): void ns3::Ipv4NixVectorRouting::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], visibility='private', is_virtual=True) ## ipv4-nix-vector-routing.h (module 'nix-vector-routing'): void ns3::Ipv4NixVectorRouting::NotifyAddAddress(uint32_t interface, ns3::Ipv4InterfaceAddress address) [member function] cls.add_method('NotifyAddAddress', 'void', [param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')], visibility='private', is_virtual=True) ## ipv4-nix-vector-routing.h (module 'nix-vector-routing'): void ns3::Ipv4NixVectorRouting::NotifyInterfaceDown(uint32_t interface) [member function] cls.add_method('NotifyInterfaceDown', 'void', [param('uint32_t', 'interface')], visibility='private', is_virtual=True) ## ipv4-nix-vector-routing.h (module 'nix-vector-routing'): void ns3::Ipv4NixVectorRouting::NotifyInterfaceUp(uint32_t interface) [member function] cls.add_method('NotifyInterfaceUp', 'void', [param('uint32_t', 'interface')], visibility='private', is_virtual=True) ## ipv4-nix-vector-routing.h (module 'nix-vector-routing'): void ns3::Ipv4NixVectorRouting::NotifyRemoveAddress(uint32_t interface, ns3::Ipv4InterfaceAddress address) [member function] cls.add_method('NotifyRemoveAddress', 'void', [param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')], visibility='private', is_virtual=True) ## ipv4-nix-vector-routing.h (module 'nix-vector-routing'): void ns3::Ipv4NixVectorRouting::PrintRoutingTable(ns3::Ptr<ns3::OutputStreamWrapper> stream) const [member function] cls.add_method('PrintRoutingTable', 'void', 
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')], is_const=True, visibility='private', is_virtual=True) ## ipv4-nix-vector-routing.h (module 'nix-vector-routing'): bool ns3::Ipv4NixVectorRouting::RouteInput(ns3::Ptr<const ns3::Packet> p, ns3::Ipv4Header const & header, ns3::Ptr<const ns3::NetDevice> idev, ns3::Callback<void,ns3::Ptr<ns3::Ipv4Route>,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ucb, ns3::Callback<void,ns3::Ptr<ns3::Ipv4MulticastRoute>,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> mcb, ns3::Callback<void,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,unsigned int,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> lcb, ns3::Callback<void,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,ns3::Socket::SocketErrno,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ecb) [member function] cls.add_method('RouteInput', 'bool', [param('ns3::Ptr< ns3::Packet const >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('ns3::Ptr< ns3::NetDevice const >', 'idev'), param('ns3::Callback< void, ns3::Ptr< ns3::Ipv4Route >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ucb'), param('ns3::Callback< void, ns3::Ptr< ns3::Ipv4MulticastRoute >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'mcb'), param('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'lcb'), param('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ecb')], visibility='private', is_virtual=True) ## 
ipv4-nix-vector-routing.h (module 'nix-vector-routing'): ns3::Ptr<ns3::Ipv4Route> ns3::Ipv4NixVectorRouting::RouteOutput(ns3::Ptr<ns3::Packet> p, ns3::Ipv4Header const & header, ns3::Ptr<ns3::NetDevice> oif, ns3::Socket::SocketErrno & sockerr) [member function] cls.add_method('RouteOutput', 'ns3::Ptr< ns3::Ipv4Route >', [param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('ns3::Ptr< ns3::NetDevice >', 'oif'), param('ns3::Socket::SocketErrno &', 'sockerr')], visibility='private', is_virtual=True) ## ipv4-nix-vector-routing.h (module 'nix-vector-routing'): void ns3::Ipv4NixVectorRouting::SetIpv4(ns3::Ptr<ns3::Ipv4> ipv4) [member function] cls.add_method('SetIpv4', 'void', [param('ns3::Ptr< ns3::Ipv4 >', 'ipv4')], visibility='private', is_virtual=True) return def register_Ns3HashImplementation_methods(root_module, cls): ## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation(ns3::Hash::Implementation const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Implementation const &', 'arg0')]) ## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation() [constructor] cls.add_constructor([]) ## hash-function.h (module 'core'): uint32_t ns3::Hash::Implementation::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_pure_virtual=True, is_virtual=True) ## hash-function.h (module 'core'): uint64_t ns3::Hash::Implementation::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-function.h (module 'core'): void ns3::Hash::Implementation::clear() [member function] cls.add_method('clear', 'void', [], is_pure_virtual=True, is_virtual=True) return def register_Ns3HashFunctionFnv1a_methods(root_module, cls): ## hash-fnv.h 
(module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a(ns3::Hash::Function::Fnv1a const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Function::Fnv1a const &', 'arg0')]) ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a() [constructor] cls.add_constructor([]) ## hash-fnv.h (module 'core'): uint32_t ns3::Hash::Function::Fnv1a::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-fnv.h (module 'core'): uint64_t ns3::Hash::Function::Fnv1a::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-fnv.h (module 'core'): void ns3::Hash::Function::Fnv1a::clear() [member function] cls.add_method('clear', 'void', [], is_virtual=True) return def register_Ns3HashFunctionHash32_methods(root_module, cls): ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Function::Hash32 const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Function::Hash32 const &', 'arg0')]) ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Hash32Function_ptr hp) [constructor] cls.add_constructor([param('ns3::Hash::Hash32Function_ptr', 'hp')]) ## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash32::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-function.h (module 'core'): void ns3::Hash::Function::Hash32::clear() [member function] cls.add_method('clear', 'void', [], is_virtual=True) return def register_Ns3HashFunctionHash64_methods(root_module, cls): ## hash-function.h (module 'core'): 
ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Function::Hash64 const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Function::Hash64 const &', 'arg0')]) ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Hash64Function_ptr hp) [constructor] cls.add_constructor([param('ns3::Hash::Hash64Function_ptr', 'hp')]) ## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash64::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-function.h (module 'core'): uint64_t ns3::Hash::Function::Hash64::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-function.h (module 'core'): void ns3::Hash::Function::Hash64::clear() [member function] cls.add_method('clear', 'void', [], is_virtual=True) return def register_Ns3HashFunctionMurmur3_methods(root_module, cls): ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3(ns3::Hash::Function::Murmur3 const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Function::Murmur3 const &', 'arg0')]) ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3() [constructor] cls.add_constructor([]) ## hash-murmur3.h (module 'core'): uint32_t ns3::Hash::Function::Murmur3::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-murmur3.h (module 'core'): uint64_t ns3::Hash::Function::Murmur3::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-murmur3.h (module 'core'): 
void ns3::Hash::Function::Murmur3::clear() [member function] cls.add_method('clear', 'void', [], is_virtual=True) return def register_functions(root_module): module = root_module register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module) register_functions_ns3_Hash(module.get_submodule('Hash'), root_module) register_functions_ns3_TracedValueCallback(module.get_submodule('TracedValueCallback'), root_module) return def register_functions_ns3_FatalImpl(module, root_module): return def register_functions_ns3_Hash(module, root_module): register_functions_ns3_Hash_Function(module.get_submodule('Function'), root_module) return def register_functions_ns3_Hash_Function(module, root_module): return def register_functions_ns3_TracedValueCallback(module, root_module): return def main(): out = FileCodeSink(sys.stdout) root_module = module_init() register_types(root_module) register_methods(root_module) register_functions(root_module) root_module.generate(out) if __name__ == '__main__': main()
gpl-2.0
awohns/selection
python_lib/lib/python3.4/site-packages/pip/locations.py
390
6202
"""Locations where we look for configs, install stuff, etc""" import sys import site import os import tempfile from distutils.command.install import install, SCHEME_KEYS import getpass from pip.backwardcompat import get_python_lib, get_path_uid, user_site import pip.exceptions DELETE_MARKER_MESSAGE = '''\ This file is placed here by pip to indicate the source was put here by pip. Once this package is successfully installed this source code will be deleted (unless you remove this file). ''' PIP_DELETE_MARKER_FILENAME = 'pip-delete-this-directory.txt' def write_delete_marker_file(directory): """ Write the pip delete marker file into this directory. """ filepath = os.path.join(directory, PIP_DELETE_MARKER_FILENAME) marker_fp = open(filepath, 'w') marker_fp.write(DELETE_MARKER_MESSAGE) marker_fp.close() def running_under_virtualenv(): """ Return True if we're running inside a virtualenv, False otherwise. """ if hasattr(sys, 'real_prefix'): return True elif sys.prefix != getattr(sys, "base_prefix", sys.prefix): return True return False def virtualenv_no_global(): """ Return True if in a venv and no system site packages. """ #this mirrors the logic in virtualenv.py for locating the no-global-site-packages.txt file site_mod_dir = os.path.dirname(os.path.abspath(site.__file__)) no_global_file = os.path.join(site_mod_dir, 'no-global-site-packages.txt') if running_under_virtualenv() and os.path.isfile(no_global_file): return True def __get_username(): """ Returns the effective username of the current process. 
""" if sys.platform == 'win32': return getpass.getuser() import pwd return pwd.getpwuid(os.geteuid()).pw_name def _get_build_prefix(): """ Returns a safe build_prefix """ path = os.path.join(tempfile.gettempdir(), 'pip_build_%s' % __get_username()) if sys.platform == 'win32': """ on windows(tested on 7) temp dirs are isolated """ return path try: os.mkdir(path) write_delete_marker_file(path) except OSError: file_uid = None try: # raises OSError for symlinks # https://github.com/pypa/pip/pull/935#discussion_r5307003 file_uid = get_path_uid(path) except OSError: file_uid = None if file_uid != os.geteuid(): msg = "The temporary folder for building (%s) is either not owned by you, or is a symlink." \ % path print (msg) print("pip will not work until the temporary folder is " + \ "either deleted or is a real directory owned by your user account.") raise pip.exceptions.InstallationError(msg) return path if running_under_virtualenv(): build_prefix = os.path.join(sys.prefix, 'build') src_prefix = os.path.join(sys.prefix, 'src') else: # Note: intentionally NOT using mkdtemp # See https://github.com/pypa/pip/issues/906 for plan to move to mkdtemp build_prefix = _get_build_prefix() ## FIXME: keep src in cwd for now (it is not a temporary folder) try: src_prefix = os.path.join(os.getcwd(), 'src') except OSError: # In case the current working directory has been renamed or deleted sys.exit("The folder you are executing pip from can no longer be found.") # under Mac OS X + virtualenv sys.prefix is not properly resolved # it is something like /path/to/python/bin/.. 
# Note: using realpath due to tmp dirs on OSX being symlinks build_prefix = os.path.abspath(os.path.realpath(build_prefix)) src_prefix = os.path.abspath(src_prefix) # FIXME doesn't account for venv linked to global site-packages site_packages = get_python_lib() user_dir = os.path.expanduser('~') if sys.platform == 'win32': bin_py = os.path.join(sys.prefix, 'Scripts') bin_user = os.path.join(user_site, 'Scripts') if user_site else None # buildout uses 'bin' on Windows too? if not os.path.exists(bin_py): bin_py = os.path.join(sys.prefix, 'bin') bin_user = os.path.join(user_site, 'bin') if user_site else None default_storage_dir = os.path.join(user_dir, 'pip') default_config_file = os.path.join(default_storage_dir, 'pip.ini') default_log_file = os.path.join(default_storage_dir, 'pip.log') else: bin_py = os.path.join(sys.prefix, 'bin') bin_user = os.path.join(user_site, 'bin') if user_site else None default_storage_dir = os.path.join(user_dir, '.pip') default_config_file = os.path.join(default_storage_dir, 'pip.conf') default_log_file = os.path.join(default_storage_dir, 'pip.log') # Forcing to use /usr/local/bin for standard Mac OS X framework installs # Also log to ~/Library/Logs/ for use with the Console.app log viewer if sys.platform[:6] == 'darwin' and sys.prefix[:16] == '/System/Library/': bin_py = '/usr/local/bin' default_log_file = os.path.join(user_dir, 'Library/Logs/pip.log') def distutils_scheme(dist_name, user=False, home=None, root=None): """ Return a distutils install scheme """ from distutils.dist import Distribution scheme = {} d = Distribution({'name': dist_name}) d.parse_config_files() i = d.get_command_obj('install', create=True) # NOTE: setting user or home has the side-effect of creating the home dir or # user base for installations during finalize_options() # ideally, we'd prefer a scheme class that has no side-effects. 
i.user = user or i.user i.home = home or i.home i.root = root or i.root i.finalize_options() for key in SCHEME_KEYS: scheme[key] = getattr(i, 'install_'+key) if running_under_virtualenv(): scheme['headers'] = os.path.join(sys.prefix, 'include', 'site', 'python' + sys.version[:3], dist_name) if root is not None: scheme["headers"] = os.path.join( root, os.path.abspath(scheme["headers"])[1:], ) return scheme
mit
avadacatavra/servo
tests/wpt/web-platform-tests/tools/third_party/py/py/_io/terminalwriter.py
54
13486
""" Helper functions for writing to terminals and files. """ import sys, os import py py3k = sys.version_info[0] >= 3 from py.builtin import text, bytes win32_and_ctypes = False colorama = None if sys.platform == "win32": try: import colorama except ImportError: try: import ctypes win32_and_ctypes = True except ImportError: pass def _getdimensions(): import termios,fcntl,struct call = fcntl.ioctl(1,termios.TIOCGWINSZ,"\000"*8) height,width = struct.unpack( "hhhh", call ) [:2] return height, width def get_terminal_width(): width = 0 try: _, width = _getdimensions() except py.builtin._sysex: raise except: # pass to fallback below pass if width == 0: # FALLBACK: # * some exception happened # * or this is emacs terminal which reports (0,0) width = int(os.environ.get('COLUMNS', 80)) # XXX the windows getdimensions may be bogus, let's sanify a bit if width < 40: width = 80 return width terminal_width = get_terminal_width() # XXX unify with _escaped func below def ansi_print(text, esc, file=None, newline=True, flush=False): if file is None: file = sys.stderr text = text.rstrip() if esc and not isinstance(esc, tuple): esc = (esc,) if esc and sys.platform != "win32" and file.isatty(): text = (''.join(['\x1b[%sm' % cod for cod in esc]) + text + '\x1b[0m') # ANSI color code "reset" if newline: text += '\n' if esc and win32_and_ctypes and file.isatty(): if 1 in esc: bold = True esc = tuple([x for x in esc if x != 1]) else: bold = False esctable = {() : FOREGROUND_WHITE, # normal (31,): FOREGROUND_RED, # red (32,): FOREGROUND_GREEN, # green (33,): FOREGROUND_GREEN|FOREGROUND_RED, # yellow (34,): FOREGROUND_BLUE, # blue (35,): FOREGROUND_BLUE|FOREGROUND_RED, # purple (36,): FOREGROUND_BLUE|FOREGROUND_GREEN, # cyan (37,): FOREGROUND_WHITE, # white (39,): FOREGROUND_WHITE, # reset } attr = esctable.get(esc, FOREGROUND_WHITE) if bold: attr |= FOREGROUND_INTENSITY STD_OUTPUT_HANDLE = -11 STD_ERROR_HANDLE = -12 if file is sys.stderr: handle = GetStdHandle(STD_ERROR_HANDLE) else: 
handle = GetStdHandle(STD_OUTPUT_HANDLE) oldcolors = GetConsoleInfo(handle).wAttributes attr |= (oldcolors & 0x0f0) SetConsoleTextAttribute(handle, attr) while len(text) > 32768: file.write(text[:32768]) text = text[32768:] if text: file.write(text) SetConsoleTextAttribute(handle, oldcolors) else: file.write(text) if flush: file.flush() def should_do_markup(file): if os.environ.get('PY_COLORS') == '1': return True if os.environ.get('PY_COLORS') == '0': return False return hasattr(file, 'isatty') and file.isatty() \ and os.environ.get('TERM') != 'dumb' \ and not (sys.platform.startswith('java') and os._name == 'nt') class TerminalWriter(object): _esctable = dict(black=30, red=31, green=32, yellow=33, blue=34, purple=35, cyan=36, white=37, Black=40, Red=41, Green=42, Yellow=43, Blue=44, Purple=45, Cyan=46, White=47, bold=1, light=2, blink=5, invert=7) # XXX deprecate stringio argument def __init__(self, file=None, stringio=False, encoding=None): if file is None: if stringio: self.stringio = file = py.io.TextIO() else: from sys import stdout as file elif py.builtin.callable(file) and not ( hasattr(file, "write") and hasattr(file, "flush")): file = WriteFile(file, encoding=encoding) if hasattr(file, "isatty") and file.isatty() and colorama: file = colorama.AnsiToWin32(file).stream self.encoding = encoding or getattr(file, 'encoding', "utf-8") self._file = file self.hasmarkup = should_do_markup(file) self._lastlen = 0 self._chars_on_current_line = 0 @property def fullwidth(self): if hasattr(self, '_terminal_width'): return self._terminal_width return get_terminal_width() @fullwidth.setter def fullwidth(self, value): self._terminal_width = value @property def chars_on_current_line(self): """Return the number of characters written so far in the current line. Please note that this count does not produce correct results after a reline() call, see #164. .. 
versionadded:: 1.5.0 :rtype: int """ return self._chars_on_current_line def _escaped(self, text, esc): if esc and self.hasmarkup: text = (''.join(['\x1b[%sm' % cod for cod in esc]) + text +'\x1b[0m') return text def markup(self, text, **kw): esc = [] for name in kw: if name not in self._esctable: raise ValueError("unknown markup: %r" %(name,)) if kw[name]: esc.append(self._esctable[name]) return self._escaped(text, tuple(esc)) def sep(self, sepchar, title=None, fullwidth=None, **kw): if fullwidth is None: fullwidth = self.fullwidth # the goal is to have the line be as long as possible # under the condition that len(line) <= fullwidth if sys.platform == "win32": # if we print in the last column on windows we are on a # new line but there is no way to verify/neutralize this # (we may not know the exact line width) # so let's be defensive to avoid empty lines in the output fullwidth -= 1 if title is not None: # we want 2 + 2*len(fill) + len(title) <= fullwidth # i.e. 2 + 2*len(sepchar)*N + len(title) <= fullwidth # 2*len(sepchar)*N <= fullwidth - len(title) - 2 # N <= (fullwidth - len(title) - 2) // (2*len(sepchar)) N = (fullwidth - len(title) - 2) // (2*len(sepchar)) fill = sepchar * N line = "%s %s %s" % (fill, title, fill) else: # we want len(sepchar)*N <= fullwidth # i.e. 
N <= fullwidth // len(sepchar) line = sepchar * (fullwidth // len(sepchar)) # in some situations there is room for an extra sepchar at the right, # in particular if we consider that with a sepchar like "_ " the # trailing space is not important at the end of the line if len(line) + len(sepchar.rstrip()) <= fullwidth: line += sepchar.rstrip() self.line(line, **kw) def write(self, msg, **kw): if msg: if not isinstance(msg, (bytes, text)): msg = text(msg) self._update_chars_on_current_line(msg) if self.hasmarkup and kw: markupmsg = self.markup(msg, **kw) else: markupmsg = msg write_out(self._file, markupmsg) def _update_chars_on_current_line(self, text): fields = text.rsplit('\n', 1) if '\n' in text: self._chars_on_current_line = len(fields[-1]) else: self._chars_on_current_line += len(fields[-1]) def line(self, s='', **kw): self.write(s, **kw) self._checkfill(s) self.write('\n') def reline(self, line, **kw): if not self.hasmarkup: raise ValueError("cannot use rewrite-line without terminal") self.write(line, **kw) self._checkfill(line) self.write('\r') self._lastlen = len(line) def _checkfill(self, line): diff2last = self._lastlen - len(line) if diff2last > 0: self.write(" " * diff2last) class Win32ConsoleWriter(TerminalWriter): def write(self, msg, **kw): if msg: if not isinstance(msg, (bytes, text)): msg = text(msg) self._update_chars_on_current_line(msg) oldcolors = None if self.hasmarkup and kw: handle = GetStdHandle(STD_OUTPUT_HANDLE) oldcolors = GetConsoleInfo(handle).wAttributes default_bg = oldcolors & 0x00F0 attr = default_bg if kw.pop('bold', False): attr |= FOREGROUND_INTENSITY if kw.pop('red', False): attr |= FOREGROUND_RED elif kw.pop('blue', False): attr |= FOREGROUND_BLUE elif kw.pop('green', False): attr |= FOREGROUND_GREEN elif kw.pop('yellow', False): attr |= FOREGROUND_GREEN|FOREGROUND_RED else: attr |= oldcolors & 0x0007 SetConsoleTextAttribute(handle, attr) write_out(self._file, msg) if oldcolors: SetConsoleTextAttribute(handle, oldcolors) class 
WriteFile(object): def __init__(self, writemethod, encoding=None): self.encoding = encoding self._writemethod = writemethod def write(self, data): if self.encoding: data = data.encode(self.encoding, "replace") self._writemethod(data) def flush(self): return if win32_and_ctypes: TerminalWriter = Win32ConsoleWriter import ctypes from ctypes import wintypes # ctypes access to the Windows console STD_OUTPUT_HANDLE = -11 STD_ERROR_HANDLE = -12 FOREGROUND_BLACK = 0x0000 # black text FOREGROUND_BLUE = 0x0001 # text color contains blue. FOREGROUND_GREEN = 0x0002 # text color contains green. FOREGROUND_RED = 0x0004 # text color contains red. FOREGROUND_WHITE = 0x0007 FOREGROUND_INTENSITY = 0x0008 # text color is intensified. BACKGROUND_BLACK = 0x0000 # background color black BACKGROUND_BLUE = 0x0010 # background color contains blue. BACKGROUND_GREEN = 0x0020 # background color contains green. BACKGROUND_RED = 0x0040 # background color contains red. BACKGROUND_WHITE = 0x0070 BACKGROUND_INTENSITY = 0x0080 # background color is intensified. 
SHORT = ctypes.c_short class COORD(ctypes.Structure): _fields_ = [('X', SHORT), ('Y', SHORT)] class SMALL_RECT(ctypes.Structure): _fields_ = [('Left', SHORT), ('Top', SHORT), ('Right', SHORT), ('Bottom', SHORT)] class CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure): _fields_ = [('dwSize', COORD), ('dwCursorPosition', COORD), ('wAttributes', wintypes.WORD), ('srWindow', SMALL_RECT), ('dwMaximumWindowSize', COORD)] _GetStdHandle = ctypes.windll.kernel32.GetStdHandle _GetStdHandle.argtypes = [wintypes.DWORD] _GetStdHandle.restype = wintypes.HANDLE def GetStdHandle(kind): return _GetStdHandle(kind) SetConsoleTextAttribute = ctypes.windll.kernel32.SetConsoleTextAttribute SetConsoleTextAttribute.argtypes = [wintypes.HANDLE, wintypes.WORD] SetConsoleTextAttribute.restype = wintypes.BOOL _GetConsoleScreenBufferInfo = \ ctypes.windll.kernel32.GetConsoleScreenBufferInfo _GetConsoleScreenBufferInfo.argtypes = [wintypes.HANDLE, ctypes.POINTER(CONSOLE_SCREEN_BUFFER_INFO)] _GetConsoleScreenBufferInfo.restype = wintypes.BOOL def GetConsoleInfo(handle): info = CONSOLE_SCREEN_BUFFER_INFO() _GetConsoleScreenBufferInfo(handle, ctypes.byref(info)) return info def _getdimensions(): handle = GetStdHandle(STD_OUTPUT_HANDLE) info = GetConsoleInfo(handle) # Substract one from the width, otherwise the cursor wraps # and the ending \n causes an empty line to display. return info.dwSize.Y, info.dwSize.X - 1 def write_out(fil, msg): # XXX sometimes "msg" is of type bytes, sometimes text which # complicates the situation. Should we try to enforce unicode? 
try: # on py27 and above writing out to sys.stdout with an encoding # should usually work for unicode messages (if the encoding is # capable of it) fil.write(msg) except UnicodeEncodeError: # on py26 it might not work because stdout expects bytes if fil.encoding: try: fil.write(msg.encode(fil.encoding)) except UnicodeEncodeError: # it might still fail if the encoding is not capable pass else: fil.flush() return # fallback: escape all unicode characters msg = msg.encode("unicode-escape").decode("ascii") fil.write(msg) fil.flush()
mpl-2.0
ryfeus/lambda-packs
Selenium_PhantomJS/source/requests/packages/chardet/eucjpprober.py
2919
3678
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#   Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301  USA
######################### END LICENSE BLOCK #########################

import sys

from . import constants
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCJPDistributionAnalysis
from .jpcntx import EUCJPContextAnalysis
from .mbcssm import EUCJPSMModel


class EUCJPProber(MultiByteCharSetProber):
    """Charset prober for EUC-JP encoded Japanese text.

    Combines three signals: a coding state machine that checks byte-sequence
    legality against the EUC-JP model, a character distribution analyzer, and
    a two-character context analyzer.  The reported confidence is the max of
    the two analyzers (see ``get_confidence``).
    """

    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(EUCJPSMModel)
        self._mDistributionAnalyzer = EUCJPDistributionAnalysis()
        self._mContextAnalyzer = EUCJPContextAnalysis()
        self.reset()

    def reset(self):
        # The base class resets the shared prober state (including
        # self._mDistributionAnalyzer); the context analyzer is specific to
        # this subclass and must be reset explicitly.
        MultiByteCharSetProber.reset(self)
        self._mContextAnalyzer.reset()

    def get_charset_name(self):
        """Return the canonical name reported for this encoding."""
        return "EUC-JP"

    def feed(self, aBuf):
        """Feed a chunk of bytes into the prober and return its new state.

        Each byte is run through the coding state machine:
        - eError: the byte sequence is illegal for EUC-JP -> give up (eNotMe).
        - eItsMe: the sequence is conclusive -> eFoundIt.
        - eStart: a complete character was just consumed -> feed the
          character (current byte plus the preceding one) to both analyzers.

        NOTE(review): aBuf is indexed unconditionally at the end
        (aBuf[aLen - 1]), so callers are presumably expected never to feed an
        empty buffer — confirm against the dispatching code in chardet.
        """
        aLen = len(aBuf)
        for i in range(0, aLen):
            # PY3K: aBuf is a byte array, so aBuf[i] is an int, not a byte
            codingState = self._mCodingSM.next_state(aBuf[i])
            if codingState == constants.eError:
                if constants._debug:
                    sys.stderr.write(self.get_charset_name()
                                     + ' prober hit error at byte ' + str(i)
                                     + '\n')
                self._mState = constants.eNotMe
                break
            elif codingState == constants.eItsMe:
                self._mState = constants.eFoundIt
                break
            elif codingState == constants.eStart:
                charLen = self._mCodingSM.get_current_charlen()
                if i == 0:
                    # First byte of this chunk: the previous byte lives in
                    # self._mLastChar[0], carried over from the last feed().
                    self._mLastChar[1] = aBuf[0]
                    self._mContextAnalyzer.feed(self._mLastChar, charLen)
                    self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
                else:
                    # Two-byte window ending at the current byte.
                    self._mContextAnalyzer.feed(aBuf[i - 1:i + 1], charLen)
                    self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
                                                     charLen)

        # Remember the final byte so a character split across chunks can be
        # reassembled on the next call (see the i == 0 branch above).
        self._mLastChar[0] = aBuf[aLen - 1]

        if self.get_state() == constants.eDetecting:
            # Shortcut: once the context analyzer has enough data and the
            # confidence clears the threshold, commit to EUC-JP early.
            if (self._mContextAnalyzer.got_enough_data() and
                    (self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
                self._mState = constants.eFoundIt

        return self.get_state()

    def get_confidence(self):
        """Return the higher of the context and distribution confidences."""
        contxtCf = self._mContextAnalyzer.get_confidence()
        distribCf = self._mDistributionAnalyzer.get_confidence()
        return max(contxtCf, distribCf)
mit
grischard/postcode-centroid
postcode-centroid.py
1
2908
#!/usr/bin/env python
"""
Return GeoJSON centroids for each postcode in Luxembourg

- Downloads the latest geojson from UDATA_ADDRESSES
- Average the position of all postcodes
- Spit out geojson

Run like : python3 postcode-centroid.py > centroids.geojson
A sample centroids.geojson.xz (from 2016-07-11) is included.

There are 63 postcodes in Luxembourg that contain only one address.
Some of these are for residential addresses. You might want to consider
merging these points with the nearest neighbour if you need to anonymise
the output data of your project.
"""

import requests
import sys
import geojson
from collections import defaultdict

# The API endpoint that contains the link to the most recent version of the
# addresses in all available formats (geojson, but also shp).
UDATA_ADDRESSES = 'https://data.public.lu/api/1/datasets/adresses-georeferencees-bd-adresses/'
# Eugh, magic numbers.
# This is just the uuid for the addresses in geojson format.
UDATA_ADDRESSES_ID = '7b58cf20-cbb0-4970-83f7-53a277f691b8'

# Maps each postcode to the list of (long, lat) coordinates of its addresses.
postcodes = defaultdict(list)

# Udata has no permalink. Parse the API to get the latest geojson.
udata_json = requests.get(UDATA_ADDRESSES).json()

# Find the resource with that ID in the udata json
# i.e. our addresses
for resource in udata_json['resources']:
    if resource['id'] == UDATA_ADDRESSES_ID:
        ADDRESSES_GEOJSON = resource['url']
        break
else:
    # Oops, the for loop didn't find anything!
    raise IOError("Could not find resource id {} in {}".format(
        UDATA_ADDRESSES_ID, UDATA_ADDRESSES
    ))

# Downloading the addresses might take ~15 seconds.
# In the meanwile, shake your wrists and correct your posture.
addresses = requests.get(ADDRESSES_GEOJSON).json()

# For all addresses, append coordinates to the list of coordinates
# of this postcode.
for address in addresses['features']:
    code_postal = address['properties']['code_postal']
    coordinates = address['geometry']['coordinates']
    # each coordinate is a tuple (long, lat)
    postcodes[code_postal].append(coordinates)


def rounded_location_generator(postcodes):
    """Yield one geojson.Feature per postcode, placed at the centroid
    (arithmetic mean, rounded to 6 decimals) of all its addresses.

    Postcodes with no address are reported on stderr and skipped.

    BUGFIX: the original caught ZeroDivisionError with `pass` and then
    fell through to the yield with `centroid` and `count` unbound,
    raising NameError; an empty point list would in fact already have
    raised an uncaught ValueError from `x, y = zip(*points)`.  An
    explicit emptiness guard with `continue` handles both.
    """
    for postcode, points in postcodes.items():
        if not points:
            print("No address for postcode {}".format(postcode),
                  file=sys.stderr)
            continue
        x, y = zip(*points)  # each point is a (long, lat) tuple
        count = len(points)
        # round to 6 decimals (~0.1 m precision, plenty for a centroid)
        centroid = (
            round(sum(x) / count, 6),
            round(sum(y) / count, 6)
        )
        yield geojson.Feature(
            geometry=geojson.Point(centroid),
            properties={"postcode": postcode, "count": count}
        )


# Dump the json features as a FeatureCollection
postcodes_centroids = list(rounded_location_generator(postcodes))
print(geojson.dumps(geojson.FeatureCollection(postcodes_centroids)))
apache-2.0
Opshun/API
venv/lib/python2.7/site-packages/pip/_vendor/packaging/version.py
451
11884
# Copyright 2014 Donald Stufft # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import, division, print_function import collections import itertools import re from ._structures import Infinity __all__ = [ "parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN" ] _Version = collections.namedtuple( "_Version", ["epoch", "release", "dev", "pre", "post", "local"], ) def parse(version): """ Parse the given version string and return either a :class:`Version` object or a :class:`LegacyVersion` object depending on if the given version is a valid PEP 440 version or a legacy version. """ try: return Version(version) except InvalidVersion: return LegacyVersion(version) class InvalidVersion(ValueError): """ An invalid version was found, users should refer to PEP 440. 
""" class _BaseVersion(object): def __hash__(self): return hash(self._key) def __lt__(self, other): return self._compare(other, lambda s, o: s < o) def __le__(self, other): return self._compare(other, lambda s, o: s <= o) def __eq__(self, other): return self._compare(other, lambda s, o: s == o) def __ge__(self, other): return self._compare(other, lambda s, o: s >= o) def __gt__(self, other): return self._compare(other, lambda s, o: s > o) def __ne__(self, other): return self._compare(other, lambda s, o: s != o) def _compare(self, other, method): if not isinstance(other, _BaseVersion): return NotImplemented return method(self._key, other._key) class LegacyVersion(_BaseVersion): def __init__(self, version): self._version = str(version) self._key = _legacy_cmpkey(self._version) def __str__(self): return self._version def __repr__(self): return "<LegacyVersion({0})>".format(repr(str(self))) @property def public(self): return self._version @property def base_version(self): return self._version @property def local(self): return None @property def is_prerelease(self): return False @property def is_postrelease(self): return False _legacy_version_component_re = re.compile( r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE, ) _legacy_version_replacement_map = { "pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@", } def _parse_version_parts(s): for part in _legacy_version_component_re.split(s): part = _legacy_version_replacement_map.get(part, part) if not part or part == ".": continue if part[:1] in "0123456789": # pad for numeric comparison yield part.zfill(8) else: yield "*" + part # ensure that alpha/beta/candidate are before final yield "*final" def _legacy_cmpkey(version): # We hardcode an epoch of -1 here. A PEP 440 version can only have a epoch # greater than or equal to 0. This will effectively put the LegacyVersion, # which uses the defacto standard originally implemented by setuptools, # as before all PEP 440 versions. 
epoch = -1 # This scheme is taken from pkg_resources.parse_version setuptools prior to # it's adoption of the packaging library. parts = [] for part in _parse_version_parts(version.lower()): if part.startswith("*"): # remove "-" before a prerelease tag if part < "*final": while parts and parts[-1] == "*final-": parts.pop() # remove trailing zeros from each series of numeric parts while parts and parts[-1] == "00000000": parts.pop() parts.append(part) parts = tuple(parts) return epoch, parts # Deliberately not anchored to the start and end of the string, to make it # easier for 3rd party code to reuse VERSION_PATTERN = r""" v? (?: (?:(?P<epoch>[0-9]+)!)? # epoch (?P<release>[0-9]+(?:\.[0-9]+)*) # release segment (?P<pre> # pre-release [-_\.]? (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview)) [-_\.]? (?P<pre_n>[0-9]+)? )? (?P<post> # post release (?:-(?P<post_n1>[0-9]+)) | (?: [-_\.]? (?P<post_l>post|rev|r) [-_\.]? (?P<post_n2>[0-9]+)? ) )? (?P<dev> # dev release [-_\.]? (?P<dev_l>dev) [-_\.]? (?P<dev_n>[0-9]+)? )? ) (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? 
# local version """ class Version(_BaseVersion): _regex = re.compile( r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE, ) def __init__(self, version): # Validate the version and parse it into pieces match = self._regex.search(version) if not match: raise InvalidVersion("Invalid version: '{0}'".format(version)) # Store the parsed out pieces of the version self._version = _Version( epoch=int(match.group("epoch")) if match.group("epoch") else 0, release=tuple(int(i) for i in match.group("release").split(".")), pre=_parse_letter_version( match.group("pre_l"), match.group("pre_n"), ), post=_parse_letter_version( match.group("post_l"), match.group("post_n1") or match.group("post_n2"), ), dev=_parse_letter_version( match.group("dev_l"), match.group("dev_n"), ), local=_parse_local_version(match.group("local")), ) # Generate a key which will be used for sorting self._key = _cmpkey( self._version.epoch, self._version.release, self._version.pre, self._version.post, self._version.dev, self._version.local, ) def __repr__(self): return "<Version({0})>".format(repr(str(self))) def __str__(self): parts = [] # Epoch if self._version.epoch != 0: parts.append("{0}!".format(self._version.epoch)) # Release segment parts.append(".".join(str(x) for x in self._version.release)) # Pre-release if self._version.pre is not None: parts.append("".join(str(x) for x in self._version.pre)) # Post-release if self._version.post is not None: parts.append(".post{0}".format(self._version.post[1])) # Development release if self._version.dev is not None: parts.append(".dev{0}".format(self._version.dev[1])) # Local version segment if self._version.local is not None: parts.append( "+{0}".format(".".join(str(x) for x in self._version.local)) ) return "".join(parts) @property def public(self): return str(self).split("+", 1)[0] @property def base_version(self): parts = [] # Epoch if self._version.epoch != 0: parts.append("{0}!".format(self._version.epoch)) # Release segment 
parts.append(".".join(str(x) for x in self._version.release)) return "".join(parts) @property def local(self): version_string = str(self) if "+" in version_string: return version_string.split("+", 1)[1] @property def is_prerelease(self): return bool(self._version.dev or self._version.pre) @property def is_postrelease(self): return bool(self._version.post) def _parse_letter_version(letter, number): if letter: # We consider there to be an implicit 0 in a pre-release if there is # not a numeral associated with it. if number is None: number = 0 # We normalize any letters to their lower case form letter = letter.lower() # We consider some words to be alternate spellings of other words and # in those cases we want to normalize the spellings to our preferred # spelling. if letter == "alpha": letter = "a" elif letter == "beta": letter = "b" elif letter in ["c", "pre", "preview"]: letter = "rc" return letter, int(number) if not letter and number: # We assume if we are given a number, but we are not given a letter # then this is using the implicit post release syntax (e.g. 1.0-1) letter = "post" return letter, int(number) _local_version_seperators = re.compile(r"[\._-]") def _parse_local_version(local): """ Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve"). """ if local is not None: return tuple( part.lower() if not part.isdigit() else int(part) for part in _local_version_seperators.split(local) ) def _cmpkey(epoch, release, pre, post, dev, local): # When we compare a release version, we want to compare it with all of the # trailing zeros removed. So we'll use a reverse the list, drop all the now # leading zeros until we come to something non zero, then take the rest # re-reverse it back into the correct order and make it a tuple and use # that for our sorting key. release = tuple( reversed(list( itertools.dropwhile( lambda x: x == 0, reversed(release), ) )) ) # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0. 
# We'll do this by abusing the pre segment, but we _only_ want to do this # if there is not a pre or a post segment. If we have one of those then # the normal sorting rules will handle this case correctly. if pre is None and post is None and dev is not None: pre = -Infinity # Versions without a pre-release (except as noted above) should sort after # those with one. elif pre is None: pre = Infinity # Versions without a post segment should sort before those with one. if post is None: post = -Infinity # Versions without a development segment should sort after those with one. if dev is None: dev = Infinity if local is None: # Versions without a local segment should sort before those with one. local = -Infinity else: # Versions with a local segment need that segment parsed to implement # the sorting rules in PEP440. # - Alpha numeric segments sort before numeric segments # - Alpha numeric segments sort lexicographically # - Numeric segments sort numerically # - Shorter versions sort before longer versions when the prefixes # match exactly local = tuple( (i, "") if isinstance(i, int) else (-Infinity, i) for i in local ) return epoch, release, pre, post, dev, local
mit
berezovskyi/nikola
nikola/data/themes/base/messages/messages_si_lk.py
28
1044
# -*- encoding:utf-8 -*- from __future__ import unicode_literals MESSAGES = { "%d min remaining to read": "", "(active)": "", "Also available in:": "", "Archive": "", "Authors": "", "Categories": "", "Comments": "", "LANGUAGE": "", "Languages:": "", "More posts about %s": "", "Newer posts": "", "Next post": "", "No posts found.": "", "Nothing found.": "", "Older posts": "", "Original site": "", "Posted:": "", "Posts about %s": "", "Posts by %s": "", "Posts for year %s": "", "Posts for {month} {day}, {year}": "", "Posts for {month} {year}": "", "Previous post": "", "Publication date": "", "RSS feed": "", "Read in English": "", "Read more": "", "Skip to main content": "", "Source": "", "Subcategories:": "", "Tags and Categories": "", "Tags": "", "Uncategorized": "", "Updates": "", "Write your page here.": "", "Write your post here.": "", "old posts, page %d": "", "page %d": "", }
mit
sudheesh001/RFID-DBSync
venv/lib/python2.7/site-packages/jinja2/compiler.py
623
61785
# -*- coding: utf-8 -*- """ jinja2.compiler ~~~~~~~~~~~~~~~ Compiles nodes into python code. :copyright: (c) 2010 by the Jinja Team. :license: BSD, see LICENSE for more details. """ from itertools import chain from copy import deepcopy from keyword import iskeyword as is_python_keyword from jinja2 import nodes from jinja2.nodes import EvalContext from jinja2.visitor import NodeVisitor from jinja2.exceptions import TemplateAssertionError from jinja2.utils import Markup, concat, escape from jinja2._compat import range_type, next, text_type, string_types, \ iteritems, NativeStringIO, imap operators = { 'eq': '==', 'ne': '!=', 'gt': '>', 'gteq': '>=', 'lt': '<', 'lteq': '<=', 'in': 'in', 'notin': 'not in' } # what method to iterate over items do we want to use for dict iteration # in generated code? on 2.x let's go with iteritems, on 3.x with items if hasattr(dict, 'iteritems'): dict_item_iter = 'iteritems' else: dict_item_iter = 'items' # does if 0: dummy(x) get us x into the scope? def unoptimize_before_dead_code(): x = 42 def f(): if 0: dummy(x) return f # The getattr is necessary for pypy which does not set this attribute if # no closure is on the function unoptimize_before_dead_code = bool( getattr(unoptimize_before_dead_code(), '__closure__', None)) def generate(node, environment, name, filename, stream=None, defer_init=False): """Generate the python source for a node tree.""" if not isinstance(node, nodes.Template): raise TypeError('Can\'t compile non template nodes') generator = CodeGenerator(environment, name, filename, stream, defer_init) generator.visit(node) if stream is None: return generator.stream.getvalue() def has_safe_repr(value): """Does the node have a safe representation?""" if value is None or value is NotImplemented or value is Ellipsis: return True if isinstance(value, (bool, int, float, complex, range_type, Markup) + string_types): return True if isinstance(value, (tuple, list, set, frozenset)): for item in value: if not has_safe_repr(item): 
return False return True elif isinstance(value, dict): for key, value in iteritems(value): if not has_safe_repr(key): return False if not has_safe_repr(value): return False return True return False def find_undeclared(nodes, names): """Check if the names passed are accessed undeclared. The return value is a set of all the undeclared names from the sequence of names found. """ visitor = UndeclaredNameVisitor(names) try: for node in nodes: visitor.visit(node) except VisitorExit: pass return visitor.undeclared class Identifiers(object): """Tracks the status of identifiers in frames.""" def __init__(self): # variables that are known to be declared (probably from outer # frames or because they are special for the frame) self.declared = set() # undeclared variables from outer scopes self.outer_undeclared = set() # names that are accessed without being explicitly declared by # this one or any of the outer scopes. Names can appear both in # declared and undeclared. self.undeclared = set() # names that are declared locally self.declared_locally = set() # names that are declared by parameters self.declared_parameter = set() def add_special(self, name): """Register a special name like `loop`.""" self.undeclared.discard(name) self.declared.add(name) def is_declared(self, name): """Check if a name is declared in this or an outer scope.""" if name in self.declared_locally or name in self.declared_parameter: return True return name in self.declared def copy(self): return deepcopy(self) class Frame(object): """Holds compile time information for us.""" def __init__(self, eval_ctx, parent=None): self.eval_ctx = eval_ctx self.identifiers = Identifiers() # a toplevel frame is the root + soft frames such as if conditions. self.toplevel = False # the root frame is basically just the outermost frame, so no if # conditions. This information is used to optimize inheritance # situations. 
        self.rootlevel = False

        # in some dynamic inheritance situations the compiler needs to add
        # write tests around output statements.
        self.require_output_check = parent and parent.require_output_check

        # inside some tags we are using a buffer rather than yield statements.
        # this for example affects {% filter %} or {% macro %}.  If a frame
        # is buffered this variable points to the name of the list used as
        # buffer.
        self.buffer = None

        # the name of the block we're in, otherwise None.
        self.block = parent and parent.block or None

        # a set of actually assigned names
        self.assigned_names = set()

        # the parent of this frame
        self.parent = parent

        if parent is not None:
            # inherit declarations and assignments from the parent frame
            self.identifiers.declared.update(
                parent.identifiers.declared |
                parent.identifiers.declared_parameter |
                parent.assigned_names
            )
            self.identifiers.outer_undeclared.update(
                parent.identifiers.undeclared -
                self.identifiers.declared
            )
            self.buffer = parent.buffer

    def copy(self):
        """Create a copy of the current one."""
        # shallow copy of the frame but with an independent Identifiers
        # object so identifier tracking on the copy stays isolated
        rv = object.__new__(self.__class__)
        rv.__dict__.update(self.__dict__)
        rv.identifiers = object.__new__(self.identifiers.__class__)
        rv.identifiers.__dict__.update(self.identifiers.__dict__)
        return rv

    def inspect(self, nodes):
        """Walk the node and check for identifiers.  If the scope is hard
        (e.g. enforced on a python level) overrides from outer scopes are
        tracked differently.
        """
        visitor = FrameIdentifierVisitor(self.identifiers)
        for node in nodes:
            visitor.visit(node)

    def find_shadowed(self, extra=()):
        """Find all the shadowed names.  extra is an iterable of variables
        that may be defined with `add_special` which may occur scoped.
        """
        i = self.identifiers
        return (i.declared | i.outer_undeclared) & \
               (i.declared_locally | i.declared_parameter) | \
               set(x for x in extra if i.is_declared(x))

    def inner(self):
        """Return an inner frame."""
        return Frame(self.eval_ctx, self)

    def soft(self):
        """Return a soft frame.

        A soft frame may not be modified as standalone thing as it shares
        the resources with the frame it was created of, but it's not a
        rootlevel frame any longer.
        """
        rv = self.copy()
        rv.rootlevel = False
        return rv

    __copy__ = copy


class VisitorExit(RuntimeError):
    """Exception used by the `UndeclaredNameVisitor` to signal a stop."""


class DependencyFinderVisitor(NodeVisitor):
    """A visitor that collects filter and test calls."""

    def __init__(self):
        self.filters = set()
        self.tests = set()

    def visit_Filter(self, node):
        self.generic_visit(node)
        self.filters.add(node.name)

    def visit_Test(self, node):
        self.generic_visit(node)
        self.tests.add(node.name)

    def visit_Block(self, node):
        """Stop visiting at blocks."""


class UndeclaredNameVisitor(NodeVisitor):
    """A visitor that checks if a name is accessed without being
    declared.  This is different from the frame visitor as it will
    not stop at closure frames.
    """

    def __init__(self, names):
        self.names = set(names)
        self.undeclared = set()

    def visit_Name(self, node):
        if node.ctx == 'load' and node.name in self.names:
            self.undeclared.add(node.name)
            # once every watched name was found there is nothing left to
            # learn; abort the traversal via VisitorExit
            if self.undeclared == self.names:
                raise VisitorExit()
        else:
            self.names.discard(node.name)

    def visit_Block(self, node):
        """Stop visiting at blocks."""


class FrameIdentifierVisitor(NodeVisitor):
    """A visitor for `Frame.inspect`."""

    def __init__(self, identifiers):
        self.identifiers = identifiers

    def visit_Name(self, node):
        """All assignments to names go through this function."""
        if node.ctx == 'store':
            self.identifiers.declared_locally.add(node.name)
        elif node.ctx == 'param':
            self.identifiers.declared_parameter.add(node.name)
        elif node.ctx == 'load' and not \
             self.identifiers.is_declared(node.name):
            self.identifiers.undeclared.add(node.name)

    def visit_If(self, node):
        self.visit(node.test)
        real_identifiers = self.identifiers

        old_names = real_identifiers.declared_locally | \
                    real_identifiers.declared_parameter

        def inner_visit(nodes):
            # visit one branch on a copy of the identifier set and return
            # the names newly declared inside that branch
            if not nodes:
                return set()
            self.identifiers = real_identifiers.copy()
            for subnode in nodes:
                self.visit(subnode)
            rv = self.identifiers.declared_locally - old_names
            # we have to remember the undeclared variables of this branch
            # because we will have to pull them.
            real_identifiers.undeclared.update(self.identifiers.undeclared)
            self.identifiers = real_identifiers
            return rv

        body = inner_visit(node.body)
        else_ = inner_visit(node.else_ or ())

        # the differences between the two branches are also pulled as
        # undeclared variables
        real_identifiers.undeclared.update(body.symmetric_difference(else_) -
                                           real_identifiers.declared)

        # remember those that are declared.
        real_identifiers.declared_locally.update(body | else_)

    def visit_Macro(self, node):
        self.identifiers.declared_locally.add(node.name)

    def visit_Import(self, node):
        self.generic_visit(node)
        self.identifiers.declared_locally.add(node.target)

    def visit_FromImport(self, node):
        self.generic_visit(node)
        for name in node.names:
            # names may be plain strings or (name, alias) tuples; only the
            # alias becomes a local declaration
            if isinstance(name, tuple):
                self.identifiers.declared_locally.add(name[1])
            else:
                self.identifiers.declared_locally.add(name)

    def visit_Assign(self, node):
        """Visit assignments in the correct order."""
        self.visit(node.node)
        self.visit(node.target)

    def visit_For(self, node):
        """Visiting stops at for blocks.  However the block sequence
        is visited as part of the outer scope.
        """
        self.visit(node.iter)

    def visit_CallBlock(self, node):
        self.visit(node.call)

    def visit_FilterBlock(self, node):
        self.visit(node.filter)

    def visit_Scope(self, node):
        """Stop visiting at scopes."""

    def visit_Block(self, node):
        """Stop visiting at blocks."""


class CompilerExit(Exception):
    """Raised if the compiler encountered a situation where it just
    doesn't make sense to further process the code.  Any block that
    raises such an exception is not further processed.
    """


class CodeGenerator(NodeVisitor):
    """Compiles a template node tree into Python source written to
    ``self.stream`` (see the module-level :func:`generate` entry point).
    """

    def __init__(self, environment, name, filename, stream=None,
                 defer_init=False):
        if stream is None:
            stream = NativeStringIO()
        self.environment = environment
        self.name = name
        self.filename = filename
        self.stream = stream
        self.created_block_context = False
        self.defer_init = defer_init

        # aliases for imports
        self.import_aliases = {}

        # a registry for all blocks.  Because blocks are moved out
        # into the global python scope they are registered here
        self.blocks = {}

        # the number of extends statements so far
        self.extends_so_far = 0

        # some templates have a rootlevel extends.  In this case we
        # can safely assume that we're a child template and do some
        # more optimizations.
        self.has_known_extends = False

        # the current line number
        self.code_lineno = 1

        # registry of all filters and tests (global, not block local)
        self.tests = {}
        self.filters = {}

        # the debug information
        self.debug_info = []
        self._write_debug_info = None

        # the number of new lines before the next write()
        self._new_lines = 0

        # the line number of the last written statement
        self._last_line = 0

        # true if nothing was written so far.
        self._first_write = True

        # used by the `temporary_identifier` method to get new
        # unique, temporary identifier
        self._last_identifier = 0

        # the current indentation
        self._indentation = 0

    # -- Various compilation helpers

    def fail(self, msg, lineno):
        """Fail with a :exc:`TemplateAssertionError`."""
        raise TemplateAssertionError(msg, lineno, self.name, self.filename)

    def temporary_identifier(self):
        """Get a new unique identifier."""
        self._last_identifier += 1
        return 't_%d' % self._last_identifier

    def buffer(self, frame):
        """Enable buffering for the frame from that point onwards."""
        frame.buffer = self.temporary_identifier()
        self.writeline('%s = []' % frame.buffer)

    def return_buffer_contents(self, frame):
        """Return the buffer contents of the frame."""
        # a volatile eval context means autoescape can only be decided at
        # runtime, so both variants are emitted behind a runtime check
        if frame.eval_ctx.volatile:
            self.writeline('if context.eval_ctx.autoescape:')
            self.indent()
            self.writeline('return Markup(concat(%s))' % frame.buffer)
            self.outdent()
            self.writeline('else:')
            self.indent()
            self.writeline('return concat(%s)' % frame.buffer)
            self.outdent()
        elif frame.eval_ctx.autoescape:
            self.writeline('return Markup(concat(%s))' % frame.buffer)
        else:
            self.writeline('return concat(%s)' % frame.buffer)

    def indent(self):
        """Indent by one."""
        self._indentation += 1

    def outdent(self, step=1):
        """Outdent by step."""
        self._indentation -= step

    def start_write(self, frame, node=None):
        """Yield or write into the frame buffer."""
        if frame.buffer is None:
            self.writeline('yield ', node)
        else:
            self.writeline('%s.append(' % frame.buffer, node)

    def end_write(self, frame):
        """End the writing process started by `start_write`."""
        if frame.buffer is not None:
            self.write(')')

    def simple_write(self, s, frame, node=None):
        """Simple shortcut for start_write + write + end_write."""
        self.start_write(frame, node)
        self.write(s)
        self.end_write(frame)

    def blockvisit(self, nodes, frame):
        """Visit a list of nodes as block in a frame.

        If the current frame is no buffer a dummy ``if 0: yield None`` is
        written automatically unless the force_generator parameter is set to
        False.
        """
        if frame.buffer is None:
            # guarantee the emitted function is a generator even if the
            # block body yields nothing
            self.writeline('if 0: yield None')
        else:
            self.writeline('pass')
        try:
            for node in nodes:
                self.visit(node, frame)
        except CompilerExit:
            pass

    def write(self, x):
        """Write a string into the output stream."""
        if self._new_lines:
            if not self._first_write:
                self.stream.write('\n' * self._new_lines)
                self.code_lineno += self._new_lines
                if self._write_debug_info is not None:
                    self.debug_info.append((self._write_debug_info,
                                            self.code_lineno))
                    self._write_debug_info = None
            self._first_write = False
            self.stream.write('    ' * self._indentation)
            self._new_lines = 0
        self.stream.write(x)

    def writeline(self, x, node=None, extra=0):
        """Combination of newline and write."""
        self.newline(node, extra)
        self.write(x)

    def newline(self, node=None, extra=0):
        """Add one or more newlines before the next write."""
        self._new_lines = max(self._new_lines, 1 + extra)
        if node is not None and node.lineno != self._last_line:
            self._write_debug_info = node.lineno
            self._last_line = node.lineno

    def signature(self, node, frame, extra_kwargs=None):
        """Writes a function call to the stream for the current node.
        A leading comma is added automatically.  The extra keyword
        arguments may not include python keywords otherwise a syntax
        error could occur.  The extra keyword arguments should be given
        as python dict.
        """
        # if any of the given keyword arguments is a python keyword
        # we have to make sure that no invalid call is created.
        kwarg_workaround = False
        for kwarg in chain((x.key for x in node.kwargs), extra_kwargs or ()):
            if is_python_keyword(kwarg):
                kwarg_workaround = True
                break

        for arg in node.args:
            self.write(', ')
            self.visit(arg, frame)

        if not kwarg_workaround:
            for kwarg in node.kwargs:
                self.write(', ')
                self.visit(kwarg, frame)
            if extra_kwargs is not None:
                for key, value in iteritems(extra_kwargs):
                    self.write(', %s=%s' % (key, value))
        if node.dyn_args:
            self.write(', *')
            self.visit(node.dyn_args, frame)

        if kwarg_workaround:
            # a keyword argument collides with a python keyword, so all
            # keyword arguments are passed through a **{...} mapping instead
            # of regular name=value syntax
            if node.dyn_kwargs is not None:
                self.write(', **dict({')
            else:
                self.write(', **{')
            for kwarg in node.kwargs:
                self.write('%r: ' % kwarg.key)
                self.visit(kwarg.value, frame)
                self.write(', ')
            if extra_kwargs is not None:
                for key, value in iteritems(extra_kwargs):
                    self.write('%r: %s, ' % (key, value))
            if node.dyn_kwargs is not None:
                self.write('}, **')
                self.visit(node.dyn_kwargs, frame)
                self.write(')')
            else:
                self.write('}')

        elif node.dyn_kwargs is not None:
            self.write(', **')
            self.visit(node.dyn_kwargs, frame)

    def pull_locals(self, frame):
        """Pull all the references identifiers into the local scope."""
        for name in frame.identifiers.undeclared:
            self.writeline('l_%s = context.resolve(%r)' % (name, name))

    def pull_dependencies(self, nodes):
        """Pull all the dependencies (filters and tests used below *nodes*)."""
        visitor = DependencyFinderVisitor()
        for node in nodes:
            visitor.visit(node)
        for dependency in 'filters', 'tests':
            mapping = getattr(self, dependency)
            for name in getattr(visitor, dependency):
                if name not in mapping:
                    mapping[name] = self.temporary_identifier()
                self.writeline('%s = environment.%s[%r]' %
                               (mapping[name], dependency, name))

    def unoptimize_scope(self, frame):
        """Disable Python optimizations for the frame."""
        # XXX: this is not that nice but it has no real overhead.  It
        # mainly works because python finds the locals before dead code
        # is removed.  If that breaks we have to add a dummy function
        # that just accepts the arguments and does nothing.
        This will return the modified frame.
        """
        # we have to iterate twice over it, make sure that works
        if children is None:
            children = node.iter_child_nodes()
        children = list(children)
        func_frame = frame.inner()
        func_frame.inspect(children)

        # variables that are undeclared (accessed before declaration) and
        # declared locally *and* part of an outside scope raise a template
        # assertion error.  Reason: we can't generate reasonable code from
        # it without aliasing all the variables.
        # this could be fixed in Python 3 where we have the nonlocal
        # keyword or if we switch to bytecode generation
        overridden_closure_vars = (
            func_frame.identifiers.undeclared &
            func_frame.identifiers.declared &
            (func_frame.identifiers.declared_locally |
             func_frame.identifiers.declared_parameter)
        )
        if overridden_closure_vars:
            self.fail('It\'s not possible to set and access variables '
                      'derived from an outer scope! (affects: %s)' %
                      ', '.join(sorted(overridden_closure_vars)), node.lineno)

        # remove variables from a closure from the frame's undeclared
        # identifiers.
        func_frame.identifiers.undeclared -= (
            func_frame.identifiers.undeclared &
            func_frame.identifiers.declared
        )

        # no special variables for this scope, abort early
        if not find_special:
            return func_frame

        func_frame.accesses_kwargs = False
        func_frame.accesses_varargs = False
        func_frame.accesses_caller = False
        func_frame.arguments = args = ['l_' + x.name for x in node.args]

        # only wire up the special names that the body actually accesses
        undeclared = find_undeclared(children, ('caller', 'kwargs', 'varargs'))

        if 'caller' in undeclared:
            func_frame.accesses_caller = True
            func_frame.identifiers.add_special('caller')
            args.append('l_caller')
        if 'kwargs' in undeclared:
            func_frame.accesses_kwargs = True
            func_frame.identifiers.add_special('kwargs')
            args.append('l_kwargs')
        if 'varargs' in undeclared:
            func_frame.accesses_varargs = True
            func_frame.identifiers.add_special('varargs')
            args.append('l_varargs')
        return func_frame

    def macro_body(self, node, frame, children=None):
        """Dump the function def of a macro or call block."""
        frame = self.function_scoping(node, frame, children)
        # macros are delayed, they never require output checks
        frame.require_output_check = False
        args = frame.arguments
        # XXX: this is an ugly fix for the loop nesting bug
        # (tests.test_old_bugs.test_loop_call_bug).  This works around
        # a identifier nesting problem we have in general.  It's just more
        # likely to happen in loops which is why we work around it.  The
        # real solution would be "nonlocal" all the identifiers that are
        # leaking into a new python frame and might be used both unassigned
        # and assigned.
        if 'loop' in frame.identifiers.declared:
            args = args + ['l_loop=l_loop']
        self.writeline('def macro(%s):' % ', '.join(args), node)
        self.indent()
        self.buffer(frame)
        self.pull_locals(frame)
        self.blockvisit(node.body, frame)
        self.return_buffer_contents(frame)
        self.outdent()
        return frame

    def macro_def(self, node, frame):
        """Dump the macro definition for the def created by macro_body."""
        arg_tuple = ', '.join(repr(x.name) for x in node.args)
        name = getattr(node, 'name', None)
        # a trailing comma keeps a one-element tuple a tuple
        if len(node.args) == 1:
            arg_tuple += ','
        self.write('Macro(environment, macro, %r, (%s), (' %
                   (name, arg_tuple))
        for arg in node.defaults:
            self.visit(arg, frame)
            self.write(', ')
        self.write('), %r, %r, %r)' % (
            bool(frame.accesses_kwargs),
            bool(frame.accesses_varargs),
            bool(frame.accesses_caller)
        ))

    def position(self, node):
        """Return a human readable position for the node."""
        rv = 'line %d' % node.lineno
        if self.name is not None:
            rv += ' in ' + repr(self.name)
        return rv

    # -- Statement Visitors

    def visit_Template(self, node, frame=None):
        assert frame is None, 'no root frame allowed'
        eval_ctx = EvalContext(self.environment, self.name)

        from jinja2.runtime import __all__ as exported
        self.writeline('from __future__ import division')
        self.writeline('from jinja2.runtime import ' + ', '.join(exported))

        # the `dummy` helper is only needed when the dead-code trick of
        # unoptimize_scope does not work on this interpreter
        if not unoptimize_before_dead_code:
            self.writeline('dummy = lambda *x: None')

        # if we want a deferred initialization we cannot move the
        # environment into a local name
        envenv = not self.defer_init and ', environment=environment' or ''

        # do we have an extends tag at all?  If not, we can save some
        # overhead by just not processing any inheritance code.
        have_extends = node.find(nodes.Extends) is not None

        # find all blocks
        for block in node.find_all(nodes.Block):
            if block.name in self.blocks:
                self.fail('block %r defined twice' % block.name, block.lineno)
            self.blocks[block.name] = block

        # find all imports and import them
        for import_ in node.find_all(nodes.ImportedName):
            if import_.importname not in self.import_aliases:
                imp = import_.importname
                self.import_aliases[imp] = alias = self.temporary_identifier()
                if '.' in imp:
                    module, obj = imp.rsplit('.', 1)
                    self.writeline('from %s import %s as %s' %
                                   (module, obj, alias))
                else:
                    self.writeline('import %s as %s' % (imp, alias))

        # add the load name
        self.writeline('name = %r' % self.name)

        # generate the root render function.
        self.writeline('def root(context%s):' % envenv, extra=1)

        # process the root
        frame = Frame(eval_ctx)
        frame.inspect(node.body)
        frame.toplevel = frame.rootlevel = True
        frame.require_output_check = have_extends and not self.has_known_extends
        self.indent()
        if have_extends:
            self.writeline('parent_template = None')
        if 'self' in find_undeclared(node.body, ('self',)):
            frame.identifiers.add_special('self')
            self.writeline('l_self = TemplateReference(context)')
        self.pull_locals(frame)
        self.pull_dependencies(node.body)
        self.blockvisit(node.body, frame)
        self.outdent()

        # make sure that the parent root is called.
        if have_extends:
            if not self.has_known_extends:
                self.indent()
                self.writeline('if parent_template is not None:')
            self.indent()
            self.writeline('for event in parent_template.'
                           'root_render_func(context):')
            self.indent()
            self.writeline('yield event')
            self.outdent(2 + (not self.has_known_extends))

        # at this point we now have the blocks collected and can visit them too.
        for name, block in iteritems(self.blocks):
            block_frame = Frame(eval_ctx)
            block_frame.inspect(block.body)
            block_frame.block = name
            self.writeline('def block_%s(context%s):' % (name, envenv),
                           block, 1)
            self.indent()
            undeclared = find_undeclared(block.body, ('self', 'super'))
            if 'self' in undeclared:
                block_frame.identifiers.add_special('self')
                self.writeline('l_self = TemplateReference(context)')
            if 'super' in undeclared:
                block_frame.identifiers.add_special('super')
                self.writeline('l_super = context.super(%r, '
                               'block_%s)' % (name, name))
            self.pull_locals(block_frame)
            self.pull_dependencies(block.body)
            self.blockvisit(block.body, block_frame)
            self.outdent()

        self.writeline('blocks = {%s}' % ', '.join('%r: block_%s' % (x, x)
                                                   for x in self.blocks),
                       extra=1)

        # add a function that returns the debug info
        self.writeline('debug_info = %r' % '&'.join('%s=%s' % x for x
                                                    in self.debug_info))

    def visit_Block(self, node, frame):
        """Call a block and register it for the template."""
        level = 1
        if frame.toplevel:
            # if we know that we are a child template, there is no need to
            # check if we are one
            if self.has_known_extends:
                return
            if self.extends_so_far > 0:
                self.writeline('if parent_template is None:')
                self.indent()
                level += 1
        context = node.scoped and 'context.derived(locals())' or 'context'
        self.writeline('for event in context.blocks[%r][0](%s):' % (
                       node.name, context), node)
        self.indent()
        self.simple_write('event', frame)
        self.outdent(level)

    def visit_Extends(self, node, frame):
        """Calls the extender."""
        if not frame.toplevel:
            self.fail('cannot use extend from a non top-level scope',
                      node.lineno)

        # if the number of extends statements in general is zero so
        # far, we don't have to add a check if something extended
        # the template before this one.
        if self.extends_so_far > 0:

            # if we have a known extends we just add a template runtime
            # error into the generated code.  We could catch that at compile
            # time too, but i welcome it not to confuse users by throwing the
            # same error at different times just "because we can".
            if not self.has_known_extends:
                self.writeline('if parent_template is not None:')
                self.indent()
            self.writeline('raise TemplateRuntimeError(%r)' %
                           'extended multiple times')

            # if we have a known extends already we don't need that code here
            # as we know that the template execution will end here.
            if self.has_known_extends:
                raise CompilerExit()
            else:
                self.outdent()

        self.writeline('parent_template = environment.get_template(', node)
        self.visit(node.template, frame)
        self.write(', %r)' % self.name)
        self.writeline('for name, parent_block in parent_template.'
                       'blocks.%s():' % dict_item_iter)
        self.indent()
        self.writeline('context.blocks.setdefault(name, []).'
                       'append(parent_block)')
        self.outdent()

        # if this extends statement was in the root level we can take
        # advantage of that information and simplify the generated code
        # in the top level from this point onwards
        if frame.rootlevel:
            self.has_known_extends = True

        # and now we have one more
        self.extends_so_far += 1

    def visit_Include(self, node, frame):
        """Handles includes."""
        if node.with_context:
            self.unoptimize_scope(frame)
        if node.ignore_missing:
            self.writeline('try:')
            self.indent()

        # pick the narrowest environment lookup that matches the template
        # expression: constant string, constant list/tuple, or dynamic
        func_name = 'get_or_select_template'
        if isinstance(node.template, nodes.Const):
            if isinstance(node.template.value, string_types):
                func_name = 'get_template'
            elif isinstance(node.template.value, (tuple, list)):
                func_name = 'select_template'
        elif isinstance(node.template, (nodes.Tuple, nodes.List)):
            func_name = 'select_template'

        self.writeline('template = environment.%s(' % func_name, node)
        self.visit(node.template, frame)
        self.write(', %r)' % self.name)
        if node.ignore_missing:
            self.outdent()
            self.writeline('except TemplateNotFound:')
            self.indent()
            self.writeline('pass')
            self.outdent()
            self.writeline('else:')
            self.indent()

        if node.with_context:
            self.writeline('for event in template.root_render_func('
                           'template.new_context(context.parent, True, '
                           'locals())):')
        else:
            self.writeline('for event in template.module._body_stream:')

        self.indent()
        self.simple_write('event', frame)
        self.outdent()

        if node.ignore_missing:
            self.outdent()

    def visit_Import(self, node, frame):
        """Visit regular imports."""
        if node.with_context:
            self.unoptimize_scope(frame)
        self.writeline('l_%s = ' % node.target, node)
        if frame.toplevel:
            self.write('context.vars[%r] = ' % node.target)
        self.write('environment.get_template(')
        self.visit(node.template, frame)
        self.write(', %r).' % self.name)
        if node.with_context:
            self.write('make_module(context.parent, True, locals())')
        else:
            self.write('module')
        if frame.toplevel and not node.target.startswith('_'):
            self.writeline('context.exported_vars.discard(%r)' % node.target)
        frame.assigned_names.add(node.target)

    def visit_FromImport(self, node, frame):
        """Visit named imports."""
        self.newline(node)
        self.write('included_template = environment.get_template(')
        self.visit(node.template, frame)
        self.write(', %r).' % self.name)
        if node.with_context:
            self.write('make_module(context.parent, True)')
        else:
            self.write('module')

        var_names = []
        discarded_names = []
        for name in node.names:
            # names may be plain strings or (name, alias) tuples
            if isinstance(name, tuple):
                name, alias = name
            else:
                alias = name
            self.writeline('l_%s = getattr(included_template, '
                           '%r, missing)' % (alias, name))
            self.writeline('if l_%s is missing:' % alias)
            self.indent()
            self.writeline('l_%s = environment.undefined(%r %% '
                           'included_template.__name__, '
                           'name=%r)' %
                           (alias, 'the template %%r (imported on %s) does '
                           'not export the requested name %s' % (
                                self.position(node),
                                repr(name)
                           ), name))
            self.outdent()
            if frame.toplevel:
                var_names.append(alias)
                if not alias.startswith('_'):
                    discarded_names.append(alias)
            frame.assigned_names.add(alias)

        if var_names:
            if len(var_names) == 1:
                name = var_names[0]
                self.writeline('context.vars[%r] = l_%s' % (name, name))
            else:
                self.writeline('context.vars.update({%s})' % ', '.join(
                    '%r: l_%s' % (name, name) for name in var_names
                ))
        if discarded_names:
            if len(discarded_names) == 1:
                self.writeline('context.exported_vars.discard(%r)' %
                               discarded_names[0])
            else:
                self.writeline('context.exported_vars.difference_'
                               'update((%s))' % ', '.join(imap(repr, discarded_names)))

    def visit_For(self, node, frame):
        # when calculating the nodes for the inner frame we have to exclude
        # the iterator contents from it
        children = node.iter_child_nodes(exclude=('iter',))
        if node.recursive:
            loop_frame = self.function_scoping(node, frame, children,
                                               find_special=False)
        else:
            loop_frame = frame.inner()
            loop_frame.inspect(children)

        # try to figure out if we have an extended loop.  An extended loop
        # is necessary if the loop is in recursive mode if the special loop
        # variable is accessed in the body.
        extended_loop = node.recursive or 'loop' in \
                        find_undeclared(node.iter_child_nodes(
                            only=('body',)), ('loop',))

        # if we don't have a recursive loop we have to find the shadowed
        # variables at that point.
Because loops can be nested but the loop # variable is a special one we have to enforce aliasing for it. if not node.recursive: aliases = self.push_scope(loop_frame, ('loop',)) # otherwise we set up a buffer and add a function def else: self.writeline('def loop(reciter, loop_render_func, depth=0):', node) self.indent() self.buffer(loop_frame) aliases = {} # make sure the loop variable is a special one and raise a template # assertion error if a loop tries to write to loop if extended_loop: self.writeline('l_loop = missing') loop_frame.identifiers.add_special('loop') for name in node.find_all(nodes.Name): if name.ctx == 'store' and name.name == 'loop': self.fail('Can\'t assign to special loop variable ' 'in for-loop target', name.lineno) self.pull_locals(loop_frame) if node.else_: iteration_indicator = self.temporary_identifier() self.writeline('%s = 1' % iteration_indicator) # Create a fake parent loop if the else or test section of a # loop is accessing the special loop variable and no parent loop # exists. if 'loop' not in aliases and 'loop' in find_undeclared( node.iter_child_nodes(only=('else_', 'test')), ('loop',)): self.writeline("l_loop = environment.undefined(%r, name='loop')" % ("'loop' is undefined. the filter section of a loop as well " "as the else block don't have access to the special 'loop'" " variable of the current loop. Because there is no parent " "loop it's undefined. Happened in loop on %s" % self.position(node))) self.writeline('for ', node) self.visit(node.target, loop_frame) self.write(extended_loop and ', l_loop in LoopContext(' or ' in ') # if we have an extened loop and a node test, we filter in the # "outer frame". 
if extended_loop and node.test is not None: self.write('(') self.visit(node.target, loop_frame) self.write(' for ') self.visit(node.target, loop_frame) self.write(' in ') if node.recursive: self.write('reciter') else: self.visit(node.iter, loop_frame) self.write(' if (') test_frame = loop_frame.copy() self.visit(node.test, test_frame) self.write('))') elif node.recursive: self.write('reciter') else: self.visit(node.iter, loop_frame) if node.recursive: self.write(', loop_render_func, depth):') else: self.write(extended_loop and '):' or ':') # tests in not extended loops become a continue if not extended_loop and node.test is not None: self.indent() self.writeline('if not ') self.visit(node.test, loop_frame) self.write(':') self.indent() self.writeline('continue') self.outdent(2) self.indent() self.blockvisit(node.body, loop_frame) if node.else_: self.writeline('%s = 0' % iteration_indicator) self.outdent() if node.else_: self.writeline('if %s:' % iteration_indicator) self.indent() self.blockvisit(node.else_, loop_frame) self.outdent() # reset the aliases if there are any. 
if not node.recursive: self.pop_scope(aliases, loop_frame) # if the node was recursive we have to return the buffer contents # and start the iteration code if node.recursive: self.return_buffer_contents(loop_frame) self.outdent() self.start_write(frame, node) self.write('loop(') self.visit(node.iter, frame) self.write(', loop)') self.end_write(frame) def visit_If(self, node, frame): if_frame = frame.soft() self.writeline('if ', node) self.visit(node.test, if_frame) self.write(':') self.indent() self.blockvisit(node.body, if_frame) self.outdent() if node.else_: self.writeline('else:') self.indent() self.blockvisit(node.else_, if_frame) self.outdent() def visit_Macro(self, node, frame): macro_frame = self.macro_body(node, frame) self.newline() if frame.toplevel: if not node.name.startswith('_'): self.write('context.exported_vars.add(%r)' % node.name) self.writeline('context.vars[%r] = ' % node.name) self.write('l_%s = ' % node.name) self.macro_def(node, macro_frame) frame.assigned_names.add(node.name) def visit_CallBlock(self, node, frame): children = node.iter_child_nodes(exclude=('call',)) call_frame = self.macro_body(node, frame, children) self.writeline('caller = ') self.macro_def(node, call_frame) self.start_write(frame, node) self.visit_Call(node.call, call_frame, forward_caller=True) self.end_write(frame) def visit_FilterBlock(self, node, frame): filter_frame = frame.inner() filter_frame.inspect(node.iter_child_nodes()) aliases = self.push_scope(filter_frame) self.pull_locals(filter_frame) self.buffer(filter_frame) self.blockvisit(node.body, filter_frame) self.start_write(frame, node) self.visit_Filter(node.filter, filter_frame) self.end_write(frame) self.pop_scope(aliases, filter_frame) def visit_ExprStmt(self, node, frame): self.newline(node) self.visit(node.node, frame) def visit_Output(self, node, frame): # if we have a known extends statement, we don't output anything # if we are in a require_output_check section if self.has_known_extends and 
frame.require_output_check: return if self.environment.finalize: finalize = lambda x: text_type(self.environment.finalize(x)) else: finalize = text_type # if we are inside a frame that requires output checking, we do so outdent_later = False if frame.require_output_check: self.writeline('if parent_template is None:') self.indent() outdent_later = True # try to evaluate as many chunks as possible into a static # string at compile time. body = [] for child in node.nodes: try: const = child.as_const(frame.eval_ctx) except nodes.Impossible: body.append(child) continue # the frame can't be volatile here, becaus otherwise the # as_const() function would raise an Impossible exception # at that point. try: if frame.eval_ctx.autoescape: if hasattr(const, '__html__'): const = const.__html__() else: const = escape(const) const = finalize(const) except Exception: # if something goes wrong here we evaluate the node # at runtime for easier debugging body.append(child) continue if body and isinstance(body[-1], list): body[-1].append(const) else: body.append([const]) # if we have less than 3 nodes or a buffer we yield or extend/append if len(body) < 3 or frame.buffer is not None: if frame.buffer is not None: # for one item we append, for more we extend if len(body) == 1: self.writeline('%s.append(' % frame.buffer) else: self.writeline('%s.extend((' % frame.buffer) self.indent() for item in body: if isinstance(item, list): val = repr(concat(item)) if frame.buffer is None: self.writeline('yield ' + val) else: self.writeline(val + ', ') else: if frame.buffer is None: self.writeline('yield ', item) else: self.newline(item) close = 1 if frame.eval_ctx.volatile: self.write('(context.eval_ctx.autoescape and' ' escape or to_string)(') elif frame.eval_ctx.autoescape: self.write('escape(') else: self.write('to_string(') if self.environment.finalize is not None: self.write('environment.finalize(') close += 1 self.visit(item, frame) self.write(')' * close) if frame.buffer is not None: 
self.write(', ') if frame.buffer is not None: # close the open parentheses self.outdent() self.writeline(len(body) == 1 and ')' or '))') # otherwise we create a format string as this is faster in that case else: format = [] arguments = [] for item in body: if isinstance(item, list): format.append(concat(item).replace('%', '%%')) else: format.append('%s') arguments.append(item) self.writeline('yield ') self.write(repr(concat(format)) + ' % (') idx = -1 self.indent() for argument in arguments: self.newline(argument) close = 0 if frame.eval_ctx.volatile: self.write('(context.eval_ctx.autoescape and' ' escape or to_string)(') close += 1 elif frame.eval_ctx.autoescape: self.write('escape(') close += 1 if self.environment.finalize is not None: self.write('environment.finalize(') close += 1 self.visit(argument, frame) self.write(')' * close + ', ') self.outdent() self.writeline(')') if outdent_later: self.outdent() def visit_Assign(self, node, frame): self.newline(node) # toplevel assignments however go into the local namespace and # the current template's context. We create a copy of the frame # here and add a set so that the Name visitor can add the assigned # names here. if frame.toplevel: assignment_frame = frame.copy() assignment_frame.toplevel_assignments = set() else: assignment_frame = frame self.visit(node.target, assignment_frame) self.write(' = ') self.visit(node.node, frame) # make sure toplevel assignments are added to the context. 
if frame.toplevel: public_names = [x for x in assignment_frame.toplevel_assignments if not x.startswith('_')] if len(assignment_frame.toplevel_assignments) == 1: name = next(iter(assignment_frame.toplevel_assignments)) self.writeline('context.vars[%r] = l_%s' % (name, name)) else: self.writeline('context.vars.update({') for idx, name in enumerate(assignment_frame.toplevel_assignments): if idx: self.write(', ') self.write('%r: l_%s' % (name, name)) self.write('})') if public_names: if len(public_names) == 1: self.writeline('context.exported_vars.add(%r)' % public_names[0]) else: self.writeline('context.exported_vars.update((%s))' % ', '.join(imap(repr, public_names))) # -- Expression Visitors def visit_Name(self, node, frame): if node.ctx == 'store' and frame.toplevel: frame.toplevel_assignments.add(node.name) self.write('l_' + node.name) frame.assigned_names.add(node.name) def visit_Const(self, node, frame): val = node.value if isinstance(val, float): self.write(str(val)) else: self.write(repr(val)) def visit_TemplateData(self, node, frame): try: self.write(repr(node.as_const(frame.eval_ctx))) except nodes.Impossible: self.write('(context.eval_ctx.autoescape and Markup or identity)(%r)' % node.data) def visit_Tuple(self, node, frame): self.write('(') idx = -1 for idx, item in enumerate(node.items): if idx: self.write(', ') self.visit(item, frame) self.write(idx == 0 and ',)' or ')') def visit_List(self, node, frame): self.write('[') for idx, item in enumerate(node.items): if idx: self.write(', ') self.visit(item, frame) self.write(']') def visit_Dict(self, node, frame): self.write('{') for idx, item in enumerate(node.items): if idx: self.write(', ') self.visit(item.key, frame) self.write(': ') self.visit(item.value, frame) self.write('}') def binop(operator, interceptable=True): def visitor(self, node, frame): if self.environment.sandboxed and \ operator in self.environment.intercepted_binops: self.write('environment.call_binop(context, %r, ' % operator) 
self.visit(node.left, frame) self.write(', ') self.visit(node.right, frame) else: self.write('(') self.visit(node.left, frame) self.write(' %s ' % operator) self.visit(node.right, frame) self.write(')') return visitor def uaop(operator, interceptable=True): def visitor(self, node, frame): if self.environment.sandboxed and \ operator in self.environment.intercepted_unops: self.write('environment.call_unop(context, %r, ' % operator) self.visit(node.node, frame) else: self.write('(' + operator) self.visit(node.node, frame) self.write(')') return visitor visit_Add = binop('+') visit_Sub = binop('-') visit_Mul = binop('*') visit_Div = binop('/') visit_FloorDiv = binop('//') visit_Pow = binop('**') visit_Mod = binop('%') visit_And = binop('and', interceptable=False) visit_Or = binop('or', interceptable=False) visit_Pos = uaop('+') visit_Neg = uaop('-') visit_Not = uaop('not ', interceptable=False) del binop, uaop def visit_Concat(self, node, frame): if frame.eval_ctx.volatile: func_name = '(context.eval_ctx.volatile and' \ ' markup_join or unicode_join)' elif frame.eval_ctx.autoescape: func_name = 'markup_join' else: func_name = 'unicode_join' self.write('%s((' % func_name) for arg in node.nodes: self.visit(arg, frame) self.write(', ') self.write('))') def visit_Compare(self, node, frame): self.visit(node.expr, frame) for op in node.ops: self.visit(op, frame) def visit_Operand(self, node, frame): self.write(' %s ' % operators[node.op]) self.visit(node.expr, frame) def visit_Getattr(self, node, frame): self.write('environment.getattr(') self.visit(node.node, frame) self.write(', %r)' % node.attr) def visit_Getitem(self, node, frame): # slices bypass the environment getitem method. 
if isinstance(node.arg, nodes.Slice): self.visit(node.node, frame) self.write('[') self.visit(node.arg, frame) self.write(']') else: self.write('environment.getitem(') self.visit(node.node, frame) self.write(', ') self.visit(node.arg, frame) self.write(')') def visit_Slice(self, node, frame): if node.start is not None: self.visit(node.start, frame) self.write(':') if node.stop is not None: self.visit(node.stop, frame) if node.step is not None: self.write(':') self.visit(node.step, frame) def visit_Filter(self, node, frame): self.write(self.filters[node.name] + '(') func = self.environment.filters.get(node.name) if func is None: self.fail('no filter named %r' % node.name, node.lineno) if getattr(func, 'contextfilter', False): self.write('context, ') elif getattr(func, 'evalcontextfilter', False): self.write('context.eval_ctx, ') elif getattr(func, 'environmentfilter', False): self.write('environment, ') # if the filter node is None we are inside a filter block # and want to write to the current buffer if node.node is not None: self.visit(node.node, frame) elif frame.eval_ctx.volatile: self.write('(context.eval_ctx.autoescape and' ' Markup(concat(%s)) or concat(%s))' % (frame.buffer, frame.buffer)) elif frame.eval_ctx.autoescape: self.write('Markup(concat(%s))' % frame.buffer) else: self.write('concat(%s)' % frame.buffer) self.signature(node, frame) self.write(')') def visit_Test(self, node, frame): self.write(self.tests[node.name] + '(') if node.name not in self.environment.tests: self.fail('no test named %r' % node.name, node.lineno) self.visit(node.node, frame) self.signature(node, frame) self.write(')') def visit_CondExpr(self, node, frame): def write_expr2(): if node.expr2 is not None: return self.visit(node.expr2, frame) self.write('environment.undefined(%r)' % ('the inline if-' 'expression on %s evaluated to false and ' 'no else section was defined.' 
% self.position(node))) self.write('(') self.visit(node.expr1, frame) self.write(' if ') self.visit(node.test, frame) self.write(' else ') write_expr2() self.write(')') def visit_Call(self, node, frame, forward_caller=False): if self.environment.sandboxed: self.write('environment.call(context, ') else: self.write('context.call(') self.visit(node.node, frame) extra_kwargs = forward_caller and {'caller': 'caller'} or None self.signature(node, frame, extra_kwargs) self.write(')') def visit_Keyword(self, node, frame): self.write(node.key + '=') self.visit(node.value, frame) # -- Unused nodes for extensions def visit_MarkSafe(self, node, frame): self.write('Markup(') self.visit(node.expr, frame) self.write(')') def visit_MarkSafeIfAutoescape(self, node, frame): self.write('(context.eval_ctx.autoescape and Markup or identity)(') self.visit(node.expr, frame) self.write(')') def visit_EnvironmentAttribute(self, node, frame): self.write('environment.' + node.name) def visit_ExtensionAttribute(self, node, frame): self.write('environment.extensions[%r].%s' % (node.identifier, node.name)) def visit_ImportedName(self, node, frame): self.write(self.import_aliases[node.importname]) def visit_InternalName(self, node, frame): self.write(node.name) def visit_ContextReference(self, node, frame): self.write('context') def visit_Continue(self, node, frame): self.writeline('continue', node) def visit_Break(self, node, frame): self.writeline('break', node) def visit_Scope(self, node, frame): scope_frame = frame.inner() scope_frame.inspect(node.iter_child_nodes()) aliases = self.push_scope(scope_frame) self.pull_locals(scope_frame) self.blockvisit(node.body, scope_frame) self.pop_scope(aliases, scope_frame) def visit_EvalContextModifier(self, node, frame): for keyword in node.options: self.writeline('context.eval_ctx.%s = ' % keyword.key) self.visit(keyword.value, frame) try: val = keyword.value.as_const(frame.eval_ctx) except nodes.Impossible: frame.eval_ctx.volatile = True else: 
setattr(frame.eval_ctx, keyword.key, val) def visit_ScopedEvalContextModifier(self, node, frame): old_ctx_name = self.temporary_identifier() safed_ctx = frame.eval_ctx.save() self.writeline('%s = context.eval_ctx.save()' % old_ctx_name) self.visit_EvalContextModifier(node, frame) for child in node.body: self.visit(child, frame) frame.eval_ctx.revert(safed_ctx) self.writeline('context.eval_ctx.revert(%s)' % old_ctx_name)
gpl-2.0
innainu/climatechangebot
climatechangebot/nyt_interface/nyt_interface.py
1
3194
""" Searches nyt API for articles related to climate change and returns them. API returns pages, each page has 10 articles. This currently only returns one page. Cannot specify how many articles per page in api. get_trending function returns articles for the past 3 days. """ from nytimesarticle import articleAPI from datetime import datetime, timedelta import random class NytimesApi(object): def __init__(self, key): self.api = articleAPI(key) self.secret_keyword = "climate change and " self.num_days_trending = 3 def return_all(self, query): """ Keep query as is if climate change is in the query If not, add climate change """ if 'climate change' in query: return self.api.search(q=query) return self.api.search(q=self.secret_keyword + query) def return_trending(self): """ NYT API only accepts dates that are of format: YYYYMMDD """ first_date = datetime.today() - timedelta(self.num_days_trending) first_date = first_date.strftime("%Y%m%d") return self.api.search(q=self.secret_keyword, begin_date=first_date) def return_content(self, res): article = {} # print res if res.get('headline'): article['title'] = res['headline'].get('main') else: # Don't include article if there is no title return None if res.get('abstract'): article['abstract'] = res.get('abstract') else: if res.get('lead_paragraph'): article['abstract'] = res.get('lead_paragraph') elif res.get('snippet'): article['abstract'] = res.get('snippet') else: # Don't include article if abstract, lead_paragraph or snippet don't exist return None article['_id'] = res.get('_id') article['source'] = res.get('source') article['web_url'] = res.get('web_url') if len(res.get('multimedia')) > 0: article['image_url'] = 'http://nytimes.com/' + res['multimedia'][0]['url'] article['date'] = res.get('pub_date') return article def clean_response(self, results, num, randomize=False): articles = [] count = 0 if results.get('response'): for doc in results['response'].get('docs'): article_content = self.return_content(doc) if 
article_content is None: continue articles.append(article_content) count += 1 if count == num: break if randomize: random.shuffle(articles) return articles def return_article_list(self, query, num=1, randomize=False): results = self.return_all(query) print results articles = self.clean_response(results, num, randomize) return articles def return_trending_list(self, num=6, randomize=False): results = self.return_trending() articles = self.clean_response(results, num, randomize) return articles
mit
lshain-android-source/external-chromium_org
third_party/jinja2/environment.py
111
44067
# -*- coding: utf-8 -*- """ jinja2.environment ~~~~~~~~~~~~~~~~~~ Provides a class that holds runtime and parsing time options. :copyright: (c) 2010 by the Jinja Team. :license: BSD, see LICENSE for more details. """ import os import sys from jinja2 import nodes from jinja2.defaults import * from jinja2.lexer import get_lexer, TokenStream from jinja2.parser import Parser from jinja2.optimizer import optimize from jinja2.compiler import generate from jinja2.runtime import Undefined, new_context from jinja2.exceptions import TemplateSyntaxError, TemplateNotFound, \ TemplatesNotFound from jinja2.utils import import_string, LRUCache, Markup, missing, \ concat, consume, internalcode, _encode_filename # for direct template usage we have up to ten living environments _spontaneous_environments = LRUCache(10) # the function to create jinja traceback objects. This is dynamically # imported on the first exception in the exception handler. _make_traceback = None def get_spontaneous_environment(*args): """Return a new spontaneous environment. A spontaneous environment is an unnamed and unaccessible (in theory) environment that is used for templates generated from a string and not from the file system. """ try: env = _spontaneous_environments.get(args) except TypeError: return Environment(*args) if env is not None: return env _spontaneous_environments[args] = env = Environment(*args) env.shared = True return env def create_cache(size): """Return the cache class for the given size.""" if size == 0: return None if size < 0: return {} return LRUCache(size) def copy_cache(cache): """Create an empty copy of the given cache.""" if cache is None: return None elif type(cache) is dict: return {} return LRUCache(cache.capacity) def load_extensions(environment, extensions): """Load the extensions from the list and bind it to the environment. Returns a dict of instanciated environments. 
""" result = {} for extension in extensions: if isinstance(extension, basestring): extension = import_string(extension) result[extension.identifier] = extension(environment) return result def _environment_sanity_check(environment): """Perform a sanity check on the environment.""" assert issubclass(environment.undefined, Undefined), 'undefined must ' \ 'be a subclass of undefined because filters depend on it.' assert environment.block_start_string != \ environment.variable_start_string != \ environment.comment_start_string, 'block, variable and comment ' \ 'start strings must be different' assert environment.newline_sequence in ('\r', '\r\n', '\n'), \ 'newline_sequence set to unknown line ending string.' return environment class Environment(object): r"""The core component of Jinja is the `Environment`. It contains important shared variables like configuration, filters, tests, globals and others. Instances of this class may be modified if they are not shared and if no template was loaded so far. Modifications on environments after the first template was loaded will lead to surprising effects and undefined behavior. Here the possible initialization parameters: `block_start_string` The string marking the begin of a block. Defaults to ``'{%'``. `block_end_string` The string marking the end of a block. Defaults to ``'%}'``. `variable_start_string` The string marking the begin of a print statement. Defaults to ``'{{'``. `variable_end_string` The string marking the end of a print statement. Defaults to ``'}}'``. `comment_start_string` The string marking the begin of a comment. Defaults to ``'{#'``. `comment_end_string` The string marking the end of a comment. Defaults to ``'#}'``. `line_statement_prefix` If given and a string, this will be used as prefix for line based statements. See also :ref:`line-statements`. `line_comment_prefix` If given and a string, this will be used as prefix for line based based comments. See also :ref:`line-statements`. .. 
versionadded:: 2.2 `trim_blocks` If this is set to ``True`` the first newline after a block is removed (block, not variable tag!). Defaults to `False`. `newline_sequence` The sequence that starts a newline. Must be one of ``'\r'``, ``'\n'`` or ``'\r\n'``. The default is ``'\n'`` which is a useful default for Linux and OS X systems as well as web applications. `extensions` List of Jinja extensions to use. This can either be import paths as strings or extension classes. For more information have a look at :ref:`the extensions documentation <jinja-extensions>`. `optimized` should the optimizer be enabled? Default is `True`. `undefined` :class:`Undefined` or a subclass of it that is used to represent undefined values in the template. `finalize` A callable that can be used to process the result of a variable expression before it is output. For example one can convert `None` implicitly into an empty string here. `autoescape` If set to true the XML/HTML autoescaping feature is enabled by default. For more details about auto escaping see :class:`~jinja2.utils.Markup`. As of Jinja 2.4 this can also be a callable that is passed the template name and has to return `True` or `False` depending on autoescape should be enabled by default. .. versionchanged:: 2.4 `autoescape` can now be a function `loader` The template loader for this environment. `cache_size` The size of the cache. Per default this is ``50`` which means that if more than 50 templates are loaded the loader will clean out the least recently used template. If the cache size is set to ``0`` templates are recompiled all the time, if the cache size is ``-1`` the cache will not be cleaned. `auto_reload` Some loaders load templates from locations where the template sources may change (ie: file system or database). If `auto_reload` is set to `True` (default) every time a template is requested the loader checks if the source changed and if yes, it will reload the template. 
For higher performance it's possible to disable that. `bytecode_cache` If set to a bytecode cache object, this object will provide a cache for the internal Jinja bytecode so that templates don't have to be parsed if they were not changed. See :ref:`bytecode-cache` for more information. """ #: if this environment is sandboxed. Modifying this variable won't make #: the environment sandboxed though. For a real sandboxed environment #: have a look at jinja2.sandbox. This flag alone controls the code #: generation by the compiler. sandboxed = False #: True if the environment is just an overlay overlayed = False #: the environment this environment is linked to if it is an overlay linked_to = None #: shared environments have this set to `True`. A shared environment #: must not be modified shared = False #: these are currently EXPERIMENTAL undocumented features. exception_handler = None exception_formatter = None def __init__(self, block_start_string=BLOCK_START_STRING, block_end_string=BLOCK_END_STRING, variable_start_string=VARIABLE_START_STRING, variable_end_string=VARIABLE_END_STRING, comment_start_string=COMMENT_START_STRING, comment_end_string=COMMENT_END_STRING, line_statement_prefix=LINE_STATEMENT_PREFIX, line_comment_prefix=LINE_COMMENT_PREFIX, trim_blocks=TRIM_BLOCKS, newline_sequence=NEWLINE_SEQUENCE, extensions=(), optimized=True, undefined=Undefined, finalize=None, autoescape=False, loader=None, cache_size=50, auto_reload=True, bytecode_cache=None): # !!Important notice!! # The constructor accepts quite a few arguments that should be # passed by keyword rather than position. However it's important to # not change the order of arguments because it's used at least # internally in those cases: # - spontaneus environments (i18n extension and Template) # - unittests # If parameter changes are required only add parameters at the end # and don't change the arguments (or the defaults!) of the arguments # existing already. 
# lexer / parser information self.block_start_string = block_start_string self.block_end_string = block_end_string self.variable_start_string = variable_start_string self.variable_end_string = variable_end_string self.comment_start_string = comment_start_string self.comment_end_string = comment_end_string self.line_statement_prefix = line_statement_prefix self.line_comment_prefix = line_comment_prefix self.trim_blocks = trim_blocks self.newline_sequence = newline_sequence # runtime information self.undefined = undefined self.optimized = optimized self.finalize = finalize self.autoescape = autoescape # defaults self.filters = DEFAULT_FILTERS.copy() self.tests = DEFAULT_TESTS.copy() self.globals = DEFAULT_NAMESPACE.copy() # set the loader provided self.loader = loader self.bytecode_cache = None self.cache = create_cache(cache_size) self.bytecode_cache = bytecode_cache self.auto_reload = auto_reload # load extensions self.extensions = load_extensions(self, extensions) _environment_sanity_check(self) def add_extension(self, extension): """Adds an extension after the environment was created. .. versionadded:: 2.5 """ self.extensions.update(load_extensions(self, [extension])) def extend(self, **attributes): """Add the items to the instance of the environment if they do not exist yet. This is used by :ref:`extensions <writing-extensions>` to register callbacks and configuration values without breaking inheritance. 
""" for key, value in attributes.iteritems(): if not hasattr(self, key): setattr(self, key, value) def overlay(self, block_start_string=missing, block_end_string=missing, variable_start_string=missing, variable_end_string=missing, comment_start_string=missing, comment_end_string=missing, line_statement_prefix=missing, line_comment_prefix=missing, trim_blocks=missing, extensions=missing, optimized=missing, undefined=missing, finalize=missing, autoescape=missing, loader=missing, cache_size=missing, auto_reload=missing, bytecode_cache=missing): """Create a new overlay environment that shares all the data with the current environment except of cache and the overridden attributes. Extensions cannot be removed for an overlayed environment. An overlayed environment automatically gets all the extensions of the environment it is linked to plus optional extra extensions. Creating overlays should happen after the initial environment was set up completely. Not all attributes are truly linked, some are just copied over so modifications on the original environment may not shine through. 
""" args = dict(locals()) del args['self'], args['cache_size'], args['extensions'] rv = object.__new__(self.__class__) rv.__dict__.update(self.__dict__) rv.overlayed = True rv.linked_to = self for key, value in args.iteritems(): if value is not missing: setattr(rv, key, value) if cache_size is not missing: rv.cache = create_cache(cache_size) else: rv.cache = copy_cache(self.cache) rv.extensions = {} for key, value in self.extensions.iteritems(): rv.extensions[key] = value.bind(rv) if extensions is not missing: rv.extensions.update(load_extensions(rv, extensions)) return _environment_sanity_check(rv) lexer = property(get_lexer, doc="The lexer for this environment.") def iter_extensions(self): """Iterates over the extensions by priority.""" return iter(sorted(self.extensions.values(), key=lambda x: x.priority)) def getitem(self, obj, argument): """Get an item or attribute of an object but prefer the item.""" try: return obj[argument] except (TypeError, LookupError): if isinstance(argument, basestring): try: attr = str(argument) except Exception: pass else: try: return getattr(obj, attr) except AttributeError: pass return self.undefined(obj=obj, name=argument) def getattr(self, obj, attribute): """Get an item or attribute of an object but prefer the attribute. Unlike :meth:`getitem` the attribute *must* be a bytestring. """ try: return getattr(obj, attribute) except AttributeError: pass try: return obj[attribute] except (TypeError, LookupError, AttributeError): return self.undefined(obj=obj, name=attribute) @internalcode def parse(self, source, name=None, filename=None): """Parse the sourcecode and return the abstract syntax tree. This tree of nodes is used by the compiler to convert the template into executable source- or bytecode. This is useful for debugging or to extract information from templates. If you are :ref:`developing Jinja2 extensions <writing-extensions>` this gives you a good overview of the node tree generated. 
""" try: return self._parse(source, name, filename) except TemplateSyntaxError: exc_info = sys.exc_info() self.handle_exception(exc_info, source_hint=source) def _parse(self, source, name, filename): """Internal parsing function used by `parse` and `compile`.""" return Parser(self, source, name, _encode_filename(filename)).parse() def lex(self, source, name=None, filename=None): """Lex the given sourcecode and return a generator that yields tokens as tuples in the form ``(lineno, token_type, value)``. This can be useful for :ref:`extension development <writing-extensions>` and debugging templates. This does not perform preprocessing. If you want the preprocessing of the extensions to be applied you have to filter source through the :meth:`preprocess` method. """ source = unicode(source) try: return self.lexer.tokeniter(source, name, filename) except TemplateSyntaxError: exc_info = sys.exc_info() self.handle_exception(exc_info, source_hint=source) def preprocess(self, source, name=None, filename=None): """Preprocesses the source with all extensions. This is automatically called for all parsing and compiling methods but *not* for :meth:`lex` because there you usually only want the actual source tokenized. """ return reduce(lambda s, e: e.preprocess(s, name, filename), self.iter_extensions(), unicode(source)) def _tokenize(self, source, name, filename=None, state=None): """Called by the parser to do the preprocessing and filtering for all the extensions. Returns a :class:`~jinja2.lexer.TokenStream`. """ source = self.preprocess(source, name, filename) stream = self.lexer.tokenize(source, name, filename, state) for ext in self.iter_extensions(): stream = ext.filter_stream(stream) if not isinstance(stream, TokenStream): stream = TokenStream(stream, name, filename) return stream def _generate(self, source, name, filename, defer_init=False): """Internal hook that can be overriden to hook a different generate method in. .. 
versionadded:: 2.5 """ return generate(source, self, name, filename, defer_init=defer_init) def _compile(self, source, filename): """Internal hook that can be overriden to hook a different compile method in. .. versionadded:: 2.5 """ return compile(source, filename, 'exec') @internalcode def compile(self, source, name=None, filename=None, raw=False, defer_init=False): """Compile a node or template source code. The `name` parameter is the load name of the template after it was joined using :meth:`join_path` if necessary, not the filename on the file system. the `filename` parameter is the estimated filename of the template on the file system. If the template came from a database or memory this can be omitted. The return value of this method is a python code object. If the `raw` parameter is `True` the return value will be a string with python code equivalent to the bytecode returned otherwise. This method is mainly used internally. `defer_init` is use internally to aid the module code generator. This causes the generated code to be able to import without the global environment variable to be set. .. versionadded:: 2.4 `defer_init` parameter added. """ source_hint = None try: if isinstance(source, basestring): source_hint = source source = self._parse(source, name, filename) if self.optimized: source = optimize(source, self) source = self._generate(source, name, filename, defer_init=defer_init) if raw: return source if filename is None: filename = '<template>' else: filename = _encode_filename(filename) return self._compile(source, filename) except TemplateSyntaxError: exc_info = sys.exc_info() self.handle_exception(exc_info, source_hint=source) def compile_expression(self, source, undefined_to_none=True): """A handy helper method that returns a callable that accepts keyword arguments that appear as variables in the expression. If called it returns the result of the expression. 
This is useful if applications want to use the same rules as Jinja in template "configuration files" or similar situations. Example usage: >>> env = Environment() >>> expr = env.compile_expression('foo == 42') >>> expr(foo=23) False >>> expr(foo=42) True Per default the return value is converted to `None` if the expression returns an undefined value. This can be changed by setting `undefined_to_none` to `False`. >>> env.compile_expression('var')() is None True >>> env.compile_expression('var', undefined_to_none=False)() Undefined .. versionadded:: 2.1 """ parser = Parser(self, source, state='variable') exc_info = None try: expr = parser.parse_expression() if not parser.stream.eos: raise TemplateSyntaxError('chunk after expression', parser.stream.current.lineno, None, None) expr.set_environment(self) except TemplateSyntaxError: exc_info = sys.exc_info() if exc_info is not None: self.handle_exception(exc_info, source_hint=source) body = [nodes.Assign(nodes.Name('result', 'store'), expr, lineno=1)] template = self.from_string(nodes.Template(body, lineno=1)) return TemplateExpression(template, undefined_to_none) def compile_templates(self, target, extensions=None, filter_func=None, zip='deflated', log_function=None, ignore_errors=True, py_compile=False): """Finds all the templates the loader can find, compiles them and stores them in `target`. If `zip` is `None`, instead of in a zipfile, the templates will be will be stored in a directory. By default a deflate zip algorithm is used, to switch to the stored algorithm, `zip` can be set to ``'stored'``. `extensions` and `filter_func` are passed to :meth:`list_templates`. Each template returned will be compiled to the target folder or zipfile. By default template compilation errors are ignored. In case a log function is provided, errors are logged. If you want template syntax errors to abort the compilation you can set `ignore_errors` to `False` and you will get an exception on syntax errors. 
If `py_compile` is set to `True` .pyc files will be written to the target instead of standard .py files. .. versionadded:: 2.4 """ from jinja2.loaders import ModuleLoader if log_function is None: log_function = lambda x: None if py_compile: import imp, marshal py_header = imp.get_magic() + \ u'\xff\xff\xff\xff'.encode('iso-8859-15') def write_file(filename, data, mode): if zip: info = ZipInfo(filename) info.external_attr = 0755 << 16L zip_file.writestr(info, data) else: f = open(os.path.join(target, filename), mode) try: f.write(data) finally: f.close() if zip is not None: from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED, ZIP_STORED zip_file = ZipFile(target, 'w', dict(deflated=ZIP_DEFLATED, stored=ZIP_STORED)[zip]) log_function('Compiling into Zip archive "%s"' % target) else: if not os.path.isdir(target): os.makedirs(target) log_function('Compiling into folder "%s"' % target) try: for name in self.list_templates(extensions, filter_func): source, filename, _ = self.loader.get_source(self, name) try: code = self.compile(source, name, filename, True, True) except TemplateSyntaxError, e: if not ignore_errors: raise log_function('Could not compile "%s": %s' % (name, e)) continue filename = ModuleLoader.get_module_filename(name) if py_compile: c = self._compile(code, _encode_filename(filename)) write_file(filename + 'c', py_header + marshal.dumps(c), 'wb') log_function('Byte-compiled "%s" as %s' % (name, filename + 'c')) else: write_file(filename, code, 'w') log_function('Compiled "%s" as %s' % (name, filename)) finally: if zip: zip_file.close() log_function('Finished compiling templates') def list_templates(self, extensions=None, filter_func=None): """Returns a list of templates for this environment. This requires that the loader supports the loader's :meth:`~BaseLoader.list_templates` method. If there are other files in the template folder besides the actual templates, the returned list can be filtered. 
There are two ways: either `extensions` is set to a list of file extensions for templates, or a `filter_func` can be provided which is a callable that is passed a template name and should return `True` if it should end up in the result list. If the loader does not support that, a :exc:`TypeError` is raised. .. versionadded:: 2.4 """ x = self.loader.list_templates() if extensions is not None: if filter_func is not None: raise TypeError('either extensions or filter_func ' 'can be passed, but not both') filter_func = lambda x: '.' in x and \ x.rsplit('.', 1)[1] in extensions if filter_func is not None: x = filter(filter_func, x) return x def handle_exception(self, exc_info=None, rendered=False, source_hint=None): """Exception handling helper. This is used internally to either raise rewritten exceptions or return a rendered traceback for the template. """ global _make_traceback if exc_info is None: exc_info = sys.exc_info() # the debugging module is imported when it's used for the first time. # we're doing a lot of stuff there and for applications that do not # get any exceptions in template rendering there is no need to load # all of that. if _make_traceback is None: from jinja2.debug import make_traceback as _make_traceback traceback = _make_traceback(exc_info, source_hint) if rendered and self.exception_formatter is not None: return self.exception_formatter(traceback) if self.exception_handler is not None: self.exception_handler(traceback) exc_type, exc_value, tb = traceback.standard_exc_info raise exc_type, exc_value, tb def join_path(self, template, parent): """Join a template with the parent. By default all the lookups are relative to the loader root so this method returns the `template` parameter unchanged, but if the paths should be relative to the parent template, this function can be used to calculate the real template name. Subclasses may override this method and implement template path joining here. 
""" return template @internalcode def _load_template(self, name, globals): if self.loader is None: raise TypeError('no loader for this environment specified') if self.cache is not None: template = self.cache.get(name) if template is not None and (not self.auto_reload or \ template.is_up_to_date): return template template = self.loader.load(self, name, globals) if self.cache is not None: self.cache[name] = template return template @internalcode def get_template(self, name, parent=None, globals=None): """Load a template from the loader. If a loader is configured this method ask the loader for the template and returns a :class:`Template`. If the `parent` parameter is not `None`, :meth:`join_path` is called to get the real template name before loading. The `globals` parameter can be used to provide template wide globals. These variables are available in the context at render time. If the template does not exist a :exc:`TemplateNotFound` exception is raised. .. versionchanged:: 2.4 If `name` is a :class:`Template` object it is returned from the function unchanged. """ if isinstance(name, Template): return name if parent is not None: name = self.join_path(name, parent) return self._load_template(name, self.make_globals(globals)) @internalcode def select_template(self, names, parent=None, globals=None): """Works like :meth:`get_template` but tries a number of templates before it fails. If it cannot find any of the templates, it will raise a :exc:`TemplatesNotFound` exception. .. versionadded:: 2.3 .. versionchanged:: 2.4 If `names` contains a :class:`Template` object it is returned from the function unchanged. 
""" if not names: raise TemplatesNotFound(message=u'Tried to select from an empty list ' u'of templates.') globals = self.make_globals(globals) for name in names: if isinstance(name, Template): return name if parent is not None: name = self.join_path(name, parent) try: return self._load_template(name, globals) except TemplateNotFound: pass raise TemplatesNotFound(names) @internalcode def get_or_select_template(self, template_name_or_list, parent=None, globals=None): """Does a typecheck and dispatches to :meth:`select_template` if an iterable of template names is given, otherwise to :meth:`get_template`. .. versionadded:: 2.3 """ if isinstance(template_name_or_list, basestring): return self.get_template(template_name_or_list, parent, globals) elif isinstance(template_name_or_list, Template): return template_name_or_list return self.select_template(template_name_or_list, parent, globals) def from_string(self, source, globals=None, template_class=None): """Load a template from a string. This parses the source given and returns a :class:`Template` object. """ globals = self.make_globals(globals) cls = template_class or self.template_class return cls.from_code(self, self.compile(source), globals, None) def make_globals(self, d): """Return a dict for the globals.""" if not d: return self.globals return dict(self.globals, **d) class Template(object): """The central template object. This class represents a compiled template and is used to evaluate it. Normally the template object is generated from an :class:`Environment` but it also has a constructor that makes it possible to create a template instance directly using the constructor. It takes the same arguments as the environment constructor but it's not possible to specify a loader. Every template object has a few methods and members that are guaranteed to exist. However it's important that a template object should be considered immutable. Modifications on the object are not supported. 
Template objects created from the constructor rather than an environment do have an `environment` attribute that points to a temporary environment that is probably shared with other templates created with the constructor and compatible settings. >>> template = Template('Hello {{ name }}!') >>> template.render(name='John Doe') u'Hello John Doe!' >>> stream = template.stream(name='John Doe') >>> stream.next() u'Hello John Doe!' >>> stream.next() Traceback (most recent call last): ... StopIteration """ def __new__(cls, source, block_start_string=BLOCK_START_STRING, block_end_string=BLOCK_END_STRING, variable_start_string=VARIABLE_START_STRING, variable_end_string=VARIABLE_END_STRING, comment_start_string=COMMENT_START_STRING, comment_end_string=COMMENT_END_STRING, line_statement_prefix=LINE_STATEMENT_PREFIX, line_comment_prefix=LINE_COMMENT_PREFIX, trim_blocks=TRIM_BLOCKS, newline_sequence=NEWLINE_SEQUENCE, extensions=(), optimized=True, undefined=Undefined, finalize=None, autoescape=False): env = get_spontaneous_environment( block_start_string, block_end_string, variable_start_string, variable_end_string, comment_start_string, comment_end_string, line_statement_prefix, line_comment_prefix, trim_blocks, newline_sequence, frozenset(extensions), optimized, undefined, finalize, autoescape, None, 0, False, None) return env.from_string(source, template_class=cls) @classmethod def from_code(cls, environment, code, globals, uptodate=None): """Creates a template object from compiled code and the globals. This is used by the loaders and environment to create a template object. """ namespace = { 'environment': environment, '__file__': code.co_filename } exec code in namespace rv = cls._from_namespace(environment, namespace, globals) rv._uptodate = uptodate return rv @classmethod def from_module_dict(cls, environment, module_dict, globals): """Creates a template object from a module. This is used by the module loader to create a template object. .. 
versionadded:: 2.4 """ return cls._from_namespace(environment, module_dict, globals) @classmethod def _from_namespace(cls, environment, namespace, globals): t = object.__new__(cls) t.environment = environment t.globals = globals t.name = namespace['name'] t.filename = namespace['__file__'] t.blocks = namespace['blocks'] # render function and module t.root_render_func = namespace['root'] t._module = None # debug and loader helpers t._debug_info = namespace['debug_info'] t._uptodate = None # store the reference namespace['environment'] = environment namespace['__jinja_template__'] = t return t def render(self, *args, **kwargs): """This method accepts the same arguments as the `dict` constructor: A dict, a dict subclass or some keyword arguments. If no arguments are given the context will be empty. These two calls do the same:: template.render(knights='that say nih') template.render({'knights': 'that say nih'}) This will return the rendered template as unicode string. """ vars = dict(*args, **kwargs) try: return concat(self.root_render_func(self.new_context(vars))) except Exception: exc_info = sys.exc_info() return self.environment.handle_exception(exc_info, True) def stream(self, *args, **kwargs): """Works exactly like :meth:`generate` but returns a :class:`TemplateStream`. """ return TemplateStream(self.generate(*args, **kwargs)) def generate(self, *args, **kwargs): """For very large templates it can be useful to not render the whole template at once but evaluate each statement after another and yield piece for piece. This method basically does exactly that and returns a generator that yields one item after another as unicode strings. It accepts the same arguments as :meth:`render`. 
""" vars = dict(*args, **kwargs) try: for event in self.root_render_func(self.new_context(vars)): yield event except Exception: exc_info = sys.exc_info() else: return yield self.environment.handle_exception(exc_info, True) def new_context(self, vars=None, shared=False, locals=None): """Create a new :class:`Context` for this template. The vars provided will be passed to the template. Per default the globals are added to the context. If shared is set to `True` the data is passed as it to the context without adding the globals. `locals` can be a dict of local variables for internal usage. """ return new_context(self.environment, self.name, self.blocks, vars, shared, self.globals, locals) def make_module(self, vars=None, shared=False, locals=None): """This method works like the :attr:`module` attribute when called without arguments but it will evaluate the template on every call rather than caching it. It's also possible to provide a dict which is then used as context. The arguments are the same as for the :meth:`new_context` method. """ return TemplateModule(self, self.new_context(vars, shared, locals)) @property def module(self): """The template as module. This is used for imports in the template runtime but is also useful if one wants to access exported template variables from the Python layer: >>> t = Template('{% macro foo() %}42{% endmacro %}23') >>> unicode(t.module) u'23' >>> t.module.foo() u'42' """ if self._module is not None: return self._module self._module = rv = self.make_module() return rv def get_corresponding_lineno(self, lineno): """Return the source line number of a line number in the generated bytecode as they are not in sync. 
""" for template_line, code_line in reversed(self.debug_info): if code_line <= lineno: return template_line return 1 @property def is_up_to_date(self): """If this variable is `False` there is a newer version available.""" if self._uptodate is None: return True return self._uptodate() @property def debug_info(self): """The debug info mapping.""" return [tuple(map(int, x.split('='))) for x in self._debug_info.split('&')] def __repr__(self): if self.name is None: name = 'memory:%x' % id(self) else: name = repr(self.name) return '<%s %s>' % (self.__class__.__name__, name) class TemplateModule(object): """Represents an imported template. All the exported names of the template are available as attributes on this object. Additionally converting it into an unicode- or bytestrings renders the contents. """ def __init__(self, template, context): self._body_stream = list(template.root_render_func(context)) self.__dict__.update(context.get_exported()) self.__name__ = template.name def __html__(self): return Markup(concat(self._body_stream)) def __str__(self): return unicode(self).encode('utf-8') # unicode goes after __str__ because we configured 2to3 to rename # __unicode__ to __str__. because the 2to3 tree is not designed to # remove nodes from it, we leave the above __str__ around and let # it override at runtime. def __unicode__(self): return concat(self._body_stream) def __repr__(self): if self.__name__ is None: name = 'memory:%x' % id(self) else: name = repr(self.__name__) return '<%s %s>' % (self.__class__.__name__, name) class TemplateExpression(object): """The :meth:`jinja2.Environment.compile_expression` method returns an instance of this object. It encapsulates the expression-like access to the template with an expression it wraps. 
""" def __init__(self, template, undefined_to_none): self._template = template self._undefined_to_none = undefined_to_none def __call__(self, *args, **kwargs): context = self._template.new_context(dict(*args, **kwargs)) consume(self._template.root_render_func(context)) rv = context.vars['result'] if self._undefined_to_none and isinstance(rv, Undefined): rv = None return rv class TemplateStream(object): """A template stream works pretty much like an ordinary python generator but it can buffer multiple items to reduce the number of total iterations. Per default the output is unbuffered which means that for every unbuffered instruction in the template one unicode string is yielded. If buffering is enabled with a buffer size of 5, five items are combined into a new unicode string. This is mainly useful if you are streaming big templates to a client via WSGI which flushes after each iteration. """ def __init__(self, gen): self._gen = gen self.disable_buffering() def dump(self, fp, encoding=None, errors='strict'): """Dump the complete stream into a file or file-like object. Per default unicode strings are written, if you want to encode before writing specifiy an `encoding`. Example usage:: Template('Hello {{ name }}!').stream(name='foo').dump('hello.html') """ close = False if isinstance(fp, basestring): fp = file(fp, 'w') close = True try: if encoding is not None: iterable = (x.encode(encoding, errors) for x in self) else: iterable = self if hasattr(fp, 'writelines'): fp.writelines(iterable) else: for item in iterable: fp.write(item) finally: if close: fp.close() def disable_buffering(self): """Disable the output buffering.""" self._next = self._gen.next self.buffered = False def enable_buffering(self, size=5): """Enable buffering. 
Buffer `size` items before yielding them.""" if size <= 1: raise ValueError('buffer size too small') def generator(next): buf = [] c_size = 0 push = buf.append while 1: try: while c_size < size: c = next() push(c) if c: c_size += 1 except StopIteration: if not c_size: return yield concat(buf) del buf[:] c_size = 0 self.buffered = True self._next = generator(self._gen.next).next def __iter__(self): return self def next(self): return self._next() # hook in default template class. if anyone reads this comment: ignore that # it's possible to use custom templates ;-) Environment.template_class = Template
bsd-3-clause
andir/ganeti
test/py/ganeti.server.rapi_unittest.py
2
10157
#!/usr/bin/python # # Copyright (C) 2012 Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
"""Script for testing ganeti.server.rapi""" import re import unittest import random import mimetools import base64 from cStringIO import StringIO from ganeti import constants from ganeti import utils from ganeti import compat from ganeti import errors from ganeti import serializer from ganeti import rapi from ganeti import http from ganeti import objects from ganeti.rapi.auth.basic_auth import BasicAuthenticator from ganeti.rapi.auth import users_file import ganeti.rapi.testutils import ganeti.rapi.rlib2 import ganeti.http.auth import testutils class TestRemoteApiHandler(unittest.TestCase): @staticmethod def _LookupWrongUser(_): return None def _Test(self, method, path, headers, reqbody, user_fn=NotImplemented, luxi_client=NotImplemented, reqauth=False): rm = rapi.testutils._RapiMock(BasicAuthenticator(user_fn), luxi_client, reqauth=reqauth) (resp_code, resp_headers, resp_body) = \ rm.FetchResponse(path, method, http.ParseHeaders(StringIO(headers)), reqbody) self.assertTrue(resp_headers[http.HTTP_DATE]) self.assertEqual(resp_headers[http.HTTP_CONNECTION], "close") self.assertEqual(resp_headers[http.HTTP_CONTENT_TYPE], http.HTTP_APP_JSON) self.assertEqual(resp_headers[http.HTTP_SERVER], http.HTTP_GANETI_VERSION) return (resp_code, resp_headers, serializer.LoadJson(resp_body)) def testRoot(self): (code, _, data) = self._Test(http.HTTP_GET, "/", "", None) self.assertEqual(code, http.HTTP_OK) self.assertTrue(data is None) def testRootReqAuth(self): (code, _, _) = self._Test(http.HTTP_GET, "/", "", None, reqauth=True) self.assertEqual(code, http.HttpUnauthorized.code) def testVersion(self): (code, _, data) = self._Test(http.HTTP_GET, "/version", "", None) self.assertEqual(code, http.HTTP_OK) self.assertEqual(data, constants.RAPI_VERSION) def testSlashTwo(self): (code, _, data) = self._Test(http.HTTP_GET, "/2", "", None) self.assertEqual(code, http.HTTP_OK) self.assertTrue(data is None) def testFeatures(self): (code, _, data) = self._Test(http.HTTP_GET, "/2/features", 
"", None) self.assertEqual(code, http.HTTP_OK) self.assertEqual(set(data), set(rapi.rlib2.ALL_FEATURES)) def testPutInstances(self): (code, _, data) = self._Test(http.HTTP_PUT, "/2/instances", "", None) self.assertEqual(code, http.HttpNotImplemented.code) self.assertTrue(data["message"].startswith("Method PUT is unsupported")) def testPostInstancesNoAuth(self): (code, _, _) = self._Test(http.HTTP_POST, "/2/instances", "", None) self.assertEqual(code, http.HttpUnauthorized.code) def testRequestWithUnsupportedMediaType(self): for fn in [lambda s: s, lambda s: s.upper(), lambda s: s.title()]: headers = rapi.testutils._FormatHeaders([ "%s: %s" % (http.HTTP_CONTENT_TYPE, fn("un/supported/media/type")), ]) (code, _, data) = self._Test(http.HTTP_GET, "/", headers, "body") self.assertEqual(code, http.HttpUnsupportedMediaType.code) self.assertEqual(data["message"], "Unsupported Media Type") def testRequestWithInvalidJsonData(self): body = "_this/is/no'valid.json" self.assertRaises(Exception, serializer.LoadJson, body) headers = rapi.testutils._FormatHeaders([ "%s: %s" % (http.HTTP_CONTENT_TYPE, http.HTTP_APP_JSON), ]) (code, _, data) = self._Test(http.HTTP_GET, "/", headers, body) self.assertEqual(code, http.HttpBadRequest.code) self.assertEqual(data["message"], "Unable to parse JSON data") def testUnsupportedAuthScheme(self): headers = rapi.testutils._FormatHeaders([ "%s: %s" % (http.HTTP_AUTHORIZATION, "Unsupported scheme"), ]) (code, _, _) = self._Test(http.HTTP_POST, "/2/instances", headers, "") self.assertEqual(code, http.HttpUnauthorized.code) def testIncompleteBasicAuth(self): headers = rapi.testutils._FormatHeaders([ "%s: Basic" % http.HTTP_AUTHORIZATION, ]) (code, _, data) = self._Test(http.HTTP_POST, "/2/instances", headers, "") self.assertEqual(code, http.HttpBadRequest.code) self.assertEqual(data["message"], "Basic authentication requires credentials") def testInvalidBasicAuth(self): for auth in ["!invalid=base!64.", base64.b64encode(" "), 
base64.b64encode("missingcolonchar")]: headers = rapi.testutils._FormatHeaders([ "%s: Basic %s" % (http.HTTP_AUTHORIZATION, auth), ]) (code, _, data) = self._Test(http.HTTP_POST, "/2/instances", headers, "") self.assertEqual(code, http.HttpBadRequest.code) @staticmethod def _MakeAuthHeaders(username, password, correct_password): if correct_password: pw = password else: pw = "wrongpass" return rapi.testutils._FormatHeaders([ "%s: Basic %s" % (http.HTTP_AUTHORIZATION, base64.b64encode("%s:%s" % (username, pw))), "%s: %s" % (http.HTTP_CONTENT_TYPE, http.HTTP_APP_JSON), ]) def testQueryAuth(self): username = "admin" password = "2046920054" header_fn = compat.partial(self._MakeAuthHeaders, username, password) def _LookupUserNoWrite(name): if name == username: return users_file.PasswordFileUser(name, password, []) else: return None for access in [rapi.RAPI_ACCESS_WRITE, rapi.RAPI_ACCESS_READ]: def _LookupUserWithWrite(name): if name == username: return users_file.PasswordFileUser(name, password, [access]) else: return None for qr in constants.QR_VIA_RAPI: # The /2/query resource has somewhat special rules for authentication as # it can be used to retrieve critical information path = "/2/query/%s" % qr for method in rapi.baserlib._SUPPORTED_METHODS: # No authorization (code, _, _) = self._Test(method, path, "", "") if method in (http.HTTP_DELETE, http.HTTP_POST): self.assertEqual(code, http.HttpNotImplemented.code) continue self.assertEqual(code, http.HttpUnauthorized.code) # Incorrect user (code, _, _) = self._Test(method, path, header_fn(True), "", user_fn=self._LookupWrongUser) self.assertEqual(code, http.HttpUnauthorized.code) # User has no write access, but the password is correct (code, _, _) = self._Test(method, path, header_fn(True), "", user_fn=_LookupUserNoWrite) self.assertEqual(code, http.HttpForbidden.code) # Wrong password and no write access (code, _, _) = self._Test(method, path, header_fn(False), "", user_fn=_LookupUserNoWrite) self.assertEqual(code, 
http.HttpUnauthorized.code) # Wrong password with write access (code, _, _) = self._Test(method, path, header_fn(False), "", user_fn=_LookupUserWithWrite) self.assertEqual(code, http.HttpUnauthorized.code) # Prepare request information if method == http.HTTP_PUT: reqpath = path body = serializer.DumpJson({ "fields": ["name"], }) elif method == http.HTTP_GET: reqpath = "%s?fields=name" % path body = "" else: self.fail("Unknown method '%s'" % method) # User has write access, password is correct (code, _, data) = self._Test(method, reqpath, header_fn(True), body, user_fn=_LookupUserWithWrite, luxi_client=_FakeLuxiClientForQuery) self.assertEqual(code, http.HTTP_OK) self.assertTrue(objects.QueryResponse.FromDict(data)) def testConsole(self): path = "/2/instances/inst1.example.com/console" for method in rapi.baserlib._SUPPORTED_METHODS: for reqauth in [False, True]: if method == http.HTTP_GET and not reqauth: # we don't have a mock client to test this case continue # No authorization (code, _, _) = self._Test(method, path, "", "", user_fn=lambda _ : None, reqauth=reqauth) if method == http.HTTP_GET and reqauth: self.assertEqual(code, http.HttpUnauthorized.code) else: self.assertEqual(code, http.HttpNotImplemented.code) class _FakeLuxiClientForQuery: def __init__(self, *args, **kwargs): pass def Query(self, *args): return objects.QueryResponse(fields=[]) if __name__ == "__main__": testutils.GanetiTestProgram()
bsd-2-clause
XisoDev/xisoPos
node_modules/node-gyp/gyp/pylib/gyp/mac_tool.py
1569
23354
#!/usr/bin/env python # Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Utility functions to perform Xcode-style build steps. These functions are executed via gyp-mac-tool when using the Makefile generator. """ import fcntl import fnmatch import glob import json import os import plistlib import re import shutil import string import subprocess import sys import tempfile def main(args): executor = MacTool() exit_code = executor.Dispatch(args) if exit_code is not None: sys.exit(exit_code) class MacTool(object): """This class performs all the Mac tooling steps. The methods can either be executed directly, or dispatched from an argument list.""" def Dispatch(self, args): """Dispatches a string command to a method.""" if len(args) < 1: raise Exception("Not enough arguments") method = "Exec%s" % self._CommandifyName(args[0]) return getattr(self, method)(*args[1:]) def _CommandifyName(self, name_string): """Transforms a tool name like copy-info-plist to CopyInfoPlist""" return name_string.title().replace('-', '') def ExecCopyBundleResource(self, source, dest, convert_to_binary): """Copies a resource file to the bundle/Resources directory, performing any necessary compilation on each resource.""" extension = os.path.splitext(source)[1].lower() if os.path.isdir(source): # Copy tree. # TODO(thakis): This copies file attributes like mtime, while the # single-file branch below doesn't. This should probably be changed to # be consistent with the single-file branch. 
if os.path.exists(dest): shutil.rmtree(dest) shutil.copytree(source, dest) elif extension == '.xib': return self._CopyXIBFile(source, dest) elif extension == '.storyboard': return self._CopyXIBFile(source, dest) elif extension == '.strings': self._CopyStringsFile(source, dest, convert_to_binary) else: shutil.copy(source, dest) def _CopyXIBFile(self, source, dest): """Compiles a XIB file with ibtool into a binary plist in the bundle.""" # ibtool sometimes crashes with relative paths. See crbug.com/314728. base = os.path.dirname(os.path.realpath(__file__)) if os.path.relpath(source): source = os.path.join(base, source) if os.path.relpath(dest): dest = os.path.join(base, dest) args = ['xcrun', 'ibtool', '--errors', '--warnings', '--notices', '--output-format', 'human-readable-text', '--compile', dest, source] ibtool_section_re = re.compile(r'/\*.*\*/') ibtool_re = re.compile(r'.*note:.*is clipping its content') ibtoolout = subprocess.Popen(args, stdout=subprocess.PIPE) current_section_header = None for line in ibtoolout.stdout: if ibtool_section_re.match(line): current_section_header = line elif not ibtool_re.match(line): if current_section_header: sys.stdout.write(current_section_header) current_section_header = None sys.stdout.write(line) return ibtoolout.returncode def _ConvertToBinary(self, dest): subprocess.check_call([ 'xcrun', 'plutil', '-convert', 'binary1', '-o', dest, dest]) def _CopyStringsFile(self, source, dest, convert_to_binary): """Copies a .strings file using iconv to reconvert the input into UTF-16.""" input_code = self._DetectInputEncoding(source) or "UTF-8" # Xcode's CpyCopyStringsFile / builtin-copyStrings seems to call # CFPropertyListCreateFromXMLData() behind the scenes; at least it prints # CFPropertyListCreateFromXMLData(): Old-style plist parser: missing # semicolon in dictionary. # on invalid files. Do the same kind of validation. 
import CoreFoundation s = open(source, 'rb').read() d = CoreFoundation.CFDataCreate(None, s, len(s)) _, error = CoreFoundation.CFPropertyListCreateFromXMLData(None, d, 0, None) if error: return fp = open(dest, 'wb') fp.write(s.decode(input_code).encode('UTF-16')) fp.close() if convert_to_binary == 'True': self._ConvertToBinary(dest) def _DetectInputEncoding(self, file_name): """Reads the first few bytes from file_name and tries to guess the text encoding. Returns None as a guess if it can't detect it.""" fp = open(file_name, 'rb') try: header = fp.read(3) except e: fp.close() return None fp.close() if header.startswith("\xFE\xFF"): return "UTF-16" elif header.startswith("\xFF\xFE"): return "UTF-16" elif header.startswith("\xEF\xBB\xBF"): return "UTF-8" else: return None def ExecCopyInfoPlist(self, source, dest, convert_to_binary, *keys): """Copies the |source| Info.plist to the destination directory |dest|.""" # Read the source Info.plist into memory. fd = open(source, 'r') lines = fd.read() fd.close() # Insert synthesized key/value pairs (e.g. BuildMachineOSBuild). plist = plistlib.readPlistFromString(lines) if keys: plist = dict(plist.items() + json.loads(keys[0]).items()) lines = plistlib.writePlistToString(plist) # Go through all the environment variables and replace them as variables in # the file. IDENT_RE = re.compile(r'[/\s]') for key in os.environ: if key.startswith('_'): continue evar = '${%s}' % key evalue = os.environ[key] lines = string.replace(lines, evar, evalue) # Xcode supports various suffices on environment variables, which are # all undocumented. :rfc1034identifier is used in the standard project # template these days, and :identifier was used earlier. They are used to # convert non-url characters into things that look like valid urls -- # except that the replacement character for :identifier, '_' isn't valid # in a URL either -- oops, hence :rfc1034identifier was born. 
evar = '${%s:identifier}' % key evalue = IDENT_RE.sub('_', os.environ[key]) lines = string.replace(lines, evar, evalue) evar = '${%s:rfc1034identifier}' % key evalue = IDENT_RE.sub('-', os.environ[key]) lines = string.replace(lines, evar, evalue) # Remove any keys with values that haven't been replaced. lines = lines.split('\n') for i in range(len(lines)): if lines[i].strip().startswith("<string>${"): lines[i] = None lines[i - 1] = None lines = '\n'.join(filter(lambda x: x is not None, lines)) # Write out the file with variables replaced. fd = open(dest, 'w') fd.write(lines) fd.close() # Now write out PkgInfo file now that the Info.plist file has been # "compiled". self._WritePkgInfo(dest) if convert_to_binary == 'True': self._ConvertToBinary(dest) def _WritePkgInfo(self, info_plist): """This writes the PkgInfo file from the data stored in Info.plist.""" plist = plistlib.readPlist(info_plist) if not plist: return # Only create PkgInfo for executable types. package_type = plist['CFBundlePackageType'] if package_type != 'APPL': return # The format of PkgInfo is eight characters, representing the bundle type # and bundle signature, each four characters. If that is missing, four # '?' characters are used instead. signature_code = plist.get('CFBundleSignature', '????') if len(signature_code) != 4: # Wrong length resets everything, too. signature_code = '?' * 4 dest = os.path.join(os.path.dirname(info_plist), 'PkgInfo') fp = open(dest, 'w') fp.write('%s%s' % (package_type, signature_code)) fp.close() def ExecFlock(self, lockfile, *cmd_list): """Emulates the most basic behavior of Linux's flock(1).""" # Rely on exception handling to report errors. 
fd = os.open(lockfile, os.O_RDONLY|os.O_NOCTTY|os.O_CREAT, 0o666) fcntl.flock(fd, fcntl.LOCK_EX) return subprocess.call(cmd_list) def ExecFilterLibtool(self, *cmd_list): """Calls libtool and filters out '/path/to/libtool: file: foo.o has no symbols'.""" libtool_re = re.compile(r'^.*libtool: file: .* has no symbols$') libtool_re5 = re.compile( r'^.*libtool: warning for library: ' + r'.* the table of contents is empty ' + r'\(no object file members in the library define global symbols\)$') env = os.environ.copy() # Ref: # http://www.opensource.apple.com/source/cctools/cctools-809/misc/libtool.c # The problem with this flag is that it resets the file mtime on the file to # epoch=0, e.g. 1970-1-1 or 1969-12-31 depending on timezone. env['ZERO_AR_DATE'] = '1' libtoolout = subprocess.Popen(cmd_list, stderr=subprocess.PIPE, env=env) _, err = libtoolout.communicate() for line in err.splitlines(): if not libtool_re.match(line) and not libtool_re5.match(line): print >>sys.stderr, line # Unconditionally touch the output .a file on the command line if present # and the command succeeded. A bit hacky. if not libtoolout.returncode: for i in range(len(cmd_list) - 1): if cmd_list[i] == "-o" and cmd_list[i+1].endswith('.a'): os.utime(cmd_list[i+1], None) break return libtoolout.returncode def ExecPackageFramework(self, framework, version): """Takes a path to Something.framework and the Current version of that and sets up all the symlinks.""" # Find the name of the binary based on the part before the ".framework". binary = os.path.basename(framework).split('.')[0] CURRENT = 'Current' RESOURCES = 'Resources' VERSIONS = 'Versions' if not os.path.exists(os.path.join(framework, VERSIONS, version, binary)): # Binary-less frameworks don't seem to contain symlinks (see e.g. # chromium's out/Debug/org.chromium.Chromium.manifest/ bundle). return # Move into the framework directory to set the symlinks correctly. pwd = os.getcwd() os.chdir(framework) # Set up the Current version. 
self._Relink(version, os.path.join(VERSIONS, CURRENT)) # Set up the root symlinks. self._Relink(os.path.join(VERSIONS, CURRENT, binary), binary) self._Relink(os.path.join(VERSIONS, CURRENT, RESOURCES), RESOURCES) # Back to where we were before! os.chdir(pwd) def _Relink(self, dest, link): """Creates a symlink to |dest| named |link|. If |link| already exists, it is overwritten.""" if os.path.lexists(link): os.remove(link) os.symlink(dest, link) def ExecCompileXcassets(self, keys, *inputs): """Compiles multiple .xcassets files into a single .car file. This invokes 'actool' to compile all the inputs .xcassets files. The |keys| arguments is a json-encoded dictionary of extra arguments to pass to 'actool' when the asset catalogs contains an application icon or a launch image. Note that 'actool' does not create the Assets.car file if the asset catalogs does not contains imageset. """ command_line = [ 'xcrun', 'actool', '--output-format', 'human-readable-text', '--compress-pngs', '--notices', '--warnings', '--errors', ] is_iphone_target = 'IPHONEOS_DEPLOYMENT_TARGET' in os.environ if is_iphone_target: platform = os.environ['CONFIGURATION'].split('-')[-1] if platform not in ('iphoneos', 'iphonesimulator'): platform = 'iphonesimulator' command_line.extend([ '--platform', platform, '--target-device', 'iphone', '--target-device', 'ipad', '--minimum-deployment-target', os.environ['IPHONEOS_DEPLOYMENT_TARGET'], '--compile', os.path.abspath(os.environ['CONTENTS_FOLDER_PATH']), ]) else: command_line.extend([ '--platform', 'macosx', '--target-device', 'mac', '--minimum-deployment-target', os.environ['MACOSX_DEPLOYMENT_TARGET'], '--compile', os.path.abspath(os.environ['UNLOCALIZED_RESOURCES_FOLDER_PATH']), ]) if keys: keys = json.loads(keys) for key, value in keys.iteritems(): arg_name = '--' + key if isinstance(value, bool): if value: command_line.append(arg_name) elif isinstance(value, list): for v in value: command_line.append(arg_name) command_line.append(str(v)) else: 
command_line.append(arg_name) command_line.append(str(value)) # Note: actool crashes if inputs path are relative, so use os.path.abspath # to get absolute path name for inputs. command_line.extend(map(os.path.abspath, inputs)) subprocess.check_call(command_line) def ExecMergeInfoPlist(self, output, *inputs): """Merge multiple .plist files into a single .plist file.""" merged_plist = {} for path in inputs: plist = self._LoadPlistMaybeBinary(path) self._MergePlist(merged_plist, plist) plistlib.writePlist(merged_plist, output) def ExecCodeSignBundle(self, key, resource_rules, entitlements, provisioning): """Code sign a bundle. This function tries to code sign an iOS bundle, following the same algorithm as Xcode: 1. copy ResourceRules.plist from the user or the SDK into the bundle, 2. pick the provisioning profile that best match the bundle identifier, and copy it into the bundle as embedded.mobileprovision, 3. copy Entitlements.plist from user or SDK next to the bundle, 4. code sign the bundle. """ resource_rules_path = self._InstallResourceRules(resource_rules) substitutions, overrides = self._InstallProvisioningProfile( provisioning, self._GetCFBundleIdentifier()) entitlements_path = self._InstallEntitlements( entitlements, substitutions, overrides) subprocess.check_call([ 'codesign', '--force', '--sign', key, '--resource-rules', resource_rules_path, '--entitlements', entitlements_path, os.path.join( os.environ['TARGET_BUILD_DIR'], os.environ['FULL_PRODUCT_NAME'])]) def _InstallResourceRules(self, resource_rules): """Installs ResourceRules.plist from user or SDK into the bundle. Args: resource_rules: string, optional, path to the ResourceRules.plist file to use, default to "${SDKROOT}/ResourceRules.plist" Returns: Path to the copy of ResourceRules.plist into the bundle. 
""" source_path = resource_rules target_path = os.path.join( os.environ['BUILT_PRODUCTS_DIR'], os.environ['CONTENTS_FOLDER_PATH'], 'ResourceRules.plist') if not source_path: source_path = os.path.join( os.environ['SDKROOT'], 'ResourceRules.plist') shutil.copy2(source_path, target_path) return target_path def _InstallProvisioningProfile(self, profile, bundle_identifier): """Installs embedded.mobileprovision into the bundle. Args: profile: string, optional, short name of the .mobileprovision file to use, if empty or the file is missing, the best file installed will be used bundle_identifier: string, value of CFBundleIdentifier from Info.plist Returns: A tuple containing two dictionary: variables substitutions and values to overrides when generating the entitlements file. """ source_path, provisioning_data, team_id = self._FindProvisioningProfile( profile, bundle_identifier) target_path = os.path.join( os.environ['BUILT_PRODUCTS_DIR'], os.environ['CONTENTS_FOLDER_PATH'], 'embedded.mobileprovision') shutil.copy2(source_path, target_path) substitutions = self._GetSubstitutions(bundle_identifier, team_id + '.') return substitutions, provisioning_data['Entitlements'] def _FindProvisioningProfile(self, profile, bundle_identifier): """Finds the .mobileprovision file to use for signing the bundle. Checks all the installed provisioning profiles (or if the user specified the PROVISIONING_PROFILE variable, only consult it) and select the most specific that correspond to the bundle identifier. Args: profile: string, optional, short name of the .mobileprovision file to use, if empty or the file is missing, the best file installed will be used bundle_identifier: string, value of CFBundleIdentifier from Info.plist Returns: A tuple of the path to the selected provisioning profile, the data of the embedded plist in the provisioning profile and the team identifier to use for code signing. Raises: SystemExit: if no .mobileprovision can be used to sign the bundle. 
""" profiles_dir = os.path.join( os.environ['HOME'], 'Library', 'MobileDevice', 'Provisioning Profiles') if not os.path.isdir(profiles_dir): print >>sys.stderr, ( 'cannot find mobile provisioning for %s' % bundle_identifier) sys.exit(1) provisioning_profiles = None if profile: profile_path = os.path.join(profiles_dir, profile + '.mobileprovision') if os.path.exists(profile_path): provisioning_profiles = [profile_path] if not provisioning_profiles: provisioning_profiles = glob.glob( os.path.join(profiles_dir, '*.mobileprovision')) valid_provisioning_profiles = {} for profile_path in provisioning_profiles: profile_data = self._LoadProvisioningProfile(profile_path) app_id_pattern = profile_data.get( 'Entitlements', {}).get('application-identifier', '') for team_identifier in profile_data.get('TeamIdentifier', []): app_id = '%s.%s' % (team_identifier, bundle_identifier) if fnmatch.fnmatch(app_id, app_id_pattern): valid_provisioning_profiles[app_id_pattern] = ( profile_path, profile_data, team_identifier) if not valid_provisioning_profiles: print >>sys.stderr, ( 'cannot find mobile provisioning for %s' % bundle_identifier) sys.exit(1) # If the user has multiple provisioning profiles installed that can be # used for ${bundle_identifier}, pick the most specific one (ie. the # provisioning profile whose pattern is the longest). selected_key = max(valid_provisioning_profiles, key=lambda v: len(v)) return valid_provisioning_profiles[selected_key] def _LoadProvisioningProfile(self, profile_path): """Extracts the plist embedded in a provisioning profile. Args: profile_path: string, path to the .mobileprovision file Returns: Content of the plist embedded in the provisioning profile as a dictionary. 
""" with tempfile.NamedTemporaryFile() as temp: subprocess.check_call([ 'security', 'cms', '-D', '-i', profile_path, '-o', temp.name]) return self._LoadPlistMaybeBinary(temp.name) def _MergePlist(self, merged_plist, plist): """Merge |plist| into |merged_plist|.""" for key, value in plist.iteritems(): if isinstance(value, dict): merged_value = merged_plist.get(key, {}) if isinstance(merged_value, dict): self._MergePlist(merged_value, value) merged_plist[key] = merged_value else: merged_plist[key] = value else: merged_plist[key] = value def _LoadPlistMaybeBinary(self, plist_path): """Loads into a memory a plist possibly encoded in binary format. This is a wrapper around plistlib.readPlist that tries to convert the plist to the XML format if it can't be parsed (assuming that it is in the binary format). Args: plist_path: string, path to a plist file, in XML or binary format Returns: Content of the plist as a dictionary. """ try: # First, try to read the file using plistlib that only supports XML, # and if an exception is raised, convert a temporary copy to XML and # load that copy. return plistlib.readPlist(plist_path) except: pass with tempfile.NamedTemporaryFile() as temp: shutil.copy2(plist_path, temp.name) subprocess.check_call(['plutil', '-convert', 'xml1', temp.name]) return plistlib.readPlist(temp.name) def _GetSubstitutions(self, bundle_identifier, app_identifier_prefix): """Constructs a dictionary of variable substitutions for Entitlements.plist. Args: bundle_identifier: string, value of CFBundleIdentifier from Info.plist app_identifier_prefix: string, value for AppIdentifierPrefix Returns: Dictionary of substitutions to apply when generating Entitlements.plist. """ return { 'CFBundleIdentifier': bundle_identifier, 'AppIdentifierPrefix': app_identifier_prefix, } def _GetCFBundleIdentifier(self): """Extracts CFBundleIdentifier value from Info.plist in the bundle. Returns: Value of CFBundleIdentifier in the Info.plist located in the bundle. 
""" info_plist_path = os.path.join( os.environ['TARGET_BUILD_DIR'], os.environ['INFOPLIST_PATH']) info_plist_data = self._LoadPlistMaybeBinary(info_plist_path) return info_plist_data['CFBundleIdentifier'] def _InstallEntitlements(self, entitlements, substitutions, overrides): """Generates and install the ${BundleName}.xcent entitlements file. Expands variables "$(variable)" pattern in the source entitlements file, add extra entitlements defined in the .mobileprovision file and the copy the generated plist to "${BundlePath}.xcent". Args: entitlements: string, optional, path to the Entitlements.plist template to use, defaults to "${SDKROOT}/Entitlements.plist" substitutions: dictionary, variable substitutions overrides: dictionary, values to add to the entitlements Returns: Path to the generated entitlements file. """ source_path = entitlements target_path = os.path.join( os.environ['BUILT_PRODUCTS_DIR'], os.environ['PRODUCT_NAME'] + '.xcent') if not source_path: source_path = os.path.join( os.environ['SDKROOT'], 'Entitlements.plist') shutil.copy2(source_path, target_path) data = self._LoadPlistMaybeBinary(target_path) data = self._ExpandVariables(data, substitutions) if overrides: for key in overrides: if key not in data: data[key] = overrides[key] plistlib.writePlist(data, target_path) return target_path def _ExpandVariables(self, data, substitutions): """Expands variables "$(variable)" in data. Args: data: object, can be either string, list or dictionary substitutions: dictionary, variable substitutions to perform Returns: Copy of data where each references to "$(variable)" has been replaced by the corresponding value found in substitutions, or left intact if the key was not found. 
""" if isinstance(data, str): for key, value in substitutions.iteritems(): data = data.replace('$(%s)' % key, value) return data if isinstance(data, list): return [self._ExpandVariables(v, substitutions) for v in data] if isinstance(data, dict): return {k: self._ExpandVariables(data[k], substitutions) for k in data} return data if __name__ == '__main__': sys.exit(main(sys.argv[1:]))
gpl-3.0
alxgu/ansible
lib/ansible/modules/cloud/centurylink/clc_firewall_policy.py
26
21404
#!/usr/bin/python # # Copyright (c) 2015 CenturyLink # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' module: clc_firewall_policy short_description: Create/delete/update firewall policies description: - Create or delete or update firewall polices on Centurylink Cloud version_added: "2.0" options: location: description: - Target datacenter for the firewall policy required: True state: description: - Whether to create or delete the firewall policy default: present choices: ['present', 'absent'] source: description: - The list of source addresses for traffic on the originating firewall. This is required when state is 'present' destination: description: - The list of destination addresses for traffic on the terminating firewall. This is required when state is 'present' ports: description: - The list of ports associated with the policy. TCP and UDP can take in single ports or port ranges. choices: ['any', 'icmp', 'TCP/123', 'UDP/123', 'TCP/123-456', 'UDP/123-456'] firewall_policy_id: description: - Id of the firewall policy. This is required to update or delete an existing firewall policy source_account_alias: description: - CLC alias for the source account required: True destination_account_alias: description: - CLC alias for the destination account wait: description: - Whether to wait for the provisioning tasks to finish before returning. 
type: bool default: 'yes' enabled: description: - Whether the firewall policy is enabled or disabled choices: [True, False] default: 'yes' requirements: - python = 2.7 - requests >= 2.5.0 - clc-sdk author: "CLC Runner (@clc-runner)" notes: - To use this module, it is required to set the below environment variables which enables access to the Centurylink Cloud - CLC_V2_API_USERNAME, the account login id for the centurylink cloud - CLC_V2_API_PASSWORD, the account password for the centurylink cloud - Alternatively, the module accepts the API token and account alias. The API token can be generated using the CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. ''' EXAMPLES = ''' --- - name: Create Firewall Policy hosts: localhost gather_facts: False connection: local tasks: - name: Create / Verify an Firewall Policy at CenturyLink Cloud clc_firewall: source_account_alias: WFAD location: VA1 state: present source: 10.128.216.0/24 destination: 10.128.216.0/24 ports: Any destination_account_alias: WFAD --- - name: Delete Firewall Policy hosts: localhost gather_facts: False connection: local tasks: - name: Delete an Firewall Policy at CenturyLink Cloud clc_firewall: source_account_alias: WFAD location: VA1 state: absent firewall_policy_id: c62105233d7a4231bd2e91b9c791e43e1 ''' RETURN = ''' firewall_policy_id: description: The fire wall policy id returned: success type: str sample: fc36f1bfd47242e488a9c44346438c05 firewall_policy: description: The fire wall policy information returned: success type: dict sample: { "destination":[ "10.1.1.0/24", "10.2.2.0/24" ], "destinationAccount":"wfad", "enabled":true, "id":"fc36f1bfd47242e488a9c44346438c05", "links":[ { 
"href":"http://api.ctl.io/v2-experimental/firewallPolicies/wfad/uc1/fc36f1bfd47242e488a9c44346438c05", "rel":"self", "verbs":[ "GET", "PUT", "DELETE" ] } ], "ports":[ "any" ], "source":[ "10.1.1.0/24", "10.2.2.0/24" ], "status":"active" } ''' __version__ = '${version}' import os import traceback from ansible.module_utils.six.moves.urllib.parse import urlparse from time import sleep from distutils.version import LooseVersion REQUESTS_IMP_ERR = None try: import requests except ImportError: REQUESTS_IMP_ERR = traceback.format_exc() REQUESTS_FOUND = False else: REQUESTS_FOUND = True CLC_IMP_ERR = None try: import clc as clc_sdk from clc import APIFailedResponse except ImportError: CLC_IMP_ERR = traceback.format_exc() CLC_FOUND = False clc_sdk = None else: CLC_FOUND = True from ansible.module_utils.basic import AnsibleModule, missing_required_lib class ClcFirewallPolicy: clc = None def __init__(self, module): """ Construct module """ self.clc = clc_sdk self.module = module self.firewall_dict = {} if not CLC_FOUND: self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) if not REQUESTS_FOUND: self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) if requests.__version__ and LooseVersion( requests.__version__) < LooseVersion('2.5.0'): self.module.fail_json( msg='requests library version should be >= 2.5.0') self._set_user_agent(self.clc) @staticmethod def _define_module_argument_spec(): """ Define the argument spec for the ansible module :return: argument spec dictionary """ argument_spec = dict( location=dict(required=True), source_account_alias=dict(required=True, default=None), destination_account_alias=dict(default=None), firewall_policy_id=dict(default=None), ports=dict(default=None, type='list'), source=dict(default=None, type='list'), destination=dict(default=None, type='list'), wait=dict(default=True), state=dict(default='present', choices=['present', 'absent']), enabled=dict(default=True, choices=[True, 
False]) ) return argument_spec def process_request(self): """ Execute the main code path, and handle the request :return: none """ changed = False firewall_policy = None location = self.module.params.get('location') source_account_alias = self.module.params.get('source_account_alias') destination_account_alias = self.module.params.get( 'destination_account_alias') firewall_policy_id = self.module.params.get('firewall_policy_id') ports = self.module.params.get('ports') source = self.module.params.get('source') destination = self.module.params.get('destination') wait = self.module.params.get('wait') state = self.module.params.get('state') enabled = self.module.params.get('enabled') self.firewall_dict = { 'location': location, 'source_account_alias': source_account_alias, 'destination_account_alias': destination_account_alias, 'firewall_policy_id': firewall_policy_id, 'ports': ports, 'source': source, 'destination': destination, 'wait': wait, 'state': state, 'enabled': enabled} self._set_clc_credentials_from_env() if state == 'absent': changed, firewall_policy_id, firewall_policy = self._ensure_firewall_policy_is_absent( source_account_alias, location, self.firewall_dict) elif state == 'present': changed, firewall_policy_id, firewall_policy = self._ensure_firewall_policy_is_present( source_account_alias, location, self.firewall_dict) return self.module.exit_json( changed=changed, firewall_policy_id=firewall_policy_id, firewall_policy=firewall_policy) @staticmethod def _get_policy_id_from_response(response): """ Method to parse out the policy id from creation response :param response: response from firewall creation API call :return: policy_id: firewall policy id from creation call """ url = response.get('links')[0]['href'] path = urlparse(url).path path_list = os.path.split(path) policy_id = path_list[-1] return policy_id def _set_clc_credentials_from_env(self): """ Set the CLC Credentials on the sdk by reading environment variables :return: none """ env = os.environ 
v2_api_token = env.get('CLC_V2_API_TOKEN', False) v2_api_username = env.get('CLC_V2_API_USERNAME', False) v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) clc_alias = env.get('CLC_ACCT_ALIAS', False) api_url = env.get('CLC_V2_API_URL', False) if api_url: self.clc.defaults.ENDPOINT_URL_V2 = api_url if v2_api_token and clc_alias: self.clc._LOGIN_TOKEN_V2 = v2_api_token self.clc._V2_ENABLED = True self.clc.ALIAS = clc_alias elif v2_api_username and v2_api_passwd: self.clc.v2.SetCredentials( api_username=v2_api_username, api_passwd=v2_api_passwd) else: return self.module.fail_json( msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " "environment variables") def _ensure_firewall_policy_is_present( self, source_account_alias, location, firewall_dict): """ Ensures that a given firewall policy is present :param source_account_alias: the source account alias for the firewall policy :param location: datacenter of the firewall policy :param firewall_dict: dictionary of request parameters for firewall policy :return: (changed, firewall_policy_id, firewall_policy) changed: flag for if a change occurred firewall_policy_id: the firewall policy id that was created/updated firewall_policy: The firewall_policy object """ firewall_policy = None firewall_policy_id = firewall_dict.get('firewall_policy_id') if firewall_policy_id is None: if not self.module.check_mode: response = self._create_firewall_policy( source_account_alias, location, firewall_dict) firewall_policy_id = self._get_policy_id_from_response( response) changed = True else: firewall_policy = self._get_firewall_policy( source_account_alias, location, firewall_policy_id) if not firewall_policy: return self.module.fail_json( msg='Unable to find the firewall policy id : {0}'.format( firewall_policy_id)) changed = self._compare_get_request_with_dict( firewall_policy, firewall_dict) if not self.module.check_mode and changed: self._update_firewall_policy( source_account_alias, location, firewall_policy_id, 
firewall_dict) if changed and firewall_policy_id: firewall_policy = self._wait_for_requests_to_complete( source_account_alias, location, firewall_policy_id) return changed, firewall_policy_id, firewall_policy def _ensure_firewall_policy_is_absent( self, source_account_alias, location, firewall_dict): """ Ensures that a given firewall policy is removed if present :param source_account_alias: the source account alias for the firewall policy :param location: datacenter of the firewall policy :param firewall_dict: firewall policy to delete :return: (changed, firewall_policy_id, response) changed: flag for if a change occurred firewall_policy_id: the firewall policy id that was deleted response: response from CLC API call """ changed = False response = [] firewall_policy_id = firewall_dict.get('firewall_policy_id') result = self._get_firewall_policy( source_account_alias, location, firewall_policy_id) if result: if not self.module.check_mode: response = self._delete_firewall_policy( source_account_alias, location, firewall_policy_id) changed = True return changed, firewall_policy_id, response def _create_firewall_policy( self, source_account_alias, location, firewall_dict): """ Creates the firewall policy for the given account alias :param source_account_alias: the source account alias for the firewall policy :param location: datacenter of the firewall policy :param firewall_dict: dictionary of request parameters for firewall policy :return: response from CLC API call """ payload = { 'destinationAccount': firewall_dict.get('destination_account_alias'), 'source': firewall_dict.get('source'), 'destination': firewall_dict.get('destination'), 'ports': firewall_dict.get('ports')} try: response = self.clc.v2.API.Call( 'POST', '/v2-experimental/firewallPolicies/%s/%s' % (source_account_alias, location), payload) except APIFailedResponse as e: return self.module.fail_json( msg="Unable to create firewall policy. 
%s" % str(e.response_text)) return response def _delete_firewall_policy( self, source_account_alias, location, firewall_policy_id): """ Deletes a given firewall policy for an account alias in a datacenter :param source_account_alias: the source account alias for the firewall policy :param location: datacenter of the firewall policy :param firewall_policy_id: firewall policy id to delete :return: response: response from CLC API call """ try: response = self.clc.v2.API.Call( 'DELETE', '/v2-experimental/firewallPolicies/%s/%s/%s' % (source_account_alias, location, firewall_policy_id)) except APIFailedResponse as e: return self.module.fail_json( msg="Unable to delete the firewall policy id : {0}. {1}".format( firewall_policy_id, str(e.response_text))) return response def _update_firewall_policy( self, source_account_alias, location, firewall_policy_id, firewall_dict): """ Updates a firewall policy for a given datacenter and account alias :param source_account_alias: the source account alias for the firewall policy :param location: datacenter of the firewall policy :param firewall_policy_id: firewall policy id to update :param firewall_dict: dictionary of request parameters for firewall policy :return: response: response from CLC API call """ try: response = self.clc.v2.API.Call( 'PUT', '/v2-experimental/firewallPolicies/%s/%s/%s' % (source_account_alias, location, firewall_policy_id), firewall_dict) except APIFailedResponse as e: return self.module.fail_json( msg="Unable to update the firewall policy id : {0}. 
{1}".format( firewall_policy_id, str(e.response_text))) return response @staticmethod def _compare_get_request_with_dict(response, firewall_dict): """ Helper method to compare the json response for getting the firewall policy with the request parameters :param response: response from the get method :param firewall_dict: dictionary of request parameters for firewall policy :return: changed: Boolean that returns true if there are differences between the response parameters and the playbook parameters """ changed = False response_dest_account_alias = response.get('destinationAccount') response_enabled = response.get('enabled') response_source = response.get('source') response_dest = response.get('destination') response_ports = response.get('ports') request_dest_account_alias = firewall_dict.get( 'destination_account_alias') request_enabled = firewall_dict.get('enabled') if request_enabled is None: request_enabled = True request_source = firewall_dict.get('source') request_dest = firewall_dict.get('destination') request_ports = firewall_dict.get('ports') if ( response_dest_account_alias and str(response_dest_account_alias) != str(request_dest_account_alias)) or ( response_enabled != request_enabled) or ( response_source and response_source != request_source) or ( response_dest and response_dest != request_dest) or ( response_ports and response_ports != request_ports): changed = True return changed def _get_firewall_policy( self, source_account_alias, location, firewall_policy_id): """ Get back details for a particular firewall policy :param source_account_alias: the source account alias for the firewall policy :param location: datacenter of the firewall policy :param firewall_policy_id: id of the firewall policy to get :return: response - The response from CLC API call """ response = None try: response = self.clc.v2.API.Call( 'GET', '/v2-experimental/firewallPolicies/%s/%s/%s' % (source_account_alias, location, firewall_policy_id)) except APIFailedResponse as e: if 
e.response_status_code != 404: self.module.fail_json( msg="Unable to fetch the firewall policy with id : {0}. {1}".format( firewall_policy_id, str(e.response_text))) return response def _wait_for_requests_to_complete( self, source_account_alias, location, firewall_policy_id, wait_limit=50): """ Waits until the CLC requests are complete if the wait argument is True :param source_account_alias: The source account alias for the firewall policy :param location: datacenter of the firewall policy :param firewall_policy_id: The firewall policy id :param wait_limit: The number of times to check the status for completion :return: the firewall_policy object """ wait = self.module.params.get('wait') count = 0 firewall_policy = None while wait: count += 1 firewall_policy = self._get_firewall_policy( source_account_alias, location, firewall_policy_id) status = firewall_policy.get('status') if status == 'active' or count > wait_limit: wait = False else: # wait for 2 seconds sleep(2) return firewall_policy @staticmethod def _set_user_agent(clc): if hasattr(clc, 'SetRequestsSession'): agent_string = "ClcAnsibleModule/" + __version__ ses = requests.Session() ses.headers.update({"Api-Client": agent_string}) ses.headers['User-Agent'] += " " + agent_string clc.SetRequestsSession(ses) def main(): """ The main function. Instantiates the module and calls process_request. :return: none """ module = AnsibleModule( argument_spec=ClcFirewallPolicy._define_module_argument_spec(), supports_check_mode=True) clc_firewall = ClcFirewallPolicy(module) clc_firewall.process_request() if __name__ == '__main__': main()
gpl-3.0
blackzw/openwrt_sdk_dev1
staging_dir/host/lib/python2.7/encodings/ptcp154.py
647
8950
""" Python Character Mapping Codec generated from 'PTCP154.txt' with gencodec.py. Written by Marc-Andre Lemburg (mal@lemburg.com). (c) Copyright CNRI, All Rights Reserved. NO WARRANTY. (c) Copyright 2000 Guido van Rossum. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_map) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_map) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_map)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_map)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='ptcp154', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Map decoding_map = codecs.make_identity_dict(range(256)) decoding_map.update({ 0x0080: 0x0496, # CYRILLIC CAPITAL LETTER ZHE WITH DESCENDER 0x0081: 0x0492, # CYRILLIC CAPITAL LETTER GHE WITH STROKE 0x0082: 0x04ee, # CYRILLIC CAPITAL LETTER U WITH MACRON 0x0083: 0x0493, # CYRILLIC SMALL LETTER GHE WITH STROKE 0x0084: 0x201e, # DOUBLE LOW-9 QUOTATION MARK 0x0085: 0x2026, # HORIZONTAL ELLIPSIS 0x0086: 0x04b6, # CYRILLIC CAPITAL LETTER CHE WITH DESCENDER 0x0087: 0x04ae, # CYRILLIC CAPITAL LETTER STRAIGHT U 0x0088: 0x04b2, # CYRILLIC CAPITAL LETTER HA WITH DESCENDER 0x0089: 0x04af, # CYRILLIC SMALL LETTER STRAIGHT U 0x008a: 0x04a0, # CYRILLIC CAPITAL LETTER BASHKIR KA 0x008b: 0x04e2, # CYRILLIC CAPITAL LETTER I WITH MACRON 0x008c: 0x04a2, # CYRILLIC CAPITAL LETTER EN WITH DESCENDER 0x008d: 0x049a, # CYRILLIC CAPITAL LETTER 
KA WITH DESCENDER 0x008e: 0x04ba, # CYRILLIC CAPITAL LETTER SHHA 0x008f: 0x04b8, # CYRILLIC CAPITAL LETTER CHE WITH VERTICAL STROKE 0x0090: 0x0497, # CYRILLIC SMALL LETTER ZHE WITH DESCENDER 0x0091: 0x2018, # LEFT SINGLE QUOTATION MARK 0x0092: 0x2019, # RIGHT SINGLE QUOTATION MARK 0x0093: 0x201c, # LEFT DOUBLE QUOTATION MARK 0x0094: 0x201d, # RIGHT DOUBLE QUOTATION MARK 0x0095: 0x2022, # BULLET 0x0096: 0x2013, # EN DASH 0x0097: 0x2014, # EM DASH 0x0098: 0x04b3, # CYRILLIC SMALL LETTER HA WITH DESCENDER 0x0099: 0x04b7, # CYRILLIC SMALL LETTER CHE WITH DESCENDER 0x009a: 0x04a1, # CYRILLIC SMALL LETTER BASHKIR KA 0x009b: 0x04e3, # CYRILLIC SMALL LETTER I WITH MACRON 0x009c: 0x04a3, # CYRILLIC SMALL LETTER EN WITH DESCENDER 0x009d: 0x049b, # CYRILLIC SMALL LETTER KA WITH DESCENDER 0x009e: 0x04bb, # CYRILLIC SMALL LETTER SHHA 0x009f: 0x04b9, # CYRILLIC SMALL LETTER CHE WITH VERTICAL STROKE 0x00a1: 0x040e, # CYRILLIC CAPITAL LETTER SHORT U (Byelorussian) 0x00a2: 0x045e, # CYRILLIC SMALL LETTER SHORT U (Byelorussian) 0x00a3: 0x0408, # CYRILLIC CAPITAL LETTER JE 0x00a4: 0x04e8, # CYRILLIC CAPITAL LETTER BARRED O 0x00a5: 0x0498, # CYRILLIC CAPITAL LETTER ZE WITH DESCENDER 0x00a6: 0x04b0, # CYRILLIC CAPITAL LETTER STRAIGHT U WITH STROKE 0x00a8: 0x0401, # CYRILLIC CAPITAL LETTER IO 0x00aa: 0x04d8, # CYRILLIC CAPITAL LETTER SCHWA 0x00ad: 0x04ef, # CYRILLIC SMALL LETTER U WITH MACRON 0x00af: 0x049c, # CYRILLIC CAPITAL LETTER KA WITH VERTICAL STROKE 0x00b1: 0x04b1, # CYRILLIC SMALL LETTER STRAIGHT U WITH STROKE 0x00b2: 0x0406, # CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I 0x00b3: 0x0456, # CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I 0x00b4: 0x0499, # CYRILLIC SMALL LETTER ZE WITH DESCENDER 0x00b5: 0x04e9, # CYRILLIC SMALL LETTER BARRED O 0x00b8: 0x0451, # CYRILLIC SMALL LETTER IO 0x00b9: 0x2116, # NUMERO SIGN 0x00ba: 0x04d9, # CYRILLIC SMALL LETTER SCHWA 0x00bc: 0x0458, # CYRILLIC SMALL LETTER JE 0x00bd: 0x04aa, # CYRILLIC CAPITAL LETTER ES WITH DESCENDER 0x00be: 0x04ab, 
# CYRILLIC SMALL LETTER ES WITH DESCENDER 0x00bf: 0x049d, # CYRILLIC SMALL LETTER KA WITH VERTICAL STROKE 0x00c0: 0x0410, # CYRILLIC CAPITAL LETTER A 0x00c1: 0x0411, # CYRILLIC CAPITAL LETTER BE 0x00c2: 0x0412, # CYRILLIC CAPITAL LETTER VE 0x00c3: 0x0413, # CYRILLIC CAPITAL LETTER GHE 0x00c4: 0x0414, # CYRILLIC CAPITAL LETTER DE 0x00c5: 0x0415, # CYRILLIC CAPITAL LETTER IE 0x00c6: 0x0416, # CYRILLIC CAPITAL LETTER ZHE 0x00c7: 0x0417, # CYRILLIC CAPITAL LETTER ZE 0x00c8: 0x0418, # CYRILLIC CAPITAL LETTER I 0x00c9: 0x0419, # CYRILLIC CAPITAL LETTER SHORT I 0x00ca: 0x041a, # CYRILLIC CAPITAL LETTER KA 0x00cb: 0x041b, # CYRILLIC CAPITAL LETTER EL 0x00cc: 0x041c, # CYRILLIC CAPITAL LETTER EM 0x00cd: 0x041d, # CYRILLIC CAPITAL LETTER EN 0x00ce: 0x041e, # CYRILLIC CAPITAL LETTER O 0x00cf: 0x041f, # CYRILLIC CAPITAL LETTER PE 0x00d0: 0x0420, # CYRILLIC CAPITAL LETTER ER 0x00d1: 0x0421, # CYRILLIC CAPITAL LETTER ES 0x00d2: 0x0422, # CYRILLIC CAPITAL LETTER TE 0x00d3: 0x0423, # CYRILLIC CAPITAL LETTER U 0x00d4: 0x0424, # CYRILLIC CAPITAL LETTER EF 0x00d5: 0x0425, # CYRILLIC CAPITAL LETTER HA 0x00d6: 0x0426, # CYRILLIC CAPITAL LETTER TSE 0x00d7: 0x0427, # CYRILLIC CAPITAL LETTER CHE 0x00d8: 0x0428, # CYRILLIC CAPITAL LETTER SHA 0x00d9: 0x0429, # CYRILLIC CAPITAL LETTER SHCHA 0x00da: 0x042a, # CYRILLIC CAPITAL LETTER HARD SIGN 0x00db: 0x042b, # CYRILLIC CAPITAL LETTER YERU 0x00dc: 0x042c, # CYRILLIC CAPITAL LETTER SOFT SIGN 0x00dd: 0x042d, # CYRILLIC CAPITAL LETTER E 0x00de: 0x042e, # CYRILLIC CAPITAL LETTER YU 0x00df: 0x042f, # CYRILLIC CAPITAL LETTER YA 0x00e0: 0x0430, # CYRILLIC SMALL LETTER A 0x00e1: 0x0431, # CYRILLIC SMALL LETTER BE 0x00e2: 0x0432, # CYRILLIC SMALL LETTER VE 0x00e3: 0x0433, # CYRILLIC SMALL LETTER GHE 0x00e4: 0x0434, # CYRILLIC SMALL LETTER DE 0x00e5: 0x0435, # CYRILLIC SMALL LETTER IE 0x00e6: 0x0436, # CYRILLIC SMALL LETTER ZHE 0x00e7: 0x0437, # CYRILLIC SMALL LETTER ZE 0x00e8: 0x0438, # CYRILLIC SMALL LETTER I 0x00e9: 0x0439, # CYRILLIC SMALL LETTER 
SHORT I 0x00ea: 0x043a, # CYRILLIC SMALL LETTER KA 0x00eb: 0x043b, # CYRILLIC SMALL LETTER EL 0x00ec: 0x043c, # CYRILLIC SMALL LETTER EM 0x00ed: 0x043d, # CYRILLIC SMALL LETTER EN 0x00ee: 0x043e, # CYRILLIC SMALL LETTER O 0x00ef: 0x043f, # CYRILLIC SMALL LETTER PE 0x00f0: 0x0440, # CYRILLIC SMALL LETTER ER 0x00f1: 0x0441, # CYRILLIC SMALL LETTER ES 0x00f2: 0x0442, # CYRILLIC SMALL LETTER TE 0x00f3: 0x0443, # CYRILLIC SMALL LETTER U 0x00f4: 0x0444, # CYRILLIC SMALL LETTER EF 0x00f5: 0x0445, # CYRILLIC SMALL LETTER HA 0x00f6: 0x0446, # CYRILLIC SMALL LETTER TSE 0x00f7: 0x0447, # CYRILLIC SMALL LETTER CHE 0x00f8: 0x0448, # CYRILLIC SMALL LETTER SHA 0x00f9: 0x0449, # CYRILLIC SMALL LETTER SHCHA 0x00fa: 0x044a, # CYRILLIC SMALL LETTER HARD SIGN 0x00fb: 0x044b, # CYRILLIC SMALL LETTER YERU 0x00fc: 0x044c, # CYRILLIC SMALL LETTER SOFT SIGN 0x00fd: 0x044d, # CYRILLIC SMALL LETTER E 0x00fe: 0x044e, # CYRILLIC SMALL LETTER YU 0x00ff: 0x044f, # CYRILLIC SMALL LETTER YA }) ### Encoding Map encoding_map = codecs.make_encoding_map(decoding_map)
gpl-2.0
elishowk/django-poser
poser/appresolver.py
1
7288
# -*- coding: utf-8 -*- from poser.apphook_pool import apphook_pool from poser.utils.page_resolver import get_page_queryset from django.conf import settings from django.conf.urls.defaults import patterns from django.contrib.sites.models import Site from django.core.exceptions import ImproperlyConfigured from django.core.urlresolvers import RegexURLResolver, Resolver404, reverse, \ RegexURLPattern from django.utils.importlib import import_module APP_RESOLVERS = [] def clear_app_resolvers(): global APP_RESOLVERS APP_RESOLVERS = [] def applications_page_check(request, current_page=None, path=None): """Tries to find if given path was resolved over application. Applications have higher priority than other poser pages. """ if current_page: return current_page if path is None: # We should get in this branch only if an apphook is active on / # This removes the non-POSER part of the URL. path = request.path.replace(reverse('pages-root'), '', 1) # check if application resolver can resolve this for resolver in APP_RESOLVERS: try: page_id = resolver.resolve_page_id(path) # yes, it is application page page = get_page_queryset(request).get(id=page_id) # If current page was matched, then we have some override for content # from poser, but keep current page. Otherwise return page to which was application assigned. return page except Resolver404: # Raised if the page is not managed by an apphook pass return None class AppRegexURLResolver(RegexURLResolver): page_id = None url_patterns = None def resolve_page_id(self, path): """Resolves requested path similar way how resolve does, but instead of return callback,.. returns page_id to which was application assigned. 
""" tried = [] match = self.regex.search(path) if match: new_path = path[match.end():] for pattern in self.url_patterns: try: sub_match = pattern.resolve(new_path) except Resolver404, e: if 'tried' in e.args[0]: tried.extend([(pattern.regex.pattern + ' ' + t) for t in e.args[0]['tried']]) elif 'path' in e.args[0]: tried.extend([(pattern.regex.pattern + ' ' + t) for t in e.args[0]['path']]) else: if sub_match: return pattern.page_id tried.append(pattern.regex.pattern) raise Resolver404, {'tried': tried, 'path': new_path} def recurse_patterns(path, pattern_list, page_id): """ Recurse over a list of to-be-hooked patterns for a given path prefix """ newpatterns = [] for pattern in pattern_list: app_pat = pattern.regex.pattern # make sure we don't get patterns that start with more than one '^'! if not app_pat.endswith('$'): return newpatterns app_pat = app_pat.lstrip('^') path = path.lstrip('^') regex = r'^%s%s' % (path, app_pat) if isinstance(pattern, RegexURLResolver): # this is an 'include', recurse! 
resolver = RegexURLResolver(regex, 'poser_appresolver', pattern.default_kwargs, pattern.app_name, pattern.namespace) resolver.page_id = page_id # see lines 243 and 236 of urlresolvers.py to understand the next line resolver._urlconf_module = recurse_patterns(regex, pattern.url_patterns, page_id) else: # Re-do the RegexURLPattern with the new regular expression resolver = RegexURLPattern(regex, pattern.callback, pattern.default_args, pattern.name) resolver.page_id = page_id newpatterns.append(resolver) return newpatterns def _flatten_patterns(patternstoflatten): flat = [] for pattern in patternstoflatten: if isinstance(pattern, RegexURLResolver): flat += _flatten_patterns(pattern.url_patterns) else: flat.append(pattern) return flat def get_app_urls(urls): for urlconf in urls: if isinstance(urlconf, basestring): mod = import_module(urlconf) if not hasattr(mod, 'urlpatterns'): raise ImproperlyConfigured( "URLConf `%s` has no urlpatterns attribute" % urlconf) yield getattr(mod, 'urlpatterns') else: yield urlconf def get_patterns_for_page(path, page): """ Resolve the urlconf module for a path+title combination Returns a list of url objects. """ app = apphook_pool.get_apphook(page.application) patterns_page = [] for pattern_list in get_app_urls(app.urls): if path and not path.endswith('/'): path += '/' page_id = page.id patterns_page += recurse_patterns(path, pattern_list, page_id) patterns_page = _flatten_patterns(patterns_page) return patterns_page def get_app_patterns(): """ Get a list of patterns for all hooked apps at start-up time. How this works: By looking through all pages with an app hook (application_urls) we find all urlconf modules we have to hook into. All 'normal' patterns from the urlconf get re-written by prefixing them with the title path and then included into the poser url patterns. 
""" from poser.models import Page try: current_site = Site.objects.get_current() except Site.DoesNotExist: current_site = None included = [] page_qs = Page.objects.filter(site=current_site) use_namespaces = False hooked_applications = [] # Loop over all titles with an application hooked to them for page in page_qs.exclude(application=None).exclude(application='').select_related(): path = page.path if use_namespaces: mixid = "%s:%s:%s" % (path + "/", page.application) else: mixid = "%s:%s" % (path + "/", page.application) if mixid in included: # don't add the same thing twice continue if not settings.APPEND_SLASH: path += '/' hooked_applications += get_patterns_for_page(path, page) included.append(mixid) # Build the app patterns to be included in the poser urlconfs app_patterns = [] if use_namespaces: for ns, currentpatterns in hooked_applications.items(): extra_patterns = patterns('', *currentpatterns) resolver = AppRegexURLResolver(r'', 'app_resolver', namespace=ns) resolver.url_patterns = extra_patterns app_patterns.append(resolver) APP_RESOLVERS.append(resolver) else: extra_patterns = patterns('', *hooked_applications) resolver = AppRegexURLResolver(r'', 'app_resolver') resolver.url_patterns = extra_patterns app_patterns.append(resolver) APP_RESOLVERS.append(resolver) return app_patterns
agpl-3.0
pbombnz/IRCBot
modules/quiz.py
1
11722
import os import json import time import re import random from modules import userDatabase class CaseInsensitiveDict(dict): def __contains__(self, key): return dict.__contains__(self, key.lower()) def __getitem__(self, key): return dict.__getitem__(self, key.lower()) def __setitem__(self, key, value): return dict.__setitem__(self, key.lower(), value) tmp_quiz_data = CaseInsensitiveDict() quiz_data = {'questions': list()} # quizData = { "questions" : [{ "question" : cmd[1], # "answers" : [ cmd[3] ], # "bestTime" : [ "" , 0.0 ], # "answeredCorrect" : 0, # "answeredIncorrect" : 0, # "answeredCorrectMessage" : "", # "showAnswers" : True, # # "timePeriod" : float(cmd[2]), # "creator" : sender[0], # "createdTime" : time.time() # } ]} def save_quiz_database(): file = open('./resources/quiz.dat', 'w') json.dump(quiz_data, file) file.close() def load_quiz_database(): global quiz_data file = open('./resources/quiz.dat', 'r') quiz_data = json.load(file) file.close() def on_init(irc): if os.path.isfile("./resources/quiz.dat"): load_quiz_database() else: save_quiz_database() def on_process_forever(bot): current_time = time.time() for channel in tmp_quiz_data: if tmp_quiz_data[channel.lower()]['isActive']: time_difference = current_time - tmp_quiz_data[channel]["startTime"] if time_difference >= tmp_quiz_data[channel]["timePeriod"]: if len(tmp_quiz_data[channel]["players"].keys()) > 1: bot.send_private_message(channel, '5Quiz automatically finished - No one got the right answer.') else: bot.send_private_message(channel, '5Quiz automatically finished - Time in the round ended.') save_quiz_database() del tmp_quiz_data[channel] return def on_channel_pm(irc, user_mask, user, channel, message): global tmp_quiz_data, quiz_data command = message.split() for chan in tmp_quiz_data: if message.lower() in tmp_quiz_data[chan]["wrongAnswers"] or message.lower() == tmp_quiz_data[chan]["rightAnswer"]: if user.lower() not in tmp_quiz_data[chan]['players']: 
tmp_quiz_data[chan]['players'][user.lower()] = 0 if tmp_quiz_data[chan]['players'][user.lower()] == 0: if message.lower() == tmp_quiz_data[chan]["rightAnswer"].lower(): real_time_secs = time.time() irc.userData[user.lower()]["quiz"]["correct"] += 1 userDatabase.save_user_database(irc) irc.send_private_message(channel, '3Congrats 1' + user + ', 3You have correctly answered the question.') quiz_id = tmp_quiz_data[channel.lower()]['id'] if round(real_time_secs - tmp_quiz_data[chan]["startTime"], 2) < \ quiz_data["questions"][quiz_id]["bestTime"][1]: time_dif = quiz_data["questions"][quiz_id]["bestTime"][1] - ( real_time_secs - tmp_quiz_data[chan]["startTime"]) quiz_data["questions"][quiz_id]["bestTime"][1] = round( real_time_secs - tmp_quiz_data[chan]["startTime"], 2) quiz_data["questions"][quiz_id]["bestTime"][0] = user irc.send_private_message(channel, user + '3 has just set the new best time of ' + str( quiz_data["questions"][quiz_id]["bestTime"][ 1]) + ' 3secs. ' + user + ' 3beat the old best time by ' + str( round(time_dif, 2)) + ' 3secs.') elif quiz_data["questions"][quiz_id]["bestTime"][0] == "": quiz_data["questions"][quiz_id]["bestTime"][1] = round( real_time_secs - tmp_quiz_data[chan]["startTime"], 2) quiz_data["questions"][quiz_id]["bestTime"][0] = user irc.send_private_message(channel, user + '3 has just set the new best time of ' + str( quiz_data["questions"][quiz_id]["bestTime"][1]) + ' 3secs.') quiz_data["questions"][quiz_id]["answeredCorrect"] += 1 save_quiz_database() del tmp_quiz_data[chan] else: quiz_id = tmp_quiz_data[channel.lower()]['id'] irc.send_private_message(channel, '5Sorry 1' + user + ', 5that is the wrong answer. 
You cannot attempt anymore for this round.') tmp_quiz_data[chan]["players"][user.lower()] += 1 quiz_data["questions"][quiz_id]["answeredIncorrect"] += 1 irc.user_info[user.lower()]["quiz"]["incorrect"] += 1 irc.userData[user.lower()]["quiz"]["participated"] += 1 userDatabase.save_user_database(irc) return if command[0].lower() == '!quizhelp': irc.send_private_message(channel, user + ', basically you get given a multi-choice question and your job is to carefully type in what you think is the right answer before the time runs out and before any other IRC users guess the right answer. You can only guess once, so double check that you are right. So what are you waiting for? start a !quiz.') elif command[0].lower() == '!quiz': if len(quiz_data['questions']) == 0: irc.send_private_message(channel, '5ERROR: No quiz questions in database.') return #if len(quiz_data['questions']) in range(0, 10): # irc.send_private_message(channel, # '5ERROR: There are only a few quiz questions in database. Until more are added, the quiz will be unavailable.') # return if channel in tmp_quiz_data: return random_quiz_id = random.randint(0, len(quiz_data['questions'])) # print quizQuestionID # print "creating tmp data" tmp_quiz_data[channel] = dict() tmp_quiz_data[channel]['isActive'] = False tmp_quiz_data[channel]["numOfPlayers"] = 0 tmp_quiz_data[channel]["players"] = {} tmp_quiz_data[channel]["timePeriod"] = float(quiz_data['questions'][random_quiz_id]["timePeriod"]) tmp_quiz_data[channel]["rightAnswer"] = quiz_data['questions'][random_quiz_id]['answers'][0].lower() tmp_quiz_data[channel]["wrongAnswers"] = [] for i in range(1, len(quiz_data['questions'][random_quiz_id]['answers'])): tmp_quiz_data[channel]["wrongAnswers"].append(quiz_data['questions'][random_quiz_id]['answers'][i].lower()) tmp_quiz_data[channel]["startTime"] = round(time.time(), 1) tmp_quiz_data[channel]['id'] = random_quiz_id # print "creating tmp data (part 2)" quiz_answers = 
quiz_data['questions'][random_quiz_id]['answers'] quiz_answers = sorted(quiz_answers, key=lambda k: random.random()) tmp_quiz_data[channel.lower()]['isActive'] = True irc.send_private_message(channel, '6Question: "' + str( quiz_data['questions'][random_quiz_id]["question"]) + '" 6Answers: ' + str(quiz_answers).strip( '[]') + '.') if quiz_data['questions'][random_quiz_id]["bestTime"][0] != "": irc.send_private_message(channel, '\u00036Best time set by\u0003 {0} \u00036in\u0003 {1} \u00036secs.'.format( str(quiz_data['questions'][random_quiz_id]["bestTime"][0]), str(quiz_data['questions'][random_quiz_id]["bestTime"][1]))) elif command[0].lower() == '!numberofquizquestions': irc.send_private_message(channel, "There are " + str(len(quiz_data['questions'])) + " questions in the Quiz database.") elif command[0].lower() == '!createquizquestion' or command[0].lower() == '!cqq': if irc.user_info[user.lower()]["access_level"] >= 1: question_re = re.compile("!c(?:(?:reate)?)q(?:(?:uiz)?)q(?:(?:uestion)?)\s(.*\?)\s([0-9]+[0-9]?)\s(.*)", re.IGNORECASE) match = question_re.match(message) if match: command_params = question_re.findall(message)[0] print(command_params) question = command_params[0] time_period = command_params[1] answer_str = command_params[2] if re.match('"([\w\s]*)"', answer_str): answers = re.findall('"([\w\s]*)"', answer_str) else: irc.send_private_message(channel, "USAGE: !c[reate]q[uiz]q[uestion] (Question)? " "(Question Time Period (in Secs)) \"(Correct Answer)\" " "\"(Wrong Answer)\" [\"(Wrong Answer)\" \"(...)\"]") irc.send_private_message(channel, "EXAMPLE: !cqq 1 + 1 = ? 2 1 3 4 5") return if int(time_period) < 5 or int(time_period) > 60: irc.send_private_message(channel, '5ERROR: The time period is not pratical. 
Set a more appropriate time period (between 5 - 60 seconds).') return for question_data in quiz_data['questions']: if question_data['question'].lower() == question.lower(): irc.send_private_message(channel, '5ERROR: The question has already been created.') return question_data = {"question": question, "answers": [], "bestTime": ["", 0.0], "answeredCorrect": 0, "answeredIncorrect": 0, "answeredCorrectMessage": "", "showAnswers": True, "timePeriod": float(time_period), "creator": user, "createdTime": time.time() } for i in range(0, len(answers)): question_data['answers'].append(answers[i]) quiz_data['questions'].append(question_data) save_quiz_database() irc.send_private_message(channel, '3SUCCESS: Quiz Question, "' + question + '" (ID: ' + str( len(quiz_data['questions']) - 1) + ') has been added into the quiz database.') else: irc.send_private_message(channel, "USAGE: !c[reate]q[uiz]q[uestion] (Question)? " "(Question Time Period (in Secs)) \"(Correct Answer)\" " "\"(Wrong Answer)\" [\"(Wrong Answer)\" \"(...)\"]") irc.send_private_message(channel, "EXAMPLE: !cqq 1 + 1 = ? 2 1 3 4 5")
gpl-2.0
technologiescollege/Blockly-rduino-communication
scripts_XP/Lib/multiprocessing/dummy/__init__.py
79
2881
# # Support for the API of the multiprocessing package using threads # # multiprocessing/dummy/__init__.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # __all__ = [ 'Process', 'current_process', 'active_children', 'freeze_support', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event', 'Barrier', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue' ] # # Imports # import threading import sys import weakref import array from .connection import Pipe from threading import Lock, RLock, Semaphore, BoundedSemaphore from threading import Event, Condition, Barrier from queue import Queue # # # class DummyProcess(threading.Thread): def __init__(self, group=None, target=None, name=None, args=(), kwargs={}): threading.Thread.__init__(self, group, target, name, args, kwargs) self._pid = None self._children = weakref.WeakKeyDictionary() self._start_called = False self._parent = current_process() def start(self): assert self._parent is current_process() self._start_called = True if hasattr(self._parent, '_children'): self._parent._children[self] = None threading.Thread.start(self) @property def exitcode(self): if self._start_called and not self.is_alive(): return 0 else: return None # # # Process = DummyProcess current_process = threading.current_thread current_process()._children = weakref.WeakKeyDictionary() def active_children(): children = current_process()._children for p in list(children): if not p.is_alive(): children.pop(p, None) return list(children) def freeze_support(): pass # # # class Namespace(object): def __init__(self, **kwds): self.__dict__.update(kwds) def __repr__(self): items = list(self.__dict__.items()) temp = [] for name, value in items: if not name.startswith('_'): temp.append('%s=%r' % (name, value)) temp.sort() return 'Namespace(%s)' % str.join(', ', temp) dict = dict list = list def Array(typecode, sequence, lock=True): return array.array(typecode, sequence) class Value(object): def 
__init__(self, typecode, value, lock=True): self._typecode = typecode self._value = value def _get(self): return self._value def _set(self, value): self._value = value value = property(_get, _set) def __repr__(self): return '<%s(%r, %r)>'%(type(self).__name__,self._typecode,self._value) def Manager(): return sys.modules[__name__] def shutdown(): pass def Pool(processes=None, initializer=None, initargs=()): from ..pool import ThreadPool return ThreadPool(processes, initializer, initargs) JoinableQueue = Queue
gpl-3.0
EvanK/ansible-modules-core
cloud/amazon/iam_cert.py
17
11879
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'version': '1.0'}

DOCUMENTATION = '''
---
module: iam_cert
short_description: Manage server certificates for use on ELBs and CloudFront
description:
     - Allows for the management of server certificates
version_added: "2.0"
options:
  name:
    description:
      - Name of certificate to add, update or remove.
    required: true
    aliases: []
  new_name:
    description:
      - When present, this will update the name of the cert with the value passed here.
    required: false
    aliases: []
  new_path:
    description:
      - When present, this will update the path of the cert with the value passed here.
    required: false
    aliases: []
  state:
    description:
      - Whether to create, delete certificate. When present is specified it will attempt to make an update if new_path or new_name is specified.
    required: true
    default: null
    choices: [ "present", "absent" ]
    aliases: []
  path:
    description:
      - When creating or updating, specify the desired path of the certificate
    required: false
    default: "/"
    aliases: []
  cert_chain:
    description:
      - The path to the CA certificate chain in PEM encoded format.
    required: false
    default: null
    aliases: []
  cert:
    description:
      - The path to the certificate body in PEM encoded format.
    required: false
    aliases: []
  key:
    description:
      - The path to the private key of the certificate in PEM encoded format.
  dup_ok:
    description:
      - By default the module will not upload a certificate that is already uploaded into AWS. If set to True, it will upload the certificate as long as the name is unique.
    required: false
    default: False
    aliases: []
  aws_secret_key:
    description:
      - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
    required: false
    default: null
    aliases: [ 'ec2_secret_key', 'secret_key' ]
  aws_access_key:
    description:
      - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
    required: false
    default: null
    aliases: [ 'ec2_access_key', 'access_key' ]

requirements: [ "boto" ]
author: Jonathan I. Davila
extends_documentation_fragment:
    - aws
    - ec2
'''

EXAMPLES = '''
# Basic server certificate upload
tasks:
- name: Upload Certificate
  iam_cert:
    name: very_ssl
    state: present
    cert: somecert.pem
    key: privcertkey
    cert_chain: myverytrustedchain
'''
import json
import sys
try:
    import boto
    import boto.iam
    import boto.ec2
    HAS_BOTO = True
except ImportError:
    HAS_BOTO = False


def boto_exception(err):
    '''Return a human-readable message for an arbitrary boto exception.

    boto exceptions are inconsistent about which attribute carries the
    message, so probe the known ones before falling back to str().
    '''
    if hasattr(err, 'error_message'):
        error = err.error_message
    elif hasattr(err, 'message'):
        error = err.message
    else:
        error = '%s: %s' % (Exception, err)
    return error


def cert_meta(iam, name):
    """Return metadata for the server certificate ``name``.

    Parameters
    ----------
    iam : boto IAM connection
    name : str
        Name of the server certificate to look up.

    Returns
    -------
    tuple
        (path, certificate_body, certificate_id, upload_date, expiration)
    """
    # Fetch the certificate once. The original code issued five identical
    # get_server_certificate API calls, one per metadata field.
    result = iam.get_server_certificate(name).get_server_certificate_result
    certificate = result.server_certificate
    metadata = certificate.server_certificate_metadata
    return (metadata.path,
            certificate.certificate_body,
            metadata.server_certificate_id,
            metadata.upload_date,
            metadata.expiration)


def dup_check(module, iam, name, new_name, cert, orig_cert_names,
              orig_cert_bodies, dup_ok):
    """Decide whether an existing certificate should be updated.

    Fails the module when a name collision has a different certificate
    body, or when the same body already exists under another name and
    ``dup_ok`` is not set.

    Returns
    -------
    bool
        True when an existing cert matching name/new_name should be updated,
        False when a fresh upload is required.
    """
    update = False
    if any(ct in orig_cert_names for ct in [name, new_name]):
        for i_name in [name, new_name]:
            if i_name is None:
                continue

            if cert is not None:
                try:
                    c_index = orig_cert_names.index(i_name)
                except ValueError:
                    # BUGFIX: list.index raises ValueError when the item is
                    # missing; the original caught NameError, which can never
                    # fire here, so a missing name crashed the module.
                    continue
                else:
                    if orig_cert_bodies[c_index] == cert:
                        update = True
                        break
                    # Same name, different body: refuse to clobber.
                    module.fail_json(changed=False,
                                     msg='A cert with the name %s already exists and'
                                         ' has a different certificate body associated'
                                         ' with it. Certificates cannot have the same name' % i_name)
            else:
                update = True
                break
    elif cert in orig_cert_bodies and not dup_ok:
        for crt_name, crt_body in zip(orig_cert_names, orig_cert_bodies):
            if crt_body == cert:
                module.fail_json(changed=False,
                                 msg='This certificate already'
                                     ' exists under the name %s' % crt_name)

    return update


def cert_action(module, iam, name, cpath, new_name, new_path, state,
                cert, key, chain, orig_cert_names, orig_cert_bodies, dup_ok):
    """Create, rename/move, or delete the server certificate ``name``.

    Every branch terminates the module via exit_json/fail_json, so this
    function never returns to the caller on success.
    """
    if state == 'present':
        update = dup_check(module, iam, name, new_name, cert,
                           orig_cert_names, orig_cert_bodies, dup_ok)
        if update:
            opath, ocert, ocert_id, upload_date, exp = cert_meta(iam, name)
            changed = True
            if new_name and new_path:
                # Rename and move in one call.
                iam.update_server_cert(name, new_cert_name=new_name,
                                       new_path=new_path)
                module.exit_json(changed=changed,
                                 original_name=name,
                                 new_name=new_name,
                                 original_path=opath,
                                 new_path=new_path,
                                 cert_body=ocert,
                                 upload_date=upload_date,
                                 expiration_date=exp)
            elif new_name and not new_path:
                iam.update_server_cert(name, new_cert_name=new_name)
                module.exit_json(changed=changed,
                                 original_name=name,
                                 new_name=new_name,
                                 cert_path=opath,
                                 cert_body=ocert,
                                 upload_date=upload_date,
                                 expiration_date=exp)
            elif not new_name and new_path:
                iam.update_server_cert(name, new_path=new_path)
                module.exit_json(changed=changed,
                                 name=new_name,
                                 original_path=opath,
                                 new_path=new_path,
                                 cert_body=ocert,
                                 upload_date=upload_date,
                                 expiration_date=exp)
            else:
                # Cert already present and no rename/move requested.
                changed = False
                module.exit_json(changed=changed,
                                 name=name,
                                 cert_path=opath,
                                 cert_body=ocert,
                                 upload_date=upload_date,
                                 expiration_date=exp,
                                 msg='No new path or name specified. No changes made')
        else:
            changed = True
            iam.upload_server_cert(name, cert, key, cert_chain=chain,
                                   path=cpath)
            opath, ocert, ocert_id, upload_date, exp = cert_meta(iam, name)
            module.exit_json(changed=changed,
                             name=name,
                             cert_path=opath,
                             cert_body=ocert,
                             upload_date=upload_date,
                             expiration_date=exp)
    elif state == 'absent':
        if name in orig_cert_names:
            changed = True
            iam.delete_server_cert(name)
            module.exit_json(changed=changed, deleted_cert=name)
        else:
            changed = False
            module.exit_json(changed=changed,
                             msg='Certificate with the name %s already absent' % name)


def main():
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state=dict(default=None, required=True, choices=['present', 'absent']),
        name=dict(default=None, required=False),
        cert=dict(default=None, required=False, type='path'),
        key=dict(default=None, required=False, type='path'),
        cert_chain=dict(default=None, required=False, type='path'),
        new_name=dict(default=None, required=False),
        path=dict(default='/', required=False),
        new_path=dict(default=None, required=False),
        dup_ok=dict(default=False, required=False, type='bool')
    )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=[],
    )

    if not HAS_BOTO:
        module.fail_json(msg="Boto is required for this module")

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)

    try:
        if region:
            iam = connect_to_aws(boto.iam, region, **aws_connect_kwargs)
        else:
            iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
    except boto.exception.NoAuthHandlerFound as e:
        module.fail_json(msg=str(e))

    state = module.params.get('state')
    name = module.params.get('name')
    path = module.params.get('path')
    new_name = module.params.get('new_name')
    new_path = module.params.get('new_path')
    cert_chain = module.params.get('cert_chain')
    dup_ok = module.params.get('dup_ok')
    if state == 'present':
        # Read the PEM material up front. Context managers make sure the
        # file handles are closed (the original leaked all three).
        with open(module.params.get('cert'), 'r') as cert_fh:
            cert = cert_fh.read().rstrip()
        with open(module.params.get('key'), 'r') as key_fh:
            key = key_fh.read().rstrip()
        if cert_chain is not None:
            with open(module.params.get('cert_chain'), 'r') as chain_fh:
                cert_chain = chain_fh.read()
    else:
        # state == 'absent': no PEM material is needed.
        key = cert = None

    orig_certs = [ctb['server_certificate_name'] for ctb in
                  iam.get_all_server_certs().
                  list_server_certificates_result.
                  server_certificate_metadata_list]
    orig_bodies = [iam.get_server_certificate(thing).
                   get_server_certificate_result.
                   certificate_body
                   for thing in orig_certs]
    # A no-op rename/move is treated as "not requested".
    if new_name == name:
        new_name = None
    if new_path == path:
        new_path = None

    changed = False
    try:
        cert_action(module, iam, name, path, new_name, new_path, state,
                    cert, key, cert_chain, orig_certs, orig_bodies, dup_ok)
    except boto.exception.BotoServerError as err:
        # SECURITY: the original also included the private key in the
        # failure output (debug=[cert, key]); never emit key material.
        module.fail_json(changed=changed, msg=str(err), debug=[cert])

from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *

if __name__ == '__main__':
    main()
gpl-3.0
josenavas/labman
labman/db/process.py
1
115578
# ---------------------------------------------------------------------------- # Copyright (c) 2017-, labman development team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file LICENSE, distributed with this software. # ---------------------------------------------------------------------------- from datetime import date, datetime from io import StringIO from itertools import chain import re from json import dumps import numpy as np import pandas as pd from . import base from . import sql_connection from . import user as user_module from . import plate as plate_module from . import container as container_module from . import composition as composition_module from . import equipment as equipment_module from .study import Study class Process(base.LabmanObject): """Base process object Attributes ---------- id date personnel """ @staticmethod def factory(process_id): """Initializes the correct Process subclass Parameters ---------- process_id : int The process id Returns ------- An instance of a subclass of Process """ factory_classes = { # 'primer template creation': TODO, 'primer working plate creation': PrimerWorkingPlateCreationProcess, 'sample plating': SamplePlatingProcess, 'reagent creation': ReagentCreationProcess, 'gDNA extraction': GDNAExtractionProcess, '16S library prep': LibraryPrep16SProcess, 'shotgun library prep': LibraryPrepShotgunProcess, 'quantification': QuantificationProcess, 'gDNA normalization': NormalizationProcess, 'compress gDNA plates': GDNAPlateCompressionProcess, 'pooling': PoolingProcess, 'sequencing': SequencingProcess} with sql_connection.TRN as TRN: sql = """SELECT description FROM qiita.process_type JOIN qiita.process USING (process_type_id) WHERE process_id = %s""" TRN.add(sql, [process_id]) p_type = TRN.execute_fetchlast() constructor = factory_classes[p_type] if constructor._table == 'qiita.process': instance = constructor(process_id) else: sql = """SELECT {} FROM {} WHERE process_id = 
%s""".format( constructor._id_column, constructor._table) TRN.add(sql, [process_id]) subclass_id = TRN.execute_fetchlast() instance = constructor(subclass_id) return instance @classmethod def _common_creation_steps(cls, user, process_date=None): if process_date is None: process_date = date.today() with sql_connection.TRN as TRN: sql = """SELECT process_type_id FROM qiita.process_type WHERE description = %s""" TRN.add(sql, [cls._process_type]) pt_id = TRN.execute_fetchlast() sql = """INSERT INTO qiita.process (process_type_id, run_date, run_personnel_id) VALUES (%s, %s, %s) RETURNING process_id""" TRN.add(sql, [pt_id, process_date, user.id]) p_id = TRN.execute_fetchlast() return p_id def _get_process_attr(self, attr): """Returns the value of the given process attribute Parameters ---------- attr : str The attribute to retrieve Returns ------- Object The attribute """ with sql_connection.TRN as TRN: sql = """SELECT {} FROM qiita.process JOIN {} USING (process_id) WHERE {} = %s""".format(attr, self._table, self._id_column) TRN.add(sql, [self.id]) return TRN.execute_fetchlast() @property def date(self): return self._get_process_attr('run_date') @property def personnel(self): return user_module.User(self._get_process_attr('run_personnel_id')) @property def process_id(self): return self._get_process_attr('process_id') @property def plates(self): """The plates being extracted by this process Returns ------- plate : list of labman.db.Plate The extracted plates """ with sql_connection.TRN as TRN: sql = """SELECT DISTINCT plate_id FROM qiita.container LEFT JOIN qiita.well USING (container_id) WHERE latest_upstream_process_id = %s ORDER BY plate_id""" TRN.add(sql, [self.process_id]) plate_ids = TRN.execute_fetchflatten() return [plate_module.Plate(plate_id) for plate_id in plate_ids] class _Process(Process): """Process object Not all processes have a specific subtable, so we need to override the date and personnel attributes Attributes ---------- id date personnel """ _table 
= 'qiita.process' _id_column = 'process_id' @property def date(self): return self._get_attr('run_date') @property def personnel(self): return user_module.User(self._get_attr('run_personnel_id')) @property def process_id(self): return self._get_attr('process_id') class SamplePlatingProcess(_Process): """Sample plating process""" _process_type = 'sample plating' @classmethod def create(cls, user, plate_config, plate_ext_id, volume=None): """Creates a new sample plating process Parameters ---------- user : labman.db.user.User User performing the plating plate_config : labman.db.PlateConfiguration The sample plate configuration plate_ext_id : str The external plate id volume : float, optional Starting well volume Returns ------- SamplePlatingProcess """ with sql_connection.TRN: volume = volume if volume else 0 # Add the row to the process table instance = cls(cls._common_creation_steps(user)) # Create the plate plate = plate_module.Plate.create(plate_ext_id, plate_config) # By definition, all well plates are blank at the beginning # so populate all the wells in the plate with BLANKS for i in range(plate_config.num_rows): for j in range(plate_config.num_columns): well = container_module.Well.create( plate, instance, volume, i + 1, j + 1) composition_module.SampleComposition.create( instance, well, volume) return instance @property def plate(self): """The plate being plated by this process Returns ------- plate : labman.db.Plate The plate being plated """ with sql_connection.TRN as TRN: sql = """SELECT DISTINCT plate_id FROM qiita.container LEFT JOIN qiita.well USING (container_id) LEFT JOIN qiita.plate USING (plate_id) WHERE latest_upstream_process_id = %s""" TRN.add(sql, [self.id]) plate_id = TRN.execute_fetchlast() return plate_module.Plate(plate_id) def update_well(self, row, col, content): """Updates the content of a well Parameters ---------- row: int The well row col: int The well column content: str The new contents of the well Returns ------- str The new 
contents of the well """ return self.plate.get_well(row, col).composition.update(content) def comment_well(self, row, col, comment): """Updates the comment of a well Parameters ---------- row: int The well row col: int The well column content: str The new contents of the well """ self.plate.get_well(row, col).composition.notes = comment class ReagentCreationProcess(_Process): """Reagent creation process""" _process_type = 'reagent creation' @classmethod def create(cls, user, external_id, volume, reagent_type): """Creates a new reagent creation process Parameters ---------- user : labman.db.user.User User adding the reagent to the system external_id: str The external id of the reagent volume: float Initial reagent volume reagent_type : str The type of the reagent Returns ------- ReagentCreationProce """ with sql_connection.TRN: # Add the row to the process table instance = cls(cls._common_creation_steps(user)) # Create the tube and the composition tube = container_module.Tube.create(instance, external_id, volume) composition_module.ReagentComposition.create( instance, tube, volume, reagent_type, external_id) return instance @property def tube(self): """The tube storing the reagent""" with sql_connection.TRN as TRN: sql = """SELECT tube_id FROM qiita.tube LEFT JOIN qiita.container USING (container_id) WHERE latest_upstream_process_id = %s""" TRN.add(sql, [self.process_id]) tube_id = TRN.execute_fetchlast() return container_module.Tube(tube_id) class PrimerWorkingPlateCreationProcess(Process): """Primer working plate creation process object Attributes ---------- primer_set master_set_order_number """ _table = 'qiita.primer_working_plate_creation_process' _id_column = 'primer_working_plate_creation_process_id' _process_type = 'primer working plate creation' @classmethod def create(cls, user, primer_set, master_set_order, creation_date=None): """Creates a new set of working primer plates Parameters ---------- user : labman.db.user.User User creating the new set of 
primer plates primer_set : labman.composition.PrimerSet The primer set master_set_order : str The master set order creation_date: datetime.date, optional The creation date. Default: today Returns ------- PrimerWorkingPlateCreationProcess """ with sql_connection.TRN as TRN: # Add the row to the process table process_id = cls._common_creation_steps( user, process_date=creation_date) sql = """INSERT INTO qiita.primer_working_plate_creation_process (process_id, primer_set_id, master_set_order_number) VALUES (%s, %s, %s) RETURNING primer_working_plate_creation_process_id""" TRN.add(sql, [process_id, primer_set.id, master_set_order]) instance = cls(TRN.execute_fetchlast()) creation_date = instance.date plate_name_suffix = creation_date.strftime('%Y-%m-%d') primer_set_plates = primer_set.plates check_name = '%s %s' % (primer_set_plates[0].external_id, plate_name_suffix) if plate_module.Plate.external_id_exists(check_name): # The likelihood of this happening in the real system is really # low, but better be safe than sorry plate_name_suffix = datetime.now().strftime('%Y-%m-%d %H:%M') for ps_plate in primer_set_plates: # Create a new working primer plate plate_name = '%s %s' % (ps_plate.external_id, plate_name_suffix) plate_config = ps_plate.plate_configuration work_plate = plate_module.Plate.create( plate_name, plate_config) # Add the wells to the new plate for row in ps_plate.layout: for ps_well in row: w_well = container_module.Well.create( work_plate, instance, 10, ps_well.row, ps_well.column) composition_module.PrimerComposition.create( instance, w_well, 10, ps_well.composition) return instance @property def primer_set(self): """The primer set template from which the working plates are created Returns ------- PrimerSet """ return composition_module.PrimerSet(self._get_attr('primer_set_id')) @property def master_set_order(self): """The master set order Returns ------- str """ return self._get_attr('master_set_order_number') class GDNAExtractionProcess(Process): """gDNA 
extraction process object Attributes ---------- kingfisher epmotion epmotion_tool extraction_kit sample_plate volume See Also -------- Process """ _table = 'qiita.gdna_extraction_process' _id_column = 'gdna_extraction_process_id' _process_type = 'gDNA extraction' @property def kingfisher(self): """The King Fisher robot used during extraction Returns ------- Equipment """ return equipment_module.Equipment( self._get_attr('kingfisher_robot_id')) @property def epmotion(self): """The EpMotion robot used during extraction Returns ------- Equipment """ return equipment_module.Equipment(self._get_attr('epmotion_robot_id')) @property def epmotion_tool(self): """The EpMotion tool used during extraction Returns ------- Equipment """ return equipment_module.Equipment(self._get_attr('epmotion_tool_id')) @property def extraction_kit(self): """The extraction kit used Returns ------- ReagentComposition """ return composition_module.ReagentComposition( self._get_attr('extraction_kit_id')) @property def sample_plate(self): """The source sample plate Returns ------- Plate """ with sql_connection.TRN as TRN: sql = """SELECT DISTINCT plate_id FROM qiita.composition gc JOIN qiita.gdna_composition gdc ON gc.composition_id = gdc.composition_id JOIN qiita.sample_composition ssc USING (sample_composition_id) JOIN qiita.composition sc ON ssc.composition_id = sc.composition_id JOIN qiita.well w ON sc.container_id = w.container_id WHERE gc.upstream_process_id = %s""" TRN.add(sql, [self.process_id]) return plate_module.Plate(TRN.execute_fetchlast()) @property def volume(self): """The elution volume Returns ------- float """ with sql_connection.TRN as TRN: sql = """SELECT DISTINCT total_volume FROM qiita.composition WHERE upstream_process_id = %s""" TRN.add(sql, [self.process_id]) return TRN.execute_fetchlast() @classmethod def create(cls, user, plate, kingfisher, epmotion, epmotion_tool, extraction_kit, volume, gdna_plate_name, extraction_date=None): """Creates a new gDNA extraction process 
Parameters ---------- user : labman.db.user.User User performing the gDNA extraction plate: labman.db.plate.Plate The plate being extracted kingfisher: labman.db.equipment.Equipment The KingFisher used epmotion: labman.db.equipment.Equipment The EpMotion used epmotion_tool: labman.db.equipment.Equipment The EpMotion tool used extraciton_kit: labman.db.composition.ReagentComposition The extraction kit used volume : float The elution extracted gdna_plate_name : str The name for the gdna plate extraction_date : datetime.date, optional The extraction date. Default: today Returns ------- GDNAExtractionProcess """ with sql_connection.TRN as TRN: # Add the row to the process table process_id = cls._common_creation_steps( user, process_date=extraction_date) # Add the row to the gdna_extraction_process table sql = """INSERT INTO qiita.gdna_extraction_process (process_id, epmotion_robot_id, epmotion_tool_id, kingfisher_robot_id, extraction_kit_id) VALUES (%s, %s, %s, %s, %s) RETURNING gdna_extraction_process_id""" TRN.add(sql, [process_id, epmotion.id, epmotion_tool.id, kingfisher.id, extraction_kit.id]) instance = cls(TRN.execute_fetchlast()) # Create the extracted plate plate_config = plate.plate_configuration gdna_plate = plate_module.Plate.create( gdna_plate_name, plate_config) plate_layout = plate.layout # Add the wells to the new plate for i in range(plate_config.num_rows): for j in range(plate_config.num_columns): plated_sample = plate_layout[i][j].composition if plated_sample.sample_composition_type != 'empty': well = container_module.Well.create( gdna_plate, instance, volume, i + 1, j + 1) composition_module.GDNAComposition.create( instance, well, volume, plated_sample) return instance class GDNAPlateCompressionProcess(Process): """Gets 1 to 4 96-well gDNA plates and remaps them in a 384-well plate The remapping schema follows this strucutre: A B A B A B A B ... C D C D C D C D ... A B A B A B A B ... C D C D C D C D ... ... 
""" _table = 'qiita.compression_process' _id_column = 'compression_process_id' _process_type = "compress gDNA plates" def _compress_plate(self, out_plate, in_plate, row_pad, col_pad, volume=1): """Compresses the 96-well in_plate into the 384-well out_plate""" with sql_connection.TRN: layout = in_plate.layout for row in layout: for well in row: if well is not None: # The row/col pair is stored in the DB starting at 1 # subtract 1 to make it start at 0 so the math works # and re-add 1 at the end out_well_row = (((well.row - 1) * 2) + row_pad) + 1 out_well_col = (((well.column - 1) * 2) + col_pad) + 1 out_well = container_module.Well.create( out_plate, self, volume, out_well_row, out_well_col) composition_module.CompressedGDNAComposition.create( self, out_well, volume, well.composition) @classmethod def create(cls, user, plates, plate_ext_id, robot): """Creates a new gDNA compression process Parameters ---------- user : labman.db.user.User User performing the plating plates: list of labman.db.plate.Plate The plates to compress plate_ext_id : str The external plate id robot: Equipment The robot performing the compression Raises ------ ValueError Returns ------- GDNAPlateCompressionProcess """ if not (1 <= len(plates) <= 4): raise ValueError( 'Cannot compress %s gDNA plates. 
Please provide 1 to 4 ' 'gDNA plates' % len(plates)) with sql_connection.TRN as TRN: # Add the row to the process table process_id = cls._common_creation_steps(user) # Add the row to the compression_process table sql = """INSERT INTO qiita.compression_process (process_id, robot_id) VALUES (%s, %s) RETURNING compression_process_id""" TRN.add(sql, [process_id, robot.id]) instance = cls(TRN.execute_fetchlast()) # Create the output plate # Magic number 3 -> 384-well plate plate = plate_module.Plate.create( plate_ext_id, plate_module.PlateConfiguration(3)) # Compress the plates for i, in_plate in enumerate(plates): row_pad = int(np.floor(i / 2)) col_pad = i % 2 instance._compress_plate(plate, in_plate, row_pad, col_pad) return instance @property def robot(self): """The robot performing the compression""" return equipment_module.Equipment(self._get_attr('robot_id')) @property def gdna_plates(self): """The input gdna plates""" with sql_connection.TRN as TRN: # Rationale: giving the compression algorithm, we only need to look # at the 4 wells on the top left corner (1, 1), (1, 2), (2, 1) and # (2, 2), and in that order, to know which plates have been # compressed sql = """SELECT gw.plate_id FROM qiita.composition cc JOIN qiita.well cw ON cc.container_id = cw.container_id JOIN qiita.compressed_gdna_composition cgc ON cc.composition_id = cgc.composition_id JOIN qiita.gdna_composition gdnac ON cgc.gdna_composition_id = gdnac.gdna_composition_id JOIN qiita.composition gc ON gdnac.composition_id = gc.composition_id JOIN qiita.well gw ON gc.container_id = gw.container_id WHERE cc.upstream_process_id = %s AND cw.row_num IN (1, 2) AND cw.col_num IN (1, 2) ORDER BY cw.row_num, cw.col_num""" TRN.add(sql, [self.process_id]) return [plate_module.Plate(pid) for pid in TRN.execute_fetchflatten()] class LibraryPrep16SProcess(Process): """16S Library Prep process object Attributes ---------- mastermix_lots water_lots epmotions See Also -------- Process """ _table = 
'qiita.library_prep_16s_process' _id_column = 'library_prep_16s_process_id' _process_type = '16S library prep' @classmethod def create(cls, user, plate, primer_plate, lib_plate_name, epmotion, epmotion_tool_tm300, epmotion_tool_tm50, master_mix, water_lot, volume, preparation_date=None): """Creates a new 16S library prep process Parameters ---------- user : labman.db.user.User User performing the library prep plate: labman.db.plate.Plate The plate being prepared for amplicon sequencing primer_plate: labman.db.plate.Plate The primer plate lib_plate_name: str The name of the prepared plate epmotion: labman.db.equipment.Equipment The EpMotion epmotion_tool_tm300: labman.db.equipment.Equipment The EpMotion TM300 8 tool epmotion_tool_tm50: labman.db.equipment.Equipment The EpMotion TM300 8 tool master_mix: labman.db.composition.ReagentComposition The mastermix used water_lot: labman.db.composition.ReagentComposition The water lot used volume : float The PCR total volume in the wells preparation_date : datetime.date, optional The preparation date. 
Default: today Returns ------- LibraryPrep16SProcess """ with sql_connection.TRN as TRN: # Add the row to the process table process_id = cls._common_creation_steps( user, process_date=preparation_date) # Add the row to the library_prep_16s_process sql = """INSERT INTO qiita.library_prep_16s_process (process_id, epmotion_robot_id, epmotion_tm300_8_tool_id, epmotion_tm50_8_tool_id, master_mix_id, water_lot_id) VALUES (%s, %s, %s, %s, %s, %s) RETURNING library_prep_16s_process_id""" TRN.add(sql, [process_id, epmotion.id, epmotion_tool_tm300.id, epmotion_tool_tm50.id, master_mix.id, water_lot.id]) instance = cls(TRN.execute_fetchlast()) # Create the library plate plate_config = plate.plate_configuration library_plate = plate_module.Plate.create(lib_plate_name, plate_config) gdna_layout = plate.layout primer_layout = primer_plate.layout for i in range(plate_config.num_rows): for j in range(plate_config.num_columns): if gdna_layout[i][j] is not None: well = container_module.Well.create( library_plate, instance, volume, i + 1, j + 1) composition_module.LibraryPrep16SComposition.create( instance, well, volume, gdna_layout[i][j].composition, primer_layout[i][j].composition) return instance @property def mastermix(self): """The master mix lot used Returns ------- ReagentComposition """ return composition_module.ReagentComposition( self._get_attr('master_mix_id')) @property def water_lot(self): """The water lot used Returns ------- ReagentComposition """ return composition_module.ReagentComposition( self._get_attr('water_lot_id')) @property def epmotion(self): """The EpMotion robot used Returns ------- Equipment """ return equipment_module.Equipment(self._get_attr('epmotion_robot_id')) @property def epmotion_tm300_tool(self): """The EpMotion tm300 tool used Returns ------- Equipment """ return equipment_module.Equipment( self._get_attr('epmotion_tm300_8_tool_id')) @property def epmotion_tm50_tool(self): """The EpMotion tm50 tool used Returns ------- Equipment """ return 
equipment_module.Equipment( self._get_attr('epmotion_tm50_8_tool_id')) @property def gdna_plate(self): """The input gdna plate Returns ------- Plate """ with sql_connection.TRN as TRN: sql = """SELECT DISTINCT plate_id FROM qiita.composition lc JOIN qiita.library_prep_16s_composition l16sc ON lc.composition_id = l16sc.composition_id JOIN qiita.gdna_composition gdc USING (gdna_composition_id) JOIN qiita.composition gc ON gc.composition_id = gdc.composition_id JOIN qiita.well w ON gc.container_id = w.container_id WHERE lc.upstream_process_id = %s""" TRN.add(sql, [self.process_id]) return plate_module.Plate(TRN.execute_fetchlast()) @property def primer_plate(self): """The primer plate Returns ------- plate """ with sql_connection.TRN as TRN: sql = """SELECT DISTINCT plate_id FROM qiita.composition lc JOIN qiita.library_prep_16s_composition l16sc ON lc.composition_id = l16sc.composition_id JOIN qiita.primer_composition prc USING (primer_composition_id) JOIN qiita.composition pc ON pc.composition_id = prc.composition_id JOIN qiita.well w ON pc.container_id = w.container_id WHERE lc.upstream_process_id = %s""" TRN.add(sql, [self.process_id]) return plate_module.Plate(TRN.execute_fetchlast()) @property def volume(self): """The PCR Total volume Returns ------- float """ with sql_connection.TRN as TRN: sql = """SELECT DISTINCT total_volume FROM qiita.composition WHERE upstream_process_id = %s""" TRN.add(sql, [self.process_id]) return TRN.execute_fetchlast() class NormalizationProcess(Process): """Normalization process object Attributes ---------- quantification_process water_lot See Also -------- Process """ _table = 'qiita.normalization_process' _id_column = 'normalization_process_id' _process_type = 'gDNA normalization' @staticmethod def _calculate_norm_vol(dna_concs, ng=5, min_vol=2.5, max_vol=3500, resolution=2.5): """Calculates nanoliters of each sample to add to get a normalized pool Parameters ---------- dna_concs : numpy array of float The concentrations calculated 
via PicoGreen (ng/uL) ng : float, optional The amount of DNA to pool (ng). Default: 5 min_vol : float, optional The minimum volume to pool (nL). Default: 2.5 max_vol : float, optional The maximum volume to pool (nL). Default: 3500 resolution: float, optional Resolution to use (nL). Default: 2.5 Returns ------- sample_vols : numpy array of float The volumes to pool (nL) """ sample_vols = ng / np.nan_to_num(dna_concs) * 1000 sample_vols = np.clip(sample_vols, min_vol, max_vol) sample_vols = np.round(sample_vols / resolution) * resolution return sample_vols @classmethod def create(cls, user, quant_process, water, plate_name, total_vol=3500, ng=5, min_vol=2.5, max_vol=3500, resolution=2.5, reformat=False): """Creates a new normalization process Parameters ---------- user : labman.db.user.User User performing the gDNA extraction quant_process : QuantificationProcess The quantification process to use for normalization water: ReagentComposition The water lot used for the normalization plate_name: str The output plate name total_vol: float, optional The total volume of normalized DNA (nL). Default: 3500 ng : float, optional The amount of DNA to pool (ng). Default: 5 min_vol : float, optional The minimum volume to pool (nL). Default: 2.5 max_vol : float, optional The maximum volume to pool (nL). Default: 3500 resolution: float, optional Resolution to use. Default: 2.5 reformat: bool, optional If true, reformat the plate from the interleaved format to the column format. Useful when 384-well plate is not full to save reagents. 
Default: False Returns ------- NormalizationProcess """ with sql_connection.TRN as TRN: # Add the row to the process table process_id = cls._common_creation_steps(user) # Add the row to the normalization_process tables func_data = { 'function': 'default', 'parameters': {'total_volume': total_vol, 'target_dna': ng, 'min_vol': min_vol, 'max_volume': max_vol, 'resolution': resolution, 'reformat': reformat}} sql = """INSERT INTO qiita.normalization_process (process_id, quantitation_process_id, water_lot_id, normalization_function_data) VALUES (%s, %s, %s, %s) RETURNING normalization_process_id""" TRN.add(sql, [process_id, quant_process.id, water.id, dumps(func_data)]) instance = cls(TRN.execute_fetchlast()) # Retrieve all the concentration values concs = quant_process.concentrations # Transform the concentrations to a numpy array np_conc = np.asarray([raw_con for _, raw_con, _ in concs]) dna_v = NormalizationProcess._calculate_norm_vol( np_conc, ng, min_vol, max_vol, resolution) water_v = total_vol - dna_v # Create the plate. 
3 -> 384-well plate plate_config = plate_module.PlateConfiguration(3) plate = plate_module.Plate.create(plate_name, plate_config) for (comp, _, _), dna_vol, water_vol in zip(concs, dna_v, water_v): comp_well = comp.container row = comp_well.row column = comp_well.column if reformat: row = row - 1 column = column - 1 roffset = row % 2 row = int(row - roffset + np.floor(column / 12)) + 1 coffset = column % 2 + (row % 2) * 2 column = int(coffset * 6 + (column / 2) % 6) + 1 well = container_module.Well.create( plate, instance, total_vol, row, column) composition_module.NormalizedGDNAComposition.create( instance, well, total_vol, comp, dna_vol, water_vol) return instance @property def quantification_process(self): """The quantification process used Returns ------- QuantificationProcess """ return QuantificationProcess(self._get_attr('quantitation_process_id')) @property def water_lot(self): """The water lot used Returns ------- ReagentComposition """ return composition_module.ReagentComposition( self._get_attr('water_lot_id')) @property def compressed_plate(self): """The input compressed plate Returns ------- Plate """ with sql_connection.TRN as TRN: sql = """SELECT DISTINCT plate_id FROM qiita.composition nc JOIN qiita.normalized_gdna_composition ngc ON nc.composition_id = ngc.composition_id JOIN qiita.compressed_gdna_composition cgdnac USING (compressed_gdna_composition_id) JOIN qiita.composition cc ON cc.composition_id = cgdnac.composition_id JOIN qiita.well w ON cc.container_id = w.container_id WHERE nc.upstream_process_id = %s""" TRN.add(sql, [self.process_id]) return plate_module.Plate(TRN.execute_fetchlast()) @property def normalization_function_data(self): """The information about the normalization function Returns ------- str """ return self._get_attr('normalization_function_data') @staticmethod def _format_picklist(dna_vols, water_vols, wells, dest_wells=None, dna_concs=None, sample_names=None, dna_plate_name='Sample', water_plate_name='Water', 
dna_plate_type='384PP_AQ_BP2_HT', water_plate_type='384PP_AQ_BP2_HT', dest_plate_name='NormalizedDNA', dna_plate_names=None): """Formats Echo pick list to achieve a normalized input DNA pool Parameters ---------- dna_vols: numpy array of float The volumes of dna to add water_vols: numpy array of float The volumes of water to add wells: numpy array of str The well codes in the same orientation as the DNA concentrations dest_wells: numpy array of str The well codes, in the same orientation as `wells`, in which to place each sample if reformatting dna_concs: numpy array of float The concentrations calculated via PicoGreen (ng/uL) sample_names: numpy array of str The sample names in the same orientation as the DNA concentrations Returns ------- picklist : str The Echo formatted pick list """ # check that arrays are the right size if dna_vols.shape != wells.shape != water_vols.shape: raise ValueError( 'dna_vols %r has a size different from wells %r or water_vols' % (dna_vols.shape, wells.shape, water_vols.shape)) # if destination wells not specified, use source wells if dest_wells is None: dest_wells = wells if sample_names is None: sample_names = np.empty(dna_vols.shape) * np.nan if dna_concs is None: dna_concs = np.empty(dna_vols.shape) * np.nan if dna_concs.shape != sample_names.shape != dna_vols.shape: raise ValueError( 'dna_vols %r has a size different from dna_concs %r or ' 'sample_names' % (dna_vols.shape, dna_concs.shape, sample_names.shape)) # header picklist = [ 'Sample\tSource Plate Name\tSource Plate Type\tSource Well' '\tConcentration\tTransfer Volume\tDestination Plate Name' '\tDestination Well'] # water additions for index, sample in np.ndenumerate(sample_names): picklist.append('\t'.join( [str(sample), water_plate_name, water_plate_type, str(wells[index]), str(dna_concs[index]), str(water_vols[index]), dest_plate_name, str(dest_wells[index])])) # DNA additions for index, sample in np.ndenumerate(sample_names): if dna_plate_names is not None: 
                dna_plate_name = dna_plate_names[index]
            picklist.append('\t'.join(
                [str(sample), dna_plate_name, dna_plate_type,
                 str(wells[index]), str(dna_concs[index]),
                 str(dna_vols[index]), dest_plate_name,
                 str(dest_wells[index])]))

        return '\n'.join(picklist)

    def generate_echo_picklist(self):
        """Generates Echo pick list to achieve a normalized input DNA pool

        Returns
        -------
        str
            The echo-formatted pick list
        """
        # Map each composition to its raw concentration for fast lookup below
        concentrations = {
            comp: conc
            for comp, conc, _ in self.quantification_process.concentrations}
        dna_vols = []
        water_vols = []
        wells = []
        dest_wells = []
        sample_names = []
        dna_concs = []
        layout = self.plates[0].layout
        for row in layout:
            for well in row:
                if well:
                    composition = well.composition
                    dna_vols.append(composition.dna_volume)
                    water_vols.append(composition.water_volume)
                    # For the source well we need to take a look at the
                    # gdna comp
                    c_gdna_comp = composition.compressed_gdna_composition
                    wells.append(c_gdna_comp.container.well_id)
                    dest_wells.append(well.well_id)
                    # For the sample name we need to check the sample
                    # composition
                    sample_comp = c_gdna_comp.gdna_composition.\
                        sample_composition
                    sample_names.append(sample_comp.content)
                    # For the DNA concentrations we need to look at
                    # the quantification process
                    dna_concs.append(concentrations[c_gdna_comp])

        # _format_picklist expects numpy arrays
        dna_vols = np.asarray(dna_vols)
        water_vols = np.asarray(water_vols)
        wells = np.asarray(wells)
        dest_wells = np.asarray(dest_wells)
        sample_names = np.asarray(sample_names)
        dna_concs = np.asarray(dna_concs)

        return NormalizationProcess._format_picklist(
            dna_vols, water_vols, wells, dest_wells=dest_wells,
            sample_names=sample_names, dna_concs=dna_concs)


class LibraryPrepShotgunProcess(Process):
    """Shotgun Library Prep process object

    Attributes
    ----------
    kappa_hyper_plus_kit
    stub_lot
    normalization_process

    See Also
    --------
    Process
    """
    _table = 'qiita.library_prep_shotgun_process'
    _id_column = 'library_prep_shotgun_process_id'
    _process_type = 'shotgun library prep'

    @classmethod
    def create(cls,
               user, plate, plate_name, kappa_hyper_plus_kit, stub_lot,
               volume, i5_plate, i7_plate):
        """Creates a new LibraryPrepShotgunProcess

        Parameters
        ----------
        user : labman.db.user.User
            User performing the library prep
        plate: labman.db.plate.Plate
            The normalized gDNA plate of origin
        plate_name: str
            The library
        kappa_hyper_plus_kit: labman.db.composition.ReagentComposition
            The Kappa Hyper Plus kit used
        stub_lot: labman.db.composition.ReagentComposition
            The stub lot used
        volume : float
            The initial volume in the wells
        i5_plate: labman.db.plate.Plate
            The i5 primer working plate
        i7_plate: labman.db.plate.Plate
            The i7 primer working plate

        Returns
        -------
        LibraryPrepShotgunProcess
            The newly created process
        """
        with sql_connection.TRN as TRN:
            # Add the row to the process table
            process_id = cls._common_creation_steps(user)

            # Add the row to the library_prep_shotgun_process; the nested
            # SELECT resolves the normalization process that produced the
            # given normalized gDNA plate
            sql = """INSERT INTO qiita.library_prep_shotgun_process
                        (process_id, kappa_hyper_plus_kit_id, stub_lot_id,
                         normalization_process_id)
                     VALUES (%s, %s, %s, (
                        SELECT DISTINCT normalization_process_id
                            FROM qiita.normalization_process np
                                JOIN qiita.container c
                                    ON np.process_id =
                                        c.latest_upstream_process_id
                                JOIN qiita.well USING (container_id)
                                WHERE plate_id = %s))
                     RETURNING library_prep_shotgun_process_id"""
            TRN.add(sql, [process_id, kappa_hyper_plus_kit.id, stub_lot.id,
                          plate.id])
            instance = cls(TRN.execute_fetchlast())

            # Get the primer set for the plates
            sql = """SELECT DISTINCT shotgun_primer_set_id
                     FROM qiita.shotgun_combo_primer_set cps
                        JOIN qiita.primer_set_composition psc
                            ON cps.i5_primer_set_composition_id =
                                psc.primer_set_composition_id
                        JOIN qiita.primer_composition pc USING
                            (primer_set_composition_id)
                        JOIN qiita.composition c
                            ON pc.composition_id = c.composition_id
                        JOIN qiita.well USING (container_id)
                     WHERE plate_id = %s"""
            TRN.add(sql, [i5_plate.id])
            primer_set = composition_module.ShotgunPrimerSet(
                TRN.execute_fetchlast())

            # Get a list of wells that actually contain information
            wells = [well for well in
                     chain.from_iterable(plate.layout)
                     if well is not None]
            # Get the list of index pairs to use
            idx_combos = primer_set.get_next_combos(len(wells))

            i5_layout = i5_plate.layout
            i7_layout = i7_plate.layout

            # Create the library plate
            lib_plate = plate_module.Plate.create(
                plate_name, plate.plate_configuration)
            for well, idx_combo in zip(wells, idx_combos):
                # idx_combo is an (i5, i7) primer set pair; look up the
                # corresponding working-plate compositions by well position
                i5_well = idx_combo[0].container
                i7_well = idx_combo[1].container
                i5_comp = i5_layout[
                    i5_well.row - 1][i5_well.column - 1].composition
                i7_comp = i7_layout[
                    i7_well.row - 1][i7_well.column - 1].composition

                lib_well = container_module.Well.create(
                    lib_plate, instance, volume, well.row, well.column)
                composition_module.LibraryPrepShotgunComposition.create(
                    instance, lib_well, volume, well.composition,
                    i5_comp, i7_comp)

        return instance

    @property
    def kappa_hyper_plus_kit(self):
        """The Kappa Hyper plus kit used

        Returns
        -------
        ReagentComposition
        """
        return composition_module.ReagentComposition(
            self._get_attr('kappa_hyper_plus_kit_id'))

    @property
    def stub_lot(self):
        """The stub lot used

        Returns
        -------
        ReagentComposition
        """
        return composition_module.ReagentComposition(
            self._get_attr('stub_lot_id'))

    @property
    def normalization_process(self):
        """The normalization process used

        Returns
        -------
        NormalizationProcess
        """
        return NormalizationProcess(self._get_attr('normalization_process_id'))

    @property
    def normalized_plate(self):
        """The input normalized plate

        Returns
        -------
        Plate
        """
        with sql_connection.TRN as TRN:
            # Walk from the library compositions back to the normalized gDNA
            # wells to find the single source plate
            sql = """SELECT DISTINCT plate_id
                     FROM qiita.composition lc
                        JOIN qiita.library_prep_shotgun_composition lpsc
                            ON lc.composition_id = lpsc.composition_id
                        JOIN qiita.normalized_gdna_composition ngdnac
                            USING (normalized_gdna_composition_id)
                        JOIN qiita.composition nc
                            ON ngdnac.composition_id = nc.composition_id
                        JOIN qiita.well w ON nc.container_id = w.container_id
                     WHERE lc.upstream_process_id = %s"""
            TRN.add(sql, [self.process_id])
            return plate_module.Plate(TRN.execute_fetchlast())

    @property
    def i5_primer_plate(self):
        """The
i5 primer plate Returns ------- Plate """ with sql_connection.TRN as TRN: sql = """SELECT DISTINCT plate_id FROM qiita.composition lc JOIN qiita.library_prep_shotgun_composition lsc ON lc.composition_id = lsc.composition_id JOIN qiita.primer_composition prc ON lsc.i5_primer_composition_id = prc.primer_composition_id JOIN qiita.composition pc ON prc.composition_id = pc.composition_id JOIN qiita.well w ON pc.container_id = w.container_id WHERE lc.upstream_process_id = %s""" TRN.add(sql, [self.process_id]) return plate_module.Plate(TRN.execute_fetchlast()) @property def i7_primer_plate(self): """The i7 primer plate Returns ------- Plate """ with sql_connection.TRN as TRN: sql = """SELECT DISTINCT plate_id FROM qiita.composition lc JOIN qiita.library_prep_shotgun_composition lsc ON lc.composition_id = lsc.composition_id JOIN qiita.primer_composition prc ON lsc.i7_primer_composition_id = prc.primer_composition_id JOIN qiita.composition pc ON prc.composition_id = pc.composition_id JOIN qiita.well w ON pc.container_id = w.container_id WHERE lc.upstream_process_id = %s""" TRN.add(sql, [self.process_id]) return plate_module.Plate(TRN.execute_fetchlast()) @property def volume(self): """The volume Returns ------- float """ with sql_connection.TRN as TRN: sql = """SELECT DISTINCT total_volume FROM qiita.composition WHERE upstream_process_id = %s""" TRN.add(sql, [self.process_id]) return TRN.execute_fetchlast() @staticmethod def _format_picklist(sample_names, sample_wells, indices, i5_vol=250, i7_vol=250, i5_plate_type='384LDV_AQ_B2_HT', i7_plate_type='384LDV_AQ_B2_HT', dest_plate_name='IndexPCRPlate'): """Formats Echo-format pick list for preparing the shotgun library Parameters ---------- sample_names: array-like of str The sample names matching index order of indices sample_wells: array-like of str The wells matching sample name order indices: pandas DataFrame The dataframe with index info matching sample_names i5_vol: int, optional The volume of i5 index to transfer. 
Default: 250 i7_vol: int, optional The volume of i7 index to transfer. Default: 250 i5_plate_type: str, optional The i5 plate type. Default: 384LDV_AQ_B2_HT i7_plate_type: str, optional The i7 plate type. Default: 384LDV_AQ_B2_HT dest_plate_name: str, optional The name of the destination plate. Default: IndexPCRPlate Returns ------- str The Echo formatted pick list """ # check that arrays are the right size if len(sample_names) != len(sample_wells) != len(indices): raise ValueError( 'sample_names (%s) has a size different from sample_wells ' '(%s) or index list (%s)' % (len(sample_names), len(sample_wells), len(indices))) # header picklist = [ 'Sample\tSource Plate Name\tSource Plate Type\tSource Well\t' 'Transfer Volume\tIndex Name\tIndex Sequence\t' 'Destination Plate Name\tDestination Well'] # i5 additions for i, (sample, well) in enumerate(zip(sample_names, sample_wells)): picklist.append('\t'.join([ str(sample), indices.iloc[i]['i5 plate'], i5_plate_type, indices.iloc[i]['i5 well'], str(i5_vol), indices.iloc[i]['i5 name'], indices.iloc[i]['i5 sequence'], dest_plate_name, well])) # i7 additions for i, (sample, well) in enumerate(zip(sample_names, sample_wells)): picklist.append('\t'.join([ str(sample), indices.iloc[i]['i7 plate'], i7_plate_type, indices.iloc[i]['i7 well'], str(i7_vol), indices.iloc[i]['i7 name'], indices.iloc[i]['i7 sequence'], dest_plate_name, well])) return '\n'.join(picklist) def generate_echo_picklist(self): """Generates Echo pick list for preparing the shotgun library Returns ------- str The echo-formatted pick list """ sample_names = [] sample_wells = [] indices = {'i5 name': {}, 'i5 plate': {}, 'i5 sequence': {}, 'i5 well': {}, 'i7 name': {}, 'i7 plate': {}, 'i7 sequence': {}, 'i7 well': {}, 'index combo': {}, 'index combo seq': {}} for idx, well in enumerate(chain.from_iterable(self.plates[0].layout)): if well is None: continue # Add the sample well sample_wells.append(well.well_id) # Get the sample name - we need to go back to the 
SampleComposition lib_comp = well.composition sample_comp = lib_comp.normalized_gdna_composition\ .compressed_gdna_composition.gdna_composition\ .sample_composition sample_names.append(sample_comp.content) # Retrieve all the information about the indices i5_comp = lib_comp.i5_composition.primer_set_composition i5_well = i5_comp.container indices['i5 name'][idx] = i5_comp.external_id indices['i5 plate'][idx] = i5_well.plate.external_id indices['i5 sequence'][idx] = i5_comp.barcode indices['i5 well'][idx] = i5_well.well_id i7_comp = lib_comp.i7_composition.primer_set_composition i7_well = i7_comp.container indices['i7 name'][idx] = i7_comp.external_id indices['i7 plate'][idx] = i7_well.plate.external_id indices['i7 sequence'][idx] = i7_comp.barcode indices['i7 well'][idx] = i7_well.well_id indices['index combo seq'][idx] = '%s%s' % ( indices['i5 sequence'][idx], indices['i7 sequence'][idx]) sample_names = np.asarray(sample_names) sample_wells = np.asarray(sample_wells) indices = pd.DataFrame(indices) return LibraryPrepShotgunProcess._format_picklist( sample_names, sample_wells, indices) class QuantificationProcess(Process): """Quantification process object Attributes ---------- concentrations See Also -------- Process """ _table = 'qiita.quantification_process' _id_column = 'quantification_process_id' _process_type = 'quantification' @staticmethod def _compute_shotgun_pico_concentration(dna_vals, size=500): """Computes molar concentration of libraries from library DNA concentration values. 
        Parameters
        ----------
        dna_vals : numpy array of float
            The DNA concentration in ng/uL
        size : int
            The average library molecule size in bp

        Returns
        -------
        np.array of floats
            Array of calculated concentrations, in nanomolar units
        """
        # ng/uL -> nM using the average molar mass of a DNA base pair
        # (~660 g/mol) times the library size
        lib_concentration = (dna_vals / (660 * float(size))) * 10**6

        return lib_concentration

    @staticmethod
    def _make_2D_array(df, data_col='Sample DNA Concentration',
                       well_col='Well', rows=8, cols=12):
        """Pulls a column of data out of a dataframe and puts into array
        format based on well IDs in another column

        Parameters
        ----------
        df: Pandas DataFrame
            dataframe from which to pull values
        data_col: str, optional
            name of column with data. Default: Sample DNA Concentration
        well_col: str, optional
            name of column with well IDs, in 'A1,B12' format. Default: Well
        rows: int, optional
            number of rows in array to return. Default: 8
        cols: int, optional
            number of cols in array to return. Default: 12

        Returns
        -------
        numpy 2D array
        """
        # initialize empty Cp array
        cp_array = np.empty((rows, cols), dtype=object)

        # fill Cp array with the post-cleaned values from the right half of
        # the plate. Well IDs like 'B12' map to 0-based (row, col) indices.
        for record in df.iterrows():
            row = ord(str.upper(record[1][well_col][0])) - ord('A')
            col = int(record[1][well_col][1:]) - 1
            cp_array[row, col] = record[1][data_col]

        return cp_array

    @staticmethod
    def _parse_pico_csv(contents, sep='\t',
                        conc_col_name='Sample DNA Concentration'):
        """Reads tab-delimited pico quant

        Parameters
        ----------
        contents: fp or open filehandle
            pico quant file
        sep: str
            sep char used in quant file
        conc_col_name: str
            name to use for concentration column output

        Returns
        -------
        pico_df: pandas DataFrame object
            DataFrame relating well location and DNA concentration
        """
        # skiprows/skipfooter drop the instrument's header and footer
        # boilerplate; engine='python' is required for skipfooter
        raw_df = pd.read_csv(contents, sep=sep, skiprows=2, skipfooter=5,
                             engine='python')

        pico_df = raw_df[['Well', '[Concentration]']]
        pico_df = pico_df.rename(columns={'[Concentration]': conc_col_name})

        # coerce oddball concentrations to np.nan
        pico_df[conc_col_name] = \
            pd.to_numeric(pico_df[conc_col_name], errors='coerce')

        return pico_df

    @staticmethod
    def parse(contents, file_format="minipico", rows=8, cols=12):
        """Parses the quantification output

        Parameters
        ----------
        contents : str
            The contents of the plate reader output
        file_format: str
            The quantification file format
        rows: int, optional
            The number of rows in the plate. Default: 8
        cols: int, optional
            The number of cols in the plate. Default: 12

        Returns
        -------
        DataFrame

        Raises
        ------
        ValueError
            If `file_format` is not a supported format
        """
        parsers = {'minipico': QuantificationProcess._parse_pico_csv}
        contents_io = StringIO(contents)

        if file_format not in parsers:
            raise ValueError(
                'File format %s not recognized. Supported file formats: %s'
                % (file_format, ', '.join(parsers)))
        df = parsers[file_format](contents_io)
        array = QuantificationProcess._make_2D_array(df, rows=rows, cols=cols)
        return array.astype(float)

    @classmethod
    def create_manual(cls, user, quantifications):
        """Creates a new manual quantification process

        Parameters
        ----------
        user: labman.db.user.User
            User performing the quantification process
        quantifications: list of dict
            The quantifications in the form of {'composition': Composition,
            'concentration': float}

        Returns
        -------
        QuantificationProcess
        """
        with sql_connection.TRN as TRN:
            # Add the row to the process table
            process_id = cls._common_creation_steps(user)
            # Add the row to the quantification process table
            sql = """INSERT INTO qiita.quantification_process (process_id)
                     VALUES (%s) RETURNING quantification_process_id"""
            TRN.add(sql, [process_id])
            instance = cls(TRN.execute_fetchlast())

            sql = """INSERT INTO qiita.concentration_calculation
                        (quantitated_composition_id, upstream_process_id,
                         raw_concentration)
                     VALUES (%s, %s, %s)"""
            sql_args = []
            for quant in quantifications:
                sql_args.append([quant['composition'].composition_id,
                                 instance.id, quant['concentration']])

            TRN.add(sql, sql_args, many=True)
            TRN.execute()
        return instance

    @classmethod
    def create(cls, user, plate, concentrations):
        """Creates a new quantification process

        Parameters
        ----------
        user: labman.db.user.User
            User performing the quantification process
        plate: labman.db.plate.Plate
            The plate being quantified
        concentrations: 2D np.array
            The plate concentrations

        Returns
        -------
        QuantificationProcess

        Raises
        ------
        ValueError
            If no well in the plate layout has a concentration value
        """
        with sql_connection.TRN as TRN:
            # Add the row to the process table
            process_id = cls._common_creation_steps(user)
            # Add the row to the quantification process table
            sql = """INSERT INTO qiita.quantification_process (process_id)
                     VALUES (%s) RETURNING quantification_process_id"""
            TRN.add(sql, [process_id])
            instance = cls(TRN.execute_fetchlast())

            sql = """INSERT INTO qiita.concentration_calculation
                        (quantitated_composition_id, upstream_process_id,
                         raw_concentration)
                     VALUES (%s, %s, %s)"""
            sql_args = []
            layout = plate.layout
            # Pair each well with its measured concentration; empty wells
            # (None) are skipped
            for p_row, c_row in zip(layout, concentrations):
                for well, conc in zip(p_row, c_row):
                    if well is not None:
                        sql_args.append([well.composition.composition_id,
                                         instance.id, conc])
            if len(sql_args) == 0:
                raise ValueError('No concentration values have been provided')
            TRN.add(sql, sql_args, many=True)
            TRN.execute()
        return instance

    @property
    def concentrations(self):
        """The concentrations measured

        Returns
        -------
        list of (Composition, float, float)
        """
        with sql_connection.TRN as TRN:
            sql = """SELECT quantitated_composition_id, raw_concentration,
                            computed_concentration
                     FROM qiita.concentration_calculation
                     WHERE upstream_process_id = %s
                     ORDER BY concentration_calculation_id"""
            TRN.add(sql, [self._id])
            return [
                (composition_module.Composition.factory(comp_id), r_con, c_con)
                for comp_id, r_con, c_con in TRN.execute_fetchindex()]

    def compute_concentrations(self, dna_amount=240, min_val=1, max_val=15,
                               blank_volume=2, size=500):
        """Compute the normalized concentrations

        Parameters
        ----------
        dna_amount: float, optional
            (Amplicon) Total amount of DNA, in ng. Default: 240
        min_val: float, optional
            (Amplicon) Minimum amount of DNA to normalize to (nM). Default: 1
        max_val: float, optional
            (Amplicon) Maximum value.
            Wells above this number will be excluded (nM). Default: 15
        blank_volume: float, optional
            (Amplicon) Amount to pool for the blanks (nM). Default: 2.
        size: int, optional
            (Shotgun) The average library molecule size, in bp.
        """
        concentrations = self.concentrations
        layout = concentrations[0][0].container.plate.layout

        res = None

        # The computation depends on the kind of composition quantified;
        # unknown composition types are silently left uncomputed (see the
        # comment before the final `if` below)
        if isinstance(concentrations[0][0],
                      composition_module.LibraryPrep16SComposition):
            # Amplicon
            sample_concs = np.zeros_like(layout, dtype=float)
            is_blank = np.zeros_like(layout, dtype=bool)
            for comp, r_conc, _ in concentrations:
                well = comp.container
                row = well.row - 1
                col = well.column - 1
                sample_concs[row][col] = r_conc
                sc = comp.gdna_composition.sample_composition
                is_blank[row][col] = sc.sample_composition_type == 'blank'

            res = QuantificationProcess._compute_amplicon_pool_values(
                sample_concs, dna_amount)
            # Clamp low-concentration samples up to the minimum pool value
            res[sample_concs < min_val] = min_val
            # If there is any sample whose concentration is above the
            # user-defined max_value, the decision is to not pool that sample.
            # To not pool the sample, define its volume to 0 and it will not
            # get pooled.
            res[sample_concs > max_val] = 0
            res[is_blank] = blank_volume
        elif isinstance(concentrations[0][0],
                        composition_module.LibraryPrepShotgunComposition):
            # Shotgun
            sample_concs = np.zeros_like(layout, dtype=float)
            for comp, r_conc, _ in concentrations:
                well = comp.container
                row = well.row - 1
                col = well.column - 1
                sample_concs[row][col] = r_conc

            res = QuantificationProcess._compute_shotgun_pico_concentration(
                sample_concs, size)
        # No need for else, because if it is not one of the above types
        # we don't need to do anything

        if res is not None:
            sql_args = []
            for p_row, c_row in zip(layout, res):
                for well, conc in zip(p_row, c_row):
                    if well is not None:
                        sql_args.append([conc, self.id,
                                         well.composition.composition_id])
            sql = """UPDATE qiita.concentration_calculation
                     SET computed_concentration = %s
                     WHERE upstream_process_id = %s AND
                           quantitated_composition_id = %s"""
            with sql_connection.TRN as TRN:
                TRN.add(sql, sql_args, many=True)
                TRN.execute()

    @staticmethod
    def _compute_amplicon_pool_values(sample_concs, dna_amount=240):
        """Computes amplicon pooling values

        Parameters
        ----------
        sample_concs: 2D array of float
            nM sample concentrations
        dna_amount: float, optional
            Total amount of DNA, in ng. Default: 240

        Returns
        -------
        np.array of floats
            A 2D array of floats
        """
        # NOTE(review): zero concentrations would produce inf/division
        # warnings here - callers appear to rely on min_val clamping
        # afterwards; confirm upstream guarantees
        return float(dna_amount) / sample_concs


class PoolingProcess(Process):
    """Pooling process object

    Attributes
    ----------
    quantification_process
    robot

    See Also
    --------
    Process
    """
    _table = 'qiita.pooling_process'
    _id_column = 'pooling_process_id'
    _process_type = 'pooling'

    @staticmethod
    def estimate_pool_conc_vol(sample_vols, sample_concs):
        """Estimates the molarity and volume of a pool.
        Parameters
        ----------
        sample_concs : numpy array of float
            The concentrations calculated via PicoGreen (nM)
        sample_vols : numpy array of float
            The calculated pooling volumes (nL)

        Returns
        -------
        pool_conc : float
            The estimated actual concentration of the pool, in nM
        total_vol : float
            The total volume of the pool, in nL
        """
        # scalar to adjust nL to L for molarity calculations
        nl_scalar = 1e-9
        # calc total pool pmols
        total_pmols = np.multiply(sample_concs, sample_vols) * nl_scalar
        # calc total pool vol in nanoliters
        total_vol = sample_vols.sum()
        # pool pM is total pmols divided by total liters
        # (total vol in nL * 1 L / 10^9 nL)
        pool_conc = total_pmols.sum() / (total_vol * nl_scalar)
        return (pool_conc, total_vol)

    @staticmethod
    def compute_shotgun_pooling_values_eqvol(sample_concs, total_vol=60.0):
        """Computes molar concentration of libraries from concentration
        values, using an even volume per sample

        Parameters
        ----------
        sample_concs : numpy array of float
            The concentrations calculated via PicoGreen (nM)
        total_vol : float, optional
            The total volume to pool (uL). Default: 60

        Returns
        -------
        np.array of floats
            A 2D array of floats
        """
        # uL -> nL, spread evenly across every sample in the array
        per_sample_vol = (total_vol / sample_concs.size) * 1000.0
        sample_vols = np.zeros(sample_concs.shape) + per_sample_vol
        return sample_vols

    @staticmethod
    def compute_shotgun_pooling_values_minvol(
            sample_concs, sample_fracs=None, floor_vol=100, floor_conc=40,
            total_nmol=.01):
        """Computes pooling volumes for samples based on concentration
        estimates of nM concentrations (`sample_concs`), taking a minimum
        volume of samples below a threshold.

        Reads in concentration values in nM. Samples below a minimum
        concentration (`floor_conc`, default 40 nM) will be included, but
        at a decreased volume (`floor_vol`, default 100 nL) to avoid
        overdiluting the pool.

        Samples can be assigned a target molar fraction in the pool by
        passing a np.array (`sample_fracs`, same shape as `sample_concs`)
        with fractional values per sample. By default, will aim for equal
        molar pooling.

        Finally, total pooling size is determined by a target nanomolar
        quantity (`total_nmol`, default .01). For a perfect 384 sample
        library, in which you had all samples at a concentration of exactly
        400 nM and wanted a total volume of 60 uL, this would be 0.024 nmol.

        For a Novaseq, we expect to need 150 uL at 4 nM, or about
        0.0006 nmol. Taking into account sample loss on the pippin prep
        (1/2) and molar loss due to exclusion of primer dimers (1/2),
        figure we need 4 times that or 0.0024.

        Parameters
        ----------
        sample_concs: 2D array of float
            nM sample concentrations
        sample_fracs: 2D of float, optional
            fractional value for each sample (default 1/N)
        floor_vol: float, optional
            volume (nL) at which samples below floor_conc will be pooled.
            Default: 100
        floor_conc: float, optional
            minimum value (nM) for pooling at real estimated value.
            Default: 40
        total_nmol : float, optional
            total number of nM to have in pool. Default: 0.01

        Returns
        -------
        sample_vols: np.array of floats
            the volumes in nL per each sample pooled
        """
        if sample_fracs is None:
            sample_fracs = np.ones(sample_concs.shape) / sample_concs.size

        # calculate volumetric fractions including floor val
        sample_vols = (total_nmol * sample_fracs) / sample_concs

        # convert L to nL
        sample_vols *= 10**9

        # drop volumes for samples below floor concentration to floor_vol
        sample_vols[sample_concs < floor_conc] = floor_vol

        return sample_vols

    @staticmethod
    def compute_shotgun_pooling_values_floor(
            sample_concs, sample_fracs=None, min_conc=10, floor_conc=50,
            total_nmol=.01):
        """Computes pooling volumes for samples based on concentration
        estimates of nM concentrations (`sample_concs`).

        Reads in concentration values in nM. Samples must be above a minimum
        concentration threshold (`min_conc`, default 10 nM) to be included.
        Samples above this threshold but below a given floor concentration
        (`floor_conc`, default 50 nM) will be pooled as if they were at the
        floor concentration, to avoid overdiluting the pool.

        Samples can be assigned a target molar fraction in the pool by
        passing a np.array (`sample_fracs`, same shape as `sample_concs`)
        with fractional values per sample. By default, will aim for equal
        molar pooling.

        Finally, total pooling size is determined by a target nanomolar
        quantity (`total_nmol`, default .01). For a perfect 384 sample
        library, in which you had all samples at a concentration of exactly
        400 nM and wanted a total volume of 60 uL, this would be 0.024 nmol.

        Parameters
        ----------
        sample_concs: 2D array of float
            nM calculated by compute_qpcr_concentration
        sample_fracs: 2D of float, optional
            fractional value for each sample (default 1/N)
        min_conc: float, optional
            minimum nM concentration to be included in pool. Default: 10
        floor_conc: float, optional
            minimum value for pooling for samples above min_conc.
            Default: 50
        total_nmol : float, optional
            total number of nM to have in pool. Default 0.01

        Returns
        -------
        sample_vols: np.array of floats
            the volumes in nL per each sample pooled
        """
        if sample_fracs is None:
            sample_fracs = np.ones(sample_concs.shape) / sample_concs.size

        # get samples above threshold
        sample_fracs_pass = sample_fracs.copy()
        sample_fracs_pass[sample_concs <= min_conc] = 0
        # renormalize to exclude lost samples
        sample_fracs_pass *= 1/sample_fracs_pass.sum()
        # floor concentration value
        sample_concs_floor = sample_concs.copy()
        sample_concs_floor[sample_concs < floor_conc] = floor_conc
        # calculate volumetric fractions including floor val
        sample_vols = (total_nmol * sample_fracs_pass) / sample_concs_floor
        # convert L to nL
        sample_vols *= 10**9
        return sample_vols

    @classmethod
    def create(cls, user, quantification_process, pool_name, volume,
               input_compositions, func_data, robot=None, destination=None):
        """Creates a new pooling process

        Parameters
        ----------
        user: labman.db.user.User
            User performing the pooling process
        quantification_process: labman.db.process.QuantificationProcess
            The quantification process this pooling is based on
        pool_name: str
            The name of the new pool
        volume: float
            The initial volume
        input_compositions: list of dicts
            The input compositions for the pool {'composition': Composition,
            'input_volume': float, 'percentage_of_output': float}
        func_data : dict
            Dictionary with the pooling function information
        robot: labman.equipment.Equipment, optional
            The robot performing the pooling, if not manual
        destination: str
            The EpMotion destination tube

        Returns
        -------
        PoolingProcess
        """
        with sql_connection.TRN as TRN:
            # Add the row to the process table
            process_id = cls._common_creation_steps(user)
            # Add the row to the pooling process table
            sql = """INSERT INTO qiita.pooling_process
                        (process_id, quantification_process_id, robot_id,
                         destination, pooling_function_data)
                     VALUES (%s, %s, %s, %s, %s)
                     RETURNING pooling_process_id"""
            r_id = robot.id if robot is not None else None
            # A destination tube only makes sense for robotic pooling
            if r_id is None:
                destination = None
            TRN.add(sql, [process_id,
                          quantification_process.id, r_id,
                          destination, dumps(func_data)])
            instance = cls(TRN.execute_fetchlast())

            # Create the new pool
            tube = container_module.Tube.create(instance, pool_name, volume)
            pool = composition_module.PoolComposition.create(
                instance, tube, volume)

            # Link the pool with its contents
            sql = """INSERT INTO qiita.pool_composition_components
                        (output_pool_composition_id, input_composition_id,
                         input_volume, percentage_of_output)
                     VALUES (%s, %s, %s, %s)"""
            sql_args = []
            for in_comp in input_compositions:
                # The wet lab pointed out that we don't need to pool the ones
                # that have a value below 0.001
                if in_comp['input_volume'] < 0.001:
                    continue
                sql_args.append([pool.id,
                                 in_comp['composition'].composition_id,
                                 in_comp['input_volume'],
                                 in_comp['percentage_of_output']])
            TRN.add(sql, sql_args, many=True)
            TRN.execute()

        return instance

    @property
    def quantification_process(self):
        """The quantification process used

        Returns
        -------
        QuantificationProcess
        """
        return QuantificationProcess(
            self._get_attr('quantification_process_id'))

    @property
    def robot(self):
        """The robot used

        Returns
        -------
        Equipment
        """
        # NOTE(review): for manual pooling robot_id is stored as NULL (see
        # `create`); this would construct Equipment(None) - confirm callers
        # check for manual pools first
        return equipment_module.Equipment(self._get_attr('robot_id'))

    @property
    def destination(self):
        """The EpMotion destination tube

        Returns
        -------
        str
        """
        return self._get_attr('destination')

    @property
    def components(self):
        """The components of the pool

        Returns
        -------
        list of (Composition, float)
        """
        with sql_connection.TRN as TRN:
            sql = """SELECT input_composition_id, input_volume
                     FROM qiita.pool_composition_components
                        JOIN qiita.pool_composition
                            ON output_pool_composition_id =
                                pool_composition_id
                        JOIN qiita.composition USING (composition_id)
                     WHERE upstream_process_id = %s
                     ORDER BY pool_composition_components_id"""
            TRN.add(sql, [self.process_id])
            return [(composition_module.Composition.factory(comp_id), vol)
                    for comp_id, vol in TRN.execute_fetchindex()]

    @property
    def pool(self):
        """The generated pool composition

        Returns
        -------
        PoolComposition
        """
        with sql_connection.TRN as TRN:
            sql = """SELECT composition_id
                     FROM qiita.composition
                     WHERE upstream_process_id = %s"""
            TRN.add(sql, [self.process_id])
            return composition_module.Composition.factory(
                TRN.execute_fetchlast())

    @property
    def pooling_function_data(self):
        """The information about the pooling process

        Returns
        -------
        dict
        """
        return self._get_attr('pooling_function_data')

    @staticmethod
    def _format_picklist(vol_sample, max_vol_per_well=60000,
                         dest_plate_shape=None):
        """Format the contents of an echo pooling pick list

        Parameters
        ----------
        vol_sample : 2d numpy array of floats
            The per well sample volume, in nL
        max_vol_per_well : floats, optional
            Maximum destination well volume, in nL. Default: 60000
        dest_plate_shape: list of 2 elements
            The destination plate shape
        """
        if dest_plate_shape is None:
            dest_plate_shape = [16, 24]

        contents = ['Source Plate Name,Source Plate Type,Source Well,'
                    'Concentration,Transfer Volume,Destination Plate Name,'
                    'Destination Well']
        # Write the sample transfer volumes
        rows, cols = vol_sample.shape

        # replace NaN values with 0s to leave a trail of unpooled wells
        pool_vols = np.nan_to_num(vol_sample)

        running_tot = 0
        d = 1
        for i in range(rows):
            for j in range(cols):
                well_name = "%s%d" % (chr(ord('A') + i), j+1)
                # Machine will round, so just give it enough info to do the
                # correct rounding.
                val = "%.2f" % pool_vols[i][j]

                # test to see if we will exceed total vol per well; if so,
                # spill over into the next destination well
                if running_tot + pool_vols[i][j] > max_vol_per_well:
                    d += 1
                    running_tot = pool_vols[i][j]
                else:
                    running_tot += pool_vols[i][j]

                # NOTE(review): `d % dest_plate_shape[1]` yields column 0
                # (well 'A0') whenever d is a multiple of 24 - confirm the
                # destination numbering is intended to be 0-based here
                dest = "%s%d" % (chr(ord('A') +
                                 int(np.floor(d/dest_plate_shape[0]))),
                                 (d % dest_plate_shape[1]))

                contents.append(",".join(['1', '384LDV_AQ_B2_HT', well_name,
                                          "", val, 'NormalizedDNA', dest]))

        return "\n".join(contents)

    def generate_echo_picklist(self, max_vol_per_well=30000):
        """Generates Echo pick list for pooling the shotgun library

        Parameters
        ----------
        max_vol_per_well : floats, optional
            Maximum destination well volume, in nL.
Default: 30000 Returns ------- str The echo-formatted pick list """ vol_sample = np.zeros((16, 24)) for comp, vol in self.components: well = comp.container vol_sample[well.row - 1][well.column - 1] = vol return PoolingProcess._format_picklist(vol_sample) def generate_epmotion_file(self): """Generates an EpMotion file to perform the pooling Returns ------- str The EpMotion-formatted pool file contents """ contents = ['Rack,Source,Rack,Destination,Volume,Tool'] destination = self.destination for comp, vol in self.components: source = comp.container.well_id val = "%.3f" % vol # Hard-coded values - never changes according to the wet lab contents.append( ",".join(['1', source, '1', destination, val, '1'])) return "\n".join(contents) def generate_pool_file(self): """Generates the correct pool file based on the pool contents Returns ------- str The contents of the pool file """ comp = self.components[0][0] if isinstance(comp, composition_module.LibraryPrep16SComposition): return self.generate_epmotion_file() elif isinstance(comp, composition_module.LibraryPrepShotgunComposition): return self.generate_echo_picklist() else: # This error should only be shown to programmers raise ValueError( "Can't generate a pooling file for a pool containing " "compositions of type: %s" % comp.__class__.__name__) class SequencingProcess(Process): """Sequencing process object Attributes ---------- See Also -------- Process """ _table = 'qiita.sequencing_process' _id_column = 'sequencing_process_id' _process_type = 'sequencing' sequencer_lanes = { 'HiSeq4000': 8, 'HiSeq3000': 8, 'HiSeq2500': 2, 'HiSeq1500': 2, 'MiSeq': 1, 'MiniSeq': 1, 'NextSeq': 1, 'NovaSeq': 1} @staticmethod def list_sequencing_runs(): """Generates a list of sequencing runs Returns ------- list of dicts The list of sequence run information with the structure: [{'process_id': int, 'run_name': string, ...}] """ with sql_connection.TRN as TRN: sql = """SELECT * FROM qiita.sequencing_process ORDER BY process_id""" TRN.add(sql) 
return [dict(r) for r in TRN.execute_fetchindex()] @classmethod def create(cls, user, pools, run_name, experiment, sequencer, fwd_cycles, rev_cycles, principal_investigator, contacts=None): """Creates a new sequencing process Parameters ---------- user : labman.db.user.User User preparing the sequencing pools: list of labman.db.composition.PoolComposition The pools being sequenced, in lane order run_name: str The run name experiment: str The run experiment sequencer: labman.db.equipment.Equipment The sequencer used fwd_cycles : int The number of forward cycles rev_cycles : int The number of reverse cycles principal_investigator : labman.db.user.User The principal investigator to list in the run contacts: list of labman.db.user.User, optinal Any additional contacts to add to the Sample Sheet Returns ------- SequencingProcess Raises ------ ValueError If the number of cycles are <= 0 """ if fwd_cycles <= 0 or not isinstance(fwd_cycles, int): raise ValueError("fwd_cycles must be > 0") if rev_cycles <= 0 or not isinstance(rev_cycles, int): raise ValueError("rev_cycles must be > 0") if len(pools) > cls.sequencer_lanes[sequencer.equipment_type]: raise ValueError( 'Number of pools cannot be bigger than the number of lanes ' 'in the sequencer. Pools: %s. Lanes in a %s sequencer: %s' % (len(pools), sequencer.equipment_type, cls.sequencer_lanes[sequencer.equipment_type])) with sql_connection.TRN as TRN: # Add the row to the process table process_id = cls._common_creation_steps(user) assay = None pool = pools[0] CM = composition_module while assay is None: comp = pool.components[0]['composition'] if isinstance(comp, CM.LibraryPrep16SComposition): assay = 'Amplicon' elif isinstance(comp, CM.LibraryPrepShotgunComposition): assay = 'Metagenomics' elif isinstance(comp, CM.PoolComposition): pool = comp else: # This should never happen - i.e. 
there is no way # of creating a pool like that raise ValueError( 'Pool with unexpected composition type: %s' % comp.__class__.__name__) # Add the row to the sequencing table sql = """INSERT INTO qiita.sequencing_process (process_id, run_name, experiment, sequencer_id, fwd_cycles, rev_cycles, assay, principal_investigator) VALUES (%s, %s, %s, %s, %s, %s, %s, %s) RETURNING sequencing_process_id""" TRN.add(sql, [process_id, run_name, experiment, sequencer.id, fwd_cycles, rev_cycles, assay, principal_investigator.id]) instance = cls(TRN.execute_fetchlast()) sql = """INSERT INTO qiita.sequencing_process_lanes (sequencing_process_id, pool_composition_id, lane_number) VALUES (%s, %s, %s)""" sql_args = [[instance.id, p.id, i + 1] for i, p in enumerate(pools)] TRN.add(sql, sql_args, many=True) if contacts: sql = """INSERT INTO qiita.sequencing_process_contacts (sequencing_process_id, contact_id) VALUES (%s, %s)""" sql_args = [[instance.id, c.id] for c in contacts] TRN.add(sql, sql_args, many=True) TRN.execute() return instance @property def pools(self): with sql_connection.TRN as TRN: sql = """SELECT pool_composition_id, lane_number FROM qiita.sequencing_process_lanes WHERE sequencing_process_id = %s ORDER BY lane_number""" TRN.add(sql, [self.id]) res = [[composition_module.PoolComposition(p), l] for p, l in TRN.execute_fetchindex()] return res @property def run_name(self): return self._get_attr('run_name') @property def experiment(self): return self._get_attr('experiment') @property def sequencer(self): return equipment_module.Equipment(self._get_attr('sequencer_id')) @property def fwd_cycles(self): return self._get_attr('fwd_cycles') @property def rev_cycles(self): return self._get_attr('rev_cycles') @property def assay(self): return self._get_attr('assay') @property def principal_investigator(self): return user_module.User(self._get_attr('principal_investigator')) @property def contacts(self): with sql_connection.TRN as TRN: sql = """SELECT contact_id FROM 
qiita.sequencing_process_contacts WHERE sequencing_process_id = %s ORDER BY contact_id""" TRN.add(sql, [self.id]) return [user_module.User(r[0]) for r in TRN.execute_fetchindex()] @staticmethod def _bcl_scrub_name(name): """Modifies a sample name to be BCL2fastq compatible Parameters ---------- name : str the sample name Returns ------- str the sample name, formatted for bcl2fastq """ return re.sub('[^0-9a-zA-Z\-\_]+', '_', name) @staticmethod def _reverse_complement(seq): """Reverse-complement a sequence From http://stackoverflow.com/a/25189185/7146785 Parameters ---------- seq : str The sequence to reverse-complement Returns ------- str The reverse-complemented sequence """ complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'} rev_seq = "".join(complement.get(base, base) for base in reversed(seq)) return rev_seq @staticmethod def _sequencer_i5_index(sequencer, indices): """Decides if the indices should be reversed based on the sequencer """ revcomp_sequencers = ['HiSeq4000', 'MiniSeq', 'NextSeq', 'HiSeq3000'] other_sequencers = ['HiSeq2500', 'HiSeq1500', 'MiSeq', 'NovaSeq'] if sequencer in revcomp_sequencers: return([SequencingProcess._reverse_complement(x) for x in indices]) elif sequencer in other_sequencers: return(indices) else: raise ValueError( 'Your indicated sequencer [%s] is not recognized.\nRecognized ' 'sequencers are: \n' % ' '.join(revcomp_sequencers + other_sequencers)) @staticmethod def _format_sample_sheet_data(sample_ids, i7_name, i7_seq, i5_name, i5_seq, wells=None, sample_plates=None, sample_proj='', description=None, lanes=[1], sep=',', include_header=True): """Creates the [Data] component of the Illumina sample sheet Parameters ---------- sample_ids: array-like The bcl2fastq-compatible sample ids i7_name: array-like The i7 index name, in sample_ids order i7_seq: array-like The i7 sequences, in sample_ids order i5_name: array-like The i5 index name, in sample_ids order i5_seq: array-like The i5 sequences, in sample_ids order wells: array-like, 
optional The source sample wells, in sample_ids order. Default: None sample_plate: str, optional The plate name. Default: '' sample_proj: str, optional The project name. Default: '' description: array-like, optional The original sample ids, in sample_ids order. Default: None lanes: array-lie, optional The lanes in which the pool will be sequenced. Default: [1] sep: str, optional The file-format separator. Default: ',' include_header: bool, optional Wheather to include the header or not. Default: true Returns ------- str The formatted [Data] component of the Illumina sample sheet Raises ------ ValueError If sample_ids, i7_name, i7_seq, i5_name and i5_seq do not have all the same length """ if sample_plates is None: sample_plates = [''] * len(sample_ids) if (len(sample_ids) != len(i7_name) != len(i7_seq) != len(i5_name) != len(i5_seq) != len(sample_plates)): raise ValueError('Sample information lengths are not all equal') if wells is None: wells = [''] * len(sample_ids) if description is None: description = [''] * len(sample_ids) data = [] for lane in lanes: for i, sample in enumerate(sample_ids): line = sep.join([str(lane), sample, sample, sample_plates[i], wells[i], i7_name[i], i7_seq[i], i5_name[i], i5_seq[i], sample_proj, description[i]]) data.append(line) data = sorted(data) if include_header: data.insert(0, sep.join([ 'Lane', 'Sample_ID', 'Sample_Name', 'Sample_Plate', 'Sample_Well', 'I7_Index_ID', 'index', 'I5_Index_ID', 'index2', 'Sample_Project', 'Description'])) return '\n'.join(data) @staticmethod def _format_sample_sheet_comments(principal_investigator=None, contacts=None, other=None, sep=','): """Formats the sample sheet comments Parameters ---------- principal_investigator: dict, optional The principal investigator information: {name: email} contacts: dict, optional The contacts information: {name: email} other: str, optional Other information to include in the sample sheet comments sep: str, optional The sample sheet separator Returns ------- str The 
formatted comments of the sample sheet """ comments = [] if principal_investigator is not None: comments.append('PI{0}{1}\n'.format( sep, sep.join( '{0}{1}{2}'.format(x, sep, principal_investigator[x]) for x in principal_investigator.keys()))) if contacts is not None: comments.append( 'Contact{0}{1}\nContact emails{0}{2}\n'.format( sep, sep.join(x for x in sorted(contacts.keys())), sep.join(contacts[x] for x in sorted(contacts.keys())))) if other is not None: comments.append('%s\n' % other) return ''.join(comments) @staticmethod def _format_sample_sheet(sample_sheet_dict, sep=','): """Formats Illumina-compatible sample sheet. Parameters ---------- sample_sheet_dict : dict dict with the sample sheet information sep: str, optional The sample sheet separator Returns ------- sample_sheet : str the sample sheet string """ template = ( '{comments}[Header]\nIEMFileVersion{sep}{IEMFileVersion}\n' 'Investigator Name{sep}{Investigator Name}\n' 'Experiment Name{sep}{Experiment Name}\nDate{sep}{Date}\n' 'Workflow{sep}{Workflow}\nApplication{sep}{Application}\n' 'Assay{sep}{Assay}\nDescription{sep}{Description}\n' 'Chemistry{sep}{Chemistry}\n\n[Reads]\n{read1}\n{read2}\n\n' '[Settings]\nReverseComplement{sep}{ReverseComplement}\n\n' '[Data]\n{data}') if sample_sheet_dict['comments']: sample_sheet_dict['comments'] = re.sub( '^', '# ', sample_sheet_dict['comments'].rstrip(), flags=re.MULTILINE) + '\n' sample_sheet = template.format(**sample_sheet_dict, **{'sep': sep}) return sample_sheet def _generate_shotgun_sample_sheet(self): """Generates Illumina compatible shotgun sample sheets Returns ------- str The illumina-formatted sample sheet """ bcl2fastq_sample_ids = [] i7_names = [] i7_sequences = [] i5_names = [] i5_sequences = [] wells = [] sample_ids = [] sample_plates = [] sequencer_type = self.sequencer.equipment_type data = [] include_header = True for pool, lane in self.pools: for component in pool.components: lp_composition = component['composition'] # Get the well 
information well = lp_composition.container wells.append(well.well_id) # Get the plate information sample_plates.append(well.plate.external_id) # Get the i7 index information i7_comp = lp_composition.i7_composition.primer_set_composition i7_names.append(i7_comp.external_id) i7_sequences.append(i7_comp.barcode) # Get the i5 index information i5_comp = lp_composition.i5_composition.primer_set_composition i5_names.append(i5_comp.external_id) i5_sequences.append(i5_comp.barcode) # Get the sample id sample_id = lp_composition.normalized_gdna_composition.\ compressed_gdna_composition.gdna_composition.\ sample_composition.content sample_ids.append(sample_id) # Transform te sample ids to be bcl2fastq-compatible bcl2fastq_sample_ids = [ SequencingProcess._bcl_scrub_name(sid) for sid in sample_ids] # Reverse the i5 sequences if needed based on the sequencer i5_sequences = SequencingProcess._sequencer_i5_index( sequencer_type, i5_sequences) # add the data of the curent pool data.append(SequencingProcess._format_sample_sheet_data( bcl2fastq_sample_ids, i7_names, i7_sequences, i5_names, i5_sequences, wells=wells, sample_plates=sample_plates, description=sample_ids, sample_proj=self.run_name, lanes=[lane], sep=',', include_header=include_header)) include_header = False data = '\n'.join(data) contacts = {c.name: c.email for c in self.contacts} pi = self.principal_investigator principal_investigator = {pi.name: pi.email} sample_sheet_dict = { 'comments': SequencingProcess._format_sample_sheet_comments( principal_investigator=principal_investigator, contacts=contacts), 'IEMFileVersion': '4', 'Investigator Name': pi.name, 'Experiment Name': self.experiment, 'Date': str(self.date), 'Workflow': 'GenerateFASTQ', 'Application': 'FASTQ Only', 'Assay': self.assay, 'Description': '', 'Chemistry': 'Default', 'read1': self.fwd_cycles, 'read2': self.rev_cycles, 'ReverseComplement': '0', 'data': data} return SequencingProcess._format_sample_sheet(sample_sheet_dict) def 
_generate_amplicon_sample_sheet(self): """Generates Illumina compatible sample sheets Returns ------- str The illumina-formatted sample sheet """ fixed_run_name = SequencingProcess._bcl_scrub_name(self.run_name) data = ( 'Sample_ID,Sample_Name,Sample_Plate,Sample_Well,I7_Index_ID,' 'index,Sample_Project,Description,,\n' '%s,,,,,NNNNNNNNNNNN,,,,,' % fixed_run_name) contacts = {c.name: c.email for c in self.contacts} pi = self.principal_investigator principal_investigator = {pi.name: pi.email} sample_sheet_dict = { 'comments': SequencingProcess._format_sample_sheet_comments( principal_investigator=principal_investigator, contacts=contacts), 'IEMFileVersion': '4', 'Investigator Name': pi.name, 'Experiment Name': self.experiment, 'Date': str(self.date), 'Workflow': 'GenerateFASTQ', 'Application': 'FASTQ Only', 'Assay': self.assay, 'Description': '', 'Chemistry': 'Default', 'read1': self.fwd_cycles, 'read2': self.rev_cycles, 'ReverseComplement': '0', 'data': data} return SequencingProcess._format_sample_sheet(sample_sheet_dict) def generate_sample_sheet(self): """Generates Illumina compatible sample sheets Returns ------- str The illumina-formatted sample sheet """ assay = self.assay if assay == 'Amplicon': return self._generate_amplicon_sample_sheet() elif assay == 'Metagenomics': return self._generate_shotgun_sample_sheet() def generate_prep_information(self): """Generates prep information Returns ------- dict labman.db.study.Study: str a dict of the Study and the prep """ assay = self.assay data = {} blanks = {} if assay == 'Amplicon': extra_fields = [ # 'e'/'r': equipment/reagent ('e', 'lepmotion_robot_id', 'epmotion_robot'), ('e', 'epmotion_tm300_8_tool_id', 'epmotion_tm300_8_tool'), ('e', 'epmotion_tm50_8_tool_id', 'epmotion_tm50_8_tool'), ('e', 'gepmotion_robot_id', 'gdata_robot'), ('e', 'epmotion_tool_id', 'epmotion_tool'), ('e', 'kingfisher_robot_id', 'kingfisher_robot'), ('r', 'extraction_kit_id', 'extraction_kit'), ('r', 'master_mix_id', 'master_mix'), ('r', 
'water_lot_id', 'water_lot'), ] sql = """ SELECT study_id, sample_id, content, run_name, experiment, fwd_cycles, rev_cycles, principal_investigator, et.description as sequencer_description, lpp.epmotion_robot_id as lepmotion_robot_id, epmotion_tm300_8_tool_id, epmotion_tm50_8_tool_id, master_mix_id, water_lot_id, gep.epmotion_robot_id as gepmotion_robot_id, epmotion_tool_id, kingfisher_robot_id, extraction_kit_id, p1.external_id as plate, w1.row_num as row_num, w1.col_num as col_num, p2.external_id as primer_composition, psc.barcode_seq as primer_set_composition, run_name as run_prefix, sp.sequencer_id as platform_id, sp.experiment as center_project_name -- Retrieve sequencing information FROM qiita.sequencing_process sp LEFT JOIN qiita.equipment e ON ( sequencer_id = equipment_id) LEFT JOIN qiita.equipment_type et ON ( et.equipment_type_id = e.equipment_type_id) LEFT JOIN qiita.sequencing_process_lanes spl USING ( sequencing_process_id) -- Retrieve pooling information LEFT JOIN qiita.pool_composition_components pcc1 ON ( pcc1.output_pool_composition_id = spl.pool_composition_id) LEFT JOIN qiita.pool_composition pccon ON ( pcc1.input_composition_id = pccon.composition_id) LEFT JOIN qiita.pool_composition_components pcc2 ON ( pccon.pool_composition_id = pcc2.output_pool_composition_id) -- Retrieve amplicon library prep information LEFT JOIN qiita.library_prep_16S_composition lp ON ( pcc2.input_composition_id = lp.composition_id) LEFT JOIN qiita.composition c1 ON ( lp.composition_id = c1.composition_id) LEFT JOIN qiita.library_prep_16s_process lpp ON ( lpp.process_id = c1.upstream_process_id) -- Retrieve the extracted gdna information LEFT JOIN qiita.gdna_composition gc USING (gdna_composition_id) LEFT JOIN qiita.composition c2 ON ( gc.composition_id = c2.composition_id) LEFT JOIN qiita.gdna_extraction_process gep ON ( gep.process_id = c2.upstream_process_id) -- Retrieve the sample information LEFT JOIN qiita.sample_composition sc USING ( sample_composition_id) LEFT 
JOIN qiita.composition c3 ON ( c3.composition_id = sc.composition_id) LEFT JOIN qiita.well w1 ON ( w1.container_id = c3.container_id) LEFT JOIN qiita.plate p1 ON ( w1.plate_id = p1.plate_id) LEFT JOIN qiita.composition c4 ON ( lp.primer_composition_id = c4.composition_id ) LEFT JOIN qiita.well w2 ON ( w2.container_id = c4.container_id) LEFT JOIN qiita.plate p2 ON ( w2.plate_id = p2.plate_id) LEFT JOIN qiita.primer_composition pc ON ( lp.primer_composition_id = pc.primer_composition_id) LEFT JOIN qiita.primer_set_composition psc ON ( pc.primer_set_composition_id = psc.primer_set_composition_id) FULL JOIN qiita.study_sample USING (sample_id) WHERE sequencing_process_id = %s ORDER BY study_id, sample_id, row_num, col_num""" elif assay == 'Metagenomics': extra_fields = [ ('e', 'gepmotion_robot_id', 'gdata_robot'), ('e', 'epmotion_tool_id', 'epmotion_tool'), ('e', 'kingfisher_robot_id', 'kingfisher_robot'), ('r', 'kappa_hyper_plus_kit_id', 'kappa_hyper_plus_kit'), ('r', 'stub_lot_id', 'stub_lot'), ('r', 'extraction_kit_id', 'extraction_kit'), ('r', 'nwater_lot_id', 'normalization_water_lot'), ] sql = """ SELECT study_id, sample_id, content, run_name, experiment, fwd_cycles, rev_cycles, principal_investigator, i5.barcode_seq as i5_sequence, i7.barcode_seq as i5_sequence, et.description as sequencer_description, gep.epmotion_robot_id as gepmotion_robot_id, epmotion_tool_id, kingfisher_robot_id, extraction_kit_id, np.water_lot_id as nwater_lot_id, kappa_hyper_plus_kit_id, stub_lot_id, p1.external_id as plate, row_num, col_num, sp.sequencer_id as platform_id, sp.experiment as center_project_name -- Retrieve sequencing information FROM qiita.sequencing_process sp LEFT JOIN qiita.equipment e ON ( sequencer_id = equipment_id) LEFT JOIN qiita.equipment_type et ON ( et.equipment_type_id = e.equipment_type_id) LEFT JOIN qiita.sequencing_process_lanes USING ( sequencing_process_id) -- Retrieving pool information LEFT JOIN qiita.pool_composition_components ON ( 
output_pool_composition_id = pool_composition_id) -- Retrieving library prep information LEFT JOIN qiita.library_prep_shotgun_composition ON ( input_composition_id = composition_id) LEFT JOIN qiita.primer_composition i5pc ON ( i5_primer_composition_id = i5pc.primer_composition_id) LEFT JOIN qiita.primer_set_composition i5 ON ( i5pc.primer_set_composition_id = i5.primer_set_composition_id ) LEFT JOIN qiita.primer_composition i7pc ON ( i7_primer_composition_id = i7pc.primer_composition_id) LEFT JOIN qiita.primer_set_composition i7 ON ( i7pc.primer_set_composition_id = i7.primer_set_composition_id ) -- Retrieving normalized gdna information LEFT JOIN qiita.normalized_gdna_composition ngc USING ( normalized_gdna_composition_id) LEFT JOIN qiita.composition c1 ON ( ngc.composition_id = c1.composition_id) LEFT JOIN qiita.library_prep_shotgun_process lps ON ( lps.process_id = c1.upstream_process_id) LEFT JOIN qiita.normalization_process np USING ( normalization_process_id) -- Retrieving compressed gdna information LEFT JOIN qiita.compressed_gdna_composition cgc USING ( compressed_gdna_composition_id) -- Retrieving gdna information LEFT JOIN qiita.gdna_composition gc USING (gdna_composition_id) LEFT JOIN qiita.composition c2 ON ( gc.composition_id = c2.composition_id) LEFT JOIN qiita.gdna_extraction_process gep ON ( gep.process_id = c2.upstream_process_id) LEFT JOIN qiita.sample_composition sc USING ( sample_composition_id) LEFT JOIN qiita.composition c3 ON ( c3.composition_id = sc.composition_id) LEFT JOIN qiita.well w1 ON ( w1.container_id = c3.container_id) LEFT JOIN qiita.plate p1 ON ( w1.plate_id = p1.plate_id) FULL JOIN qiita.study_sample USING (sample_id) WHERE sequencing_process_id = %s ORDER BY study_id, sample_id, row_num, col_num, i5.barcode_seq """ with sql_connection.TRN as TRN: # to simplify the main queries, let's get all the equipment info TRN.add("""SELECT equipment_id, external_id, notes, description FROM qiita.equipment LEFT JOIN qiita.equipment_type 
USING (equipment_type_id)""") equipment = {} for row in TRN.execute_fetchindex(): row = dict(row) eid = row.pop('equipment_id') equipment[eid] = row # and the reagents TRN.add("""SELECT reagent_composition_id, composition_id, external_lot_id, description FROM qiita.reagent_composition LEFT JOIN qiita.reagent_composition_type USING (reagent_composition_type_id)""") reagent = {} for row in TRN.execute_fetchindex(): row = dict(row) rid = row.pop('reagent_composition_id') reagent[rid] = row TRN.add(sql, [self.id]) for result in TRN.execute_fetchindex(): result = dict(result) study_id = result.pop('study_id') sid = result.pop('sample_id') content = result.pop('content') # format well col = result.pop('col_num') row = result.pop('row_num') well = [] while row: row, rem = divmod(row-1, 26) well[:0] = container_module.LETTERS[rem] result['well'] = ''.join(well) + str(col) # format extra fields list for t, k, nk in extra_fields: _id = result.pop(k) if _id is not None: if t == 'e': val = equipment[_id]['external_id'] else: val = reagent[_id]['external_lot_id'] else: val = '' result[nk] = val # format some final fields result['platform'] = equipment[ result.pop('platform_id')]['description'] if sid is not None and study_id is not None: study = Study(study_id) if study not in data: data[study] = {} data[study][content] = result if assay == 'Metagenomics': result['run_prefix'] = \ SequencingProcess._bcl_scrub_name(content) else: if assay == 'Metagenomics': result['run_prefix'] = \ SequencingProcess._bcl_scrub_name(content) blanks[content] = result # converting from dict to pandas and then to tsv for study, vals in data.items(): merged = {**vals, **blanks} df = pd.DataFrame.from_dict(merged, orient='index') df.sort_index(inplace=True) cols = sorted(list(df.columns)) sio = StringIO() df[cols].to_csv(sio, sep='\t', index_label='sample_name') data[study] = sio.getvalue() return data
bsd-3-clause
akhmadMizkat/odoo
addons/mail/models/res_partner.py
1
12524
# -*- coding: utf-8 -*- import logging import threading from openerp import _, api, fields, models, tools from openerp.osv import expression _logger = logging.getLogger(__name__) class Partner(models.Model): """ Update partner to add a field about notification preferences. Add a generic opt-out field that can be used to restrict usage of automatic email templates. """ _name = "res.partner" _inherit = ['res.partner', 'mail.thread'] _mail_flat_thread = False _mail_mass_mailing = _('Customers') message_bounce = fields.Integer('Bounce', help="Counter of the number of bounced emails for this contact") notify_email = fields.Selection([ ('none', 'Never'), ('always', 'All Messages')], 'Email Messages and Notifications', required=True, oldname='notification_email_send', default='always', help="Policy to receive emails for new messages pushed to your personal Inbox:\n" "- Never: no emails are sent\n" "- All Messages: for every notification you receive in your Inbox") opt_out = fields.Boolean( 'Opt-Out', help="If opt-out is checked, this contact has refused to receive emails for mass mailing and marketing campaign. 
" "Filter 'Available for Mass Mailing' allows users to filter the partners when performing mass mailing.") channel_ids = fields.Many2many('mail.channel', 'mail_channel_partner', 'partner_id', 'channel_id', string='Channels') @api.multi def message_get_suggested_recipients(self): recipients = super(Partner, self).message_get_suggested_recipients() for partner in self: partner._message_add_suggested_recipient(recipients, partner=partner, reason=_('Partner Profile')) return recipients @api.multi def message_get_default_recipients(self): return dict((res_id, {'partner_ids': [res_id], 'email_to': False, 'email_cc': False}) for res_id in self.ids) @api.model def _notify_prepare_template_context(self, message): # compute signature signature = "" if message.author_id and message.author_id.user_ids and message.author_id.user_ids[0].signature: signature = message.author_id.user_ids[0].signature elif message.author_id: signature = "<p>-- <br/>%s</p>" % message.author_id.name # compute Sent by if message.author_id and message.author_id.user_ids: user = message.author_id.user_ids[0] else: user = self.env.user if user.company_id.website: website_url = 'http://%s' % user.company_id.website if not user.company_id.website.lower().startswith(('http:', 'https:')) else user.company_id.website else: website_url = False company_name = user.company_id.name model_name = False if message.model: model_name = self.env['ir.model'].sudo().search([('model', '=', self.env[message.model]._name)]).name_get()[0][1] record_name = message.record_name tracking = [] for tracking_value in message.tracking_value_ids: tracking.append((tracking_value.field_desc, tracking_value.get_old_display_value()[0], tracking_value.get_new_display_value()[0])) is_discussion = message.subtype_id.id == self.env['ir.model.data'].xmlid_to_res_id('mail.mt_comment'), return { 'signature': signature, 'website_url': website_url, 'company_name': company_name, 'model_name': model_name, 'record_name': record_name, 'tracking': 
tracking, 'is_discussion': is_discussion, } @api.model def _notify_prepare_email_values(self, message): # compute email references references = message.parent_id.message_id if message.parent_id else False # custom values custom_values = dict() if message.model and message.res_id and self.pool.get(message.model) and hasattr(self.pool[message.model], 'message_get_email_values'): custom_values = self.env[message.model].browse(message.res_id).message_get_email_values(message) mail_values = { 'mail_message_id': message.id, 'auto_delete': self._context.get('mail_auto_delete', True), 'references': references, } mail_values.update(custom_values) return mail_values @api.model def _notify_send(self, body, subject, recipients, **mail_values): emails = self.env['mail.mail'] recipients_nbr, recipients_max = len(recipients), 50 email_chunks = [recipients[x:x + recipients_max] for x in xrange(0, len(recipients), recipients_max)] for email_chunk in email_chunks: # TDE FIXME: missing message parameter. So we will find mail_message_id # in the mail_values and browse it. It should already be in the # cache so should not impact performances. 
mail_message_id = mail_values.get('mail_message_id') message = self.env['mail.message'].browse(mail_message_id) if mail_message_id else None if message and message.model and message.res_id and message.model in self.env and hasattr(self.env[message.model], 'message_get_recipient_values'): tig = self.env[message.model].browse(message.res_id) recipient_values = tig.message_get_recipient_values(notif_message=message, recipient_ids=email_chunk.ids) else: recipient_values = self.env['mail.thread'].message_get_recipient_values(notif_message=None, recipient_ids=email_chunk.ids) create_values = { 'body_html': body, 'subject': subject, } create_values.update(mail_values) create_values.update(recipient_values) emails |= self.env['mail.mail'].create(create_values) return emails, recipients_nbr @api.multi def _notify(self, message, force_send=False, user_signature=True): # TDE TODO: model-dependant ? (like customer -> always email ?) message_sudo = message.sudo() email_channels = message.channel_ids.filtered(lambda channel: channel.email_send) self.sudo().search([ '|', ('id', 'in', self.ids), ('channel_ids', 'in', email_channels.ids), ('email', '!=', message_sudo.author_id and message_sudo.author_id.email or message.email_from), ('notify_email', '!=', 'none')])._notify_by_email(message, force_send=force_send, user_signature=user_signature) self._notify_by_chat(message) return True @api.multi def _notify_by_email(self, message, force_send=False, user_signature=True): """ Method to send email linked to notified messages. The recipients are the recordset on which this method is called. 
""" if not self.ids: return True # existing custom notification email base_template = None if message.model: base_template = self.env.ref('mail.mail_template_data_notification_email_%s' % message.model.replace('.', '_'), raise_if_not_found=False) if not base_template: base_template = self.env.ref('mail.mail_template_data_notification_email_default') base_template_ctx = self._notify_prepare_template_context(message) if not user_signature: base_template_ctx['signature'] = False base_mail_values = self._notify_prepare_email_values(message) # classify recipients: actions / no action if message.model and message.res_id and hasattr(self.env[message.model], '_message_notification_recipients'): recipients = self.env[message.model].browse(message.res_id)._message_notification_recipients(message, self) else: recipients = self.env['mail.thread']._message_notification_recipients(message, self) emails = self.env['mail.mail'] recipients_nbr, recipients_max = 0, 50 for email_type, recipient_template_values in recipients.iteritems(): if recipient_template_values['followers']: # generate notification email content template_fol_values = dict(base_template_ctx, **recipient_template_values) # fixme: set button_unfollow to none template_fol_values['button_follow'] = False template_fol = base_template.with_context(**template_fol_values) # generate templates for followers and not followers fol_values = template_fol.generate_email(message.id, fields=['body_html', 'subject']) # send email new_emails, new_recipients_nbr = self._notify_send(fol_values['body'], fol_values['subject'], recipient_template_values['followers'], **base_mail_values) emails |= new_emails recipients_nbr += new_recipients_nbr if recipient_template_values['not_followers']: # generate notification email content template_not_values = dict(base_template_ctx, **recipient_template_values) # fixme: set button_follow to none template_not_values['button_unfollow'] = False template_not = 
base_template.with_context(**template_not_values) # generate templates for followers and not followers not_values = template_not.generate_email(message.id, fields=['body_html', 'subject']) # send email new_emails, new_recipients_nbr = self._notify_send(not_values['body'], not_values['subject'], recipient_template_values['not_followers'], **base_mail_values) emails |= new_emails recipients_nbr += new_recipients_nbr # NOTE: # 1. for more than 50 followers, use the queue system # 2. do not send emails immediately if the registry is not loaded, # to prevent sending email during a simple update of the database # using the command-line. if force_send and recipients_nbr < recipients_max and \ (not self.pool._init or getattr(threading.currentThread(), 'testing', False)): emails.send() return True @api.multi def _notify_by_chat(self, message): """ Broadcast the message to all the partner since """ message_values = message.message_format()[0] notifications = [] for partner in self: notifications.append([(self._cr.dbname, 'ir.needaction', partner.id), dict(message_values)]) self.env['bus.bus'].sendmany(notifications) @api.model def get_needaction_count(self): """ compute the number of needaction of the current user """ if self.env.user.partner_id: self.env.cr.execute(""" SELECT count(*) as needaction_count FROM mail_message_res_partner_needaction_rel R WHERE R.res_partner_id = %s """, (self.env.user.partner_id.id,)) return self.env.cr.dictfetchall()[0].get('needaction_count') _logger.error('Call to needaction_count without partner_id') return 0 @api.model def get_static_mention_suggestions(self): """ To be overwritten to return the id, name and email of partners used as static mention suggestions loaded once at webclient initialization and stored client side. """ return [] @api.model def get_mention_suggestions(self, search, limit=8): """ Return 'limit'-first partners' id, name and email such that the name or email matches a 'search' string. 
Prioritize users, and then extend the research to all partners. """ search_dom = expression.OR([[('name', 'ilike', search)], [('email', 'ilike', search)]]) fields = ['id', 'name', 'email'] # Search users domain = expression.AND([[('user_ids.id', '!=', False)], search_dom]) users = self.search_read(domain, fields, limit=limit) # Search partners if less than 'limit' users found partners = [] if len(users) < limit: partners = self.search_read(search_dom, fields, limit=limit) # Remove duplicates partners = [p for p in partners if not len([u for u in users if u['id'] == p['id']])] return [users, partners]
gpl-3.0
dingliumath/quant-econ
quantecon/tests/test_graph_tools.py
4
7552
""" Filename: test_graph_tools.py Author: Daisuke Oyama Tests for graph_tools.py """ import sys import numpy as np from numpy.testing import assert_array_equal import nose from nose.tools import eq_, ok_, raises from quantecon.graph_tools import DiGraph def list_of_array_equal(s, t): """ Compare two lists of ndarrays s, t: lists of numpy.ndarrays """ eq_(len(s), len(t)) all(assert_array_equal(x, y) for x, y in zip(s, t)) class Graphs: """Setup graphs for the tests""" def __init__(self): self.strongly_connected_graph_dicts = [] self.not_strongly_connected_graph_dicts = [] graph_dict = { 'A': np.array([[1, 0], [0, 1]]), 'strongly_connected_components': [np.array([0]), np.array([1])], 'sink_strongly_connected_components': [np.array([0]), np.array([1])], 'is_strongly_connected': False, } self.not_strongly_connected_graph_dicts.append(graph_dict) graph_dict = { 'A': np.array([[1, 0, 0], [1, 0, 1], [0, 0, 1]]), 'strongly_connected_components': [np.array([0]), np.array([1]), np.array([2])], 'sink_strongly_connected_components': [np.array([0]), np.array([2])], 'is_strongly_connected': False, } self.not_strongly_connected_graph_dicts.append(graph_dict) graph_dict = { 'A': np.array([[1, 1], [1, 1]]), 'strongly_connected_components': [np.arange(2)], 'sink_strongly_connected_components': [np.arange(2)], 'is_strongly_connected': True, 'period': 1, 'is_aperiodic': True, 'cyclic_components': [np.arange(2)], } self.strongly_connected_graph_dicts.append(graph_dict) graph_dict = { 'A': np.array([[0, 1], [1, 0]]), 'strongly_connected_components': [np.arange(2)], 'sink_strongly_connected_components': [np.arange(2)], 'is_strongly_connected': True, 'period': 2, 'is_aperiodic': False, 'cyclic_components': [np.array([0]), np.array([1])], } self.strongly_connected_graph_dicts.append(graph_dict) graph_dict = { 'A': np.array([[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 1], [0, 0, 1, 0]]), 'strongly_connected_components': [np.arange(4)], 'sink_strongly_connected_components': [np.arange(4)], 
'is_strongly_connected': True, 'period': 1, 'is_aperiodic': True, 'cyclic_components': [np.arange(4)], } self.strongly_connected_graph_dicts.append(graph_dict) # Weighted graph graph_dict = { 'A': np.array([[0, 0.5], [2, 0]]), 'weighted': True, 'strongly_connected_components': [np.arange(2)], 'sink_strongly_connected_components': [np.arange(2)], 'is_strongly_connected': True, 'period': 2, 'is_aperiodic': False, 'cyclic_components': [np.array([0]), np.array([1])], } self.strongly_connected_graph_dicts.append(graph_dict) # Degenrate graph with no edge graph_dict = { 'A': np.array([[0]]), 'strongly_connected_components': [np.arange(1)], 'sink_strongly_connected_components': [np.arange(1)], 'is_strongly_connected': True, 'period': 1, 'is_aperiodic': True, 'cyclic_components': [np.array([0])], } self.strongly_connected_graph_dicts.append(graph_dict) # Degenrate graph with self loop graph_dict = { 'A': np.array([[1]]), 'strongly_connected_components': [np.arange(1)], 'sink_strongly_connected_components': [np.arange(1)], 'is_strongly_connected': True, 'period': 1, 'is_aperiodic': True, 'cyclic_components': [np.array([0])], } self.strongly_connected_graph_dicts.append(graph_dict) self.graph_dicts = \ self.strongly_connected_graph_dicts + \ self.not_strongly_connected_graph_dicts class TestDiGraph: """Test the methods in Digraph""" def setUp(self): """Setup Digraph instances""" self.graphs = Graphs() for graph_dict in self.graphs.graph_dicts: try: weighted = graph_dict['weighted'] except: weighted = False graph_dict['g'] = DiGraph(graph_dict['A'], weighted=weighted) def test_strongly_connected_components(self): for graph_dict in self.graphs.graph_dicts: list_of_array_equal( sorted(graph_dict['g'].strongly_connected_components, key=lambda x: x[0]), sorted(graph_dict['strongly_connected_components'], key=lambda x: x[0]) ) def test_num_strongly_connected_components(self): for graph_dict in self.graphs.graph_dicts: eq_(graph_dict['g'].num_strongly_connected_components, 
len(graph_dict['strongly_connected_components'])) def test_sink_strongly_connected_components(self): for graph_dict in self.graphs.graph_dicts: list_of_array_equal( sorted(graph_dict['g'].sink_strongly_connected_components, key=lambda x: x[0]), sorted(graph_dict['sink_strongly_connected_components'], key=lambda x: x[0]) ) def test_num_sink_strongly_connected_components(self): for graph_dict in self.graphs.graph_dicts: eq_(graph_dict['g'].num_sink_strongly_connected_components, len(graph_dict['sink_strongly_connected_components'])) def test_is_strongly_connected(self): for graph_dict in self.graphs.graph_dicts: eq_(graph_dict['g'].is_strongly_connected, graph_dict['is_strongly_connected']) def test_period(self): for graph_dict in self.graphs.graph_dicts: try: eq_(graph_dict['g'].period, graph_dict['period']) except NotImplementedError: eq_(graph_dict['g'].is_strongly_connected, False) def test_is_aperiodic(self): for graph_dict in self.graphs.graph_dicts: try: eq_(graph_dict['g'].is_aperiodic, graph_dict['is_aperiodic']) except NotImplementedError: eq_(graph_dict['g'].is_strongly_connected, False) def test_cyclic_components(self): for graph_dict in self.graphs.graph_dicts: try: list_of_array_equal( sorted(graph_dict['g'].cyclic_components, key=lambda x: x[0]), sorted(graph_dict['cyclic_components'], key=lambda x: x[0]) ) except NotImplementedError: eq_(graph_dict['g'].is_strongly_connected, False) @raises(ValueError) def test_raises_value_error_non_sym(): """Test with non symmetric input""" g = DiGraph(np.array([[0.4, 0.6]])) if __name__ == '__main__': argv = sys.argv[:] argv.append('--verbose') argv.append('--nocapture') nose.main(argv=argv, defaultTest=__file__)
bsd-3-clause
draugiskisprendimai/odoo
addons/portal/wizard/__init__.py
447
1098
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2011 OpenERP S.A (<http://www.openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import portal_wizard import share_wizard # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0