code
stringlengths
1
25.8M
language
stringclasses
18 values
source
stringclasses
4 values
repo
stringclasses
78 values
path
stringlengths
0
268
# functions that implement analysis and synthesis of sounds using the Stochastic Model # (for example usage check stochasticModel_function.py in the models_interface directory) import numpy as np from scipy.signal import hanning, resample from scipy.fftpack import fft, ifft import utilFunctions as UF def stochasticModelAnal(x, H, N, stocf): """ Stochastic analysis of a sound x: input array sound, H: hop size, N: fftsize stocf: decimation factor of mag spectrum for stochastic analysis, bigger than 0, maximum of 1 returns stocEnv: stochastic envelope """ hN = N/2+1 # positive size of fft No2 = N/2 # half of N if (hN*stocf < 3): # raise exception if decimation factor too small raise ValueError("Stochastic decimation factor too small") if (stocf > 1): # raise exception if decimation factor too big raise ValueError("Stochastic decimation factor above 1") if (H <= 0): # raise error if hop size 0 or negative raise ValueError("Hop size (H) smaller or equal to 0") if not(UF.isPower2(N)): # raise error if N not a power of two raise ValueError("FFT size (N) is not a power of 2") w = hanning(N) # analysis window x = np.append(np.zeros(No2),x) # add zeros at beginning to center first window at sample 0 x = np.append(x,np.zeros(No2)) # add zeros at the end to analyze last sample pin = No2 # initialize sound pointer in middle of analysis window pend = x.size-No2 # last sample to start a frame while pin<=pend: xw = x[pin-No2:pin+No2] * w # window the input sound X = fft(xw) # compute FFT mX = 20 * np.log10(abs(X[:hN])) # magnitude spectrum of positive frequencies mY = resample(np.maximum(-200, mX), stocf*hN) # decimate the mag spectrum if pin == No2: # first frame stocEnv = np.array([mY]) else: # rest of frames stocEnv = np.vstack((stocEnv, np.array([mY]))) pin += H # advance sound pointer return stocEnv def stochasticModelSynth(stocEnv, H, N): """ Stochastic synthesis of a sound stocEnv: stochastic envelope; H: hop size; N: fft size returns y: output sound """ if 
not(UF.isPower2(N)): # raise error if N not a power of two raise ValueError("N is not a power of two") hN = N/2+1 # positive size of fft No2 = N/2 # half of N L = stocEnv[:,0].size # number of frames ysize = H*(L+3) # output sound size y = np.zeros(ysize) # initialize output array ws = 2*hanning(N) # synthesis window pout = 0 # output sound pointer for l in range(L): mY = resample(stocEnv[l,:], hN) # interpolate to original size pY = 2*np.pi*np.random.rand(hN) # generate phase random values Y = np.zeros(N, dtype = complex) # initialize synthesis spectrum Y[:hN] = 10**(mY/20) * np.exp(1j*pY) # generate positive freq. Y[hN:] = 10**(mY[-2:0:-1]/20) * np.exp(-1j*pY[-2:0:-1]) # generate negative freq. fftbuffer = np.real(ifft(Y)) # inverse FFT y[pout:pout+N] += ws*fftbuffer # overlap-add pout += H y = np.delete(y, range(No2)) # delete half of first window y = np.delete(y, range(y.size-No2, y.size)) # delete half of the last window return y def stochasticModel(x, H, N, stocf): """ Stochastic analysis/synthesis of a sound, one frame at a time x: input array sound, H: hop size, N: fft size stocf: decimation factor of mag spectrum for stochastic analysis, bigger than 0, maximum of 1 returns y: output sound """ hN = N/2+1 # positive size of fft No2 = N/2 # half of N if (hN*stocf < 3): # raise exception if decimation factor too small raise ValueError("Stochastic decimation factor too small") if (stocf > 1): # raise exception if decimation factor too big raise ValueError("Stochastic decimation factor above 1") if (H <= 0): # raise error if hop size 0 or negative raise ValueError("Hop size (H) smaller or equal to 0") if not(UF.isPower2(N)): # raise error if N not a power of twou raise ValueError("FFT size (N) is not a power of 2") w = hanning(N) # analysis/synthesis window x = np.append(np.zeros(No2),x) # add zeros at beginning to center first window at sample 0 x = np.append(x,np.zeros(No2)) # add zeros at the end to analyze last sample pin = No2 # initialize sound pointer in 
middle of analysis window pend = x.size - No2 # last sample to start a frame y = np.zeros(x.size) # initialize output array while pin<=pend: #-----analysis----- xw = x[pin-No2:pin+No2]*w # window the input sound X = fft(xw) # compute FFT mX = 20 * np.log10(abs(X[:hN])) # magnitude spectrum of positive frequencies stocEnv = resample(np.maximum(-200, mX), hN*stocf) # decimate the mag spectrum #-----synthesis----- mY = resample(stocEnv, hN) # interpolate to original size pY = 2*np.pi*np.random.rand(hN) # generate phase random values Y = np.zeros(N, dtype = complex) Y[:hN] = 10**(mY/20) * np.exp(1j*pY) # generate positive freq. Y[hN:] = 10**(mY[-2:0:-1]/20) * np.exp(-1j*pY[-2:0:-1]) # generate negative freq. fftbuffer = np.real(ifft(Y)) # inverse FFT y[pin-No2:pin+No2] += w*fftbuffer # overlap-add pin += H # advance sound pointer y = np.delete(y, range(No2)) # delete half of first window which was added y = np.delete(y, range(y.size-No2, y.size)) # delete half of last window which was added return y
unknown
codeparrot/codeparrot-clean
# Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Dilated Neighborhood Attention Transformer model configuration""" from ...backbone_utils import BackboneConfigMixin from ...configuration_utils import PreTrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class DinatConfig(BackboneConfigMixin, PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`DinatModel`]. It is used to instantiate a Dinat model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Dinat [shi-labs/dinat-mini-in1k-224](https://huggingface.co/shi-labs/dinat-mini-in1k-224) architecture. Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: patch_size (`int`, *optional*, defaults to 4): The size (resolution) of each patch. NOTE: Only patch size of 4 is supported at the moment. num_channels (`int`, *optional*, defaults to 3): The number of input channels. embed_dim (`int`, *optional*, defaults to 64): Dimensionality of patch embedding. depths (`list[int]`, *optional*, defaults to `[3, 4, 6, 5]`): Number of layers in each level of the encoder. 
num_heads (`list[int]`, *optional*, defaults to `[2, 4, 8, 16]`): Number of attention heads in each layer of the Transformer encoder. kernel_size (`int`, *optional*, defaults to 7): Neighborhood Attention kernel size. dilations (`list[list[int]]`, *optional*, defaults to `[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]]`): Dilation value of each NA layer in the Transformer encoder. mlp_ratio (`float`, *optional*, defaults to 3.0): Ratio of MLP hidden dimensionality to embedding dimensionality. qkv_bias (`bool`, *optional*, defaults to `True`): Whether or not a learnable bias should be added to the queries, keys and values. hidden_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings and encoder. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. drop_path_rate (`float`, *optional*, defaults to 0.1): Stochastic depth rate. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. layer_scale_init_value (`float`, *optional*, defaults to 0.0): The initial value for the layer scale. Disabled if <=0. out_features (`list[str]`, *optional*): If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc. (depending on how many stages the model has). If unset and `out_indices` is set, will default to the corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the same order as defined in the `stage_names` attribute. 
out_indices (`list[int]`, *optional*): If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how many stages the model has). If unset and `out_features` is set, will default to the corresponding stages. If unset and `out_features` is unset, will default to the last stage. Must be in the same order as defined in the `stage_names` attribute. Example: ```python >>> from transformers import DinatConfig, DinatModel >>> # Initializing a Dinat shi-labs/dinat-mini-in1k-224 style configuration >>> configuration = DinatConfig() >>> # Initializing a model (with random weights) from the shi-labs/dinat-mini-in1k-224 style configuration >>> model = DinatModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "dinat" attribute_map = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self, patch_size=4, num_channels=3, embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16], kernel_size=7, dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]], mlp_ratio=3.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-5, layer_scale_init_value=0.0, out_features=None, out_indices=None, **kwargs, ): super().__init__(**kwargs) self.patch_size = patch_size self.num_channels = num_channels self.embed_dim = embed_dim self.depths = depths self.num_layers = len(depths) self.num_heads = num_heads self.kernel_size = kernel_size self.dilations = dilations self.mlp_ratio = mlp_ratio self.qkv_bias = qkv_bias self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.drop_path_rate = drop_path_rate self.hidden_act = hidden_act self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range # we set the hidden_size attribute in order to make Dinat work with 
VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1)) self.layer_scale_init_value = layer_scale_init_value self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)] self.set_output_features_output_indices(out_indices=out_indices, out_features=out_features) __all__ = ["DinatConfig"]
python
github
https://github.com/huggingface/transformers
src/transformers/models/dinat/configuration_dinat.py
# (c) 2010 Marcos Dione <mdione@grulic.org.ar> # This file is part of satyr. # satyr is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # satyr is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with satyr. If not, see <http://www.gnu.org/licenses/>. # qt/kde related from PyKDE4.kio import KIO from PyKDE4.kdecore import KUrl, KJob from PyQt4.QtCore import QDir # std python import os.path # we needed before loggin to get the handler import satyr # logging import logging logger = logging.getLogger(__name__) logger.addHandler(satyr.loggingHandler) # local from satyr.common import ConfigurableObject from satyr import utils class Renamer (ConfigurableObject): # TODO: move everything to CollAggr def __init__ (self, collaggr): ConfigurableObject.__init__ (self, 'Renamer') self.collaggr= collaggr # TODO: ***becareful!*** mixing unicode with paths! # artist, year, collection, diskno, album, trackno, title, length self.configValues= ( # ('format', unicode, u"{%artist/}{%4year - }{%collection/}{%02diskno - }{%album/}{Disk %02disk/}{%02trackno - }{%title}"), ('format', unicode, u"{%artist}/{%4year - }{%album}/{Disk %02diskno}/{%02trackno - }{%title}"), ('vaFormat', unicode, u"{%4year - }{%album}/{Disk %02diskno}/{%02trackno - }{%artist - }{%title}"), ('collection', unicode, u"{%artist}/{%4year - }{%collection}/{%02diskno - }{%album}/{%02trackno - }{%title}"), ) self.loadConfig () self.jobs= [] # TODO: make this a method of Song called properPath() def songPath (self, base, song): # TODO: take ext from file format? 
lastDot= song.filepath.rfind ('.') if lastDot==-1: ext= '' else: ext= song.filepath[lastDot:] if not song.variousArtists: if song.collection==u'': songPath= utils.expandConditionally (self.format, song) else: songPath= utils.expandConditionally (self.collection, song) else: songPath= utils.expandConditionally (self.vaFormat, song) if songPath!='': ans= base+"/"+songPath+ext else: ans= base+"/"+song.filepath return ans def jobFinished (self, job): try: self.jobs.remove (job) except ValueError: logger.warning ("Renamer.jobFinished()", job, "not found!") if job.error()==KJob.NoError: # TODO: update iface logger.debug ("Renamer.jobFinished(): success!") else: # job.errorString () is a QString logger.warning ("Renamer.jobFinished(): ***** error! *****", unicode (job.errorString ())) # TODO: Renamer.jobFinished(): ***** error! ***** A file named foo already exists. def rename (self, songs): # TODO: parametrize the main music colleciton mainColl= self.collaggr.collections[0] base= mainColl.path d= QDir () for song in songs: dstPath= self.songPath (base, song) dstDir= os.path.dirname (dstPath) # TODO: QtDir is not net transp. try to make sub jobs creating the missing path if d.mkpath (dstDir): # HINT: KUrl because KIO.* use KUrl # src= KUrl (song.filepath) src= KUrl (utils.path2qurl (song.filepath)) # BUG: Renamer.rename() # PyQt4.QtCore.QUrl(u'file:///home/mdione/media/music/new/bandidos rurales/05 - uruguay, uruguay.mp3') -> # PyQt4.QtCore.QUrl(u'file:///home/mdione/media/music/Le\xf3n Gieco/2001 - Bandidos rurales/05 - Uruguay, Uruguay.mp3') # ^^^^ dst= KUrl (dstPath) logger.info ("Renamer.rename()", src, "->", dst) # TODO: do not launch them all in parallel job= KIO.file_move (src, dst) # TODO: emit a finished. # print "Renamer.rename()", job job.result.connect (self.jobFinished) # print "Renamer.rename(): connected" self.jobs.append (job) # print "Renamer.rename(): next!" 
else: logger.info ("Renamer.rename(): failed to create", dstDir, ", skipping", dstPath) # print "Renamer.rename(): finished" def delete (self, songs): # TODO: parametrize the trash music colleciton trashColl= self.collaggr.collections[-1] base= trashColl.path for song in songs: logger.debug ("Renamer.delete()", song.filepath) # end
unknown
codeparrot/codeparrot-clean
import unittest from datetime import datetime from pyprint.NullPrinter import NullPrinter from pyprint.Printer import Printer from pyprint.StringPrinter import StringPrinter from coalib.misc import Constants from coalib.output.printers.LogPrinter import LogPrinter from coalib.processes.communication.LogMessage import LOG_LEVEL, LogMessage class LogPrinterTest(unittest.TestCase): timestamp = datetime.today() log_message = LogMessage(LOG_LEVEL.ERROR, Constants.COMPLEX_TEST_STRING, timestamp=timestamp) def test_interface(self): uut = LogPrinter(Printer()) self.assertRaises(NotImplementedError, uut.log_message, self.log_message) def test_get_printer(self): self.assertIs(LogPrinter(None).printer, None) printer = Printer() self.assertIs(LogPrinter(printer).printer, printer) def test_logging(self): uut = LogPrinter(StringPrinter(), timestamp_format="") uut.log_message(self.log_message, end="") self.assertEqual(uut.printer.string, str(self.log_message)) uut = LogPrinter(StringPrinter(), log_level=LOG_LEVEL.DEBUG) uut.log_message(self.log_message, end="") self.assertEqual( uut.printer.string, "[ERROR][" + self.timestamp.strftime("%X") + "] " + Constants.COMPLEX_TEST_STRING) uut.printer.clear() uut.log(LOG_LEVEL.ERROR, Constants.COMPLEX_TEST_STRING, timestamp=self.timestamp, end="") self.assertEqual( uut.printer.string, "[ERROR][" + self.timestamp.strftime("%X") + "] " + Constants.COMPLEX_TEST_STRING) uut.printer.clear() uut.debug(Constants.COMPLEX_TEST_STRING, "d", timestamp=self.timestamp, end="") self.assertEqual( uut.printer.string, "[DEBUG][" + self.timestamp.strftime("%X") + "] " + Constants.COMPLEX_TEST_STRING + " d") uut.printer.clear() uut.log_level = LOG_LEVEL.INFO uut.debug(Constants.COMPLEX_TEST_STRING, timestamp=self.timestamp, end="") self.assertEqual(uut.printer.string, "") uut.printer.clear() uut.info(Constants.COMPLEX_TEST_STRING, "d", timestamp=self.timestamp, end="") self.assertEqual( uut.printer.string, "[INFO][" + self.timestamp.strftime("%X") + "] " + 
Constants.COMPLEX_TEST_STRING + " d") uut.log_level = LOG_LEVEL.WARNING uut.printer.clear() uut.debug(Constants.COMPLEX_TEST_STRING, timestamp=self.timestamp, end="") self.assertEqual(uut.printer.string, "") uut.printer.clear() uut.warn(Constants.COMPLEX_TEST_STRING, "d", timestamp=self.timestamp, end="") self.assertEqual( uut.printer.string, "[WARNING][" + self.timestamp.strftime("%X") + "] " + Constants.COMPLEX_TEST_STRING + " d") uut.printer.clear() uut.err(Constants.COMPLEX_TEST_STRING, "d", timestamp=self.timestamp, end="") self.assertEqual( uut.printer.string, "[ERROR][" + self.timestamp.strftime("%X") + "] " + Constants.COMPLEX_TEST_STRING + " d") uut.log_level = LOG_LEVEL.DEBUG uut.printer.clear() uut.log_exception( "Something failed.", NotImplementedError(Constants.COMPLEX_TEST_STRING), timestamp=self.timestamp) self.assertTrue(uut.printer.string.startswith( "[ERROR][" + self.timestamp.strftime("%X") + "] Something failed.\n" + "[DEBUG][" + self.timestamp.strftime("%X") + "] Exception was:")) uut.log_level = LOG_LEVEL.INFO uut.printer.clear() logged = uut.log_exception( "Something failed.", NotImplementedError(Constants.COMPLEX_TEST_STRING), timestamp=self.timestamp, end="") self.assertTrue(uut.printer.string.startswith( "[ERROR][" + self.timestamp.strftime("%X") + "] Something failed.")) def test_raises(self): uut = LogPrinter(NullPrinter()) self.assertRaises(TypeError, uut.log, 5) self.assertRaises(TypeError, uut.log_exception, "message", 5) self.assertRaises(TypeError, uut.log_message, 5)
unknown
codeparrot/codeparrot-clean
import numpy as np import nose.tools as nt import mock import hyperspy.api as hs from hyperspy.misc.utils import slugify class TestModelJacobians: def setUp(self): s = hs.signals.Spectrum(np.zeros(1)) m = s.create_model() self.low_loss = 7. self.weights = 0.3 m.axis.axis = np.array([1, 0]) m.channel_switches = np.array([0, 1], dtype=bool) m.append(hs.model.components.Gaussian()) m[0].A.value = 1 m[0].centre.value = 2. m[0].sigma.twin = m[0].centre m._low_loss = mock.MagicMock() m.low_loss.return_value = self.low_loss self.model = m m.convolution_axis = np.zeros(2) def test_jacobian_not_convolved(self): m = self.model m.convolved = False jac = m._jacobian((1, 2, 3), None, weights=self.weights) np.testing.assert_array_almost_equal(jac.squeeze(), self.weights * np.array([m[0].A.grad(0), m[0].sigma.grad(0) + m[0].centre.grad(0)])) nt.assert_equal(m[0].A.value, 1) nt.assert_equal(m[0].centre.value, 2) nt.assert_equal(m[0].sigma.value, 2) def test_jacobian_convolved(self): m = self.model m.convolved = True m.append(hs.model.components.Gaussian()) m[0].convolved = False m[1].convolved = True jac = m._jacobian((1, 2, 3, 4, 5), None, weights=self.weights) np.testing.assert_array_almost_equal(jac.squeeze(), self.weights * np.array([m[0].A.grad(0), m[0].sigma.grad(0) + m[0].centre.grad(0), m[1].A.grad(0) * self.low_loss, m[1].centre.grad(0) * self.low_loss, m[1].sigma.grad(0) * self.low_loss, ])) nt.assert_equal(m[0].A.value, 1) nt.assert_equal(m[0].centre.value, 2) nt.assert_equal(m[0].sigma.value, 2) nt.assert_equal(m[1].A.value, 3) nt.assert_equal(m[1].centre.value, 4) nt.assert_equal(m[1].sigma.value, 5) class TestModelCallMethod: def setUp(self): s = hs.signals.Spectrum(np.empty(1)) m = s.create_model() m.append(hs.model.components.Gaussian()) m.append(hs.model.components.Gaussian()) self.model = m def test_call_method_no_convolutions(self): m = self.model m.convolved = False m[1].active = False r1 = m() r2 = m(onlyactive=True) 
np.testing.assert_almost_equal(m[0].function(0) * 2, r1) np.testing.assert_almost_equal(m[0].function(0), r2) m.convolved = True r1 = m(non_convolved=True) r2 = m(non_convolved=True, onlyactive=True) np.testing.assert_almost_equal(m[0].function(0) * 2, r1) np.testing.assert_almost_equal(m[0].function(0), r2) def test_call_method_with_convolutions(self): m = self.model m._low_loss = mock.MagicMock() m.low_loss.return_value = 0.3 m.convolved = True m.append(hs.model.components.Gaussian()) m[1].active = False m[0].convolved = True m[1].convolved = False m[2].convolved = False m.convolution_axis = np.array([0., ]) r1 = m() r2 = m(onlyactive=True) np.testing.assert_almost_equal(m[0].function(0) * 2.3, r1) np.testing.assert_almost_equal(m[0].function(0) * 1.3, r2) def test_call_method_binned(self): m = self.model m.convolved = False m.remove(1) m.signal.metadata.Signal.binned = True m.signal.axes_manager[-1].scale = 0.3 r1 = m() np.testing.assert_almost_equal(m[0].function(0) * 0.3, r1) class TestModelPlotCall: def setUp(self): s = hs.signals.Spectrum(np.empty(1)) m = s.create_model() m.__call__ = mock.MagicMock() m.__call__.return_value = np.array([0.5, 0.25]) m.axis = mock.MagicMock() m.fetch_stored_values = mock.MagicMock() m.channel_switches = np.array([0, 1, 1, 0, 0], dtype=bool) self.model = m def test_model2plot_own_am(self): m = self.model m.axis.axis.shape = (5,) res = m._model2plot(m.axes_manager) np.testing.assert_array_equal( res, np.array([np.nan, 0.5, 0.25, np.nan, np.nan])) nt.assert_true(m.__call__.called) nt.assert_dict_equal( m.__call__.call_args[1], { 'non_convolved': False, 'onlyactive': True}) nt.assert_false(m.fetch_stored_values.called) def test_model2plot_other_am(self): m = self.model res = m._model2plot(m.axes_manager.deepcopy(), out_of_range2nans=False) np.testing.assert_array_equal(res, np.array([0.5, 0.25])) nt.assert_true(m.__call__.called) nt.assert_dict_equal( m.__call__.call_args[1], { 'non_convolved': False, 'onlyactive': True}) 
nt.assert_equal(2, m.fetch_stored_values.call_count) class TestModelSettingPZero: def setUp(self): s = hs.signals.Spectrum(np.empty(1)) m = s.create_model() m.append(hs.model.components.Gaussian()) m[0].A.value = 1.1 m[0].centre._number_of_elements = 2 m[0].centre.value = (2.2, 3.3) m[0].sigma.value = 4.4 m[0].sigma.free = False m[0].A._bounds = (0.1, 0.11) m[0].centre._bounds = ((0.2, 0.21), (0.3, 0.31)) m[0].sigma._bounds = (0.4, 0.41) self.model = m def test_setting_p0(self): m = self.model m.append(hs.model.components.Gaussian()) m[-1].active = False m.p0 = None m._set_p0() nt.assert_equal(m.p0, (1.1, 2.2, 3.3)) def test_fetching_from_p0(self): m = self.model m.append(hs.model.components.Gaussian()) m[-1].active = False m[-1].A.value = 100 m[-1].sigma.value = 200 m[-1].centre.value = 300 m.p0 = (1.2, 2.3, 3.4, 5.6, 6.7, 7.8) m._fetch_values_from_p0() nt.assert_equal(m[0].A.value, 1.2) nt.assert_equal(m[0].centre.value, (2.3, 3.4)) nt.assert_equal(m[0].sigma.value, 4.4) nt.assert_equal(m[1].A.value, 100) nt.assert_equal(m[1].sigma.value, 200) nt.assert_equal(m[1].centre.value, 300) def test_setting_boundaries(self): m = self.model m.append(hs.model.components.Gaussian()) m[-1].active = False m.set_boundaries() nt.assert_equal(m.free_parameters_boundaries, [(0.1, 0.11), (0.2, 0.21), (0.3, 0.31)]) def test_setting_mpfit_parameters_info(self): m = self.model m[0].A.bmax = None m[0].centre.bmin = None m[0].centre.bmax = 0.31 m.append(hs.model.components.Gaussian()) m[-1].active = False m.set_mpfit_parameters_info() nt.assert_equal(m.mpfit_parinfo, [{'limited': [True, False], 'limits': [0.1, 0]}, {'limited': [False, True], 'limits': [0, 0.31]}, {'limited': [False, True], 'limits': [0, 0.31]}, ]) class TestModel1D: def setUp(self): s = hs.signals.Spectrum(np.empty(1)) m = s.create_model() self.model = m def test_errfunc(self): m = self.model m._model_function = mock.MagicMock() m._model_function.return_value = 3. np.testing.assert_equal(m._errfunc(None, 1., None), 2.) 
np.testing.assert_equal(m._errfunc(None, 1., 0.3), 0.6) def test_errfunc2(self): m = self.model m._model_function = mock.MagicMock() m._model_function.return_value = 3. * np.ones(2) np.testing.assert_equal(m._errfunc2(None, np.ones(2), None), 2 * 4.) np.testing.assert_equal(m._errfunc2(None, np.ones(2), 0.3), 2 * 0.36) def test_gradient_ls(self): m = self.model m._errfunc = mock.MagicMock() m._errfunc.return_value = 0.1 m._jacobian = mock.MagicMock() m._jacobian.return_value = np.ones((1, 2)) * 7. np.testing.assert_equal(m._gradient_ls(None, None), 2 * 0.1 * 7 * 2) def test_gradient_ml(self): m = self.model m._model_function = mock.MagicMock() m._model_function.return_value = 3. * np.ones(2) m._jacobian = mock.MagicMock() m._jacobian.return_value = np.ones((1, 2)) * 7. np.testing.assert_equal( m._gradient_ml(None, 1.2), -2 * 7 * (1.2 / 3 - 1)) def test_model_function(self): m = self.model m.append(hs.model.components.Gaussian()) m[0].A.value = 1.3 m[0].centre.value = 0.003 m[0].sigma.value = 0.1 param = (100, 0.1, 0.2) np.testing.assert_array_almost_equal(176.03266338, m._model_function(param)) nt.assert_equal(m[0].A.value, 100) nt.assert_equal(m[0].centre.value, 0.1) nt.assert_equal(m[0].sigma.value, 0.2) @nt.raises(ValueError) def test_append_existing_component(self): g = hs.model.components.Gaussian() m = self.model m.append(g) m.append(g) def test_append_component(self): g = hs.model.components.Gaussian() m = self.model m.append(g) nt.assert_in(g, m) nt.assert_is(g.model, m) nt.assert_is(g._axes_manager, m.axes_manager) nt.assert_true(all([hasattr(p, 'map') for p in g.parameters])) def test_calculating_convolution_axis(self): m = self.model # setup m.axis.offset = 10 m.axis.size = 10 ll_axis = mock.MagicMock() ll_axis.size = 7 ll_axis.value2index.return_value = 3 m._low_loss = mock.MagicMock() m.low_loss.axes_manager.signal_axes = [ll_axis, ] # calculation m.set_convolution_axis() # tests np.testing.assert_array_equal(m.convolution_axis, np.arange(7, 23)) 
np.testing.assert_equal(ll_axis.value2index.call_args[0][0], 0) def test_access_component_by_name(self): m = self.model g1 = hs.model.components.Gaussian() g2 = hs.model.components.Gaussian() g2.name = "test" m.extend((g1, g2)) nt.assert_is(m["test"], g2) def test_access_component_by_index(self): m = self.model g1 = hs.model.components.Gaussian() g2 = hs.model.components.Gaussian() g2.name = "test" m.extend((g1, g2)) nt.assert_is(m[1], g2) def test_component_name_when_append(self): m = self.model gs = [ hs.model.components.Gaussian(), hs.model.components.Gaussian(), hs.model.components.Gaussian()] m.extend(gs) nt.assert_is(m['Gaussian'], gs[0]) nt.assert_is(m['Gaussian_0'], gs[1]) nt.assert_is(m['Gaussian_1'], gs[2]) @nt.raises(ValueError) def test_several_component_with_same_name(self): m = self.model gs = [ hs.model.components.Gaussian(), hs.model.components.Gaussian(), hs.model.components.Gaussian()] m.extend(gs) m[0]._name = "hs.model.components.Gaussian" m[1]._name = "hs.model.components.Gaussian" m[2]._name = "hs.model.components.Gaussian" m['Gaussian'] @nt.raises(ValueError) def test_no_component_with_that_name(self): m = self.model m['Voigt'] @nt.raises(ValueError) def test_component_already_in_model(self): m = self.model g1 = hs.model.components.Gaussian() m.extend((g1, g1)) def test_remove_component(self): m = self.model g1 = hs.model.components.Gaussian() m.append(g1) m.remove(g1) nt.assert_equal(len(m), 0) def test_remove_component_by_index(self): m = self.model g1 = hs.model.components.Gaussian() m.append(g1) m.remove(0) nt.assert_equal(len(m), 0) def test_remove_component_by_name(self): m = self.model g1 = hs.model.components.Gaussian() m.append(g1) m.remove(g1.name) nt.assert_equal(len(m), 0) def test_delete_component_by_index(self): m = self.model g1 = hs.model.components.Gaussian() m.append(g1) del m[0] nt.assert_not_in(g1, m) def test_delete_component_by_name(self): m = self.model g1 = hs.model.components.Gaussian() m.append(g1) del m[g1.name] 
nt.assert_not_in(g1, m) def test_delete_slice(self): m = self.model g1 = hs.model.components.Gaussian() g2 = hs.model.components.Gaussian() g3 = hs.model.components.Gaussian() m.extend([g1, g2, g3]) del m[:2] nt.assert_not_in(g1, m) nt.assert_not_in(g2, m) nt.assert_in(g3, m) def test_get_component_by_name(self): m = self.model g1 = hs.model.components.Gaussian() g2 = hs.model.components.Gaussian() g2.name = "test" m.extend((g1, g2)) nt.assert_is(m._get_component("test"), g2) def test_get_component_by_index(self): m = self.model g1 = hs.model.components.Gaussian() g2 = hs.model.components.Gaussian() g2.name = "test" m.extend((g1, g2)) nt.assert_is(m._get_component(1), g2) def test_get_component_by_component(self): m = self.model g1 = hs.model.components.Gaussian() g2 = hs.model.components.Gaussian() g2.name = "test" m.extend((g1, g2)) nt.assert_is(m._get_component(g2), g2) @nt.raises(ValueError) def test_get_component_wrong(self): m = self.model g1 = hs.model.components.Gaussian() g2 = hs.model.components.Gaussian() g2.name = "test" m.extend((g1, g2)) m._get_component(1.2) def test_components_class_default(self): m = self.model g1 = hs.model.components.Gaussian() m.append(g1) nt.assert_is(getattr(m.components, g1.name), g1) def test_components_class_change_name(self): m = self.model g1 = hs.model.components.Gaussian() m.append(g1) g1.name = "test" nt.assert_is(getattr(m.components, g1.name), g1) @nt.raises(AttributeError) def test_components_class_change_name_del_default(self): m = self.model g1 = hs.model.components.Gaussian() m.append(g1) g1.name = "test" getattr(m.components, "Gaussian") def test_components_class_change_invalid_name(self): m = self.model g1 = hs.model.components.Gaussian() m.append(g1) g1.name = "1, Test This!" 
nt.assert_is( getattr(m.components, slugify(g1.name, valid_variable_name=True)), g1) @nt.raises(AttributeError) def test_components_class_change_name_del_default(self): m = self.model g1 = hs.model.components.Gaussian() m.append(g1) invalid_name = "1, Test This!" g1.name = invalid_name g1.name = "test" getattr(m.components, slugify(invalid_name)) def test_snap_parameter_bounds(self): m = self.model g1 = hs.model.components.Gaussian() m.append(g1) g2 = hs.model.components.Gaussian() m.append(g2) g3 = hs.model.components.Gaussian() m.append(g3) g4 = hs.model.components.Gaussian() m.append(g4) g1.A.value = 3. g1.centre.bmin = 300. g1.centre.value = 1. g1.sigma.bmax = 15. g1.sigma.value = 30 g2.A.value = 1 g2.A.bmin = 0. g2.A.bmax = 3. g2.centre.value = 0 g2.centre.bmin = 1 g2.centre.bmax = 3. g2.sigma.value = 4 g2.sigma.bmin = 1 g2.sigma.bmax = 3. g3.A.bmin = 0 g3.A.value = -3 g3.A.free = False g3.centre.value = 15 g3.centre.bmax = 10 g3.centre.free = False g3.sigma.value = 1 g3.sigma.bmin = 0 g3.sigma.bmax = 0 g4.active = False g4.A.value = 300 g4.A.bmin = 500 g4.centre.value = 0 g4.centre.bmax = -1 g4.sigma.value = 1 g4.sigma.bmin = 10 m.ensure_parameters_in_bounds() np.testing.assert_almost_equal(g1.A.value, 3.) np.testing.assert_almost_equal(g2.A.value, 1.) np.testing.assert_almost_equal(g3.A.value, -3.) np.testing.assert_almost_equal(g4.A.value, 300.) np.testing.assert_almost_equal(g1.centre.value, 300.) np.testing.assert_almost_equal(g2.centre.value, 1.) np.testing.assert_almost_equal(g3.centre.value, 15.) np.testing.assert_almost_equal(g4.centre.value, 0) np.testing.assert_almost_equal(g1.sigma.value, 15.) np.testing.assert_almost_equal(g2.sigma.value, 3.) np.testing.assert_almost_equal(g3.sigma.value, 0.) np.testing.assert_almost_equal(g4.sigma.value, 1) class TestModel2D: def setUp(self): g = hs.model.components.Gaussian2D( centre_x=-5., centre_y=-5., sigma_x=1., sigma_y=2.) 
x = np.arange(-10, 10, 0.01) y = np.arange(-10, 10, 0.01) X, Y = np.meshgrid(x, y) im = hs.signals.Image(g.function(X, Y)) im.axes_manager[0].scale = 0.01 im.axes_manager[0].offset = -10 im.axes_manager[1].scale = 0.01 im.axes_manager[1].offset = -10 self.im = im def test_fitting(self): im = self.im m = im.create_model() gt = hs.model.components.Gaussian2D(centre_x=-4.5, centre_y=-4.5, sigma_x=0.5, sigma_y=1.5) m.append(gt) m.fit() np.testing.assert_almost_equal(gt.centre_x.value, -5.) np.testing.assert_almost_equal(gt.centre_y.value, -5.) np.testing.assert_almost_equal(gt.sigma_x.value, 1.) np.testing.assert_almost_equal(gt.sigma_y.value, 2.) class TestModelFitBinned: def setUp(self): np.random.seed(1) s = hs.signals.Spectrum( np.random.normal( scale=2, size=10000)).get_histogram() s.metadata.Signal.binned = True g = hs.model.components.Gaussian() m = s.create_model() m.append(g) g.sigma.value = 1 g.centre.value = 0.5 g.A.value = 1e3 self.m = m def test_fit_fmin_leastsq(self): self.m.fit(fitter="fmin", method="ls") np.testing.assert_almost_equal(self.m[0].A.value, 9976.14519369) np.testing.assert_almost_equal(self.m[0].centre.value, -0.110610743285) np.testing.assert_almost_equal(self.m[0].sigma.value, 1.98380705455) def test_fit_fmin_ml(self): self.m.fit(fitter="fmin", method="ml") np.testing.assert_almost_equal(self.m[0].A.value, 10001.39613936, decimal=3) np.testing.assert_almost_equal(self.m[0].centre.value, -0.104151206314, decimal=6) np.testing.assert_almost_equal(self.m[0].sigma.value, 2.00053642434) def test_fit_leastsq(self): self.m.fit(fitter="leastsq") np.testing.assert_almost_equal(self.m[0].A.value, 9976.14526082, 1) np.testing.assert_almost_equal(self.m[0].centre.value, -0.110610727064) np.testing.assert_almost_equal(self.m[0].sigma.value, 1.98380707571, 5) def test_fit_mpfit(self): self.m.fit(fitter="mpfit") np.testing.assert_almost_equal(self.m[0].A.value, 9976.14526286, 5) np.testing.assert_almost_equal(self.m[0].centre.value, -0.110610718444) 
np.testing.assert_almost_equal(self.m[0].sigma.value, 1.98380707614) def test_fit_odr(self): self.m.fit(fitter="odr") np.testing.assert_almost_equal(self.m[0].A.value, 9976.14531979, 3) np.testing.assert_almost_equal(self.m[0].centre.value, -0.110610724054) np.testing.assert_almost_equal(self.m[0].sigma.value, 1.98380709939) def test_fit_leastsq_grad(self): self.m.fit(fitter="leastsq", grad=True) np.testing.assert_almost_equal(self.m[0].A.value, 9976.14526084) np.testing.assert_almost_equal(self.m[0].centre.value, -0.11061073306) np.testing.assert_almost_equal(self.m[0].sigma.value, 1.98380707552) def test_fit_mpfit_grad(self): self.m.fit(fitter="mpfit", grad=True) np.testing.assert_almost_equal(self.m[0].A.value, 9976.14526084) np.testing.assert_almost_equal(self.m[0].centre.value, -0.11061073306) np.testing.assert_almost_equal(self.m[0].sigma.value, 1.98380707552) def test_fit_odr_grad(self): self.m.fit(fitter="odr", grad=True) np.testing.assert_almost_equal(self.m[0].A.value, 9976.14531979, 3) np.testing.assert_almost_equal(self.m[0].centre.value, -0.110610724054) np.testing.assert_almost_equal(self.m[0].sigma.value, 1.98380709939) def test_fit_bounded(self): self.m[0].centre.bmin = 0.5 # self.m[0].bounded = True self.m.fit(fitter="mpfit", bounded=True) np.testing.assert_almost_equal(self.m[0].A.value, 9991.65422046, 4) np.testing.assert_almost_equal(self.m[0].centre.value, 0.5) np.testing.assert_almost_equal(self.m[0].sigma.value, 2.08398236966) def test_fit_bounded_bad_starting_values(self): self.m[0].centre.bmin = 0.5 self.m[0].centre.value = -1 # self.m[0].bounded = True self.m.fit(fitter="mpfit", bounded=True) np.testing.assert_almost_equal(self.m[0].A.value, 9991.65422046, 4) np.testing.assert_almost_equal(self.m[0].centre.value, 0.5) np.testing.assert_almost_equal(self.m[0].sigma.value, 2.08398236966) @nt.raises(ValueError) def test_wrong_method(self): self.m.fit(method="dummy") class TestModelWeighted: def setUp(self): np.random.seed(1) s = 
hs.signals.SpectrumSimulation(np.arange(10, 100, 0.1)) s.metadata.set_item("Signal.Noise_properties.variance", hs.signals.Spectrum(np.arange(10, 100, 0.01))) s.axes_manager[0].scale = 0.1 s.axes_manager[0].offset = 10 s.add_poissonian_noise() m = s.create_model() m.append(hs.model.components.Polynomial(1)) self.m = m def test_fit_leastsq_binned(self): self.m.signal.metadata.Signal.binned = True self.m.fit(fitter="leastsq", method="ls") for result, expected in zip(self.m[0].coefficients.value, (9.9165596693502778, 1.6628238107916631)): np.testing.assert_almost_equal(result, expected, decimal=5) def test_fit_odr_binned(self): self.m.signal.metadata.Signal.binned = True self.m.fit(fitter="odr", method="ls") for result, expected in zip(self.m[0].coefficients.value, (9.9165596548961972, 1.6628247412317521)): np.testing.assert_almost_equal(result, expected, decimal=5) def test_fit_mpfit_binned(self): self.m.signal.metadata.Signal.binned = True self.m.fit(fitter="mpfit", method="ls") for result, expected in zip(self.m[0].coefficients.value, (9.9165596607108739, 1.6628243846485873)): np.testing.assert_almost_equal(result, expected, decimal=5) def test_fit_fmin_binned(self): self.m.signal.metadata.Signal.binned = True self.m.fit( fitter="fmin", method="ls", ) for result, expected in zip(self.m[0].coefficients.value, (9.9137288425667442, 1.8446013472266145)): np.testing.assert_almost_equal(result, expected, decimal=5) def test_fit_leastsq_unbinned(self): self.m.signal.metadata.Signal.binned = False self.m.fit(fitter="leastsq", method="ls") for result, expected in zip( self.m[0].coefficients.value, (0.99165596391487121, 0.16628254242532492)): np.testing.assert_almost_equal(result, expected, decimal=5) def test_fit_odr_unbinned(self): self.m.signal.metadata.Signal.binned = False self.m.fit(fitter="odr", method="ls") for result, expected in zip( self.m[0].coefficients.value, (0.99165596548961943, 0.16628247412317315)): np.testing.assert_almost_equal(result, expected, decimal=5) 
def test_fit_mpfit_unbinned(self): self.m.signal.metadata.Signal.binned = False self.m.fit(fitter="mpfit", method="ls") for result, expected in zip( self.m[0].coefficients.value, (0.99165596295068958, 0.16628257462820528)): np.testing.assert_almost_equal(result, expected, decimal=5) def test_fit_fmin_unbinned(self): self.m.signal.metadata.Signal.binned = False self.m.fit( fitter="fmin", method="ls", ) for result, expected in zip( self.m[0].coefficients.value, (0.99136169230026261, 0.18483060534056939)): np.testing.assert_almost_equal(result, expected, decimal=5) def test_chisq(self): self.m.signal.metadata.Signal.binned = True self.m.fit(fitter="leastsq", method="ls") np.testing.assert_almost_equal(self.m.chisq.data, 3029.16949561) def test_red_chisq(self): self.m.fit(fitter="leastsq", method="ls") np.testing.assert_almost_equal(self.m.red_chisq.data, 3.37700055) class TestModelScalarVariance: def setUp(self): s = hs.signals.SpectrumSimulation(np.ones(100)) m = s.create_model() m.append(hs.model.components.Offset()) self.s = s self.m = m def test_std1_chisq(self): std = 1 np.random.seed(1) self.s.add_gaussian_noise(std) self.s.metadata.set_item("Signal.Noise_properties.variance", std ** 2) self.m.fit(fitter="leastsq", method="ls") np.testing.assert_almost_equal(self.m.chisq.data, 78.35015229) def test_std10_chisq(self): std = 10 np.random.seed(1) self.s.add_gaussian_noise(std) self.s.metadata.set_item("Signal.Noise_properties.variance", std ** 2) self.m.fit(fitter="leastsq", method="ls") np.testing.assert_almost_equal(self.m.chisq.data, 78.35015229) def test_std1_red_chisq(self): std = 1 np.random.seed(1) self.s.add_gaussian_noise(std) self.s.metadata.set_item("Signal.Noise_properties.variance", std ** 2) self.m.fit(fitter="leastsq", method="ls") np.testing.assert_almost_equal(self.m.red_chisq.data, 0.79949135) def test_std10_red_chisq(self): std = 10 np.random.seed(1) self.s.add_gaussian_noise(std) self.s.metadata.set_item("Signal.Noise_properties.variance", std 
** 2) self.m.fit(fitter="leastsq", method="ls") np.testing.assert_almost_equal(self.m.red_chisq.data, 0.79949135) def test_std1_red_chisq_in_range(self): std = 1 self.m.set_signal_range(10, 50) np.random.seed(1) self.s.add_gaussian_noise(std) self.s.metadata.set_item("Signal.Noise_properties.variance", std ** 2) self.m.fit(fitter="leastsq", method="ls") np.testing.assert_almost_equal(self.m.red_chisq.data, 0.86206965) class TestModelSignalVariance: def setUp(self): variance = hs.signals.SpectrumSimulation( np.arange( 100, 300).reshape( (2, 100))) s = variance.deepcopy() np.random.seed(1) std = 10 s.add_gaussian_noise(std) s.add_poissonian_noise() s.metadata.set_item("Signal.Noise_properties.variance", variance + std ** 2) m = s.create_model() m.append(hs.model.components.Polynomial(order=1)) self.s = s self.m = m def test_std1_red_chisq(self): self.m.multifit(fitter="leastsq", method="ls", show_progressbar=None) np.testing.assert_almost_equal(self.m.red_chisq.data[0], 0.79693355673230915) np.testing.assert_almost_equal(self.m.red_chisq.data[1], 0.91453032901427167) class TestMultifit: def setUp(self): s = hs.signals.Spectrum(np.zeros((2, 200))) s.axes_manager[-1].offset = 1 s.data[:] = 2 * s.axes_manager[-1].axis ** (-3) m = s.create_model() m.append(hs.model.components.PowerLaw()) m[0].A.value = 2 m[0].r.value = 2 m.store_current_values() m.axes_manager.indices = (1,) m[0].r.value = 100 m[0].A.value = 2 m.store_current_values() m[0].A.free = False self.m = m m.axes_manager.indices = (0,) m[0].A.value = 100 def test_fetch_only_fixed_false(self): self.m.multifit(fetch_only_fixed=False, show_progressbar=None) np.testing.assert_array_almost_equal(self.m[0].r.map['values'], [3., 100.]) np.testing.assert_array_almost_equal(self.m[0].A.map['values'], [2., 2.]) def test_fetch_only_fixed_true(self): self.m.multifit(fetch_only_fixed=True, show_progressbar=None) np.testing.assert_array_almost_equal(self.m[0].r.map['values'], [3., 3.]) 
np.testing.assert_array_almost_equal(self.m[0].A.map['values'], [2., 2.]) def test_bounded_snapping(self): m = self.m m[0].A.free = True m.signal.data *= 2. m[0].A.value = 2. m[0].A.bmin = 3. m.multifit(fitter='mpfit', bounded=True, show_progressbar=None) np.testing.assert_array_almost_equal(self.m[0].r.map['values'], [3., 3.]) np.testing.assert_array_almost_equal(self.m[0].A.map['values'], [4., 4.]) class TestStoreCurrentValues: def setUp(self): self.m = hs.signals.Spectrum(np.arange(10)).create_model() self.o = hs.model.components.Offset() self.m.append(self.o) def test_active(self): self.o.offset.value = 2 self.o.offset.std = 3 self.m.store_current_values() nt.assert_equal(self.o.offset.map["values"][0], 2) nt.assert_equal(self.o.offset.map["is_set"][0], True) def test_not_active(self): self.o.active = False self.o.offset.value = 2 self.o.offset.std = 3 self.m.store_current_values() nt.assert_not_equal(self.o.offset.map["values"][0], 2) class TestSetCurrentValuesTo: def setUp(self): self.m = hs.signals.Spectrum( np.arange(10).reshape(2, 5)).create_model() self.comps = [ hs.model.components.Offset(), hs.model.components.Offset()] self.m.extend(self.comps) def test_set_all(self): for c in self.comps: c.offset.value = 2 self.m.assign_current_values_to_all() nt.assert_true((self.comps[0].offset.map["values"] == 2).all()) nt.assert_true((self.comps[1].offset.map["values"] == 2).all()) def test_set_1(self): self.comps[1].offset.value = 2 self.m.assign_current_values_to_all([self.comps[1]]) nt.assert_true((self.comps[0].offset.map["values"] != 2).all()) nt.assert_true((self.comps[1].offset.map["values"] == 2).all()) class TestAsSignal: def setUp(self): self.m = hs.signals.Spectrum( np.arange(10).reshape(2, 5)).create_model() self.comps = [ hs.model.components.Offset(), hs.model.components.Offset()] self.m.extend(self.comps) for c in self.comps: c.offset.value = 2 self.m.assign_current_values_to_all() def test_all_components_simple(self): s = 
self.m.as_signal(show_progressbar=None) nt.assert_true(np.all(s.data == 4.)) def test_one_component_simple(self): s = self.m.as_signal(component_list=[0], show_progressbar=None) nt.assert_true(np.all(s.data == 2.)) nt.assert_true(self.m[1].active) def test_all_components_multidim(self): self.m[0].active_is_multidimensional = True s = self.m.as_signal(show_progressbar=None) nt.assert_true(np.all(s.data == 4.)) self.m[0]._active_array[0] = False s = self.m.as_signal(show_progressbar=None) np.testing.assert_array_equal( s.data, np.array([np.ones(5) * 2, np.ones(5) * 4])) nt.assert_true(self.m[0].active_is_multidimensional) def test_one_component_multidim(self): self.m[0].active_is_multidimensional = True s = self.m.as_signal(component_list=[0], show_progressbar=None) nt.assert_true(np.all(s.data == 2.)) nt.assert_true(self.m[1].active) nt.assert_false(self.m[1].active_is_multidimensional) s = self.m.as_signal(component_list=[1], show_progressbar=None) np.testing.assert_equal(s.data, 2.) nt.assert_true(self.m[0].active_is_multidimensional) self.m[0]._active_array[0] = False s = self.m.as_signal(component_list=[1], show_progressbar=None) nt.assert_true(np.all(s.data == 2.)) s = self.m.as_signal(component_list=[0], show_progressbar=None) np.testing.assert_array_equal(s.data, np.array([np.zeros(5), np.ones(5) * 2])) class TestCreateModel: def setUp(self): self.s = hs.signals.Spectrum(np.asarray([0, ])) self.im = hs.signals.Image(np.ones([1, 1, ])) def test_create_model(self): from hyperspy.models.model1D import Model1D from hyperspy.models.model2D import Model2D nt.assert_is_instance( self.s.create_model(), Model1D) nt.assert_is_instance( self.im.create_model(), Model2D) class TestAdjustPosition: def setUp(self): self.s = hs.signals.Spectrum(np.random.rand(10, 10, 20)) self.m = self.s.create_model() def test_enable_adjust_position(self): self.m.append(hs.model.components.Gaussian()) self.m.enable_adjust_position() nt.assert_equal(len(self.m._position_widgets), 1) # Check 
that both line and label was added nt.assert_equal(len(list(self.m._position_widgets.values())[0]), 2) def test_disable_adjust_position(self): self.m.append(hs.model.components.Gaussian()) self.m.enable_adjust_position() self.m.disable_adjust_position() nt.assert_equal(len(self.m._position_widgets), 0) def test_enable_all(self): self.m.append(hs.model.components.Gaussian()) self.m.enable_adjust_position() self.m.append(hs.model.components.Gaussian()) nt.assert_equal(len(self.m._position_widgets), 2) def test_enable_all_zero_start(self): self.m.enable_adjust_position() self.m.append(hs.model.components.Gaussian()) nt.assert_equal(len(self.m._position_widgets), 1) def test_manual_close(self): self.m.append(hs.model.components.Gaussian()) self.m.append(hs.model.components.Gaussian()) self.m.enable_adjust_position() list(self.m._position_widgets.values())[0][0].close() nt.assert_equal(len(self.m._position_widgets), 2) nt.assert_equal(len(list(self.m._position_widgets.values())[0]), 1) list(self.m._position_widgets.values())[0][0].close() nt.assert_equal(len(self.m._position_widgets), 1) nt.assert_equal(len(list(self.m._position_widgets.values())[0]), 2) self.m.disable_adjust_position() nt.assert_equal(len(self.m._position_widgets), 0)
unknown
codeparrot/codeparrot-clean
<?php namespace Illuminate\Support; use Carbon\CarbonInterface; use Carbon\CarbonInterval; use Illuminate\Support\Defer\DeferredCallback; use Illuminate\Support\Defer\DeferredCallbackCollection; use Illuminate\Support\Facades\Date; use Symfony\Component\Process\PhpExecutableFinder; if (! function_exists('Illuminate\Support\defer')) { /** * Defer execution of the given callback. * * @param callable|null $callback * @param string|null $name * @param bool $always * @return ($callback is null ? \Illuminate\Support\Defer\DeferredCallbackCollection : \Illuminate\Support\Defer\DeferredCallback) */ function defer(?callable $callback = null, ?string $name = null, bool $always = false): DeferredCallback|DeferredCallbackCollection { if ($callback === null) { return app(DeferredCallbackCollection::class); } return tap( new DeferredCallback($callback, $name, $always), fn ($deferred) => app(DeferredCallbackCollection::class)[] = $deferred ); } } if (! function_exists('Illuminate\Support\php_binary')) { /** * Determine the PHP Binary. */ function php_binary(): string { return (new PhpExecutableFinder)->find(false) ?: 'php'; } } if (! function_exists('Illuminate\Support\artisan_binary')) { /** * Determine the proper Artisan executable. */ function artisan_binary(): string { return defined('ARTISAN_BINARY') ? ARTISAN_BINARY : 'artisan'; } } // Time functions... if (! function_exists('Illuminate\Support\now')) { /** * Create a new Carbon instance for the current time. * * @param \DateTimeZone|\UnitEnum|string|null $tz * @return \Illuminate\Support\Carbon */ function now($tz = null): CarbonInterface { return Date::now(enum_value($tz)); } } if (! function_exists('Illuminate\Support\microseconds')) { /** * Get the current date / time plus the given number of microseconds. */ function microseconds(int|float $microseconds): CarbonInterval { return CarbonInterval::microseconds($microseconds); } } if (! 
function_exists('Illuminate\Support\milliseconds')) { /** * Get the current date / time plus the given number of milliseconds. */ function milliseconds(int|float $milliseconds): CarbonInterval { return CarbonInterval::milliseconds($milliseconds); } } if (! function_exists('Illuminate\Support\seconds')) { /** * Get the current date / time plus the given number of seconds. */ function seconds(int|float $seconds): CarbonInterval { return CarbonInterval::seconds($seconds); } } if (! function_exists('Illuminate\Support\minutes')) { /** * Get the current date / time plus the given number of minutes. */ function minutes(int|float $minutes): CarbonInterval { return CarbonInterval::minutes($minutes); } } if (! function_exists('Illuminate\Support\hours')) { /** * Get the current date / time plus the given number of hours. */ function hours(int|float $hours): CarbonInterval { return CarbonInterval::hours($hours); } } if (! function_exists('Illuminate\Support\days')) { /** * Get the current date / time plus the given number of days. */ function days(int|float $days): CarbonInterval { return CarbonInterval::days($days); } } if (! function_exists('Illuminate\Support\weeks')) { /** * Get the current date / time plus the given number of weeks. */ function weeks(int $weeks): CarbonInterval { return CarbonInterval::weeks($weeks); } } if (! function_exists('Illuminate\Support\months')) { /** * Get the current date / time plus the given number of months. */ function months(int $months): CarbonInterval { return CarbonInterval::months($months); } } if (! function_exists('Illuminate\Support\years')) { /** * Get the current date / time plus the given number of years. */ function years(int $years): CarbonInterval { return CarbonInterval::years($years); } }
php
github
https://github.com/laravel/framework
src/Illuminate/Support/functions.php
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # This file was automatically generated from examples/modular-transformers/modular_new_imgproc_model.py. # Do NOT edit this file manually as any edits will be overwritten by the generation of # the file from the modular. If any change should be done, please apply the change to the # modular_new_imgproc_model.py file directly. One of our CI enforces this. # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 import numpy as np import torch from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import convert_to_rgb, resize, to_channel_dimension_format from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, make_flat_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments, ) from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging if is_vision_available(): import PIL logger = logging.get_logger(__name__) class ImgprocModelImageProcessor(BaseImageProcessor): r""" Constructs a IMGPROC_MODEL image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the `do_resize` parameter in the `preprocess` method. size (`dict`, *optional*, defaults to `{"height": 384, "width": 384}`): Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`): Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`. Can be overridden by the `resample` parameter in the `preprocess` method. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. 
Can be overridden by the `do_rescale` parameter in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Only has an effect if `do_rescale` is set to `True`. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. Can be overridden by the `do_normalize` parameter in the `preprocess` method. image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. Can be overridden by the `image_std` parameter in the `preprocess` method. do_convert_rgb (`bool`, *optional*, defaults to `True`): Whether to convert the image to RGB. 
""" model_input_names = ["pixel_values"] def __init__( self, do_resize: bool = True, size: dict[str, int] | None = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_rescale: bool = True, rescale_factor: int | float = 1 / 255, do_normalize: bool = True, image_mean: float | list[float] | None = None, image_std: float | list[float] | None = None, do_convert_rgb: bool = True, **kwargs, ) -> None: super().__init__(**kwargs) size = size if size is not None else {"height": 384, "width": 384} size = get_size_dict(size, default_to_square=True) self.do_resize = do_resize self.size = size self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD self.do_convert_rgb = do_convert_rgb def resize( self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: str | ChannelDimension | None = None, input_data_format: str | ChannelDimension | None = None, **kwargs, ) -> np.ndarray: """ Resize an image to `(size["height"], size["width"])`. Args: image (`np.ndarray`): Image to resize. size (`dict[str, int]`): Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`. data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. 
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. Returns: `np.ndarray`: The resized image. """ size = get_size_dict(size) if "height" not in size or "width" not in size: raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}") output_size = (size["height"], size["width"]) return resize( image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) @filter_out_non_signature_kwargs() def preprocess( self, images: ImageInput, do_resize: bool | None = None, size: dict[str, int] | None = None, resample: PILImageResampling | None = None, do_rescale: bool | None = None, rescale_factor: float | None = None, do_normalize: bool | None = None, image_mean: float | list[float] | None = None, image_std: float | list[float] | None = None, return_tensors: str | TensorType | None = None, do_convert_rgb: bool | None = None, data_format: ChannelDimension = ChannelDimension.FIRST, input_data_format: str | ChannelDimension | None = None, ) -> PIL.Image.Image: """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`dict[str, int]`, *optional*, defaults to `self.size`): Controls the size of the image after `resize`. 
The shortest edge of the image is resized to `size["shortest_edge"]` whilst preserving the aspect ratio. If the longest edge of this resized image is > `int(size["shortest_edge"] * (1333 / 800))`, then the image is resized again to make the longest edge equal to `int(size["shortest_edge"] * (1333 / 800))`. resample (`PILImageResampling`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image values between [0 - 1]. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`): Image mean to normalize the image by if `do_normalize` is set to `True`. image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to normalize the image by if `do_normalize` is set to `True`. do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`): Whether to convert the image to RGB. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. 
input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ do_resize = do_resize if do_resize is not None else self.do_resize resample = resample if resample is not None else self.resample do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb size = size if size is not None else self.size size = get_size_dict(size, default_to_square=False) images = self.fetch_images(images) images = make_flat_list_of_images(images) if not valid_images(images): raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor") validate_preprocess_arguments( do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample, ) # PIL RGBA images are converted to RGB if do_convert_rgb: images = [convert_to_rgb(image) for image in images] # All transformations expect numpy arrays. images = [to_numpy_array(image) for image in images] if do_rescale and is_scaled_image(images[0]): logger.warning_once( "It looks like you are trying to rescale already rescaled images. 
If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) if input_data_format is None: # We assume that all images have the same channel dimension format. input_data_format = infer_channel_dimension_format(images[0]) if do_resize: images = [ self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) for image in images ] if do_rescale: images = [ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images ] if do_normalize: images = [ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images ] images = [ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images ] encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors) return encoded_outputs def new_image_processing_method(self, pixel_values: torch.FloatTensor): return pixel_values / 2
python
github
https://github.com/huggingface/transformers
examples/modular-transformers/image_processing_new_imgproc_model.py
# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from autosynth.change_pusher import build_pr_body, _parse_trailers from integration_tests import util def test_build_pr_body_with_synth_log(): synth_log = "The best pull request ever!" pr_body = build_pr_body(synth_log) assert pr_body.find(synth_log) > -1 def test_build_pr_body_with_kokoro_build_id(): with util.ModifiedEnvironment({"KOKORO_BUILD_ID": "42"}): pr_body = build_pr_body("") assert ( pr_body.find("https://source.cloud.google.com/results/invocations/42") > -1 ) def test_build_pr_body_with_synth_log_and_kokoro_build_id(): with util.ModifiedEnvironment({"KOKORO_BUILD_ID": "42"}): synth_log = "A great pull request." 
pr_body = build_pr_body(synth_log) assert ( pr_body.find("https://source.cloud.google.com/results/invocations/42") > -1 ) assert pr_body.find(synth_log) > -1 def test_build_pr_body_with_very_long_synth_log(): with util.ModifiedEnvironment({"KOKORO_BUILD_ID": "42"}): synth_log = "abcdefghi\n" * 10000 pr_body = build_pr_body(synth_log) assert ( pr_body.find("https://source.cloud.google.com/results/invocations/42") > -1 ) assert pr_body.find("abcdefghi") > -1 assert pr_body.find("[LOG TRUNCATED]") > -1 assert len(pr_body) < 60000 def test_build_pr_body_with_synth_trailers(): synth_log = "synth log" pr_body = build_pr_body(synth_log, "a: b\nc: d") assert pr_body.find("a: b") > -1 assert pr_body.find("c: d") > -1 def test_parse_trailers(): text = """ Source-Author: Google APIs <noreply@google.com> Source-Date: Mon Apr 13 12:05:23 2020 -0700 Source-Repo: googleapis/googleapis Source-Sha: 4d61e1cb40184a7ad63ef37b1813f6608718674a Source-Link: https://github.com/googleapis/googleapis/commit/4d61e1cb40184a7ad63ef37b1813f6608718674a * Removing erroneous comment, a la https://github.com/googleapis/java-speech/pull/103 PiperOrigin-RevId: 296332968 Source-Author: Google APIs <noreply@google.com> Source-Date: Thu Feb 20 17:19:15 2020 -0800 Source-Repo: googleapis/googleapis Source-Sha: 17567c4a1ef0a9b50faa87024d66f8acbb561089 Source-Link: https://github.com/googleapis/googleapis/commit/17567c4a1ef0a9b50faa87024d66f8acbb561089 * changes without context autosynth cannot find the source of changes triggered by earlier changes in this repository, or by version upgrades to tools such as linters. """.strip() trailers = _parse_trailers(text) golden_trailers = "Source-Link: https://github.com/googleapis/googleapis/commit/4d61e1cb40184a7ad63ef37b1813f6608718674a\nPiperOrigin-RevId: 296332968\nSource-Link: https://github.com/googleapis/googleapis/commit/17567c4a1ef0a9b50faa87024d66f8acbb561089" assert trailers == golden_trailers
unknown
codeparrot/codeparrot-clean
<?php namespace Illuminate\Foundation\Support\Providers; use Closure; use Illuminate\Contracts\Routing\UrlGenerator; use Illuminate\Routing\Router; use Illuminate\Support\ServiceProvider; use Illuminate\Support\Traits\ForwardsCalls; /** * @mixin \Illuminate\Routing\Router */ class RouteServiceProvider extends ServiceProvider { use ForwardsCalls; /** * The controller namespace for the application. * * @var string|null */ protected $namespace; /** * The callback that should be used to load the application's routes. * * @var \Closure|null */ protected $loadRoutesUsing; /** * The global callback that should be used to load the application's routes. * * @var \Closure|null */ protected static $alwaysLoadRoutesUsing; /** * The callback that should be used to load the application's cached routes. * * @var \Closure|null */ protected static $alwaysLoadCachedRoutesUsing; /** * Register any application services. * * @return void */ public function register() { $this->booted(function () { $this->setRootControllerNamespace(); if ($this->routesAreCached()) { $this->loadCachedRoutes(); } else { $this->loadRoutes(); $this->app->booted(function () { $this->app['router']->getRoutes()->refreshNameLookups(); $this->app['router']->getRoutes()->refreshActionLookups(); }); } }); } /** * Bootstrap any application services. * * @return void */ public function boot() { // } /** * Register the callback that will be used to load the application's routes. * * @param \Closure $routesCallback * @return $this */ protected function routes(Closure $routesCallback) { $this->loadRoutesUsing = $routesCallback; return $this; } /** * Register the callback that will be used to load the application's routes. * * @param \Closure|null $routesCallback * @return void */ public static function loadRoutesUsing(?Closure $routesCallback) { self::$alwaysLoadRoutesUsing = $routesCallback; } /** * Register the callback that will be used to load the application's cached routes. 
* * @param \Closure|null $routesCallback * @return void */ public static function loadCachedRoutesUsing(?Closure $routesCallback) { self::$alwaysLoadCachedRoutesUsing = $routesCallback; } /** * Set the root controller namespace for the application. * * @return void */ protected function setRootControllerNamespace() { if (! is_null($this->namespace)) { $this->app[UrlGenerator::class]->setRootControllerNamespace($this->namespace); } } /** * Determine if the application routes are cached. * * @return bool */ protected function routesAreCached() { return $this->app->routesAreCached(); } /** * Load the cached routes for the application. * * @return void */ protected function loadCachedRoutes() { if (! is_null(self::$alwaysLoadCachedRoutesUsing)) { $this->app->call(self::$alwaysLoadCachedRoutesUsing); return; } $this->app->booted(function () { require $this->app->getCachedRoutesPath(); }); } /** * Load the application routes. * * @return void */ protected function loadRoutes() { if (! is_null(self::$alwaysLoadRoutesUsing)) { $this->app->call(self::$alwaysLoadRoutesUsing); } if (! is_null($this->loadRoutesUsing)) { $this->app->call($this->loadRoutesUsing); } elseif (method_exists($this, 'map')) { $this->app->call([$this, 'map']); } } /** * Pass dynamic methods onto the router instance. * * @param string $method * @param array $parameters * @return mixed */ public function __call($method, $parameters) { return $this->forwardCallTo( $this->app->make(Router::class), $method, $parameters ); } }
php
github
https://github.com/laravel/framework
src/Illuminate/Foundation/Support/Providers/RouteServiceProvider.php
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import itertools import operator import uuid from functools import partial from inspect import getmembers from io import FileIO from six import iteritems, string_types, text_type from jinja2.exceptions import UndefinedError from ansible.errors import AnsibleParserError from ansible.parsing import DataLoader from ansible.playbook.attribute import Attribute, FieldAttribute from ansible.template import Templar from ansible.utils.boolean import boolean from ansible.utils.debug import debug from ansible.utils.vars import combine_vars, isidentifier from ansible.template import template class Base: # connection/transport _connection = FieldAttribute(isa='string') _port = FieldAttribute(isa='int') _remote_user = FieldAttribute(isa='string') # variables _vars = FieldAttribute(isa='dict', default=dict(), priority=100) # flags and misc. 
settings _environment = FieldAttribute(isa='list') _no_log = FieldAttribute(isa='bool') # param names which have been deprecated/removed DEPRECATED_ATTRIBUTES = [ 'sudo', 'sudo_user', 'sudo_pass', 'sudo_exe', 'sudo_flags', 'su', 'su_user', 'su_pass', 'su_exe', 'su_flags', ] def __init__(self): # initialize the data loader and variable manager, which will be provided # later when the object is actually loaded self._loader = None self._variable_manager = None # every object gets a random uuid: self._uuid = uuid.uuid4() # and initialize the base attributes self._initialize_base_attributes() try: from __main__ import display self._display = display except ImportError: from ansible.utils.display import Display self._display = Display() # The following three functions are used to programatically define data # descriptors (aka properties) for the Attributes of all of the playbook # objects (tasks, blocks, plays, etc). # # The function signature is a little strange because of how we define # them. We use partial to give each method the name of the Attribute that # it is for. Since partial prefills the positional arguments at the # beginning of the function we end up with the first positional argument # being allocated to the name instead of to the class instance (self) as # normal. To deal with that we make the property name field the first # positional argument and self the second arg. # # Because these methods are defined inside of the class, they get bound to # the instance when the object is created. After we run partial on them # and put the result back into the class as a property, they get bound # a second time. This leads to self being placed in the arguments twice. # To work around that, we mark the functions as @staticmethod so that the # first binding to the instance doesn't happen. 
@staticmethod def _generic_g(prop_name, self): method = "_get_attr_%s" % prop_name if hasattr(self, method): return getattr(self, method)() return self._attributes[prop_name] @staticmethod def _generic_s(prop_name, self, value): self._attributes[prop_name] = value @staticmethod def _generic_d(prop_name, self): del self._attributes[prop_name] def _get_base_attributes(self): ''' Returns the list of attributes for this class (or any subclass thereof). If the attribute name starts with an underscore, it is removed ''' base_attributes = dict() for (name, value) in getmembers(self.__class__): if isinstance(value, Attribute): if name.startswith('_'): name = name[1:] base_attributes[name] = value return base_attributes def _initialize_base_attributes(self): # each class knows attributes set upon it, see Task.py for example self._attributes = dict() for (name, value) in self._get_base_attributes().items(): getter = partial(self._generic_g, name) setter = partial(self._generic_s, name) deleter = partial(self._generic_d, name) # Place the property into the class so that cls.name is the # property functions. setattr(Base, name, property(getter, setter, deleter)) # Place the value into the instance so that the property can # process and hold that value/ setattr(self, name, value.default) def preprocess_data(self, ds): ''' infrequently used method to do some pre-processing of legacy terms ''' for base_class in self.__class__.mro(): method = getattr(self, "_preprocess_data_%s" % base_class.__name__.lower(), None) if method: return method(ds) return ds def load_data(self, ds, variable_manager=None, loader=None): ''' walk the input datastructure and assign any values ''' assert ds is not None # the variable manager class is used to manage and merge variables # down to a single dictionary for reference in templating, etc. 
self._variable_manager = variable_manager # the data loader class is used to parse data from strings and files if loader is not None: self._loader = loader else: self._loader = DataLoader() # call the preprocess_data() function to massage the data into # something we can more easily parse, and then call the validation # function on it to ensure there are no incorrect key values ds = self.preprocess_data(ds) self._validate_attributes(ds) # Walk all attributes in the class. We sort them based on their priority # so that certain fields can be loaded before others, if they are dependent. # FIXME: we currently don't do anything with private attributes but # may later decide to filter them out of 'ds' here. base_attributes = self._get_base_attributes() for name, attr in sorted(base_attributes.items(), key=operator.itemgetter(1)): # copy the value over unless a _load_field method is defined if name in ds: method = getattr(self, '_load_%s' % name, None) if method: self._attributes[name] = method(name, ds[name]) else: self._attributes[name] = ds[name] # run early, non-critical validation self.validate() # cache the datastructure internally setattr(self, '_ds', ds) # return the constructed object return self def get_ds(self): try: return getattr(self, '_ds') except AttributeError: return None def get_loader(self): return self._loader def get_variable_manager(self): return self._variable_manager def _validate_attributes(self, ds): ''' Ensures that there are no keys in the datastructure which do not map to attributes for this object. 
''' valid_attrs = frozenset(name for name in self._get_base_attributes()) for key in ds: if key not in valid_attrs: raise AnsibleParserError("'%s' is not a valid attribute for a %s" % (key, self.__class__.__name__), obj=ds) def validate(self, all_vars=dict()): ''' validation that is done at parse time, not load time ''' # walk all fields in the object for (name, attribute) in iteritems(self._get_base_attributes()): # run validator only if present method = getattr(self, '_validate_%s' % name, None) if method: method(attribute, name, getattr(self, name)) else: # and make sure the attribute is of the type it should be value = getattr(self, name) if value is not None: if attribute.isa == 'string' and isinstance(value, (list, dict)): raise AnsibleParserError("The field '%s' is supposed to be a string type, however the incoming data structure is a %s" % (name, type(value)), obj=self.get_ds()) def copy(self): ''' Create a copy of this object and return it. ''' new_me = self.__class__() for name in self._get_base_attributes(): setattr(new_me, name, getattr(self, name)) new_me._loader = self._loader new_me._variable_manager = self._variable_manager # if the ds value was set on the object, copy it to the new copy too if hasattr(self, '_ds'): new_me._ds = self._ds return new_me def post_validate(self, templar): ''' we can't tell that everything is of the right type until we have all the variables. Run basic types (from isa) as well as any _post_validate_<foo> functions. 
''' basedir = None if self._loader is not None: basedir = self._loader.get_basedir() # save the omit value for later checking omit_value = templar._available_variables.get('omit') for (name, attribute) in iteritems(self._get_base_attributes()): if getattr(self, name) is None: if not attribute.required: continue else: raise AnsibleParserError("the field '%s' is required but was not set" % name) elif not attribute.always_post_validate and self.__class__.__name__ not in ('Task', 'Handler', 'PlayContext'): # Intermediate objects like Play() won't have their fields validated by # default, as their values are often inherited by other objects and validated # later, so we don't want them to fail out early continue try: # Run the post-validator if present. These methods are responsible for # using the given templar to template the values, if required. method = getattr(self, '_post_validate_%s' % name, None) if method: value = method(attribute, getattr(self, name), templar) else: # if the attribute contains a variable, template it now value = templar.template(getattr(self, name)) # if this evaluated to the omit value, set the value back to # the default specified in the FieldAttribute and move on if omit_value is not None and value == omit_value: value = attribute.default continue # and make sure the attribute is of the type it should be if value is not None: if attribute.isa == 'string': value = text_type(value) elif attribute.isa == 'int': value = int(value) elif attribute.isa == 'float': value = float(value) elif attribute.isa == 'bool': value = boolean(value) elif attribute.isa == 'percent': # special value, which may be an integer or float # with an optional '%' at the end if isinstance(value, string_types) and '%' in value: value = value.replace('%', '') value = float(value) elif attribute.isa == 'list': if value is None: value = [] elif not isinstance(value, list): value = [ value ] if attribute.listof is not None: for item in value: if not isinstance(item, 
attribute.listof): raise AnsibleParserError("the field '%s' should be a list of %s, but the item '%s' is a %s" % (name, attribute.listof, item, type(item)), obj=self.get_ds()) elif attribute.required and attribute.listof == string_types: if item is None or item.strip() == "": raise AnsibleParserError("the field '%s' is required, and cannot have empty values" % (name,), obj=self.get_ds()) elif attribute.isa == 'set': if value is None: value = set() else: if not isinstance(value, (list, set)): value = [ value ] if not isinstance(value, set): value = set(value) elif attribute.isa == 'dict': if value is None: value = dict() elif not isinstance(value, dict): raise TypeError("%s is not a dictionary" % value) # and assign the massaged value back to the attribute field setattr(self, name, value) except (TypeError, ValueError) as e: raise AnsibleParserError("the field '%s' has an invalid value (%s), and could not be converted to an %s. Error was: %s" % (name, value, attribute.isa, e), obj=self.get_ds()) except UndefinedError as e: if templar._fail_on_undefined_errors and name != 'name': raise AnsibleParserError("the field '%s' has an invalid value, which appears to include a variable that is undefined. The error was: %s" % (name,e), obj=self.get_ds()) def serialize(self): ''' Serializes the object derived from the base object into a dictionary of values. This only serializes the field attributes for the object, so this may need to be overridden for any classes which wish to add additional items not stored as field attributes. ''' repr = dict() for name in self._get_base_attributes(): repr[name] = getattr(self, name) # serialize the uuid field repr['uuid'] = getattr(self, '_uuid') return repr def deserialize(self, data): ''' Given a dictionary of values, load up the field attributes for this object. As with serialize(), if there are any non-field attribute data members, this method will need to be overridden and extended. 
''' assert isinstance(data, dict) for (name, attribute) in iteritems(self._get_base_attributes()): if name in data: setattr(self, name, data[name]) else: setattr(self, name, attribute.default) # restore the UUID field setattr(self, '_uuid', data.get('uuid')) def _load_vars(self, attr, ds): ''' Vars in a play can be specified either as a dictionary directly, or as a list of dictionaries. If the later, this method will turn the list into a single dictionary. ''' def _validate_variable_keys(ds): for key in ds: if not isidentifier(key): raise TypeError("%s is not a valid variable name" % key) try: if isinstance(ds, dict): _validate_variable_keys(ds) return ds elif isinstance(ds, list): all_vars = dict() for item in ds: if not isinstance(item, dict): raise ValueError _validate_variable_keys(item) all_vars = combine_vars(all_vars, item) return all_vars elif ds is None: return {} else: raise ValueError except ValueError: raise AnsibleParserError("Vars in a %s must be specified as a dictionary, or a list of dictionaries" % self.__class__.__name__, obj=ds) except TypeError as e: raise AnsibleParserError("Invalid variable name in vars specified for %s: %s" % (self.__class__.__name__, e), obj=ds) def _extend_value(self, value, new_value): ''' Will extend the value given with new_value (and will turn both into lists if they are not so already). The values are run through a set to remove duplicate values. ''' if not isinstance(value, list): value = [ value ] if not isinstance(new_value, list): new_value = [ new_value ] #return list(set(value + new_value)) return [i for i,_ in itertools.groupby(value + new_value)] def __getstate__(self): return self.serialize() def __setstate__(self, data): self.__init__() self.deserialize(data)
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python # Copyright (c) 2006 Damien Miller <djm@mindrot.org> # # Permission to use, copy, modify, and distribute this software for any # purpose with or without fee is hereby granted, provided that the above # copyright notice and this permission notice appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. # $Id: test.py,v 1.3 2007/05/03 23:36:36 djm Exp $ import editdist import unittest import random test_vectors = ( ( 'abc', 'abc', 0 ), ( 'abc', 'ab', 1 ), ( 'abc', 'abcd', 1 ), ( 'abc', 'bc', 1 ), ( 'abc', 'a', 2 ), ( 'abc', '', 3 ), ( '', '', 0 ), ( 'abc', 'acx', 2 ), ( 'abc', 'acxx', 3 ), ( 'abc', 'bcd', 2 ), ( 'a' * 1000, 'a' * 1000, 0 ), ( 'a' * 1000, 'b' * 1000, 1000), ) def randstring(l): a = "abcdefghijklmnopqrstuvwxyz" r = "" for i in range(0, l): r += a[random.randint(0, len(a) - 1)] return r class TestRadix(unittest.TestCase): def test_00__test_vectors(self): for a, b, score in test_vectors: self.assertEqual(editdist.distance(a, b), score) def test_01__reversed_test_vectors(self): for b, a, score in test_vectors: self.assertEqual(editdist.distance(a, b), score) def test_02__fuzz(self): for i in range(0, 32) + range(128, 1024, 128): for j in range(0, 32): a = randstring(i) b = randstring(j) dist = editdist.distance(a, b) self.assert_(dist >= 0) def main(): unittest.main() if __name__ == '__main__': main()
unknown
codeparrot/codeparrot-clean
# frozen_string_literal: true require "cases/helper" class ModelTest < ActiveModel::TestCase include ActiveModel::Lint::Tests module DefaultValue def self.included(klass) klass.class_eval { attr_accessor :hello } end def initialize(*args) @attr ||= "default value" super end end class BasicModel include DefaultValue include ActiveModel::Model attr_accessor :attr end class BasicModelWithReversedMixins include ActiveModel::Model include DefaultValue attr_accessor :attr end class SimpleModel include ActiveModel::Model attr_accessor :attr end def setup @model = BasicModel.new end def test_initialize_with_params object = BasicModel.new(attr: "value") assert_equal "value", object.attr end def test_initialize_with_params_and_mixins_reversed object = BasicModelWithReversedMixins.new(attr: "value") assert_equal "value", object.attr end def test_initialize_with_nil_or_empty_hash_params_does_not_explode assert_nothing_raised do BasicModel.new() BasicModel.new(nil) BasicModel.new({}) SimpleModel.new(attr: "value") end end def test_persisted_is_always_false object = BasicModel.new(attr: "value") assert_not object.persisted? end def test_mixin_inclusion_chain object = BasicModel.new assert_equal "default value", object.attr end def test_mixin_initializer_when_args_exist object = BasicModel.new(hello: "world") assert_equal "world", object.hello end def test_mixin_initializer_when_args_dont_exist assert_raises(ActiveModel::UnknownAttributeError) do SimpleModel.new(hello: "world") end end def test_load_hook_is_called value = "not loaded" ActiveSupport.on_load(:active_model) { value = "loaded" } assert_equal "loaded", value end end
ruby
github
https://github.com/rails/rails
activemodel/test/cases/model_test.rb
## Input ```javascript function foo(a, b, c) { const x = []; const y = []; if (x) { } y.push(a); x.push(b); } export const FIXTURE_ENTRYPOINT = { fn: foo, params: ['TodoAdd'], isComponent: 'TodoAdd', }; ``` ## Code ```javascript function foo(a, b, c) { const x = []; const y = []; if (x) { } y.push(a); x.push(b); } export const FIXTURE_ENTRYPOINT = { fn: foo, params: ["TodoAdd"], isComponent: "TodoAdd", }; ```
unknown
github
https://github.com/facebook/react
compiler/packages/babel-plugin-react-compiler/src/__tests__/fixtures/compiler/overlapping-scopes-interleaved-by-terminal.expect.md
import ipaddress from functools import lru_cache try: from psycopg import ClientCursor, IsolationLevel, adapt, adapters, errors, sql from psycopg.postgres import types from psycopg.types.datetime import TimestamptzLoader from psycopg.types.json import Jsonb from psycopg.types.range import Range, RangeDumper from psycopg.types.string import TextLoader Inet = ipaddress.ip_address DateRange = DateTimeRange = DateTimeTZRange = NumericRange = Range RANGE_TYPES = (Range,) TSRANGE_OID = types["tsrange"].oid TSTZRANGE_OID = types["tstzrange"].oid def mogrify(sql, params, connection): with connection.cursor() as cursor: return ClientCursor(cursor.connection).mogrify(sql, params) # Adapters. class BaseTzLoader(TimestamptzLoader): """ Load a PostgreSQL timestamptz using the a specific timezone. The timezone can be None too, in which case it will be chopped. """ timezone = None def load(self, data): res = super().load(data) return res.replace(tzinfo=self.timezone) def register_tzloader(tz, context): class SpecificTzLoader(BaseTzLoader): timezone = tz context.adapters.register_loader("timestamptz", SpecificTzLoader) class DjangoRangeDumper(RangeDumper): """A Range dumper customized for Django.""" def upgrade(self, obj, format): # Dump ranges containing naive datetimes as tstzrange, because # Django doesn't use tz-aware ones. dumper = super().upgrade(obj, format) if dumper is not self and dumper.oid == TSRANGE_OID: dumper.oid = TSTZRANGE_OID return dumper @lru_cache def get_adapters_template(use_tz, timezone): # Create an adapters map extending the base one. ctx = adapt.AdaptersMap(adapters) # Register a no-op dumper to avoid a round trip from psycopg version 3 # decode to json.dumps() to json.loads(), when using a custom decoder # in JSONField. ctx.register_loader("jsonb", TextLoader) # Don't convert automatically from PostgreSQL network types to Python # ipaddress. 
ctx.register_loader("inet", TextLoader) ctx.register_loader("cidr", TextLoader) ctx.register_dumper(Range, DjangoRangeDumper) # Register a timestamptz loader configured on self.timezone. # This, however, can be overridden by create_cursor. register_tzloader(timezone, ctx) return ctx is_psycopg3 = True except ImportError: from enum import IntEnum from psycopg2 import errors, extensions, sql # NOQA from psycopg2.extras import ( # NOQA DateRange, DateTimeRange, DateTimeTZRange, Inet, Json, NumericRange, Range, ) RANGE_TYPES = (DateRange, DateTimeRange, DateTimeTZRange, NumericRange) class IsolationLevel(IntEnum): READ_UNCOMMITTED = extensions.ISOLATION_LEVEL_READ_UNCOMMITTED READ_COMMITTED = extensions.ISOLATION_LEVEL_READ_COMMITTED REPEATABLE_READ = extensions.ISOLATION_LEVEL_REPEATABLE_READ SERIALIZABLE = extensions.ISOLATION_LEVEL_SERIALIZABLE def _quote(value, connection=None): adapted = extensions.adapt(value) if hasattr(adapted, "encoding"): adapted.encoding = "utf8" # getquoted() returns a quoted bytestring of the adapted value. return adapted.getquoted().decode() sql.quote = _quote def mogrify(sql, params, connection): with connection.cursor() as cursor: return cursor.mogrify(sql, params).decode() is_psycopg3 = False class Jsonb(Json): def getquoted(self): quoted = super().getquoted() return quoted + b"::jsonb"
python
github
https://github.com/django/django
django/db/backends/postgresql/psycopg_any.py
# coding=utf-8 # Copyright 2016 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement) from pants.backend.jvm.targets.jar_dependency import JarDependency from pants.base.payload import Payload from pants.base.payload_field import JarsField from pants.build_graph.target import Target class ManagedJarDependencies(Target): """A set of pinned external artifact versions to apply transitively.""" def __init__(self, payload=None, artifacts=None, **kwargs): """ :param artifacts: List of `jar <#jar>`_\s or specs to jar_library targets with pinned versions. Versions are pinned per (org, name, classifier, ext) artifact coordinate (excludes, etc are ignored for the purposes of pinning). """ jar_objects, self._library_specs = self._split_jars_and_specs(artifacts) payload = payload or Payload() payload.add_fields({ 'artifacts': JarsField(jar_objects), }) super(ManagedJarDependencies, self).__init__(payload=payload, **kwargs) @property def traversable_specs(self): return iter(self.library_specs) @property def library_specs(self): """Lists of specs to resolve to jar_libraries containing more jars.""" return self._library_specs def _split_jars_and_specs(self, jars): library_specs = [] jar_objects = [] for item in jars: if isinstance(item, JarDependency): jar_objects.append(item) else: library_specs.append(item) return jar_objects, library_specs
unknown
codeparrot/codeparrot-clean
#!/bin/sh test_description='rebase topology tests with merges' . ./test-lib.sh . "$TEST_DIRECTORY"/lib-rebase.sh test_revision_subjects () { expected="$1" shift set -- $(git log --format=%s --no-walk=unsorted "$@") test "$expected" = "$*" } # a---b-----------c # \ \ # d-------e \ # \ \ \ # n---o---w---v # \ # z test_expect_success 'setup of non-linear-history' ' test_commit a && test_commit b && test_commit c && git checkout b && test_commit d && test_commit e && git checkout c && test_commit g && revert h g && git checkout d && cherry_pick gp g && test_commit i && git checkout b && test_commit f && git checkout d && test_commit n && test_commit o && test_merge w e && test_merge v c && git checkout o && test_commit z ' test_run_rebase () { result=$1 shift test_expect_$result "rebase $* after merge from upstream" " reset_rebase && git rebase $* e w && test_cmp_rev e HEAD~2 && test_linear_range 'n o' e.. " } test_run_rebase success --apply test_run_rebase success -m test_run_rebase success -i test_run_rebase () { result=$1 shift expected=$1 shift test_expect_$result "rebase $* of non-linear history is linearized in place" " reset_rebase && git rebase $* d w && test_cmp_rev d HEAD~3 && test_linear_range "\'"$expected"\'" d.. " } test_run_rebase success 'n o e' --apply test_run_rebase success 'n o e' -m test_run_rebase success 'n o e' -i test_run_rebase () { result=$1 shift expected=$1 shift test_expect_$result "rebase $* of non-linear history is linearized upstream" " reset_rebase && git rebase $* c w && test_cmp_rev c HEAD~4 && test_linear_range "\'"$expected"\'" c.. " } test_run_rebase success 'd n o e' --apply test_run_rebase success 'd n o e' -m test_run_rebase success 'd n o e' -i test_run_rebase () { result=$1 shift expected=$1 shift test_expect_$result "rebase $* of non-linear history with merges after upstream merge is linearized" " reset_rebase && git rebase $* c v && test_cmp_rev c HEAD~4 && test_linear_range "\'"$expected"\'" c.. 
" } test_run_rebase success 'd n o e' --apply test_run_rebase success 'd n o e' -m test_run_rebase success 'd n o e' -i test_done
unknown
github
https://github.com/git/git
t/t3425-rebase-topology-merges.sh
""" Classes dealing with task repositories and their synchronization. """ import os import subprocess from pathlib import Path from typing import Any, Iterator, Optional, Union class Repository: """Local task repository that does not perform synchronization.""" def __init__(self, path: Union[str, Path]) -> None: """ Create a repository with the given absolute directory path, which is allowed to not exist yet on the file system. """ if not path: raise ValueError("path must not be empty") _path = Path(path) if not _path.is_absolute(): raise ValueError("path must be absolute") self._path = _path @property def path(self) -> Path: """Return the path of this repository as a pathlib.Path object.""" return self._path @property def path_s(self) -> str: """Return the path of this repository as a string.""" return str(self._path) def find_files(self, glob_pattern: str) -> Iterator[Any]: """Yield all existing files in this repository which match the given pattern.""" return self._path.glob(glob_pattern) def call_make(self, timeout: int = 300) -> None: """Call the `make` command in this repository's directory.""" subprocess.check_call(["make", "--silent"], cwd=self.path_s, timeout=timeout) def synchronize(self) -> None: """Synchronize files with a remote source (optional).""" def __repr__(self) -> str: return f"{self.__class__.__name__}({self.path_s!r})" _GIT_ENVIRON = os.environ.copy() _GIT_ENVIRON["GIT_SSH_COMMAND"] = "ssh -F/dev/null -oBatchMode=yes -oStrictHostKeyChecking=no" class GitRepository(Repository): """Local task repository that can be synchronized with a remote git repository.""" def __init__( self, path: Union[str, Path], *, url: str, branch: str, timeout: Optional[int] = 30 ) -> None: """ Create a repository that can be synchronized with the given git url and branch, which must be passed as keyword arguments. The path must be absolute and may point to a directory that already contains an initialized git clone. 
Otherwise, it will be created during the first synchronization. The timeout specifies the maximum allowed runtime for the git subprocesses spawned by this class. """ super().__init__(path) if not url: raise ValueError("url must not be empty") if not branch: raise ValueError("branch must not be empty") self.url = url self.branch = branch self.timeout = timeout def update(self) -> None: """ Perform the equivalent of a `git pull` in this git clone, which must already be initialized, using a failure-resistant strategy (currently fetch + hard reset). """ self.git("remote", "set-url", "origin", self.url) self.git("fetch", "--quiet", "--depth=1", "origin", "+refs/heads/*:refs/remotes/origin/*") self.git("stash", "-u") self.git("reset", "--hard", f"origin/{self.branch}") def initialize(self) -> None: """Initialize the git clone for this repository.""" self.path.mkdir(parents=True, exist_ok=True) self.git("clone", "--quiet", "--depth=1", "--branch", self.branch, self.url, ".") def git(self, *args: str) -> None: """Perform given `git` subcommand (and arguments) in this git clone.""" subprocess.check_call( ["git"] + list(args), cwd=self.path_s, env=_GIT_ENVIRON, stdout=subprocess.DEVNULL, timeout=self.timeout, ) def synchronize(self) -> None: """Synchronize files with the remote git repository.""" if self.path.joinpath(".git").is_dir(): self.update() else: self.initialize()
unknown
codeparrot/codeparrot-clean
// Unit tests for compileTemplate: basic compilation, preprocessors, asset URL
// transforms, source maps, and AST reuse between SFC parse and template compile.
// NOTE(review): this file was recovered from a whitespace-collapsed copy; the
// exact indentation inside the pug/template string literals below is a
// best-effort reconstruction — confirm against upstream before relying on it.
import { type RawSourceMap, SourceMapConsumer } from 'source-map-js'
import { parse as babelParse } from '@babel/parser'
import {
  type SFCTemplateCompileOptions,
  compileTemplate,
} from '../src/compileTemplate'
import { type SFCTemplateBlock, parse } from '../src/parse'
import { compileScript } from '../src'
import { getPositionInCode } from './utils'

// Convenience wrapper: compileTemplate requires an `id`; these tests don't care
// about its value, so pass an empty one.
function compile(opts: Omit<SFCTemplateCompileOptions, 'id'>) {
  return compileTemplate({
    ...opts,
    id: '',
  })
}

test('should work', () => {
  const source = `<div><p>{{ render }}</p></div>`

  const result = compile({ filename: 'example.vue', source })
  expect(result.errors.length).toBe(0)
  expect(result.source).toBe(source)
  // should expose render fn
  expect(result.code).toMatch(`export function render(`)
})

// #6807
test('should work with style comment', () => {
  const source = `
  <div style="
  /* nothing */
  width: 300px;
  height: 100px/* nothing */
  ">{{ render }}</div>
  `
  const result = compile({ filename: 'example.vue', source })
  expect(result.errors.length).toBe(0)
  expect(result.source).toBe(source)
  expect(result.code).toMatch(`{"width":"300px","height":"100px"}`)
})

test('preprocess pug', () => {
  const template = parse(
    `
<template lang="pug">
body
  h1 Pug Examples
  div.container
    p Cool Pug example!
</template>
`,
    { filename: 'example.vue', sourceMap: true },
  ).descriptor.template as SFCTemplateBlock

  const result = compile({
    filename: 'example.vue',
    source: template.content,
    preprocessLang: template.lang,
  })

  expect(result.errors.length).toBe(0)
})

test('preprocess pug with indents and blank lines', () => {
  const template = parse(
    `
<template lang="pug">
body
  h1 The next line contains four spaces.
    
  div.container
    p The next line is empty.

  p This is the last line.
</template>
`,
    { filename: 'example.vue', sourceMap: true },
  ).descriptor.template as SFCTemplateBlock

  const result = compile({
    filename: 'example.vue',
    source: template.content,
    preprocessLang: template.lang,
  })

  expect(result.errors.length).toBe(0)
  expect(result.source).toBe(
    '<body><h1>The next line contains four spaces.</h1><div class="container"><p>The next line is empty.</p></div><p>This is the last line.</p></body>',
  )
})

test('warn missing preprocessor', () => {
  const template = parse(`<template lang="unknownLang">hi</template>\n`, {
    filename: 'example.vue',
    sourceMap: true,
  }).descriptor.template as SFCTemplateBlock

  const result = compile({
    filename: 'example.vue',
    source: template.content,
    preprocessLang: template.lang,
  })

  expect(result.errors.length).toBe(1)
})

test('transform asset url options', () => {
  const input = { source: `<foo bar="~baz"/>`, filename: 'example.vue' }
  // Object option
  const { code: code1 } = compile({
    ...input,
    transformAssetUrls: {
      tags: { foo: ['bar'] },
    },
  })
  expect(code1).toMatch(`import _imports_0 from 'baz'\n`)

  // legacy object option (direct tags config)
  const { code: code2 } = compile({
    ...input,
    transformAssetUrls: {
      foo: ['bar'],
    },
  })
  expect(code2).toMatch(`import _imports_0 from 'baz'\n`)

  // false option
  const { code: code3 } = compile({
    ...input,
    transformAssetUrls: false,
  })
  expect(code3).not.toMatch(`import _imports_0 from 'baz'\n`)
})

test('source map', () => {
  const template = parse(
    `
<template>
  <div><p>{{ foobar }}</p></div>
</template>
`,
    { filename: 'example.vue', sourceMap: true },
  ).descriptor.template!

  const { code, map } = compile({
    filename: 'example.vue',
    source: template.content,
  })

  expect(map!.sources).toEqual([`example.vue`])
  expect(map!.sourcesContent).toEqual([template.content])

  const consumer = new SourceMapConsumer(map as RawSourceMap)
  expect(
    consumer.originalPositionFor(getPositionInCode(code, 'foobar')),
  ).toMatchObject(getPositionInCode(template.content, `foobar`))
})

test('source map: v-if generated comment should not have original position', () => {
  const template = parse(
    `
<template>
  <div v-if="true"></div>
</template>
`,
    { filename: 'example.vue', sourceMap: true },
  ).descriptor.template!

  const { code, map } = compile({
    filename: 'example.vue',
    source: template.content,
  })

  expect(map!.sources).toEqual([`example.vue`])
  expect(map!.sourcesContent).toEqual([template.content])

  const consumer = new SourceMapConsumer(map as RawSourceMap)
  const commentNode = code.match(/_createCommentVNode\("v-if", true\)/)
  expect(commentNode).not.toBeNull()
  const commentPosition = getPositionInCode(code, commentNode![0])
  const originalPosition = consumer.originalPositionFor(commentPosition)
  // the comment node should not be mapped to the original source
  expect(originalPosition.column).toBeNull()
  expect(originalPosition.line).toBeNull()
  expect(originalPosition.source).toBeNull()
})

test('should work w/ AST from descriptor', () => {
  const source = `
  <template>
    <div><p>{{ foobar }}</p></div>
  </template>
  `
  const template = parse(source, {
    filename: 'example.vue',
    sourceMap: true,
  }).descriptor.template!

  expect(template.ast!.source).toBe(source)

  const { code, map } = compile({
    filename: 'example.vue',
    source: template.content,
    ast: template.ast,
  })

  expect(map!.sources).toEqual([`example.vue`])
  // when reusing AST from SFC parse for template compile,
  // the source corresponds to the entire SFC
  expect(map!.sourcesContent).toEqual([source])

  const consumer = new SourceMapConsumer(map as RawSourceMap)
  expect(
    consumer.originalPositionFor(getPositionInCode(code, 'foobar')),
  ).toMatchObject(getPositionInCode(source, `foobar`))

  // compiling from the reused AST must produce the same code as compiling
  // from the raw template source
  expect(code).toBe(
    compile({
      filename: 'example.vue',
      source: template.content,
    }).code,
  )
})

test('should work w/ AST from descriptor in SSR mode', () => {
  const source = `
  <template>
    <div><p>{{ foobar }}</p></div>
  </template>
  `
  const template = parse(source, {
    filename: 'example.vue',
    sourceMap: true,
  }).descriptor.template!

  expect(template.ast!.source).toBe(source)

  const { code, map } = compile({
    filename: 'example.vue',
    source: '', // make sure it's actually using the AST instead of source
    ast: template.ast,
    ssr: true,
  })

  expect(map!.sources).toEqual([`example.vue`])
  // when reusing AST from SFC parse for template compile,
  // the source corresponds to the entire SFC
  expect(map!.sourcesContent).toEqual([source])

  const consumer = new SourceMapConsumer(map as RawSourceMap)
  expect(
    consumer.originalPositionFor(getPositionInCode(code, 'foobar')),
  ).toMatchObject(getPositionInCode(source, `foobar`))

  expect(code).toBe(
    compile({
      filename: 'example.vue',
      source: template.content,
      ssr: true,
    }).code,
  )
})

test('should not reuse AST if using custom compiler', () => {
  const source = `
  <template>
    <div><p>{{ foobar }}</p></div>
  </template>
  `
  const template = parse(source, {
    filename: 'example.vue',
    sourceMap: true,
  }).descriptor.template!

  const { code } = compile({
    filename: 'example.vue',
    source: template.content,
    ast: template.ast,
    compiler: {
      parse: () => null as any,
      // @ts-expect-error
      compile: input => ({ code: input }),
    },
  })

  // what we really want to assert is that the `input` received by the custom
  // compiler is the source string, not the AST.
  expect(code).toBe(template.content)
})

test('should force re-parse on already transformed AST', () => {
  const source = `
  <template>
    <div><p>{{ foobar }}</p></div>
  </template>
  `
  const template = parse(source, {
    filename: 'example.vue',
    sourceMap: true,
  }).descriptor.template!

  // force set to empty, if this is reused then it won't generate proper code
  template.ast!.children = []
  template.ast!.transformed = true

  const { code } = compile({
    filename: 'example.vue',
    source: '',
    ast: template.ast,
  })

  expect(code).toBe(
    compile({
      filename: 'example.vue',
      source: template.content,
    }).code,
  )
})

test('should force re-parse with correct compiler in SSR mode', () => {
  const source = `
  <template>
    <div><p>{{ foobar }}</p></div>
  </template>
  `
  const template = parse(source, {
    filename: 'example.vue',
    sourceMap: true,
  }).descriptor.template!

  // force set to empty, if this is reused then it won't generate proper code
  template.ast!.children = []
  template.ast!.transformed = true

  const { code } = compile({
    filename: 'example.vue',
    source: '',
    ast: template.ast,
    ssr: true,
  })

  expect(code).toBe(
    compile({
      filename: 'example.vue',
      source: template.content,
      ssr: true,
    }).code,
  )
})

test('template errors', () => {
  const result = compile({
    filename: 'example.vue',
    source: `<div :bar="a[" v-model="baz"/>`,
  })
  expect(result.errors).toMatchSnapshot()
})

test('preprocessor errors', () => {
  const template = parse(
    `
<template lang="pug">
  div(class='class)
</template>
`,
    { filename: 'example.vue', sourceMap: true },
  ).descriptor.template as SFCTemplateBlock

  const result = compile({
    filename: 'example.vue',
    source: template.content,
    preprocessLang: template.lang,
  })
  expect(result.errors.length).toBe(1)
  const message = result.errors[0].toString()
  expect(message).toMatch(`Error: example.vue:3:1`)
  expect(message).toMatch(
    `The end of the string reached with no closing bracket ) found.`,
  )
})

// #3447
test('should generate the correct imports expression', () => {
  const { code } = compile({
    filename: 'example.vue',
    source: `
    <img src="./foo.svg"/>
    <Comp>
      <img src="./bar.svg"/>
    </Comp>
    `,
    ssr: true,
  })
  expect(code).toMatch(`_ssrRenderAttr(\"src\", _imports_1)`)
  expect(code).toMatch(`_createVNode(\"img\", { src: _imports_1 })`)
})

// #3874
test('should not hoist srcset URLs in SSR mode', () => {
  const { code } = compile({
    filename: 'example.vue',
    source: `
    <picture>
      <source srcset="./img/foo.svg"/>
      <img src="./img/foo.svg"/>
    </picture>
    <router-link>
      <picture>
        <source srcset="./img/bar.svg"/>
        <img src="./img/bar.svg"/>
      </picture>
    </router-link>
    `,
    ssr: true,
  })
  expect(code).toMatchSnapshot()
})

// #6742
test('dynamic v-on + static v-on should merged', () => {
  const source = `<input @blur="onBlur" @[validateEvent]="onValidateEvent">`

  const result = compile({ filename: 'example.vue', source })

  expect(result.code).toMatchSnapshot()
})

// #9853 regression found in Nuxt tests
// walkIdentifiers can get called multiple times on the same node
// due to #9729 calling it during SFC template usage check.
// conditions needed:
// 1. `<script setup lang="ts">`
// 2. Has import
// 3. inlineTemplate: false
// 4. AST being reused
test('prefixing edge case for reused AST', () => {
  const src = `
  <script setup lang="ts">
    import { Foo } from './foo'
  </script>
  <template>
    {{ list.map((t, index) => ({ t: t })) }}
  </template>
  `
  const { descriptor } = parse(src)
  // compileScript triggers importUsageCheck
  compileScript(descriptor, { id: 'xxx' })
  const { code } = compileTemplate({
    id: 'xxx',
    filename: 'test.vue',
    ast: descriptor.template!.ast,
    source: descriptor.template!.content,
  })
  // the slot-scoped `t` must not be rewritten to a `_ctx` access
  expect(code).not.toMatch(`_ctx.t`)
})

test('prefixing edge case for reused AST ssr mode', () => {
  const src = `
  <script setup lang="ts">
    import { Foo } from './foo'
  </script>
  <template>
    <Bar>
      <template #option="{ foo }"></template>
    </Bar>
  </template>
  `
  const { descriptor } = parse(src)
  // compileScript triggers importUsageCheck
  compileScript(descriptor, { id: 'xxx' })
  expect(() =>
    compileTemplate({
      id: 'xxx',
      filename: 'test.vue',
      ast: descriptor.template!.ast,
      source: descriptor.template!.content,
      ssr: true,
    }),
  ).not.toThrowError()
})

// #10852
test('non-identifier expression in legacy filter syntax', () => {
  const src = `
  <template>
    <div>
      Today is
      {{ new Date() | formatDate }}
    </div>
  </template>
  `
  const { descriptor } = parse(src)
  const compilationResult = compileTemplate({
    id: 'xxx',
    filename: 'test.vue',
    ast: descriptor.template!.ast,
    source: descriptor.template!.content,
    ssr: false,
    compilerOptions: {
      compatConfig: {
        MODE: 2,
      },
    },
  })
  // the emitted code must at least be syntactically valid JS
  expect(() => {
    babelParse(compilationResult.code, { sourceType: 'module' })
  }).not.toThrow()
})

test('prefixing props edge case in inline mode', () => {
  const src = `
  <script setup lang="ts">
  defineProps<{ Foo: { Bar: unknown } }>()
  </script>
  <template>
    <Foo.Bar/>
  </template>
  `
  const { descriptor } = parse(src)
  const { content } = compileScript(descriptor, {
    id: 'xxx',
    inlineTemplate: true,
  })
  expect(content).toMatchSnapshot()
  expect(content).toMatch(`__props["Foo"]).Bar`)
})
typescript
github
https://github.com/vuejs/core
packages/compiler-sfc/__tests__/compileTemplate.spec.ts
/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_KERNELS_DATA_EXPERIMENTAL_DISTRIBUTED_SAVE_OP_H_
#define TENSORFLOW_CORE_KERNELS_DATA_EXPERIMENTAL_DISTRIBUTED_SAVE_OP_H_

#include <string>

#include "tensorflow/core/framework/op_kernel.h"

namespace tensorflow {
namespace data {
namespace experimental {

// Initiates the process of distributedly saving a dataset to disk.
class DistributedSaveOp : public OpKernel {
 public:
  // Names used by this op (directory/address inputs and the metadata
  // attribute, judging by the member below; confirm against the op
  // registration in the .cc file).
  static constexpr const char* const kDirectory = "directory";
  static constexpr const char* const kAddress = "address";
  static constexpr const char* const kMetadata = "metadata";

  // Constructs the kernel from the construction context; implementation in
  // the matching .cc file.
  explicit DistributedSaveOp(OpKernelConstruction* ctx);

  // Runs the op for one invocation.
  void Compute(OpKernelContext* ctx) override;

 private:
  // Metadata captured at construction time, kept in serialized form.
  // NOTE(review): presumably a serialized proto — confirm in the .cc file.
  std::string serialized_metadata_;
};

}  // namespace experimental
}  // namespace data
}  // namespace tensorflow

#endif  // TENSORFLOW_CORE_KERNELS_DATA_EXPERIMENTAL_DISTRIBUTED_SAVE_OP_H_
c
github
https://github.com/tensorflow/tensorflow
tensorflow/core/kernels/data/experimental/distributed_save_op.h
""" Support for thr Free Mobile SMS platform. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/notify.free_mobile/ """ import logging import voluptuous as vol from homeassistant.components.notify import ( PLATFORM_SCHEMA, BaseNotificationService) from homeassistant.const import CONF_ACCESS_TOKEN, CONF_USERNAME import homeassistant.helpers.config_validation as cv _LOGGER = logging.getLogger(__name__) REQUIREMENTS = ['freesms==0.1.1'] PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_USERNAME): cv.string, vol.Required(CONF_ACCESS_TOKEN): cv.string, }) def get_service(hass, config, discovery_info=None): """Get the Free Mobile SMS notification service.""" return FreeSMSNotificationService(config[CONF_USERNAME], config[CONF_ACCESS_TOKEN]) class FreeSMSNotificationService(BaseNotificationService): """Implement a notification service for the Free Mobile SMS service.""" def __init__(self, username, access_token): """Initialize the service.""" from freesms import FreeClient self.free_client = FreeClient(username, access_token) def send_message(self, message="", **kwargs): """Send a message to the Free Mobile user cell.""" resp = self.free_client.send_sms(message) if resp.status_code == 400: _LOGGER.error("At least one parameter is missing") elif resp.status_code == 402: _LOGGER.error("Too much SMS send in a few time") elif resp.status_code == 403: _LOGGER.error("Wrong Username/Password") elif resp.status_code == 500: _LOGGER.error("Server error, try later")
unknown
codeparrot/codeparrot-clean
#!/usr/bin/python # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = """ --- module: ce_interface_ospf version_added: "2.4" short_description: Manages configuration of an OSPF interface instanceon HUAWEI CloudEngine switches. description: - Manages configuration of an OSPF interface instanceon HUAWEI CloudEngine switches. author: QijunPan (@QijunPan) notes: - This module requires the netconf system service be enabled on the remote device being managed. - Recommended connection is C(netconf). - This module also works with C(local) connections for legacy playbooks. options: interface: description: - Full name of interface, i.e. 40GE1/0/10. required: true process_id: description: - Specifies a process ID. The value is an integer ranging from 1 to 4294967295. required: true area: description: - Ospf area associated with this ospf process. Valid values are a string, formatted as an IP address (i.e. "0.0.0.0") or as an integer between 1 and 4294967295. required: true cost: description: - The cost associated with this interface. Valid values are an integer in the range from 1 to 65535. hello_interval: description: - Time between sending successive hello packets. Valid values are an integer in the range from 1 to 65535. 
dead_interval: description: - Time interval an ospf neighbor waits for a hello packet before tearing down adjacencies. Valid values are an integer in the range from 1 to 235926000. silent_interface: description: - Setting to true will prevent this interface from receiving HELLO packets. Valid values are 'true' and 'false'. type: bool default: 'no' auth_mode: description: - Specifies the authentication type. choices: ['none', 'null', 'hmac-sha256', 'md5', 'hmac-md5', 'simple'] auth_text_simple: description: - Specifies a password for simple authentication. The value is a string of 1 to 8 characters. auth_key_id: description: - Authentication key id when C(auth_mode) is 'hmac-sha256', 'md5' or 'hmac-md5. Valid value is an integer is in the range from 1 to 255. auth_text_md5: description: - Specifies a password for MD5, HMAC-MD5, or HMAC-SHA256 authentication. The value is a string of 1 to 255 case-sensitive characters, spaces not supported. state: description: - Determines whether the config should be present or not on the device. 
default: present choices: ['present','absent'] """ EXAMPLES = ''' - name: eth_trunk module test hosts: cloudengine connection: local gather_facts: no vars: cli: host: "{{ inventory_hostname }}" port: "{{ ansible_ssh_port }}" username: "{{ username }}" password: "{{ password }}" transport: cli tasks: - name: Enables OSPF and sets the cost on an interface ce_interface_ospf: interface: 10GE1/0/30 process_id: 1 area: 100 cost: 100 provider: '{{ cli }}' - name: Sets the dead interval of the OSPF neighbor ce_interface_ospf: interface: 10GE1/0/30 process_id: 1 area: 100 dead_interval: 100 provider: '{{ cli }}' - name: Sets the interval for sending Hello packets on an interface ce_interface_ospf: interface: 10GE1/0/30 process_id: 1 area: 100 hello_interval: 2 provider: '{{ cli }}' - name: Disables an interface from receiving and sending OSPF packets ce_interface_ospf: interface: 10GE1/0/30 process_id: 1 area: 100 silent_interface: true provider: '{{ cli }}' ''' RETURN = ''' proposed: description: k/v pairs of parameters passed into module returned: verbose mode type: dict sample: {"process_id": "1", "area": "0.0.0.100", "interface": "10GE1/0/30", "cost": "100"} existing: description: k/v pairs of existing configuration returned: verbose mode type: dict sample: {"process_id": "1", "area": "0.0.0.100"} end_state: description: k/v pairs of configuration after module execution returned: verbose mode type: dict sample: {"process_id": "1", "area": "0.0.0.100", "interface": "10GE1/0/30", "cost": "100", "dead_interval": "40", "hello_interval": "10", "silent_interface": "false", "auth_mode": "none"} updates: description: commands sent to the device returned: always type: list sample: ["interface 10GE1/0/30", "ospf enable 1 area 0.0.0.100", "ospf cost 100"] changed: description: check to see if a change was made on the device returned: always type: bool sample: true ''' from xml.etree import ElementTree from ansible.module_utils.basic import AnsibleModule from 
ansible.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec CE_NC_GET_OSPF = """ <filter type="subtree"> <ospfv2 xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <ospfv2comm> <ospfSites> <ospfSite> <processId>%s</processId> <routerId></routerId> <vrfName></vrfName> <areas> <area> <areaId>%s</areaId> <interfaces> <interface> <ifName>%s</ifName> <networkType></networkType> <helloInterval></helloInterval> <deadInterval></deadInterval> <silentEnable></silentEnable> <configCost></configCost> <authenticationMode></authenticationMode> <authTextSimple></authTextSimple> <keyId></keyId> <authTextMd5></authTextMd5> </interface> </interfaces> </area> </areas> </ospfSite> </ospfSites> </ospfv2comm> </ospfv2> </filter> """ CE_NC_XML_BUILD_PROCESS = """ <config> <ospfv2 xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <ospfv2comm> <ospfSites> <ospfSite> <processId>%s</processId> <areas> <area> <areaId>%s</areaId> %s </area> </areas> </ospfSite> </ospfSites> </ospfv2comm> </ospfv2> </config> """ CE_NC_XML_BUILD_MERGE_INTF = """ <interfaces> <interface operation="merge"> %s </interface> </interfaces> """ CE_NC_XML_BUILD_DELETE_INTF = """ <interfaces> <interface operation="delete"> %s </interface> </interfaces> """ CE_NC_XML_SET_IF_NAME = """ <ifName>%s</ifName> """ CE_NC_XML_SET_HELLO = """ <helloInterval>%s</helloInterval> """ CE_NC_XML_SET_DEAD = """ <deadInterval>%s</deadInterval> """ CE_NC_XML_SET_SILENT = """ <silentEnable>%s</silentEnable> """ CE_NC_XML_SET_COST = """ <configCost>%s</configCost> """ CE_NC_XML_SET_AUTH_MODE = """ <authenticationMode>%s</authenticationMode> """ CE_NC_XML_SET_AUTH_TEXT_SIMPLE = """ <authTextSimple>%s</authTextSimple> """ CE_NC_XML_SET_AUTH_MD5 = """ <keyId>%s</keyId> <authTextMd5>%s</authTextMd5> """ def get_interface_type(interface): """Gets the type of interface, such as 10GE, ETH-TRUNK, VLANIF...""" if interface is None: return None 
iftype = None if interface.upper().startswith('GE'): iftype = 'ge' elif interface.upper().startswith('10GE'): iftype = '10ge' elif interface.upper().startswith('25GE'): iftype = '25ge' elif interface.upper().startswith('4X10GE'): iftype = '4x10ge' elif interface.upper().startswith('40GE'): iftype = '40ge' elif interface.upper().startswith('100GE'): iftype = '100ge' elif interface.upper().startswith('VLANIF'): iftype = 'vlanif' elif interface.upper().startswith('LOOPBACK'): iftype = 'loopback' elif interface.upper().startswith('METH'): iftype = 'meth' elif interface.upper().startswith('ETH-TRUNK'): iftype = 'eth-trunk' elif interface.upper().startswith('VBDIF'): iftype = 'vbdif' elif interface.upper().startswith('NVE'): iftype = 'nve' elif interface.upper().startswith('TUNNEL'): iftype = 'tunnel' elif interface.upper().startswith('ETHERNET'): iftype = 'ethernet' elif interface.upper().startswith('FCOE-PORT'): iftype = 'fcoe-port' elif interface.upper().startswith('FABRIC-PORT'): iftype = 'fabric-port' elif interface.upper().startswith('STACK-PORT'): iftype = 'stack-port' elif interface.upper().startswith('NULL'): iftype = 'null' else: return None return iftype.lower() def is_valid_v4addr(addr): """check is ipv4 addr is valid""" if not addr: return False if addr.find('.') != -1: addr_list = addr.split('.') if len(addr_list) != 4: return False for each_num in addr_list: if not each_num.isdigit(): return False if int(each_num) > 255: return False return True return False class InterfaceOSPF(object): """ Manages configuration of an OSPF interface instance. 
""" def __init__(self, argument_spec): self.spec = argument_spec self.module = None self.init_module() # module input info self.interface = self.module.params['interface'] self.process_id = self.module.params['process_id'] self.area = self.module.params['area'] self.cost = self.module.params['cost'] self.hello_interval = self.module.params['hello_interval'] self.dead_interval = self.module.params['dead_interval'] self.silent_interface = self.module.params['silent_interface'] self.auth_mode = self.module.params['auth_mode'] self.auth_text_simple = self.module.params['auth_text_simple'] self.auth_key_id = self.module.params['auth_key_id'] self.auth_text_md5 = self.module.params['auth_text_md5'] self.state = self.module.params['state'] # ospf info self.ospf_info = dict() # state self.changed = False self.updates_cmd = list() self.results = dict() self.proposed = dict() self.existing = dict() self.end_state = dict() def init_module(self): """init module""" self.module = AnsibleModule( argument_spec=self.spec, supports_check_mode=True) def netconf_set_config(self, xml_str, xml_name): """netconf set config""" rcv_xml = set_nc_config(self.module, xml_str) if "<ok/>" not in rcv_xml: self.module.fail_json(msg='Error: %s failed.' 
% xml_name) def get_area_ip(self): """convert integer to ip address""" if not self.area.isdigit(): return self.area addr_int = ['0'] * 4 addr_int[0] = str(((int(self.area) & 0xFF000000) >> 24) & 0xFF) addr_int[1] = str(((int(self.area) & 0x00FF0000) >> 16) & 0xFF) addr_int[2] = str(((int(self.area) & 0x0000FF00) >> 8) & 0XFF) addr_int[3] = str(int(self.area) & 0xFF) return '.'.join(addr_int) def get_ospf_dict(self): """ get one ospf attributes dict.""" ospf_info = dict() conf_str = CE_NC_GET_OSPF % ( self.process_id, self.get_area_ip(), self.interface) rcv_xml = get_nc_config(self.module, conf_str) if "<data/>" in rcv_xml: return ospf_info xml_str = rcv_xml.replace('\r', '').replace('\n', '').\ replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ replace('xmlns="http://www.huawei.com/netconf/vrp"', "") # get process base info root = ElementTree.fromstring(xml_str) ospfsite = root.find("ospfv2/ospfv2comm/ospfSites/ospfSite") if not ospfsite: self.module.fail_json(msg="Error: ospf process does not exist.") for site in ospfsite: if site.tag in ["processId", "routerId", "vrfName"]: ospf_info[site.tag] = site.text # get areas info ospf_info["areaId"] = "" areas = root.find( "ospfv2/ospfv2comm/ospfSites/ospfSite/areas/area") if areas: for area in areas: if area.tag == "areaId": ospf_info["areaId"] = area.text break # get interface info ospf_info["interface"] = dict() intf = root.find( "ospfv2/ospfv2comm/ospfSites/ospfSite/areas/area/interfaces/interface") if intf: for attr in intf: if attr.tag in ["ifName", "networkType", "helloInterval", "deadInterval", "silentEnable", "configCost", "authenticationMode", "authTextSimple", "keyId", "authTextMd5"]: ospf_info["interface"][attr.tag] = attr.text return ospf_info def set_ospf_interface(self): """set interface ospf enable, and set its ospf attributes""" xml_intf = CE_NC_XML_SET_IF_NAME % self.interface # ospf view self.updates_cmd.append("ospf %s" % self.process_id) self.updates_cmd.append("area %s" % 
self.get_area_ip()) if self.silent_interface: xml_intf += CE_NC_XML_SET_SILENT % str(self.silent_interface).lower() if self.silent_interface: self.updates_cmd.append("silent-interface %s" % self.interface) else: self.updates_cmd.append("undo silent-interface %s" % self.interface) # interface view self.updates_cmd.append("interface %s" % self.interface) self.updates_cmd.append("ospf enable %s area %s" % ( self.process_id, self.get_area_ip())) if self.cost: xml_intf += CE_NC_XML_SET_COST % self.cost self.updates_cmd.append("ospf cost %s" % self.cost) if self.hello_interval: xml_intf += CE_NC_XML_SET_HELLO % self.hello_interval self.updates_cmd.append("ospf timer hello %s" % self.hello_interval) if self.dead_interval: xml_intf += CE_NC_XML_SET_DEAD % self.dead_interval self.updates_cmd.append("ospf timer dead %s" % self.dead_interval) if self.auth_mode: xml_intf += CE_NC_XML_SET_AUTH_MODE % self.auth_mode if self.auth_mode == "none": self.updates_cmd.append("undo ospf authentication-mode") else: self.updates_cmd.append("ospf authentication-mode %s" % self.auth_mode) if self.auth_mode == "simple" and self.auth_text_simple: xml_intf += CE_NC_XML_SET_AUTH_TEXT_SIMPLE % self.auth_text_simple self.updates_cmd.pop() self.updates_cmd.append("ospf authentication-mode %s %s" % (self.auth_mode, self.auth_text_simple)) elif self.auth_mode in ["hmac-sha256", "md5", "hmac-md5"] and self.auth_key_id: xml_intf += CE_NC_XML_SET_AUTH_MD5 % ( self.auth_key_id, self.auth_text_md5) self.updates_cmd.pop() self.updates_cmd.append("ospf authentication-mode %s %s %s" % (self.auth_mode, self.auth_key_id, self.auth_text_md5)) else: pass xml_str = CE_NC_XML_BUILD_PROCESS % (self.process_id, self.get_area_ip(), (CE_NC_XML_BUILD_MERGE_INTF % xml_intf)) self.netconf_set_config(xml_str, "SET_INTERFACE_OSPF") self.changed = True def merge_ospf_interface(self): """merge interface ospf attributes""" intf_dict = self.ospf_info["interface"] # ospf view xml_ospf = "" if intf_dict.get("silentEnable") != 
str(self.silent_interface).lower(): xml_ospf += CE_NC_XML_SET_SILENT % str(self.silent_interface).lower() self.updates_cmd.append("ospf %s" % self.process_id) self.updates_cmd.append("area %s" % self.get_area_ip()) if self.silent_interface: self.updates_cmd.append("silent-interface %s" % self.interface) else: self.updates_cmd.append("undo silent-interface %s" % self.interface) # interface view xml_intf = "" self.updates_cmd.append("interface %s" % self.interface) if self.cost and intf_dict.get("configCost") != self.cost: xml_intf += CE_NC_XML_SET_COST % self.cost self.updates_cmd.append("ospf cost %s" % self.cost) if self.hello_interval and intf_dict.get("helloInterval") != self.hello_interval: xml_intf += CE_NC_XML_SET_HELLO % self.hello_interval self.updates_cmd.append("ospf timer hello %s" % self.hello_interval) if self.dead_interval and intf_dict.get("deadInterval") != self.dead_interval: xml_intf += CE_NC_XML_SET_DEAD % self.dead_interval self.updates_cmd.append("ospf timer dead %s" % self.dead_interval) if self.auth_mode: # NOTE: for security, authentication config will always be update xml_intf += CE_NC_XML_SET_AUTH_MODE % self.auth_mode if self.auth_mode == "none": self.updates_cmd.append("undo ospf authentication-mode") else: self.updates_cmd.append("ospf authentication-mode %s" % self.auth_mode) if self.auth_mode == "simple" and self.auth_text_simple: xml_intf += CE_NC_XML_SET_AUTH_TEXT_SIMPLE % self.auth_text_simple self.updates_cmd.pop() self.updates_cmd.append("ospf authentication-mode %s %s" % (self.auth_mode, self.auth_text_simple)) elif self.auth_mode in ["hmac-sha256", "md5", "hmac-md5"] and self.auth_key_id: xml_intf += CE_NC_XML_SET_AUTH_MD5 % ( self.auth_key_id, self.auth_text_md5) self.updates_cmd.pop() self.updates_cmd.append("ospf authentication-mode %s %s %s" % (self.auth_mode, self.auth_key_id, self.auth_text_md5)) else: pass if not xml_intf: self.updates_cmd.pop() # remove command: interface if not xml_ospf and not xml_intf: return xml_sum 
= CE_NC_XML_SET_IF_NAME % self.interface xml_sum += xml_ospf + xml_intf xml_str = CE_NC_XML_BUILD_PROCESS % (self.process_id, self.get_area_ip(), (CE_NC_XML_BUILD_MERGE_INTF % xml_sum)) self.netconf_set_config(xml_str, "MERGE_INTERFACE_OSPF") self.changed = True def unset_ospf_interface(self): """set interface ospf disable, and all its ospf attributes will be removed""" intf_dict = self.ospf_info["interface"] xml_sum = "" xml_intf = CE_NC_XML_SET_IF_NAME % self.interface if intf_dict.get("silentEnable") == "true": xml_sum += CE_NC_XML_BUILD_MERGE_INTF % ( xml_intf + (CE_NC_XML_SET_SILENT % "false")) self.updates_cmd.append("ospf %s" % self.process_id) self.updates_cmd.append("area %s" % self.get_area_ip()) self.updates_cmd.append( "undo silent-interface %s" % self.interface) xml_sum += CE_NC_XML_BUILD_DELETE_INTF % xml_intf xml_str = CE_NC_XML_BUILD_PROCESS % (self.process_id, self.get_area_ip(), xml_sum) self.netconf_set_config(xml_str, "DELETE_INTERFACE_OSPF") self.updates_cmd.append("undo ospf cost") self.updates_cmd.append("undo ospf timer hello") self.updates_cmd.append("undo ospf timer dead") self.updates_cmd.append("undo ospf authentication-mode") self.updates_cmd.append("undo ospf enable %s area %s" % ( self.process_id, self.get_area_ip())) self.changed = True def check_params(self): """Check all input params""" self.interface = self.interface.replace(" ", "").upper() # interface check if not get_interface_type(self.interface): self.module.fail_json(msg="Error: interface is invalid.") # process_id check if not self.process_id.isdigit(): self.module.fail_json(msg="Error: process_id is not digit.") if int(self.process_id) < 1 or int(self.process_id) > 4294967295: self.module.fail_json(msg="Error: process_id must be an integer between 1 and 4294967295.") # area check if self.area.isdigit(): if int(self.area) < 0 or int(self.area) > 4294967295: self.module.fail_json(msg="Error: area id (Integer) must be between 0 and 4294967295.") else: if not 
is_valid_v4addr(self.area): self.module.fail_json(msg="Error: area id is invalid.") # area authentication check if self.state == "present": if self.auth_mode: if self.auth_mode == "simple": if self.auth_text_simple and len(self.auth_text_simple) > 8: self.module.fail_json( msg="Error: auth_text_simple is not in the range from 1 to 8.") if self.auth_mode in ["hmac-sha256", "hmac-sha256", "md5"]: if self.auth_key_id and not self.auth_text_md5: self.module.fail_json( msg='Error: auth_key_id and auth_text_md5 should be set at the same time.') if not self.auth_key_id and self.auth_text_md5: self.module.fail_json( msg='Error: auth_key_id and auth_text_md5 should be set at the same time.') if self.auth_key_id: if not self.auth_key_id.isdigit(): self.module.fail_json( msg="Error: auth_key_id is not digit.") if int(self.auth_key_id) < 1 or int(self.auth_key_id) > 255: self.module.fail_json( msg="Error: auth_key_id is not in the range from 1 to 255.") if self.auth_text_md5 and len(self.auth_text_md5) > 255: self.module.fail_json( msg="Error: auth_text_md5 is not in the range from 1 to 255.") # cost check if self.cost: if not self.cost.isdigit(): self.module.fail_json(msg="Error: cost is not digit.") if int(self.cost) < 1 or int(self.cost) > 65535: self.module.fail_json( msg="Error: cost is not in the range from 1 to 65535") # hello_interval check if self.hello_interval: if not self.hello_interval.isdigit(): self.module.fail_json( msg="Error: hello_interval is not digit.") if int(self.hello_interval) < 1 or int(self.hello_interval) > 65535: self.module.fail_json( msg="Error: hello_interval is not in the range from 1 to 65535") # dead_interval check if self.dead_interval: if not self.dead_interval.isdigit(): self.module.fail_json(msg="Error: dead_interval is not digit.") if int(self.dead_interval) < 1 or int(self.dead_interval) > 235926000: self.module.fail_json( msg="Error: dead_interval is not in the range from 1 to 235926000") def get_proposed(self): """get proposed info""" 
self.proposed["interface"] = self.interface self.proposed["process_id"] = self.process_id self.proposed["area"] = self.get_area_ip() self.proposed["cost"] = self.cost self.proposed["hello_interval"] = self.hello_interval self.proposed["dead_interval"] = self.dead_interval self.proposed["silent_interface"] = self.silent_interface if self.auth_mode: self.proposed["auth_mode"] = self.auth_mode if self.auth_mode == "simple": self.proposed["auth_text_simple"] = self.auth_text_simple if self.auth_mode in ["hmac-sha256", "hmac-sha256", "md5"]: self.proposed["auth_key_id"] = self.auth_key_id self.proposed["auth_text_md5"] = self.auth_text_md5 self.proposed["state"] = self.state def get_existing(self): """get existing info""" if not self.ospf_info: return if self.ospf_info["interface"]: self.existing["interface"] = self.interface self.existing["cost"] = self.ospf_info["interface"].get("configCost") self.existing["hello_interval"] = self.ospf_info["interface"].get("helloInterval") self.existing["dead_interval"] = self.ospf_info["interface"].get("deadInterval") self.existing["silent_interface"] = self.ospf_info["interface"].get("silentEnable") self.existing["auth_mode"] = self.ospf_info["interface"].get("authenticationMode") self.existing["auth_text_simple"] = self.ospf_info["interface"].get("authTextSimple") self.existing["auth_key_id"] = self.ospf_info["interface"].get("keyId") self.existing["auth_text_md5"] = self.ospf_info["interface"].get("authTextMd5") self.existing["process_id"] = self.ospf_info["processId"] self.existing["area"] = self.ospf_info["areaId"] def get_end_state(self): """get end state info""" ospf_info = self.get_ospf_dict() if not ospf_info: return if ospf_info["interface"]: self.end_state["interface"] = self.interface self.end_state["cost"] = ospf_info["interface"].get("configCost") self.end_state["hello_interval"] = ospf_info["interface"].get("helloInterval") self.end_state["dead_interval"] = ospf_info["interface"].get("deadInterval") 
self.end_state["silent_interface"] = ospf_info["interface"].get("silentEnable") self.end_state["auth_mode"] = ospf_info["interface"].get("authenticationMode") self.end_state["auth_text_simple"] = ospf_info["interface"].get("authTextSimple") self.end_state["auth_key_id"] = ospf_info["interface"].get("keyId") self.end_state["auth_text_md5"] = ospf_info["interface"].get("authTextMd5") self.end_state["process_id"] = ospf_info["processId"] self.end_state["area"] = ospf_info["areaId"] def work(self): """worker""" self.check_params() self.ospf_info = self.get_ospf_dict() self.get_existing() self.get_proposed() # deal present or absent if self.state == "present": if not self.ospf_info or not self.ospf_info["interface"]: # create ospf area and set interface config self.set_ospf_interface() else: # merge interface ospf area config self.merge_ospf_interface() else: if self.ospf_info and self.ospf_info["interface"]: # delete interface ospf area config self.unset_ospf_interface() self.get_end_state() self.results['changed'] = self.changed self.results['proposed'] = self.proposed self.results['existing'] = self.existing self.results['end_state'] = self.end_state if self.changed: self.results['updates'] = self.updates_cmd else: self.results['updates'] = list() self.module.exit_json(**self.results) def main(): """Module main""" argument_spec = dict( interface=dict(required=True, type='str'), process_id=dict(required=True, type='str'), area=dict(required=True, type='str'), cost=dict(required=False, type='str'), hello_interval=dict(required=False, type='str'), dead_interval=dict(required=False, type='str'), silent_interface=dict(required=False, default=False, type='bool'), auth_mode=dict(required=False, choices=['none', 'null', 'hmac-sha256', 'md5', 'hmac-md5', 'simple'], type='str'), auth_text_simple=dict(required=False, type='str', no_log=True), auth_key_id=dict(required=False, type='str'), auth_text_md5=dict(required=False, type='str', no_log=True), state=dict(required=False, 
default='present', choices=['present', 'absent']) ) argument_spec.update(ce_argument_spec) module = InterfaceOSPF(argument_spec) module.work() if __name__ == '__main__': main()
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- # File: _test.py import sys import numpy as np import cv2 import unittest from .base import ImageAugmentor, AugmentorList from .imgproc import Contrast from .noise import SaltPepperNoise from .misc import Flip, Resize def _rand_image(shape=(20, 20)): return np.random.rand(*shape).astype("float32") class LegacyBrightness(ImageAugmentor): def __init__(self, delta, clip=True): super(LegacyBrightness, self).__init__() assert delta > 0 self._init(locals()) def _get_augment_params(self, _): v = self._rand_range(-self.delta, self.delta) return v def _augment(self, img, v): old_dtype = img.dtype img = img.astype('float32') img += v if self.clip or old_dtype == np.uint8: img = np.clip(img, 0, 255) return img.astype(old_dtype) class LegacyFlip(ImageAugmentor): def __init__(self, horiz=False, vert=False, prob=0.5): super(LegacyFlip, self).__init__() if horiz and vert: raise ValueError("Cannot do both horiz and vert. Please use two Flip instead.") elif horiz: self.code = 1 elif vert: self.code = 0 else: raise ValueError("At least one of horiz or vert has to be True!") self._init(locals()) def _get_augment_params(self, img): h, w = img.shape[:2] do = self._rand_range() < self.prob return (do, h, w) def _augment(self, img, param): do, _, _ = param if do: ret = cv2.flip(img, self.code) if img.ndim == 3 and ret.ndim == 2: ret = ret[:, :, np.newaxis] else: ret = img return ret def _augment_coords(self, coords, param): do, h, w = param if do: if self.code == 0: coords[:, 1] = h - coords[:, 1] elif self.code == 1: coords[:, 0] = w - coords[:, 0] return coords class ImgAugTest(unittest.TestCase): def _get_augs(self): return AugmentorList([ Contrast((0.8, 1.2)), Flip(horiz=True), Resize((30, 30)), SaltPepperNoise() ]) def _get_augs_with_legacy(self): return AugmentorList([ LegacyBrightness(0.5), LegacyFlip(horiz=True), Resize((30, 30)), SaltPepperNoise() ]) def test_augmentors(self): augmentors = self._get_augs() img = _rand_image() orig = img.copy() tfms = 
augmentors.get_transform(img) # test printing print(augmentors) print(tfms) newimg = tfms.apply_image(img) print(tfms) # lazy ones will instantiate after the first apply newimg2 = tfms.apply_image(orig) self.assertTrue(np.allclose(newimg, newimg2)) self.assertEqual(newimg2.shape[0], 30) coords = np.asarray([[0, 0], [10, 12]], dtype="float32") tfms.apply_coords(coords) def test_legacy_usage(self): augmentors = self._get_augs() img = _rand_image() orig = img.copy() newimg, tfms = augmentors.augment_return_params(img) newimg2 = augmentors.augment_with_params(orig, tfms) self.assertTrue(np.allclose(newimg, newimg2)) self.assertEqual(newimg2.shape[0], 30) coords = np.asarray([[0, 0], [10, 12]], dtype="float32") augmentors.augment_coords(coords, tfms) def test_legacy_augs_new_usage(self): augmentors = self._get_augs_with_legacy() img = _rand_image() orig = img.copy() tfms = augmentors.get_transform(img) newimg = tfms.apply_image(img) newimg2 = tfms.apply_image(orig) self.assertTrue(np.allclose(newimg, newimg2)) self.assertEqual(newimg2.shape[0], 30) coords = np.asarray([[0, 0], [10, 12]], dtype="float32") tfms.apply_coords(coords) def test_legacy_augs_legacy_usage(self): augmentors = self._get_augs_with_legacy() img = _rand_image() orig = img.copy() newimg, tfms = augmentors.augment_return_params(img) newimg2 = augmentors.augment_with_params(orig, tfms) self.assertTrue(np.allclose(newimg, newimg2)) self.assertEqual(newimg2.shape[0], 30) coords = np.asarray([[0, 0], [10, 12]], dtype="float32") augmentors.augment_coords(coords, tfms) if __name__ == '__main__': anchors = [(0.2, 0.2), (0.7, 0.2), (0.8, 0.8), (0.5, 0.5), (0.2, 0.5)] augmentors = AugmentorList([ Contrast((0.8, 1.2)), Flip(horiz=True), # RandomCropRandomShape(0.3), SaltPepperNoise() ]) img = cv2.imread(sys.argv[1]) newimg, prms = augmentors._augment_return_params(img) cv2.imshow(" ", newimg.astype('uint8')) cv2.waitKey() newimg = augmentors._augment(img, prms) cv2.imshow(" ", newimg.astype('uint8')) 
cv2.waitKey()
unknown
codeparrot/codeparrot-clean
''' HIDInput: Native support of HID input from linux kernel Support start from 2.6.32-ubuntu, or 2.6.34. To configure HIDInput, put in your configuration :: [input] # devicename = hidinput,/dev/input/eventXX # example with Stantum MTP4.3" screen stantum = hidinput,/dev/input/event2 .. note:: You must have read access to the input event. You have the possibility to use custom range for some X, Y and pressure value. On some drivers, the range reported is invalid. To fix that, you can add one of theses options on the argument line : * invert_x : 1 to invert X axis * invert_y : 1 to invert Y axis * min_position_x : X minimum * max_position_x : X maximum * min_position_y : Y minimum * max_position_y : Y maximum * min_pressure : pressure minimum * max_pressure : pressure maximum For example, on Asus T101M, the touchscreen report a range from 0-4095 for X and Y value, but real value are in a range from 0-32768. You can put it on configuration :: [input] t101m = hidinput,/dev/input/event7,max_position_x=32768,max_position_y=32768 ''' __all__ = ('HIDInputTouchProvider', 'HIDTouch') import os from pymt.input.touch import Touch from pymt.input.shape import TouchShapeRect class HIDTouch(Touch): def depack(self, args): self.sx = args['x'] self.sy = args['y'] self.profile = ['pos'] if 'size_w' in args and 'size_h' in args: self.shape = TouchShapeRect() self.shape.width = args['size_w'] self.shape.height = args['size_h'] self.profile.append('shape') if 'pressure' in args: self.pressure = args['pressure'] self.profile.append('pressure') super(HIDTouch, self).depack(args) def __str__(self): return '<HIDTouch id=%d pos=(%f, %f) device=%s>' % (self.id, self.sx, self.sy, self.device) if 'PYMT_DOC' in os.environ: # documentation hack HIDInputTouchProvider = None else: import threading import collections import struct import fcntl from pymt.input.provider import TouchProvider from pymt.input.factory import TouchFactory from pymt.logger import pymt_logger # # This part is taken from 
linux-source-2.6.32/include/linux/input.h # # Event types EV_SYN = 0x00 EV_KEY = 0x01 EV_REL = 0x02 EV_ABS = 0x03 EV_MSC = 0x04 EV_SW = 0x05 EV_LED = 0x11 EV_SND = 0x12 EV_REP = 0x14 EV_FF = 0x15 EV_PWR = 0x16 EV_FF_STATUS = 0x17 EV_MAX = 0x1f EV_CNT = (EV_MAX+1) KEY_MAX = 0x2ff # Synchronization events SYN_REPORT = 0 SYN_CONFIG = 1 SYN_MT_REPORT = 2 # Misc events MSC_SERIAL = 0x00 MSC_PULSELED = 0x01 MSC_GESTURE = 0x02 MSC_RAW = 0x03 MSC_SCAN = 0x04 MSC_MAX = 0x07 MSC_CNT = (MSC_MAX+1) ABS_MT_TOUCH_MAJOR = 0x30 # Major axis of touching ellipse ABS_MT_TOUCH_MINOR = 0x31 # Minor axis (omit if circular) ABS_MT_WIDTH_MAJOR = 0x32 # Major axis of approaching ellipse ABS_MT_WIDTH_MINOR = 0x33 # Minor axis (omit if circular) ABS_MT_ORIENTATION = 0x34 # Ellipse orientation ABS_MT_POSITION_X = 0x35 # Center X ellipse position ABS_MT_POSITION_Y = 0x36 # Center Y ellipse position ABS_MT_TOOL_TYPE = 0x37 # Type of touching device ABS_MT_BLOB_ID = 0x38 # Group a set of packets as a blob ABS_MT_TRACKING_ID = 0x39 # Unique ID of initiated contact ABS_MT_PRESSURE = 0x3a # Pressure on contact area # some ioctl base (with 0 value) EVIOCGNAME = 2147501318 EVIOCGBIT = 2147501344 EVIOCGABS = 2149074240 # sizeof(struct input_event) struct_input_event_sz = struct.calcsize('LLHHi') struct_input_absinfo_sz = struct.calcsize('iiiiii') sz_l = struct.calcsize('Q') class HIDInputTouchProvider(TouchProvider): options = ('min_position_x', 'max_position_x', 'min_position_y', 'max_position_y', 'min_pressure', 'max_pressure', 'invert_x', 'invert_y') def __init__(self, device, args): super(HIDInputTouchProvider, self).__init__(device, args) self.input_fn = None self.default_ranges = dict() # split arguments args = args.split(',') if not args: pymt_logger.error('HIDInput: No filename pass to HIDInput configuration') pymt_logger.error('HIDInput: Use /dev/input/event0 for example') return None # read filename self.input_fn = args[0] pymt_logger.info('HIDInput: Read event from <%s>' % self.input_fn) # 
read parameters for arg in args[1:]: if arg == '': continue arg = arg.split('=') # ensure it's a key = value if len(arg) != 2: pymt_logger.error('HIDInput: invalid parameter %s, not in key=value format.' % arg) continue # ensure the key exist key, value = arg if key not in HIDInputTouchProvider.options: pymt_logger.error('HIDInput: unknown %s option' % key) continue # ensure the value try: self.default_ranges[key] = int(value) except ValueError: pymt_logger.error('HIDInput: invalid value %s for option %s' % (key, value)) continue # all good! pymt_logger.info('HIDInput: Set custom %s to %d' % (key, int(value))) def start(self): if self.input_fn is None: return self.uid = 0 self.queue = collections.deque() self.thread = threading.Thread( target=self._thread_run, kwargs=dict( queue=self.queue, input_fn=self.input_fn, device=self.device, default_ranges=self.default_ranges )) self.thread.daemon = True self.thread.start() def _thread_run(self, **kwargs): input_fn = kwargs.get('input_fn') queue = kwargs.get('queue') device = kwargs.get('device') drs = kwargs.get('default_ranges').get touches = {} touches_sent = [] point = {} l_points = [] # prepare some vars to get limit of some component range_min_position_x = 0 range_max_position_x = 2048 range_min_position_y = 0 range_max_position_y = 2048 range_min_pressure = 0 range_max_pressure = 255 invert_x = int(bool(drs('invert_x', 0))) invert_y = int(bool(drs('invert_y', 0))) def process(points): actives = [args['id'] for args in points] for args in points: tid = args['id'] try: touch = touches[tid] if touch.sx == args['x'] and touch.sy == args['y']: continue touch.move(args) if tid not in touches_sent: queue.append(('down', touch)) touches_sent.append(tid) queue.append(('move', touch)) except KeyError: touch = HIDTouch(device, tid, args) touches[touch.id] = touch for tid in touches.keys()[:]: if tid not in actives: touch = touches[tid] if tid in touches_sent: queue.append(('up', touch)) touches_sent.remove(tid) del 
touches[tid] def normalize(value, vmin, vmax): return (value - vmin) / float(vmax - vmin) # open the input fd = open(input_fn, 'rb') # get the controler name (EVIOCGNAME) device_name = fcntl.ioctl(fd, EVIOCGNAME + (256 << 16), " " * 256).split('\x00')[0] pymt_logger.info('HIDTouch: using <%s>' % device_name) # get abs infos bit = fcntl.ioctl(fd, EVIOCGBIT + (EV_MAX << 16), ' ' * sz_l) bit, = struct.unpack('Q', bit) for x in xrange(EV_MAX): # preserve this, we may want other things than EV_ABS if x != EV_ABS: continue # EV_ABS available for this device ? if (bit & (1 << x)) == 0: continue # ask abs info keys to the devices sbit = fcntl.ioctl(fd, EVIOCGBIT + x + (KEY_MAX << 16), ' ' * sz_l) sbit, = struct.unpack('Q', sbit) for y in xrange(KEY_MAX): if (sbit & (1 << y)) == 0: continue absinfo = fcntl.ioctl(fd, EVIOCGABS + y + (struct_input_absinfo_sz << 16), ' ' * struct_input_absinfo_sz) abs_value, abs_min, abs_max, abs_fuzz, \ abs_flat, abs_res = struct.unpack('iiiiii', absinfo) if y == ABS_MT_POSITION_X: range_min_position_x = drs('min_position_x', abs_min) range_max_position_x = drs('max_position_x', abs_max) pymt_logger.info('HIDTouch: ' + '<%s> range position X is %d - %d' % ( device_name, abs_min, abs_max)) elif y == ABS_MT_POSITION_Y: range_min_position_y = drs('min_position_y', abs_min) range_max_position_y = drs('max_position_y', abs_max) pymt_logger.info('HIDTouch: ' + '<%s> range position Y is %d - %d' % ( device_name, abs_min, abs_max)) elif y == ABS_MT_PRESSURE: range_min_pressure = drs('min_pressure', abs_min) range_max_pressure = drs('max_pressure', abs_max) pymt_logger.info('HIDTouch: ' + '<%s> range pressure is %d - %d' % ( device_name, abs_min, abs_max)) # read until the end while fd: data = fd.read(struct_input_event_sz) if len(data) < struct_input_event_sz: break # extract each event for i in xrange(len(data) / struct_input_event_sz): ev = data[i * struct_input_event_sz:] # extract timeval + event infos tv_sec, tv_usec, ev_type, ev_code, ev_value 
= \ struct.unpack('LLHHi', ev[:struct_input_event_sz]) # sync event if ev_type == EV_SYN: if ev_code == SYN_MT_REPORT: if 'id' not in point: continue l_points.append(point) elif ev_code == SYN_REPORT: process(l_points) l_points = [] elif ev_type == EV_MSC and ev_code in (MSC_RAW, MSC_SCAN): pass else: # compute multitouch track if ev_code == ABS_MT_TRACKING_ID: point = {} point['id'] = ev_value elif ev_code == ABS_MT_POSITION_X: val = normalize(ev_value, range_min_position_x, range_max_position_x) if invert_x: val = 1. - val point['x'] = val elif ev_code == ABS_MT_POSITION_Y: val = 1. - normalize(ev_value, range_min_position_y, range_max_position_y) if invert_y: val = 1. - val point['y'] = val elif ev_code == ABS_MT_ORIENTATION: point['orientation'] = ev_value elif ev_code == ABS_MT_BLOB_ID: point['blobid'] = ev_value elif ev_code == ABS_MT_PRESSURE: point['pressure'] = normalize(ev_value, range_min_pressure, range_max_pressure) elif ev_code == ABS_MT_TOUCH_MAJOR: point['size_w'] = ev_value elif ev_code == ABS_MT_TOUCH_MINOR: point['size_h'] = ev_value def update(self, dispatch_fn): # dispatch all event from threads try: while True: event_type, touch = self.queue.popleft() dispatch_fn(event_type, touch) except: pass TouchFactory.register('hidinput', HIDInputTouchProvider)
unknown
codeparrot/codeparrot-clean
""" An abstraction layer over OS-dependent file-like objects, that provides a consistent view of a *duplex byte stream*. """ import sys import os import socket import time import errno from rpyc.lib import safe_import from rpyc.lib.compat import select, select_error, BYTES_LITERAL, get_exc_errno, maxint win32file = safe_import("win32file") win32pipe = safe_import("win32pipe") msvcrt = safe_import("msvcrt") ssl = safe_import("ssl") retry_errnos = (errno.EAGAIN, errno.EWOULDBLOCK) class Stream(object): """Base Stream""" __slots__ = () def close(self): """closes the stream, releasing any system resources associated with it""" raise NotImplementedError() @property def closed(self): """tests whether the stream is closed or not""" raise NotImplementedError() def fileno(self): """returns the stream's file descriptor""" raise NotImplementedError() def poll(self, timeout): """indicates whether the stream has data to read (within *timeout* seconds)""" try: while True: try: rl, _, _ = select([self], [], [], timeout) except select_error as ex: if ex[0] == errno.EINTR: continue else: raise else: break except ValueError as ex: # i get this some times: "ValueError: file descriptor cannot be a negative integer (-1)" # let's translate it to select.error raise select_error(str(ex)) return bool(rl) def read(self, count): """reads **exactly** *count* bytes, or raise EOFError :param count: the number of bytes to read :returns: read data """ raise NotImplementedError() def write(self, data): """writes the entire *data*, or raise EOFError :param data: a string of binary data """ raise NotImplementedError() class ClosedFile(object): """Represents a closed file object (singleton)""" __slots__ = () def __getattr__(self, name): if name.startswith("__"): # issue 71 raise AttributeError("stream has been closed") raise EOFError("stream has been closed") def close(self): pass @property def closed(self): return True def fileno(self): raise EOFError("stream has been closed") ClosedFile = 
ClosedFile() class SocketStream(Stream): """A stream over a socket""" __slots__ = ("sock",) MAX_IO_CHUNK = 8000 def __init__(self, sock): self.sock = sock @classmethod def _connect(cls, host, port, family = socket.AF_INET, socktype = socket.SOCK_STREAM, proto = 0, timeout = 3, nodelay = False, keepalive = False): family, socktype, proto, _, sockaddr = socket.getaddrinfo(host, port, family, socktype, proto)[0] s = socket.socket(family, socktype, proto) s.settimeout(timeout) s.connect(sockaddr) if nodelay: s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) if keepalive: s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) # Linux specific: after 10 idle minutes, start sending keepalives every 5 minutes. # Drop connection after 10 failed keepalives if hasattr(socket, "TCP_KEEPIDLE") and hasattr(socket, "TCP_KEEPINTVL") and hasattr(socket, "TCP_KEEPCNT"): s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 10 * 60) s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 5 * 60) s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, 10) return s @classmethod def connect(cls, host, port, **kwargs): """factory method that creates a ``SocketStream`` over a socket connected to *host* and *port* :param host: the host name :param port: the TCP port :param kwargs: additional keyword arguments: ``family``, ``socktype``, ``proto``, ``timeout``, ``nodelay``, passed directly to the ``socket`` constructor, or ``ipv6``. :param ipv6: if True, creates an IPv6 socket (``AF_INET6``); otherwise an IPv4 (``AF_INET``) socket is created :returns: a :class:`SocketStream` """ if kwargs.pop("ipv6", False): kwargs["family"] = socket.AF_INET6 return cls(cls._connect(host, port, **kwargs)) @classmethod def ssl_connect(cls, host, port, ssl_kwargs, **kwargs): """factory method that creates a ``SocketStream`` over an SSL-wrapped socket, connected to *host* and *port* with the given credentials. 
:param host: the host name :param port: the TCP port :param ssl_kwargs: a dictionary of keyword arguments to be passed directly to ``ssl.wrap_socket`` :param kwargs: additional keyword arguments: ``family``, ``socktype``, ``proto``, ``timeout``, ``nodelay``, passed directly to the ``socket`` constructor, or ``ipv6``. :param ipv6: if True, creates an IPv6 socket (``AF_INET6``); otherwise an IPv4 (``AF_INET``) socket is created :returns: a :class:`SocketStream` """ if kwargs.pop("ipv6", False): kwargs["family"] = socket.AF_INET6 s = cls._connect(host, port, **kwargs) s2 = ssl.wrap_socket(s, **ssl_kwargs) return cls(s2) @property def closed(self): return self.sock is ClosedFile def close(self): if not self.closed: try: self.sock.shutdown(socket.SHUT_RDWR) except Exception: pass self.sock.close() self.sock = ClosedFile def fileno(self): try: return self.sock.fileno() except socket.error: self.close() ex = sys.exc_info()[1] if get_exc_errno(ex) == errno.EBADF: raise EOFError() else: raise def read(self, count): data = [] while count > 0: try: buf = self.sock.recv(min(self.MAX_IO_CHUNK, count)) except socket.timeout: continue except socket.error: ex = sys.exc_info()[1] if get_exc_errno(ex) in retry_errnos: # windows just has to be a bitch continue self.close() raise EOFError(ex) if not buf: self.close() raise EOFError("connection closed by peer") data.append(buf) count -= len(buf) return BYTES_LITERAL("").join(data) def write(self, data): try: while data: count = self.sock.send(data[:self.MAX_IO_CHUNK]) data = data[count:] except socket.error: ex = sys.exc_info()[1] self.close() raise EOFError(ex) class TunneledSocketStream(SocketStream): """A socket stream over an SSH tunnel (terminates the tunnel when the connection closes)""" __slots__ = ("tun",) def __init__(self, sock): self.sock = sock self.tun = None def close(self): SocketStream.close(self) if self.tun: self.tun.close() class PipeStream(Stream): """A stream over two simplex pipes (one used to input, another for 
output)""" __slots__ = ("incoming", "outgoing") MAX_IO_CHUNK = 32000 def __init__(self, incoming, outgoing): outgoing.flush() self.incoming = incoming self.outgoing = outgoing @classmethod def from_std(cls): """factory method that creates a PipeStream over the standard pipes (``stdin`` and ``stdout``) :returns: a :class:`PipeStream` instance """ return cls(sys.stdin, sys.stdout) @classmethod def create_pair(cls): """factory method that creates two pairs of anonymous pipes, and creates two PipeStreams over them. Useful for ``fork()``. :returns: a tuple of two :class:`PipeStream` instances """ r1, w1 = os.pipe() r2, w2 = os.pipe() side1 = cls(os.fdopen(r1, "rb"), os.fdopen(w2, "wb")) side2 = cls(os.fdopen(r2, "rb"), os.fdopen(w1, "wb")) return side1, side2 @property def closed(self): return self.incoming is ClosedFile def close(self): self.incoming.close() self.outgoing.close() self.incoming = ClosedFile self.outgoing = ClosedFile def fileno(self): return self.incoming.fileno() def read(self, count): data = [] try: while count > 0: buf = os.read(self.incoming.fileno(), min(self.MAX_IO_CHUNK, count)) if not buf: raise EOFError("connection closed by peer") data.append(buf) count -= len(buf) except EOFError: self.close() raise except EnvironmentError: ex = sys.exc_info()[1] self.close() raise EOFError(ex) return BYTES_LITERAL("").join(data) def write(self, data): try: while data: chunk = data[:self.MAX_IO_CHUNK] written = os.write(self.outgoing.fileno(), chunk) data = data[written:] except EnvironmentError: ex = sys.exc_info()[1] self.close() raise EOFError(ex) class Win32PipeStream(Stream): """A stream over two simplex pipes (one used to input, another for output). 
This is an implementation for Windows pipes (which suck)""" __slots__ = ("incoming", "outgoing", "_fileno", "_keepalive") PIPE_BUFFER_SIZE = 130000 MAX_IO_CHUNK = 32000 def __init__(self, incoming, outgoing): self._keepalive = (incoming, outgoing) if hasattr(incoming, "fileno"): self._fileno = incoming.fileno() incoming = msvcrt.get_osfhandle(incoming.fileno()) if hasattr(outgoing, "fileno"): outgoing = msvcrt.get_osfhandle(outgoing.fileno()) self.incoming = incoming self.outgoing = outgoing @classmethod def from_std(cls): return cls(sys.stdin, sys.stdout) @classmethod def create_pair(cls): r1, w1 = win32pipe.CreatePipe(None, cls.PIPE_BUFFER_SIZE) r2, w2 = win32pipe.CreatePipe(None, cls.PIPE_BUFFER_SIZE) return cls(r1, w2), cls(r2, w1) def fileno(self): return self._fileno @property def closed(self): return self.incoming is ClosedFile def close(self): if self.closed: return try: win32file.CloseHandle(self.incoming) except Exception: pass self.incoming = ClosedFile try: win32file.CloseHandle(self.outgoing) except Exception: pass self.outgoing = ClosedFile def read(self, count): try: data = [] while count > 0: dummy, buf = win32file.ReadFile(self.incoming, int(min(self.MAX_IO_CHUNK, count))) count -= len(buf) data.append(buf) except TypeError: ex = sys.exc_info()[1] if not self.closed: raise raise EOFError(ex) except win32file.error: ex = sys.exc_info()[1] self.close() raise EOFError(ex) return BYTES_LITERAL("").join(data) def write(self, data): try: while data: dummy, count = win32file.WriteFile(self.outgoing, data[:self.MAX_IO_CHUNK]) data = data[count:] except TypeError: ex = sys.exc_info()[1] if not self.closed: raise raise EOFError(ex) except win32file.error: ex = sys.exc_info()[1] self.close() raise EOFError(ex) def poll(self, timeout, interval = 0.1): """a poor man's version of select()""" if timeout is None: timeout = maxint length = 0 tmax = time.time() + timeout try: while length == 0: length = win32pipe.PeekNamedPipe(self.incoming, 0)[1] if time.time() >= 
tmax: break time.sleep(interval) except TypeError: ex = sys.exc_info()[1] if not self.closed: raise raise EOFError(ex) return length != 0 class NamedPipeStream(Win32PipeStream): """A stream over two named pipes (one used to input, another for output). Windows implementation.""" NAMED_PIPE_PREFIX = r'\\.\pipe\rpyc_' PIPE_IO_TIMEOUT = 3 CONNECT_TIMEOUT = 3 __slots__ = ("is_server_side",) def __init__(self, handle, is_server_side): Win32PipeStream.__init__(self, handle, handle) self.is_server_side = is_server_side @classmethod def from_std(cls): raise NotImplementedError() @classmethod def create_pair(cls): raise NotImplementedError() @classmethod def create_server(cls, pipename, connect = True): """factory method that creates a server-side ``NamedPipeStream``, over a newly-created *named pipe* of the given name. :param pipename: the name of the pipe. It will be considered absolute if it starts with ``\\\\.``; otherwise ``\\\\.\\pipe\\rpyc`` will be prepended. :param connect: whether to connect on creation or not :returns: a :class:`NamedPipeStream` instance """ if not pipename.startswith("\\\\."): pipename = cls.NAMED_PIPE_PREFIX + pipename handle = win32pipe.CreateNamedPipe( pipename, win32pipe.PIPE_ACCESS_DUPLEX, win32pipe.PIPE_TYPE_BYTE | win32pipe.PIPE_READMODE_BYTE | win32pipe.PIPE_WAIT, 1, cls.PIPE_BUFFER_SIZE, cls.PIPE_BUFFER_SIZE, cls.PIPE_IO_TIMEOUT * 1000, None ) inst = cls(handle, True) if connect: inst.connect_server() return inst def connect_server(self): """connects the server side of an unconnected named pipe (blocks until a connection arrives)""" if not self.is_server_side: raise ValueError("this must be the server side") win32pipe.ConnectNamedPipe(self.incoming, None) @classmethod def create_client(cls, pipename): """factory method that creates a client-side ``NamedPipeStream``, over a newly-created *named pipe* of the given name. :param pipename: the name of the pipe. 
It will be considered absolute if it starts with ``\\\\.``; otherwise ``\\\\.\\pipe\\rpyc`` will be prepended. :returns: a :class:`NamedPipeStream` instance """ if not pipename.startswith("\\\\."): pipename = cls.NAMED_PIPE_PREFIX + pipename handle = win32file.CreateFile( pipename, win32file.GENERIC_READ | win32file.GENERIC_WRITE, 0, None, win32file.OPEN_EXISTING, 0, None ) return cls(handle, False) def close(self): if self.closed: return if self.is_server_side: win32file.FlushFileBuffers(self.outgoing) win32pipe.DisconnectNamedPipe(self.outgoing) Win32PipeStream.close(self) if sys.platform == "win32": PipeStream = Win32PipeStream
unknown
codeparrot/codeparrot-clean
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from nova.api.openstack.compute.legacy_v2.contrib import admin_actions as \
    suspend_server_v2
from nova.api.openstack.compute import suspend_server as \
    suspend_server_v21
from nova import exception
from nova import test
from nova.tests.unit.api.openstack.compute import admin_only_action_common
from nova.tests.unit.api.openstack import fakes


class SuspendServerTestsV21(admin_only_action_common.CommonTests):
    """Tests for the v2.1 suspend/resume server API actions.

    The actual test logic lives in the shared CommonTests base class;
    this class only wires up which controller module/class to exercise.
    """

    # Module holding the controller under test; overridden by the V2 subclass.
    suspend_server = suspend_server_v21
    # Name of the controller class inside that module.
    controller_name = 'SuspendServerController'
    _api_version = '2.1'

    def setUp(self):
        super(SuspendServerTestsV21, self).setUp()
        # Instantiate the controller by name so subclasses can swap both
        # the module and the class via the two attributes above.
        self.controller = getattr(self.suspend_server, self.controller_name)()
        self.compute_api = self.controller.compute_api

        def _fake_controller(*args, **kwargs):
            # Always hand back the single controller built above so the
            # stubbed module returns a predictable instance.
            return self.controller

        self.stubs.Set(self.suspend_server, self.controller_name,
                       _fake_controller)
        # Stub compute_api.get so the shared tests can set expectations on it.
        self.mox.StubOutWithMock(self.compute_api, 'get')

    def test_suspend_resume(self):
        self._test_actions(['_suspend', '_resume'])

    def test_suspend_resume_with_non_existed_instance(self):
        self._test_actions_with_non_existed_instance(['_suspend', '_resume'])

    def test_suspend_resume_raise_conflict_on_invalid_state(self):
        self._test_actions_raise_conflict_on_invalid_state(['_suspend',
                                                            '_resume'])

    def test_actions_with_locked_instance(self):
        self._test_actions_with_locked_instance(['_suspend', '_resume'])


class SuspendServerTestsV2(SuspendServerTestsV21):
    """Re-runs the V21 test set against the legacy v2 admin-actions
    controller (same actions, different module and class name)."""

    suspend_server = suspend_server_v2
    controller_name = 'AdminActionsController'
    _api_version = '2'


class SuspendServerPolicyEnforcementV21(test.NoDBTestCase):
    """Verify that suspend/resume reject callers denied by policy rules."""

    def setUp(self):
        super(SuspendServerPolicyEnforcementV21, self).setUp()
        self.controller = suspend_server_v21.SuspendServerController()
        self.req = fakes.HTTPRequest.blank('')

    def test_suspend_policy_failed(self):
        rule_name = "os_compute_api:os-suspend-server:suspend"
        # Restrict the rule to a project the fake request does not belong to,
        # so the policy check must fail.
        self.policy.set_rules({rule_name: "project:non_fake"})
        exc = self.assertRaises(
            exception.PolicyNotAuthorized,
            self.controller._suspend, self.req, fakes.FAKE_UUID,
            body={'suspend': {}})
        self.assertEqual(
            "Policy doesn't allow %s to be performed." % rule_name,
            exc.format_message())

    def test_resume_policy_failed(self):
        rule_name = "os_compute_api:os-suspend-server:resume"
        self.policy.set_rules({rule_name: "project:non_fake"})
        exc = self.assertRaises(
            exception.PolicyNotAuthorized,
            self.controller._resume, self.req, fakes.FAKE_UUID,
            body={'resume': {}})
        self.assertEqual(
            "Policy doesn't allow %s to be performed." % rule_name,
            exc.format_message())
unknown
codeparrot/codeparrot-clean
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. from typing import Union from typing_extensions import Annotated, TypeAlias from ...._utils import PropertyInfo from .file_path_delta_annotation import FilePathDeltaAnnotation from .file_citation_delta_annotation import FileCitationDeltaAnnotation __all__ = ["AnnotationDelta"] AnnotationDelta: TypeAlias = Annotated[ Union[FileCitationDeltaAnnotation, FilePathDeltaAnnotation], PropertyInfo(discriminator="type") ]
python
github
https://github.com/openai/openai-python
src/openai/types/beta/threads/annotation_delta.py
package opts import ( "errors" "fmt" "os" "runtime" "testing" "gotest.tools/v3/assert" ) func TestValidateEnv(t *testing.T) { type testCase struct { value string expected string err error } tests := []testCase{ { value: "a", expected: "a", }, { value: "something", expected: "something", }, { value: "_=a", expected: "_=a", }, { value: "env1=value1", expected: "env1=value1", }, { value: "_env1=value1", expected: "_env1=value1", }, { value: "env2=value2=value3", expected: "env2=value2=value3", }, { value: "env3=abc!qwe", expected: "env3=abc!qwe", }, { value: "env_4=value 4", expected: "env_4=value 4", }, { value: "PATH", expected: fmt.Sprintf("PATH=%v", os.Getenv("PATH")), }, { value: "=a", err: errors.New("invalid environment variable: =a"), }, { value: "PATH=", expected: "PATH=", }, { value: "PATH=something", expected: "PATH=something", }, { value: "asd!qwe", expected: "asd!qwe", }, { value: "1asd", expected: "1asd", }, { value: "123", expected: "123", }, { value: "some space", expected: "some space", }, { value: " some space before", expected: " some space before", }, { value: "some space after ", expected: "some space after ", }, { value: "=", err: errors.New("invalid environment variable: ="), }, } if runtime.GOOS == "windows" { // Environment variables are case in-sensitive on Windows tests = append(tests, testCase{ value: "PaTh", expected: fmt.Sprintf("PaTh=%v", os.Getenv("PATH")), err: nil, }) } for _, tc := range tests { t.Run(tc.value, func(t *testing.T) { actual, err := ValidateEnv(tc.value) if tc.err == nil { assert.NilError(t, err) } else { assert.Error(t, err, tc.err.Error()) } assert.Equal(t, actual, tc.expected) }) } }
go
github
https://github.com/moby/moby
daemon/pkg/opts/env_test.go
Title in your templates: {{ obj }}
html
github
https://github.com/django/django
tests/syndication_tests/templates/syndication/title.html
#
# LMS Interface to external queueing system (xqueue)
#
# NOTE(review): this module uses Python-2-only constructs
# (`except ValueError, err` comma syntax and the `unicode` builtin);
# it will not import under Python 3 as written.

import hashlib
import json
import logging

import requests

import dogstats_wrapper as dog_stats_api

log = logging.getLogger(__name__)
dateformat = '%Y%m%d%H%M%S'

XQUEUE_METRIC_NAME = 'edxapp.xqueue'

# Wait time for response from Xqueue.
XQUEUE_TIMEOUT = 35  # seconds


def make_hashkey(seed):
    """
    Generate a string key by hashing.

    Returns the hex MD5 digest of str(seed). Used as an opaque key,
    not for security purposes.
    """
    h = hashlib.md5()
    h.update(str(seed))
    return h.hexdigest()


def make_xheader(lms_callback_url, lms_key, queue_name):
    """
    Generate header for delivery and reply of queue request.

    Xqueue header is a JSON-serialized dict:
        { 'lms_callback_url': url to which xqueue will return the request (string),
          'lms_key': secret key used by LMS to protect its state (string),
          'queue_name': designate a specific queue within xqueue server, e.g. 'MITx-6.00x' (string)
        }
    """
    return json.dumps({
        'lms_callback_url': lms_callback_url,
        'lms_key': lms_key,
        'queue_name': queue_name
    })


def parse_xreply(xreply):
    """
    Parse the reply from xqueue. Messages are JSON-serialized dict:
        { 'return_code': 0 (success), 1 (fail)
          'content': Message from xqueue (string)
        }

    Returns a (return_code, content) tuple; malformed JSON yields
    (1, 'unexpected reply from server').
    """
    try:
        xreply = json.loads(xreply)
    except ValueError, err:  # Python 2 comma syntax; py3 needs `as err`
        log.error(err)
        return (1, 'unexpected reply from server')

    return_code = xreply['return_code']
    content = xreply['content']
    return (return_code, content)


class XQueueInterface(object):
    """
    Interface to the external grading system.

    Wraps a requests.Session so that the login cookie obtained by
    _login() is reused by subsequent submissions.
    """

    def __init__(self, url, django_auth, requests_auth=None):
        # Base URL of the xqueue server; `unicode` builtin is py2-only.
        self.url = unicode(url)
        # dict with 'username'/'password' used by _login().
        self.auth = django_auth
        self.session = requests.Session()
        # Optional HTTP-level (e.g. basic) auth applied to every request.
        self.session.auth = requests_auth

    def send_to_queue(self, header, body, files_to_upload=None):
        """
        Submit a request to xqueue.

        header: JSON-serialized dict in the format described in
            'xqueue_interface.make_xheader'
        body: Serialized data for the recipient behind the queueing service.
            The operation of xqueue is agnostic to the contents of 'body'
        files_to_upload: List of file objects to be uploaded to xqueue
            along with queue request

        Returns (error_code, msg) where error_code != 0 indicates an error
        """
        # log the send to xqueue
        header_info = json.loads(header)
        queue_name = header_info.get('queue_name', u'')
        dog_stats_api.increment(XQUEUE_METRIC_NAME, tags=[
            u'action:send_to_queue',
            u'queue:{}'.format(queue_name)
        ])

        # Attempt to send to queue
        (error, msg) = self._send_to_queue(header, body, files_to_upload)

        # Log in, then try again
        if error and (msg == 'login_required'):
            (error, content) = self._login()
            if error != 0:
                # when the login fails
                log.debug("Failed to login to queue: %s", content)
                return (error, content)
            if files_to_upload is not None:
                # Need to rewind file pointers so the retry re-uploads
                # the full file contents, not what remains after the
                # first (failed) attempt consumed them.
                for f in files_to_upload:
                    f.seek(0)
            (error, msg) = self._send_to_queue(header, body, files_to_upload)

        return (error, msg)

    def _login(self):
        # POST credentials to the xqueue login endpoint; the resulting
        # session cookie lives on self.session.
        payload = {
            'username': self.auth['username'],
            'password': self.auth['password']
        }
        return self._http_post(self.url + '/xqueue/login/', payload)

    def _send_to_queue(self, header, body, files_to_upload):
        # Submit one queue request; files are keyed by their .name attribute.
        payload = {
            'xqueue_header': header,
            'xqueue_body': body
        }
        files = {}
        if files_to_upload is not None:
            for f in files_to_upload:
                files.update({f.name: f})

        return self._http_post(self.url + '/xqueue/submit/', payload, files=files)

    def _http_post(self, url, data, files=None):
        # Returns (error_code, content); connection failures and non-200
        # statuses are mapped to error tuples rather than raised.
        try:
            r = self.session.post(url, data=data, files=files)
        except requests.exceptions.ConnectionError, err:  # py2 comma syntax
            log.error(err)
            return (1, 'cannot connect to server')

        if r.status_code not in [200]:
            return (1, 'unexpected HTTP status code [%d]' % r.status_code)

        return parse_xreply(r.text)
unknown
codeparrot/codeparrot-clean
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Arrays; import java.util.Base64; import java.util.List; import java.util.Set; /** * This class defines an immutable universally unique identifier (UUID). It represents a 128-bit value. * More specifically, the random UUIDs generated by this class are variant 2 (Leach-Salz) version 4 UUIDs. * This is the same type of UUID as the ones generated by java.util.UUID. The toString() method prints * using the base64 string encoding. Likewise, the fromString method expects a base64 string encoding. */ public class Uuid implements Comparable<Uuid> { /** * A reserved UUID. Will never be returned by the randomUuid method. */ public static final Uuid ONE_UUID = new Uuid(0L, 1L); /** * A UUID for the metadata topic in KRaft mode. Will never be returned by the randomUuid method. */ public static final Uuid METADATA_TOPIC_ID = ONE_UUID; /** * A UUID that represents a null or empty UUID. Will never be returned by the randomUuid method. */ public static final Uuid ZERO_UUID = new Uuid(0L, 0L); /** * The set of reserved UUIDs that will never be returned by the randomUuid method. 
*/ public static final Set<Uuid> RESERVED = Set.of(ZERO_UUID, ONE_UUID); private final long mostSignificantBits; private final long leastSignificantBits; /** * Constructs a 128-bit type 4 UUID where the first long represents the most significant 64 bits * and the second long represents the least significant 64 bits. */ public Uuid(long mostSigBits, long leastSigBits) { this.mostSignificantBits = mostSigBits; this.leastSignificantBits = leastSigBits; } private static Uuid unsafeRandomUuid() { java.util.UUID jUuid = java.util.UUID.randomUUID(); return new Uuid(jUuid.getMostSignificantBits(), jUuid.getLeastSignificantBits()); } /** * Static factory to retrieve a type 4 (pseudo randomly generated) UUID. * * This will not generate a UUID equal to 0, 1, or one whose string representation starts with a dash ("-") */ public static Uuid randomUuid() { Uuid uuid = unsafeRandomUuid(); while (RESERVED.contains(uuid) || uuid.toString().startsWith("-")) { uuid = unsafeRandomUuid(); } return uuid; } /** * Returns the most significant bits of the UUID's 128 value. */ public long getMostSignificantBits() { return this.mostSignificantBits; } /** * Returns the least significant bits of the UUID's 128 value. */ public long getLeastSignificantBits() { return this.leastSignificantBits; } /** * Returns true iff obj is another Uuid represented by the same two long values. */ @Override public boolean equals(Object obj) { if ((null == obj) || (obj.getClass() != this.getClass())) return false; Uuid id = (Uuid) obj; return this.mostSignificantBits == id.mostSignificantBits && this.leastSignificantBits == id.leastSignificantBits; } /** * Returns a hash code for this UUID */ @Override public int hashCode() { long xor = mostSignificantBits ^ leastSignificantBits; return (int) (xor >> 32) ^ (int) xor; } /** * Returns a base64 string encoding of the UUID. 
*/ @Override public String toString() { return Base64.getUrlEncoder().withoutPadding().encodeToString(getBytesFromUuid()); } /** * Creates a UUID based on a base64 string encoding used in the toString() method. */ public static Uuid fromString(String str) { if (str.length() > 24) { throw new IllegalArgumentException("Input string with prefix `" + str.substring(0, 24) + "` is too long to be decoded as a base64 UUID"); } ByteBuffer uuidBytes = ByteBuffer.wrap(Base64.getUrlDecoder().decode(str)); if (uuidBytes.remaining() != 16) { throw new IllegalArgumentException("Input string `" + str + "` decoded as " + uuidBytes.remaining() + " bytes, which is not equal to the expected 16 bytes " + "of a base64-encoded UUID"); } return new Uuid(uuidBytes.getLong(), uuidBytes.getLong()); } private byte[] getBytesFromUuid() { // Extract bytes for uuid which is 128 bits (or 16 bytes) long. ByteBuffer uuidBytes = ByteBuffer.wrap(new byte[16]); uuidBytes.putLong(this.mostSignificantBits); uuidBytes.putLong(this.leastSignificantBits); return uuidBytes.array(); } @Override public int compareTo(Uuid other) { if (mostSignificantBits > other.mostSignificantBits) { return 1; } else if (mostSignificantBits < other.mostSignificantBits) { return -1; } else if (leastSignificantBits > other.leastSignificantBits) { return 1; } else if (leastSignificantBits < other.leastSignificantBits) { return -1; } else { return 0; } } /** * Convert a list of Uuid to an array of Uuid. * * @param list The input list * @return The output array */ public static Uuid[] toArray(List<Uuid> list) { if (list == null) return null; Uuid[] array = new Uuid[list.size()]; for (int i = 0; i < list.size(); i++) { array[i] = list.get(i); } return array; } /** * Convert an array of Uuids to a list of Uuid. 
* * @param array The input array * @return The output list */ public static List<Uuid> toList(Uuid[] array) { if (array == null) return null; List<Uuid> list = new ArrayList<>(array.length); list.addAll(Arrays.asList(array)); return list; } }
java
github
https://github.com/apache/kafka
clients/src/main/java/org/apache/kafka/common/Uuid.java
# types.py # Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Compatiblity namespace for sqlalchemy.sql.types. """ __all__ = ['TypeEngine', 'TypeDecorator', 'UserDefinedType', 'INT', 'CHAR', 'VARCHAR', 'NCHAR', 'NVARCHAR', 'TEXT', 'Text', 'FLOAT', 'NUMERIC', 'REAL', 'DECIMAL', 'TIMESTAMP', 'DATETIME', 'CLOB', 'BLOB', 'BINARY', 'VARBINARY', 'BOOLEAN', 'BIGINT', 'SMALLINT', 'INTEGER', 'DATE', 'TIME', 'String', 'Integer', 'SmallInteger', 'BigInteger', 'Numeric', 'Float', 'DateTime', 'Date', 'Time', 'LargeBinary', 'Binary', 'Boolean', 'Unicode', 'Concatenable', 'UnicodeText', 'PickleType', 'Interval', 'Enum'] from .sql.type_api import ( adapt_type, TypeEngine, TypeDecorator, Variant, to_instance, UserDefinedType ) from .sql.sqltypes import ( BIGINT, BINARY, BLOB, BOOLEAN, BigInteger, Binary, _Binary, Boolean, CHAR, CLOB, Concatenable, DATE, DATETIME, DECIMAL, Date, DateTime, Enum, FLOAT, Float, INT, INTEGER, Integer, Interval, LargeBinary, NCHAR, NVARCHAR, NullType, NULLTYPE, NUMERIC, Numeric, PickleType, REAL, SchemaType, SMALLINT, SmallInteger, String, STRINGTYPE, TEXT, TIME, TIMESTAMP, Text, Time, Unicode, UnicodeText, VARBINARY, VARCHAR, _type_map )
unknown
codeparrot/codeparrot-clean
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Contains functions for evaluation and summarization of metrics. The evaluation.py module contains helper functions for evaluating TensorFlow modules using a variety of metrics and summarizing the results. ********************** * Evaluating Metrics * ********************** In the simplest use case, we use a model to create the predictions, then specify the metrics and finally call the `evaluation` method: # Create model and obtain the predictions: images, labels = LoadData(...) 
predictions = MyModel(images) # Choose the metrics to compute: names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({ "accuracy": slim.metrics.accuracy(predictions, labels), "mse": slim.metrics.mean_squared_error(predictions, labels), }) inital_op = tf.group( tf.global_variables_initializer(), tf.local_variables_initializer()) with tf.Session() as sess: metric_values = slim.evaluation( sess, num_evals=1, inital_op=initial_op, eval_op=names_to_updates.values(), final_op=name_to_values.values()) for metric, value in zip(names_to_values.keys(), metric_values): logging.info('Metric %s has value: %f', metric, value) ************************************************ * Evaluating a Checkpointed Model with Metrics * ************************************************ Often, one wants to evaluate a model checkpoint saved on disk. This can be performed once or repeatedly on a set schedule. To evaluate a particular model, users define zero or more metrics and zero or more summaries and call the evaluation_loop method: # Create model and obtain the predictions: images, labels = LoadData(...) 
predictions = MyModel(images) # Choose the metrics to compute: names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({ "accuracy": slim.metrics.accuracy(predictions, labels), "mse": slim.metrics.mean_squared_error(predictions, labels), }) # Define the summaries to write: for metric_name, metric_value in metrics_to_values.iteritems(): tf.summary.scalar(metric_name, metric_value) checkpoint_dir = '/tmp/my_model_dir/' log_dir = '/tmp/my_model_eval/' # We'll evaluate 1000 batches: num_evals = 1000 # Evaluate every 10 minutes: slim.evaluation_loop( '', checkpoint_dir, logdir, num_evals=num_evals, eval_op=names_to_updates.values(), summary_op=tf.contrib.deprecated.merge_summary(summary_ops), eval_interval_secs=600) ************************************************** * Evaluating a Checkpointed Model with Summaries * ************************************************** At times, an evaluation can be performed without metrics at all but rather with only summaries. The user need only leave out the 'eval_op' argument: # Create model and obtain the predictions: images, labels = LoadData(...) predictions = MyModel(images) # Define the summaries to write: tf.summary.scalar(...) tf.summary.histogram(...) checkpoint_dir = '/tmp/my_model_dir/' log_dir = '/tmp/my_model_eval/' # Evaluate once every 10 minutes. 
slim.evaluation_loop( master='', checkpoint_dir, logdir, num_evals=1, summary_op=tf.contrib.deprecated.merge_summary(summary_ops), eval_interval_secs=600) """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.contrib.training.python.training import evaluation from tensorflow.python.summary import summary from tensorflow.python.training import monitored_session from tensorflow.python.training import saver as tf_saver __all__ = [ 'evaluate_once', 'evaluation_loop', 'wait_for_new_checkpoint', 'checkpoints_iterator', ] wait_for_new_checkpoint = evaluation.wait_for_new_checkpoint checkpoints_iterator = evaluation.checkpoints_iterator _USE_DEFAULT = 0 def evaluate_once(master, checkpoint_path, logdir, num_evals=1, initial_op=None, initial_op_feed_dict=None, eval_op=None, eval_op_feed_dict=None, final_op=None, final_op_feed_dict=None, summary_op=_USE_DEFAULT, summary_op_feed_dict=None, variables_to_restore=None, session_config=None): """Evaluates the model at the given checkpoint path. Args: master: The BNS address of the TensorFlow master. checkpoint_path: The path to a checkpoint to use for evaluation. logdir: The directory where the TensorFlow summaries are written to. num_evals: The number of times to run `eval_op`. initial_op: An operation run at the beginning of evaluation. initial_op_feed_dict: A feed dictionary to use when executing `initial_op`. eval_op: A operation run `num_evals` times. eval_op_feed_dict: The feed dictionary to use when executing the `eval_op`. final_op: An operation to execute after all of the `eval_op` executions. The value of `final_op` is returned. final_op_feed_dict: A feed dictionary to use when executing `final_op`. summary_op: The summary_op to evaluate after running TF-Slims metric ops. By default the summary_op is set to tf.summary.merge_all(). summary_op_feed_dict: An optional feed dictionary to use when running the `summary_op`. 
variables_to_restore: A list of TensorFlow variables to restore during evaluation. If the argument is left as `None` then slim.variables.GetVariablesToRestore() is used. session_config: An instance of `tf.ConfigProto` that will be used to configure the `Session`. If left as `None`, the default will be used. Returns: The value of `final_op` or `None` if `final_op` is `None`. """ if summary_op == _USE_DEFAULT: summary_op = summary.merge_all() hooks = [evaluation.StopAfterNEvalsHook(num_evals),] if summary_op is not None: hooks.append(evaluation.SummaryAtEndHook( log_dir=logdir, summary_op=summary_op, feed_dict=summary_op_feed_dict)) saver = None if variables_to_restore is not None: saver = tf_saver.Saver(variables_to_restore) return evaluation.evaluate_once( checkpoint_path, master=master, scaffold=monitored_session.Scaffold( init_op=initial_op, init_feed_dict=initial_op_feed_dict, saver=saver), eval_ops=eval_op, feed_dict=eval_op_feed_dict, final_ops=final_op, final_ops_feed_dict=final_op_feed_dict, hooks=hooks, config=session_config) def evaluation_loop(master, checkpoint_dir, logdir, num_evals=1, initial_op=None, initial_op_feed_dict=None, init_fn=None, eval_op=None, eval_op_feed_dict=None, final_op=None, final_op_feed_dict=None, summary_op=_USE_DEFAULT, summary_op_feed_dict=None, variables_to_restore=None, eval_interval_secs=60, max_number_of_evaluations=None, session_config=None, timeout=None, hooks=None): """Runs TF-Slim's Evaluation Loop. Args: master: The BNS address of the TensorFlow master. checkpoint_dir: The directory where checkpoints are stored. logdir: The directory where the TensorFlow summaries are written to. num_evals: The number of times to run `eval_op`. initial_op: An operation run at the beginning of evaluation. initial_op_feed_dict: A feed dictionary to use when executing `initial_op`. init_fn: An optional callable to be executed after `init_op` is called. The callable must accept one argument, the session being initialized. 
eval_op: A operation run `num_evals` times. eval_op_feed_dict: The feed dictionary to use when executing the `eval_op`. final_op: An operation to execute after all of the `eval_op` executions. The value of `final_op` is returned. final_op_feed_dict: A feed dictionary to use when executing `final_op`. summary_op: The summary_op to evaluate after running TF-Slims metric ops. By default the summary_op is set to tf.summary.merge_all(). summary_op_feed_dict: An optional feed dictionary to use when running the `summary_op`. variables_to_restore: A list of TensorFlow variables to restore during evaluation. If the argument is left as `None` then slim.variables.GetVariablesToRestore() is used. eval_interval_secs: The minimum number of seconds between evaluations. max_number_of_evaluations: the max number of iterations of the evaluation. If the value is left as 'None', the evaluation continues indefinitely. session_config: An instance of `tf.ConfigProto` that will be used to configure the `Session`. If left as `None`, the default will be used. timeout: The maximum amount of time to wait between checkpoints. If left as `None`, then the process will wait indefinitely. hooks: A list of additional SessionRunHook objects to pass during repeated evaluations. Returns: The value of `final_op` or `None` if `final_op` is `None`. """ if summary_op == _USE_DEFAULT: summary_op = summary.merge_all() all_hooks = [evaluation.StopAfterNEvalsHook(num_evals),] if summary_op is not None: all_hooks.append(evaluation.SummaryAtEndHook( log_dir=logdir, summary_op=summary_op, feed_dict=summary_op_feed_dict)) if hooks is not None: # Add custom hooks if provided. 
all_hooks.extend(hooks) saver = None if variables_to_restore is not None: saver = tf_saver.Saver(variables_to_restore) return evaluation.evaluate_repeatedly( checkpoint_dir, master=master, scaffold=monitored_session.Scaffold( init_op=initial_op, init_feed_dict=initial_op_feed_dict, init_fn=init_fn, saver=saver), eval_ops=eval_op, feed_dict=eval_op_feed_dict, final_ops=final_op, final_ops_feed_dict=final_op_feed_dict, eval_interval_secs=eval_interval_secs, hooks=all_hooks, config=session_config, max_number_of_evaluations=max_number_of_evaluations, timeout=timeout)
unknown
codeparrot/codeparrot-clean
/** * hdr_histogram.h * Written by Michael Barker and released to the public domain, * as explained at http://creativecommons.org/publicdomain/zero/1.0/ * * The source for the hdr_histogram utilises a few C99 constructs, specifically * the use of stdint/stdbool and inline variable declaration. */ #ifndef HDR_HISTOGRAM_H #define HDR_HISTOGRAM_H 1 #include <stdint.h> #include <stdbool.h> #include <stdio.h> struct hdr_histogram { int64_t lowest_discernible_value; int64_t highest_trackable_value; int32_t unit_magnitude; int32_t significant_figures; int32_t sub_bucket_half_count_magnitude; int32_t sub_bucket_half_count; int64_t sub_bucket_mask; int32_t sub_bucket_count; int32_t bucket_count; int64_t min_value; int64_t max_value; int32_t normalizing_index_offset; double conversion_ratio; int32_t counts_len; int64_t total_count; int64_t* counts; }; #ifdef __cplusplus extern "C" { #endif /** * Allocate the memory and initialise the hdr_histogram. * * Due to the size of the histogram being the result of some reasonably * involved math on the input parameters this function it is tricky to stack allocate. * The histogram should be released with hdr_close * * @param lowest_discernible_value The smallest possible value that is distinguishable from 0. * Must be a positive integer that is >= 1. May be internally rounded down to nearest power of 2. * @param highest_trackable_value The largest possible value to be put into the * histogram. * @param significant_figures The level of precision for this histogram, i.e. the number * of figures in a decimal number that will be maintained. E.g. a value of 3 will mean * the results from the histogram will be accurate up to the first three digits. Must * be a value between 1 and 5 (inclusive). * @param result Output parameter to capture allocated histogram. * @return 0 on success, EINVAL if lowest_discernible_value is < 1 or the * significant_figure value is outside of the allowed range, ENOMEM if malloc * failed. 
*/ int hdr_init( int64_t lowest_discernible_value, int64_t highest_trackable_value, int significant_figures, struct hdr_histogram** result); /** * Free the memory and close the hdr_histogram. * * @param h The histogram you want to close. */ void hdr_close(struct hdr_histogram* h); /** * Allocate the memory and initialise the hdr_histogram. This is the equivalent of calling * hdr_init(1, highest_trackable_value, significant_figures, result); * * @deprecated use hdr_init. */ int hdr_alloc(int64_t highest_trackable_value, int significant_figures, struct hdr_histogram** result); /** * Reset a histogram to zero - empty out a histogram and re-initialise it * * If you want to re-use an existing histogram, but reset everything back to zero, this * is the routine to use. * * @param h The histogram you want to reset to empty. * */ void hdr_reset(struct hdr_histogram* h); /** * Get the memory size of the hdr_histogram. * * @param h "This" pointer * @return The amount of memory used by the hdr_histogram in bytes */ size_t hdr_get_memory_size(struct hdr_histogram* h); /** * Records a value in the histogram, will round this value of to a precision at or better * than the significant_figure specified at construction time. * * @param h "This" pointer * @param value Value to add to the histogram * @return false if the value is larger than the highest_trackable_value and can't be recorded, * true otherwise. */ bool hdr_record_value(struct hdr_histogram* h, int64_t value); /** * Records a value in the histogram, will round this value of to a precision at or better * than the significant_figure specified at construction time. * * Will record this value atomically, however the whole structure may appear inconsistent * when read concurrently with this update. Do NOT mix calls to this method with calls * to non-atomic updates. 
* * @param h "This" pointer * @param value Value to add to the histogram * @return false if the value is larger than the highest_trackable_value and can't be recorded, * true otherwise. */ bool hdr_record_value_atomic(struct hdr_histogram* h, int64_t value); /** * Records count values in the histogram, will round this value of to a * precision at or better than the significant_figure specified at construction * time. * * @param h "This" pointer * @param value Value to add to the histogram * @param count Number of 'value's to add to the histogram * @return false if any value is larger than the highest_trackable_value and can't be recorded, * true otherwise. */ bool hdr_record_values(struct hdr_histogram* h, int64_t value, int64_t count); /** * Records count values in the histogram, will round this value of to a * precision at or better than the significant_figure specified at construction * time. * * Will record this value atomically, however the whole structure may appear inconsistent * when read concurrently with this update. Do NOT mix calls to this method with calls * to non-atomic updates. * * @param h "This" pointer * @param value Value to add to the histogram * @param count Number of 'value's to add to the histogram * @return false if any value is larger than the highest_trackable_value and can't be recorded, * true otherwise. */ bool hdr_record_values_atomic(struct hdr_histogram* h, int64_t value, int64_t count); /** * Record a value in the histogram and backfill based on an expected interval. * * Records a value in the histogram, will round this value of to a precision at or better * than the significant_figure specified at construction time. This is specifically used * for recording latency. If the value is larger than the expected_interval then the * latency recording system has experienced co-ordinated omission. This method fills in the * values that would have occurred had the client providing the load not been blocked. 
* @param h "This" pointer * @param value Value to add to the histogram * @param expected_interval The delay between recording values. * @return false if the value is larger than the highest_trackable_value and can't be recorded, * true otherwise. */ bool hdr_record_corrected_value(struct hdr_histogram* h, int64_t value, int64_t expected_interval); /** * Record a value in the histogram and backfill based on an expected interval. * * Records a value in the histogram, will round this value of to a precision at or better * than the significant_figure specified at construction time. This is specifically used * for recording latency. If the value is larger than the expected_interval then the * latency recording system has experienced co-ordinated omission. This method fills in the * values that would have occurred had the client providing the load not been blocked. * * Will record this value atomically, however the whole structure may appear inconsistent * when read concurrently with this update. Do NOT mix calls to this method with calls * to non-atomic updates. * * @param h "This" pointer * @param value Value to add to the histogram * @param expected_interval The delay between recording values. * @return false if the value is larger than the highest_trackable_value and can't be recorded, * true otherwise. */ bool hdr_record_corrected_value_atomic(struct hdr_histogram* h, int64_t value, int64_t expected_interval); /** * Record a value in the histogram 'count' times. Applies the same correcting logic * as 'hdr_record_corrected_value'. * * @param h "This" pointer * @param value Value to add to the histogram * @param count Number of 'value's to add to the histogram * @param expected_interval The delay between recording values. * @return false if the value is larger than the highest_trackable_value and can't be recorded, * true otherwise. 
*/ bool hdr_record_corrected_values(struct hdr_histogram* h, int64_t value, int64_t count, int64_t expected_interval); /** * Record a value in the histogram 'count' times. Applies the same correcting logic * as 'hdr_record_corrected_value'. * * Will record this value atomically, however the whole structure may appear inconsistent * when read concurrently with this update. Do NOT mix calls to this method with calls * to non-atomic updates. * * @param h "This" pointer * @param value Value to add to the histogram * @param count Number of 'value's to add to the histogram * @param expected_interval The delay between recording values. * @return false if the value is larger than the highest_trackable_value and can't be recorded, * true otherwise. */ bool hdr_record_corrected_values_atomic(struct hdr_histogram* h, int64_t value, int64_t count, int64_t expected_interval); /** * Adds all of the values from 'from' to 'this' histogram. Will return the * number of values that are dropped when copying. Values will be dropped * if they around outside of h.lowest_discernible_value and * h.highest_trackable_value. * * @param h "This" pointer * @param from Histogram to copy values from. * @return The number of values dropped when copying. */ int64_t hdr_add(struct hdr_histogram* h, const struct hdr_histogram* from); /** * Adds all of the values from 'from' to 'this' histogram. Will return the * number of values that are dropped when copying. Values will be dropped * if they around outside of h.lowest_discernible_value and * h.highest_trackable_value. * * @param h "This" pointer * @param from Histogram to copy values from. * @return The number of values dropped when copying. */ int64_t hdr_add_while_correcting_for_coordinated_omission( struct hdr_histogram* h, struct hdr_histogram* from, int64_t expected_interval); /** * Get minimum value from the histogram. Will return 2^63-1 if the histogram * is empty. 
* * @param h "This" pointer */ int64_t hdr_min(const struct hdr_histogram* h); /** * Get maximum value from the histogram. Will return 0 if the histogram * is empty. * * @param h "This" pointer */ int64_t hdr_max(const struct hdr_histogram* h); /** * Get the value at a specific percentile. * * @param h "This" pointer. * @param percentile The percentile to get the value for */ int64_t hdr_value_at_percentile(const struct hdr_histogram* h, double percentile); /** * Get the values at the given percentiles. * * @param h "This" pointer. * @param percentiles The ordered percentiles array to get the values for. * @param length Number of elements in the arrays. * @param values Destination array containing the values at the given percentiles. * The values array should be allocated by the caller. * @return 0 on success, ENOMEM if the provided destination array is null. */ int hdr_value_at_percentiles(const struct hdr_histogram *h, const double *percentiles, int64_t *values, size_t length); /** * Gets the standard deviation for the values in the histogram. * * @param h "This" pointer * @return The standard deviation */ double hdr_stddev(const struct hdr_histogram* h); /** * Gets the mean for the values in the histogram. * * @param h "This" pointer * @return The mean */ double hdr_mean(const struct hdr_histogram* h); /** * Determine if two values are equivalent with the histogram's resolution. * Where "equivalent" means that value samples recorded for any two * equivalent values are counted in a common total count. * * @param h "This" pointer * @param a first value to compare * @param b second value to compare * @return 'true' if values are equivalent with the histogram's resolution. */ bool hdr_values_are_equivalent(const struct hdr_histogram* h, int64_t a, int64_t b); /** * Get the lowest value that is equivalent to the given value within the histogram's resolution. 
* Where "equivalent" means that value samples recorded for any two * equivalent values are counted in a common total count. * * @param h "This" pointer * @param value The given value * @return The lowest value that is equivalent to the given value within the histogram's resolution. */ int64_t hdr_lowest_equivalent_value(const struct hdr_histogram* h, int64_t value); /** * Get the count of recorded values at a specific value * (to within the histogram resolution at the value level). * * @param h "This" pointer * @param value The value for which to provide the recorded count * @return The total count of values recorded in the histogram within the value range that is * {@literal >=} lowestEquivalentValue(<i>value</i>) and {@literal <=} highestEquivalentValue(<i>value</i>) */ int64_t hdr_count_at_value(const struct hdr_histogram* h, int64_t value); int64_t hdr_count_at_index(const struct hdr_histogram* h, int32_t index); int64_t hdr_value_at_index(const struct hdr_histogram* h, int32_t index); struct hdr_iter_percentiles { bool seen_last_value; int32_t ticks_per_half_distance; double percentile_to_iterate_to; double percentile; }; struct hdr_iter_recorded { int64_t count_added_in_this_iteration_step; }; struct hdr_iter_linear { int64_t value_units_per_bucket; int64_t count_added_in_this_iteration_step; int64_t next_value_reporting_level; int64_t next_value_reporting_level_lowest_equivalent; }; struct hdr_iter_log { double log_base; int64_t count_added_in_this_iteration_step; int64_t next_value_reporting_level; int64_t next_value_reporting_level_lowest_equivalent; }; /** * The basic iterator. This is a generic structure * that supports all of the types of iteration. Use * the appropriate initialiser to get the desired * iteration. 
* * @ */ struct hdr_iter { const struct hdr_histogram* h; /** raw index into the counts array */ int32_t counts_index; /** snapshot of the length at the time the iterator is created */ int64_t total_count; /** value directly from array for the current counts_index */ int64_t count; /** sum of all of the counts up to and including the count at this index */ int64_t cumulative_count; /** The current value based on counts_index */ int64_t value; int64_t highest_equivalent_value; int64_t lowest_equivalent_value; int64_t median_equivalent_value; int64_t value_iterated_from; int64_t value_iterated_to; union { struct hdr_iter_percentiles percentiles; struct hdr_iter_recorded recorded; struct hdr_iter_linear linear; struct hdr_iter_log log; } specifics; bool (* _next_fp)(struct hdr_iter* iter); }; /** * Initalises the basic iterator. * * @param itr 'This' pointer * @param h The histogram to iterate over */ void hdr_iter_init(struct hdr_iter* iter, const struct hdr_histogram* h); /** * Initialise the iterator for use with percentiles. */ void hdr_iter_percentile_init(struct hdr_iter* iter, const struct hdr_histogram* h, int32_t ticks_per_half_distance); /** * Initialise the iterator for use with recorded values. */ void hdr_iter_recorded_init(struct hdr_iter* iter, const struct hdr_histogram* h); /** * Initialise the iterator for use with linear values. */ void hdr_iter_linear_init( struct hdr_iter* iter, const struct hdr_histogram* h, int64_t value_units_per_bucket); /** * Update the iterator value units per bucket */ void hdr_iter_linear_set_value_units_per_bucket(struct hdr_iter* iter, int64_t value_units_per_bucket); /** * Initialise the iterator for use with logarithmic values */ void hdr_iter_log_init( struct hdr_iter* iter, const struct hdr_histogram* h, int64_t value_units_first_bucket, double log_base); /** * Iterate to the next value for the iterator. If there are no more values * available return faluse. 
* * @param itr 'This' pointer * @return 'false' if there are no values remaining for this iterator. */ bool hdr_iter_next(struct hdr_iter* iter); typedef enum { CLASSIC, CSV } format_type; /** * Print out a percentile based histogram to the supplied stream. Note that * this call will not flush the FILE, this is left up to the user. * * @param h 'This' pointer * @param stream The FILE to write the output to * @param ticks_per_half_distance The number of iteration steps per half-distance to 100% * @param value_scale Scale the output values by this amount * @param format_type Format to use, e.g. CSV. * @return 0 on success, error code on failure. EIO if an error occurs writing * the output. */ int hdr_percentiles_print( struct hdr_histogram* h, FILE* stream, int32_t ticks_per_half_distance, double value_scale, format_type format); /** * Internal allocation methods, used by hdr_dbl_histogram. */ struct hdr_histogram_bucket_config { int64_t lowest_discernible_value; int64_t highest_trackable_value; int64_t unit_magnitude; int64_t significant_figures; int32_t sub_bucket_half_count_magnitude; int32_t sub_bucket_half_count; int64_t sub_bucket_mask; int32_t sub_bucket_count; int32_t bucket_count; int32_t counts_len; }; int hdr_calculate_bucket_config( int64_t lowest_discernible_value, int64_t highest_trackable_value, int significant_figures, struct hdr_histogram_bucket_config* cfg); void hdr_init_preallocated(struct hdr_histogram* h, struct hdr_histogram_bucket_config* cfg); int64_t hdr_size_of_equivalent_value_range(const struct hdr_histogram* h, int64_t value); int64_t hdr_next_non_equivalent_value(const struct hdr_histogram* h, int64_t value); int64_t hdr_median_equivalent_value(const struct hdr_histogram* h, int64_t value); /** * Used to reset counters after importing data manually into the histogram, used by the logging code * and other custom serialisation tools. */ void hdr_reset_internal_counters(struct hdr_histogram* h); #ifdef __cplusplus } #endif #endif
c
github
https://github.com/redis/redis
deps/hdr_histogram/hdr_histogram.h
"""Thin Python wrapper around C binary reader for profiling data.""" import _remote_debugging from .gecko_collector import GeckoCollector from .stack_collector import FlamegraphCollector, CollapsedStackCollector from .pstats_collector import PstatsCollector class BinaryReader: """High-performance binary reader using C implementation. This reader uses memory-mapped I/O (on Unix) for fast replay of profiling data from binary files. Use as a context manager: with BinaryReader('profile.bin') as reader: info = reader.get_info() reader.replay_samples(collector, progress_callback) """ def __init__(self, filename): """Create a new binary reader. Args: filename: Path to input binary file """ self.filename = filename self._reader = None def __enter__(self): self._reader = _remote_debugging.BinaryReader(self.filename) return self def __exit__(self, exc_type, exc_val, exc_tb): if self._reader is not None: self._reader.close() self._reader = None return False def get_info(self): """Get metadata about the binary file. Returns: dict: File metadata including: - sample_count: Number of samples in the file - sample_interval_us: Sampling interval in microseconds - start_time_us: Start timestamp in microseconds - string_count: Number of unique strings - frame_count: Number of unique frames - compression: Compression type used """ if self._reader is None: raise RuntimeError("Reader not open. Use as context manager.") return self._reader.get_info() def replay_samples(self, collector, progress_callback=None): """Replay samples from binary file through a collector. This allows converting binary profiling data to other formats (e.g., flamegraph, pstats) by replaying through the appropriate collector. Args: collector: A Collector instance with a collect() method progress_callback: Optional callable(current, total) for progress Returns: int: Number of samples replayed """ if self._reader is None: raise RuntimeError("Reader not open. 
Use as context manager.") return self._reader.replay(collector, progress_callback) @property def sample_count(self): if self._reader is None: raise RuntimeError("Reader not open. Use as context manager.") return self._reader.get_info()['sample_count'] def get_stats(self): """Get reconstruction statistics from replay. Returns: dict: Statistics about record types decoded and samples reconstructed during replay. """ if self._reader is None: raise RuntimeError("Reader not open. Use as context manager.") return self._reader.get_stats() def convert_binary_to_format(input_file, output_file, output_format, sample_interval_usec=None, progress_callback=None): """Convert a binary profiling file to another format. Args: input_file: Path to input binary file output_file: Path to output file output_format: Target format ('flamegraph', 'collapsed', 'pstats', etc.) sample_interval_usec: Override sample interval (uses file's if None) progress_callback: Optional callable(current, total) for progress Returns: int: Number of samples converted """ with BinaryReader(input_file) as reader: info = reader.get_info() interval = sample_interval_usec or info['sample_interval_us'] # Create appropriate collector based on format if output_format == 'flamegraph': collector = FlamegraphCollector(interval) elif output_format == 'collapsed': collector = CollapsedStackCollector(interval) elif output_format == 'pstats': collector = PstatsCollector(interval) elif output_format == 'gecko': collector = GeckoCollector(interval) else: raise ValueError(f"Unknown output format: {output_format}") # Replay samples through collector count = reader.replay_samples(collector, progress_callback) # Export to target format collector.export(output_file) return count
python
github
https://github.com/python/cpython
Lib/profiling/sampling/binary_reader.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Classes and methods related to model_fn (deprecated). This module and all its submodules are deprecated. See [contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md) for migration instructions. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import six from tensorflow.contrib.framework import get_graph_from_inputs from tensorflow.contrib.learn.python.learn.estimators import constants from tensorflow.contrib.learn.python.learn.estimators import metric_key from tensorflow.contrib.learn.python.learn.estimators import prediction_key from tensorflow.python.estimator import model_fn as core_model_fn_lib from tensorflow.python.estimator.export import export_output as core_export_lib from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.platform import tf_logging as logging from tensorflow.python.saved_model import signature_constants from tensorflow.python.training import session_run_hook from tensorflow.python.util.deprecation import deprecated class ModeKeys(object): 
"""Standard names for model modes (deprecated). THIS CLASS IS DEPRECATED. The following standard keys are defined: * `TRAIN`: training mode. * `EVAL`: evaluation mode. * `INFER`: inference mode. """ TRAIN = 'train' EVAL = 'eval' INFER = 'infer' @classmethod def validate(cls, key): if key not in (cls.TRAIN, cls.EVAL, cls.INFER): raise ValueError('Invalid mode %s.' % key) class ModelFnOps( collections.namedtuple('ModelFnOps', [ 'predictions', 'loss', 'train_op', 'eval_metric_ops', 'output_alternatives', 'training_chief_hooks', 'training_hooks', 'scaffold', 'mode' ])): """Ops returned from a model_fn. THIS CLASS IS DEPRECATED. See [contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md) for general migration instructions. """ @deprecated(None, 'When switching to tf.estimator.Estimator, use ' 'tf.estimator.EstimatorSpec. You can use the `estimator_spec`' ' method to create an equivalent one.') def __new__(cls, mode, predictions=None, loss=None, train_op=None, eval_metric_ops=None, output_alternatives=None, training_chief_hooks=None, training_hooks=None, scaffold=None): """Creates a validated `ModelFnOps` instance. For a multi-headed model, the predictions dict here will contain the outputs of all of the heads. However: at serving time, requests will be made specifically for one or more heads, and the RPCs used for these requests may differ by problem type (i.e., regression, classification, other). The purpose of the output_alternatives dict is to aid in exporting a SavedModel from which such head-specific queries can be served. These output_alternatives will be combined with input_alternatives (see `saved_model_export_utils`) to produce a set of `SignatureDef`s specifying the valid requests that can be served from this model. For a single-headed model, it is still adviseable to provide output_alternatives with a single entry, because this is how the problem type is communicated for export and serving. 
If output_alternatives is not given, the resulting SavedModel will support only one head of unspecified type. Args: mode: One of `ModeKeys`. Specifies if this training, evaluation or prediction. predictions: Predictions `Tensor` or dict of `Tensor`. loss: Training loss `Tensor`. train_op: Op for the training step. eval_metric_ops: Dict of metric results keyed by name. The values of the dict are the results of calling a metric function, such as `Tensor`. output_alternatives: a dict of `{submodel_name: (problem_type, {tensor_name: Tensor})}`, where `submodel_name` is a submodel identifier that should be consistent across the pipeline (here likely taken from the name of each `Head`, for models that use them), `problem_type` is a `ProblemType`, `tensor_name` is a symbolic name for an output Tensor possibly but not necessarily taken from `PredictionKey`, and `Tensor` is the corresponding output Tensor itself. training_chief_hooks: A list of `SessionRunHook` objects that will be run on the chief worker during training. training_hooks: A list of `SessionRunHook` objects that will be run on all workers during training. scaffold: A `tf.train.Scaffold` object that can be used to set initialization, saver, and more to be used in training. Returns: A validated `ModelFnOps` object. Raises: ValueError: If validation fails. """ ModeKeys.validate(mode) # Assert all ops are from the same graph. get_graph_from_inputs((predictions, loss, train_op)) # Validate train_op. if train_op is None: if mode == ModeKeys.TRAIN: raise ValueError('Missing train_op.') elif not isinstance(train_op, ops.Operation): # TODO(ptucker): Should this be allowed? Consider raising error. train_op = ops.convert_to_tensor(train_op).op # Validate loss. if loss is None: if mode in (ModeKeys.TRAIN, ModeKeys.EVAL): raise ValueError('Missing loss.') else: loss = ops.convert_to_tensor(loss) loss_shape = loss.get_shape() if loss_shape.num_elements() not in (None, 1): raise ValueError('Loss must be scalar: %s.' 
% loss) if not loss_shape.is_compatible_with(tensor_shape.scalar()): loss = array_ops.reshape(loss, []) # Validate predictions. if predictions is None: if mode == ModeKeys.INFER or mode == ModeKeys.EVAL: raise ValueError('Missing predictions.') else: if isinstance(predictions, dict): predictions = { k: sparse_tensor.convert_to_tensor_or_sparse_tensor(v) for k, v in six.iteritems(predictions) } else: predictions = sparse_tensor.convert_to_tensor_or_sparse_tensor( predictions) # Validate eval_metric_ops if eval_metric_ops is None: eval_metric_ops = {} else: if not isinstance(eval_metric_ops, dict): raise ValueError('eval_metric_ops must be a dict.') # Validate hooks if training_chief_hooks is None: training_chief_hooks = [] if training_hooks is None: training_hooks = [] for hook in training_hooks + training_chief_hooks: if not isinstance(hook, session_run_hook.SessionRunHook): raise TypeError('All hooks returned from model_fn must be ' 'SessionRunHook instances, got instance of %s: %s' % (type(hook), hook)) return super(ModelFnOps, cls).__new__( cls, predictions=predictions, loss=loss, train_op=train_op, eval_metric_ops=eval_metric_ops, output_alternatives=output_alternatives, training_chief_hooks=training_chief_hooks, training_hooks=training_hooks, scaffold=scaffold, mode=mode) def estimator_spec(self, default_serving_output_alternative_key=None): """Creates an equivalent `EstimatorSpec`. Args: default_serving_output_alternative_key: Required for multiple heads. If you have multiple entries in `output_alternatives` dict (comparable to multiple heads), `EstimatorSpec` requires a default head that will be used if a Servo request does not explicitly mention which head to infer on. Pass the key of the output alternative here that you want to designate as default. 
A separate ExportOutpout for this default head wil be added to the export_outputs dict with the special key signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY, unless there is already an enry in output_alternatives with this special key. Returns: Instance of `EstimatorSpec` that is equivalent to this `ModelFnOps` Raises: ValueError: If problem type is unknown. """ def _scores(output_tensors): scores = output_tensors.get(prediction_key.PredictionKey.SCORES) if scores is None: scores = output_tensors.get(prediction_key.PredictionKey.PROBABILITIES) return scores def _classes(output_tensors): # pylint: disable=missing-docstring classes = output_tensors.get(prediction_key.PredictionKey.CLASSES) if classes is None: logging.warning( 'classes is None, Servo inference will not have class ids.') return None elif classes.dtype != dtypes.string: # Servo classification can only serve string classes logging.warning( 'classes is not string, Servo inference will not have class ids.') return None return classes def _export_output(problem_type, predictions): # pylint: disable=missing-docstring if problem_type == constants.ProblemType.LINEAR_REGRESSION: return core_export_lib.RegressionOutput(_scores(predictions)) if (problem_type == constants.ProblemType.CLASSIFICATION or problem_type == constants.ProblemType.LOGISTIC_REGRESSION): return core_export_lib.ClassificationOutput( scores=_scores(predictions), classes=_classes(predictions)) if problem_type == constants.ProblemType.UNSPECIFIED: return core_export_lib.PredictOutput(predictions) raise ValueError('Unknown problem_type=%s' % problem_type) # Converts output_alternatives export_outputs_dict = None if self.output_alternatives: output_alternatives = self.output_alternatives # Adds default output_alternative if needed. 
if (len(output_alternatives) > 1 and signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY not in output_alternatives): output_alternatives = output_alternatives.copy() output_alternatives[ signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] = ( output_alternatives[default_serving_output_alternative_key]) export_outputs_dict = {key: _export_output(*val) for key, val in output_alternatives.items()} def _get_eval_metric_ops(): """Returns self.eval_metric_ops without loss metric.""" result = {} for key, value in six.iteritems(self.eval_metric_ops): if key != metric_key.MetricKey.LOSS: result[key] = value return result # Convert the contrib mode enum to the core mode enum. # Note: mode already validated in __new__(). if self.mode == ModeKeys.TRAIN: core_mode = core_model_fn_lib.ModeKeys.TRAIN elif self.mode == ModeKeys.EVAL: core_mode = core_model_fn_lib.ModeKeys.EVAL elif self.mode == ModeKeys.INFER: core_mode = core_model_fn_lib.ModeKeys.PREDICT return core_model_fn_lib.EstimatorSpec( mode=core_mode, predictions=self.predictions, loss=self.loss, train_op=self.train_op, eval_metric_ops=_get_eval_metric_ops(), export_outputs=export_outputs_dict, training_chief_hooks=self.training_chief_hooks, training_hooks=self.training_hooks, scaffold=self.scaffold)
unknown
codeparrot/codeparrot-clean
@file:Suppress("INVISIBLE_MEMBER", "INVISIBLE_REFERENCE", "UNUSED") package kotlinx.coroutines.debug import kotlinx.coroutines.* import kotlinx.coroutines.debug.internal.* import kotlin.coroutines.* import kotlin.coroutines.jvm.internal.* /** * Class describing coroutine info such as its context, state and stacktrace. */ @ExperimentalCoroutinesApi public class CoroutineInfo internal constructor(delegate: DebugCoroutineInfo) { /** * [Coroutine context][coroutineContext] of the coroutine */ public val context: CoroutineContext = delegate.context /** * Last observed state of the coroutine */ public val state: State = State.valueOf(delegate.state) private val creationStackBottom: CoroutineStackFrame? = delegate.creationStackBottom /** * [Job] associated with a current coroutine or null. * May be later used in [DebugProbes.printJob]. */ public val job: Job? get() = context[Job] /** * Creation stacktrace of the coroutine. * Can be empty if [DebugProbes.enableCreationStackTraces] is not set. */ public val creationStackTrace: List<StackTraceElement> get() = creationStackTrace() private val lastObservedFrame: CoroutineStackFrame? = delegate.lastObservedFrame /** * Last observed stacktrace of the coroutine captured on its suspension or resumption point. * It means that for [running][State.RUNNING] coroutines resulting stacktrace is inaccurate and * reflects stacktrace of the resumption point, not the actual current stacktrace. */ public fun lastObservedStackTrace(): List<StackTraceElement> { var frame: CoroutineStackFrame? 
= lastObservedFrame ?: return emptyList() val result = ArrayList<StackTraceElement>() while (frame != null) { frame.getStackTraceElement()?.let { result.add(it) } frame = frame.callerFrame } return result } private fun creationStackTrace(): List<StackTraceElement> { val bottom = creationStackBottom ?: return emptyList() // Skip "Coroutine creation stacktrace" frame return sequence<StackTraceElement> { yieldFrames(bottom.callerFrame) }.toList() } private tailrec suspend fun SequenceScope<StackTraceElement>.yieldFrames(frame: CoroutineStackFrame?) { if (frame == null) return frame.getStackTraceElement()?.let { yield(it) } val caller = frame.callerFrame if (caller != null) { yieldFrames(caller) } } override fun toString(): String = "CoroutineInfo(state=$state,context=$context)" } /** * Current state of the coroutine. */ public enum class State { /** * Created, but not yet started. */ CREATED, /** * Started and running. */ RUNNING, /** * Suspended. */ SUSPENDED }
kotlin
github
https://github.com/Kotlin/kotlinx.coroutines
kotlinx-coroutines-debug/src/CoroutineInfo.kt
import sys import itertools if sys.version_info[0] < 3: PY3 = False basestring = basestring import __builtin__ as builtins import ConfigParser from StringIO import StringIO BytesIO = StringIO execfile = execfile func_code = lambda o: o.func_code func_globals = lambda o: o.func_globals im_func = lambda o: o.im_func from htmlentitydefs import name2codepoint import httplib from BaseHTTPServer import HTTPServer from SimpleHTTPServer import SimpleHTTPRequestHandler from BaseHTTPServer import BaseHTTPRequestHandler iteritems = lambda o: o.iteritems() long_type = long maxsize = sys.maxint next = lambda o: o.next() numeric_types = (int, long, float) unichr = unichr unicode = unicode bytes = str from urllib import url2pathname, splittag, pathname2url import urllib2 from urllib2 import urlopen, HTTPError, URLError, unquote, splituser from urlparse import urlparse, urlunparse, urljoin, urlsplit, urlunsplit filterfalse = itertools.ifilterfalse exec("""def reraise(tp, value, tb=None): raise tp, value, tb""") else: PY3 = True basestring = str import builtins import configparser as ConfigParser from io import StringIO, BytesIO func_code = lambda o: o.__code__ func_globals = lambda o: o.__globals__ im_func = lambda o: o.__func__ from html.entities import name2codepoint import http.client as httplib from http.server import HTTPServer, SimpleHTTPRequestHandler from http.server import BaseHTTPRequestHandler iteritems = lambda o: o.items() long_type = int maxsize = sys.maxsize next = next numeric_types = (int, float) unichr = chr unicode = str bytes = bytes from urllib.error import HTTPError, URLError import urllib.request as urllib2 from urllib.request import urlopen, url2pathname, pathname2url from urllib.parse import ( urlparse, urlunparse, unquote, splituser, urljoin, urlsplit, urlunsplit, splittag, ) filterfalse = itertools.filterfalse def execfile(fn, globs=None, locs=None): if globs is None: globs = globals() if locs is None: locs = globs f = open(fn, 'rb') try: source = 
f.read() finally: f.close() exec(compile(source, fn, 'exec'), globs, locs) def reraise(tp, value, tb=None): if value.__traceback__ is not tb: raise value.with_traceback(tb) raise value
unknown
codeparrot/codeparrot-clean
#!/usr/bin/python import numpy as np import matplotlib.pyplot as plt ratio = np.arange(0, 1.01, 0.05) # Data from AS 7018 # Other inputs are: main(7018, ratio, 3, 0.7, 0) #cost7018Algo0= [0.32816886868289885, 0.2208175601847247, 0.19251093813725606, 0.19213511824943674, 0.13080090885015863, 0.11537045883303, 0.09633102587606322, 0.07323609921126092, 0.09878431485204239, 0.05596993118756234, 0.049715428600860076, 0.03831044043548079, 0.06096882335052019, 0.028069296806645244, 0.022701113201547628, 0.021278104632073535, 0.013507052814121217, 0.026153265780405686, 0.0077527277733091485, 0.0031476608134015918, 0.0] #cost7018Algo1= [0.4642333921367071, 0.4639016472999131, 0.34012817677104124, 0.3340296081862053, 0.2969448732402059, 0.2461004974625, 0.2293186323481248, 0.24091214725010093, 0.19757595572768757, 0.1573243340317513, 0.18058477722058466, 0.12481420007040704, 0.10774395529769458, 0.09029783974164379, 0.09634065693940172, 0.08223281518431566, 0.09567750993009355, 0.04277884030146324, 0.028210440967304917, 0.017676653196543032, 0.004847344478943003] cost7018Algo0= [0.2688624303959933, 0.230281717027491, 0.18411789823954128, 0.1588118516922253, 0.12096749983086721, 0.1113147673120107, 0.1030153423862696, 0.09073808693426455, 0.0694635759217686, 0.05730782846758849, 0.046710212601724366, 0.048981939171668, 0.04710864836168703, 0.031022713107087735, 0.029085479109200608, 0.02071990211759934, 0.01666252096967843, 0.010496896997432531, 0.008803391094234036, 0.003640309725101591, 0.0] cost7018Algo1= [0.5487645391435545, 0.44189141471566334, 0.38568493049488173, 0.4030654170364792, 0.31721279641850014, 0.3272248867582743, 0.2916132651694123, 0.26046316542276504, 0.19867071620188387, 0.20522911767970617, 0.1809405986969883, 0.17916122132774306, 0.14308778463715827, 0.12360898556019714, 0.11913902854109173, 0.08162315898761406, 0.07484068337659229, 0.05497196024192774, 0.03447745192320582, 0.019541786345988885, 0.0063849960404094115] max7018 = max(cost7018Algo0+ 
cost7018Algo1) cost7018Algo0 = np.array(cost7018Algo0)/max7018 cost7018Algo1 = np.array(cost7018Algo1)/max7018 cost3356Algo0= [0.2783048731245389, 0.2872432864239341, 0.2230858096827762, 0.2056502418089665, 0.19798701702638456, 0.17357493305249336, 0.1469459407732046, 0.10761056872908284, 0.0997999691042518, 0.09213445289884156, 0.092158282436989, 0.0660532265325018, 0.06295908184506546, 0.048358314991151494, 0.04045873505958472, 0.0315771474462309, 0.02590217889984478, 0.01820272272729274, 0.012580744472094682, 0.007715350715371399, 0.0] cost3356Algo1= [0.465620584858123, 0.40481693415398573, 0.3875690687641232, 0.43758852738211623, 0.3165725611715618, 0.3024823915734399, 0.26220346926993093, 0.2474230687700476, 0.24382857244792552, 0.24630388944790402, 0.1923745910236931, 0.1700306921862092, 0.16211949681867985, 0.16401967368367437, 0.1757433455069511, 0.10131859386229136, 0.11147997172614468, 0.0849289440996695, 0.0715091535328963, 0.06055701261597678, 0.02902765654616049] max3356 = max(cost3356Algo0+ cost3356Algo1) cost3356Algo0 = np.array(cost3356Algo0)/max3356 cost3356Algo1 = np.array(cost3356Algo1)/max3356 #########################Calculate reduced ratio############################ avg7018Alg0 = sum(cost7018Algo0)/len(cost7018Algo0) avg7018Alg1 = sum(cost7018Algo1)/len(cost7018Algo1) print 'cost reduced ratio of 7018' print 'reduced ration = ', (avg7018Alg1 - avg7018Alg0)/avg7018Alg1 avg3356Alg0 = sum(cost3356Algo0)/len(cost3356Algo0) avg3356Alg1 = sum(cost3356Algo1)/len(cost3356Algo1) print 'cost reduced ratio of 3356' print 'reduced ration = ', (avg3356Alg1 - avg3356Alg0)/avg3356Alg1 ####################################################################### cost2914Algo0= [0.2985484020511996, 0.3678774231683738, 0.22471345430585563, 0.16939513694727085, 0.16470375883534655, 0.1873554643571797, 0.12180635895053124, 0.08917217445175339, 0.09418009720668301, 0.08602255944435719, 0.06227133969465918, 0.05742111452629443, 0.08499951928274836, 0.04435617393119315, 
0.03557660115877804, 0.04948879687046496, 0.027409612835835533, 0.01611151133230808, 0.011949694759354836, 0.005433847749368355, 0.0] cost2914Algo1= [0.4797497500434348, 0.36931227813986284, 0.3535122401694411, 0.3566576719805158, 0.38640821560707106, 0.716213466629919, 0.2935314839639993, 0.39111587647765933, 0.25428746739263486, 0.2130828579687878, 0.20757244130193064, 0.17380156664179708, 0.2115447151728378, 0.15569996960790217, 0.12753165995647436, 0.16329856974988097, 0.1115194699256139, 0.10639665060119938, 0.0932164906718553, 0.09090296502695513, 0.29667733078299985] max2914 = max(cost2914Algo0+ cost2914Algo1) cost2914Algo0 = np.array(cost2914Algo0)/max2914 cost2914Algo1 = np.array(cost2914Algo1)/max2914 # Comment plt.plot(ratio, cost7018Algo0, "kD-", label="SCM-base AS7018", markersize = 8, linewidth=2) plt.plot(ratio, cost7018Algo1, "ro--", label="Baseline AS7018", markersize = 8, linewidth=2) plt.plot(ratio, cost3356Algo0, "b^-",label="SCM-base AS3356", markersize = 8, linewidth=2) plt.plot(ratio, cost3356Algo1, "gv--",label="Baseline AS3356", markersize = 8, linewidth=2) #plt.plot(ratio, cost2914Algo0, "g<-", label ="SCM-base AS2914", markersize = 8, linewidth=2) #plt.plot(ratio, cost2914Algo1, "g>--", label ="Baseline AS2914", markersize = 8, linewidth=2) plt.xticks(np.arange(0,1.1,0.1),('0', '0.1', '0.2', '0.3', '0.4', '0.5', \ '0.6', '0.7', '0.8', '0.9', '1.0'), fontsize = 14) plt.yticks(np.arange(0, 1.1, 0.2), ('0', '0.2', '0.4', '0.6', '0.8', '1.0'), fontsize = 14) plt.ylim([0, 1.03]) plt.xlabel('Replicate ratio (%)',fontsize = 14) plt.ylabel('Normalized cost',fontsize = 14) plt.legend(fontsize = 14) #pylab.legend(loc='upper right') plt.tight_layout() plt.show()
unknown
codeparrot/codeparrot-clean
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*- # # Copyright (C) 2016-2017 Canonical Ltd # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import os from urllib.request import urlretrieve from progressbar import AnimatedMarker, Bar, Percentage, ProgressBar, UnknownLength def _init_progress_bar(total_length, destination, message=None): if not message: message = "Downloading {!r}".format(os.path.basename(destination)) valid_length = total_length and total_length > 0 if valid_length and is_dumb_terminal(): widgets = [message, " ", Percentage()] maxval = total_length elif valid_length and not is_dumb_terminal(): widgets = [message, Bar(marker="=", left="[", right="]"), " ", Percentage()] maxval = total_length elif not valid_length and is_dumb_terminal(): widgets = [message] maxval = UnknownLength else: widgets = [message, AnimatedMarker()] maxval = UnknownLength return ProgressBar(widgets=widgets, maxval=maxval) def download_requests_stream(request_stream, destination, message=None, total_read=0): """This is a facility to download a request with nice progress bars.""" # Doing len(request_stream.content) may defeat the purpose of a # progress bar total_length = 0 if not request_stream.headers.get("Content-Encoding", ""): total_length = int(request_stream.headers.get("Content-Length", "0")) # Content-Length in the case of resuming will be # Content-Length - total_read so we add back up to have the feel of # resuming if os.path.exists(destination): 
total_length += total_read progress_bar = _init_progress_bar(total_length, destination, message) progress_bar.start() if os.path.exists(destination): mode = "ab" else: mode = "wb" with open(destination, mode) as destination_file: for buf in request_stream.iter_content(1024): destination_file.write(buf) if not is_dumb_terminal(): total_read += len(buf) progress_bar.update(total_read) progress_bar.finish() class UrllibDownloader(object): """This is a facility to download an uri with nice progress bars.""" def __init__(self, uri, destination, message=None): self.uri = uri self.destination = destination self.message = message self.progress_bar = None def download(self): urlretrieve(self.uri, self.destination, self._progress_callback) if self.progress_bar: self.progress_bar.finish() def _progress_callback(self, block_num, block_size, total_length): if not self.progress_bar: self.progress_bar = _init_progress_bar( total_length, self.destination, self.message ) self.progress_bar.start() total_read = block_num * block_size self.progress_bar.update( min(total_read, total_length) if total_length > 0 else total_read ) def download_urllib_source(uri, destination, message=None): UrllibDownloader(uri, destination, message).download() def is_dumb_terminal(): """Return True if on a dumb terminal.""" is_stdout_tty = os.isatty(1) is_term_dumb = os.environ.get("TERM", "") == "dumb" return not is_stdout_tty or is_term_dumb
unknown
codeparrot/codeparrot-clean
from MenuList import MenuList from Tools.Directories import resolveFilename, SCOPE_CURRENT_SKIN from enigma import eListboxPythonMultiContent, eListbox, gFont, RT_HALIGN_LEFT from Tools.LoadPixmap import LoadPixmap selectionpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/icons/selectioncross.png")) def SelectionEntryComponent(description, value, index, selected): res = [ (description, value, index, selected), (eListboxPythonMultiContent.TYPE_TEXT, 30, 3, 500, 30, 0, RT_HALIGN_LEFT, description) ] if selected: res.append((eListboxPythonMultiContent.TYPE_PIXMAP_ALPHATEST, 0, 0, 30, 30, selectionpng)) return res class SelectionList(MenuList): def __init__(self, list = None, enableWrapAround = False): MenuList.__init__(self, list or [], enableWrapAround, content = eListboxPythonMultiContent) self.l.setFont(0, gFont("Regular", 20)) self.l.setItemHeight(30) def addSelection(self, description, value, index, selected = True): self.list.append(SelectionEntryComponent(description, value, index, selected)) self.setList(self.list) def toggleSelection(self): idx = self.getSelectedIndex() item = self.list[idx][0] self.list[idx] = SelectionEntryComponent(item[0], item[1], item[2], not item[3]) self.setList(self.list) def getSelectionsList(self): return [ (item[0][0], item[0][1], item[0][2]) for item in self.list if item[0][3] ]
unknown
codeparrot/codeparrot-clean
""" Improved support for Microsoft Visual C++ compilers. Known supported compilers: -------------------------- Microsoft Visual C++ 9.0: Microsoft Visual C++ Compiler for Python 2.7 (x86, amd64); Microsoft Windows SDK 7.0 (x86, x64, ia64); Microsoft Windows SDK 6.1 (x86, x64, ia64) Microsoft Visual C++ 10.0: Microsoft Windows SDK 7.1 (x86, x64, ia64) Microsoft Visual C++ 14.0: Microsoft Visual C++ Build Tools 2015 (x86, x64, arm) """ import os import sys import platform import itertools import distutils.errors from packaging.version import LegacyVersion from six.moves import filterfalse from .monkey import get_unpatched if platform.system() == 'Windows': from six.moves import winreg safe_env = os.environ else: """ Mock winreg and environ so the module can be imported on this platform. """ class winreg: HKEY_USERS = None HKEY_CURRENT_USER = None HKEY_LOCAL_MACHINE = None HKEY_CLASSES_ROOT = None safe_env = dict() try: from distutils.msvc9compiler import Reg except ImportError: pass def msvc9_find_vcvarsall(version): """ Patched "distutils.msvc9compiler.find_vcvarsall" to use the standalone compiler build for Python (VCForPython). Fall back to original behavior when the standalone compiler is not available. Redirect the path of "vcvarsall.bat". Known supported compilers ------------------------- Microsoft Visual C++ 9.0: Microsoft Visual C++ Compiler for Python 2.7 (x86, amd64) Parameters ---------- version: float Required Microsoft Visual C++ version. 
Return ------ vcvarsall.bat path: str """ VC_BASE = r'Software\%sMicrosoft\DevDiv\VCForPython\%0.1f' key = VC_BASE % ('', version) try: # Per-user installs register the compiler path here productdir = Reg.get_value(key, "installdir") except KeyError: try: # All-user installs on a 64-bit system register here key = VC_BASE % ('Wow6432Node\\', version) productdir = Reg.get_value(key, "installdir") except KeyError: productdir = None if productdir: vcvarsall = os.path.os.path.join(productdir, "vcvarsall.bat") if os.path.isfile(vcvarsall): return vcvarsall return get_unpatched(msvc9_find_vcvarsall)(version) def msvc9_query_vcvarsall(ver, arch='x86', *args, **kwargs): """ Patched "distutils.msvc9compiler.query_vcvarsall" for support standalones compilers. Set environment without use of "vcvarsall.bat". Known supported compilers ------------------------- Microsoft Visual C++ 9.0: Microsoft Visual C++ Compiler for Python 2.7 (x86, amd64); Microsoft Windows SDK 7.0 (x86, x64, ia64); Microsoft Windows SDK 6.1 (x86, x64, ia64) Microsoft Visual C++ 10.0: Microsoft Windows SDK 7.1 (x86, x64, ia64) Parameters ---------- ver: float Required Microsoft Visual C++ version. arch: str Target architecture. Return ------ environment: dict """ # Try to get environement from vcvarsall.bat (Classical way) try: orig = get_unpatched(msvc9_query_vcvarsall) return orig(ver, arch, *args, **kwargs) except distutils.errors.DistutilsPlatformError: # Pass error if Vcvarsall.bat is missing pass except ValueError: # Pass error if environment not set after executing vcvarsall.bat pass # If error, try to set environment directly try: return EnvironmentInfo(arch, ver).return_env() except distutils.errors.DistutilsPlatformError as exc: _augment_exception(exc, ver, arch) raise def msvc14_get_vc_env(plat_spec): """ Patched "distutils._msvccompiler._get_vc_env" for support standalones compilers. Set environment without use of "vcvarsall.bat". 
Known supported compilers ------------------------- Microsoft Visual C++ 14.0: Microsoft Visual C++ Build Tools 2015 (x86, x64, arm) Parameters ---------- plat_spec: str Target architecture. Return ------ environment: dict """ # Try to get environment from vcvarsall.bat (Classical way) try: return get_unpatched(msvc14_get_vc_env)(plat_spec) except distutils.errors.DistutilsPlatformError: # Pass error Vcvarsall.bat is missing pass # If error, try to set environment directly try: return EnvironmentInfo(plat_spec, vc_min_ver=14.0).return_env() except distutils.errors.DistutilsPlatformError as exc: _augment_exception(exc, 14.0) raise def msvc14_gen_lib_options(*args, **kwargs): """ Patched "distutils._msvccompiler.gen_lib_options" for fix compatibility between "numpy.distutils" and "distutils._msvccompiler" (for Numpy < 1.11.2) """ if "numpy.distutils" in sys.modules: import numpy as np if LegacyVersion(np.__version__) < LegacyVersion('1.11.2'): return np.distutils.ccompiler.gen_lib_options(*args, **kwargs) return get_unpatched(msvc14_gen_lib_options)(*args, **kwargs) def _augment_exception(exc, version, arch=''): """ Add details to the exception message to help guide the user as to what action will resolve it. """ # Error if MSVC++ directory not found or environment not set message = exc.args[0] if "vcvarsall" in message.lower() or "visual c" in message.lower(): # Special error message if MSVC++ not installed tmpl = 'Microsoft Visual C++ {version:0.1f} is required.' message = tmpl.format(**locals()) msdownload = 'www.microsoft.com/download/details.aspx?id=%d' if version == 9.0: if arch.lower().find('ia64') > -1: # For VC++ 9.0, if IA64 support is needed, redirect user # to Windows SDK 7.0 message += ' Get it with "Microsoft Windows SDK 7.0": ' message += msdownload % 3138 else: # For VC++ 9.0 redirect user to Vc++ for Python 2.7 : # This redirection link is maintained by Microsoft. # Contact vspython@microsoft.com if it needs updating. 
message += ' Get it from http://aka.ms/vcpython27' elif version == 10.0: # For VC++ 10.0 Redirect user to Windows SDK 7.1 message += ' Get it with "Microsoft Windows SDK 7.1": ' message += msdownload % 8279 elif version >= 14.0: # For VC++ 14.0 Redirect user to Visual C++ Build Tools message += (' Get it with "Microsoft Visual C++ Build Tools": ' r'http://landinghub.visualstudio.com/' 'visual-cpp-build-tools') exc.args = (message, ) class PlatformInfo: """ Current and Target Architectures informations. Parameters ---------- arch: str Target architecture. """ current_cpu = safe_env.get('processor_architecture', '').lower() def __init__(self, arch): self.arch = arch.lower().replace('x64', 'amd64') @property def target_cpu(self): return self.arch[self.arch.find('_') + 1:] def target_is_x86(self): return self.target_cpu == 'x86' def current_is_x86(self): return self.current_cpu == 'x86' def current_dir(self, hidex86=False, x64=False): """ Current platform specific subfolder. Parameters ---------- hidex86: bool return '' and not '\x86' if architecture is x86. x64: bool return '\x64' and not '\amd64' if architecture is amd64. Return ------ subfolder: str '\target', or '' (see hidex86 parameter) """ return ( '' if (self.current_cpu == 'x86' and hidex86) else r'\x64' if (self.current_cpu == 'amd64' and x64) else r'\%s' % self.current_cpu ) def target_dir(self, hidex86=False, x64=False): r""" Target platform specific subfolder. Parameters ---------- hidex86: bool return '' and not '\x86' if architecture is x86. x64: bool return '\x64' and not '\amd64' if architecture is amd64. Return ------ subfolder: str '\current', or '' (see hidex86 parameter) """ return ( '' if (self.target_cpu == 'x86' and hidex86) else r'\x64' if (self.target_cpu == 'amd64' and x64) else r'\%s' % self.target_cpu ) def cross_dir(self, forcex86=False): r""" Cross platform specific subfolder. Parameters ---------- forcex86: bool Use 'x86' as current architecture even if current acritecture is not x86. 
Return ------ subfolder: str '' if target architecture is current architecture, '\current_target' if not. """ current = 'x86' if forcex86 else self.current_cpu return ( '' if self.target_cpu == current else self.target_dir().replace('\\', '\\%s_' % current) ) class RegistryInfo: """ Microsoft Visual Studio related registry informations. Parameters ---------- platform_info: PlatformInfo "PlatformInfo" instance. """ HKEYS = (winreg.HKEY_USERS, winreg.HKEY_CURRENT_USER, winreg.HKEY_LOCAL_MACHINE, winreg.HKEY_CLASSES_ROOT) def __init__(self, platform_info): self.pi = platform_info @property def visualstudio(self): """ Microsoft Visual Studio root registry key. """ return 'VisualStudio' @property def sxs(self): """ Microsoft Visual Studio SxS registry key. """ return os.path.join(self.visualstudio, 'SxS') @property def vc(self): """ Microsoft Visual C++ VC7 registry key. """ return os.path.join(self.sxs, 'VC7') @property def vs(self): """ Microsoft Visual Studio VS7 registry key. """ return os.path.join(self.sxs, 'VS7') @property def vc_for_python(self): """ Microsoft Visual C++ for Python registry key. """ return r'DevDiv\VCForPython' @property def microsoft_sdk(self): """ Microsoft SDK registry key. """ return 'Microsoft SDKs' @property def windows_sdk(self): """ Microsoft Windows/Platform SDK registry key. """ return os.path.join(self.microsoft_sdk, 'Windows') @property def netfx_sdk(self): """ Microsoft .NET Framework SDK registry key. """ return os.path.join(self.microsoft_sdk, 'NETFXSDK') @property def windows_kits_roots(self): """ Microsoft Windows Kits Roots registry key. """ return r'Windows Kits\Installed Roots' def microsoft(self, key, x86=False): """ Return key in Microsoft software registry. Parameters ---------- key: str Registry key path where look. x86: str Force x86 software registry. 
Return ------ str: value """ node64 = '' if self.pi.current_is_x86() or x86 else r'\Wow6432Node' return os.path.join('Software', node64, 'Microsoft', key) def lookup(self, key, name): """ Look for values in registry in Microsoft software registry. Parameters ---------- key: str Registry key path where look. name: str Value name to find. Return ------ str: value """ KEY_READ = winreg.KEY_READ openkey = winreg.OpenKey ms = self.microsoft for hkey in self.HKEYS: try: bkey = openkey(hkey, ms(key), 0, KEY_READ) except (OSError, IOError): if not self.pi.current_is_x86(): try: bkey = openkey(hkey, ms(key, True), 0, KEY_READ) except (OSError, IOError): continue else: continue try: return winreg.QueryValueEx(bkey, name)[0] except (OSError, IOError): pass class SystemInfo: """ Microsoft Windows and Visual Studio related system inormations. Parameters ---------- registry_info: RegistryInfo "RegistryInfo" instance. vc_ver: float Required Microsoft Visual C++ version. """ # Variables and properties in this class use originals CamelCase variables # names from Microsoft source files for more easy comparaison. WinDir = safe_env.get('WinDir', '') ProgramFiles = safe_env.get('ProgramFiles', '') ProgramFilesx86 = safe_env.get('ProgramFiles(x86)', ProgramFiles) def __init__(self, registry_info, vc_ver=None): self.ri = registry_info self.pi = self.ri.pi if vc_ver: self.vc_ver = vc_ver else: try: self.vc_ver = self.find_available_vc_vers()[-1] except IndexError: err = 'No Microsoft Visual C++ version found' raise distutils.errors.DistutilsPlatformError(err) def find_available_vc_vers(self): """ Find all available Microsoft Visual C++ versions. 
""" vckeys = (self.ri.vc, self.ri.vc_for_python) vc_vers = [] for hkey in self.ri.HKEYS: for key in vckeys: try: bkey = winreg.OpenKey(hkey, key, 0, winreg.KEY_READ) except (OSError, IOError): continue subkeys, values, _ = winreg.QueryInfoKey(bkey) for i in range(values): try: ver = float(winreg.EnumValue(bkey, i)[0]) if ver not in vc_vers: vc_vers.append(ver) except ValueError: pass for i in range(subkeys): try: ver = float(winreg.EnumKey(bkey, i)) if ver not in vc_vers: vc_vers.append(ver) except ValueError: pass return sorted(vc_vers) @property def VSInstallDir(self): """ Microsoft Visual Studio directory. """ # Default path name = 'Microsoft Visual Studio %0.1f' % self.vc_ver default = os.path.join(self.ProgramFilesx86, name) # Try to get path from registry, if fail use default path return self.ri.lookup(self.ri.vs, '%0.1f' % self.vc_ver) or default @property def VCInstallDir(self): """ Microsoft Visual C++ directory. """ # Default path default = r'Microsoft Visual Studio %0.1f\VC' % self.vc_ver guess_vc = os.path.join(self.ProgramFilesx86, default) # Try to get "VC++ for Python" path from registry as default path reg_path = os.path.join(self.ri.vc_for_python, '%0.1f' % self.vc_ver) python_vc = self.ri.lookup(reg_path, 'installdir') default_vc = os.path.join(python_vc, 'VC') if python_vc else guess_vc # Try to get path from registry, if fail use default path path = self.ri.lookup(self.ri.vc, '%0.1f' % self.vc_ver) or default_vc if not os.path.isdir(path): msg = 'Microsoft Visual C++ directory not found' raise distutils.errors.DistutilsPlatformError(msg) return path @property def WindowsSdkVersion(self): """ Microsoft Windows SDK versions. 
""" # Set Windows SDK versions for specified MSVC++ version if self.vc_ver <= 9.0: return ('7.0', '6.1', '6.0a') elif self.vc_ver == 10.0: return ('7.1', '7.0a') elif self.vc_ver == 11.0: return ('8.0', '8.0a') elif self.vc_ver == 12.0: return ('8.1', '8.1a') elif self.vc_ver >= 14.0: return ('10.0', '8.1') @property def WindowsSdkDir(self): """ Microsoft Windows SDK directory. """ sdkdir = '' for ver in self.WindowsSdkVersion: # Try to get it from registry loc = os.path.join(self.ri.windows_sdk, 'v%s' % ver) sdkdir = self.ri.lookup(loc, 'installationfolder') if sdkdir: break if not sdkdir or not os.path.isdir(sdkdir): # Try to get "VC++ for Python" version from registry path = os.path.join(self.ri.vc_for_python, '%0.1f' % self.vc_ver) install_base = self.ri.lookup(path, 'installdir') if install_base: sdkdir = os.path.join(install_base, 'WinSDK') if not sdkdir or not os.path.isdir(sdkdir): # If fail, use default new path for ver in self.WindowsSdkVersion: intver = ver[:ver.rfind('.')] path = r'Microsoft SDKs\Windows Kits\%s' % (intver) d = os.path.join(self.ProgramFiles, path) if os.path.isdir(d): sdkdir = d if not sdkdir or not os.path.isdir(sdkdir): # If fail, use default old path for ver in self.WindowsSdkVersion: path = r'Microsoft SDKs\Windows\v%s' % ver d = os.path.join(self.ProgramFiles, path) if os.path.isdir(d): sdkdir = d if not sdkdir: # If fail, use Platform SDK sdkdir = os.path.join(self.VCInstallDir, 'PlatformSDK') return sdkdir @property def WindowsSDKExecutablePath(self): """ Microsoft Windows SDK executable directory. 
""" # Find WinSDK NetFx Tools registry dir name if self.vc_ver <= 11.0: netfxver = 35 arch = '' else: netfxver = 40 hidex86 = True if self.vc_ver <= 12.0 else False arch = self.pi.current_dir(x64=True, hidex86=hidex86) fx = 'WinSDK-NetFx%dTools%s' % (netfxver, arch.replace('\\', '-')) # liste all possibles registry paths regpaths = [] if self.vc_ver >= 14.0: for ver in self.NetFxSdkVersion: regpaths += [os.path.join(self.ri.netfx_sdk, ver, fx)] for ver in self.WindowsSdkVersion: regpaths += [os.path.join(self.ri.windows_sdk, 'v%sA' % ver, fx)] # Return installation folder from the more recent path for path in regpaths: execpath = self.ri.lookup(path, 'installationfolder') if execpath: break return execpath @property def FSharpInstallDir(self): """ Microsoft Visual F# directory. """ path = r'%0.1f\Setup\F#' % self.vc_ver path = os.path.join(self.ri.visualstudio, path) return self.ri.lookup(path, 'productdir') or '' @property def UniversalCRTSdkDir(self): """ Microsoft Universal CRT SDK directory. """ # Set Kit Roots versions for specified MSVC++ version if self.vc_ver >= 14.0: vers = ('10', '81') else: vers = () # Find path of the more recent Kit for ver in vers: sdkdir = self.ri.lookup(self.ri.windows_kits_roots, 'kitsroot%s' % ver) if sdkdir: break return sdkdir or '' @property def NetFxSdkVersion(self): """ Microsoft .NET Framework SDK versions. """ # Set FxSdk versions for specified MSVC++ version if self.vc_ver >= 14.0: return ('4.6.1', '4.6') else: return () @property def NetFxSdkDir(self): """ Microsoft .NET Framework SDK directory. """ for ver in self.NetFxSdkVersion: loc = os.path.join(self.ri.netfx_sdk, ver) sdkdir = self.ri.lookup(loc, 'kitsinstallationfolder') if sdkdir: break return sdkdir or '' @property def FrameworkDir32(self): """ Microsoft .NET Framework 32bit directory. 
""" # Default path guess_fw = os.path.join(self.WinDir, r'Microsoft.NET\Framework') # Try to get path from registry, if fail use default path return self.ri.lookup(self.ri.vc, 'frameworkdir32') or guess_fw @property def FrameworkDir64(self): """ Microsoft .NET Framework 64bit directory. """ # Default path guess_fw = os.path.join(self.WinDir, r'Microsoft.NET\Framework64') # Try to get path from registry, if fail use default path return self.ri.lookup(self.ri.vc, 'frameworkdir64') or guess_fw @property def FrameworkVersion32(self): """ Microsoft .NET Framework 32bit versions. """ return self._find_dot_net_versions(32) @property def FrameworkVersion64(self): """ Microsoft .NET Framework 64bit versions. """ return self._find_dot_net_versions(64) def _find_dot_net_versions(self, bits=32): """ Find Microsoft .NET Framework versions. Parameters ---------- bits: int Platform number of bits: 32 or 64. """ # Find actual .NET version ver = self.ri.lookup(self.ri.vc, 'frameworkver%d' % bits) or '' # Set .NET versions for specified MSVC++ version if self.vc_ver >= 12.0: frameworkver = (ver, 'v4.0') elif self.vc_ver >= 10.0: frameworkver = ('v4.0.30319' if ver.lower()[:2] != 'v4' else ver, 'v3.5') elif self.vc_ver == 9.0: frameworkver = ('v3.5', 'v2.0.50727') if self.vc_ver == 8.0: frameworkver = ('v3.0', 'v2.0.50727') return frameworkver class EnvironmentInfo: """ Return environment variables for specified Microsoft Visual C++ version and platform : Lib, Include, Path and libpath. This function is compatible with Microsoft Visual C++ 9.0 to 14.0. Script created by analysing Microsoft environment configuration files like "vcvars[...].bat", "SetEnv.Cmd", "vcbuildtools.bat", ... Parameters ---------- arch: str Target architecture. vc_ver: float Required Microsoft Visual C++ version. If not set, autodetect the last version. vc_min_ver: float Minimum Microsoft Visual C++ version. 
""" # Variables and properties in this class use originals CamelCase variables # names from Microsoft source files for more easy comparaison. def __init__(self, arch, vc_ver=None, vc_min_ver=None): self.pi = PlatformInfo(arch) self.ri = RegistryInfo(self.pi) self.si = SystemInfo(self.ri, vc_ver) if vc_min_ver: if self.vc_ver < vc_min_ver: err = 'No suitable Microsoft Visual C++ version found' raise distutils.errors.DistutilsPlatformError(err) @property def vc_ver(self): """ Microsoft Visual C++ version. """ return self.si.vc_ver @property def VSTools(self): """ Microsoft Visual Studio Tools """ paths = [r'Common7\IDE', r'Common7\Tools'] if self.vc_ver >= 14.0: arch_subdir = self.pi.current_dir(hidex86=True, x64=True) paths += [r'Common7\IDE\CommonExtensions\Microsoft\TestWindow'] paths += [r'Team Tools\Performance Tools'] paths += [r'Team Tools\Performance Tools%s' % arch_subdir] return [os.path.join(self.si.VSInstallDir, path) for path in paths] @property def VCIncludes(self): """ Microsoft Visual C++ & Microsoft Foundation Class Includes """ return [os.path.join(self.si.VCInstallDir, 'Include'), os.path.join(self.si.VCInstallDir, r'ATLMFC\Include')] @property def VCLibraries(self): """ Microsoft Visual C++ & Microsoft Foundation Class Libraries """ arch_subdir = self.pi.target_dir(hidex86=True) paths = ['Lib%s' % arch_subdir, r'ATLMFC\Lib%s' % arch_subdir] if self.vc_ver >= 14.0: paths += [r'Lib\store%s' % arch_subdir] return [os.path.join(self.si.VCInstallDir, path) for path in paths] @property def VCStoreRefs(self): """ Microsoft Visual C++ store references Libraries """ if self.vc_ver < 14.0: return [] return [os.path.join(self.si.VCInstallDir, r'Lib\store\references')] @property def VCTools(self): """ Microsoft Visual C++ Tools """ si = self.si tools = [os.path.join(si.VCInstallDir, 'VCPackages')] forcex86 = True if self.vc_ver <= 10.0 else False arch_subdir = self.pi.cross_dir(forcex86) if arch_subdir: tools += [os.path.join(si.VCInstallDir, 'Bin%s' % 
arch_subdir)] if self.vc_ver >= 14.0: path = 'Bin%s' % self.pi.current_dir(hidex86=True) tools += [os.path.join(si.VCInstallDir, path)] else: tools += [os.path.join(si.VCInstallDir, 'Bin')] return tools @property def OSLibraries(self): """ Microsoft Windows SDK Libraries """ if self.vc_ver <= 10.0: arch_subdir = self.pi.target_dir(hidex86=True, x64=True) return [os.path.join(self.si.WindowsSdkDir, 'Lib%s' % arch_subdir)] else: arch_subdir = self.pi.target_dir(x64=True) lib = os.path.join(self.si.WindowsSdkDir, 'lib') libver = self._get_content_dirname(lib) return [os.path.join(lib, '%sum%s' % (libver, arch_subdir))] @property def OSIncludes(self): """ Microsoft Windows SDK Include """ include = os.path.join(self.si.WindowsSdkDir, 'include') if self.vc_ver <= 10.0: return [include, os.path.join(include, 'gl')] else: if self.vc_ver >= 14.0: sdkver = self._get_content_dirname(include) else: sdkver = '' return [os.path.join(include, '%sshared' % sdkver), os.path.join(include, '%sum' % sdkver), os.path.join(include, '%swinrt' % sdkver)] @property def OSLibpath(self): """ Microsoft Windows SDK Libraries Paths """ ref = os.path.join(self.si.WindowsSdkDir, 'References') libpath = [] if self.vc_ver <= 9.0: libpath += self.OSLibraries if self.vc_ver >= 11.0: libpath += [os.path.join(ref, r'CommonConfiguration\Neutral')] if self.vc_ver >= 14.0: libpath += [ ref, os.path.join(self.si.WindowsSdkDir, 'UnionMetadata'), os.path.join( ref, 'Windows.Foundation.UniversalApiContract', '1.0.0.0', ), os.path.join( ref, 'Windows.Foundation.FoundationContract', '1.0.0.0', ), os.path.join( ref, 'Windows.Networking.Connectivity.WwanContract', '1.0.0.0', ), os.path.join( self.si.WindowsSdkDir, 'ExtensionSDKs', 'Microsoft.VCLibs', '%0.1f' % self.vc_ver, 'References', 'CommonConfiguration', 'neutral', ), ] return libpath @property def SdkTools(self): """ Microsoft Windows SDK Tools """ bin_dir = 'Bin' if self.vc_ver <= 11.0 else r'Bin\x86' tools = [os.path.join(self.si.WindowsSdkDir, bin_dir)] 
if not self.pi.current_is_x86(): arch_subdir = self.pi.current_dir(x64=True) path = 'Bin%s' % arch_subdir tools += [os.path.join(self.si.WindowsSdkDir, path)] if self.vc_ver == 10.0 or self.vc_ver == 11.0: if self.pi.target_is_x86(): arch_subdir = '' else: arch_subdir = self.pi.current_dir(hidex86=True, x64=True) path = r'Bin\NETFX 4.0 Tools%s' % arch_subdir tools += [os.path.join(self.si.WindowsSdkDir, path)] if self.si.WindowsSDKExecutablePath: tools += [self.si.WindowsSDKExecutablePath] return tools @property def SdkSetup(self): """ Microsoft Windows SDK Setup """ if self.vc_ver > 9.0: return [] return [os.path.join(self.si.WindowsSdkDir, 'Setup')] @property def FxTools(self): """ Microsoft .NET Framework Tools """ pi = self.pi si = self.si if self.vc_ver <= 10.0: include32 = True include64 = not pi.target_is_x86() and not pi.current_is_x86() else: include32 = pi.target_is_x86() or pi.current_is_x86() include64 = pi.current_cpu == 'amd64' or pi.target_cpu == 'amd64' tools = [] if include32: tools += [os.path.join(si.FrameworkDir32, ver) for ver in si.FrameworkVersion32] if include64: tools += [os.path.join(si.FrameworkDir64, ver) for ver in si.FrameworkVersion64] return tools @property def NetFxSDKLibraries(self): """ Microsoft .Net Framework SDK Libraries """ if self.vc_ver < 14.0 or not self.si.NetFxSdkDir: return [] arch_subdir = self.pi.target_dir(x64=True) return [os.path.join(self.si.NetFxSdkDir, r'lib\um%s' % arch_subdir)] @property def NetFxSDKIncludes(self): """ Microsoft .Net Framework SDK Includes """ if self.vc_ver < 14.0 or not self.si.NetFxSdkDir: return [] return [os.path.join(self.si.NetFxSdkDir, r'include\um')] @property def VsTDb(self): """ Microsoft Visual Studio Team System Database """ return [os.path.join(self.si.VSInstallDir, r'VSTSDB\Deploy')] @property def MSBuild(self): """ Microsoft Build Engine """ if self.vc_ver < 12.0: return [] arch_subdir = self.pi.current_dir(hidex86=True) path = r'MSBuild\%0.1f\bin%s' % (self.vc_ver, 
arch_subdir) return [os.path.join(self.si.ProgramFilesx86, path)] @property def HTMLHelpWorkshop(self): """ Microsoft HTML Help Workshop """ if self.vc_ver < 11.0: return [] return [os.path.join(self.si.ProgramFilesx86, 'HTML Help Workshop')] @property def UCRTLibraries(self): """ Microsoft Universal CRT Libraries """ if self.vc_ver < 14.0: return [] arch_subdir = self.pi.target_dir(x64=True) lib = os.path.join(self.si.UniversalCRTSdkDir, 'lib') ucrtver = self._get_content_dirname(lib) return [os.path.join(lib, '%sucrt%s' % (ucrtver, arch_subdir))] @property def UCRTIncludes(self): """ Microsoft Universal CRT Include """ if self.vc_ver < 14.0: return [] include = os.path.join(self.si.UniversalCRTSdkDir, 'include') ucrtver = self._get_content_dirname(include) return [os.path.join(include, '%sucrt' % ucrtver)] @property def FSharp(self): """ Microsoft Visual F# """ if self.vc_ver < 11.0 and self.vc_ver > 12.0: return [] return self.si.FSharpInstallDir @property def VCRuntimeRedist(self): """ Microsoft Visual C++ runtime redistribuable dll """ arch_subdir = self.pi.target_dir(x64=True) vcruntime = 'redist%s\\Microsoft.VC%d0.CRT\\vcruntime%d0.dll' vcruntime = vcruntime % (arch_subdir, self.vc_ver, self.vc_ver) return os.path.join(self.si.VCInstallDir, vcruntime) def return_env(self, exists=True): """ Return environment dict. Parameters ---------- exists: bool It True, only return existing paths. 
""" env = dict( include=self._build_paths('include', [self.VCIncludes, self.OSIncludes, self.UCRTIncludes, self.NetFxSDKIncludes], exists), lib=self._build_paths('lib', [self.VCLibraries, self.OSLibraries, self.FxTools, self.UCRTLibraries, self.NetFxSDKLibraries], exists), libpath=self._build_paths('libpath', [self.VCLibraries, self.FxTools, self.VCStoreRefs, self.OSLibpath], exists), path=self._build_paths('path', [self.VCTools, self.VSTools, self.VsTDb, self.SdkTools, self.SdkSetup, self.FxTools, self.MSBuild, self.HTMLHelpWorkshop, self.FSharp], exists), ) if self.vc_ver >= 14 and os.path.isfile(self.VCRuntimeRedist): env['py_vcruntime_redist'] = self.VCRuntimeRedist return env def _build_paths(self, name, spec_path_lists, exists): """ Given an environment variable name and specified paths, return a pathsep-separated string of paths containing unique, extant, directories from those paths and from the environment variable. Raise an error if no paths are resolved. """ # flatten spec_path_lists spec_paths = itertools.chain.from_iterable(spec_path_lists) env_paths = safe_env.get(name, '').split(os.pathsep) paths = itertools.chain(spec_paths, env_paths) extant_paths = list(filter(os.path.isdir, paths)) if exists else paths if not extant_paths: msg = "%s environment variable is empty" % name.upper() raise distutils.errors.DistutilsPlatformError(msg) unique_paths = self._unique_everseen(extant_paths) return os.pathsep.join(unique_paths) # from Python docs def _unique_everseen(self, iterable, key=None): """ List unique elements, preserving order. Remember all elements ever seen. 
_unique_everseen('AAAABBBCCDAABBB') --> A B C D _unique_everseen('ABBCcAD', str.lower) --> A B C D """ seen = set() seen_add = seen.add if key is None: for element in filterfalse(seen.__contains__, iterable): seen_add(element) yield element else: for element in iterable: k = key(element) if k not in seen: seen_add(k) yield element def _get_content_dirname(self, path): """ Return name of the first dir in path or '' if no dir found. Parameters ---------- path: str Path where search dir. Return ------ foldername: str "name\" or "" """ try: name = os.listdir(path) if name: return '%s\\' % name[0] return '' except (OSError, IOError): return ''
unknown
codeparrot/codeparrot-clean
//// [tests/cases/conformance/async/es6/awaitCallExpression/awaitCallExpression1_es6.ts] //// //// [awaitCallExpression1_es6.ts] declare var a: boolean; declare var p: Promise<boolean>; declare function fn(arg0: boolean, arg1: boolean, arg2: boolean): void; declare var o: { fn(arg0: boolean, arg1: boolean, arg2: boolean): void; }; declare var pfn: Promise<{ (arg0: boolean, arg1: boolean, arg2: boolean): void; }>; declare var po: Promise<{ fn(arg0: boolean, arg1: boolean, arg2: boolean): void; }>; declare function before(): void; declare function after(): void; async function func(): Promise<void> { before(); var b = fn(a, a, a); after(); } //// [awaitCallExpression1_es6.js] "use strict"; function func() { return __awaiter(this, void 0, void 0, function* () { before(); var b = fn(a, a, a); after(); }); }
javascript
github
https://github.com/microsoft/TypeScript
tests/baselines/reference/awaitCallExpression1_es6.js
"""Build a PHYLIP-style matrix of chr11 binary methylation calls.

Reads per-sample "binary_position_RRBS_*" CSVs, keeps only chr11 positions,
outer-joins all samples on position, encodes missing data as "?", and writes
one "<sample> <sequence>" line per sample to total_CLL_chrom11.phy.
"""
import glob
import pandas as pd
import numpy as np

pd.set_option('display.max_columns', 50)  # show up to 50 columns when printing

import os
os.chdir("/gpfs/commons/home/biederstedte-934/evan_projects/correct_phylo_files")

# Collect the per-sample binary methylation-position files for both cohorts.
cw154 = glob.glob("binary_position_RRBS_cw154*")
trito = glob.glob("binary_position_RRBS_trito_pool*")
print(len(cw154))
print(len(trito))

totalfiles = cw154 + trito
print(len(totalfiles))

# Load each file and keep only the chr11 rows.
df_list = []
for file in totalfiles:
    df = pd.read_csv(file)
    df = df.drop("Unnamed: 0", axis=1)  # drop the index column written by to_csv
    # The first 5 characters of "position" carry the chromosome label ("chr11").
    df["chromosome"] = df["position"].map(lambda x: str(x)[:5])
    df = df[df["chromosome"] == "chr11"]
    df = df.drop("chromosome", axis=1)
    df_list.append(df)

print(len(df_list))

# Outer-join all samples on "position"; positions absent in a sample become NaN.
total_matrix = pd.concat([df.set_index("position") for df in df_list], axis=1).reset_index().astype(object)
total_matrix = total_matrix.drop("index", axis=1)

len(total_matrix.columns)  # notebook residue; no effect as a script statement

# One hard-coded name per sample column (104 total), in file-concatenation order.
# NOTE(review): this assumes glob returned the files in exactly this order —
# confirm, or derive the names from `totalfiles` instead.
# Fixed: "RBS_cw154_Tris_protease_GR_CAGAGAGG.GCATTC" was missing an "R".
total_matrix.columns = ["RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACAACC",
                        "RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACCGCG",
                        "RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACGTGG",
                        "RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACTCAC",
                        "RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.AGGATG",
                        "RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ATAGCG",
                        "RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ATCGAC",
                        "RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CAAGAG",
                        "RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CATGAC",
                        "RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CCTTCG",
                        "RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CGGTAG",
                        "RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CTCAGC",
                        "RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GACACG",
                        "RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GCATTC",
                        "RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GCTGCC",
                        "RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GGCATC",
                        "RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GTGAGG",
                        "RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TAGCGG",
                        "RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TATCTC",
                        "RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TCTCTG",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.ACAACC",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.ACCGCG",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.ACGTGG",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.ACTCAC",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.AGGATG",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.ATAGCG",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.ATCGAC",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.CATGAC",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.CCTTCG",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.CGGTAG",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.CTATTG",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.CTCAGC",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.GACACG",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.GCATTC",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.GCTGCC",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.GGCATC",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.GTGAGG",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.GTTGAG",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.TAGCGG",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.TATCTC",
                        "RRBS_cw154_Tris_protease_CTCTCTAC.TCTCTG",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACAACC",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACCGCG",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACGTGG",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACTCAC",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.AGGATG",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.ATAGCG",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.ATCGAC",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.CATGAC",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.CCTTCG",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.CGGTAG",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.CTATTG",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.CTCAGC",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.GACACG",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.GCATTC",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.GCTGCC",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.GGCATC",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.GTGAGG",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.GTTGAG",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.TAGCGG",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.TATCTC",
                        "RRBS_cw154_Tris_protease_GR_CAGAGAGG.TCTCTG",
                        "RRBS_trito_pool_1_TAAGGCGA.ACAACC",
                        "RRBS_trito_pool_1_TAAGGCGA.ACGTGG",
                        "RRBS_trito_pool_1_TAAGGCGA.ACTCAC",
                        "RRBS_trito_pool_1_TAAGGCGA.ATAGCG",
                        "RRBS_trito_pool_1_TAAGGCGA.ATCGAC",
                        "RRBS_trito_pool_1_TAAGGCGA.CAAGAG",
                        "RRBS_trito_pool_1_TAAGGCGA.CATGAC",
                        "RRBS_trito_pool_1_TAAGGCGA.CCTTCG",
                        "RRBS_trito_pool_1_TAAGGCGA.CGGTAG",
                        "RRBS_trito_pool_1_TAAGGCGA.CTATTG",
                        "RRBS_trito_pool_1_TAAGGCGA.GACACG",
                        "RRBS_trito_pool_1_TAAGGCGA.GCATTC",
                        "RRBS_trito_pool_1_TAAGGCGA.GCTGCC",
                        "RRBS_trito_pool_1_TAAGGCGA.GGCATC",
                        "RRBS_trito_pool_1_TAAGGCGA.GTGAGG",
                        "RRBS_trito_pool_1_TAAGGCGA.GTTGAG",
                        "RRBS_trito_pool_1_TAAGGCGA.TAGCGG",
                        "RRBS_trito_pool_1_TAAGGCGA.TATCTC",
                        "RRBS_trito_pool_1_TAAGGCGA.TCTCTG",
                        "RRBS_trito_pool_1_TAAGGCGA.TGACAG",
                        "RRBS_trito_pool_1_TAAGGCGA.TGCTGC",
                        "RRBS_trito_pool_2_CGTACTAG.ACAACC",
                        "RRBS_trito_pool_2_CGTACTAG.ACGTGG",
                        "RRBS_trito_pool_2_CGTACTAG.ACTCAC",
                        "RRBS_trito_pool_2_CGTACTAG.AGGATG",
                        "RRBS_trito_pool_2_CGTACTAG.ATAGCG",
                        "RRBS_trito_pool_2_CGTACTAG.ATCGAC",
                        "RRBS_trito_pool_2_CGTACTAG.CAAGAG",
                        "RRBS_trito_pool_2_CGTACTAG.CATGAC",
                        "RRBS_trito_pool_2_CGTACTAG.CCTTCG",
                        "RRBS_trito_pool_2_CGTACTAG.CGGTAG",
                        "RRBS_trito_pool_2_CGTACTAG.CTATTG",
                        "RRBS_trito_pool_2_CGTACTAG.GACACG",
                        "RRBS_trito_pool_2_CGTACTAG.GCATTC",
                        "RRBS_trito_pool_2_CGTACTAG.GCTGCC",
                        "RRBS_trito_pool_2_CGTACTAG.GGCATC",
                        "RRBS_trito_pool_2_CGTACTAG.GTGAGG",
                        "RRBS_trito_pool_2_CGTACTAG.GTTGAG",
                        "RRBS_trito_pool_2_CGTACTAG.TAGCGG",
                        "RRBS_trito_pool_2_CGTACTAG.TATCTC",
                        "RRBS_trito_pool_2_CGTACTAG.TCTCTG",
                        "RRBS_trito_pool_2_CGTACTAG.TGACAG"]

print(total_matrix.shape)

# Binary calls become "0"/"1"; missing data becomes "?" (PHYLIP's unknown state).
total_matrix = total_matrix.applymap(lambda x: int(x) if pd.notnull(x) else str("?"))

# Collapse each sample column into one long character string per sample.
total_matrix = total_matrix.astype(str).apply(''.join)

# Build "<sample name> <sequence>" lines.
tott = pd.Series(total_matrix.index.astype(str).str.cat(total_matrix.astype(str),' '))

os.chdir("/gpfs/commons/home/biederstedte-934/evan_projects/CLL_tests")
tott.to_csv("total_CLL_chrom11.phy", header=None, index=None)
print(tott.shape)
unknown
codeparrot/codeparrot-clean
---
title: StaticRouterProvider
---

# StaticRouterProvider

<!--
⚠️ ⚠️ IMPORTANT ⚠️ ⚠️

Thank you for helping improve our documentation!

This file is auto-generated from the JSDoc comments in the source
code, so please edit the JSDoc comments in the file below and this
file will be re-generated once those changes are merged.

https://github.com/remix-run/react-router/blob/main/packages/react-router/lib/dom/server.tsx
-->

[MODES: data]

## Summary

[Reference Documentation ↗](https://api.reactrouter.com/v7/functions/react-router.StaticRouterProvider.html)

A [`DataRouter`](https://api.reactrouter.com/v7/interfaces/react-router.DataRouter.html) that may not navigate to any other [`Location`](https://api.reactrouter.com/v7/interfaces/react-router.Location.html). This is useful on the server where there is no stateful UI.

```tsx
export async function handleRequest(request: Request) {
  let { query, dataRoutes } = createStaticHandler(routes);
  let context = await query(request);

  if (context instanceof Response) {
    return context;
  }

  let router = createStaticRouter(dataRoutes, context);
  return new Response(
    ReactDOMServer.renderToString(<StaticRouterProvider ...
/>), { headers: { "Content-Type": "text/html" } } ); } ``` ## Signature ```tsx function StaticRouterProvider({ context, router, hydrate = true, nonce, }: StaticRouterProviderProps) ``` ## Props ### context The [`StaticHandlerContext`](https://api.reactrouter.com/v7/interfaces/react-router.StaticHandlerContext.html) returned from [`StaticHandler`](https://api.reactrouter.com/v7/interfaces/react-router.StaticHandler.html)'s `query` ### hydrate Whether to hydrate the router on the client (default `true`) ### nonce The [`nonce`](https://developer.mozilla.org/en-US/docs/Web/HTML/Reference/Global_attributes/nonce) to use for the hydration [`<script>`](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/script) tag ### router The static [`DataRouter`](https://api.reactrouter.com/v7/interfaces/react-router.DataRouter.html) from [`createStaticRouter`](../data-routers/createStaticRouter)
unknown
github
https://github.com/remix-run/react-router
docs/api/data-routers/StaticRouterProvider.md
import unittest from test import test_support, test_genericpath import posixpath, os from posixpath import realpath, abspath, dirname, basename # An absolute path to a temporary filename for testing. We can't rely on TESTFN # being an absolute path, so we need this. ABSTFN = abspath(test_support.TESTFN) def skip_if_ABSTFN_contains_backslash(test): """ On Windows, posixpath.abspath still returns paths with backslashes instead of posix forward slashes. If this is the case, several tests fail, so skip them. """ found_backslash = '\\' in ABSTFN msg = "ABSTFN is not a posix path - tests fail" return [test, unittest.skip(msg)(test)][found_backslash] def safe_rmdir(dirname): try: os.rmdir(dirname) except OSError: pass class PosixPathTest(unittest.TestCase): def setUp(self): self.tearDown() def tearDown(self): for suffix in ["", "1", "2"]: test_support.unlink(test_support.TESTFN + suffix) safe_rmdir(test_support.TESTFN + suffix) def test_join(self): self.assertEqual(posixpath.join("/foo", "bar", "/bar", "baz"), "/bar/baz") self.assertEqual(posixpath.join("/foo", "bar", "baz"), "/foo/bar/baz") self.assertEqual(posixpath.join("/foo/", "bar/", "baz/"), "/foo/bar/baz/") def test_split(self): self.assertEqual(posixpath.split("/foo/bar"), ("/foo", "bar")) self.assertEqual(posixpath.split("/"), ("/", "")) self.assertEqual(posixpath.split("foo"), ("", "foo")) self.assertEqual(posixpath.split("////foo"), ("////", "foo")) self.assertEqual(posixpath.split("//foo//bar"), ("//foo", "bar")) def splitextTest(self, path, filename, ext): self.assertEqual(posixpath.splitext(path), (filename, ext)) self.assertEqual(posixpath.splitext("/" + path), ("/" + filename, ext)) self.assertEqual(posixpath.splitext("abc/" + path), ("abc/" + filename, ext)) self.assertEqual(posixpath.splitext("abc.def/" + path), ("abc.def/" + filename, ext)) self.assertEqual(posixpath.splitext("/abc.def/" + path), ("/abc.def/" + filename, ext)) self.assertEqual(posixpath.splitext(path + "/"), (filename + ext + "/", "")) 
def test_splitext(self): self.splitextTest("foo.bar", "foo", ".bar") self.splitextTest("foo.boo.bar", "foo.boo", ".bar") self.splitextTest("foo.boo.biff.bar", "foo.boo.biff", ".bar") self.splitextTest(".csh.rc", ".csh", ".rc") self.splitextTest("nodots", "nodots", "") self.splitextTest(".cshrc", ".cshrc", "") self.splitextTest("...manydots", "...manydots", "") self.splitextTest("...manydots.ext", "...manydots", ".ext") self.splitextTest(".", ".", "") self.splitextTest("..", "..", "") self.splitextTest("........", "........", "") self.splitextTest("", "", "") def test_isabs(self): self.assertIs(posixpath.isabs(""), False) self.assertIs(posixpath.isabs("/"), True) self.assertIs(posixpath.isabs("/foo"), True) self.assertIs(posixpath.isabs("/foo/bar"), True) self.assertIs(posixpath.isabs("foo/bar"), False) def test_basename(self): self.assertEqual(posixpath.basename("/foo/bar"), "bar") self.assertEqual(posixpath.basename("/"), "") self.assertEqual(posixpath.basename("foo"), "foo") self.assertEqual(posixpath.basename("////foo"), "foo") self.assertEqual(posixpath.basename("//foo//bar"), "bar") def test_dirname(self): self.assertEqual(posixpath.dirname("/foo/bar"), "/foo") self.assertEqual(posixpath.dirname("/"), "/") self.assertEqual(posixpath.dirname("foo"), "") self.assertEqual(posixpath.dirname("////foo"), "////") self.assertEqual(posixpath.dirname("//foo//bar"), "//foo") def test_islink(self): self.assertIs(posixpath.islink(test_support.TESTFN + "1"), False) f = open(test_support.TESTFN + "1", "wb") try: f.write("foo") f.close() self.assertIs(posixpath.islink(test_support.TESTFN + "1"), False) if hasattr(os, "symlink"): os.symlink(test_support.TESTFN + "1", test_support.TESTFN + "2") self.assertIs(posixpath.islink(test_support.TESTFN + "2"), True) os.remove(test_support.TESTFN + "1") self.assertIs(posixpath.islink(test_support.TESTFN + "2"), True) self.assertIs(posixpath.exists(test_support.TESTFN + "2"), False) self.assertIs(posixpath.lexists(test_support.TESTFN + 
"2"), True) finally: if not f.close(): f.close() def test_samefile(self): f = open(test_support.TESTFN + "1", "wb") try: f.write("foo") f.close() self.assertIs( posixpath.samefile( test_support.TESTFN + "1", test_support.TESTFN + "1" ), True ) # If we don't have links, assume that os.stat doesn't return # reasonable inode information and thus, that samefile() doesn't # work. if hasattr(os, "symlink"): os.symlink( test_support.TESTFN + "1", test_support.TESTFN + "2" ) self.assertIs( posixpath.samefile( test_support.TESTFN + "1", test_support.TESTFN + "2" ), True ) os.remove(test_support.TESTFN + "2") f = open(test_support.TESTFN + "2", "wb") f.write("bar") f.close() self.assertIs( posixpath.samefile( test_support.TESTFN + "1", test_support.TESTFN + "2" ), False ) finally: if not f.close(): f.close() def test_samestat(self): f = open(test_support.TESTFN + "1", "wb") try: f.write("foo") f.close() self.assertIs( posixpath.samestat( os.stat(test_support.TESTFN + "1"), os.stat(test_support.TESTFN + "1") ), True ) # If we don't have links, assume that os.stat() doesn't return # reasonable inode information and thus, that samestat() doesn't # work. 
if hasattr(os, "symlink"): os.symlink(test_support.TESTFN + "1", test_support.TESTFN + "2") self.assertIs( posixpath.samestat( os.stat(test_support.TESTFN + "1"), os.stat(test_support.TESTFN + "2") ), True ) os.remove(test_support.TESTFN + "2") f = open(test_support.TESTFN + "2", "wb") f.write("bar") f.close() self.assertIs( posixpath.samestat( os.stat(test_support.TESTFN + "1"), os.stat(test_support.TESTFN + "2") ), False ) finally: if not f.close(): f.close() def test_ismount(self): self.assertIs(posixpath.ismount("/"), True) def test_expanduser(self): self.assertEqual(posixpath.expanduser("foo"), "foo") try: import pwd except ImportError: pass else: self.assertIsInstance(posixpath.expanduser("~/"), basestring) # if home directory == root directory, this test makes no sense if posixpath.expanduser("~") != '/': self.assertEqual( posixpath.expanduser("~") + "/", posixpath.expanduser("~/") ) self.assertIsInstance(posixpath.expanduser("~root/"), basestring) self.assertIsInstance(posixpath.expanduser("~foo/"), basestring) with test_support.EnvironmentVarGuard() as env: env['HOME'] = '/' self.assertEqual(posixpath.expanduser("~"), "/") self.assertEqual(posixpath.expanduser("~/foo"), "/foo") def test_normpath(self): self.assertEqual(posixpath.normpath(""), ".") self.assertEqual(posixpath.normpath("/"), "/") self.assertEqual(posixpath.normpath("//"), "//") self.assertEqual(posixpath.normpath("///"), "/") self.assertEqual(posixpath.normpath("///foo/.//bar//"), "/foo/bar") self.assertEqual(posixpath.normpath("///foo/.//bar//.//..//.//baz"), "/foo/baz") self.assertEqual(posixpath.normpath("///..//./foo/.//bar"), "/foo/bar") @skip_if_ABSTFN_contains_backslash def test_realpath_curdir(self): self.assertEqual(realpath('.'), os.getcwd()) self.assertEqual(realpath('./.'), os.getcwd()) self.assertEqual(realpath('/'.join(['.'] * 100)), os.getcwd()) @skip_if_ABSTFN_contains_backslash def test_realpath_pardir(self): self.assertEqual(realpath('..'), dirname(os.getcwd())) 
self.assertEqual(realpath('../..'), dirname(dirname(os.getcwd()))) self.assertEqual(realpath('/'.join(['..'] * 100)), '/') if hasattr(os, "symlink"): def test_realpath_basic(self): # Basic operation. try: os.symlink(ABSTFN+"1", ABSTFN) self.assertEqual(realpath(ABSTFN), ABSTFN+"1") finally: test_support.unlink(ABSTFN) def test_realpath_symlink_loops(self): # Bug #930024, return the path unchanged if we get into an infinite # symlink loop. try: old_path = abspath('.') os.symlink(ABSTFN, ABSTFN) self.assertEqual(realpath(ABSTFN), ABSTFN) os.symlink(ABSTFN+"1", ABSTFN+"2") os.symlink(ABSTFN+"2", ABSTFN+"1") self.assertEqual(realpath(ABSTFN+"1"), ABSTFN+"1") self.assertEqual(realpath(ABSTFN+"2"), ABSTFN+"2") self.assertEqual(realpath(ABSTFN+"1/x"), ABSTFN+"1/x") self.assertEqual(realpath(ABSTFN+"1/.."), dirname(ABSTFN)) self.assertEqual(realpath(ABSTFN+"1/../x"), dirname(ABSTFN) + "/x") os.symlink(ABSTFN+"x", ABSTFN+"y") self.assertEqual(realpath(ABSTFN+"1/../" + basename(ABSTFN) + "y"), ABSTFN + "y") self.assertEqual(realpath(ABSTFN+"1/../" + basename(ABSTFN) + "1"), ABSTFN + "1") os.symlink(basename(ABSTFN) + "a/b", ABSTFN+"a") self.assertEqual(realpath(ABSTFN+"a"), ABSTFN+"a/b") os.symlink("../" + basename(dirname(ABSTFN)) + "/" + basename(ABSTFN) + "c", ABSTFN+"c") self.assertEqual(realpath(ABSTFN+"c"), ABSTFN+"c") # Test using relative path as well. os.chdir(dirname(ABSTFN)) self.assertEqual(realpath(basename(ABSTFN)), ABSTFN) finally: os.chdir(old_path) test_support.unlink(ABSTFN) test_support.unlink(ABSTFN+"1") test_support.unlink(ABSTFN+"2") test_support.unlink(ABSTFN+"y") test_support.unlink(ABSTFN+"c") test_support.unlink(ABSTFN+"a") def test_realpath_repeated_indirect_symlinks(self): # Issue #6975. 
try: os.mkdir(ABSTFN) os.symlink('../' + basename(ABSTFN), ABSTFN + '/self') os.symlink('self/self/self', ABSTFN + '/link') self.assertEqual(realpath(ABSTFN + '/link'), ABSTFN) finally: test_support.unlink(ABSTFN + '/self') test_support.unlink(ABSTFN + '/link') safe_rmdir(ABSTFN) def test_realpath_deep_recursion(self): depth = 10 old_path = abspath('.') try: os.mkdir(ABSTFN) for i in range(depth): os.symlink('/'.join(['%d' % i] * 10), ABSTFN + '/%d' % (i + 1)) os.symlink('.', ABSTFN + '/0') self.assertEqual(realpath(ABSTFN + '/%d' % depth), ABSTFN) # Test using relative path as well. os.chdir(ABSTFN) self.assertEqual(realpath('%d' % depth), ABSTFN) finally: os.chdir(old_path) for i in range(depth + 1): test_support.unlink(ABSTFN + '/%d' % i) safe_rmdir(ABSTFN) def test_realpath_resolve_parents(self): # We also need to resolve any symlinks in the parents of a relative # path passed to realpath. E.g.: current working directory is # /usr/doc with 'doc' being a symlink to /usr/share/doc. We call # realpath("a"). This should return /usr/share/doc/a/. try: old_path = abspath('.') os.mkdir(ABSTFN) os.mkdir(ABSTFN + "/y") os.symlink(ABSTFN + "/y", ABSTFN + "/k") os.chdir(ABSTFN + "/k") self.assertEqual(realpath("a"), ABSTFN + "/y/a") finally: os.chdir(old_path) test_support.unlink(ABSTFN + "/k") safe_rmdir(ABSTFN + "/y") safe_rmdir(ABSTFN) def test_realpath_resolve_before_normalizing(self): # Bug #990669: Symbolic links should be resolved before we # normalize the path. E.g.: if we have directories 'a', 'k' and 'y' # in the following hierarchy: # a/k/y # # and a symbolic link 'link-y' pointing to 'y' in directory 'a', # then realpath("link-y/..") should return 'k', not 'a'. try: old_path = abspath('.') os.mkdir(ABSTFN) os.mkdir(ABSTFN + "/k") os.mkdir(ABSTFN + "/k/y") os.symlink(ABSTFN + "/k/y", ABSTFN + "/link-y") # Absolute path. self.assertEqual(realpath(ABSTFN + "/link-y/.."), ABSTFN + "/k") # Relative path. 
os.chdir(dirname(ABSTFN)) self.assertEqual(realpath(basename(ABSTFN) + "/link-y/.."), ABSTFN + "/k") finally: os.chdir(old_path) test_support.unlink(ABSTFN + "/link-y") safe_rmdir(ABSTFN + "/k/y") safe_rmdir(ABSTFN + "/k") safe_rmdir(ABSTFN) def test_realpath_resolve_first(self): # Bug #1213894: The first component of the path, if not absolute, # must be resolved too. try: old_path = abspath('.') os.mkdir(ABSTFN) os.mkdir(ABSTFN + "/k") os.symlink(ABSTFN, ABSTFN + "link") os.chdir(dirname(ABSTFN)) base = basename(ABSTFN) self.assertEqual(realpath(base + "link"), ABSTFN) self.assertEqual(realpath(base + "link/k"), ABSTFN + "/k") finally: os.chdir(old_path) test_support.unlink(ABSTFN + "link") safe_rmdir(ABSTFN + "/k") safe_rmdir(ABSTFN) def test_relpath(self): (real_getcwd, os.getcwd) = (os.getcwd, lambda: r"/home/user/bar") try: curdir = os.path.split(os.getcwd())[-1] self.assertRaises(ValueError, posixpath.relpath, "") self.assertEqual(posixpath.relpath("a"), "a") self.assertEqual(posixpath.relpath(posixpath.abspath("a")), "a") self.assertEqual(posixpath.relpath("a/b"), "a/b") self.assertEqual(posixpath.relpath("../a/b"), "../a/b") self.assertEqual(posixpath.relpath("a", "../b"), "../"+curdir+"/a") self.assertEqual(posixpath.relpath("a/b", "../c"), "../"+curdir+"/a/b") self.assertEqual(posixpath.relpath("a", "b/c"), "../../a") self.assertEqual(posixpath.relpath("a", "a"), ".") self.assertEqual(posixpath.relpath("/foo/bar/bat", "/x/y/z"), '../../../foo/bar/bat') self.assertEqual(posixpath.relpath("/foo/bar/bat", "/foo/bar"), 'bat') self.assertEqual(posixpath.relpath("/foo/bar/bat", "/"), 'foo/bar/bat') self.assertEqual(posixpath.relpath("/", "/foo/bar/bat"), '../../..') self.assertEqual(posixpath.relpath("/foo/bar/bat", "/x"), '../foo/bar/bat') self.assertEqual(posixpath.relpath("/x", "/foo/bar/bat"), '../../../x') self.assertEqual(posixpath.relpath("/", "/"), '.') self.assertEqual(posixpath.relpath("/a", "/a"), '.') self.assertEqual(posixpath.relpath("/a/b", 
"/a/b"), '.') finally: os.getcwd = real_getcwd class PosixCommonTest(test_genericpath.CommonTest): pathmodule = posixpath attributes = ['relpath', 'samefile', 'sameopenfile', 'samestat'] def test_main(): test_support.run_unittest(PosixPathTest, PosixCommonTest) if __name__=="__main__": test_main()
unknown
codeparrot/codeparrot-clean
/*
 * Copyright 2012-present the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.boot.maven;

import java.util.function.Supplier;

import org.apache.maven.plugin.logging.Log;
import org.jspecify.annotations.Nullable;

import org.springframework.boot.loader.tools.Packager.MainClassTimeoutWarningListener;

/**
 * {@link MainClassTimeoutWarningListener} backed by a supplied Maven {@link Log}.
 *
 * @author Phillip Webb
 */
class LoggingMainClassTimeoutWarningListener implements MainClassTimeoutWarningListener {

	// NOTE(review): the Supplier indirection presumably defers Log resolution
	// until warning time rather than construction time — confirm with callers.
	private final Supplier<Log> log;

	LoggingMainClassTimeoutWarningListener(Supplier<Log> log) {
		this.log = log;
	}

	@Override
	public void handleTimeoutWarning(long duration, @Nullable String mainMethod) {
		// duration and mainMethod are not used here: the warning text is static.
		this.log.get()
			.warn("Searching for the main-class is taking some time, "
					+ "consider using the mainClass configuration parameter");
	}

}
java
github
https://github.com/spring-projects/spring-boot
build-plugin/spring-boot-maven-plugin/src/main/java/org/springframework/boot/maven/LoggingMainClassTimeoutWarningListener.java
--- navigation_title: "Bucket sort" mapped_pages: - https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-bucket-sort-aggregation.html --- # Bucket sort aggregation [search-aggregations-pipeline-bucket-sort-aggregation] A parent pipeline aggregation which sorts the buckets of its parent multi-bucket aggregation. Zero or more sort fields may be specified together with the corresponding sort order. Each bucket may be sorted based on its `_key`, `_count` or its sub-aggregations. In addition, parameters `from` and `size` may be set in order to truncate the result buckets. ::::{note} The `bucket_sort` aggregation, like all pipeline aggregations, is executed after all other non-pipeline aggregations. This means the sorting only applies to whatever buckets are already returned from the parent aggregation. For example, if the parent aggregation is `terms` and its `size` is set to `10`, the `bucket_sort` will only sort over those 10 returned term buckets. :::: ## Syntax [_syntax_10] A `bucket_sort` aggregation looks like this in isolation: ```js { "bucket_sort": { "sort": [ { "sort_field_1": { "order": "asc" } }, <1> { "sort_field_2": { "order": "desc" } }, "sort_field_3" ], "from": 1, "size": 3 } } ``` % NOTCONSOLE 1. Here, `sort_field_1` is the bucket path to the variable to be used as the primary sort and its order is ascending. $$$bucket-sort-params$$$ | Parameter Name | Description | Required | Default Value | | --- | --- | --- | --- | | `sort` | The list of fields to sort on. See [`sort`](/reference/elasticsearch/rest-apis/sort-search-results.md) for more details. | Optional | | | `from` | Buckets in positions prior to the set value will be truncated. | Optional | `0` | | `size` | The number of buckets to return. Defaults to all buckets of the parent aggregation. 
| Optional | | | `gap_policy` | The policy to apply when gaps are found in the data (see [Dealing with gaps in the data](/reference/aggregations/pipeline.md#gap-policy) for more details) | Optional | `skip` | The following snippet returns the buckets corresponding to the 3 months with the highest total sales in descending order: ```console POST /sales/_search { "size": 0, "aggs": { "sales_per_month": { "date_histogram": { "field": "date", "calendar_interval": "month" }, "aggs": { "total_sales": { "sum": { "field": "price" } }, "sales_bucket_sort": { "bucket_sort": { "sort": [ { "total_sales": { "order": "desc" } } <1> ], "size": 3 <2> } } } } } } ``` % TEST[setup:sales] 1. `sort` is set to use the values of `total_sales` in descending order 2. `size` is set to `3` meaning only the top 3 months in `total_sales` will be returned And the following may be the response: ```console-result { "took": 82, "timed_out": false, "_shards": ..., "hits": ..., "aggregations": { "sales_per_month": { "buckets": [ { "key_as_string": "2015/01/01 00:00:00", "key": 1420070400000, "doc_count": 3, "total_sales": { "value": 550.0 } }, { "key_as_string": "2015/03/01 00:00:00", "key": 1425168000000, "doc_count": 2, "total_sales": { "value": 375.0 } }, { "key_as_string": "2015/02/01 00:00:00", "key": 1422748800000, "doc_count": 2, "total_sales": { "value": 60.0 } } ] } } } ``` % TESTRESPONSE[s/"took": 82/"took": $body.took/] % TESTRESPONSE[s/"_shards": \.\.\./"_shards": $body._shards/] % TESTRESPONSE[s/"hits": \.\.\./"hits": $body.hits/] ## Truncating without sorting [_truncating_without_sorting] It is also possible to use this aggregation in order to truncate the result buckets without doing any sorting. To do so, just use the `from` and/or `size` parameters without specifying `sort`. 
The following example simply truncates the result so that only the second bucket is returned: ```console POST /sales/_search { "size": 0, "aggs": { "sales_per_month": { "date_histogram": { "field": "date", "calendar_interval": "month" }, "aggs": { "bucket_truncate": { "bucket_sort": { "from": 1, "size": 1 } } } } } } ``` % TEST[setup:sales] Response: ```console-result { "took": 11, "timed_out": false, "_shards": ..., "hits": ..., "aggregations": { "sales_per_month": { "buckets": [ { "key_as_string": "2015/02/01 00:00:00", "key": 1422748800000, "doc_count": 2 } ] } } } ``` % TESTRESPONSE[s/"took": 11/"took": $body.took/] % TESTRESPONSE[s/"_shards": \.\.\./"_shards": $body._shards/] % TESTRESPONSE[s/"hits": \.\.\./"hits": $body.hits/]
unknown
github
https://github.com/elastic/elasticsearch
docs/reference/aggregations/search-aggregations-pipeline-bucket-sort-aggregation.md
use rustc_abi::Endian;

use crate::spec::{Arch, LinkSelfContainedDefault, Target, TargetMetadata, TargetOptions, base};

/// Target definition for `m68k-unknown-linux-gnu` (Motorola 680x0 Linux, tier 3).
pub(crate) fn target() -> Target {
    // Start from the generic linux-gnu options and specialize for m68k.
    let mut base = base::linux_gnu::opts();
    base.cpu = "M68020".into(); // baseline LLVM CPU: 68020
    base.max_atomic_width = Some(32); // widest supported atomic operation width

    Target {
        llvm_target: "m68k-unknown-linux-gnu".into(),
        metadata: TargetMetadata {
            description: Some("Motorola 680x0 Linux".into()),
            tier: Some(3), // tier 3: builds, but no CI testing or prebuilt artifacts
            host_tools: Some(false),
            std: Some(true),
        },
        pointer_width: 32,
        // Big-endian layout; note the 16-bit preferred alignments ("32:16:32",
        // "S16") encoded in the string.
        data_layout: "E-m:e-p:32:16:32-i8:8:8-i16:16:16-i32:16:32-n8:16:32-a:0:16-S16".into(),
        arch: Arch::M68k,
        options: TargetOptions {
            endian: Endian::Big,
            // NOTE(review): "_mcount" is presumably the glibc profiling entry
            // symbol on this platform — confirm against the m68k ABI.
            mcount: "_mcount".into(),
            // LLD currently does not have support for M68k
            link_self_contained: LinkSelfContainedDefault::False,
            ..base
        },
    }
}
rust
github
https://github.com/rust-lang/rust
compiler/rustc_target/src/spec/targets/m68k_unknown_linux_gnu.rs
# -*- coding: utf-8 -*-
"""Load, decode, encode and store 8x8 2-bit-per-pixel sprites.

Each sprite is 16 bytes: two 8-byte bitplanes (channel A = low bit of each
pixel, channel B = high bit), giving pixel values 0-3.
NOTE(review): the 64-entry palette and two-bitplane tile layout look like the
NES CHR format, but that is not stated anywhere in this file — confirm.
"""

from collections import OrderedDict

# 64 colors as 0xRRGGBB values, indexed by palette entry.
palette = [
    0x788084, 0x0000fc, 0x0000c4, 0x4028c4, 0x94008c, 0xac0028, 0xac1000, 0x8c1800,
    0x503000, 0x007800, 0x006800, 0x005800, 0x004058, 0x000000, 0x000000, 0x000008,
    0xbcc0c4, 0x0078fc, 0x0088fc, 0x6848fc, 0xdc00d4, 0xe40060, 0xfc3800, 0xe46918,
    0xac8000, 0x00b800, 0x00a800, 0x00a848, 0x008894, 0x2c2c2c, 0x000000, 0x000000,
    0xfcf8fc, 0x38c0fc, 0x6888fc, 0x9c78fc, 0xfc78fc, 0xfc589c, 0xfc7858, 0xfca048,
    0xfcb800, 0xbcf818, 0x58d858, 0x58f89c, 0x00e8e4, 0x606060, 0x000000, 0x000000,
    0xfcf8fc, 0xa4e8fc, 0xbcb8fc, 0xdcb8fc, 0xfcb8fc, 0xf4c0e0, 0xf4d0b4, 0xfce0b4,
    0xfcd884, 0xdcf878, 0xb8f878, 0xb0f0d8, 0x00f8fc, 0xc8c0c0, 0x000000, 0x000000
]


def load_sprites(src):
    """Read a sprite file and return its bytes as a flat list of ints.

    The file length must be a multiple of 16 (16 bytes per sprite).
    NOTE: ord() over the file contents assumes Python 2 str bytes.
    """
    f = open(src, 'rb')
    content = f.read()
    f.close()
    assert len(content) % 16 == 0
    data = [ord(c) for c in content]  # renamed from `bin` (shadowed a builtin)
    return data


def load_indexed_sprites(src):
    """Like load_sprites, but also return an OrderedDict mapping each
    sprite's raw 16-byte string to its index in the file.
    """
    f = open(src, 'rb')
    content = f.read()
    f.close()  # fixed: the file handle was previously never closed
    assert len(content) % 16 == 0
    data = [ord(c) for c in content]
    indexes = OrderedDict()
    # // keeps integer semantics on Python 3 as well (identical under Python 2)
    for i in range(len(content) // 16):
        indexes[content[i * 16: i * 16 + 16]] = i
    return data, indexes


def decode_sprite(channelA, channelB):
    """Decode two 8-byte bitplanes into an 8x8 grid of pixel values 0-3.

    channelA holds the low bit of each pixel, channelB the high bit; x counts
    from the most significant bit of each byte.
    """
    s = []
    for y in range(0, 8):
        a = channelA[y]
        b = channelB[y]
        line = []
        for x in range(0, 8):
            bit = pow(2, 7 - x)
            # low bit from plane A, high bit from plane B
            pixel = (1 if a & bit else 0) | (2 if b & bit else 0)
            line.append(pixel)
        s.append(line)
    return s


def get_sprite(index, sprites):
    """Return sprite `index` from the flat byte list as an 8x8 pixel grid."""
    assert len(sprites) > index
    iA = index * 16
    iB = iA + 8
    iC = iB + 8
    channelA = sprites[iA:iB]
    channelB = sprites[iB:iC]
    return decode_sprite(channelA, channelB)


def encode_sprite(sprite):
    """Encode an 8x8 grid of pixel values 0-3 into 16 bitplane bytes
    (channel A followed by channel B)."""
    channelA = []
    channelB = []
    for y in range(8):
        a = 0
        b = 0
        for x in range(8):
            pixel = sprite[y][x]
            bit = pow(2, 7 - x)
            if pixel == 1:
                a = a | bit
            elif pixel == 2:
                b = b | bit
            elif pixel == 3:
                a = a | bit
                b = b | bit
        channelA.append(a)
        channelB.append(b)
    return channelA + channelB


def put_sprite(index, sprites, spr):
    """Overwrite sprite slot `index` in the flat byte list with pixel grid
    `spr`; returns the (mutated) list."""
    start = index * 16
    encoded = encode_sprite(spr)
    j = 0
    for i in range(start, start + 16):
        sprites[i] = encoded[j]
        j += 1
    return sprites


def length(sprites):
    """Number of 16-byte sprites in the flat byte list."""
    return len(sprites) // 16  # fixed: true division under Python 3


def find_sprite(sprites, spr, start=0):
    """Search for pixel grid `spr` from slot `start` onward.

    NOTE(review): returns the offset *relative to start* (index - start), not
    the absolute slot, and -1 when absent — looks intentional given the
    subtraction, but confirm with callers.
    """
    for index in range(start, length(sprites)):
        if spr == get_sprite(index, sprites):
            return index - start
    return -1


class SpriteSet():
    """Convenience wrapper pairing a flat sprite byte list with the
    raw-string -> index mapping produced by load_indexed_sprites."""

    def __init__(self, sprite_data=None):
        # Accept either a filename or a preloaded (data, indexes) pair.
        if isinstance(sprite_data, str):
            self.sprs, self.indexes = load_indexed_sprites(sprite_data)
        else:
            (self.sprs, self.indexes) = sprite_data

    def __len__(self):
        return length(self.sprs)

    def get(self, index):
        """Return sprite `index` as an 8x8 pixel grid."""
        return get_sprite(index, self.sprs)

    def put(self, index, spr):
        """Store pixel grid `spr` at sprite slot `index`.

        Fixed: the arguments to put_sprite were previously swapped
        (put_sprite(index, spr, self.sprs)), so put() never worked.
        """
        return put_sprite(index, self.sprs, spr)

    def has_sprite(self, spr):
        """Return the stored index of `spr` (an 8x8 grid or a raw 16-byte
        string), or False when absent.

        NOTE(review): an index of 0 is falsy — callers must compare against
        False explicitly, not truth-test the result.
        """
        if isinstance(spr, list):
            spr = encode_sprite(spr)
            spr = ''.join(chr(c) for c in spr)
        if spr in self.indexes:
            return self.indexes[spr]
        return False
unknown
codeparrot/codeparrot-clean
# # (c) 2016 Red Hat Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os import re from ansible.plugins.terminal import TerminalBase from ansible.errors import AnsibleConnectionFailure class TerminalModule(TerminalBase): terminal_stdout_re = [ re.compile(r"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"), re.compile(r"\@[\w\-\.]+:\S+?[>#\$] ?$") ] terminal_stderr_re = [ re.compile(r"\n\s*Invalid command:"), re.compile(r"\nCommit failed"), re.compile(r"\n\s+Set failed"), ] terminal_length = os.getenv('ANSIBLE_VYOS_TERMINAL_LENGTH', 10000) def on_open_shell(self): try: self._exec_cli_command('set terminal length 0') self._exec_cli_command('set terminal length %s' % self.terminal_length) except AnsibleConnectionFailure: raise AnsibleConnectionFailure('unable to set terminal parameters')
unknown
codeparrot/codeparrot-clean
"""Enable unit testing of Ansible collections. PYTEST_DONT_REWRITE"""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import os
import sys

# set by ansible-test to a single directory, rather than a list of directories as supported by Ansible itself
ANSIBLE_COLLECTIONS_PATH = os.path.join(os.environ['ANSIBLE_COLLECTIONS_PATH'], 'ansible_collections')

# set by ansible-test to the minimum python version supported on the controller
ANSIBLE_CONTROLLER_MIN_PYTHON_VERSION = tuple(int(x) for x in os.environ['ANSIBLE_CONTROLLER_MIN_PYTHON_VERSION'].split('.'))


# this monkeypatch to _pytest.pathlib.resolve_package_path fixes PEP420 resolution for collections in pytest >= 6.0.0
# NB: this code should never run under py2
def collection_resolve_package_path(path):
    """Configure the Python package path so that pytest can find our collections.

    Walks up from ``path`` until the collection root directory is reached and
    returns it as the PEP 420 package root for pytest to use.
    Raises an Exception when ``path`` is not under ANSIBLE_COLLECTIONS_PATH.
    """
    for parent in path.parents:
        if str(parent) == ANSIBLE_COLLECTIONS_PATH:
            return parent

    raise Exception('File "%s" not found in collection path "%s".' % (path, ANSIBLE_COLLECTIONS_PATH))


# this monkeypatch to py.path.local.LocalPath.pypkgpath fixes PEP420 resolution for collections in pytest < 6.0.0
def collection_pypkgpath(self):
    """Configure the Python package path so that pytest can find our collections.

    py.path equivalent of collection_resolve_package_path: iterates the path
    components from the leaf upwards looking for the collection root.
    """
    for parent in self.parts(reverse=True):
        if str(parent) == ANSIBLE_COLLECTIONS_PATH:
            return parent

    raise Exception('File "%s" not found in collection path "%s".' % (self.strpath, ANSIBLE_COLLECTIONS_PATH))


def pytest_configure():
    """Configure this pytest plugin.

    Installs the Ansible collection loader and patches pytest's package-path
    resolution so collection unit tests import under the
    ``ansible_collections`` namespace. Safe to call more than once.
    """
    try:
        # run-once guard: the function attribute acts as a sentinel so
        # repeated plugin configuration is a no-op
        if pytest_configure.executed:
            return
    except AttributeError:
        pytest_configure.executed = True

    # pick the collection finder implementation matching the running python
    if sys.version_info >= ANSIBLE_CONTROLLER_MIN_PYTHON_VERSION:
        # noinspection PyProtectedMember
        from ansible.utils.collection_loader._collection_finder import _AnsibleCollectionFinder
    else:
        # noinspection PyProtectedMember
        from ansible_test._internal.legacy_collection_loader._collection_finder import _AnsibleCollectionFinder

    # allow unit tests to import code from collections

    # noinspection PyProtectedMember
    _AnsibleCollectionFinder(paths=[os.path.dirname(ANSIBLE_COLLECTIONS_PATH)])._install()  # pylint: disable=protected-access

    try:
        # noinspection PyProtectedMember
        from _pytest import pathlib as _pytest_pathlib
    except ImportError:
        _pytest_pathlib = None

    if hasattr(_pytest_pathlib, 'resolve_package_path'):
        # pytest >= 6.0.0: patch the module-level resolver
        _pytest_pathlib.resolve_package_path = collection_resolve_package_path
    else:
        # looks like pytest <= 6.0.0, use the old hack against py.path
        # noinspection PyProtectedMember
        import py._path.local

        # force collections unit tests to be loaded with the ansible_collections namespace
        # original idea from https://stackoverflow.com/questions/50174130/how-do-i-pytest-a-project-using-pep-420-namespace-packages/50175552#50175552
        # noinspection PyProtectedMember
        py._path.local.LocalPath.pypkgpath = collection_pypkgpath  # pylint: disable=protected-access


# configure the plugin at import time
pytest_configure()
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*-

"""
Compatibility code to be able to use `cookielib.CookieJar` with requests.

requests.utils imports from here, so be careful with imports.
"""

import collections

from .compat import cookielib, urlparse, Morsel

try:
    # FIX: `MutableMapping` lives in `collections.abc` since Python 3.3 and
    # the alias in `collections` was removed in Python 3.10, so prefer the
    # new location and fall back for Python 2.
    from collections.abc import MutableMapping
except ImportError:  # Python 2
    from collections import MutableMapping

try:
    import threading
    # grr, pyflakes: this fixes "redefinition of unused 'threading'"
    threading
except ImportError:
    import dummy_threading as threading


class MockRequest(object):
    """Wraps a `requests.Request` to mimic a `urllib2.Request`.

    The code in `cookielib.CookieJar` expects this interface in order to correctly
    manage cookie policies, i.e., determine whether a cookie can be set, given the
    domains of the request and the cookie.

    The original request object is read-only. The client is responsible for collecting
    the new headers via `get_new_headers()` and interpreting them appropriately. You
    probably want `get_cookie_header`, defined below.
    """

    def __init__(self, request):
        self._r = request
        self._new_headers = {}
        self.type = urlparse(self._r.url).scheme

    def get_type(self):
        return self.type

    def get_host(self):
        return urlparse(self._r.url).netloc

    def get_origin_req_host(self):
        return self.get_host()

    def get_full_url(self):
        return self._r.url

    def is_unverifiable(self):
        return True

    def has_header(self, name):
        return name in self._r.headers or name in self._new_headers

    def get_header(self, name, default=None):
        return self._r.headers.get(name, self._new_headers.get(name, default))

    def add_header(self, key, val):
        """cookielib has no legitimate use for this method; add it back if you find one."""
        raise NotImplementedError("Cookie headers should be added with add_unredirected_header()")

    def add_unredirected_header(self, name, value):
        self._new_headers[name] = value

    def get_new_headers(self):
        return self._new_headers

    @property
    def unverifiable(self):
        return self.is_unverifiable()


class MockResponse(object):
    """Wraps a `httplib.HTTPMessage` to mimic a `urllib.addinfourl`.

    ...what? Basically, expose the parsed HTTP headers from the server response
    the way `cookielib` expects to see them.
    """

    def __init__(self, headers):
        """Make a MockResponse for `cookielib` to read.

        :param headers: a httplib.HTTPMessage or analogous carrying the headers
        """
        self._headers = headers

    def info(self):
        return self._headers

    def getheaders(self, name):
        # BUG FIX: the original implementation discarded the result, so every
        # caller received None instead of the header values.
        return self._headers.getheaders(name)


def extract_cookies_to_jar(jar, request, response):
    """Extract the cookies from the response into a CookieJar.

    :param jar: cookielib.CookieJar (not necessarily a RequestsCookieJar)
    :param request: our own requests.Request object
    :param response: urllib3.HTTPResponse object
    """
    # the _original_response field is the wrapped httplib.HTTPResponse object,
    req = MockRequest(request)
    # pull out the HTTPMessage with the headers and put it in the mock:
    res = MockResponse(response._original_response.msg)
    jar.extract_cookies(res, req)


def get_cookie_header(jar, request):
    """Produce an appropriate Cookie header string to be sent with `request`, or None."""
    r = MockRequest(request)
    jar.add_cookie_header(r)
    return r.get_new_headers().get('Cookie')


def remove_cookie_by_name(cookiejar, name, domain=None, path=None):
    """Unsets a cookie by name, by default over all domains and paths.

    Wraps CookieJar.clear(), is O(n).
    """
    # collect matches first: CookieJar.clear() mutates the jar, so we must
    # not call it while iterating
    clearables = []
    for cookie in cookiejar:
        if cookie.name == name:
            if domain is None or domain == cookie.domain:
                if path is None or path == cookie.path:
                    clearables.append((cookie.domain, cookie.path, cookie.name))

    for domain, path, name in clearables:
        cookiejar.clear(domain, path, name)


class CookieConflictError(RuntimeError):
    """There are two cookies that meet the criteria specified in the cookie jar.
    Use .get and .set and include domain and path args in order to be more specific."""


class RequestsCookieJar(cookielib.CookieJar, MutableMapping):
    """Compatibility class; is a cookielib.CookieJar, but exposes a dict interface.

    This is the CookieJar we create by default for requests and sessions that
    don't specify one, since some clients may expect response.cookies and
    session.cookies to support dict operations.

    Don't use the dict interface internally; it's just for compatibility with
    external client code. All `requests` code should work out of the box with
    externally provided instances of CookieJar, e.g., LWPCookieJar and
    FileCookieJar.

    Caution: dictionary operations that are normally O(1) may be O(n).

    Unlike a regular CookieJar, this class is pickleable.
    """

    def get(self, name, default=None, domain=None, path=None):
        """Dict-like get() that also supports optional domain and path args in
        order to resolve naming collisions from using one cookie jar over
        multiple domains. Caution: operation is O(n), not O(1)."""
        try:
            return self._find_no_duplicates(name, domain, path)
        except KeyError:
            return default

    def set(self, name, value, **kwargs):
        """Dict-like set() that also supports optional domain and path args in
        order to resolve naming collisions from using one cookie jar over
        multiple domains."""
        # support client code that unsets cookies by assignment of a None value:
        if value is None:
            remove_cookie_by_name(self, name, domain=kwargs.get('domain'), path=kwargs.get('path'))
            return

        if isinstance(value, Morsel):
            c = morsel_to_cookie(value)
        else:
            c = create_cookie(name, value, **kwargs)
        self.set_cookie(c)
        return c

    def keys(self):
        """Dict-like keys() that returns a list of names of cookies from the jar.
        See values() and items()."""
        return [cookie.name for cookie in iter(self)]

    def values(self):
        """Dict-like values() that returns a list of values of cookies from the jar.
        See keys() and items()."""
        return [cookie.value for cookie in iter(self)]

    def items(self):
        """Dict-like items() that returns a list of name-value tuples from the jar.
        See keys() and values(). Allows client-code to call
        "dict(RequestsCookieJar)" and get a vanilla python dict of key value pairs."""
        return [(cookie.name, cookie.value) for cookie in iter(self)]

    def list_domains(self):
        """Utility method to list all the domains in the jar."""
        domains = []
        for cookie in iter(self):
            if cookie.domain not in domains:
                domains.append(cookie.domain)
        return domains

    def list_paths(self):
        """Utility method to list all the paths in the jar."""
        paths = []
        for cookie in iter(self):
            if cookie.path not in paths:
                paths.append(cookie.path)
        return paths

    def multiple_domains(self):
        """Returns True if there are multiple domains in the jar.
        Returns False otherwise."""
        domains = []
        for cookie in iter(self):
            if cookie.domain is not None and cookie.domain in domains:
                return True
            domains.append(cookie.domain)
        return False  # there is only one domain in jar

    def get_dict(self, domain=None, path=None):
        """Takes as an argument an optional domain and path and returns a plain old
        Python dict of name-value pairs of cookies that meet the requirements."""
        return dict(
            (cookie.name, cookie.value)
            for cookie in iter(self)
            if (domain is None or cookie.domain == domain) and (path is None or cookie.path == path))

    def __getitem__(self, name):
        """Dict-like __getitem__() for compatibility with client code. Throws exception
        if there are more than one cookie with name. In that case, use the more
        explicit get() method instead. Caution: operation is O(n), not O(1)."""
        return self._find_no_duplicates(name)

    def __setitem__(self, name, value):
        """Dict-like __setitem__ for compatibility with client code. Throws exception
        if there is already a cookie of that name in the jar. In that case, use the more
        explicit set() method instead."""
        self.set(name, value)

    def __delitem__(self, name):
        """Deletes a cookie given a name. Wraps cookielib.CookieJar's
        remove_cookie_by_name()."""
        remove_cookie_by_name(self, name)

    def _find(self, name, domain=None, path=None):
        """Requests uses this method internally to get cookie values. Takes as args name
        and optional domain and path. Returns a cookie.value. If there are conflicting
        cookies, _find arbitrarily chooses one. See _find_no_duplicates if you want an
        exception thrown if there are conflicting cookies."""
        for cookie in iter(self):
            if cookie.name == name:
                if domain is None or cookie.domain == domain:
                    if path is None or cookie.path == path:
                        return cookie.value

        raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path))

    def _find_no_duplicates(self, name, domain=None, path=None):
        """__get_item__ and get call _find_no_duplicates -- never used in Requests
        internally. Takes as args name and optional domain and path. Returns a
        cookie.value. Throws KeyError if cookie is not found and CookieConflictError
        if there are multiple cookies that match name and optionally domain and path."""
        toReturn = None
        for cookie in iter(self):
            if cookie.name == name:
                if domain is None or cookie.domain == domain:
                    if path is None or cookie.path == path:
                        if toReturn is not None:  # if there are multiple cookies that meet passed in criteria
                            raise CookieConflictError('There are multiple cookies with name, %r' % (name))
                        toReturn = cookie.value  # we will eventually return this as long as no cookie conflict

        if toReturn:
            # NOTE(review): a matching cookie whose value is an empty string is
            # treated as "not found" here and falls through to KeyError --
            # confirm whether that is intended before tightening to `is not None`.
            return toReturn
        raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path))

    def __getstate__(self):
        """Unlike a normal CookieJar, this class is pickleable."""
        state = self.__dict__.copy()
        # remove the unpickleable RLock object
        state.pop('_cookies_lock')
        return state

    def __setstate__(self, state):
        """Unlike a normal CookieJar, this class is pickleable."""
        self.__dict__.update(state)
        if '_cookies_lock' not in self.__dict__:
            self._cookies_lock = threading.RLock()

    def copy(self):
        """This is not implemented. Calling this will throw an exception."""
        raise NotImplementedError


def create_cookie(name, value, **kwargs):
    """Make a cookie from underspecified parameters.

    By default, the pair of `name` and `value` will be set for the domain ''
    and sent on every request (this is sometimes called a "supercookie").
    """
    result = dict(
        version=0,
        name=name,
        value=value,
        port=None,
        domain='',
        path='/',
        secure=False,
        expires=None,
        discard=True,
        comment=None,
        comment_url=None,
        rest={'HttpOnly': None},
        rfc2109=False,)

    badargs = set(kwargs) - set(result)
    if badargs:
        err = 'create_cookie() got unexpected keyword arguments: %s'
        raise TypeError(err % list(badargs))

    result.update(kwargs)
    result['port_specified'] = bool(result['port'])
    result['domain_specified'] = bool(result['domain'])
    result['domain_initial_dot'] = result['domain'].startswith('.')
    result['path_specified'] = bool(result['path'])

    return cookielib.Cookie(**result)


def morsel_to_cookie(morsel):
    """Convert a Morsel object into a Cookie containing the one k/v pair."""
    c = create_cookie(
        name=morsel.key,
        value=morsel.value,
        version=morsel['version'] or 0,
        port=None,
        port_specified=False,
        domain=morsel['domain'],
        domain_specified=bool(morsel['domain']),
        domain_initial_dot=morsel['domain'].startswith('.'),
        path=morsel['path'],
        path_specified=bool(morsel['path']),
        secure=bool(morsel['secure']),
        # NOTE(review): a max-age value is a relative lifetime in seconds but is
        # passed straight through as an absolute expiry here -- verify against
        # callers before changing.
        expires=morsel['max-age'] or morsel['expires'],
        discard=False,
        comment=morsel['comment'],
        comment_url=bool(morsel['comment']),
        rest={'HttpOnly': morsel['httponly']},
        rfc2109=False,)
    return c


def cookiejar_from_dict(cookie_dict, cookiejar=None):
    """Returns a CookieJar from a key/value dictionary.

    :param cookie_dict: Dict of key/values to insert into CookieJar.
    """
    if cookiejar is None:
        cookiejar = RequestsCookieJar()

    if cookie_dict is not None:
        for name in cookie_dict:
            cookiejar.set_cookie(create_cookie(name, cookie_dict[name]))
    return cookiejar
unknown
codeparrot/codeparrot-clean
# Copyright 2012, Nachi Ueno, NTT MCL, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import re import netaddr from oslo_config import cfg from oslo_log import log as logging import six from neutron.agent import firewall from neutron.agent.linux import ip_conntrack from neutron.agent.linux import ipset_manager from neutron.agent.linux import iptables_comments as ic from neutron.agent.linux import iptables_manager from neutron.agent.linux import utils from neutron.common import constants from neutron.common import exceptions as n_exc from neutron.common import ipv6_utils from neutron.extensions import portsecurity as psec from neutron.i18n import _LI LOG = logging.getLogger(__name__) SG_CHAIN = 'sg-chain' SPOOF_FILTER = 'spoof-filter' CHAIN_NAME_PREFIX = {firewall.INGRESS_DIRECTION: 'i', firewall.EGRESS_DIRECTION: 'o', SPOOF_FILTER: 's'} DIRECTION_IP_PREFIX = {firewall.INGRESS_DIRECTION: 'source_ip_prefix', firewall.EGRESS_DIRECTION: 'dest_ip_prefix'} IPSET_DIRECTION = {firewall.INGRESS_DIRECTION: 'src', firewall.EGRESS_DIRECTION: 'dst'} # length of all device prefixes (e.g. 
qvo, tap, qvb) LINUX_DEV_PREFIX_LEN = 3 LINUX_DEV_LEN = 14 MAX_CONNTRACK_ZONES = 65535 comment_rule = iptables_manager.comment_rule class IptablesFirewallDriver(firewall.FirewallDriver): """Driver which enforces security groups through iptables rules.""" IPTABLES_DIRECTION = {firewall.INGRESS_DIRECTION: 'physdev-out', firewall.EGRESS_DIRECTION: 'physdev-in'} def __init__(self, namespace=None): self.iptables = iptables_manager.IptablesManager( use_ipv6=ipv6_utils.is_enabled(), namespace=namespace) # TODO(majopela, shihanzhang): refactor out ipset to a separate # driver composed over this one self.ipset = ipset_manager.IpsetManager(namespace=namespace) self.ipconntrack = ip_conntrack.IpConntrackManager( self.get_device_zone, namespace=namespace) self._populate_initial_zone_map() # list of port which has security group self.filtered_ports = {} self.unfiltered_ports = {} self._add_fallback_chain_v4v6() self._defer_apply = False self._pre_defer_filtered_ports = None self._pre_defer_unfiltered_ports = None # List of security group rules for ports residing on this host self.sg_rules = {} self.pre_sg_rules = None # List of security group member ips for ports residing on this host self.sg_members = collections.defaultdict( lambda: collections.defaultdict(list)) self.pre_sg_members = None self.enable_ipset = cfg.CONF.SECURITYGROUP.enable_ipset self._enabled_netfilter_for_bridges = False self.updated_rule_sg_ids = set() self.updated_sg_members = set() self.devices_with_udpated_sg_members = collections.defaultdict(list) def _enable_netfilter_for_bridges(self): # we only need to set these values once, but it has to be when # we create a bridge; before that the bridge module might not # be loaded and the proc values aren't there. if self._enabled_netfilter_for_bridges: return else: self._enabled_netfilter_for_bridges = True # These proc values ensure that netfilter is enabled on # bridges; essential for enforcing security groups rules with # OVS Hybrid. 
Distributions can differ on whether this is # enabled by default or not (Ubuntu - yes, Redhat - no, for # example). LOG.debug("Enabling netfilter for bridges") utils.execute(['sysctl', '-w', 'net.bridge.bridge-nf-call-arptables=1'], run_as_root=True) utils.execute(['sysctl', '-w', 'net.bridge.bridge-nf-call-ip6tables=1'], run_as_root=True) utils.execute(['sysctl', '-w', 'net.bridge.bridge-nf-call-iptables=1'], run_as_root=True) @property def ports(self): return dict(self.filtered_ports, **self.unfiltered_ports) def _update_remote_security_group_members(self, sec_group_ids): for sg_id in sec_group_ids: for device in self.filtered_ports.values(): if sg_id in device.get('security_group_source_groups', []): self.devices_with_udpated_sg_members[sg_id].append(device) def security_group_updated(self, action_type, sec_group_ids, device_ids=[]): if action_type == 'sg_rule': self.updated_rule_sg_ids.update(sec_group_ids) elif action_type == 'sg_member': if device_ids: self.updated_sg_members.update(device_ids) else: self._update_remote_security_group_members(sec_group_ids) def update_security_group_rules(self, sg_id, sg_rules): LOG.debug("Update rules of security group (%s)", sg_id) self.sg_rules[sg_id] = sg_rules def update_security_group_members(self, sg_id, sg_members): LOG.debug("Update members of security group (%s)", sg_id) self.sg_members[sg_id] = collections.defaultdict(list, sg_members) def _ps_enabled(self, port): return port.get(psec.PORTSECURITY, True) def _set_ports(self, port): if not self._ps_enabled(port): self.unfiltered_ports[port['device']] = port self.filtered_ports.pop(port['device'], None) else: self.filtered_ports[port['device']] = port self.unfiltered_ports.pop(port['device'], None) def _unset_ports(self, port): self.unfiltered_ports.pop(port['device'], None) self.filtered_ports.pop(port['device'], None) def prepare_port_filter(self, port): LOG.debug("Preparing device (%s) filter", port['device']) self._remove_chains() self._set_ports(port) 
self._enable_netfilter_for_bridges() # each security group has it own chains self._setup_chains() self.iptables.apply() def update_port_filter(self, port): LOG.debug("Updating device (%s) filter", port['device']) if port['device'] not in self.ports: LOG.info(_LI('Attempted to update port filter which is not ' 'filtered %s'), port['device']) return self._remove_chains() self._set_ports(port) self._setup_chains() self.iptables.apply() def remove_port_filter(self, port): LOG.debug("Removing device (%s) filter", port['device']) if port['device'] not in self.ports: LOG.info(_LI('Attempted to remove port filter which is not ' 'filtered %r'), port) return self._remove_chains() self._unset_ports(port) self._setup_chains() self.iptables.apply() def _add_accept_rule_port_sec(self, port, direction): self._update_port_sec_rules(port, direction, add=True) def _remove_rule_port_sec(self, port, direction): self._update_port_sec_rules(port, direction, add=False) def _remove_rule_from_chain_v4v6(self, chain_name, ipv4_rules, ipv6_rules): for rule in ipv4_rules: self.iptables.ipv4['filter'].remove_rule(chain_name, rule) for rule in ipv6_rules: self.iptables.ipv6['filter'].remove_rule(chain_name, rule) def _setup_chains(self): """Setup ingress and egress chain for a port.""" if not self._defer_apply: self._setup_chains_apply(self.filtered_ports, self.unfiltered_ports) def _setup_chains_apply(self, ports, unfiltered_ports): self._add_chain_by_name_v4v6(SG_CHAIN) for port in ports.values(): self._setup_chain(port, firewall.INGRESS_DIRECTION) self._setup_chain(port, firewall.EGRESS_DIRECTION) self.iptables.ipv4['filter'].add_rule(SG_CHAIN, '-j ACCEPT') self.iptables.ipv6['filter'].add_rule(SG_CHAIN, '-j ACCEPT') for port in unfiltered_ports.values(): self._add_accept_rule_port_sec(port, firewall.INGRESS_DIRECTION) self._add_accept_rule_port_sec(port, firewall.EGRESS_DIRECTION) def _remove_chains(self): """Remove ingress and egress chain for a port.""" if not self._defer_apply: 
self._remove_chains_apply(self.filtered_ports, self.unfiltered_ports) def _remove_chains_apply(self, ports, unfiltered_ports): for port in ports.values(): self._remove_chain(port, firewall.INGRESS_DIRECTION) self._remove_chain(port, firewall.EGRESS_DIRECTION) self._remove_chain(port, SPOOF_FILTER) for port in unfiltered_ports.values(): self._remove_rule_port_sec(port, firewall.INGRESS_DIRECTION) self._remove_rule_port_sec(port, firewall.EGRESS_DIRECTION) self._remove_chain_by_name_v4v6(SG_CHAIN) def _setup_chain(self, port, DIRECTION): self._add_chain(port, DIRECTION) self._add_rules_by_security_group(port, DIRECTION) def _remove_chain(self, port, DIRECTION): chain_name = self._port_chain_name(port, DIRECTION) self._remove_chain_by_name_v4v6(chain_name) def _add_fallback_chain_v4v6(self): self.iptables.ipv4['filter'].add_chain('sg-fallback') self.iptables.ipv4['filter'].add_rule('sg-fallback', '-j DROP', comment=ic.UNMATCH_DROP) self.iptables.ipv6['filter'].add_chain('sg-fallback') self.iptables.ipv6['filter'].add_rule('sg-fallback', '-j DROP', comment=ic.UNMATCH_DROP) def _add_raw_chain(self, chain_name): self.iptables.ipv4['raw'].add_chain(chain_name) self.iptables.ipv6['raw'].add_chain(chain_name) def _add_chain_by_name_v4v6(self, chain_name): self.iptables.ipv4['filter'].add_chain(chain_name) self.iptables.ipv6['filter'].add_chain(chain_name) def _remove_raw_chain(self, chain_name): self.iptables.ipv4['raw'].remove_chain(chain_name) self.iptables.ipv6['raw'].remove_chain(chain_name) def _remove_chain_by_name_v4v6(self, chain_name): self.iptables.ipv4['filter'].remove_chain(chain_name) self.iptables.ipv6['filter'].remove_chain(chain_name) def _add_rules_to_chain_v4v6(self, chain_name, ipv4_rules, ipv6_rules, comment=None): for rule in ipv4_rules: self.iptables.ipv4['filter'].add_rule(chain_name, rule, comment=comment) for rule in ipv6_rules: self.iptables.ipv6['filter'].add_rule(chain_name, rule, comment=comment) def _get_device_name(self, port): return 
port['device'] def _update_port_sec_rules(self, port, direction, add=False): # add/remove rules in FORWARD and INPUT chain device = self._get_device_name(port) jump_rule = ['-m physdev --%s %s --physdev-is-bridged ' '-j ACCEPT' % (self.IPTABLES_DIRECTION[direction], device)] if add: self._add_rules_to_chain_v4v6( 'FORWARD', jump_rule, jump_rule, comment=ic.PORT_SEC_ACCEPT) else: self._remove_rule_from_chain_v4v6('FORWARD', jump_rule, jump_rule) if direction == firewall.EGRESS_DIRECTION: jump_rule = ['-m physdev --%s %s --physdev-is-bridged ' '-j ACCEPT' % (self.IPTABLES_DIRECTION[direction], device)] if add: self._add_rules_to_chain_v4v6('INPUT', jump_rule, jump_rule, comment=ic.PORT_SEC_ACCEPT) else: self._remove_rule_from_chain_v4v6( 'INPUT', jump_rule, jump_rule) def _add_chain(self, port, direction): chain_name = self._port_chain_name(port, direction) self._add_chain_by_name_v4v6(chain_name) # Note(nati) jump to the security group chain (SG_CHAIN) # This is needed because the packet may much two rule in port # if the two port is in the same host # We accept the packet at the end of SG_CHAIN. 
# jump to the security group chain device = self._get_device_name(port) jump_rule = ['-m physdev --%s %s --physdev-is-bridged ' '-j $%s' % (self.IPTABLES_DIRECTION[direction], device, SG_CHAIN)] self._add_rules_to_chain_v4v6('FORWARD', jump_rule, jump_rule, comment=ic.VM_INT_SG) # jump to the chain based on the device jump_rule = ['-m physdev --%s %s --physdev-is-bridged ' '-j $%s' % (self.IPTABLES_DIRECTION[direction], device, chain_name)] self._add_rules_to_chain_v4v6(SG_CHAIN, jump_rule, jump_rule, comment=ic.SG_TO_VM_SG) if direction == firewall.EGRESS_DIRECTION: self._add_rules_to_chain_v4v6('INPUT', jump_rule, jump_rule, comment=ic.INPUT_TO_SG) def _split_sgr_by_ethertype(self, security_group_rules): ipv4_sg_rules = [] ipv6_sg_rules = [] for rule in security_group_rules: if rule.get('ethertype') == constants.IPv4: ipv4_sg_rules.append(rule) elif rule.get('ethertype') == constants.IPv6: if rule.get('protocol') == 'icmp': rule['protocol'] = 'icmpv6' ipv6_sg_rules.append(rule) return ipv4_sg_rules, ipv6_sg_rules def _select_sgr_by_direction(self, port, direction): return [rule for rule in port.get('security_group_rules', []) if rule['direction'] == direction] def _setup_spoof_filter_chain(self, port, table, mac_ip_pairs, rules): if mac_ip_pairs: chain_name = self._port_chain_name(port, SPOOF_FILTER) table.add_chain(chain_name) for mac, ip in mac_ip_pairs: if ip is None: # If fixed_ips is [] this rule will be added to the end # of the list after the allowed_address_pair rules. 
table.add_rule(chain_name, '-m mac --mac-source %s -j RETURN' % mac.upper(), comment=ic.PAIR_ALLOW) else: table.add_rule(chain_name, '-s %s -m mac --mac-source %s -j RETURN' % (ip, mac.upper()), comment=ic.PAIR_ALLOW) table.add_rule(chain_name, '-j DROP', comment=ic.PAIR_DROP) rules.append('-j $%s' % chain_name) def _build_ipv4v6_mac_ip_list(self, mac, ip_address, mac_ipv4_pairs, mac_ipv6_pairs): mac = str(netaddr.EUI(mac, dialect=netaddr.mac_unix)) if netaddr.IPNetwork(ip_address).version == 4: mac_ipv4_pairs.append((mac, ip_address)) else: mac_ipv6_pairs.append((mac, ip_address)) def _spoofing_rule(self, port, ipv4_rules, ipv6_rules): # Allow dhcp client packets ipv4_rules += [comment_rule('-p udp -m udp --sport 68 --dport 67 ' '-j RETURN', comment=ic.DHCP_CLIENT)] # Drop Router Advts from the port. ipv6_rules += [comment_rule('-p icmpv6 --icmpv6-type %s ' '-j DROP' % constants.ICMPV6_TYPE_RA, comment=ic.IPV6_RA_DROP)] ipv6_rules += [comment_rule('-p icmpv6 -j RETURN', comment=ic.IPV6_ICMP_ALLOW)] ipv6_rules += [comment_rule('-p udp -m udp --sport 546 --dport 547 ' '-j RETURN', comment=ic.DHCP_CLIENT)] mac_ipv4_pairs = [] mac_ipv6_pairs = [] if isinstance(port.get('allowed_address_pairs'), list): for address_pair in port['allowed_address_pairs']: self._build_ipv4v6_mac_ip_list(address_pair['mac_address'], address_pair['ip_address'], mac_ipv4_pairs, mac_ipv6_pairs) for ip in port['fixed_ips']: self._build_ipv4v6_mac_ip_list(port['mac_address'], ip, mac_ipv4_pairs, mac_ipv6_pairs) if not port['fixed_ips']: mac_ipv4_pairs.append((port['mac_address'], None)) mac_ipv6_pairs.append((port['mac_address'], None)) self._setup_spoof_filter_chain(port, self.iptables.ipv4['filter'], mac_ipv4_pairs, ipv4_rules) self._setup_spoof_filter_chain(port, self.iptables.ipv6['filter'], mac_ipv6_pairs, ipv6_rules) def _drop_dhcp_rule(self, ipv4_rules, ipv6_rules): #Note(nati) Drop dhcp packet from VM ipv4_rules += [comment_rule('-p udp -m udp --sport 67 --dport 68 ' '-j DROP', 
comment=ic.DHCP_SPOOF)] ipv6_rules += [comment_rule('-p udp -m udp --sport 547 --dport 546 ' '-j DROP', comment=ic.DHCP_SPOOF)] def _accept_inbound_icmpv6(self): # Allow multicast listener, neighbor solicitation and # neighbor advertisement into the instance icmpv6_rules = [] for icmp6_type in constants.ICMPV6_ALLOWED_TYPES: icmpv6_rules += ['-p icmpv6 --icmpv6-type %s -j RETURN' % icmp6_type] return icmpv6_rules def _select_sg_rules_for_port(self, port, direction): """Select rules from the security groups the port is member of.""" port_sg_ids = port.get('security_groups', []) port_rules = [] for sg_id in port_sg_ids: for rule in self.sg_rules.get(sg_id, []): if rule['direction'] == direction: if self.enable_ipset: port_rules.append(rule) else: port_rules.extend( self._expand_sg_rule_with_remote_ips( rule, port, direction)) return port_rules def _expand_sg_rule_with_remote_ips(self, rule, port, direction): """Expand a remote group rule to rule per remote group IP.""" remote_group_id = rule.get('remote_group_id') if remote_group_id: ethertype = rule['ethertype'] port_ips = port.get('fixed_ips', []) for ip in self.sg_members[remote_group_id][ethertype]: if ip not in port_ips: ip_rule = rule.copy() direction_ip_prefix = DIRECTION_IP_PREFIX[direction] ip_prefix = str(netaddr.IPNetwork(ip).cidr) ip_rule[direction_ip_prefix] = ip_prefix yield ip_rule else: yield rule def _get_remote_sg_ids(self, port, direction=None): sg_ids = port.get('security_groups', []) remote_sg_ids = {constants.IPv4: set(), constants.IPv6: set()} for sg_id in sg_ids: for rule in self.sg_rules.get(sg_id, []): if not direction or rule['direction'] == direction: remote_sg_id = rule.get('remote_group_id') ether_type = rule.get('ethertype') if remote_sg_id and ether_type: remote_sg_ids[ether_type].add(remote_sg_id) return remote_sg_ids def _add_rules_by_security_group(self, port, direction): # select rules for current port and direction security_group_rules = self._select_sgr_by_direction(port, 
direction) security_group_rules += self._select_sg_rules_for_port(port, direction) # make sure ipset members are updated for remote security groups if self.enable_ipset: remote_sg_ids = self._get_remote_sg_ids(port, direction) self._update_ipset_members(remote_sg_ids) # split groups by ip version # for ipv4, iptables command is used # for ipv6, iptables6 command is used ipv4_sg_rules, ipv6_sg_rules = self._split_sgr_by_ethertype( security_group_rules) ipv4_iptables_rules = [] ipv6_iptables_rules = [] # include fixed egress/ingress rules if direction == firewall.EGRESS_DIRECTION: self._add_fixed_egress_rules(port, ipv4_iptables_rules, ipv6_iptables_rules) elif direction == firewall.INGRESS_DIRECTION: ipv6_iptables_rules += self._accept_inbound_icmpv6() # include IPv4 and IPv6 iptable rules from security group ipv4_iptables_rules += self._convert_sgr_to_iptables_rules( ipv4_sg_rules) ipv6_iptables_rules += self._convert_sgr_to_iptables_rules( ipv6_sg_rules) # finally add the rules to the port chain for a given direction self._add_rules_to_chain_v4v6(self._port_chain_name(port, direction), ipv4_iptables_rules, ipv6_iptables_rules) def _add_fixed_egress_rules(self, port, ipv4_iptables_rules, ipv6_iptables_rules): self._spoofing_rule(port, ipv4_iptables_rules, ipv6_iptables_rules) self._drop_dhcp_rule(ipv4_iptables_rules, ipv6_iptables_rules) def _update_ipset_members(self, security_group_ids): for ip_version, sg_ids in security_group_ids.items(): for sg_id in sg_ids: current_ips = self.sg_members[sg_id][ip_version] self.ipset.set_members(sg_id, ip_version, current_ips) def _generate_ipset_rule_args(self, sg_rule, remote_gid): ethertype = sg_rule.get('ethertype') ipset_name = self.ipset.get_name(remote_gid, ethertype) if not self.ipset.set_exists(remote_gid, ethertype): #NOTE(mangelajo): ipsets for empty groups are not created # thus we can't reference them. 
            # NOTE(review): the ipset for this remote group was never created
            # (empty groups get no ipset), so no iptables rule may reference
            # it; signal the caller to skip this rule.
            return None
        ipset_direction = IPSET_DIRECTION[sg_rule.get('direction')]
        args = self._generate_protocol_and_port_args(sg_rule)
        args += ['-m set', '--match-set', ipset_name, ipset_direction]
        args += ['-j RETURN']
        return args

    def _generate_protocol_and_port_args(self, sg_rule):
        """Build the protocol and source/destination port match arguments
        for a single security group rule.
        """
        args = self._protocol_arg(sg_rule.get('protocol'))
        args += self._port_arg('sport', sg_rule.get('protocol'),
                               sg_rule.get('source_port_range_min'),
                               sg_rule.get('source_port_range_max'))
        args += self._port_arg('dport', sg_rule.get('protocol'),
                               sg_rule.get('port_range_min'),
                               sg_rule.get('port_range_max'))
        return args

    def _generate_plain_rule_args(self, sg_rule):
        """Build a plain (non-ipset) iptables argument list for a rule."""
        # These arguments MUST be in the format iptables-save will
        # display them: source/dest, protocol, sport, dport, target
        # Otherwise the iptables_manager code won't be able to find
        # them to preserve their [packet:byte] counts.
        args = self._ip_prefix_arg('s', sg_rule.get('source_ip_prefix'))
        args += self._ip_prefix_arg('d', sg_rule.get('dest_ip_prefix'))
        args += self._generate_protocol_and_port_args(sg_rule)
        args += ['-j RETURN']
        return args

    def _convert_sg_rule_to_iptables_args(self, sg_rule):
        """Translate one SG rule into an iptables argument list.

        Uses an ipset match when ipsets are enabled and the rule targets a
        remote security group; otherwise emits a plain rule.  May return
        None when the referenced ipset does not exist.
        """
        remote_gid = sg_rule.get('remote_group_id')
        if self.enable_ipset and remote_gid:
            return self._generate_ipset_rule_args(sg_rule, remote_gid)
        else:
            return self._generate_plain_rule_args(sg_rule)

    def _convert_sgr_to_iptables_rules(self, security_group_rules):
        """Convert SG rules into iptables rule strings.

        Always prepends the drop-invalid and allow-established rules and
        appends the fallback jump for traffic no rule matched.
        """
        iptables_rules = []
        self._drop_invalid_packets(iptables_rules)
        self._allow_established(iptables_rules)
        for rule in security_group_rules:
            # args may be None (missing ipset) -- skip such rules.
            args = self._convert_sg_rule_to_iptables_args(rule)
            if args:
                iptables_rules += [' '.join(args)]
        iptables_rules += [comment_rule('-j $sg-fallback',
                                        comment=ic.UNMATCHED)]
        return iptables_rules

    def _drop_invalid_packets(self, iptables_rules):
        # Always drop invalid packets
        iptables_rules += [comment_rule('-m state --state '
                                        'INVALID -j DROP',
                                        comment=ic.INVALID_DROP)]
        return iptables_rules

    def _allow_established(self, iptables_rules):
        # Allow established connections
        iptables_rules += [comment_rule(
            '-m state --state RELATED,ESTABLISHED -j RETURN',
            comment=ic.ALLOW_ASSOC)]
        return iptables_rules

    def _protocol_arg(self, protocol):
        """Return ['-p', protocol] (plus '-m <proto>' for tcp/udp) or []."""
        if not protocol:
            return []
        iptables_rule = ['-p', protocol]
        # iptables always adds '-m protocol' for udp and tcp
        if protocol in ['udp', 'tcp']:
            iptables_rule += ['-m', protocol]
        return iptables_rule

    def _port_arg(self, direction, protocol, port_range_min, port_range_max):
        """Build port (or ICMP type/code) match arguments.

        :param direction: 'sport' or 'dport'.
        Returns [] for protocols without a port concept or when no minimum
        was supplied.
        """
        if (protocol not in ['udp', 'tcp', 'icmp', 'icmpv6'] or
                port_range_min is None):
            return []
        if protocol in ['icmp', 'icmpv6']:
            # Note(xuhanp): port_range_min/port_range_max represent
            # icmp type/code when protocol is icmp or icmpv6
            # icmp code can be 0 so we cannot use "if port_range_max" here
            if port_range_max is not None:
                return ['--%s-type' % protocol,
                        '%s/%s' % (port_range_min, port_range_max)]
            return ['--%s-type' % protocol, '%s' % port_range_min]
        elif port_range_min == port_range_max:
            # single port: plain --sport/--dport match
            return ['--%s' % direction, '%s' % (port_range_min,)]
        else:
            # port range: needs the multiport match extension
            return ['-m', 'multiport', '--%ss' % direction,
                    '%s:%s' % (port_range_min, port_range_max)]

    def _ip_prefix_arg(self, direction, ip_prefix):
        """Return ['-s'/'-d', prefix] or [] when no prefix was given."""
        # NOTE (nati) : source_group_id is converted to list of source_
        # ip_prefix in server side
        if ip_prefix:
            return ['-%s' % direction, ip_prefix]
        return []

    def _port_chain_name(self, port, direction):
        # The per-port chain name is derived from the device name with its
        # 3-character prefix stripped.
        return iptables_manager.get_chain_name(
            '%s%s' % (CHAIN_NAME_PREFIX[direction], port['device'][3:]))

    def filter_defer_apply_on(self):
        """Start deferring iptables application.

        Snapshots the current port/SG state so it can be diffed against the
        post-defer state when apply is turned back on.
        """
        if not self._defer_apply:
            self.iptables.defer_apply_on()
            self._pre_defer_filtered_ports = dict(self.filtered_ports)
            self._pre_defer_unfiltered_ports = dict(self.unfiltered_ports)
            self.pre_sg_members = dict(self.sg_members)
            self.pre_sg_rules = dict(self.sg_rules)
            self._defer_apply = True

    def _remove_unused_security_group_info(self):
        """Remove any unnecessary local security group info or unused ipsets.
This function has to be called after applying the last iptables rules, so we're in a point where no iptable rule depends on an ipset we're going to delete. """ filtered_ports = self.filtered_ports.values() remote_sgs_to_remove = self._determine_remote_sgs_to_remove( filtered_ports) for ip_version, remote_sg_ids in six.iteritems(remote_sgs_to_remove): if self.enable_ipset: self._remove_ipsets_for_remote_sgs(ip_version, remote_sg_ids) self._remove_sg_members(remote_sgs_to_remove) # Remove unused security group rules for remove_group_id in self._determine_sg_rules_to_remove( filtered_ports): self.sg_rules.pop(remove_group_id, None) def _determine_remote_sgs_to_remove(self, filtered_ports): """Calculate which remote security groups we don't need anymore. We do the calculation for each ip_version. """ sgs_to_remove_per_ipversion = {constants.IPv4: set(), constants.IPv6: set()} remote_group_id_sets = self._get_remote_sg_ids_sets_by_ipversion( filtered_ports) for ip_version, remote_group_id_set in ( six.iteritems(remote_group_id_sets)): sgs_to_remove_per_ipversion[ip_version].update( set(self.pre_sg_members) - remote_group_id_set) return sgs_to_remove_per_ipversion def _get_remote_sg_ids_sets_by_ipversion(self, filtered_ports): """Given a port, calculates the remote sg references by ip_version.""" remote_group_id_sets = {constants.IPv4: set(), constants.IPv6: set()} for port in filtered_ports: remote_sg_ids = self._get_remote_sg_ids(port) for ip_version in (constants.IPv4, constants.IPv6): remote_group_id_sets[ip_version] |= remote_sg_ids[ip_version] return remote_group_id_sets def _determine_sg_rules_to_remove(self, filtered_ports): """Calculate which security groups need to be removed. We find out by subtracting our previous sg group ids, with the security groups associated to a set of ports. 
""" port_group_ids = self._get_sg_ids_set_for_ports(filtered_ports) return set(self.pre_sg_rules) - port_group_ids def _get_sg_ids_set_for_ports(self, filtered_ports): """Get the port security group ids as a set.""" port_group_ids = set() for port in filtered_ports: port_group_ids.update(port.get('security_groups', [])) return port_group_ids def _remove_ipsets_for_remote_sgs(self, ip_version, remote_sg_ids): """Remove system ipsets matching the provided parameters.""" for remote_sg_id in remote_sg_ids: self.ipset.destroy(remote_sg_id, ip_version) def _remove_sg_members(self, remote_sgs_to_remove): """Remove sg_member entries.""" ipv4_sec_group_set = remote_sgs_to_remove.get(constants.IPv4) ipv6_sec_group_set = remote_sgs_to_remove.get(constants.IPv6) for sg_id in (ipv4_sec_group_set & ipv6_sec_group_set): if sg_id in self.sg_members: del self.sg_members[sg_id] def _find_deleted_sg_rules(self, sg_id): del_rules = list() for pre_rule in self.pre_sg_rules.get(sg_id, []): if pre_rule not in self.sg_rules.get(sg_id, []): del_rules.append(pre_rule) return del_rules def _find_devices_on_security_group(self, sg_id): device_list = list() for device in self.filtered_ports.values(): if sg_id in device.get('security_groups', []): device_list.append(device) return device_list def _clean_deleted_sg_rule_conntrack_entries(self): deleted_sg_ids = set() for sg_id in self.updated_rule_sg_ids: del_rules = self._find_deleted_sg_rules(sg_id) if not del_rules: continue device_list = self._find_devices_on_security_group(sg_id) for rule in del_rules: self.ipconntrack.delete_conntrack_state_by_rule( device_list, rule) deleted_sg_ids.add(sg_id) for id in deleted_sg_ids: self.updated_rule_sg_ids.remove(id) def _clean_updated_sg_member_conntrack_entries(self): updated_device_ids = set() for device in self.updated_sg_members: sec_group_change = False device_info = self.filtered_ports.get(device) pre_device_info = self._pre_defer_filtered_ports.get(device) if not device_info or not 
pre_device_info: continue for sg_id in pre_device_info.get('security_groups', []): if sg_id not in device_info.get('security_groups', []): sec_group_change = True break if not sec_group_change: continue for ethertype in [constants.IPv4, constants.IPv6]: self.ipconntrack.delete_conntrack_state_by_remote_ips( [device_info], ethertype, set()) updated_device_ids.add(device) for id in updated_device_ids: self.updated_sg_members.remove(id) def _clean_deleted_remote_sg_members_conntrack_entries(self): deleted_sg_ids = set() for sg_id, devices in self.devices_with_udpated_sg_members.items(): for ethertype in [constants.IPv4, constants.IPv6]: pre_ips = self._get_sg_members( self.pre_sg_members, sg_id, ethertype) cur_ips = self._get_sg_members( self.sg_members, sg_id, ethertype) ips = (pre_ips - cur_ips) if devices and ips: self.ipconntrack.delete_conntrack_state_by_remote_ips( devices, ethertype, ips) deleted_sg_ids.add(sg_id) for id in deleted_sg_ids: self.devices_with_udpated_sg_members.pop(id, None) def _remove_conntrack_entries_from_sg_updates(self): self._clean_deleted_sg_rule_conntrack_entries() self._clean_updated_sg_member_conntrack_entries() self._clean_deleted_remote_sg_members_conntrack_entries() def _get_sg_members(self, sg_info, sg_id, ethertype): return set(sg_info.get(sg_id, {}).get(ethertype, [])) def filter_defer_apply_off(self): if self._defer_apply: self._defer_apply = False self._remove_chains_apply(self._pre_defer_filtered_ports, self._pre_defer_unfiltered_ports) self._setup_chains_apply(self.filtered_ports, self.unfiltered_ports) self.iptables.defer_apply_off() self._remove_conntrack_entries_from_sg_updates() self._remove_unused_security_group_info() self._pre_defer_filtered_ports = None self._pre_defer_unfiltered_ports = None def _populate_initial_zone_map(self): """Setup the map between devices and zones based on current rules.""" self._device_zone_map = {} rules = self.iptables.get_rules_for_table('raw') for rule in rules: match = re.match(r'.* 
--physdev-in (?P<dev>[a-zA-Z0-9\-]+)'
                             r'.* -j CT --zone (?P<zone>\d+).*', rule)
            if match:
                # strip off any prefix that the interface is using
                short_port_id = match.group('dev')[LINUX_DEV_PREFIX_LEN:]
                self._device_zone_map[short_port_id] = int(match.group('zone'))
        LOG.debug("Populated conntrack zone map: %s", self._device_zone_map)

    def get_device_zone(self, port_id):
        """Return the conntrack zone for a port, allocating one if needed."""
        # we have to key the device_zone_map based on the fragment of the port
        # UUID that shows up in the interface name. This is because the initial
        # map is populated strictly based on interface names that we don't know
        # the full UUID of.
        short_port_id = port_id[:(LINUX_DEV_LEN - LINUX_DEV_PREFIX_LEN)]
        try:
            return self._device_zone_map[short_port_id]
        except KeyError:
            # first sighting of this port: assign a fresh zone
            return self._generate_device_zone(short_port_id)

    def _free_zones_from_removed_ports(self):
        """Clears any entries from the zone map of removed ports."""
        existing_ports = [
            port['device'][:(LINUX_DEV_LEN - LINUX_DEV_PREFIX_LEN)]
            for port in (list(self.filtered_ports.values()) +
                         list(self.unfiltered_ports.values()))
        ]
        removed = set(self._device_zone_map) - set(existing_ports)
        for dev in removed:
            self._device_zone_map.pop(dev, None)

    def _generate_device_zone(self, short_port_id):
        """Generates a unique conntrack zone for the passed in ID."""
        try:
            zone = self._find_open_zone()
        except n_exc.CTZoneExhaustedError:
            # Free some zones and try again, repeat failure will not be caught
            self._free_zones_from_removed_ports()
            zone = self._find_open_zone()
        self._device_zone_map[short_port_id] = zone
        LOG.debug("Assigned CT zone %(z)s to port %(dev)s.",
                  {'z': zone, 'dev': short_port_id})
        return self._device_zone_map[short_port_id]

    def _find_open_zone(self):
        """Return the lowest unused conntrack zone number (raises when full)."""
        # call set to dedup because old ports may be mapped to the same zone.
        zones_in_use = sorted(set(self._device_zone_map.values()))
        if not zones_in_use:
            return 1
        # attempt to increment onto the highest used zone first. if we hit the
        # end, go back and look for any gaps left by removed devices.
        last = zones_in_use[-1]
        if last < MAX_CONNTRACK_ZONES:
            return last + 1
        # highest zone is taken: scan for a gap left by a removed device.
        # zones start at 1, so a gap exists wherever used - index != 1.
        for index, used in enumerate(zones_in_use):
            if used - index != 1:
                # gap found, let's use it!
                return index + 1
        # conntrack zones exhausted :( :(
        raise n_exc.CTZoneExhaustedError()


class OVSHybridIptablesFirewallDriver(IptablesFirewallDriver):
    """Iptables firewall driver for the OVS hybrid-plug setup.

    Ports are wired through a Linux bridge ('qvb'/'tap' devices), so chain
    names keep the full device id and conntrack-zone jump rules are added
    to the raw table per direction.
    """
    OVS_HYBRID_TAP_PREFIX = constants.TAP_DEVICE_PREFIX

    def _port_chain_name(self, port, direction):
        # Unlike the base class, the device name is used as-is (no prefix
        # stripping).
        return iptables_manager.get_chain_name(
            '%s%s' % (CHAIN_NAME_PREFIX[direction], port['device']))

    def _get_device_name(self, port):
        # 'tap' side of the hybrid pair, truncated to the kernel's limit.
        return (self.OVS_HYBRID_TAP_PREFIX + port['device'])[:LINUX_DEV_LEN]

    def _get_br_device_name(self, port):
        # 'qvb' (bridge) side of the veth pair, truncated likewise.
        return ('qvb' + port['device'])[:LINUX_DEV_LEN]

    def _get_jump_rule(self, port, direction):
        """Build the raw-table rule sending this port's traffic to its
        conntrack zone; ingress matches the qvb device, egress the tap.
        """
        if direction == firewall.INGRESS_DIRECTION:
            device = self._get_br_device_name(port)
        else:
            device = self._get_device_name(port)
        jump_rule = '-m physdev --physdev-in %s -j CT --zone %s' % (
            device, self.get_device_zone(port['device']))
        return jump_rule

    def _add_raw_chain_rules(self, port, direction):
        # Install the CT-zone jump for both address families.
        jump_rule = self._get_jump_rule(port, direction)
        self.iptables.ipv4['raw'].add_rule('PREROUTING', jump_rule)
        self.iptables.ipv6['raw'].add_rule('PREROUTING', jump_rule)

    def _remove_raw_chain_rules(self, port, direction):
        # Remove the CT-zone jump for both address families.
        jump_rule = self._get_jump_rule(port, direction)
        self.iptables.ipv4['raw'].remove_rule('PREROUTING', jump_rule)
        self.iptables.ipv6['raw'].remove_rule('PREROUTING', jump_rule)

    def _add_chain(self, port, direction):
        # Extend the base behavior with the raw-table CT-zone rules.
        super(OVSHybridIptablesFirewallDriver, self)._add_chain(port,
                                                                direction)
        if direction in [firewall.INGRESS_DIRECTION,
                         firewall.EGRESS_DIRECTION]:
            self._add_raw_chain_rules(port, direction)

    def _remove_chain(self, port, direction):
        # Mirror _add_chain: also tear down the raw-table CT-zone rules.
        super(OVSHybridIptablesFirewallDriver, self)._remove_chain(port,
                                                                   direction)
        if direction in [firewall.INGRESS_DIRECTION,
                         firewall.EGRESS_DIRECTION]:
            self._remove_raw_chain_rules(port, direction)
unknown
codeparrot/codeparrot-clean
import datetime import os import uuid from decimal import Decimal import pytest from django.http import QueryDict from django.test import TestCase, override_settings from django.utils import six, timezone import rest_framework from rest_framework import serializers # Tests for field keyword arguments and core functionality. # --------------------------------------------------------- class TestEmpty: """ Tests for `required`, `allow_null`, `allow_blank`, `default`. """ def test_required(self): """ By default a field must be included in the input. """ field = serializers.IntegerField() with pytest.raises(serializers.ValidationError) as exc_info: field.run_validation() assert exc_info.value.detail == ['This field is required.'] def test_not_required(self): """ If `required=False` then a field may be omitted from the input. """ field = serializers.IntegerField(required=False) with pytest.raises(serializers.SkipField): field.run_validation() def test_disallow_null(self): """ By default `None` is not a valid input. """ field = serializers.IntegerField() with pytest.raises(serializers.ValidationError) as exc_info: field.run_validation(None) assert exc_info.value.detail == ['This field may not be null.'] def test_allow_null(self): """ If `allow_null=True` then `None` is a valid input. """ field = serializers.IntegerField(allow_null=True) output = field.run_validation(None) assert output is None def test_disallow_blank(self): """ By default '' is not a valid input. """ field = serializers.CharField() with pytest.raises(serializers.ValidationError) as exc_info: field.run_validation('') assert exc_info.value.detail == ['This field may not be blank.'] def test_allow_blank(self): """ If `allow_blank=True` then '' is a valid input. """ field = serializers.CharField(allow_blank=True) output = field.run_validation('') assert output == '' def test_default(self): """ If `default` is set, then omitted values get the default input. 
""" field = serializers.IntegerField(default=123) output = field.run_validation() assert output is 123 class TestSource: def test_source(self): class ExampleSerializer(serializers.Serializer): example_field = serializers.CharField(source='other') serializer = ExampleSerializer(data={'example_field': 'abc'}) assert serializer.is_valid() assert serializer.validated_data == {'other': 'abc'} def test_redundant_source(self): class ExampleSerializer(serializers.Serializer): example_field = serializers.CharField(source='example_field') with pytest.raises(AssertionError) as exc_info: ExampleSerializer().fields assert str(exc_info.value) == ( "It is redundant to specify `source='example_field'` on field " "'CharField' in serializer 'ExampleSerializer', because it is the " "same as the field name. Remove the `source` keyword argument." ) def test_callable_source(self): class ExampleSerializer(serializers.Serializer): example_field = serializers.CharField(source='example_callable') class ExampleInstance(object): def example_callable(self): return 'example callable value' serializer = ExampleSerializer(ExampleInstance()) assert serializer.data['example_field'] == 'example callable value' def test_callable_source_raises(self): class ExampleSerializer(serializers.Serializer): example_field = serializers.CharField(source='example_callable', read_only=True) class ExampleInstance(object): def example_callable(self): raise AttributeError('method call failed') with pytest.raises(ValueError) as exc_info: serializer = ExampleSerializer(ExampleInstance()) serializer.data.items() assert 'method call failed' in str(exc_info.value) class TestReadOnly: def setup(self): class TestSerializer(serializers.Serializer): read_only = serializers.ReadOnlyField() writable = serializers.IntegerField() self.Serializer = TestSerializer def test_validate_read_only(self): """ Read-only serializers.should not be included in validation. 
""" data = {'read_only': 123, 'writable': 456} serializer = self.Serializer(data=data) assert serializer.is_valid() assert serializer.validated_data == {'writable': 456} def test_serialize_read_only(self): """ Read-only serializers.should be serialized. """ instance = {'read_only': 123, 'writable': 456} serializer = self.Serializer(instance) assert serializer.data == {'read_only': 123, 'writable': 456} class TestWriteOnly: def setup(self): class TestSerializer(serializers.Serializer): write_only = serializers.IntegerField(write_only=True) readable = serializers.IntegerField() self.Serializer = TestSerializer def test_validate_write_only(self): """ Write-only serializers.should be included in validation. """ data = {'write_only': 123, 'readable': 456} serializer = self.Serializer(data=data) assert serializer.is_valid() assert serializer.validated_data == {'write_only': 123, 'readable': 456} def test_serialize_write_only(self): """ Write-only serializers.should not be serialized. """ instance = {'write_only': 123, 'readable': 456} serializer = self.Serializer(instance) assert serializer.data == {'readable': 456} class TestInitial: def setup(self): class TestSerializer(serializers.Serializer): initial_field = serializers.IntegerField(initial=123) blank_field = serializers.IntegerField() self.serializer = TestSerializer() def test_initial(self): """ Initial values should be included when serializing a new representation. 
""" assert self.serializer.data == { 'initial_field': 123, 'blank_field': None } class TestInitialWithCallable: def setup(self): def initial_value(): return 123 class TestSerializer(serializers.Serializer): initial_field = serializers.IntegerField(initial=initial_value) self.serializer = TestSerializer() def test_initial_should_accept_callable(self): """ Follows the default ``Field.initial`` behaviour where they accept a callable to produce the initial value""" assert self.serializer.data == { 'initial_field': 123, } class TestLabel: def setup(self): class TestSerializer(serializers.Serializer): labeled = serializers.IntegerField(label='My label') self.serializer = TestSerializer() def test_label(self): """ A field's label may be set with the `label` argument. """ fields = self.serializer.fields assert fields['labeled'].label == 'My label' class TestInvalidErrorKey: def setup(self): class ExampleField(serializers.Field): def to_native(self, data): self.fail('incorrect') self.field = ExampleField() def test_invalid_error_key(self): """ If a field raises a validation error, but does not have a corresponding error message, then raise an appropriate assertion error. """ with pytest.raises(AssertionError) as exc_info: self.field.to_native(123) expected = ( 'ValidationError raised by `ExampleField`, but error key ' '`incorrect` does not exist in the `error_messages` dictionary.' ) assert str(exc_info.value) == expected class TestBooleanHTMLInput: def test_empty_html_checkbox(self): """ HTML checkboxes do not send any value, but should be treated as `False` by BooleanField. """ class TestSerializer(serializers.Serializer): archived = serializers.BooleanField() serializer = TestSerializer(data=QueryDict('')) assert serializer.is_valid() assert serializer.validated_data == {'archived': False} def test_empty_html_checkbox_not_required(self): """ HTML checkboxes do not send any value, but should be treated as `False` by BooleanField, even if the field is required=False. 
""" class TestSerializer(serializers.Serializer): archived = serializers.BooleanField(required=False) serializer = TestSerializer(data=QueryDict('')) assert serializer.is_valid() assert serializer.validated_data == {'archived': False} class TestHTMLInput: def test_empty_html_charfield_with_default(self): class TestSerializer(serializers.Serializer): message = serializers.CharField(default='happy') serializer = TestSerializer(data=QueryDict('')) assert serializer.is_valid() assert serializer.validated_data == {'message': 'happy'} def test_empty_html_charfield_without_default(self): class TestSerializer(serializers.Serializer): message = serializers.CharField(allow_blank=True) serializer = TestSerializer(data=QueryDict('message=')) assert serializer.is_valid() assert serializer.validated_data == {'message': ''} def test_empty_html_charfield_without_default_not_required(self): class TestSerializer(serializers.Serializer): message = serializers.CharField(allow_blank=True, required=False) serializer = TestSerializer(data=QueryDict('message=')) assert serializer.is_valid() assert serializer.validated_data == {'message': ''} def test_empty_html_integerfield(self): class TestSerializer(serializers.Serializer): message = serializers.IntegerField(default=123) serializer = TestSerializer(data=QueryDict('message=')) assert serializer.is_valid() assert serializer.validated_data == {'message': 123} def test_empty_html_uuidfield_with_default(self): class TestSerializer(serializers.Serializer): message = serializers.UUIDField(default=uuid.uuid4) serializer = TestSerializer(data=QueryDict('message=')) assert serializer.is_valid() assert list(serializer.validated_data.keys()) == ['message'] def test_empty_html_uuidfield_with_optional(self): class TestSerializer(serializers.Serializer): message = serializers.UUIDField(required=False) serializer = TestSerializer(data=QueryDict('message=')) assert serializer.is_valid() assert list(serializer.validated_data.keys()) == [] def 
test_empty_html_charfield_allow_null(self): class TestSerializer(serializers.Serializer): message = serializers.CharField(allow_null=True) serializer = TestSerializer(data=QueryDict('message=')) assert serializer.is_valid() assert serializer.validated_data == {'message': None} def test_empty_html_datefield_allow_null(self): class TestSerializer(serializers.Serializer): expiry = serializers.DateField(allow_null=True) serializer = TestSerializer(data=QueryDict('expiry=')) assert serializer.is_valid() assert serializer.validated_data == {'expiry': None} def test_empty_html_charfield_allow_null_allow_blank(self): class TestSerializer(serializers.Serializer): message = serializers.CharField(allow_null=True, allow_blank=True) serializer = TestSerializer(data=QueryDict('message=')) assert serializer.is_valid() assert serializer.validated_data == {'message': ''} def test_empty_html_charfield_required_false(self): class TestSerializer(serializers.Serializer): message = serializers.CharField(required=False) serializer = TestSerializer(data=QueryDict('')) assert serializer.is_valid() assert serializer.validated_data == {} def test_querydict_list_input(self): class TestSerializer(serializers.Serializer): scores = serializers.ListField(child=serializers.IntegerField()) serializer = TestSerializer(data=QueryDict('scores=1&scores=3')) assert serializer.is_valid() assert serializer.validated_data == {'scores': [1, 3]} def test_querydict_list_input_only_one_input(self): class TestSerializer(serializers.Serializer): scores = serializers.ListField(child=serializers.IntegerField()) serializer = TestSerializer(data=QueryDict('scores=1&')) assert serializer.is_valid() assert serializer.validated_data == {'scores': [1]} class TestCreateOnlyDefault: def setup(self): default = serializers.CreateOnlyDefault('2001-01-01') class TestSerializer(serializers.Serializer): published = serializers.HiddenField(default=default) text = serializers.CharField() self.Serializer = TestSerializer def 
test_create_only_default_is_provided(self): serializer = self.Serializer(data={'text': 'example'}) assert serializer.is_valid() assert serializer.validated_data == { 'text': 'example', 'published': '2001-01-01' } def test_create_only_default_is_not_provided_on_update(self): instance = { 'text': 'example', 'published': '2001-01-01' } serializer = self.Serializer(instance, data={'text': 'example'}) assert serializer.is_valid() assert serializer.validated_data == { 'text': 'example', } def test_create_only_default_callable_sets_context(self): """ CreateOnlyDefault instances with a callable default should set_context on the callable if possible """ class TestCallableDefault: def set_context(self, serializer_field): self.field = serializer_field def __call__(self): return "success" if hasattr(self, 'field') else "failure" class TestSerializer(serializers.Serializer): context_set = serializers.CharField(default=serializers.CreateOnlyDefault(TestCallableDefault())) serializer = TestSerializer(data={}) assert serializer.is_valid() assert serializer.validated_data['context_set'] == 'success' # Tests for field input and output values. # ---------------------------------------- def get_items(mapping_or_list_of_two_tuples): # Tests accept either lists of two tuples, or dictionaries. if isinstance(mapping_or_list_of_two_tuples, dict): # {value: expected} return mapping_or_list_of_two_tuples.items() # [(value, expected), ...] return mapping_or_list_of_two_tuples class FieldValues: """ Base class for testing valid and invalid input values. """ def test_valid_inputs(self): """ Ensure that valid values return the expected validated data. """ for input_value, expected_output in get_items(self.valid_inputs): assert self.field.run_validation(input_value) == expected_output def test_invalid_inputs(self): """ Ensure that invalid values raise the expected validation error. 
""" for input_value, expected_failure in get_items(self.invalid_inputs): with pytest.raises(serializers.ValidationError) as exc_info: self.field.run_validation(input_value) assert exc_info.value.detail == expected_failure def test_outputs(self): for output_value, expected_output in get_items(self.outputs): assert self.field.to_representation(output_value) == expected_output # Boolean types... class TestBooleanField(FieldValues): """ Valid and invalid values for `BooleanField`. """ valid_inputs = { 'true': True, 'false': False, '1': True, '0': False, 1: True, 0: False, True: True, False: False, } invalid_inputs = { 'foo': ['"foo" is not a valid boolean.'], None: ['This field may not be null.'] } outputs = { 'true': True, 'false': False, '1': True, '0': False, 1: True, 0: False, True: True, False: False, 'other': True } field = serializers.BooleanField() def test_disallow_unhashable_collection_types(self): inputs = ( [], {}, ) field = serializers.BooleanField() for input_value in inputs: with pytest.raises(serializers.ValidationError) as exc_info: field.run_validation(input_value) expected = ['"{0}" is not a valid boolean.'.format(input_value)] assert exc_info.value.detail == expected class TestNullBooleanField(FieldValues): """ Valid and invalid values for `BooleanField`. """ valid_inputs = { 'true': True, 'false': False, 'null': None, True: True, False: False, None: None } invalid_inputs = { 'foo': ['"foo" is not a valid boolean.'], } outputs = { 'true': True, 'false': False, 'null': None, True: True, False: False, None: None, 'other': True } field = serializers.NullBooleanField() # String types... class TestCharField(FieldValues): """ Valid and invalid values for `CharField`. 
""" valid_inputs = { 1: '1', 'abc': 'abc' } invalid_inputs = { (): ['Not a valid string.'], True: ['Not a valid string.'], '': ['This field may not be blank.'] } outputs = { 1: '1', 'abc': 'abc' } field = serializers.CharField() def test_trim_whitespace_default(self): field = serializers.CharField() assert field.to_internal_value(' abc ') == 'abc' def test_trim_whitespace_disabled(self): field = serializers.CharField(trim_whitespace=False) assert field.to_internal_value(' abc ') == ' abc ' def test_disallow_blank_with_trim_whitespace(self): field = serializers.CharField(allow_blank=False, trim_whitespace=True) with pytest.raises(serializers.ValidationError) as exc_info: field.run_validation(' ') assert exc_info.value.detail == ['This field may not be blank.'] class TestEmailField(FieldValues): """ Valid and invalid values for `EmailField`. """ valid_inputs = { 'example@example.com': 'example@example.com', ' example@example.com ': 'example@example.com', } invalid_inputs = { 'examplecom': ['Enter a valid email address.'] } outputs = {} field = serializers.EmailField() class TestRegexField(FieldValues): """ Valid and invalid values for `RegexField`. """ valid_inputs = { 'a9': 'a9', } invalid_inputs = { 'A9': ["This value does not match the required pattern."] } outputs = {} field = serializers.RegexField(regex='[a-z][0-9]') class TestSlugField(FieldValues): """ Valid and invalid values for `SlugField`. """ valid_inputs = { 'slug-99': 'slug-99', } invalid_inputs = { 'slug 99': ['Enter a valid "slug" consisting of letters, numbers, underscores or hyphens.'] } outputs = {} field = serializers.SlugField() class TestURLField(FieldValues): """ Valid and invalid values for `URLField`. """ valid_inputs = { 'http://example.com': 'http://example.com', } invalid_inputs = { 'example.com': ['Enter a valid URL.'] } outputs = {} field = serializers.URLField() class TestUUIDField(FieldValues): """ Valid and invalid values for `UUIDField`. 
""" valid_inputs = { '825d7aeb-05a9-45b5-a5b7-05df87923cda': uuid.UUID('825d7aeb-05a9-45b5-a5b7-05df87923cda'), '825d7aeb05a945b5a5b705df87923cda': uuid.UUID('825d7aeb-05a9-45b5-a5b7-05df87923cda'), 'urn:uuid:213b7d9b-244f-410d-828c-dabce7a2615d': uuid.UUID('213b7d9b-244f-410d-828c-dabce7a2615d'), 284758210125106368185219588917561929842: uuid.UUID('d63a6fb6-88d5-40c7-a91c-9edf73283072') } invalid_inputs = { '825d7aeb-05a9-45b5-a5b7': ['"825d7aeb-05a9-45b5-a5b7" is not a valid UUID.'], (1, 2, 3): ['"(1, 2, 3)" is not a valid UUID.'] } outputs = { uuid.UUID('825d7aeb-05a9-45b5-a5b7-05df87923cda'): '825d7aeb-05a9-45b5-a5b7-05df87923cda' } field = serializers.UUIDField() def _test_format(self, uuid_format, formatted_uuid_0): field = serializers.UUIDField(format=uuid_format) assert field.to_representation(uuid.UUID(int=0)) == formatted_uuid_0 assert field.to_internal_value(formatted_uuid_0) == uuid.UUID(int=0) def test_formats(self): self._test_format('int', 0) self._test_format('hex_verbose', '00000000-0000-0000-0000-000000000000') self._test_format('urn', 'urn:uuid:00000000-0000-0000-0000-000000000000') self._test_format('hex', '0' * 32) class TestIPAddressField(FieldValues): """ Valid and invalid values for `IPAddressField` """ valid_inputs = { '127.0.0.1': '127.0.0.1', '192.168.33.255': '192.168.33.255', '2001:0db8:85a3:0042:1000:8a2e:0370:7334': '2001:db8:85a3:42:1000:8a2e:370:7334', '2001:cdba:0:0:0:0:3257:9652': '2001:cdba::3257:9652', '2001:cdba::3257:9652': '2001:cdba::3257:9652' } invalid_inputs = { '127001': ['Enter a valid IPv4 or IPv6 address.'], '127.122.111.2231': ['Enter a valid IPv4 or IPv6 address.'], '2001:::9652': ['Enter a valid IPv4 or IPv6 address.'], '2001:0db8:85a3:0042:1000:8a2e:0370:73341': ['Enter a valid IPv4 or IPv6 address.'], 1000: ['Enter a valid IPv4 or IPv6 address.'], } outputs = {} field = serializers.IPAddressField() class TestIPv4AddressField(FieldValues): """ Valid and invalid values for `IPAddressField` """ valid_inputs = { 
'127.0.0.1': '127.0.0.1', '192.168.33.255': '192.168.33.255', } invalid_inputs = { '127001': ['Enter a valid IPv4 address.'], '127.122.111.2231': ['Enter a valid IPv4 address.'], } outputs = {} field = serializers.IPAddressField(protocol='IPv4') class TestIPv6AddressField(FieldValues): """ Valid and invalid values for `IPAddressField` """ valid_inputs = { '2001:0db8:85a3:0042:1000:8a2e:0370:7334': '2001:db8:85a3:42:1000:8a2e:370:7334', '2001:cdba:0:0:0:0:3257:9652': '2001:cdba::3257:9652', '2001:cdba::3257:9652': '2001:cdba::3257:9652' } invalid_inputs = { '2001:::9652': ['Enter a valid IPv4 or IPv6 address.'], '2001:0db8:85a3:0042:1000:8a2e:0370:73341': ['Enter a valid IPv4 or IPv6 address.'], } outputs = {} field = serializers.IPAddressField(protocol='IPv6') class TestFilePathField(FieldValues): """ Valid and invalid values for `FilePathField` """ valid_inputs = { __file__: __file__, } invalid_inputs = { 'wrong_path': ['"wrong_path" is not a valid path choice.'] } outputs = { } field = serializers.FilePathField( path=os.path.abspath(os.path.dirname(__file__)) ) # Number types... class TestIntegerField(FieldValues): """ Valid and invalid values for `IntegerField`. """ valid_inputs = { '1': 1, '0': 0, 1: 1, 0: 0, 1.0: 1, 0.0: 0, '1.0': 1 } invalid_inputs = { 0.5: ['A valid integer is required.'], 'abc': ['A valid integer is required.'], '0.5': ['A valid integer is required.'] } outputs = { '1': 1, '0': 0, 1: 1, 0: 0, 1.0: 1, 0.0: 0 } field = serializers.IntegerField() class TestMinMaxIntegerField(FieldValues): """ Valid and invalid values for `IntegerField` with min and max limits. 
""" valid_inputs = { '1': 1, '3': 3, 1: 1, 3: 3, } invalid_inputs = { 0: ['Ensure this value is greater than or equal to 1.'], 4: ['Ensure this value is less than or equal to 3.'], '0': ['Ensure this value is greater than or equal to 1.'], '4': ['Ensure this value is less than or equal to 3.'], } outputs = {} field = serializers.IntegerField(min_value=1, max_value=3) class TestFloatField(FieldValues): """ Valid and invalid values for `FloatField`. """ valid_inputs = { '1': 1.0, '0': 0.0, 1: 1.0, 0: 0.0, 1.0: 1.0, 0.0: 0.0, } invalid_inputs = { 'abc': ["A valid number is required."] } outputs = { '1': 1.0, '0': 0.0, 1: 1.0, 0: 0.0, 1.0: 1.0, 0.0: 0.0, } field = serializers.FloatField() class TestMinMaxFloatField(FieldValues): """ Valid and invalid values for `FloatField` with min and max limits. """ valid_inputs = { '1': 1, '3': 3, 1: 1, 3: 3, 1.0: 1.0, 3.0: 3.0, } invalid_inputs = { 0.9: ['Ensure this value is greater than or equal to 1.'], 3.1: ['Ensure this value is less than or equal to 3.'], '0.0': ['Ensure this value is greater than or equal to 1.'], '3.1': ['Ensure this value is less than or equal to 3.'], } outputs = {} field = serializers.FloatField(min_value=1, max_value=3) class TestDecimalField(FieldValues): """ Valid and invalid values for `DecimalField`. 
""" valid_inputs = { '12.3': Decimal('12.3'), '0.1': Decimal('0.1'), 10: Decimal('10'), 0: Decimal('0'), 12.3: Decimal('12.3'), 0.1: Decimal('0.1'), '2E+1': Decimal('20'), } invalid_inputs = ( ('abc', ["A valid number is required."]), (Decimal('Nan'), ["A valid number is required."]), (Decimal('Inf'), ["A valid number is required."]), ('12.345', ["Ensure that there are no more than 3 digits in total."]), (200000000000.0, ["Ensure that there are no more than 3 digits in total."]), ('0.01', ["Ensure that there are no more than 1 decimal places."]), (123, ["Ensure that there are no more than 2 digits before the decimal point."]), ('2E+2', ["Ensure that there are no more than 2 digits before the decimal point."]) ) outputs = { '1': '1.0', '0': '0.0', '1.09': '1.1', '0.04': '0.0', 1: '1.0', 0: '0.0', Decimal('1.0'): '1.0', Decimal('0.0'): '0.0', Decimal('1.09'): '1.1', Decimal('0.04'): '0.0' } field = serializers.DecimalField(max_digits=3, decimal_places=1) class TestMinMaxDecimalField(FieldValues): """ Valid and invalid values for `DecimalField` with min and max limits. """ valid_inputs = { '10.0': Decimal('10.0'), '20.0': Decimal('20.0'), } invalid_inputs = { '9.9': ['Ensure this value is greater than or equal to 10.'], '20.1': ['Ensure this value is less than or equal to 20.'], } outputs = {} field = serializers.DecimalField( max_digits=3, decimal_places=1, min_value=10, max_value=20 ) class TestNoMaxDigitsDecimalField(FieldValues): field = serializers.DecimalField( max_value=100, min_value=0, decimal_places=2, max_digits=None ) valid_inputs = { '10': Decimal('10.00') } invalid_inputs = {} outputs = {} class TestNoStringCoercionDecimalField(FieldValues): """ Output values for `DecimalField` with `coerce_to_string=False`. 
""" valid_inputs = {} invalid_inputs = {} outputs = { 1.09: Decimal('1.1'), 0.04: Decimal('0.0'), '1.09': Decimal('1.1'), '0.04': Decimal('0.0'), Decimal('1.09'): Decimal('1.1'), Decimal('0.04'): Decimal('0.0'), } field = serializers.DecimalField( max_digits=3, decimal_places=1, coerce_to_string=False ) class TestLocalizedDecimalField(TestCase): @override_settings(USE_L10N=True, LANGUAGE_CODE='pl') def test_to_internal_value(self): field = serializers.DecimalField(max_digits=2, decimal_places=1, localize=True) self.assertEqual(field.to_internal_value('1,1'), Decimal('1.1')) @override_settings(USE_L10N=True, LANGUAGE_CODE='pl') def test_to_representation(self): field = serializers.DecimalField(max_digits=2, decimal_places=1, localize=True) self.assertEqual(field.to_representation(Decimal('1.1')), '1,1') def test_localize_forces_coerce_to_string(self): field = serializers.DecimalField(max_digits=2, decimal_places=1, coerce_to_string=False, localize=True) self.assertTrue(isinstance(field.to_representation(Decimal('1.1')), six.string_types)) class TestQuantizedValueForDecimal(TestCase): def test_int_quantized_value_for_decimal(self): field = serializers.DecimalField(max_digits=4, decimal_places=2) value = field.to_internal_value(12).as_tuple() expected_digit_tuple = (0, (1, 2, 0, 0), -2) self.assertEqual(value, expected_digit_tuple) def test_string_quantized_value_for_decimal(self): field = serializers.DecimalField(max_digits=4, decimal_places=2) value = field.to_internal_value('12').as_tuple() expected_digit_tuple = (0, (1, 2, 0, 0), -2) self.assertEqual(value, expected_digit_tuple) def test_part_precision_string_quantized_value_for_decimal(self): field = serializers.DecimalField(max_digits=4, decimal_places=2) value = field.to_internal_value('12.0').as_tuple() expected_digit_tuple = (0, (1, 2, 0, 0), -2) self.assertEqual(value, expected_digit_tuple) class TestNoDecimalPlaces(FieldValues): valid_inputs = { '0.12345': Decimal('0.12345'), } invalid_inputs = { 
'0.1234567': ['Ensure that there are no more than 6 digits in total.'] } outputs = { '1.2345': '1.2345', '0': '0', '1.1': '1.1', } field = serializers.DecimalField(max_digits=6, decimal_places=None) # Date & time serializers... class TestDateField(FieldValues): """ Valid and invalid values for `DateField`. """ valid_inputs = { '2001-01-01': datetime.date(2001, 1, 1), datetime.date(2001, 1, 1): datetime.date(2001, 1, 1), } invalid_inputs = { 'abc': ['Date has wrong format. Use one of these formats instead: YYYY[-MM[-DD]].'], '2001-99-99': ['Date has wrong format. Use one of these formats instead: YYYY[-MM[-DD]].'], datetime.datetime(2001, 1, 1, 12, 00): ['Expected a date but got a datetime.'], } outputs = { datetime.date(2001, 1, 1): '2001-01-01', '2001-01-01': '2001-01-01', six.text_type('2016-01-10'): '2016-01-10', None: None, '': None, } field = serializers.DateField() class TestCustomInputFormatDateField(FieldValues): """ Valid and invalid values for `DateField` with a custom input format. """ valid_inputs = { '1 Jan 2001': datetime.date(2001, 1, 1), } invalid_inputs = { '2001-01-01': ['Date has wrong format. Use one of these formats instead: DD [Jan-Dec] YYYY.'] } outputs = {} field = serializers.DateField(input_formats=['%d %b %Y']) class TestCustomOutputFormatDateField(FieldValues): """ Values for `DateField` with a custom output format. """ valid_inputs = {} invalid_inputs = {} outputs = { datetime.date(2001, 1, 1): '01 Jan 2001' } field = serializers.DateField(format='%d %b %Y') class TestNoOutputFormatDateField(FieldValues): """ Values for `DateField` with no output format. """ valid_inputs = {} invalid_inputs = {} outputs = { datetime.date(2001, 1, 1): datetime.date(2001, 1, 1) } field = serializers.DateField(format=None) class TestDateTimeField(FieldValues): """ Valid and invalid values for `DateTimeField`. 
""" valid_inputs = { '2001-01-01 13:00': datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()), '2001-01-01T13:00': datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()), '2001-01-01T13:00Z': datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()), datetime.datetime(2001, 1, 1, 13, 00): datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()), datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()): datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()), # Django 1.4 does not support timezone string parsing. '2001-01-01T13:00Z': datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()) } invalid_inputs = { 'abc': ['Datetime has wrong format. Use one of these formats instead: YYYY-MM-DDThh:mm[:ss[.uuuuuu]][+HH:MM|-HH:MM|Z].'], '2001-99-99T99:00': ['Datetime has wrong format. Use one of these formats instead: YYYY-MM-DDThh:mm[:ss[.uuuuuu]][+HH:MM|-HH:MM|Z].'], datetime.date(2001, 1, 1): ['Expected a datetime but got a date.'], } outputs = { datetime.datetime(2001, 1, 1, 13, 00): '2001-01-01T13:00:00', datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()): '2001-01-01T13:00:00Z', '2001-01-01T00:00:00': '2001-01-01T00:00:00', six.text_type('2016-01-10T00:00:00'): '2016-01-10T00:00:00', None: None, '': None, } field = serializers.DateTimeField(default_timezone=timezone.UTC()) class TestCustomInputFormatDateTimeField(FieldValues): """ Valid and invalid values for `DateTimeField` with a custom input format. """ valid_inputs = { '1:35pm, 1 Jan 2001': datetime.datetime(2001, 1, 1, 13, 35, tzinfo=timezone.UTC()), } invalid_inputs = { '2001-01-01T20:50': ['Datetime has wrong format. Use one of these formats instead: hh:mm[AM|PM], DD [Jan-Dec] YYYY.'] } outputs = {} field = serializers.DateTimeField(default_timezone=timezone.UTC(), input_formats=['%I:%M%p, %d %b %Y']) class TestCustomOutputFormatDateTimeField(FieldValues): """ Values for `DateTimeField` with a custom output format. 
""" valid_inputs = {} invalid_inputs = {} outputs = { datetime.datetime(2001, 1, 1, 13, 00): '01:00PM, 01 Jan 2001', } field = serializers.DateTimeField(format='%I:%M%p, %d %b %Y') class TestNoOutputFormatDateTimeField(FieldValues): """ Values for `DateTimeField` with no output format. """ valid_inputs = {} invalid_inputs = {} outputs = { datetime.datetime(2001, 1, 1, 13, 00): datetime.datetime(2001, 1, 1, 13, 00), } field = serializers.DateTimeField(format=None) class TestNaiveDateTimeField(FieldValues): """ Valid and invalid values for `DateTimeField` with naive datetimes. """ valid_inputs = { datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()): datetime.datetime(2001, 1, 1, 13, 00), '2001-01-01 13:00': datetime.datetime(2001, 1, 1, 13, 00), } invalid_inputs = {} outputs = {} field = serializers.DateTimeField(default_timezone=None) class TestTimeField(FieldValues): """ Valid and invalid values for `TimeField`. """ valid_inputs = { '13:00': datetime.time(13, 00), datetime.time(13, 00): datetime.time(13, 00), } invalid_inputs = { 'abc': ['Time has wrong format. Use one of these formats instead: hh:mm[:ss[.uuuuuu]].'], '99:99': ['Time has wrong format. Use one of these formats instead: hh:mm[:ss[.uuuuuu]].'], } outputs = { datetime.time(13, 0): '13:00:00', datetime.time(0, 0): '00:00:00', '00:00:00': '00:00:00', None: None, '': None, } field = serializers.TimeField() class TestCustomInputFormatTimeField(FieldValues): """ Valid and invalid values for `TimeField` with a custom input format. """ valid_inputs = { '1:00pm': datetime.time(13, 00), } invalid_inputs = { '13:00': ['Time has wrong format. Use one of these formats instead: hh:mm[AM|PM].'], } outputs = {} field = serializers.TimeField(input_formats=['%I:%M%p']) class TestCustomOutputFormatTimeField(FieldValues): """ Values for `TimeField` with a custom output format. 
""" valid_inputs = {} invalid_inputs = {} outputs = { datetime.time(13, 00): '01:00PM' } field = serializers.TimeField(format='%I:%M%p') class TestNoOutputFormatTimeField(FieldValues): """ Values for `TimeField` with a no output format. """ valid_inputs = {} invalid_inputs = {} outputs = { datetime.time(13, 00): datetime.time(13, 00) } field = serializers.TimeField(format=None) class TestDurationField(FieldValues): """ Valid and invalid values for `DurationField`. """ valid_inputs = { '13': datetime.timedelta(seconds=13), '3 08:32:01.000123': datetime.timedelta(days=3, hours=8, minutes=32, seconds=1, microseconds=123), '08:01': datetime.timedelta(minutes=8, seconds=1), datetime.timedelta(days=3, hours=8, minutes=32, seconds=1, microseconds=123): datetime.timedelta(days=3, hours=8, minutes=32, seconds=1, microseconds=123), 3600: datetime.timedelta(hours=1), } invalid_inputs = { 'abc': ['Duration has wrong format. Use one of these formats instead: [DD] [HH:[MM:]]ss[.uuuuuu].'], '3 08:32 01.123': ['Duration has wrong format. Use one of these formats instead: [DD] [HH:[MM:]]ss[.uuuuuu].'], } outputs = { datetime.timedelta(days=3, hours=8, minutes=32, seconds=1, microseconds=123): '3 08:32:01.000123', } field = serializers.DurationField() # Choice types... class TestChoiceField(FieldValues): """ Valid and invalid values for `ChoiceField`. """ valid_inputs = { 'poor': 'poor', 'medium': 'medium', 'good': 'good', } invalid_inputs = { 'amazing': ['"amazing" is not a valid choice.'] } outputs = { 'good': 'good', '': '', 'amazing': 'amazing', } field = serializers.ChoiceField( choices=[ ('poor', 'Poor quality'), ('medium', 'Medium quality'), ('good', 'Good quality'), ] ) def test_allow_blank(self): """ If `allow_blank=True` then '' is a valid input. 
""" field = serializers.ChoiceField( allow_blank=True, choices=[ ('poor', 'Poor quality'), ('medium', 'Medium quality'), ('good', 'Good quality'), ] ) output = field.run_validation('') assert output == '' def test_allow_null(self): """ If `allow_null=True` then '' on HTML forms is treated as None. """ field = serializers.ChoiceField( allow_null=True, choices=[ 1, 2, 3 ] ) field.field_name = 'example' value = field.get_value(QueryDict('example=')) assert value is None output = field.run_validation(None) assert output is None def test_iter_options(self): """ iter_options() should return a list of options and option groups. """ field = serializers.ChoiceField( choices=[ ('Numbers', ['integer', 'float']), ('Strings', ['text', 'email', 'url']), 'boolean' ] ) items = list(field.iter_options()) assert items[0].start_option_group assert items[0].label == 'Numbers' assert items[1].value == 'integer' assert items[2].value == 'float' assert items[3].end_option_group assert items[4].start_option_group assert items[4].label == 'Strings' assert items[5].value == 'text' assert items[6].value == 'email' assert items[7].value == 'url' assert items[8].end_option_group assert items[9].value == 'boolean' class TestChoiceFieldWithType(FieldValues): """ Valid and invalid values for a `Choice` field that uses an integer type, instead of a char type. """ valid_inputs = { '1': 1, 3: 3, } invalid_inputs = { 5: ['"5" is not a valid choice.'], 'abc': ['"abc" is not a valid choice.'] } outputs = { '1': 1, 1: 1 } field = serializers.ChoiceField( choices=[ (1, 'Poor quality'), (2, 'Medium quality'), (3, 'Good quality'), ] ) class TestChoiceFieldWithListChoices(FieldValues): """ Valid and invalid values for a `Choice` field that uses a flat list for the choices, rather than a list of pairs of (`value`, `description`). 
""" valid_inputs = { 'poor': 'poor', 'medium': 'medium', 'good': 'good', } invalid_inputs = { 'awful': ['"awful" is not a valid choice.'] } outputs = { 'good': 'good' } field = serializers.ChoiceField(choices=('poor', 'medium', 'good')) class TestChoiceFieldWithGroupedChoices(FieldValues): """ Valid and invalid values for a `Choice` field that uses a grouped list for the choices, rather than a list of pairs of (`value`, `description`). """ valid_inputs = { 'poor': 'poor', 'medium': 'medium', 'good': 'good', } invalid_inputs = { 'awful': ['"awful" is not a valid choice.'] } outputs = { 'good': 'good' } field = serializers.ChoiceField( choices=[ ( 'Category', ( ('poor', 'Poor quality'), ('medium', 'Medium quality'), ), ), ('good', 'Good quality'), ] ) class TestChoiceFieldWithMixedChoices(FieldValues): """ Valid and invalid values for a `Choice` field that uses a single paired or grouped. """ valid_inputs = { 'poor': 'poor', 'medium': 'medium', 'good': 'good', } invalid_inputs = { 'awful': ['"awful" is not a valid choice.'] } outputs = { 'good': 'good' } field = serializers.ChoiceField( choices=[ ( 'Category', ( ('poor', 'Poor quality'), ), ), 'medium', ('good', 'Good quality'), ] ) class TestMultipleChoiceField(FieldValues): """ Valid and invalid values for `MultipleChoiceField`. 
""" valid_inputs = { (): set(), ('aircon',): set(['aircon']), ('aircon', 'manual'): set(['aircon', 'manual']), } invalid_inputs = { 'abc': ['Expected a list of items but got type "str".'], ('aircon', 'incorrect'): ['"incorrect" is not a valid choice.'] } outputs = [ (['aircon', 'manual', 'incorrect'], set(['aircon', 'manual', 'incorrect'])) ] field = serializers.MultipleChoiceField( choices=[ ('aircon', 'AirCon'), ('manual', 'Manual drive'), ('diesel', 'Diesel'), ] ) def test_against_partial_and_full_updates(self): field = serializers.MultipleChoiceField(choices=(('a', 'a'), ('b', 'b'))) field.partial = False assert field.get_value(QueryDict({})) == [] field.partial = True assert field.get_value(QueryDict({})) == rest_framework.fields.empty class TestEmptyMultipleChoiceField(FieldValues): """ Invalid values for `MultipleChoiceField(allow_empty=False)`. """ valid_inputs = { } invalid_inputs = ( ([], ['This selection may not be empty.']), ) outputs = [ ] field = serializers.MultipleChoiceField( choices=[ ('consistency', 'Consistency'), ('availability', 'Availability'), ('partition', 'Partition tolerance'), ], allow_empty=False ) # File serializers... class MockFile: def __init__(self, name='', size=0, url=''): self.name = name self.size = size self.url = url def __eq__(self, other): return ( isinstance(other, MockFile) and self.name == other.name and self.size == other.size and self.url == other.url ) class TestFileField(FieldValues): """ Values for `FileField`. """ valid_inputs = [ (MockFile(name='example', size=10), MockFile(name='example', size=10)) ] invalid_inputs = [ ('invalid', ['The submitted data was not a file. 
Check the encoding type on the form.']), (MockFile(name='example.txt', size=0), ['The submitted file is empty.']), (MockFile(name='', size=10), ['No filename could be determined.']), (MockFile(name='x' * 100, size=10), ['Ensure this filename has at most 10 characters (it has 100).']) ] outputs = [ (MockFile(name='example.txt', url='/example.txt'), '/example.txt'), ('', None) ] field = serializers.FileField(max_length=10) class TestFieldFieldWithName(FieldValues): """ Values for `FileField` with a filename output instead of URLs. """ valid_inputs = {} invalid_inputs = {} outputs = [ (MockFile(name='example.txt', url='/example.txt'), 'example.txt') ] field = serializers.FileField(use_url=False) # Stub out mock Django `forms.ImageField` class so we don't *actually* # call into it's regular validation, or require PIL for testing. class FailImageValidation(object): def to_python(self, value): raise serializers.ValidationError(self.error_messages['invalid_image']) class PassImageValidation(object): def to_python(self, value): return value class TestInvalidImageField(FieldValues): """ Values for an invalid `ImageField`. """ valid_inputs = {} invalid_inputs = [ (MockFile(name='example.txt', size=10), ['Upload a valid image. The file you uploaded was either not an image or a corrupted image.']) ] outputs = {} field = serializers.ImageField(_DjangoImageField=FailImageValidation) class TestValidImageField(FieldValues): """ Values for an valid `ImageField`. """ valid_inputs = [ (MockFile(name='example.txt', size=10), MockFile(name='example.txt', size=10)) ] invalid_inputs = {} outputs = {} field = serializers.ImageField(_DjangoImageField=PassImageValidation) # Composite serializers... class TestListField(FieldValues): """ Values for `ListField` with IntegerField as child. 
""" valid_inputs = [ ([1, 2, 3], [1, 2, 3]), (['1', '2', '3'], [1, 2, 3]), ([], []) ] invalid_inputs = [ ('not a list', ['Expected a list of items but got type "str".']), ([1, 2, 'error'], ['A valid integer is required.']), ({'one': 'two'}, ['Expected a list of items but got type "dict".']) ] outputs = [ ([1, 2, 3], [1, 2, 3]), (['1', '2', '3'], [1, 2, 3]) ] field = serializers.ListField(child=serializers.IntegerField()) def test_no_source_on_child(self): with pytest.raises(AssertionError) as exc_info: serializers.ListField(child=serializers.IntegerField(source='other')) assert str(exc_info.value) == ( "The `source` argument is not meaningful when applied to a `child=` field. " "Remove `source=` from the field declaration." ) def test_collection_types_are_invalid_input(self): field = serializers.ListField(child=serializers.CharField()) input_value = ({'one': 'two'}) with pytest.raises(serializers.ValidationError) as exc_info: field.to_internal_value(input_value) assert exc_info.value.detail == ['Expected a list of items but got type "dict".'] class TestEmptyListField(FieldValues): """ Values for `ListField` with allow_empty=False flag. """ valid_inputs = {} invalid_inputs = [ ([], ['This list may not be empty.']) ] outputs = {} field = serializers.ListField(child=serializers.IntegerField(), allow_empty=False) class TestUnvalidatedListField(FieldValues): """ Values for `ListField` with no `child` argument. """ valid_inputs = [ ([1, '2', True, [4, 5, 6]], [1, '2', True, [4, 5, 6]]), ] invalid_inputs = [ ('not a list', ['Expected a list of items but got type "str".']), ] outputs = [ ([1, '2', True, [4, 5, 6]], [1, '2', True, [4, 5, 6]]), ] field = serializers.ListField() class TestDictField(FieldValues): """ Values for `ListField` with CharField as child. 
""" valid_inputs = [ ({'a': 1, 'b': '2', 3: 3}, {'a': '1', 'b': '2', '3': '3'}), ] invalid_inputs = [ ({'a': 1, 'b': None}, ['This field may not be null.']), ('not a dict', ['Expected a dictionary of items but got type "str".']), ] outputs = [ ({'a': 1, 'b': '2', 3: 3}, {'a': '1', 'b': '2', '3': '3'}), ] field = serializers.DictField(child=serializers.CharField()) def test_no_source_on_child(self): with pytest.raises(AssertionError) as exc_info: serializers.DictField(child=serializers.CharField(source='other')) assert str(exc_info.value) == ( "The `source` argument is not meaningful when applied to a `child=` field. " "Remove `source=` from the field declaration." ) def test_allow_null(self): """ If `allow_null=True` then `None` is a valid input. """ field = serializers.DictField(allow_null=True) output = field.run_validation(None) assert output is None class TestDictFieldWithNullChild(FieldValues): """ Values for `ListField` with allow_null CharField as child. """ valid_inputs = [ ({'a': None, 'b': '2', 3: 3}, {'a': None, 'b': '2', '3': '3'}), ] invalid_inputs = [ ] outputs = [ ({'a': None, 'b': '2', 3: 3}, {'a': None, 'b': '2', '3': '3'}), ] field = serializers.DictField(child=serializers.CharField(allow_null=True)) class TestUnvalidatedDictField(FieldValues): """ Values for `ListField` with no `child` argument. """ valid_inputs = [ ({'a': 1, 'b': [4, 5, 6], 1: 123}, {'a': 1, 'b': [4, 5, 6], '1': 123}), ] invalid_inputs = [ ('not a dict', ['Expected a dictionary of items but got type "str".']), ] outputs = [ ({'a': 1, 'b': [4, 5, 6]}, {'a': 1, 'b': [4, 5, 6]}), ] field = serializers.DictField() class TestJSONField(FieldValues): """ Values for `JSONField`. 
""" valid_inputs = [ ({ 'a': 1, 'b': ['some', 'list', True, 1.23], '3': None }, { 'a': 1, 'b': ['some', 'list', True, 1.23], '3': None }), ] invalid_inputs = [ ({'a': set()}, ['Value must be valid JSON.']), ] outputs = [ ({ 'a': 1, 'b': ['some', 'list', True, 1.23], '3': 3 }, { 'a': 1, 'b': ['some', 'list', True, 1.23], '3': 3 }), ] field = serializers.JSONField() class TestBinaryJSONField(FieldValues): """ Values for `JSONField` with binary=True. """ valid_inputs = [ (b'{"a": 1, "3": null, "b": ["some", "list", true, 1.23]}', { 'a': 1, 'b': ['some', 'list', True, 1.23], '3': None }), ] invalid_inputs = [ ('{"a": "unterminated string}', ['Value must be valid JSON.']), ] outputs = [ (['some', 'list', True, 1.23], b'["some", "list", true, 1.23]'), ] field = serializers.JSONField(binary=True) # Tests for FieldField. # --------------------- class MockRequest: def build_absolute_uri(self, value): return 'http://example.com' + value class TestFileFieldContext: def test_fully_qualified_when_request_in_context(self): field = serializers.FileField(max_length=10) field._context = {'request': MockRequest()} obj = MockFile(name='example.txt', url='/example.txt') value = field.to_representation(obj) assert value == 'http://example.com/example.txt' # Tests for SerializerMethodField. 
# --------------------------------


class TestSerializerMethodField:
    """
    Behaviour of `SerializerMethodField`: value lookup via the default
    `get_<field_name>` method, and the assertion raised when the default
    method name is spelled out redundantly.
    """

    def test_serializer_method_field(self):
        """The field resolves its value by calling `get_<field_name>(obj)`."""
        class ExampleSerializer(serializers.Serializer):
            example_field = serializers.SerializerMethodField()

            def get_example_field(self, obj):
                return 'ran get_example_field(%d)' % obj['example_field']

        instance = ExampleSerializer({'example_field': 123})
        expected = {'example_field': 'ran get_example_field(123)'}
        assert instance.data == expected

    def test_redundant_method_name(self):
        """Passing the default method name explicitly is rejected."""
        class ExampleSerializer(serializers.Serializer):
            example_field = serializers.SerializerMethodField('get_example_field')

        # The check fires lazily, on first access to `.fields`.
        with pytest.raises(AssertionError) as excinfo:
            ExampleSerializer().fields
        expected_message = (
            "It is redundant to specify `get_example_field` on "
            "SerializerMethodField 'example_field' in serializer "
            "'ExampleSerializer', because it is the same as the default "
            "method name. Remove the `method_name` argument."
        )
        assert str(excinfo.value) == expected_message
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- """ pygments.styles.friendly ~~~~~~~~~~~~~~~~~~~~~~~~ A modern style based on the VIM pyte theme. :copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.style import Style from pygments.token import Keyword, Name, Comment, String, Error, \ Number, Operator, Generic, Whitespace class FriendlyStyle(Style): """ A modern style based on the VIM pyte theme. """ background_color = "#f0f0f0" default_style = "" styles = { Whitespace: "#bbbbbb", Comment: "italic #60a0b0", Comment.Preproc: "noitalic #007020", Comment.Special: "noitalic bg:#fff0f0", Keyword: "bold #007020", Keyword.Pseudo: "nobold", Keyword.Type: "nobold #902000", Operator: "#666666", Operator.Word: "bold #007020", Name.Builtin: "#007020", Name.Function: "#06287e", Name.Class: "bold #0e84b5", Name.Namespace: "bold #0e84b5", Name.Exception: "#007020", Name.Variable: "#bb60d5", Name.Constant: "#60add5", Name.Label: "bold #002070", Name.Entity: "bold #d55537", Name.Attribute: "#4070a0", Name.Tag: "bold #062873", Name.Decorator: "bold #555555", String: "#4070a0", String.Doc: "italic", String.Interpol: "italic #70a0d0", String.Escape: "bold #4070a0", String.Regex: "#235388", String.Symbol: "#517918", String.Other: "#c65d09", Number: "#40a070", Generic.Heading: "bold #000080", Generic.Subheading: "bold #800080", Generic.Deleted: "#A00000", Generic.Inserted: "#00A000", Generic.Error: "#FF0000", Generic.Emph: "italic", Generic.Strong: "bold", Generic.Prompt: "bold #c65d09", Generic.Output: "#888", Generic.Traceback: "#04D", Error: "border:#FF0000" }
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- # # Copyright (C) 2007-2011 Uninett AS # # This file is part of Network Administration Visualized (NAV). # # NAV is free software: you can redistribute it and/or modify it under the # terms of the GNU General Public License version 3 as published by the Free # Software Foundation. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. You should have received a copy of the GNU General Public # License along with NAV. If not, see <http://www.gnu.org/licenses/>. # """Django ORM wrapper for profiles in NAV""" # pylint: disable=R0903 from hashlib import md5 import itertools import logging from datetime import datetime import re import json from django.views.decorators.debug import sensitive_variables from django.db import models, transaction from django.urls import reverse from django.utils.encoding import python_2_unicode_compatible from django.forms.models import model_to_dict from nav.adapters import HStoreField import nav.buildconf import nav.pwhash from nav.config import getconfig as get_alertengine_config from nav.alertengine.dispatchers import DispatcherException from nav.alertengine.dispatchers import FatalDispatcherException from nav.models.event import AlertQueue, AlertType, EventType from nav.models.manage import Arp, Cam, Category, Device, Location from nav.models.manage import Memory, Netbox, NetboxInfo, NetboxType from nav.models.manage import Organization, Prefix, Room, NetboxGroup from nav.models.manage import Interface, Usage, Vlan, Vendor from nav.models.fields import VarcharField, DictAsJsonField # This should be the authorative source as to which models alertengine # supports. The acctuall mapping from alerts to data in these models is done # the MatchField model. 
SUPPORTED_MODELS = [ # event models AlertQueue, AlertType, EventType, # manage models Arp, Cam, Category, Device, Location, Memory, Netbox, NetboxInfo, NetboxType, Organization, Prefix, Room, NetboxGroup, Interface, Vendor, Vlan, Usage, ] _ = lambda a: a ####################################################################### ### Account models @python_2_unicode_compatible class Account(models.Model): """ NAV's basic account model""" DEFAULT_ACCOUNT = 0 ADMIN_ACCOUNT = 1 # An overview of current preferences. # They should start with PREFERENCE_KEY PREFERENCE_KEY_LANGUAGE = 'language' # AlertProfiles PREFERENCE_KEY_STATUS = 'status-preferences' PREFERENCE_KEY_WIDGET_COLUMNS = 'widget_columns' PREFERENCE_KEY_REPORT_PAGE_SIZE = 'report_page_size' PREFERENCE_KEY_WIDGET_DISPLAY_DENSITY = 'widget_display_density' PREFERENCE_KEY_IPDEVINFO_PORT_LAYOUT = 'ipdevinfo_port_layout' # FIXME get this from setting. MIN_PASSWD_LENGTH = 8 login = VarcharField(unique=True) name = VarcharField() password = VarcharField() ext_sync = VarcharField(blank=True) preferences = HStoreField(default=dict) organizations = models.ManyToManyField(Organization, db_table='accountorg', blank=True) # Set this in order to provide a link to the actual operator when Account # objects are retrieved from session data sudo_operator = None class Meta(object): db_table = u'account' ordering = ('login',) def __str__(self): if self.sudo_operator and self.sudo_operator != self: return '{} (operated by {})'.format(self.login, self.sudo_operator) else: return self.login def get_active_profile(self): """Returns the account's active alert profile""" try: return self.alertpreference.active_profile except (AlertPreference.DoesNotExist, AlertProfile.DoesNotExist): pass def get_groups(self): """Fetches and returns this users groups. Also stores groups in this object for later use. 
""" try: return self._cached_groups except AttributeError: self._cached_groups = self.accountgroup_set.values_list( 'id', flat=True) return self._cached_groups def get_privileges(self): """Fetches privileges for this users groups. Also stores privileges in this object for later use. """ try: return self._cached_privileges except AttributeError: self._cached_privileges = Privilege.objects.filter( group__in=self.get_groups()) return self._cached_privileges def get_tools(self): """Get the tool list for this account""" return [tool for tool in self.accounttool_set.all().order_by('priority') if self.has_perm('web_access', tool.tool.uri)] def has_perm(self, action, target): """Checks if user has permission to do action on target.""" groups = self.get_groups() privileges = self.get_privileges() if AccountGroup.ADMIN_GROUP in groups: return True elif privileges.count() == 0: return False elif action == 'web_access': for privilege in privileges: regexp = re.compile(privilege.target) if regexp.search(target): return True return False else: return privileges.filter(target=target).count() > 0 def is_system_account(self): """Is this system (undeleteable) account?""" return self.id < 1000 def is_default_account(self): """Is this the anonymous user account?""" return self.id == self.DEFAULT_ACCOUNT def is_admin_account(self): """Is this the admin account?""" return self.id == self.ADMIN_ACCOUNT def is_admin(self): """Has this user administrator rights?""" return self.has_perm(None, None) @sensitive_variables('password') def set_password(self, password): """Sets user password. Copied from nav.db.navprofiles""" if password.strip(): pw_hash = nav.pwhash.Hash(password=password) self.password = str(pw_hash) else: self.password = '' @sensitive_variables('password') def check_password(self, password): """ Return True if the submitted authentication tokens are valid for this Account. 
        In simpler terms; when password authentication is used, this
        method compares the given password with the one stored for this
        account and returns true if they are equal. If the stored
        password is blank, we interpret this as: 'The user is not
        allowed to log in'

        In the future, this could be extended to accept other types of
        authentication tokens, such as personal certificates or
        whatever.

        Copied from nav.db.navprofiles
        """
        # A locked account never authenticates.
        if not self.locked:
            try:
                stored_hash = self.password_hash
            except nav.pwhash.InvalidHashStringError:
                # Probably an old style NAV password hash, get out
                # of here and check it the old way
                pass
            else:
                return stored_hash.verify(password)

            if self.has_old_style_password_hash():
                # Legacy md5 hash: verify and transparently upgrade it.
                return self._verify_old_password_hash_and_rehash(password)
            else:
                # Fall back to plain-text comparison for unhashed passwords.
                return password == self.password
        else:
            return False

    def has_old_style_password_hash(self):
        """Returns True if this account has an old-style, insecure password
        hash"""
        return self.unlocked_password.startswith("md5")

    def has_plaintext_password(self):
        """Returns True if this account appears to contain a plain-text
        password"""
        # Anything that is neither an md5 hash nor a parseable modern hash
        # is assumed to be stored in the clear.
        if not self.has_old_style_password_hash():
            try:
                self.password_hash
            except nav.pwhash.InvalidHashStringError:
                return True
        return False

    def has_deprecated_password_hash_method(self):
        """Returns True if this account's password is salted hash, but using a
        deprecated hashing method.
        """
        if not (self.has_plaintext_password()
                or self.has_old_style_password_hash()):
            return self.password_hash.method != nav.pwhash.DEFAULT_METHOD
        return False

    @sensitive_variables('password')
    def _verify_old_password_hash_and_rehash(self, password):
        """Verifies an old-style MD5 password hash, and if there is a match,
        the password is re-hashed using the modern and more secure method.
""" pw_hash = md5(password.encode("utf-8")) verified = pw_hash.hexdigest() == self.password[3:] if verified: self.set_password(password) if self.pk: Account.objects.filter(pk=self.pk).update(password=self.password) return verified @property def locked(self): return not self.password or self.password.startswith('!') @locked.setter def locked(self, value): if not value and self.password.startswith('!'): self.password = self.password[1:] elif value and not self.password.startswith('!'): self.password = '!' + self.password @property def password_hash(self): """Returns the Account's password as a Hash object""" stored_hash = nav.pwhash.Hash() stored_hash.set_hash(self.unlocked_password) return stored_hash @property def unlocked_password(self): """Returns the raw password value, but with any lock status stripped""" if not self.locked: return self.password or '' else: return self.password[1:] if self.password else '' def get_email_addresses(self): return self.alertaddress_set.filter(type__name=AlertSender.EMAIL) @python_2_unicode_compatible class AccountGroup(models.Model): """NAV account groups""" # FIXME other places in code that use similiar definitions should switch to # using this one. ADMIN_GROUP = 1 EVERYONE_GROUP = 2 AUTHENTICATED_GROUP = 3 name = VarcharField() description = VarcharField(db_column='descr') # FIXME this uses a view hack, was AccountInGroup accounts = models.ManyToManyField('Account') class Meta(object): db_table = u'accountgroup' ordering = ('name',) def __str__(self): return self.name def is_system_group(self): """Is this a system (undeleteable) group?""" return self.id < 1000 def is_protected_group(self): """Is this a protected group? Users cannot be removed from protected groups. 
""" return self.id in [self.EVERYONE_GROUP, self.AUTHENTICATED_GROUP] def is_admin_group(self): """Is this the administrators group?""" return self.id == self.ADMIN_GROUP @python_2_unicode_compatible class NavbarLink(models.Model): """A hyperlink on a user's navigation bar.""" account = models.ForeignKey( 'Account', on_delete=models.CASCADE, db_column='accountid' ) name = models.CharField('Link text', blank=False, max_length=100) uri = models.CharField('URL', blank=False, max_length=100) class Meta(object): db_table = u'navbarlink' ordering = ('id', ) def __str__(self): return '%s=%s' % (self.name, self.uri) @python_2_unicode_compatible class Privilege(models.Model): """A privilege granted to an AccountGroup.""" group = models.ForeignKey( 'AccountGroup', on_delete=models.CASCADE, db_column='accountgroupid' ) type = models.ForeignKey( 'PrivilegeType', on_delete=models.CASCADE, db_column='privilegeid' ) target = VarcharField() class Meta(object): db_table = u'accountgroupprivilege' def __str__(self): return '%s for %s' % (self.type, self.target) @python_2_unicode_compatible class PrivilegeType(models.Model): """A registered privilege type.""" id = models.AutoField(db_column='privilegeid', primary_key=True) name = models.CharField(max_length=30, db_column='privilegename') class Meta(object): db_table = u'privilege' def __str__(self): return self.name @python_2_unicode_compatible class AlertAddress(models.Model): """Accounts alert addresses, valid types are retrived from alertengine.conf """ DEBUG_MODE = False account = models.ForeignKey( 'Account', on_delete=models.CASCADE, db_column='accountid' ) type = models.ForeignKey( 'AlertSender', on_delete=models.CASCADE, db_column='type' ) address = VarcharField() class Meta(object): db_table = u'alertaddress' def __str__(self): return self.type.scheme() + self.address @transaction.atomic def send(self, alert, subscription): """Handles sending of alerts to with defined alert notification types Return value should indicate if 
message was sent""" _logger = logging.getLogger('nav.alertengine.alertaddress.send') # Determine the right language for the user. lang = self.account.preferences.get( Account.PREFERENCE_KEY_LANGUAGE, 'en') if not (self.address or '').strip(): _logger.error( 'Ignoring alert %d (%s: %s)! Account %s does not have an ' 'address set for the alertaddress with id %d, this needs ' 'to be fixed before the user will recieve any alerts.', alert.id, alert, alert.netbox, self.account, self.id) return True if self.type.is_blacklisted(): _logger.warning( 'Not sending alert %s to %s as handler %s is blacklisted: %s', alert.id, self.address, self.type, self.type.blacklist_reason()) return False try: self.type.send(self, alert, language=lang) _logger.info( 'alert %d sent by %s to %s due to %s subscription %d', alert.id, self.type, self.address, subscription.get_type_display(), subscription.id) except FatalDispatcherException as error: _logger.error( '%s raised a FatalDispatcherException indicating that the ' 'alert never will be sent: %s', self.type, error) raise except DispatcherException as error: _logger.error( '%s raised a DispatcherException indicating that an alert ' 'could not be sent at this time: %s', self.type, error) return False except Exception as error: _logger.exception( 'Unhandled error from %s (the handler has been blacklisted)', self.type) self.type.blacklist(error) return False return True @python_2_unicode_compatible class AlertSender(models.Model): """A registered alert sender/medium.""" name = models.CharField(max_length=100) handler = models.CharField(max_length=100) supported = models.BooleanField(default=True) _blacklist = {} _handlers = {} EMAIL = u'Email' SMS = u'SMS' SLACK = u'Slack' SCHEMES = { EMAIL: u'mailto:', SMS: u'sms:', SLACK: u'slack:' } def __str__(self): return self.name @transaction.atomic def send(self, *args, **kwargs): """Sends an alert via this medium.""" if not self.supported: raise FatalDispatcherException("{} is not 
supported".format(self.name)) if self.handler not in self._handlers: dispatcher_class = self._load_dispatcher_class() dispatcher = dispatcher_class( config=AlertSender.config.get(self.handler, {})) self._handlers[self.handler] = dispatcher else: dispatcher = self._handlers[self.handler] # Delegate sending of message return dispatcher.send(*args, **kwargs) def _load_dispatcher_class(self): # Get config if not hasattr(AlertSender, 'config'): AlertSender.config = get_alertengine_config('alertengine.conf') # Load module module = __import__( 'nav.alertengine.dispatchers.%s_dispatcher' % self.handler, globals(), locals(), [self.handler]) # Return matching object from module based on case-insensitive match namemap = {name.lower(): obj for name, obj in vars(module).items()} return namemap[self.handler.lower()] def blacklist(self, reason=None): """Blacklists this sender/medium from further alert dispatch.""" self.__class__._blacklist[self.handler] = reason def is_blacklisted(self): """Gets the blacklist status of this sender/medium.""" return self.handler in self.__class__._blacklist def blacklist_reason(self): """Gets the reason for a blacklist for this sender/medium""" return self.__class__._blacklist.get(self.handler, 'Unknown reason') def scheme(self): return self.SCHEMES.get(self.name, u'') class Meta(object): db_table = 'alertsender' @python_2_unicode_compatible class AlertPreference(models.Model): """AlertProfile account preferences""" account = models.OneToOneField( 'Account', primary_key=True, on_delete=models.CASCADE, db_column='accountid' ) active_profile = models.OneToOneField( 'AlertProfile', on_delete=models.CASCADE, db_column='activeprofile', null=True ) last_sent_day = models.DateTimeField(db_column='lastsentday') last_sent_week = models.DateTimeField(db_column='lastsentweek') class Meta(object): db_table = u'alertpreference' def __str__(self): return 'preferences for %s' % self.account ####################################################################### 
### Profile models

@python_2_unicode_compatible
class AlertProfile(models.Model):
    """Account AlertProfiles"""
    # Weekday numbers follows date.weekday(), not day.isoweekday().
    MONDAY = 0
    TUESDAY = 1
    WEDNESDAY = 2
    THURSDAY = 3
    FRIDAY = 4
    SATURDAY = 5
    SUNDAY = 6

    VALID_WEEKDAYS = (
        (MONDAY, _('monday')),
        (TUESDAY, _('tuesday')),
        (WEDNESDAY, _('wednesday')),
        (THURSDAY, _('thursday')),
        (FRIDAY, _('friday')),
        (SATURDAY, _('saturday')),
        (SUNDAY, _('sunday')),
    )

    account = models.ForeignKey(
        'Account',
        on_delete=models.CASCADE,
        db_column='accountid'
    )
    name = VarcharField()
    # When DAILY/WEEKLY digest subscriptions are dispatched.
    daily_dispatch_time = models.TimeField(default='08:00')
    weekly_dispatch_day = models.IntegerField(choices=VALID_WEEKDAYS,
                                              default=MONDAY)
    weekly_dispatch_time = models.TimeField(default='08:00')

    class Meta(object):
        db_table = u'alertprofile'

    def __str__(self):
        return self.name

    def get_active_timeperiod(self):
        """Gets the currently active timeperiod for this profile

        Returns None if the profile has no time periods valid right now.
        """
        # Could have been done with a ModelManager, but the logic
        # is somewhat tricky to do with the django ORM.
        _logger = logging.getLogger(
            'nav.alertengine.alertprofile.get_active_timeperiod')

        now = datetime.now()

        # Limit our query to the correct type of time periods
        if now.isoweekday() in [6, 7]:
            valid_during = [TimePeriod.ALL_WEEK, TimePeriod.WEEKENDS]
        else:
            valid_during = [TimePeriod.ALL_WEEK, TimePeriod.WEEKDAYS]

        # The following code should get the currently active timeperiod.
        active_timeperiod = None
        timeperiods = list(self.timeperiod_set.filter(
            valid_during__in=valid_during).order_by('start'))
        # If the current time is before the start of the first time
        # period, the active time period is the last one (i.e. from
        # the day before)
        if timeperiods and timeperiods[0].start > now.time():
            active_timeperiod = timeperiods[-1]
        else:
            # Periods are sorted by start time, so the last one that has
            # already started wins.
            for period in timeperiods:
                if period.start <= now.time():
                    active_timeperiod = period

        if active_timeperiod:
            _logger.debug("Active timeperiod for alertprofile %d is %s (%d)",
                          self.id, active_timeperiod, active_timeperiod.id)
        else:
            _logger.debug("No active timeperiod for alertprofile %d",
                          self.id)

        return active_timeperiod


@python_2_unicode_compatible
class TimePeriod(models.Model):
    """Defines TimerPeriods and which part of the week they are valid"""
    ALL_WEEK = 1
    WEEKDAYS = 2
    WEEKENDS = 3

    VALID_DURING_CHOICES = (
        (ALL_WEEK, _('all days')),
        (WEEKDAYS, _('weekdays')),
        (WEEKENDS, _('weekends')),
    )

    profile = models.ForeignKey(
        'AlertProfile',
        on_delete=models.CASCADE,
        db_column='alert_profile_id'
    )
    # A period lasts from its start time until the next period's start.
    start = models.TimeField(db_column='start_time', default='08:00')
    valid_during = models.IntegerField(choices=VALID_DURING_CHOICES,
                                       default=ALL_WEEK)

    class Meta(object):
        db_table = u'timeperiod'

    def __str__(self):
        return u'from %s for %s profile on %s' % (
            self.start, self.profile, self.get_valid_during_display())


@python_2_unicode_compatible
class AlertSubscription(models.Model):
    """Links an address and timeperiod to a filtergroup with a given
    subscription type.
""" NOW = 0 DAILY = 1 WEEKLY = 2 NEXT = 3 SUBSCRIPTION_TYPES = ( (NOW, _('immediately')), (DAILY, _('daily at predefined time')), (WEEKLY, _('weekly at predefined time')), (NEXT, _('at end of timeperiod')), ) alert_address = models.ForeignKey( 'AlertAddress', on_delete=models.CASCADE, ) time_period = models.ForeignKey( 'TimePeriod', on_delete=models.CASCADE, ) filter_group = models.ForeignKey( 'FilterGroup', on_delete=models.CASCADE, ) type = models.IntegerField(db_column='subscription_type', choices=SUBSCRIPTION_TYPES, default=NOW) ignore_resolved_alerts = models.BooleanField(default=False) class Meta(object): db_table = u'alertsubscription' def delete(self): for a in self.accountalertqueue_set.all(): a.delete() super(AlertSubscription, self).delete() def __str__(self): return 'alerts received %s should be sent %s to %s' % ( self.time_period, self.get_type_display(), self.alert_address) ####################################################################### ### Equipment models @python_2_unicode_compatible class FilterGroupContent(models.Model): """Defines how a given filter should be used in a filtergroup""" # inc pos # Add | 1 | 1 | union in set theory # Sub | 0 | 1 | exclusion # And | 0 | 0 | intersection in set theory # Add inv. | 1 | 0 | complement of set # include and positive are used to decide how the match result of the # filter should be applied. the table above is an attempt at showing how # this should work. Add inv is really the only tricky one, basicly it is # nothing more that a negated add, ie if we have a filter that checks # severity > 70 using a add inv on it is equivilent til severity < 70. # The actual checking of the FilterGroup is done in the alertengine # subsystem in an attempt to keep most of the alerteninge code simple and # in one place. 
    include = models.BooleanField(default=False)
    positive = models.BooleanField(default=False)
    # Filters in a group are evaluated in ascending priority order.
    priority = models.IntegerField()

    filter = models.ForeignKey(
        'Filter',
        on_delete=models.CASCADE,
    )
    filter_group = models.ForeignKey(
        'FilterGroup',
        on_delete=models.CASCADE,
    )

    class Meta(object):
        db_table = u'filtergroupcontent'
        ordering = ['priority']

    def __str__(self):
        if self.include:
            type_ = 'inclusive'
        else:
            type_ = 'exclusive'

        if not self.positive:
            type_ = 'inverted %s' % type_

        return '%s filter on %s' % (type_, self.filter)


@python_2_unicode_compatible
class Operator(models.Model):
    """Defines valid operators for a given matchfield."""
    EQUALS = 0
    GREATER = 1
    GREATER_EQ = 2
    LESS = 3
    LESS_EQ = 4
    NOT_EQUAL = 5
    STARTSWITH = 6
    ENDSWITH = 7
    CONTAINS = 8
    REGEXP = 9
    WILDCARD = 10
    IN = 11

    # This list designates which operators are supported for any field. The
    # only major special case is IPs, which are matched with special pg ip
    # operators where it makes sense; the rest of the operators are handled
    # with plain text comparisons against the result of text(ip).
    OPERATOR_TYPES = (
        (EQUALS, _('equals')),
        (GREATER, _('is greater')),
        (GREATER_EQ, _('is greater or equal')),
        (LESS, _('is less')),
        (LESS_EQ, _('is less or equal')),
        (NOT_EQUAL, _('not equals')),
        (STARTSWITH, _('starts with')),
        (ENDSWITH, _('ends with')),
        (CONTAINS, _('contains')),
        (REGEXP, _('regexp')),
        (WILDCARD, _('wildcard (? og *)')),
        (IN, _('in')),
    )

    # This is the mapping that is used when we try querying the ORM to see
    # if filters match. Note that wildcard is not here as it needs to be
    # special-cased.
    OPERATOR_MAPPING = {
        EQUALS: '__exact',
        NOT_EQUAL: '',  # exclusion is special-cased by Filter.check()
        GREATER: '__gt',
        GREATER_EQ: '__gte',
        LESS: '__lt',
        LESS_EQ: '__lte',
        STARTSWITH: '__istartswith',
        ENDSWITH: '__iendswith',
        CONTAINS: '__icontains',
        REGEXP: '__iregex',
        IN: '__in',
    }

    # The IpAddressField in django does not support ipv6 yet so the IP
    # datatype needs to be completely special-cased. The following operator
    # mapping is used to achieve this and expects that it will get '% field'.
    # NOTE(review): STARTSWITH/ENDSWITH use '+' for SQL string
    # concatenation; standard PostgreSQL concatenation is '||' — verify
    # these two mappings actually work against the target database.
    IP_OPERATOR_MAPPING = {
        EQUALS: '%s = %%s',
        GREATER: '%s > %%s',
        GREATER_EQ: '%s >= %%s',
        LESS: '%s < %%s',
        LESS_EQ: '%s <= %%s',
        NOT_EQUAL: '%s <> %%s',
        CONTAINS: '%s >>= %%s',
        IN: '%s <<= %%s',
        WILDCARD: "host(%s) LIKE %%s",
        REGEXP: "host(%s) ~* %%s",
        STARTSWITH: "host(%s) ILIKE '%%%%' + %%s",
        ENDSWITH: "host(%s) ILIKE %%s + '%%%%'",
    }

    type = models.IntegerField(choices=OPERATOR_TYPES,
                               db_column='operator_id')
    match_field = models.ForeignKey(
        'MatchField',
        on_delete=models.CASCADE,
    )

    class Meta(object):
        db_table = u'operator'
        unique_together = (('type', 'match_field'),)

    def __str__(self):
        return u'%s match on %s' % (self.get_type_display(),
                                    self.match_field)

    def get_operator_mapping(self):
        """Returns the Django query operator represented by this instance."""
        return self.OPERATOR_MAPPING[self.type]

    def get_ip_operator_mapping(self):
        """Returns the SQL query IP operator represented by this instance."""
        return self.IP_OPERATOR_MAPPING[self.type]


@python_2_unicode_compatible
class Expression(models.Model):
    """Combines filter, operator, matchfield and value into an expression
    that can be evaluated.
    """
    filter = models.ForeignKey(
        'Filter',
        on_delete=models.CASCADE,
    )
    match_field = models.ForeignKey(
        'MatchField',
        on_delete=models.CASCADE,
    )
    operator = models.IntegerField(choices=Operator.OPERATOR_TYPES)
    value = VarcharField()

    class Meta(object):
        db_table = u'expression'

    def __str__(self):
        return '%s match on %s against %s' % (self.get_operator_display(),
                                              self.match_field, self.value)

    def get_operator_mapping(self):
        """Returns the Django query operator represented by this
        expression."""
        return Operator(type=self.operator).get_operator_mapping()


@python_2_unicode_compatible
class Filter(models.Model):
    """One or more expressions that are combined with an and operation.
    Handles the actual construction of queries to be run taking into account
    special cases like the IP datatype and WILDCARD lookups."""

    owner = models.ForeignKey(
        'Account',
        on_delete=models.CASCADE,
        null=True
    )
    name = VarcharField()

    class Meta(object):
        db_table = u'filter'

    def __str__(self):
        return self.name

    def verify(self, alert):
        """Combines expressions to an ORM query that will tell us if an
        alert matched.

        This function builds three dicts that are used in the ORM .filter()
        .exclude() and .extra() methods which finally gets a .count() as we
        only need to know if something matched.

        Running alertengine in debug mode will print the dicts to the logs.

        :type alert: nav.models.event.AlertQueue
        """
        _logger = logging.getLogger('nav.alertengine.filter.check')

        filtr = {}
        exclude = {}
        extra = {'where': [], 'params': []}

        for expression in self.expression_set.all():
            # Handle IP datatypes:
            if expression.match_field.data_type == MatchField.IP:
                # Trick the ORM into joining the tables we want
                lookup = ('%s__isnull' %
                          expression.match_field.get_lookup_mapping())
                filtr[lookup] = False

                where = Operator(
                    type=expression.operator).get_ip_operator_mapping()

                if expression.operator in [Operator.IN, Operator.CONTAINS]:
                    # Multi-valued IP match: OR one raw SQL condition per
                    # pipe-separated value.
                    values = expression.value.split('|')
                    where = ' OR '.join(
                        [where % expression.match_field.value_id]
                        * len(values))

                    extra['where'].append('(%s)' % where)
                    extra['params'].extend(values)
                else:
                    # Get the IP mapping and put in the field before adding
                    # it to our where clause.
                    extra['where'].append(
                        where % expression.match_field.value_id)
                    extra['params'].append(expression.value)

            # Include all sublocations when matching on location
            elif expression.match_field.name == 'Location':
                lookup = "{}__in".format(
                    MatchField.FOREIGN_MAP[MatchField.LOCATION])
                # Location only have two Operators (in and exact) so we
                # handle both with a split
                locations = Location.objects.filter(
                    pk__in=expression.value.split('|'))

                # Find all descendants for locations in a totally readable
                # way
                filtr[lookup] = list(set(itertools.chain(
                    *[l.get_descendants(include_self=True)
                      for l in locations])))

            # Handle wildcard lookups which are not directly supported by
            # django (as far as i know)
            elif expression.operator == Operator.WILDCARD:
                # Trick the ORM into joining the tables we want
                lookup = ('%s__isnull' %
                          expression.match_field.get_lookup_mapping())
                filtr[lookup] = False

                extra['where'].append(
                    '%s ILIKE %%s' % expression.match_field.value_id)
                extra['params'].append(expression.value)

            # Handle the plain lookups that we can do directly in ORM
            else:
                lookup = (expression.match_field.get_lookup_mapping()
                          + expression.get_operator_mapping())

                # Ensure that in and not equal are handled correctly
                if expression.operator == Operator.IN:
                    filtr[lookup] = expression.value.split('|')
                elif expression.operator == Operator.NOT_EQUAL:
                    exclude[lookup] = expression.value
                else:
                    filtr[lookup] = expression.value

        # Limit ourselves to our alert
        filtr['id'] = alert.id

        # .extra() rejects an empty 'where' list, so drop it entirely.
        if not extra['where']:
            extra = {}

        _logger.debug(
            'alert %d: checking against filter %d with filter: %s, exclude: '
            '%s and extra: %s', alert.id, self.id, filtr, exclude, extra)

        # Check whether the alert matches with a SELECT COUNT(*) FROM ...
        # so that the db doesn't have to work as much.
        if AlertQueue.objects.filter(**filtr).exclude(**exclude).extra(
                **extra).count():
            _logger.debug('alert %d: matches filter %d', alert.id, self.id)
            return True

        _logger.debug('alert %d: did not match filter %d',
                      alert.id, self.id)
        return False


@python_2_unicode_compatible
class FilterGroup(models.Model):
    """A set of filters group contents that an account can subscribe to or
    be given permission to.
    """
    # A NULL owner means the group is shared/public.
    owner = models.ForeignKey(
        'Account',
        on_delete=models.CASCADE,
        null=True
    )
    name = VarcharField()
    description = VarcharField()

    group_permissions = models.ManyToManyField(
        'AccountGroup',
        db_table='filtergroup_group_permission')

    class Meta(object):
        db_table = u'filtergroup'

    def __str__(self):
        return self.name


@python_2_unicode_compatible
class MatchField(models.Model):
    """Defines which fields can be matched upon and how"""
    STRING = 0
    INTEGER = 1
    IP = 2

    # Due to the way alertengine has been reimplemented the code only really
    # does stuff differently if datatype is set to IP; however, setting
    # datatype still makes a lot of sense in alertprofiles so that we can
    # verify user input.
    DATA_TYPES = (
        (STRING, _('string')),
        (INTEGER, _('integer')),
        (IP, _('ip')),
    )

    # This is a manually maintained mapping between our model concepts and
    # the actual db tables that are in use. This is needed as our value_id
    # is based on this value.
    ALERT = 'alertq'
    ALERTTYPE = 'alerttype'
    ARP = 'arp'
    CAM = 'cam'
    CATEGORY = 'cat'
    NETBOXGROUP = 'netboxgroup'
    DEVICE = 'device'
    EVENT_TYPE = 'eventtype'
    LOCATION = 'location'
    MEMORY = 'mem'
    MODULE = 'module'
    NETBOX = 'netbox'
    NETBOXINFO = 'netboxinfo'
    ORGANIZATION = 'org'
    PREFIX = 'prefix'
    ROOM = 'room'
    SERVICE = 'service'
    INTERFACE = 'interface'
    TYPE = 'type'
    VENDOR = 'vendor'
    VLAN = 'vlan'
    USAGE = 'usage'

    # Human-readable labels for the table identifiers above.
    # NOTE(review): 'memeroy' is a typo for 'memory', but it is a
    # translatable runtime label — fix it separately, not here.
    LOOKUP_FIELDS = (
        (ALERT, _('alert')),
        (ALERTTYPE, _('alert type')),
        (ARP, _('arp')),
        (CAM, _('cam')),
        (CATEGORY, _('category')),
        (NETBOXGROUP, _('netboxgroup')),
        (DEVICE, _('device')),
        (EVENT_TYPE, _('event type')),
        (LOCATION, _('location')),
        (MEMORY, _('memeroy')),
        (MODULE, _('module')),
        (NETBOX, _('netbox')),
        (NETBOXINFO, _('netbox info')),
        (ORGANIZATION, _('organization')),
        (PREFIX, _('prefix')),
        (ROOM, _('room')),
        (SERVICE, _('service')),
        (INTERFACE, _('Interface')),
        (TYPE, _('type')),
        (VENDOR, _('vendor')),
        (VLAN, _('vlan')),
        (USAGE, _('usage')),
    )

    # This mapping designates how a MatchField relates to an alert. (yes the
    # formating is not PEP8, but it wouldn't be very readable otherwise)
    # Since we need to know how things are connected this has been done
    # manually.
    FOREIGN_MAP = {
        ARP: 'netbox__arp',
        CAM: 'netbox__cam',
        CATEGORY: 'netbox__category',
        NETBOXGROUP: 'netbox__netboxcategory__category',
        DEVICE: 'netbox__device',
        EVENT_TYPE: 'event_type',
        LOCATION: 'netbox__room__location',
        MEMORY: 'netbox__memory',
        MODULE: 'netbox__module',
        NETBOX: 'netbox',
        NETBOXINFO: 'netbox__info',
        ORGANIZATION: 'netbox__organization',
        PREFIX: 'netbox__prefix',
        ROOM: 'netbox__room',
        SERVICE: 'netbox__service',
        INTERFACE: 'netbox__connected_to_interface',
        TYPE: 'netbox__type',
        USAGE: 'netbox__organization__vlan__usage',
        VENDOR: 'netbox__type__vendor',
        VLAN: 'netbox__organization__vlan',
        ALERT: '',  # Checks alert object itself
        ALERTTYPE: 'alert_type',
    }

    # Build the mapping we need to be able to do checks.
    VALUE_MAP = {}
    CHOICES = []
    MODEL_MAP = {}

    # This code loops over all the SUPPORTED_MODELS and gets the db_table
    # and db_column so that we can translate them into the corresponding
    # attributes on our django models. (field and model need to be set to
    # None to avoid an ugly side effect of field becoming an actual field
    # on MatchField)
    for model in SUPPORTED_MODELS:
        for field in model._meta.fields:
            key = '%s.%s' % (model._meta.db_table,
                             field.db_column or field.attname)
            value = '%s__%s' % (FOREIGN_MAP[model._meta.db_table],
                                field.attname)

            VALUE_MAP[key] = field.attname
            CHOICES.append((key, value.lstrip('_')))
            MODEL_MAP[key] = (model, field.attname)
    field = None
    model = None

    name = VarcharField()
    description = VarcharField(blank=True)
    value_help = VarcharField(
        blank=True,
        help_text=_(u'Help text for the match field. Displayed by the value '
                    u'input box in the GUI to help users enter sane values.')
    )
    value_id = VarcharField(
        choices=CHOICES,
        help_text=_(u'The "match field". This is the actual database field '
                    u'alert engine will watch.')
    )
    value_name = VarcharField(
        choices=CHOICES,
        blank=True,
        help_text=_(u'When "show list" is checked, the list will be '
                    u'populated with data from this column as well as the '
                    u'"value id" field. Does nothing else than provide a '
                    u'little more info for the users in the GUI.')
    )
    value_sort = VarcharField(
        choices=CHOICES,
        blank=True,
        help_text=_(u'Options in the list will be ordered by this field (if '
                    u'not set, options will be ordered by primary key). '
                    u'Only does something when "Show list" is checked.')
    )
    list_limit = models.IntegerField(
        blank=True,
        help_text=_(u'Only this many options will be available in the list. '
                    u'Only does something when "Show list" is checked.')
    )
    data_type = models.IntegerField(
        choices=DATA_TYPES,
        help_text=_(u'The data type of the match field.')
    )
    show_list = models.BooleanField(
        blank=True,
        default=False,
        help_text=_(u'If unchecked values can be entered into a text input. '
                    u'If checked values must be selected from a list '
                    u'populated by data from the match field selected above.')
    )

    class Meta(object):
        db_table = u'matchfield'

    def __str__(self):
        return self.name

    def get_lookup_mapping(self):
        """Returns the field lookup represented by this MatchField."""
        _logger = logging.getLogger(
            'nav.alertengine.matchfield.get_lookup_mapping')

        try:
            # value_id is on the form '<db_table>.<db_column>'.
            foreign_lookup = self.FOREIGN_MAP[self.value_id.split('.')[0]]
            value = self.VALUE_MAP[self.value_id]

            if foreign_lookup:
                return '%s__%s' % (foreign_lookup, value)
            return value
        except KeyError:
            _logger.error(
                "Tried to lookup mapping for %s which is not supported",
                self.value_id)
            return None


#######################################################################
### AlertEngine models

@python_2_unicode_compatible
class SMSQueue(models.Model):
    """Queue of messages that should be sent or have been sent by SMSd"""
    SENT = 'Y'
    NOT_SENT = 'N'
    IGNORED = 'I'
    SENT_CHOICES = (
        (SENT, _('sent')),
        (NOT_SENT, _('not sent yet')),
        (IGNORED, _('ignored')),
    )

    account = models.ForeignKey(
        'Account',
        on_delete=models.CASCADE,
        db_column='accountid',
        null=True
    )
    time = models.DateTimeField(auto_now_add=True)
    phone = models.CharField(max_length=15)
    message = models.CharField(max_length=145, db_column='msg')
    sent = models.CharField(max_length=1, default=NOT_SENT,
                            choices=SENT_CHOICES)
    sms_id = models.IntegerField(db_column='smsid')
    time_sent = models.DateTimeField(db_column='timesent')
    severity = models.IntegerField()

    class Meta(object):
        db_table = u'smsq'

    def __str__(self):
        return '"%s" to %s, sent: %s' % (self.message, self.phone,
                                         self.sent)

    def save(self, *args, **kwargs):
        """Overrides save to truncate long messages (max is 145)"""
        # 142 chars + '...' == the 145-char column limit.
        if len(self.message) > 142:
            self.message = self.message[:142] + '...'
        return super(SMSQueue, self).save(*args, **kwargs)


class AccountAlertQueue(models.Model):
    """Defines which alerts should be keept around and sent at a later
    time"""
    account = models.ForeignKey(
        'Account',
        on_delete=models.CASCADE,
        null=True
    )
    subscription = models.ForeignKey(
        'AlertSubscription',
        on_delete=models.CASCADE,
        null=True
    )
    alert = models.ForeignKey(
        'AlertQueue',
        on_delete=models.CASCADE,
        null=True
    )
    insertion_time = models.DateTimeField(auto_now_add=True)

    class Meta(object):
        db_table = u'accountalertqueue'

    def delete(self, *args, **kwargs):
        """Deletes the alert from the user's alert queue.

        Also deletes the alert globally if not queued for anyone else.
        """
        # TODO deleting items with the manager will not trigger this
        # behaviour cleaning up related messages.
        super(AccountAlertQueue, self).delete(*args, **kwargs)

        # Remove the alert from the AlertQueue if we are the last item
        # depending upon it.
        if self.alert.accountalertqueue_set.count() == 0:
            self.alert.delete()

    def send(self):
        """Sends the alert in question to the address in the subscription

        Returns the dispatch result; the queue entry is deleted on success
        and on fatal dispatch errors.
        """
        try:
            sent = self.subscription.alert_address.send(self.alert,
                                                        self.subscription)
        except AlertSender.DoesNotExist:
            # The address points at a sender row that no longer exists (or
            # was never set) — raise a descriptive error either way.
            address = self.subscription.alert_address
            sender = address.type_id
            if sender is not None:
                raise Exception(
                    "Invalid sender set for address %s, "
                    "please check that %s is in profiles.alertsender"
                    % (address, sender))
            else:
                raise Exception(
                    "No sender set for address %s, this might be due to a "
                    "failed db upgrade from 3.4 to 3.5" % address)
        except AlertQueue.DoesNotExist:
            # The referenced alert vanished; drop the orphaned queue entry.
            _logger = logging.getLogger(
                'nav.alertengine.accountalertqueue.send')
            _logger.error(('Inconsistent database state, alertqueue entry %d '
                           + 'missing for account-alert. If you know how the '
                           + 'database got into this state please update '
                           + 'LP#494036'), self.alert_id)

            # Bypass our own delete() since the alert row is already gone.
            super(AccountAlertQueue, self).delete()
            return False
        except FatalDispatcherException:
            # Permanent failure: this alert can never be sent — drop it.
            self.delete()
            return False

        if sent:
            self.delete()

        return sent


# Make sure you update netmap-extras.js too if you change this! ;-)
LINK_TYPES = (2, 'Layer 2'), (3, 'Layer 3')


@python_2_unicode_compatible
class NetmapView(models.Model):
    """Properties for a specific view in Netmap"""
    viewid = models.AutoField(primary_key=True)
    owner = models.ForeignKey(
        Account,
        on_delete=models.CASCADE,
        db_column='owner'
    )
    title = models.TextField()
    description = models.TextField(null=True, blank=True)
    topology = models.IntegerField(choices=LINK_TYPES)
    # pickle x,y,scale (translate(x,y) , scale(scale)
    zoom = models.CharField(max_length=255)
    last_modified = models.DateTimeField(auto_now_add=True)
    is_public = models.BooleanField(default=False)
    display_elinks = models.BooleanField(default=False)
    display_orphans = models.BooleanField(default=False)
    location_room_filter = models.CharField(max_length=255, blank=True)
    categories = models.ManyToManyField(Category,
                                        through='NetmapViewCategories',
                                        related_name='netmap_views')

    def __str__(self):
        return u'%s (%s)' % (self.viewid, self.title)

    def topology_unicode(self):
        """Returns the display label for this view's topology layer."""
        return dict(LINK_TYPES).get(self.topology)

    def get_absolute_url(self):
        return "%s#/netmap/%s" % (reverse('netmap-index'), self.viewid)

    def get_set_defaultview_url(self):
        """URL for admin django view to set a default view"""
        return reverse('netmap-api-netmap-defaultview-global')

    class Meta(object):
        db_table = u'netmap_view'


class NetmapViewDefaultView(models.Model):
    """Default view for each user"""
    id = models.AutoField(primary_key=True)
    view = models.ForeignKey(
        NetmapView,
        on_delete=models.CASCADE,
        db_column='viewid'
    )
    owner = models.ForeignKey(
        Account,
        on_delete=models.CASCADE,
        db_column='ownerid'
    )

    class Meta(object):
        db_table = u'netmap_view_defaultview'

    def __repr__(self):
        return "{name}{args!r}".format(
            name=self.__class__.__name__,
            args=(self.id, self.view, self.owner)
        )


@python_2_unicode_compatible
class NetmapViewCategories(models.Model):
    """Saved categories for a selected view in Netmap"""
    id = models.AutoField(primary_key=True)  # Serial for faking a primary key
    view = models.ForeignKey(
        NetmapView,
        on_delete=models.CASCADE,
        db_column='viewid',
        related_name='categories_set'
    )
    category = models.ForeignKey(
        Category,
        on_delete=models.CASCADE,
        db_column='catid',
        related_name='netmapview_set'
    )

    def __str__(self):
        return u'%s in category %s' % (self.view, self.category)

    class Meta(object):
        db_table = u'netmap_view_categories'
        unique_together = (('view', 'category'),)  # Primary key


class NetmapViewNodePosition(models.Model):
    """Saved positions for nodes for a selected view in Netmap"""
    id = models.AutoField(primary_key=True)  # Serial for faking a primary key
    viewid = models.ForeignKey(
        NetmapView,
        on_delete=models.CASCADE,
        db_column='viewid',
        related_name='node_position_set'
    )
    netbox = models.ForeignKey(
        Netbox,
        on_delete=models.CASCADE,
        db_column='netboxid',
        related_name='node_position_set'
    )
    x = models.IntegerField()
    y = models.IntegerField()

    class Meta(object):
        db_table = u'netmap_view_nodeposition'


@python_2_unicode_compatible
class AccountTool(models.Model):
    """Link between tool and account"""
    id = models.AutoField(primary_key=True, db_column='account_tool_id')
    toolname = VarcharField()
    account = models.ForeignKey(
        Account,
        on_delete=models.CASCADE,
        db_column='accountid'
    )
    display = models.BooleanField(default=True)
    priority = models.IntegerField(default=0)

    def __str__(self):
        return "%s - %s" % (self.toolname, self.account)

    class Meta(object):
        db_table = u'accounttool'


@python_2_unicode_compatible
class AccountDashboard(models.Model):
    """Stores dashboards for each user"""
    name = VarcharField()
    is_default = models.BooleanField(default=False)
    num_columns = models.IntegerField(default=3)
    account = models.ForeignKey(
        Account,
        on_delete=models.CASCADE,
    )

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        return reverse('dashboard-index-id', kwargs={'did': self.id})

    def to_json_dict(self):
        """Serializes the dashboard and its widgets for export."""
        data = {
            'name': self.name,
            'num_columns': self.num_columns,
            'account': self.account_id,
            'widgets': [],
            'version': 1,
        }
        for widget in self.widgets.all():
            data['widgets'].append(widget.to_json_dict())
        return data

    class Meta(object):
        db_table = 'account_dashboard'
        ordering = ('name',)


@python_2_unicode_compatible
class AccountNavlet(models.Model):
    """Store information about a users navlets"""
    navlet = VarcharField()
    order = models.IntegerField(default=0, db_column='displayorder')
    account = models.ForeignKey(
        Account,
        on_delete=models.CASCADE,
        db_column='account'
    )
    preferences = DictAsJsonField(null=True)
    column = models.IntegerField(db_column='col')
    dashboard = models.ForeignKey(
        AccountDashboard,
        on_delete=models.CASCADE,
        related_name='widgets'
    )

    def __str__(self):
        return "%s - %s" % (self.navlet, self.account)

    def to_json_dict(self):
        """Serializes this widget for export (see AccountDashboard)."""
        return {
            'navlet': self.navlet,
            'preferences': self.preferences,
            'column': self.column,
            'order': self.order,
        }

    class Meta(object):
        db_table = 'account_navlet'
        ordering = ['order']


@python_2_unicode_compatible
class ReportSubscription(models.Model):
    """Subscriptions for availability reports"""
    MONTH = 'month'
    WEEK = 'week'
    DAY = 'day'
    PERIODS = ((MONTH, 'monthly'), (WEEK, 'weekly'), (DAY, 'daily'))

    DEVICE = 'device'
    LINK = 'link'
    TYPES = ((DEVICE, 'device availability'), (LINK, 'link availability'))

    account = models.ForeignKey(
        Account,
        on_delete=models.CASCADE,
    )
    address = models.ForeignKey(
        AlertAddress,
        on_delete=models.CASCADE,
    )
    period = VarcharField(choices=PERIODS)
    report_type = VarcharField(choices=TYPES)
    exclude_maintenance = models.BooleanField()

    class Meta(object):
        db_table = u'report_subscription'

    def __str__(self):
        if self.report_type == self.LINK:
            return u"{} report for {} sent to {}".format(
                self.get_period_description(self.period),
self.get_type_description(self.report_type), self.address.address) return u"{} report for {} ({} time in maintenance) sent to {}".format( self.get_period_description(self.period), self.get_type_description(self.report_type), 'excluding' if self.exclude_maintenance else 'including', self.address.address) def serialize(self): keys = ['report_type', 'period', 'address'] filtered = {k: v for k, v in model_to_dict(self).items() if k in keys} return json.dumps(filtered) @staticmethod def get_period_description(period): return next(v for k, v in ReportSubscription.PERIODS if k == period) @staticmethod def get_type_description(report_type): return next(v for k, v in ReportSubscription.TYPES if k == report_type)
unknown
codeparrot/codeparrot-clean
/* * Copyright (C) 2010 The Guava Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.common.util.concurrent; import com.google.common.testing.NullPointerTester; import com.google.common.testing.TearDownStack; import java.util.Random; import junit.framework.TestCase; import org.jspecify.annotations.NullUnmarked; /** * Tests for {@link Monitor}, either interruptible or uninterruptible. * * @author Justin T. Sampson */ @NullUnmarked public abstract class MonitorTestCase extends TestCase { public class TestGuard extends Monitor.Guard { private volatile boolean satisfied; public TestGuard(boolean satisfied) { super(MonitorTestCase.this.monitor); this.satisfied = satisfied; } @Override public boolean isSatisfied() { return this.satisfied; } public void setSatisfied(boolean satisfied) { this.satisfied = satisfied; } } private final boolean interruptible; private Monitor monitor; private final TearDownStack tearDownStack = new TearDownStack(); private TestThread<Monitor> thread1; private TestThread<Monitor> thread2; protected MonitorTestCase(boolean interruptible) { this.interruptible = interruptible; } @Override protected final void setUp() throws Exception { boolean fair = new Random().nextBoolean(); monitor = new Monitor(fair); tearDownStack.addTearDown(thread1 = new TestThread<>(monitor, "TestThread #1")); tearDownStack.addTearDown(thread2 = new TestThread<>(monitor, "TestThread #2")); } @Override protected final void tearDown() { 
tearDownStack.runTearDown(); } private String enter() { return interruptible ? "enterInterruptibly" : "enter"; } private String tryEnter() { return "tryEnter"; } private String enterIf() { return interruptible ? "enterIfInterruptibly" : "enterIf"; } private String tryEnterIf() { return "tryEnterIf"; } private String enterWhen() { return interruptible ? "enterWhen" : "enterWhenUninterruptibly"; } private String waitFor() { return interruptible ? "waitFor" : "waitForUninterruptibly"; } private String leave() { return "leave"; } public final void testMutualExclusion() throws Exception { thread1.callAndAssertReturns(enter()); thread2.callAndAssertBlocks(enter()); thread1.callAndAssertReturns(leave()); thread2.assertPriorCallReturns(enter()); } public final void testTryEnter() throws Exception { thread1.callAndAssertReturns(true, tryEnter()); thread2.callAndAssertReturns(false, tryEnter()); thread1.callAndAssertReturns(true, tryEnter()); thread2.callAndAssertReturns(false, tryEnter()); thread1.callAndAssertReturns(leave()); thread2.callAndAssertReturns(false, tryEnter()); thread1.callAndAssertReturns(leave()); thread2.callAndAssertReturns(true, tryEnter()); } public final void testSystemStateMethods() throws Exception { checkSystemStateMethods(0); thread1.callAndAssertReturns(enter()); checkSystemStateMethods(1); thread1.callAndAssertReturns(enter()); checkSystemStateMethods(2); thread1.callAndAssertReturns(leave()); checkSystemStateMethods(1); thread1.callAndAssertReturns(leave()); checkSystemStateMethods(0); } private void checkSystemStateMethods(int enterCount) throws Exception { thread1.callAndAssertReturns(enterCount != 0, "isOccupied"); thread1.callAndAssertReturns(enterCount != 0, "isOccupiedByCurrentThread"); thread1.callAndAssertReturns(enterCount, "getOccupiedDepth"); thread2.callAndAssertReturns(enterCount != 0, "isOccupied"); thread2.callAndAssertReturns(false, "isOccupiedByCurrentThread"); thread2.callAndAssertReturns(0, "getOccupiedDepth"); } public final 
void testEnterWhen_initiallyTrue() throws Exception { TestGuard guard = new TestGuard(true); thread1.callAndAssertReturns(enterWhen(), guard); } public final void testEnterWhen_initiallyFalse() throws Exception { TestGuard guard = new TestGuard(false); thread1.callAndAssertWaits(enterWhen(), guard); monitor.enter(); guard.setSatisfied(true); monitor.leave(); thread1.assertPriorCallReturns(enterWhen()); } public final void testEnterWhen_alreadyOccupied() throws Exception { TestGuard guard = new TestGuard(true); thread2.callAndAssertReturns(enter()); thread1.callAndAssertBlocks(enterWhen(), guard); thread2.callAndAssertReturns(leave()); thread1.assertPriorCallReturns(enterWhen()); } public final void testEnterIf_initiallyTrue() throws Exception { TestGuard guard = new TestGuard(true); thread1.callAndAssertReturns(true, enterIf(), guard); thread2.callAndAssertBlocks(enter()); } public final void testEnterIf_initiallyFalse() throws Exception { TestGuard guard = new TestGuard(false); thread1.callAndAssertReturns(false, enterIf(), guard); thread2.callAndAssertReturns(enter()); } public final void testEnterIf_alreadyOccupied() throws Exception { TestGuard guard = new TestGuard(true); thread2.callAndAssertReturns(enter()); thread1.callAndAssertBlocks(enterIf(), guard); thread2.callAndAssertReturns(leave()); thread1.assertPriorCallReturns(true, enterIf()); } public final void testTryEnterIf_initiallyTrue() throws Exception { TestGuard guard = new TestGuard(true); thread1.callAndAssertReturns(true, tryEnterIf(), guard); thread2.callAndAssertBlocks(enter()); } public final void testTryEnterIf_initiallyFalse() throws Exception { TestGuard guard = new TestGuard(false); thread1.callAndAssertReturns(false, tryEnterIf(), guard); thread2.callAndAssertReturns(enter()); } public final void testTryEnterIf_alreadyOccupied() throws Exception { TestGuard guard = new TestGuard(true); thread2.callAndAssertReturns(enter()); thread1.callAndAssertReturns(false, tryEnterIf(), guard); } public 
final void testWaitFor_initiallyTrue() throws Exception { TestGuard guard = new TestGuard(true); thread1.callAndAssertReturns(enter()); thread1.callAndAssertReturns(waitFor(), guard); } public final void testWaitFor_initiallyFalse() throws Exception { TestGuard guard = new TestGuard(false); thread1.callAndAssertReturns(enter()); thread1.callAndAssertWaits(waitFor(), guard); monitor.enter(); guard.setSatisfied(true); monitor.leave(); thread1.assertPriorCallReturns(waitFor()); } public final void testWaitFor_withoutEnter() throws Exception { TestGuard guard = new TestGuard(true); thread1.callAndAssertThrows(IllegalMonitorStateException.class, waitFor(), guard); } public void testNulls() { monitor.enter(); // Inhibit IllegalMonitorStateException new NullPointerTester() .setDefault(Monitor.Guard.class, new TestGuard(true)) .testAllPublicInstanceMethods(monitor); } // TODO: Test enter(long, TimeUnit). // TODO: Test enterWhen(Guard, long, TimeUnit). // TODO: Test enterIf(Guard, long, TimeUnit). // TODO: Test waitFor(Guard, long, TimeUnit). // TODO: Test getQueueLength(). // TODO: Test hasQueuedThreads(). // TODO: Test getWaitQueueLength(Guard). // TODO: Test automatic signaling before leave, waitFor, and reentrant enterWhen. // TODO: Test blocking to re-enter monitor after being signaled. // TODO: Test interrupts with both interruptible and uninterruptible monitor. // TODO: Test multiple waiters: If guard is still satisfied, signal next waiter. // TODO: Test multiple waiters: If guard is no longer satisfied, do not signal next waiter. }
java
github
https://github.com/google/guava
android/guava-tests/test/com/google/common/util/concurrent/MonitorTestCase.java
import datetime import os import logging import re import six from six.moves import cPickle import numpy as np import xarray as xr import pandas as pd import requests log=logging.getLogger('usgs_nwis') from ... import utils from .. import rdb from .common import periods try: import seawater except ImportError: seawater=None def nwis_dataset_collection(stations,*a,**k): """ Fetch from multiple stations, glue together to a combined dataset. The rest of the options are the same as for nwis_dataset(). Stations for which no data was found are omitted in the results. """ ds_per_site=[] for station in stations: ds=nwis_dataset(station,*a,**k) if ds is None: continue ds['site']=('site',),[station] ds_per_site.append(ds) # And now glue those all together, but no filling of gaps yet. # As cases of missing data come up, this will have to get smarter about padding # individual sites. if len(ds_per_site)==0: # Annoying, but if no stations exist, just return None return None collection=xr.concat( ds_per_site, dim='site') for ds in ds_per_site: ds.close() # free up FDs return collection def nwis_dataset(station,start_date,end_date,products, days_per_request='M',frequency='realtime', cache_dir=None,clip=True,cache_only=False, cache_no_data=False): """ Retrieval script for USGS waterdata.usgs.gov Retrieve one or more data products from a single station. station: string or numeric identifier for COOPS station. products: list of integers identifying the variable to retrieve. See usgs_parm_codes.tsv in the directory above this directory. start_date,end_date: period to retrieve, as python datetime, matplotlib datenum, or numpy datetime64. days_per_request: batch the requests to fetch smaller chunks at a time. if this is an integer, then chunks will start with start_date, then start_date+days_per_request, etc. if this is a string, it is interpreted as the frequency argument to pandas.PeriodIndex. so 'M' will request month-aligned chunks. 
this has the advantage that requests for different start dates will still be aligned to integer periods, and can reuse cached data. cache_dir: if specified, save each chunk as a netcdf file in this directory, with filenames that include the gage, period and products. The directory must already exist. clip: if True, then even if more data was fetched, return only the period requested. frequency: defaults to "realtime" which should correspond to the original sample frequency. Alternatively, "daily" which access daily average values. cache_only: only read from cache, not attempting to fetch any new data. cache_no_data: periods which successfully download but contain no data are recorded as empty files. Otherwise it is assumed that there may be a transient error, and nothing is written to cache. Do not use this for real-time retrievals, since it may cache no-data results from the future. returns an xarray dataset. Note that names of variables are inferred from parameter codes where possible, but this is not 100% accurate with respect to the descriptions provided in the rdb, notably "Discharge, cubic feet per second" may be reported as "stream_flow_mean_daily" """ start_date=utils.to_dt64(start_date) end_date=utils.to_dt64(end_date) params=dict(site_no=station, format='rdb') for prod in products: params['cb_%05d'%prod]='on' # Only for small requests of recent data: # base_url="https://waterdata.usgs.gov/nwis/uv" # Otherwise it redirects to here: if frequency=='realtime': base_url="https://nwis.waterdata.usgs.gov/usa/nwis/uv/" elif frequency=='daily': base_url="https://waterdata.usgs.gov/nwis/dv" else: raise Exception("Unknown frequency: %s"%(frequency)) params['period']='' # generator for dicing up the request period datasets=[] last_url=None for interval_start,interval_end in periods(start_date,end_date,days_per_request): params['begin_date']=utils.to_datetime(interval_start).strftime('%Y-%m-%d') params['end_date'] =utils.to_datetime(interval_end).strftime('%Y-%m-%d') 
# This is the base name for caching, but also a shorthand for reporting # issues with the user, since it already encapsulates most of the # relevant info in a single tidy string. base_fn="%s_%s_%s_%s.nc"%(station, "-".join(["%d"%p for p in products]), params['begin_date'], params['end_date']) if cache_dir is not None: cache_fn=os.path.join(cache_dir,base_fn) else: cache_fn=None if (cache_fn is not None) and os.path.exists(cache_fn): log.info("Cached %s -- %s"%(interval_start,interval_end)) if os.path.getsize(cache_fn)==0: # Cached no-data result log.warning(" cache for %s -- %s says no-data"%(interval_start,interval_end)) continue ds=xr.open_dataset(cache_fn) elif cache_only: log.info("Cache only - no data for %s -- %s"%(interval_start,interval_end)) continue else: log.info("Fetching %s"%(base_fn)) sesh = requests.Session() sesh.mount('https://', requests.adapters.HTTPAdapter(max_retries=3)) req=sesh.get(base_url,params=params) data=req.text ds=rdb.rdb_to_dataset(text=data) if ds is None: # There was no data there HERE - would like to have an option to record no data log.warning(" %s: no data found for this period"%base_fn) if (cache_fn is not None) and cache_no_data: log.warning(" %s: making zero-byte cache file"%base_fn) with open(cache_fn,'wb') as fp: pass continue ds.attrs['url']=req.url if cache_fn is not None: ds.to_netcdf(cache_fn) # USGS returns data inclusive of the requested date range - leading to some overlap if len(datasets): ds=ds.isel(time=ds.time>datasets[-1].time[-1]) datasets.append(ds) if len(datasets)==0: # could try to construct zero-length dataset, but that sounds like a pain # at the moment. log.warning(" no data for station %s for any periods!"%station) return None if len(datasets)>1: # it's possible that not all variables appear in all datasets # dataset=xr.concat( datasets, dim='time') dataset=datasets[0] for other in datasets[1:]: dataset=dataset.combine_first(other) for stale in datasets: stale.close() # maybe free up FDs? 
else: dataset=datasets[0] if clip: time_sel=(dataset.time.values>=start_date) & (dataset.time.values<end_date) dataset=dataset.isel(time=time_sel) dataset.load() # force read into memory before closing files for d in datasets: d.close() for meta in ['datenum','tz_cd']: if meta in dataset.data_vars: dataset=dataset.set_coords(meta) return dataset def add_salinity(ds): assert seawater is not None for v in ds.data_vars: if v.startswith('specific_conductance'): salt_name=v.replace('specific_conductance','salinity') if salt_name not in ds: print("%s => %s"%(v,salt_name)) salt=seawater.eos80.salt(ds[v].values/1000. / seawater.constants.c3515, 25.0, # temperature - USGS adjusts to 25degC 0) # no pressure effects ds[salt_name]=ds[v].dims, salt def station_metadata(station,cache_dir=None): if cache_dir is not None: cache_fn=os.path.join(cache_dir,"meta-%s.pkl"%station) if os.path.exists(cache_fn): with open(cache_fn,'rb') as fp: meta=cPickle.load(fp) return meta url="https://waterdata.usgs.gov/nwis/inventory?agency_code=USGS&site_no=%s"%station resp=requests.get(url) m=re.search(r"Latitude\s+([.0-9&#;']+\")",resp.text) lat=m.group(1) m=re.search(r"Longitude\s+([.0-9&#;']+\")",resp.text) lon=m.group(1) def dms_to_dd(s): s=s.replace('&#176;',' ').replace('"',' ').replace("'"," ").strip() d,m,s =[float(p) for p in s.split()] return d + m/60. + s/3600. lat=dms_to_dd(lat) # no mention of west longitude, but can assume it is west. lon=-dms_to_dd(lon) meta=dict(lat=lat,lon=lon) if cache_dir is not None: with open(cache_fn,'wb') as fp: cPickle.dump(meta,fp) return meta
unknown
codeparrot/codeparrot-clean
/*------------------------------------------------------------------------- * * enum.c * I/O functions, operators, aggregates etc for enum types * * Copyright (c) 2006-2026, PostgreSQL Global Development Group * * * IDENTIFICATION * src/backend/utils/adt/enum.c * *------------------------------------------------------------------------- */ #include "postgres.h" #include "access/genam.h" #include "access/htup_details.h" #include "access/table.h" #include "catalog/pg_enum.h" #include "libpq/pqformat.h" #include "storage/procarray.h" #include "utils/array.h" #include "utils/builtins.h" #include "utils/fmgroids.h" #include "utils/syscache.h" #include "utils/typcache.h" static Oid enum_endpoint(Oid enumtypoid, ScanDirection direction); static ArrayType *enum_range_internal(Oid enumtypoid, Oid lower, Oid upper); /* * Disallow use of an uncommitted pg_enum tuple. * * We need to make sure that uncommitted enum values don't get into indexes. * If they did, and if we then rolled back the pg_enum addition, we'd have * broken the index because value comparisons will not work reliably without * an underlying pg_enum entry. (Note that removal of the heap entry * containing an enum value is not sufficient to ensure that it doesn't appear * in upper levels of indexes.) To do this we prevent an uncommitted row from * being used for any SQL-level purpose. This is stronger than necessary, * since the value might not be getting inserted into a table or there might * be no index on its column, but it's easy to enforce centrally. * * However, it's okay to allow use of uncommitted values belonging to enum * types that were themselves created in the same transaction, because then * any such index would also be new and would go away altogether on rollback. * We don't implement that fully right now, but we do allow free use of enum * values created during CREATE TYPE AS ENUM, which are surely of the same * lifespan as the enum type. (This case is required by "pg_restore -1".) 
* Values added by ALTER TYPE ADD VALUE are also allowed if the enum type * is known to have been created earlier in the same transaction. (Note that * we have to track that explicitly; comparing tuple xmins is insufficient, * because the type tuple might have been updated in the current transaction. * Subtransactions also create hazards to be accounted for; currently, * pg_enum.c only handles ADD VALUE at the outermost transaction level.) * * This function needs to be called (directly or indirectly) in any of the * functions below that could return an enum value to SQL operations. */ static void check_safe_enum_use(HeapTuple enumval_tup) { TransactionId xmin; Form_pg_enum en = (Form_pg_enum) GETSTRUCT(enumval_tup); /* * If the row is hinted as committed, it's surely safe. This provides a * fast path for all normal use-cases. */ if (HeapTupleHeaderXminCommitted(enumval_tup->t_data)) return; /* * Usually, a row would get hinted as committed when it's read or loaded * into syscache; but just in case not, let's check the xmin directly. */ xmin = HeapTupleHeaderGetXmin(enumval_tup->t_data); if (!TransactionIdIsInProgress(xmin) && TransactionIdDidCommit(xmin)) return; /* * Check if the enum value is listed as uncommitted. If not, it's safe, * because it can't be shorter-lived than its owning type. (This'd also * be false for values made by other transactions; but the previous tests * should have handled all of those.) */ if (!EnumUncommitted(en->oid)) return; /* * There might well be other tests we could do here to narrow down the * unsafe conditions, but for now just raise an exception. 
*/ ereport(ERROR, (errcode(ERRCODE_UNSAFE_NEW_ENUM_VALUE_USAGE), errmsg("unsafe use of new value \"%s\" of enum type %s", NameStr(en->enumlabel), format_type_be(en->enumtypid)), errhint("New enum values must be committed before they can be used."))); } /* Basic I/O support */ Datum enum_in(PG_FUNCTION_ARGS) { char *name = PG_GETARG_CSTRING(0); Oid enumtypoid = PG_GETARG_OID(1); Node *escontext = fcinfo->context; Oid enumoid; HeapTuple tup; /* must check length to prevent Assert failure within SearchSysCache */ if (strlen(name) >= NAMEDATALEN) ereturn(escontext, (Datum) 0, (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), errmsg("invalid input value for enum %s: \"%s\"", format_type_be(enumtypoid), name))); tup = SearchSysCache2(ENUMTYPOIDNAME, ObjectIdGetDatum(enumtypoid), CStringGetDatum(name)); if (!HeapTupleIsValid(tup)) ereturn(escontext, (Datum) 0, (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), errmsg("invalid input value for enum %s: \"%s\"", format_type_be(enumtypoid), name))); /* * Check it's safe to use in SQL. Perhaps we should take the trouble to * report "unsafe use" softly; but it's unclear that it's worth the * trouble, or indeed that that is a legitimate bad-input case at all * rather than an implementation shortcoming. */ check_safe_enum_use(tup); /* * This comes from pg_enum.oid and stores system oids in user tables. This * oid must be preserved by binary upgrades. 
*/ enumoid = ((Form_pg_enum) GETSTRUCT(tup))->oid; ReleaseSysCache(tup); PG_RETURN_OID(enumoid); } Datum enum_out(PG_FUNCTION_ARGS) { Oid enumval = PG_GETARG_OID(0); char *result; HeapTuple tup; Form_pg_enum en; tup = SearchSysCache1(ENUMOID, ObjectIdGetDatum(enumval)); if (!HeapTupleIsValid(tup)) ereport(ERROR, (errcode(ERRCODE_INVALID_BINARY_REPRESENTATION), errmsg("invalid internal value for enum: %u", enumval))); en = (Form_pg_enum) GETSTRUCT(tup); result = pstrdup(NameStr(en->enumlabel)); ReleaseSysCache(tup); PG_RETURN_CSTRING(result); } /* Binary I/O support */ Datum enum_recv(PG_FUNCTION_ARGS) { StringInfo buf = (StringInfo) PG_GETARG_POINTER(0); Oid enumtypoid = PG_GETARG_OID(1); Oid enumoid; HeapTuple tup; char *name; int nbytes; name = pq_getmsgtext(buf, buf->len - buf->cursor, &nbytes); /* must check length to prevent Assert failure within SearchSysCache */ if (strlen(name) >= NAMEDATALEN) ereport(ERROR, (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), errmsg("invalid input value for enum %s: \"%s\"", format_type_be(enumtypoid), name))); tup = SearchSysCache2(ENUMTYPOIDNAME, ObjectIdGetDatum(enumtypoid), CStringGetDatum(name)); if (!HeapTupleIsValid(tup)) ereport(ERROR, (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), errmsg("invalid input value for enum %s: \"%s\"", format_type_be(enumtypoid), name))); /* check it's safe to use in SQL */ check_safe_enum_use(tup); enumoid = ((Form_pg_enum) GETSTRUCT(tup))->oid; ReleaseSysCache(tup); pfree(name); PG_RETURN_OID(enumoid); } Datum enum_send(PG_FUNCTION_ARGS) { Oid enumval = PG_GETARG_OID(0); StringInfoData buf; HeapTuple tup; Form_pg_enum en; tup = SearchSysCache1(ENUMOID, ObjectIdGetDatum(enumval)); if (!HeapTupleIsValid(tup)) ereport(ERROR, (errcode(ERRCODE_INVALID_BINARY_REPRESENTATION), errmsg("invalid internal value for enum: %u", enumval))); en = (Form_pg_enum) GETSTRUCT(tup); pq_begintypsend(&buf); pq_sendtext(&buf, NameStr(en->enumlabel), strlen(NameStr(en->enumlabel))); ReleaseSysCache(tup); 
PG_RETURN_BYTEA_P(pq_endtypsend(&buf)); } /* Comparison functions and related */ /* * enum_cmp_internal is the common engine for all the visible comparison * functions, except for enum_eq and enum_ne which can just check for OID * equality directly. */ static int enum_cmp_internal(Oid arg1, Oid arg2, FunctionCallInfo fcinfo) { TypeCacheEntry *tcache; /* * We don't need the typcache except in the hopefully-uncommon case that * one or both Oids are odd. This means that cursory testing of code that * fails to pass flinfo to an enum comparison function might not disclose * the oversight. To make such errors more obvious, Assert that we have a * place to cache even when we take a fast-path exit. */ Assert(fcinfo->flinfo != NULL); /* Equal OIDs are equal no matter what */ if (arg1 == arg2) return 0; /* Fast path: even-numbered Oids are known to compare correctly */ if ((arg1 & 1) == 0 && (arg2 & 1) == 0) { if (arg1 < arg2) return -1; else return 1; } /* Locate the typcache entry for the enum type */ tcache = (TypeCacheEntry *) fcinfo->flinfo->fn_extra; if (tcache == NULL) { HeapTuple enum_tup; Form_pg_enum en; Oid typeoid; /* Get the OID of the enum type containing arg1 */ enum_tup = SearchSysCache1(ENUMOID, ObjectIdGetDatum(arg1)); if (!HeapTupleIsValid(enum_tup)) ereport(ERROR, (errcode(ERRCODE_INVALID_BINARY_REPRESENTATION), errmsg("invalid internal value for enum: %u", arg1))); en = (Form_pg_enum) GETSTRUCT(enum_tup); typeoid = en->enumtypid; ReleaseSysCache(enum_tup); /* Now locate and remember the typcache entry */ tcache = lookup_type_cache(typeoid, 0); fcinfo->flinfo->fn_extra = tcache; } /* The remaining comparison logic is in typcache.c */ return compare_values_of_enum(tcache, arg1, arg2); } Datum enum_lt(PG_FUNCTION_ARGS) { Oid a = PG_GETARG_OID(0); Oid b = PG_GETARG_OID(1); PG_RETURN_BOOL(enum_cmp_internal(a, b, fcinfo) < 0); } Datum enum_le(PG_FUNCTION_ARGS) { Oid a = PG_GETARG_OID(0); Oid b = PG_GETARG_OID(1); PG_RETURN_BOOL(enum_cmp_internal(a, b, fcinfo) 
/* NOTE(review): tail of a comparison function whose beginning lies outside
 * this chunk; left untouched. */
<= 0); }

/* enum_eq: equality of two enum values; OID identity suffices, no sort-order
 * lookup needed. */
Datum
enum_eq(PG_FUNCTION_ARGS)
{
	Oid			a = PG_GETARG_OID(0);
	Oid			b = PG_GETARG_OID(1);

	PG_RETURN_BOOL(a == b);
}

/* enum_ne: inequality; likewise a pure OID comparison. */
Datum
enum_ne(PG_FUNCTION_ARGS)
{
	Oid			a = PG_GETARG_OID(0);
	Oid			b = PG_GETARG_OID(1);

	PG_RETURN_BOOL(a != b);
}

/* enum_ge: ordering comparison via enum_cmp_internal (sort-order aware). */
Datum
enum_ge(PG_FUNCTION_ARGS)
{
	Oid			a = PG_GETARG_OID(0);
	Oid			b = PG_GETARG_OID(1);

	PG_RETURN_BOOL(enum_cmp_internal(a, b, fcinfo) >= 0);
}

/* enum_gt: strict ordering comparison. */
Datum
enum_gt(PG_FUNCTION_ARGS)
{
	Oid			a = PG_GETARG_OID(0);
	Oid			b = PG_GETARG_OID(1);

	PG_RETURN_BOOL(enum_cmp_internal(a, b, fcinfo) > 0);
}

/* enum_smaller: MIN() support function — returns the lesser input. */
Datum
enum_smaller(PG_FUNCTION_ARGS)
{
	Oid			a = PG_GETARG_OID(0);
	Oid			b = PG_GETARG_OID(1);

	PG_RETURN_OID(enum_cmp_internal(a, b, fcinfo) < 0 ? a : b);
}

/* enum_larger: MAX() support function — returns the greater input. */
Datum
enum_larger(PG_FUNCTION_ARGS)
{
	Oid			a = PG_GETARG_OID(0);
	Oid			b = PG_GETARG_OID(1);

	PG_RETURN_OID(enum_cmp_internal(a, b, fcinfo) > 0 ? a : b);
}

/* enum_cmp: btree support — three-way comparison result. */
Datum
enum_cmp(PG_FUNCTION_ARGS)
{
	Oid			a = PG_GETARG_OID(0);
	Oid			b = PG_GETARG_OID(1);

	PG_RETURN_INT32(enum_cmp_internal(a, b, fcinfo));
}

/* Enum programming support functions */

/*
 * enum_endpoint: common code for enum_first/enum_last
 *
 * Returns the OID of the first (ForwardScanDirection) or last
 * (BackwardScanDirection) member of the enum type, or InvalidOid
 * for an empty enum.
 */
static Oid
enum_endpoint(Oid enumtypoid, ScanDirection direction)
{
	Relation	enum_rel;
	Relation	enum_idx;
	SysScanDesc enum_scan;
	HeapTuple	enum_tuple;
	ScanKeyData skey;
	Oid			minmax;

	/*
	 * Find the first/last enum member using pg_enum_typid_sortorder_index.
	 * Note we must not use the syscache. See comments for RenumberEnumType
	 * in catalog/pg_enum.c for more info.
	 */
	ScanKeyInit(&skey, Anum_pg_enum_enumtypid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(enumtypoid));

	enum_rel = table_open(EnumRelationId, AccessShareLock);
	enum_idx = index_open(EnumTypIdSortOrderIndexId, AccessShareLock);
	enum_scan = systable_beginscan_ordered(enum_rel, enum_idx, NULL, 1, &skey);

	/* only the first tuple in scan direction is needed */
	enum_tuple = systable_getnext_ordered(enum_scan, direction);
	if (HeapTupleIsValid(enum_tuple))
	{
		/* check it's safe to use in SQL */
		check_safe_enum_use(enum_tuple);
		minmax = ((Form_pg_enum) GETSTRUCT(enum_tuple))->oid;
	}
	else
	{
		/* should only happen with an empty enum */
		minmax = InvalidOid;
	}

	systable_endscan_ordered(enum_scan);
	index_close(enum_idx, AccessShareLock);
	table_close(enum_rel, AccessShareLock);

	return minmax;
}

/* enum_first: SQL-callable — first member of the enum type inferred from the
 * call site's argument type.  Errors if the enum is empty. */
Datum
enum_first(PG_FUNCTION_ARGS)
{
	Oid			enumtypoid;
	Oid			min;

	/*
	 * We rely on being able to get the specific enum type from the calling
	 * expression tree. Notice that the actual value of the argument isn't
	 * examined at all; in particular it might be NULL.
	 */
	enumtypoid = get_fn_expr_argtype(fcinfo->flinfo, 0);
	if (enumtypoid == InvalidOid)
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("could not determine actual enum type")));

	/* Get the OID using the index */
	min = enum_endpoint(enumtypoid, ForwardScanDirection);

	if (!OidIsValid(min))
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("enum %s contains no values",
						format_type_be(enumtypoid))));

	PG_RETURN_OID(min);
}

/* enum_last: mirror image of enum_first — last member in sort order. */
Datum
enum_last(PG_FUNCTION_ARGS)
{
	Oid			enumtypoid;
	Oid			max;

	/*
	 * We rely on being able to get the specific enum type from the calling
	 * expression tree. Notice that the actual value of the argument isn't
	 * examined at all; in particular it might be NULL.
	 */
	enumtypoid = get_fn_expr_argtype(fcinfo->flinfo, 0);
	if (enumtypoid == InvalidOid)
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("could not determine actual enum type")));

	/* Get the OID using the index */
	max = enum_endpoint(enumtypoid, BackwardScanDirection);

	if (!OidIsValid(max))
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("enum %s contains no values",
						format_type_be(enumtypoid))));

	PG_RETURN_OID(max);
}

/* 2-argument variant of enum_range */
/* NULL bounds are mapped to InvalidOid, meaning "open" on that side. */
Datum
enum_range_bounds(PG_FUNCTION_ARGS)
{
	Oid			lower;
	Oid			upper;
	Oid			enumtypoid;

	if (PG_ARGISNULL(0))
		lower = InvalidOid;
	else
		lower = PG_GETARG_OID(0);
	if (PG_ARGISNULL(1))
		upper = InvalidOid;
	else
		upper = PG_GETARG_OID(1);

	/*
	 * We rely on being able to get the specific enum type from the calling
	 * expression tree. The generic type mechanism should have ensured that
	 * both are of the same type.
	 */
	enumtypoid = get_fn_expr_argtype(fcinfo->flinfo, 0);
	if (enumtypoid == InvalidOid)
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("could not determine actual enum type")));

	PG_RETURN_ARRAYTYPE_P(enum_range_internal(enumtypoid, lower, upper));
}

/* 1-argument variant of enum_range */
/* Returns every member of the enum type (both bounds open). */
Datum
enum_range_all(PG_FUNCTION_ARGS)
{
	Oid			enumtypoid;

	/*
	 * We rely on being able to get the specific enum type from the calling
	 * expression tree. Notice that the actual value of the argument isn't
	 * examined at all; in particular it might be NULL.
	 */
	enumtypoid = get_fn_expr_argtype(fcinfo->flinfo, 0);
	if (enumtypoid == InvalidOid)
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("could not determine actual enum type")));

	PG_RETURN_ARRAYTYPE_P(enum_range_internal(enumtypoid, InvalidOid, InvalidOid));
}

/*
 * enum_range_internal: build an array of the enum's members, in sort order,
 * restricted to [lower, upper] when those OIDs are valid.  An invalid lower
 * bound means "start at the beginning"; an invalid upper means "to the end".
 */
static ArrayType *
enum_range_internal(Oid enumtypoid, Oid lower, Oid upper)
{
	ArrayType  *result;
	Relation	enum_rel;
	Relation	enum_idx;
	SysScanDesc enum_scan;
	HeapTuple	enum_tuple;
	ScanKeyData skey;
	Datum	   *elems;
	int			max,
				cnt;
	bool		left_found;

	/*
	 * Scan the enum members in order using pg_enum_typid_sortorder_index.
	 * Note we must not use the syscache. See comments for RenumberEnumType
	 * in catalog/pg_enum.c for more info.
	 */
	ScanKeyInit(&skey, Anum_pg_enum_enumtypid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(enumtypoid));

	enum_rel = table_open(EnumRelationId, AccessShareLock);
	enum_idx = index_open(EnumTypIdSortOrderIndexId, AccessShareLock);
	enum_scan = systable_beginscan_ordered(enum_rel, enum_idx, NULL, 1, &skey);

	max = 64;					/* initial array capacity; doubled on demand */
	elems = (Datum *) palloc(max * sizeof(Datum));
	cnt = 0;
	left_found = !OidIsValid(lower);	/* open lower bound: collect from start */

	while (HeapTupleIsValid(enum_tuple = systable_getnext_ordered(enum_scan, ForwardScanDirection)))
	{
		Oid			enum_oid = ((Form_pg_enum) GETSTRUCT(enum_tuple))->oid;

		if (!left_found && lower == enum_oid)
			left_found = true;

		if (left_found)
		{
			/* check it's safe to use in SQL */
			check_safe_enum_use(enum_tuple);

			if (cnt >= max)
			{
				max *= 2;
				elems = (Datum *) repalloc(elems, max * sizeof(Datum));
			}
			elems[cnt++] = ObjectIdGetDatum(enum_oid);
		}

		/* upper bound is inclusive; stop once it has been collected */
		if (OidIsValid(upper) && upper == enum_oid)
			break;
	}

	systable_endscan_ordered(enum_scan);
	index_close(enum_idx, AccessShareLock);
	table_close(enum_rel, AccessShareLock);

	/* and build the result array */
	/* note this hardwires some details about the representation of Oid */
	result = construct_array(elems, cnt, enumtypoid, sizeof(Oid), true, TYPALIGN_INT);

	pfree(elems);

	return result;
}
c
github
https://github.com/postgres/postgres
src/backend/utils/adt/enum.c
import collections
import subprocess
import warnings

from . import compat
from . import protocols
from . import transports
from .coroutines import coroutine
from .log import logger


class BaseSubprocessTransport(transports.SubprocessTransport):
    """Base transport wrapping a child process and its stdio pipes.

    Subclasses implement _start() to actually spawn the process (setting
    self._proc).  Pipe transports for stdin/stdout/stderr are connected
    asynchronously by _connect_pipes(); protocol callbacks issued before
    connection_made() are buffered in self._pending_calls.
    """

    def __init__(self, loop, protocol, args, shell,
                 stdin, stdout, stderr, bufsize,
                 waiter=None, extra=None, **kwargs):
        super().__init__(extra)
        self._closed = False
        self._protocol = protocol
        self._loop = loop
        self._proc = None
        self._pid = None
        self._returncode = None
        self._exit_waiters = []
        # Buffers protocol callbacks until connection_made() has been called.
        self._pending_calls = collections.deque()
        self._pipes = {}
        self._finished = False

        # Pre-register fds that will get a pipe transport; values are filled
        # in later by _connect_pipes().
        if stdin == subprocess.PIPE:
            self._pipes[0] = None
        if stdout == subprocess.PIPE:
            self._pipes[1] = None
        if stderr == subprocess.PIPE:
            self._pipes[2] = None

        # Create the child process: set the _proc attribute
        try:
            self._start(args=args, shell=shell, stdin=stdin, stdout=stdout,
                        stderr=stderr, bufsize=bufsize, **kwargs)
        except:
            # Ensure no half-started process leaks, then propagate.
            self.close()
            raise

        self._pid = self._proc.pid
        self._extra['subprocess'] = self._proc

        if self._loop.get_debug():
            if isinstance(args, (bytes, str)):
                program = args
            else:
                program = args[0]
            logger.debug('process %r created: pid %s',
                         program, self._pid)

        self._loop.create_task(self._connect_pipes(waiter))

    def __repr__(self):
        # Debug-friendly summary: pid, return code / running state, pipes.
        info = [self.__class__.__name__]
        if self._closed:
            info.append('closed')
        if self._pid is not None:
            info.append('pid=%s' % self._pid)
        if self._returncode is not None:
            info.append('returncode=%s' % self._returncode)
        elif self._pid is not None:
            info.append('running')
        else:
            info.append('not started')

        stdin = self._pipes.get(0)
        if stdin is not None:
            info.append('stdin=%s' % stdin.pipe)

        stdout = self._pipes.get(1)
        stderr = self._pipes.get(2)
        if stdout is not None and stderr is stdout:
            # stderr redirected into stdout: show them as one entry
            info.append('stdout=stderr=%s' % stdout.pipe)
        else:
            if stdout is not None:
                info.append('stdout=%s' % stdout.pipe)
            if stderr is not None:
                info.append('stderr=%s' % stderr.pipe)

        return '<%s>' % ' '.join(info)

    def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs):
        """Spawn the child process and set self._proc (subclass hook)."""
        raise NotImplementedError

    def set_protocol(self, protocol):
        self._protocol = protocol

    def get_protocol(self):
        return self._protocol

    def is_closing(self):
        return self._closed

    def close(self):
        """Close all pipe transports and kill the child if still running."""
        if self._closed:
            return
        self._closed = True

        for proto in self._pipes.values():
            if proto is None:
                continue
            proto.pipe.close()

        if (self._proc is not None
                # the child process finished?
                and self._returncode is None
                # the child process finished but the transport was not notified yet?
                and self._proc.poll() is None
                ):
            if self._loop.get_debug():
                logger.warning('Close running child process: kill %r', self)

            try:
                self._proc.kill()
            except ProcessLookupError:
                pass

            # Don't clear the _proc reference yet: _post_init() may still run

    # On Python 3.3 and older, objects with a destructor part of a reference
    # cycle are never destroyed. It's not more the case on Python 3.4 thanks
    # to the PEP 442.
    if compat.PY34:
        def __del__(self):
            if not self._closed:
                warnings.warn("unclosed transport %r" % self, ResourceWarning)
                self.close()

    def get_pid(self):
        return self._pid

    def get_returncode(self):
        return self._returncode

    def get_pipe_transport(self, fd):
        # NOTE(review): before _connect_pipes() completes, self._pipes[fd]
        # may still be None, making `.pipe` raise AttributeError — presumably
        # callers only use this after connection_made(); verify against caller.
        if fd in self._pipes:
            return self._pipes[fd].pipe
        else:
            return None

    def _check_proc(self):
        """Raise ProcessLookupError if the child was never started / cleared."""
        if self._proc is None:
            raise ProcessLookupError()

    def send_signal(self, signal):
        self._check_proc()
        self._proc.send_signal(signal)

    def terminate(self):
        self._check_proc()
        self._proc.terminate()

    def kill(self):
        self._check_proc()
        self._proc.kill()

    @coroutine
    def _connect_pipes(self, waiter):
        """Attach pipe transports to the child's stdio, then flush buffered
        protocol callbacks and signal *waiter*."""
        try:
            proc = self._proc
            loop = self._loop

            if proc.stdin is not None:
                _, pipe = yield from loop.connect_write_pipe(
                    lambda: WriteSubprocessPipeProto(self, 0),
                    proc.stdin)
                self._pipes[0] = pipe

            if proc.stdout is not None:
                _, pipe = yield from loop.connect_read_pipe(
                    lambda: ReadSubprocessPipeProto(self, 1),
                    proc.stdout)
                self._pipes[1] = pipe

            if proc.stderr is not None:
                _, pipe = yield from loop.connect_read_pipe(
                    lambda: ReadSubprocessPipeProto(self, 2),
                    proc.stderr)
                self._pipes[2] = pipe

            assert self._pending_calls is not None

            # connection_made() must run before any buffered callbacks.
            loop.call_soon(self._protocol.connection_made, self)
            for callback, data in self._pending_calls:
                loop.call_soon(callback, *data)
            # From now on _call() dispatches directly (see _call()).
            self._pending_calls = None
        except Exception as exc:
            if waiter is not None and not waiter.cancelled():
                waiter.set_exception(exc)
        else:
            if waiter is not None and not waiter.cancelled():
                waiter.set_result(None)

    def _call(self, cb, *data):
        # Buffer until connection_made() has been scheduled; then dispatch.
        if self._pending_calls is not None:
            self._pending_calls.append((cb, data))
        else:
            self._loop.call_soon(cb, *data)

    def _pipe_connection_lost(self, fd, exc):
        self._call(self._protocol.pipe_connection_lost, fd, exc)
        self._try_finish()

    def _pipe_data_received(self, fd, data):
        self._call(self._protocol.pipe_data_received, fd, data)

    def _process_exited(self, returncode):
        """Record the child's exit status and wake waiters (watcher callback)."""
        assert returncode is not None, returncode
        assert self._returncode is None, self._returncode
        if self._loop.get_debug():
            logger.info('%r exited with return code %r',
                        self, returncode)
        self._returncode = returncode
        if self._proc.returncode is None:
            # asyncio uses a child watcher: copy the status into the Popen
            # object. On Python 3.6, it is required to avoid a ResourceWarning.
            self._proc.returncode = returncode
        self._call(self._protocol.process_exited)
        self._try_finish()

        # wake up futures waiting for wait()
        for waiter in self._exit_waiters:
            if not waiter.cancelled():
                waiter.set_result(returncode)
        self._exit_waiters = None

    @coroutine
    def _wait(self):
        """Wait until the process exit and return the process return code.

        This method is a coroutine."""
        if self._returncode is not None:
            return self._returncode

        waiter = self._loop.create_future()
        self._exit_waiters.append(waiter)
        return (yield from waiter)

    def _try_finish(self):
        # connection_lost is delivered only once the process has exited AND
        # every pipe transport has disconnected.
        assert not self._finished
        if self._returncode is None:
            return
        if all(p is not None and p.disconnected
               for p in self._pipes.values()):
            self._finished = True
            self._call(self._call_connection_lost, None)

    def _call_connection_lost(self, exc):
        try:
            self._protocol.connection_lost(exc)
        finally:
            # Break reference cycles so garbage collection is prompt.
            self._loop = None
            self._proc = None
            self._protocol = None


class WriteSubprocessPipeProto(protocols.BaseProtocol):
    """Protocol attached to one of the child's stdio write pipes; forwards
    events to the owning subprocess transport."""

    def __init__(self, proc, fd):
        self.proc = proc
        self.fd = fd
        self.pipe = None
        self.disconnected = False

    def connection_made(self, transport):
        self.pipe = transport

    def __repr__(self):
        return ('<%s fd=%s pipe=%r>'
                % (self.__class__.__name__, self.fd, self.pipe))

    def connection_lost(self, exc):
        self.disconnected = True
        self.proc._pipe_connection_lost(self.fd, exc)
        self.proc = None

    def pause_writing(self):
        self.proc._protocol.pause_writing()

    def resume_writing(self):
        self.proc._protocol.resume_writing()


class ReadSubprocessPipeProto(WriteSubprocessPipeProto,
                              protocols.Protocol):
    """Read-side variant: additionally forwards received data."""

    def data_received(self, data):
        self.proc._pipe_data_received(self.fd, data)
unknown
codeparrot/codeparrot-clean
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from nova import db
from nova.objects import instance
from nova.objects import security_group
from nova.tests.objects import test_objects


# Canonical DB-layer representation of a security group, used as the mox
# return value throughout these tests.
fake_secgroup = {
    'created_at': None,
    'updated_at': None,
    'deleted_at': None,
    'deleted': None,
    'id': 1,
    'name': 'fake-name',
    'description': 'fake-desc',
    'user_id': 'fake-user',
    'project_id': 'fake-project',
    }


class _TestSecurityGroupObject(object):
    """Shared test cases for the SecurityGroup object.

    Run both locally and via RPC through the TestSecurityGroupObject*
    subclasses below.  The db layer is stubbed with mox, so each test
    records the expected db call(s), replays, then exercises the object.
    """

    def _fix_deleted(self, db_secgroup):
        # NOTE(danms): Account for the difference in 'deleted'
        return dict(db_secgroup.items(), deleted=False)

    def test_get(self):
        """Fetching by id hydrates the object and leaves nothing dirty."""
        self.mox.StubOutWithMock(db, 'security_group_get')
        db.security_group_get(self.context, 1).AndReturn(fake_secgroup)
        self.mox.ReplayAll()
        secgroup = security_group.SecurityGroup.get(self.context, 1)
        self.assertEqual(self._fix_deleted(fake_secgroup),
                         dict(secgroup.items()))
        self.assertEqual(secgroup.obj_what_changed(), set())
        self.assertRemotes()

    def test_get_by_name(self):
        """Fetching by (project, name) behaves like fetching by id."""
        self.mox.StubOutWithMock(db, 'security_group_get_by_name')
        db.security_group_get_by_name(self.context, 'fake-project',
                                      'fake-name').AndReturn(fake_secgroup)
        self.mox.ReplayAll()
        secgroup = security_group.SecurityGroup.get_by_name(self.context,
                                                            'fake-project',
                                                            'fake-name')
        self.assertEqual(self._fix_deleted(fake_secgroup),
                         dict(secgroup.items()))
        self.assertEqual(secgroup.obj_what_changed(), set())
        self.assertRemotes()

    def test_in_use(self):
        """in_use() delegates to db.security_group_in_use with the group id."""
        self.mox.StubOutWithMock(db, 'security_group_in_use')
        db.security_group_in_use(self.context, 123).AndReturn(True)
        self.mox.ReplayAll()
        secgroup = security_group.SecurityGroup()
        secgroup.id = 123
        self.assertTrue(secgroup.in_use(self.context))
        self.assertRemotes()

    def test_save(self):
        """save() sends only the changed fields and re-hydrates from the
        db result (which may differ from what was sent)."""
        self.mox.StubOutWithMock(db, 'security_group_update')
        updated_secgroup = dict(fake_secgroup, project_id='changed')
        db.security_group_update(self.context, 1,
                                 {'description': 'foobar'}).AndReturn(
                                     updated_secgroup)
        self.mox.ReplayAll()
        secgroup = security_group.SecurityGroup._from_db_object(
            security_group.SecurityGroup(), fake_secgroup)
        secgroup.description = 'foobar'
        secgroup.save(self.context)
        self.assertEqual(self._fix_deleted(updated_secgroup),
                         dict(secgroup.items()))
        self.assertEqual(secgroup.obj_what_changed(), set())
        self.assertRemotes()

    def test_save_no_changes(self):
        """save() with a clean object must not touch the db at all
        (mox verifies no call to the stub was recorded or made)."""
        self.mox.StubOutWithMock(db, 'security_group_update')
        self.mox.ReplayAll()
        secgroup = security_group.SecurityGroup._from_db_object(
            security_group.SecurityGroup(), fake_secgroup)
        secgroup.save(self.context)

    def test_refresh(self):
        """refresh() re-reads the db row and overwrites local fields."""
        updated_secgroup = dict(fake_secgroup, description='changed')
        self.mox.StubOutWithMock(db, 'security_group_get')
        db.security_group_get(self.context, 1).AndReturn(updated_secgroup)
        self.mox.ReplayAll()
        secgroup = security_group.SecurityGroup._from_db_object(
            security_group.SecurityGroup(), fake_secgroup)
        secgroup.refresh(self.context)
        self.assertEqual(self._fix_deleted(updated_secgroup),
                         dict(secgroup.items()))
        self.assertEqual(secgroup.obj_what_changed(), set())
        self.assertRemotes()


class TestSecurityGroupObject(test_objects._LocalTest,
                              _TestSecurityGroupObject):
    # Runs the shared cases against the local (in-process) object backend.
    pass


class TestSecurityGroupObjectRemote(test_objects._RemoteTest,
                                    _TestSecurityGroupObject):
    # Runs the shared cases over the RPC (conductor) object backend.
    pass


# Two distinct rows for list-returning APIs.
fake_secgroups = [
    dict(fake_secgroup, id=1, name='secgroup1'),
    dict(fake_secgroup, id=2, name='secgroup2'),
    ]


class _TestSecurityGroupListObject(object):
    """Shared test cases for SecurityGroupList lookups."""

    def test_get_all(self):
        """get_all() wraps each db row in a SecurityGroup bound to context."""
        self.mox.StubOutWithMock(db, 'security_group_get_all')
        db.security_group_get_all(self.context).AndReturn(fake_secgroups)
        self.mox.ReplayAll()
        secgroup_list = security_group.SecurityGroupList.get_all(self.context)
        for i in range(len(fake_secgroups)):
            self.assertTrue(isinstance(secgroup_list[i],
                                       security_group.SecurityGroup))
            self.assertEqual(fake_secgroups[i]['id'],
                             secgroup_list[i]['id'])
            self.assertEqual(secgroup_list[i]._context, self.context)

    def test_get_by_project(self):
        """get_by_project() filters through the matching db API."""
        self.mox.StubOutWithMock(db, 'security_group_get_by_project')
        db.security_group_get_by_project(self.context,
                                         'fake-project').AndReturn(
                                             fake_secgroups)
        self.mox.ReplayAll()
        secgroup_list = security_group.SecurityGroupList.get_by_project(
            self.context, 'fake-project')
        for i in range(len(fake_secgroups)):
            self.assertTrue(isinstance(secgroup_list[i],
                                       security_group.SecurityGroup))
            self.assertEqual(fake_secgroups[i]['id'],
                             secgroup_list[i]['id'])

    def test_get_by_instance(self):
        """get_by_instance() keys the db lookup on the instance uuid."""
        inst = instance.Instance()
        inst.uuid = 'fake-inst-uuid'
        self.mox.StubOutWithMock(db, 'security_group_get_by_instance')
        db.security_group_get_by_instance(self.context,
                                          'fake-inst-uuid').AndReturn(
                                              fake_secgroups)
        self.mox.ReplayAll()
        secgroup_list = security_group.SecurityGroupList.get_by_instance(
            self.context, inst)
        for i in range(len(fake_secgroups)):
            self.assertTrue(isinstance(secgroup_list[i],
                                       security_group.SecurityGroup))
            self.assertEqual(fake_secgroups[i]['id'],
                             secgroup_list[i]['id'])


class TestSecurityGroupListObject(test_objects._LocalTest,
                                  _TestSecurityGroupListObject):
    pass


class TestSecurityGroupListObjectRemote(test_objects._RemoteTest,
                                        _TestSecurityGroupListObject):
    pass
unknown
codeparrot/codeparrot-clean
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * Classes for hadoop-tos ops. */ @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce", "YARN", "Hive"}) @InterfaceStability.Evolving package org.apache.hadoop.fs.tosfs.ops; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability;
java
github
https://github.com/apache/hadoop
hadoop-cloud-storage-project/hadoop-tos/src/main/java/org/apache/hadoop/fs/tosfs/ops/package-info.java
#!/usr/bin/env bash
# Integration tests for Ansible's delegate_to behaviour.
# -e: abort on first failure; -u: error on unset vars; -x: trace commands.

set -eux

platform="$(uname)"

# On BSD/macOS, add loopback aliases (127.0.0.3/.4/.254) used by the
# inventories; remember which already existed so teardown leaves them alone.
function setup() {
    if [[ "${platform}" == "FreeBSD" ]] || [[ "${platform}" == "Darwin" ]]; then
        ifconfig lo0

        existing=$(ifconfig lo0 | grep '^[[:blank:]]inet 127\.0\.0\. ' || true)
        echo "${existing}"

        for i in 3 4 254; do
            ip="127.0.0.${i}"

            if [[ "${existing}" != *"${ip}"* ]]; then
                ifconfig lo0 alias "${ip}" up
            fi
        done

        ifconfig lo0
    fi
}

# Remove only the aliases setup added; runs via the EXIT trap below, so
# ${existing} is guaranteed to be set whenever the guarded branch executes.
function teardown() {
    if [[ "${platform}" == "FreeBSD" ]] || [[ "${platform}" == "Darwin" ]]; then
        for i in 3 4 254; do
            ip="127.0.0.${i}"

            if [[ "${existing}" != *"${ip}"* ]]; then
                ifconfig lo0 -alias "${ip}"
            fi
        done

        ifconfig lo0
    fi
}

setup

trap teardown EXIT

ANSIBLE_SSH_ARGS='-C -o ControlMaster=auto -o ControlPersist=60s -o UserKnownHostsFile=/dev/null' \
    ANSIBLE_HOST_KEY_CHECKING=false ansible-playbook test_delegate_to.yml -i inventory -v "$@"

# this test is not doing what it says it does, also relies on var that should not be available
#ansible-playbook test_loop_control.yml -v "$@"

ansible-playbook test_delegate_to_loop_randomness.yml -i inventory -v "$@"

ansible-playbook delegate_and_nolog.yml -i inventory -v "$@"

ansible-playbook delegate_facts_block.yml -i inventory -v "$@"

ansible-playbook test_delegate_to_loop_caching.yml -i inventory -v "$@"

# ensure we are using correct settings when delegating
ANSIBLE_TIMEOUT=3 ansible-playbook delegate_vars_handling.yml -i inventory -i delegate_vars_inventory -v | tee out
# Fail if the raw template leaked into the callback output instead of the
# rendered address.
if grep '{{ hostip }}' out; then
    echo 'Callback displayed the ansible_host template instead of the rendered value.'
    exit 1
fi

ansible-playbook has_hostvars.yml -i inventory -v "$@"

# test ansible_x_interpreter
# python
source virtualenv.sh
# Create two differently-named symlinks to the venv python so the playbook
# can select interpreters by name.
(
cd "${OUTPUT_DIR}"/venv/bin
ln -s python firstpython
ln -s python secondpython
)
ansible-playbook verify_interpreter.yml -i inventory_interpreters -v "$@"
ansible-playbook discovery_applied.yml -i inventory -v "$@"
ansible-playbook resolve_vars.yml -i inventory -v "$@"
ansible-playbook test_delegate_to_lookup_context.yml -i inventory -v "$@"
ansible-playbook delegate_local_from_root.yml -i inventory -v "$@" -e 'ansible_user=root'
ansible-playbook delegate_with_fact_from_delegate_host.yml "$@"
ansible-playbook delegate_facts_loop.yml -i inventory -v "$@"
ansible-playbook test_random_delegate_to_with_loop.yml -i inventory -v "$@"
ansible-playbook test_delegated_async.yml -v "$@"

# Run playbook multiple times to ensure there are no false-negatives
for i in $(seq 0 10); do ansible-playbook test_random_delegate_to_without_loop.yml -i inventory -v "$@"; done;
unknown
github
https://github.com/ansible/ansible
test/integration/targets/delegate_to/runme.sh
from django.conf import settings
from django.contrib.sessions.backends.base import SessionBase
from django.core import signing


class SessionStore(SessionBase):
    """Session backend that stores the session data in the cookie itself.

    There is no server-side storage at all: the session key *is* the signed,
    serialized session payload, so every operation reduces to signing,
    verifying, or flagging the cookie for (re)write.
    """

    def load(self):
        """
        Decode the session payload straight out of the session key.

        Opposite of _get_session_key(); any verification or deserialization
        failure (BadSignature, ValueError, unpickling errors, ...) resets the
        session and yields an empty dict.
        """
        try:
            return signing.loads(
                self.session_key,
                salt='django.contrib.sessions.backends.signed_cookies',
                serializer=self.serializer,
                # This doesn't handle non-default expiry dates, see #19201
                max_age=settings.SESSION_COOKIE_AGE,
            )
        except Exception:
            # Any failure means the cookie can't be trusted: start fresh.
            self.create()
        return {}

    def create(self):
        """
        Flag the session as modified so a cookie is written on this request;
        no server-side key needs to be generated.
        """
        self.modified = True

    def save(self, must_create=False):
        """
        Serialize and sign the session data into the session key and flag the
        session modified so the cookie is sent back to the client.
        """
        self._session_key = self._get_session_key()
        self.modified = True

    def exists(self, session_key=None):
        """
        Always False: with client-side cookies there is no shared store in
        which a key could already exist.
        """
        return False

    def delete(self, session_key=None):
        """
        Clear the key and the cached data, flagging the session modified so
        an emptied cookie is pushed to the client.
        """
        self._session_key = ''
        self._session_cache = {}
        self.modified = True

    def cycle_key(self):
        """
        Keep the data but issue a new key.  Since the key is derived from the
        data, a plain save() produces the fresh cookie automatically.
        """
        self.save()

    def _get_session_key(self):
        """
        Build the session key: not a random identifier, but a compressed,
        signed, url-safe base64 encoding of the session data itself.
        """
        return signing.dumps(
            self._session,
            compress=True,
            salt='django.contrib.sessions.backends.signed_cookies',
            serializer=self.serializer,
        )

    @classmethod
    def clear_expired(cls):
        # Expiry is enforced by max_age at load time; nothing to purge.
        pass
unknown
codeparrot/codeparrot-clean
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers import pybindgen.settings import warnings class ErrorHandler(pybindgen.settings.ErrorHandler): def handle_error(self, wrapper, exception, traceback_): warnings.warn("exception %r in wrapper %s" % (exception, wrapper)) return True pybindgen.settings.error_handler = ErrorHandler() import sys def module_init(): root_module = Module('ns.buildings', cpp_namespace='::ns3') return root_module def register_types(module): root_module = module.get_root() ## propagation-environment.h (module 'propagation'): ns3::CitySize [enumeration] module.add_enum('CitySize', ['SmallCity', 'MediumCity', 'LargeCity'], import_from_module='ns.propagation') ## propagation-environment.h (module 'propagation'): ns3::EnvironmentType [enumeration] module.add_enum('EnvironmentType', ['UrbanEnvironment', 'SubUrbanEnvironment', 'OpenAreasEnvironment'], import_from_module='ns.propagation') ## address.h (module 'network'): ns3::Address [class] module.add_class('Address', import_from_module='ns.network') ## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration] module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network') ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class] module.add_class('AttributeConstructionList', import_from_module='ns.core') ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct] module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList']) ## box.h (module 'mobility'): ns3::Box [class] module.add_class('Box', import_from_module='ns.mobility') ## box.h (module 'mobility'): ns3::Box::Side [enumeration] module.add_enum('Side', ['RIGHT', 'LEFT', 'TOP', 'BOTTOM', 'UP', 'DOWN'], outer_class=root_module['ns3::Box'], import_from_module='ns.mobility') ## building-container.h (module 'buildings'): 
ns3::BuildingContainer [class] module.add_class('BuildingContainer') ## building-list.h (module 'buildings'): ns3::BuildingList [class] module.add_class('BuildingList') ## buildings-helper.h (module 'buildings'): ns3::BuildingsHelper [class] module.add_class('BuildingsHelper') ## callback.h (module 'core'): ns3::CallbackBase [class] module.add_class('CallbackBase', import_from_module='ns.core') ## constant-velocity-helper.h (module 'mobility'): ns3::ConstantVelocityHelper [class] module.add_class('ConstantVelocityHelper', import_from_module='ns.mobility') ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class] module.add_class('Ipv4Address', import_from_module='ns.network') ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class] root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address']) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class] module.add_class('Ipv4Mask', import_from_module='ns.network') ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class] module.add_class('Ipv6Address', import_from_module='ns.network') ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class] root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address']) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class] module.add_class('Ipv6Prefix', import_from_module='ns.network') ## node-container.h (module 'network'): ns3::NodeContainer [class] module.add_class('NodeContainer', import_from_module='ns.network') ## object-base.h (module 'core'): ns3::ObjectBase [class] module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core') ## object.h (module 'core'): ns3::ObjectDeleter [struct] module.add_class('ObjectDeleter', import_from_module='ns.core') ## object-factory.h (module 'core'): ns3::ObjectFactory [class] module.add_class('ObjectFactory', import_from_module='ns.core') ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> 
[class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## tag-buffer.h (module 'network'): ns3::TagBuffer [class] module.add_class('TagBuffer', import_from_module='ns.network') ## type-id.h (module 'core'): ns3::TypeId [class] module.add_class('TypeId', import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration] module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct] module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId']) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct] module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId']) ## vector.h (module 'core'): ns3::Vector2D [class] module.add_class('Vector2D', import_from_module='ns.core') ## vector.h (module 'core'): ns3::Vector3D [class] module.add_class('Vector3D', import_from_module='ns.core') ## empty.h (module 'core'): ns3::empty [class] module.add_class('empty', import_from_module='ns.core') ## int64x64-double.h (module 'core'): ns3::int64x64_t [class] module.add_class('int64x64_t', import_from_module='ns.core') ## object.h (module 'core'): ns3::Object [class] module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >']) ## object.h (module 'core'): ns3::Object::AggregateIterator [class] module.add_class('AggregateIterator', import_from_module='ns.core', 
outer_class=root_module['ns3::Object']) ## position-allocator.h (module 'mobility'): ns3::PositionAllocator [class] module.add_class('PositionAllocator', import_from_module='ns.mobility', parent=root_module['ns3::Object']) ## propagation-loss-model.h (module 'propagation'): ns3::PropagationLossModel [class] module.add_class('PropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::Object']) ## position-allocator.h (module 'mobility'): ns3::RandomBoxPositionAllocator [class] module.add_class('RandomBoxPositionAllocator', import_from_module='ns.mobility', parent=root_module['ns3::PositionAllocator']) ## building-position-allocator.h (module 'buildings'): ns3::RandomBuildingPositionAllocator [class] module.add_class('RandomBuildingPositionAllocator', parent=root_module['ns3::PositionAllocator']) ## position-allocator.h (module 'mobility'): ns3::RandomDiscPositionAllocator [class] module.add_class('RandomDiscPositionAllocator', import_from_module='ns.mobility', parent=root_module['ns3::PositionAllocator']) ## propagation-loss-model.h (module 'propagation'): ns3::RandomPropagationLossModel [class] module.add_class('RandomPropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel']) ## position-allocator.h (module 'mobility'): ns3::RandomRectanglePositionAllocator [class] module.add_class('RandomRectanglePositionAllocator', import_from_module='ns.mobility', parent=root_module['ns3::PositionAllocator']) ## building-position-allocator.h (module 'buildings'): ns3::RandomRoomPositionAllocator [class] module.add_class('RandomRoomPositionAllocator', parent=root_module['ns3::PositionAllocator']) ## random-variable-stream.h (module 'core'): ns3::RandomVariableStream [class] module.add_class('RandomVariableStream', import_from_module='ns.core', parent=root_module['ns3::Object']) ## propagation-loss-model.h (module 'propagation'): ns3::RangePropagationLossModel [class] 
module.add_class('RangePropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel']) ## building-position-allocator.h (module 'buildings'): ns3::SameRoomPositionAllocator [class] module.add_class('SameRoomPositionAllocator', parent=root_module['ns3::PositionAllocator']) ## random-variable-stream.h (module 'core'): ns3::SequentialRandomVariable [class] module.add_class('SequentialRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], 
memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## propagation-loss-model.h (module 'propagation'): ns3::ThreeLogDistancePropagationLossModel [class] module.add_class('ThreeLogDistancePropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel']) ## nstime.h (module 'core'): ns3::Time [class] module.add_class('Time', import_from_module='ns.core') ## nstime.h (module 'core'): ns3::Time::Unit [enumeration] module.add_enum('Unit', ['S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core') ## nstime.h (module 'core'): ns3::Time [class] root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t']) ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class] module.add_class('TraceSourceAccessor', 
import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >']) ## random-variable-stream.h (module 'core'): ns3::TriangularRandomVariable [class] module.add_class('TriangularRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## propagation-loss-model.h (module 'propagation'): ns3::TwoRayGroundPropagationLossModel [class] module.add_class('TwoRayGroundPropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel']) ## position-allocator.h (module 'mobility'): ns3::UniformDiscPositionAllocator [class] module.add_class('UniformDiscPositionAllocator', import_from_module='ns.mobility', parent=root_module['ns3::PositionAllocator']) ## random-variable-stream.h (module 'core'): ns3::UniformRandomVariable [class] module.add_class('UniformRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## random-variable-stream.h (module 'core'): ns3::WeibullRandomVariable [class] module.add_class('WeibullRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## random-variable-stream.h (module 'core'): ns3::ZetaRandomVariable [class] module.add_class('ZetaRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## random-variable-stream.h (module 'core'): ns3::ZipfRandomVariable [class] module.add_class('ZipfRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## attribute.h (module 'core'): ns3::AttributeAccessor [class] module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >']) ## attribute.h (module 'core'): ns3::AttributeChecker [class] module.add_class('AttributeChecker', allow_subclassing=False, 
automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >']) ## attribute.h (module 'core'): ns3::AttributeValue [class] module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >']) ## box.h (module 'mobility'): ns3::BoxChecker [class] module.add_class('BoxChecker', import_from_module='ns.mobility', parent=root_module['ns3::AttributeChecker']) ## box.h (module 'mobility'): ns3::BoxValue [class] module.add_class('BoxValue', import_from_module='ns.mobility', parent=root_module['ns3::AttributeValue']) ## building.h (module 'buildings'): ns3::Building [class] module.add_class('Building', parent=root_module['ns3::Object']) ## building.h (module 'buildings'): ns3::Building::BuildingType_t [enumeration] module.add_enum('BuildingType_t', ['Residential', 'Office', 'Commercial'], outer_class=root_module['ns3::Building']) ## building.h (module 'buildings'): ns3::Building::ExtWallsType_t [enumeration] module.add_enum('ExtWallsType_t', ['Wood', 'ConcreteWithWindows', 'ConcreteWithoutWindows', 'StoneBlocks'], outer_class=root_module['ns3::Building']) ## buildings-propagation-loss-model.h (module 'buildings'): ns3::BuildingsPropagationLossModel [class] module.add_class('BuildingsPropagationLossModel', parent=root_module['ns3::PropagationLossModel']) ## callback.h (module 'core'): ns3::CallbackChecker [class] module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## callback.h (module 'core'): ns3::CallbackImplBase [class] module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >']) ## callback.h 
(module 'core'): ns3::CallbackValue [class] module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## random-variable-stream.h (module 'core'): ns3::ConstantRandomVariable [class] module.add_class('ConstantRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## random-variable-stream.h (module 'core'): ns3::DeterministicRandomVariable [class] module.add_class('DeterministicRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## random-variable-stream.h (module 'core'): ns3::EmpiricalRandomVariable [class] module.add_class('EmpiricalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## attribute.h (module 'core'): ns3::EmptyAttributeValue [class] module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## random-variable-stream.h (module 'core'): ns3::ErlangRandomVariable [class] module.add_class('ErlangRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## random-variable-stream.h (module 'core'): ns3::ExponentialRandomVariable [class] module.add_class('ExponentialRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## propagation-loss-model.h (module 'propagation'): ns3::FixedRssLossModel [class] module.add_class('FixedRssLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel']) ## propagation-loss-model.h (module 'propagation'): ns3::FriisPropagationLossModel [class] module.add_class('FriisPropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel']) ## random-variable-stream.h (module 'core'): ns3::GammaRandomVariable [class] module.add_class('GammaRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## 
building-allocator.h (module 'buildings'): ns3::GridBuildingAllocator [class] module.add_class('GridBuildingAllocator', parent=root_module['ns3::Object']) ## position-allocator.h (module 'mobility'): ns3::GridPositionAllocator [class] module.add_class('GridPositionAllocator', import_from_module='ns.mobility', parent=root_module['ns3::PositionAllocator']) ## position-allocator.h (module 'mobility'): ns3::GridPositionAllocator::LayoutType [enumeration] module.add_enum('LayoutType', ['ROW_FIRST', 'COLUMN_FIRST'], outer_class=root_module['ns3::GridPositionAllocator'], import_from_module='ns.mobility') ## hybrid-buildings-propagation-loss-model.h (module 'buildings'): ns3::HybridBuildingsPropagationLossModel [class] module.add_class('HybridBuildingsPropagationLossModel', parent=root_module['ns3::BuildingsPropagationLossModel']) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker [class] module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue [class] module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker [class] module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue [class] module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker [class] module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue [class] module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker [class] 
module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue [class] module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## itu-r-1238-propagation-loss-model.h (module 'buildings'): ns3::ItuR1238PropagationLossModel [class] module.add_class('ItuR1238PropagationLossModel', parent=root_module['ns3::PropagationLossModel']) ## position-allocator.h (module 'mobility'): ns3::ListPositionAllocator [class] module.add_class('ListPositionAllocator', import_from_module='ns.mobility', parent=root_module['ns3::PositionAllocator']) ## propagation-loss-model.h (module 'propagation'): ns3::LogDistancePropagationLossModel [class] module.add_class('LogDistancePropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel']) ## random-variable-stream.h (module 'core'): ns3::LogNormalRandomVariable [class] module.add_class('LogNormalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## propagation-loss-model.h (module 'propagation'): ns3::MatrixPropagationLossModel [class] module.add_class('MatrixPropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel']) ## mobility-model.h (module 'mobility'): ns3::MobilityModel [class] module.add_class('MobilityModel', import_from_module='ns.mobility', parent=root_module['ns3::Object']) ## propagation-loss-model.h (module 'propagation'): ns3::NakagamiPropagationLossModel [class] module.add_class('NakagamiPropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel']) ## net-device.h (module 'network'): ns3::NetDevice [class] module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object']) ## net-device.h (module 'network'): ns3::NetDevice::PacketType [enumeration] 
module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network') ## node.h (module 'network'): ns3::Node [class] module.add_class('Node', import_from_module='ns.network', parent=root_module['ns3::Object']) ## random-variable-stream.h (module 'core'): ns3::NormalRandomVariable [class] module.add_class('NormalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker [class] module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## object-factory.h (module 'core'): ns3::ObjectFactoryValue [class] module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## oh-buildings-propagation-loss-model.h (module 'buildings'): ns3::OhBuildingsPropagationLossModel [class] module.add_class('OhBuildingsPropagationLossModel', parent=root_module['ns3::BuildingsPropagationLossModel']) ## random-variable-stream.h (module 'core'): ns3::ParetoRandomVariable [class] module.add_class('ParetoRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## nstime.h (module 'core'): ns3::TimeChecker [class] module.add_class('TimeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## nstime.h (module 'core'): ns3::TimeValue [class] module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## type-id.h (module 'core'): ns3::TypeIdChecker [class] module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## type-id.h (module 'core'): ns3::TypeIdValue [class] module.add_class('TypeIdValue', import_from_module='ns.core', 
parent=root_module['ns3::AttributeValue']) ## vector.h (module 'core'): ns3::Vector2DChecker [class] module.add_class('Vector2DChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## vector.h (module 'core'): ns3::Vector2DValue [class] module.add_class('Vector2DValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## vector.h (module 'core'): ns3::Vector3DChecker [class] module.add_class('Vector3DChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## vector.h (module 'core'): ns3::Vector3DValue [class] module.add_class('Vector3DValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## address.h (module 'network'): ns3::AddressChecker [class] module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## address.h (module 'network'): ns3::AddressValue [class] module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## buildings-mobility-model.h (module 'buildings'): ns3::BuildingsMobilityModel [class] module.add_class('BuildingsMobilityModel', parent=root_module['ns3::MobilityModel']) typehandlers.add_type_alias('ns3::Vector3DValue', 'ns3::VectorValue') typehandlers.add_type_alias('ns3::Vector3DValue*', 'ns3::VectorValue*') typehandlers.add_type_alias('ns3::Vector3DValue&', 'ns3::VectorValue&') module.add_typedef(root_module['ns3::Vector3DValue'], 'VectorValue') typehandlers.add_type_alias('ns3::Vector3D', 'ns3::Vector') typehandlers.add_type_alias('ns3::Vector3D*', 'ns3::Vector*') typehandlers.add_type_alias('ns3::Vector3D&', 'ns3::Vector&') module.add_typedef(root_module['ns3::Vector3D'], 'Vector') typehandlers.add_type_alias('ns3::Vector3DChecker', 'ns3::VectorChecker') typehandlers.add_type_alias('ns3::Vector3DChecker*', 'ns3::VectorChecker*') typehandlers.add_type_alias('ns3::Vector3DChecker&', 'ns3::VectorChecker&') 
    module.add_typedef(root_module['ns3::Vector3DChecker'], 'VectorChecker')

    ## Register a nested module for the namespace FatalImpl
    nested_module = module.add_cpp_namespace('FatalImpl')
    register_types_ns3_FatalImpl(nested_module)


def register_types_ns3_FatalImpl(module):
    """Register types declared in the ns3::FatalImpl namespace.

    Generated placeholder: the namespace currently exposes no bindable
    types, so this only resolves the root module handle.
    """
    root_module = module.get_root()


def register_methods(root_module):
    """Attach method, constructor and attribute bindings to every wrapped type.

    One register_Ns3*_methods helper is invoked per C++ class; the order
    mirrors the type registrations performed earlier in this generated module.
    """
    register_Ns3Address_methods(root_module, root_module['ns3::Address'])
    register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
    register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
    register_Ns3Box_methods(root_module, root_module['ns3::Box'])
    register_Ns3BuildingContainer_methods(root_module, root_module['ns3::BuildingContainer'])
    register_Ns3BuildingList_methods(root_module, root_module['ns3::BuildingList'])
    register_Ns3BuildingsHelper_methods(root_module, root_module['ns3::BuildingsHelper'])
    register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
    register_Ns3ConstantVelocityHelper_methods(root_module, root_module['ns3::ConstantVelocityHelper'])
    register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address'])
    register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask'])
    register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address'])
    register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix'])
    register_Ns3NodeContainer_methods(root_module, root_module['ns3::NodeContainer'])
    register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
    register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
    register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
    register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
    register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
    register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
    register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
    register_Ns3Vector2D_methods(root_module, root_module['ns3::Vector2D'])
    register_Ns3Vector3D_methods(root_module, root_module['ns3::Vector3D'])
    register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
    register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
    register_Ns3Object_methods(root_module, root_module['ns3::Object'])
    register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
    register_Ns3PositionAllocator_methods(root_module, root_module['ns3::PositionAllocator'])
    register_Ns3PropagationLossModel_methods(root_module, root_module['ns3::PropagationLossModel'])
    register_Ns3RandomBoxPositionAllocator_methods(root_module, root_module['ns3::RandomBoxPositionAllocator'])
    register_Ns3RandomBuildingPositionAllocator_methods(root_module, root_module['ns3::RandomBuildingPositionAllocator'])
    register_Ns3RandomDiscPositionAllocator_methods(root_module, root_module['ns3::RandomDiscPositionAllocator'])
    register_Ns3RandomPropagationLossModel_methods(root_module, root_module['ns3::RandomPropagationLossModel'])
    register_Ns3RandomRectanglePositionAllocator_methods(root_module, root_module['ns3::RandomRectanglePositionAllocator'])
    register_Ns3RandomRoomPositionAllocator_methods(root_module, root_module['ns3::RandomRoomPositionAllocator'])
    register_Ns3RandomVariableStream_methods(root_module, root_module['ns3::RandomVariableStream'])
    register_Ns3RangePropagationLossModel_methods(root_module, root_module['ns3::RangePropagationLossModel'])
    register_Ns3SameRoomPositionAllocator_methods(root_module, root_module['ns3::SameRoomPositionAllocator'])
    register_Ns3SequentialRandomVariable_methods(root_module, root_module['ns3::SequentialRandomVariable'])
    register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    register_Ns3ThreeLogDistancePropagationLossModel_methods(root_module, root_module['ns3::ThreeLogDistancePropagationLossModel'])
    register_Ns3Time_methods(root_module, root_module['ns3::Time'])
    register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
    register_Ns3TriangularRandomVariable_methods(root_module, root_module['ns3::TriangularRandomVariable'])
    register_Ns3TwoRayGroundPropagationLossModel_methods(root_module, root_module['ns3::TwoRayGroundPropagationLossModel'])
    register_Ns3UniformDiscPositionAllocator_methods(root_module, root_module['ns3::UniformDiscPositionAllocator'])
    register_Ns3UniformRandomVariable_methods(root_module, root_module['ns3::UniformRandomVariable'])
    register_Ns3WeibullRandomVariable_methods(root_module, root_module['ns3::WeibullRandomVariable'])
    register_Ns3ZetaRandomVariable_methods(root_module, root_module['ns3::ZetaRandomVariable'])
    register_Ns3ZipfRandomVariable_methods(root_module, root_module['ns3::ZipfRandomVariable'])
    register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
    register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
    register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
    register_Ns3BoxChecker_methods(root_module, root_module['ns3::BoxChecker'])
    register_Ns3BoxValue_methods(root_module, root_module['ns3::BoxValue'])
    register_Ns3Building_methods(root_module, root_module['ns3::Building'])
    register_Ns3BuildingsPropagationLossModel_methods(root_module, root_module['ns3::BuildingsPropagationLossModel'])
    register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
    register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
    register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
    register_Ns3ConstantRandomVariable_methods(root_module, root_module['ns3::ConstantRandomVariable'])
    register_Ns3DeterministicRandomVariable_methods(root_module, root_module['ns3::DeterministicRandomVariable'])
    register_Ns3EmpiricalRandomVariable_methods(root_module, root_module['ns3::EmpiricalRandomVariable'])
    register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
    register_Ns3ErlangRandomVariable_methods(root_module, root_module['ns3::ErlangRandomVariable'])
    register_Ns3ExponentialRandomVariable_methods(root_module, root_module['ns3::ExponentialRandomVariable'])
    register_Ns3FixedRssLossModel_methods(root_module, root_module['ns3::FixedRssLossModel'])
    register_Ns3FriisPropagationLossModel_methods(root_module, root_module['ns3::FriisPropagationLossModel'])
    register_Ns3GammaRandomVariable_methods(root_module, root_module['ns3::GammaRandomVariable'])
    register_Ns3GridBuildingAllocator_methods(root_module, root_module['ns3::GridBuildingAllocator'])
    register_Ns3GridPositionAllocator_methods(root_module, root_module['ns3::GridPositionAllocator'])
    register_Ns3HybridBuildingsPropagationLossModel_methods(root_module, root_module['ns3::HybridBuildingsPropagationLossModel'])
    register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker'])
    register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue'])
    register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker'])
    register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue'])
    register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker'])
    register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue'])
    register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker'])
    register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue'])
    register_Ns3ItuR1238PropagationLossModel_methods(root_module, root_module['ns3::ItuR1238PropagationLossModel'])
    register_Ns3ListPositionAllocator_methods(root_module, root_module['ns3::ListPositionAllocator'])
    register_Ns3LogDistancePropagationLossModel_methods(root_module, root_module['ns3::LogDistancePropagationLossModel'])
    register_Ns3LogNormalRandomVariable_methods(root_module, root_module['ns3::LogNormalRandomVariable'])
    register_Ns3MatrixPropagationLossModel_methods(root_module, root_module['ns3::MatrixPropagationLossModel'])
    register_Ns3MobilityModel_methods(root_module, root_module['ns3::MobilityModel'])
    register_Ns3NakagamiPropagationLossModel_methods(root_module, root_module['ns3::NakagamiPropagationLossModel'])
    register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice'])
    register_Ns3Node_methods(root_module, root_module['ns3::Node'])
    register_Ns3NormalRandomVariable_methods(root_module, root_module['ns3::NormalRandomVariable'])
    register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
    register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
    register_Ns3OhBuildingsPropagationLossModel_methods(root_module, root_module['ns3::OhBuildingsPropagationLossModel'])
    register_Ns3ParetoRandomVariable_methods(root_module, root_module['ns3::ParetoRandomVariable'])
    register_Ns3TimeChecker_methods(root_module, root_module['ns3::TimeChecker'])
    register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
    register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
    register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
    register_Ns3Vector2DChecker_methods(root_module, root_module['ns3::Vector2DChecker'])
    register_Ns3Vector2DValue_methods(root_module, root_module['ns3::Vector2DValue'])
    register_Ns3Vector3DChecker_methods(root_module, root_module['ns3::Vector3DChecker'])
    register_Ns3Vector3DValue_methods(root_module, root_module['ns3::Vector3DValue'])
    register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker'])
    register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue'])
    register_Ns3BuildingsMobilityModel_methods(root_module, root_module['ns3::BuildingsMobilityModel'])
    return


def register_Ns3Address_methods(root_module, cls):
    """Bind ns3::Address (address.h, module 'network'): comparison and
    stream operators, constructors, and the raw-buffer copy/serialize API."""
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## Address(), Address(type, buffer, len), and the copy constructor
    cls.add_constructor([])
    cls.add_constructor([param('uint8_t', 'type'), param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    cls.add_constructor([param('ns3::Address const &', 'address')])
    cls.add_method('CheckCompatible', 'bool', [param('uint8_t', 'type'), param('uint8_t', 'len')], is_const=True)
    cls.add_method('CopyAllFrom', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    cls.add_method('CopyAllTo', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint8_t', 'len')], is_const=True)
    cls.add_method('CopyFrom', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    cls.add_method('CopyTo', 'uint32_t', [param('uint8_t *', 'buffer')], is_const=True)
    cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'buffer')])
    cls.add_method('GetLength', 'uint8_t', [], is_const=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    cls.add_method('IsInvalid', 'bool', [], is_const=True)
    cls.add_method('IsMatchingType', 'bool', [param('uint8_t', 'type')], is_const=True)
    cls.add_method('Register', 'uint8_t', [], is_static=True)
    cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'buffer')], is_const=True)
    return


def register_Ns3AttributeConstructionList_methods(root_module, cls):
    """Bind ns3::AttributeConstructionList (attribute-construction-list.h,
    module 'core'): constructors, Add, iterator accessors, and Find."""
    cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('Add', 'void', [param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
    cls.add_method('Begin', 'std::_List_const_iterator< ns3::AttributeConstructionList::Item >', [], is_const=True)
    cls.add_method('End', 'std::_List_const_iterator< ns3::AttributeConstructionList::Item >', [], is_const=True)
    cls.add_method('Find', 'ns3::Ptr< ns3::AttributeValue >', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True)
    return


def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
    """Bind ns3::AttributeConstructionList::Item: constructors and the
    public checker/name/value data members."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
    cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
    return


def register_Ns3Box_methods(root_module, cls):
    """Bind ns3::Box (box.h, module 'mobility'): constructors, geometric
    queries, and the six public min/max coordinate members."""
    cls.add_output_stream_operator()
    cls.add_constructor([param('ns3::Box const &', 'arg0')])
    cls.add_constructor([param('double', '_xMin'), param('double', '_xMax'), param('double', '_yMin'), param('double', '_yMax'), param('double', '_zMin'), param('double', '_zMax')])
    cls.add_constructor([])
    cls.add_method('CalculateIntersection', 'ns3::Vector', [param('ns3::Vector const &', 'current'), param('ns3::Vector const &', 'speed')], is_const=True)
    cls.add_method('GetClosestSide', 'ns3::Box::Side', [param('ns3::Vector const &', 'position')], is_const=True)
    cls.add_method('IsInside', 'bool', [param('ns3::Vector const &', 'position')], is_const=True)
    cls.add_instance_attribute('xMax', 'double', is_const=False)
    cls.add_instance_attribute('xMin', 'double', is_const=False)
    cls.add_instance_attribute('yMax', 'double', is_const=False)
    cls.add_instance_attribute('yMin', 'double', is_const=False)
    cls.add_instance_attribute('zMax', 'double', is_const=False)
    cls.add_instance_attribute('zMin', 'double', is_const=False)
    return


def register_Ns3BuildingContainer_methods(root_module, cls):
    """Bind ns3::BuildingContainer (building-container.h, module
    'buildings'): constructors, Add overloads, iteration and accessors."""
    cls.add_constructor([param('ns3::BuildingContainer const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ptr< ns3::Building >', 'building')])
    cls.add_constructor([param('std::string', 'buildingName')])
    ## Add is overloaded on container, Ptr<Building> and building name
    cls.add_method('Add', 'void', [param('ns3::BuildingContainer', 'other')])
    cls.add_method('Add', 'void', [param('ns3::Ptr< ns3::Building >', 'building')])
    cls.add_method('Add', 'void', [param('std::string', 'buildingName')])
    cls.add_method('Begin', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Building > const, std::vector< ns3::Ptr< ns3::Building > > >', [], is_const=True)
    cls.add_method('Create', 'void', [param('uint32_t', 'n')])
    cls.add_method('End', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Building > const, std::vector< ns3::Ptr< ns3::Building > > >', [], is_const=True)
    cls.add_method('Get', 'ns3::Ptr< ns3::Building >', [param('uint32_t', 'i')], is_const=True)
    cls.add_method('GetGlobal', 'ns3::BuildingContainer', [], is_static=True)
    cls.add_method('GetN', 'uint32_t', [], is_const=True)
    return


def register_Ns3BuildingList_methods(root_module, cls):
    """Bind ns3::BuildingList (building-list.h, module 'buildings'):
    static registry of all buildings (Add, Begin/End, GetBuilding)."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::BuildingList const &', 'arg0')])
    cls.add_method('Add', 'uint32_t', [param('ns3::Ptr< ns3::Building >', 'building')], is_static=True)
    cls.add_method('Begin', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Building > const, std::vector< ns3::Ptr< ns3::Building > > >', [], is_static=True)
    cls.add_method('End', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Building > const, std::vector< ns3::Ptr< ns3::Building > > >', [], is_static=True)
    cls.add_method('GetBuilding', 'ns3::Ptr< ns3::Building >', [param('uint32_t', 'n')], is_static=True) ## 
building-list.h (module 'buildings'): static uint32_t ns3::BuildingList::GetNBuildings() [member function] cls.add_method('GetNBuildings', 'uint32_t', [], is_static=True) return def register_Ns3BuildingsHelper_methods(root_module, cls): ## buildings-helper.h (module 'buildings'): ns3::BuildingsHelper::BuildingsHelper() [constructor] cls.add_constructor([]) ## buildings-helper.h (module 'buildings'): ns3::BuildingsHelper::BuildingsHelper(ns3::BuildingsHelper const & arg0) [copy constructor] cls.add_constructor([param('ns3::BuildingsHelper const &', 'arg0')]) ## buildings-helper.h (module 'buildings'): static void ns3::BuildingsHelper::MakeConsistent(ns3::Ptr<ns3::BuildingsMobilityModel> bmm) [member function] cls.add_method('MakeConsistent', 'void', [param('ns3::Ptr< ns3::BuildingsMobilityModel >', 'bmm')], is_static=True) ## buildings-helper.h (module 'buildings'): static void ns3::BuildingsHelper::MakeMobilityModelConsistent() [member function] cls.add_method('MakeMobilityModelConsistent', 'void', [], is_static=True) return def register_Ns3CallbackBase_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor] cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')]) ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function] cls.add_method('GetImpl', 'ns3::Ptr< ns3::CallbackImplBase >', [], is_const=True) ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')], visibility='protected') ## callback.h (module 'core'): static std::string ns3::CallbackBase::Demangle(std::string const & mangled) [member function] cls.add_method('Demangle', 'std::string', [param('std::string const &', 
'mangled')], is_static=True, visibility='protected') return def register_Ns3ConstantVelocityHelper_methods(root_module, cls): ## constant-velocity-helper.h (module 'mobility'): ns3::ConstantVelocityHelper::ConstantVelocityHelper(ns3::ConstantVelocityHelper const & arg0) [copy constructor] cls.add_constructor([param('ns3::ConstantVelocityHelper const &', 'arg0')]) ## constant-velocity-helper.h (module 'mobility'): ns3::ConstantVelocityHelper::ConstantVelocityHelper() [constructor] cls.add_constructor([]) ## constant-velocity-helper.h (module 'mobility'): ns3::ConstantVelocityHelper::ConstantVelocityHelper(ns3::Vector const & position) [constructor] cls.add_constructor([param('ns3::Vector const &', 'position')]) ## constant-velocity-helper.h (module 'mobility'): ns3::ConstantVelocityHelper::ConstantVelocityHelper(ns3::Vector const & position, ns3::Vector const & vel) [constructor] cls.add_constructor([param('ns3::Vector const &', 'position'), param('ns3::Vector const &', 'vel')]) ## constant-velocity-helper.h (module 'mobility'): ns3::Vector ns3::ConstantVelocityHelper::GetCurrentPosition() const [member function] cls.add_method('GetCurrentPosition', 'ns3::Vector', [], is_const=True) ## constant-velocity-helper.h (module 'mobility'): ns3::Vector ns3::ConstantVelocityHelper::GetVelocity() const [member function] cls.add_method('GetVelocity', 'ns3::Vector', [], is_const=True) ## constant-velocity-helper.h (module 'mobility'): void ns3::ConstantVelocityHelper::Pause() [member function] cls.add_method('Pause', 'void', []) ## constant-velocity-helper.h (module 'mobility'): void ns3::ConstantVelocityHelper::SetPosition(ns3::Vector const & position) [member function] cls.add_method('SetPosition', 'void', [param('ns3::Vector const &', 'position')]) ## constant-velocity-helper.h (module 'mobility'): void ns3::ConstantVelocityHelper::SetVelocity(ns3::Vector const & vel) [member function] cls.add_method('SetVelocity', 'void', [param('ns3::Vector const &', 'vel')]) ## 
constant-velocity-helper.h (module 'mobility'): void ns3::ConstantVelocityHelper::Unpause() [member function] cls.add_method('Unpause', 'void', []) ## constant-velocity-helper.h (module 'mobility'): void ns3::ConstantVelocityHelper::Update() const [member function] cls.add_method('Update', 'void', [], is_const=True) ## constant-velocity-helper.h (module 'mobility'): void ns3::ConstantVelocityHelper::UpdateWithBounds(ns3::Rectangle const & rectangle) const [member function] cls.add_method('UpdateWithBounds', 'void', [param('ns3::Rectangle const &', 'rectangle')], is_const=True) ## constant-velocity-helper.h (module 'mobility'): void ns3::ConstantVelocityHelper::UpdateWithBounds(ns3::Box const & bounds) const [member function] cls.add_method('UpdateWithBounds', 'void', [param('ns3::Box const &', 'bounds')], is_const=True) return def register_Ns3Ipv4Address_methods(root_module, cls): cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(ns3::Ipv4Address const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4Address const &', 'arg0')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(uint32_t address) [constructor] cls.add_constructor([param('uint32_t', 'address')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(char const * address) [constructor] cls.add_constructor([param('char const *', 'address')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::CombineMask(ns3::Ipv4Mask const & mask) const [member function] cls.add_method('CombineMask', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address 
ns3::Ipv4Address::ConvertFrom(ns3::Address const & address) [member function] cls.add_method('ConvertFrom', 'ns3::Ipv4Address', [param('ns3::Address const &', 'address')], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::Deserialize(uint8_t const * buf) [member function] cls.add_method('Deserialize', 'ns3::Ipv4Address', [param('uint8_t const *', 'buf')], is_static=True) ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Address::Get() const [member function] cls.add_method('Get', 'uint32_t', [], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetAny() [member function] cls.add_method('GetAny', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetBroadcast() [member function] cls.add_method('GetBroadcast', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetLoopback() [member function] cls.add_method('GetLoopback', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::GetSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function] cls.add_method('GetSubnetDirectedBroadcast', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetZero() [member function] cls.add_method('GetZero', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsBroadcast() const [member function] cls.add_method('IsBroadcast', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsEqual(ns3::Ipv4Address const & other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ipv4Address const &', 'other')], is_const=True) ## ipv4-address.h (module 'network'): bool 
ns3::Ipv4Address::IsLocalMulticast() const [member function] cls.add_method('IsLocalMulticast', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): static bool ns3::Ipv4Address::IsMatchingType(ns3::Address const & address) [member function] cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsMulticast() const [member function] cls.add_method('IsMulticast', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function] cls.add_method('IsSubnetDirectedBroadcast', 'bool', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Serialize(uint8_t * buf) const [member function] cls.add_method('Serialize', 'void', [param('uint8_t *', 'buf')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(uint32_t address) [member function] cls.add_method('Set', 'void', [param('uint32_t', 'address')]) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(char const * address) [member function] cls.add_method('Set', 'void', [param('char const *', 'address')]) return def register_Ns3Ipv4Mask_methods(root_module, cls): cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(ns3::Ipv4Mask const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4Mask const &', 'arg0')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(uint32_t mask) 
[constructor] cls.add_constructor([param('uint32_t', 'mask')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(char const * mask) [constructor] cls.add_constructor([param('char const *', 'mask')]) ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::Get() const [member function] cls.add_method('Get', 'uint32_t', [], is_const=True) ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::GetInverse() const [member function] cls.add_method('GetInverse', 'uint32_t', [], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetLoopback() [member function] cls.add_method('GetLoopback', 'ns3::Ipv4Mask', [], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetOnes() [member function] cls.add_method('GetOnes', 'ns3::Ipv4Mask', [], is_static=True) ## ipv4-address.h (module 'network'): uint16_t ns3::Ipv4Mask::GetPrefixLength() const [member function] cls.add_method('GetPrefixLength', 'uint16_t', [], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetZero() [member function] cls.add_method('GetZero', 'ns3::Ipv4Mask', [], is_static=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsEqual(ns3::Ipv4Mask other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ipv4Mask', 'other')], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsMatch(ns3::Ipv4Address a, ns3::Ipv4Address b) const [member function] cls.add_method('IsMatch', 'bool', [param('ns3::Ipv4Address', 'a'), param('ns3::Ipv4Address', 'b')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Set(uint32_t mask) [member function] cls.add_method('Set', 'void', [param('uint32_t', 'mask')]) return def 
register_Ns3Ipv6Address_methods(root_module, cls): cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(char const * address) [constructor] cls.add_constructor([param('char const *', 'address')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(uint8_t * address) [constructor] cls.add_constructor([param('uint8_t *', 'address')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const & addr) [copy constructor] cls.add_constructor([param('ns3::Ipv6Address const &', 'addr')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const * addr) [constructor] cls.add_constructor([param('ns3::Ipv6Address const *', 'addr')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6Address::CombinePrefix(ns3::Ipv6Prefix const & prefix) [member function] cls.add_method('CombinePrefix', 'ns3::Ipv6Address', [param('ns3::Ipv6Prefix const &', 'prefix')]) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::ConvertFrom(ns3::Address const & address) [member function] cls.add_method('ConvertFrom', 'ns3::Ipv6Address', [param('ns3::Address const &', 'address')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::Deserialize(uint8_t const * buf) [member function] cls.add_method('Deserialize', 'ns3::Ipv6Address', [param('uint8_t const *', 'buf')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllHostsMulticast() [member function] cls.add_method('GetAllHostsMulticast', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address 
ns3::Ipv6Address::GetAllNodesMulticast() [member function] cls.add_method('GetAllNodesMulticast', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllRoutersMulticast() [member function] cls.add_method('GetAllRoutersMulticast', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAny() [member function] cls.add_method('GetAny', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::GetBytes(uint8_t * buf) const [member function] cls.add_method('GetBytes', 'void', [param('uint8_t *', 'buf')], is_const=True) ## ipv6-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv6Address::GetIpv4MappedAddress() const [member function] cls.add_method('GetIpv4MappedAddress', 'ns3::Ipv4Address', [], is_const=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetLoopback() [member function] cls.add_method('GetLoopback', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetOnes() [member function] cls.add_method('GetOnes', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetZero() [member function] cls.add_method('GetZero', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllHostsMulticast() const [member function] cls.add_method('IsAllHostsMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllNodesMulticast() const [member function] cls.add_method('IsAllNodesMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllRoutersMulticast() const [member function] cls.add_method('IsAllRoutersMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool 
ns3::Ipv6Address::IsAny() const [member function] cls.add_method('IsAny', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsEqual(ns3::Ipv6Address const & other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ipv6Address const &', 'other')], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsIpv4MappedAddress() [member function] cls.add_method('IsIpv4MappedAddress', 'bool', []) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocal() const [member function] cls.add_method('IsLinkLocal', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocalMulticast() const [member function] cls.add_method('IsLinkLocalMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLocalhost() const [member function] cls.add_method('IsLocalhost', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): static bool ns3::Ipv6Address::IsMatchingType(ns3::Address const & address) [member function] cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsMulticast() const [member function] cls.add_method('IsMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsSolicitedMulticast() const [member function] cls.add_method('IsSolicitedMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac48Address addr, ns3::Ipv6Address prefix) [member function] cls.add_method('MakeAutoconfiguredAddress', 'ns3::Ipv6Address', [param('ns3::Mac48Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac48Address mac) [member 
function] cls.add_method('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address', [param('ns3::Mac48Address', 'mac')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeIpv4MappedAddress(ns3::Ipv4Address addr) [member function] cls.add_method('MakeIpv4MappedAddress', 'ns3::Ipv6Address', [param('ns3::Ipv4Address', 'addr')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeSolicitedAddress(ns3::Ipv6Address addr) [member function] cls.add_method('MakeSolicitedAddress', 'ns3::Ipv6Address', [param('ns3::Ipv6Address', 'addr')], is_static=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Serialize(uint8_t * buf) const [member function] cls.add_method('Serialize', 'void', [param('uint8_t *', 'buf')], is_const=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(char const * address) [member function] cls.add_method('Set', 'void', [param('char const *', 'address')]) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(uint8_t * address) [member function] cls.add_method('Set', 'void', [param('uint8_t *', 'address')]) return def register_Ns3Ipv6Prefix_methods(root_module, cls): cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t * prefix) [constructor] cls.add_constructor([param('uint8_t *', 'prefix')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(char const * prefix) [constructor] cls.add_constructor([param('char const *', 'prefix')]) ## ipv6-address.h (module 'network'): 
ns3::Ipv6Prefix::Ipv6Prefix(uint8_t prefix) [constructor] cls.add_constructor([param('uint8_t', 'prefix')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const & prefix) [copy constructor] cls.add_constructor([param('ns3::Ipv6Prefix const &', 'prefix')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const * prefix) [constructor] cls.add_constructor([param('ns3::Ipv6Prefix const *', 'prefix')]) ## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::GetBytes(uint8_t * buf) const [member function] cls.add_method('GetBytes', 'void', [param('uint8_t *', 'buf')], is_const=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetLoopback() [member function] cls.add_method('GetLoopback', 'ns3::Ipv6Prefix', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetOnes() [member function] cls.add_method('GetOnes', 'ns3::Ipv6Prefix', [], is_static=True) ## ipv6-address.h (module 'network'): uint8_t ns3::Ipv6Prefix::GetPrefixLength() const [member function] cls.add_method('GetPrefixLength', 'uint8_t', [], is_const=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetZero() [member function] cls.add_method('GetZero', 'ns3::Ipv6Prefix', [], is_static=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsEqual(ns3::Ipv6Prefix const & other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ipv6Prefix const &', 'other')], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsMatch(ns3::Ipv6Address a, ns3::Ipv6Address b) const [member function] cls.add_method('IsMatch', 'bool', [param('ns3::Ipv6Address', 'a'), param('ns3::Ipv6Address', 'b')], is_const=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) 
return def register_Ns3NodeContainer_methods(root_module, cls): ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & arg0) [copy constructor] cls.add_constructor([param('ns3::NodeContainer const &', 'arg0')]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer() [constructor] cls.add_constructor([]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::Ptr<ns3::Node> node) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::Node >', 'node')]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(std::string nodeName) [constructor] cls.add_constructor([param('std::string', 'nodeName')]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b) [constructor] cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b')]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c) [constructor] cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c')]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d) [constructor] cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd')]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d, ns3::NodeContainer const & e) [constructor] cls.add_constructor([param('ns3::NodeContainer const &', 'a'), 
param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd'), param('ns3::NodeContainer const &', 'e')]) ## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::NodeContainer other) [member function] cls.add_method('Add', 'void', [param('ns3::NodeContainer', 'other')]) ## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::Ptr<ns3::Node> node) [member function] cls.add_method('Add', 'void', [param('ns3::Ptr< ns3::Node >', 'node')]) ## node-container.h (module 'network'): void ns3::NodeContainer::Add(std::string nodeName) [member function] cls.add_method('Add', 'void', [param('std::string', 'nodeName')]) ## node-container.h (module 'network'): __gnu_cxx::__normal_iterator<ns3::Ptr<ns3::Node> const*, std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeContainer::Begin() const [member function] cls.add_method('Begin', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node >, std::vector< ns3::Ptr< ns3::Node > > >', [], is_const=True) ## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n) [member function] cls.add_method('Create', 'void', [param('uint32_t', 'n')]) ## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n, uint32_t systemId) [member function] cls.add_method('Create', 'void', [param('uint32_t', 'n'), param('uint32_t', 'systemId')]) ## node-container.h (module 'network'): __gnu_cxx::__normal_iterator<ns3::Ptr<ns3::Node> const*, std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeContainer::End() const [member function] cls.add_method('End', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node >, std::vector< ns3::Ptr< ns3::Node > > >', [], is_const=True) ## node-container.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NodeContainer::Get(uint32_t i) const [member function] cls.add_method('Get', 'ns3::Ptr< ns3::Node >', [param('uint32_t', 'i')], 
is_const=True) ## node-container.h (module 'network'): static ns3::NodeContainer ns3::NodeContainer::GetGlobal() [member function] cls.add_method('GetGlobal', 'ns3::NodeContainer', [], is_static=True) ## node-container.h (module 'network'): uint32_t ns3::NodeContainer::GetN() const [member function] cls.add_method('GetN', 'uint32_t', [], is_const=True) return def register_Ns3ObjectBase_methods(root_module, cls): ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor] cls.add_constructor([]) ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')]) ## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function] cls.add_method('GetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')], is_const=True) ## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & attribute) const [member function] cls.add_method('GetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue &', 'attribute')], is_const=True) ## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function] cls.add_method('SetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, 
ns3::AttributeValue const & value) [member function] cls.add_method('SetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceConnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceConnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceDisconnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceDisconnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function] cls.add_method('ConstructSelf', 'void', [param('ns3::AttributeConstructionList const &', 'attributes')], visibility='protected') ## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function] cls.add_method('NotifyConstructionCompleted', 'void', [], visibility='protected', is_virtual=True) return def register_Ns3ObjectDeleter_methods(root_module, cls): ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor] cls.add_constructor([]) ## object.h 
    ## (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
    ## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function]
    cls.add_method('Delete', 'void', [param('ns3::Object *', 'object')], is_static=True)
    return

def register_Ns3ObjectFactory_methods(root_module, cls):
    """Register Python bindings for ns3::ObjectFactory: TypeId-driven object creation with attribute overrides (pybindgen-generated)."""
    cls.add_output_stream_operator()
    ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(ns3::ObjectFactory const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectFactory const &', 'arg0')])
    ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory() [constructor]
    cls.add_constructor([])
    ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(std::string typeId) [constructor]
    cls.add_constructor([param('std::string', 'typeId')])
    ## object-factory.h (module 'core'): ns3::Ptr<ns3::Object> ns3::ObjectFactory::Create() const [member function]
    cls.add_method('Create', 'ns3::Ptr< ns3::Object >', [], is_const=True)
    ## object-factory.h (module 'core'): ns3::TypeId ns3::ObjectFactory::GetTypeId() const [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True)
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::Set(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('Set', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(ns3::TypeId tid) [member function]
    cls.add_method('SetTypeId', 'void', [param('ns3::TypeId', 'tid')])
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(char const * tid) [member function]
    cls.add_method('SetTypeId', 'void', [param('char const *', 'tid')])
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(std::string tid) [member function]
    cls.add_method('SetTypeId', 'void', [param('std::string', 'tid')])
    return

def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
    """Register Python bindings for the SimpleRefCount<Object, ObjectBase, ObjectDeleter> template instantiation (pybindgen-generated)."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3TagBuffer_methods(root_module, cls):
    """Register Python bindings for ns3::TagBuffer, the raw read/write buffer used for packet-tag serialization (pybindgen-generated)."""
    ## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(ns3::TagBuffer const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')])
    ## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(uint8_t * start, uint8_t * end) [constructor]
    cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::CopyFrom(ns3::TagBuffer o) [member function]
    cls.add_method('CopyFrom', 'void', [param('ns3::TagBuffer', 'o')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::Read(uint8_t * buffer, uint32_t size) [member function]
    cls.add_method('Read', 'void', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
    ## tag-buffer.h (module 'network'): double ns3::TagBuffer::ReadDouble() [member function]
    cls.add_method('ReadDouble', 'double', [])
    ## tag-buffer.h (module 'network'): uint16_t ns3::TagBuffer::ReadU16() [member function]
    cls.add_method('ReadU16', 'uint16_t', [])
    ## tag-buffer.h (module 'network'): uint32_t
    ## ns3::TagBuffer::ReadU32() [member function]
    cls.add_method('ReadU32', 'uint32_t', [])
    ## tag-buffer.h (module 'network'): uint64_t ns3::TagBuffer::ReadU64() [member function]
    cls.add_method('ReadU64', 'uint64_t', [])
    ## tag-buffer.h (module 'network'): uint8_t ns3::TagBuffer::ReadU8() [member function]
    cls.add_method('ReadU8', 'uint8_t', [])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::TrimAtEnd(uint32_t trim) [member function]
    cls.add_method('TrimAtEnd', 'void', [param('uint32_t', 'trim')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::Write(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Write', 'void', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteDouble(double v) [member function]
    cls.add_method('WriteDouble', 'void', [param('double', 'v')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU16(uint16_t data) [member function]
    cls.add_method('WriteU16', 'void', [param('uint16_t', 'data')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU32(uint32_t data) [member function]
    cls.add_method('WriteU32', 'void', [param('uint32_t', 'data')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU64(uint64_t v) [member function]
    cls.add_method('WriteU64', 'void', [param('uint64_t', 'v')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU8(uint8_t v) [member function]
    cls.add_method('WriteU8', 'void', [param('uint8_t', 'v')])
    return

def register_Ns3TypeId_methods(root_module, cls):
    """Register Python bindings for ns3::TypeId: run-time type metadata (attributes, trace sources, parent hierarchy) (pybindgen-generated)."""
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
    cls.add_constructor([param('char const *', 'name')])
    ## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor]
    cls.add_constructor([param('ns3::TypeId const &', 'o')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('AddAttribute', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('AddAttribute', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function]
    cls.add_method('AddTraceSource', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')])
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function]
    cls.add_method('GetAttribute', 'ns3::TypeId::AttributeInformation', [param('uint32_t', 'i')], is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function]
    cls.add_method('GetAttributeFullName', 'std::string', [param('uint32_t', 'i')], is_const=True)
    ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function]
    cls.add_method('GetAttributeN', 'uint32_t', [], is_const=True)
    ## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function]
    cls.add_method('GetConstructor', 'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', [], is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
    cls.add_method('GetGroupName', 'std::string', [], is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
    cls.add_method('GetName', 'std::string', [], is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
    cls.add_method('GetParent', 'ns3::TypeId', [], is_const=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function]
    cls.add_method('GetRegistered', 'ns3::TypeId', [param('uint32_t', 'i')], is_static=True)
    ## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function]
    cls.add_method('GetRegisteredN', 'uint32_t', [], is_static=True)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function]
    cls.add_method('GetTraceSource', 'ns3::TypeId::TraceSourceInformation', [param('uint32_t', 'i')], is_const=True)
    ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function]
    cls.add_method('GetTraceSourceN', 'uint32_t', [], is_const=True)
    ## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
    cls.add_method('GetUid', 'uint16_t', [], is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
    cls.add_method('HasConstructor', 'bool', [], is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
    cls.add_method('HasParent', 'bool', [], is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
    cls.add_method('HideFromDocumentation', 'ns3::TypeId', [])
    ## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
    cls.add_method('IsChildOf', 'bool', [param('ns3::TypeId', 'other')], is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
    cls.add_method('LookupAttributeByName', 'bool', [param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)], is_const=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
    cls.add_method('LookupByName', 'ns3::TypeId', [param('std::string', 'name')], is_static=True)
    ## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
    cls.add_method('LookupTraceSourceByName', 'ns3::Ptr< ns3::TraceSourceAccessor const >', [param('std::string', 'name')], is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
    cls.add_method('MustHideFromDocumentation', 'bool', [], is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function]
    cls.add_method('SetAttributeInitialValue', 'bool', [param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
    cls.add_method('SetGroupName', 'ns3::TypeId', [param('std::string', 'groupName')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
    cls.add_method('SetParent', 'ns3::TypeId', [param('ns3::TypeId', 'tid')])
    ## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t tid) [member function]
    cls.add_method('SetUid', 'void', [param('uint16_t', 'tid')])
    return

def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
    """Register Python bindings for the ns3::TypeId::AttributeInformation record struct (pybindgen-generated)."""
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable]
    cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable]
    cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable]
    cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable]
    cls.add_instance_attribute('help', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable]
    cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable]
    cls.add_instance_attribute('originalInitialValue',
                                'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
    return

def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
    """Register Python bindings for the ns3::TypeId::TraceSourceInformation record struct (pybindgen-generated)."""
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable]
    cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable]
    cls.add_instance_attribute('help', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    return

def register_Ns3Vector2D_methods(root_module, cls):
    """Register Python bindings for ns3::Vector2D (2-component double vector) (pybindgen-generated)."""
    cls.add_output_stream_operator()
    ## vector.h (module 'core'): ns3::Vector2D::Vector2D(ns3::Vector2D const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Vector2D const &', 'arg0')])
    ## vector.h (module 'core'): ns3::Vector2D::Vector2D(double _x, double _y) [constructor]
    cls.add_constructor([param('double', '_x'), param('double', '_y')])
    ## vector.h (module 'core'): ns3::Vector2D::Vector2D() [constructor]
    cls.add_constructor([])
    ## vector.h (module 'core'): ns3::Vector2D::x [variable]
    cls.add_instance_attribute('x', 'double', is_const=False)
    ## vector.h (module 'core'): ns3::Vector2D::y [variable]
    cls.add_instance_attribute('y', 'double', is_const=False)
    return

def register_Ns3Vector3D_methods(root_module, cls):
    """Register Python bindings for ns3::Vector3D (3-component double vector) (pybindgen-generated)."""
    cls.add_output_stream_operator()
    ## vector.h (module 'core'): ns3::Vector3D::Vector3D(ns3::Vector3D const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Vector3D const &', 'arg0')])
    ## vector.h (module 'core'): ns3::Vector3D::Vector3D(double _x, double _y, double _z) [constructor]
    cls.add_constructor([param('double', '_x'), param('double', '_y'), param('double', '_z')])
    ## vector.h (module 'core'): ns3::Vector3D::Vector3D() [constructor]
    cls.add_constructor([])
    ## vector.h (module 'core'): ns3::Vector3D::x [variable]
    cls.add_instance_attribute('x', 'double', is_const=False)
    ## vector.h (module 'core'): ns3::Vector3D::y [variable]
    cls.add_instance_attribute('y', 'double', is_const=False)
    ## vector.h (module 'core'): ns3::Vector3D::z [variable]
    cls.add_instance_attribute('z', 'double', is_const=False)
    return

def register_Ns3Empty_methods(root_module, cls):
    """Register Python bindings for ns3::empty, the placeholder type used in Callback template slots (pybindgen-generated)."""
    ## empty.h (module 'core'): ns3::empty::empty() [constructor]
    cls.add_constructor([])
    ## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::empty const &', 'arg0')])
    return

def register_Ns3Int64x64_t_methods(root_module, cls):
    """Register Python bindings for ns3::int64x64_t: arithmetic/comparison operators for every supported numeric operand type, then constructors and accessors (pybindgen-generated)."""
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
    cls.add_binary_numeric_operator('*',
                                    root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
    cls.add_unary_numeric_operator('-')
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('>')
    cls.add_binary_comparison_operator('!=')
    cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', 'right'))
    cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', 'right'))
    cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', 'right'))
    cls.add_inplace_numeric_operator('/=', param('ns3::int64x64_t const &', 'right'))
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('<=')
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('>=')
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t() [constructor]
    cls.add_constructor([])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(double v) [constructor]
    cls.add_constructor([param('double', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int v) [constructor]
    cls.add_constructor([param('int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long int v) [constructor]
    cls.add_constructor([param('long int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long int v) [constructor]
    cls.add_constructor([param('long long int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(unsigned int v) [constructor]
    cls.add_constructor([param('unsigned int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long unsigned int v) [constructor]
    cls.add_constructor([param('long unsigned int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long unsigned int v) [constructor]
    cls.add_constructor([param('long long unsigned int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int64_t hi, uint64_t lo) [constructor]
    cls.add_constructor([param('int64_t', 'hi'), param('uint64_t', 'lo')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(ns3::int64x64_t const & o) [copy constructor]
    cls.add_constructor([param('ns3::int64x64_t const &', 'o')])
    ## int64x64-double.h (module 'core'): double ns3::int64x64_t::GetDouble() const [member function]
    cls.add_method('GetDouble', 'double', [], is_const=True)
    ##
    ## int64x64-double.h (module 'core'): int64_t ns3::int64x64_t::GetHigh() const [member function]
    cls.add_method('GetHigh', 'int64_t', [], is_const=True)
    ## int64x64-double.h (module 'core'): uint64_t ns3::int64x64_t::GetLow() const [member function]
    cls.add_method('GetLow', 'uint64_t', [], is_const=True)
    ## int64x64-double.h (module 'core'): static ns3::int64x64_t ns3::int64x64_t::Invert(uint64_t v) [member function]
    cls.add_method('Invert', 'ns3::int64x64_t', [param('uint64_t', 'v')], is_static=True)
    ## int64x64-double.h (module 'core'): void ns3::int64x64_t::MulByInvert(ns3::int64x64_t const & o) [member function]
    cls.add_method('MulByInvert', 'void', [param('ns3::int64x64_t const &', 'o')])
    return

def register_Ns3Object_methods(root_module, cls):
    """Register Python bindings for ns3::Object: aggregation, disposal, and TypeId introspection (pybindgen-generated)."""
    ## object.h (module 'core'): ns3::Object::Object() [constructor]
    cls.add_constructor([])
    ## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function]
    cls.add_method('AggregateObject', 'void', [param('ns3::Ptr< ns3::Object >', 'other')])
    ## object.h (module 'core'): void ns3::Object::Dispose() [member function]
    cls.add_method('Dispose', 'void', [])
    ## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function]
    cls.add_method('GetAggregateIterator', 'ns3::Object::AggregateIterator', [], is_const=True)
    ## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    ## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## object.h (module 'core'): void ns3::Object::Start() [member function]
    cls.add_method('Start', 'void', [])
    ## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [copy constructor]
    cls.add_constructor([param('ns3::Object const &', 'o')], visibility='protected')
    ## object.h
    ## (module 'core'): void ns3::Object::DoDispose() [member function]
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    ## object.h (module 'core'): void ns3::Object::DoStart() [member function]
    cls.add_method('DoStart', 'void', [], visibility='protected', is_virtual=True)
    ## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function]
    cls.add_method('NotifyNewAggregate', 'void', [], visibility='protected', is_virtual=True)
    return

def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
    """Register Python bindings for ns3::Object::AggregateIterator (HasNext/Next traversal of aggregated objects) (pybindgen-generated)."""
    ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
    ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor]
    cls.add_constructor([])
    ## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function]
    cls.add_method('HasNext', 'bool', [], is_const=True)
    ## object.h (module 'core'): ns3::Ptr<ns3::Object const> ns3::Object::AggregateIterator::Next() [member function]
    cls.add_method('Next', 'ns3::Ptr< ns3::Object const >', [])
    return

def register_Ns3PositionAllocator_methods(root_module, cls):
    """Register Python bindings for the abstract ns3::PositionAllocator base class (pybindgen-generated)."""
    ## position-allocator.h (module 'mobility'): ns3::PositionAllocator::PositionAllocator(ns3::PositionAllocator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PositionAllocator const &', 'arg0')])
    ## position-allocator.h (module 'mobility'): ns3::PositionAllocator::PositionAllocator() [constructor]
    cls.add_constructor([])
    ## position-allocator.h (module 'mobility'): int64_t ns3::PositionAllocator::AssignStreams(int64_t stream) [member function]
    cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')], is_pure_virtual=True, is_virtual=True)
    ## position-allocator.h (module 'mobility'): ns3::Vector ns3::PositionAllocator::GetNext() const [member function]
cls.add_method('GetNext', 'ns3::Vector', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## position-allocator.h (module 'mobility'): static ns3::TypeId ns3::PositionAllocator::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) return def register_Ns3PropagationLossModel_methods(root_module, cls): ## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::PropagationLossModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## propagation-loss-model.h (module 'propagation'): ns3::PropagationLossModel::PropagationLossModel() [constructor] cls.add_constructor([]) ## propagation-loss-model.h (module 'propagation'): void ns3::PropagationLossModel::SetNext(ns3::Ptr<ns3::PropagationLossModel> next) [member function] cls.add_method('SetNext', 'void', [param('ns3::Ptr< ns3::PropagationLossModel >', 'next')]) ## propagation-loss-model.h (module 'propagation'): ns3::Ptr<ns3::PropagationLossModel> ns3::PropagationLossModel::GetNext() [member function] cls.add_method('GetNext', 'ns3::Ptr< ns3::PropagationLossModel >', []) ## propagation-loss-model.h (module 'propagation'): double ns3::PropagationLossModel::CalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('CalcRxPower', 'double', [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True) ## propagation-loss-model.h (module 'propagation'): int64_t ns3::PropagationLossModel::AssignStreams(int64_t stream) [member function] cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')]) ## propagation-loss-model.h (module 'propagation'): double ns3::PropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('DoCalcRxPower', 'double', [param('double', 
                                              'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
    ## propagation-loss-model.h (module 'propagation'): int64_t ns3::PropagationLossModel::DoAssignStreams(int64_t stream) [member function]
    cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')], is_pure_virtual=True, visibility='private', is_virtual=True)
    return

def register_Ns3RandomBoxPositionAllocator_methods(root_module, cls):
    """Register Python bindings for ns3::RandomBoxPositionAllocator (random positions inside a 3D box drawn from per-axis streams) (pybindgen-generated)."""
    ## position-allocator.h (module 'mobility'): ns3::RandomBoxPositionAllocator::RandomBoxPositionAllocator(ns3::RandomBoxPositionAllocator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::RandomBoxPositionAllocator const &', 'arg0')])
    ## position-allocator.h (module 'mobility'): ns3::RandomBoxPositionAllocator::RandomBoxPositionAllocator() [constructor]
    cls.add_constructor([])
    ## position-allocator.h (module 'mobility'): int64_t ns3::RandomBoxPositionAllocator::AssignStreams(int64_t stream) [member function]
    cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')], is_virtual=True)
    ## position-allocator.h (module 'mobility'): ns3::Vector ns3::RandomBoxPositionAllocator::GetNext() const [member function]
    cls.add_method('GetNext', 'ns3::Vector', [], is_const=True, is_virtual=True)
    ## position-allocator.h (module 'mobility'): static ns3::TypeId ns3::RandomBoxPositionAllocator::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## position-allocator.h (module 'mobility'): void ns3::RandomBoxPositionAllocator::SetX(ns3::Ptr<ns3::RandomVariableStream> x) [member function]
    cls.add_method('SetX', 'void', [param('ns3::Ptr< ns3::RandomVariableStream >', 'x')])
    ## position-allocator.h (module 'mobility'): void ns3::RandomBoxPositionAllocator::SetY(ns3::Ptr<ns3::RandomVariableStream> y) [member function]
    cls.add_method('SetY', 'void', [param('ns3::Ptr< ns3::RandomVariableStream >',
'y')]) ## position-allocator.h (module 'mobility'): void ns3::RandomBoxPositionAllocator::SetZ(ns3::Ptr<ns3::RandomVariableStream> z) [member function] cls.add_method('SetZ', 'void', [param('ns3::Ptr< ns3::RandomVariableStream >', 'z')]) return def register_Ns3RandomBuildingPositionAllocator_methods(root_module, cls): ## building-position-allocator.h (module 'buildings'): ns3::RandomBuildingPositionAllocator::RandomBuildingPositionAllocator(ns3::RandomBuildingPositionAllocator const & arg0) [copy constructor] cls.add_constructor([param('ns3::RandomBuildingPositionAllocator const &', 'arg0')]) ## building-position-allocator.h (module 'buildings'): ns3::RandomBuildingPositionAllocator::RandomBuildingPositionAllocator() [constructor] cls.add_constructor([]) ## building-position-allocator.h (module 'buildings'): int64_t ns3::RandomBuildingPositionAllocator::AssignStreams(int64_t stream) [member function] cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')], is_virtual=True) ## building-position-allocator.h (module 'buildings'): ns3::Vector ns3::RandomBuildingPositionAllocator::GetNext() const [member function] cls.add_method('GetNext', 'ns3::Vector', [], is_const=True, is_virtual=True) ## building-position-allocator.h (module 'buildings'): static ns3::TypeId ns3::RandomBuildingPositionAllocator::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) return def register_Ns3RandomDiscPositionAllocator_methods(root_module, cls): ## position-allocator.h (module 'mobility'): ns3::RandomDiscPositionAllocator::RandomDiscPositionAllocator(ns3::RandomDiscPositionAllocator const & arg0) [copy constructor] cls.add_constructor([param('ns3::RandomDiscPositionAllocator const &', 'arg0')]) ## position-allocator.h (module 'mobility'): ns3::RandomDiscPositionAllocator::RandomDiscPositionAllocator() [constructor] cls.add_constructor([]) ## position-allocator.h (module 'mobility'): int64_t 
ns3::RandomDiscPositionAllocator::AssignStreams(int64_t stream) [member function] cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')], is_virtual=True) ## position-allocator.h (module 'mobility'): ns3::Vector ns3::RandomDiscPositionAllocator::GetNext() const [member function] cls.add_method('GetNext', 'ns3::Vector', [], is_const=True, is_virtual=True) ## position-allocator.h (module 'mobility'): static ns3::TypeId ns3::RandomDiscPositionAllocator::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## position-allocator.h (module 'mobility'): void ns3::RandomDiscPositionAllocator::SetRho(ns3::Ptr<ns3::RandomVariableStream> rho) [member function] cls.add_method('SetRho', 'void', [param('ns3::Ptr< ns3::RandomVariableStream >', 'rho')]) ## position-allocator.h (module 'mobility'): void ns3::RandomDiscPositionAllocator::SetTheta(ns3::Ptr<ns3::RandomVariableStream> theta) [member function] cls.add_method('SetTheta', 'void', [param('ns3::Ptr< ns3::RandomVariableStream >', 'theta')]) ## position-allocator.h (module 'mobility'): void ns3::RandomDiscPositionAllocator::SetX(double x) [member function] cls.add_method('SetX', 'void', [param('double', 'x')]) ## position-allocator.h (module 'mobility'): void ns3::RandomDiscPositionAllocator::SetY(double y) [member function] cls.add_method('SetY', 'void', [param('double', 'y')]) return def register_Ns3RandomPropagationLossModel_methods(root_module, cls): ## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::RandomPropagationLossModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## propagation-loss-model.h (module 'propagation'): ns3::RandomPropagationLossModel::RandomPropagationLossModel() [constructor] cls.add_constructor([]) ## propagation-loss-model.h (module 'propagation'): double ns3::RandomPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, 
ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('DoCalcRxPower', 'double', [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True, visibility='private', is_virtual=True) ## propagation-loss-model.h (module 'propagation'): int64_t ns3::RandomPropagationLossModel::DoAssignStreams(int64_t stream) [member function] cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')], visibility='private', is_virtual=True) return def register_Ns3RandomRectanglePositionAllocator_methods(root_module, cls): ## position-allocator.h (module 'mobility'): ns3::RandomRectanglePositionAllocator::RandomRectanglePositionAllocator(ns3::RandomRectanglePositionAllocator const & arg0) [copy constructor] cls.add_constructor([param('ns3::RandomRectanglePositionAllocator const &', 'arg0')]) ## position-allocator.h (module 'mobility'): ns3::RandomRectanglePositionAllocator::RandomRectanglePositionAllocator() [constructor] cls.add_constructor([]) ## position-allocator.h (module 'mobility'): int64_t ns3::RandomRectanglePositionAllocator::AssignStreams(int64_t stream) [member function] cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')], is_virtual=True) ## position-allocator.h (module 'mobility'): ns3::Vector ns3::RandomRectanglePositionAllocator::GetNext() const [member function] cls.add_method('GetNext', 'ns3::Vector', [], is_const=True, is_virtual=True) ## position-allocator.h (module 'mobility'): static ns3::TypeId ns3::RandomRectanglePositionAllocator::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## position-allocator.h (module 'mobility'): void ns3::RandomRectanglePositionAllocator::SetX(ns3::Ptr<ns3::RandomVariableStream> x) [member function] cls.add_method('SetX', 'void', [param('ns3::Ptr< ns3::RandomVariableStream >', 'x')]) ## position-allocator.h (module 'mobility'): void 
ns3::RandomRectanglePositionAllocator::SetY(ns3::Ptr<ns3::RandomVariableStream> y) [member function] cls.add_method('SetY', 'void', [param('ns3::Ptr< ns3::RandomVariableStream >', 'y')]) return def register_Ns3RandomRoomPositionAllocator_methods(root_module, cls): ## building-position-allocator.h (module 'buildings'): ns3::RandomRoomPositionAllocator::RandomRoomPositionAllocator(ns3::RandomRoomPositionAllocator const & arg0) [copy constructor] cls.add_constructor([param('ns3::RandomRoomPositionAllocator const &', 'arg0')]) ## building-position-allocator.h (module 'buildings'): ns3::RandomRoomPositionAllocator::RandomRoomPositionAllocator() [constructor] cls.add_constructor([]) ## building-position-allocator.h (module 'buildings'): int64_t ns3::RandomRoomPositionAllocator::AssignStreams(int64_t stream) [member function] cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')], is_virtual=True) ## building-position-allocator.h (module 'buildings'): ns3::Vector ns3::RandomRoomPositionAllocator::GetNext() const [member function] cls.add_method('GetNext', 'ns3::Vector', [], is_const=True, is_virtual=True) ## building-position-allocator.h (module 'buildings'): static ns3::TypeId ns3::RandomRoomPositionAllocator::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) return def register_Ns3RandomVariableStream_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::RandomVariableStream::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::RandomVariableStream::RandomVariableStream() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): void ns3::RandomVariableStream::SetStream(int64_t stream) [member function] cls.add_method('SetStream', 'void', [param('int64_t', 'stream')]) ## random-variable-stream.h (module 'core'): int64_t 
ns3::RandomVariableStream::GetStream() const [member function] cls.add_method('GetStream', 'int64_t', [], is_const=True) ## random-variable-stream.h (module 'core'): void ns3::RandomVariableStream::SetAntithetic(bool isAntithetic) [member function] cls.add_method('SetAntithetic', 'void', [param('bool', 'isAntithetic')]) ## random-variable-stream.h (module 'core'): bool ns3::RandomVariableStream::IsAntithetic() const [member function] cls.add_method('IsAntithetic', 'bool', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::RandomVariableStream::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_pure_virtual=True, is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::RandomVariableStream::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_pure_virtual=True, is_virtual=True) ## random-variable-stream.h (module 'core'): ns3::RngStream * ns3::RandomVariableStream::Peek() const [member function] cls.add_method('Peek', 'ns3::RngStream *', [], is_const=True, visibility='protected') return def register_Ns3RangePropagationLossModel_methods(root_module, cls): ## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::RangePropagationLossModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## propagation-loss-model.h (module 'propagation'): ns3::RangePropagationLossModel::RangePropagationLossModel() [constructor] cls.add_constructor([]) ## propagation-loss-model.h (module 'propagation'): double ns3::RangePropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('DoCalcRxPower', 'double', [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True, visibility='private', is_virtual=True) ## propagation-loss-model.h (module 'propagation'): 
int64_t ns3::RangePropagationLossModel::DoAssignStreams(int64_t stream) [member function] cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')], visibility='private', is_virtual=True) return def register_Ns3SameRoomPositionAllocator_methods(root_module, cls): ## building-position-allocator.h (module 'buildings'): ns3::SameRoomPositionAllocator::SameRoomPositionAllocator(ns3::SameRoomPositionAllocator const & arg0) [copy constructor] cls.add_constructor([param('ns3::SameRoomPositionAllocator const &', 'arg0')]) ## building-position-allocator.h (module 'buildings'): ns3::SameRoomPositionAllocator::SameRoomPositionAllocator() [constructor] cls.add_constructor([]) ## building-position-allocator.h (module 'buildings'): ns3::SameRoomPositionAllocator::SameRoomPositionAllocator(ns3::NodeContainer c) [constructor] cls.add_constructor([param('ns3::NodeContainer', 'c')]) ## building-position-allocator.h (module 'buildings'): int64_t ns3::SameRoomPositionAllocator::AssignStreams(int64_t arg0) [member function] cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'arg0')], is_virtual=True) ## building-position-allocator.h (module 'buildings'): ns3::Vector ns3::SameRoomPositionAllocator::GetNext() const [member function] cls.add_method('GetNext', 'ns3::Vector', [], is_const=True, is_virtual=True) ## building-position-allocator.h (module 'buildings'): static ns3::TypeId ns3::SameRoomPositionAllocator::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) return def register_Ns3SequentialRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::SequentialRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::SequentialRandomVariable::SequentialRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): double 
ns3::SequentialRandomVariable::GetMin() const [member function] cls.add_method('GetMin', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::SequentialRandomVariable::GetMax() const [member function] cls.add_method('GetMax', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): ns3::Ptr<ns3::RandomVariableStream> ns3::SequentialRandomVariable::GetIncrement() const [member function] cls.add_method('GetIncrement', 'ns3::Ptr< ns3::RandomVariableStream >', [], is_const=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::SequentialRandomVariable::GetConsecutive() const [member function] cls.add_method('GetConsecutive', 'uint32_t', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::SequentialRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::SequentialRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, 
ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, 
ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void 
ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3ThreeLogDistancePropagationLossModel_methods(root_module, cls): ## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::ThreeLogDistancePropagationLossModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## propagation-loss-model.h (module 'propagation'): ns3::ThreeLogDistancePropagationLossModel::ThreeLogDistancePropagationLossModel() [constructor] cls.add_constructor([]) ## propagation-loss-model.h (module 'propagation'): double ns3::ThreeLogDistancePropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('DoCalcRxPower', 'double', [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True, visibility='private', is_virtual=True) ## propagation-loss-model.h (module 'propagation'): int64_t ns3::ThreeLogDistancePropagationLossModel::DoAssignStreams(int64_t stream) [member function] cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')], visibility='private', is_virtual=True) return def register_Ns3Time_methods(root_module, cls): cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right')) cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('>') cls.add_binary_comparison_operator('!=') cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', 'right')) cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', 'right')) cls.add_output_stream_operator() 
cls.add_binary_comparison_operator('<=') cls.add_binary_comparison_operator('==') cls.add_binary_comparison_operator('>=') ## nstime.h (module 'core'): ns3::Time::Time() [constructor] cls.add_constructor([]) ## nstime.h (module 'core'): ns3::Time::Time(ns3::Time const & o) [copy constructor] cls.add_constructor([param('ns3::Time const &', 'o')]) ## nstime.h (module 'core'): ns3::Time::Time(double v) [constructor] cls.add_constructor([param('double', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(int v) [constructor] cls.add_constructor([param('int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long int v) [constructor] cls.add_constructor([param('long int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long long int v) [constructor] cls.add_constructor([param('long long int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(unsigned int v) [constructor] cls.add_constructor([param('unsigned int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long unsigned int v) [constructor] cls.add_constructor([param('long unsigned int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long long unsigned int v) [constructor] cls.add_constructor([param('long long unsigned int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(std::string const & s) [constructor] cls.add_constructor([param('std::string const &', 's')]) ## nstime.h (module 'core'): ns3::Time::Time(ns3::int64x64_t const & value) [constructor] cls.add_constructor([param('ns3::int64x64_t const &', 'value')]) ## nstime.h (module 'core'): int ns3::Time::Compare(ns3::Time const & o) const [member function] cls.add_method('Compare', 'int', [param('ns3::Time const &', 'o')], is_const=True) ## nstime.h (module 'core'): static void ns3::Time::FreezeResolution() [member function] cls.add_method('FreezeResolution', 'void', [], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & from, ns3::Time::Unit timeUnit) [member function] 
cls.add_method('From', 'ns3::Time', [param('ns3::int64x64_t const &', 'from'), param('ns3::Time::Unit', 'timeUnit')], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value) [member function] cls.add_method('From', 'ns3::Time', [param('ns3::int64x64_t const &', 'value')], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromDouble(double value, ns3::Time::Unit timeUnit) [member function] cls.add_method('FromDouble', 'ns3::Time', [param('double', 'value'), param('ns3::Time::Unit', 'timeUnit')], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromInteger(uint64_t value, ns3::Time::Unit timeUnit) [member function] cls.add_method('FromInteger', 'ns3::Time', [param('uint64_t', 'value'), param('ns3::Time::Unit', 'timeUnit')], is_static=True) ## nstime.h (module 'core'): double ns3::Time::GetDouble() const [member function] cls.add_method('GetDouble', 'double', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetFemtoSeconds() const [member function] cls.add_method('GetFemtoSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetInteger() const [member function] cls.add_method('GetInteger', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetMicroSeconds() const [member function] cls.add_method('GetMicroSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetMilliSeconds() const [member function] cls.add_method('GetMilliSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetNanoSeconds() const [member function] cls.add_method('GetNanoSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetPicoSeconds() const [member function] cls.add_method('GetPicoSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): static ns3::Time::Unit ns3::Time::GetResolution() [member function] 
cls.add_method('GetResolution', 'ns3::Time::Unit', [], is_static=True) ## nstime.h (module 'core'): double ns3::Time::GetSeconds() const [member function] cls.add_method('GetSeconds', 'double', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetTimeStep() const [member function] cls.add_method('GetTimeStep', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsNegative() const [member function] cls.add_method('IsNegative', 'bool', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsPositive() const [member function] cls.add_method('IsPositive', 'bool', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyNegative() const [member function] cls.add_method('IsStrictlyNegative', 'bool', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyPositive() const [member function] cls.add_method('IsStrictlyPositive', 'bool', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsZero() const [member function] cls.add_method('IsZero', 'bool', [], is_const=True) ## nstime.h (module 'core'): static void ns3::Time::SetResolution(ns3::Time::Unit resolution) [member function] cls.add_method('SetResolution', 'void', [param('ns3::Time::Unit', 'resolution')], is_static=True) ## nstime.h (module 'core'): ns3::int64x64_t ns3::Time::To(ns3::Time::Unit timeUnit) const [member function] cls.add_method('To', 'ns3::int64x64_t', [param('ns3::Time::Unit', 'timeUnit')], is_const=True) ## nstime.h (module 'core'): double ns3::Time::ToDouble(ns3::Time::Unit timeUnit) const [member function] cls.add_method('ToDouble', 'double', [param('ns3::Time::Unit', 'timeUnit')], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::ToInteger(ns3::Time::Unit timeUnit) const [member function] cls.add_method('ToInteger', 'int64_t', [param('ns3::Time::Unit', 'timeUnit')], is_const=True) return def register_Ns3TraceSourceAccessor_methods(root_module, cls): ## trace-source-accessor.h (module 
'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [copy constructor] cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')]) ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor] cls.add_constructor([]) ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function] cls.add_method('Connect', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True) ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function] cls.add_method('ConnectWithoutContext', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True) ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function] cls.add_method('Disconnect', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True) ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function] cls.add_method('DisconnectWithoutContext', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3TriangularRandomVariable_methods(root_module, cls): ## 
random-variable-stream.h (module 'core'): static ns3::TypeId ns3::TriangularRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::TriangularRandomVariable::TriangularRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetMean() const [member function] cls.add_method('GetMean', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetMin() const [member function] cls.add_method('GetMin', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetMax() const [member function] cls.add_method('GetMax', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetValue(double mean, double min, double max) [member function] cls.add_method('GetValue', 'double', [param('double', 'mean'), param('double', 'min'), param('double', 'max')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::TriangularRandomVariable::GetInteger(uint32_t mean, uint32_t min, uint32_t max) [member function] cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'mean'), param('uint32_t', 'min'), param('uint32_t', 'max')]) ## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::TriangularRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3TwoRayGroundPropagationLossModel_methods(root_module, cls): ## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::TwoRayGroundPropagationLossModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], 
is_static=True) ## propagation-loss-model.h (module 'propagation'): ns3::TwoRayGroundPropagationLossModel::TwoRayGroundPropagationLossModel() [constructor] cls.add_constructor([]) ## propagation-loss-model.h (module 'propagation'): void ns3::TwoRayGroundPropagationLossModel::SetLambda(double frequency, double speed) [member function] cls.add_method('SetLambda', 'void', [param('double', 'frequency'), param('double', 'speed')]) ## propagation-loss-model.h (module 'propagation'): void ns3::TwoRayGroundPropagationLossModel::SetLambda(double lambda) [member function] cls.add_method('SetLambda', 'void', [param('double', 'lambda')]) ## propagation-loss-model.h (module 'propagation'): void ns3::TwoRayGroundPropagationLossModel::SetSystemLoss(double systemLoss) [member function] cls.add_method('SetSystemLoss', 'void', [param('double', 'systemLoss')]) ## propagation-loss-model.h (module 'propagation'): void ns3::TwoRayGroundPropagationLossModel::SetMinDistance(double minDistance) [member function] cls.add_method('SetMinDistance', 'void', [param('double', 'minDistance')]) ## propagation-loss-model.h (module 'propagation'): double ns3::TwoRayGroundPropagationLossModel::GetMinDistance() const [member function] cls.add_method('GetMinDistance', 'double', [], is_const=True) ## propagation-loss-model.h (module 'propagation'): double ns3::TwoRayGroundPropagationLossModel::GetLambda() const [member function] cls.add_method('GetLambda', 'double', [], is_const=True) ## propagation-loss-model.h (module 'propagation'): double ns3::TwoRayGroundPropagationLossModel::GetSystemLoss() const [member function] cls.add_method('GetSystemLoss', 'double', [], is_const=True) ## propagation-loss-model.h (module 'propagation'): void ns3::TwoRayGroundPropagationLossModel::SetHeightAboveZ(double heightAboveZ) [member function] cls.add_method('SetHeightAboveZ', 'void', [param('double', 'heightAboveZ')]) ## propagation-loss-model.h (module 'propagation'): double 
ns3::TwoRayGroundPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('DoCalcRxPower', 'double', [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True, visibility='private', is_virtual=True) ## propagation-loss-model.h (module 'propagation'): int64_t ns3::TwoRayGroundPropagationLossModel::DoAssignStreams(int64_t stream) [member function] cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')], visibility='private', is_virtual=True) return def register_Ns3UniformDiscPositionAllocator_methods(root_module, cls): ## position-allocator.h (module 'mobility'): ns3::UniformDiscPositionAllocator::UniformDiscPositionAllocator(ns3::UniformDiscPositionAllocator const & arg0) [copy constructor] cls.add_constructor([param('ns3::UniformDiscPositionAllocator const &', 'arg0')]) ## position-allocator.h (module 'mobility'): ns3::UniformDiscPositionAllocator::UniformDiscPositionAllocator() [constructor] cls.add_constructor([]) ## position-allocator.h (module 'mobility'): int64_t ns3::UniformDiscPositionAllocator::AssignStreams(int64_t stream) [member function] cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')], is_virtual=True) ## position-allocator.h (module 'mobility'): ns3::Vector ns3::UniformDiscPositionAllocator::GetNext() const [member function] cls.add_method('GetNext', 'ns3::Vector', [], is_const=True, is_virtual=True) ## position-allocator.h (module 'mobility'): static ns3::TypeId ns3::UniformDiscPositionAllocator::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## position-allocator.h (module 'mobility'): void ns3::UniformDiscPositionAllocator::SetRho(double rho) [member function] cls.add_method('SetRho', 'void', [param('double', 'rho')]) ## position-allocator.h (module 'mobility'): void 
ns3::UniformDiscPositionAllocator::SetX(double x) [member function] cls.add_method('SetX', 'void', [param('double', 'x')]) ## position-allocator.h (module 'mobility'): void ns3::UniformDiscPositionAllocator::SetY(double y) [member function] cls.add_method('SetY', 'void', [param('double', 'y')]) return def register_Ns3UniformRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::UniformRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::UniformRandomVariable::UniformRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): double ns3::UniformRandomVariable::GetMin() const [member function] cls.add_method('GetMin', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::UniformRandomVariable::GetMax() const [member function] cls.add_method('GetMax', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::UniformRandomVariable::GetValue(double min, double max) [member function] cls.add_method('GetValue', 'double', [param('double', 'min'), param('double', 'max')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::UniformRandomVariable::GetInteger(uint32_t min, uint32_t max) [member function] cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'min'), param('uint32_t', 'max')]) ## random-variable-stream.h (module 'core'): double ns3::UniformRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::UniformRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3WeibullRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::WeibullRandomVariable::GetTypeId() [member 
function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::WeibullRandomVariable::WeibullRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetScale() const [member function] cls.add_method('GetScale', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetShape() const [member function] cls.add_method('GetShape', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetBound() const [member function] cls.add_method('GetBound', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetValue(double scale, double shape, double bound) [member function] cls.add_method('GetValue', 'double', [param('double', 'scale'), param('double', 'shape'), param('double', 'bound')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::WeibullRandomVariable::GetInteger(uint32_t scale, uint32_t shape, uint32_t bound) [member function] cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'scale'), param('uint32_t', 'shape'), param('uint32_t', 'bound')]) ## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::WeibullRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3ZetaRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ZetaRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::ZetaRandomVariable::ZetaRandomVariable() [constructor] cls.add_constructor([]) ## 
random-variable-stream.h (module 'core'): double ns3::ZetaRandomVariable::GetAlpha() const [member function] cls.add_method('GetAlpha', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::ZetaRandomVariable::GetValue(double alpha) [member function] cls.add_method('GetValue', 'double', [param('double', 'alpha')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::ZetaRandomVariable::GetInteger(uint32_t alpha) [member function] cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'alpha')]) ## random-variable-stream.h (module 'core'): double ns3::ZetaRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::ZetaRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3ZipfRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ZipfRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::ZipfRandomVariable::ZipfRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): uint32_t ns3::ZipfRandomVariable::GetN() const [member function] cls.add_method('GetN', 'uint32_t', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::ZipfRandomVariable::GetAlpha() const [member function] cls.add_method('GetAlpha', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::ZipfRandomVariable::GetValue(uint32_t n, double alpha) [member function] cls.add_method('GetValue', 'double', [param('uint32_t', 'n'), param('double', 'alpha')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::ZipfRandomVariable::GetInteger(uint32_t n, uint32_t alpha) [member function] cls.add_method('GetInteger', 'uint32_t', 
[param('uint32_t', 'n'), param('uint32_t', 'alpha')]) ## random-variable-stream.h (module 'core'): double ns3::ZipfRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::ZipfRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3AttributeAccessor_methods(root_module, cls): ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')]) ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function] cls.add_method('Get', 'bool', [param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function] cls.add_method('HasGetter', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function] cls.add_method('HasSetter', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function] cls.add_method('Set', 'bool', [param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3AttributeChecker_methods(root_module, cls): ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & 
arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')]) ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function] cls.add_method('Check', 'bool', [param('ns3::AttributeValue const &', 'value')], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function] cls.add_method('Copy', 'bool', [param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function] cls.add_method('Create', 'ns3::Ptr< ns3::AttributeValue >', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function] cls.add_method('CreateValidValue', 'ns3::Ptr< ns3::AttributeValue >', [param('ns3::AttributeValue const &', 'value')], is_const=True) ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function] cls.add_method('GetUnderlyingTypeInformation', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function] cls.add_method('GetValueTypeName', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function] cls.add_method('HasUnderlyingTypeInformation', 'bool', [], is_pure_virtual=True, 
is_const=True, is_virtual=True) return def register_Ns3AttributeValue_methods(root_module, cls): ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')]) ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_pure_virtual=True, is_virtual=True) ## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3BoxChecker_methods(root_module, cls): ## box.h (module 'mobility'): ns3::BoxChecker::BoxChecker() [constructor] cls.add_constructor([]) ## box.h (module 'mobility'): ns3::BoxChecker::BoxChecker(ns3::BoxChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::BoxChecker const &', 'arg0')]) return def register_Ns3BoxValue_methods(root_module, cls): ## box.h (module 'mobility'): ns3::BoxValue::BoxValue() [constructor] cls.add_constructor([]) ## box.h (module 'mobility'): ns3::BoxValue::BoxValue(ns3::BoxValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::BoxValue const &', 'arg0')]) ## box.h (module 'mobility'): ns3::BoxValue::BoxValue(ns3::Box 
const & value) [constructor] cls.add_constructor([param('ns3::Box const &', 'value')]) ## box.h (module 'mobility'): ns3::Ptr<ns3::AttributeValue> ns3::BoxValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## box.h (module 'mobility'): bool ns3::BoxValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## box.h (module 'mobility'): ns3::Box ns3::BoxValue::Get() const [member function] cls.add_method('Get', 'ns3::Box', [], is_const=True) ## box.h (module 'mobility'): std::string ns3::BoxValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## box.h (module 'mobility'): void ns3::BoxValue::Set(ns3::Box const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Box const &', 'value')]) return def register_Ns3Building_methods(root_module, cls): ## building.h (module 'buildings'): ns3::Building::Building(ns3::Building const & arg0) [copy constructor] cls.add_constructor([param('ns3::Building const &', 'arg0')]) ## building.h (module 'buildings'): ns3::Building::Building(double xMin, double xMax, double yMin, double yMax, double zMin, double zMax) [constructor] cls.add_constructor([param('double', 'xMin'), param('double', 'xMax'), param('double', 'yMin'), param('double', 'yMax'), param('double', 'zMin'), param('double', 'zMax')]) ## building.h (module 'buildings'): ns3::Building::Building() [constructor] cls.add_constructor([]) ## building.h (module 'buildings'): void ns3::Building::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], is_virtual=True) ## 
building.h (module 'buildings'): ns3::Box ns3::Building::GetBoundaries() const [member function] cls.add_method('GetBoundaries', 'ns3::Box', [], is_const=True) ## building.h (module 'buildings'): ns3::Building::BuildingType_t ns3::Building::GetBuildingType() const [member function] cls.add_method('GetBuildingType', 'ns3::Building::BuildingType_t', [], is_const=True) ## building.h (module 'buildings'): ns3::Building::ExtWallsType_t ns3::Building::GetExtWallsType() const [member function] cls.add_method('GetExtWallsType', 'ns3::Building::ExtWallsType_t', [], is_const=True) ## building.h (module 'buildings'): uint16_t ns3::Building::GetFloor(ns3::Vector position) const [member function] cls.add_method('GetFloor', 'uint16_t', [param('ns3::Vector', 'position')], is_const=True) ## building.h (module 'buildings'): uint32_t ns3::Building::GetId() const [member function] cls.add_method('GetId', 'uint32_t', [], is_const=True) ## building.h (module 'buildings'): uint16_t ns3::Building::GetNFloors() const [member function] cls.add_method('GetNFloors', 'uint16_t', [], is_const=True) ## building.h (module 'buildings'): uint16_t ns3::Building::GetNRoomsX() const [member function] cls.add_method('GetNRoomsX', 'uint16_t', [], is_const=True) ## building.h (module 'buildings'): uint16_t ns3::Building::GetNRoomsY() const [member function] cls.add_method('GetNRoomsY', 'uint16_t', [], is_const=True) ## building.h (module 'buildings'): uint16_t ns3::Building::GetRoomX(ns3::Vector position) const [member function] cls.add_method('GetRoomX', 'uint16_t', [param('ns3::Vector', 'position')], is_const=True) ## building.h (module 'buildings'): uint16_t ns3::Building::GetRoomY(ns3::Vector position) const [member function] cls.add_method('GetRoomY', 'uint16_t', [param('ns3::Vector', 'position')], is_const=True) ## building.h (module 'buildings'): static ns3::TypeId ns3::Building::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## building.h (module 
'buildings'): bool ns3::Building::IsInside(ns3::Vector position) const [member function] cls.add_method('IsInside', 'bool', [param('ns3::Vector', 'position')], is_const=True) ## building.h (module 'buildings'): void ns3::Building::SetBoundaries(ns3::Box box) [member function] cls.add_method('SetBoundaries', 'void', [param('ns3::Box', 'box')]) ## building.h (module 'buildings'): void ns3::Building::SetBuildingType(ns3::Building::BuildingType_t t) [member function] cls.add_method('SetBuildingType', 'void', [param('ns3::Building::BuildingType_t', 't')]) ## building.h (module 'buildings'): void ns3::Building::SetExtWallsType(ns3::Building::ExtWallsType_t t) [member function] cls.add_method('SetExtWallsType', 'void', [param('ns3::Building::ExtWallsType_t', 't')]) ## building.h (module 'buildings'): void ns3::Building::SetNFloors(uint16_t nfloors) [member function] cls.add_method('SetNFloors', 'void', [param('uint16_t', 'nfloors')]) ## building.h (module 'buildings'): void ns3::Building::SetNRoomsX(uint16_t nroomx) [member function] cls.add_method('SetNRoomsX', 'void', [param('uint16_t', 'nroomx')]) ## building.h (module 'buildings'): void ns3::Building::SetNRoomsY(uint16_t nroomy) [member function] cls.add_method('SetNRoomsY', 'void', [param('uint16_t', 'nroomy')]) return def register_Ns3BuildingsPropagationLossModel_methods(root_module, cls): ## buildings-propagation-loss-model.h (module 'buildings'): ns3::BuildingsPropagationLossModel::BuildingsPropagationLossModel() [constructor] cls.add_constructor([]) ## buildings-propagation-loss-model.h (module 'buildings'): double ns3::BuildingsPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('DoCalcRxPower', 'double', [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True, is_virtual=True) ## buildings-propagation-loss-model.h (module 
'buildings'): double ns3::BuildingsPropagationLossModel::GetLoss(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('GetLoss', 'double', [param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_pure_virtual=True, is_const=True, is_virtual=True) ## buildings-propagation-loss-model.h (module 'buildings'): static ns3::TypeId ns3::BuildingsPropagationLossModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## buildings-propagation-loss-model.h (module 'buildings'): int64_t ns3::BuildingsPropagationLossModel::DoAssignStreams(int64_t stream) [member function] cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')], visibility='protected', is_virtual=True) ## buildings-propagation-loss-model.h (module 'buildings'): double ns3::BuildingsPropagationLossModel::EvaluateSigma(ns3::Ptr<ns3::BuildingsMobilityModel> a, ns3::Ptr<ns3::BuildingsMobilityModel> b) const [member function] cls.add_method('EvaluateSigma', 'double', [param('ns3::Ptr< ns3::BuildingsMobilityModel >', 'a'), param('ns3::Ptr< ns3::BuildingsMobilityModel >', 'b')], is_const=True, visibility='protected') ## buildings-propagation-loss-model.h (module 'buildings'): double ns3::BuildingsPropagationLossModel::ExternalWallLoss(ns3::Ptr<ns3::BuildingsMobilityModel> a) const [member function] cls.add_method('ExternalWallLoss', 'double', [param('ns3::Ptr< ns3::BuildingsMobilityModel >', 'a')], is_const=True, visibility='protected') ## buildings-propagation-loss-model.h (module 'buildings'): double ns3::BuildingsPropagationLossModel::GetShadowing(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('GetShadowing', 'double', [param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True, visibility='protected') ## buildings-propagation-loss-model.h (module 'buildings'): 
double ns3::BuildingsPropagationLossModel::HeightLoss(ns3::Ptr<ns3::BuildingsMobilityModel> n) const [member function] cls.add_method('HeightLoss', 'double', [param('ns3::Ptr< ns3::BuildingsMobilityModel >', 'n')], is_const=True, visibility='protected') ## buildings-propagation-loss-model.h (module 'buildings'): double ns3::BuildingsPropagationLossModel::InternalWallsLoss(ns3::Ptr<ns3::BuildingsMobilityModel> a, ns3::Ptr<ns3::BuildingsMobilityModel> b) const [member function] cls.add_method('InternalWallsLoss', 'double', [param('ns3::Ptr< ns3::BuildingsMobilityModel >', 'a'), param('ns3::Ptr< ns3::BuildingsMobilityModel >', 'b')], is_const=True, visibility='protected') return def register_Ns3CallbackChecker_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')]) return def register_Ns3CallbackImplBase_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor] cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')]) ## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3CallbackValue_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')]) ## callback.h (module 'core'): 
ns3::CallbackValue::CallbackValue() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor] cls.add_constructor([param('ns3::CallbackBase const &', 'base')]) ## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function] cls.add_method('Set', 'void', [param('ns3::CallbackBase', 'base')]) return def register_Ns3ConstantRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ConstantRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::ConstantRandomVariable::ConstantRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): double ns3::ConstantRandomVariable::GetConstant() const [member function] cls.add_method('GetConstant', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::ConstantRandomVariable::GetValue(double constant) [member function] cls.add_method('GetValue', 'double', [param('double', 
'constant')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::ConstantRandomVariable::GetInteger(uint32_t constant) [member function] cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'constant')]) ## random-variable-stream.h (module 'core'): double ns3::ConstantRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::ConstantRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3DeterministicRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::DeterministicRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::DeterministicRandomVariable::DeterministicRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): void ns3::DeterministicRandomVariable::SetValueArray(double * values, uint64_t length) [member function] cls.add_method('SetValueArray', 'void', [param('double *', 'values'), param('uint64_t', 'length')]) ## random-variable-stream.h (module 'core'): double ns3::DeterministicRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::DeterministicRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3EmpiricalRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): ns3::EmpiricalRandomVariable::EmpiricalRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): void ns3::EmpiricalRandomVariable::CDF(double v, double c) [member function] cls.add_method('CDF', 'void', [param('double', 'v'), param('double', 'c')]) ## 
random-variable-stream.h (module 'core'): uint32_t ns3::EmpiricalRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::EmpiricalRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): double ns3::EmpiricalRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): double ns3::EmpiricalRandomVariable::Interpolate(double arg0, double arg1, double arg2, double arg3, double arg4) [member function] cls.add_method('Interpolate', 'double', [param('double', 'arg0'), param('double', 'arg1'), param('double', 'arg2'), param('double', 'arg3'), param('double', 'arg4')], visibility='private', is_virtual=True) ## random-variable-stream.h (module 'core'): void ns3::EmpiricalRandomVariable::Validate() [member function] cls.add_method('Validate', 'void', [], visibility='private', is_virtual=True) return def register_Ns3EmptyAttributeValue_methods(root_module, cls): ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')]) ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, visibility='private', is_virtual=True) ## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< 
ns3::AttributeChecker const >', 'checker')], visibility='private', is_virtual=True) ## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, visibility='private', is_virtual=True) return def register_Ns3ErlangRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ErlangRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::ErlangRandomVariable::ErlangRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): uint32_t ns3::ErlangRandomVariable::GetK() const [member function] cls.add_method('GetK', 'uint32_t', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::ErlangRandomVariable::GetLambda() const [member function] cls.add_method('GetLambda', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::ErlangRandomVariable::GetValue(uint32_t k, double lambda) [member function] cls.add_method('GetValue', 'double', [param('uint32_t', 'k'), param('double', 'lambda')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::ErlangRandomVariable::GetInteger(uint32_t k, uint32_t lambda) [member function] cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'k'), param('uint32_t', 'lambda')]) ## random-variable-stream.h (module 'core'): double ns3::ErlangRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::ErlangRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def 
register_Ns3ExponentialRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ExponentialRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::ExponentialRandomVariable::ExponentialRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetMean() const [member function] cls.add_method('GetMean', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetBound() const [member function] cls.add_method('GetBound', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetValue(double mean, double bound) [member function] cls.add_method('GetValue', 'double', [param('double', 'mean'), param('double', 'bound')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::ExponentialRandomVariable::GetInteger(uint32_t mean, uint32_t bound) [member function] cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'mean'), param('uint32_t', 'bound')]) ## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::ExponentialRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3FixedRssLossModel_methods(root_module, cls): ## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::FixedRssLossModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## propagation-loss-model.h (module 'propagation'): ns3::FixedRssLossModel::FixedRssLossModel() [constructor] cls.add_constructor([]) ## propagation-loss-model.h (module 
'propagation'): void ns3::FixedRssLossModel::SetRss(double rss) [member function] cls.add_method('SetRss', 'void', [param('double', 'rss')]) ## propagation-loss-model.h (module 'propagation'): double ns3::FixedRssLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('DoCalcRxPower', 'double', [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True, visibility='private', is_virtual=True) ## propagation-loss-model.h (module 'propagation'): int64_t ns3::FixedRssLossModel::DoAssignStreams(int64_t stream) [member function] cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')], visibility='private', is_virtual=True) return def register_Ns3FriisPropagationLossModel_methods(root_module, cls): ## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::FriisPropagationLossModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## propagation-loss-model.h (module 'propagation'): ns3::FriisPropagationLossModel::FriisPropagationLossModel() [constructor] cls.add_constructor([]) ## propagation-loss-model.h (module 'propagation'): void ns3::FriisPropagationLossModel::SetLambda(double frequency, double speed) [member function] cls.add_method('SetLambda', 'void', [param('double', 'frequency'), param('double', 'speed')]) ## propagation-loss-model.h (module 'propagation'): void ns3::FriisPropagationLossModel::SetLambda(double lambda) [member function] cls.add_method('SetLambda', 'void', [param('double', 'lambda')]) ## propagation-loss-model.h (module 'propagation'): void ns3::FriisPropagationLossModel::SetSystemLoss(double systemLoss) [member function] cls.add_method('SetSystemLoss', 'void', [param('double', 'systemLoss')]) ## propagation-loss-model.h (module 'propagation'): void ns3::FriisPropagationLossModel::SetMinDistance(double 
minDistance) [member function] cls.add_method('SetMinDistance', 'void', [param('double', 'minDistance')]) ## propagation-loss-model.h (module 'propagation'): double ns3::FriisPropagationLossModel::GetMinDistance() const [member function] cls.add_method('GetMinDistance', 'double', [], is_const=True) ## propagation-loss-model.h (module 'propagation'): double ns3::FriisPropagationLossModel::GetLambda() const [member function] cls.add_method('GetLambda', 'double', [], is_const=True) ## propagation-loss-model.h (module 'propagation'): double ns3::FriisPropagationLossModel::GetSystemLoss() const [member function] cls.add_method('GetSystemLoss', 'double', [], is_const=True) ## propagation-loss-model.h (module 'propagation'): double ns3::FriisPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('DoCalcRxPower', 'double', [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True, visibility='private', is_virtual=True) ## propagation-loss-model.h (module 'propagation'): int64_t ns3::FriisPropagationLossModel::DoAssignStreams(int64_t stream) [member function] cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')], visibility='private', is_virtual=True) return def register_Ns3GammaRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::GammaRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::GammaRandomVariable::GammaRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): double ns3::GammaRandomVariable::GetAlpha() const [member function] cls.add_method('GetAlpha', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double 
ns3::GammaRandomVariable::GetBeta() const [member function] cls.add_method('GetBeta', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::GammaRandomVariable::GetValue(double alpha, double beta) [member function] cls.add_method('GetValue', 'double', [param('double', 'alpha'), param('double', 'beta')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::GammaRandomVariable::GetInteger(uint32_t alpha, uint32_t beta) [member function] cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'alpha'), param('uint32_t', 'beta')]) ## random-variable-stream.h (module 'core'): double ns3::GammaRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::GammaRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3GridBuildingAllocator_methods(root_module, cls): ## building-allocator.h (module 'buildings'): ns3::GridBuildingAllocator::GridBuildingAllocator(ns3::GridBuildingAllocator const & arg0) [copy constructor] cls.add_constructor([param('ns3::GridBuildingAllocator const &', 'arg0')]) ## building-allocator.h (module 'buildings'): ns3::GridBuildingAllocator::GridBuildingAllocator() [constructor] cls.add_constructor([]) ## building-allocator.h (module 'buildings'): ns3::BuildingContainer ns3::GridBuildingAllocator::Create(uint32_t n) const [member function] cls.add_method('Create', 'ns3::BuildingContainer', [param('uint32_t', 'n')], is_const=True) ## building-allocator.h (module 'buildings'): static ns3::TypeId ns3::GridBuildingAllocator::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## building-allocator.h (module 'buildings'): void ns3::GridBuildingAllocator::SetBuildingAttribute(std::string n, ns3::AttributeValue const & v) [member function] cls.add_method('SetBuildingAttribute', 'void', 
[param('std::string', 'n'), param('ns3::AttributeValue const &', 'v')]) return def register_Ns3GridPositionAllocator_methods(root_module, cls): ## position-allocator.h (module 'mobility'): ns3::GridPositionAllocator::GridPositionAllocator(ns3::GridPositionAllocator const & arg0) [copy constructor] cls.add_constructor([param('ns3::GridPositionAllocator const &', 'arg0')]) ## position-allocator.h (module 'mobility'): ns3::GridPositionAllocator::GridPositionAllocator() [constructor] cls.add_constructor([]) ## position-allocator.h (module 'mobility'): int64_t ns3::GridPositionAllocator::AssignStreams(int64_t stream) [member function] cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')], is_virtual=True) ## position-allocator.h (module 'mobility'): double ns3::GridPositionAllocator::GetDeltaX() const [member function] cls.add_method('GetDeltaX', 'double', [], is_const=True) ## position-allocator.h (module 'mobility'): double ns3::GridPositionAllocator::GetDeltaY() const [member function] cls.add_method('GetDeltaY', 'double', [], is_const=True) ## position-allocator.h (module 'mobility'): ns3::GridPositionAllocator::LayoutType ns3::GridPositionAllocator::GetLayoutType() const [member function] cls.add_method('GetLayoutType', 'ns3::GridPositionAllocator::LayoutType', [], is_const=True) ## position-allocator.h (module 'mobility'): double ns3::GridPositionAllocator::GetMinX() const [member function] cls.add_method('GetMinX', 'double', [], is_const=True) ## position-allocator.h (module 'mobility'): double ns3::GridPositionAllocator::GetMinY() const [member function] cls.add_method('GetMinY', 'double', [], is_const=True) ## position-allocator.h (module 'mobility'): uint32_t ns3::GridPositionAllocator::GetN() const [member function] cls.add_method('GetN', 'uint32_t', [], is_const=True) ## position-allocator.h (module 'mobility'): ns3::Vector ns3::GridPositionAllocator::GetNext() const [member function] cls.add_method('GetNext', 'ns3::Vector', [], 
is_const=True, is_virtual=True) ## position-allocator.h (module 'mobility'): static ns3::TypeId ns3::GridPositionAllocator::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## position-allocator.h (module 'mobility'): void ns3::GridPositionAllocator::SetDeltaX(double deltaX) [member function] cls.add_method('SetDeltaX', 'void', [param('double', 'deltaX')]) ## position-allocator.h (module 'mobility'): void ns3::GridPositionAllocator::SetDeltaY(double deltaY) [member function] cls.add_method('SetDeltaY', 'void', [param('double', 'deltaY')]) ## position-allocator.h (module 'mobility'): void ns3::GridPositionAllocator::SetLayoutType(ns3::GridPositionAllocator::LayoutType layoutType) [member function] cls.add_method('SetLayoutType', 'void', [param('ns3::GridPositionAllocator::LayoutType', 'layoutType')]) ## position-allocator.h (module 'mobility'): void ns3::GridPositionAllocator::SetMinX(double xMin) [member function] cls.add_method('SetMinX', 'void', [param('double', 'xMin')]) ## position-allocator.h (module 'mobility'): void ns3::GridPositionAllocator::SetMinY(double yMin) [member function] cls.add_method('SetMinY', 'void', [param('double', 'yMin')]) ## position-allocator.h (module 'mobility'): void ns3::GridPositionAllocator::SetN(uint32_t n) [member function] cls.add_method('SetN', 'void', [param('uint32_t', 'n')]) return def register_Ns3HybridBuildingsPropagationLossModel_methods(root_module, cls): ## hybrid-buildings-propagation-loss-model.h (module 'buildings'): static ns3::TypeId ns3::HybridBuildingsPropagationLossModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## hybrid-buildings-propagation-loss-model.h (module 'buildings'): ns3::HybridBuildingsPropagationLossModel::HybridBuildingsPropagationLossModel() [constructor] cls.add_constructor([]) ## hybrid-buildings-propagation-loss-model.h (module 'buildings'): void 
ns3::HybridBuildingsPropagationLossModel::SetEnvironment(ns3::EnvironmentType env) [member function] cls.add_method('SetEnvironment', 'void', [param('ns3::EnvironmentType', 'env')]) ## hybrid-buildings-propagation-loss-model.h (module 'buildings'): void ns3::HybridBuildingsPropagationLossModel::SetCitySize(ns3::CitySize size) [member function] cls.add_method('SetCitySize', 'void', [param('ns3::CitySize', 'size')]) ## hybrid-buildings-propagation-loss-model.h (module 'buildings'): void ns3::HybridBuildingsPropagationLossModel::SetFrequency(double freq) [member function] cls.add_method('SetFrequency', 'void', [param('double', 'freq')]) ## hybrid-buildings-propagation-loss-model.h (module 'buildings'): void ns3::HybridBuildingsPropagationLossModel::SetRooftopHeight(double rooftopHeight) [member function] cls.add_method('SetRooftopHeight', 'void', [param('double', 'rooftopHeight')]) ## hybrid-buildings-propagation-loss-model.h (module 'buildings'): double ns3::HybridBuildingsPropagationLossModel::GetLoss(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('GetLoss', 'double', [param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True, is_virtual=True) return def register_Ns3Ipv4AddressChecker_methods(root_module, cls): ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker(ns3::Ipv4AddressChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4AddressChecker const &', 'arg0')]) return def register_Ns3Ipv4AddressValue_methods(root_module, cls): ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4AddressValue const & arg0) [copy constructor] 
cls.add_constructor([param('ns3::Ipv4AddressValue const &', 'arg0')]) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4Address const & value) [constructor] cls.add_constructor([param('ns3::Ipv4Address const &', 'value')]) ## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4AddressValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4AddressValue::Get() const [member function] cls.add_method('Get', 'ns3::Ipv4Address', [], is_const=True) ## ipv4-address.h (module 'network'): std::string ns3::Ipv4AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4AddressValue::Set(ns3::Ipv4Address const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Ipv4Address const &', 'value')]) return def register_Ns3Ipv4MaskChecker_methods(root_module, cls): ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker(ns3::Ipv4MaskChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4MaskChecker const &', 'arg0')]) return def register_Ns3Ipv4MaskValue_methods(root_module, cls): ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue() 
[constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4MaskValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4MaskValue const &', 'arg0')]) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4Mask const & value) [constructor] cls.add_constructor([param('ns3::Ipv4Mask const &', 'value')]) ## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4MaskValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4MaskValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask ns3::Ipv4MaskValue::Get() const [member function] cls.add_method('Get', 'ns3::Ipv4Mask', [], is_const=True) ## ipv4-address.h (module 'network'): std::string ns3::Ipv4MaskValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4MaskValue::Set(ns3::Ipv4Mask const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Ipv4Mask const &', 'value')]) return def register_Ns3Ipv6AddressChecker_methods(root_module, cls): ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker(ns3::Ipv6AddressChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv6AddressChecker const &', 
'arg0')]) return def register_Ns3Ipv6AddressValue_methods(root_module, cls): ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6AddressValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv6AddressValue const &', 'arg0')]) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6Address const & value) [constructor] cls.add_constructor([param('ns3::Ipv6Address const &', 'value')]) ## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6AddressValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6AddressValue::Get() const [member function] cls.add_method('Get', 'ns3::Ipv6Address', [], is_const=True) ## ipv6-address.h (module 'network'): std::string ns3::Ipv6AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6AddressValue::Set(ns3::Ipv6Address const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Ipv6Address const &', 'value')]) return def register_Ns3Ipv6PrefixChecker_methods(root_module, cls): ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker() [constructor] cls.add_constructor([]) ## 
ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker(ns3::Ipv6PrefixChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv6PrefixChecker const &', 'arg0')]) return def register_Ns3Ipv6PrefixValue_methods(root_module, cls): ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6PrefixValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv6PrefixValue const &', 'arg0')]) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6Prefix const & value) [constructor] cls.add_constructor([param('ns3::Ipv6Prefix const &', 'value')]) ## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6PrefixValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6PrefixValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix ns3::Ipv6PrefixValue::Get() const [member function] cls.add_method('Get', 'ns3::Ipv6Prefix', [], is_const=True) ## ipv6-address.h (module 'network'): std::string ns3::Ipv6PrefixValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6PrefixValue::Set(ns3::Ipv6Prefix const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Ipv6Prefix const &', 'value')]) return def 
register_Ns3ItuR1238PropagationLossModel_methods(root_module, cls): ## itu-r-1238-propagation-loss-model.h (module 'buildings'): ns3::ItuR1238PropagationLossModel::ItuR1238PropagationLossModel() [constructor] cls.add_constructor([]) ## itu-r-1238-propagation-loss-model.h (module 'buildings'): static ns3::TypeId ns3::ItuR1238PropagationLossModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## itu-r-1238-propagation-loss-model.h (module 'buildings'): double ns3::ItuR1238PropagationLossModel::GetLoss(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('GetLoss', 'double', [param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True) ## itu-r-1238-propagation-loss-model.h (module 'buildings'): double ns3::ItuR1238PropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('DoCalcRxPower', 'double', [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True, visibility='private', is_virtual=True) ## itu-r-1238-propagation-loss-model.h (module 'buildings'): int64_t ns3::ItuR1238PropagationLossModel::DoAssignStreams(int64_t stream) [member function] cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')], visibility='private', is_virtual=True) return def register_Ns3ListPositionAllocator_methods(root_module, cls): ## position-allocator.h (module 'mobility'): ns3::ListPositionAllocator::ListPositionAllocator(ns3::ListPositionAllocator const & arg0) [copy constructor] cls.add_constructor([param('ns3::ListPositionAllocator const &', 'arg0')]) ## position-allocator.h (module 'mobility'): ns3::ListPositionAllocator::ListPositionAllocator() [constructor] cls.add_constructor([]) ## position-allocator.h (module 'mobility'): void 
ns3::ListPositionAllocator::Add(ns3::Vector v) [member function] cls.add_method('Add', 'void', [param('ns3::Vector', 'v')]) ## position-allocator.h (module 'mobility'): int64_t ns3::ListPositionAllocator::AssignStreams(int64_t stream) [member function] cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')], is_virtual=True) ## position-allocator.h (module 'mobility'): ns3::Vector ns3::ListPositionAllocator::GetNext() const [member function] cls.add_method('GetNext', 'ns3::Vector', [], is_const=True, is_virtual=True) ## position-allocator.h (module 'mobility'): static ns3::TypeId ns3::ListPositionAllocator::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) return def register_Ns3LogDistancePropagationLossModel_methods(root_module, cls): ## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::LogDistancePropagationLossModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## propagation-loss-model.h (module 'propagation'): ns3::LogDistancePropagationLossModel::LogDistancePropagationLossModel() [constructor] cls.add_constructor([]) ## propagation-loss-model.h (module 'propagation'): void ns3::LogDistancePropagationLossModel::SetPathLossExponent(double n) [member function] cls.add_method('SetPathLossExponent', 'void', [param('double', 'n')]) ## propagation-loss-model.h (module 'propagation'): double ns3::LogDistancePropagationLossModel::GetPathLossExponent() const [member function] cls.add_method('GetPathLossExponent', 'double', [], is_const=True) ## propagation-loss-model.h (module 'propagation'): void ns3::LogDistancePropagationLossModel::SetReference(double referenceDistance, double referenceLoss) [member function] cls.add_method('SetReference', 'void', [param('double', 'referenceDistance'), param('double', 'referenceLoss')]) ## propagation-loss-model.h (module 'propagation'): double ns3::LogDistancePropagationLossModel::DoCalcRxPower(double 
txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('DoCalcRxPower', 'double', [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True, visibility='private', is_virtual=True) ## propagation-loss-model.h (module 'propagation'): int64_t ns3::LogDistancePropagationLossModel::DoAssignStreams(int64_t stream) [member function] cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')], visibility='private', is_virtual=True) return def register_Ns3LogNormalRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::LogNormalRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::LogNormalRandomVariable::LogNormalRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetMu() const [member function] cls.add_method('GetMu', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetSigma() const [member function] cls.add_method('GetSigma', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetValue(double mu, double sigma) [member function] cls.add_method('GetValue', 'double', [param('double', 'mu'), param('double', 'sigma')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::LogNormalRandomVariable::GetInteger(uint32_t mu, uint32_t sigma) [member function] cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'mu'), param('uint32_t', 'sigma')]) ## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): 
uint32_t ns3::LogNormalRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3MatrixPropagationLossModel_methods(root_module, cls): ## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::MatrixPropagationLossModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## propagation-loss-model.h (module 'propagation'): ns3::MatrixPropagationLossModel::MatrixPropagationLossModel() [constructor] cls.add_constructor([]) ## propagation-loss-model.h (module 'propagation'): void ns3::MatrixPropagationLossModel::SetLoss(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b, double loss, bool symmetric=true) [member function] cls.add_method('SetLoss', 'void', [param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b'), param('double', 'loss'), param('bool', 'symmetric', default_value='true')]) ## propagation-loss-model.h (module 'propagation'): void ns3::MatrixPropagationLossModel::SetDefaultLoss(double arg0) [member function] cls.add_method('SetDefaultLoss', 'void', [param('double', 'arg0')]) ## propagation-loss-model.h (module 'propagation'): double ns3::MatrixPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('DoCalcRxPower', 'double', [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True, visibility='private', is_virtual=True) ## propagation-loss-model.h (module 'propagation'): int64_t ns3::MatrixPropagationLossModel::DoAssignStreams(int64_t stream) [member function] cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')], visibility='private', is_virtual=True) return def register_Ns3MobilityModel_methods(root_module, cls): ## mobility-model.h (module 'mobility'): 
ns3::MobilityModel::MobilityModel(ns3::MobilityModel const & arg0) [copy constructor] cls.add_constructor([param('ns3::MobilityModel const &', 'arg0')]) ## mobility-model.h (module 'mobility'): ns3::MobilityModel::MobilityModel() [constructor] cls.add_constructor([]) ## mobility-model.h (module 'mobility'): int64_t ns3::MobilityModel::AssignStreams(int64_t stream) [member function] cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')]) ## mobility-model.h (module 'mobility'): double ns3::MobilityModel::GetDistanceFrom(ns3::Ptr<const ns3::MobilityModel> position) const [member function] cls.add_method('GetDistanceFrom', 'double', [param('ns3::Ptr< ns3::MobilityModel const >', 'position')], is_const=True) ## mobility-model.h (module 'mobility'): ns3::Vector ns3::MobilityModel::GetPosition() const [member function] cls.add_method('GetPosition', 'ns3::Vector', [], is_const=True) ## mobility-model.h (module 'mobility'): double ns3::MobilityModel::GetRelativeSpeed(ns3::Ptr<const ns3::MobilityModel> other) const [member function] cls.add_method('GetRelativeSpeed', 'double', [param('ns3::Ptr< ns3::MobilityModel const >', 'other')], is_const=True) ## mobility-model.h (module 'mobility'): static ns3::TypeId ns3::MobilityModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## mobility-model.h (module 'mobility'): ns3::Vector ns3::MobilityModel::GetVelocity() const [member function] cls.add_method('GetVelocity', 'ns3::Vector', [], is_const=True) ## mobility-model.h (module 'mobility'): void ns3::MobilityModel::SetPosition(ns3::Vector const & position) [member function] cls.add_method('SetPosition', 'void', [param('ns3::Vector const &', 'position')]) ## mobility-model.h (module 'mobility'): void ns3::MobilityModel::NotifyCourseChange() const [member function] cls.add_method('NotifyCourseChange', 'void', [], is_const=True, visibility='protected') ## mobility-model.h (module 'mobility'): int64_t 
ns3::MobilityModel::DoAssignStreams(int64_t start) [member function] cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'start')], visibility='private', is_virtual=True) ## mobility-model.h (module 'mobility'): ns3::Vector ns3::MobilityModel::DoGetPosition() const [member function] cls.add_method('DoGetPosition', 'ns3::Vector', [], is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True) ## mobility-model.h (module 'mobility'): ns3::Vector ns3::MobilityModel::DoGetVelocity() const [member function] cls.add_method('DoGetVelocity', 'ns3::Vector', [], is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True) ## mobility-model.h (module 'mobility'): void ns3::MobilityModel::DoSetPosition(ns3::Vector const & position) [member function] cls.add_method('DoSetPosition', 'void', [param('ns3::Vector const &', 'position')], is_pure_virtual=True, visibility='private', is_virtual=True) return def register_Ns3NakagamiPropagationLossModel_methods(root_module, cls): ## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::NakagamiPropagationLossModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## propagation-loss-model.h (module 'propagation'): ns3::NakagamiPropagationLossModel::NakagamiPropagationLossModel() [constructor] cls.add_constructor([]) ## propagation-loss-model.h (module 'propagation'): double ns3::NakagamiPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('DoCalcRxPower', 'double', [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True, visibility='private', is_virtual=True) ## propagation-loss-model.h (module 'propagation'): int64_t ns3::NakagamiPropagationLossModel::DoAssignStreams(int64_t stream) [member function] cls.add_method('DoAssignStreams', 'int64_t', 
[param('int64_t', 'stream')], visibility='private', is_virtual=True) return def register_Ns3NetDevice_methods(root_module, cls): ## net-device.h (module 'network'): ns3::NetDevice::NetDevice() [constructor] cls.add_constructor([]) ## net-device.h (module 'network'): ns3::NetDevice::NetDevice(ns3::NetDevice const & arg0) [copy constructor] cls.add_constructor([param('ns3::NetDevice const &', 'arg0')]) ## net-device.h (module 'network'): void ns3::NetDevice::AddLinkChangeCallback(ns3::Callback<void,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> callback) [member function] cls.add_method('AddLinkChangeCallback', 'void', [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetAddress() const [member function] cls.add_method('GetAddress', 'ns3::Address', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetBroadcast() const [member function] cls.add_method('GetBroadcast', 'ns3::Address', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): ns3::Ptr<ns3::Channel> ns3::NetDevice::GetChannel() const [member function] cls.add_method('GetChannel', 'ns3::Ptr< ns3::Channel >', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): uint32_t ns3::NetDevice::GetIfIndex() const [member function] cls.add_method('GetIfIndex', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): uint16_t ns3::NetDevice::GetMtu() const [member function] cls.add_method('GetMtu', 'uint16_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv4Address 
multicastGroup) const [member function] cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv4Address', 'multicastGroup')], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function] cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv6Address', 'addr')], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NetDevice::GetNode() const [member function] cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): static ns3::TypeId ns3::NetDevice::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## net-device.h (module 'network'): bool ns3::NetDevice::IsBridge() const [member function] cls.add_method('IsBridge', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::IsBroadcast() const [member function] cls.add_method('IsBroadcast', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::IsLinkUp() const [member function] cls.add_method('IsLinkUp', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::IsMulticast() const [member function] cls.add_method('IsMulticast', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::IsPointToPoint() const [member function] cls.add_method('IsPointToPoint', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::NeedsArp() const [member function] cls.add_method('NeedsArp', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): 
bool ns3::NetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function] cls.add_method('Send', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function] cls.add_method('SendFrom', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): void ns3::NetDevice::SetAddress(ns3::Address address) [member function] cls.add_method('SetAddress', 'void', [param('ns3::Address', 'address')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): void ns3::NetDevice::SetIfIndex(uint32_t const index) [member function] cls.add_method('SetIfIndex', 'void', [param('uint32_t const', 'index')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::SetMtu(uint16_t const mtu) [member function] cls.add_method('SetMtu', 'bool', [param('uint16_t const', 'mtu')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): void ns3::NetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function] cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): void ns3::NetDevice::SetPromiscReceiveCallback(ns3::Callback<bool,ns3::Ptr<ns3::NetDevice>,ns3::Ptr<const ns3::Packet>,short unsigned int,const ns3::Address&,const ns3::Address&,ns3::NetDevice::PacketType,ns3::empty,ns3::empty,ns3::empty> cb) [member function] cls.add_method('SetPromiscReceiveCallback', 'void', [param('ns3::Callback< bool, 
ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, short unsigned int, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): void ns3::NetDevice::SetReceiveCallback(ns3::Callback<bool,ns3::Ptr<ns3::NetDevice>,ns3::Ptr<const ns3::Packet>,short unsigned int,const ns3::Address&,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> cb) [member function] cls.add_method('SetReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, short unsigned int, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::SupportsSendFrom() const [member function] cls.add_method('SupportsSendFrom', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3Node_methods(root_module, cls): ## node.h (module 'network'): ns3::Node::Node(ns3::Node const & arg0) [copy constructor] cls.add_constructor([param('ns3::Node const &', 'arg0')]) ## node.h (module 'network'): ns3::Node::Node() [constructor] cls.add_constructor([]) ## node.h (module 'network'): ns3::Node::Node(uint32_t systemId) [constructor] cls.add_constructor([param('uint32_t', 'systemId')]) ## node.h (module 'network'): uint32_t ns3::Node::AddApplication(ns3::Ptr<ns3::Application> application) [member function] cls.add_method('AddApplication', 'uint32_t', [param('ns3::Ptr< ns3::Application >', 'application')]) ## node.h (module 'network'): uint32_t ns3::Node::AddDevice(ns3::Ptr<ns3::NetDevice> device) [member function] cls.add_method('AddDevice', 'uint32_t', [param('ns3::Ptr< ns3::NetDevice >', 'device')]) ## node.h (module 'network'): static bool ns3::Node::ChecksumEnabled() [member function] cls.add_method('ChecksumEnabled', 'bool', [], is_static=True) ## node.h 
(module 'network'): ns3::Ptr<ns3::Application> ns3::Node::GetApplication(uint32_t index) const [member function] cls.add_method('GetApplication', 'ns3::Ptr< ns3::Application >', [param('uint32_t', 'index')], is_const=True) ## node.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Node::GetDevice(uint32_t index) const [member function] cls.add_method('GetDevice', 'ns3::Ptr< ns3::NetDevice >', [param('uint32_t', 'index')], is_const=True) ## node.h (module 'network'): uint32_t ns3::Node::GetId() const [member function] cls.add_method('GetId', 'uint32_t', [], is_const=True) ## node.h (module 'network'): uint32_t ns3::Node::GetNApplications() const [member function] cls.add_method('GetNApplications', 'uint32_t', [], is_const=True) ## node.h (module 'network'): uint32_t ns3::Node::GetNDevices() const [member function] cls.add_method('GetNDevices', 'uint32_t', [], is_const=True) ## node.h (module 'network'): uint32_t ns3::Node::GetSystemId() const [member function] cls.add_method('GetSystemId', 'uint32_t', [], is_const=True) ## node.h (module 'network'): static ns3::TypeId ns3::Node::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## node.h (module 'network'): void ns3::Node::RegisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function] cls.add_method('RegisterDeviceAdditionListener', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')]) ## node.h (module 'network'): void ns3::Node::RegisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler, uint16_t protocolType, ns3::Ptr<ns3::NetDevice> device, bool 
promiscuous=false) [member function] cls.add_method('RegisterProtocolHandler', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler'), param('uint16_t', 'protocolType'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'promiscuous', default_value='false')]) ## node.h (module 'network'): void ns3::Node::UnregisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function] cls.add_method('UnregisterDeviceAdditionListener', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')]) ## node.h (module 'network'): void ns3::Node::UnregisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler) [member function] cls.add_method('UnregisterProtocolHandler', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler')]) ## node.h (module 'network'): void ns3::Node::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True) ## node.h (module 'network'): void ns3::Node::DoStart() [member function] cls.add_method('DoStart', 'void', [], visibility='protected', is_virtual=True) return def register_Ns3NormalRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): ns3::NormalRandomVariable::INFINITE_VALUE [variable] 
cls.add_static_attribute('INFINITE_VALUE', 'double const', is_const=True) ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::NormalRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::NormalRandomVariable::NormalRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetMean() const [member function] cls.add_method('GetMean', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetVariance() const [member function] cls.add_method('GetVariance', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetBound() const [member function] cls.add_method('GetBound', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetValue(double mean, double variance, double bound=ns3::NormalRandomVariable::INFINITE_VALUE) [member function] cls.add_method('GetValue', 'double', [param('double', 'mean'), param('double', 'variance'), param('double', 'bound', default_value='ns3::NormalRandomVariable::INFINITE_VALUE')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::NormalRandomVariable::GetInteger(uint32_t mean, uint32_t variance, uint32_t bound) [member function] cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'mean'), param('uint32_t', 'variance'), param('uint32_t', 'bound')]) ## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::NormalRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3ObjectFactoryChecker_methods(root_module, cls): ## object-factory.h 
(module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker() [constructor] cls.add_constructor([]) ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker(ns3::ObjectFactoryChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectFactoryChecker const &', 'arg0')]) return def register_Ns3ObjectFactoryValue_methods(root_module, cls): ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue() [constructor] cls.add_constructor([]) ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactoryValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectFactoryValue const &', 'arg0')]) ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactory const & value) [constructor] cls.add_constructor([param('ns3::ObjectFactory const &', 'value')]) ## object-factory.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::ObjectFactoryValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## object-factory.h (module 'core'): bool ns3::ObjectFactoryValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## object-factory.h (module 'core'): ns3::ObjectFactory ns3::ObjectFactoryValue::Get() const [member function] cls.add_method('Get', 'ns3::ObjectFactory', [], is_const=True) ## object-factory.h (module 'core'): std::string ns3::ObjectFactoryValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## object-factory.h (module 'core'): void 
ns3::ObjectFactoryValue::Set(ns3::ObjectFactory const & value) [member function] cls.add_method('Set', 'void', [param('ns3::ObjectFactory const &', 'value')]) return def register_Ns3OhBuildingsPropagationLossModel_methods(root_module, cls): ## oh-buildings-propagation-loss-model.h (module 'buildings'): static ns3::TypeId ns3::OhBuildingsPropagationLossModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## oh-buildings-propagation-loss-model.h (module 'buildings'): ns3::OhBuildingsPropagationLossModel::OhBuildingsPropagationLossModel() [constructor] cls.add_constructor([]) ## oh-buildings-propagation-loss-model.h (module 'buildings'): double ns3::OhBuildingsPropagationLossModel::GetLoss(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('GetLoss', 'double', [param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True, is_virtual=True) return def register_Ns3ParetoRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ParetoRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::ParetoRandomVariable::ParetoRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetMean() const [member function] cls.add_method('GetMean', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetShape() const [member function] cls.add_method('GetShape', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetBound() const [member function] cls.add_method('GetBound', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetValue(double mean, double 
shape, double bound) [member function] cls.add_method('GetValue', 'double', [param('double', 'mean'), param('double', 'shape'), param('double', 'bound')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::ParetoRandomVariable::GetInteger(uint32_t mean, uint32_t shape, uint32_t bound) [member function] cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'mean'), param('uint32_t', 'shape'), param('uint32_t', 'bound')]) ## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::ParetoRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3TimeChecker_methods(root_module, cls): ## nstime.h (module 'core'): ns3::TimeChecker::TimeChecker() [constructor] cls.add_constructor([]) ## nstime.h (module 'core'): ns3::TimeChecker::TimeChecker(ns3::TimeChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::TimeChecker const &', 'arg0')]) return def register_Ns3TimeValue_methods(root_module, cls): ## nstime.h (module 'core'): ns3::TimeValue::TimeValue() [constructor] cls.add_constructor([]) ## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::TimeValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::TimeValue const &', 'arg0')]) ## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::Time const & value) [constructor] cls.add_constructor([param('ns3::Time const &', 'value')]) ## nstime.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TimeValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## nstime.h (module 'core'): bool ns3::TimeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', 
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## nstime.h (module 'core'): ns3::Time ns3::TimeValue::Get() const [member function] cls.add_method('Get', 'ns3::Time', [], is_const=True) ## nstime.h (module 'core'): std::string ns3::TimeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## nstime.h (module 'core'): void ns3::TimeValue::Set(ns3::Time const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Time const &', 'value')]) return def register_Ns3TypeIdChecker_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')]) return def register_Ns3TypeIdValue_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')]) ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor] cls.add_constructor([param('ns3::TypeId const &', 'value')]) ## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), 
param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function] cls.add_method('Get', 'ns3::TypeId', [], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function] cls.add_method('Set', 'void', [param('ns3::TypeId const &', 'value')]) return def register_Ns3Vector2DChecker_methods(root_module, cls): ## vector.h (module 'core'): ns3::Vector2DChecker::Vector2DChecker() [constructor] cls.add_constructor([]) ## vector.h (module 'core'): ns3::Vector2DChecker::Vector2DChecker(ns3::Vector2DChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::Vector2DChecker const &', 'arg0')]) return def register_Ns3Vector2DValue_methods(root_module, cls): ## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue() [constructor] cls.add_constructor([]) ## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue(ns3::Vector2DValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::Vector2DValue const &', 'arg0')]) ## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue(ns3::Vector2D const & value) [constructor] cls.add_constructor([param('ns3::Vector2D const &', 'value')]) ## vector.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::Vector2DValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## vector.h (module 'core'): bool ns3::Vector2DValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', 
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## vector.h (module 'core'): ns3::Vector2D ns3::Vector2DValue::Get() const [member function] cls.add_method('Get', 'ns3::Vector2D', [], is_const=True) ## vector.h (module 'core'): std::string ns3::Vector2DValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## vector.h (module 'core'): void ns3::Vector2DValue::Set(ns3::Vector2D const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Vector2D const &', 'value')]) return def register_Ns3Vector3DChecker_methods(root_module, cls): ## vector.h (module 'core'): ns3::Vector3DChecker::Vector3DChecker() [constructor] cls.add_constructor([]) ## vector.h (module 'core'): ns3::Vector3DChecker::Vector3DChecker(ns3::Vector3DChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::Vector3DChecker const &', 'arg0')]) return def register_Ns3Vector3DValue_methods(root_module, cls): ## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue() [constructor] cls.add_constructor([]) ## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue(ns3::Vector3DValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::Vector3DValue const &', 'arg0')]) ## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue(ns3::Vector3D const & value) [constructor] cls.add_constructor([param('ns3::Vector3D const &', 'value')]) ## vector.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::Vector3DValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## vector.h (module 'core'): bool ns3::Vector3DValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] 
cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## vector.h (module 'core'): ns3::Vector3D ns3::Vector3DValue::Get() const [member function] cls.add_method('Get', 'ns3::Vector3D', [], is_const=True) ## vector.h (module 'core'): std::string ns3::Vector3DValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## vector.h (module 'core'): void ns3::Vector3DValue::Set(ns3::Vector3D const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Vector3D const &', 'value')]) return def register_Ns3AddressChecker_methods(root_module, cls): ## address.h (module 'network'): ns3::AddressChecker::AddressChecker() [constructor] cls.add_constructor([]) ## address.h (module 'network'): ns3::AddressChecker::AddressChecker(ns3::AddressChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::AddressChecker const &', 'arg0')]) return def register_Ns3AddressValue_methods(root_module, cls): ## address.h (module 'network'): ns3::AddressValue::AddressValue() [constructor] cls.add_constructor([]) ## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::AddressValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::AddressValue const &', 'arg0')]) ## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::Address const & value) [constructor] cls.add_constructor([param('ns3::Address const &', 'value')]) ## address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::AddressValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## address.h (module 'network'): bool ns3::AddressValue::DeserializeFromString(std::string value, 
ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## address.h (module 'network'): ns3::Address ns3::AddressValue::Get() const [member function] cls.add_method('Get', 'ns3::Address', [], is_const=True) ## address.h (module 'network'): std::string ns3::AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## address.h (module 'network'): void ns3::AddressValue::Set(ns3::Address const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Address const &', 'value')]) return def register_Ns3BuildingsMobilityModel_methods(root_module, cls): ## buildings-mobility-model.h (module 'buildings'): ns3::BuildingsMobilityModel::BuildingsMobilityModel(ns3::BuildingsMobilityModel const & arg0) [copy constructor] cls.add_constructor([param('ns3::BuildingsMobilityModel const &', 'arg0')]) ## buildings-mobility-model.h (module 'buildings'): ns3::BuildingsMobilityModel::BuildingsMobilityModel() [constructor] cls.add_constructor([]) ## buildings-mobility-model.h (module 'buildings'): ns3::Ptr<ns3::Building> ns3::BuildingsMobilityModel::GetBuilding() [member function] cls.add_method('GetBuilding', 'ns3::Ptr< ns3::Building >', []) ## buildings-mobility-model.h (module 'buildings'): uint8_t ns3::BuildingsMobilityModel::GetFloorNumber() [member function] cls.add_method('GetFloorNumber', 'uint8_t', []) ## buildings-mobility-model.h (module 'buildings'): uint8_t ns3::BuildingsMobilityModel::GetRoomNumberX() [member function] cls.add_method('GetRoomNumberX', 'uint8_t', []) ## buildings-mobility-model.h (module 'buildings'): uint8_t ns3::BuildingsMobilityModel::GetRoomNumberY() [member function] 
cls.add_method('GetRoomNumberY', 'uint8_t', []) ## buildings-mobility-model.h (module 'buildings'): static ns3::TypeId ns3::BuildingsMobilityModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## buildings-mobility-model.h (module 'buildings'): bool ns3::BuildingsMobilityModel::IsIndoor() [member function] cls.add_method('IsIndoor', 'bool', []) ## buildings-mobility-model.h (module 'buildings'): bool ns3::BuildingsMobilityModel::IsOutdoor() [member function] cls.add_method('IsOutdoor', 'bool', []) ## buildings-mobility-model.h (module 'buildings'): void ns3::BuildingsMobilityModel::SetIndoor(ns3::Ptr<ns3::Building> building, uint8_t nfloor, uint8_t nroomx, uint8_t nroomy) [member function] cls.add_method('SetIndoor', 'void', [param('ns3::Ptr< ns3::Building >', 'building'), param('uint8_t', 'nfloor'), param('uint8_t', 'nroomx'), param('uint8_t', 'nroomy')]) ## buildings-mobility-model.h (module 'buildings'): void ns3::BuildingsMobilityModel::SetOutdoor() [member function] cls.add_method('SetOutdoor', 'void', []) ## buildings-mobility-model.h (module 'buildings'): void ns3::BuildingsMobilityModel::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], visibility='private', is_virtual=True) ## buildings-mobility-model.h (module 'buildings'): ns3::Vector ns3::BuildingsMobilityModel::DoGetPosition() const [member function] cls.add_method('DoGetPosition', 'ns3::Vector', [], is_const=True, visibility='private', is_virtual=True) ## buildings-mobility-model.h (module 'buildings'): ns3::Vector ns3::BuildingsMobilityModel::DoGetVelocity() const [member function] cls.add_method('DoGetVelocity', 'ns3::Vector', [], is_const=True, visibility='private', is_virtual=True) ## buildings-mobility-model.h (module 'buildings'): void ns3::BuildingsMobilityModel::DoSetPosition(ns3::Vector const & position) [member function] cls.add_method('DoSetPosition', 'void', [param('ns3::Vector const &', 'position')], visibility='private', 
is_virtual=True) return def register_functions(root_module): module = root_module register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module) return def register_functions_ns3_FatalImpl(module, root_module): return def main(): out = FileCodeSink(sys.stdout) root_module = module_init() register_types(root_module) register_methods(root_module) register_functions(root_module) root_module.generate(out) if __name__ == '__main__': main()
unknown
codeparrot/codeparrot-clean
import operator

from numpy.fft.helper import fftshift, ifftshift, fftfreq

import scipy.fft._pocketfft.helper as _helper
import numpy as np

__all__ = ['fftshift', 'ifftshift', 'fftfreq', 'rfftfreq', 'next_fast_len']


def rfftfreq(n, d=1.0):
    """Return the sample frequencies for use with ``rfft`` / ``irfft``.

    The frequencies are in cycles/unit (zero first) for a window of
    length `n` and sample spacing `d`::

        f = [0, 1, 1, 2, 2, ..., n/2-1, n/2-1, n/2] / (d*n)        if n is even
        f = [0, 1, 1, 2, 2, ..., n/2-1, n/2-1, n/2, n/2] / (d*n)   if n is odd

    Parameters
    ----------
    n : int
        Window length; must be nonnegative.
    d : scalar, optional
        Sample spacing.  Default is 1.

    Returns
    -------
    out : ndarray
        Array of length `n` holding the sample frequencies.

    Examples
    --------
    >>> from scipy import fftpack
    >>> sig = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=float)
    >>> sig_fft = fftpack.rfft(sig)
    >>> n = sig_fft.size
    >>> timestep = 0.1
    >>> freq = fftpack.rfftfreq(n, d=timestep)
    >>> freq
    array([ 0.  ,  1.25,  1.25,  2.5 ,  2.5 ,  3.75,  3.75,  5.  ])
    """
    n = operator.index(n)
    if n < 0:
        raise ValueError("n = %s is not valid. "
                         "n must be a nonnegative integer." % n)
    # Each positive frequency (except possibly the Nyquist bin) appears
    # twice in rfft's packed real/imaginary layout, hence the // 2.
    bin_index = np.arange(1, n + 1, dtype=int) // 2
    return bin_index / float(n * d)


def next_fast_len(target):
    """Find the next fast size of input data to `fft`, for zero-padding, etc.

    SciPy's FFTPACK has efficient functions for radix {2, 3, 4, 5}, so this
    returns the next composite of the prime factors 2, 3, and 5 which is
    greater than or equal to `target` (also known as a 5-smooth, regular,
    or Hamming number).

    Parameters
    ----------
    target : int
        Length to start searching from.  Must be a positive integer.

    Returns
    -------
    out : int
        The first 5-smooth number greater than or equal to `target`.

    Notes
    -----
    .. versionadded:: 0.18.0

    Examples
    --------
    >>> from scipy import fftpack
    >>> fftpack.helper.next_fast_len(10007)
    10125
    """
    # Real transforms use regular sizes so this is backwards compatible
    return _helper.good_size(target, True)


def _good_shape(x, shape, axes):
    """Ensure that the `shape` argument is valid for scipy.fftpack.

    scipy.fftpack does not support ``len(shape) < x.ndim`` when `axes`
    is not given, so that combination is rejected here.
    """
    if shape is None or axes is not None:
        return shape
    shape = _helper._iterable_of_int(shape, 'shape')
    if len(shape) != np.ndim(x):
        raise ValueError("when given, axes and shape arguments"
                         " have to be of the same length")
    return shape
unknown
codeparrot/codeparrot-clean
# Copyright (c) 2014, DjaoDjin inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Packaging script for the djaodjin-pages Django app."""

from distutils.core import setup

import pages

# Static assets and HTML templates shipped with the installed package.
PACKAGE_DATA = {
    'pages': [
        'static/vendor/css/*',
        'static/vendor/js/*',
        'templates/pages/*.html',
    ],
}

setup(
    name='djaodjin-pages',
    version=pages.__version__,
    author='The DjaoDjin Team',
    author_email='support@djaodjin.com',
    packages=['pages', 'pages.api', 'pages.urls'],
    package_data=PACKAGE_DATA,
    license='BSD',
    description='Pages Django app',
)
unknown
codeparrot/codeparrot-clean
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A Transformed Distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib.distributions.python.ops import distribution as distributions
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.ops import math_ops

_condition_kwargs_dict = {
    "bijector_kwargs": ("Python dictionary of arg names/values "
                        "forwarded to the bijector."),
    "distribution_kwargs": ("Python dictionary of arg names/values "
                            "forwarded to the distribution."),
}


class TransformedDistribution(distributions.Distribution):
  """A Transformed Distribution.

  A `TransformedDistribution` models `p(y)` given a base distribution `p(x)`,
  and a deterministic, invertible, differentiable transform, `Y = g(X)`. The
  transform is typically an instance of the `Bijector` class and the base
  distribution is typically an instance of the `Distribution` class.

  A `Bijector` is expected to implement the following functions:
  - `forward`,
  - `inverse`,
  - `inverse_log_det_jacobian`.
  The semantics of these functions are outlined in the `Bijector` documentation.

  Shapes, type, and reparameterization are taken from the base distribution.

  Write `P(Y=y)` for cumulative density function of random variable (rv) `Y`
  and `p` for its derivative wrt to `Y`. Assume that `Y=g(X)` where `g` is
  continuous and `X=g^{-1}(Y)`. Write `J` for the Jacobian (of some function).

  A `TransformedDistribution` alters the input/outputs of a `Distribution`
  associated with rv `X` in the following ways:

  * `sample`:

    Mathematically:

    ```none
    Y = g(X)
    ```

    Programmatically:

    ```python
    return bijector.forward(distribution.sample(...))
    ```

  * `log_prob`:

    Mathematically:

    ```none
    (log o p o g^{-1})(y) + (log o det o J o g^{-1})(y)
    ```

    Programmatically:

    ```python
    return (bijector.inverse_log_det_jacobian(y) +
            distribution.log_prob(bijector.inverse(y)))
    ```

  * `log_cdf`:

    Mathematically:

    ```none
    (log o P o g^{-1})(y)
    ```

    Programmatically:

    ```python
    return distribution.log_cdf(bijector.inverse(y))
    ```

  * and similarly for: `cdf`, `prob`, `log_survival_function`,
    `survival_function`.

  A simple example constructing a Log-Normal distribution from a Normal
  distribution:

  ```python
  ds = tf.contrib.distributions
  log_normal = ds.TransformedDistribution(
      distribution=ds.Normal(mu=mu, sigma=sigma),
      bijector=ds.bijector.Exp(),
      name="LogNormalTransformedDistribution")
  ```

  A `LogNormal` made from callables:

  ```python
  ds = tf.contrib.distributions
  log_normal = ds.TransformedDistribution(
      distribution=ds.Normal(mu=mu, sigma=sigma),
      bijector=ds.bijector.Inline(
          forward_fn=tf.exp,
          inverse_fn=tf.log,
          inverse_log_det_jacobian_fn=(
              lambda y: -tf.reduce_sum(tf.log(y), reduction_indices=-1))),
      name="LogNormalTransformedDistribution")
  ```

  Another example constructing a Normal from a StandardNormal:

  ```python
  ds = tf.contrib.distributions
  normal = ds.TransformedDistribution(
      distribution=ds.Normal(mu=0, sigma=1),
      bijector=ds.bijector.ScaleAndShift(loc=mu, scale=sigma, event_ndims=0),
      name="NormalTransformedDistribution")
  ```
  """

  def __init__(self,
               distribution,
               bijector,
               validate_args=False,
               name=None):
    """Construct a Transformed Distribution.

    Args:
      distribution: The base distribution instance to transform. Typically an
        instance of `Distribution`.
      bijector: The object responsible for calculating the transformation.
        Typically an instance of `Bijector`.
      validate_args: Python boolean.  Whether to validate input with asserts.
        If `validate_args` is `False`, and the inputs are invalid,
        correct behavior is not guaranteed.
      name: The name for the distribution. Default:
        `bijector.name + distribution.name`.
    """
    parameters = locals()
    parameters.pop("self")
    name = name or bijector.name + distribution.name
    self._distribution = distribution
    self._bijector = bijector
    super(TransformedDistribution, self).__init__(
        dtype=self._distribution.dtype,
        is_continuous=self._distribution.is_continuous,
        is_reparameterized=self._distribution.is_reparameterized,
        validate_args=validate_args,
        allow_nan_stats=self._distribution.allow_nan_stats,
        parameters=parameters,
        # We let TransformedDistribution access _graph_parents since this class
        # is more like a baseclass than derived.
        graph_parents=(distribution._graph_parents +  # pylint: disable=protected-access
                       bijector.graph_parents),
        name=name)

  @property
  def distribution(self):
    """Base distribution, p(x)."""
    return self._distribution

  @property
  def bijector(self):
    """Function transforming x => y."""
    return self._bijector

  def _event_shape(self):
    return self.bijector.forward_event_shape(
        self.distribution.event_shape())

  def _get_event_shape(self):
    return self.bijector.get_forward_event_shape(
        self.distribution.get_event_shape())

  def _batch_shape(self):
    return self.distribution.batch_shape()

  def _get_batch_shape(self):
    return self.distribution.get_batch_shape()

  @distribution_util.AppendDocstring(
      """Samples from the base distribution and then passes through
      the bijector's forward transform.""",
      condition_kwargs_dict=_condition_kwargs_dict)
  def _sample_n(self, n, seed=None,
                bijector_kwargs=None, distribution_kwargs=None):
    bijector_kwargs = bijector_kwargs or {}
    distribution_kwargs = distribution_kwargs or {}
    x = self.distribution.sample(sample_shape=n, seed=seed,
                                 **distribution_kwargs)
    # Recall that a bijector is named for its forward transform, i.e.,
    # `Y = g(X)`, so `forward` maps base samples into the transformed space.
    return self.bijector.forward(x, **bijector_kwargs)

  @distribution_util.AppendDocstring(
      """Implements `(log o p o g^{-1})(y) + (log o det o J o g^{-1})(y)`,
      where `g^{-1}` is the inverse of `transform`.

      Also raises a `ValueError` if `inverse` was not provided to the
      distribution and `y` was not returned from `sample`.""",
      condition_kwargs_dict=_condition_kwargs_dict)
  def _log_prob(self, y, bijector_kwargs=None, distribution_kwargs=None):
    bijector_kwargs = bijector_kwargs or {}
    distribution_kwargs = distribution_kwargs or {}
    x, ildj = self.bijector.inverse_and_inverse_log_det_jacobian(
        y, **bijector_kwargs)
    return ildj + self.distribution.log_prob(x, **distribution_kwargs)

  @distribution_util.AppendDocstring(
      """Implements `p(g^{-1}(y)) det|J(g^{-1}(y))|`, where `g^{-1}` is the
      inverse of `transform`.

      Also raises a `ValueError` if `inverse` was not provided to the
      distribution and `y` was not returned from `sample`.""",
      condition_kwargs_dict=_condition_kwargs_dict)
  def _prob(self, y, bijector_kwargs=None, distribution_kwargs=None):
    bijector_kwargs = bijector_kwargs or {}
    distribution_kwargs = distribution_kwargs or {}
    x, ildj = self.bijector.inverse_and_inverse_log_det_jacobian(
        y, **bijector_kwargs)
    return math_ops.exp(ildj) * self.distribution.prob(x,
                                                       **distribution_kwargs)

  @distribution_util.AppendDocstring(
      condition_kwargs_dict=_condition_kwargs_dict)
  def _log_cdf(self, y, bijector_kwargs=None, distribution_kwargs=None):
    bijector_kwargs = bijector_kwargs or {}
    distribution_kwargs = distribution_kwargs or {}
    x = self.bijector.inverse(y, **bijector_kwargs)
    return self.distribution.log_cdf(x, **distribution_kwargs)

  @distribution_util.AppendDocstring(
      condition_kwargs_dict=_condition_kwargs_dict)
  def _cdf(self, y, bijector_kwargs=None, distribution_kwargs=None):
    bijector_kwargs = bijector_kwargs or {}
    distribution_kwargs = distribution_kwargs or {}
    x = self.bijector.inverse(y, **bijector_kwargs)
    return self.distribution.cdf(x, **distribution_kwargs)

  @distribution_util.AppendDocstring(
      condition_kwargs_dict=_condition_kwargs_dict)
  def _log_survival_function(self, y,
                             bijector_kwargs=None, distribution_kwargs=None):
    bijector_kwargs = bijector_kwargs or {}
    distribution_kwargs = distribution_kwargs or {}
    x = self.bijector.inverse(y, **bijector_kwargs)
    return self.distribution.log_survival_function(x, **distribution_kwargs)

  @distribution_util.AppendDocstring(
      condition_kwargs_dict=_condition_kwargs_dict)
  def _survival_function(self, y,
                         bijector_kwargs=None, distribution_kwargs=None):
    bijector_kwargs = bijector_kwargs or {}
    distribution_kwargs = distribution_kwargs or {}
    x = self.bijector.inverse(y, **bijector_kwargs)
    return self.distribution.survival_function(x, **distribution_kwargs)
unknown
codeparrot/codeparrot-clean
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations

from collections.abc import Callable
from datetime import datetime
from enum import Enum
from functools import cached_property, lru_cache
from time import sleep
from typing import TYPE_CHECKING, NoReturn

from sqlalchemy import Index, Integer, String, case, select
from sqlalchemy.exc import OperationalError
from sqlalchemy.orm import Mapped, backref, foreign, mapped_column, relationship
from sqlalchemy.orm.session import make_transient

from airflow._shared.observability.metrics.stats import Stats
from airflow._shared.timezones import timezone
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.executors.executor_loader import ExecutorLoader
from airflow.listeners.listener import get_listener_manager
from airflow.models.base import ID_LEN, Base
from airflow.observability.trace import DebugTrace, add_debug_span
from airflow.utils.helpers import convert_camel_to_snake
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.net import get_hostname
from airflow.utils.platform import getuser
from airflow.utils.session import NEW_SESSION, create_session, provide_session
from airflow.utils.sqlalchemy import UtcDateTime


class JobState(str, Enum):
    """All possible states that a Job can be in."""

    RUNNING = "running"
    SUCCESS = "success"
    RESTARTING = "restarting"
    FAILED = "failed"

    def __str__(self) -> str:
        return self.value


if TYPE_CHECKING:
    from sqlalchemy.orm.session import Session

    from airflow.executors.base_executor import BaseExecutor


def _resolve_dagrun_model():
    # Deferred import -- presumably to avoid a circular import with
    # airflow.models.dagrun; confirm before hoisting to module level.
    from airflow.models.dagrun import DagRun

    return DagRun


@lru_cache
def health_check_threshold(job_type: str, heartrate: int) -> int | float:
    """Return the liveness threshold in seconds for the given job type.

    Scheduler, triggerer and dag-processor jobs read their thresholds from
    configuration; every other job type falls back to ``heartrate * 2.1``.
    Results are cached per (job_type, heartrate) pair via ``lru_cache``.
    """
    grace_multiplier = 2.1
    health_check_threshold_value: int | float
    if job_type == "SchedulerJob":
        health_check_threshold_value = conf.getint("scheduler", "scheduler_health_check_threshold")
    elif job_type == "TriggererJob":
        health_check_threshold_value = conf.getfloat("triggerer", "triggerer_health_check_threshold")
    elif job_type == "DagProcessorJob":
        health_check_threshold_value = conf.getint("dag_processor", "health_check_threshold")
    else:
        health_check_threshold_value = heartrate * grace_multiplier
    return health_check_threshold_value


class Job(Base, LoggingMixin):
    """
    The ORM class representing Job stored in the database.

    Jobs are processing items with state and duration that aren't task instances.
    """

    __tablename__ = "job"

    id: Mapped[int] = mapped_column(Integer, primary_key=True)
    dag_id: Mapped[str | None] = mapped_column(
        String(ID_LEN),
    )
    state: Mapped[str | None] = mapped_column(String(20))
    job_type: Mapped[str | None] = mapped_column(String(30))
    start_date: Mapped[datetime | None] = mapped_column(UtcDateTime())
    end_date: Mapped[datetime | None] = mapped_column(UtcDateTime())
    latest_heartbeat: Mapped[datetime | None] = mapped_column(UtcDateTime())
    executor_class: Mapped[str | None] = mapped_column(String(500))
    hostname: Mapped[str | None] = mapped_column(String(500))
    unixname: Mapped[str | None] = mapped_column(String(1000))

    __table_args__ = (
        Index("job_type_heart", job_type, latest_heartbeat),
        Index("idx_job_state_heartbeat", state, latest_heartbeat),
        Index("idx_job_dag_id", dag_id),
    )

    task_instances_enqueued = relationship(
        "TaskInstance",
        primaryjoin="Job.id == foreign(TaskInstance.queued_by_job_id)",
        backref=backref("queued_by_job", uselist=False),
    )

    dag_runs = relationship(
        "DagRun",
        primaryjoin=lambda: Job.id == foreign(_resolve_dagrun_model().creating_job_id),
        backref="creating_job",
    )

    dag_model = relationship(
        "DagModel",
        primaryjoin="Job.dag_id == DagModel.dag_id",
        viewonly=True,
        foreign_keys=[dag_id],
    )

    """
    TaskInstances which have been enqueued by this Job.

    Only makes sense for SchedulerJob.
    """

    def __init__(self, executor: BaseExecutor | None = None, heartrate=None, **kwargs):
        # Save init parameters as DB fields
        self.heartbeat_failed = False
        self.hostname = get_hostname()
        if executor:
            # Explicit executor overrides the lazily-loaded default set
            # (see the `executors` cached_property below).
            self.executors = [executor]
        self.start_date = timezone.utcnow()
        self.latest_heartbeat = timezone.utcnow()
        self.previous_heartbeat = None
        if heartrate is not None:
            self.heartrate = heartrate
        self.unixname = getuser()
        self.max_tis_per_query: int = conf.getint("scheduler", "max_tis_per_query")
        try:
            get_listener_manager().hook.on_starting(component=self)
        except Exception:
            # Listener failures must never prevent the job from starting.
            self.log.exception("error calling listener")
        super().__init__(**kwargs)

    @property
    def executor(self):
        """The first (primary) executor configured for this job."""
        return self.executors[0]

    @cached_property
    def executors(self):
        """All configured executors, loaded lazily on first access."""
        return ExecutorLoader.init_executors()

    @cached_property
    def heartrate(self) -> float:
        return Job._heartrate(str(self.job_type))

    def is_alive(self) -> bool:
        """
        Is this job currently alive.

        We define alive as in a state of RUNNING, and having sent a heartbeat
        within a multiple of the heartrate (default of 2.1)
        """
        threshold_value = health_check_threshold(self.job_type, self.heartrate)
        return Job._is_alive(
            state=self.state,
            health_check_threshold_value=threshold_value,
            latest_heartbeat=self.latest_heartbeat,
        )

    @provide_session
    def kill(self, session: Session = NEW_SESSION) -> NoReturn:
        """Handle on_kill callback and updates state in database."""
        try:
            self.on_kill()
        except Exception as e:
            self.log.error("on_kill() method failed: %s", e)

        job = session.scalar(select(Job).where(Job.id == self.id).limit(1))
        if job is not None:
            job.end_date = timezone.utcnow()
            session.merge(job)
            session.commit()
        # Always raises: callers rely on this terminating the job.
        raise AirflowException("Job shut down externally.")

    def on_kill(self):
        """Will be called when an external kill command is received."""

    @provide_session
    def heartbeat(
        self, heartbeat_callback: Callable[[Session], None], session: Session = NEW_SESSION
    ) -> None:
        """
        Update the job's entry in the database with the latest_heartbeat timestamp.

        This allows for the job to be killed externally and allows the system
        to monitor what is actually active.  For instance, an old heartbeat
        for SchedulerJob would mean something is wrong.  This also allows for
        any job to be killed externally, regardless of who is running it or on
        which machine it is running.

        Note that if your heart rate is set to 60 seconds and you call this
        method after 10 seconds of processing since the last heartbeat, it
        will sleep 50 seconds to complete the 60 seconds and keep a steady
        heart rate.  If you go over 60 seconds before calling it, it won't
        sleep at all.

        :param heartbeat_callback: Callback that will be run when the heartbeat is recorded in the Job
        :param session to use for saving the job
        """
        previous_heartbeat = self.latest_heartbeat

        with DebugTrace.start_span(span_name="heartbeat", component="Job") as span:
            try:
                span.set_attribute("heartbeat", str(self.latest_heartbeat))
                # This will cause it to load from the db
                session.merge(self)
                previous_heartbeat = self.latest_heartbeat

                if self.state == JobState.RESTARTING:
                    # kill() always raises, aborting the heartbeat.
                    self.kill()

                # Figure out how long to sleep for
                sleep_for: float = 0
                if self.latest_heartbeat:
                    seconds_remaining = (
                        self.heartrate - (timezone.utcnow() - self.latest_heartbeat).total_seconds()
                    )
                    sleep_for = max(0, seconds_remaining)
                if span.is_recording():
                    span.add_event(name="sleep", attributes={"sleep_for": sleep_for})
                sleep(sleep_for)

                # Update last heartbeat time
                # NOTE(review): this inner session shadows the
                # @provide_session argument; the callback below runs against
                # the create_session() session -- confirm this is intended.
                with create_session() as session:
                    # Make the session aware of this object
                    session.merge(self)
                    self.latest_heartbeat = timezone.utcnow()
                    session.commit()
                    time_since_last_heartbeat: float = (
                        0
                        if previous_heartbeat is None
                        else (timezone.utcnow() - previous_heartbeat).total_seconds()
                    )
                    health_check_threshold_value = health_check_threshold(self.job_type, self.heartrate)
                    if time_since_last_heartbeat > health_check_threshold_value:
                        self.log.info("Heartbeat recovered after %.2f seconds", time_since_last_heartbeat)
                    # At this point, the DB has updated.
                    previous_heartbeat = self.latest_heartbeat

                    heartbeat_callback(session)
                    self.log.debug("[heartbeat]")
                self.heartbeat_failed = False
            except OperationalError:
                Stats.incr(convert_camel_to_snake(self.__class__.__name__) + "_heartbeat_failure", 1, 1)
                if not self.heartbeat_failed:
                    # Log the full traceback only on the first failure in a row.
                    self.log.exception("%s heartbeat failed with error", self.__class__.__name__)
                    self.heartbeat_failed = True
                    msg = f"{self.__class__.__name__} heartbeat got an exception"
                    if span.is_recording():
                        span.add_event(name="error", attributes={"message": msg})
                if self.is_alive():
                    self.log.error(
                        "%s heartbeat failed with error. Scheduler may go into unhealthy state",
                        self.__class__.__name__,
                    )
                    msg = f"{self.__class__.__name__} heartbeat failed with error. Scheduler may go into unhealthy state"
                    if span.is_recording():
                        span.add_event(name="error", attributes={"message": msg})
                else:
                    msg = f"{self.__class__.__name__} heartbeat failed with error. Scheduler is in unhealthy state"
                    self.log.error(msg)
                    if span.is_recording():
                        span.add_event(name="error", attributes={"message": msg})
                # We didn't manage to heartbeat, so make sure that the timestamp isn't updated
                self.latest_heartbeat = previous_heartbeat

    @provide_session
    def prepare_for_execution(self, session: Session = NEW_SESSION):
        """Prepare the job for execution."""
        Stats.incr(self.__class__.__name__.lower() + "_start", 1, 1)
        self.state = JobState.RUNNING
        self.start_date = timezone.utcnow()
        session.add(self)
        session.commit()
        make_transient(self)

    @provide_session
    def complete_execution(self, session: Session = NEW_SESSION):
        try:
            get_listener_manager().hook.before_stopping(component=self)
        except Exception:
            self.log.exception("error calling listener")
        self.end_date = timezone.utcnow()
        session.merge(self)
        session.commit()
        Stats.incr(self.__class__.__name__.lower() + "_end", 1, 1)

    @provide_session
    def most_recent_job(self, session: Session = NEW_SESSION) -> Job | None:
        """Return the most recent job of this type, if any, based on last heartbeat received."""
        return most_recent_job(str(self.job_type), session=session)

    @staticmethod
    def _heartrate(job_type: str) -> float:
        if job_type == "TriggererJob":
            return conf.getfloat("triggerer", "JOB_HEARTBEAT_SEC")
        if job_type == "SchedulerJob":
            return conf.getfloat("scheduler", "SCHEDULER_HEARTBEAT_SEC")
        # Heartrate used to be hardcoded to scheduler, so in all other
        # cases continue to use that value for back compat
        return conf.getfloat("scheduler", "JOB_HEARTBEAT_SEC")

    @staticmethod
    def _is_alive(
        state: JobState | str | None,
        health_check_threshold_value: float | int,
        latest_heartbeat: datetime | None,
    ) -> bool:
        if latest_heartbeat is None:
            return False
        return (
            state == JobState.RUNNING
            and (timezone.utcnow() - latest_heartbeat).total_seconds() < health_check_threshold_value
        )


@provide_session
def most_recent_job(job_type: str, session: Session = NEW_SESSION) -> Job | None:
    """
    Return the most recent job of this type, if any, based on last heartbeat received.

    Jobs in "running" state take precedence over others to make sure alive
    job is returned if it is available.

    :param job_type: job type to query for to get the most recent job for
    :param session: Database session
    """
    return session.scalar(
        select(Job)
        .where(Job.job_type == job_type)
        .order_by(
            # Put "running" jobs at the front.
            case({JobState.RUNNING: 0}, value=Job.state, else_=1),
            Job.latest_heartbeat.desc(),
        )
        .limit(1)
    )


@provide_session
def run_job(
    job: Job, execute_callable: Callable[[], int | None], session: Session = NEW_SESSION
) -> int | None:
    """
    Run the job.

    The Job is always an ORM object and setting the state is happening within the
    same DB session and the session is kept open throughout the whole execution.

    :meta private:
    """
    job.prepare_for_execution(session=session)
    try:
        return execute_job(job, execute_callable=execute_callable)
    finally:
        # State is persisted even when execute_callable raises.
        job.complete_execution(session=session)


def execute_job(job: Job, execute_callable: Callable[[], int | None]) -> int | None:
    """
    Execute the job.

    Job execution requires no session as generally executing session
    does not require an active database connection. The session might
    be temporary acquired and used if the job runs heartbeat during
    execution, but this connection is only acquired for the time of heartbeat
    and in case of AIP-44 implementation it happens over the Internal API
    rather than directly via the database.

    After the job is completed, state of the Job is updated and it should be
    updated in the database, which happens in the "complete_execution" step
    (which again can be executed locally in case of database operations or
    over the Internal API call.

    :param job: Job to execute - DB job. It does not really matter, because except of running
        the heartbeat and state setting, the runner should not modify the job state.

    :param execute_callable: callable to execute when running the job.

    :meta private:
    """
    ret = None
    try:
        ret = execute_callable()
        # In case of max runs or max duration
        job.state = JobState.SUCCESS
    except SystemExit:
        # In case of ^C or SIGTERM
        job.state = JobState.SUCCESS
    except Exception:
        job.state = JobState.FAILED
        raise
    return ret


@add_debug_span
def perform_heartbeat(
    job: Job, heartbeat_callback: Callable[[Session], None], only_if_necessary: bool
) -> None:
    """
    Perform heartbeat for the Job passed to it, optionally checking if it is necessary.

    :param job: job to perform heartbeat for
    :param heartbeat_callback: callback to run by the heartbeat
    :param only_if_necessary: only heartbeat if it is necessary (i.e. if there are things to run for
        triggerer for example)
    """
    seconds_remaining: float = 0.0
    if job.latest_heartbeat and job.heartrate:
        seconds_remaining = job.heartrate - (timezone.utcnow() - job.latest_heartbeat).total_seconds()
    if seconds_remaining > 0 and only_if_necessary:
        # Skip: the previous heartbeat is still fresh enough.
        return
    job.heartbeat(heartbeat_callback=heartbeat_callback)
python
github
https://github.com/apache/airflow
airflow-core/src/airflow/jobs/job.py
# Copyright 2011-2013 Colin Scott
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import sys
import os.path
sys.path.append(os.path.dirname(__file__) + "/../../..")

from sts.topology import MeshTopology
from pox.openflow.software_switch import SoftwareSwitch
from pox.openflow.libopenflow_01 import *
from config.invariant_checks import check_for_two_loop


class MockSimulation(object):
  """Minimal stand-in for a simulation: only exposes the `topology`
  attribute that check_for_two_loop reads."""
  def __init__(self, topology):
    self.topology = topology


class InvariantCheckTest(unittest.TestCase):
  # BUG FIX: this method was named `basic_test`, which does not match
  # unittest's `test*` discovery prefix, so it was silently never run.
  def test_basic(self):
    """A flow that forwards back out its in_port must be flagged as a loop."""
    topo = MeshTopology(num_switches=2)
    # Forwarding out the same port the packet arrived on creates a two-loop.
    message = ofp_flow_mod(match=ofp_match(in_port=1, nw_src="1.1.1.1"),
                           action=ofp_action_output(port=1))
    topo.switches[1].table.process_flow_mod(message)
    simulation = MockSimulation(topo)
    violations = check_for_two_loop(simulation)
    self.assertNotEqual(violations, [])

  def test_no_loop(self):
    """A flow that forwards out a different port must not be flagged."""
    topo = MeshTopology(num_switches=2)
    message = ofp_flow_mod(match=ofp_match(in_port=1, nw_src="1.1.1.1"),
                           action=ofp_action_output(port=2))
    topo.switches[1].table.process_flow_mod(message)
    simulation = MockSimulation(topo)
    violations = check_for_two_loop(simulation)
    self.assertEqual(violations, [])


if __name__ == '__main__':
  unittest.main()
unknown
codeparrot/codeparrot-clean
// Configuration shared with both libm and libm-test use std::env; use std::path::PathBuf; #[allow(dead_code)] pub struct Config { pub manifest_dir: PathBuf, pub out_dir: PathBuf, pub opt_level: String, pub cargo_features: Vec<String>, pub target_arch: String, pub target_env: String, pub target_family: Option<String>, pub target_os: String, pub target_string: String, pub target_vendor: String, pub target_features: Vec<String>, } impl Config { pub fn from_env() -> Self { let target_features = env::var("CARGO_CFG_TARGET_FEATURE") .map(|feats| feats.split(',').map(ToOwned::to_owned).collect()) .unwrap_or_default(); let cargo_features = env::vars() .filter_map(|(name, _value)| name.strip_prefix("CARGO_FEATURE_").map(ToOwned::to_owned)) .map(|s| s.to_lowercase().replace("_", "-")) .collect(); Self { manifest_dir: PathBuf::from(env::var("CARGO_MANIFEST_DIR").unwrap()), out_dir: PathBuf::from(env::var("OUT_DIR").unwrap()), opt_level: env::var("OPT_LEVEL").unwrap(), cargo_features, target_arch: env::var("CARGO_CFG_TARGET_ARCH").unwrap(), target_env: env::var("CARGO_CFG_TARGET_ENV").unwrap(), target_family: env::var("CARGO_CFG_TARGET_FAMILY").ok(), target_os: env::var("CARGO_CFG_TARGET_OS").unwrap(), target_string: env::var("TARGET").unwrap(), target_vendor: env::var("CARGO_CFG_TARGET_VENDOR").unwrap(), target_features, } } } /// Libm gets most config options made available. #[allow(dead_code)] pub fn emit_libm_config(cfg: &Config) { emit_intrinsics_cfg(); emit_arch_cfg(); emit_optimization_cfg(cfg); emit_cfg_shorthands(cfg); emit_cfg_env(cfg); emit_f16_f128_cfg(cfg); } /// Tests don't need most feature-related config. #[allow(dead_code)] pub fn emit_test_config(cfg: &Config) { emit_optimization_cfg(cfg); emit_cfg_shorthands(cfg); emit_cfg_env(cfg); emit_f16_f128_cfg(cfg); } /// Simplify the feature logic for enabling intrinsics so code only needs to use /// `cfg(intrinsics_enabled)`. 
fn emit_intrinsics_cfg() { println!("cargo:rustc-check-cfg=cfg(intrinsics_enabled)"); // Disabled by default; `unstable-intrinsics` enables again; `force-soft-floats` overrides // to disable. if cfg!(feature = "unstable-intrinsics") && !cfg!(feature = "force-soft-floats") { println!("cargo:rustc-cfg=intrinsics_enabled"); } } /// Simplify the feature logic for enabling arch-specific features so code only needs to use /// `cfg(arch_enabled)`. fn emit_arch_cfg() { println!("cargo:rustc-check-cfg=cfg(arch_enabled)"); // Enabled by default via the "arch" feature, `force-soft-floats` overrides to disable. if cfg!(feature = "arch") && !cfg!(feature = "force-soft-floats") { println!("cargo:rustc-cfg=arch_enabled"); } } /// Some tests are extremely slow. Emit a config option based on optimization level. fn emit_optimization_cfg(cfg: &Config) { println!("cargo:rustc-check-cfg=cfg(optimizations_enabled)"); if !matches!(cfg.opt_level.as_str(), "0" | "1") { println!("cargo:rustc-cfg=optimizations_enabled"); } } /// Provide an alias for common longer config combinations. fn emit_cfg_shorthands(cfg: &Config) { println!("cargo:rustc-check-cfg=cfg(x86_no_sse)"); if cfg.target_arch == "x86" && !cfg.target_features.iter().any(|f| f == "sse") { // Shorthand to detect i586 targets println!("cargo:rustc-cfg=x86_no_sse"); } } /// Reemit config that we make use of for test logging. fn emit_cfg_env(cfg: &Config) { println!( "cargo:rustc-env=CFG_CARGO_FEATURES={:?}", cfg.cargo_features ); println!("cargo:rustc-env=CFG_OPT_LEVEL={}", cfg.opt_level); println!( "cargo:rustc-env=CFG_TARGET_FEATURES={:?}", cfg.target_features ); } /// Configure whether or not `f16` and `f128` support should be enabled. fn emit_f16_f128_cfg(cfg: &Config) { println!("cargo:rustc-check-cfg=cfg(f16_enabled)"); println!("cargo:rustc-check-cfg=cfg(f128_enabled)"); // `unstable-float` enables these features. 
if !cfg!(feature = "unstable-float") { return; } // Set whether or not `f16` and `f128` are supported at a basic level by LLVM. This only means // that the backend will not crash when using these types and generates code that can be called // without crashing (no infinite recursion). This does not mean that the platform doesn't have // ABI or other bugs. // // We do this here rather than in `rust-lang/rust` because configuring via cargo features is // not straightforward. // // Original source of this list: // <https://github.com/rust-lang/compiler-builtins/pull/652#issuecomment-2266151350> let f16_enabled = match cfg.target_arch.as_str() { // Unsupported <https://github.com/llvm/llvm-project/issues/94434> "arm64ec" => false, // Selection failure <https://github.com/llvm/llvm-project/issues/50374> "s390x" => false, // Infinite recursion <https://github.com/llvm/llvm-project/issues/97981> // FIXME(llvm): loongarch fixed by <https://github.com/llvm/llvm-project/pull/107791> "csky" => false, "hexagon" => false, "loongarch64" => false, "mips" | "mips64" | "mips32r6" | "mips64r6" => false, "powerpc" | "powerpc64" => false, "sparc" | "sparc64" => false, "wasm32" | "wasm64" => false, // Most everything else works as of LLVM 19 _ => true, }; let f128_enabled = match cfg.target_arch.as_str() { // Unsupported (libcall is not supported) <https://github.com/llvm/llvm-project/issues/121122> "amdgpu" => false, // Unsupported <https://github.com/llvm/llvm-project/issues/94434> "arm64ec" => false, // Selection failure <https://github.com/llvm/llvm-project/issues/96432> "mips64" | "mips64r6" => false, // Selection failure <https://github.com/llvm/llvm-project/issues/95471> "nvptx64" => false, // Selection failure <https://github.com/llvm/llvm-project/issues/101545> "powerpc64" if &cfg.target_os == "aix" => false, // Selection failure <https://github.com/llvm/llvm-project/issues/41838> "sparc" => false, // Most everything else works as of LLVM 19 _ => true, }; // If the feature is 
set, disable these types. let disable_both = env::var_os("CARGO_FEATURE_NO_F16_F128").is_some(); println!("cargo:rustc-check-cfg=cfg(f16_enabled)"); println!("cargo:rustc-check-cfg=cfg(f128_enabled)"); if f16_enabled && !disable_both { println!("cargo:rustc-cfg=f16_enabled"); } if f128_enabled && !disable_both { println!("cargo:rustc-cfg=f128_enabled"); } }
rust
github
https://github.com/nodejs/node
deps/crates/vendor/libm/configure.rs
# FIX: was `from datetime import datetime`, but every call site below uses
# `datetime.time(hour=..., minute=...)`, which on the *class* is an unbound
# method and raises TypeError. `import datetime` exposes the `time` type the
# tests actually need.
import datetime

from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.test import TestCase

from event.models import Event
from job.models import Job
from organization.models import Organization
from shift.models import Shift, VolunteerShift
from shift.services import *
from volunteer.models import Volunteer
from volunteer.services import *


class ShiftMethodTests(TestCase):
    """Unit tests for the shift service layer.

    Covers registration, cancellation, logged-hours bookkeeping and the
    shift lookup helpers. The copy-pasted fixture setup of the original
    file is factored into the private helpers below; each test's data is
    identical to the original's.
    """

    # ------------------------------------------------------------------
    # fixture helpers
    # ------------------------------------------------------------------

    def _create_volunteer(self, username, first_name, last_name, address,
                          city, state, country, phone_number, email):
        """Create and save a Volunteer backed by a fresh auth User."""
        user = User.objects.create_user(username)
        volunteer = Volunteer(first_name=first_name, last_name=last_name,
                              address=address, city=city, state=state,
                              country=country, phone_number=phone_number,
                              email=email, user=user)
        volunteer.save()
        return volunteer

    def _yoshi(self):
        """The standard first test volunteer."""
        return self._create_volunteer(
            'Yoshi', "Yoshi", "Turtle", "Mario Land", "Nintendo Land",
            "Nintendo State", "Nintendo Nation", "2374983247",
            "yoshi@nintendo.com")

    def _john(self):
        """The standard second test volunteer."""
        return self._create_volunteer(
            'John', "John", "Doe", "7 Alpine Street", "Maplegrove",
            "Wyoming", "USA", "23454545", "john@test.com")

    def _event(self):
        """The standard test event."""
        event = Event(name="Open Source Event",
                      start_date="2012-10-22", end_date="2012-10-23")
        event.save()
        return event

    def _job(self, event, name="Software Developer", start_date="2012-10-22",
             end_date="2012-10-23", description="A software job"):
        """Create and save a Job attached to *event*."""
        job = Job(name=name, start_date=start_date, end_date=end_date,
                  description=description, event=event)
        job.save()
        return job

    def _shift(self, job, date="2012-10-23", start_time="9:00",
               end_time="3:00", max_volunteers=1):
        """Create and save a Shift attached to *job*."""
        shift = Shift(date=date, start_time=start_time, end_time=end_time,
                      max_volunteers=max_volunteers, job=job)
        shift.save()
        return shift

    def _standard_shifts(self, same_job=False):
        """Standard trio: s1 (1 slot) and s2 (2 slots) on job 1, s3 (4
        slots) on job 2 -- or all on job 1 when *same_job* is True."""
        e1 = self._event()
        j1 = self._job(e1)
        if same_job:
            j2 = j1
        else:
            j2 = self._job(e1, name="Systems Administrator",
                           start_date="2012-9-1", end_date="2012-10-26",
                           description="A systems administrator job")
        s1 = self._shift(j1, start_time="9:00", end_time="3:00", max_volunteers=1)
        s2 = self._shift(j1, start_time="10:00", end_time="4:00", max_volunteers=2)
        s3 = self._shift(j2, start_time="12:00", end_time="6:00", max_volunteers=4)
        return s1, s2, s3

    def _volunteer_shift(self, volunteer, shift):
        """Fetch the VolunteerShift row for a (volunteer, shift) pair."""
        return VolunteerShift.objects.get(volunteer_id=volunteer.id,
                                          shift_id=shift.id)

    def _register_and_check(self, volunteer, shift):
        """register() raises on invalid parameters, so a clean return plus a
        retrievable VolunteerShift row proves the sign-up succeeded."""
        register(volunteer.id, shift.id)
        self.assertIsNotNone(self._volunteer_shift(volunteer, shift))

    # ------------------------------------------------------------------
    # tests
    # ------------------------------------------------------------------

    def test_add_shift_hours(self):
        v1 = self._yoshi()
        s1, s2, s3 = self._standard_shifts()
        for shift in (s1, s2, s3):
            self._register_and_check(v1, shift)

        cases = [
            (s1, datetime.time(hour=9, minute=0), datetime.time(hour=10, minute=0)),
            (s2, datetime.time(hour=10, minute=0), datetime.time(hour=12, minute=0)),
            (s3, datetime.time(hour=5, minute=0), datetime.time(hour=6, minute=0)),
        ]
        for shift, start_time, end_time in cases:
            add_shift_hours(v1.id, shift.id, start_time, end_time)
            volunteer_shift = self._volunteer_shift(v1, shift)
            self.assertEqual(volunteer_shift.start_time, start_time)
            self.assertEqual(volunteer_shift.end_time, end_time)

    def test_calculate_total_report_hours(self):
        cases = [
            [1, 1, 1, 1],
            [1.5, 1.34, 2.3, 9, 4.7],
            [0.03, 0.023, 0.53, 0.863, 0.23, 0.57],
            [12, 24, 23.5, 15.67, 22.453, 3.42],
            [5],
            [0, 0, 0, 0],
            [0],
        ]
        for duration_list in cases:
            report_list = [{"duration": d} for d in duration_list]
            # sum() adds left-to-right, matching the accumulation order of the
            # original hand-rolled loop (relevant for float totals).
            self.assertEqual(calculate_total_report_hours(report_list),
                             sum(duration_list))

    def test_cancel_shift_registration(self):
        v1 = self._yoshi()
        v2 = self._john()
        s1, s2, s3 = self._standard_shifts()

        # cancelling a registration that was never made must raise
        for volunteer, shift in [(v1, s1), (v1, s1), (v1, s2), (v1, s3),
                                 (v2, s1), (v2, s2), (v2, s3)]:
            with self.assertRaises(ObjectDoesNotExist):
                cancel_shift_registration(volunteer.id, shift.id)

        # register volunteers to shifts
        register(v1.id, s1.id)
        register(v1.id, s2.id)
        register(v1.id, s3.id)
        register(v2.id, s1.id)
        register(v2.id, s2.id)
        register(v2.id, s3.id)

        # typical cancellations
        cancel_shift_registration(v1.id, s1.id)
        cancel_shift_registration(v1.id, s2.id)
        cancel_shift_registration(v1.id, s3.id)
        # NOTE(review): cancel_shift_registration(v2.id, s1.id) raises
        # ObjectDoesNotExist because s1 has max_volunteers=1 and v1 already
        # took the slot -- v2's registration above was refused, so there is
        # no row to cancel. This answers the original's puzzled comment.
        cancel_shift_registration(v2.id, s2.id)
        cancel_shift_registration(v2.id, s3.id)

    def test_calculate_duration(self):
        T = datetime.time
        cases = [
            (T(hour=1, minute=0), T(hour=2, minute=0), 1),
            (T(hour=1, minute=45), T(hour=2, minute=0), 0.25),
            (T(hour=1, minute=0), T(hour=2, minute=30), 1.5),
            (T(hour=1, minute=0), T(hour=1, minute=45), 0.75),
            (T(hour=1, minute=0), T(hour=13, minute=0), 12),
            (T(hour=1, minute=0), T(hour=5, minute=45), 4.75),
            (T(hour=1, minute=0), T(hour=1, minute=0), 0),
            (T(hour=1, minute=0), T(hour=23, minute=0), 22),
            (T(hour=22, minute=0), T(hour=1, minute=0), 3),   # wraps past midnight
            (T(hour=13, minute=0), T(hour=1, minute=0), 12),  # wraps past midnight
            (T(hour=0, minute=0), T(hour=23, minute=0), 23),
            (T(hour=23, minute=0), T(hour=0, minute=0), 1),   # wraps past midnight
        ]
        for start_time, end_time, delta_time_hours in cases:
            self.assertEqual(calculate_duration(start_time, end_time),
                             delta_time_hours)

    def test_clear_shift_hours(self):
        v1 = self._yoshi()
        s1, s2, s3 = self._standard_shifts()
        for shift in (s1, s2, s3):
            self._register_and_check(v1, shift)

        add_shift_hours(v1.id, s1.id,
                        datetime.time(hour=9, minute=0), datetime.time(hour=10, minute=0))
        add_shift_hours(v1.id, s2.id,
                        datetime.time(hour=10, minute=0), datetime.time(hour=12, minute=0))
        add_shift_hours(v1.id, s3.id,
                        datetime.time(hour=5, minute=0), datetime.time(hour=6, minute=0))

        # clearing removes both recorded times
        for shift in (s1, s2, s3):
            clear_shift_hours(v1.id, shift.id)
            volunteer_shift = self._volunteer_shift(v1, shift)
            self.assertIsNone(volunteer_shift.start_time)
            self.assertIsNone(volunteer_shift.end_time)

    def test_edit_shift_hours(self):
        v1 = self._yoshi()
        e1 = self._event()
        j1 = self._job(e1)
        s1 = self._shift(j1, start_time="1:00", end_time="12:00", max_volunteers=1)
        self._register_and_check(v1, s1)

        add_shift_hours(v1.id, s1.id,
                        datetime.time(hour=9, minute=0), datetime.time(hour=10, minute=0))

        edits = [
            (datetime.time(hour=10, minute=0), datetime.time(hour=11, minute=0)),
            (datetime.time(hour=1, minute=0), datetime.time(hour=4, minute=0)),
            (datetime.time(hour=4, minute=15), datetime.time(hour=12, minute=35)),
            (datetime.time(hour=2, minute=5), datetime.time(hour=4, minute=15)),
            (datetime.time(hour=5, minute=0), datetime.time(hour=5, minute=30)),
        ]
        for start_time, end_time in edits:
            edit_shift_hours(v1.id, s1.id, start_time, end_time)
            volunteer_shift = self._volunteer_shift(v1, s1)
            self.assertIsNotNone(volunteer_shift.start_time)
            self.assertIsNotNone(volunteer_shift.end_time)
            self.assertEqual(volunteer_shift.start_time, start_time)
            self.assertEqual(volunteer_shift.end_time, end_time)

    def test_get_shift_by_id(self):
        # all three shifts share one job here, as in the original
        s1, s2, s3 = self._standard_shifts(same_job=True)

        # typical cases: known ids return their shift
        for shift in (s1, s2, s3):
            self.assertIsNotNone(get_shift_by_id(shift.id))
            self.assertEqual(get_shift_by_id(shift.id), shift)

        # non-existent ids return None, never some other shift
        for bogus_id in (100, 200, 300, 400):
            self.assertIsNone(get_shift_by_id(bogus_id))
        for bogus_id in (100, 200, 300):
            for shift in (s1, s2, s3):
                self.assertNotEqual(get_shift_by_id(bogus_id), shift)

    def test_get_shifts_ordered_by_date(self):
        # FIX: renamed from get_shifts_ordered_by_date -- without the test_
        # prefix the unittest runner silently skipped this test.
        e1 = self._event()
        j1 = self._job(e1)
        s1 = self._shift(j1, date="2012-1-10", start_time="9:00",
                         end_time="3:00", max_volunteers=1)
        s2 = self._shift(j1, date="2012-6-25", start_time="10:00",
                         end_time="4:00", max_volunteers=2)
        s3 = self._shift(j1, date="2012-12-9", start_time="12:00",
                         end_time="6:00", max_volunteers=4)

        # typical case
        shift_list = get_shifts_ordered_by_date(j1.id)
        self.assertIsNotNone(shift_list)
        self.assertNotEqual(shift_list, False)
        self.assertEqual(len(shift_list), 3)
        for shift in (s1, s2, s3):
            self.assertIn(shift, shift_list)

        # chronological order
        self.assertEqual(shift_list[0].date, s1.date)
        self.assertEqual(shift_list[1].date, s2.date)
        self.assertEqual(shift_list[2].date, s3.date)

    def test_get_unlogged_shifts_by_volunteer_id(self):
        v1 = self._yoshi()
        s1, s2, s3 = self._standard_shifts()

        # sign up
        register(v1.id, s1.id)
        register(v1.id, s2.id)
        register(v1.id, s3.id)

        # no hours logged yet, so all three shifts are "unlogged"
        shift_list = get_unlogged_shifts_by_volunteer_id(v1.id)
        self.assertIsNotNone(shift_list)
        self.assertNotEqual(shift_list, False)
        self.assertEqual(len(shift_list), 3)
        for shift in (s1, s2, s3):
            self.assertIn(shift, shift_list)

    def test_get_volunteer_shift_by_id(self):
        v1 = self._yoshi()
        v2 = self._john()
        s1, s2, s3 = self._standard_shifts()

        register(v1.id, s1.id)
        register(v1.id, s2.id)
        register(v1.id, s3.id)
        register(v2.id, s1.id)  # refused: s1 is already full (max_volunteers=1)
        register(v2.id, s2.id)
        register(v2.id, s3.id)

        for volunteer, shift in [(v1, s1), (v1, s2), (v1, s3),
                                 (v2, s2), (v2, s3)]:
            self.assertEqual(get_volunteer_shift_by_id(volunteer.id, shift.id),
                             self._volunteer_shift(volunteer, shift))
        # (v2, s1) is deliberately absent: no VolunteerShift row exists because
        # v1 took s1's only slot -- this is why the original's commented-out
        # assertion raised DoesNotExist.

    def test_is_signed_up(self):
        v1 = self._yoshi()
        v2 = self._john()
        s1, s2, s3 = self._standard_shifts()

        # cases where not signed up yet
        for shift in (s1, s2, s3):
            self.assertFalse(is_signed_up(v1.id, shift.id))

        # cases where signed up
        register(v1.id, s1.id)
        register(v1.id, s2.id)
        register(v1.id, s3.id)
        for shift in (s1, s2, s3):
            self.assertTrue(is_signed_up(v1.id, shift.id))

        # a second volunteer starts out unsigned everywhere
        for shift in (s1, s2, s3):
            self.assertFalse(is_signed_up(v2.id, shift.id))
        register(v2.id, s2.id)
        register(v2.id, s3.id)
        # v2 never registered for s1, so it stays False
        self.assertFalse(is_signed_up(v2.id, s1.id))
        self.assertTrue(is_signed_up(v2.id, s2.id))
        self.assertTrue(is_signed_up(v2.id, s3.id))

    def test_register(self):
        ERROR_CODE_ALREADY_SIGNED_UP = "ERROR_CODE_ALREADY_SIGNED_UP"
        ERROR_CODE_NO_SLOTS_REMAINING = "ERROR_CODE_NO_SLOTS_REMAINING"

        v1 = self._yoshi()
        v2 = self._john()
        s1, s2, s3 = self._standard_shifts()

        # typical cases
        for shift in (s1, s2, s3):
            self._register_and_check(v1, shift)

        # signing up again for the same shift is rejected
        for shift in (s1, s2, s3):
            self.assertEqual(register(v1.id, shift.id),
                             ERROR_CODE_ALREADY_SIGNED_UP)

        # v2 can't sign up for s1 because there are no slots remaining
        self.assertEqual(register(v2.id, s1.id), ERROR_CODE_NO_SLOTS_REMAINING)
        self._register_and_check(v2, s2)
        self._register_and_check(v2, s3)

        # and v2's duplicate sign-ups are rejected too
        for shift in (s2, s3):
            self.assertEqual(register(v2.id, shift.id),
                             ERROR_CODE_ALREADY_SIGNED_UP)
unknown
codeparrot/codeparrot-clean
# coding=utf-8
from lib.Register import get_reg_class

__author__ = 'Anatoli Kalysch'


class Trace(list):
    """A list of Traceline objects plus flags recording which normalization
    passes have already been applied to the trace."""

    def __init__(self, reg_size=64, tr=None):
        """
        :param reg_size: register width of the traced context (e.g. 32/64)
        :param tr: optional list of Traceline objects to seed the trace with
        """
        super(Trace, self).__init__()
        # which trace transformations have been run so far
        self.peephole = False
        self.constant_propagation = False
        self.standardization = False
        self.operand_folding = False
        self.stack_addr_propagation = False
        self.ctx_reg_size = reg_size
        if tr is not None:
            assert isinstance(tr, list)
            for line in tr:
                assert isinstance(line, Traceline)
                self.append(line)


class Traceline(object):
    """One line of an execution trace: thread id, address, disassembly
    tokens, register context and an optional comment, plus a relevance
    grade used by the analysis passes."""

    def __init__(self, **kwargs):
        # _line layout: [thread_id, addr, disasm, ctx, comment]
        self._line = [kwargs.get('thread_id'),
                      kwargs.get('addr'),
                      kwargs.get('disasm'),
                      kwargs.get('ctx', ''),
                      kwargs.get('comment', '')]
        self.grade = 0

    def __eq__(self, other):
        if isinstance(other, Traceline):
            # grade is IGNORED, while things like comments and ctx are taken into account!
            return self._line == other._line
        return False

    def __ne__(self, other):
        return not self.__eq__(other)

    def raise_grade(self, value=1):
        self.grade += value

    def lower_grade(self, value=1):
        self.grade -= value
        # do not lower below zero to make zero the common denominator for trace line grades
        self.grade = max(0, self.grade)

    @property
    def thread_id(self):
        return self._line[0]

    @thread_id.setter
    def thread_id(self, value):
        self._line[0] = value

    @property
    def addr(self):
        return self._line[1]

    @addr.setter
    def addr(self, value):
        self._line[1] = value

    @property
    def disasm(self):
        # disasm is a sequence: [mnemonic, op1, op2, ...]
        return self._line[2]

    @disasm.setter
    def disasm(self, value):
        self._line[2] = value

    @property
    def disasm_len(self):
        return len(self.disasm)

    @property
    def ctx(self):
        return self._line[3]

    @ctx.setter
    def ctx(self, value):
        self._line[3] = value

    @property
    def comment(self):
        return self._line[4]

    @comment.setter
    def comment(self, value):
        self._line[4] = value

    def disasm_str(self):
        """Format the disassembly tokens as 'mnemonic<TAB>op1, op2'.

        Rewritten from try/bare-except around the 3-token format to an
        explicit length check with identical results.
        """
        if self.disasm_len > 2:
            return '%s\t%s, %s' % (self.disasm[0], self.disasm[1], self.disasm[2])
        if self.disasm_len == 2:
            return '%s\t%s' % (self.disasm[0], self.disasm[1])
        return self.disasm[0]

    def to_str_line(self):
        """Render the whole trace line for display/logging.

        FIX: the original crashed when comment was None (it iterated None)
        or when ctx was the default '' (it called .keys() on a str); both
        cases now render as an empty field instead.
        """
        comment = self.comment if self.comment is not None else ''
        if isinstance(self.ctx, dict):
            ctx = ''.join('%s:%s ' % (key, self.ctx[key]) for key in self.ctx)
        else:
            ctx = ''
        return "%x %x %s\t\t%s\t\t%s" % (self.thread_id, self.addr,
                                         self.disasm_str(), comment, ctx)

    # --- mnemonic classification -------------------------------------

    @property
    def is_mov(self):
        return 'mov' in self.disasm[0]

    @property
    def is_pop(self):
        return self.disasm[0].startswith('pop')

    @property
    def is_push(self):
        return self.disasm[0].startswith('push')

    @property
    def is_jmp(self):
        # returns true for conditional AND non-cond jumps
        return self.disasm[0].startswith('j')

    @property
    def is_comparison(self):
        return 'cmp' in self.disasm[0] or 'test' in self.disasm[0]

    # --- operand classification --------------------------------------

    def _is_reg_operand(self, index):
        """True if the operand at *index* names a known register."""
        try:
            return get_reg_class(self.disasm[index]) is not None
        except Exception:  # operand missing or not a register name
            return False

    @property
    def is_op1_reg(self):
        return self._is_reg_operand(1)

    @property
    def is_op2_reg(self):
        return self._is_reg_operand(2)

    def _is_mem_operand(self, index):
        """True if the operand at *index* looks like a memory reference
        ('[...]' brackets or an explicit 'ptr' size prefix)."""
        if self.disasm_len <= index:
            return False
        operand = self.disasm[index]
        if operand.startswith('[') and operand.endswith(']'):
            return True
        return 'ptr' in operand

    @property
    def is_op1_mem(self):
        return self._is_mem_operand(1)

    @property
    def is_op2_mem(self):
        return self._is_mem_operand(2)
unknown
codeparrot/codeparrot-clean
/* * Copyright 2014-2021 JetBrains s.r.o and contributors. Use of this source code is governed by the Apache 2.0 license. */ package io.ktor.tests.hosts import io.ktor.events.* import io.ktor.http.* import io.ktor.server.application.* import io.ktor.server.engine.* import io.ktor.server.request.* import io.ktor.utils.io.* import kotlinx.coroutines.* import java.io.* import java.lang.reflect.* import kotlin.concurrent.* import kotlin.coroutines.* import kotlin.test.* class ReceiveBlockingPrimitiveTest { @Test fun testBlockingPrimitiveUsuallyAllowed() { testOnThread { call -> call.receive<InputStream>().close() } } private fun testOnThread( block: suspend (ApplicationCall) -> Unit ) { val result = CompletableDeferred<Unit>() val call = TestCall() thread { try { runBlocking { block(call) } result.complete(Unit) } catch (cause: Throwable) { result.completeExceptionally(cause) } } try { runBlocking { result.await() } } finally { call.close() } } private class TestCall : BaseApplicationCall( Application( applicationEnvironment {}, false, "/", Events(), EmptyCoroutineContext ) { object : ApplicationEngine { override suspend fun resolvedConnectors(): List<EngineConnectorConfig> = TODO("Not yet implemented") override val environment: ApplicationEnvironment get() = TODO("Not yet implemented") override fun start(wait: Boolean): ApplicationEngine = TODO("Not yet implemented") override fun stop(gracePeriodMillis: Long, timeoutMillis: Long) = TODO("Not yet implemented") } } ) { init { application.receivePipeline.installDefaultTransformations() } override val request: BaseApplicationRequest = object : BaseApplicationRequest(this) { override val queryParameters: Parameters get() = TODO("Not yet implemented") override val rawQueryParameters: Parameters get() = TODO("Not yet implemented") override val engineHeaders: Headers get() = TODO("Not yet implemented") override val local: RequestConnectionPoint get() = object : RequestConnectionPoint { override val scheme: String get() = 
TODO("Not yet implemented") override val version: String get() = TODO("Not yet implemented") @Deprecated( "Use localPort or serverPort instead", level = DeprecationLevel.ERROR ) override val port: Int get() = TODO("Not yet implemented") override val localPort: Int get() = TODO("Not yet implemented") override val serverPort: Int get() = TODO("Not yet implemented") @Deprecated( "Use localHost or serverHost instead", level = DeprecationLevel.ERROR ) override val host: String get() = TODO("Not yet implemented") override val localHost: String get() = TODO("Not yet implemented") override val serverHost: String get() = TODO("Not yet implemented") override val localAddress: String get() = TODO("Not yet implemented") override val uri: String get() = "http://test-uri.ktor.io/" override val method: HttpMethod get() = TODO("Not yet implemented") override val remoteHost: String get() = TODO("Not yet implemented") override val remotePort: Int get() = TODO("Not yet implemented") override val remoteAddress: String get() = TODO("Not yet implemented") } override val cookies: RequestCookies get() = TODO("Not yet implemented") override val engineReceiveChannel: ByteReadChannel = ByteReadChannel.Empty } override val response: BaseApplicationResponse get() = error("Shouldn't be invoked") override val coroutineContext: CoroutineContext get() = TODO("Not yet implemented") fun close() { application.dispose() } } private val prohibitParkingFunction: Method? by lazy { Class.forName("io.ktor.utils.io.jvm.javaio.PollersKt") .getMethod("prohibitParking") } private fun markParkingProhibited() { prohibitParkingFunction?.invoke(null) } }
kotlin
github
https://github.com/ktorio/ktor
ktor-server/ktor-server-core/jvm/test/io/ktor/tests/hosts/ReceiveBlockingPrimitiveTest.kt
"""Support for MQTT Template lights.""" import logging import voluptuous as vol from homeassistant.components import mqtt from homeassistant.components.light import ( ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, ATTR_EFFECT, ATTR_FLASH, ATTR_HS_COLOR, ATTR_TRANSITION, ATTR_WHITE_VALUE, SUPPORT_BRIGHTNESS, SUPPORT_COLOR, SUPPORT_COLOR_TEMP, SUPPORT_EFFECT, SUPPORT_FLASH, SUPPORT_TRANSITION, SUPPORT_WHITE_VALUE, LightEntity, ) from homeassistant.components.mqtt import ( CONF_COMMAND_TOPIC, CONF_QOS, CONF_RETAIN, CONF_STATE_TOPIC, CONF_UNIQUE_ID, MqttAttributes, MqttAvailability, MqttDiscoveryUpdate, MqttEntityDeviceInfo, subscription, ) from homeassistant.const import ( CONF_DEVICE, CONF_NAME, CONF_OPTIMISTIC, STATE_OFF, STATE_ON, ) from homeassistant.core import callback import homeassistant.helpers.config_validation as cv from homeassistant.helpers.restore_state import RestoreEntity import homeassistant.util.color as color_util from ..debug_info import log_messages from .schema import MQTT_LIGHT_SCHEMA_SCHEMA _LOGGER = logging.getLogger(__name__) DOMAIN = "mqtt_template" DEFAULT_NAME = "MQTT Template Light" DEFAULT_OPTIMISTIC = False CONF_BLUE_TEMPLATE = "blue_template" CONF_BRIGHTNESS_TEMPLATE = "brightness_template" CONF_COLOR_TEMP_TEMPLATE = "color_temp_template" CONF_COMMAND_OFF_TEMPLATE = "command_off_template" CONF_COMMAND_ON_TEMPLATE = "command_on_template" CONF_EFFECT_LIST = "effect_list" CONF_EFFECT_TEMPLATE = "effect_template" CONF_GREEN_TEMPLATE = "green_template" CONF_MAX_MIREDS = "max_mireds" CONF_MIN_MIREDS = "min_mireds" CONF_RED_TEMPLATE = "red_template" CONF_STATE_TEMPLATE = "state_template" CONF_WHITE_VALUE_TEMPLATE = "white_value_template" PLATFORM_SCHEMA_TEMPLATE = ( mqtt.MQTT_RW_PLATFORM_SCHEMA.extend( { vol.Optional(CONF_BLUE_TEMPLATE): cv.template, vol.Optional(CONF_BRIGHTNESS_TEMPLATE): cv.template, vol.Optional(CONF_COLOR_TEMP_TEMPLATE): cv.template, vol.Required(CONF_COMMAND_OFF_TEMPLATE): cv.template, vol.Required(CONF_COMMAND_ON_TEMPLATE): 
cv.template, vol.Optional(CONF_DEVICE): mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA, vol.Optional(CONF_EFFECT_LIST): vol.All(cv.ensure_list, [cv.string]), vol.Optional(CONF_EFFECT_TEMPLATE): cv.template, vol.Optional(CONF_GREEN_TEMPLATE): cv.template, vol.Optional(CONF_MAX_MIREDS): cv.positive_int, vol.Optional(CONF_MIN_MIREDS): cv.positive_int, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_OPTIMISTIC, default=DEFAULT_OPTIMISTIC): cv.boolean, vol.Optional(CONF_RED_TEMPLATE): cv.template, vol.Optional(CONF_STATE_TEMPLATE): cv.template, vol.Optional(CONF_UNIQUE_ID): cv.string, vol.Optional(CONF_WHITE_VALUE_TEMPLATE): cv.template, } ) .extend(mqtt.MQTT_AVAILABILITY_SCHEMA.schema) .extend(mqtt.MQTT_JSON_ATTRS_SCHEMA.schema) .extend(MQTT_LIGHT_SCHEMA_SCHEMA.schema) ) async def async_setup_entity_template( config, async_add_entities, config_entry, discovery_data ): """Set up a MQTT Template light.""" async_add_entities([MqttTemplate(config, config_entry, discovery_data)]) class MqttTemplate( MqttAttributes, MqttAvailability, MqttDiscoveryUpdate, MqttEntityDeviceInfo, LightEntity, RestoreEntity, ): """Representation of a MQTT Template light.""" def __init__(self, config, config_entry, discovery_data): """Initialize a MQTT Template light.""" self._state = False self._sub_state = None self._topics = None self._templates = None self._optimistic = False # features self._brightness = None self._color_temp = None self._white_value = None self._hs = None self._effect = None self._unique_id = config.get(CONF_UNIQUE_ID) # Load config self._setup_from_config(config) device_config = config.get(CONF_DEVICE) MqttAttributes.__init__(self, config) MqttAvailability.__init__(self, config) MqttDiscoveryUpdate.__init__(self, discovery_data, self.discovery_update) MqttEntityDeviceInfo.__init__(self, device_config, config_entry) async def async_added_to_hass(self): """Subscribe to MQTT events.""" await super().async_added_to_hass() await self._subscribe_topics() async 
def discovery_update(self, discovery_payload): """Handle updated discovery message.""" config = PLATFORM_SCHEMA_TEMPLATE(discovery_payload) self._setup_from_config(config) await self.attributes_discovery_update(config) await self.availability_discovery_update(config) await self.device_info_discovery_update(config) await self._subscribe_topics() self.async_write_ha_state() def _setup_from_config(self, config): """(Re)Setup the entity.""" self._config = config self._topics = { key: config.get(key) for key in (CONF_STATE_TOPIC, CONF_COMMAND_TOPIC) } self._templates = { key: config.get(key) for key in ( CONF_BLUE_TEMPLATE, CONF_BRIGHTNESS_TEMPLATE, CONF_COLOR_TEMP_TEMPLATE, CONF_COMMAND_OFF_TEMPLATE, CONF_COMMAND_ON_TEMPLATE, CONF_EFFECT_TEMPLATE, CONF_GREEN_TEMPLATE, CONF_RED_TEMPLATE, CONF_STATE_TEMPLATE, CONF_WHITE_VALUE_TEMPLATE, ) } optimistic = config[CONF_OPTIMISTIC] self._optimistic = ( optimistic or self._topics[CONF_STATE_TOPIC] is None or self._templates[CONF_STATE_TEMPLATE] is None ) # features if self._templates[CONF_BRIGHTNESS_TEMPLATE] is not None: self._brightness = 255 else: self._brightness = None if self._templates[CONF_COLOR_TEMP_TEMPLATE] is not None: self._color_temp = 255 else: self._color_temp = None if self._templates[CONF_WHITE_VALUE_TEMPLATE] is not None: self._white_value = 255 else: self._white_value = None if ( self._templates[CONF_RED_TEMPLATE] is not None and self._templates[CONF_GREEN_TEMPLATE] is not None and self._templates[CONF_BLUE_TEMPLATE] is not None ): self._hs = [0, 0] else: self._hs = None self._effect = None async def _subscribe_topics(self): """(Re)Subscribe to topics.""" for tpl in self._templates.values(): if tpl is not None: tpl.hass = self.hass last_state = await self.async_get_last_state() @callback @log_messages(self.hass, self.entity_id) def state_received(msg): """Handle new MQTT messages.""" state = self._templates[ CONF_STATE_TEMPLATE ].async_render_with_possible_json_value(msg.payload) if state == STATE_ON: 
self._state = True elif state == STATE_OFF: self._state = False else: _LOGGER.warning("Invalid state value received") if self._brightness is not None: try: self._brightness = int( self._templates[ CONF_BRIGHTNESS_TEMPLATE ].async_render_with_possible_json_value(msg.payload) ) except ValueError: _LOGGER.warning("Invalid brightness value received") if self._color_temp is not None: try: self._color_temp = int( self._templates[ CONF_COLOR_TEMP_TEMPLATE ].async_render_with_possible_json_value(msg.payload) ) except ValueError: _LOGGER.warning("Invalid color temperature value received") if self._hs is not None: try: red = int( self._templates[ CONF_RED_TEMPLATE ].async_render_with_possible_json_value(msg.payload) ) green = int( self._templates[ CONF_GREEN_TEMPLATE ].async_render_with_possible_json_value(msg.payload) ) blue = int( self._templates[ CONF_BLUE_TEMPLATE ].async_render_with_possible_json_value(msg.payload) ) self._hs = color_util.color_RGB_to_hs(red, green, blue) except ValueError: _LOGGER.warning("Invalid color value received") if self._white_value is not None: try: self._white_value = int( self._templates[ CONF_WHITE_VALUE_TEMPLATE ].async_render_with_possible_json_value(msg.payload) ) except ValueError: _LOGGER.warning("Invalid white value received") if self._templates[CONF_EFFECT_TEMPLATE] is not None: effect = self._templates[ CONF_EFFECT_TEMPLATE ].async_render_with_possible_json_value(msg.payload) if effect in self._config.get(CONF_EFFECT_LIST): self._effect = effect else: _LOGGER.warning("Unsupported effect value received") self.async_write_ha_state() if self._topics[CONF_STATE_TOPIC] is not None: self._sub_state = await subscription.async_subscribe_topics( self.hass, self._sub_state, { "state_topic": { "topic": self._topics[CONF_STATE_TOPIC], "msg_callback": state_received, "qos": self._config[CONF_QOS], } }, ) if self._optimistic and last_state: self._state = last_state.state == STATE_ON if last_state.attributes.get(ATTR_BRIGHTNESS): self._brightness 
= last_state.attributes.get(ATTR_BRIGHTNESS) if last_state.attributes.get(ATTR_HS_COLOR): self._hs = last_state.attributes.get(ATTR_HS_COLOR) if last_state.attributes.get(ATTR_COLOR_TEMP): self._color_temp = last_state.attributes.get(ATTR_COLOR_TEMP) if last_state.attributes.get(ATTR_EFFECT): self._effect = last_state.attributes.get(ATTR_EFFECT) if last_state.attributes.get(ATTR_WHITE_VALUE): self._white_value = last_state.attributes.get(ATTR_WHITE_VALUE) async def async_will_remove_from_hass(self): """Unsubscribe when removed.""" self._sub_state = await subscription.async_unsubscribe_topics( self.hass, self._sub_state ) await MqttAttributes.async_will_remove_from_hass(self) await MqttAvailability.async_will_remove_from_hass(self) await MqttDiscoveryUpdate.async_will_remove_from_hass(self) @property def brightness(self): """Return the brightness of this light between 0..255.""" return self._brightness @property def color_temp(self): """Return the color temperature in mired.""" return self._color_temp @property def min_mireds(self): """Return the coldest color_temp that this light supports.""" return self._config.get(CONF_MIN_MIREDS, super().min_mireds) @property def max_mireds(self): """Return the warmest color_temp that this light supports.""" return self._config.get(CONF_MAX_MIREDS, super().max_mireds) @property def hs_color(self): """Return the hs color value [int, int].""" return self._hs @property def white_value(self): """Return the white property.""" return self._white_value @property def should_poll(self): """Return True if entity has to be polled for state. False if entity pushes its state to HA. 
""" return False @property def name(self): """Return the name of the entity.""" return self._config[CONF_NAME] @property def unique_id(self): """Return a unique ID.""" return self._unique_id @property def is_on(self): """Return True if entity is on.""" return self._state @property def assumed_state(self): """Return True if unable to access real state of the entity.""" return self._optimistic @property def effect_list(self): """Return the list of supported effects.""" return self._config.get(CONF_EFFECT_LIST) @property def effect(self): """Return the current effect.""" return self._effect async def async_turn_on(self, **kwargs): """Turn the entity on. This method is a coroutine. """ values = {"state": True} if self._optimistic: self._state = True if ATTR_BRIGHTNESS in kwargs: values["brightness"] = int(kwargs[ATTR_BRIGHTNESS]) if self._optimistic: self._brightness = kwargs[ATTR_BRIGHTNESS] if ATTR_COLOR_TEMP in kwargs: values["color_temp"] = int(kwargs[ATTR_COLOR_TEMP]) if self._optimistic: self._color_temp = kwargs[ATTR_COLOR_TEMP] if ATTR_HS_COLOR in kwargs: hs_color = kwargs[ATTR_HS_COLOR] # If there's a brightness topic set, we don't want to scale the RGB # values given using the brightness. 
if self._templates[CONF_BRIGHTNESS_TEMPLATE] is not None: brightness = 255 else: brightness = kwargs.get( ATTR_BRIGHTNESS, self._brightness if self._brightness else 255 ) rgb = color_util.color_hsv_to_RGB( hs_color[0], hs_color[1], brightness / 255 * 100 ) values["red"] = rgb[0] values["green"] = rgb[1] values["blue"] = rgb[2] if self._optimistic: self._hs = kwargs[ATTR_HS_COLOR] if ATTR_WHITE_VALUE in kwargs: values["white_value"] = int(kwargs[ATTR_WHITE_VALUE]) if self._optimistic: self._white_value = kwargs[ATTR_WHITE_VALUE] if ATTR_EFFECT in kwargs: values["effect"] = kwargs.get(ATTR_EFFECT) if self._optimistic: self._effect = kwargs[ATTR_EFFECT] if ATTR_FLASH in kwargs: values["flash"] = kwargs.get(ATTR_FLASH) if ATTR_TRANSITION in kwargs: values["transition"] = int(kwargs[ATTR_TRANSITION]) mqtt.async_publish( self.hass, self._topics[CONF_COMMAND_TOPIC], self._templates[CONF_COMMAND_ON_TEMPLATE].async_render(**values), self._config[CONF_QOS], self._config[CONF_RETAIN], ) if self._optimistic: self.async_write_ha_state() async def async_turn_off(self, **kwargs): """Turn the entity off. This method is a coroutine. 
""" values = {"state": False} if self._optimistic: self._state = False if ATTR_TRANSITION in kwargs: values["transition"] = int(kwargs[ATTR_TRANSITION]) mqtt.async_publish( self.hass, self._topics[CONF_COMMAND_TOPIC], self._templates[CONF_COMMAND_OFF_TEMPLATE].async_render(**values), self._config[CONF_QOS], self._config[CONF_RETAIN], ) if self._optimistic: self.async_write_ha_state() @property def supported_features(self): """Flag supported features.""" features = SUPPORT_FLASH | SUPPORT_TRANSITION if self._brightness is not None: features = features | SUPPORT_BRIGHTNESS if self._hs is not None: features = features | SUPPORT_COLOR if self._config.get(CONF_EFFECT_LIST) is not None: features = features | SUPPORT_EFFECT if self._color_temp is not None: features = features | SUPPORT_COLOR_TEMP if self._white_value is not None: features = features | SUPPORT_WHITE_VALUE return features
unknown
codeparrot/codeparrot-clean
""" Django admin page for CourseAssetCacheTtlConfig, which allows you to configure the TTL that gets used when sending cachability headers back with request course assets. """ from django.contrib import admin from config_models.admin import ConfigurationModelAdmin from .models import CourseAssetCacheTtlConfig, CdnUserAgentsConfig class CourseAssetCacheTtlConfigAdmin(ConfigurationModelAdmin): """ Basic configuration for cache TTL. """ list_display = [ 'cache_ttl' ] def get_list_display(self, request): """ Restore default list_display behavior. ConfigurationModelAdmin overrides this, but in a way that doesn't respect the ordering. This lets us customize it the usual Django admin way. """ return self.list_display class CdnUserAgentsConfigAdmin(ConfigurationModelAdmin): """ Basic configuration for CDN user agent whitelist. """ list_display = [ 'cdn_user_agents' ] def get_list_display(self, request): """ Restore default list_display behavior. ConfigurationModelAdmin overrides this, but in a way that doesn't respect the ordering. This lets us customize it the usual Django admin way. """ return self.list_display admin.site.register(CourseAssetCacheTtlConfig, CourseAssetCacheTtlConfigAdmin) admin.site.register(CdnUserAgentsConfig, CdnUserAgentsConfigAdmin)
unknown
codeparrot/codeparrot-clean
# coding: utf-8 """ ORCID Member No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501 OpenAPI spec version: Latest Generated by: https://github.com/swagger-api/swagger-codegen.git """ import pprint import re # noqa: F401 import six from orcid_api_v3.models.keyword_v20 import KeywordV20 # noqa: F401,E501 from orcid_api_v3.models.last_modified_date_v20 import LastModifiedDateV20 # noqa: F401,E501 class KeywordsV20(object): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'last_modified_date': 'LastModifiedDateV20', 'keyword': 'list[KeywordV20]', 'path': 'str' } attribute_map = { 'last_modified_date': 'last-modified-date', 'keyword': 'keyword', 'path': 'path' } def __init__(self, last_modified_date=None, keyword=None, path=None): # noqa: E501 """KeywordsV20 - a model defined in Swagger""" # noqa: E501 self._last_modified_date = None self._keyword = None self._path = None self.discriminator = None if last_modified_date is not None: self.last_modified_date = last_modified_date if keyword is not None: self.keyword = keyword if path is not None: self.path = path @property def last_modified_date(self): """Gets the last_modified_date of this KeywordsV20. # noqa: E501 :return: The last_modified_date of this KeywordsV20. # noqa: E501 :rtype: LastModifiedDateV20 """ return self._last_modified_date @last_modified_date.setter def last_modified_date(self, last_modified_date): """Sets the last_modified_date of this KeywordsV20. :param last_modified_date: The last_modified_date of this KeywordsV20. 
# noqa: E501 :type: LastModifiedDateV20 """ self._last_modified_date = last_modified_date @property def keyword(self): """Gets the keyword of this KeywordsV20. # noqa: E501 :return: The keyword of this KeywordsV20. # noqa: E501 :rtype: list[KeywordV20] """ return self._keyword @keyword.setter def keyword(self, keyword): """Sets the keyword of this KeywordsV20. :param keyword: The keyword of this KeywordsV20. # noqa: E501 :type: list[KeywordV20] """ self._keyword = keyword @property def path(self): """Gets the path of this KeywordsV20. # noqa: E501 :return: The path of this KeywordsV20. # noqa: E501 :rtype: str """ return self._path @path.setter def path(self, path): """Sets the path of this KeywordsV20. :param path: The path of this KeywordsV20. # noqa: E501 :type: str """ self._path = path def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(KeywordsV20, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, KeywordsV20): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other
unknown
codeparrot/codeparrot-clean
// Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: BUSL-1.1 package discovery import ( "fmt" "testing" ) func TestPluginConstraintsAllows(t *testing.T) { tests := []struct { Constraints *PluginConstraints Version string Want bool }{ { &PluginConstraints{ Versions: AllVersions, }, "1.0.0", true, }, { &PluginConstraints{ Versions: ConstraintStr(">1.0.0").MustParse(), }, "1.0.0", false, }, // This is not an exhaustive test because the callees // already have plentiful tests of their own. } for i, test := range tests { t.Run(fmt.Sprintf("%02d", i), func(t *testing.T) { version := VersionStr(test.Version).MustParse() got := test.Constraints.Allows(version) if got != test.Want { t.Logf("looking for %s in %#v", test.Version, test.Constraints) t.Errorf("wrong result %#v; want %#v", got, test.Want) } }) } } func TestPluginConstraintsAcceptsSHA256(t *testing.T) { mustUnhex := func(hex string) (ret []byte) { _, err := fmt.Sscanf(hex, "%x", &ret) if err != nil { panic(err) } return ret } tests := []struct { Constraints *PluginConstraints Digest []byte Want bool }{ { &PluginConstraints{ Versions: AllVersions, SHA256: mustUnhex("0123456789abcdef"), }, mustUnhex("0123456789abcdef"), true, }, { &PluginConstraints{ Versions: AllVersions, SHA256: mustUnhex("0123456789abcdef"), }, mustUnhex("f00dface"), false, }, { &PluginConstraints{ Versions: AllVersions, SHA256: nil, }, mustUnhex("f00dface"), true, }, } for i, test := range tests { t.Run(fmt.Sprintf("%02d", i), func(t *testing.T) { got := test.Constraints.AcceptsSHA256(test.Digest) if got != test.Want { t.Logf("%#v.AcceptsSHA256(%#v)", test.Constraints, test.Digest) t.Errorf("wrong result %#v; want %#v", got, test.Want) } }) } }
go
github
https://github.com/hashicorp/terraform
internal/plugin/discovery/requirements_test.go
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.gs.contract; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.contract.AbstractContractMkdirTest; import org.apache.hadoop.fs.contract.AbstractFSContract; public class ITestGoogleContractMkdir extends AbstractContractMkdirTest { @Override protected AbstractFSContract createContract(Configuration conf) { return new GoogleContract(conf); } }
java
github
https://github.com/apache/hadoop
hadoop-cloud-storage-project/hadoop-gcp/src/test/java/org/apache/hadoop/fs/gs/contract/ITestGoogleContractMkdir.java
// RUN: %check_clang_tidy -std=c++11,c++14 -check-suffix=,CXX14 %s bugprone-dangling-handle %t -- \ // RUN: -config="{CheckOptions: \ // RUN: {bugprone-dangling-handle.HandleClasses: \ // RUN: 'std::basic_string_view; ::llvm::StringRef;'}}" // RUN: %check_clang_tidy -std=c++17-or-later -check-suffix=,CXX17 %s bugprone-dangling-handle %t -- \ // RUN: -config="{CheckOptions: \ // RUN: {bugprone-dangling-handle.HandleClasses: \ // RUN: 'std::basic_string_view; ::llvm::StringRef;'}}" namespace std { template <typename T> class vector { public: using const_iterator = const T*; using iterator = T*; using size_type = int; void assign(size_type count, const T& value); iterator insert(const_iterator pos, const T& value); iterator insert(const_iterator pos, T&& value); iterator insert(const_iterator pos, size_type count, const T& value); void push_back(const T&); void push_back(T&&); void resize(size_type count, const T& value); }; template <typename, typename> class pair {}; template <typename T> class set { public: using const_iterator = const T*; using iterator = T*; std::pair<iterator, bool> insert(const T& value); std::pair<iterator, bool> insert(T&& value); iterator insert(const_iterator hint, const T& value); iterator insert(const_iterator hint, T&& value); }; template <typename Key, typename Value> class map { public: using value_type = pair<Key, Value>; value_type& operator[](const Key& key); value_type& operator[](Key&& key); }; class basic_string_view; class basic_string { public: basic_string(); basic_string(const char*); typedef basic_string_view str_view; operator str_view() const noexcept; ~basic_string(); }; typedef basic_string string; class basic_string_view { public: basic_string_view(const char*); }; typedef basic_string_view string_view; } // namespace std namespace llvm { class StringRef { public: StringRef(); StringRef(const char*); StringRef(const std::string&); }; } // namespace llvm std::string ReturnsAString(); void Positives() { std::string_view 
view1 = std::string(); // CHECK-MESSAGES-CXX14: [[@LINE-1]]:20: warning: std::basic_string_view outlives its value [bugprone-dangling-handle] // CHECK-MESSAGES-CXX17: [[@LINE-2]]:28: warning: std::basic_string_view outlives its value [bugprone-dangling-handle] std::string_view view_2 = ReturnsAString(); // CHECK-MESSAGES-CXX14: [[@LINE-1]]:20: warning: std::basic_string_view outlives its value [bugprone-dangling-handle] // CHECK-MESSAGES-CXX17: [[@LINE-2]]:29: warning: std::basic_string_view outlives its value [bugprone-dangling-handle] view1 = std::string(); // CHECK-MESSAGES: [[@LINE-1]]:3: warning: std::basic_string_view outlives const std::string& str_ref = ""; std::string_view view3 = true ? "A" : str_ref; // CHECK-MESSAGES-CXX14: [[@LINE-1]]:20: warning: std::basic_string_view outlives // CHECK-MESSAGES-CXX17: [[@LINE-2]]:28: warning: std::basic_string_view outlives view3 = true ? "A" : str_ref; // CHECK-MESSAGES: [[@LINE-1]]:3: warning: std::basic_string_view outlives std::string_view view4(ReturnsAString()); // CHECK-MESSAGES-CXX14: [[@LINE-1]]:20: warning: std::basic_string_view outlives // CHECK-MESSAGES-CXX17: [[@LINE-2]]:26: warning: std::basic_string_view outlives std::string_view view5 = std::string("test"); // CHECK-MESSAGES-CXX14: [[@LINE-1]]:20: warning: std::basic_string_view outlives its value [bugprone-dangling-handle] // CHECK-MESSAGES-CXX17: [[@LINE-2]]:28: warning: std::basic_string_view outlives its value [bugprone-dangling-handle] std::string_view view6 = std::string{"test"}; // CHECK-MESSAGES-CXX14: [[@LINE-1]]:20: warning: std::basic_string_view outlives its value [bugprone-dangling-handle] // CHECK-MESSAGES-CXX17: [[@LINE-2]]:28: warning: std::basic_string_view outlives its value [bugprone-dangling-handle] } void OtherTypes() { llvm::StringRef ref = std::string(); // CHECK-MESSAGES-CXX14: [[@LINE-1]]:19: warning: llvm::StringRef outlives its value // CHECK-MESSAGES-CXX17: [[@LINE-2]]:25: warning: llvm::StringRef outlives its value } 
const char static_array[] = "A"; std::string_view ReturnStatements(int i, std::string value_arg, const std::string &ref_arg) { const char array[] = "A"; const char* ptr = "A"; std::string s; static std::string ss; switch (i) { // Bad cases case 0: return array; // refers to local // CHECK-MESSAGES: [[@LINE-1]]:7: warning: std::basic_string_view outliv case 1: return s; // refers to local // CHECK-MESSAGES: [[@LINE-1]]:7: warning: std::basic_string_view outliv case 2: return std::string(); // refers to temporary // CHECK-MESSAGES: [[@LINE-1]]:7: warning: std::basic_string_view outliv case 3: return value_arg; // refers to by-value arg // CHECK-MESSAGES: [[@LINE-1]]:7: warning: std::basic_string_view outliv // Ok cases case 100: return ss; // refers to static case 101: return static_array; // refers to static case 102: return ptr; // pointer is ok case 103: return ref_arg; // refers to by-ref arg } struct S { std::string_view view() { return value; } std::string value; }; (void)[&]()->std::string_view { // This should not warn. The string is bound by reference. return s; }; (void)[=]() -> std::string_view { // This should not warn. The reference is valid as long as the lambda. return s; }; (void)[=]() -> std::string_view { // FIXME: This one should warn. We are returning a reference to a local // lambda variable. 
std::string local; return local; }; return ""; } void Containers() { std::vector<std::string_view> v; v.assign(3, std::string()); // CHECK-MESSAGES: [[@LINE-1]]:3: warning: std::basic_string_view outlives v.insert(nullptr, std::string()); // CHECK-MESSAGES: [[@LINE-1]]:3: warning: std::basic_string_view outlives v.insert(nullptr, 3, std::string()); // CHECK-MESSAGES: [[@LINE-1]]:3: warning: std::basic_string_view outlives v.push_back(std::string()); // CHECK-MESSAGES: [[@LINE-1]]:3: warning: std::basic_string_view outlives v.resize(3, std::string()); // CHECK-MESSAGES: [[@LINE-1]]:3: warning: std::basic_string_view outlives std::set<std::string_view> s; s.insert(std::string()); // CHECK-MESSAGES: [[@LINE-1]]:3: warning: std::basic_string_view outlives s.insert(nullptr, std::string()); // CHECK-MESSAGES: [[@LINE-1]]:3: warning: std::basic_string_view outlives std::map<std::string_view, int> m; m[std::string()]; // CHECK-MESSAGES: [[@LINE-1]]:3: warning: std::basic_string_view outlives } void TakesAStringView(std::string_view); void Negatives(std::string_view default_arg = ReturnsAString()) { std::string str; std::string_view view = str; TakesAStringView(std::string()); }
cpp
github
https://github.com/llvm/llvm-project
clang-tools-extra/test/clang-tidy/checkers/bugprone/dangling-handle.cpp
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import logging
import traceback

from data_source import DataSource
from extensions_paths import EXAMPLES
from future import All, Future
from platform_util import GetPlatforms


def _GetSampleId(sample_name):
  '''Derives a URL-friendly id from a sample's display name.'''
  return sample_name.lower().replace(' ', '-')


def GetAcceptedLanguages(request):
  '''Returns the language codes from |request|'s Accept-Language header, in
  the order they were listed, with any quality annotation (";q=...")
  stripped. Returns an empty list when there is no request or no
  Accept-Language header.
  '''
  if request is None:
    return []
  accept_language = request.headers.get('Accept-Language', None)
  if accept_language is None:
    return []
  return [lang_with_q.split(';')[0].strip()
          for lang_with_q in accept_language.split(',')]


def CreateSamplesView(samples_list, request):
  '''Returns a list of sample dicts based on |samples_list|, localising
  __MSG_..._ name/description placeholders using |request|'s accepted
  languages and annotating each sample with an 'id'.

  The dicts in |samples_list| come from a shared cache, so they are copied
  before being modified (in BOTH branches below); previously the
  non-localised branch wrote 'id' straight into the cached dict.
  '''
  return_list = []
  for dict_ in samples_list:
    name = dict_['name']
    description = dict_['description']
    if description is None:
      description = ''
    if name.startswith('__MSG_') or description.startswith('__MSG_'):
      try:
        # Copy the sample dict so we don't change the dict in the cache.
        sample_data = dict_.copy()
        name_key = name[len('__MSG_'):-len('__')]
        description_key = description[len('__MSG_'):-len('__')]
        locale = sample_data['default_locale']
        # First accepted language with translations wins; otherwise fall
        # back to the sample's default locale.
        for lang in GetAcceptedLanguages(request):
          if lang in sample_data['locales']:
            locale = lang
            break
        locale_data = sample_data['locales'][locale]
        sample_data['name'] = locale_data[name_key]['message']
        sample_data['description'] = locale_data[description_key]['message']
        sample_data['id'] = _GetSampleId(sample_data['name'])
      except Exception:
        logging.error(traceback.format_exc())
        # Revert the sample to the original dict (note: it will carry no
        # 'id' in this case, matching the original behaviour).
        sample_data = dict_
      return_list.append(sample_data)
    else:
      # Copy here too: annotating 'id' directly on |dict_| would mutate
      # the cached sample.
      sample_data = dict_.copy()
      sample_data['id'] = _GetSampleId(name)
      return_list.append(sample_data)
  return return_list


class SamplesDataSource(DataSource):
  '''Constructs a list of samples and their respective files and api calls.
  '''

  def __init__(self, server_instance, request):
    self._platform_bundle = server_instance.platform_bundle
    self._request = request

  def _GetImpl(self, platform):
    # Apps samples live at the root of the samples model; extension
    # samples live under EXAMPLES.
    cache = self._platform_bundle.GetSamplesModel(platform).GetCache()
    create_view = lambda samp_list: CreateSamplesView(samp_list, self._request)
    return cache.GetFromFileListing(
        '' if platform == 'apps' else EXAMPLES).Then(create_view)

  def get(self, platform):
    return self._GetImpl(platform).Get()

  def GetRefreshPaths(self):
    return [platform for platform in GetPlatforms()]

  def Refresh(self, path):
    # |path| is a platform name produced by GetRefreshPaths.
    return self._GetImpl(path)
unknown
codeparrot/codeparrot-clean
""" Convenience routines for creating non-trivial Field subclasses. Add SubfieldBase as the __metaclass__ for your Field subclass, implement to_python() and the other necessary methods and everything will work seamlessly. """ from django.utils.maxlength import LegacyMaxlength class SubfieldBase(LegacyMaxlength): """ A metaclass for custom Field subclasses. This ensures the model's attribute has the descriptor protocol attached to it. """ def __new__(cls, base, name, attrs): new_class = super(SubfieldBase, cls).__new__(cls, base, name, attrs) new_class.contribute_to_class = make_contrib( attrs.get('contribute_to_class')) return new_class class Creator(object): """ A placeholder class that provides a way to set the attribute on the model. """ def __init__(self, field): self.field = field def __get__(self, obj, type=None): if obj is None: raise AttributeError('Can only be accessed via an instance.') return obj.__dict__[self.field.name] def __set__(self, obj, value): obj.__dict__[self.field.name] = self.field.to_python(value) def make_contrib(func=None): """ Returns a suitable contribute_to_class() method for the Field subclass. If 'func' is passed in, it is the existing contribute_to_class() method on the subclass and it is called before anything else. It is assumed in this case that the existing contribute_to_class() calls all the necessary superclass methods. """ def contribute_to_class(self, cls, name): if func: func(self, cls, name) else: super(self.__class__, self).contribute_to_class(cls, name) setattr(cls, self.name, Creator(self)) return contribute_to_class
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- """ werkzeug.testsuite.securecookie ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Tests the secure cookie. :copyright: (c) 2014 by Armin Ronacher. :license: BSD, see LICENSE for more details. """ import unittest from werkzeug.testsuite import WerkzeugTestCase from werkzeug.utils import parse_cookie from werkzeug.wrappers import Request, Response from werkzeug.contrib.securecookie import SecureCookie class SecureCookieTestCase(WerkzeugTestCase): def test_basic_support(self): c = SecureCookie(secret_key=b'foo') assert c.new assert not c.modified assert not c.should_save c['x'] = 42 assert c.modified assert c.should_save s = c.serialize() c2 = SecureCookie.unserialize(s, b'foo') assert c is not c2 assert not c2.new assert not c2.modified assert not c2.should_save self.assert_equal(c2, c) c3 = SecureCookie.unserialize(s, b'wrong foo') assert not c3.modified assert not c3.new self.assert_equal(c3, {}) def test_wrapper_support(self): req = Request.from_values() resp = Response() c = SecureCookie.load_cookie(req, secret_key=b'foo') assert c.new c['foo'] = 42 self.assert_equal(c.secret_key, b'foo') c.save_cookie(resp) req = Request.from_values(headers={ 'Cookie': 'session="%s"' % parse_cookie(resp.headers['set-cookie'])['session'] }) c2 = SecureCookie.load_cookie(req, secret_key=b'foo') assert not c2.new self.assert_equal(c2, c) def suite(): suite = unittest.TestSuite() suite.addTest(unittest.makeSuite(SecureCookieTestCase)) return suite
unknown
codeparrot/codeparrot-clean
""" Mersenne Twister ---------------- Generates high quality pseudo random integers with a long period. Used as the default random number generator for several languages (including Python). For a more technical overview, see the wikipedia entry. Pseudocode: http://en.wikipedia.org/wiki/Mersenne_twister """ class MersenneTwister: def __init__(self): self.state = [] self.index = 0 def seed(self, seed): """ Initialize generator. :param seed: An integer value to seed the generator with """ self.state = [] self.index = 0 self.state.append(seed) for i in range(1, 624): n = (0x6c078965 * (self.state[i-1] ^ (self.state[i-1] >> 30)) + i) n &= 0xffffffff self.state.append(n) def randint(self): """ Extracts a random number. :rtype: A random integer """ if self.index == 0: self.generate() y = self.state[self.index] y ^= y >> 11 y ^= (y << 7) & 0x9d2c5680 y ^= (y << 15) & 0xefc60000 y ^= y >> 18 self.index = (self.index + 1) % 624 return y def generate(self): """ Generates 624 random numbers and stores in the state list. """ for i in range(624): n = self.state[i] & 0x80000000 n += self.state[(i+1) % 624] & 0x7fffffff self.state[i] = self.state[(i+397) % 624] ^ (n >> 1) if n % 2 != 0: self.state[i] ^= 0x9908b0df
unknown
codeparrot/codeparrot-clean
#!/usr/bin/python\<nl>\ # -*- coding: utf-8 -*- """ @author nik | """ import sys from collections import namedtuple # globals MTLFILE = '' DUMMY_MAPCALC_STRING_RADIANCE = 'Radiance' DUMMY_MAPCALC_STRING_DN = 'DigitalNumber' # helper functions def set_mtlfile(): """ Set user defined MTL file, if any """ if len(sys.argv) > 1: return sys.argv[1] else: return False class Landsat8(): """ Retrieve metadata from a Landsat8 MTL file. See <http://landsat.usgs.gov/Landsat8_Using_Product.php>. ToDo: - Implement toar_reflectance - Implement mechanism to translate QA pixel values to QA bits, and vice versa? - Other Landsat8 related functions/algorithms? """ def __init__(self, mtl_filename): """ Initialise class object based on a Landsat8 MTL filename. """ # read lines with open(mtl_filename, 'r') as mtl_file: mtl_lines = mtl_file.readlines() # close and remove 'mtl_file' mtl_file.close() del(mtl_file) # clean and convert MTL lines in to a named tuple self.mtl = self._to_namedtuple(mtl_lines, 'metadata') self._set_attributes() # shorten LANDSAT_SCENE_ID, SENSOR_ID self.scene_id = self.mtl.LANDSAT_SCENE_ID self.sensor = self.mtl.SENSOR_ID # bounding box related self.corner_ul = (self.mtl.CORNER_UL_LAT_PRODUCT, self.mtl.CORNER_UL_LON_PRODUCT) self.corner_lr = (self.mtl.CORNER_LR_LAT_PRODUCT, self.mtl.CORNER_LR_LON_PRODUCT) self.corner_ul_projection = (self.mtl.CORNER_UL_PROJECTION_X_PRODUCT, self.mtl.CORNER_UL_PROJECTION_Y_PRODUCT) self.corner_lr_projection = (self.mtl.CORNER_LR_PROJECTION_X_PRODUCT, self.mtl.CORNER_LR_PROJECTION_Y_PRODUCT) self.cloud_cover = self.mtl.CLOUD_COVER def _to_namedtuple(self, list_of_lines, name_for_tuple): """ This function performs the following actions on the given 'list_of_lines': - excludes lines containing the strings 'GROUP' and 'END' - removes whitespaces and doublequotes from strings - converts list of lines in to a named tuple """ import string # exclude lines containing 'GROUP', 'END' lines = [line.strip() for line in list_of_lines if not 
any(x in line for x in ('GROUP', 'END'))] # keep a copy, maybe useful? self._mtl_lines = lines del(list_of_lines) # empty variables to hold values field_names = [] field_values = [] # loop over lines, do some cleaning for idx in range(len(lines)): # split line in '=' line = lines[idx] line_split = line.split('=') # get field name & field value, clean whitespaces and " field_name = line_split[0].strip() field_names.append(field_name) field_value = line_split[1].strip() field_value = field_value.translate(string.maketrans("", "",), '"') field_values.append(field_value) # named tuple named_tuple = namedtuple(name_for_tuple, field_names) # return named tuple return named_tuple(*field_values) def _set_attributes(self): """ Set all parsed field names and values, from the MTL file, fed to the named tuple 'self.mtl', as attributes to the object. """ for field in self.mtl._fields: field_lowercase = field.lower() field_value = getattr(self.mtl, field) setattr(self, field_lowercase, field_value) def __str__(self): """ Return a string representation of the scene's id. """ msg = 'Landsat8 scene ID:' return msg + ' ' + self.scene_id def _get_mtl_lines(self): """ Return the "hidden" copy of the MTL lines before cleaning (lines containing 'GROUP' or 'END' are though excluded). """ return self._mtl_lines def toar_radiance(self, bandnumber): """ Note, this function returns a valid expression for GRASS GIS' r.mapcalc raster processing module. Conversion of Digital Numbers to TOA Radiance. 
OLI and TIRS band data can be converted to TOA spectral radiance using the radiance rescaling factors provided in the metadata file: Lλ = ML * Qcal + AL where: - Lλ = TOA spectral radiance (Watts/( m2 * srad * μm)) - ML = Band-specific multiplicative rescaling factor from the metadata (RADIANCE_MULT_BAND_x, where x is the band number) - AL = Band-specific additive rescaling factor from the metadata (RADIANCE_ADD_BAND_x, where x is the band number) - Qcal = Quantized and calibrated standard product pixel values (DN) Some code borrowed from <https://github.com/micha-silver/grass-landsat8/blob/master/r.in.landsat8.py> """ multiplicative_factor = getattr(self.mtl, ('RADIANCE_MULT_BAND_' + str(bandnumber))) additive_factor = getattr(self.mtl, 'RADIANCE_ADD_BAND_' + str(bandnumber)) formula = '{ML}*{DUMMY_DN} + {AL}' mapcalc = formula.format(ML=multiplicative_factor, DUMMY_DN=DUMMY_MAPCALC_STRING_DN, AL=additive_factor) return mapcalc def toar_reflectance(self, bandnumber): """ Note, this function returns a valid expression for GRASS GIS' r.mapcalc raster processing module. Conversion to TOA Reflectance OLI band data can also be converted to TOA planetary reflectance using reflectance rescaling coefficients provided in the product metadata file (MTL file). The following equation is used to convert DN values to TOA reflectance for OLI data as follows: ρλ' = MρQcal + Aρ where: - ρλ' = TOA planetary reflectance, without correction for solar angle. Note that ρλ' does not contain a correction for the sun angle. - Mρ = Band-specific multiplicative rescaling factor from the metadata (REFLECTANCE_MULT_BAND_x, where x is the band number) - Aρ = Band-specific additive rescaling factor from the metadata (REFLECTANCE_ADD_BAND_x, where x is the band number) - Qcal = Quantized and calibrated standard product pixel values (DN) TOA reflectance with a correction for the sun angle is then: ρλ = ρλ' = ρλ' ### Fix This! cos(θSZ) sin(θSE) ### Fix This! 
where: - ρλ = TOA planetary reflectance - θSE = Local sun elevation angle. The scene center sun elevation angle in degrees is provided in the metadata (SUN_ELEVATION). - θSZ = Local solar zenith angle; - θSZ = 90° - θSE For more accurate reflectance calculations, per pixel solar angles could be used instead of the scene center solar angle, but per pixel solar zenith angles are not currently provided with the Landsat 8 products. """ pass def radiance_to_temperature(self, bandnumber): """ Note, this function returns a valid expression for GRASS GIS' r.mapcalc raster processing module. Conversion to At-Satellite Brightness Temperature TIRS band data can be converted from spectral radiance to brightness temperature using the thermal constants provided in the metadata file: T = K2 / ln( (K1/Lλ) + 1 ) where: - T = At-satellite brightness temperature (K) - Lλ = TOA spectral radiance (Watts/( m2 * srad * μm)), below 'DUMMY_RADIANCE' - K1 = Band-specific thermal conversion constant from the metadata (K1_CONSTANT_BAND_x, where x is the band number, 10 or 11) - K2 = Band-specific thermal conversion constant from the metadata (K2_CONSTANT_BAND_x, where x is the band number, 10 or 11) """ k2 = getattr(self.mtl, ('K2_CONSTANT_BAND_' + str(bandnumber))) k1 = getattr(self.mtl, ('K1_CONSTANT_BAND_' + str(bandnumber))) formula = '{K2} / ( log({K1} / {DUMMY_RADIANCE} + 1))' mapcalc = formula.format(K2=k2, K1=k1, DUMMY_RADIANCE=DUMMY_MAPCALC_STRING_RADIANCE) return mapcalc def main(): """ Main program. """ if set_mtlfile(): MTLFILE = set_mtlfile() print "| Reading metadata from:", MTLFILE else: MTLFILE = '' if __name__ == "__main__": main()
unknown
codeparrot/codeparrot-clean
from collections import Mapping, MutableMapping try: from threading import RLock except ImportError: # Platform-specific: No threads available class RLock: def __enter__(self): pass def __exit__(self, exc_type, exc_value, traceback): pass try: # Python 2.7+ from collections import OrderedDict except ImportError: from .packages.ordered_dict import OrderedDict from .packages.six import iterkeys, itervalues, PY3 __all__ = ['RecentlyUsedContainer', 'HTTPHeaderDict'] _Null = object() class RecentlyUsedContainer(MutableMapping): """ Provides a thread-safe dict-like container which maintains up to ``maxsize`` keys while throwing away the least-recently-used keys beyond ``maxsize``. :param maxsize: Maximum number of recent elements to retain. :param dispose_func: Every time an item is evicted from the container, ``dispose_func(value)`` is called. Callback which will get called """ ContainerCls = OrderedDict def __init__(self, maxsize=10, dispose_func=None): self._maxsize = maxsize self.dispose_func = dispose_func self._container = self.ContainerCls() self.lock = RLock() def __getitem__(self, key): # Re-insert the item, moving it to the end of the eviction line. with self.lock: item = self._container.pop(key) self._container[key] = item return item def __setitem__(self, key, value): evicted_value = _Null with self.lock: # Possibly evict the existing value of 'key' evicted_value = self._container.get(key, _Null) self._container[key] = value # If we didn't evict an existing value, we might have to evict the # least recently used item from the beginning of the container. 
if len(self._container) > self._maxsize: _key, evicted_value = self._container.popitem(last=False) if self.dispose_func and evicted_value is not _Null: self.dispose_func(evicted_value) def __delitem__(self, key): with self.lock: value = self._container.pop(key) if self.dispose_func: self.dispose_func(value) def __len__(self): with self.lock: return len(self._container) def __iter__(self): raise NotImplementedError('Iteration over this class is unlikely to be threadsafe.') def clear(self): with self.lock: # Copy pointers to all values, then wipe the mapping values = list(itervalues(self._container)) self._container.clear() if self.dispose_func: for value in values: self.dispose_func(value) def keys(self): with self.lock: return list(iterkeys(self._container)) _dict_setitem = dict.__setitem__ _dict_getitem = dict.__getitem__ _dict_delitem = dict.__delitem__ _dict_contains = dict.__contains__ _dict_setdefault = dict.setdefault class HTTPHeaderDict(dict): """ :param headers: An iterable of field-value pairs. Must not contain multiple field names when compared case-insensitively. :param kwargs: Additional field-value pairs to pass in to ``dict.update``. A ``dict`` like container for storing HTTP Headers. Field names are stored and compared case-insensitively in compliance with RFC 7230. Iteration provides the first case-sensitive key seen for each case-insensitive pair. Using ``__setitem__`` syntax overwrites fields that compare equal case-insensitively in order to maintain ``dict``'s api. For fields that compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add`` in a loop. If multiple fields that are equal case-insensitively are passed to the constructor or ``.update``, the behavior is undefined and some will be lost. 
>>> headers = HTTPHeaderDict() >>> headers.add('Set-Cookie', 'foo=bar') >>> headers.add('set-cookie', 'baz=quxx') >>> headers['content-length'] = '7' >>> headers['SET-cookie'] 'foo=bar, baz=quxx' >>> headers['Content-Length'] '7' """ def __init__(self, headers=None, **kwargs): dict.__init__(self) if headers is not None: if isinstance(headers, HTTPHeaderDict): self._copy_from(headers) else: self.extend(headers) if kwargs: self.extend(kwargs) def __setitem__(self, key, val): return _dict_setitem(self, key.lower(), (key, val)) def __getitem__(self, key): val = _dict_getitem(self, key.lower()) return ', '.join(val[1:]) def __delitem__(self, key): return _dict_delitem(self, key.lower()) def __contains__(self, key): return _dict_contains(self, key.lower()) def __eq__(self, other): if not isinstance(other, Mapping) and not hasattr(other, 'keys'): return False if not isinstance(other, type(self)): other = type(self)(other) return dict((k1, self[k1]) for k1 in self) == dict((k2, other[k2]) for k2 in other) def __ne__(self, other): return not self.__eq__(other) values = MutableMapping.values get = MutableMapping.get update = MutableMapping.update if not PY3: # Python 2 iterkeys = MutableMapping.iterkeys itervalues = MutableMapping.itervalues __marker = object() def pop(self, key, default=__marker): '''D.pop(k[,d]) -> v, remove specified key and return the corresponding value. If key is not found, d is returned if given, otherwise KeyError is raised. ''' # Using the MutableMapping function directly fails due to the private marker. # Using ordinary dict.pop would expose the internal structures. # So let's reinvent the wheel. try: value = self[key] except KeyError: if default is self.__marker: raise return default else: del self[key] return value def discard(self, key): try: del self[key] except KeyError: pass def add(self, key, val): """Adds a (name, value) pair, doesn't overwrite the value if it already exists. 
>>> headers = HTTPHeaderDict(foo='bar') >>> headers.add('Foo', 'baz') >>> headers['foo'] 'bar, baz' """ key_lower = key.lower() new_vals = key, val # Keep the common case aka no item present as fast as possible vals = _dict_setdefault(self, key_lower, new_vals) if new_vals is not vals: # new_vals was not inserted, as there was a previous one if isinstance(vals, list): # If already several items got inserted, we have a list vals.append(val) else: # vals should be a tuple then, i.e. only one item so far # Need to convert the tuple to list for further extension _dict_setitem(self, key_lower, [vals[0], vals[1], val]) def extend(*args, **kwargs): """Generic import function for any type of header-like object. Adapted version of MutableMapping.update in order to insert items with self.add instead of self.__setitem__ """ if len(args) > 2: raise TypeError("update() takes at most 2 positional " "arguments ({} given)".format(len(args))) elif not args: raise TypeError("update() takes at least 1 argument (0 given)") self = args[0] other = args[1] if len(args) >= 2 else () if isinstance(other, Mapping): for key in other: self.add(key, other[key]) elif hasattr(other, "keys"): for key in other.keys(): self.add(key, other[key]) else: for key, value in other: self.add(key, value) for key, value in kwargs.items(): self.add(key, value) def getlist(self, key): """Returns a list of all the values for the named field. 
Returns an empty list if the key doesn't exist.""" try: vals = _dict_getitem(self, key.lower()) except KeyError: return [] else: if isinstance(vals, tuple): return [vals[1]] else: return vals[1:] # Backwards compatibility for httplib getheaders = getlist getallmatchingheaders = getlist iget = getlist def __repr__(self): return "%s(%s)" % (type(self).__name__, dict(self.itermerged())) def _copy_from(self, other): for key in other: val = _dict_getitem(other, key) if isinstance(val, list): # Don't need to convert tuples val = list(val) _dict_setitem(self, key, val) def copy(self): clone = type(self)() clone._copy_from(self) return clone def iteritems(self): """Iterate over all header lines, including duplicate ones.""" for key in self: vals = _dict_getitem(self, key) for val in vals[1:]: yield vals[0], val def itermerged(self): """Iterate over all headers, merging duplicate ones together.""" for key in self: val = _dict_getitem(self, key) yield val[0], ', '.join(val[1:]) def items(self): return list(self.iteritems()) @classmethod def from_httplib(cls, message, duplicates=('set-cookie',)): # Python 2 """Read headers from a Python 2 httplib message object.""" ret = cls(message.items()) # ret now contains only the last header line for each duplicate. # Importing with all duplicates would be nice, but this would # mean to repeat most of the raw parsing already done, when the # message object was created. Extracting only the headers of interest # separately, the cookies, should be faster and requires less # extra code. for key in duplicates: ret.discard(key) for val in message.getheaders(key): ret.add(key, val) return ret
unknown
codeparrot/codeparrot-clean
"""Helpers for tests.""" import json import pytest from .common import MQTTMessage from tests.async_mock import patch from tests.common import load_fixture from tests.components.light.conftest import mock_light_profiles # noqa @pytest.fixture(name="generic_data", scope="session") def generic_data_fixture(): """Load generic MQTT data and return it.""" return load_fixture("ozw/generic_network_dump.csv") @pytest.fixture(name="fan_data", scope="session") def fan_data_fixture(): """Load fan MQTT data and return it.""" return load_fixture("ozw/fan_network_dump.csv") @pytest.fixture(name="light_data", scope="session") def light_data_fixture(): """Load light dimmer MQTT data and return it.""" return load_fixture("ozw/light_network_dump.csv") @pytest.fixture(name="light_new_ozw_data", scope="session") def light_new_ozw_data_fixture(): """Load light dimmer MQTT data and return it.""" return load_fixture("ozw/light_new_ozw_network_dump.csv") @pytest.fixture(name="light_no_ww_data", scope="session") def light_no_ww_data_fixture(): """Load light dimmer MQTT data and return it.""" return load_fixture("ozw/light_no_ww_network_dump.csv") @pytest.fixture(name="light_no_cw_data", scope="session") def light_no_cw_data_fixture(): """Load light dimmer MQTT data and return it.""" return load_fixture("ozw/light_no_cw_network_dump.csv") @pytest.fixture(name="light_wc_data", scope="session") def light_wc_only_data_fixture(): """Load light dimmer MQTT data and return it.""" return load_fixture("ozw/light_wc_network_dump.csv") @pytest.fixture(name="cover_data", scope="session") def cover_data_fixture(): """Load cover MQTT data and return it.""" return load_fixture("ozw/cover_network_dump.csv") @pytest.fixture(name="cover_gdo_data", scope="session") def cover_gdo_data_fixture(): """Load cover_gdo MQTT data and return it.""" return load_fixture("ozw/cover_gdo_network_dump.csv") @pytest.fixture(name="climate_data", scope="session") def climate_data_fixture(): """Load climate MQTT data and 
return it.""" return load_fixture("ozw/climate_network_dump.csv") @pytest.fixture(name="lock_data", scope="session") def lock_data_fixture(): """Load lock MQTT data and return it.""" return load_fixture("ozw/lock_network_dump.csv") @pytest.fixture(name="string_sensor_data", scope="session") def string_sensor_fixture(): """Load string sensor MQTT data and return it.""" return load_fixture("ozw/sensor_string_value_network_dump.csv") @pytest.fixture(name="sent_messages") def sent_messages_fixture(): """Fixture to capture sent messages.""" sent_messages = [] with patch( "homeassistant.components.mqtt.async_publish", side_effect=lambda hass, topic, payload: sent_messages.append( {"topic": topic, "payload": json.loads(payload)} ), ): yield sent_messages @pytest.fixture(name="fan_msg") async def fan_msg_fixture(hass): """Return a mock MQTT msg with a fan actuator message.""" fan_json = json.loads( await hass.async_add_executor_job(load_fixture, "ozw/fan.json") ) message = MQTTMessage(topic=fan_json["topic"], payload=fan_json["payload"]) message.encode() return message @pytest.fixture(name="light_msg") async def light_msg_fixture(hass): """Return a mock MQTT msg with a light actuator message.""" light_json = json.loads( await hass.async_add_executor_job(load_fixture, "ozw/light.json") ) message = MQTTMessage(topic=light_json["topic"], payload=light_json["payload"]) message.encode() return message @pytest.fixture(name="light_no_rgb_msg") async def light_no_rgb_msg_fixture(hass): """Return a mock MQTT msg with a light actuator message.""" light_json = json.loads( await hass.async_add_executor_job(load_fixture, "ozw/light_no_rgb.json") ) message = MQTTMessage(topic=light_json["topic"], payload=light_json["payload"]) message.encode() return message @pytest.fixture(name="light_rgb_msg") async def light_rgb_msg_fixture(hass): """Return a mock MQTT msg with a light actuator message.""" light_json = json.loads( await hass.async_add_executor_job(load_fixture, "ozw/light_rgb.json") 
) message = MQTTMessage(topic=light_json["topic"], payload=light_json["payload"]) message.encode() return message @pytest.fixture(name="light_pure_rgb_msg") async def light_pure_rgb_msg_fixture(hass): """Return a mock MQTT msg with a pure rgb light actuator message.""" light_json = json.loads( await hass.async_add_executor_job(load_fixture, "ozw/light_pure_rgb.json") ) message = MQTTMessage(topic=light_json["topic"], payload=light_json["payload"]) message.encode() return message @pytest.fixture(name="switch_msg") async def switch_msg_fixture(hass): """Return a mock MQTT msg with a switch actuator message.""" switch_json = json.loads( await hass.async_add_executor_job(load_fixture, "ozw/switch.json") ) message = MQTTMessage(topic=switch_json["topic"], payload=switch_json["payload"]) message.encode() return message @pytest.fixture(name="sensor_msg") async def sensor_msg_fixture(hass): """Return a mock MQTT msg with a sensor change message.""" sensor_json = json.loads( await hass.async_add_executor_job(load_fixture, "ozw/sensor.json") ) message = MQTTMessage(topic=sensor_json["topic"], payload=sensor_json["payload"]) message.encode() return message @pytest.fixture(name="binary_sensor_msg") async def binary_sensor_msg_fixture(hass): """Return a mock MQTT msg with a binary_sensor change message.""" sensor_json = json.loads( await hass.async_add_executor_job(load_fixture, "ozw/binary_sensor.json") ) message = MQTTMessage(topic=sensor_json["topic"], payload=sensor_json["payload"]) message.encode() return message @pytest.fixture(name="binary_sensor_alt_msg") async def binary_sensor_alt_msg_fixture(hass): """Return a mock MQTT msg with a binary_sensor change message.""" sensor_json = json.loads( await hass.async_add_executor_job(load_fixture, "ozw/binary_sensor_alt.json") ) message = MQTTMessage(topic=sensor_json["topic"], payload=sensor_json["payload"]) message.encode() return message @pytest.fixture(name="cover_msg") async def cover_msg_fixture(hass): """Return a mock 
MQTT msg with a cover level change message.""" sensor_json = json.loads( await hass.async_add_executor_job(load_fixture, "ozw/cover.json") ) message = MQTTMessage(topic=sensor_json["topic"], payload=sensor_json["payload"]) message.encode() return message @pytest.fixture(name="cover_gdo_msg") async def cover_gdo_msg_fixture(hass): """Return a mock MQTT msg with a cover barrier state change message.""" sensor_json = json.loads( await hass.async_add_executor_job(load_fixture, "ozw/cover_gdo.json") ) message = MQTTMessage(topic=sensor_json["topic"], payload=sensor_json["payload"]) message.encode() return message @pytest.fixture(name="climate_msg") async def climate_msg_fixture(hass): """Return a mock MQTT msg with a climate mode change message.""" sensor_json = json.loads( await hass.async_add_executor_job(load_fixture, "ozw/climate.json") ) message = MQTTMessage(topic=sensor_json["topic"], payload=sensor_json["payload"]) message.encode() return message @pytest.fixture(name="lock_msg") async def lock_msg_fixture(hass): """Return a mock MQTT msg with a lock actuator message.""" lock_json = json.loads( await hass.async_add_executor_job(load_fixture, "ozw/lock.json") ) message = MQTTMessage(topic=lock_json["topic"], payload=lock_json["payload"]) message.encode() return message @pytest.fixture(name="stop_addon") def mock_install_addon(): """Mock stop add-on.""" with patch("homeassistant.components.hassio.async_stop_addon") as stop_addon: yield stop_addon @pytest.fixture(name="uninstall_addon") def mock_uninstall_addon(): """Mock uninstall add-on.""" with patch( "homeassistant.components.hassio.async_uninstall_addon" ) as uninstall_addon: yield uninstall_addon @pytest.fixture(name="get_addon_discovery_info") def mock_get_addon_discovery_info(): """Mock get add-on discovery info.""" with patch( "homeassistant.components.hassio.async_get_addon_discovery_info" ) as get_addon_discovery_info: yield get_addon_discovery_info
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python ######################################################################## # $HeadURL$ # File : dirac-proxy-init.py # Author : Adrian Casajus ######################################################################## __RCSID__ = "$Id$" import sys import getpass import DIRAC from DIRAC.Core.Base import Script class Params: proxyLoc = False dnAsUsername = False def setProxyLocation( self, arg ): self.proxyLoc = arg return DIRAC.S_OK() def setDNAsUsername( self, arg ): self.dnAsUsername = True return DIRAC.S_OK() def showVersion( self, arg ): print "Version:" print " ", __RCSID__ sys.exit( 0 ) return DIRAC.S_OK() params = Params() Script.registerSwitch( "f:", "file=", "File to use as proxy", params.setProxyLocation ) Script.registerSwitch( "D", "DN", "Use DN as myproxy username", params.setDNAsUsername ) Script.registerSwitch( "i", "version", "Print version", params.showVersion ) Script.addDefaultOptionValue( "LogLevel", "always" ) Script.parseCommandLine() from DIRAC.FrameworkSystem.Client.ProxyManagerClient import gProxyManager from DIRAC.Core.Security.MyProxy import MyProxy from DIRAC.Core.Security.X509Chain import X509Chain from DIRAC.Core.Security import Locations, CS if not params.proxyLoc: params.proxyLoc = Locations.getProxyLocation() if not params.proxyLoc: print "Can't find any valid proxy" sys.exit( 1 ) print "Uploading proxy file %s" % params.proxyLoc mp = MyProxy() retVal = mp.uploadProxy( params.proxyLoc, params.dnAsUsername ) if not retVal[ 'OK' ]: print "Can't upload proxy:" print " ", retVal[ 'Message' ] sys.exit( 1 ) print "Proxy uploaded" sys.exit( 0 )
unknown
codeparrot/codeparrot-clean
import os
from setuptools import setup, find_packages

# Long description for PyPI. Read via a context manager so the handle is
# closed promptly (the original `open(...).read()` leaked the file object).
with open('README.rst', 'rt') as readme_file:
    readme = readme_file.read()

import sys

versionstr = '0.5.0'

# install_requires is sourced from requirements.txt; pip option lines
# (starting with "--") are skipped.
if os.path.exists('./requirements.txt'):
    with open('./requirements.txt', 'r') as reqs:
        __requirements__ = [x.strip() for x in reqs.readlines() if not x.startswith('--')]
else:
    raise Exception("Missing requirements.txt in top level of package!")


setup(
    name='WeatherAlerts',
    version=versionstr,
    author='Zeb Palmer',
    author_email='zeb@zebpalmer.com',
    packages=['weatheralerts'],
    package_dir={'weatheralerts': "weatheralerts"},
    #scripts=[ "scripts/NagiosWeatherAlerts.py",
    #"scripts/MonitorAlertsByCounty.py",
    #"scripts/NWS_Alerts.py"}
    url='http://github.com/zebpalmer/WeatherAlerts',
    license='MIT',
    description='Parse the National Weather Service Emergency Alerts Feed (NWS CAP format), do useful stuff with it',
    long_description=readme,
    install_requires=__requirements__,
    use_2to3=True,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Environment :: Plugins',
        'Intended Audience :: Developers',
        'Intended Audience :: Education',
        'Intended Audience :: End Users/Desktop',
        'Intended Audience :: Science/Research',
        'Intended Audience :: System Administrators',
        'Intended Audience :: Telecommunications Industry',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Utilities'
    ],
)
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*-
#
# This file is part of Invenio Demosite.
# Copyright (C) 2004, 2005, 2006, 2007, 2008, 2010, 2011 CERN.
#
# Invenio Demosite is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio Demosite is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.

def get_number_of_copies(recid):
    """
    Searches inside crcITEM for the number of appearances of recid
    @param recid: bibliographic record id; falsy values (None, 0, '')
        are treated as "no record" and yield None
    @return: number of copies, or -1 if the database query fails
    """
    if recid:
        # Imported only when actually needed, so that calling this with a
        # falsy recid does not require a configured Invenio stack.
        from invenio.legacy.dbquery import run_sql
        try:
            return run_sql('SELECT COUNT(*) FROM crcITEM WHERE id_bibrec=%s',
                           (recid,))[0][0]
        except Exception:
            # Preserve the historical error contract (-1 on DB failure).
            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt.
            return -1
unknown
codeparrot/codeparrot-clean
# frozen_string_literal: true
#
# $Id$
#
# Copyright (c) 2004,2005 Minero Aoki
#
# This program is free software.
# You can distribute and/or modify this program under the Ruby License.
# For details of Ruby License, see ruby/COPYING.
#

require 'ripper/lexer'

class Ripper

  # Handles scanner events only; events arrive in the 'right' order
  # (the same order as the input source).
  class Filter

    # Builds a filter over +src+. +filename+ and +lineno+ are forwarded
    # to Ripper::Lexer.new; the lexer itself is an internal detail.
    def initialize(src, filename = '-', lineno = 1)
      @__lexer = Lexer.new(src, filename, lineno)
      @__line  = nil
      @__col   = nil
      @__state = nil
    end

    # Name of the file being scanned.
    def filename
      @__lexer.filename
    end

    # 1-origin line number of the current token.
    # Meaningful only inside event handlers.
    def lineno
      @__line
    end

    # 0-origin column number of the current token.
    # Meaningful only inside event handlers.
    def column
      @__col
    end

    # Scanner state of the current token: a bitwise OR of zero or more
    # Ripper::EXPR_* constants.
    def state
      @__state
    end

    # Runs the scanner. +init+ seeds an accumulator that is threaded
    # through every event handler (Enumerable#inject style); the final
    # accumulator is returned.
    def parse(init = nil)
      @__lexer.lex.inject(init) do |data, (pos, event, tok, state)|
        @__line, @__col = *pos
        @__state = state
        if respond_to?(event, true)
          __send__(event, tok, data)
        else
          on_default(event, tok, data)
        end
      end
    end

    private

    # Fallback invoked when no :on_XXX handler is defined. Returns +data+
    # unchanged so the accumulator keeps flowing.
    def on_default(event, token, data)
      data
    end

  end

end
ruby
github
https://github.com/ruby/ruby
ext/ripper/lib/ripper/filter.rb
/* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one * or more contributor license agreements. Licensed under the "Elastic License * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side * Public License v 1"; you may not use this file except in compliance with, at * your election, the "Elastic License 2.0", the "GNU Affero General Public * License v3.0 only", or the "Server Side Public License, v 1". */ package org.elasticsearch.gradle.internal.util; public class CiUtils { static String safeName(String input) { return input.replaceAll("[^a-zA-Z0-9_\\-\\.]+", " ").trim().replaceAll(" ", "_").toLowerCase(); } }
java
github
https://github.com/elastic/elasticsearch
build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/util/CiUtils.java
@source "./**/*.ts"; @source "!./**/*.ts"; @plugin "./plugin.js"; @plugin "./what\"s-this.js";
css
github
https://github.com/tailwindlabs/tailwindcss
packages/@tailwindcss-postcss/src/postcss-fix-relative-paths/fixtures/example-project/src/index.css
const val TAG = <expr>"Analysis API".toString()</expr>
kotlin
github
https://github.com/JetBrains/kotlin
analysis/analysis-api/testData/components/compileTimeConstantProvider/evaluate/string_toString.kt
/*
 * Copyright 2014-2021 JetBrains s.r.o and contributors. Use of this source code is governed by the Apache 2.0 license.
 */

package io.ktor.server.plugins

import io.ktor.http.*
import io.ktor.server.application.internal.*
import io.ktor.util.internal.*
import kotlinx.coroutines.*
import kotlinx.io.*
import kotlin.reflect.*

/**
 * Base exception to indicate that the request is not correct due to
 * wrong/missing request parameters, body content or header values.
 * Throwing this exception in a handler will lead to 400 Bad Request response
 * unless a custom [io.ktor.plugins.StatusPages] handler registered.
 *
 * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.server.plugins.BadRequestException)
 */
public open class BadRequestException(message: String, cause: Throwable? = null) : Exception(message, cause)

/**
 * This exception means that the requested resource is not found.
 * HTTP status 404 Not found will be replied when this exception is thrown and not caught.
 * 404 status page could be configured by registering a custom [io.ktor.plugins.StatusPages] handler.
 *
 * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.server.plugins.NotFoundException)
 */
public class NotFoundException(message: String? = "Resource not found") : Exception(message)

/**
 * This exception is thrown when a required parameter with name [parameterName] is missing
 *
 * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.server.plugins.MissingRequestParameterException)
 *
 * @property parameterName of missing request parameter
 */
@OptIn(ExperimentalCoroutinesApi::class)
public class MissingRequestParameterException(
    public val parameterName: String
) : BadRequestException("Request parameter $parameterName is missing"),
    CopyableThrowable<MissingRequestParameterException> {

    // createCopy lets kotlinx.coroutines duplicate this exception when it
    // crosses coroutine boundaries (CopyableThrowable contract);
    // initCauseBridge links the copy back to the original.
    override fun createCopy(): MissingRequestParameterException = MissingRequestParameterException(parameterName).also {
        it.initCauseBridge(this)
    }
}

/**
 * This exception is thrown when a required parameter with name [parameterName] couldn't be converted to the [type]
 *
 * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.server.plugins.ParameterConversionException)
 *
 * @property parameterName of missing request parameter
 * @property type this parameter is unable to convert to
 */
@OptIn(ExperimentalCoroutinesApi::class)
public class ParameterConversionException(
    public val parameterName: String,
    public val type: String,
    cause: Throwable? = null
) : BadRequestException("Request parameter $parameterName couldn't be parsed/converted to $type", cause),
    CopyableThrowable<ParameterConversionException> {

    // Note: the copy passes `this` as the cause, unlike the other
    // createCopy implementations in this file.
    override fun createCopy(): ParameterConversionException = ParameterConversionException(parameterName, type, this).also {
        it.initCauseBridge(this)
    }
}

/**
 * Thrown when content cannot be transformed to the desired type.
 * It is not defined which status code will be replied when an exception of this type is thrown and not caught.
 * Depending on child type it could be 4xx or 5xx status code. By default it will be 500 Internal Server Error.
 *
 * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.server.plugins.ContentTransformationException)
 */
public abstract class ContentTransformationException(message: String) : IOException(message)

@OptIn(ExperimentalCoroutinesApi::class)
public class CannotTransformContentToTypeException(
    private val type: KType
) : ContentTransformationException("Cannot transform this request's content to $type"),
    CopyableThrowable<CannotTransformContentToTypeException> {

    override fun createCopy(): CannotTransformContentToTypeException = CannotTransformContentToTypeException(type).also {
        it.initCauseBridge(this)
    }
}

/**
 * Thrown when there is no conversion for a content type configured.
 * HTTP status 415 Unsupported Media Type will be replied when this exception is thrown and not caught.
 *
 * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.server.plugins.UnsupportedMediaTypeException)
 */
@OptIn(ExperimentalCoroutinesApi::class)
public class UnsupportedMediaTypeException(
    private val contentType: ContentType?
) : ContentTransformationException(
    // A null contentType means the request carried no Content-Type header at all.
    contentType?.let { "Content type $it is not supported" } ?: "Content-Type header is required"
),
    CopyableThrowable<UnsupportedMediaTypeException> {

    override fun createCopy(): UnsupportedMediaTypeException = UnsupportedMediaTypeException(contentType).also {
        it.initCauseBridge(this)
    }
}

/**
 * Thrown when request body is larger than the set limit.
 * HTTP status 413 Payload Too Large will be replied when this exception is thrown and not caught.
 *
 * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.server.plugins.PayloadTooLargeException)
 */
@OptIn(ExperimentalCoroutinesApi::class)
public class PayloadTooLargeException(
    private val sizeLimit: Long
) : ContentTransformationException("Request is larger than the limit of $sizeLimit bytes"),
    CopyableThrowable<PayloadTooLargeException> {

    override fun createCopy(): PayloadTooLargeException = PayloadTooLargeException(sizeLimit).also {
        it.initCauseBridge(this)
    }
}
kotlin
github
https://github.com/ktorio/ktor
ktor-server/ktor-server-core/common/src/io/ktor/server/plugins/Errors.kt
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp.osv import fields,osv
from openerp import tools
from openerp.addons.crm import crm


class crm_lead_report_assign(osv.osv):
    """ CRM Lead Report

    Read-only reporting model backed by a SQL view (_auto = False means no
    table is created; init() below builds the view instead).
    """
    _name = "crm.lead.report.assign"
    _auto = False
    _description = "CRM Lead Report"
    # NOTE(review): 'company_id' is declared twice in this dict with an
    # identical definition; the second entry silently wins. Harmless, but
    # one of them should be removed.
    _columns = {
        'partner_assigned_id':fields.many2one('res.partner', 'Partner', readonly=True),
        'grade_id':fields.many2one('res.partner.grade', 'Grade', readonly=True),
        'user_id':fields.many2one('res.users', 'User', readonly=True),
        'country_id':fields.many2one('res.country', 'Country', readonly=True),
        'section_id':fields.many2one('crm.case.section', 'Sales Team', readonly=True),
        'company_id': fields.many2one('res.company', 'Company', readonly=True),
        'date_assign': fields.date('Assign Date', readonly=True),
        'create_date': fields.datetime('Create Date', readonly=True),
        'delay_open': fields.float('Delay to Assign',digits=(16,2),readonly=True, group_operator="avg",help="Number of Days to open the case"),
        # NOTE(review): 'delay_close' is computed from write_date in the view
        # below (time until last write, not until date_closed) — confirm this
        # is intended before relying on the label.
        'delay_close': fields.float('Delay to Close',digits=(16,2),readonly=True, group_operator="avg",help="Number of Days to close the case"),
        'delay_expected': fields.float('Overpassed Deadline',digits=(16,2),readonly=True, group_operator="avg"),
        'probability': fields.float('Avg Probability',digits=(16,2),readonly=True, group_operator="avg"),
        'probability_max': fields.float('Max Probability',digits=(16,2),readonly=True, group_operator="max"),
        'planned_revenue': fields.float('Planned Revenue',digits=(16,2),readonly=True),
        'probable_revenue': fields.float('Probable Revenue', digits=(16,2),readonly=True),
        'stage_id': fields.many2one ('crm.case.stage', 'Stage', domain="[('section_ids', '=', section_id)]"),
        'partner_id': fields.many2one('res.partner', 'Customer' , readonly=True),
        'opening_date': fields.date('Opening Date', readonly=True),
        'creation_date': fields.date('Creation Date', readonly=True),
        'date_closed': fields.date('Close Date', readonly=True),
        'nbr': fields.integer('# of Cases', readonly=True),
        'company_id': fields.many2one('res.company', 'Company', readonly=True),
        'priority': fields.selection(crm.AVAILABLE_PRIORITIES, 'Priority'),
        'type':fields.selection([
            ('lead','Lead'),
            ('opportunity','Opportunity')
        ],'Type', help="Type is used to separate Leads and Opportunities"),
    }

    def init(self, cr):
        """
            CRM Lead Report
            @param cr: the current row, from the database cursor

            (Re)creates the backing SQL view over crm_lead joined with the
            assigned partner; delays are expressed in days.
        """
        tools.drop_view_if_exists(cr, 'crm_lead_report_assign')
        cr.execute("""
            CREATE OR REPLACE VIEW crm_lead_report_assign AS (
                SELECT
                    c.id,
                    to_char(c.create_date, 'YYYY-MM-DD') as creation_date,
                    to_char(c.date_open, 'YYYY-MM-DD') as opening_date,
                    to_char(c.date_closed, 'YYYY-mm-dd') as date_closed,
                    c.date_assign,
                    c.user_id,
                    c.probability,
                    c.probability as probability_max,
                    c.stage_id,
                    c.type,
                    c.company_id,
                    c.priority,
                    c.section_id,
                    c.partner_id,
                    c.country_id,
                    c.planned_revenue,
                    c.partner_assigned_id,
                    p.grade_id,
                    p.date as partner_date,
                    c.planned_revenue*(c.probability/100) as probable_revenue,
                    1 as nbr,
                    date_trunc('day',c.create_date) as create_date,
                    extract('epoch' from (c.write_date-c.create_date))/(3600*24) as delay_close,
                    extract('epoch' from (c.date_deadline - c.date_closed))/(3600*24) as delay_expected,
                    extract('epoch' from (c.date_open-c.create_date))/(3600*24) as delay_open
                FROM
                    crm_lead c
                    left join res_partner p on (c.partner_assigned_id=p.id)
            )""")

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
unknown
codeparrot/codeparrot-clean
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

"""Benchmark kvstore push/pull throughput across GPUs."""

import os, sys
curr_path = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, os.path.join(curr_path, "../../python"))
sys.path.insert(0, os.path.join(curr_path, "../../example/image-classification/symbols"))

import mxnet as mx
import logging
import argparse
import time
import numpy as np
from importlib import import_module

from collections import namedtuple
from functools import reduce

logger = logging.getLogger()
logger.setLevel(logging.INFO)

def parse_args():
    """Parse and log the benchmark's command-line options."""
    parser = argparse.ArgumentParser(description="command for benchmark kv-store")
    parser.add_argument('--network', type=str, default="resnet",
                        help='the neural network to test')
    parser.add_argument('--gpus', type=str, default='0,1',
                        help='the gpus to be used, e.g "0,1,2,3"')
    parser.add_argument('--num-layers', type=int, default=152,
                        help='number of layers, can be used for resnet')
    parser.add_argument('--kv-store', type=str, default='device',
                        help='the kvstore type')
    parser.add_argument('--num-batches', type=int, default=5,
                        help='number of batches to run')
    parser.add_argument('--disp-batches', type=int, default=1,
                        help='show averaged results for every n batches')
    parser.add_argument('--test-results', type=int, default=1,
                        help='if or not evalute the results correctness')
    parser.add_argument('--image-shape', type=str, default='3,224,224',
                        help='input images shape')
    parser.add_argument('--num-classes', type=int, default=1000,
                        help='number of classes')
    parser.add_argument('--optimizer', type=str, default='None',
                        help='the optimizer set to kvstore. None means no optimizer')
    parser.add_argument('--gc-type', type=str, default='none',
                        help='type of gradient compression')
    args = parser.parse_args()
    logging.info(args)
    return args

def get_shapes(symbol, data_shape):
    """Return shapes of all weight/bias arguments of ``symbol`` given ``data_shape``."""
    arg_name = symbol.list_arguments()
    arg_shape, _, _ = symbol.infer_shape(data=data_shape)
    shapes = [s for n,s in zip(arg_name, arg_shape) if 'weight' in n or 'bias' in n]
    return shapes

def diff(a, b):
    """Sum of elementwise absolute differences between two NDArrays."""
    return np.sum(np.abs(a.asnumpy() - b.asnumpy()))

def error(gpu_res, cpu_res):
    """Aggregate relative error of per-GPU results against the CPU reference."""
    res = sum([sum([diff(a, b) for a in w]) for w, b in zip(gpu_res, cpu_res)])
    res /= sum([np.sum(np.abs(g.asnumpy())) for g in cpu_res])
    return res

def run(network, optimizer, gpus, kv_store, image_shape, disp_batches,
        num_batches, test_results, gc_type, **kwargs):
    """Run the push/pull benchmark and return a list of per-interval Results.

    Batch 0 is a warm-up and is excluded from the reported numbers. When
    ``test_results`` is set, the GPU results are checked against a CPU
    reference and the relative error is reported alongside bandwidth.
    """
    # create kvstore and optimizer
    devs = [mx.gpu(int(i)) for i in gpus.split(',')]
    kv = mx.kv.create(kv_store)
    if gc_type != 'none':
        kv.set_gradient_compression({'type': gc_type})
    if optimizer is None or optimizer == 'None':
        # FIX: the updater used to be built unconditionally from `optimizer`,
        # so the default value 'None' crashed create_optimizer before the
        # benchmark even started. Only build it when an optimizer is set;
        # the loop below only dereferences `updater` in that case.
        opt = None
        updater = None
    else:
        opt = mx.optimizer.Optimizer.create_optimizer(optimizer)
        kv.set_optimizer(opt)
        updater = mx.optimizer.get_updater(mx.optimizer.Optimizer.create_optimizer(optimizer))

    # create network
    symbol = import_module(network).get_symbol(image_shape=image_shape, **kwargs)
    # a fake batch size 32, which does not affect the results
    data_shape = (32,) + tuple([int(s) for s in image_shape.split(',')])
    shapes = get_shapes(symbol, data_shape)

    size = float(sum([reduce(lambda x,y : x*y, s, 1) for s in shapes])) * 4 / 1e6
    logging.info('num of arrays = %d, total size = %f MB' % (len(shapes), size))

    for i, s in enumerate(shapes):
        kv.init(i, mx.nd.zeros(s))

    grads_val = [[mx.random.uniform(-1,1,shape=s) for d in devs] for s in shapes]
    grads = [[g.as_in_context(d) for g, d in zip(gs, devs)] for gs in grads_val]
    weights = [[mx.nd.zeros(s, d) for d in devs] for s in shapes]
    # CPU reference: summed gradients scaled by the number of workers.
    cpu_grads = [mx.nd.array(sum([g.asnumpy() for g in gs]))*kv.num_workers for gs in grads_val]
    cpu_weights = [mx.nd.zeros(s) for s in shapes]

    toc = 0
    Results = namedtuple('Results', ['iter', 'time', 'bandwidth', 'error'])
    res = []
    for b in range(0, num_batches+1):
        tic = time.time()
        for i,g in enumerate(grads):
            kv.push(i, g, i)
        for i,w in enumerate(weights):
            kv.pull(i, w, i)
        # Block until all pulls complete so the timing is meaningful.
        for ws in weights:
            for w in ws:
                w.wait_to_read()
        toc += time.time() - tic
        if test_results:
            if opt is None:
                err = error(weights, cpu_grads)
            else:
                for i, wg in enumerate(zip(cpu_weights, cpu_grads)):
                    updater(i, wg[1], wg[0])
                err = error(weights, cpu_weights)
        else:
            err = -1

        if b % disp_batches == 0:
            toc /= disp_batches
            if b != 0:  # 0 is used for warmup, ignored
                r = Results(iter=b, time=toc, error=err,
                            bandwidth=size*2*(len(devs)-1)/len(devs)/toc/1e3)
                logging.info('iter %d, %f sec, %f GB/sec per gpu, error %f' % (
                    r.iter, r.time, r.bandwidth, r.error))
                res.append(r)
            toc = 0
    return res

if __name__ == "__main__":
    args = parse_args()
    run(**vars(args))
unknown
codeparrot/codeparrot-clean
# Copyright 2010 Jacob Kaplan-Moss
# Copyright 2011 OpenStack LLC.
# Copyright (c)2012 Rackspace US, Inc.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

"""
Base utilities to build API operation managers and objects on top of.
"""

import six

import pyrax
import pyrax.utils as utils


class BaseResource(object):
    """
    A resource represents a particular instance of an object (server, flavor,
    etc). This is pretty much just a bag for attributes.

    Attributes are populated from API responses via _add_details(); anything
    not yet present triggers a lazy GET through __getattr__.
    """
    # When True, human_id returns a slugified name for bash completion.
    HUMAN_ID = False
    # Attribute used as the "name" of the resource by human_id.
    NAME_ATTR = "name"
    # Some resource do not have any additional details to lazy load,
    # so skip the unneeded API call by setting this to False.
    get_details = True
    # Atts not to display when showing the __repr__()
    _non_display = []
    # Properties to add to the __repr__() display
    _repr_properties = []

    def __init__(self, manager, info, key=None, loaded=False):
        # `manager` performs the API calls; `info` is the raw response dict.
        # When `key` is given, the payload is nested under that key.
        self._loaded = loaded
        self.manager = manager
        if key:
            info = info[key]
        self._info = info
        self._add_details(info)

    @property
    def human_id(self):
        """Subclasses may override this to provide a pretty ID which can be
        used for bash completion.
        """
        if self.NAME_ATTR in self.__dict__ and self.HUMAN_ID:
            return utils.to_slug(getattr(self, self.NAME_ATTR))
        return None

    def _add_details(self, info):
        """
        Takes the dict returned by the API call and sets the corresponding
        attributes on the object.
        """
        for (key, val) in six.iteritems(info):
            # NOTE(review): under Python 3, six.text_type is str, so this
            # branch turns str keys into bytes before setattr — which looks
            # like Python-2-era normalization. Confirm intended behavior
            # before touching it.
            if isinstance(key, six.text_type):
                key = key.encode(pyrax.get_encoding())
            elif isinstance(key, bytes):
                key = key.decode("utf-8")
            setattr(self, key, val)

    def __getattr__(self, key):
        """
        Many objects are lazy-loaded: only their most basic details are
        initially returned. The first time any of the other attributes are
        referenced, a GET is made to get the full details for the object.
        """
        if not self.loaded:
            self.get()
        # Attribute should be set; if not, it's not valid
        try:
            return self.__dict__[key]
        except KeyError:
            raise AttributeError("'%s' object has no attribute "
                    "'%s'." % (self.__class__, key))

    def __repr__(self):
        # Hide private attrs, bookkeeping attrs, and anything listed in
        # _non_display; append the extra _repr_properties.
        reprkeys = sorted(key for key in self.__dict__.keys()
                if (key[0] != "_")
                and (key not in ("manager", "created", "updated"))
                and (key not in self._non_display))
        reprkeys += self._repr_properties
        info = ", ".join("%s=%s" % (key, getattr(self, key)) for key in reprkeys)
        return "<%s %s>" % (self.__class__.__name__, info)

    def get(self):
        """Gets the details for the object."""
        # set 'loaded' first ... so if we have to bail, we know we tried.
        self.loaded = True
        if not hasattr(self.manager, "get"):
            return
        if not self.get_details:
            return
        new = self.manager.get(self)
        if new:
            self._add_details(new._info)
    # This alias is used to make its purpose clearer.
    reload = get

    def delete(self):
        """Deletes the object."""
        # set 'loaded' first ... so if we have to bail, we know we tried.
        self.loaded = True
        if not hasattr(self.manager, "delete"):
            return
        self.manager.delete(self)

    def __eq__(self, other):
        """
        Two resource objects that represent the same entity in the cloud
        should be considered equal if they have the same ID. If they don't
        have IDs, but their attribute info matches, they are equal.

        NOTE(review): __ne__ and __hash__ are not defined; under Python 2
        that leaves != and hashing at default identity semantics — confirm
        callers never rely on those.
        """
        if not isinstance(other, self.__class__):
            return False
        if hasattr(self, "id") and hasattr(other, "id"):
            return self.id == other.id
        return self._info == other._info

    def _get_loaded(self):
        return self._loaded

    def _set_loaded(self, val):
        self._loaded = val
    # Whether full details have been fetched; setting it is how get()
    # prevents __getattr__ from recursing into another fetch.
    loaded = property(_get_loaded, _set_loaded)
unknown
codeparrot/codeparrot-clean
""" A Python "serializer". Doesn't do much serializing per se -- just converts to and from basic Python data types (lists, dicts, strings, etc.). Useful as a basis for other serializers. """ from __future__ import unicode_literals from django.apps import apps from django.conf import settings from django.core.serializers import base from django.db import DEFAULT_DB_ALIAS, models from django.utils import six from django.utils.encoding import force_text, is_protected_type class Serializer(base.Serializer): """ Serializes a QuerySet to basic Python objects. """ internal_use_only = True def start_serialization(self): self._current = None self.objects = [] def end_serialization(self): pass def start_object(self, obj): self._current = {} def end_object(self, obj): self.objects.append(self.get_dump_object(obj)) self._current = None def get_dump_object(self, obj): data = { "model": force_text(obj._meta), "fields": self._current, } if not self.use_natural_primary_keys or not hasattr(obj, 'natural_key'): data["pk"] = force_text(obj._get_pk_val(), strings_only=True) return data def handle_field(self, obj, field): value = field._get_val_from_obj(obj) # Protected types (i.e., primitives like None, numbers, dates, # and Decimals) are passed through as is. All other values are # converted to string first. 
if is_protected_type(value): self._current[field.name] = value else: self._current[field.name] = field.value_to_string(obj) def handle_fk_field(self, obj, field): if self.use_natural_foreign_keys and hasattr(field.rel.to, 'natural_key'): related = getattr(obj, field.name) if related: value = related.natural_key() else: value = None else: value = getattr(obj, field.get_attname()) if not is_protected_type(value): value = field.value_to_string(obj) self._current[field.name] = value def handle_m2m_field(self, obj, field): if field.rel.through._meta.auto_created: if self.use_natural_foreign_keys and hasattr(field.rel.to, 'natural_key'): m2m_value = lambda value: value.natural_key() else: m2m_value = lambda value: force_text(value._get_pk_val(), strings_only=True) self._current[field.name] = [m2m_value(related) for related in getattr(obj, field.name).iterator()] def getvalue(self): return self.objects def Deserializer(object_list, **options): """ Deserialize simple Python objects back into Django ORM instances. It's expected that you pass the Python objects themselves (instead of a stream or a string) to the constructor """ db = options.pop('using', DEFAULT_DB_ALIAS) ignore = options.pop('ignorenonexistent', False) for d in object_list: # Look up the model and starting build a dict of data for it. 
try: Model = _get_model(d["model"]) except base.DeserializationError: if ignore: continue else: raise data = {} if 'pk' in d: data[Model._meta.pk.attname] = Model._meta.pk.to_python(d.get("pk", None)) m2m_data = {} field_names = {f.name for f in Model._meta.get_fields()} # Handle each field for (field_name, field_value) in six.iteritems(d["fields"]): if ignore and field_name not in field_names: # skip fields no longer on model continue if isinstance(field_value, str): field_value = force_text( field_value, options.get("encoding", settings.DEFAULT_CHARSET), strings_only=True ) field = Model._meta.get_field(field_name) # Handle M2M relations if field.rel and isinstance(field.rel, models.ManyToManyRel): if hasattr(field.rel.to._default_manager, 'get_by_natural_key'): def m2m_convert(value): if hasattr(value, '__iter__') and not isinstance(value, six.text_type): return field.rel.to._default_manager.db_manager(db).get_by_natural_key(*value).pk else: return force_text(field.rel.to._meta.pk.to_python(value), strings_only=True) else: m2m_convert = lambda v: force_text(field.rel.to._meta.pk.to_python(v), strings_only=True) m2m_data[field.name] = [m2m_convert(pk) for pk in field_value] # Handle FK fields elif field.rel and isinstance(field.rel, models.ManyToOneRel): if field_value is not None: if hasattr(field.rel.to._default_manager, 'get_by_natural_key'): if hasattr(field_value, '__iter__') and not isinstance(field_value, six.text_type): obj = field.rel.to._default_manager.db_manager(db).get_by_natural_key(*field_value) value = getattr(obj, field.rel.field_name) # If this is a natural foreign key to an object that # has a FK/O2O as the foreign key, use the FK value if field.rel.to._meta.pk.rel: value = value.pk else: value = field.rel.to._meta.get_field(field.rel.field_name).to_python(field_value) data[field.attname] = value else: data[field.attname] = field.rel.to._meta.get_field(field.rel.field_name).to_python(field_value) else: data[field.attname] = None # Handle all 
other fields else: data[field.name] = field.to_python(field_value) obj = base.build_instance(Model, data, db) yield base.DeserializedObject(obj, m2m_data) def _get_model(model_identifier): """ Helper to look up a model from an "app_label.model_name" string. """ try: return apps.get_model(model_identifier) except (LookupError, TypeError): raise base.DeserializationError("Invalid model identifier: '%s'" % model_identifier)
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python ################################################################## # Copyright (c) 2012, Sergej Srepfler <sergej.srepfler@gmail.com> # February 2012 - # Version 0.3.1, Last change on Nov 15, 2012 # This software is distributed under the terms of BSD license. ################################################################## # All functions needed to build/decode LDAP messages import struct import socket import sys ERROR = -1 # Encoding structure: CLASS(bit 7,8)+PC(bit 5)+Tag (bit 0-4) dict_class={'UNIVERSAL':0x00, 'APPLICATION':0x40, 'CONTEXT_SPECIFIC':0x80, 'PRIVATE':0xC0} dict_pc={'PRIMITIVE':0, 'CONSTRUCTED':0x20} dict_tag={'EOC':0, 'BOOLEAN':1, 'INTEGER':2, 'BIT_STRING':3, 'OCTET_STRING':4, 'NULL':5, 'OBJECT_IDENTIFIER':6, 'OBJECT_DESCRIPTOR':7, 'EXTERNAL':8, 'FLOAT':9, 'ENUMERATED':10, 'EMBEDDED':11, 'UTF8':12, 'RELATIVE_OID':13, 'SEQUENCE':16, 'SET':17, 'NUMERIC_STRING':18, 'PRINTABLE_STRING':19, 'T61STRING':20, 'VIDEOTEXT_STRING':21, 'IA5STRING':22, 'UTC_TIME':23, 'GENERALIZED_TIME':24, 'GRAPHIC_STRING':25, 'VISIBLE_STRING':26, 'GENERAL_STRING':27, 'UNIVERSAL_STRING':28, 'CHARACTER_STRING':29, 'BMP_STRING':30, 'LONG_FORM':31} dict_RES={'success':0, 'operationsError':1, 'protocolError':2, 'timeLimitExceeded':3, 'sizeLimitExceeded':4, 'compareFalse':5, 'compareTrue':6, 'authMethodNotSupported':7, 'strongerAuthRequired':8, 'referral':10, 'adminLimitExceeded':11, 'unavailableCriticalExtension':12, 'confidentialityRequired':13, 'saslBindInProgress':14, 'noSuchAttribute':16, 'undefinedAttributeType':17, 'inappropriateMatching':18, 'constraintViolation':19, 'attributeOrValueExists':20, 'invalidAttributeSyntax':21, 'noSuchObject':32, 'aliasProblem':33, 'invalidDNSyntax':34, 'aliasDereferencingProblem':36, 'inappropriateAuthentication':48, 'invalidCredentials':49, 'insufficientAccessRights':50, 'busy':51, 'unavailable':52, 'unavailable':52, 'unwillingToPerform':53, 'loopDetect':54, 'namingViolation':64, 'objectClassViolation':65, 
'notAllowedOnNonLeaf':66, 'notAllowedOnRDN':67, 'entryAlreadyExists':68, 'objectClassModsProhibited':69, 'affectsMultipleDSAs':71, 'other':80 } dict_APP= {'bindRequest': 0, 'bindResponse':1, 'unbindRequest':2, 'searchRequest':3, 'searchResultEntry':4, 'searchResultDone':5, 'modifyRequest':6, 'modifyResponse':7, 'addRequest':8, 'addResponse':9, 'delRequest':10, 'delResponse':11, 'modifyDNRequest':12, 'modifyDNResponse':13, 'compareRequest':15, 'compareResponse':16, 'abandonRequest':17, 'extendedRequest':18, 'extendedResponse':19, 'intermediateResponse':20 } class bindReq: def __init__(self): self.messageId=0 self.code=0 self.version=3 self.name="" self.authentication="" class LDAPResult: def __init__(self): self.messageId=0 self.code=0 self.result=0 self.matchedDN="" self.errorMSG="" class searchReq: def __init__(self): self.messageId=0 self.code=0 self.objectName=0 self.scope=3 self.derefAliases=0 self.sizeLimit=1 self.timeLimit=0 self.typesOnly=False self.filter=[] class searchRes: def __init__(self): self.messageId=0 self.code=0 self.objectName="" self.attributes=[] class modifyReq: def __init__(self): self.messageId=0 self.code=0 self.objectName="" self.operation=[] self.modification=[] self.controls=[] class addReq: def __init__(self): self.messageId=0 self.code=0 self.objectName="" self.attributes="" class delReq: def __init__(self): self.messageId=0 self.code=0 self.objectName="" #----------------------------------------------------------------------------- # From RFC: #- Only the definite form of length encoding is used. #- OCTET STRING values are encoded in the primitive form only. #- If the value of a BOOLEAN type is true, the encoding of the value octet is # set to hex "FF". #- If a value of a type is its default value, it is absent. Only some BOOLEAN # and INTEGER types have default values #- These restrictions do not apply to ASN.1 types encapsulated inside of # OCTET STRING values, such as attribute values, unless otherwise stated. 
#-----------------------------------------------------------------------------
#Encoding section
#
# NOTE(review): this module is Python 2 only - it relies on the str "hex"
# codec (value.encode("hex") / value.decode("hex")) and print statements.
# All "messages" handled below are hex-encoded ASCII strings, two characters
# per octet.
#-----------------------------------------------------------------------------
# Pack according to ASN.1 (Abstract Syntax Notation One)
# Basic Encoding Rules to get identifier from Class(cls), Variable-Type(pc) and Data-Type (tag)
# see dict_class, dict_pc, dict_tag for values
def BERencode(cls,pc,tag):
    """Return the BER identifier octet as a 2-char uppercase hex string."""
    # The inputs are pre-shifted values (see dict_class/dict_pc/dict_tag),
    # so a plain sum assembles the identifier octet.
    enc=cls+pc+tag
    return "%02X"%int(enc)

# Encode <value> as int with <op> identifier
# Reduce len if possible
def encodeInt(op,value):
    """Encode an integer TLV: identifier <op> + length + big-endian value.

    The value is packed as 4 bytes, then leading zero octets are stripped
    down to a minimum of one octet to keep the encoding short.
    """
    ilen=4
    r=struct.pack("!I",int(value)).encode("hex")
    # Drop leading "00" octet pairs, but always keep at least one octet.
    while r[:2]=='00':
        r=r[2:]
        ilen-=1
        if ilen==1:
            break
    ret=op+'%02X'%ilen+r
    return ret

# Encode <value> as string with <op> identifier
def encodeStr(op,value):
    """Encode a string TLV: identifier <op> + length + hex of <value>.

    NOTE(review): lengths >= 128 always use the 2-byte long form ("82"),
    even when one length byte ("81") would suffice - presumably acceptable
    to peers, but worth confirming against X.690 minimal-length rules.
    """
    ret=op
    if len(value)<128:
        # Short form: single length octet.
        ret=ret+"%02X"%len(value)
    else:
        # Long form: 0x82 marker followed by a 2-byte length.
        ret=ret+"82"+"%04X"%len(value)
    ret=ret+value.encode("hex")
    return ret

# Encode Value
def encodeValue(op,value):
    """Encode <value> under identifier <op>, dispatching on the BER tag.

    Tags 1 (BOOLEAN), 2 (INTEGER) and 10 (ENUMERATED) are encoded as
    integers; everything else as a string.
    """
    cls,pc,tag=BERdecode(op.decode("hex"))
    if tag in [1,2,10]:
        # Encode integer
        return encodeInt(op,value)
    else:
        return encodeStr(op,value)

# Encode key=value pair (everything is as string from LDIF)
def encodeKeyValue(key,value):
    """Encode an attribute as SEQUENCE{ key, SET{ value... } } in hex.

    <value> may be a single string or a list of strings (multi-valued
    attribute); each value becomes its own OCTET STRING inside the SET.
    """
    k=encodeStr('04',key).decode('hex')
    if isinstance(value,list):
        # Multi-valued: concatenate one OCTET STRING per value.
        v=''
        for vv in value:
            v=v+encodeStr('04',vv).decode('hex')
    else:
        v=encodeStr('04',value).decode('hex')
    # 30 = SEQUENCE wrapper, 31 = SET wrapper around the value(s).
    ret=encodeStr('30',k+encodeStr('31',v).decode('hex'))
    return ret

#-----------------------------------------------------------------------------
#Decoding section
#-----------------------------------------------------------------------------
# Decode according to ASN.1
def BERdecode(byte):
    """Split a raw identifier octet into (class, pc, tag).

    Class and pc are returned pre-shifted back into their bit positions
    (so they match the dict_class/dict_pc values); tag is the low 5 bits.
    """
    cls=ord(byte)>>6
    pc=(ord(byte)>>5)&1
    tag=ord(byte)&0x1F
    return cls<<6,pc<<5,tag

# Decode Integer value
def decodeToInt(msg):
    """Decode a hex string (up to 4 octets) into an unsigned integer."""
    # Left-pad to 8 hex chars so struct can unpack a full 32-bit word.
    while len(msg)<8:
        msg="00"+msg
    ret=struct.unpack("!I",msg.decode("hex"))[0]
    return ret

# Decode msg header of LDAP message till msg specific stuff
def decodeHDR(msg):
    """Strip the LDAPMessage envelope; return (msgId, appId, body, trailer).

    msgId is the raw hex messageID value, appId the application identifier
    octet (hex), body the operation-specific payload and trailer whatever
    follows the operation (e.g. controls).
    """
    # Remove main envelope #30
    op,msg,x=chop_BER(msg)
    # get msgId
    op,msgId,msg=chop_BER(msg)
    #get appId
    appId,msg,x=chop_BER(msg)
    return msgId,appId,msg,x

# For tuple (t) decode value (default decoding method is as string)
def decodeValue(t):
    """Decode one (identifier, value) tuple; non-tuples yield ''.

    BOOLEAN/INTEGER/ENUMERATED tags decode to int, all else to the raw
    string obtained by hex-decoding the value.
    """
    if isinstance(t,tuple):
        (op,value)=t
    else:
        return ''
    cls,pc,tag=BERdecode(op.decode("hex"))
    if tag in [1,2,10]:
        # Decode Integer
        return decodeToInt(value)
    else:
        return value.decode("hex")

# Decode application-specific attributes (hex message-undecoded) into tuples
def decodeParams(msg):
    """Recursively split a hex payload into (op, value) tuples.

    Primitive elements become (identifier, hex-value) tuples; constructed
    elements recurse, producing nested lists.
    """
    ret=[]
    while msg!='':
        #print "I",msg
        op,value,msg=chop_BER(msg)
        cls,pc,tag=BERdecode(op.decode("hex"))
        #print "D",op,value,msg
        if pc==0:
            #PRIMITIVE
            ret.append((op,value))
        else:
            # CONSTRUCTED: descend into the nested encoding.
            ret.append(decodeParams(value))
    #print "R",ret
    return ret

# Decode key=[multiple values] from list
def decodeList(list):
    """Render a [key, [values...]] pair as 'key=v1,v2,...' (ERROR if empty).

    NOTE(review): the parameter shadows the builtin 'list'.
    """
    vRet=''
    #print "DL",list
    if len(list)==0:
        return ERROR
    key=decodeValue(list[0])
    for v in list[1]:
        if vRet=='':
            vRet=decodeValue(v)
        else:
            vRet+=','+decodeValue(v)
    return key+'='+str(vRet)

# Decode to proper object (match option to attribute)
def decodeFinal(msgId,appId,rest,unknown):
    """Dispatch on the application tag to the matching decode_* function.

    Unknown tags abort the program via bailOut().
    """
    cls,pc,tag=BERdecode(appId.decode("hex"))
    if tag==0:
        # bindReq
        return decode_bindReq(msgId,appId,rest)
    if tag==1:
        # bindRes
        return decode_bindRes(msgId,appId,rest)
    if tag==2:
        # unbindReq
        return decode_unbindReq(msgId,appId,rest)
    if tag==3:
        # searchReq
        return decode_searchReq(msgId,appId,rest)
    if tag==4:
        # searchResEntry
        return decode_searchResEntry(msgId,appId,rest)
    if tag in [5,7,9,11]:
        # generic LDAP response
        return decode_LDAPResult(msgId,appId,rest)
    if tag==6:
        # modifyReq
        return decode_modifyReq(msgId,appId,rest,unknown)
    if tag==8:
        # addReq
        return decode_addReq(msgId,appId,rest,unknown)
    if tag==10:
        # deleteReq
        return decode_deleteReq(msgId,appId,rest,unknown)
    dbg="Don't know how to process AppId",tag
    bailOut(dbg)

def decode_bindReq(msgId,appId,rest):
    """Decode a bindRequest payload into a bindReq object."""
    L=bindReq()
    L.messageId=msgId
    L.code=appId
    #split options
    list=decodeParams(rest)
    # And place them into matching variables
    L.version=decodeValue(list.pop(0))
    L.name=decodeValue(list.pop(0))
    L.authentication=decodeValue(list.pop(0))
    return L

def decode_bindRes(msgId,appId,rest):
    """Decode a bindResponse payload into an LDAPResult object."""
    L=LDAPResult()
    L.messageId=msgId
    L.code=appId
    #split options
    list=decodeParams(rest)
    # And place them into matching variables
    L.result=decodeValue(list.pop(0))
    L.matchedDN=decodeValue(list.pop(0))
    L.errorMSG=decodeValue(list.pop(0))
    return L

def decode_unbindReq(msgId,appId,rest):
    """Decode an unbindRequest (carries no payload beyond the header)."""
    L=LDAPResult()
    L.messageId=msgId
    L.code=appId
    return L

def decode_searchReq(msgId,appId,rest):
    """Decode a searchRequest payload into a searchReq object."""
    L=searchReq()
    L.messageId=msgId
    L.code=appId
    #print "R",rest
    # get operation parameters
    op,value,msg=chop_BER(rest)
    L.objectName=decodeValue((op,value))
    op,value,msg=chop_BER(msg)
    L.scope=decodeValue((op,value))
    op,value,msg=chop_BER(msg)
    L.derefAliases=decodeValue((op,value))
    op,value,msg=chop_BER(msg)
    L.sizeLimit=decodeValue((op,value))
    op,value,msg=chop_BER(msg)
    L.timeLimit=decodeValue((op,value))
    op,value,msg=chop_BER(msg)
    L.typesOnly=decodeValue((op,value))
    # Filter is something I never used, so - not implemented/tested
    list=decodeParams(msg)
    #print "FL",len(list),list
    if isinstance(list[0],tuple):
        L.filter.append(decodeList(list))
    else:
        for l in list:
            r=decodeList(l)
            if r!=ERROR:
                L.filter.append(r)
    return L

def decode_searchResEntry(msgId,appId,rest):
    """Decode a searchResultEntry payload into a searchRes object."""
    L=searchRes()
    L.messageId=msgId
    L.code=appId
    #get objectName
    op,value,msg=chop_BER(rest)
    L.objectName=decodeValue((op,value))
    #print "I",msg
    # get operation parameters
    op,msg,x=chop_BER(msg)
    #print "M",msg
    #print "X",x
    # Finally split options
    list=decodeParams(msg)
    #print "L",list
    # And place them into matching variables
    for l in list:
        L.attributes.append(decodeList(l))
    return L

def decode_LDAPResult(msgId,appId,rest):
    """Decode a generic LDAP response payload into an LDAPResult object."""
    L=LDAPResult()
    L.messageId=msgId
    L.code=appId
    #split options
    list=decodeParams(rest)
    # And place them into matching variables
    L.result=decodeValue(list.pop(0))
    L.matchedDN=decodeValue(list.pop(0))
    L.errorMSG=decodeValue(list.pop(0))
    return L

def decode_modifyReq(msgId,appId,rest,unknown):
    """Decode a modifyRequest payload (plus trailing controls) into modifyReq."""
    L=modifyReq()
    L.messageId=msgId
    L.code=appId
    #get objectName
    # NOTE(review): 'op,op,msg=chop_BER(rest)' deliberately(?) lets the
    # value overwrite the identifier - op ends up holding the value field.
    op,op,msg=chop_BER(rest)
    L.objectName=op.decode("hex")
    # get operation parameters
    op,msg,x=chop_BER(msg)
    # Finally split options
    list=decodeParams(msg)
    #print "L",list
    # And place them into matching variables
    for l in list:
        op=decodeValue(l.pop(0))
        L.operation.append(op)
        L.modification.append(decodeList(l.pop(0)))
    # I have no idea if this controls works as it should
    if len(unknown)>0:
        list=decodeParams(unknown)
        #print "CL",list
        for l in list[0]:
            L.controls.append(decodeValue(l.pop(0)))
    return L

def decode_addReq(msgId,appId,rest,unknown):
    """Decode an addRequest payload into an addReq object.

    NOTE(review): the attribute list is only printed, never stored in
    L.attributes - looks unfinished; confirm before relying on it.
    """
    L=addReq()
    L.messageId=msgId
    L.code=appId
    #get objectName
    op,op,msg=chop_BER(rest)
    L.objectName=op.decode("hex")
    # get operation parameters
    op,msg,x=chop_BER(msg)
    # Finally split options
    list=decodeParams(msg)
    print "L",list
    # And place them into matching variables
    return L

def decode_deleteReq(msgId,appId,rest,unknown):
    """Decode a delRequest payload.

    NOTE(review): this builds a searchReq, not a delReq - probably a
    copy/paste slip; the fields used (messageId/code/objectName) happen to
    exist on both classes, so it works by accident.
    """
    L=searchReq()
    L.messageId=msgId
    L.code=appId
    #get objectName
    op,op,msg=chop_BER(rest)
    L.objectName=op.decode("hex")
    # get operation parameters
    op,msg,x=chop_BER(msg)
    # Finally split options
    list=decodeParams(msg)
    print "L",list
    return L

#-----------------------------------------------------------------------------
#Misc section
#-----------------------------------------------------------------------------
# Calculate object len (currently supports up to 64K)
def calc_len(len):
    """Return the BER length field for <len> as a hex string.

    NOTE(review): the long-form branches prepend a literal "0x81"/"0x82"
    string (including the characters '0x'), while the short form emits
    plain hex - the '0x' prefix looks like a bug; confirm against callers.
    NOTE(review): parameter shadows the builtin 'len'.
    """
    if len<=127:
        #short form
        ret="%02X"%int(len)
    else:
        #long form limited to 2 bytes (64K)
        if len<256:
            ret="0x81"+"%02X"%int(len)
        else:
            ret="0x82"+"%04X"%int(len)
    return ret

# Quit program with error
def bailOut(msg):
    """Print <msg> and terminate the process with exit code 1."""
    print msg
    sys.exit(1)

# Split message into parts (remove field from remaining body)
def chop_msg(msg,size):
    """Return (first <size> chars, remainder) of the hex string <msg>."""
    return (msg[0:size],msg[size:])

# Chop len from message
def chop_len(msg):
    """Consume a BER length field; return (length as int, remaining msg).

    NOTE(review): mlen>"80" is a lexicographic string comparison on the
    2-char hex length octet; it matches long-form markers 81..FF only for
    the uppercase hex this module produces, and treats the indefinite-form
    marker 80 itself as short form.
    """
    (mlen,msg)=chop_msg(msg,2)
    if mlen>"80":
        # Multibyte: low 7 bits give the number of following length octets.
        nlen=ord(mlen.decode("hex"))&0x7f
        (mlen,msg)=chop_msg(msg,2*nlen)
    return (decodeToInt(mlen),msg)

# get BER encoded option from message
def chop_BER(msg):
    """Consume one TLV; return (identifier hex, value hex, remaining msg)."""
    (op,msg)=chop_msg(msg,2)
    (oplen,msg)=chop_len(msg)
    # oplen counts octets; the hex string uses two chars per octet.
    (val,msg)=chop_msg(msg,2*oplen)
    return op,val,msg

# Connect to host:port (TCP)
def Connect(host,port):
    """Open and return a TCP socket connected to (host, port)."""
    # Create a socket (SOCK_STREAM means a TCP socket)
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((host, port))
    return sock

# From dict_* (dictionary) find index for value
def dictCmd2Name(dictionary,value):
    """Reverse-lookup: return the first key whose value equals <value>.

    Relies on keys() and values() iterating in the same order, which
    CPython guarantees for an unmodified dict.
    """
    keys=dictionary.keys()
    values=dictionary.values()
    index=[i for i,x in enumerate(values) if x == value]
    return keys[index[0]]

#-----------------------------------------------------------------------------
#Create section
#-----------------------------------------------------------------------------
# Create generic Response message
def create_LDAPResult(msgId,code,result,matchedDN,errorMSG):
    """Build a hex-encoded LDAPMessage carrying a generic LDAPResult.

    The TLVs are assembled innermost-first (diagnostic message, matched DN,
    result code), wrapped in the operation identifier <code>, prefixed with
    the messageID and finally enclosed in the outer SEQUENCE.
    """
    # Adding from end to the beginning
    # LDAPResult ::= SEQUENCE {
    #      resultCode         ENUMERATED,
    #      matchedDN          LDAPDN,
    #      diagnosticMessage  LDAPString,
    #      referral           [3] Referral OPTIONAL }
    # 04="%02X"%dict_tag['OCTET_STRING']
    # 0A="%02X"%dict_tag['ENUMERATED']
    ret=''
    ret=encodeValue('04',errorMSG)+ret
    ret=encodeValue('04',matchedDN)+ret
    ret=encodeValue('0A',result)+ret
    ret=encodeStr(code,ret.decode("hex"))
    ret=encodeStr('02',msgId.decode("hex"))+ret
    ret=encodeStr('30',ret.decode("hex"))
    return ret

######################################################
# History
# 0.2.9 - Oct 11, 2012 - initial version
# 0.3.0 - Oct 26, 2012 - finally got it working
#       - Oct 29, 2012 - msgId encoding fixed, reuseaddr fixed
#       - encodeTo<Type> renamed to encode<Type> (more logical)
#       - multiple values for key now supported
#       - int len now not fixed
# 0.3.1 - Nov 05, 2012 - comments added, code cleanup
#       - logging removed because it conflicts with threaded
#         LDAP simulator
#       - add/delete/modify support
#       Nov 17, 2012 - decode rewrite
unknown
codeparrot/codeparrot-clean