# Generic CNN classifier that uses a geojson file and gbdx imagery to classify chips import numpy as np import os, random import json, geojson from mltools import geojson_tools as gt from mltools.data_extractors import get_data_from_polygon_list as get_chips from keras.layers.core import Dense, Dropout, Activation, Flatten from keras.models import Sequential, model_from_json from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D from keras.callbacks import ModelCheckpoint from keras.optimizers import SGD class PoolNet(object): ''' Convolutional Neural Network model to classify chips as pool/no pool INPUT classes (list [str]): Classes to train model on, exactly as they appear in the properties of any geojsons used for training. Defaults to pool classes: ['No swimming pool', 'Swimming pool']. batch_size (int): Amount of images to use for each batch during training. Defaults to 32. input_shape (tuple[int]): Shape of input chips with theano dimensional ordering (n_channels, height, width). Height and width must be equal. If an old model is loaded (old_model_name is not None), input shape will be automatically set from the architecture and does not need to be specified. Defaults to (3,125,125). old_model_name (str): Name of previous model to load (not including file extension). There should be a json architecture file and HDF5 ('.h5') weights file in the working directory under this name. If None, a new model will be compiled for training. Defaults to None. learning_rate (float): Learning rate for the first round of training. Defualts to 0.001 small_model (bool): Use a model with nine layers instead of 16. Will train faster but may be less accurate and cannot be used with large chips. Defaults to False. kernel_size (int): Size (in pixels) of the kernels to use at each convolutional layer of the network. Defaults to 3 (standard for VGGNet). ''' def __init__(self, classes=['No swimming pool', 'Swimming pool'], batch_size=32, input_shape=(3, 125, 125), small_model=False, model_name=None, learning_rate = 0.001, kernel_size=3): self.nb_classes = len(classes) self.classes = classes self.batch_size = batch_size self.small_model = small_model self.input_shape = input_shape self.lr = learning_rate self.kernel_size = kernel_size self.cls_dict = {classes[i]: i for i in xrange(len(self.classes))} if model_name: self.model_name = model_name self.model = self._load_model_architecture(model_name) self.model.load_weights(model_name + '.h5') self.input_shape = self.model.input_shape elif self.small_model: self.model = self._small_model() else: self.model = self._VGG_16() self.model_layer_names = [self.model.layers[i].get_config()['name'] for i in range(len(self.model.layers))] def _VGG_16(self): ''' Implementation of VGG 16-layer net. ''' print 'Compiling VGG Net...' 
model = Sequential() model.add(ZeroPadding2D((1,1), input_shape=self.input_shape)) model.add(Convolution2D(64, self.kernel_size, self.kernel_size,activation='relu', input_shape=self.input_shape)) model.add(ZeroPadding2D((1,1))) model.add(Convolution2D(64, self.kernel_size, self.kernel_size, activation='relu')) model.add(MaxPooling2D((2,2), strides=(2,2))) model.add(ZeroPadding2D((1,1))) model.add(Convolution2D(128, self.kernel_size, self.kernel_size, activation='relu')) model.add(ZeroPadding2D((1,1))) model.add(Convolution2D(128, self.kernel_size, self.kernel_size, activation='relu')) model.add(MaxPooling2D((2,2), strides=(2,2))) model.add(ZeroPadding2D((1,1))) model.add(Convolution2D(256, self.kernel_size, self.kernel_size, activation='relu')) model.add(ZeroPadding2D((1,1))) model.add(Convolution2D(256, self.kernel_size, self.kernel_size, activation='relu')) model.add(ZeroPadding2D((1,1))) model.add(Convolution2D(256, self.kernel_size, self.kernel_size, activation='relu')) model.add(MaxPooling2D((2,2), strides=(2,2))) model.add(ZeroPadding2D((1,1))) model.add(Convolution2D(512, self.kernel_size, self.kernel_size, activation='relu')) model.add(ZeroPadding2D((1,1))) model.add(Convolution2D(512, self.kernel_size, self.kernel_size, activation='relu')) model.add(ZeroPadding2D((1,1))) model.add(Convolution2D(512, self.kernel_size, self.kernel_size, activation='relu')) model.add(MaxPooling2D((2,2), strides=(2,2))) model.add(ZeroPadding2D((1,1))) model.add(Convolution2D(512, self.kernel_size, self.kernel_size, activation='relu')) model.add(ZeroPadding2D((1,1))) model.add(Convolution2D(512, self.kernel_size, self.kernel_size, activation='relu')) model.add(ZeroPadding2D((1,1))) model.add(Convolution2D(512, self.kernel_size, self.kernel_size, activation='relu')) model.add(MaxPooling2D((2,2), strides=(2,2))) model.add(Flatten()) model.add(Dense(2048, activation='relu')) model.add(Dropout(0.5)) model.add(Dense(2048, activation='relu')) model.add(Dropout(0.5)) model.add(Dense(self.nb_classes, activation='softmax')) sgd = SGD(lr=self.lr, decay=0.01, momentum=0.9, nesterov=True) model.compile(optimizer = 'sgd', loss = 'categorical_crossentropy') return model def _small_model(self): ''' Alternative model architecture with fewer layers for computationally expensive training datasets ''' print 'Compiling Small Net...' 
model = Sequential() model.add(ZeroPadding2D((1,1), input_shape=self.input_shape)) model.add(Convolution2D(64, self.kernel_size, self.kernel_size,activation='relu', input_shape=self.input_shape)) model.add(ZeroPadding2D((1,1))) model.add(Convolution2D(64, self.kernel_size, self.kernel_size, activation='relu')) model.add(MaxPooling2D((2,2), strides=(2,2))) model.add(ZeroPadding2D((1,1))) model.add(Convolution2D(128, self.kernel_size, self.kernel_size, activation='relu')) model.add(ZeroPadding2D((1,1))) model.add(Convolution2D(128, self.kernel_size, self.kernel_size, activation='relu')) model.add(MaxPooling2D((2,2), strides=(2,2))) model.add(Flatten()) model.add(Dense(2048, activation='relu')) model.add(Dropout(0.5)) model.add(Dense(2048, activation='relu')) model.add(Dropout(0.5)) model.add(Dense(self.nb_classes, activation='softmax')) sgd = SGD(lr=self.lr, decay=0.01, momentum=0.9, nesterov=True) model.compile(optimizer = 'sgd', loss = 'categorical_crossentropy') return model def _load_model_architecture(self, model_name): ''' Load a model arcitecture from a json file INPUT model_name (str): Name of model to load OUTPUT Loaded model architecture ''' print 'Loading model {}'.format(self.model_name) #load model with open(model_name + '.json') as f: mod = model_from_json(json.load(f)) return mod def save_model(self, model_name): ''' Saves model architecture as a json file and current weigts as h5df file INPUT model_name (str): Name inder which to save the architecture and weights. This should not include the file extension. ''' # Save architecture arch, arch_json = '{}.json'.format(model_name), self.model.to_json() with open(arch, 'w') as f: json.dump(arch_json, f) # Save weights weights = '{}.h5'.format(model_name) self.model.save_weights(weights) def fit_from_geojson(self, train_geojson, max_side_dim=None, min_side_dim=0, chips_per_batch=5000, train_size=10000, validation_split=0.1, bit_depth=8, save_model=None, nb_epoch=10, shuffle_btwn_epochs=True, return_history=False, save_all_weights=True, retrain=False, learning_rate_2=0.01): ''' Fit a model from a geojson file with training data. This method iteratively yields large batches of chips to train on for each epoch. Please ensure that your current working directory contains all imagery referenced in the image_id property in train_geojson, and are named as follows: <image_id>.tif, where image_id is the catalog id of the image. INPUT train_geojson (string): Filename for the training data (must be a geojson). The geojson must be filtered such that all polygons are of valid size (as defined by max_side_dim and min_side_dim) max_side_dim (int): Maximum acceptable side dimension (in pixels) for a chip. If None, defaults to input_shape[-1]. If larger than the input shape the chips extracted will be downsampled to match the input shape. Defaults to None. min_side_dim (int): Minimum acceptable side dimension (in pixels) for a chip. Defaults to 0. chips_per_batch (int): Number of chips to yield per batch. Must be small enough to fit into memory. Defaults to 5000 (decrease for larger input sizes). train_size (int): Number of chips to use for training data. validation_split (float): Proportion of training chips to use as validation data. Defaults to 0.1. bit_depth (int): Bit depth of the image strips from which training chips are extracted. Defaults to 8 (standard for DRA'ed imagery). save_model (string): Name of model for saving. if None, does not save model to disk. Defaults to None nb_epoch (int): Number of epochs to train for. 
Each epoch will be trained on batches * batches_per_epoch chips. Defaults to 10. shuffle_btwn_epochs (bool): Shuffle the features in train_geojson between each epoch. Defaults to True. return_history (bool): Return a list containing metrics from past epochs. Defaults to False. save_all_weights (bool): Save model weights after each epoch. A directory called models will be created in the working directory. Defaults to True. retrain (bool): Freeze all layers except final softmax to retrain only the final weights of the model. Defaults to False learning_rate_2 (float): Learning rate for the second round of training. Only relevant if retrain is True. Defaults to 0.01. OUTPUT trained model, history ''' resize_dim, validation_data, full_hist = None, None, [] # load geojson training polygons with open(train_geojson) as f: polygons = geojson.load(f)['features'][:train_size] if len(polygons) < train_size: raise Exception('Not enough polygons to train on. Please add more training ' \ 'data or decrease train_size.') # Determine size of chips to extract and resize dimension if not max_side_dim: max_side_dim = self.input_shape[-1] elif max_side_dim != self.input_shape[-1]: resize_dim = self.input_shape # resize chips to match input shape # Recompile model with retrain params if retrain: for i in xrange(len(self.model.layers[:-1])): self.model.layers[i].trainable = False sgd = SGD(lr=learning_rate_2, momentum=0.9, nesterov=True) self.model.compile(loss='categorical_crossentropy', optimizer='sgd') # Set aside validation data if validation_split > 0: val_size = int(validation_split * train_size) val_data, polygons = polygons[: val_size], polygons[val_size: ] train_size = len(polygons) # extract validation chips print 'Getting validation data...\n' valX, valY = get_chips(val_data, min_side_dim=min_side_dim, max_side_dim=max_side_dim, classes=self.classes, normalize=True, return_labels=True, mask=True, bit_depth=bit_depth, show_percentage=True, assert_all_valid=True, resize_dim=resize_dim) validation_data = (valX, valY) # Train model for e in range(nb_epoch): print 'Epoch {}/{}'.format(e + 1, nb_epoch) # Make callback and diretory for saved weights if save_all_weights: chk = ModelCheckpoint(filepath="./models/epoch" + str(e) + \ "_{val_loss:.2f}.h5", verbose=1, save_weights_only=True) if 'models' not in os.listdir('.'): os.makedirs('models') if shuffle_btwn_epochs: np.random.shuffle(polygons) # Cycle through batches of chips and train for batch_start in range(0, train_size, chips_per_batch): callbacks = [] this_batch = polygons[batch_start: batch_start + chips_per_batch] # Get chips from batch X, Y = get_chips(this_batch, min_side_dim=min_side_dim, max_side_dim=max_side_dim, classes=self.classes, normalize=True, return_labels=True, mask=True, bit_depth=bit_depth, show_percentage=False, assert_all_valid=True, resize_dim=resize_dim) # Save weights if this is the final batch in the epoch if batch_start == range(0, train_size, chips_per_batch)[-1]: callbacks = [chk] # Fit the model on this batch hist = self.model.fit(X, Y, batch_size=self.batch_size, nb_epoch=1, validation_data=validation_data, callbacks=callbacks) # Dict recording loss and val_loss after each epoch full_hist.append(hist.history) if save_model: self.save_model(save_model) if return_history: return full_hist def fit_xy(self, X_train, Y_train, validation_split=0.1, save_model=None, nb_epoch=10, shuffle_btwn_epochs=True, return_history=False, save_all_weights=True, retrain=False, learning_rate_2=0.01): ''' Fit model on training chips already loaded 
into memory INPUT X_train (array): Training chips with the following dimensions: (train_size, num_channels, rows, cols). Dimensions of each chip should match the input_size to the model. Y_train (list): One-hot encoded labels to X_train with dimensions as follows: (train_size, n_classes) validation_split (float): Proportion of X_train to validate on while training. save_model (string): Name under which to save model. if None, does not save model. Defualts to None. nb_epoch (int): Number of training epochs to complete shuffle_btwn_epochs (bool): Shuffle the features in train_geojson between each epoch. Defaults to True. return_history (bool): Return a list containing metrics from past epochs. Defaults to False. save_all_weights (bool): Save model weights after each epoch. A directory called models will be created in the working directory. Defaults to True. retrain (bool): Freeze all layers except final softmax to retrain only the final weights of the model. Defaults to False learning_rate_2 (float): Learning rate for the second round of training. Only relevant if retrain is True. Defaults to 0.01. OUTPUT trained Keras model. ''' callbacks = [] # Recompile model with retrain params if retrain: for i in xrange(len(self.model.layers[:-1])): self.model.layers[i].trainable = False sgd = SGD(lr=learning_rate_2, momentum=0.9, nesterov=True) self.model.compile(loss='categorical_crossentropy', optimizer='sgd') # Define callback to save weights after each epoch if save_all_weights: chk = ModelCheckpoint(filepath="./models/ch_{epoch:02d}-{val_loss:.2f}.h5", verbose=1, save_weights_only=True) callbacks = [chk] # Fit model hist = self.model.fit(X_train, Y_train, validation_split=validation_split, callbacks=callbacks, nb_epoch=nb_epoch, shuffle=shuffle_btwn_epochs) if save_model: self.save_model(save_model) if return_history: return hist def classify_geojson(self, target_geojson, output_name, max_side_dim=None, min_side_dim=0, numerical_classes=True, chips_in_mem=5000, bit_depth=8): ''' Use the current model and weights to classify all polygons in target_geojson. The output file will have a 'CNN_class' property with the net's classification result, and a 'certainty' property with the net's certainty in the assigned classification. Please ensure that your current working directory contains all imagery referenced in the image_id property in target_geojson, and are named as follows: <image_id>.tif, where image_id is the catalog id of the image. INPUT target_geojson (string): Name of the geojson to classify. This file should only contain chips with side dimensions between min_side_dim and max_side_dim (see below). output_name (string): Name under which to save the classified geojson. max_side_dim (int): Maximum acceptable side dimension (in pixels) for a chip. If None, defaults to input_shape[-1]. If larger than the input shape the chips extracted will be downsampled to match the input shape. Defaults to None. min_side_dim (int): Minimum acceptable side dimension (in pixels) for a chip. Defaults to 0. numerical_classes (bool): Make output classifications correspond to the indicies (base 0) of the 'classes' attribute. If False, 'CNN_class' is a string with the class name. Defaults to True. chips_in_mem (int): Number of chips to load in memory at once. Decrease this parameter for larger chip sizes. Defaults to 5000. bit_depth (int): Bit depth of the image strips from which training chips are extracted. Defaults to 8 (standard for DRA'ed imagery). 
''' resize_dim, yprob, ytrue = None, [], [] # Determine size of chips to extract and resize dimension if not max_side_dim: max_side_dim = self.input_shape[-1] elif max_side_dim != self.input_shape[-1]: resize_dim = self.input_shape # resize chips to match input shape # Format output filename if not output_name.endswith('.geojson'): output_name = '{}.geojson'.format(output_name) # Get polygon list from geojson with open(target_geojson) as f: features = geojson.load(f)['features'] # Classify in batches of 1000 for ix in xrange(0, len(features), chips_in_mem): this_batch = features[ix: (ix + chips_in_mem)] try: X = get_chips(this_batch, min_side_dim=min_side_dim, max_side_dim=max_side_dim, classes=self.classes, normalize=True, return_labels=False, bit_depth=bit_depth, mask=True, show_percentage=False, assert_all_valid=True, resize_dim=resize_dim) except (AssertionError): raise ValueError('Please filter the input geojson file using ' \ 'geojoson_tools.filter_geojson() and ensure all ' \ 'polygons are valid before using this method.') # Predict classes of test data yprob += list(self.model.predict_proba(X)) # Get predicted classes and certainty yhat = [np.argmax(i) for i in yprob] ycert = [str(np.max(j)) for j in yprob] if not numerical_classes: yhat = [self.classes[i] for i in yhat] # Update geojson, save as output_name data = zip(yhat, ycert) property_names = ['CNN_class', 'certainty'] gt.write_properties_to(data, property_names=property_names, input_file=target_geojson, output_file=output_name) # Tools for analyzing network performance def x_to_rgb(X): ''' Transform a normalized (3,h,w) image (theano ordering) to a (h,w,3) rgb image (tensor flow). Use this to view or save rgb polygons as images. INPUT (1) 3d array 'X': originial chip with theano dimensional ordering (3, h, w) OUTPUT (1) 3d array: rgb image in tensor flow dim-prdering (h,w,3) ''' rgb_array = np.zeros((X.shape[1], X.shape[2], 3), 'uint8') rgb_array[...,0] = X[0] * 255 rgb_array[...,1] = X[1] * 255 rgb_array[...,2] = X[2] * 255 return rgb_array
examples/polygon_classify_cnn/pool_net.py
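A minimal usage sketch for the PoolNet class defined in pool_net.py above, assuming a training geojson already filtered to valid chip sizes and the referenced <image_id>.tif strips sitting in the working directory. The file names below are placeholders, not part of the original module.

from pool_net import PoolNet

# Hypothetical file names; substitute your own filtered geojson and imagery.
net = PoolNet(classes=['No swimming pool', 'Swimming pool'],
              batch_size=32, input_shape=(3, 125, 125))

# Train straight from the geojson, saving the architecture and weights at the end.
net.fit_from_geojson('train_filtered.geojson', train_size=10000, nb_epoch=10,
                     save_model='pool_model')

# Classify another geojson with the freshly trained weights.
net.classify_geojson('target.geojson', output_name='classified.geojson')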
import six from smqtk.representation import DescriptorIndex, get_data_element_impls from smqtk.utils import merge_dict, plugin, SimpleTimer try: from six.moves import cPickle as pickle except ImportError: import pickle class MemoryDescriptorIndex (DescriptorIndex): """ In-memory descriptor index with file caching. Stored descriptor elements are all held in memory in a uuid-to-element dictionary (hash table). If the path to a file cache is provided, it is loaded at construction if it exists. When elements are added to the index, the in-memory table is dumped to the cache. """ @classmethod def is_usable(cls): """ Check whether this class is available for use. :return: Boolean determination of whether this implementation is usable. :rtype: bool """ # no dependencies return True @classmethod def get_default_config(cls): """ Generate and return a default configuration dictionary for this class. This will be primarily used for generating what the configuration dictionary would look like for this class without instantiating it. By default, we observe what this class's constructor takes as arguments, turning those argument names into configuration dictionary keys. If any of those arguments have defaults, we will add those values into the configuration dictionary appropriately. The dictionary returned should only contain JSON compliant value types. It is not be guaranteed that the configuration dictionary returned from this method is valid for construction of an instance of this class. :return: Default configuration dictionary for the class. :rtype: dict """ c = super(MemoryDescriptorIndex, cls).get_default_config() c['cache_element'] = plugin.make_config(get_data_element_impls()) return c @classmethod def from_config(cls, config_dict, merge_default=True): """ Instantiate a new instance of this class given the configuration JSON-compliant dictionary encapsulating initialization arguments. :param config_dict: JSON compliant dictionary encapsulating a configuration. :type config_dict: dict :param merge_default: Merge the given configuration on top of the default provided by ``get_default_config``. :type merge_default: bool :return: Constructed instance from the provided config. :rtype: MemoryDescriptorIndex """ if merge_default: config_dict = merge_dict(cls.get_default_config(), config_dict) # Optionally construct cache element from sub-config. if config_dict['cache_element'] \ and config_dict['cache_element']['type']: e = plugin.from_plugin_config(config_dict['cache_element'], get_data_element_impls()) config_dict['cache_element'] = e else: config_dict['cache_element'] = None return super(MemoryDescriptorIndex, cls).from_config(config_dict, False) def __init__(self, cache_element=None, pickle_protocol=-1): """ Initialize a new in-memory descriptor index, or reload one from a cache. :param cache_element: Optional data element cache, loading an existing index if the element has bytes. If the given element is writable, new descriptors added to this index are cached to the element. :type cache_element: None | smqtk.representation.DataElement :param pickle_protocol: Pickling protocol to use when serializing index table to the optionally provided, writable cache element. We will use -1 by default (latest version, probably a binary form). :type pickle_protocol: int """ super(MemoryDescriptorIndex, self).__init__() # Mapping of descriptor UUID to the DescriptorElement instance. 
#: :type: dict[collections.Hashable, smqtk.representation.DescriptorElement] self._table = {} # Record of optional file cache we're using self.cache_element = cache_element self.pickle_protocol = pickle_protocol if cache_element and not cache_element.is_empty(): self._log.debug("Loading cached descriptor index table from %s " "element.", cache_element.__class__.__name__) self._table = pickle.loads(cache_element.get_bytes()) def get_config(self): c = merge_dict(self.get_default_config(), { "pickle_protocol": self.pickle_protocol, }) if self.cache_element: merge_dict(c['cache_element'], plugin.to_plugin_config(self.cache_element)) return c def cache_table(self): if self.cache_element and self.cache_element.writable(): with SimpleTimer("Caching descriptor table", self._log.debug): self.cache_element.set_bytes(pickle.dumps(self._table, self.pickle_protocol)) def count(self): return len(self._table) def clear(self): """ Clear this descriptor index's entries. """ self._table = {} self.cache_table() def has_descriptor(self, uuid): """ Check if a DescriptorElement with the given UUID exists in this index. :param uuid: UUID to query for :type uuid: collections.Hashable :return: True if a DescriptorElement with the given UUID exists in this index, or False if not. :rtype: bool """ return uuid in self._table def add_descriptor(self, descriptor, no_cache=False): """ Add a descriptor to this index. Adding the same descriptor multiple times should not add multiple copies of the descriptor in the index. :param descriptor: Descriptor to index. :type descriptor: smqtk.representation.DescriptorElement :param no_cache: Do not cache the internal table if a file cache was provided. This would be used if adding many descriptors at a time, preventing a file write for every individual descriptor added. :type no_cache: bool """ self._table[descriptor.uuid()] = descriptor if not no_cache: self.cache_table() def add_many_descriptors(self, descriptors): """ Add multiple descriptors at one time. :param descriptors: Iterable of descriptor instances to add to this index. :type descriptors: collections.Iterable[smqtk.representation.DescriptorElement] """ added_something = False for d in descriptors: # using no-cache so we don't trigger multiple file writes self.add_descriptor(d, no_cache=True) added_something = True if added_something: self.cache_table() def get_descriptor(self, uuid): """ Get the descriptor in this index that is associated with the given UUID. :param uuid: UUID of the DescriptorElement to get. :type uuid: collections.Hashable :raises KeyError: The given UUID doesn't associate to a DescriptorElement in this index. :return: DescriptorElement associated with the queried UUID. :rtype: smqtk.representation.DescriptorElement """ return self._table[uuid] def get_many_descriptors(self, uuids): """ Get an iterator over descriptors associated to given descriptor UUIDs. :param uuids: Iterable of descriptor UUIDs to query for. :type uuids: collections.Iterable[collections.Hashable] :raises KeyError: A given UUID doesn't associate with a DescriptorElement in this index. :return: Iterator of descriptors associated to given uuid values. :rtype: __generator[smqtk.representation.DescriptorElement] """ for uid in uuids: yield self._table[uid] def remove_descriptor(self, uuid, no_cache=False): """ Remove a descriptor from this index by the given UUID. :param uuid: UUID of the DescriptorElement to remove. 
:type uuid: collections.Hashable :raises KeyError: The given UUID doesn't associate to a DescriptorElement in this index. :param no_cache: Do not cache the internal table if a file cache was provided. This would be used if adding many descriptors at a time, preventing a file write for every individual descriptor added. :type no_cache: bool """ del self._table[uuid] if not no_cache: self.cache_table() def remove_many_descriptors(self, uuids): """ Remove descriptors associated to given descriptor UUIDs from this index. :param uuids: Iterable of descriptor UUIDs to remove. :type uuids: collections.Iterable[collections.Hashable] :raises KeyError: A given UUID doesn't associate with a DescriptorElement in this index. """ for uid in uuids: # using no-cache so we don't trigger multiple file writes self.remove_descriptor(uid, no_cache=True) self.cache_table() def iterkeys(self): return six.iterkeys(self._table) def iterdescriptors(self): return six.itervalues(self._table) def iteritems(self): return six.iteritems(self._table) DESCRIPTOR_INDEX_CLASS = MemoryDescriptorIndex
python/smqtk/representation/descriptor_index/memory.py
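A brief usage sketch for the MemoryDescriptorIndex in memory.py above. The DescriptorMemoryElement import path is an assumption about the surrounding SMQTK package; the index methods themselves are the ones defined in this file.

import numpy as np
from smqtk.representation.descriptor_element.local_elements import DescriptorMemoryElement
from smqtk.representation.descriptor_index.memory import MemoryDescriptorIndex

index = MemoryDescriptorIndex()                       # purely in-memory, no cache element

descriptor = DescriptorMemoryElement('example', 0)    # (type string, uuid)
descriptor.set_vector(np.array([0.1, 0.2, 0.3]))

index.add_descriptor(descriptor)
assert index.has_descriptor(0)
assert index.count() == 1
assert index.get_descriptor(0) is descriptor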
def conv(T, taille):
    # conv (list(list(bool)) * int -> list(list(int)))
    # Convert a 2-dimensional array of booleans into a 2-dimensional array of
    # integers such that True = 1 and False = 0.
    # T (list(list(bool))): 2-dimensional array of booleans
    # taille (int): side length of the 2-dimensional array
    # Initialisation and processing
    # tableau (list(list(int))): 2-dimensional array of integers
    # While traversing T we build tableau, following the rule True = 1 and False = 0.
    tableau = [[1 if T[i][j] else 0 for j in range(taille)]
               for i in range(taille)]
    return tableau
conv_tableau_2_dimensions_bool_int.py
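A quick example of the converter above on a 2x2 grid:

grid = [[True, False],
        [False, True]]
print(conv(grid, 2))   # [[1, 0], [0, 1]]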
"""Definitions for all core text instructions.""" from pyshgp.push.type_library import PushTypeLibrary from pyshgp.push.instruction import SimpleInstruction, ProducesManyOfTypeInstruction from pyshgp.push.types import Char from pyshgp.utils import Token def _concat(a, b): return str(b) + str(a), def _first_char(s): if len(s) == 0: return Token.revert return s[0], def _last_char(s): if len(s) == 0: return Token.revert return s[-1], def _nth_char(s, ndx): if len(s) == 0: return Token.revert return s[ndx % len(s)], def _contains(s, x): return x in s, def _p_index(s, substr): try: return s.index(substr), except ValueError: return -1, def _head(s, i): if len(s) == 0: return "", return s[:i % len(s)], def _tail(s, i): if len(s) == 0: return "", return s[i % len(s):], def _rest(s): if len(s) < 2: return "", return s[1:], def _but_last(s): if len(s) < 2: return "", return s[:-1], def _drop(s, i): if len(s) == 0: return "", return s[i % len(s):], def _but_last_n(s, i): if len(s) == 0: return "", return s[:-(i % len(s))], def _split_on(s, x): if x == "": return [] return s.split(x) def _replace_n(s, old, new, n=1): return s.replace(str(old), str(new), n), def _replace_all(s, old, new): return s.replace(str(old), str(new)), def _remove_n(s, x, n=1): return _replace_n(s, x, "", n) def _remove_all(s, x): return _replace_all(s, x, "") def _len(s): return len(s), def _reverse(s): return s[::-1], def _make_empty(): return "", def _is_empty(s): return s == "", def _occurrences_of(s, x): return s.count(str(x)), def _remove_nth(s, ndx): return s[:ndx] + s[ndx + 1:], def _set_nth(s, c, ndx): return s[:ndx] + str(c) + s[ndx + 1:], def _insert(s, x, ndx): return s[:ndx] + str(x) + s[ndx:], def _strip_whitespace(s): return s.strip(), # @TODO: Implement exec_string_iterate instruction. def _is_whitespace(c): return str(c).isspace(), def _is_letter(c): return str(c).isalpha(), def _is_digit(c): return str(c).isdigit(), def _str_from_thing(thing): return str(thing), def _char_from_bool(b): if b: return Char("T"), return Char("F"), def _char_from_ascii(i): return Char(chr(i % 128)), def _char_from_float(f): return _char_from_ascii(int(f)) def _all_chars(s): return [Char(c) for c in list(s)[::-1]] def instructions(type_library: PushTypeLibrary): """Return all core text instructions.""" i = [] for push_type in ["str", "char"]: i.append(SimpleInstruction( "{t}_concat".format(t=push_type), _concat, input_stacks=[push_type, push_type], output_stacks=["str"], code_blocks=0, docstring="Concatenates the top two {t}s and pushes the resulting string.".format(t=push_type) )) i.append(SimpleInstruction( "str_insert_{t}".format(t=push_type), _insert, input_stacks=["str", push_type, "int"], output_stacks=["str"], code_blocks=0, docstring="""Inserts {t} into the top str at index `n` and pushes the resulting string. 
The value for `n` is taken from the int stack.""".format(t=push_type) )) # Getting Characters i.append(SimpleInstruction( "{t}_from_first_char".format(t=push_type), _first_char, input_stacks=["str"], output_stacks=[push_type], code_blocks=0, docstring="Pushes a {t} of the first character of the top string.".format(t=push_type) )) i.append(SimpleInstruction( "{t}_from_last_char".format(t=push_type), _last_char, input_stacks=["str"], output_stacks=[push_type], code_blocks=0, docstring="Pushes a {t} of the last character of the top string.".format(t=push_type) )) i.append(SimpleInstruction( "{t}_from_nth_char".format(t=push_type), _nth_char, input_stacks=["str", "int"], output_stacks=[push_type], code_blocks=0, docstring="Pushes a {t} of the nth character of the top string. The top integer denotes nth position.".format(t=push_type) )) # Checking string contents i.append(SimpleInstruction( "str_contains_{t}".format(t=push_type), _contains, input_stacks=["str", push_type], output_stacks=["bool"], code_blocks=0, docstring="Pushes true if the next {t} is in the top string. Pushes false otherwise.".format(t=push_type) )) i.append(SimpleInstruction( "str_index_of_{t}".format(t=push_type), _p_index, input_stacks=["str", push_type], output_stacks=["int"], code_blocks=0, docstring="Pushes the index of the next {t} in the top string. If not found, pushes -1.".format(t=push_type) )) # Splitting # @TODO: srt_split_on_space instruction i.append(ProducesManyOfTypeInstruction( "str_split_on_{t}".format(t=push_type), _split_on, input_stacks=["str", push_type], output_stack="str", code_blocks=0, docstring="Pushes multiple strs produced by splitting the top str on the top {t}.".format(t=push_type) )) # Replacements i.append(SimpleInstruction( "str_replace_first_{t}".format(t=push_type), _replace_n, input_stacks=["str", push_type, push_type], output_stacks=["str"], code_blocks=0, docstring="""Pushes the str produced by replaceing the first occurrence of the top {t} with the second {t}.""".format(t=push_type) )) i.append(SimpleInstruction( "str_replace_n_{t}".format(t=push_type), _replace_n, input_stacks=["str", push_type, push_type, "int"], output_stacks=["str"], code_blocks=0, docstring="""Pushes the str produced by replaceing the first `n` occurrences of the top {t} with the second {t}. The value for `n` is the top int.""".format(t=push_type) )) i.append(SimpleInstruction( "str_replace_all_{t}".format(t=push_type), _replace_all, input_stacks=["str", push_type, push_type], output_stacks=["str"], code_blocks=0, docstring="""Pushes the str produced by replaceing all occurrences of the top {t} with the second {t}.""".format(t=push_type) )) # Removals i.append(SimpleInstruction( "str_remove_first_{t}".format(t=push_type), _remove_n, input_stacks=["str", push_type], output_stacks=["str"], code_blocks=0, docstring="Pushes the str produced by removing the first occurrence of the top {t}.".format(t=push_type) )) i.append(SimpleInstruction( "str_remove_n_{t}".format(t=push_type), _remove_n, input_stacks=["str", push_type, "int"], output_stacks=["str"], code_blocks=0, docstring="""Pushes the str produced by remvoing the first `n` occurrences of the top {t}. 
The value for `n` is the top int.""".format(t=push_type) )) i.append(SimpleInstruction( "str_remove_all_{t}".format(t=push_type), _remove_all, input_stacks=["str", push_type], output_stacks=["str"], code_blocks=0, docstring="Pushes the str produced by removing all occurrences of the top {t}.".format(t=push_type) )) # Misc i.append(SimpleInstruction( "str_occurrences_of_{t}".format(t=push_type), _occurrences_of, input_stacks=["str", push_type], output_stacks=["int"], code_blocks=0, docstring="Pushes the number of times the top {t} occurs in the top str to the int stack.".format(t=push_type) )) i.append(SimpleInstruction( "str_reverse", _reverse, input_stacks=["str"], output_stacks=["str"], code_blocks=0, docstring="""Takes the top string and pushes it reversed.""" )) i.append(SimpleInstruction( "str_head", _head, input_stacks=["str", "int"], output_stacks=["str"], code_blocks=0, docstring="""Pushes a string of the first `n` characters from the top string. The value for `n` is the top int mod the length of the string.""" )) i.append(SimpleInstruction( "str_tail", _tail, input_stacks=["str", "int"], output_stacks=["str"], code_blocks=0, docstring="""Pushes a string of the last `n` characters from the top string. The value for `n` is the top int mod the length of the string.""" )) i.append(SimpleInstruction( "str_append_char", _concat, input_stacks=["str", "char"], output_stacks=["str"], code_blocks=0, docstring="Appends the top char to the top string pushes the resulting string." )) i.append(SimpleInstruction( "str_rest", _rest, input_stacks=["str"], output_stacks=["str"], code_blocks=0, docstring="Pushes the top str without its first character." )) i.append(SimpleInstruction( "str_but_last", _but_last, input_stacks=["str"], output_stacks=["str"], code_blocks=0, docstring="Pushes the top str without its last character." )) i.append(SimpleInstruction( "str_drop", _drop, input_stacks=["str", "int"], output_stacks=["str"], code_blocks=0, docstring="""Pushes the top str without its first `n` character. The value for `n` is the top int mod the length of the string.""" )) i.append(SimpleInstruction( "str_but_last_n", _but_last_n, input_stacks=["str", "int"], output_stacks=["str"], code_blocks=0, docstring="""Pushes the top str without its last `n` character. The value for `n` is the top int mod the length of the string.""" )) i.append(SimpleInstruction( "str_length", _len, input_stacks=["str"], output_stacks=["int"], code_blocks=0, docstring="Pushes the length of the top str to the int stack." )) i.append(SimpleInstruction( "str_make_empty", _make_empty, input_stacks=[], output_stacks=["str"], code_blocks=0, docstring="Pushes an empty string." )) i.append(SimpleInstruction( "str_is_empty_string", _is_empty, input_stacks=["str"], output_stacks=["bool"], code_blocks=0, docstring="Pushes True if top string is empty. Pushes False otherwise." )) i.append(SimpleInstruction( "str_remove_nth", _remove_nth, input_stacks=["str", "int"], output_stacks=["str"], code_blocks=0, docstring="Pushes the top str with the nth character removed." )) i.append(SimpleInstruction( "str_set_nth", _set_nth, input_stacks=["str", "char", "int"], output_stacks=["str"], code_blocks=0, docstring="Pushes the top str with the nth character set to the top character." )) i.append(SimpleInstruction( "str_strip_whitespace", _strip_whitespace, input_stacks=["str"], output_stacks=["str"], code_blocks=0, docstring="Pushes the top str with trailing and leading whitespace stripped." 
)) # @TODO: Instructions for trim_left and trim_right # @TODO: Instructions for pad_left and pad_right # CHARACTER INSTRUCTIONS i.append(SimpleInstruction( "char_is_whitespace", _is_whitespace, input_stacks=["char"], output_stacks=["bool"], code_blocks=0, docstring="Pushes True if the top Char is whitespace. Pushes False otherwise." )) i.append(SimpleInstruction( "char_is_letter", _is_letter, input_stacks=["char"], output_stacks=["bool"], code_blocks=0, docstring="Pushes True if the top Char is a letter. Pushes False otherwise." )) i.append(SimpleInstruction( "char_is_digit", _is_digit, input_stacks=["char"], output_stacks=["bool"], code_blocks=0, docstring="Pushes True if the top Char is a numeric digit. Pushes False otherwise." )) # TYPE CONVERTING for push_type in ["bool", "int", "float", "char"]: i.append(SimpleInstruction( "str_from_{t}".format(t=push_type), _str_from_thing, input_stacks=[push_type], output_stacks=["str"], code_blocks=0, docstring="Pushes the top {t} converted into a str.".format(t=push_type) )) i.append(SimpleInstruction( "char_from_bool", _char_from_bool, input_stacks=["bool"], output_stacks=["char"], code_blocks=0, docstring="""Pushes the char \"T\" if the top bool is True. If the top bool is False, pushes the char \"F\".""" )) i.append(SimpleInstruction( "char_from_ascii_int", _char_from_ascii, input_stacks=["int"], output_stacks=["char"], code_blocks=0, docstring="Pushes the top int converted into a Character by using the int mod 128 as an ascii value." )) i.append(SimpleInstruction( "char_from_float", _char_from_float, input_stacks=["float"], output_stacks=["char"], code_blocks=0, docstring="""Pushes the top float converted into a Character by flooring the float to an int, taking the int mod 128, and using it as an ascii value.""" )) i.append(ProducesManyOfTypeInstruction( "chars_from_str", _all_chars, input_stacks=["str"], output_stack="char", code_blocks=0, docstring="""Pushes each character of the top str to the char stack in reverse order.""" )) return i
pyshgp/push/instructions/text.py
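A minimal sketch of building the instruction set from text.py above. Constructing PushTypeLibrary with no arguments, and the name/docstring attributes on the returned instructions, are assumptions about pyshgp's defaults rather than something shown in this file.

from pyshgp.push.type_library import PushTypeLibrary
from pyshgp.push.instructions.text import instructions

type_library = PushTypeLibrary()          # assumed to register the core push types
text_instructions = instructions(type_library)

# Each entry is a SimpleInstruction or ProducesManyOfTypeInstruction.
for instruction in text_instructions[:5]:
    print(instruction.name, instruction.docstring)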
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Apache Beam SDK for Python setup file.""" from __future__ import absolute_import from __future__ import print_function import os import platform import sys import warnings from distutils.version import StrictVersion # Pylint and isort disagree here. # pylint: disable=ungrouped-imports import setuptools from pkg_resources import DistributionNotFound from pkg_resources import get_distribution from setuptools.command.build_py import build_py from setuptools.command.develop import develop from setuptools.command.egg_info import egg_info from setuptools.command.sdist import sdist from setuptools.command.test import test def get_version(): global_names = {} exec( # pylint: disable=exec-used open(os.path.join( os.path.dirname(os.path.abspath(__file__)), 'apache_beam/version.py') ).read(), global_names ) return global_names['__version__'] PACKAGE_NAME = 'apache-beam' PACKAGE_VERSION = get_version() PACKAGE_DESCRIPTION = 'Apache Beam SDK for Python' PACKAGE_URL = 'https://beam.apache.org' PACKAGE_DOWNLOAD_URL = 'https://pypi.python.org/pypi/apache-beam' PACKAGE_AUTHOR = 'Apache Software Foundation' PACKAGE_EMAIL = 'dev@beam.apache.org' PACKAGE_KEYWORDS = 'apache beam' PACKAGE_LONG_DESCRIPTION = ''' Apache Beam is a unified programming model for both batch and streaming data processing, enabling efficient execution across diverse distributed execution engines and providing extensibility points for connecting to different technologies and user communities. ''' REQUIRED_PIP_VERSION = '7.0.0' _PIP_VERSION = get_distribution('pip').version if StrictVersion(_PIP_VERSION) < StrictVersion(REQUIRED_PIP_VERSION): warnings.warn( "You are using version {0} of pip. " \ "However, version {1} is recommended.".format( _PIP_VERSION, REQUIRED_PIP_VERSION ) ) REQUIRED_CYTHON_VERSION = '0.28.1' try: _CYTHON_VERSION = get_distribution('cython').version if StrictVersion(_CYTHON_VERSION) < StrictVersion(REQUIRED_CYTHON_VERSION): warnings.warn( "You are using version {0} of cython. " \ "However, version {1} is recommended.".format( _CYTHON_VERSION, REQUIRED_CYTHON_VERSION ) ) except DistributionNotFound: # do nothing if Cython is not installed pass # Currently all compiled modules are optional (for performance only). if platform.system() == 'Windows': # Windows doesn't always provide int64_t. 
cythonize = lambda *args, **kwargs: [] else: try: # pylint: disable=wrong-import-position from Cython.Build import cythonize except ImportError: cythonize = lambda *args, **kwargs: [] REQUIRED_PACKAGES = [ 'avro>=1.8.1,<2.0.0; python_version < "3.0"', 'avro-python3>=1.8.1,<2.0.0; python_version >= "3.0"', 'crcmod>=1.7,<2.0', 'dill>=0.2.9,<0.2.10', 'fastavro>=0.21.4,<0.22', 'future>=0.16.0,<1.0.0', 'futures>=3.2.0,<4.0.0; python_version < "3.0"', 'grpcio>=1.8,<2', 'hdfs>=2.1.0,<3.0.0', 'httplib2>=0.8,<=0.12.0', 'mock>=1.0.1,<3.0.0', 'oauth2client>=2.0.1,<4', # grpcio 1.8.1 and above requires protobuf 3.5.0.post1. 'protobuf>=3.5.0.post1,<4', # [BEAM-6287] pyarrow is not supported on Windows for Python 2 ('pyarrow>=0.11.1,<0.14.0; python_version >= "3.0" or ' 'platform_system != "Windows"'), 'pydot>=1.2.0,<1.3', 'pytz>=2018.3', # [BEAM-5628] Beam VCF IO is not supported in Python 3. 'pyvcf>=0.6.8,<0.7.0; python_version < "3.0"', 'pyyaml>=3.12,<4.0.0', 'typing>=3.6.0,<3.7.0; python_version < "3.5.0"', ] REQUIRED_TEST_PACKAGES = [ 'nose>=1.3.7', 'numpy>=1.14.3,<2', 'pandas>=0.23.4,<0.24', 'parameterized>=0.6.0,<0.7.0', 'pyhamcrest>=1.9,<2.0', 'tenacity>=5.0.2,<6.0', ] GCP_REQUIREMENTS = [ 'cachetools>=3.1.0,<4', 'google-apitools>=0.5.28,<0.5.29', # [BEAM-4543] googledatastore is not supported in Python 3. 'proto-google-cloud-datastore-v1>=0.90.0,<=0.90.4; python_version < "3.0"', # [BEAM-4543] googledatastore is not supported in Python 3. 'googledatastore>=7.0.1,<7.1; python_version < "3.0"', 'google-cloud-datastore>=1.7.1,<2.0.0', 'google-cloud-pubsub>=0.39.0,<0.40.0', # GCP packages required by tests 'google-cloud-bigquery>=1.6.0,<1.7.0', 'google-cloud-core>=0.28.1,<0.30.0', 'google-cloud-bigtable>=0.31.1,<0.33.0', ] # We must generate protos after setup_requires are installed. def generate_protos_first(original_cmd): try: # See https://issues.apache.org/jira/browse/BEAM-2366 # pylint: disable=wrong-import-position import gen_protos class cmd(original_cmd, object): def run(self): gen_protos.generate_proto_files() super(cmd, self).run() return cmd except ImportError: warnings.warn("Could not import gen_protos, skipping proto generation.") return original_cmd python_requires = '>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*' if sys.version_info[0] == 3: warnings.warn( 'Python 3 support for the Apache Beam SDK is not yet fully supported. ' 'You may encounter buggy behavior or missing features.') setuptools.setup( name=PACKAGE_NAME, version=PACKAGE_VERSION, description=PACKAGE_DESCRIPTION, long_description=PACKAGE_LONG_DESCRIPTION, url=PACKAGE_URL, download_url=PACKAGE_DOWNLOAD_URL, author=PACKAGE_AUTHOR, author_email=PACKAGE_EMAIL, packages=setuptools.find_packages(), package_data={'apache_beam': [ '*/*.pyx', '*/*/*.pyx', '*/*.pxd', '*/*/*.pxd', 'testing/data/*.yaml', 'portability/api/*.yaml']}, ext_modules=cythonize([ 'apache_beam/**/*.pyx', 'apache_beam/coders/coder_impl.py', 'apache_beam/metrics/execution.py', 'apache_beam/runners/common.py', 'apache_beam/runners/worker/logger.py', 'apache_beam/runners/worker/opcounters.py', 'apache_beam/runners/worker/operations.py', 'apache_beam/transforms/cy_combiners.py', 'apache_beam/utils/counters.py', 'apache_beam/utils/windowed_value.py', ]), install_requires=REQUIRED_PACKAGES, python_requires=python_requires, test_suite='nose.collector', tests_require=REQUIRED_TEST_PACKAGES, extras_require={ 'docs': ['Sphinx>=1.5.2,<2.0'], 'test': REQUIRED_TEST_PACKAGES, 'gcp': GCP_REQUIREMENTS, }, zip_safe=False, # PyPI package information. 
classifiers=[ 'Intended Audience :: End Users/Desktop', 'License :: OSI Approved :: Apache Software License', 'Operating System :: POSIX :: Linux', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3.5', 'Topic :: Software Development :: Libraries', 'Topic :: Software Development :: Libraries :: Python Modules', ], license='Apache License, Version 2.0', keywords=PACKAGE_KEYWORDS, entry_points={ 'nose.plugins.0.10': [ 'beam_test_plugin = test_config:BeamTestPlugin', ]}, cmdclass={ 'build_py': generate_protos_first(build_py), 'develop': generate_protos_first(develop), 'egg_info': generate_protos_first(egg_info), 'sdist': generate_protos_first(sdist), 'test': generate_protos_first(test), }, )
sdks/python/setup.py
import os
import requests

# Configuration to be adjusted:
# 1. put the URL here (see text file)
base_url = "https://data-dataref.ifremer.fr/stereo/AA_2015/2015-03-05_10-35-00_12Hz/input/cam1/"
# 2. decide which (range of) images
start = 0
end = 149
# 3. name the folder to save images to, best taken from the URL (change "/" to "_")
download_folder = 'AA_2015_2015-03-05_10-35-00_12Hz'
img_appendix = "_01"  # the dataset provides stereo and we only need mono; not to be changed

# Create a download folder if not yet existing.
current_directory = os.getcwd()
final_directory = os.path.join(current_directory, download_folder)
if not os.path.exists(final_directory):
    os.makedirs(final_directory)

# Run through all URLs to download the images individually, keeping the same
# file names as in the original dataset.
# Start to uncomment:
# while start <= end:
#     img_name = f'{start:06d}' + img_appendix + '.tif'
#     # print(f"image_name is: " + img_name)
#     url = base_url + img_name
#     r = requests.get(url, allow_redirects=True)
#     # print(f"loading url: " + url)
#
#     # save image in download_folder
#     path_dest = os.path.join(final_directory, img_name)
#     open(path_dest, 'wb').write(r.content)
#
#     start += 1
#
# print("Done")
# End to uncomment.

# Alternative with a .txt list (the AA videos need it).
with open("/Users/rueskamp/Documents/Studium SE/05_WS21/Projekt_See/codebase/dataset_preparation/AA_2015_2015-03-05_10-35-00_12Hz.txt", "r") as f:
    list2 = []
    for item in f:
        one, two = item.split(">", 1)
        img_name = one
        # print(f"image_name is: " + img_name)
        url = base_url + img_name
        r = requests.get(url, allow_redirects=True)
        # print(f"loading url: " + url)

        # Save the image in download_folder.
        path_dest = os.path.join(final_directory, img_name)
        with open(path_dest, 'wb') as img_file:
            img_file.write(r.content)

print("Done")
dataset_preparation/downloading.py
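A slightly more defensive variant of the download loop in downloading.py above, purely as a sketch: it reuses a requests.Session and checks the HTTP status before writing, but keeps the same URL layout and destination naming.

import os
import requests

def download_images(names, base_url, dest_dir):
    """Download each image name from base_url into dest_dir, skipping failed requests."""
    os.makedirs(dest_dir, exist_ok=True)
    with requests.Session() as session:
        for name in names:
            response = session.get(base_url + name, allow_redirects=True)
            if response.status_code != 200:
                print("Skipping", name, "(HTTP", response.status_code, ")")
                continue
            with open(os.path.join(dest_dir, name), 'wb') as out:
                out.write(response.content)

# e.g. download_images(['000000_01.tif'], base_url, download_folder)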
# :coding: utf-8 # :copyright: Copyright (c) 2015 ftrack import os import uuid import tempfile import pytest import ftrack_api.cache @pytest.fixture(params=['proxy', 'layered', 'memory', 'file', 'serialised']) def cache(request): '''Return cache.''' if request.param == 'proxy': cache = ftrack_api.cache.ProxyCache( ftrack_api.cache.MemoryCache() ) elif request.param == 'layered': cache = ftrack_api.cache.LayeredCache( [ftrack_api.cache.MemoryCache()] ) elif request.param == 'memory': cache = ftrack_api.cache.MemoryCache() elif request.param == 'file': cache_path = os.path.join( tempfile.gettempdir(), '{0}.dbm'.format(uuid.uuid4().hex) ) cache = ftrack_api.cache.FileCache(cache_path) def cleanup(): '''Cleanup.''' try: os.remove(cache_path) except OSError: # BSD DB (Mac OSX) implementation of the interface will append # a .db extension. os.remove(cache_path + '.db') request.addfinalizer(cleanup) elif request.param == 'serialised': cache = ftrack_api.cache.SerialisedCache( ftrack_api.cache.MemoryCache(), encode=lambda value: value, decode=lambda value: value ) else: raise ValueError( 'Unrecognised cache fixture type {0!r}'.format(request.param) ) return cache class Class(object): '''Class for testing.''' def method(self, key): '''Method for testing.''' def function(mutable, x, y=2): '''Function for testing.''' mutable['called'] = True return {'result': x + y} def assert_memoised_call( memoiser, function, expected, args=None, kw=None, memoised=True ): '''Assert *function* call via *memoiser* was *memoised*.''' mapping = {'called': False} if args is not None: args = (mapping,) + args else: args = (mapping,) result = memoiser.call(function, args, kw) assert result == expected assert mapping['called'] is not memoised def test_get(cache): '''Retrieve item from cache.''' cache.set('key', 'value') assert cache.get('key') == 'value' def test_get_missing_key(cache): '''Fail to retrieve missing item from cache.''' with pytest.raises(KeyError): cache.get('key') def test_set(cache): '''Set item in cache.''' with pytest.raises(KeyError): cache.get('key') cache.set('key', 'value') assert cache.get('key') == 'value' def test_remove(cache): '''Remove item from cache.''' cache.set('key', 'value') cache.remove('key') with pytest.raises(KeyError): cache.get('key') def test_remove_missing_key(cache): '''Fail to remove missing key.''' with pytest.raises(KeyError): cache.remove('key') def test_keys(cache): '''Retrieve keys of items in cache.''' assert cache.keys() == [] cache.set('a', 'a_value') cache.set('b', 'b_value') cache.set('c', 'c_value') assert sorted(cache.keys()) == sorted(['a', 'b', 'c']) def test_clear(cache): '''Remove items from cache.''' cache.set('a', 'a_value') cache.set('b', 'b_value') cache.set('c', 'c_value') assert cache.keys() cache.clear() assert not cache.keys() def test_clear_using_pattern(cache): '''Remove items that match pattern from cache.''' cache.set('matching_key', 'value') cache.set('another_matching_key', 'value') cache.set('key_not_matching', 'value') assert cache.keys() cache.clear(pattern='.*matching_key$') assert cache.keys() == ['key_not_matching'] def test_clear_encountering_missing_key(cache, mocker): '''Clear missing key.''' # Force reporting keys that are not actually valid for test purposes. mocker.patch.object(cache, 'keys', lambda: ['missing']) assert cache.keys() == ['missing'] # Should not error even though key not valid. cache.clear() # The key was not successfully removed so should still be present. 
assert cache.keys() == ['missing'] def test_layered_cache_propagates_value_on_get(): '''Layered cache propagates value on get.''' caches = [ ftrack_api.cache.MemoryCache(), ftrack_api.cache.MemoryCache(), ftrack_api.cache.MemoryCache() ] cache = ftrack_api.cache.LayeredCache(caches) # Set item on second level cache only. caches[1].set('key', 'value') # Retrieving key via layered cache should propagate it automatically to # higher level caches only. assert cache.get('key') == 'value' assert caches[0].get('key') == 'value' with pytest.raises(KeyError): caches[2].get('key') def test_layered_cache_remove_at_depth(): '''Remove key that only exists at depth in LayeredCache.''' caches = [ ftrack_api.cache.MemoryCache(), ftrack_api.cache.MemoryCache() ] cache = ftrack_api.cache.LayeredCache(caches) # Set item on second level cache only. caches[1].set('key', 'value') # Removing key that only exists at depth should not raise key error. cache.remove('key') # Ensure key was removed. assert not cache.keys() def test_expand_references(): '''Test that references are expanded from serialized cache.''' cache_path = os.path.join( tempfile.gettempdir(), '{0}.dbm'.format(uuid.uuid4().hex) ) def make_cache(session, cache_path): '''Create a serialised file cache.''' serialized_file_cache = ftrack_api.cache.SerialisedCache( ftrack_api.cache.FileCache(cache_path), encode=session.encode, decode=session.decode ) return serialized_file_cache # Populate the serialized file cache. session = ftrack_api.Session( cache=lambda session, cache_path=cache_path:make_cache( session, cache_path ) ) expanded_results = dict() query_string = 'select asset.parent from AssetVersion where asset is_not None limit 10' for sequence in session.query(query_string): asset = sequence.get('asset') expanded_results.setdefault( asset.get('id'), asset.get('parent') ) # Fetch the data from cache. new_session = ftrack_api.Session( cache=lambda session, cache_path=cache_path:make_cache( session, cache_path ) ) new_session_two = ftrack_api.Session( cache=lambda session, cache_path=cache_path:make_cache( session, cache_path ) ) # Make sure references are merged. for sequence in new_session.query(query_string): asset = sequence.get('asset') assert ( asset.get('parent') == expanded_results[asset.get('id')] ) # Use for fetching directly using get. assert ( new_session_two.get(asset.entity_type, asset.get('id')).get('parent') == expanded_results[asset.get('id')] ) @pytest.mark.parametrize('items, key', [ (({},), '{}'), (({}, {}), '{}{}') ], ids=[ 'single object', 'multiple objects' ]) def test_string_key_maker_key(items, key): '''Generate key using string key maker.''' key_maker = ftrack_api.cache.StringKeyMaker() assert key_maker.key(*items) == key @pytest.mark.parametrize('items, key', [ ( ({},), '\x01\x01' ), ( ({'a': 'b'}, [1, 2]), '\x01' '\x80\x02U\x01a.' '\x02' '\x80\x02U\x01b.' '\x01' '\x00' '\x03' '\x80\x02K\x01.' '\x00' '\x80\x02K\x02.' '\x03' ), ( (function,), '\x04function\x00unit.test_cache' ), ( (Class,), '\x04Class\x00unit.test_cache' ), ( (Class.method,), '\x04method\x00Class\x00unit.test_cache' ), ( (callable,), '\x04callable' ) ], ids=[ 'single mapping', 'multiple objects', 'function', 'class', 'method', 'builtin' ]) def test_object_key_maker_key(items, key): '''Generate key using string key maker.''' key_maker = ftrack_api.cache.ObjectKeyMaker() assert key_maker.key(*items) == key def test_memoised_call(): '''Call memoised function.''' memoiser = ftrack_api.cache.Memoiser() # Initial call should not be memoised so function is executed. 
assert_memoised_call( memoiser, function, args=(1,), expected={'result': 3}, memoised=False ) # Identical call should be memoised so function is not executed again. assert_memoised_call( memoiser, function, args=(1,), expected={'result': 3}, memoised=True ) # Differing call is not memoised so function is executed. assert_memoised_call( memoiser, function, args=(3,), expected={'result': 5}, memoised=False ) def test_memoised_call_variations(): '''Call memoised function with identical arguments using variable format.''' memoiser = ftrack_api.cache.Memoiser() expected = {'result': 3} # Call function once to ensure is memoised. assert_memoised_call( memoiser, function, args=(1,), expected=expected, memoised=False ) # Each of the following calls should equate to the same key and make # use of the memoised value. for args, kw in [ ((), {'x': 1}), ((), {'x': 1, 'y': 2}), ((1,), {'y': 2}), ((1,), {}) ]: assert_memoised_call( memoiser, function, args=args, kw=kw, expected=expected ) # The following calls should all be treated as new variations and so # not use any memoised value. assert_memoised_call( memoiser, function, kw={'x': 2}, expected={'result': 4}, memoised=False ) assert_memoised_call( memoiser, function, kw={'x': 3, 'y': 2}, expected={'result': 5}, memoised=False ) assert_memoised_call( memoiser, function, args=(4, ), kw={'y': 2}, expected={'result': 6}, memoised=False ) assert_memoised_call( memoiser, function, args=(5, ), expected={'result': 7}, memoised=False ) def test_memoised_mutable_return_value(): '''Avoid side effects for returned mutable arguments when memoising.''' memoiser = ftrack_api.cache.Memoiser() arguments = ({'called': False}, 1) result_a = memoiser.call(function, arguments) assert result_a == {'result': 3} assert arguments[0]['called'] # Modify mutable externally and check that stored memoised value is # unchanged. del result_a['result'] arguments[0]['called'] = False result_b = memoiser.call(function, arguments) assert result_b == {'result': 3} assert not arguments[0]['called']
openpype/modules/ftrack/python2_vendor/ftrack-python-api/test/unit/test_cache.py
10,847
Class for testing. Assert *function* call via *memoiser* was *memoised*. Return cache. Cleanup. Function for testing. Create a serialised file cache. Method for testing. Remove items from cache. Clear missing key. Remove items that match pattern from cache. Test that references are expanded from serialized cache. Retrieve item from cache. Fail to retrieve missing item from cache. Retrieve keys of items in cache. Layered cache propagates value on get. Remove key that only exists at depth in LayeredCache. Call memoised function. Call memoised function with identical arguments using variable format. Avoid side effects for returned mutable arguments when memoising. Generate key using string key maker. Remove item from cache. Fail to remove missing key. Set item in cache. Generate key using string key maker. :coding: utf-8 :copyright: Copyright (c) 2015 ftrack BSD DB (Mac OSX) implementation of the interface will append a .db extension. Force reporting keys that are not actually valid for test purposes. Should not error even though key not valid. The key was not successfully removed so should still be present. Set item on second level cache only. Retrieving key via layered cache should propagate it automatically to higher level caches only. Set item on second level cache only. Removing key that only exists at depth should not raise key error. Ensure key was removed. Populate the serialized file cache. Fetch the data from cache. Make sure references are merged. Use for fetching directly using get. Initial call should not be memoised so function is executed. Identical call should be memoised so function is not executed again. Differing call is not memoised so function is executed. Call function once to ensure is memoised. Each of the following calls should equate to the same key and make use of the memoised value. The following calls should all be treated as new variations and so not use any memoised value. Modify mutable externally and check that stored memoised value is unchanged.
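The extracted docstrings above summarise the caching behaviour exercised by the tests. A minimal usage sketch of the layered-cache propagation they describe, using the same ftrack_api.cache classes as the test module:

import ftrack_api.cache

# Two in-memory layers wrapped in a LayeredCache.
caches = [ftrack_api.cache.MemoryCache(), ftrack_api.cache.MemoryCache()]
layered = ftrack_api.cache.LayeredCache(caches)

# Set a value on the deeper layer only.
caches[1].set('key', 'value')

# Retrieving it through the layered cache finds it at depth and
# propagates it to the higher layer.
assert layered.get('key') == 'value'
assert caches[0].get('key') == 'value'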
2,012
en
0.804073
import sys import time import math import psutil import pytest import threading from loky import TimeoutError from loky import get_reusable_executor from loky.backend import get_context # Set a large timeout as it should only be reached in case of deadlocks TIMEOUT = 40 _test_event = None def initializer_event(event): """Initializer that set a global test event for test synchronization""" global _test_event _test_event = event def _direct_children_with_cmdline(p): """Helper to fetch cmdline from children process list""" children_with_cmdline = [] for c in p.children(): try: cmdline = " ".join(c.cmdline()) if not c.is_running() or not cmdline: # Under linux is_running() can return True even though # the command line data can no longer be read from # /proc/<pid>/cmdline. This looks like a race condition # between /proc/<pid>/stat and /proc/<pid>/cmdline # when the process is being terminated by the OS. continue children_with_cmdline.append((c, cmdline)) except (OSError, psutil.NoSuchProcess, psutil.AccessDenied): # These errors indicate that the process has terminated while # we were processing the info. Just discard it. pass return children_with_cmdline def _running_children_with_cmdline(p): all_children = _direct_children_with_cmdline(p) workers = [(c, cmdline) for c, cmdline in all_children if ('semaphore_tracker' not in cmdline and 'resource_tracker' not in cmdline and 'multiprocessing.forkserver' not in cmdline)] forkservers = [c for c, cmdline in all_children if 'multiprocessing.forkserver' in cmdline] for fs in forkservers: workers.extend(_direct_children_with_cmdline(fs)) return workers def _check_subprocesses_number(executor, expected_process_number=None, expected_max_process_number=None, patience=100): # Wait for terminating processes to disappear children_cmdlines = _running_children_with_cmdline(psutil.Process()) pids_cmdlines = [(c.pid, cmdline) for c, cmdline in children_cmdlines] children_pids = {pid for pid, _ in pids_cmdlines} if executor is not None: worker_pids = set(executor._processes.keys()) else: # Bypass pids checks when executor has been garbage # collected worker_pids = children_pids if expected_process_number is not None: try: assert len(children_pids) == expected_process_number, pids_cmdlines assert len(worker_pids) == expected_process_number, pids_cmdlines assert worker_pids == children_pids, pids_cmdlines except AssertionError: if expected_process_number != 0: raise # there is a race condition with the /proc/<pid>/ system clean up # and our utilization of psutil. The Process is considered alive by # psutil even though it have been terminated. Wait for the system # clean up in this case. 
for _ in range(patience): if not _running_children_with_cmdline(psutil.Process()): break time.sleep(.1) else: raise if expected_max_process_number is not None: assert len(children_pids) <= expected_max_process_number, pids_cmdlines assert len(worker_pids) <= expected_max_process_number, pids_cmdlines def _check_executor_started(executor): # Submit a small job to make sure that the pool is an working state res = executor.submit(id, None) try: res.result(timeout=TIMEOUT) except TimeoutError: print('\n' * 3, res.done(), executor._call_queue.empty(), executor._result_queue.empty()) print(executor._processes) print(threading.enumerate()) from faulthandler import dump_traceback dump_traceback() executor.submit(dump_traceback).result(TIMEOUT) raise RuntimeError("Executor took too long to run basic task.") class ExecutorMixin: worker_count = 5 @classmethod def setup_class(cls): print(f"setup class with {cls.context}") global _test_event if _test_event is None: _test_event = cls.context.Event() @classmethod def teardown_class(cls): print(f"teardown class with {cls.context}") global _test_event if _test_event is not None: _test_event = None @pytest.fixture(autouse=True) def setup_method(self): global _test_event assert _test_event is not None try: self.executor = self.executor_type( max_workers=self.worker_count, context=self.context, initializer=initializer_event, initargs=(_test_event,)) except NotImplementedError as e: self.skipTest(str(e)) _check_executor_started(self.executor) _check_subprocesses_number(self.executor, self.worker_count) def teardown_method(self, method): # Make sure executor is not broken if it should not be executor = getattr(self, 'executor', None) if executor is not None: expect_broken_pool = hasattr(method, "broken_pool") # old pytest for mark in getattr(method, "pytestmark", []): if mark.name == "broken_pool": expect_broken_pool = True is_actually_broken = executor._flags.broken is not None assert is_actually_broken == expect_broken_pool t_start = time.time() executor.shutdown(wait=True, kill_workers=True) dt = time.time() - t_start assert dt < 10, "Executor took too long to shutdown" _check_subprocesses_number(executor, 0) def _prime_executor(self): # Make sure that the executor is ready to do work before running the # tests. This should reduce the probability of timeouts in the tests. futures = [self.executor.submit(time.sleep, 0.1) for _ in range(self.worker_count)] for f in futures: f.result() @classmethod def check_no_running_workers(cls, patience=5, sleep_duration=0.01): deadline = time.time() + patience while time.time() <= deadline: time.sleep(sleep_duration) p = psutil.Process() workers = _running_children_with_cmdline(p) if not workers: return # Patience exhausted: log the remaining workers command line and # raise error. print("Remaining worker processes command lines:", file=sys.stderr) for w, cmdline in workers: print(w.pid, w.status(), end='\n', file=sys.stderr) print(cmdline, end='\n\n', file=sys.stderr) raise AssertionError( f'Expected no more running worker processes but got {len(workers)}' f' after waiting {patience:0.3f}s.' 
) class ReusableExecutorMixin: def setup_method(self, method): default_start_method = get_context().get_start_method() assert default_start_method == "loky", default_start_method executor = get_reusable_executor(max_workers=2) _check_executor_started(executor) # There can be less than 2 workers because of the worker timeout _check_subprocesses_number(executor, expected_max_process_number=2) def teardown_method(self, method): """Make sure the executor can be recovered after the tests""" executor = get_reusable_executor(max_workers=2) assert executor.submit(math.sqrt, 1).result() == 1 # There can be less than 2 workers because of the worker timeout _check_subprocesses_number(executor, expected_max_process_number=2) @classmethod def teardown_class(cls): executor = get_reusable_executor(max_workers=2) executor.shutdown(wait=True)
tests/_executor_mixin.py
8,129
Helper to fetch cmdline from children process list Initializer that set a global test event for test synchronization Make sure the executor can be recovered after the tests Set a large timeout as it should only be reached in case of deadlocks Under linux is_running() can return True even though the command line data can no longer be read from /proc/<pid>/cmdline. This looks like a race condition between /proc/<pid>/stat and /proc/<pid>/cmdline when the process is being terminated by the OS. These errors indicate that the process has terminated while we were processing the info. Just discard it. Wait for terminating processes to disappear Bypass pids checks when executor has been garbage collected there is a race condition with the /proc/<pid>/ system clean up and our utilization of psutil. The Process is considered alive by psutil even though it have been terminated. Wait for the system clean up in this case. Submit a small job to make sure that the pool is an working state Make sure executor is not broken if it should not be old pytest Make sure that the executor is ready to do work before running the tests. This should reduce the probability of timeouts in the tests. Patience exhausted: log the remaining workers command line and raise error. There can be less than 2 workers because of the worker timeout There can be less than 2 workers because of the worker timeout
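The notes above describe how worker processes are identified by their command line and why that command line can vanish while a process terminates. A minimal sketch of that filtering, using only documented psutil calls (the helper name is illustrative, not part of the test module):

import psutil

def running_children_cmdlines(parent=None):
    # Illustrative helper: return (process, cmdline) pairs for direct children
    # that are still running and still expose a command line.
    parent = parent or psutil.Process()
    result = []
    for child in parent.children():
        try:
            cmdline = " ".join(child.cmdline())
            if not child.is_running() or not cmdline:
                # On Linux, is_running() can briefly report True while
                # /proc/<pid>/cmdline is already empty during termination.
                continue
            result.append((child, cmdline))
        except (OSError, psutil.NoSuchProcess, psutil.AccessDenied):
            # The process terminated while we were inspecting it; discard it.
            pass
    return result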
1,391
en
0.946654
# Copyright 2016 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import division from __future__ import print_function try: unicode except NameError: unicode = str unichr = chr import bisect import os import sys import types import curses import app.config import app.controller import app.cu_editor import app.em_editor import app.string import app.text_buffer import app.vi_editor # The terminal area that the curses can draw to. mainCursesWindow = None class ViewWindow: """A view window is a base window that does not get focus or have TextBuffer. See class ActiveWindow for a window that can get focus. See class Window for a window that can get focus and have a TextBuffer. """ def __init__(self, program, parent): """ Args: parent is responsible for the order in which this window is updated, relative to its siblings. """ if app.config.strict_debug: assert issubclass(self.__class__, ViewWindow), self assert issubclass(program.__class__, app.ci_program.CiProgram), self if parent is not None: assert issubclass(parent.__class__, ViewWindow), parent self.program = program self.parent = parent self.isFocusable = False self.top = 0 self.left = 0 self.rows = 1 self.cols = 1 self.scrollRow = 0 self.scrollCol = 0 self.showCursor = True self.writeLineRow = 0 self.zOrder = [] def addStr(self, row, col, text, colorPair): """Overwrite text at row, column with text. The caller is responsible for avoiding overdraw. 
""" if app.config.strict_debug: app.log.check_le(row, self.rows) app.log.check_le(col, self.cols) self.program.backgroundFrame.addStr(self.top + row, self.left + col, text.encode('utf-8'), colorPair) def reattach(self): self.setParent(self.parent) def blank(self, colorPair): """Clear the window.""" for i in range(self.rows): self.addStr(i, 0, ' ' * self.cols, colorPair) def bringChildToFront(self, child): """Bring it to the top layer.""" try: self.zOrder.remove(child) except ValueError: pass self.zOrder.append(child) def bringToFront(self): """Bring it to the top layer.""" self.parent.bringChildToFront(self) def changeFocusTo(self, changeTo): if app.config.strict_debug: assert issubclass(self.__class__, ViewWindow), self assert issubclass(changeTo.__class__, ViewWindow), changeTo topWindow = self while topWindow.parent: topWindow = topWindow.parent topWindow.changeFocusTo(changeTo) def colorPref(self, colorType, delta=0): return self.program.color.get(colorType, delta) def contains(self, row, col): """Determine whether the position at row, col lay within this window.""" for i in self.zOrder: if i.contains(row, col): return i return (self.top <= row < self.top + self.rows and self.left <= col < self.left + self.cols and self) def debugDraw(self): programWindow = self while programWindow.parent is not None: programWindow = programWindow.parent programWindow.debugDraw(self) def deselect(self): pass def detach(self): """Hide the window by removing self from parents' children, but keep same parent to be reattached later.""" try: self.parent.zOrder.remove(self) except ValueError: pass def layoutHorizontally(self, children, separation=0): left = self.left cols = self.cols for view in children: preferredCols = view.preferredSize(self.rows, max(0, cols))[1] view.reshape(self.top, left, self.rows, max(0, min(cols, preferredCols))) delta = view.cols + separation left += delta cols -= delta def layoutVertically(self, children, separation=0): top = self.top rows = self.rows for view in children: preferredRows = view.preferredSize(max(0, rows), self.cols)[0] view.reshape(top, self.left, max(0, min(rows, preferredRows)), self.cols) delta = view.rows + separation top += delta rows -= delta def mouseClick(self, paneRow, paneCol, shift, ctrl, alt): pass def mouseDoubleClick(self, paneRow, paneCol, shift, ctrl, alt): pass def mouseMoved(self, paneRow, paneCol, shift, ctrl, alt): pass def mouseRelease(self, paneRow, paneCol, shift, ctrl, alt): pass def mouseTripleClick(self, paneRow, paneCol, shift, ctrl, alt): pass def mouseWheelDown(self, shift, ctrl, alt): pass def mouseWheelUp(self, shift, ctrl, alt): pass def moveTo(self, top, left): self.top = top self.left = left def moveBy(self, top, left): self.top += top self.left += left def _childFocusableWindow(self, reverse=False): windows = self.zOrder[:] if reverse: windows.reverse() for i in windows: if i.isFocusable: return i else: r = i._childFocusableWindow(reverse) if r is not None: return r def nextFocusableWindow(self, start, reverse=False): """Windows without |isFocusable| are skipped. Ignore (skip) |start| when searching. Args: start (window): the child window to start from. If |start| is not found, start from the first child window. reverse (bool): if True, find the prior focusable window. Returns: A window that should be focused. See also: showFullWindowHierarchy() which can help in debugging. 
""" windows = self.parent.zOrder[:] if reverse: windows.reverse() try: found = windows.index(start) except ValueError: found = -1 windows = windows[found + 1:] for i in windows: if i.isFocusable: return i else: r = i._childFocusableWindow(reverse) if r is not None: return r r = self.parent.nextFocusableWindow(self.parent, reverse) if r is not None: return r return self._childFocusableWindow(reverse) def normalize(self): self.parent.normalize() def onPrefChanged(self, category, name): self.parent.onPrefChanged(category, name) def paint(self, row, col, count, colorPair): """Paint text a row, column with colorPair. fyi, I thought this may be faster than using addStr to paint over the text with a different colorPair. It looks like there isn't a significant performance difference between chgat and addstr. """ mainCursesWindow.chgat(self.top + row, self.left + col, count, colorPair) def preferredSize(self, rowLimit, colLimit): # Derived classes should override this. return rowLimit, colLimit def presentModal(self, changeTo, paneRow, paneCol): self.parent.presentModal(changeTo, paneRow, paneCol) def priorFocusableWindow(self, start): return self.nextFocusableWindow(start, True) def quitNow(self): self.program.quitNow() def render(self): """Redraw window.""" for child in self.zOrder: child.render() def showWindowHierarchy(self, indent=' '): """For debugging.""" focus = u'[f]' if self.isFocusable else u'[ ]' extra = u'' if hasattr(self, 'label'): extra += u' "' + self.label + u'"' app.log.info("%s%s%s%s" % (indent, focus, self, extra)) for child in self.zOrder: child.showWindowHierarchy(indent + u' ') def showFullWindowHierarchy(self, indent=u' '): """For debugging.""" f = self while f.parent is not None: f = f.parent assert f f.showWindowHierarchy() def doPreCommand(self): pass def longTimeSlice(self): """returns whether work is finished (no need to call again).""" return True def shortTimeSlice(self): """returns whether work is finished (no need to call again).""" return True def reshape(self, top, left, rows, cols): self.moveTo(top, left) self.resizeTo(rows, cols) #app.log.debug(self, top, left, rows, cols) def resizeBottomBy(self, rows): self.rows += rows def resizeBy(self, rows, cols): self.rows += rows self.cols += cols def resizeTo(self, rows, cols): #app.log.detail(rows, cols, self) if app.config.strict_debug: assert rows >= 0, rows assert cols >= 0, cols self.rows = rows self.cols = cols def resizeTopBy(self, rows): self.top += rows self.rows -= rows def setParent(self, parent, layerIndex=sys.maxsize): """Setting the parent will cause the the window to refresh (i.e. 
if self was hidden with detach() it will no longer be hidden).""" if app.config.strict_debug: assert issubclass(self.__class__, ViewWindow), self assert issubclass(parent.__class__, ViewWindow), parent if self.parent: try: self.parent.zOrder.remove(self) except ValueError: pass self.parent = parent if parent: self.parent.zOrder.insert(layerIndex, self) def writeLine(self, text, color): """Simple line writer for static windows.""" if app.config.strict_debug: assert isinstance(text, unicode) text = text[:self.cols] text = text + u' ' * max(0, self.cols - len(text)) self.program.backgroundFrame.addStr(self.top + self.writeLineRow, self.left, text.encode(u'utf-8'), color) self.writeLineRow += 1 def getProgram(self): return self.program class ActiveWindow(ViewWindow): """An ActiveWindow may have focus and a controller.""" def __init__(self, program, parent): if app.config.strict_debug: assert issubclass(self.__class__, ActiveWindow), self assert issubclass(program.__class__, app.ci_program.CiProgram), repr(program) if parent is not None: assert issubclass(parent.__class__, ViewWindow), parent ViewWindow.__init__(self, program, parent) self.controller = None self.hasFocus = False self.isFocusable = True def focus(self): """ Note: to focus a view it must have a controller. Focusing a view without a controller would make the program appear to freeze since nothing would be responding to user input. """ self.hasFocus = True self.controller.focus() def setController(self, controller): if app.config.strict_debug: assert issubclass(self.__class__, Window), self self.controller = controller(self) def unfocus(self): self.hasFocus = False self.controller.unfocus() class Window(ActiveWindow): """A Window holds a TextBuffer and a controller that operates on the TextBuffer.""" def __init__(self, program, parent): if app.config.strict_debug: assert issubclass(self.__class__, Window), self assert issubclass(program.__class__, app.ci_program.CiProgram), self assert issubclass(parent.__class__, ViewWindow), parent ActiveWindow.__init__(self, program, parent) self.hasCaptiveCursor = self.program.prefs.editor['captiveCursor'] self.textBuffer = None def mouseClick(self, paneRow, paneCol, shift, ctrl, alt): if self.textBuffer: self.textBuffer.mouseClick(paneRow, paneCol, shift, ctrl, alt) def mouseDoubleClick(self, paneRow, paneCol, shift, ctrl, alt): if self.textBuffer: self.textBuffer.mouseDoubleClick(paneRow, paneCol, shift, ctrl, alt) def mouseMoved(self, paneRow, paneCol, shift, ctrl, alt): if self.textBuffer: self.textBuffer.mouseMoved(paneRow, paneCol, shift, ctrl, alt) def mouseRelease(self, paneRow, paneCol, shift, ctrl, alt): if self.textBuffer: self.textBuffer.mouseRelease(paneRow, paneCol, shift, ctrl, alt) def mouseTripleClick(self, paneRow, paneCol, shift, ctrl, alt): if self.textBuffer: self.textBuffer.mouseTripleClick(paneRow, paneCol, shift, ctrl, alt) def mouseWheelDown(self, shift, ctrl, alt): if self.textBuffer: self.textBuffer.mouseWheelDown(shift, ctrl, alt) def mouseWheelUp(self, shift, ctrl, alt): if self.textBuffer: self.textBuffer.mouseWheelUp(shift, ctrl, alt) def preferredSize(self, rowLimit, colLimit): return min(rowLimit, len(self.textBuffer.lines)), colLimit def render(self): if self.textBuffer: self.textBuffer.draw(self) ViewWindow.render(self) def setController(self, controller): ActiveWindow.setController(self, controller) self.controller.setTextBuffer(self.textBuffer) def setTextBuffer(self, textBuffer): textBuffer.setView(self) self.textBuffer = textBuffer def doPreCommand(self): 
if self.textBuffer is not None: self.textBuffer.setMessage() def longTimeSlice(self): """returns whether work is finished (no need to call again).""" finished = True tb = self.textBuffer if tb is not None and tb.parser.resumeAtRow < len(tb.lines): tb.parseDocument() # If a user event came in while parsing, the parsing will be paused # (to be resumed after handling the event). finished = tb.parser.resumeAtRow >= len(tb.lines) for child in self.zOrder: finished = finished and child.longTimeSlice() return finished def shortTimeSlice(self): """returns whether work is finished (no need to call again).""" tb = self.textBuffer if tb is not None: tb.parseScreenMaybe() return tb.parser.resumeAtRow >= len(tb.lines) return True class LabelWindow(ViewWindow): """A text label. The label is inert, it will pass events to its parent. """ def __init__(self, program, parent, label, preferredWidth=None, align=u'left'): if app.config.strict_debug: assert issubclass(program.__class__, app.ci_program.CiProgram), self assert issubclass(parent.__class__, ViewWindow), parent assert isinstance(label, unicode) assert preferredWidth is None or isinstance(preferredWidth, int) assert isinstance(align, unicode) ViewWindow.__init__(self, program, parent) self.label = label self.preferredWidth = preferredWidth self.align = -1 if align == u'left' else 1 self.color = self.program.color.get(u'keyword') def preferredSize(self, rowLimit, colLimit): if app.config.strict_debug: assert self.parent assert rowLimit >= 0 assert colLimit >= 0 preferredWidth = (self.preferredWidth if self.preferredWidth is not None else len(self.label)) return (min(rowLimit, 1), min(colLimit, preferredWidth)) def render(self): if self.rows <= 0: return line = self.label[:self.cols] line = u"%*s" % (self.cols * self.align, line) self.addStr(0, 0, line, self.color) ViewWindow.render(self) class LabeledLine(Window): """A single line with a label. This is akin to a line prompt or gui modal dialog. It's used for things like 'find' and 'goto line'. """ def __init__(self, program, parent, label): if app.config.strict_debug: assert issubclass(self.__class__, LabeledLine), self assert issubclass(program.__class__, app.ci_program.CiProgram), self assert issubclass(parent.__class__, ViewWindow), parent Window.__init__(self, program, parent) self.host = parent tb = app.text_buffer.TextBuffer(self.program) tb.rootGrammar = self.program.prefs.grammars[u'none'] self.setTextBuffer(tb) self.label = label self.leftColumn = ViewWindow(self.program, self) # TODO(dschuyler) Add self.rightColumn. 
def focus(self): self.bringToFront() if not self.controller: app.log.info(self, repr(self.label)) Window.focus(self) def preferredSize(self, rowLimit, colLimit): return min(rowLimit, 1), colLimit def render(self): #app.log.info('LabeledLine', self.label, self.rows, self.cols) if self.rows <= 0: return self.leftColumn.addStr(0, 0, self.label, self.program.color.get(u'keyword')) Window.render(self) def reshape(self, top, left, rows, cols): labelWidth = len(self.label) Window.reshape(self, top, left + labelWidth, rows, max(0, cols - labelWidth)) self.leftColumn.reshape(top, left, rows, labelWidth) def setLabel(self, label): self.label = label self.reshape(self.top, self.left, self.rows, self.cols) class Menu(ViewWindow): """Work in progress on a context menu.""" def __init__(self, program, host): if app.config.strict_debug: assert issubclass(self.__class__, Menu), self assert issubclass(host.__class__, ActiveWindow) ViewWindow.__init__(self, program, host) self.host = host self.label = u'' self.lines = [] self.commands = [] def addItem(self, label, command): self.lines.append(label) self.commands.append(command) def clear(self): self.lines = [] self.commands = [] def moveSizeToFit(self, left, top): self.clear() self.addItem(u'some menu', None) #self.addItem('sort', self.host.textBuffer.sortSelection) self.addItem(u'cut', self.host.textBuffer.editCut) self.addItem(u'paste', self.host.textBuffer.editPaste) longest = 0 for i in self.lines: if len(i) > longest: longest = len(i) self.reshape(left, top, len(self.lines), longest + 2) def render(self): color = self.program.color.get(u'context_menu') self.writeLineRow = 0 for i in self.lines[:self.rows]: self.writeLine(" " + i, color) ViewWindow.render(self) class LineNumbers(ViewWindow): def __init__(self, program, host): ViewWindow.__init__(self, program, host) self.host = host def drawLineNumbers(self): limit = min(self.rows, len(self.host.textBuffer.lines) - self.host.scrollRow) cursorBookmarkColorIndex = None visibleBookmarks = self.getVisibleBookmarks(self.host.scrollRow, self.host.scrollRow + limit) currentBookmarkIndex = 0 colorPrefs = self.program.color for i in range(limit): color = colorPrefs.get(u'line_number') currentRow = self.host.scrollRow + i if currentBookmarkIndex < len(visibleBookmarks): currentBookmark = visibleBookmarks[currentBookmarkIndex] else: currentBookmark = None # Use a different color if the row is associated with a bookmark. if currentBookmark: if (currentRow >= currentBookmark.begin and currentRow <= currentBookmark.end): color = colorPrefs.get( currentBookmark.data.get(u'colorIndex')) if self.host.textBuffer.penRow == currentRow: cursorBookmarkColorIndex = currentBookmark.data.get( u'colorIndex') if currentRow + 1 > currentBookmark.end: currentBookmarkIndex += 1 self.addStr(i, 0, u' %5d ' % (currentRow + 1), color) # Draw indicators for text off of the left edge. if self.host.scrollCol > 0: color = colorPrefs.get(u'line_overflow') for i in range(limit): if len(self.host.textBuffer.lines[self.host.scrollRow + i]) > 0: self.addStr(i, 6, u' ', color) # Draw blank line number rows past the end of the document. color = colorPrefs.get(u'outside_document') for i in range(limit, self.rows): self.addStr(i, 0, u' ', color) # Highlight the line numbers for the current cursor line. 
cursorAt = self.host.textBuffer.penRow - self.host.scrollRow if 0 <= cursorAt < limit: if cursorBookmarkColorIndex: if self.program.prefs.startup[u'numColors'] == 8: color = colorPrefs.get(cursorBookmarkColorIndex) else: color = colorPrefs.get(cursorBookmarkColorIndex % 32 + 128) else: color = colorPrefs.get(u'line_number_current') self.addStr(cursorAt, 1, u'%5d' % (self.host.textBuffer.penRow + 1), color) def getVisibleBookmarks(self, beginRow, endRow): """ Args: beginRow (int): the index of the line number that you want the list of bookmarks to start from. endRow (int): the index of the line number that you want the list of bookmarks to end at (exclusive). Returns: A list containing the bookmarks that are displayed on the screen. If there are no bookmarks, returns an empty list. """ bookmarkList = self.host.textBuffer.bookmarks beginIndex = endIndex = 0 if len(bookmarkList): needle = app.bookmark.Bookmark(beginRow, beginRow, {}) beginIndex = bisect.bisect_left(bookmarkList, needle) if beginIndex > 0 and bookmarkList[beginIndex - 1].end >= beginRow: beginIndex -= 1 needle.range = (endRow, endRow) endIndex = bisect.bisect_left(bookmarkList, needle) return bookmarkList[beginIndex:endIndex] def mouseClick(self, paneRow, paneCol, shift, ctrl, alt): if ctrl: app.log.info(u'click at', paneRow, paneCol) return self.host.changeFocusTo(self.host) tb = self.host.textBuffer if self.host.scrollRow + paneRow >= len(tb.lines): tb.selectionNone() return if shift: if tb.selectionMode == app.selectable.kSelectionNone: tb.selectionLine() self.mouseRelease(paneRow, paneCol, shift, ctrl, alt) else: tb.cursorMoveAndMark( self.host.scrollRow + paneRow - tb.penRow, 0, self.host.scrollRow + paneRow - tb.markerRow, 0, app.selectable.kSelectionNone - tb.selectionMode) self.mouseRelease(paneRow, paneCol, shift, ctrl, alt) def mouseDoubleClick(self, paneRow, paneCol, shift, ctrl, alt): self.host.textBuffer.selectionAll() def mouseMoved(self, paneRow, paneCol, shift, ctrl, alt): app.log.info(paneRow, paneCol, shift) self.host.textBuffer.mouseClick(paneRow, paneCol - self.cols, True, ctrl, alt) def mouseRelease(self, paneRow, paneCol, shift, ctrl, alt): app.log.info(paneRow, paneCol, shift) tb = self.host.textBuffer tb.selectLineAt(self.host.scrollRow + paneRow) def mouseTripleClick(self, paneRow, paneCol, shift, ctrl, alt): pass def mouseWheelDown(self, shift, ctrl, alt): self.host.mouseWheelDown(shift, ctrl, alt) def mouseWheelUp(self, shift, ctrl, alt): self.host.mouseWheelUp(shift, ctrl, alt) def render(self): self.drawLineNumbers() class LogWindow(ViewWindow): def __init__(self, program, parent): ViewWindow.__init__(self, program, parent) self.lines = app.log.getLines() self.renderCounter = 0 def render(self): self.renderCounter += 1 app.log.meta(u" " * 10, self.renderCounter, u"- screen render -") self.writeLineRow = 0 colorPrefs = self.program.color colorA = colorPrefs.get(u'default') colorB = colorPrefs.get(u'highlight') for i in self.lines[-self.rows:]: color = colorA if len(i) and i[-1] == u'-': color = colorB self.writeLine(i, color) ViewWindow.render(self) class InteractiveFind(Window): def __init__(self, program, host): Window.__init__(self, program, host) self.host = host self.expanded = False self.setController(app.cu_editor.InteractiveFind) indent = u' ' self.findLine = LabeledLine(self.program, self, u'Find: ') self.findLine.setController(app.cu_editor.InteractiveFindInput) self.findLine.setParent(self) self.replaceLine = LabeledLine(self.program, self, u'Replace: ') 
self.replaceLine.setController(app.cu_editor.InteractiveReplaceInput) self.replaceLine.setParent(self) self.matchOptionsRow = RowWindow(self.program, self, 2) self.matchOptionsRow.setParent(self) # If findUseRegex is false, re.escape the search. OptionsToggle(self.program, self.matchOptionsRow, u'regex', u'editor', u'findUseRegex') # If findWholeWord, wrap with \b. OptionsToggle(self.program, self.matchOptionsRow, u'wholeWord', u'editor', u'findWholeWord') # If findIgnoreCase, pass ignore case flag to regex. OptionsToggle(self.program, self.matchOptionsRow, u'ignoreCase', u'editor', u'findIgnoreCase') if 0: # Use locale. OptionsToggle(self.program, self.matchOptionsRow, u'locale', u'editor', u'findLocale') # Span lines. OptionsToggle(self.program, self.matchOptionsRow, u'multiline', u'editor', u'findMultiline') # Dot matches anything (even \n). OptionsToggle(self.program, self.matchOptionsRow, u'dotAll', u'editor', u'findDotAll') # Unicode match. OptionsToggle(self.program, self.matchOptionsRow, u'unicode', u'editor', u'findUnicode') # Replace uppercase with upper and lowercase with lower. OptionsToggle(self.program, self.matchOptionsRow, u'smartCaps', u'editor', u'findReplaceSmartCaps') if 0: self.scopeOptions, self.scopeRow = self.addSelectOptionsRow( indent + u'scope ', [u'file', u'directory', u'openFiles', u'project']) (self.changeCaseOptions, self.changeCaseRow) = self.addSelectOptionsRow( indent + u'changeCase', [u'none', u'smart', u'upper', u'lower']) (self.withinOptions, self.withinOptionsRow) = self.addSelectOptionsRow( indent + u'within ', [ u'any', u'code', u'comment', u'error', u'markup', u'misspelled', # Find in misspelled words. u'quoted', # Find in strings. ]) (self.searchSelectionOption, self.searchSelectionRow) = self.addSelectOptionsRow( indent + u'selection ', [u'any', u'yes', u'no']) (self.searchChangedOption, self.searchChangedRow) = self.addSelectOptionsRow( indent + u'changed ', [u'any', u'yes', u'no']) self.pathsLine = LabeledLine(self.program, self, u'Paths: ') self.pathsLine.setController(app.cu_editor.InteractiveFindInput) self.pathsLine.setParent(self) def reattach(self): Window.reattach(self) # TODO(dschuyler): consider removing expanded control. # See https://github.com/google/ci_edit/issues/170 self.expanded = True self.parent.layout() def detach(self): Window.detach(self) self.parent.layout() def addSelectOptionsRow(self, label, optionsList): """Such as a radio group.""" optionsRow = OptionsRow(self.program, self) optionsRow.color = self.program.color.get(u'keyword') optionsRow.addLabel(label) optionsDict = {} optionsRow.beginGroup() for key in optionsList: optionsDict[key] = False optionsRow.addSelection(key, optionsDict) optionsRow.endGroup() optionsDict[optionsList[0]] = True optionsRow.setParent(self) return optionsDict, optionsRow def bringChildToFront(self, child): # The find window doesn't reorder children. 
pass def focus(self): self.reattach() if app.config.strict_debug: assert self.parent assert self.findLine.parent assert self.rows > 0, self.rows assert self.findLine.rows > 0, self.findLine.rows self.controller.focus() self.changeFocusTo(self.findLine) def preferredSize(self, rowLimit, colLimit): if app.config.strict_debug: assert self.parent assert rowLimit >= 0 assert colLimit >= 0 if self.parent and self in self.parent.zOrder and self.expanded: return (min(rowLimit, len(self.zOrder)), colLimit) return (1, -1) def expandFindWindow(self, expanded): self.expanded = expanded self.parent.layout() def reshape(self, top, left, rows, cols): Window.reshape(self, top, left, rows, cols) self.layoutVertically(self.zOrder) def unfocus(self): self.detach() Window.unfocus(self) class MessageLine(ViewWindow): """The message line appears at the bottom of the screen.""" def __init__(self, program, host): ViewWindow.__init__(self, program, host) self.host = host self.message = None self.renderedMessage = None def render(self): colorPrefs = self.program.color if self.message: if self.message != self.renderedMessage: self.writeLineRow = 0 self.writeLine(self.message, colorPrefs.get(u'message_line')) else: self.blank(colorPrefs.get(u'message_line')) class StatusLine(ViewWindow): """The status line appears at the bottom of the screen. It shows the current line and column the cursor is on. """ def __init__(self, program, host): ViewWindow.__init__(self, program, host) self.host = host def render(self): tb = self.host.textBuffer colorPrefs = self.program.color color = colorPrefs.get(u'status_line') if self.host.showTips: tipRows = app.help.docs[u'tips'] if len(tipRows) + 1 < self.rows: for i in range(self.rows): self.addStr(i, 0, u' ' * self.cols, color) for i, k in enumerate(tipRows): self.addStr(i + 1, 4, k, color) self.addStr(1, 40, u"(Press F1 to show/hide tips)", color | curses.A_REVERSE) statusLine = u'' if tb.message: statusLine = tb.message[0] color = (tb.message[1] if tb.message[1] is not None else colorPrefs.get(u'status_line')) if 0: if tb.isDirty(): statusLine += u' * ' else: statusLine += u' . ' # Percentages. rowPercentage = 0 colPercentage = 0 lineCount = len(tb.lines) if lineCount: rowPercentage = self.host.textBuffer.penRow * 100 // lineCount charCount = len(tb.lines[self.host.textBuffer.penRow]) if charCount and self.host.textBuffer.penCol != 0: colPercentage = self.host.textBuffer.penCol * 100 // charCount # Format. rightSide = u'' if len(statusLine): rightSide += u' |' if self.program.prefs.startup.get('showLogWindow'): rightSide += u' %s | %s |' % (tb.cursorGrammarName(), tb.selectionModeName()) rightSide += u' %4d,%2d | %3d%%,%3d%%' % ( self.host.textBuffer.penRow + 1, self.host.textBuffer.penCol + 1, rowPercentage, colPercentage) statusLine += \ u' ' * (self.cols - len(statusLine) - len(rightSide)) + rightSide self.addStr(self.rows - 1, 0, statusLine[:self.cols], color) class TopInfo(ViewWindow): def __init__(self, program, host): ViewWindow.__init__(self, program, host) self.host = host self.borrowedRows = 0 self.lines = [] self.mode = 2 def onChange(self): if self.mode == 0: return tb = self.host.textBuffer lines = [] # TODO: Make dynamic topInfo work properly if len(tb.lines): lineCursor = self.host.scrollRow line = "" # Check for extremely small window. 
if len(tb.lines) > lineCursor: while len(line) == 0 and lineCursor > 0: line = tb.lines[lineCursor] lineCursor -= 1 if len(line): indent = len(line) - len(line.lstrip(u' ')) lineCursor += 1 while lineCursor < len(tb.lines): line = tb.lines[lineCursor] if not len(line): continue z = len(line) - len(line.lstrip(u' ')) if z > indent: indent = z lineCursor += 1 else: break while indent and lineCursor > 0: line = tb.lines[lineCursor] if len(line): z = len(line) - len(line.lstrip(u' ')) if z < indent: indent = z lines.append(line) lineCursor -= 1 pathLine = app.string.pathEncode(self.host.textBuffer.fullPath) if 1: if tb.isReadOnly: pathLine += u' [RO]' if 1: if tb.isDirty(): pathLine += u' * ' else: pathLine += u' . ' lines.append(pathLine[-self.cols:]) self.lines = lines infoRows = len(self.lines) if self.mode > 0: infoRows = self.mode if self.borrowedRows != infoRows: self.host.topRows = infoRows self.host.layout() self.borrowedRows = infoRows def render(self): """Render the context information at the top of the window.""" lines = self.lines[-self.mode:] lines.reverse() color = self.program.color.get('top_info') for i, line in enumerate(lines): self.addStr(i, 0, (line + u' ' * (self.cols - len(line)))[:self.cols], color) for i in range(len(lines), self.rows): self.addStr(i, 0, u' ' * self.cols, color) def reshape(self, top, left, rows, cols): self.borrowedRows = 0 ViewWindow.reshape(self, top, left, rows, cols) class InputWindow(Window): """This is the main content window. Often the largest pane displayed. """ def __init__(self, program, host): if app.config.strict_debug: assert host Window.__init__(self, program, host) self.host = host self.showFooter = True self.savedScrollPositions = {} self.showLineNumbers = self.program.prefs.editor.get( 'showLineNumbers', True) self.showMessageLine = True self.showRightColumn = True self.showTopInfo = True self.statusLineCount = 0 if self.program.prefs.status.get( 'seenTips') else 8 self.topRows = 2 # Number of lines in default TopInfo status. self.controller = app.controller.MainController(self) self.controller.add(app.em_editor.EmacsEdit(self)) self.controller.add(app.vi_editor.ViEdit(self)) self.controller.add(app.cu_editor.CuaPlusEdit(self)) # What does the user appear to want: edit, quit, or something else? self.userIntent = 'edit' if 1: self.confirmClose = LabeledLine( self.program, self, "Save changes? (yes, no, or cancel): ") self.confirmClose.setController(app.cu_editor.ConfirmClose) if 1: self.confirmOverwrite = LabeledLine( self.program, self, "Overwrite exiting file? (yes or no): ") self.confirmOverwrite.setController(app.cu_editor.ConfirmOverwrite) self.contextMenu = Menu(self.program, self) if 1: # wip on multi-line interactive find. self.interactiveFind = InteractiveFind(self.program, self) self.interactiveFind.setParent(self, 0) else: self.interactiveFind = LabeledLine(self.program, self, u'find: ') self.interactiveFind.setController(app.cu_editor.InteractiveFind) if 1: self.interactiveGoto = LabeledLine(self.program, self, u'goto: ') self.interactiveGoto.setController(app.cu_editor.InteractiveGoto) if 1: self.interactivePrediction = LabeledLine(self.program, self, u'p: ') self.interactivePrediction.setController( app.cu_editor.InteractivePrediction) if 1: self.interactivePrompt = LabeledLine(self.program, self, u"e: ") self.interactivePrompt.setController( app.cu_editor.InteractivePrompt) if 1: self.interactiveQuit = LabeledLine( self.program, self, u"Save changes? 
(yes, no, or cancel): ") self.interactiveQuit.setController(app.cu_editor.InteractiveQuit) if 1: self.topInfo = TopInfo(self.program, self) self.topInfo.setParent(self, 0) if not self.showTopInfo: self.topInfo.detach() if 1: self.statusLine = StatusLine(self.program, self) self.statusLine.setParent(self, 0) if not self.showFooter: self.statusLine.detach() if 1: self.lineNumberColumn = LineNumbers(self.program, self) self.lineNumberColumn.setParent(self, 0) if not self.showLineNumbers: self.lineNumberColumn.detach() if 1: self.logoCorner = ViewWindow(self.program, self) self.logoCorner.name = u'Logo' self.logoCorner.setParent(self, 0) if 1: self.rightColumn = ViewWindow(self.program, self) self.rightColumn.name = u'Right' self.rightColumn.setParent(self, 0) if not self.showRightColumn: self.rightColumn.detach() if 1: self.popupWindow = PopupWindow(self.program, self) if self.showMessageLine: self.messageLine = MessageLine(self.program, self) self.messageLine.setParent(self, 0) self.showTips = self.program.prefs.status.get(u'showTips') self.statusLineCount = 8 if self.showTips else 1 if 0: def splitWindow(self): """Experimental.""" app.log.info() other = InputWindow(self.prg, self) other.setTextBuffer(self.textBuffer) app.log.info() self.prg.zOrder.append(other) self.prg.layout() app.log.info() def layout(self): """Change self and sub-windows to fit within the given rectangle.""" top, left, rows, cols = self.outerShape lineNumbersCols = 7 topRows = self.topRows bottomRows = max(1, self.interactiveFind.preferredSize(rows, cols)[0]) # The top, left of the main window is the rows, cols of the logo corner. self.logoCorner.reshape(top, left, 2, lineNumbersCols) if self.showTopInfo and rows > topRows and cols > lineNumbersCols: self.topInfo.reshape(top, left + lineNumbersCols, topRows, cols - lineNumbersCols) top += topRows rows -= topRows rows -= bottomRows bottomFirstRow = top + rows self.confirmClose.reshape(bottomFirstRow, left, bottomRows, cols) self.confirmOverwrite.reshape(bottomFirstRow, left, bottomRows, cols) self.interactivePrediction.reshape(bottomFirstRow, left, bottomRows, cols) self.interactivePrompt.reshape(bottomFirstRow, left, bottomRows, cols) self.interactiveQuit.reshape(bottomFirstRow, left, bottomRows, cols) if self.showMessageLine: self.messageLine.reshape(bottomFirstRow, left, bottomRows, cols) self.interactiveFind.reshape(bottomFirstRow, left, bottomRows, cols) if 1: self.interactiveGoto.reshape(bottomFirstRow, left, bottomRows, cols) if self.showFooter and rows > 0: self.statusLine.reshape(bottomFirstRow - self.statusLineCount, left, self.statusLineCount, cols) rows -= self.statusLineCount if self.showLineNumbers and cols > lineNumbersCols: self.lineNumberColumn.reshape(top, left, rows, lineNumbersCols) cols -= lineNumbersCols left += lineNumbersCols if self.showRightColumn and cols > 0: self.rightColumn.reshape(top, left + cols - 1, rows, 1) cols -= 1 Window.reshape(self, top, left, rows, cols) def drawLogoCorner(self): """.""" logo = self.logoCorner if logo.rows <= 0 or logo.cols <= 0: return color = self.program.color.get('logo') for i in range(logo.rows): logo.addStr(i, 0, u' ' * logo.cols, color) logo.addStr(0, 1, u'ci' [:self.cols], color) logo.render() def drawRightEdge(self): """Draw makers to indicate text extending past the right edge of the window.""" maxRow, maxCol = self.rows, self.cols limit = min(maxRow, len(self.textBuffer.lines) - self.scrollRow) colorPrefs = self.program.color for i in range(limit): color = colorPrefs.get('right_column') if 
len(self.textBuffer.lines[ i + self.scrollRow]) - self.scrollCol > maxCol: color = colorPrefs.get('line_overflow') self.rightColumn.addStr(i, 0, u' ', color) color = colorPrefs.get('outside_document') for i in range(limit, maxRow): self.rightColumn.addStr(i, 0, u' ', color) def focus(self): self.layout() if self.showMessageLine: self.messageLine.bringToFront() Window.focus(self) def nextFocusableWindow(self, start, reverse=False): # Keep the tab focus in the child branch. (The child view will call # this, tell the child there is nothing to tab to up here). return None def render(self): self.topInfo.onChange() self.drawLogoCorner() self.drawRightEdge() Window.render(self) def reshape(self, top, left, rows, cols): """Change self and sub-windows to fit within the given rectangle.""" app.log.detail(top, left, rows, cols) Window.reshape(self, top, left, rows, cols) self.outerShape = (top, left, rows, cols) self.layout() def setTextBuffer(self, textBuffer): if app.config.strict_debug: assert issubclass( textBuffer.__class__, app.text_buffer.TextBuffer), \ repr(textBuffer) app.log.info('setTextBuffer') if self.textBuffer is not None: self.savedScrollPositions[self.textBuffer.fullPath] = ( self.scrollRow, self.scrollCol) #self.normalize() textBuffer.lineLimitIndicator = self.program.prefs.editor[ 'lineLimitIndicator'] textBuffer.debugRedo = self.program.prefs.startup.get('debugRedo') Window.setTextBuffer(self, textBuffer) self.controller.setTextBuffer(textBuffer) savedScroll = self.savedScrollPositions.get(self.textBuffer.fullPath) if savedScroll is not None: self.scrollRow, self.scrollCol = savedScroll else: historyScroll = self.textBuffer.fileHistory.get('scroll') if historyScroll is not None: self.scrollRow, self.scrollCol = historyScroll else: self.textBuffer.scrollToOptimalScrollPosition() def startup(self): bufferManager = self.program.bufferManager for f in self.program.prefs.startup.get('cliFiles', []): tb = bufferManager.loadTextBuffer(f['path']) if tb is None: # app.log.info('failed to load', repr(f["path"])) continue tb.parseDocument() if f['row'] is not None: if f['col'] is not None: tb.selectText(f['row'], f['col'], 0, app.selectable.kSelectionNone) else: tb.selectText(f['row'], 0, 0, app.selectable.kSelectionNone) if self.program.prefs.startup.get('readStdin'): bufferManager.readStdin() bufferManager.buffers.reverse() tb = bufferManager.topBuffer() if not tb: tb = bufferManager.newTextBuffer() self.setTextBuffer(tb) # Should parsing the document be a standard part of setTextBuffer? TBD. 
self.textBuffer.parseDocument() openToLine = self.program.prefs.startup.get('openToLine') if openToLine is not None: self.textBuffer.selectText(openToLine - 1, 0, 0, app.selectable.kSelectionNone) def toggleShowTips(self): self.showTips = not self.showTips self.statusLineCount = 8 if self.showTips else 1 self.layout() self.program.prefs.save('status', 'showTips', self.showTips) def unfocus(self): if self.showMessageLine: self.messageLine.detach() Window.unfocus(self) class OptionsSelectionWindow(ViewWindow): """Mutex window.""" def __init__(self, program, parent): if app.config.strict_debug: assert parent is not None ViewWindow.__init__(self, program, parent) self.color = self.program.color.get('top_info') def reshape(self, top, left, rows, cols): ViewWindow.reshape(self, top, left, rows, cols) self.layoutHorizontally(self.zOrder) def childSelected(self, selectedChild): app.log.info(self.zOrder) for child in self.zOrder: if child is not selectedChild: child.deselect() def render(self): self.blank(self.color) ViewWindow.render(self) class OptionsTrinaryStateWindow(Window): def __init__(self, program, parent, label, prefCategory, prefName): if app.config.strict_debug: assert isinstance(label, unicode) assert isinstance(prefCategory, unicode) assert isinstance(prefName, unicode) Window.__init__(self, program, parent) # TODO(dschuyler): Creating a text buffer is rather heavy for a toggle # control. This should get some optimization. self.setTextBuffer(app.text_buffer.TextBuffer(self.program)) self.setController(app.cu_editor.ToggleController) self.setParent(parent) self.name = label self.prefCategory = prefCategory self.prefName = prefName colorPrefs = self.program.color self.color = colorPrefs.get('keyword') self.focusColor = colorPrefs.get('selected') self.textBuffer.view.showCursor = False def focus(self): Window.focus(self) def setUp(self, toggleOn, toggleOff, toggleUndefined, width=None): if app.config.strict_debug: assert isinstance(toggleOn, unicode) assert isinstance(toggleOff, unicode) assert isinstance(toggleUndefined, unicode) assert width is None or isinstance(width, int) self.toggleOn = toggleOn self.toggleOff = toggleOff self.toggleUndefined = toggleUndefined longest = max(len(toggleOn), len(toggleOff), len(toggleUndefined)) self.width = width if width is not None else longest self.updateLabel() def mouseClick(self, paneRow, paneCol, shift, ctrl, alt): self.controller.toggleValue() def onPrefChanged(self, category, name): Window.onPrefChanged(self, category, name) if category != self.prefCategory or name != self.prefName: return self.updateLabel() def updateLabel(self): pref = self.program.prefs.category(self.prefCategory)[self.prefName] if pref is None: label = self.toggleUndefined else: label = self.toggleOn if pref else self.toggleOff self.label = u'%*s' % (self.width, label) def preferredSize(self, rowLimit, colLimit): return min(rowLimit, 1), min(colLimit, abs(self.width)) def render(self): Window.render(self) if self.rows <= 0: return self.writeLineRow = 0 color = self.focusColor if self.hasFocus else self.color self.writeLine(self.label[:self.cols], color) class OptionsToggle(OptionsTrinaryStateWindow): def __init__(self, program, parent, label, prefCategory, prefName, width=None): if app.config.strict_debug: assert isinstance(label, unicode) assert isinstance(prefCategory, unicode) assert isinstance(prefName, unicode) OptionsTrinaryStateWindow.__init__(self, program, parent, label, prefCategory, prefName) # I considered these unicode characters, but [x] looks clearer to 
me. # toggleOn = unichr(0x2612) + ' ' + control['name'] # toggleOff = unichr(0x2610) + ' ' + control['name'] OptionsTrinaryStateWindow.setUp(self, u'[x]' + label, u'[ ]' + label, u'[-]' + label, width) class RowWindow(ViewWindow): def __init__(self, program, host, separator): if app.config.strict_debug: assert host ViewWindow.__init__(self, program, host) self.color = self.program.color.get('keyword') self.separator = separator def preferredSize(self, rowLimit, colLimit): return min(rowLimit, 1), colLimit def render(self): self.blank(self.color) ViewWindow.render(self) def reshape(self, top, left, rows, cols): ViewWindow.reshape(self, top, left, rows, cols) #app.log.info(top, left, rows, cols, self) self.layoutHorizontally(self.zOrder, self.separator) class OptionsRow(ViewWindow): class ControlElement: def __init__(self, elementType, name, reference, width=None, sep=" "): self.type = elementType self.name = name self.reference = reference self.width = width if width is not None else len(name) self.sep = sep def __init__(self, program, host): if app.config.strict_debug: assert host ViewWindow.__init__(self, program, host) self.host = host self.color = self.program.color.get('top_info') self.controlList = [] self.group = None def addElement(self, draw, kind, name, reference, width, sep, extraWidth=0): if app.config.strict_debug: assert isinstance(name, unicode) assert isinstance(sep, unicode) assert width is None or isinstance(width, int) assert isinstance(extraWidth, int) if reference is not None: assert isinstance(reference, dict) assert name in reference if self.group is not None: self.group.append(len(self.controlList)) element = { 'dict': reference, 'draw': draw, 'name': name, 'sep': sep, 'type': kind, 'width': width if width is not None else len(name) + extraWidth } self.controlList.append(element) return element def addLabel(self, name, width=None, sep=u" "): def draw(control): return control[u'name'] return self.addElement(draw, u'label', name, None, width, sep) def addSortHeader(self, name, reference, width=None, sep=u" |"): def draw(control): decoration = u'v' if control[u'dict'][control[u'name']] else u'^' if control[u'dict'][control[u'name']] is None: decoration = u'-' if control[u'width'] < 0: return u'%s %s' % (control[u'name'], decoration) return u'%s %s' % (decoration, control[u'name']) self.addElement(draw, u'sort', name, reference, width, sep, len(u' v')) def addSelection(self, name, reference, width=None, sep=u" "): if app.config.strict_debug: assert isinstance(name, unicode) if 1: toggleOn = u'(*)' + name toggleOff = u'( )' + name def draw(control): return toggleOn if control[u'dict'][control[u'name']] else toggleOff width = max(width, min(len(toggleOn), len(toggleOff))) self.addElement(draw, u'selection', name, reference, width, sep, len(u'(*)')) def removeThis_addToggle(self, name, reference, width=None, sep=u" "): if app.config.strict_debug: assert isinstance(name, unicode) if 1: toggleOn = u'[x]' + name toggleOff = u'[ ]' + name if 0: toggleOn = unichr(0x2612) + ' ' + control['name'] toggleOff = unichr(0x2610) + ' ' + control['name'] if 0: toggleOn = '[+' + control['name'] + ']' toggleOff = '[-' + control['name'] + ']' def draw(control): return toggleOn if control['dict'][control['name']] else toggleOff width = max(width, min(len(toggleOn), len(toggleOff))) self.addElement(draw, u'toggle', name, reference, width, sep, len('[-]')) def beginGroup(self): """Like a radio group, or column sort headers.""" self.group = [] def endGroup(self): """Like a radio group, or column 
sort headers.""" pass def mouseClick(self, paneRow, paneCol, shift, ctrl, alt): #row = self.scrollRow + paneRow col = self.scrollCol + paneCol offset = 0 for index, control in enumerate(self.controlList): width = abs(control['width']) if offset <= col < offset + width: if control['type'] == 'selection': name = control['name'] for element in self.group: elementName = self.controlList[element]['name'] self.controlList[element]['dict'][elementName] = False control['dict'][name] = True self.host.controller.optionChanged(name, control['dict'][name]) break if control['type'] == 'sort': name = control['name'] newValue = not control['dict'][name] if index in self.group: for element in self.group: elementName = self.controlList[element]['name'] self.controlList[element]['dict'][ elementName] = None control['dict'][name] = newValue self.host.controller.optionChanged(name, control['dict'][name]) break if control['type'] == 'toggle': name = control['name'] control['dict'][name] = not control['dict'][name] self.host.controller.optionChanged(name, control['dict'][name]) break offset += width + len(control['sep']) def preferredSize(self, rowLimit, colLimit): return min(rowLimit, 1), colLimit def render(self): if self.rows <= 0: return line = u'' for control in self.controlList: label = control['draw'](control) line += u'%*s%s' % (control['width'], label, control['sep']) if len(line) >= self.cols: break self.writeLineRow = 0 self.writeLine(line[:self.cols], self.color) class PopupWindow(Window): def __init__(self, program, host): if app.config.strict_debug: assert host Window.__init__(self, program, host) self.host = host self.controller = app.cu_editor.PopupController(self) self.setTextBuffer(app.text_buffer.TextBuffer(self.program)) self.longestLineLength = 0 self.__message = [] self.showOptions = True # This will be displayed and should contain the keys that respond to # user input. This should be updated if you change the controller's # command set. self.options = [] def render(self): """Display a box of text in the center of the window.""" maxRows, maxCols = self.host.rows, self.host.cols cols = min(self.longestLineLength + 6, maxCols) rows = min(len(self.__message) + 4, maxRows) self.resizeTo(rows, cols) self.moveTo(maxRows // 2 - rows // 2, maxCols // 2 - cols // 2) color = self.program.color.get('popup_window') for row in range(rows): if row == rows - 2 and self.showOptions: message = '/'.join(self.options) elif row == 0 or row >= rows - 3: self.addStr(row, 0, ' ' * cols, color) continue else: message = self.__message[row - 1] lineLength = len(message) spacing1 = (cols - lineLength) // 2 spacing2 = cols - lineLength - spacing1 self.addStr(row, 0, ' ' * spacing1 + message + ' ' * spacing2, color) def setMessage(self, message): """Sets the Popup window's message to the given message. message (str): A string that you want to display. Returns: None. """ self.__message = message.split("\n") self.longestLineLength = max([len(line) for line in self.__message]) def setOptionsToDisplay(self, options): """ This function is used to change the options that are displayed in the popup window. They will be separated by a '/' character when displayed. Args: options (list): A list of possible keys which the user can press and should be responded to by the controller. 
""" self.options = options def setTextBuffer(self, textBuffer): Window.setTextBuffer(self, textBuffer) self.controller.setTextBuffer(textBuffer) def unfocus(self): self.detach() Window.unfocus(self) class PaletteWindow(Window): """A window with example foreground and background text colors.""" def __init__(self, prg, host): Window.__init__(self, prg, host) self.prg = prg self.resizeTo(16, 16 * 5) self.moveTo(8, 8) self.controller = app.cu_editor.PaletteDialogController(self) self.setTextBuffer(app.text_buffer.TextBuffer(self.program)) def render(self): width = 16 rows = 16 colorPrefs = self.program.color for i in range(width): for k in range(rows): self.addStr(k, i * 5, ' %3d ' % (i + k * width,), colorPrefs.get(i + k * width)) def setTextBuffer(self, textBuffer): Window.setTextBuffer(self, textBuffer) self.controller.setTextBuffer(textBuffer) def unfocus(self): self.detach() Window.unfocus(self) class SortableHeaderWindow(OptionsTrinaryStateWindow): def __init__(self, program, parent, label, prefCategory, prefName, width=None): if app.config.strict_debug: assert issubclass(program.__class__, app.ci_program.CiProgram), program assert isinstance(label, unicode) assert isinstance(prefCategory, unicode) assert isinstance(prefName, unicode) OptionsTrinaryStateWindow.__init__(self, program, parent, label, prefCategory, prefName) self.color = self.program.color.get(u'top_info') def draw(label, decoration, width): if width < 0: x = u'%s %s' % (label, decoration) else: x = u'%s %s' % (decoration, label) return u'%*s' % (width, x) OptionsTrinaryStateWindow.setUp(self, draw(label, u'v', width), draw(label, u'^', width), draw(label, u'-', width)) def deselect(self): self.controller.clearValue() def mouseClick(self, paneRow, paneCol, shift, ctrl, alt): self.parent.childSelected(self) self.controller.toggleValue()
app/window.py
64,104
An ActiveWindow may have focus and a controller. This is the main content window. Often the largest pane displayed. A text label. The label is inert, it will pass events to its parent. A single line with a label. This is akin to a line prompt or gui modal dialog. It's used for things like 'find' and 'goto line'. Work in progress on a context menu. The message line appears at the bottom of the screen. Mutex window. A window with example foreground and background text colors. The status line appears at the bottom of the screen. It shows the current line and column the cursor is on. A view window is a base window that does not get focus or have TextBuffer. See class ActiveWindow for a window that can get focus. See class Window for a window that can get focus and have a TextBuffer. A Window holds a TextBuffer and a controller that operates on the TextBuffer. Args: parent is responsible for the order in which this window is updated, relative to its siblings. Such as a radio group. Overwrite text at row, column with text. The caller is responsible for avoiding overdraw. Like a radio group, or column sort headers. Clear the window. Bring it to the top layer. Bring it to the top layer. Determine whether the position at row, col lay within this window. Hide the window by removing self from parents' children, but keep same parent to be reattached later. . Draw makers to indicate text extending past the right edge of the window. Like a radio group, or column sort headers. Note: to focus a view it must have a controller. Focusing a view without a controller would make the program appear to freeze since nothing would be responding to user input. Args: beginRow (int): the index of the line number that you want the list of bookmarks to start from. endRow (int): the index of the line number that you want the list of bookmarks to end at (exclusive). Returns: A list containing the bookmarks that are displayed on the screen. If there are no bookmarks, returns an empty list. Change self and sub-windows to fit within the given rectangle. returns whether work is finished (no need to call again). returns whether work is finished (no need to call again). Windows without |isFocusable| are skipped. Ignore (skip) |start| when searching. Args: start (window): the child window to start from. If |start| is not found, start from the first child window. reverse (bool): if True, find the prior focusable window. Returns: A window that should be focused. See also: showFullWindowHierarchy() which can help in debugging. Paint text a row, column with colorPair. fyi, I thought this may be faster than using addStr to paint over the text with a different colorPair. It looks like there isn't a significant performance difference between chgat and addstr. Redraw window. Render the context information at the top of the window. Display a box of text in the center of the window. Change self and sub-windows to fit within the given rectangle. Sets the Popup window's message to the given message. message (str): A string that you want to display. Returns: None. This function is used to change the options that are displayed in the popup window. They will be separated by a '/' character when displayed. Args: options (list): A list of possible keys which the user can press and should be responded to by the controller. Setting the parent will cause the the window to refresh (i.e. if self was hidden with detach() it will no longer be hidden). returns whether work is finished (no need to call again). 
returns whether work is finished (no need to call again). For debugging. For debugging. Experimental. Simple line writer for static windows. Copyright 2016 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. The terminal area that the curses can draw to. Derived classes should override this.app.log.debug(self, top, left, rows, cols)app.log.detail(rows, cols, self) If a user event came in while parsing, the parsing will be paused (to be resumed after handling the event). TODO(dschuyler) Add self.rightColumn.app.log.info('LabeledLine', self.label, self.rows, self.cols)self.addItem('sort', self.host.textBuffer.sortSelection) Use a different color if the row is associated with a bookmark. Draw indicators for text off of the left edge. Draw blank line number rows past the end of the document. Highlight the line numbers for the current cursor line. If findUseRegex is false, re.escape the search. If findWholeWord, wrap with \b. If findIgnoreCase, pass ignore case flag to regex. Use locale. Span lines. Dot matches anything (even \n). Unicode match. Replace uppercase with upper and lowercase with lower. Find in misspelled words. Find in strings. TODO(dschuyler): consider removing expanded control. See https://github.com/google/ci_edit/issues/170 The find window doesn't reorder children. Percentages. Format. TODO: Make dynamic topInfo work properly Check for extremely small window. Number of lines in default TopInfo status. What does the user appear to want: edit, quit, or something else? wip on multi-line interactive find. The top, left of the main window is the rows, cols of the logo corner. Keep the tab focus in the child branch. (The child view will call this, tell the child there is nothing to tab to up here).self.normalize() app.log.info('failed to load', repr(f["path"])) Should parsing the document be a standard part of setTextBuffer? TBD. TODO(dschuyler): Creating a text buffer is rather heavy for a toggle control. This should get some optimization. I considered these unicode characters, but [x] looks clearer to me. toggleOn = unichr(0x2612) + ' ' + control['name'] toggleOff = unichr(0x2610) + ' ' + control['name']app.log.info(top, left, rows, cols, self)row = self.scrollRow + paneRow This will be displayed and should contain the keys that respond to user input. This should be updated if you change the controller's command set.
6,463
en
0.855413
import functools
import typing

from aws_cdk import core

from cdk_resources.utils import (
    app_context,
    ALLOWED_ENVIRONMENTS,
    get_environment,
)


__all__ = ["ResourceStack", "register_stacks"]


class ResourceStack(core.Stack):
    """ """

    EXISTING_RESOURCES = None
    RESOURCES = None

    def __init__(
        self, scope: core.App, stack_id: str, **kwargs
    ) -> None:
        super().__init__(scope, stack_id, **kwargs)
        # Update Context
        app_context.update(app=scope, current_stack=self)
        if self.is_valid_environment is False:
            raise Exception(
                f"`{get_environment()}` must be a valid environment allowed "
                f"values {ALLOWED_ENVIRONMENTS}"
            )
        # Existing resources
        for resources in self.EXISTING_RESOURCES or []:
            resource_name, Resource, resource_attrs = (
                self.get_resource_name(resources[0]),
                resources[1],
                (resources[2] if len(resources) == 3 else {}),
            )
            setattr(
                self,
                resource_name,
                Resource(
                    scope=self,
                    construct_id=resources[0],
                    **resource_attrs,
                ),
            )
        # Own Resources
        for resources in self.RESOURCES or []:
            resource_name, Resource, resource_attrs = (
                self.get_resource_name(resources[0]),
                resources[1],
                (resources[2] if len(resources) == 3 else {}),
            )
            resource = Resource(
                scope=self, construct_id=resource_name, **resource_attrs
            )
            setattr(self, resource_name, resource)

    @staticmethod
    def get_resource_name(value: typing.Union[str, typing.Callable]) -> str:
        return value() if hasattr(value, "__call__") else value

    @property
    @functools.lru_cache(maxsize=None)
    def is_valid_environment(self) -> bool:
        if len(ALLOWED_ENVIRONMENTS) == 0:
            return True
        environment = get_environment()
        return environment is not None and environment in ALLOWED_ENVIRONMENTS


def register_stacks(
    app: core.App, aws_env: core.Environment, stacks: list
) -> None:
    # Create Stacks
    for stack in stacks:
        stack_id, stack_class, stack_kwargs = (
            stack[0],
            stack[1],
            (stack[2] if len(stack) == 3 else {}),
        )
        stack_class(app, stack_id, env=aws_env, **stack_kwargs)
cdk_resources/stacks.py
2,511
Update Context Existing resources Own Resources Create Stacks
61
en
0.439315
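A minimal usage sketch for the ResourceStack/register_stacks helpers above, assuming a hypothetical MyBucket construct and a standalone app.py entry point; the import path, account/region values, and resource names are placeholders, not part of the original module.

# Hypothetical app.py wiring up the helpers above; MyBucket and the
# account/region values are illustrative placeholders.
from aws_cdk import core

from cdk_resources.stacks import ResourceStack, register_stacks
from my_project.resources import MyBucket  # hypothetical construct


class StorageStack(ResourceStack):
    # Each entry is (construct id, resource class, optional kwargs dict).
    RESOURCES = [
        ("uploads-bucket", MyBucket, {"versioned": True}),
    ]


app = core.App()
register_stacks(
    app,
    aws_env=core.Environment(account="123456789012", region="eu-west-1"),
    stacks=[("storage", StorageStack)],
)
app.synth()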
# Generated by Django 3.1.1 on 2020-09-08 18:18

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('grocery', '0003_auto_20200908_1417'),
    ]

    operations = [
        migrations.AlterField(
            model_name='item',
            name='list',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='item', to='grocery.list'),
        ),
        migrations.AlterField(
            model_name='list',
            name='user',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='list', to=settings.AUTH_USER_MODEL),
        ),
    ]
ExpenseTracker/grocery/migrations/0004_auto_20200908_1918.py
838
Generated by Django 3.1.1 on 2020-09-08 18:18
45
en
0.69757
import re

#
# Module 2 of the homework assignment for webinar 4.
#
# The user enters any numbers, separated by a delimiter.
# Store the numbers in a list.
# Build a new list containing only the unique elements of the original one.
# Print it to the screen.
#

s_input = input("Enter the list elements separated by a delimiter [,:/]: ")
l_numbers = list(map(int, re.split(",|:|/", s_input)))
print([x for x in l_numbers if l_numbers.count(x) == 1])
2seq.py
645
Module 2 of the homework assignment for webinar 4. The user enters any numbers, separated by a delimiter. Store the numbers in a list. Build a new list containing only the unique elements of the original one. Print it to the screen.
220
ru
0.998518
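For larger inputs, calling list.count() for every element makes the filter above quadratic; a sketch of the same filter using collections.Counter keeps it linear. The variable names mirror the script above.

# Linear-time variant of the filter above using collections.Counter.
import re
from collections import Counter

s_input = input("Enter the list elements separated by a delimiter [,:/]: ")
l_numbers = list(map(int, re.split(",|:|/", s_input)))
counts = Counter(l_numbers)
print([x for x in l_numbers if counts[x] == 1])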
#!/usr/bin/env python3 # Copyright (c) 2016-2018 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Encode and decode BASE58, P2PKH and P2SH addresses.""" from .script import hash256, hash160, sha256, CScript, OP_0 from .util import bytes_to_hex_str, hex_str_to_bytes from . import segwit_addr chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz' def byte_to_base58(b, version): result = '' str = bytes_to_hex_str(b) str = bytes_to_hex_str(chr(version).encode('latin-1')) + str checksum = bytes_to_hex_str(hash256(hex_str_to_bytes(str))) str += checksum[:8] value = int('0x'+str,0) while value > 0: result = chars[value % 58] + result value //= 58 while (str[:2] == '00'): result = chars[0] + result str = str[2:] return result # TODO: def base58_decode def keyhash_to_p2pkh(hash, main = False): assert (len(hash) == 20) version = 76 if main else 138 return byte_to_base58(hash, version) def scripthash_to_p2sh(hash, main = False): assert (len(hash) == 20) version = 28 if main else 88 return byte_to_base58(hash, version) def key_to_p2pkh(key, main = False): key = check_key(key) return keyhash_to_p2pkh(hash160(key), main) def script_to_p2sh(script, main = False): script = check_script(script) return scripthash_to_p2sh(hash160(script), main) def key_to_p2sh_p2wpkh(key, main = False): key = check_key(key) p2shscript = CScript([OP_0, hash160(key)]) return script_to_p2sh(p2shscript, main) def program_to_witness(version, program, main = False): if (type(program) is str): program = hex_str_to_bytes(program) assert 0 <= version <= 16 assert 2 <= len(program) <= 40 assert version > 0 or len(program) in [20, 32] return segwit_addr.encode("xpc" if main else "xpcrt", version, program) def script_to_p2wsh(script, main = False): script = check_script(script) return program_to_witness(0, sha256(script), main) def key_to_p2wpkh(key, main = False): key = check_key(key) return program_to_witness(0, hash160(key), main) def script_to_p2sh_p2wsh(script, main = False): script = check_script(script) p2shscript = CScript([OP_0, sha256(script)]) return script_to_p2sh(p2shscript, main) def check_key(key): if (type(key) is str): key = hex_str_to_bytes(key) # Assuming this is hex string if (type(key) is bytes and (len(key) == 33 or len(key) == 65)): return key assert(False) def check_script(script): if (type(script) is str): script = hex_str_to_bytes(script) # Assuming this is hex string if (type(script) is bytes or type(script) is CScript): return script assert(False)
test/functional/test_framework/address.py
2,853
Encode and decode BASE58, P2PKH and P2SH addresses. !/usr/bin/env python3 Copyright (c) 2016-2018 The Bitcoin Core developers Distributed under the MIT software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. TODO: def base58_decode Assuming this is hex string Assuming this is hex string
339
en
0.632721
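The byte_to_base58 helper above implements Base58Check encoding: a version byte is prepended to the payload, the first four bytes of a double-SHA256 checksum are appended, and the result is rendered in the 58-character alphabet. A self-contained sketch of the same scheme using only hashlib, for readers without the test framework on hand; the example hash is illustrative, and the version value 138 is simply the non-mainnet P2PKH version used by keyhash_to_p2pkh above.

# Standalone Base58Check sketch mirroring byte_to_base58 above.
import hashlib

CHARS = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'


def base58check_encode(payload: bytes, version: int) -> str:
    data = bytes([version]) + payload
    # Checksum is the first four bytes of double SHA-256 over version+payload.
    checksum = hashlib.sha256(hashlib.sha256(data).digest()).digest()[:4]
    data += checksum
    value = int.from_bytes(data, 'big')
    result = ''
    while value > 0:
        value, rem = divmod(value, 58)
        result = CHARS[rem] + result
    # Each leading zero byte is encoded as the first alphabet character ('1').
    for byte in data:
        if byte != 0:
            break
        result = CHARS[0] + result
    return result


# Example: a made-up all-zero 20-byte key hash.
print(base58check_encode(bytes(20), 138))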
# Copyright (c) 2021 - for information on the respective copyright owner
# see the NOTICE file and/or the repository https://github.com/micro-ROS/system_modes.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import launch
import launch.actions
from launch.substitutions import LaunchConfiguration
import launch_ros.actions

logger = launch.substitutions.LaunchConfiguration("log_level")


def generate_launch_description():
    return launch.LaunchDescription([
        launch.actions.DeclareLaunchArgument(
            'lookup_table',
            description='Path to lookup table'),
        launch.actions.DeclareLaunchArgument(
            'log_level',
            default_value=["info"],
            description='Logging level'),
        launch_ros.actions.Node(
            package='micro_ros_diagnostic_bridge',
            executable='diagnostic_bridge',
            parameters=[{'lookup_table': LaunchConfiguration('lookup_table')}],
            output='screen',
            arguments=['--ros-args', '--log-level', logger])
    ])
micro_ros_diagnostic_bridge/launch/diagnostic_bridge.launch.py
1,543
Copyright (c) 2021 - for information on the respective copyright owner see the NOTICE file and/or the repository https://github.com/micro-ROS/system_modes. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
677
en
0.845681
""" DriverFactory class Note: Change this class as you add support for: 1. SauceLabs/BrowserStack 2. More browsers like Opera """ import dotenv,os,sys,requests,json from datetime import datetime from selenium import webdriver from selenium.webdriver.common.keys import Keys from selenium.webdriver.common.desired_capabilities import DesiredCapabilities from selenium.webdriver.chrome import service from selenium.webdriver.remote.webdriver import RemoteConnection from conf import opera_browser_conf class DriverFactory(): def __init__(self,browser='ff',browser_version=None,os_name=None): "Constructor for the Driver factory" self.browser=browser self.browser_version=browser_version self.os_name=os_name def get_web_driver(self,remote_flag,os_name,os_version,browser,browser_version): "Return the appropriate driver" if (remote_flag.lower() == 'n'): web_driver = self.run_local(os_name,os_version,browser,browser_version) else: print("DriverFactory does not know the browser: ",browser) web_driver = None return web_driver def run_local(self,os_name,os_version,browser,browser_version): "Return the local driver" local_driver = None if browser.lower() == "ff" or browser.lower() == 'firefox': local_driver = webdriver.Firefox() elif browser.lower() == "ie": local_driver = webdriver.Ie() elif browser.lower() == "chrome": local_driver = webdriver.Chrome() elif browser.lower() == "opera": opera_options = None try: opera_browser_location = opera_browser_conf.location options = webdriver.ChromeOptions() options.binary_location = opera_browser_location # path to opera executable local_driver = webdriver.Opera(options=options) except Exception as e: print("\nException when trying to get remote webdriver:%s"%sys.modules[__name__]) print("Python says:%s"%str(e)) if 'no Opera binary' in str(e): print("SOLUTION: It looks like you are trying to use Opera Browser. Please update Opera Browser location under conf/opera_browser_conf.\n") elif browser.lower() == "safari": local_driver = webdriver.Safari() return local_driver def get_firefox_driver(self): "Return the Firefox driver" driver = webdriver.Firefox(firefox_profile=self.get_firefox_profile()) return driver def get_firefox_profile(self): "Return a firefox profile" return self.set_firefox_profile() def set_firefox_profile(self): "Setup firefox with the right preferences and return a profile" try: self.download_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','downloads')) if not os.path.exists(self.download_dir): os.makedirs(self.download_dir) except Exception as e: print("Exception when trying to set directory structure") print(str(e)) profile = webdriver.firefox.firefox_profile.FirefoxProfile() set_pref = profile.set_preference set_pref('browser.download.folderList', 2) set_pref('browser.download.dir', self.download_dir) set_pref('browser.download.useDownloadDir', True) set_pref('browser.helperApps.alwaysAsk.force', False) set_pref('browser.helperApps.neverAsk.openFile', 'text/csv,application/octet-stream,application/pdf') set_pref('browser.helperApps.neverAsk.saveToDisk', 'text/csv,application/vnd.ms-excel,application/pdf,application/csv,application/octet-stream') set_pref('plugin.disable_full_page_plugin_for_types', 'application/pdf') set_pref('pdfjs.disabled',True) return profile
QA/page_objects/DriverFactory.py
4,018
Constructor for the Driver factory Return the Firefox driver Return a firefox profile Return the appropriate driver Return the local driver Setup firefox with the right preferences and return a profile DriverFactory class Note: Change this class as you add support for: 1. SauceLabs/BrowserStack 2. More browsers like Opera path to opera executable
350
en
0.768656
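A short usage sketch for the DriverFactory above, running Chrome locally; the import path, URL, and os/version strings are assumptions (run_local() only switches on the browser name).

# Illustrative local run; the OS name/version arguments are placeholders
# because run_local() only inspects the browser name.
from page_objects.DriverFactory import DriverFactory  # assumed import path

factory = DriverFactory(browser='chrome')
driver = factory.get_web_driver(remote_flag='n',
                                os_name='windows',
                                os_version='10',
                                browser='chrome',
                                browser_version='latest')
driver.get('https://example.com')
driver.quit()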
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 """Define a class for creating the jailed context.""" import os import shutil from subprocess import run, PIPE from retry.api import retry_call from framework.defs import API_USOCKET_NAME, FC_BINARY_NAME, \ JAILER_DEFAULT_CHROOT class JailerContext: """Represents jailer configuration and contains jailer helper functions. Each microvm will have a jailer configuration associated with it. """ # Keep in sync with parameters from code base. jailer_id = None exec_file = None numa_node = None uid = None gid = None chroot_base = None netns = None daemonize = None seccomp_level = None def __init__( self, jailer_id, exec_file, numa_node=0, uid=1234, gid=1234, chroot_base=JAILER_DEFAULT_CHROOT, netns=None, daemonize=True, seccomp_level=2 ): """Set up jailer fields. This plays the role of a default constructor as it populates the jailer's fields with some default values. Each field can be further adjusted by each test even with None values. """ self.jailer_id = jailer_id self.exec_file = exec_file self.numa_node = numa_node self.uid = uid self.gid = gid self.chroot_base = chroot_base self.netns = netns if netns is not None else jailer_id self.daemonize = daemonize self.seccomp_level = seccomp_level def __del__(self): """Cleanup this jailer context.""" self.cleanup() def construct_param_list(self): """Create the list of parameters we want the jailer to start with. We want to be able to vary any parameter even the required ones as we might want to add integration tests that validate the enforcement of mandatory arguments. """ jailer_param_list = [] # Pretty please, try to keep the same order as in the code base. if self.jailer_id is not None: jailer_param_list.extend(['--id', str(self.jailer_id)]) if self.exec_file is not None: jailer_param_list.extend(['--exec-file', str(self.exec_file)]) if self.numa_node is not None: jailer_param_list.extend(['--node', str(self.numa_node)]) if self.uid is not None: jailer_param_list.extend(['--uid', str(self.uid)]) if self.gid is not None: jailer_param_list.extend(['--gid', str(self.gid)]) if self.chroot_base is not None: jailer_param_list.extend( ['--chroot-base-dir', str(self.chroot_base)] ) if self.netns is not None: jailer_param_list.extend(['--netns', str(self.netns_file_path())]) if self.daemonize: jailer_param_list.append('--daemonize') if self.seccomp_level is not None: jailer_param_list.extend( ['--seccomp-level', str(self.seccomp_level)] ) return jailer_param_list def chroot_base_with_id(self): """Return the MicroVM chroot base + MicroVM ID.""" return os.path.join( self.chroot_base if self.chroot_base is not None else JAILER_DEFAULT_CHROOT, FC_BINARY_NAME, self.jailer_id ) def api_socket_path(self): """Return the MicroVM API socket path.""" return os.path.join(self.chroot_path(), API_USOCKET_NAME) def chroot_path(self): """Return the MicroVM chroot path.""" return os.path.join(self.chroot_base_with_id(), 'root') def jailed_path(self, file_path, create=False): """Create a hard link owned by uid:gid. Create a hard link to the specified file, changes the owner to uid:gid, and returns a path to the link which is valid within the jail. 
""" file_name = os.path.basename(file_path) global_p = os.path.join(self.chroot_path(), file_name) jailed_p = os.path.join("/", file_name) if create: cmd = 'ln -f {} {}'.format(file_path, global_p) run(cmd, shell=True, check=True) cmd = 'chown {}:{} {}'.format(self.uid, self.gid, global_p) run(cmd, shell=True, check=True) return jailed_p def netns_file_path(self): """Get the host netns file path for a jailer context. Returns the path on the host to the file which represents the netns, and which must be passed to the jailer as the value of the --netns parameter, when in use. """ if self.netns: return '/var/run/netns/{}'.format(self.netns) return None def netns_cmd_prefix(self): """Return the jailer context netns file prefix.""" if self.netns: return 'ip netns exec {} '.format(self.netns) return '' def setup(self): """Set up this jailer context.""" os.makedirs( self.chroot_base if self.chroot_base is not None else JAILER_DEFAULT_CHROOT, exist_ok=True ) if self.netns: run('ip netns add {}'.format(self.netns), shell=True, check=True) def cleanup(self): """Clean up this jailer context.""" shutil.rmtree(self.chroot_base_with_id(), ignore_errors=True) if self.netns: _ = run( 'ip netns del {}'.format(self.netns), shell=True, stderr=PIPE ) # Remove the cgroup folders associated with this microvm. # The base /sys/fs/cgroup/<controller>/firecracker folder will remain, # because we can't remove it unless we're sure there's no other running # microVM. # Firecracker is interested in these 3 cgroups for the moment. controllers = ('cpu', 'cpuset', 'pids') for controller in controllers: # Obtain the tasks from each cgroup and wait on them before # removing the microvm's associated cgroup folder. try: retry_call( f=self._kill_crgoup_tasks, fargs=[controller], exceptions=TimeoutError, max_delay=5 ) except TimeoutError: pass # As the files inside a cgroup aren't real, they can't need # to be removed, that is why 'rm -rf' and 'rmdir' fail. # We only need to remove the cgroup directories. The "-depth" # argument tells find to do a depth first recursion, so that # we remove any sub cgroups first if they are there. back_cmd = r'-depth -type d -exec rmdir {} \;' cmd = 'find /sys/fs/cgroup/{}/{}/{} {}'.format( controller, FC_BINARY_NAME, self.jailer_id, back_cmd ) # We do not need to know if it succeeded or not; afterall, we are # trying to clean up resources created by the jailer itself not # the testing system. _ = run(cmd, shell=True, stderr=PIPE) def _kill_crgoup_tasks(self, controller): """Simulate wait on pid. Read the tasks file and stay there until /proc/{pid} disappears. The retry function that calls this code makes sure we do not timeout. """ tasks_file = '/sys/fs/cgroup/{}/{}/{}/tasks'.format( controller, FC_BINARY_NAME, self.jailer_id ) # If tests do not call start on machines, the cgroups will not be # created. if not os.path.exists(tasks_file): return True cmd = 'cat {}'.format(tasks_file) tasks = run(cmd, shell=True, stdout=PIPE).stdout.decode('utf-8') tasks_split = tasks.splitlines() for task in tasks_split: if os.path.exists("/proc/{}".format(task)): raise TimeoutError return True
tests/framework/jailer.py
8,191
Represents jailer configuration and contains jailer helper functions. Each microvm will have a jailer configuration associated with it. Cleanup this jailer context. Set up jailer fields. This plays the role of a default constructor as it populates the jailer's fields with some default values. Each field can be further adjusted by each test even with None values. Simulate wait on pid. Read the tasks file and stay there until /proc/{pid} disappears. The retry function that calls this code makes sure we do not timeout. Return the MicroVM API socket path. Return the MicroVM chroot base + MicroVM ID. Return the MicroVM chroot path. Clean up this jailer context. Create the list of parameters we want the jailer to start with. We want to be able to vary any parameter even the required ones as we might want to add integration tests that validate the enforcement of mandatory arguments. Create a hard link owned by uid:gid. Create a hard link to the specified file, changes the owner to uid:gid, and returns a path to the link which is valid within the jail. Return the jailer context netns file prefix. Get the host netns file path for a jailer context. Returns the path on the host to the file which represents the netns, and which must be passed to the jailer as the value of the --netns parameter, when in use. Set up this jailer context. Define a class for creating the jailed context. Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: Apache-2.0 Keep in sync with parameters from code base. Pretty please, try to keep the same order as in the code base. Remove the cgroup folders associated with this microvm. The base /sys/fs/cgroup/<controller>/firecracker folder will remain, because we can't remove it unless we're sure there's no other running microVM. Firecracker is interested in these 3 cgroups for the moment. Obtain the tasks from each cgroup and wait on them before removing the microvm's associated cgroup folder. As the files inside a cgroup aren't real, they can't need to be removed, that is why 'rm -rf' and 'rmdir' fail. We only need to remove the cgroup directories. The "-depth" argument tells find to do a depth first recursion, so that we remove any sub cgroups first if they are there. We do not need to know if it succeeded or not; afterall, we are trying to clean up resources created by the jailer itself not the testing system. If tests do not call start on machines, the cgroups will not be created.
2,489
en
0.879739
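A minimal sketch of how a test might drive the JailerContext above; the jailer id and binary path are placeholders, and setup()/cleanup() assume the root privileges the test framework normally runs with.

# Illustrative JailerContext use; the id and exec_file values are placeholders.
from framework.jailer import JailerContext

jailer = JailerContext(
    jailer_id='vm-test-0001',
    exec_file='/usr/local/bin/firecracker',
    daemonize=False,
)
# Inspect the command-line arguments the jailer would be started with,
# and the chroot path the microVM would live in.
print(jailer.construct_param_list())
print(jailer.chroot_path())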
# Copyright 2022 UW-IT, University of Washington # SPDX-License-Identifier: Apache-2.0 import logging import traceback from myuw.dao.calendar import api_request from myuw.views.api import ProtectedAPI from myuw.views.error import handle_exception from myuw.views import prefetch_resources from myuw.logger.timer import Timer from myuw.logger.logresp import log_api_call logger = logging.getLogger(__name__) class DepartmentalCalendar(ProtectedAPI): def get(self, request, *args, **kwargs): timer = Timer() try: prefetch_resources(request, prefetch_group=True, prefetch_enrollment=True) response = api_request(request) log_api_call(timer, request, "Get DepartmentalCalendar") return self.json_response(response) except Exception: return handle_exception(logger, timer, traceback)
myuw/views/api/calendar.py
933
Copyright 2022 UW-IT, University of Washington SPDX-License-Identifier: Apache-2.0
82
en
0.305151
from django import forms
from django.forms import ModelForm

from .models import Review


class ReviewForm(ModelForm):
    required_css_class = 'required'

    def __init__(self, *args, **kwargs):
        """
        The user object is passed to the form in kwargs by the view. It is
        removed from kwargs before the superclass form object is
        instantiated, because our form needs the user object and its
        superclass does not.
        """
        self.user = kwargs.pop('user', None)
        super(ReviewForm, self).__init__(*args, **kwargs)

    class Meta:
        model = Review
        fields = [
            'title',
            'review',
            'type',
            'book'
        ]

    def clean_book(self, *args, **kwargs):
        """
        This method checks whether the user has already reviewed the selected
        book. As per the Django docs, exists() is an efficient way of
        checking this.
        """
        book = self.cleaned_data.get("book")
        if Review.objects.filter(book=book, author=self.user).exists():
            raise forms.ValidationError("Book already reviewed by user {}".format(self.user))
        else:
            return book
reviews/forms.py
1,166
The user object is passed to the form in kwargs by the view. It is removed from kwargs before the superclass form object is instantiated, because our form needs the user object and its superclass does not. This method checks whether the user has already reviewed the selected book. As per the Django docs, exists() is an efficient way of checking this.
353
en
0.963058
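A minimal sketch of the view side of the pattern described in __init__ above: the request user is handed to the form through the user kwarg so clean_book() can check for an existing review. The view name, URL name, and template path are assumptions, not part of the app.

# Hypothetical view showing how the `user` kwarg reaches ReviewForm.
from django.shortcuts import redirect, render

from .forms import ReviewForm


def create_review(request):
    form = ReviewForm(request.POST or None, user=request.user)
    if request.method == 'POST' and form.is_valid():
        review = form.save(commit=False)
        review.author = request.user  # clean_book() compares against this field
        review.save()
        return redirect('review-list')  # assumed URL name
    return render(request, 'reviews/review_form.html', {'form': form})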
import warnings warnings.simplefilter("ignore", category=FutureWarning) from pmaf.biome.essentials._metakit import EssentialFeatureMetabase from pmaf.biome.essentials._base import EssentialBackboneBase from pmaf.internal._constants import ( AVAIL_TAXONOMY_NOTATIONS, jRegexGG, jRegexQIIME, BIOM_TAXONOMY_NAMES, VALID_RANKS, ) from pmaf.internal._shared import ( generate_lineages_from_taxa, get_rank_upto, indentify_taxon_notation, validate_ranks, extract_valid_ranks, cols2ranks, ) from collections import defaultdict from os import path import pandas as pd import numpy as np import biom from typing import Union, Sequence, Tuple, Any, Optional from pmaf.internal._typing import AnyGenericIdentifier, Mapper class RepTaxonomy(EssentialBackboneBase, EssentialFeatureMetabase): """An `essential` class for handling taxonomy data.""" def __init__( self, taxonomy: Union[pd.DataFrame, pd.Series, str], taxonomy_columns: Union[str, int, Sequence[Union[int, str]]] = None, **kwargs: Any ) -> None: """Constructor for :class:`.RepTaxonomy` Parameters ---------- taxonomy Data containing feature taxonomy taxonomy_columns Column(s) containing taxonomy data kwargs Passed to :func:`~pandas.read_csv` or :mod:`biome` loader. """ tmp_metadata = kwargs.pop("metadata", {}) self.__avail_ranks = [] self.__internal_taxonomy = None if isinstance(taxonomy, pd.DataFrame): if taxonomy.shape[0] > 0: if taxonomy.shape[1] > 1: if validate_ranks(list(taxonomy.columns.values), VALID_RANKS): tmp_taxonomy = taxonomy else: raise ValueError( "Provided `taxonomy` Datafame has invalid ranks." ) else: tmp_taxonomy = taxonomy.iloc[:, 0] else: raise ValueError("Provided `taxonomy` Datafame is invalid.") elif isinstance(taxonomy, pd.Series): if taxonomy.shape[0] > 0: tmp_taxonomy = taxonomy else: raise ValueError("Provided `taxonomy` Series is invalid.") elif isinstance(taxonomy, str): if path.isfile(taxonomy): file_extension = path.splitext(taxonomy)[-1].lower() if file_extension in [".csv", ".tsv"]: if taxonomy_columns is None: tmp_taxonomy = pd.read_csv( taxonomy, sep=kwargs.pop("sep", ","), header=kwargs.pop("header", "infer"), index_col=kwargs.pop("index_col", None), ) else: if isinstance(taxonomy_columns, int): tmp_taxonomy = pd.read_csv( taxonomy, sep=kwargs.pop("sep", ","), header=kwargs.pop("header", "infer"), index_col=kwargs.pop("index_col", None), ).iloc[:, taxonomy_columns] else: tmp_taxonomy = pd.read_csv( taxonomy, sep=kwargs.pop("sep", ","), header=kwargs.pop("header", "infer"), index_col=kwargs.pop("index_col", None), ).loc[:, taxonomy_columns] elif file_extension in [".biom", ".biome"]: tmp_taxonomy, new_metadata = self.__load_biom(taxonomy, **kwargs) tmp_metadata.update({"biom": new_metadata}) else: raise NotImplementedError("File type is not supported.") else: raise FileNotFoundError("Provided `taxonomy` file path is invalid.") else: raise TypeError("Provided `taxonomy` has invalid type.") self.__init_internal_taxonomy(tmp_taxonomy, **kwargs) super().__init__(metadata=tmp_metadata, **kwargs) @classmethod def from_csv( cls, filepath: str, taxonomy_columns: Union[str, int, Sequence[Union[int, str]]] = None, **kwargs: Any ) -> "RepTaxonomy": """Factory method to construct a :class:`.RepTaxonomy` from CSV file. Parameters ---------- filepath Path to .csv File taxonomy_columns Column(s) containing taxonomy data kwargs Passed to the constructor. 
filepath: Returns ------- Instance of class:`.RepTaxonomy` """ if taxonomy_columns is None: tmp_taxonomy = pd.read_csv(filepath, **kwargs) else: if isinstance(taxonomy_columns, int): tmp_taxonomy = pd.read_csv(filepath, **kwargs).iloc[:, taxonomy_columns] else: tmp_taxonomy = pd.read_csv(filepath, **kwargs).loc[:, taxonomy_columns] tmp_metadata = kwargs.pop("metadata", {}) tmp_metadata.update({"filepath": path.abspath(filepath)}) return cls(taxonomy=tmp_taxonomy, metadata=tmp_metadata, **kwargs) @classmethod def from_biom(cls, filepath: str, **kwargs: Any) -> "RepTaxonomy": """Factory method to construct a :class:`.RepTaxonomy` from :mod:`biom` file. Parameters ---------- filepath :mod:`biom` file path. kwargs Passed to the constructor. Returns ------- Instance of class:`.RepTaxonomy` """ taxonomy_frame, new_metadata = cls.__load_biom(filepath, **kwargs) tmp_metadata = kwargs.pop("metadata", {}) tmp_metadata.update({"biom": new_metadata}) return cls(taxonomy=taxonomy_frame, metadata=tmp_metadata, **kwargs) @classmethod def __load_biom(cls, filepath: str, **kwargs: Any) -> Tuple[pd.DataFrame, dict]: """Actual private method to process :mod:`biom` file. Parameters ---------- filepath :mod:`biom` file path. kwargs Compatibility """ biom_file = biom.load_table(filepath) if biom_file.metadata(axis="observation") is not None: obs_data = biom_file.metadata_to_dataframe("observation") col_names = list(obs_data.columns.values) col_names_low = [col.lower() for col in col_names] avail_col_names = [ colname for tax_name in BIOM_TAXONOMY_NAMES for colname in col_names_low if colname[::-1].find(tax_name[::-1]) < 3 and colname[::-1].find(tax_name[::-1]) > -1 ] metadata_cols = [ col for col in col_names if col.lower() not in avail_col_names ] if len(avail_col_names) == 1: tmp_col_index = col_names_low.index(avail_col_names[0]) taxonomy_frame = obs_data[col_names[tmp_col_index]] else: taxonomy_frame = obs_data tmp_metadata = obs_data.loc[:, metadata_cols].to_dict() return taxonomy_frame, tmp_metadata else: raise ValueError("Biom file does not contain observation metadata.") def _remove_features_by_id( self, ids: AnyGenericIdentifier, **kwargs: Any ) -> Optional[AnyGenericIdentifier]: """Remove features by features ids and ratify action. Parameters ---------- ids Feature identifiers kwargs Compatibility """ tmp_ids = np.asarray(ids, dtype=self.__internal_taxonomy.index.dtype) if len(tmp_ids) > 0: self.__internal_taxonomy.drop(tmp_ids, inplace=True) return self._ratify_action("_remove_features_by_id", ids, **kwargs) def _merge_features_by_map( self, map_dict: Mapper, done: bool = False, **kwargs: Any ) -> Optional[Mapper]: """Merge features and ratify action. Parameters ---------- map_dict Map to use for merging done Whether merging was completed or not. Compatibility. kwargs Compatibility """ if not done: raise NotImplementedError if map_dict: return self._ratify_action( "_merge_features_by_map", map_dict, _annotations=self.__internal_taxonomy.loc[:, "lineage"].to_dict(), **kwargs ) def drop_feature_by_id( self, ids: AnyGenericIdentifier, **kwargs: Any ) -> Optional[AnyGenericIdentifier]: """Remove features by feature `ids`. 
Parameters ---------- ids Feature identifiers kwargs Compatibility """ target_ids = np.asarray(ids) if self.xrid.isin(target_ids).sum() == len(target_ids): return self._remove_features_by_id(target_ids, **kwargs) else: raise ValueError("Invalid feature ids are provided.") def get_taxonomy_by_id( self, ids: Optional[AnyGenericIdentifier] = None ) -> pd.DataFrame: """Get taxonomy :class:`~pandas.DataFrame` by feature `ids`. Parameters ---------- ids Either feature indices or None for all. Returns ------- class:`pandas.DataFrame` with taxonomy data """ if ids is None: target_ids = self.xrid else: target_ids = np.asarray(ids) if self.xrid.isin(target_ids).sum() <= len(target_ids): return self.__internal_taxonomy.loc[target_ids, self.__avail_ranks] else: raise ValueError("Invalid feature ids are provided.") def get_lineage_by_id( self, ids: Optional[AnyGenericIdentifier] = None, missing_rank: bool = False, desired_ranks: Union[bool, Sequence[str]] = False, drop_ranks: Union[bool, Sequence[str]] = False, **kwargs: Any ) -> pd.Series: """Get taxonomy lineages by feature `ids`. Parameters ---------- ids Either feature indices or None for all. missing_rank If True will generate prefix like `s__` or `d__` desired_ranks List of desired ranks to generate. If False then will generate all main ranks drop_ranks List of ranks to drop from desired ranks. This parameter only useful if `missing_rank` is True kwargs Compatibility. Returns ------- class:`pandas.Series` with consensus lineages and corresponding IDs """ if ids is None: target_ids = self.xrid else: target_ids = np.asarray(ids) tmp_desired_ranks = VALID_RANKS if desired_ranks is False else desired_ranks total_valid_rids = self.xrid.isin(target_ids).sum() if total_valid_rids == len(target_ids): return generate_lineages_from_taxa( self.__internal_taxonomy.loc[target_ids], missing_rank, tmp_desired_ranks, drop_ranks, ) elif total_valid_rids < len(target_ids): return generate_lineages_from_taxa( self.__internal_taxonomy.loc[np.unique(target_ids)], missing_rank, tmp_desired_ranks, drop_ranks, ) else: raise ValueError("Invalid feature ids are provided.") def find_features_by_pattern( self, pattern_str: str, case_sensitive: bool = False, regex: bool = False ) -> np.ndarray: """Searches for features with taxa that matches `pattern_str` Parameters ---------- pattern_str Pattern to search for case_sensitive Case sensitive mode regex Use regular expressions Returns ------- class:`~numpy.ndarray` with indices """ return self.__internal_taxonomy[ self.__internal_taxonomy.loc[:, "lineage"].str.contains( pattern_str, case=case_sensitive, regex=regex ) ].index.values def drop_features_without_taxa( self, **kwargs: Any ) -> Optional[AnyGenericIdentifier]: """Remove features that do not contain taxonomy. Parameters ---------- kwargs Compatibility """ ids_to_drop = self.find_features_without_taxa() return self._remove_features_by_id(ids_to_drop, **kwargs) def drop_features_without_ranks( self, ranks: Sequence[str], any: bool = False, **kwargs: Any ) -> Optional[AnyGenericIdentifier]: # Done """Remove features that do not contain `ranks` Parameters ---------- ranks Ranks to look for any If True removes feature with single occurrence of missing rank. If False all `ranks` must be missing. 
kwargs Compatibility """ target_ranks = np.asarray(ranks) if self.__internal_taxonomy.columns.isin(target_ranks).sum() == len( target_ranks ): no_rank_mask = self.__internal_taxonomy.loc[:, ranks].isna() no_rank_mask_adjusted = ( no_rank_mask.any(axis=1) if any else no_rank_mask.all(axis=1) ) ids_to_drop = self.__internal_taxonomy.loc[no_rank_mask_adjusted].index return self._remove_features_by_id(ids_to_drop, **kwargs) else: raise ValueError("Invalid ranks are provided.") def merge_duplicated_features(self, **kwargs: Any) -> Optional[Mapper]: """Merge features with duplicated taxonomy. Parameters ---------- kwargs Compatibility """ ret = {} groupby = self.__internal_taxonomy.groupby("lineage") if any([len(group) > 1 for group in groupby.groups.values()]): tmp_feature_lineage = [] tmp_groups = [] group_indices = list(range(len(groupby.groups))) for lineage, feature_ids in groupby.groups.items(): tmp_feature_lineage.append(lineage) tmp_groups.append(list(feature_ids)) self.__init_internal_taxonomy( pd.Series(data=tmp_feature_lineage, index=group_indices) ) ret = dict(zip(group_indices, tmp_groups)) return self._merge_features_by_map(ret, True, **kwargs) def merge_features_by_rank(self, level: str, **kwargs: Any) -> Optional[Mapper]: """Merge features by taxonomic rank/level. Parameters ---------- level Taxonomic rank/level to use for merging. kwargs Compatibility """ ret = {} if not isinstance(level, str): raise TypeError("`rank` must have str type.") if level in self.__avail_ranks: target_ranks = get_rank_upto(self.avail_ranks, level, True) if target_ranks: tmp_lineages = generate_lineages_from_taxa( self.__internal_taxonomy, False, target_ranks, False ) groups = tmp_lineages.groupby(tmp_lineages) if len(groups.groups) > 1: tmp_feature_lineage = [] tmp_groups = [] group_indices = list(range(len(groups.groups))) for lineage, feature_ids in groups.groups.items(): tmp_feature_lineage.append(lineage) tmp_groups.append(list(feature_ids)) self.__init_internal_taxonomy( pd.Series(data=tmp_feature_lineage, index=group_indices) ) ret = dict(zip(group_indices, tmp_groups)) else: raise ValueError("Invalid rank are provided.") return self._merge_features_by_map(ret, True, **kwargs) def find_features_without_taxa(self) -> np.ndarray: """Find features without taxa. Returns ------- class:`~numpy.ndarray` with feature indices. """ return self.__internal_taxonomy.loc[ self.__internal_taxonomy.loc[:, VALID_RANKS].agg( lambda rank: len("".join(map(lambda x: (str(x or "")), rank))), axis=1 ) < 1 ].index.values def get_subset( self, rids: Optional[AnyGenericIdentifier] = None, *args, **kwargs: Any ) -> "RepTaxonomy": """Get subset of the :class:`.RepTaxonomy`. Parameters ---------- rids Feature identifiers. args Compatibility kwargs Compatibility Returns ------- class:`.RepTaxonomy` """ if rids is None: target_rids = self.xrid else: target_rids = np.asarray(rids).astype(self.__internal_taxonomy.index.dtype) if not self.xrid.isin(target_rids).sum() == len(target_rids): raise ValueError("Invalid feature ids are provided.") return type(self)( taxonomy=self.__internal_taxonomy.loc[target_rids, "lineage"], metadata=self.metadata, name=self.name, ) def _export( self, taxlike: str = "lineage", ascending: bool = True, **kwargs: Any ) -> Tuple[pd.Series, dict]: """Creates taxonomy for export. Parameters ---------- taxlike Generate taxonomy in format(currently only `lineage` is supported.) 
ascending Sorting kwargs Compatibility """ if taxlike == "lineage": return ( self.get_lineage_by_id(**kwargs).sort_values(ascending=ascending), kwargs, ) else: raise NotImplemented def export( self, output_fp: str, *args, _add_ext: bool = False, sep: str = ",", **kwargs: Any ) -> None: """Exports the taxonomy into the specified file. Parameters ---------- output_fp Export filepath args Compatibility _add_ext Add file extension or not. sep Delimiter kwargs Compatibility """ tmp_export, rkwarg = self._export(*args, **kwargs) if _add_ext: tmp_export.to_csv("{}.csv".format(output_fp), sep=sep) else: tmp_export.to_csv(output_fp, sep=sep) def copy(self) -> "RepTaxonomy": """Copy of the instance.""" return type(self)( taxonomy=self.__internal_taxonomy.loc[:, "lineage"], metadata=self.metadata, name=self.name, ) def __fix_taxon_names(self) -> None: """Fix invalid taxon names.""" def taxon_fixer(taxon): if taxon is not None and pd.notna(taxon): tmp_taxon_trimmed = taxon.lower().strip() if len(tmp_taxon_trimmed) > 0: if tmp_taxon_trimmed[0] == "[": tmp_taxon_trimmed = tmp_taxon_trimmed[1:] if tmp_taxon_trimmed[-1] == "]": tmp_taxon_trimmed = tmp_taxon_trimmed[:-1] return tmp_taxon_trimmed.capitalize() else: return None else: return None self.__internal_taxonomy.loc[:, VALID_RANKS] = self.__internal_taxonomy.loc[ :, VALID_RANKS ].applymap(taxon_fixer) def __reconstruct_internal_lineages(self) -> None: """Reconstruct the internal lineages.""" self.__internal_taxonomy.loc[:, "lineage"] = generate_lineages_from_taxa( self.__internal_taxonomy, True, self.__avail_ranks, False ) def __init_internal_taxonomy( self, taxonomy_data: Union[pd.Series, pd.DataFrame], taxonomy_notation: Optional[str] = "greengenes", order_ranks: Optional[Sequence[str]] = None, **kwargs: Any ) -> None: """Main method to initialize taxonomy. Parameters ---------- taxonomy_data Incoming parsed taxonomy data taxonomy_notation Taxonomy lineage notation style. Can be one of :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS` order_ranks List with the target rank order. Default is set to None. The 'silva' notation require `order_ranks`. kwargs Compatibility """ if isinstance(taxonomy_data, pd.Series): new_taxonomy = self.__init_taxonomy_from_lineages( taxonomy_data, taxonomy_notation, order_ranks ) elif isinstance(taxonomy_data, pd.DataFrame): if taxonomy_data.shape[1] == 1: taxonomy_data_series = pd.Series( data=taxonomy_data.iloc[:, 0], index=taxonomy_data.index ) new_taxonomy = self.__init_taxonomy_from_lineages( taxonomy_data_series, taxonomy_notation, order_ranks ) else: new_taxonomy = self.__init_taxonomy_from_frame( taxonomy_data, taxonomy_notation, order_ranks ) else: raise RuntimeError( "`taxonomy_data` must be either pd.Series or pd.Dataframe" ) if new_taxonomy is None: raise ValueError("Provided taxonomy is invalid.") # Assign newly constructed taxonomy to the self.__internal_taxonomy self.__internal_taxonomy = new_taxonomy self.__fix_taxon_names() # Fix incorrect taxa tmp_avail_ranks = [rank for rank in VALID_RANKS if rank in new_taxonomy.columns] self.__avail_ranks = [ rank for rank in tmp_avail_ranks if new_taxonomy.loc[:, rank].notna().any() ] # Reconstruct internal lineages for default greengenes notation self.__reconstruct_internal_lineages() self._init_state = True def __init_taxonomy_from_lineages( self, taxonomy_series: pd.Series, taxonomy_notation: Optional[str], order_ranks: Optional[Sequence[str]], ) -> pd.DataFrame: # Done """Main method that produces taxonomy dataframe from lineages. 
Parameters ---------- taxonomy_series :class:`pandas.Series` with taxonomy lineages taxonomy_notation Taxonomy lineage notation style. Can be one of :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS` order_ranks List with the target rank order. Default is set to None. The 'silva' notation require `order_ranks`. """ # Check if taxonomy is known and is available for parsing. Otherwise indentify_taxon_notation() will try to identify notation if taxonomy_notation in AVAIL_TAXONOMY_NOTATIONS: notation = taxonomy_notation else: # Get first lineage _sample for notation testing assuming the rest have the the same notations sample_taxon = taxonomy_series.iloc[0] # Identify notation of the lineage string notation = indentify_taxon_notation(sample_taxon) if order_ranks is not None: if all([rank in VALID_RANKS for rank in order_ranks]): target_order_ranks = order_ranks else: raise NotImplementedError else: target_order_ranks = VALID_RANKS if notation == "greengenes": lineages = taxonomy_series.reset_index().values.tolist() ordered_taxa_list = [] ordered_indices_list = [elem[0] for elem in lineages] for lineage in lineages: tmp_lineage = jRegexGG.findall(lineage[1]) tmp_taxa_dict = { elem[0]: elem[1] for elem in tmp_lineage if elem[0] in VALID_RANKS } for rank in VALID_RANKS: if rank not in tmp_taxa_dict.keys(): tmp_taxa_dict.update({rank: None}) tmp_taxa_ordered = [tmp_taxa_dict[rank] for rank in VALID_RANKS] ordered_taxa_list.append([None] + tmp_taxa_ordered) taxonomy = pd.DataFrame( index=ordered_indices_list, data=ordered_taxa_list, columns=["lineage"] + VALID_RANKS, ) return taxonomy elif notation == "qiime": lineages = taxonomy_series.reset_index().values.tolist() tmp_taxa_dict_list = [] tmp_ranks = set() for lineage in lineages: tmp_lineage = jRegexQIIME.findall(lineage[1]) tmp_lineage.sort(key=lambda x: x[0]) tmp_taxa_dict = defaultdict(None) tmp_taxa_dict[None] = lineage[0] for rank, taxon in tmp_lineage: tmp_taxa_dict[rank] = taxon tmp_ranks.add(rank) tmp_taxa_dict_list.append(dict(tmp_taxa_dict)) tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list) tmp_taxonomy_df.set_index(None, inplace=True) tmp_taxonomy_df = tmp_taxonomy_df.loc[:, sorted(list(tmp_ranks))] tmp_taxonomy_df.columns = [ rank for rank in target_order_ranks[::-1][: len(tmp_ranks)] ][::-1] for rank in VALID_RANKS: if rank not in tmp_taxonomy_df.columns: tmp_taxonomy_df.loc[:, rank] = None return tmp_taxonomy_df elif notation == "silva": lineages = taxonomy_series.reset_index().values.tolist() tmp_taxa_dict_list = [] tmp_ranks = set() for lineage in lineages: tmp_lineage = lineage[1].split(";") tmp_taxa_dict = defaultdict(None) tmp_taxa_dict[None] = lineage[0] for rank_i, taxon in enumerate(tmp_lineage): rank = target_order_ranks[rank_i] tmp_taxa_dict[rank] = taxon tmp_ranks.add(rank) tmp_taxa_dict_list.append(dict(tmp_taxa_dict)) tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list) tmp_taxonomy_df.set_index(None, inplace=True) tmp_rank_ordered = [ rank for rank in target_order_ranks if rank in VALID_RANKS ] tmp_taxonomy_df = tmp_taxonomy_df.loc[:, tmp_rank_ordered] tmp_taxonomy_df.columns = [ rank for rank in target_order_ranks[::-1][: len(tmp_ranks)] ][::-1] for rank in VALID_RANKS: if rank not in tmp_taxonomy_df.columns: tmp_taxonomy_df.loc[:, rank] = None return tmp_taxonomy_df else: raise NotImplementedError def __init_taxonomy_from_frame( self, taxonomy_dataframe: pd.DataFrame, taxonomy_notation: Optional[str], order_ranks: Optional[Sequence[str]], ) -> pd.DataFrame: # Done # For now only pass 
to _init_taxonomy_from_series """Main method that produces taxonomy sheet from dataframe. Parameters ---------- taxonomy_dataframe :class:`~pandas.DataFrame` with taxa split by ranks. taxonomy_notation Taxonomy lineage notation style. Can be one of :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS` order_ranks List with the target rank order. Default is set to None. The 'silva' notation require `order_ranks`. Returns ------- :class:`~pandas.DataFrame` """ valid_ranks = extract_valid_ranks(taxonomy_dataframe.columns, VALID_RANKS) if valid_ranks is not None: if len(valid_ranks) > 0: return pd.concat( [ taxonomy_dataframe, pd.DataFrame( data="", index=taxonomy_dataframe.index, columns=[ rank for rank in VALID_RANKS if rank not in valid_ranks ], ), ], axis=1, ) else: taxonomy_series = taxonomy_dataframe.apply( lambda taxa: ";".join(taxa.values.tolist()), axis=1 ) return self.__init_taxonomy_from_lineages( taxonomy_series, taxonomy_notation, order_ranks ) else: valid_ranks = cols2ranks(taxonomy_dataframe.columns) taxonomy_dataframe.columns = valid_ranks taxonomy_series = taxonomy_dataframe.apply( lambda taxa: ";".join([(t if isinstance(t,str) else '') for t in taxa.values]), axis=1 ) return self.__init_taxonomy_from_lineages( taxonomy_series, taxonomy_notation, order_ranks ) @property def avail_ranks(self) -> Sequence[str]: """List of available taxonomic ranks.""" return self.__avail_ranks @property def duplicated(self) -> pd.Index: """List of duplicated feature indices.""" return self.__internal_taxonomy.index[ self.__internal_taxonomy["lineage"].duplicated(keep=False) ] @property def data(self) -> pd.DataFrame: """Actual data representation as pd.DataFrame.""" return self.__internal_taxonomy @property def xrid(self) -> pd.Index: """Feature indices as pd.Index.""" return self.__internal_taxonomy.index
pmaf/biome/essentials/_taxonomy.py
30,666
An `essential` class for handling taxonomy data. Fix invalid taxon names. Constructor for :class:`.RepTaxonomy` Parameters ---------- taxonomy Data containing feature taxonomy taxonomy_columns Column(s) containing taxonomy data kwargs Passed to :func:`~pandas.read_csv` or :mod:`biome` loader. Main method to initialize taxonomy. Parameters ---------- taxonomy_data Incoming parsed taxonomy data taxonomy_notation Taxonomy lineage notation style. Can be one of :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS` order_ranks List with the target rank order. Default is set to None. The 'silva' notation require `order_ranks`. kwargs Compatibility Main method that produces taxonomy sheet from dataframe. Parameters ---------- taxonomy_dataframe :class:`~pandas.DataFrame` with taxa split by ranks. taxonomy_notation Taxonomy lineage notation style. Can be one of :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS` order_ranks List with the target rank order. Default is set to None. The 'silva' notation require `order_ranks`. Returns ------- :class:`~pandas.DataFrame` Main method that produces taxonomy dataframe from lineages. Parameters ---------- taxonomy_series :class:`pandas.Series` with taxonomy lineages taxonomy_notation Taxonomy lineage notation style. Can be one of :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS` order_ranks List with the target rank order. Default is set to None. The 'silva' notation require `order_ranks`. Actual private method to process :mod:`biom` file. Parameters ---------- filepath :mod:`biom` file path. kwargs Compatibility Reconstruct the internal lineages. Creates taxonomy for export. Parameters ---------- taxlike Generate taxonomy in format(currently only `lineage` is supported.) ascending Sorting kwargs Compatibility Merge features and ratify action. Parameters ---------- map_dict Map to use for merging done Whether merging was completed or not. Compatibility. kwargs Compatibility Remove features by features ids and ratify action. Parameters ---------- ids Feature identifiers kwargs Compatibility List of available taxonomic ranks. Copy of the instance. Actual data representation as pd.DataFrame. Remove features by feature `ids`. Parameters ---------- ids Feature identifiers kwargs Compatibility Remove features that do not contain `ranks` Parameters ---------- ranks Ranks to look for any If True removes feature with single occurrence of missing rank. If False all `ranks` must be missing. kwargs Compatibility Remove features that do not contain taxonomy. Parameters ---------- kwargs Compatibility List of duplicated feature indices. Exports the taxonomy into the specified file. Parameters ---------- output_fp Export filepath args Compatibility _add_ext Add file extension or not. sep Delimiter kwargs Compatibility Searches for features with taxa that matches `pattern_str` Parameters ---------- pattern_str Pattern to search for case_sensitive Case sensitive mode regex Use regular expressions Returns ------- class:`~numpy.ndarray` with indices Find features without taxa. Returns ------- class:`~numpy.ndarray` with feature indices. Factory method to construct a :class:`.RepTaxonomy` from :mod:`biom` file. Parameters ---------- filepath :mod:`biom` file path. kwargs Passed to the constructor. Returns ------- Instance of class:`.RepTaxonomy` Factory method to construct a :class:`.RepTaxonomy` from CSV file. Parameters ---------- filepath Path to .csv File taxonomy_columns Column(s) containing taxonomy data kwargs Passed to the constructor. 
filepath: Returns ------- Instance of class:`.RepTaxonomy` Get taxonomy lineages by feature `ids`. Parameters ---------- ids Either feature indices or None for all. missing_rank If True will generate prefix like `s__` or `d__` desired_ranks List of desired ranks to generate. If False then will generate all main ranks drop_ranks List of ranks to drop from desired ranks. This parameter only useful if `missing_rank` is True kwargs Compatibility. Returns ------- class:`pandas.Series` with consensus lineages and corresponding IDs Get subset of the :class:`.RepTaxonomy`. Parameters ---------- rids Feature identifiers. args Compatibility kwargs Compatibility Returns ------- class:`.RepTaxonomy` Get taxonomy :class:`~pandas.DataFrame` by feature `ids`. Parameters ---------- ids Either feature indices or None for all. Returns ------- class:`pandas.DataFrame` with taxonomy data Merge features with duplicated taxonomy. Parameters ---------- kwargs Compatibility Merge features by taxonomic rank/level. Parameters ---------- level Taxonomic rank/level to use for merging. kwargs Compatibility Feature indices as pd.Index. Done Assign newly constructed taxonomy to the self.__internal_taxonomy Fix incorrect taxa Reconstruct internal lineages for default greengenes notation Done Check if taxonomy is known and is available for parsing. Otherwise indentify_taxon_notation() will try to identify notation Get first lineage _sample for notation testing assuming the rest have the the same notations Identify notation of the lineage string Done For now only pass to _init_taxonomy_from_series
5,444
en
0.493231
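A minimal stand-alone sketch of the greengenes-style lineage parsing performed above. The regex, the rank letters, and the sample lineage are stand-ins for the module's own jRegexGG, VALID_RANKS, and input series, not the originals.

import re
import pandas as pd

# Stand-ins for the module's VALID_RANKS and jRegexGG (assumed shapes, not the real objects).
VALID_RANKS = ["d", "p", "c", "o", "f", "g", "s"]
GG_PATTERN = re.compile(r"([a-z])__([^;]*)")

def parse_greengenes(lineage):
    """Split one greengenes lineage string into a rank -> taxon dict, None for missing ranks."""
    found = dict(GG_PATTERN.findall(lineage))
    return {rank: (found.get(rank) or None) for rank in VALID_RANKS}

lineages = {"OTU_1": "d__Bacteria;p__Firmicutes;c__Bacilli;o__;f__;g__;s__"}
taxonomy = pd.DataFrame.from_dict(
    {idx: parse_greengenes(lin) for idx, lin in lineages.items()}, orient="index"
)
print(taxonomy)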
import pickle import numpy as np # pickle_file = 'experiment_pickle_12_0.15_5_0.075.p' pickle_file = 'experiment_pickle_12_0.1_5_0.075.p' content = pickle.load(open(pickle_file)) familys = content.keys() for family in familys: collected = [] measurements = content[family] for measurement in measurements: collected.append(np.mean(measurement[1])) print family, ':', round(np.median(collected), 3), '+-', round(np.percentile(collected, 75) - np.percentile(collected, 25), 3)
src/RQ4_exp/run_pickle.py
503
pickle_file = 'experiment_pickle_12_0.15_5_0.075.p'
51
en
0.858046
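The script above reduces each family to the median and inter-quartile spread of per-measurement means. A self-contained version of that summary, with made-up numbers standing in for the unpickled content, looks like this.

import numpy as np

# Made-up stand-in for content[family]; each entry is (label, list of values).
measurements = [("m1", [0.12, 0.15, 0.11]), ("m2", [0.20, 0.18]), ("m3", [0.16, 0.17, 0.19])]

collected = [np.mean(values) for _, values in measurements]
median = round(float(np.median(collected)), 3)
spread = round(float(np.percentile(collected, 75) - np.percentile(collected, 25)), 3)
print("family :", median, "+-", spread)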
# Copyright 2016-present CERN – European Organization for Nuclear Research # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import pandas as pd from numpy.testing import assert_equal, assert_almost_equal from qf_lib.backtesting.events.time_event.regular_time_event.market_close_event import MarketCloseEvent from qf_lib.backtesting.events.time_event.regular_time_event.market_open_event import MarketOpenEvent from qf_lib.common.enums.frequency import Frequency from qf_lib.common.enums.price_field import PriceField from qf_lib.common.utils.dateutils.date_format import DateFormat from qf_lib.common.utils.dateutils.string_to_date import str_to_date from qf_lib.containers.qf_data_array import QFDataArray from qf_lib_tests.integration_tests.backtesting.alpha_model_strategy_testers.test_alpha_model_strategy_for_stop_losses import \ TestAlphaModelStrategy class TestAlphaModelIntradayStrategy(TestAlphaModelStrategy): data_start_date = str_to_date("2014-12-25 00:00:00.00", DateFormat.FULL_ISO) data_end_date = str_to_date("2015-02-28 23:59:59.00", DateFormat.FULL_ISO) end_date = str_to_date("2015-02-28 13:30:00.00", DateFormat.FULL_ISO) frequency = Frequency.MIN_1 def test_stop_losses(self): expected_transactions_quantities = \ [8130, -127, 1, -8004, 7454, -58, -7396, 6900, -6900, 6390, -44, -6346, 5718, -36] result_transactions_quantities = [t.quantity for t in self.transactions] assert_equal(expected_transactions_quantities, result_transactions_quantities) expected_transactions_prices = [125, 130, 135, 235.6, 255, 260, 259.35, 280, 264.1, 285, 290, 282, 315, 320] result_transactions_prices = [t.price for t in self.transactions] assert_almost_equal(expected_transactions_prices, result_transactions_prices) expected_portfolio_values = [1024390, 1064659, 1064659, 1064659, 1104677, 1144697, 1184717, 1224737, 1264757, 1264757, 1264757, 1304777, 1344797, 1384817, 1424837, 1464857, 1464857, 1464857, 1504877, 1544897, 1584917, 1624937, 1664957, 1664957, 1664957, 1704977, 1744997, 1785017, 1825037, 1865057, 1865057, 1865057, 1905077, 1945097, 1985117, 1885867.4, 1908229.4, 1908229.4, 1908229.4, 1945325.4, 1982305.4, 2019285.4, 1918330, 1808620, 1808620, 1808620, 1827790, 1859608, 1891338, 1923068, 1954798, 1954798, 1954798, 1789802, 1806956, 1835438, 1863848, 1892258, 1892258] assert_almost_equal(expected_portfolio_values, list(self.portfolio.portfolio_eod_series())) def _make_mock_data_array(self, tickers, fields): all_dates_market_open = pd.date_range(start=self.data_start_date + MarketOpenEvent.trigger_time(), end=self.data_end_date + MarketOpenEvent.trigger_time(), freq="B") all_dates_market_close = pd.date_range(start=self.data_start_date + MarketCloseEvent.trigger_time() - Frequency.MIN_1.time_delta(), end=self.data_end_date + MarketCloseEvent.trigger_time() - Frequency.MIN_1.time_delta(), freq="B") num_of_dates = len(all_dates_market_open) num_of_tickers = len(tickers) num_of_fields = len(fields) start_value = 100.0 values = np.arange(start_value, num_of_dates * num_of_tickers * num_of_fields + 
start_value) reshaped_values = np.reshape(values, (num_of_dates, num_of_tickers, num_of_fields)) mocked_result_market_open = QFDataArray.create(all_dates_market_open, tickers, fields, data=reshaped_values) mocked_result_market_close = QFDataArray.create(all_dates_market_close, tickers, fields, data=reshaped_values) mocked_result_market_close.loc[:, :, PriceField.Low] -= 5.0 mocked_result_market_close.loc[:, :, PriceField.High] += 5.0 all_dates = all_dates_market_open.union(all_dates_market_close) mocked_result = QFDataArray.create(all_dates, tickers, fields) mocked_result.loc[all_dates_market_open, :, :] = mocked_result_market_open.loc[:, :, :] mocked_result.loc[all_dates_market_close, :, :] = mocked_result_market_close.loc[:, :, :] self._add_test_cases(mocked_result, tickers) return mocked_result def _add_test_cases(self, mocked_result, tickers): # single low price breaking the stop level mocked_result.loc[ str_to_date('2015-02-05 19:59:00.00', DateFormat.FULL_ISO), tickers[0], PriceField.Low] -= 15.0 # two consecutive low prices breaking the stop level mocked_result.loc[ str_to_date('2015-02-12 19:59:00.00', DateFormat.FULL_ISO), tickers[0], PriceField.Low] -= 15.0 mocked_result.loc[ str_to_date('2015-02-13 19:59:00.00', DateFormat.FULL_ISO), tickers[0], PriceField.Low] -= 15.0 # single open price breaking the stop level mocked_result.loc[ str_to_date('2015-02-23 19:59:00.00', DateFormat.FULL_ISO), tickers[0], PriceField.Low] -= 25.0 mocked_result.loc[str_to_date('2015-02-23 19:59:00.00', DateFormat.FULL_ISO), tickers[0], PriceField.Open] = \ mocked_result.loc[str_to_date('2015-02-23 19:59:00.00', DateFormat.FULL_ISO), tickers[0], PriceField.Low]
qf_lib_tests/integration_tests/backtesting/alpha_model_strategy_testers/test_alpha_model_strategy_for_stop_losses_intraday.py
5,979
Copyright 2016-present CERN – European Organization for Nuclear Research Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. single low price breaking the stop level two consecutive low prices breaking the stop level single open price breaking the stop level
764
en
0.860023
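The mock data array in the test is indexed by two business-day grids, one anchored at the market open and one at the minute before the close. A plain-pandas sketch of those grids, assuming a 13:30 open and a 20:00 close since the real trigger times come from qf_lib's MarketOpenEvent and MarketCloseEvent, is shown below.

import pandas as pd

# Assumed trigger times; the real ones are configured on MarketOpenEvent / MarketCloseEvent.
open_offset = pd.Timedelta(hours=13, minutes=30)
close_offset = pd.Timedelta(hours=20) - pd.Timedelta(minutes=1)  # last bar before the close

start, end = pd.Timestamp("2014-12-25"), pd.Timestamp("2015-01-05")
dates_market_open = pd.date_range(start + open_offset, end + open_offset, freq="B")
dates_market_close = pd.date_range(start + close_offset, end + close_offset, freq="B")

# The mock array is defined on the union of both grids, as in _make_mock_data_array.
all_dates = dates_market_open.union(dates_market_close)
print(len(dates_market_open), len(dates_market_close), len(all_dates))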
import json import os from djoser.conf import settings as djoser_settings from djoser.compat import get_user_email from django.utils.timezone import now from django.http import HttpResponse from rest_framework import status from rest_framework.decorators import api_view, authentication_classes, permission_classes, action from rest_framework.permissions import IsAuthenticated from rest_framework.response import Response from django.db import transaction from django.conf import settings from .authentication import WebpageTokenAuth from .models import AHJUserMaintains, AHJ, User, APIToken, Contact, PreferredContactMethod from .permissions import IsSuperuser from .serializers import UserSerializer from djoser.views import UserViewSet from .utils import get_enum_value_row, filter_dict_keys, ENUM_FIELDS @authentication_classes([WebpageTokenAuth]) @permission_classes([IsAuthenticated]) class ConfirmPasswordReset(UserViewSet): @action(["post"], detail=False) def reset_password_confirm(self, request, *args, **kwargs): serializer = self.get_serializer(data=request.data) serializer.is_valid(raise_exception=True) serializer.user.set_password(serializer.data["new_password"]) if hasattr(serializer.user, "last_login"): serializer.user.last_login = now() serializer.user.is_active = True # The purpose of overwriting this endpoint is to set users as active if performing password reset confirm. serializer.user.save() # The user had to access their email account to perform a password reset. if djoser_settings.PASSWORD_CHANGED_EMAIL_CONFIRMATION: context = {"user": serializer.user} to = [get_user_email(serializer.user)] djoser_settings.EMAIL.password_changed_confirmation(self.request, context).send(to) return Response(status=status.HTTP_204_NO_CONTENT) @api_view(['GET']) @authentication_classes([WebpageTokenAuth]) @permission_classes([IsAuthenticated]) def get_active_user(request): """ Endpoint for getting the active user through the authtoken """ return Response(UserSerializer(request.user, context={'is_public_view': False}).data, status=status.HTTP_200_OK) @api_view(['GET']) def get_single_user(request, username): """ Function view for getting a single user with the specified Username = username """ context = {'is_public_view': True} if request.auth is not None and request.user.Username == username: context['is_public_view'] = False try: user = User.objects.get(Username=username) return Response(UserSerializer(user, context=context).data, status=status.HTTP_200_OK) except Exception as e: return Response(str(e), status=status.HTTP_400_BAD_REQUEST) @api_view(['POST']) @authentication_classes([WebpageTokenAuth]) @permission_classes([IsAuthenticated]) def user_update(request): """ Update the user profile associated with the requesting user. 
""" changeable_user_fields = {'Username', 'PersonalBio', 'URL', 'CompanyAffiliation'} changeable_contact_fields = {'FirstName', 'LastName', 'URL', 'WorkPhone', 'PreferredContactMethod', 'Title'} user_data = filter_dict_keys(request.data, changeable_user_fields) contact_data = filter_dict_keys(request.data, changeable_contact_fields) for field in ENUM_FIELDS.intersection(contact_data.keys()): contact_data[field] = get_enum_value_row(field, contact_data[field]) user = request.user User.objects.filter(UserID=user.UserID).update(**user_data) Contact.objects.filter(ContactID=user.ContactID.ContactID).update(**contact_data) return Response('Success', status=status.HTTP_200_OK) @api_view(['GET']) @authentication_classes([WebpageTokenAuth]) @permission_classes([IsAuthenticated, IsSuperuser]) def create_api_token(request): try: user = request.user with transaction.atomic(): APIToken.objects.filter(user=user).delete() api_token = APIToken.objects.create(user=user) return Response({'auth_token': api_token.key}, status=status.HTTP_201_CREATED) except Exception as e: return Response(str(e), status=status.HTTP_400_BAD_REQUEST) @api_view(['POST']) @authentication_classes([WebpageTokenAuth]) @permission_classes([IsAuthenticated, IsSuperuser]) def set_ahj_maintainer(request): """ View to assign a user as a data maintainer of an AHJ Expects a Username and a the primary key of an AHJ (AHJPK) """ try: username = request.data['Username'] ahjpk = request.data['AHJPK'] user = User.objects.get(Username=username) ahj = AHJ.objects.get(AHJPK=ahjpk) maintainer_record = AHJUserMaintains.objects.filter(AHJPK=ahj, UserID=user) if maintainer_record.exists(): maintainer_record.update(MaintainerStatus=True) else: AHJUserMaintains.objects.create(UserID=user, AHJPK=ahj, MaintainerStatus=True) return Response(UserSerializer(user).data, status=status.HTTP_200_OK) except Exception as e: return Response(str(e), status=status.HTTP_400_BAD_REQUEST) @api_view(['POST']) @authentication_classes([WebpageTokenAuth]) @permission_classes([IsAuthenticated, IsSuperuser]) def remove_ahj_maintainer(request): """ View to revoke a user as a data maintainer of an AHJ Expects a user's webpage token and a the primary key of an AHJ (AHJPK) """ try: username = request.data['Username'] ahjpk = request.data['AHJPK'] user = User.objects.get(Username=username) ahj = AHJ.objects.get(AHJPK=ahjpk) AHJUserMaintains.objects.filter(AHJPK=ahj, UserID=user).update(MaintainerStatus=False) return Response(UserSerializer(user).data, status=status.HTTP_200_OK) except Exception as e: return Response(str(e), status=status.HTTP_400_BAD_REQUEST)
server/ahj_app/views_users.py
5,932
Endpoint for getting the active user through the authtoken Function view for getting a single user with the specified Username = username View to revoke a user as a data maintainer of an AHJ Expects a user's webpage token and a the primary key of an AHJ (AHJPK) View to assign a user as a data maintainer of an AHJ Expects a Username and a the primary key of an AHJ (AHJPK) Update the user profile associated with the requesting user. The purpose of overwriting this endpoint is to set users as active if performing password reset confirm. The user had to access their email account to perform a password reset.
613
en
0.916582
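user_update above splits one request payload into user fields and contact fields with filter_dict_keys from the project's utils module. The helper below is only an assumed equivalent, included to make the example self-contained.

# Assumed stand-in for ahj_app.utils.filter_dict_keys.
def filter_dict_keys(data, allowed):
    return {key: value for key, value in data.items() if key in allowed}

payload = {"Username": "new_name", "FirstName": "Ada", "UnknownField": "ignored"}
changeable_user_fields = {"Username", "PersonalBio", "URL", "CompanyAffiliation"}
changeable_contact_fields = {"FirstName", "LastName", "URL", "WorkPhone",
                             "PreferredContactMethod", "Title"}

user_data = filter_dict_keys(payload, changeable_user_fields)        # {'Username': 'new_name'}
contact_data = filter_dict_keys(payload, changeable_contact_fields)  # {'FirstName': 'Ada'}
print(user_data, contact_data)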
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from ... import _utilities from . import outputs __all__ = [ 'GetShareResult', 'AwaitableGetShareResult', 'get_share', ] @pulumi.output_type class GetShareResult: """ Represents a share on the Data Box Edge/Gateway device. """ def __init__(__self__, access_protocol=None, azure_container_info=None, client_access_rights=None, data_policy=None, description=None, id=None, monitoring_status=None, name=None, refresh_details=None, share_mappings=None, share_status=None, system_data=None, type=None, user_access_rights=None): if access_protocol and not isinstance(access_protocol, str): raise TypeError("Expected argument 'access_protocol' to be a str") pulumi.set(__self__, "access_protocol", access_protocol) if azure_container_info and not isinstance(azure_container_info, dict): raise TypeError("Expected argument 'azure_container_info' to be a dict") pulumi.set(__self__, "azure_container_info", azure_container_info) if client_access_rights and not isinstance(client_access_rights, list): raise TypeError("Expected argument 'client_access_rights' to be a list") pulumi.set(__self__, "client_access_rights", client_access_rights) if data_policy and not isinstance(data_policy, str): raise TypeError("Expected argument 'data_policy' to be a str") pulumi.set(__self__, "data_policy", data_policy) if description and not isinstance(description, str): raise TypeError("Expected argument 'description' to be a str") pulumi.set(__self__, "description", description) if id and not isinstance(id, str): raise TypeError("Expected argument 'id' to be a str") pulumi.set(__self__, "id", id) if monitoring_status and not isinstance(monitoring_status, str): raise TypeError("Expected argument 'monitoring_status' to be a str") pulumi.set(__self__, "monitoring_status", monitoring_status) if name and not isinstance(name, str): raise TypeError("Expected argument 'name' to be a str") pulumi.set(__self__, "name", name) if refresh_details and not isinstance(refresh_details, dict): raise TypeError("Expected argument 'refresh_details' to be a dict") pulumi.set(__self__, "refresh_details", refresh_details) if share_mappings and not isinstance(share_mappings, list): raise TypeError("Expected argument 'share_mappings' to be a list") pulumi.set(__self__, "share_mappings", share_mappings) if share_status and not isinstance(share_status, str): raise TypeError("Expected argument 'share_status' to be a str") pulumi.set(__self__, "share_status", share_status) if system_data and not isinstance(system_data, dict): raise TypeError("Expected argument 'system_data' to be a dict") pulumi.set(__self__, "system_data", system_data) if type and not isinstance(type, str): raise TypeError("Expected argument 'type' to be a str") pulumi.set(__self__, "type", type) if user_access_rights and not isinstance(user_access_rights, list): raise TypeError("Expected argument 'user_access_rights' to be a list") pulumi.set(__self__, "user_access_rights", user_access_rights) @property @pulumi.getter(name="accessProtocol") def access_protocol(self) -> str: """ Access protocol to be used by the share. 
""" return pulumi.get(self, "access_protocol") @property @pulumi.getter(name="azureContainerInfo") def azure_container_info(self) -> Optional['outputs.AzureContainerInfoResponse']: """ Azure container mapping for the share. """ return pulumi.get(self, "azure_container_info") @property @pulumi.getter(name="clientAccessRights") def client_access_rights(self) -> Optional[Sequence['outputs.ClientAccessRightResponse']]: """ List of IP addresses and corresponding access rights on the share(required for NFS protocol). """ return pulumi.get(self, "client_access_rights") @property @pulumi.getter(name="dataPolicy") def data_policy(self) -> Optional[str]: """ Data policy of the share. """ return pulumi.get(self, "data_policy") @property @pulumi.getter def description(self) -> Optional[str]: """ Description for the share. """ return pulumi.get(self, "description") @property @pulumi.getter def id(self) -> str: """ The path ID that uniquely identifies the object. """ return pulumi.get(self, "id") @property @pulumi.getter(name="monitoringStatus") def monitoring_status(self) -> str: """ Current monitoring status of the share. """ return pulumi.get(self, "monitoring_status") @property @pulumi.getter def name(self) -> str: """ The object name. """ return pulumi.get(self, "name") @property @pulumi.getter(name="refreshDetails") def refresh_details(self) -> Optional['outputs.RefreshDetailsResponse']: """ Details of the refresh job on this share. """ return pulumi.get(self, "refresh_details") @property @pulumi.getter(name="shareMappings") def share_mappings(self) -> Sequence['outputs.MountPointMapResponse']: """ Share mount point to the role. """ return pulumi.get(self, "share_mappings") @property @pulumi.getter(name="shareStatus") def share_status(self) -> str: """ Current status of the share. """ return pulumi.get(self, "share_status") @property @pulumi.getter(name="systemData") def system_data(self) -> 'outputs.SystemDataResponse': """ Share on ASE device """ return pulumi.get(self, "system_data") @property @pulumi.getter def type(self) -> str: """ The hierarchical type of the object. """ return pulumi.get(self, "type") @property @pulumi.getter(name="userAccessRights") def user_access_rights(self) -> Optional[Sequence['outputs.UserAccessRightResponse']]: """ Mapping of users and corresponding access rights on the share (required for SMB protocol). """ return pulumi.get(self, "user_access_rights") class AwaitableGetShareResult(GetShareResult): # pylint: disable=using-constant-test def __await__(self): if False: yield self return GetShareResult( access_protocol=self.access_protocol, azure_container_info=self.azure_container_info, client_access_rights=self.client_access_rights, data_policy=self.data_policy, description=self.description, id=self.id, monitoring_status=self.monitoring_status, name=self.name, refresh_details=self.refresh_details, share_mappings=self.share_mappings, share_status=self.share_status, system_data=self.system_data, type=self.type, user_access_rights=self.user_access_rights) def get_share(device_name: Optional[str] = None, name: Optional[str] = None, resource_group_name: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetShareResult: """ Represents a share on the Data Box Edge/Gateway device. :param str device_name: The device name. :param str name: The share name. :param str resource_group_name: The resource group name. 
""" __args__ = dict() __args__['deviceName'] = device_name __args__['name'] = name __args__['resourceGroupName'] = resource_group_name if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('azure-native:databoxedge/v20201201:getShare', __args__, opts=opts, typ=GetShareResult).value return AwaitableGetShareResult( access_protocol=__ret__.access_protocol, azure_container_info=__ret__.azure_container_info, client_access_rights=__ret__.client_access_rights, data_policy=__ret__.data_policy, description=__ret__.description, id=__ret__.id, monitoring_status=__ret__.monitoring_status, name=__ret__.name, refresh_details=__ret__.refresh_details, share_mappings=__ret__.share_mappings, share_status=__ret__.share_status, system_data=__ret__.system_data, type=__ret__.type, user_access_rights=__ret__.user_access_rights)
sdk/python/pulumi_azure_native/databoxedge/v20201201/get_share.py
9,175
Represents a share on the Data Box Edge/Gateway device. Access protocol to be used by the share. Azure container mapping for the share. List of IP addresses and corresponding access rights on the share(required for NFS protocol). Data policy of the share. Description for the share. Represents a share on the Data Box Edge/Gateway device. :param str device_name: The device name. :param str name: The share name. :param str resource_group_name: The resource group name. The path ID that uniquely identifies the object. Current monitoring status of the share. The object name. Details of the refresh job on this share. Share mount point to the role. Current status of the share. Share on ASE device The hierarchical type of the object. Mapping of users and corresponding access rights on the share (required for SMB protocol). coding=utf-8 *** WARNING: this file was generated by the Pulumi SDK Generator. *** *** Do not edit by hand unless you're certain you know what you are doing! *** pylint: disable=using-constant-test
1,029
en
0.810633
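A hedged usage sketch for the generated lookup above: the resource names are placeholders, and the invoke only resolves inside a Pulumi program with a configured Azure environment and an SDK build that still ships the v20201201 module.

import pulumi
from pulumi_azure_native.databoxedge.v20201201 import get_share

# Placeholder names; replace with an existing Data Box Edge device, share and resource group.
share = get_share(
    device_name="example-device",
    name="example-share",
    resource_group_name="example-rg",
)
pulumi.export("share_status", share.share_status)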
#!/usr/bin/env python import os from setuptools import setup here = os.path.abspath(os.path.dirname(__file__)) with open( os.path.join(here, "requirements.txt"), encoding="utf-8" ) as requirements_file: requirements = requirements_file.read().splitlines() with open( os.path.join(here, "requirements_dev.txt"), encoding="utf-8" ) as requirements_dev_file: requirements_dev = requirements_dev_file.read().splitlines() # split the developer requirements into setup and test requirements if not requirements_dev.count("") == 1 or requirements_dev.index("") == 0: raise SyntaxError( "requirements_dev.txt has the wrong format: setup and test " "requirements have to be separated by one blank line." ) requirements_dev_split = requirements_dev.index("") test_requirements = requirements_dev[ requirements_dev_split + 1 : ] # +1: skip empty line setup( project_urls={ "Documentation": "https://zfit-interface.readthedocs.io/", "Changelog": "https://zfit-interface.readthedocs.io/en/latest/changelog.html", "Issue Tracker": "https://github.com/zfit/zfit-interface/issues", }, install_requires=requirements, test_requirements=test_requirements, extras_require={"dev": requirements_dev}, use_scm_version=True, )
setup.py
1,303
!/usr/bin/env python split the developer requirements into setup and test requirements +1: skip empty line
106
en
0.667979
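The split above assumes requirements_dev.txt contains the setup requirements, exactly one blank line, then the test requirements. A stand-alone snippet mimicking that check on a literal string:

# Literal stand-in for the contents of requirements_dev.txt.
requirements_dev = "setuptools_scm\n\npytest\npytest-cov".splitlines()

if not requirements_dev.count("") == 1 or requirements_dev.index("") == 0:
    raise SyntaxError("setup and test requirements must be separated by one blank line")

split = requirements_dev.index("")
setup_requirements = requirements_dev[:split]
test_requirements = requirements_dev[split + 1:]  # +1: skip the blank line
print(setup_requirements, test_requirements)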
#!/usr/bin/env python2 # -*- coding: utf-8 -*- """ Created on Wed Jan 23 15:13:33 2019 @author: ifenty """ from __future__ import division import numpy as np import matplotlib.pylab as plt from .llc_array_conversion import llc_compact_to_tiles from .llc_array_conversion import llc_compact_to_faces from .llc_array_conversion import llc_faces_to_tiles from .llc_array_conversion import llc_faces_to_compact from .llc_array_conversion import llc_tiles_to_faces from .llc_array_conversion import llc_tiles_to_compact from .read_bin_llc import read_llc_to_compact, read_llc_to_faces, read_llc_to_tiles from .tile_plot import plot_tiles # Tests the read_bin_llc and llc_array_conversion routines # %% ### Load model grid coordinates (longitude, latitude) def run_read_bin_and_llc_conversion_test(llc_grid_dir, llc_lons_fname='XC.data', llc_hfacc_fname='hFacC.data', llc=90, llc_grid_filetype = '>f', make_plots=False): """ Runs test on the read_bin_llc and llc_conversion routines Parameters ---------- llc_grid_dir : string A string with the directory of the binary file to open llc_lons_fname : string A string with the name of the XC grid file [XC.data] llc_hfacc_fname : string A string with the name of the hfacC grid file [hFacC.data] llc : int the size of the llc grid. For ECCO v4, we use the llc90 domain so `llc` would be `90`. Default: 90 llc_grid_filetype: string the file type, default is big endian (>) 32 bit float (f) alternatively, ('<d') would be little endian (<) 64 bit float (d) Deafult: '>f' make_plots : boolean A boolean specifiying whether or not to make plots Deafult: False Returns ------- 1 : all tests passed 0 : at least one test failed """ # SET TEST RESULT = 1 TO START TEST_RESULT = 1 # %% ----------- TEST 1: 2D field XC FOM GRID FILE #%% 1a LOAD COMPACT tmpXC_c = read_llc_to_compact(llc_grid_dir, llc_lons_fname, llc=llc, filetype=llc_grid_filetype) tmpXC_f = read_llc_to_faces(llc_grid_dir, llc_lons_fname, llc=llc, filetype=llc_grid_filetype) tmpXC_t = read_llc_to_tiles(llc_grid_dir, llc_lons_fname, llc=llc, filetype=llc_grid_filetype) if make_plots: #plt.close('all') for f in range(1,6): plt.figure() plt.imshow(tmpXC_f[f]);plt.colorbar() plot_tiles(tmpXC_t) plt.draw() raw_input("Press Enter to continue...") #%% 1b CONVERT COMPACT TO FACES, TILES tmpXC_cf = llc_compact_to_faces(tmpXC_c) tmpXC_ct = llc_compact_to_tiles(tmpXC_c) for f in range(1,6): tmp = np.unique(tmpXC_f[f] - tmpXC_cf[f]) print ('unique diffs CF ', f, tmp) if len(tmp) != 1 or tmp[0] != 0: TEST_RESULT = 0 print ('failed on 1b-1') return TEST_RESULT tmp = np.unique(tmpXC_ct - tmpXC_t) print ('unique diffs for CT ', tmp) if len(tmp) != 1 or tmp[0] != 0: TEST_RESULT = 0 print ('failed on 1b-2') return TEST_RESULT #%% 1c CONVERT FACES TO TILES, COMPACT tmpXC_ft = llc_faces_to_tiles(tmpXC_f) tmpXC_fc = llc_faces_to_compact(tmpXC_f) # unique diff tests tmp = np.unique(tmpXC_t - tmpXC_ft) print ('unique diffs for FT ', tmp) if len(tmp) != 1 or tmp[0] != 0: TEST_RESULT = 0 print ('failed on 1c-1') return TEST_RESULT tmp = np.unique(tmpXC_fc - tmpXC_c) print ('unique diffs FC', tmp ) if len(tmp) != 1 or tmp[0] != 0: TEST_RESULT = 0 print ('failed on 1c-2') return TEST_RESULT #%% 1d CONVERT TILES to FACES, COMPACT tmpXC_tf = llc_tiles_to_faces(tmpXC_t) tmpXC_tc = llc_tiles_to_compact(tmpXC_t) # unique diff tests for f in range(1,6): tmp = np.unique(tmpXC_f[f] - tmpXC_tf[f]) print ('unique diffs for TF ', f, tmp) if len(tmp) != 1 or tmp[0] != 0: TEST_RESULT = 0 print ('failed on 1d-1') return TEST_RESULT tmp = np.unique(tmpXC_tc - 
tmpXC_c) print ('unique diffs TC', tmp) if len(tmp) != 1 or tmp[0] != 0: TEST_RESULT = 0 print ('failed on 1d-2') return TEST_RESULT #%% 1e CONVERT COMPACT TO FACES TO TILES TO FACES TO COMPACT tmpXC_cftfc = llc_faces_to_compact(llc_tiles_to_faces(llc_faces_to_tiles(llc_compact_to_faces(tmpXC_c)))) tmp = np.unique(tmpXC_cftfc - tmpXC_c) print ('unique diffs CFTFC', tmp) if len(tmp) != 1 or tmp[0] != 0: TEST_RESULT = 0 print ('failed on 1e') return TEST_RESULT # %% ----------- TEST 2: 3D fields HFACC FOM GRID FILE #%% 2a LOAD COMPACT tmpHF_c = read_llc_to_compact(llc_grid_dir, llc_hfacc_fname, llc=llc,nk=50, filetype=llc_grid_filetype) tmpHF_f = read_llc_to_faces(llc_grid_dir, llc_hfacc_fname, llc=llc, nk=50, filetype=llc_grid_filetype) tmpHF_t = read_llc_to_tiles(llc_grid_dir, llc_hfacc_fname, llc=llc, nk=50, filetype=llc_grid_filetype) tmpHF_c.shape if make_plots: #plt.close('all') plt.imshow(tmpHF_c[0,:]);plt.colorbar() plot_tiles(tmpHF_t[:,0,:]) plot_tiles(tmpHF_t[:,20,:]) plt.draw() raw_input("Press Enter to continue...") #%% 2b CONVERT COMPACT TO FACES, TILES tmpHF_cf = llc_compact_to_faces(tmpHF_c) tmpHF_ct = llc_compact_to_tiles(tmpHF_c) # unique diff tests for f in range(1,6): tmp = np.unique(tmpHF_f[f] - tmpHF_cf[f]) print ('unique diffs CF ', f, tmp) if len(tmp) != 1 or tmp[0] != 0: TEST_RESULT = 0 print ('failed on 2b-1') return TEST_RESULT tmp = np.unique(tmpHF_ct - tmpHF_t) print ('unique diffs CT ', tmp) if len(tmp) != 1 or tmp[0] != 0: TEST_RESULT = 0 print ('failed on 2b-2') return TEST_RESULT if make_plots: for k in [0, 20]: for f in range(1,6): plt.figure() plt.imshow(tmpHF_cf[f][k,:], origin='lower');plt.colorbar() plt.draw() raw_input("Press Enter to continue...") #%% 2c CONVERT FACES TO TILES, COMPACT tmpHF_ft = llc_faces_to_tiles(tmpHF_f) tmpHF_fc = llc_faces_to_compact(tmpHF_f) if make_plots: #plt.close('all') plot_tiles(tmpHF_ft[:,0,:]) plot_tiles(tmpHF_ft[:,20,:]) plt.draw() raw_input("Press Enter to continue...") # unique diff tests tmp = np.unique(tmpHF_t - tmpHF_ft) print ('unique diffs FT ', tmp) if len(tmp) != 1 or tmp[0] != 0: TEST_RESULT = 0 print ('failed on 2c-1') return TEST_RESULT tmp = np.unique(tmpHF_fc - tmpHF_c) print ('unique diffs FC', tmp) if len(tmp) != 1 or tmp[0] != 0: TEST_RESULT = 0 print ('failed on 2c-2') return TEST_RESULT #%% 2d CONVERT TILES to FACES, COMPACT tmpHF_tf = llc_tiles_to_faces(tmpHF_t) tmpHF_tc = llc_tiles_to_compact(tmpHF_t) if make_plots: #plt.close('all') for k in [0, 20]: for f in range(1,6): plt.figure() plt.imshow(tmpHF_tf[f][k,:], origin='lower');plt.colorbar() plt.draw() raw_input("Press Enter to continue...") # unique diff tests for f in range(1,6): tmp = np.unique(tmpHF_f[f] - tmpHF_tf[f]) print ('unique diffs TF ', f, tmp) if len(tmp) != 1 or tmp[0] != 0: TEST_RESULT = 0 print ('failed on 2d-1') return TEST_RESULT tmp = np.unique(tmpHF_tc - tmpHF_c) print ('unique diffs TC ', tmp) if len(tmp) != 1 or tmp[0] != 0: TEST_RESULT = 0 print ('failed on 2d-1') return TEST_RESULT #%% 2e CONVERT COMPACT TO FACES TO TILES TO FACES TO COMPACT tmpHF_cftfc = llc_faces_to_compact(llc_tiles_to_faces( llc_faces_to_tiles(llc_compact_to_faces(tmpHF_c)))) tmp = np.unique(tmpHF_cftfc - tmpHF_c) print ('unique diffs CFTFC ', tmp) if len(tmp) != 1 or tmp[0] != 0: TEST_RESULT = 0 print ('failed on 2e') return TEST_RESULT print ('YOU MADE IT THIS FAR, TESTS PASSED!') return TEST_RESULT ####################### ########################### #%% if __name__== "__main__": import sys import matplotlib sys.path.append('/Users/ifenty/ECCOv4-py/') 
import ecco_v4_py as ecco import matplotlib.pylab as plt llc_grid_dir = '/Volumes/ECCO_BASE/ECCO_v4r3/grid_llc90/' llc_lons_fname='XC.data' llc_hfacc_fname='hFacC.data' llc=90 llc_grid_filetype = '>f' make_plots=False #%% TEST_RESULT = ecco.run_read_bin_and_llc_conversion_test(llc_grid_dir, make_plots=True) print(TEST_RESULT)
ecco_v4_py/test_llc_array_loading_and_conversion.py
9,456
Runs test on the read_bin_llc and llc_conversion routines Parameters ---------- llc_grid_dir : string A string with the directory of the binary file to open llc_lons_fname : string A string with the name of the XC grid file [XC.data] llc_hfacc_fname : string A string with the name of the hfacC grid file [hFacC.data] llc : int the size of the llc grid. For ECCO v4, we use the llc90 domain so `llc` would be `90`. Default: 90 llc_grid_filetype: string the file type, default is big endian (>) 32 bit float (f) alternatively, ('<d') would be little endian (<) 64 bit float (d) Deafult: '>f' make_plots : boolean A boolean specifiying whether or not to make plots Deafult: False Returns ------- 1 : all tests passed 0 : at least one test failed Created on Wed Jan 23 15:13:33 2019 @author: ifenty !/usr/bin/env python2 -*- coding: utf-8 -*- Tests the read_bin_llc and llc_array_conversion routines %% Load model grid coordinates (longitude, latitude) SET TEST RESULT = 1 TO START %% ----------- TEST 1: 2D field XC FOM GRID FILE%% 1a LOAD COMPACTplt.close('all')%% 1b CONVERT COMPACT TO FACES, TILES%% 1c CONVERT FACES TO TILES, COMPACT unique diff tests %% 1d CONVERT TILES to FACES, COMPACT unique diff tests %% 1e CONVERT COMPACT TO FACES TO TILES TO FACES TO COMPACT %% ----------- TEST 2: 3D fields HFACC FOM GRID FILE%% 2a LOAD COMPACTplt.close('all')%% 2b CONVERT COMPACT TO FACES, TILES unique diff tests %% 2c CONVERT FACES TO TILES, COMPACTplt.close('all') unique diff tests %% 2d CONVERT TILES to FACES, COMPACT plt.close('all') unique diff tests %% 2e CONVERT COMPACT TO FACES TO TILES TO FACES TO COMPACT %%%%
1,700
en
0.646932
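Every conversion above is validated with the same pattern: the element-wise difference between the round-tripped array and the original must have a single unique value, zero. The same check on ordinary numpy arrays (a stand-in for the compact/faces/tiles data) looks like this.

import numpy as np

original = np.arange(12.0).reshape(3, 4)
round_tripped = original.copy()  # stand-in for compact -> faces -> tiles -> faces -> compact

diff = np.unique(round_tripped - original)
test_passed = len(diff) == 1 and diff[0] == 0
print('unique diffs', diff, 'passed:', test_passed)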
import requests import json from pybliometrics.scopus import AbstractRetrieval arr_authors = [ '55949131000', #EG '56344636600', #MF '6602888121', #MG '7005314544' #SR ] MY_API_KEY = 'afd5bb57359cd0e85670e92a9a282d48' from pybliometrics.scopus.utils import config #config['Authentication']['APIKey'] = 'afd5bb57359cd0e85670e92a9a282d48' bib = set() def get_scopus_info(SCOPUS_ID): url = ("http://api.elsevier.com/content/abstract/scopus_id/" + SCOPUS_ID + "?field=authors,title,publicationName,volume,issueIdentifier," + "prism:pageRange,coverDate,article-number,doi,citedby-count,prism:aggregationType") resp = requests.get(url, headers={'Accept':'application/json', 'X-ELS-APIKey': MY_API_KEY}) return json.loads(resp.text.encode('utf-8')) for author in arr_authors: resp = requests.get("http://api.elsevier.com/content/search/scopus?query=AU-ID(" + author + ")&field=dc:identifier", headers={'Accept':'application/json', 'X-ELS-APIKey': MY_API_KEY}) results = resp.json() #print(results) i = 0 for r in results['search-results']["entry"]: sid = [str(r['dc:identifier'])] # some entries seem to have json parse errors, so we catch those print(sid[0].replace('SCOPUS_ID:','')) ab = AbstractRetrieval(sid[0].replace('SCOPUS_ID:','')) bib.add(str(ab.get_html())) break break with open('bib.bib', 'w') as file: for bibitem in bib: file.write(bibitem) file.write('\n')
script/bib_script2_not_working.py
1,645
EGMFMGSRconfig['Authentication']['APIKey'] = 'afd5bb57359cd0e85670e92a9a282d48'print(results) some entries seem to have json parse errors, so we catch those
156
en
0.712169
# MIT License # # Copyright (c) 2020 PANGAEA (https://www.pangaea.de/) # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from typing import List import jmespath from fuji_server.helper.metadata_collector import MetaDataCollector from fuji_server.helper.request_helper import RequestHelper, AcceptTypes class MetaDataCollectorDatacite (MetaDataCollector): exclude_conversion: List[str] def __init__(self, mapping, pid_url=None, loggerinst=None): super().__init__(logger=loggerinst, mapping=mapping) self.pid_url = pid_url self.exclude_conversion = ['creator', 'license', 'related_resources', 'access_level'] def parse_metadata(self): source_name = None dcite_metadata = {} self.logger.info('FsF-F2-01M : Extract datacite metadata') requestHelper = RequestHelper(self.pid_url, self.logger) requestHelper.setAcceptType(AcceptTypes.datacite_json) neg_source,ext_meta = requestHelper.content_negotiate('FsF-F2-01M') if ext_meta: try: dcite_metadata = jmespath.search(self.metadata_mapping.value, ext_meta) if dcite_metadata: self.namespaces.append('http://datacite.org/schema/') source_name = self.getEnumSourceNames().DATACITE_JSON.value if dcite_metadata['creator'] is None: first = dcite_metadata['creator_first'] last = dcite_metadata['creator_last'] # default type of creator is [] if isinstance(first, list) and isinstance(last, list): if len(first) == len(last): names = [i + " " + j for i, j in zip(first, last)] dcite_metadata['creator'] = names if dcite_metadata.get('related_resources'): self.logger.info('FsF-I3-01M : {0} related resource(s) extracted from {1}'.format( len(dcite_metadata['related_resources']), source_name)) temp_rels = [] for r in dcite_metadata['related_resources']: filtered = {k: v for k, v in r.items() if v is not None} temp_rels.append(filtered) dcite_metadata['related_resources'] = temp_rels else: self.logger.info('FsF-I3-01M : No related resource(s) found in Datacite metadata') # convert all values (list type) into string except 'creator','license','related_resources' for key, value in dcite_metadata.items(): if key not in self.exclude_conversion and isinstance(value, list): flat = ', '.join(map(str, value)) dcite_metadata[key] = flat except Exception as e: self.logger.exception('Failed to extract Datacite Json - {}'.format(e)) return source_name, dcite_metadata
fuji_server/helper/metadata_collector_datacite.py
4,107
MIT License Copyright (c) 2020 PANGAEA (https://www.pangaea.de/) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. default type of creator is [] convert all values (list type) into string except 'creator','license','related_resources'
1,205
en
0.84302
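parse_metadata above does two things with the content-negotiated DataCite JSON: a jmespath mapping pulls out the wanted fields, then list values outside the excluded keys are flattened to comma-separated strings. A small stand-alone version, where the mapping expression and the JSON are illustrative rather than the collector's real metadata_mapping:

import jmespath

ext_meta = {"titles": [{"title": "Example dataset"}],
            "subjects": [{"subject": "ocean"}, {"subject": "salinity"}]}
mapping = "{title: titles[0].title, keywords: subjects[*].subject}"  # assumed mapping
exclude_conversion = ["creator", "license", "related_resources", "access_level"]

metadata = jmespath.search(mapping, ext_meta)
for key, value in metadata.items():
    if key not in exclude_conversion and isinstance(value, list):
        metadata[key] = ", ".join(map(str, value))
print(metadata)  # {'title': 'Example dataset', 'keywords': 'ocean, salinity'}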
#!/usr/bin/env python # -*- coding: utf-8 -*- # # imageprocessor documentation build configuration file. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another # directory, add these directories to sys.path here. If the directory is # relative to the documentation root, use os.path.abspath to make it # absolute, like shown here. # # import imageprocessor import os import sys sys.path.insert(0, os.path.abspath('..')) # -- Project information ----------------------------------------------------- project = u'ImageProcessor' copyright = u"2021, Rui Wang" author = u"Rui Wang" # -- General configuration --------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinx.ext.napoleon'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The master toctree document. master_doc = 'index' # The version info for the project you're documenting, acts as replacement # for |version| and |release|, also used in various other places throughout # the built documents. # # The short X.Y version. # version = imageprocessor.__version__ # The full version, including alpha/beta/rc tags. # release = imageprocessor.__version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'alabaster' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] html_sidebars = { "**": ["about.html", "navigation.html", "searchbox.html"] } # -- Options for HTMLHelp output --------------------------------------- # Output file base name for HTML help builder. htmlhelp_basename = 'imageprocessordoc' # -- Options for LaTeX output ------------------------------------------ latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). 
# # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto, manual, or own class]). latex_documents = [ (master_doc, 'imageprocessor.tex', u'imageprocessor Documentation', u'Rui Wang', 'manual'), ] # -- Options for manual page output ------------------------------------ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'imageprocessor', u'imageprocessor Documentation', [author], 1) ] # -- Options for Texinfo output ---------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'imageprocessor', u'imageprocessor Documentation', author, 'imageprocessor', 'One line description of project.', 'Miscellaneous'), ]
docs/conf.py
4,809
!/usr/bin/env python -*- coding: utf-8 -*- imageprocessor documentation build configuration file. This file is execfile()d with the current directory set to its containing dir. Note that not all possible configuration values are present in this autogenerated file. All configuration values have a default; values that are commented out serve to show the default. If extensions (or modules to document with autodoc) are in another directory, add these directories to sys.path here. If the directory is relative to the documentation root, use os.path.abspath to make it absolute, like shown here. import imageprocessor -- Project information ----------------------------------------------------- -- General configuration --------------------------------------------- If your documentation needs a minimal Sphinx version, state it here. needs_sphinx = '1.0' Add any Sphinx extension module names here, as strings. They can be extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. Add any paths that contain templates here, relative to this directory. The suffix(es) of source filenames. You can specify multiple suffix as a list of string: source_suffix = ['.rst', '.md'] The master toctree document. The version info for the project you're documenting, acts as replacement for |version| and |release|, also used in various other places throughout the built documents. The short X.Y version. version = imageprocessor.__version__ The full version, including alpha/beta/rc tags. release = imageprocessor.__version__ The language for content autogenerated by Sphinx. Refer to documentation for a list of supported languages. This is also used if you do content translation via gettext catalogs. Usually you set "language" from the command line for these cases. List of patterns, relative to source directory, that match files and directories to ignore when looking for source files. This patterns also effect to html_static_path and html_extra_path The name of the Pygments (syntax highlighting) style to use. If true, `todo` and `todoList` produce output, else they produce nothing. -- Options for HTML output ------------------------------------------- The theme to use for HTML and HTML Help pages. See the documentation for a list of builtin themes. Add any paths that contain custom static files (such as style sheets) here, relative to this directory. They are copied after the builtin static files, so a file named "default.css" will overwrite the builtin "default.css". -- Options for HTMLHelp output --------------------------------------- Output file base name for HTML help builder. -- Options for LaTeX output ------------------------------------------ The paper size ('letterpaper' or 'a4paper'). 'papersize': 'letterpaper', The font size ('10pt', '11pt' or '12pt'). 'pointsize': '10pt', Additional stuff for the LaTeX preamble. 'preamble': '', Latex figure (float) alignment 'figure_align': 'htbp', Grouping the document tree into LaTeX files. List of tuples (source start file, target name, title, author, documentclass [howto, manual, or own class]). -- Options for manual page output ------------------------------------ One entry per manual page. List of tuples (source start file, name, description, authors, manual section). -- Options for Texinfo output ---------------------------------------- Grouping the document tree into Texinfo files. List of tuples (source start file, target name, title, author, dir menu entry, description, category)
3,480
en
0.666229
# coding=utf-8 """ PAT - the name of the current project. main_portfolio_maker.py - the name of the new file which you specify in the New File dialog box during the file creation. Hossein - the login name of the current user. 8 / 8 / 18 - the current system date. 9: 14 AM - the current system time. PyCharm - the name of the IDE in which the file will be created. """ from portfolio_maker.subscriber import create_subscription from observer import Observer from price_fetcher.config import PROJECT_ID from price_fetcher.publisher import list_topics from price_fetcher.config import TICKERS import time import datetime if __name__ == '__main__': topics = list_topics(PROJECT_ID) topics = [str(topic).split('/')[-1][:-2] for topic in topics if 'simulator' in str(topic)] subscriptions = [create_subscription(PROJECT_ID, topic, 'live_writer_' + str(i)) for i, topic in enumerate(topics)] observer = Observer(tickers=['AAPL'], start_date=datetime.date(2018, 10, 18)) observer.initiate() for i in range(len(topics)): observer.receive_messages(PROJECT_ID, 'live_writer_' + str(i)) while True: # print('PRINTING!', observer.instruments) time.sleep(60)
portfolio_maker/main_portfolio_maker.py
1,222
PAT - the name of the current project. main_portfolio_maker.py - the name of the new file which you specify in the New File dialog box during the file creation. Hossein - the login name of the current user. 8 / 8 / 18 - the current system date. 9: 14 AM - the current system time. PyCharm - the name of the IDE in which the file will be created. coding=utf-8 print('PRINTING!', observer.instruments)
401
en
0.804993
import discord import json import CloudDB import nqrng from cloudant.result import Result global CONFIG client = discord.Client() token = "" #import config file with open('config.json', 'r') as f: getFile = json.load(f) global CONFIG CONFIG = getFile["services"]["discord"][0] token = CONFIG["token"] #db connect global my_database my_database = CloudDB.connect_db() # bot start @client.event async def on_ready(): print("Login") print(client.user.name) print(client.user.id) print("================") # bot get message @client.event async def on_message(message): # if get message for bot > return none if message.author.bot: return None if message.content.startswith('!Hi'): channel = message.channel await channel.send('Welcome!') if message.content.startswith('!Qadjoke'): channel = message.channel arrResult = Result(my_database.all_docs, include_docs=True) dbNum = my_database.doc_count() num1 = nqrng.random_number() % dbNum num2 = nqrng.random_number() % dbNum num = (num1 * num2) % dbNum result = arrResult[num][0]['doc']['Qdad'] print(f"{num} is randnum, result is {result}") await channel.send(f'{result}') client.run(token)
quantum-ugly-duckling-main/discord_bot.py
1,287
import config filedb connect bot start bot get message if get message for bot > return none
91
es
0.077892
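The !Qadjoke handler above picks a document by reducing two random draws modulo the document count and multiplying them. The sketch below reproduces that index selection with the standard library's random as a stand-in for nqrng.random_number; note that the product-then-modulo combination is convenient but not uniformly distributed over the indices.

import random

db_doc_count = 7  # stand-in for my_database.doc_count()
num1 = random.randrange(1_000_000) % db_doc_count  # stand-in for nqrng.random_number()
num2 = random.randrange(1_000_000) % db_doc_count
index = (num1 * num2) % db_doc_count
print(f"{index} is the selected document index")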
import tensorflow as tf from tensorflow.contrib.seq2seq.python.ops.attention_wrapper import LuongAttention, \ AttentionWrapper, AttentionWrapperState class AttentionMode: """ Enumerator for the Luong style local attention modes. - See [1]: Effective Approaches to Attention-based Neural Machine Translation, http://arxiv.org/abs/1508.04025 """ # local-m mode. MONOTONIC = 'monotonic' # local-p mode. PREDICTIVE = 'predictive' class AttentionScore: """ Enumerator for the three different content-based scoring functions for Luong style attention. - See [1]: Effective Approaches to Attention-based Neural Machine Translation, http://arxiv.org/abs/1508.04025 """ DOT = 'dot' GENERAL = 'general' CONCAT = 'concat' def _luong_local_compute_attention(attention_mechanism, cell_output, attention_state, attention_layer): """Computes the attention and alignments for the Luong style local attention mechanism.""" alignments, next_attention_state = attention_mechanism( cell_output, state=attention_state) # Reshape from [batch_size, memory_time] to [batch_size, 1, memory_time] expanded_alignments = tf.expand_dims(alignments, 1) context_windows = [] padded_alignment_windows = [] window_start = attention_mechanism.window_start window_stop = attention_mechanism.window_stop pre_padding = attention_mechanism.window_pre_padding post_padding = attention_mechanism.window_post_padding full_pre_padding = attention_mechanism.full_seq_pre_padding full_post_padding = attention_mechanism.full_seq_post_padding for i in range(0, attention_mechanism.const_batch_size): # Slice out the window from the memory. value_window = attention_mechanism.values[i, window_start[i][0]:window_stop[i][0], :] # Add zero padding to the slice in order to ensure the window size is (2D+1). value_window_paddings = [ [pre_padding[i][0], post_padding[i][0]], [0, 0] ] value_window = tf.pad(value_window, value_window_paddings, 'CONSTANT') # Shape information is lost after padding ;(. value_window.set_shape((attention_mechanism.window_size, attention_mechanism._num_units)) # Calculate the context vector for the current batch entry using only information from # teh window. context_window = tf.matmul(expanded_alignments[i], value_window) context_windows.append(context_window) if attention_mechanism.force_gaussian is True: # Apply gaussian weighting of the window contents. point_dist = tf.cast(tf.range(start=window_start[i][0], limit=window_stop[i][0], delta=1), dtype=tf.float32) - attention_mechanism.p[i][0] gaussian_weights = tf.exp(-(point_dist ** 2) / 2 * (attention_mechanism.d / 2) ** 2) __alignments = alignments[i] * gaussian_weights else: # Use the raw window contents. __alignments = alignments[i] # Add padding to the alignments to get from the window size 2D+1 up to the original # memory length. alignment_seq_paddings = [ [full_pre_padding[i][0], full_post_padding[i][0]], ] __alignments = tf.pad(__alignments, alignment_seq_paddings, 'CONSTANT') padded_alignment_windows.append(__alignments) # Stack all context vectors into one tensor. context = tf.stack(context_windows) # Squeeze out the helper dimension used for calculating the context. context = tf.squeeze(context, [1]) # Stack all alignment vectors into one tensor. This tensor gives alignments for each encoder # step. 
padded_alignment = tf.stack(padded_alignment_windows) if attention_layer is not None: attention = attention_layer(tf.concat([cell_output, context], 1)) else: attention = context return attention, padded_alignment, padded_alignment class LocalLuongAttention(LuongAttention): """ Implements a Luong-style local attention mechanism. This implementation supports both monotonic attention as well as predictive attention. - See [1]: Effective Approaches to Attention-based Neural Machine Translation, http://arxiv.org/abs/1508.04025 """ def __init__(self, num_units, memory, const_batch_size, memory_sequence_length=None, scale=False, probability_fn=None, score_mask_value=None, dtype=None, name="LocalLuongAttention", d=10, attention_mode=AttentionMode.MONOTONIC, score_mode=AttentionScore.DOT, force_gaussian=False ): """ Arguments: num_units (int): The depth of the attention mechanism. This controls the number of units in the memory layer that processes the encoder states into the `keys`. memory (tf.Tensor): The memory to query; usually the output of an RNN encoder. The shape is expected to be shape=(batch_size, encoder_max_time, ...) const_batch_size (int): The constant batch size to expect from every batch. Every batch is expected to contain exactly `const_batch_size` samples. memory_sequence_length: (optional) Sequence lengths for the batch entries in memory. If provided, the memory tensor rows are masked with zeros for values past the respective sequence lengths. scale (boolean): Whether to scale the energy term. probability_fn: (optional) A `callable`. Converts the score to probabilities. The default is @{tf.nn.softmax}. Other options include @{tf.contrib.seq2seq.hardmax} and @{tf.contrib.sparsemax.sparsemax}. Its signature should be: `probabilities = probability_fn(score)`. score_mask_value: (optional) The mask value for score before passing into `probability_fn`. The default is -inf. Only used if `memory_sequence_length` is not None. dtype (tf.DType): The data type for the memory layer of the attention mechanism. name (string): Name to use when creating ops. d (int): D parameter controlling the window size and gaussian distribution. The window size is set to be `2D + 1`. attention_mode (AttentionMode): The attention mode to use. Can be either `MONOTONIC` or `PREDICTIVE`. score_mode (AttentionScore): The attention scoring function to use. Can either be `DOT`, `GENERAL` or `CONCAT`. force_gaussian (boolean): Force a gaussian distribution onto the scores in the attention window. Defaults to False. """ super().__init__(num_units=num_units, memory=memory, memory_sequence_length=memory_sequence_length, scale=scale, probability_fn=probability_fn, score_mask_value=score_mask_value, dtype=dtype, name=name) # Initialize the decoding time counter. # This variable is updated by the `ÀdvancedAttentionWrapper`. self.time = 0 # Calculate the attention window size. self.d = d self.window_size = 2 * self.d + 1 # Store the attention mode. self.attention_mode = attention_mode # Store the scoring function style to be used. self.score_mode = score_mode # The constant batch size to expect. self.const_batch_size = const_batch_size self.force_gaussian = force_gaussian def __call__(self, query, state): """ Calculate the alignments and next_state for the current decoder output. Arguments: query (tf.Tensor): Decoder cell outputs to compare to the keys (memory). The shape is expected to be shape=(B, num_units) with B being the batch size and `num_units` being the output size of the decoder_cell. 
state (tf.Tensor): In Luong attention the state is equal to the alignments. Therefore this will contain the alignments from the previous decoding step. Returns: (alignments, next_state): alignments (tf.Tensor): The normalized attention scores for the attention window. The shape is shape=(B, 2D+1), with B being the batch size and `2D+1` being the window size. next_state (tf.Tensor): In Luong attention this is equal to `alignments`. """ with tf.variable_scope(None, "local_luong_attention", [query]): # Get the depth of the memory values. num_units = self._keys.get_shape()[-1] # Get the source sequence length from memory. source_seq_length = tf.shape(self._keys)[1] if self.attention_mode == AttentionMode.PREDICTIVE: # Predictive selection fo the attention window position. vp = tf.get_variable(name="local_v_p", shape=[num_units, 1], dtype=tf.float32) wp = tf.get_variable(name="local_w_p", shape=[num_units, num_units], dtype=tf.float32) # shape => (B, num_units) _intermediate_result = tf.transpose(tf.tensordot(wp, query, [0, 1])) # shape => (B, 1) _tmp = tf.transpose(tf.tensordot(vp, tf.tanh(_intermediate_result), [0, 1])) # Derive p_t as described by Luong for the predictive local-p case. self.p = tf.cast(source_seq_length, tf.float32) * tf.sigmoid(_tmp) elif self.attention_mode == AttentionMode.MONOTONIC: # Derive p_t as described by Luong for the predictive local-m case. self.p = tf.tile( [[self.time]], tf.convert_to_tensor([self.batch_size, 1]) ) # Prevent the window from leaving the memory. self.p = tf.maximum(self.p, self.d) self.p = tf.minimum(self.p, source_seq_length - (self.d + 1)) self.p = tf.cast(self.p, dtype=tf.float32) # Calculate the memory sequence index at which the window should start. start_index = tf.floor(self.p) - self.d start_index = tf.cast(start_index, dtype=tf.int32) # Prevent the window from leaving the memory. self.window_start = tf.maximum(0, start_index) # Calculate the memory sequence index at which the window should stop. stop_index = tf.floor(self.p) + self.d + 1 stop_index = tf.cast(stop_index, dtype=tf.int32) # Prevent the window from leaving the memory. self.window_stop = tf.minimum(source_seq_length, stop_index) # Calculate how many padding frames should be added to the start of the window. # This is used to get up to the total memory length again. self.full_seq_pre_padding = tf.abs(start_index) # Calculate how many padding frames should be added to the end of the window. # This is used to get up to the total memory length again. self.full_seq_post_padding = tf.abs(stop_index - source_seq_length) # Calculate how many padding frames should be added to the start of the window. # This is used to get the window up to 2D+1 frames. self.window_pre_padding = tf.abs(self.window_start - start_index) # Calculate how many padding frames should be added to the end of the window. # This is used to get the window up to 2D+1 frames. self.window_post_padding = tf.abs(self.window_stop - stop_index) # Slice the windows for every batch entry. with tf.variable_scope(None, "window_extraction", [query]): windows = [] # Iterate the batch entries. for i in range(0, self.const_batch_size): # Slice out the window from the processed memory. __window = self._keys[i, self.window_start[i][0]:self.window_stop[i][0], :] # Add zero padding to the slice in order to ensure the window size is (2D+1). paddings = [ [self.window_pre_padding[i][0], self.window_post_padding[i][0]], [0, 0] ] __window = tf.pad(__window, paddings, 'CONSTANT') # Collect the extracted windows for each batch entry. 
windows.append(__window) # Merge all extracted windows into one tensor. window = tf.stack(windows) # Calculate the not not normalized attention score as described by Luong as dot. if self.score_mode == AttentionScore.DOT: score = _luong_dot_score(query, window, self._scale) # Calculate the not not normalized attention score as described by Luong as general. elif self.score_mode == AttentionScore.GENERAL: score = _luong_general_score(query, window) # Calculate the not not normalized attention score as described by Luong as general. elif self.score_mode == AttentionScore.CONCAT: score = _luong_concat_score(query, window) else: score = None raise Exception("An invalid attention scoring mode was supplied.") # Normalize the scores. alignments = self._probability_fn(score, state) next_state = alignments return alignments, next_state def _luong_dot_score(query, keys, scale): """ Implements the Luong-style dot scoring function. This attention has two forms. The first is standard Luong attention, as described in: Minh-Thang Luong, Hieu Pham, Christopher D. Manning. "Effective Approaches to Attention-based Neural Machine Translation." EMNLP 2015. https://arxiv.org/abs/1508.04025 The second is the scaled form inspired partly by the normalized form of Bahdanau attention. To enable the second form, call this function with `scale=True`. This implementation is derived from: `tensorflow.contrib.seq2seq.python.ops.attention_wrapper` Arguments: query (tf.Tensor): Decoder cell outputs to compare to the keys (memory). The shape is expected to be shape=(B, num_units) with B being the batch size and `num_units` being the output size of the decoder_cell. keys (tf.Tensor): Processed memory (usually the encoder states processed by the memory_layer). The shape is expected to be shape=(B, X, num_units) with B being the batch size and `num_units` being the output size of the memory_layer. X may be the maximal length of the encoder time domain or in the case of local attention the window size. scale (boolean): Whether to apply a scale to the score function. Returns: score (tf.Tensor): A tensor with shape=(B, X) containing the non-normalized score values. Raises: ValueError: If `key` and `query` depths do not match. """ depth = query.get_shape()[-1] key_units = keys.get_shape()[-1] if depth != key_units: raise ValueError( "Incompatible or unknown inner dimensions between query and keys. " "Query (%s) has units: %s. Keys (%s) have units: %s. " "Perhaps you need to set num_units to the keys' dimension (%s)?" % (query, depth, keys, key_units, key_units)) dtype = query.dtype query = tf.expand_dims(query, 1) score = tf.matmul(query, keys, transpose_b=True) score = tf.squeeze(score, [1]) if scale: # Scalar used in weight scaling g = tf.get_variable( "attention_g", dtype=dtype, initializer=tf.ones_initializer, shape=()) score = g * score return score def _luong_general_score(query, keys): """ Implements the Luong-style general scoring function. - See [1]: Effective Approaches to Attention-based Neural Machine Translation, http://arxiv.org/abs/1508.04025 Arguments: query (tf.Tensor): Decoder cell outputs to compare to the keys (memory). The shape is expected to be shape=(B, num_units) with B being the batch size and `num_units` being the output size of the decoder_cell. keys (tf.Tensor): Processed memory (usually the encoder states processed by the memory_layer). The shape is expected to be shape=(B, X, num_units) with B being the batch size and `num_units` being the output size of the memory_layer. 
X may be the maximal length of the encoder time domain or in the case of local attention the window size. Returns: score (tf.Tensor): A tensor with shape=(B, X) containing the non-normalized score values. """ raise NotImplementedError('Luong style general mode attention scoring is not implemented yet!') def _luong_concat_score(query, keys): """ Implements the Luong-style concat scoring function. - See [1]: Effective Approaches to Attention-based Neural Machine Translation, http://arxiv.org/abs/1508.04025 Arguments: query (tf.Tensor): Decoder cell outputs to compare to the keys (memory). The shape is expected to be shape=(B, num_units) with B being the batch size and `num_units` being the output size of the decoder_cell. keys (tf.Tensor): Processed memory (usually the encoder states processed by the memory_layer). The shape is expected to be shape=(B, X, num_units) with B being the batch size and `num_units` being the output size of the memory_layer. X may be the maximal length of the encoder time domain or in the case of local attention the window size. Returns: score (tf.Tensor): A tensor with shape=(B, X) containing the non-normalized score values. """ raise NotImplementedError('Luong style concat mode attention scoring is not implemented yet!') class AdvancedAttentionWrapper(AttentionWrapper): """ Wraps the standard AttentionWrapper class so that during decoding steps the decoding time index is updated in the attention mechanism. This is a hack to enable us using Luong style monotonic attention. """ def __init__(self, cell, attention_mechanism, attention_layer_size=None, alignment_history=False, cell_input_fn=None, output_attention=True, initial_cell_state=None, name=None): super().__init__(cell=cell, attention_mechanism=attention_mechanism, attention_layer_size=attention_layer_size, alignment_history=alignment_history, cell_input_fn=cell_input_fn, output_attention=output_attention, initial_cell_state=initial_cell_state, name=name) def call(self, inputs, state): """Perform a step of attention-wrapped RNN. - Step 1: Mix the `inputs` and previous step's `attention` output via `cell_input_fn`. - Step 2: Call the wrapped `cell` with this input and its previous state. - Step 3: Score the cell's output with `attention_mechanism`. - Step 4: Calculate the alignments by passing the score through the `normalizer`. - Step 5: Calculate the context vector as the inner product between the alignments and the attention_mechanism's values (memory). - Step 6: Calculate the attention output by concatenating the cell output and context through the attention layer (a linear layer with `attention_layer_size` outputs). Args: inputs: (Possibly nested tuple of) Tensor, the input at this time step. state: An instance of `AttentionWrapperState` containing tensors from the previous time step. Returns: A tuple `(attention_or_cell_output, next_state)`, where: - `attention_or_cell_output` depending on `output_attention`. - `next_state` is an instance of `AttentionWrapperState` containing the state calculated at this time step. Raises: TypeError: If `state` is not an instance of `AttentionWrapperState`. """ if not isinstance(state, AttentionWrapperState): raise TypeError("Expected state to be instance of AttentionWrapperState. " "Received type %s instead." % type(state)) # Step 1: Calculate the true inputs to the cell based on the # previous attention value. 
cell_inputs = self._cell_input_fn(inputs, state.attention) cell_state = state.cell_state cell_output, next_cell_state = self._cell(cell_inputs, cell_state) cell_batch_size = ( cell_output.shape[0].value or tf.shape(cell_output)[0]) error_message = ( "When applying AttentionWrapper %s: " % self.name + "Non-matching batch sizes between the memory " "(encoder output) and the query (decoder output). Are you using " "the BeamSearchDecoder? You may need to tile your memory input via " "the tf.contrib.seq2seq.tile_batch function with argument " "multiple=beam_width.") with tf.control_dependencies( self._batch_size_checks(cell_batch_size, error_message)): cell_output = tf.identity( cell_output, name="checked_cell_output") if self._is_multi: previous_attention_state = state.attention_state previous_alignment_history = state.alignment_history else: previous_attention_state = [state.attention_state] previous_alignment_history = [state.alignment_history] all_alignments = [] all_attentions = [] all_attention_states = [] maybe_all_histories = [] for i, attention_mechanism in enumerate(self._attention_mechanisms): # Note: This is the only modification hacked into the attention wrapper to support # monotonic Luong attention. attention_mechanism.time = state.time attention, alignments, next_attention_state = _luong_local_compute_attention( attention_mechanism, cell_output, previous_attention_state[i], self._attention_layers[i] if self._attention_layers else None) alignment_history = previous_alignment_history[i].write( state.time, alignments) if self._alignment_history else () all_attention_states.append(next_attention_state) all_alignments.append(alignments) all_attentions.append(attention) maybe_all_histories.append(alignment_history) attention = tf.concat(all_attentions, 1) next_state = AttentionWrapperState( time=state.time + 1, cell_state=next_cell_state, attention=attention, attention_state=self._item_or_tuple(all_attention_states), alignments=self._item_or_tuple(all_alignments), alignment_history=self._item_or_tuple(maybe_all_histories)) if self._output_attention: return attention, next_state else: return cell_output, next_state
tacotron/attention.py
24,646
Wraps the standard AttentionWrapper class so that during decoding steps the decoding time index is updated in the attention mechanism. This is a hack to enable us using Luong style monotonic attention. Enumerator for the Luong style local attention modes. - See [1]: Effective Approaches to Attention-based Neural Machine Translation, http://arxiv.org/abs/1508.04025 Enumerator for the three different content-based scoring functions for Luong style attention. - See [1]: Effective Approaches to Attention-based Neural Machine Translation, http://arxiv.org/abs/1508.04025 Implements a Luong-style local attention mechanism. This implementation supports both monotonic attention as well as predictive attention. - See [1]: Effective Approaches to Attention-based Neural Machine Translation, http://arxiv.org/abs/1508.04025 Calculate the alignments and next_state for the current decoder output. Arguments: query (tf.Tensor): Decoder cell outputs to compare to the keys (memory). The shape is expected to be shape=(B, num_units) with B being the batch size and `num_units` being the output size of the decoder_cell. state (tf.Tensor): In Luong attention the state is equal to the alignments. Therefore this will contain the alignments from the previous decoding step. Returns: (alignments, next_state): alignments (tf.Tensor): The normalized attention scores for the attention window. The shape is shape=(B, 2D+1), with B being the batch size and `2D+1` being the window size. next_state (tf.Tensor): In Luong attention this is equal to `alignments`. Arguments: num_units (int): The depth of the attention mechanism. This controls the number of units in the memory layer that processes the encoder states into the `keys`. memory (tf.Tensor): The memory to query; usually the output of an RNN encoder. The shape is expected to be shape=(batch_size, encoder_max_time, ...) const_batch_size (int): The constant batch size to expect from every batch. Every batch is expected to contain exactly `const_batch_size` samples. memory_sequence_length: (optional) Sequence lengths for the batch entries in memory. If provided, the memory tensor rows are masked with zeros for values past the respective sequence lengths. scale (boolean): Whether to scale the energy term. probability_fn: (optional) A `callable`. Converts the score to probabilities. The default is @{tf.nn.softmax}. Other options include @{tf.contrib.seq2seq.hardmax} and @{tf.contrib.sparsemax.sparsemax}. Its signature should be: `probabilities = probability_fn(score)`. score_mask_value: (optional) The mask value for score before passing into `probability_fn`. The default is -inf. Only used if `memory_sequence_length` is not None. dtype (tf.DType): The data type for the memory layer of the attention mechanism. name (string): Name to use when creating ops. d (int): D parameter controlling the window size and gaussian distribution. The window size is set to be `2D + 1`. attention_mode (AttentionMode): The attention mode to use. Can be either `MONOTONIC` or `PREDICTIVE`. score_mode (AttentionScore): The attention scoring function to use. Can either be `DOT`, `GENERAL` or `CONCAT`. force_gaussian (boolean): Force a gaussian distribution onto the scores in the attention window. Defaults to False. Implements the Luong-style concat scoring function. - See [1]: Effective Approaches to Attention-based Neural Machine Translation, http://arxiv.org/abs/1508.04025 Arguments: query (tf.Tensor): Decoder cell outputs to compare to the keys (memory). 
The shape is expected to be shape=(B, num_units) with B being the batch size and `num_units` being the output size of the decoder_cell. keys (tf.Tensor): Processed memory (usually the encoder states processed by the memory_layer). The shape is expected to be shape=(B, X, num_units) with B being the batch size and `num_units` being the output size of the memory_layer. X may be the maximal length of the encoder time domain or in the case of local attention the window size. Returns: score (tf.Tensor): A tensor with shape=(B, X) containing the non-normalized score values. Implements the Luong-style dot scoring function. This attention has two forms. The first is standard Luong attention, as described in: Minh-Thang Luong, Hieu Pham, Christopher D. Manning. "Effective Approaches to Attention-based Neural Machine Translation." EMNLP 2015. https://arxiv.org/abs/1508.04025 The second is the scaled form inspired partly by the normalized form of Bahdanau attention. To enable the second form, call this function with `scale=True`. This implementation is derived from: `tensorflow.contrib.seq2seq.python.ops.attention_wrapper` Arguments: query (tf.Tensor): Decoder cell outputs to compare to the keys (memory). The shape is expected to be shape=(B, num_units) with B being the batch size and `num_units` being the output size of the decoder_cell. keys (tf.Tensor): Processed memory (usually the encoder states processed by the memory_layer). The shape is expected to be shape=(B, X, num_units) with B being the batch size and `num_units` being the output size of the memory_layer. X may be the maximal length of the encoder time domain or in the case of local attention the window size. scale (boolean): Whether to apply a scale to the score function. Returns: score (tf.Tensor): A tensor with shape=(B, X) containing the non-normalized score values. Raises: ValueError: If `key` and `query` depths do not match. Implements the Luong-style general scoring function. - See [1]: Effective Approaches to Attention-based Neural Machine Translation, http://arxiv.org/abs/1508.04025 Arguments: query (tf.Tensor): Decoder cell outputs to compare to the keys (memory). The shape is expected to be shape=(B, num_units) with B being the batch size and `num_units` being the output size of the decoder_cell. keys (tf.Tensor): Processed memory (usually the encoder states processed by the memory_layer). The shape is expected to be shape=(B, X, num_units) with B being the batch size and `num_units` being the output size of the memory_layer. X may be the maximal length of the encoder time domain or in the case of local attention the window size. Returns: score (tf.Tensor): A tensor with shape=(B, X) containing the non-normalized score values. Computes the attention and alignments for the Luong style local attention mechanism. Perform a step of attention-wrapped RNN. - Step 1: Mix the `inputs` and previous step's `attention` output via `cell_input_fn`. - Step 2: Call the wrapped `cell` with this input and its previous state. - Step 3: Score the cell's output with `attention_mechanism`. - Step 4: Calculate the alignments by passing the score through the `normalizer`. - Step 5: Calculate the context vector as the inner product between the alignments and the attention_mechanism's values (memory). - Step 6: Calculate the attention output by concatenating the cell output and context through the attention layer (a linear layer with `attention_layer_size` outputs). Args: inputs: (Possibly nested tuple of) Tensor, the input at this time step. 
state: An instance of `AttentionWrapperState` containing tensors from the previous time step. Returns: A tuple `(attention_or_cell_output, next_state)`, where: - `attention_or_cell_output` depending on `output_attention`. - `next_state` is an instance of `AttentionWrapperState` containing the state calculated at this time step. Raises: TypeError: If `state` is not an instance of `AttentionWrapperState`. local-m mode. local-p mode. Reshape from [batch_size, memory_time] to [batch_size, 1, memory_time] Slice out the window from the memory. Add zero padding to the slice in order to ensure the window size is (2D+1). Shape information is lost after padding ;(. Calculate the context vector for the current batch entry using only information from teh window. Apply gaussian weighting of the window contents. Use the raw window contents. Add padding to the alignments to get from the window size 2D+1 up to the original memory length. Stack all context vectors into one tensor. Squeeze out the helper dimension used for calculating the context. Stack all alignment vectors into one tensor. This tensor gives alignments for each encoder step. Initialize the decoding time counter. This variable is updated by the `ÀdvancedAttentionWrapper`. Calculate the attention window size. Store the attention mode. Store the scoring function style to be used. The constant batch size to expect. Get the depth of the memory values. Get the source sequence length from memory. Predictive selection fo the attention window position. shape => (B, num_units) shape => (B, 1) Derive p_t as described by Luong for the predictive local-p case. Derive p_t as described by Luong for the predictive local-m case. Prevent the window from leaving the memory. Calculate the memory sequence index at which the window should start. Prevent the window from leaving the memory. Calculate the memory sequence index at which the window should stop. Prevent the window from leaving the memory. Calculate how many padding frames should be added to the start of the window. This is used to get up to the total memory length again. Calculate how many padding frames should be added to the end of the window. This is used to get up to the total memory length again. Calculate how many padding frames should be added to the start of the window. This is used to get the window up to 2D+1 frames. Calculate how many padding frames should be added to the end of the window. This is used to get the window up to 2D+1 frames. Slice the windows for every batch entry. Iterate the batch entries. Slice out the window from the processed memory. Add zero padding to the slice in order to ensure the window size is (2D+1). Collect the extracted windows for each batch entry. Merge all extracted windows into one tensor. Calculate the not not normalized attention score as described by Luong as dot. Calculate the not not normalized attention score as described by Luong as general. Calculate the not not normalized attention score as described by Luong as general. Normalize the scores. Scalar used in weight scaling Step 1: Calculate the true inputs to the cell based on the previous attention value. Note: This is the only modification hacked into the attention wrapper to support monotonic Luong attention.
11,042
en
0.81814
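For reference, the Gaussian re-weighting that the `force_gaussian` option above is meant to apply comes from Luong-style local-p attention; with window parameter D, sigma = D/2 and source length S, a restatement of the paper's formulation in LaTeX:

```latex
a_t(s) = \operatorname{align}(h_t, \bar{h}_s)\,
         \exp\!\left(-\frac{(s - p_t)^2}{2\sigma^2}\right),
\qquad \sigma = \frac{D}{2},
\qquad p_t = S \cdot \operatorname{sigmoid}\!\left(v_p^{\top}\tanh(W_p h_t)\right)
```

Note that, read with Python's left-to-right precedence, the `gaussian_weights` expression in the snippet evaluates as `(-(point_dist ** 2) / 2) * (d / 2) ** 2`, i.e. it multiplies by sigma squared over two rather than dividing by two sigma squared, so it may not reproduce this formula exactly unless that is intentional.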
import logging
import warnings

from rest_framework import serializers
from rest_framework.authtoken.models import Token

from django.contrib.auth import get_user_model

l = logging.getLogger(__name__)


class OAuth2InputSerializer(serializers.Serializer):

    provider = serializers.CharField(required=False)
    code = serializers.CharField()
    redirect_uri = serializers.CharField(required=False)


class OAuth1InputSerializer(serializers.Serializer):

    provider = serializers.CharField(required=False)
    oauth_token = serializers.CharField()
    oauth_token_secret = serializers.CharField()
    oauth_verifier = serializers.CharField()


class UserSerializer(serializers.ModelSerializer):

    class Meta:
        model = get_user_model()
        exclude = ('is_staff', 'is_active', 'date_joined', 'password',
                   'last_login', 'user_permissions', 'groups', 'is_superuser',)


class TokenSerializer(serializers.Serializer):
    token = serializers.SerializerMethodField()

    def get_token(self, obj):
        token, created = Token.objects.get_or_create(user=obj)
        return token.key


class UserTokenSerializer(TokenSerializer, UserSerializer):
    pass


class JWTSerializer(TokenSerializer):

    def get_token(self, obj):
        try:
            from rest_framework_jwt.settings import api_settings
        except ImportError:
            warnings.warn('djangorestframework-jwt must be installed for JWT authentication',
                          ImportWarning)
            raise

        jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
        jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER

        payload = jwt_payload_handler(self.get_jwt_payload(obj))
        token = jwt_encode_handler(payload)

        return token

    def get_jwt_payload(self, obj):
        """
        Define here, what data shall be encoded in JWT.
        By default, entire object will be encoded.
        """
        return obj


class UserJWTSerializer(JWTSerializer, UserSerializer):
    pass
rest_social_auth/serializers.py
2,030
Define here, what data shall be encoded in JWT. By default, entire object will be encoded.
90
en
0.792898
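A minimal usage sketch (assumed application code, not part of rest_social_auth itself) showing how the serializers above compose: `UserTokenSerializer` inherits `get_token()` from `TokenSerializer` and the field set from `UserSerializer`, so a single serializer call returns the user's fields plus a DRF token created on demand:

```python
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView

from rest_social_auth.serializers import UserTokenSerializer


class WhoAmIView(APIView):
    """Hypothetical endpoint returning the current user's data plus an auth token."""
    permission_classes = (IsAuthenticated,)

    def get(self, request):
        # get_token() calls Token.objects.get_or_create(user=obj), so the token
        # is created lazily on the first request and reused afterwards.
        return Response(UserTokenSerializer(request.user).data)
```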
# -*- coding: utf-8 -*- """Handle orders and pendingOrders endpoints.""" from .apirequest import APIRequest from .decorators import dyndoc_insert, endpoint from .responses.orders import responses from abc import abstractmethod class Orders(APIRequest): """Orders - abstract base class to handle the orders endpoints.""" ENDPOINT = "" METHOD = "GET" EXPECTED_STATUS = 0 @abstractmethod @dyndoc_insert(responses) def __init__(self, accountID, orderID=None): """Instantiate an Orders request. Parameters ---------- accountID : string (required) id of the account to perform the request on. orderID : string id of the order to perform the request for. """ endpoint = self.ENDPOINT.format(accountID=accountID, orderID=orderID) super(Orders, self).__init__(endpoint, method=self.METHOD, expected_status=self.EXPECTED_STATUS) @endpoint("v3/accounts/{accountID}/orders", "POST", 201) class OrderCreate(Orders): """Create an Order for an Account.""" HEADERS = {"Content-Type": "application/json"} @dyndoc_insert(responses) def __init__(self, accountID, data): """Instantiate an OrderCreate request. Parameters ---------- accountID : string (required) id of the account to perform the request on. data : JSON (required) json orderbody to send Orderbody example:: {_v3_accounts_accountID_orders_create_body} >>> import oandapyV20 >>> import oandapyV20.endpoints.orders as orders >>> client = oandapyV20.API(access_token=...) >>> r = orders.OrderCreate(accountID, data=data) >>> client.request(r) >>> print r.response :: {_v3_accounts_accountID_orders_create_resp} """ super(OrderCreate, self).__init__(accountID) self.data = data @endpoint("v3/accounts/{accountID}/orders") class OrderList(Orders): """Create an Order for an Account.""" @dyndoc_insert(responses) def __init__(self, accountID, params=None): """Instantiate an OrderList request. Parameters ---------- accountID : string (required) id of the account to perform the request on. params : dict optional request query parameters, check developer.oanda.com for details Example:: >>> import oandapyV20 >>> import oandapyV20.endpoints.orders as orders >>> client = oandapyV20.API(access_token=...) >>> r = orders.OrderList(accountID) >>> client.request(r) >>> print r.response Output:: {_v3_accounts_accountID_orders_list_resp} """ super(OrderList, self).__init__(accountID) self.params = params @endpoint("v3/accounts/{accountID}/pendingOrders") class OrdersPending(Orders): """List all pending Orders in an Account.""" @dyndoc_insert(responses) def __init__(self, accountID): """Instantiate an OrdersPending request. Parameters ---------- accountID : string (required) id of the account to perform the request on. Example:: >>> import oandapyV20 >>> import oandapyV20.endpoints.orders as orders >>> client = oandapyV20.API(access_token=...) >>> r = orders.OrdersPending(accountID) >>> client.request(r) >>> print r.response Output:: {_v3_accounts_accountID_orders_pending_resp} """ super(OrdersPending, self).__init__(accountID) @endpoint("v3/accounts/{accountID}/orders/{orderID}") class OrderDetails(Orders): """Get details for a single Order in an Account.""" @dyndoc_insert(responses) def __init__(self, accountID, orderID): """Instantiate an OrderDetails request. Parameters ---------- accountID : string (required) id of the account to perform the request on. orderID : string (required) id of the order to perform the request on. >>> import oandapyV20 >>> import oandapyV20.endpoints.orders as orders >>> client = oandapyV20.API(access_token=...) 
>>> r = orders.OrderDetails(accountID=..., orderID=...) >>> client.request(r) >>> print r.response Output:: {_v3_accounts_accountID_order_details_resp} """ super(OrderDetails, self).__init__(accountID, orderID) @endpoint("v3/accounts/{accountID}/orders/{orderID}", "PUT", 201) class OrderReplace(Orders): """OrderReplace. Replace an Order in an Account by simultaneously cancelling it and createing a replacement Order. """ HEADERS = {"Content-Type": "application/json"} @dyndoc_insert(responses) def __init__(self, accountID, orderID, data): """Instantiate an OrderReplace request. Parameters ---------- accountID : string (required) id of the account to perform the request on. orderID : string (required) id of the order to perform the request on. data : JSON (required) json orderbody to send Orderbody example:: {_v3_accounts_accountID_order_replace_body} >>> import oandapyV20 >>> import oandapyV20.endpoints.orders as orders >>> client = oandapyV20.API(access_token=...) >>> data = {_v3_accounts_accountID_order_replace_body} >>> r = orders.OrderReplace(accountID=..., orderID=..., data=data) >>> client.request(r) >>> print r.response Output:: {_v3_accounts_accountID_order_replace_resp} """ super(OrderReplace, self).__init__(accountID, orderID) self.data = data @endpoint("v3/accounts/{accountID}/orders/{orderID}/cancel", "PUT") class OrderCancel(Orders): """Cancel a pending Order in an Account.""" @dyndoc_insert(responses) def __init__(self, accountID, orderID): """Instantiate an OrdersCancel request. Parameters ---------- accountID : string (required) id of the account to perform the request on. orderID : string (required) id of the account to perform the request on. Example:: >>> import oandapyV20 >>> import oandapyV20.endpoints.orders as orders >>> client = oandapyV20.API(access_token=...) >>> r = orders.OrderCancel(accountID= ..., orderID=...) >>> client.request(r) >>> print r.response Output:: {_v3_accounts_accountID_order_cancel_resp} """ super(OrderCancel, self).__init__(accountID, orderID) @endpoint("v3/accounts/{accountID}/orders/{orderID}/clientExtensions", "PUT") class OrderClientExtensions(Orders): """Update the Client Extensions for an Order in an Account. .. warning:: Do not set, modify or delete clientExtensions if your account is associated with MT4. """ HEADERS = {"Content-Type": "application/json"} @dyndoc_insert(responses) def __init__(self, accountID, orderID, data): """Instantiate an OrderCreate request. Parameters ---------- accountID : string (required) id of the account to perform the request on. orderID : string (required) id of the order to perform the request on. data : JSON (required) json orderbody to send Orderbody example:: {_v3_accounts_accountID_order_clientextensions_body} >>> import oandapyV20 >>> import oandapyV20.endpoints.orders as orders >>> client = oandapyV20.API(access_token=...) >>> r = orders.OrderClientExtensions(accountID, orderID, data=data) >>> client.request(r) >>> print r.response :: {_v3_accounts_accountID_order_clientextensions_resp} """ super(OrderClientExtensions, self).__init__(accountID, orderID) self.data = data
oandapyV20/endpoints/orders.py
8,188
Cancel a pending Order in an Account. Update the Client Extensions for an Order in an Account. .. warning:: Do not set, modify or delete clientExtensions if your account is associated with MT4. Create an Order for an Account. Get details for a single Order in an Account. Create an Order for an Account. OrderReplace. Replace an Order in an Account by simultaneously cancelling it and createing a replacement Order. Orders - abstract base class to handle the orders endpoints. List all pending Orders in an Account. Instantiate an Orders request. Parameters ---------- accountID : string (required) id of the account to perform the request on. orderID : string id of the order to perform the request for. Instantiate an OrderCreate request. Parameters ---------- accountID : string (required) id of the account to perform the request on. data : JSON (required) json orderbody to send Orderbody example:: {_v3_accounts_accountID_orders_create_body} >>> import oandapyV20 >>> import oandapyV20.endpoints.orders as orders >>> client = oandapyV20.API(access_token=...) >>> r = orders.OrderCreate(accountID, data=data) >>> client.request(r) >>> print r.response :: {_v3_accounts_accountID_orders_create_resp} Instantiate an OrderList request. Parameters ---------- accountID : string (required) id of the account to perform the request on. params : dict optional request query parameters, check developer.oanda.com for details Example:: >>> import oandapyV20 >>> import oandapyV20.endpoints.orders as orders >>> client = oandapyV20.API(access_token=...) >>> r = orders.OrderList(accountID) >>> client.request(r) >>> print r.response Output:: {_v3_accounts_accountID_orders_list_resp} Instantiate an OrdersPending request. Parameters ---------- accountID : string (required) id of the account to perform the request on. Example:: >>> import oandapyV20 >>> import oandapyV20.endpoints.orders as orders >>> client = oandapyV20.API(access_token=...) >>> r = orders.OrdersPending(accountID) >>> client.request(r) >>> print r.response Output:: {_v3_accounts_accountID_orders_pending_resp} Instantiate an OrderDetails request. Parameters ---------- accountID : string (required) id of the account to perform the request on. orderID : string (required) id of the order to perform the request on. >>> import oandapyV20 >>> import oandapyV20.endpoints.orders as orders >>> client = oandapyV20.API(access_token=...) >>> r = orders.OrderDetails(accountID=..., orderID=...) >>> client.request(r) >>> print r.response Output:: {_v3_accounts_accountID_order_details_resp} Instantiate an OrderReplace request. Parameters ---------- accountID : string (required) id of the account to perform the request on. orderID : string (required) id of the order to perform the request on. data : JSON (required) json orderbody to send Orderbody example:: {_v3_accounts_accountID_order_replace_body} >>> import oandapyV20 >>> import oandapyV20.endpoints.orders as orders >>> client = oandapyV20.API(access_token=...) >>> data = {_v3_accounts_accountID_order_replace_body} >>> r = orders.OrderReplace(accountID=..., orderID=..., data=data) >>> client.request(r) >>> print r.response Output:: {_v3_accounts_accountID_order_replace_resp} Instantiate an OrdersCancel request. Parameters ---------- accountID : string (required) id of the account to perform the request on. orderID : string (required) id of the account to perform the request on. Example:: >>> import oandapyV20 >>> import oandapyV20.endpoints.orders as orders >>> client = oandapyV20.API(access_token=...) 
>>> r = orders.OrderCancel(accountID= ..., orderID=...) >>> client.request(r) >>> print r.response Output:: {_v3_accounts_accountID_order_cancel_resp} Instantiate an OrderCreate request. Parameters ---------- accountID : string (required) id of the account to perform the request on. orderID : string (required) id of the order to perform the request on. data : JSON (required) json orderbody to send Orderbody example:: {_v3_accounts_accountID_order_clientextensions_body} >>> import oandapyV20 >>> import oandapyV20.endpoints.orders as orders >>> client = oandapyV20.API(access_token=...) >>> r = orders.OrderClientExtensions(accountID, orderID, data=data) >>> client.request(r) >>> print r.response :: {_v3_accounts_accountID_order_clientextensions_resp} Handle orders and pendingOrders endpoints. -*- coding: utf-8 -*-
4,539
en
0.525004
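A hedged end-to-end sketch of placing a market order with the `OrderCreate` request class above; the account id and access token are placeholders, and the order body follows the OANDA v20 conventions referenced in the class docstrings:

```python
import oandapyV20
import oandapyV20.endpoints.orders as orders

# Placeholders: substitute a real practice-account id and access token.
accountID = "101-004-1234567-001"
client = oandapyV20.API(access_token="...")

# Minimal market-order body in OANDA v20 format.
data = {
    "order": {
        "instrument": "EUR_USD",
        "units": "100",
        "type": "MARKET",
        "positionFill": "DEFAULT",
    }
}

r = orders.OrderCreate(accountID, data=data)
client.request(r)          # POSTs to v3/accounts/{accountID}/orders
print(r.response)
```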
""" Copyright (c) 2019 Microsoft Corporation. All rights reserved. MIT License Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import yaml import argparse import numpy as np import os import sys import time import json import pickle import torch as th import torch.nn as nn import horovod.torch as hvd #import pykaldi related modules import kaldi.fstext as kaldi_fst import kaldi.hmm as kaldi_hmm import kaldi.matrix as kaldi_matrix import kaldi.lat as kaldi_lat import kaldi.decoder as kaldi_decoder import kaldi.util as kaldi_util from kaldi.asr import MappedLatticeFasterRecognizer from kaldi.decoder import LatticeFasterDecoderOptions from data import SpeechDataset, SeqDataloader from models import LSTMStack, NnetAM from ops import ops from utils import utils def main(): parser = argparse.ArgumentParser() parser.add_argument("-config") parser.add_argument("-data", help="data yaml file") parser.add_argument("-data_path", default='', type=str, help="path of data files") parser.add_argument("-seed_model", help="the seed nerual network model") parser.add_argument("-exp_dir", help="the directory to save the outputs") parser.add_argument("-transform", help="feature transformation matrix or mvn statistics") parser.add_argument("-criterion", type=str, choices=["mmi", "mpfe", "smbr"], help="set the sequence training crtierion") parser.add_argument("-trans_model", help="the HMM transistion model, used for lattice generation") parser.add_argument("-prior_path", help="the prior for decoder, usually named as final.occs in kaldi setup") parser.add_argument("-den_dir", help="the decoding graph directory to find HCLG and words.txt files") parser.add_argument("-lr", type=float, help="set the learning rate") parser.add_argument("-ce_ratio", default=0.1, type=float, help="the ratio for ce regularization") parser.add_argument("-momentum", default=0, type=float, help="set the momentum") parser.add_argument("-batch_size", default=32, type=int, help="Override the batch size in the config") parser.add_argument("-data_loader_threads", default=0, type=int, help="number of workers for data loading") parser.add_argument("-max_grad_norm", default=5, type=float, help="max_grad_norm for gradient clipping") parser.add_argument("-sweep_size", default=100, type=float, help="process n hours of data per sweep (default:60)") parser.add_argument("-num_epochs", default=1, type=int, help="number of training epochs (default:1)") parser.add_argument('-print_freq', default=10, type=int, metavar='N', help='print frequency (default: 10)') parser.add_argument('-save_freq', 
default=1000, type=int, metavar='N', help='save model frequency (default: 1000)') args = parser.parse_args() with open(args.config) as f: config = yaml.safe_load(f) config['data_path'] = args.data_path config["sweep_size"] = args.sweep_size print("pytorch version:{}".format(th.__version__)) with open(args.data) as f: data = yaml.safe_load(f) config["source_paths"] = [j for i, j in data['clean_source'].items()] print("Experiment starts with config {}".format(json.dumps(config, sort_keys=True, indent=4))) # Initialize Horovod hvd.init() th.cuda.set_device(hvd.local_rank()) print("Run experiments with world size {}".format(hvd.size())) dataset = SpeechDataset(config) transform=None if args.transform is not None and os.path.isfile(args.transform): with open(args.transform, 'rb') as f: transform = pickle.load(f) dataset.transform = transform train_dataloader = SeqDataloader(dataset, batch_size=args.batch_size, num_workers = args.data_loader_threads, distributed=True, test_only=False) print("Data loader set up successfully!") print("Number of minibatches: {}".format(len(train_dataloader))) if not os.path.isdir(args.exp_dir): os.makedirs(args.exp_dir) # ceate model model_config = config["model_config"] lstm = LSTMStack(model_config["feat_dim"], model_config["hidden_size"], model_config["num_layers"], model_config["dropout"], True) model = NnetAM(lstm, model_config["hidden_size"]*2, model_config["label_size"]) model.cuda() # setup the optimizer optimizer = th.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum) # Broadcast parameters and opterimizer state from rank 0 to all other processes. hvd.broadcast_parameters(model.state_dict(), root_rank=0) hvd.broadcast_optimizer_state(optimizer, root_rank=0) # Add Horovod Distributed Optimizer optimizer = hvd.DistributedOptimizer(optimizer, named_parameters=model.named_parameters()) if os.path.isfile(args.seed_model): checkpoint = th.load(args.seed_model) state_dict = checkpoint['model'] model.load_state_dict(state_dict) print("=> loaded checkpoint '{}' ".format(args.seed_model)) else: sys.stderr.write('ERROR: The model file %s does not exist!\n'%(model_file)) sys.exit(0) HCLG = args.den_dir + "/HCLG.fst" words_txt = args.den_dir + "/words.txt" silence_phones = args.den_dir + "/phones/silence.csl" if not os.path.isfile(HCLG): sys.stderr.write('ERROR: The HCLG file %s does not exist!\n'%(HCLG)) sys.exit(0) if not os.path.isfile(words_txt): sys.stderr.write('ERROR: The words.txt file %s does not exist!\n'%(words_txt)) sys.exit(0) if not os.path.isfile(silence_phones): sys.stderr.write('ERROR: The silence phone file %s does not exist!\n'%(silence_phones)) sys.exit(0) with open(silence_phones) as f: silence_ids = [int(i) for i in f.readline().strip().split(':')] f.close() if os.path.isfile(args.trans_model): trans_model = kaldi_hmm.TransitionModel() with kaldi_util.io.xopen(args.trans_model) as ki: trans_model.read(ki.stream(), ki.binary) else: sys.stderr.write('ERROR: The trans_model %s does not exist!\n'%(args.trans_model)) sys.exit(0) # now we can setup the decoder decoder_opts = LatticeFasterDecoderOptions() decoder_opts.beam = config["decoder_config"]["beam"] decoder_opts.lattice_beam = config["decoder_config"]["lattice_beam"] decoder_opts.max_active = config["decoder_config"]["max_active"] acoustic_scale = config["decoder_config"]["acoustic_scale"] decoder_opts.determinize_lattice = False #To produce raw state-level lattice instead of compact lattice asr_decoder = MappedLatticeFasterRecognizer.from_files( args.trans_model, HCLG, words_txt, 
acoustic_scale=acoustic_scale, decoder_opts=decoder_opts) prior = kaldi_util.io.read_matrix(args.prior_path).numpy() log_prior = th.tensor(np.log(prior[0]/np.sum(prior[0])), dtype=th.float) model.train() for epoch in range(args.num_epochs): run_train_epoch(model, optimizer, log_prior.cuda(), train_dataloader, epoch, asr_decoder, trans_model, silence_ids, args) # save model if hvd.rank() == 0: checkpoint={} checkpoint['model']=model.state_dict() checkpoint['optimizer']=optimizer.state_dict() checkpoint['epoch']=epoch output_file=args.exp_dir + '/model.se.'+ str(epoch) +'.tar' th.save(checkpoint, output_file) def run_train_epoch(model, optimizer, log_prior, dataloader, epoch, asr_decoder, trans_model, silence_ids, args): batch_time = utils.AverageMeter('Time', ':6.3f') losses = utils.AverageMeter('Loss', ':.4e') grad_norm = utils.AverageMeter('grad_norm', ':.4e') progress = utils.ProgressMeter(len(dataloader), batch_time, losses, grad_norm, prefix="Epoch: [{}]".format(epoch)) ce_criterion = nn.CrossEntropyLoss(ignore_index=-100, reduction='sum') if args.criterion == "mmi": se_criterion = ops.MMIFunction.apply else: se_criterion = ops.sMBRFunction.apply end = time.time() for i, batch in enumerate(dataloader, 0): feat = batch["x"] label = batch["y"] #pdf-ids for ce loss num_frs = batch["num_frs"] utt_ids = batch["utt_ids"] aux = batch["aux"] #trans_ids for se loss x = feat.to(th.float32) y = label.long() x = x.cuda() y = y.cuda() prediction = model(x) ce_loss = ce_criterion(prediction.view(-1, prediction.shape[2]), y.view(-1)) se_loss = 0.0 for j in range(len(num_frs)): log_like_j=prediction[j,:,:] log_like_j= log_like_j[:num_frs[j],:] log_like_j = log_like_j - log_prior #trans_id = label[j, :num_frs[j], 0].tolist() trans_id = th.from_numpy(aux[j][0][0].astype(int)).tolist() # print(len(trans_id), num_frs[j]) if args.criterion == "mmi": se_loss += se_criterion(log_like_j, asr_decoder, trans_model, trans_id) else: se_loss += se_criterion(log_like_j, asr_decoder, trans_model, trans_id, args.criterion, silence_ids) loss = se_loss.cuda() + args.ce_ratio * ce_loss optimizer.zero_grad() loss.backward() # Gradient Clipping (th 5.0) norm = nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) optimizer.step() grad_norm.update(norm) # update loss tot_frs = np.array(num_frs).sum() losses.update(loss.item()/tot_frs) # measure elapsed time batch_time.update(time.time() - end) # save model if hvd.rank() == 0 and i % args.save_freq == 0: checkpoint={} checkpoint['model']=model.state_dict() checkpoint['optimizer']=optimizer.state_dict() output_file=args.exp_dir + '/model.se.'+ str(i) +'.tar' th.save(checkpoint, output_file) if hvd.rank() == 0 and i % args.print_freq == 0: progress.print(i) if __name__ == '__main__': main()
bin/train_se.py
12,991
Copyright (c) 2019 Microsoft Corporation. All rights reserved. MIT License Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import pykaldi related modules Initialize Horovod ceate model setup the optimizer Broadcast parameters and opterimizer state from rank 0 to all other processes. Add Horovod Distributed Optimizer now we can setup the decoderTo produce raw state-level lattice instead of compact lattice save modelpdf-ids for ce losstrans_ids for se losstrans_id = label[j, :num_frs[j], 0].tolist() print(len(trans_id), num_frs[j]) Gradient Clipping (th 5.0) update loss measure elapsed time save model
2,269
en
0.790014
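Two ideas from the training loop above are worth isolating: the network's log-posteriors are turned into scaled log-likelihoods by subtracting a log state prior before lattice rescoring, and the sequence-discriminative loss is interpolated with a cross-entropy term. A minimal sketch on made-up shapes and values (50 frames and 3000 pdf-ids are assumptions for illustration only):

```python
import torch as th

# Pretend acoustic-model output: log-posteriors over pdf-ids for one utterance.
log_post = th.log_softmax(th.randn(50, 3000), dim=-1)      # (frames, pdf-ids)

# Stand-in prior; train_se.py loads it from final.occs and normalizes it.
log_prior = th.log(th.full((3000,), 1.0 / 3000))

# Pseudo log-likelihoods handed to the decoder: log p(s|x) - log p(s),
# which equals log p(x|s) up to a per-frame constant.
log_like = log_post - log_prior

# CE-regularized sequence objective, mirroring `loss = se_loss + ce_ratio * ce_loss`.
ce_ratio = 0.1
se_loss, ce_loss = th.tensor(120.0), th.tensor(95.0)        # placeholder loss values
loss = se_loss + ce_ratio * ce_loss
```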
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Ftrl optimizer.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.compiler.tests.xla_test import XLATestCase from tensorflow.python.framework import constant_op from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.training import adagrad from tensorflow.python.training import ftrl from tensorflow.python.training import gradient_descent class FtrlOptimizerTest(XLATestCase): def initVariableAndGradient(self, dtype): var0 = resource_variable_ops.ResourceVariable([0.0, 0.0], dtype=dtype) var1 = resource_variable_ops.ResourceVariable([0.0, 0.0], dtype=dtype) grads0 = constant_op.constant([0.1, 0.2], dtype=dtype) grads1 = constant_op.constant([0.02, 0.04], dtype=dtype) return var0, var1, grads0, grads1 def equivAdagradTest_FtrlPart(self, steps, dtype): var0, var1, grads0, grads1 = self.initVariableAndGradient(dtype) opt = ftrl.FtrlOptimizer( 3.0, learning_rate_power=-0.5, # using Adagrad learning rate initial_accumulator_value=0.1, l1_regularization_strength=0.0, l2_regularization_strength=0.0) ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) variables.global_variables_initializer().run() # Fetch params to validate initial values self.assertAllClose([0.0, 0.0], var0.eval()) self.assertAllClose([0.0, 0.0], var1.eval()) # Run Ftrl for a few steps for _ in range(steps): ftrl_update.run() return var0.eval(), var1.eval() def equivAdagradTest_AdagradPart(self, steps, dtype): var0, var1, grads0, grads1 = self.initVariableAndGradient(dtype) opt = adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1) adagrad_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) variables.global_variables_initializer().run() # Fetch params to validate initial values self.assertAllClose([0.0, 0.0], var0.eval()) self.assertAllClose([0.0, 0.0], var1.eval()) # Run Adagrad for a few steps for _ in range(steps): adagrad_update.run() return var0.eval(), var1.eval() def equivGradientDescentTest_FtrlPart(self, steps, dtype): var0, var1, grads0, grads1 = self.initVariableAndGradient(dtype) opt = ftrl.FtrlOptimizer( 3.0, learning_rate_power=-0.0, # using Fixed learning rate initial_accumulator_value=0.1, l1_regularization_strength=0.0, l2_regularization_strength=0.0) ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) variables.global_variables_initializer().run() # Fetch params to validate initial values self.assertAllClose([0.0, 0.0], var0.eval()) self.assertAllClose([0.0, 0.0], var1.eval()) # Run Ftrl for a few steps for _ in range(steps): ftrl_update.run() return var0.eval(), var1.eval() def equivGradientDescentTest_GradientDescentPart(self, steps, dtype): var0, var1, grads0, 
grads1 = self.initVariableAndGradient(dtype) opt = gradient_descent.GradientDescentOptimizer(3.0, name="sgd") sgd_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) variables.global_variables_initializer().run() # Fetch params to validate initial values self.assertAllClose([0.0, 0.0], var0.eval()) self.assertAllClose([0.0, 0.0], var1.eval()) # Run GradientDescent for a few steps for _ in range(steps): sgd_update.run() return var0.eval(), var1.eval() def testFtrlwithoutRegularization(self): for dtype in self.float_types: with self.test_session(), self.test_scope(): var0 = resource_variable_ops.ResourceVariable([0.0, 0.0], dtype=dtype) var1 = resource_variable_ops.ResourceVariable([0.0, 0.0], dtype=dtype) grads0 = constant_op.constant([0.1, 0.2], dtype=dtype) grads1 = constant_op.constant([0.01, 0.02], dtype=dtype) opt = ftrl.FtrlOptimizer( 3.0, initial_accumulator_value=0.1, l1_regularization_strength=0.0, l2_regularization_strength=0.0) ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) variables.global_variables_initializer().run() # Fetch params to validate initial values self.assertAllClose([0.0, 0.0], var0.eval()) self.assertAllClose([0.0, 0.0], var1.eval()) # Run 3 steps FTRL for _ in range(3): ftrl_update.run() # Validate updated params self.assertAllCloseAccordingToType( np.array([-2.60260963, -4.29698515]), var0.eval()) self.assertAllCloseAccordingToType( np.array([-0.28432083, -0.56694895]), var1.eval()) def testFtrlwithoutRegularization2(self): for dtype in self.float_types: with self.test_session(), self.test_scope(): var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype) var1 = resource_variable_ops.ResourceVariable([4.0, 3.0], dtype=dtype) grads0 = constant_op.constant([0.1, 0.2], dtype=dtype) grads1 = constant_op.constant([0.01, 0.02], dtype=dtype) opt = ftrl.FtrlOptimizer( 3.0, initial_accumulator_value=0.1, l1_regularization_strength=0.0, l2_regularization_strength=0.0) ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) variables.global_variables_initializer().run() # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], var0.eval()) self.assertAllClose([4.0, 3.0], var1.eval()) # Run 3 steps FTRL for _ in range(3): ftrl_update.run() # Validate updated params self.assertAllClose( np.array([-2.55607247, -3.98729396]), var0.eval(), 1e-5, 1e-5) self.assertAllClose( np.array([-0.28232238, -0.56096673]), var1.eval(), 1e-5, 1e-5) def testFtrlWithL1(self): for dtype in self.float_types: with self.test_session(), self.test_scope(): var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype) var1 = resource_variable_ops.ResourceVariable([4.0, 3.0], dtype=dtype) grads0 = constant_op.constant([0.1, 0.2], dtype=dtype) grads1 = constant_op.constant([0.01, 0.02], dtype=dtype) opt = ftrl.FtrlOptimizer( 3.0, initial_accumulator_value=0.1, l1_regularization_strength=0.001, l2_regularization_strength=0.0) ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) variables.global_variables_initializer().run() # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], var0.eval()) self.assertAllClose([4.0, 3.0], var1.eval()) # Run 10 steps FTRL for _ in range(10): ftrl_update.run() # Validate updated params self.assertAllClose(np.array([-7.66718769, -10.91273689]), var0.eval()) self.assertAllClose(np.array([-0.93460727, -1.86147261]), var1.eval()) def testFtrlWithL1_L2(self): for dtype in self.float_types: with self.test_session(), self.test_scope(): var0 = 
resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype) var1 = resource_variable_ops.ResourceVariable([4.0, 3.0], dtype=dtype) grads0 = constant_op.constant([0.1, 0.2], dtype=dtype) grads1 = constant_op.constant([0.01, 0.02], dtype=dtype) opt = ftrl.FtrlOptimizer( 3.0, initial_accumulator_value=0.1, l1_regularization_strength=0.001, l2_regularization_strength=2.0) ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) variables.global_variables_initializer().run() # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], var0.eval()) self.assertAllClose([4.0, 3.0], var1.eval()) # Run 10 steps FTRL for _ in range(10): ftrl_update.run() # Validate updated params self.assertAllClose(np.array([-0.24059935, -0.46829352]), var0.eval()) self.assertAllClose(np.array([-0.02406147, -0.04830509]), var1.eval()) def testFtrlWithL1_L2_L2Shrinkage(self): """Test the new FTRL op with support for l2 shrinkage. The addition of this parameter which places a constant pressure on weights towards the origin causes the gradient descent trajectory to differ. The weights will tend to have smaller magnitudes with this parameter set. """ for dtype in self.float_types: with self.test_session(), self.test_scope(): var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype) var1 = resource_variable_ops.ResourceVariable([4.0, 3.0], dtype=dtype) grads0 = constant_op.constant([0.1, 0.2], dtype=dtype) grads1 = constant_op.constant([0.01, 0.02], dtype=dtype) opt = ftrl.FtrlOptimizer( 3.0, initial_accumulator_value=0.1, l1_regularization_strength=0.001, l2_regularization_strength=2.0, l2_shrinkage_regularization_strength=0.1) ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) variables.global_variables_initializer().run() # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], var0.eval()) self.assertAllClose([4.0, 3.0], var1.eval()) # Run 10 steps FTRL for _ in range(10): ftrl_update.run() # Validate updated params self.assertAllClose(np.array([-0.21931979, -0.40642974]), var0.eval()) self.assertAllClose(np.array([-0.0282721, -0.07188385]), var1.eval()) # When variables are initialized with Zero, FTRL-Proximal has two properties: # 1. Without L1&L2 but with fixed learning rate, FTRL-Proximal is identical # with GradientDescent. # 2. Without L1&L2 but with adaptive learning rate, FTRL-Proximal is idential # with Adagrad. # So, basing on these two properties, we test if our implementation of # FTRL-Proximal performs same updates as Adagrad or GradientDescent. def testEquivAdagradwithoutRegularization(self): steps = 5 for dtype in self.float_types: with self.test_session(), self.test_scope(): val0, val1 = self.equivAdagradTest_FtrlPart(steps, dtype) with self.test_session(), self.test_scope(): val2, val3 = self.equivAdagradTest_AdagradPart(steps, dtype) self.assertAllClose(val0, val2) self.assertAllClose(val1, val3) def testEquivGradientDescentwithoutRegularization(self): steps = 5 for dtype in self.float_types: with self.test_session(), self.test_scope(): val0, val1 = self.equivGradientDescentTest_FtrlPart(steps, dtype) with self.test_session(), self.test_scope(): val2, val3 = self.equivGradientDescentTest_GradientDescentPart( steps, dtype) self.assertAllClose(val0, val2) self.assertAllClose(val1, val3) if __name__ == "__main__": test.main()
tensorflow/compiler/tests/ftrl_test.py
11,940
Test the new FTRL op with support for l2 shrinkage. The addition of this parameter, which places a constant pressure on weights towards the origin, causes the gradient descent trajectory to differ. The weights will tend to have smaller magnitudes with this parameter set. Tests for Ftrl optimizer. Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================== using Adagrad learning rate Fetch params to validate initial values Run Ftrl for a few steps Fetch params to validate initial values Run Adagrad for a few steps using Fixed learning rate Fetch params to validate initial values Run Ftrl for a few steps Fetch params to validate initial values Run GradientDescent for a few steps Fetch params to validate initial values Run 3 steps FTRL Validate updated params Fetch params to validate initial values Run 3 steps FTRL Validate updated params Fetch params to validate initial values Run 10 steps FTRL Validate updated params Fetch params to validate initial values Run 10 steps FTRL Validate updated params Fetch params to validate initial values Run 10 steps FTRL Validate updated params When variables are initialized with Zero, FTRL-Proximal has two properties: 1. Without L1&L2 but with fixed learning rate, FTRL-Proximal is identical with GradientDescent. 2. Without L1&L2 but with adaptive learning rate, FTRL-Proximal is identical with Adagrad. So, based on these two properties, we test whether our implementation of FTRL-Proximal performs the same updates as Adagrad or GradientDescent.
2,093
en
0.741843
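The comments above note that, with variables initialized to zero and no L1/L2 regularization, FTRL-Proximal with a fixed learning rate (learning_rate_power of zero) should update weights exactly like plain gradient descent. A minimal sketch of that check outside XLA, assuming TensorFlow 1.x and using made-up gradient values:

import numpy as np
import tensorflow as tf

def run_steps(optimizer, steps=5):
    # One variable, one constant gradient, applied for a few steps in a session.
    var = tf.Variable([0.0, 0.0], dtype=tf.float32)
    grad = tf.constant([0.1, 0.2], dtype=tf.float32)
    update = optimizer.apply_gradients([(grad, var)])
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for _ in range(steps):
            sess.run(update)
        return sess.run(var)

ftrl_vals = run_steps(tf.train.FtrlOptimizer(
    3.0, learning_rate_power=0.0,
    l1_regularization_strength=0.0, l2_regularization_strength=0.0))
sgd_vals = run_steps(tf.train.GradientDescentOptimizer(3.0))
# The two trajectories should coincide up to floating-point noise.
np.testing.assert_allclose(ftrl_vals, sgd_vals, rtol=1e-5)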
# Copyright (c) 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo.config import cfg from neutron.agent.common import config from neutron.agent.linux import external_process from neutron.agent.linux import keepalived from neutron.openstack.common import log as logging from neutron.tests.functional import base as functional_base from neutron.tests.unit.agent.linux import test_keepalived LOG = logging.getLogger(__name__) class KeepalivedManagerTestCase(functional_base.BaseSudoTestCase, test_keepalived.KeepalivedConfBaseMixin): def setUp(self): super(KeepalivedManagerTestCase, self).setUp() self.check_sudo_enabled() self._configure() def _configure(self): cfg.CONF.set_override('debug', True) config.setup_logging() config.register_root_helper(cfg.CONF) cfg.CONF.set_override('root_helper', self.root_helper, group='AGENT') def test_keepalived_spawn(self): expected_config = self._get_config() manager = keepalived.KeepalivedManager('router1', expected_config, conf_path=cfg.CONF.state_path, root_helper=self.root_helper) self.addCleanup(manager.disable) manager.spawn() process = external_process.ProcessManager( cfg.CONF, 'router1', self.root_helper, namespace=None, pids_path=cfg.CONF.state_path) self.assertTrue(process.active) config_path = manager._get_full_config_file_path('keepalived.conf') with open(config_path, 'r') as config_file: config_contents = config_file.read() self.assertEqual(expected_config.get_config_str(), config_contents)
neutron/tests/functional/agent/linux/test_keepalived.py
2,376
Copyright (c) 2014 Red Hat, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
603
en
0.868476
from pyquilted.quilted.section import Section class Work(Section): """The work section in a quilted resume The work object is a complex section. It contains blocks of jobs and optionally a list of slugs. As a section it mixes in the sectionable functionality. """ def __init__(self, blocks=None, slugs=None, icon=None): self.label = 'Work' self.icon = icon or 'fa-briefcase' self.blocks = blocks or [] self.compact = False def add_job(self, job): self.blocks.append(vars(job)) def add_slugs(self, slugs): self.slugs = slugs class Job: """The job block in the work section""" def __init__(self, dates=None, location=None, company=None, title=None, slugs=None, previously=None, **kwargs): self.dates = dates self.location = location self.company = company self.title = title self.slugs = slugs self.history = History(previously=previously).to_dict() class Slugs(): """The additional list of slugs in the work section""" def __init__(self, slugs=None): self.blocks = slugs class History(): def __init__(self, previously=None): self.previously = previously def to_dict(self): if self.previously: return vars(self) return None
pyquilted/quilted/work.py
1,356
The job block in the work section The additional list of slugs in the work section The work section in a quilted resume The work object is a complex section. It contains blocks of jobs and optionally a list of slugs. As a section it mixes in the sectionable functionality.
273
en
0.942622
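A small usage sketch of the classes above; the job details are invented and only meant to show how jobs end up stored as plain dicts via vars():

from pyquilted.quilted.work import Work, Job, Slugs

work = Work()
job = Job(dates='2018-2020', location='Remote', company='Acme Co',
          title='Engineer', slugs=['Built the reporting pipeline'],
          previously='Junior Engineer')
work.add_job(job)                       # appended to work.blocks as vars(job)
work.add_slugs(Slugs(slugs=['Volunteer mentor']))
print(work.label, len(work.blocks))     # -> Work 1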
# Generated by Django 2.2 on 2019-05-02 16:04 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('openbook_communities', '0021_auto_20190502_1754'), ] operations = [ migrations.AddIndex( model_name='communitymembership', index=models.Index(fields=['community', 'user'], name='openbook_co_communi_59b23c_idx'), ), ]
openbook_communities/migrations/0022_auto_20190502_1804.py
431
Generated by Django 2.2 on 2019-05-02 16:04
43
en
0.616323
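To inspect the SQL this index migration produces, Django's sqlmigrate command can be driven from Python; this sketch assumes a configured project with the openbook_communities app installed:

from django.core.management import call_command

# Prints the CREATE INDEX statement generated for the composite (community, user) index.
call_command('sqlmigrate', 'openbook_communities', '0022_auto_20190502_1804')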
"""Collect and parse kobo forms.""" from datetime import datetime, timedelta, timezone from os import getenv from typing import Dict, List from dateutil.parser import parse as dtparser from flask import request import requests from werkzeug.exceptions import BadRequest, InternalServerError, NotFound def get_kobo_params(): """Collect and validate request parameters and environment variables.""" kobo_username = getenv('KOBO_USERNAME') if kobo_username is None: raise InternalServerError('Missing backend parameter: KOBO_USERNAME') kobo_pw = getenv('KOBO_PW') if kobo_pw is None: raise InternalServerError('Missing backend parameter: KOBO_PW') form_name = request.args.get('formName') if form_name is None: raise BadRequest('Missing query parameter: formName') datetime_field = request.args.get('datetimeField') if datetime_field is None: raise BadRequest('Missing parameter datetimeField') geom_field = request.args.get('geomField') if geom_field is None: raise BadRequest('Missing parameter: geomField') filters = {} filters_params = request.args.get('filters', None) if filters_params is not None: filters = dict([f.split('=') for f in filters_params.split(',')]) form_fields = dict(name=form_name, datetime=datetime_field, geom=geom_field, filters=filters) auth = (kobo_username, kobo_pw) return auth, form_fields def parse_form_field(value: str, field_type: str): """Parse strings into type according to field_type provided.""" if field_type == 'decimal': return float(value) elif field_type == 'integer': return int(value) elif field_type in ('datetime', 'date'): return dtparser(value).astimezone(timezone.utc) elif field_type == 'geopoint': lat, lon, _, _ = value.split(' ') return {'lat': float(lat), 'lon': float(lon)} else: return value def parse_form_response(form_dict: Dict[str, str], form_fields: Dict[str, str], labels: List[str]): """Transform a Kobo form dictionary into a format that is used by the frontend.""" form_data = {k: parse_form_field(form_dict.get(k), v) for k, v in labels.items() if k not in (form_fields.get('geom'), form_fields.get('datetime'))} datetime_field = form_fields.get('datetime') datetime_value = parse_form_field(form_dict.get(datetime_field), labels.get(datetime_field)) geom_field = form_fields.get('geom') latlon_dict = parse_form_field(form_dict.get(geom_field), labels.get(geom_field)) status = form_dict.get('_validation_status').get('label', None) form_data = {**form_data, **latlon_dict, 'date': datetime_value, 'status': status} return form_data def parse_datetime_params(): """Transform into datetime objects used for filtering form responses.""" begin_datetime_str = request.args.get('beginDateTime', '2000-01-01') begin_datetime = dtparser(begin_datetime_str).replace(tzinfo=timezone.utc) end_datetime_str = request.args.get('endDateTime') if end_datetime_str is not None: end_datetime = dtparser(end_datetime_str) else: # 10 years. end_datetime = datetime.now() + timedelta(days=365 * 10) end_datetime = end_datetime.replace(tzinfo=timezone.utc) # strptime function includes hours, minutes, and seconds as 00 by default. # This check is done in case the begin and end datetime values are the same. if end_datetime == begin_datetime: end_datetime = end_datetime + timedelta(days=1) if begin_datetime > end_datetime: raise BadRequest('beginDateTime value must be lower than endDateTime') return begin_datetime, end_datetime def get_responses_from_kobo(auth, form_name): """ Request kobo api to collect all the information related to a form. 
Also, retrieve the form responses for parsing and filtering. """ form_url = request.args.get('koboUrl') if form_url is None: raise BadRequest('Missing parameter koboUrl') resp = requests.get(form_url, auth=auth) resp.raise_for_status() kobo_user_metadata = resp.json() # Find form and get results. forms_iterator = (d for d in kobo_user_metadata.get('results') if d.get('name') == form_name) form_metadata = next(forms_iterator, None) if form_metadata is None: raise NotFound('Form not found') # Additional request to get label mappings. resp = requests.get(form_metadata.get('url'), auth=auth) resp.raise_for_status() form_metadata = resp.json() # Get form fields and field type used for parsing. form_labels = {f.get('$autoname'): f.get('type') for f in form_metadata.get('content').get('survey')} # Get all form responses using metadata 'data' key resp = requests.get(form_metadata.get('data'), auth=auth) resp.raise_for_status() form_responses = resp.json().get('results') return form_responses, form_labels def get_form_responses(begin_datetime, end_datetime): """Get all form responses using Kobo api.""" auth, form_fields = get_kobo_params() form_responses, form_labels = get_responses_from_kobo(auth, form_fields.get('name')) forms = [parse_form_response(f, form_fields, form_labels) for f in form_responses] filtered_forms = [] for form in forms: date_value = form.get('date') conditions = [form.get(k) == v for k, v in form_fields.get('filters').items()] conditions.append(begin_datetime <= date_value) conditions.append(date_value < end_datetime) if all(conditions) is False: continue filtered_forms.append(form) sorted_forms = sorted(filtered_forms, key=lambda x: x.get('date')) # Transform date into string. sorted_forms = [{**f, 'date': f.get('date').date().isoformat()} for f in sorted_forms] return sorted_forms
api-flask/app/kobo.py
6,008
Get all form responses using Kobo api. Collect and validate request parameters and environment variables. Request kobo api to collect all the information related to a form. Also, retrieve the form responses for parsing and filtering. Transform into datetime objects used for filtering form responses. Parse strings into type according to field_type provided. Transform a Kobo form dictionary into a format that is used by the frontend. Collect and parse kobo forms. 10 years. strptime function includes hours, minutes, and seconds as 00 by default. This check is done in case the begin and end datetime values are the same. Find form and get results. Additional request to get label mappings. Get form fields and field type used for parsing. Get all form responses using metadata 'data' key Transform date into string.
821
en
0.846691
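A quick illustration of the parse_form_field helper described above; the import path is assumed from the file location (api-flask/app/kobo.py) and the sample values are invented:

from app.kobo import parse_form_field

# geopoint strings carry four space-separated numbers; only lat/lon are kept.
point = parse_form_field('12.37 -1.53 305.0 4.0', 'geopoint')
assert point == {'lat': 12.37, 'lon': -1.53}

# datetime strings are parsed with dateutil and normalised to UTC.
when = parse_form_field('2021-06-01T10:00:00+03:00', 'datetime')
assert when.utcoffset().total_seconds() == 0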
import logging import ldap import six from collections import Mapping, Iterable from ldap import modlist from nodeconductor.structure import ServiceBackend, ServiceBackendError logger = logging.getLogger(__name__) class LDAPBackendError(ServiceBackendError): pass class UnauthorizedError(LDAPBackendError): pass class LDAPBackend(ServiceBackend): """ Interface to LDAP API. https://www.python-ldap.org/doc/html/ """ def __init__(self, settings): self.settings = settings self.user_base_dn = settings.options.get('user_base_dn', '') self.client = self._get_client() def _get_client(self): username = ','.join(['uid=%s' % self.settings.username, self.user_base_dn]) try: client = ldap.initialize(self.settings.backend_url) client.simple_bind_s(username, self.settings.password) except ldap.LDAPError as e: six.reraise(UnauthorizedError, e) return client def ping(self, raise_exception=False): tries_count = 3 for _ in range(tries_count): try: self.client.search_s(self.user_base_dn, ldap.SCOPE_SUBTREE) except ldap.LDAPError as e: if raise_exception: six.reraise(LDAPBackendError, e) else: return True return False def sync(self): pass def create_ldap_user(self, ldap_user): dn = ('uid=%s,' % ldap_user.name) + self.user_base_dn # python-ldap rises TypeError if unicode strings are used. data = modlist.addModlist(self._unicode_to_string(ldap_user.attributes)) try: self.client.add_s(dn, data) except ldap.LDAPError as e: six.reraise(LDAPBackendError, e) else: ldap_user.backend_id = dn ldap_user.save(update_fields=['backend_id']) def delete_ldap_user(self, ldap_user): dn = ldap_user.backend_id try: # XXX: Change to disabling user instead self.client.delete_s(dn) except ldap.LDAPError as e: six.reraise(LDAPBackendError, e) def _unicode_to_string(self, data): # http://stackoverflow.com/a/1254499/4591416 if isinstance(data, basestring): return str(data) elif isinstance(data, Mapping): return dict(map(self._unicode_to_string, data.iteritems())) elif isinstance(data, Iterable): return type(data)(map(self._unicode_to_string, data)) else: return data
src/nodeconductor_ldap/backend.py
2,599
Interface to LDAP API. https://www.python-ldap.org/doc/html/ python-ldap raises TypeError if unicode strings are used. XXX: Change to disabling user instead http://stackoverflow.com/a/1254499/4591416
200
en
0.509737
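A minimal sketch of the modlist step that create_ldap_user relies on, assuming python-ldap is installed on Python 2 (matching the unicode-to-str conversion above); the attribute values are invented and no server is contacted:

from ldap import modlist

attributes = {
    'objectClass': ['inetOrgPerson'],
    'cn': 'Alice Example',
    'sn': 'Example',
    'mail': 'alice@example.org',
}
dn = 'uid=alice,ou=users,dc=example,dc=org'
# addModlist turns the dict into the (attr, values) tuples expected by add_s.
data = modlist.addModlist(attributes)
print(dn)
print(data)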
from operator import attrgetter import pyangbind.lib.xpathhelper as xpathhelper from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType from pyangbind.lib.base import PybindBase from decimal import Decimal from bitarray import bitarray import __builtin__ import flow_action_list class flow_id(PybindBase): """ This class was auto-generated by the PythonClass plugin for PYANG from YANG module brocade-openflow-operational - based on the path /openflow-state/flow-id. Each member element of the container is represented as a class variable - with a specific YANG type. """ __slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__flow_id','__priority','__status','__in_port','__in_vlan','__source_mac','__destination_mac','__ether_type','__ip_protocol','__ip_protocol_source_port','__ip_protocol_destination_port','__source_ip','__destination_ip','__source_ipv6','__destination_ipv6','__instructions','__action_data','__meter_id','__vlan_upbits','__nw_tos','__source_ip_mask','__destination_ip_mask','__total_packets','__total_bytes','__flow_action_list',) _yang_name = 'flow-id' _rest_name = 'flow-id' _pybind_generated_by = 'container' def __init__(self, *args, **kwargs): path_helper_ = kwargs.pop("path_helper", None) if path_helper_ is False: self._path_helper = False elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper): self._path_helper = path_helper_ elif hasattr(self, "_parent"): path_helper_ = getattr(self._parent, "_path_helper", False) self._path_helper = path_helper_ else: self._path_helper = False extmethods = kwargs.pop("extmethods", None) if extmethods is False: self._extmethods = False elif extmethods is not None and isinstance(extmethods, dict): self._extmethods = extmethods elif hasattr(self, "_parent"): extmethods = getattr(self._parent, "_extmethods", None) self._extmethods = extmethods else: self._extmethods = False self.__ether_type = YANGDynClass(base=unicode, is_leaf=True, yang_name="ether-type", rest_name="ether-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False) self.__destination_ipv6 = YANGDynClass(base=unicode, is_leaf=True, yang_name="destination-ipv6", rest_name="destination-ipv6", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False) self.__total_bytes = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="total-bytes", rest_name="total-bytes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint64', is_config=False) self.__ip_protocol_destination_port = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ip-protocol-destination-port", rest_name="ip-protocol-destination-port", parent=self, path_helper=self._path_helper, 
extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False) self.__action_data = YANGDynClass(base=unicode, is_leaf=True, yang_name="action-data", rest_name="action-data", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False) self.__ip_protocol_source_port = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ip-protocol-source-port", rest_name="ip-protocol-source-port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False) self.__priority = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="priority", rest_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False) self.__total_packets = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="total-packets", rest_name="total-packets", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint64', is_config=False) self.__source_ipv6 = YANGDynClass(base=unicode, is_leaf=True, yang_name="source-ipv6", rest_name="source-ipv6", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False) self.__flow_id = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="flow-id", rest_name="flow-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False) self.__destination_mac = YANGDynClass(base=unicode, is_leaf=True, yang_name="destination-mac", rest_name="destination-mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False) self.__in_port = YANGDynClass(base=unicode, is_leaf=True, yang_name="in-port", rest_name="in-port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False) self.__status = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", 
restriction_arg={u'dcm-flow-pending-modify': {'value': 3}, u'dcm-flow-programmed': {'value': 4}, u'dcm-flow-pending-add': {'value': 1}, u'dcm-flow-pending-delete': {'value': 2}, u'dcm-flow-not-programmed': {'value': 0}},), is_leaf=True, yang_name="status", rest_name="status", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='flow-status', is_config=False) self.__destination_ip = YANGDynClass(base=unicode, is_leaf=True, yang_name="destination-ip", rest_name="destination-ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False) self.__ip_protocol = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ip-protocol", rest_name="ip-protocol", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False) self.__flow_action_list = YANGDynClass(base=YANGListType("action_idx",flow_action_list.flow_action_list, yang_name="flow-action-list", rest_name="flow-action-list", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='action-idx', extensions={u'tailf-common': {u'callpoint': u'openflow-flow-action', u'cli-suppress-show-path': None}}), is_container='list', yang_name="flow-action-list", rest_name="flow-action-list", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'openflow-flow-action', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='list', is_config=False) self.__source_mac = YANGDynClass(base=unicode, is_leaf=True, yang_name="source-mac", rest_name="source-mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False) self.__nw_tos = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="nw-tos", rest_name="nw-tos", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False) self.__meter_id = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="meter-id", rest_name="meter-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False) self.__instructions = YANGDynClass(base=unicode, is_leaf=True, yang_name="instructions", rest_name="instructions", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, 
namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False) self.__in_vlan = YANGDynClass(base=unicode, is_leaf=True, yang_name="in-vlan", rest_name="in-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False) self.__source_ip = YANGDynClass(base=unicode, is_leaf=True, yang_name="source-ip", rest_name="source-ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False) self.__source_ip_mask = YANGDynClass(base=unicode, is_leaf=True, yang_name="source-ip-mask", rest_name="source-ip-mask", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False) self.__destination_ip_mask = YANGDynClass(base=unicode, is_leaf=True, yang_name="destination-ip-mask", rest_name="destination-ip-mask", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False) self.__vlan_upbits = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="vlan-upbits", rest_name="vlan-upbits", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False) load = kwargs.pop("load", None) if args: if len(args) > 1: raise TypeError("cannot create a YANG container with >1 argument") all_attr = True for e in self._pyangbind_elements: if not hasattr(args[0], e): all_attr = False break if not all_attr: raise ValueError("Supplied object did not have the correct attributes") for e in self._pyangbind_elements: nobj = getattr(args[0], e) if nobj._changed() is False: continue setmethod = getattr(self, "_set_%s" % e) if load is None: setmethod(getattr(args[0], e)) else: setmethod(getattr(args[0], e), load=load) def _path(self): if hasattr(self, "_parent"): return self._parent._path()+[self._yang_name] else: return [u'openflow-state', u'flow-id'] def _rest_path(self): if hasattr(self, "_parent"): if self._rest_name: return self._parent._rest_path()+[self._rest_name] else: return self._parent._rest_path() else: return [u'openflow-state', u'flow-id'] def _get_flow_id(self): """ Getter method for flow_id, mapped from YANG variable /openflow_state/flow_id/flow_id (uint32) YANG Description: Flow ID """ return self.__flow_id def _set_flow_id(self, v, load=False): """ Setter method for flow_id, mapped from YANG variable /openflow_state/flow_id/flow_id (uint32) If this variable is read-only (config: false) in the source YANG file, then _set_flow_id is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_flow_id() directly. 
YANG Description: Flow ID """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="flow-id", rest_name="flow-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """flow_id must be of a type compatible with uint32""", 'defined-type': "uint32", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="flow-id", rest_name="flow-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False)""", }) self.__flow_id = t if hasattr(self, '_set'): self._set() def _unset_flow_id(self): self.__flow_id = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="flow-id", rest_name="flow-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False) def _get_priority(self): """ Getter method for priority, mapped from YANG variable /openflow_state/flow_id/priority (uint32) YANG Description: Priority """ return self.__priority def _set_priority(self, v, load=False): """ Setter method for priority, mapped from YANG variable /openflow_state/flow_id/priority (uint32) If this variable is read-only (config: false) in the source YANG file, then _set_priority is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_priority() directly. 
YANG Description: Priority """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="priority", rest_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """priority must be of a type compatible with uint32""", 'defined-type': "uint32", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="priority", rest_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False)""", }) self.__priority = t if hasattr(self, '_set'): self._set() def _unset_priority(self): self.__priority = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="priority", rest_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False) def _get_status(self): """ Getter method for status, mapped from YANG variable /openflow_state/flow_id/status (flow-status) YANG Description: Status """ return self.__status def _set_status(self, v, load=False): """ Setter method for status, mapped from YANG variable /openflow_state/flow_id/status (flow-status) If this variable is read-only (config: false) in the source YANG file, then _set_status is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_status() directly. 
YANG Description: Status """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dcm-flow-pending-modify': {'value': 3}, u'dcm-flow-programmed': {'value': 4}, u'dcm-flow-pending-add': {'value': 1}, u'dcm-flow-pending-delete': {'value': 2}, u'dcm-flow-not-programmed': {'value': 0}},), is_leaf=True, yang_name="status", rest_name="status", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='flow-status', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """status must be of a type compatible with flow-status""", 'defined-type': "brocade-openflow-operational:flow-status", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dcm-flow-pending-modify': {'value': 3}, u'dcm-flow-programmed': {'value': 4}, u'dcm-flow-pending-add': {'value': 1}, u'dcm-flow-pending-delete': {'value': 2}, u'dcm-flow-not-programmed': {'value': 0}},), is_leaf=True, yang_name="status", rest_name="status", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='flow-status', is_config=False)""", }) self.__status = t if hasattr(self, '_set'): self._set() def _unset_status(self): self.__status = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dcm-flow-pending-modify': {'value': 3}, u'dcm-flow-programmed': {'value': 4}, u'dcm-flow-pending-add': {'value': 1}, u'dcm-flow-pending-delete': {'value': 2}, u'dcm-flow-not-programmed': {'value': 0}},), is_leaf=True, yang_name="status", rest_name="status", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='flow-status', is_config=False) def _get_in_port(self): """ Getter method for in_port, mapped from YANG variable /openflow_state/flow_id/in_port (string) YANG Description: In Port """ return self.__in_port def _set_in_port(self, v, load=False): """ Setter method for in_port, mapped from YANG variable /openflow_state/flow_id/in_port (string) If this variable is read-only (config: false) in the source YANG file, then _set_in_port is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_in_port() directly. 
YANG Description: In Port """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="in-port", rest_name="in-port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """in_port must be of a type compatible with string""", 'defined-type': "string", 'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="in-port", rest_name="in-port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)""", }) self.__in_port = t if hasattr(self, '_set'): self._set() def _unset_in_port(self): self.__in_port = YANGDynClass(base=unicode, is_leaf=True, yang_name="in-port", rest_name="in-port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False) def _get_in_vlan(self): """ Getter method for in_vlan, mapped from YANG variable /openflow_state/flow_id/in_vlan (string) YANG Description: In Vlan """ return self.__in_vlan def _set_in_vlan(self, v, load=False): """ Setter method for in_vlan, mapped from YANG variable /openflow_state/flow_id/in_vlan (string) If this variable is read-only (config: false) in the source YANG file, then _set_in_vlan is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_in_vlan() directly. 
YANG Description: In Vlan """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="in-vlan", rest_name="in-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """in_vlan must be of a type compatible with string""", 'defined-type': "string", 'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="in-vlan", rest_name="in-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)""", }) self.__in_vlan = t if hasattr(self, '_set'): self._set() def _unset_in_vlan(self): self.__in_vlan = YANGDynClass(base=unicode, is_leaf=True, yang_name="in-vlan", rest_name="in-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False) def _get_source_mac(self): """ Getter method for source_mac, mapped from YANG variable /openflow_state/flow_id/source_mac (string) YANG Description: Source Mac """ return self.__source_mac def _set_source_mac(self, v, load=False): """ Setter method for source_mac, mapped from YANG variable /openflow_state/flow_id/source_mac (string) If this variable is read-only (config: false) in the source YANG file, then _set_source_mac is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_source_mac() directly. 
YANG Description: Source Mac """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="source-mac", rest_name="source-mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """source_mac must be of a type compatible with string""", 'defined-type': "string", 'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="source-mac", rest_name="source-mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)""", }) self.__source_mac = t if hasattr(self, '_set'): self._set() def _unset_source_mac(self): self.__source_mac = YANGDynClass(base=unicode, is_leaf=True, yang_name="source-mac", rest_name="source-mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False) def _get_destination_mac(self): """ Getter method for destination_mac, mapped from YANG variable /openflow_state/flow_id/destination_mac (string) YANG Description: Destination Mac """ return self.__destination_mac def _set_destination_mac(self, v, load=False): """ Setter method for destination_mac, mapped from YANG variable /openflow_state/flow_id/destination_mac (string) If this variable is read-only (config: false) in the source YANG file, then _set_destination_mac is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_destination_mac() directly. 
YANG Description: Destination Mac """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="destination-mac", rest_name="destination-mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """destination_mac must be of a type compatible with string""", 'defined-type': "string", 'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="destination-mac", rest_name="destination-mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)""", }) self.__destination_mac = t if hasattr(self, '_set'): self._set() def _unset_destination_mac(self): self.__destination_mac = YANGDynClass(base=unicode, is_leaf=True, yang_name="destination-mac", rest_name="destination-mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False) def _get_ether_type(self): """ Getter method for ether_type, mapped from YANG variable /openflow_state/flow_id/ether_type (string) YANG Description: Ether type """ return self.__ether_type def _set_ether_type(self, v, load=False): """ Setter method for ether_type, mapped from YANG variable /openflow_state/flow_id/ether_type (string) If this variable is read-only (config: false) in the source YANG file, then _set_ether_type is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_ether_type() directly. 
YANG Description: Ether type """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="ether-type", rest_name="ether-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """ether_type must be of a type compatible with string""", 'defined-type': "string", 'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="ether-type", rest_name="ether-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)""", }) self.__ether_type = t if hasattr(self, '_set'): self._set() def _unset_ether_type(self): self.__ether_type = YANGDynClass(base=unicode, is_leaf=True, yang_name="ether-type", rest_name="ether-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False) def _get_ip_protocol(self): """ Getter method for ip_protocol, mapped from YANG variable /openflow_state/flow_id/ip_protocol (uint32) YANG Description: IP Protocol """ return self.__ip_protocol def _set_ip_protocol(self, v, load=False): """ Setter method for ip_protocol, mapped from YANG variable /openflow_state/flow_id/ip_protocol (uint32) If this variable is read-only (config: false) in the source YANG file, then _set_ip_protocol is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_ip_protocol() directly. 
YANG Description: IP Protocol """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ip-protocol", rest_name="ip-protocol", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """ip_protocol must be of a type compatible with uint32""", 'defined-type': "uint32", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ip-protocol", rest_name="ip-protocol", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False)""", }) self.__ip_protocol = t if hasattr(self, '_set'): self._set() def _unset_ip_protocol(self): self.__ip_protocol = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ip-protocol", rest_name="ip-protocol", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False) def _get_ip_protocol_source_port(self): """ Getter method for ip_protocol_source_port, mapped from YANG variable /openflow_state/flow_id/ip_protocol_source_port (uint32) YANG Description: IP Protocol Source Port """ return self.__ip_protocol_source_port def _set_ip_protocol_source_port(self, v, load=False): """ Setter method for ip_protocol_source_port, mapped from YANG variable /openflow_state/flow_id/ip_protocol_source_port (uint32) If this variable is read-only (config: false) in the source YANG file, then _set_ip_protocol_source_port is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_ip_protocol_source_port() directly. 
YANG Description: IP Protocol Source Port """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ip-protocol-source-port", rest_name="ip-protocol-source-port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """ip_protocol_source_port must be of a type compatible with uint32""", 'defined-type': "uint32", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ip-protocol-source-port", rest_name="ip-protocol-source-port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False)""", }) self.__ip_protocol_source_port = t if hasattr(self, '_set'): self._set() def _unset_ip_protocol_source_port(self): self.__ip_protocol_source_port = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ip-protocol-source-port", rest_name="ip-protocol-source-port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False) def _get_ip_protocol_destination_port(self): """ Getter method for ip_protocol_destination_port, mapped from YANG variable /openflow_state/flow_id/ip_protocol_destination_port (uint32) YANG Description: IP Protocol Destination Port """ return self.__ip_protocol_destination_port def _set_ip_protocol_destination_port(self, v, load=False): """ Setter method for ip_protocol_destination_port, mapped from YANG variable /openflow_state/flow_id/ip_protocol_destination_port (uint32) If this variable is read-only (config: false) in the source YANG file, then _set_ip_protocol_destination_port is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_ip_protocol_destination_port() directly. 
YANG Description: IP Protocol Destination Port """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ip-protocol-destination-port", rest_name="ip-protocol-destination-port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """ip_protocol_destination_port must be of a type compatible with uint32""", 'defined-type': "uint32", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ip-protocol-destination-port", rest_name="ip-protocol-destination-port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False)""", }) self.__ip_protocol_destination_port = t if hasattr(self, '_set'): self._set() def _unset_ip_protocol_destination_port(self): self.__ip_protocol_destination_port = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ip-protocol-destination-port", rest_name="ip-protocol-destination-port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False) def _get_source_ip(self): """ Getter method for source_ip, mapped from YANG variable /openflow_state/flow_id/source_ip (string) YANG Description: Source IPv4 """ return self.__source_ip def _set_source_ip(self, v, load=False): """ Setter method for source_ip, mapped from YANG variable /openflow_state/flow_id/source_ip (string) If this variable is read-only (config: false) in the source YANG file, then _set_source_ip is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_source_ip() directly. 
YANG Description: Source IPv4 """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="source-ip", rest_name="source-ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """source_ip must be of a type compatible with string""", 'defined-type': "string", 'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="source-ip", rest_name="source-ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)""", }) self.__source_ip = t if hasattr(self, '_set'): self._set() def _unset_source_ip(self): self.__source_ip = YANGDynClass(base=unicode, is_leaf=True, yang_name="source-ip", rest_name="source-ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False) def _get_destination_ip(self): """ Getter method for destination_ip, mapped from YANG variable /openflow_state/flow_id/destination_ip (string) YANG Description: Destination IPv4 """ return self.__destination_ip def _set_destination_ip(self, v, load=False): """ Setter method for destination_ip, mapped from YANG variable /openflow_state/flow_id/destination_ip (string) If this variable is read-only (config: false) in the source YANG file, then _set_destination_ip is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_destination_ip() directly. 
YANG Description: Destination IPv4 """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="destination-ip", rest_name="destination-ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """destination_ip must be of a type compatible with string""", 'defined-type': "string", 'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="destination-ip", rest_name="destination-ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)""", }) self.__destination_ip = t if hasattr(self, '_set'): self._set() def _unset_destination_ip(self): self.__destination_ip = YANGDynClass(base=unicode, is_leaf=True, yang_name="destination-ip", rest_name="destination-ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False) def _get_source_ipv6(self): """ Getter method for source_ipv6, mapped from YANG variable /openflow_state/flow_id/source_ipv6 (string) YANG Description: Source IPv6 Address """ return self.__source_ipv6 def _set_source_ipv6(self, v, load=False): """ Setter method for source_ipv6, mapped from YANG variable /openflow_state/flow_id/source_ipv6 (string) If this variable is read-only (config: false) in the source YANG file, then _set_source_ipv6 is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_source_ipv6() directly. 
YANG Description: Source IPv6 Address """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="source-ipv6", rest_name="source-ipv6", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """source_ipv6 must be of a type compatible with string""", 'defined-type': "string", 'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="source-ipv6", rest_name="source-ipv6", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)""", }) self.__source_ipv6 = t if hasattr(self, '_set'): self._set() def _unset_source_ipv6(self): self.__source_ipv6 = YANGDynClass(base=unicode, is_leaf=True, yang_name="source-ipv6", rest_name="source-ipv6", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False) def _get_destination_ipv6(self): """ Getter method for destination_ipv6, mapped from YANG variable /openflow_state/flow_id/destination_ipv6 (string) YANG Description: Destination IPv6 Address """ return self.__destination_ipv6 def _set_destination_ipv6(self, v, load=False): """ Setter method for destination_ipv6, mapped from YANG variable /openflow_state/flow_id/destination_ipv6 (string) If this variable is read-only (config: false) in the source YANG file, then _set_destination_ipv6 is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_destination_ipv6() directly. 
YANG Description: Destination IPv6 Address """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="destination-ipv6", rest_name="destination-ipv6", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """destination_ipv6 must be of a type compatible with string""", 'defined-type': "string", 'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="destination-ipv6", rest_name="destination-ipv6", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)""", }) self.__destination_ipv6 = t if hasattr(self, '_set'): self._set() def _unset_destination_ipv6(self): self.__destination_ipv6 = YANGDynClass(base=unicode, is_leaf=True, yang_name="destination-ipv6", rest_name="destination-ipv6", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False) def _get_instructions(self): """ Getter method for instructions, mapped from YANG variable /openflow_state/flow_id/instructions (string) YANG Description: Instructions """ return self.__instructions def _set_instructions(self, v, load=False): """ Setter method for instructions, mapped from YANG variable /openflow_state/flow_id/instructions (string) If this variable is read-only (config: false) in the source YANG file, then _set_instructions is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_instructions() directly. 
YANG Description: Instructions """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="instructions", rest_name="instructions", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """instructions must be of a type compatible with string""", 'defined-type': "string", 'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="instructions", rest_name="instructions", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)""", }) self.__instructions = t if hasattr(self, '_set'): self._set() def _unset_instructions(self): self.__instructions = YANGDynClass(base=unicode, is_leaf=True, yang_name="instructions", rest_name="instructions", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False) def _get_action_data(self): """ Getter method for action_data, mapped from YANG variable /openflow_state/flow_id/action_data (string) YANG Description: Action """ return self.__action_data def _set_action_data(self, v, load=False): """ Setter method for action_data, mapped from YANG variable /openflow_state/flow_id/action_data (string) If this variable is read-only (config: false) in the source YANG file, then _set_action_data is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_action_data() directly. 
YANG Description: Action """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="action-data", rest_name="action-data", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """action_data must be of a type compatible with string""", 'defined-type': "string", 'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="action-data", rest_name="action-data", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)""", }) self.__action_data = t if hasattr(self, '_set'): self._set() def _unset_action_data(self): self.__action_data = YANGDynClass(base=unicode, is_leaf=True, yang_name="action-data", rest_name="action-data", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False) def _get_meter_id(self): """ Getter method for meter_id, mapped from YANG variable /openflow_state/flow_id/meter_id (uint32) YANG Description: Meter id """ return self.__meter_id def _set_meter_id(self, v, load=False): """ Setter method for meter_id, mapped from YANG variable /openflow_state/flow_id/meter_id (uint32) If this variable is read-only (config: false) in the source YANG file, then _set_meter_id is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_meter_id() directly. 
YANG Description: Meter id """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="meter-id", rest_name="meter-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """meter_id must be of a type compatible with uint32""", 'defined-type': "uint32", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="meter-id", rest_name="meter-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False)""", }) self.__meter_id = t if hasattr(self, '_set'): self._set() def _unset_meter_id(self): self.__meter_id = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="meter-id", rest_name="meter-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False) def _get_vlan_upbits(self): """ Getter method for vlan_upbits, mapped from YANG variable /openflow_state/flow_id/vlan_upbits (uint32) YANG Description: Vlan Priority """ return self.__vlan_upbits def _set_vlan_upbits(self, v, load=False): """ Setter method for vlan_upbits, mapped from YANG variable /openflow_state/flow_id/vlan_upbits (uint32) If this variable is read-only (config: false) in the source YANG file, then _set_vlan_upbits is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_vlan_upbits() directly. 
YANG Description: Vlan Priority """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="vlan-upbits", rest_name="vlan-upbits", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """vlan_upbits must be of a type compatible with uint32""", 'defined-type': "uint32", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="vlan-upbits", rest_name="vlan-upbits", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False)""", }) self.__vlan_upbits = t if hasattr(self, '_set'): self._set() def _unset_vlan_upbits(self): self.__vlan_upbits = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="vlan-upbits", rest_name="vlan-upbits", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False) def _get_nw_tos(self): """ Getter method for nw_tos, mapped from YANG variable /openflow_state/flow_id/nw_tos (uint32) YANG Description: IP DSCP """ return self.__nw_tos def _set_nw_tos(self, v, load=False): """ Setter method for nw_tos, mapped from YANG variable /openflow_state/flow_id/nw_tos (uint32) If this variable is read-only (config: false) in the source YANG file, then _set_nw_tos is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_nw_tos() directly. 
YANG Description: IP DSCP """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="nw-tos", rest_name="nw-tos", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """nw_tos must be of a type compatible with uint32""", 'defined-type': "uint32", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="nw-tos", rest_name="nw-tos", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False)""", }) self.__nw_tos = t if hasattr(self, '_set'): self._set() def _unset_nw_tos(self): self.__nw_tos = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="nw-tos", rest_name="nw-tos", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False) def _get_source_ip_mask(self): """ Getter method for source_ip_mask, mapped from YANG variable /openflow_state/flow_id/source_ip_mask (string) YANG Description: Source IPv4 Mask """ return self.__source_ip_mask def _set_source_ip_mask(self, v, load=False): """ Setter method for source_ip_mask, mapped from YANG variable /openflow_state/flow_id/source_ip_mask (string) If this variable is read-only (config: false) in the source YANG file, then _set_source_ip_mask is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_source_ip_mask() directly. 
YANG Description: Source IPv4 Mask """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="source-ip-mask", rest_name="source-ip-mask", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """source_ip_mask must be of a type compatible with string""", 'defined-type': "string", 'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="source-ip-mask", rest_name="source-ip-mask", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)""", }) self.__source_ip_mask = t if hasattr(self, '_set'): self._set() def _unset_source_ip_mask(self): self.__source_ip_mask = YANGDynClass(base=unicode, is_leaf=True, yang_name="source-ip-mask", rest_name="source-ip-mask", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False) def _get_destination_ip_mask(self): """ Getter method for destination_ip_mask, mapped from YANG variable /openflow_state/flow_id/destination_ip_mask (string) YANG Description: Destination IPv4 Mask """ return self.__destination_ip_mask def _set_destination_ip_mask(self, v, load=False): """ Setter method for destination_ip_mask, mapped from YANG variable /openflow_state/flow_id/destination_ip_mask (string) If this variable is read-only (config: false) in the source YANG file, then _set_destination_ip_mask is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_destination_ip_mask() directly. 
YANG Description: Destination IPv4 Mask """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="destination-ip-mask", rest_name="destination-ip-mask", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """destination_ip_mask must be of a type compatible with string""", 'defined-type': "string", 'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="destination-ip-mask", rest_name="destination-ip-mask", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)""", }) self.__destination_ip_mask = t if hasattr(self, '_set'): self._set() def _unset_destination_ip_mask(self): self.__destination_ip_mask = YANGDynClass(base=unicode, is_leaf=True, yang_name="destination-ip-mask", rest_name="destination-ip-mask", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False) def _get_total_packets(self): """ Getter method for total_packets, mapped from YANG variable /openflow_state/flow_id/total_packets (uint64) YANG Description: Total Packets """ return self.__total_packets def _set_total_packets(self, v, load=False): """ Setter method for total_packets, mapped from YANG variable /openflow_state/flow_id/total_packets (uint64) If this variable is read-only (config: false) in the source YANG file, then _set_total_packets is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_total_packets() directly. 
YANG Description: Total Packets """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="total-packets", rest_name="total-packets", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint64', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """total_packets must be of a type compatible with uint64""", 'defined-type': "uint64", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="total-packets", rest_name="total-packets", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint64', is_config=False)""", }) self.__total_packets = t if hasattr(self, '_set'): self._set() def _unset_total_packets(self): self.__total_packets = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="total-packets", rest_name="total-packets", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint64', is_config=False) def _get_total_bytes(self): """ Getter method for total_bytes, mapped from YANG variable /openflow_state/flow_id/total_bytes (uint64) YANG Description: Total Bytes """ return self.__total_bytes def _set_total_bytes(self, v, load=False): """ Setter method for total_bytes, mapped from YANG variable /openflow_state/flow_id/total_bytes (uint64) If this variable is read-only (config: false) in the source YANG file, then _set_total_bytes is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_total_bytes() directly. 
YANG Description: Total Bytes """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="total-bytes", rest_name="total-bytes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint64', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """total_bytes must be of a type compatible with uint64""", 'defined-type': "uint64", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="total-bytes", rest_name="total-bytes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint64', is_config=False)""", }) self.__total_bytes = t if hasattr(self, '_set'): self._set() def _unset_total_bytes(self): self.__total_bytes = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="total-bytes", rest_name="total-bytes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint64', is_config=False) def _get_flow_action_list(self): """ Getter method for flow_action_list, mapped from YANG variable /openflow_state/flow_id/flow_action_list (list) YANG Description: Details of an action """ return self.__flow_action_list def _set_flow_action_list(self, v, load=False): """ Setter method for flow_action_list, mapped from YANG variable /openflow_state/flow_id/flow_action_list (list) If this variable is read-only (config: false) in the source YANG file, then _set_flow_action_list is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_flow_action_list() directly. 
YANG Description: Details of an action """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("action_idx",flow_action_list.flow_action_list, yang_name="flow-action-list", rest_name="flow-action-list", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='action-idx', extensions={u'tailf-common': {u'callpoint': u'openflow-flow-action', u'cli-suppress-show-path': None}}), is_container='list', yang_name="flow-action-list", rest_name="flow-action-list", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'openflow-flow-action', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='list', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """flow_action_list must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("action_idx",flow_action_list.flow_action_list, yang_name="flow-action-list", rest_name="flow-action-list", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='action-idx', extensions={u'tailf-common': {u'callpoint': u'openflow-flow-action', u'cli-suppress-show-path': None}}), is_container='list', yang_name="flow-action-list", rest_name="flow-action-list", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'openflow-flow-action', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='list', is_config=False)""", }) self.__flow_action_list = t if hasattr(self, '_set'): self._set() def _unset_flow_action_list(self): self.__flow_action_list = YANGDynClass(base=YANGListType("action_idx",flow_action_list.flow_action_list, yang_name="flow-action-list", rest_name="flow-action-list", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='action-idx', extensions={u'tailf-common': {u'callpoint': u'openflow-flow-action', u'cli-suppress-show-path': None}}), is_container='list', yang_name="flow-action-list", rest_name="flow-action-list", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'openflow-flow-action', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='list', is_config=False) flow_id = __builtin__.property(_get_flow_id) priority = __builtin__.property(_get_priority) status = __builtin__.property(_get_status) in_port = __builtin__.property(_get_in_port) in_vlan = __builtin__.property(_get_in_vlan) source_mac = __builtin__.property(_get_source_mac) destination_mac = __builtin__.property(_get_destination_mac) ether_type = __builtin__.property(_get_ether_type) ip_protocol = __builtin__.property(_get_ip_protocol) ip_protocol_source_port = __builtin__.property(_get_ip_protocol_source_port) ip_protocol_destination_port = __builtin__.property(_get_ip_protocol_destination_port) source_ip = __builtin__.property(_get_source_ip) destination_ip = __builtin__.property(_get_destination_ip) source_ipv6 = __builtin__.property(_get_source_ipv6) destination_ipv6 = 
__builtin__.property(_get_destination_ipv6) instructions = __builtin__.property(_get_instructions) action_data = __builtin__.property(_get_action_data) meter_id = __builtin__.property(_get_meter_id) vlan_upbits = __builtin__.property(_get_vlan_upbits) nw_tos = __builtin__.property(_get_nw_tos) source_ip_mask = __builtin__.property(_get_source_ip_mask) destination_ip_mask = __builtin__.property(_get_destination_ip_mask) total_packets = __builtin__.property(_get_total_packets) total_bytes = __builtin__.property(_get_total_bytes) flow_action_list = __builtin__.property(_get_flow_action_list) _pyangbind_elements = {'flow_id': flow_id, 'priority': priority, 'status': status, 'in_port': in_port, 'in_vlan': in_vlan, 'source_mac': source_mac, 'destination_mac': destination_mac, 'ether_type': ether_type, 'ip_protocol': ip_protocol, 'ip_protocol_source_port': ip_protocol_source_port, 'ip_protocol_destination_port': ip_protocol_destination_port, 'source_ip': source_ip, 'destination_ip': destination_ip, 'source_ipv6': source_ipv6, 'destination_ipv6': destination_ipv6, 'instructions': instructions, 'action_data': action_data, 'meter_id': meter_id, 'vlan_upbits': vlan_upbits, 'nw_tos': nw_tos, 'source_ip_mask': source_ip_mask, 'destination_ip_mask': destination_ip_mask, 'total_packets': total_packets, 'total_bytes': total_bytes, 'flow_action_list': flow_action_list, }
pybind/slxos/v17r_1_01a/openflow_state/flow_id/__init__.py
74,798
This class was auto-generated by the PythonClass plugin for PYANG from YANG module brocade-openflow-operational - based on the path /openflow-state/flow-id. Each member element of the container is represented as a class variable - with a specific YANG type. Getter method for action_data, mapped from YANG variable /openflow_state/flow_id/action_data (string) YANG Description: Action Getter method for destination_ip, mapped from YANG variable /openflow_state/flow_id/destination_ip (string) YANG Description: Destination IPv4 Getter method for destination_ip_mask, mapped from YANG variable /openflow_state/flow_id/destination_ip_mask (string) YANG Description: Destination IPv4 Mask Getter method for destination_ipv6, mapped from YANG variable /openflow_state/flow_id/destination_ipv6 (string) YANG Description: Destination IPv6 Address Getter method for destination_mac, mapped from YANG variable /openflow_state/flow_id/destination_mac (string) YANG Description: Destination Mac Getter method for ether_type, mapped from YANG variable /openflow_state/flow_id/ether_type (string) YANG Description: Ether type Getter method for flow_action_list, mapped from YANG variable /openflow_state/flow_id/flow_action_list (list) YANG Description: Details of an action Getter method for flow_id, mapped from YANG variable /openflow_state/flow_id/flow_id (uint32) YANG Description: Flow ID Getter method for in_port, mapped from YANG variable /openflow_state/flow_id/in_port (string) YANG Description: In Port Getter method for in_vlan, mapped from YANG variable /openflow_state/flow_id/in_vlan (string) YANG Description: In Vlan Getter method for instructions, mapped from YANG variable /openflow_state/flow_id/instructions (string) YANG Description: Instructions Getter method for ip_protocol, mapped from YANG variable /openflow_state/flow_id/ip_protocol (uint32) YANG Description: IP Protocol Getter method for ip_protocol_destination_port, mapped from YANG variable /openflow_state/flow_id/ip_protocol_destination_port (uint32) YANG Description: IP Protocol Destination Port Getter method for ip_protocol_source_port, mapped from YANG variable /openflow_state/flow_id/ip_protocol_source_port (uint32) YANG Description: IP Protocol Source Port Getter method for meter_id, mapped from YANG variable /openflow_state/flow_id/meter_id (uint32) YANG Description: Meter id Getter method for nw_tos, mapped from YANG variable /openflow_state/flow_id/nw_tos (uint32) YANG Description: IP DSCP Getter method for priority, mapped from YANG variable /openflow_state/flow_id/priority (uint32) YANG Description: Priority Getter method for source_ip, mapped from YANG variable /openflow_state/flow_id/source_ip (string) YANG Description: Source IPv4 Getter method for source_ip_mask, mapped from YANG variable /openflow_state/flow_id/source_ip_mask (string) YANG Description: Source IPv4 Mask Getter method for source_ipv6, mapped from YANG variable /openflow_state/flow_id/source_ipv6 (string) YANG Description: Source IPv6 Address Getter method for source_mac, mapped from YANG variable /openflow_state/flow_id/source_mac (string) YANG Description: Source Mac Getter method for status, mapped from YANG variable /openflow_state/flow_id/status (flow-status) YANG Description: Status Getter method for total_bytes, mapped from YANG variable /openflow_state/flow_id/total_bytes (uint64) YANG Description: Total Bytes Getter method for total_packets, mapped from YANG variable /openflow_state/flow_id/total_packets (uint64) YANG Description: Total Packets Getter method for 
vlan_upbits, mapped from YANG variable /openflow_state/flow_id/vlan_upbits (uint32) YANG Description: Vlan Priority Setter method for action_data, mapped from YANG variable /openflow_state/flow_id/action_data (string) If this variable is read-only (config: false) in the source YANG file, then _set_action_data is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_action_data() directly. YANG Description: Action Setter method for destination_ip, mapped from YANG variable /openflow_state/flow_id/destination_ip (string) If this variable is read-only (config: false) in the source YANG file, then _set_destination_ip is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_destination_ip() directly. YANG Description: Destination IPv4 Setter method for destination_ip_mask, mapped from YANG variable /openflow_state/flow_id/destination_ip_mask (string) If this variable is read-only (config: false) in the source YANG file, then _set_destination_ip_mask is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_destination_ip_mask() directly. YANG Description: Destination IPv4 Mask Setter method for destination_ipv6, mapped from YANG variable /openflow_state/flow_id/destination_ipv6 (string) If this variable is read-only (config: false) in the source YANG file, then _set_destination_ipv6 is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_destination_ipv6() directly. YANG Description: Destination IPv6 Address Setter method for destination_mac, mapped from YANG variable /openflow_state/flow_id/destination_mac (string) If this variable is read-only (config: false) in the source YANG file, then _set_destination_mac is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_destination_mac() directly. YANG Description: Destination Mac Setter method for ether_type, mapped from YANG variable /openflow_state/flow_id/ether_type (string) If this variable is read-only (config: false) in the source YANG file, then _set_ether_type is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_ether_type() directly. YANG Description: Ether type Setter method for flow_action_list, mapped from YANG variable /openflow_state/flow_id/flow_action_list (list) If this variable is read-only (config: false) in the source YANG file, then _set_flow_action_list is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_flow_action_list() directly. YANG Description: Details of an action Setter method for flow_id, mapped from YANG variable /openflow_state/flow_id/flow_id (uint32) If this variable is read-only (config: false) in the source YANG file, then _set_flow_id is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_flow_id() directly. YANG Description: Flow ID Setter method for in_port, mapped from YANG variable /openflow_state/flow_id/in_port (string) If this variable is read-only (config: false) in the source YANG file, then _set_in_port is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_in_port() directly. 
YANG Description: In Port Setter method for in_vlan, mapped from YANG variable /openflow_state/flow_id/in_vlan (string) If this variable is read-only (config: false) in the source YANG file, then _set_in_vlan is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_in_vlan() directly. YANG Description: In Vlan Setter method for instructions, mapped from YANG variable /openflow_state/flow_id/instructions (string) If this variable is read-only (config: false) in the source YANG file, then _set_instructions is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_instructions() directly. YANG Description: Instructions Setter method for ip_protocol, mapped from YANG variable /openflow_state/flow_id/ip_protocol (uint32) If this variable is read-only (config: false) in the source YANG file, then _set_ip_protocol is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_ip_protocol() directly. YANG Description: IP Protocol Setter method for ip_protocol_destination_port, mapped from YANG variable /openflow_state/flow_id/ip_protocol_destination_port (uint32) If this variable is read-only (config: false) in the source YANG file, then _set_ip_protocol_destination_port is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_ip_protocol_destination_port() directly. YANG Description: IP Protocol Destination Port Setter method for ip_protocol_source_port, mapped from YANG variable /openflow_state/flow_id/ip_protocol_source_port (uint32) If this variable is read-only (config: false) in the source YANG file, then _set_ip_protocol_source_port is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_ip_protocol_source_port() directly. YANG Description: IP Protocol Source Port Setter method for meter_id, mapped from YANG variable /openflow_state/flow_id/meter_id (uint32) If this variable is read-only (config: false) in the source YANG file, then _set_meter_id is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_meter_id() directly. YANG Description: Meter id Setter method for nw_tos, mapped from YANG variable /openflow_state/flow_id/nw_tos (uint32) If this variable is read-only (config: false) in the source YANG file, then _set_nw_tos is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_nw_tos() directly. YANG Description: IP DSCP Setter method for priority, mapped from YANG variable /openflow_state/flow_id/priority (uint32) If this variable is read-only (config: false) in the source YANG file, then _set_priority is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_priority() directly. YANG Description: Priority Setter method for source_ip, mapped from YANG variable /openflow_state/flow_id/source_ip (string) If this variable is read-only (config: false) in the source YANG file, then _set_source_ip is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_source_ip() directly. 
YANG Description: Source IPv4 Setter method for source_ip_mask, mapped from YANG variable /openflow_state/flow_id/source_ip_mask (string) If this variable is read-only (config: false) in the source YANG file, then _set_source_ip_mask is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_source_ip_mask() directly. YANG Description: Source IPv4 Mask Setter method for source_ipv6, mapped from YANG variable /openflow_state/flow_id/source_ipv6 (string) If this variable is read-only (config: false) in the source YANG file, then _set_source_ipv6 is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_source_ipv6() directly. YANG Description: Source IPv6 Address Setter method for source_mac, mapped from YANG variable /openflow_state/flow_id/source_mac (string) If this variable is read-only (config: false) in the source YANG file, then _set_source_mac is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_source_mac() directly. YANG Description: Source Mac Setter method for status, mapped from YANG variable /openflow_state/flow_id/status (flow-status) If this variable is read-only (config: false) in the source YANG file, then _set_status is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_status() directly. YANG Description: Status Setter method for total_bytes, mapped from YANG variable /openflow_state/flow_id/total_bytes (uint64) If this variable is read-only (config: false) in the source YANG file, then _set_total_bytes is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_total_bytes() directly. YANG Description: Total Bytes Setter method for total_packets, mapped from YANG variable /openflow_state/flow_id/total_packets (uint64) If this variable is read-only (config: false) in the source YANG file, then _set_total_packets is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_total_packets() directly. YANG Description: Total Packets Setter method for vlan_upbits, mapped from YANG variable /openflow_state/flow_id/vlan_upbits (uint32) If this variable is read-only (config: false) in the source YANG file, then _set_vlan_upbits is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_vlan_upbits() directly. YANG Description: Vlan Priority
13,014
en
0.532737
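A brief usage sketch for the generated flow_id bindings above. This is a sketch only: the import path is inferred from the file path shown, and standalone instantiation of the list entry follows pyangbind's usual conventions rather than a verified run. Because every leaf under /openflow-state is config false, the public attributes are read-only properties, and a backend populates them through the private _set_* methods exactly as the docstrings describe.

from pybind.slxos.v17r_1_01a.openflow_state.flow_id import flow_id  # assumed module layout

flow = flow_id()

# config-false leaves: a backend fills them in via the private setters,
# consumers read them back through the read-only properties
flow._set_source_ip(u'10.1.1.1')
flow._set_destination_ip(u'10.1.1.2')
flow._set_ip_protocol_destination_port(443)
flow._set_total_packets(1024)

print flow.source_ip, flow.destination_ip                      # 10.1.1.1 10.1.1.2
print flow.ip_protocol_destination_port, flow.total_packets    # 443 1024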
# coding: utf-8 """ Gitea API. This documentation describes the Gitea API. # noqa: E501 OpenAPI spec version: 1.16.7 Generated by: https://github.com/swagger-api/swagger-codegen.git """ import pprint import re # noqa: F401 import six class WikiCommit(object): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'author': 'CommitUser', 'commiter': 'CommitUser', 'message': 'str', 'sha': 'str' } attribute_map = { 'author': 'author', 'commiter': 'commiter', 'message': 'message', 'sha': 'sha' } def __init__(self, author=None, commiter=None, message=None, sha=None): # noqa: E501 """WikiCommit - a model defined in Swagger""" # noqa: E501 self._author = None self._commiter = None self._message = None self._sha = None self.discriminator = None if author is not None: self.author = author if commiter is not None: self.commiter = commiter if message is not None: self.message = message if sha is not None: self.sha = sha @property def author(self): """Gets the author of this WikiCommit. # noqa: E501 :return: The author of this WikiCommit. # noqa: E501 :rtype: CommitUser """ return self._author @author.setter def author(self, author): """Sets the author of this WikiCommit. :param author: The author of this WikiCommit. # noqa: E501 :type: CommitUser """ self._author = author @property def commiter(self): """Gets the commiter of this WikiCommit. # noqa: E501 :return: The commiter of this WikiCommit. # noqa: E501 :rtype: CommitUser """ return self._commiter @commiter.setter def commiter(self, commiter): """Sets the commiter of this WikiCommit. :param commiter: The commiter of this WikiCommit. # noqa: E501 :type: CommitUser """ self._commiter = commiter @property def message(self): """Gets the message of this WikiCommit. # noqa: E501 :return: The message of this WikiCommit. # noqa: E501 :rtype: str """ return self._message @message.setter def message(self, message): """Sets the message of this WikiCommit. :param message: The message of this WikiCommit. # noqa: E501 :type: str """ self._message = message @property def sha(self): """Gets the sha of this WikiCommit. # noqa: E501 :return: The sha of this WikiCommit. # noqa: E501 :rtype: str """ return self._sha @sha.setter def sha(self, sha): """Sets the sha of this WikiCommit. :param sha: The sha of this WikiCommit. 
# noqa: E501 :type: str """ self._sha = sha def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(WikiCommit, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, WikiCommit): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other
gitea_api/models/wiki_commit.py
4,851
NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Returns true if both objects are equal WikiCommit - a model defined in Swagger Returns true if both objects are not equal For `print` and `pprint` Gets the author of this WikiCommit. # noqa: E501 :return: The author of this WikiCommit. # noqa: E501 :rtype: CommitUser Sets the author of this WikiCommit. :param author: The author of this WikiCommit. # noqa: E501 :type: CommitUser Gets the commiter of this WikiCommit. # noqa: E501 :return: The commiter of this WikiCommit. # noqa: E501 :rtype: CommitUser Sets the commiter of this WikiCommit. :param commiter: The commiter of this WikiCommit. # noqa: E501 :type: CommitUser Gets the message of this WikiCommit. # noqa: E501 :return: The message of this WikiCommit. # noqa: E501 :rtype: str Sets the message of this WikiCommit. :param message: The message of this WikiCommit. # noqa: E501 :type: str Gets the sha of this WikiCommit. # noqa: E501 :return: The sha of this WikiCommit. # noqa: E501 :rtype: str Sets the sha of this WikiCommit. :param sha: The sha of this WikiCommit. # noqa: E501 :type: str Returns the model properties as a dict Returns the string representation of the model Gitea API. This documentation describes the Gitea API. # noqa: E501 OpenAPI spec version: 1.16.7 Generated by: https://github.com/swagger-api/swagger-codegen.git coding: utf-8 noqa: F401 noqa: E501 noqa: E501
1,490
en
0.525979
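A short usage sketch for the swagger-generated WikiCommit model above; the field values are placeholders, and the 'commiter' spelling comes from the upstream swagger definition itself, not a typo introduced here.

from gitea_api.models.wiki_commit import WikiCommit

commit = WikiCommit(message='Update Home page', sha='0123abcdef')

# to_dict() walks swagger_types, so unset nested models show up as None
print(commit.to_dict())
# {'author': None, 'commiter': None, 'message': 'Update Home page', 'sha': '0123abcdef'}

# __eq__ compares the underlying attribute dicts
print(commit == WikiCommit(message='Update Home page', sha='0123abcdef'))   # True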
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union from ... import _utilities, _tables from . import outputs from ._enums import * from ._inputs import * __all__ = ['DataCollectionRule'] class DataCollectionRule(pulumi.CustomResource): def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, data_collection_rule_name: Optional[pulumi.Input[str]] = None, data_flows: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DataFlowArgs']]]]] = None, data_sources: Optional[pulumi.Input[pulumi.InputType['DataCollectionRuleDataSourcesArgs']]] = None, description: Optional[pulumi.Input[str]] = None, destinations: Optional[pulumi.Input[pulumi.InputType['DataCollectionRuleDestinationsArgs']]] = None, location: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, __props__=None, __name__=None, __opts__=None): """ Definition of ARM tracked top level resource. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] data_collection_rule_name: The name of the data collection rule. The name is case insensitive. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DataFlowArgs']]]] data_flows: The specification of data flows. :param pulumi.Input[pulumi.InputType['DataCollectionRuleDataSourcesArgs']] data_sources: The specification of data sources. This property is optional and can be omitted if the rule is meant to be used via direct calls to the provisioned endpoint. :param pulumi.Input[str] description: Description of the data collection rule. :param pulumi.Input[pulumi.InputType['DataCollectionRuleDestinationsArgs']] destinations: The specification of destinations. :param pulumi.Input[str] location: The geo-location where the resource lives. :param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags. 
""" if __name__ is not None: warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning) resource_name = __name__ if __opts__ is not None: warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = dict() __props__['data_collection_rule_name'] = data_collection_rule_name if data_flows is None and not opts.urn: raise TypeError("Missing required property 'data_flows'") __props__['data_flows'] = data_flows __props__['data_sources'] = data_sources __props__['description'] = description if destinations is None and not opts.urn: raise TypeError("Missing required property 'destinations'") __props__['destinations'] = destinations __props__['location'] = location if resource_group_name is None and not opts.urn: raise TypeError("Missing required property 'resource_group_name'") __props__['resource_group_name'] = resource_group_name __props__['tags'] = tags __props__['etag'] = None __props__['name'] = None __props__['provisioning_state'] = None __props__['type'] = None alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:insights/v20191101preview:DataCollectionRule"), pulumi.Alias(type_="azure-native:insights:DataCollectionRule"), pulumi.Alias(type_="azure-nextgen:insights:DataCollectionRule")]) opts = pulumi.ResourceOptions.merge(opts, alias_opts) super(DataCollectionRule, __self__).__init__( 'azure-native:insights/v20191101preview:DataCollectionRule', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 'DataCollectionRule': """ Get an existing DataCollectionRule resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = dict() __props__["data_flows"] = None __props__["data_sources"] = None __props__["description"] = None __props__["destinations"] = None __props__["etag"] = None __props__["location"] = None __props__["name"] = None __props__["provisioning_state"] = None __props__["tags"] = None __props__["type"] = None return DataCollectionRule(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter(name="dataFlows") def data_flows(self) -> pulumi.Output[Sequence['outputs.DataFlowResponse']]: """ The specification of data flows. """ return pulumi.get(self, "data_flows") @property @pulumi.getter(name="dataSources") def data_sources(self) -> pulumi.Output[Optional['outputs.DataCollectionRuleResponseDataSources']]: """ The specification of data sources. This property is optional and can be omitted if the rule is meant to be used via direct calls to the provisioned endpoint. """ return pulumi.get(self, "data_sources") @property @pulumi.getter def description(self) -> pulumi.Output[Optional[str]]: """ Description of the data collection rule. 
""" return pulumi.get(self, "description") @property @pulumi.getter def destinations(self) -> pulumi.Output['outputs.DataCollectionRuleResponseDestinations']: """ The specification of destinations. """ return pulumi.get(self, "destinations") @property @pulumi.getter def etag(self) -> pulumi.Output[str]: """ Resource entity tag (ETag). """ return pulumi.get(self, "etag") @property @pulumi.getter def location(self) -> pulumi.Output[str]: """ The geo-location where the resource lives. """ return pulumi.get(self, "location") @property @pulumi.getter def name(self) -> pulumi.Output[str]: """ The name of the resource. """ return pulumi.get(self, "name") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> pulumi.Output[str]: """ The resource provisioning state. """ return pulumi.get(self, "provisioning_state") @property @pulumi.getter def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]: """ Resource tags. """ return pulumi.get(self, "tags") @property @pulumi.getter def type(self) -> pulumi.Output[str]: """ The type of the resource. """ return pulumi.get(self, "type") def translate_output_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop def translate_input_property(self, prop): return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
sdk/python/pulumi_azure_native/insights/v20191101preview/data_collection_rule.py
8,670
Definition of ARM tracked top level resource. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] data_collection_rule_name: The name of the data collection rule. The name is case insensitive. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DataFlowArgs']]]] data_flows: The specification of data flows. :param pulumi.Input[pulumi.InputType['DataCollectionRuleDataSourcesArgs']] data_sources: The specification of data sources. This property is optional and can be omitted if the rule is meant to be used via direct calls to the provisioned endpoint. :param pulumi.Input[str] description: Description of the data collection rule. :param pulumi.Input[pulumi.InputType['DataCollectionRuleDestinationsArgs']] destinations: The specification of destinations. :param pulumi.Input[str] location: The geo-location where the resource lives. :param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags. The specification of data flows. The specification of data sources. This property is optional and can be omitted if the rule is meant to be used via direct calls to the provisioned endpoint. Description of the data collection rule. The specification of destinations. Resource entity tag (ETag). Get an existing DataCollectionRule resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. The geo-location where the resource lives. The name of the resource. The resource provisioning state. Resource tags. The type of the resource. coding=utf-8 *** WARNING: this file was generated by the Pulumi SDK Generator. *** *** Do not edit by hand unless you're certain you know what you are doing! ***
2,071
en
0.650173
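For reference, a minimal Pulumi program using the generated DataCollectionRule resource above might look like the sketch below. The resource group, workspace ID, and the nested field names inside data_flows/destinations are placeholders assumed from the Azure data collection rule schema; only the top-level required arguments (data_flows, destinations, resource_group_name) come from the file itself.

import pulumi
from pulumi_azure_native.insights.v20191101preview import DataCollectionRule

# Hypothetical values: the resource group, workspace resource ID and nested field
# names are placeholders, not taken from the generated SDK file above.
rule = DataCollectionRule(
    "example-rule",
    resource_group_name="example-rg",                  # required (see the None check above)
    location="eastus",
    data_flows=[{"streams": ["Microsoft-Perf"],        # required
                 "destinations": ["centralWorkspace"]}],
    destinations={"log_analytics": [{                  # required
        "workspace_resource_id": "<log-analytics-workspace-id>",
        "name": "centralWorkspace"}]},
)

pulumi.export("rule_name", rule.name)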
# # The Multiverse Platform is made available under the MIT License. # # Copyright (c) 2012 The Multiverse Foundation # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation # files (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, # merge, publish, distribute, sublicense, and/or sell copies # of the Software, and to permit persons to whom the Software # is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE # OR OTHER DEALINGS IN THE SOFTWARE. # # #!/usr/bin/python # # This script sets up a render server machine # import sys import os import subprocess import shutil import glob # NOTE - if you change machinimaPath, you need to update BuildDirs() machinimaPath = 'c:\\Multiverse\\Machinima\\' treePath = 'c:\\Multiverse\\tree\\' toolsToCopy = [ 'processJob.py', 'sendJob.py', 'mencoder.exe', 'machinima.sshkey.ppk', 'pscp.exe', 'MP4Box.exe' ] toolSourceDir = os.path.join(treePath, 'Tools\\Machinima') srcMediaDir = os.path.join(treePath, 'Media') # map our internal world name to the public world name # used when copying assets from our source tree to the production media area worldNameMap = { 'nyts' : 'times_square', 'friendworld' : 'friendworld2' } # requires a full path name including drive spec # returns a list of directory components def SplitDirs(path): # split off the drive spec drive, path = os.path.splitdrive(path) # remove ending slash if it is present if path.endswith('\\'): path, empty = os.path.split(path) pathList = [] while path is not None: path, element = os.path.split(path) if len(element) == 0: pathList.append(path) path = None else: pathList.append(element) pathList.append(drive) pathList.reverse() return pathList def BuildPath(pathList, numDirs): path = os.path.join(pathList[0], pathList[1]) for i in range(2,numDirs): path = os.path.join(path, pathList[i]) return path # make sure all the directories in the path exist def MakePath(fullpath): pathList = SplitDirs(fullpath) for i in range(3, len(pathList) + 1): path = BuildPath(pathList, i) if not os.path.exists(path): print 'Making %s' % path os.mkdir(path) def MakeDirIfNeeded(path): if not os.path.exists(path): os.mkdir(path, 0777) def BuildDirs(): MakePath(machinimaPath) MakeDirIfNeeded(os.path.join(machinimaPath, 'Tools')) MakeDirIfNeeded(os.path.join(machinimaPath, 'Media')) MakeDirIfNeeded(os.path.join(machinimaPath, 'Jobs')) def CopyTools(): destPath = os.path.join(machinimaPath, 'Tools') for tool in toolsToCopy: srcPath = os.path.join(toolSourceDir, tool) shutil.copy(srcPath, destPath) def CopyTree(src, dst): """Because shutil.copytree() fails if a directory already exists""" srcLen = len(src) for path, dirs, files in os.walk(src): if '.svn' in path: continue dstPath = path[srcLen:] for dir in dirs: if '.svn' in dir: continue dstDir = os.path.join(dst, dstPath, dir) MakePath(dstDir) for file in files: 
srcPath = os.path.join(path, file) subPath = srcPath[srcLen:] dstPath = os.path.join(dst, subPath) shutil.copy2(srcPath, dstPath) def CopyMedia(): sceneDirs = glob.glob(os.path.join(srcMediaDir, '*\\Machinima\\*\\')) for sceneDir in sceneDirs: dirList = SplitDirs(sceneDir) numElements = len(dirList) scene = dirList[numElements-1] world = worldNameMap[dirList[numElements-3]] destMediaDir = os.path.join(machinimaPath, 'Media\\') destDir = os.path.join(os.path.join(destMediaDir, world), scene) CopyTree(sceneDir, destDir) saMediaSrc = os.path.join(srcMediaDir, 'standalone\\') saMediaDest = os.path.join(destMediaDir, 'standalone\\') CopyTree(saMediaSrc, saMediaDest) BuildDirs() CopyTools() CopyMedia()
tools/Machinima/setupRenderHost.py
4,745
The Multiverse Platform is made available under the MIT License. Copyright (c) 2012 The Multiverse Foundation Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.!/usr/bin/python This script sets up a render server machine NOTE - if you change machinimaPath, you need to update BuildDirs() map our internal world name to the public world name used when copying assets from our source tree to the production media area requires a full path name including drive spec returns a list of directory components split off the drive spec remove ending slash if it is present make sure all the directories in the path exist
1,614
en
0.837389
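As a quick illustration of the path helpers in setupRenderHost.py, the sketch below reproduces the SplitDirs algorithm so it can run standalone; it assumes a Windows host (the script uses backslash paths throughout), and the example path is purely illustrative.

import os

def split_dirs(path):
    # Same algorithm as SplitDirs above, copied here so the sketch runs on its own.
    drive, path = os.path.splitdrive(path)
    if path.endswith('\\'):
        path, _ = os.path.split(path)
    parts = []
    while path is not None:
        path, element = os.path.split(path)
        if len(element) == 0:
            parts.append(path)
            path = None
        else:
            parts.append(element)
    parts.append(drive)
    parts.reverse()
    return parts

# On Windows this prints ['c:', '\\', 'Multiverse', 'Machinima', 'Jobs'];
# MakePath then joins and mkdirs these components one level at a time.
print(split_dirs('c:\\Multiverse\\Machinima\\Jobs'))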
# INSTRUCTIONS # Translate the text and write it between the " # EXAMPLE: original -> "This text is in english: value {0}" # translation -> "Aquest text està en anglès: valor {0}" # If you see sth like {0}, {1}, maintain it on the translated sentence # Meke special attention to elements like ":", etc. lang_2_9_2 = { "Reload log": "", "Do not show the clock on secondary monitors": "", "Disable clock taskbar background color (make clock transparent)": "", "Open the welcome wizard": "", " (ALPHA STAGE, MAY NOT WORK)": "", "Welcome to ElevenClock": "", "Skip": "", "Start": "", "Next": "", "Finish": "", } lang_2_9 = lang_2_9_2 | { "Task Manager": "", "Change date and time": "", "Notification settings": "", "Updates, icon tray, language": "", "Hide extended options from the clock right-click menu (needs a restart to be aplied)": "", "Fullscreen behaviour, clock position, 1st monitor clock, other miscellanious settings": "", 'Add the "Show Desktop" button on the left corner of every clock': '', 'You might need to set a custom background color for this to work.&nbsp;More info <a href="{0}" style="color:DodgerBlue">HERE</a>': '', "Clock's font, font size, font color and background, text alignment": "", "Date format, Time format, seconds,weekday, weeknumber, regional settings": "", "Testing features and error-fixing tools": "", "Language pack author(s), help translating ElevenClock": "", "Info, report a bug, submit a feature request, donate, about": "", "Log, debugging information": "", } lang_2_8 = lang_2_9 | { "Force the clock to be at the top of the screen": "", "Show the clock on the primary screen": "", "Use a custom font color": "", "Use a custom background color": "", "Align the clock text to the center": "", "Select custom color": "", "Hide the clock when a program occupies all screens": "", } lang2_7_bis = lang_2_8 | { "Use a custom font": "", "Use a custom font size": "", "Enable hide when multi-monitor fullscreen apps are running": "", "<b>{0}</b> needs to be enabled to change this setting": "", "<b>{0}</b> needs to be disabled to change this setting": "", } lang2_7 = lang2_7_bis | { " (This feature has been disabled because it should work by default. 
If it is not, please report a bug)": "", "ElevenClock's language": "" } lang2_6 = lang2_7 | { "About Qt6 (PySide6)": "", "About": "", "Alternative non-SSL update server (This might help with SSL errors)": "", "Fixes and other experimental features: (Use ONLY if something is not working)": "", "Show week number on the clock": "", } lang2_5 = lang2_6 | { "Hide the clock when RDP Client or Citrix Workspace are running": "", "Clock Appearance:": "", "Force the clock to have black text": "", " - It is required that the Dark Text checkbox is disabled": "", "Debbugging information:": "", "Open ElevenClock's log": "", } lang2_4 = lang2_5 | { # Added text in version 2.4 "Show the clock on the primary screen (Useful if clock is set on the left)": "", "Show weekday on the clock" :"", } lang2_3 = lang2_4 | { #Context menu "ElevenClock Settings" :"Instellingen ElevenClock", # Also settings title "Reload Clocks" :"Herlaad Klokken", "ElevenClock v{0}" :"ElevenClock v{0}", "Restart ElevenClock" :"ElevenClock opnieuw opstarten", "Hide ElevenClock" :"Verberg ElevenClock", "Quit ElevenClock" :"Afsluiten ElevenClock", #General settings section "General Settings:" :"Algemene Instellingen:", "Automatically check for updates" :"Controlleer automatisch voor updates", "Automatically install available updates" :"Installeer automatisch beschikbare updates", "Enable really silent updates" :"Schakel hele stille updates in", "Bypass update provider authenticity check (NOT RECOMMENDED, AT YOUR OWN RISK)" :"Omzeil update provider autenticatie check (NIET AANBEVOLEN, GEBRUIK OP EIGEN RISICO)", "Show ElevenClock on system tray" :"Laat ElevenClock in systeemvak zien", "Alternative clock alignment (may not work)" :"Alternatieve klok uitlijning (werkt mogelijk niet)", "Change startup behaviour" :"Verander automatisch starten gedrag", "Change" :"Verander", "<b>Update to the latest version!</b>" :"<b>Update naar de nieuwste versie!</b>", "Install update" :"Installeer update", #Clock settings "Clock Settings:" :"Klok instellingen:", "Hide the clock in fullscreen mode" :"Verberg de klok in volledigscherm applicaties", "Hide the clock when RDP client is active" :"Verberg de klok wanneer RDP client actief is", "Force the clock to be at the bottom of the screen" :"Forceer de klok om onderaan het scherm te staan", "Show the clock when the taskbar is set to hide automatically" :"Klok weergeven als de taakbalk is ingesteld om automatisch te verbergen", "Fix the hyphen/dash showing over the month" :"Corrigeer het koppelteken/streepje dat gedurende de maand wordt weergegeven", "Force the clock to have white text" :"Forceer de klok om witte tekst te hebben", "Show the clock at the left of the screen" :"Toon de klok aan de linkerkant van het scherm ", #Date & time settings "Date & Time Settings:" :"Datum & Tijd instellingen:", "Show seconds on the clock" :"Toon seconden op de klok", "Show date on the clock" :"Toon de datum op de klok", "Show time on the clock" :"Toon de tijd op de klok", "Change date and time format (Regional settings)" :"Datum en Tijd aanpassen (regionale instellingen)", "Regional settings" :"Regionale instellingen", #About the language pack "About the language pack:" :"Over het taalpakket:", "Translated to English by martinet101" :"Vertaald naar het Nederlands door Bugs", # Here, make sute to give you some credits: Translated to LANGUAGE by USER/NAME/PSEUDONYM/etc. 
"Translate ElevenClock to your language" :"Vertaal ElevenClock naar jou taal", "Get started" :"Begin", #About ElevenClock "About ElevenClock version {0}:" :"Over ElevenClock versie {0}:", "View ElevenClock's homepage" :"Website van ElevenClock", "Open" :"Open", "Report an issue/request a feature" :"Rapporteer een probleem/vraag een feature aan", "Report" :"Rapporteer", "Support the dev: Give me a coffee☕" :"Steun de ontwikkelaar: Geef mij een kopje koffie☕", "Open page" :"Open pagina", "Icons by Icons8" :"Iconen door Icons8", # Here, the word "Icons8" should not be translated "Webpage" :"Webpagina", "Close settings" :"Instellingen sluiten", "Close" :"Sluiten", } lang = lang2_3
elevenclock/lang/lang_nl.py
7,982
INSTRUCTIONS Translate the text and write it between the " EXAMPLE: original -> "This text is in english: value {0}" translation -> "Aquest text està en anglès: valor {0}" If you see sth like {0}, {1}, maintain it on the translated sentence Meke special attention to elements like ":", etc. Added text in version 2.4 Context menu Also settings title General settings section Clock settings Date & time settings About the language pack Here, make sute to give you some credits: Translated to LANGUAGE by USER/NAME/PSEUDONYM/etc. About ElevenClock Here, the word "Icons8" should not be translated

615
en
0.738676
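The translation file above builds each language table by chaining dict unions; the snippet below is a generic illustration (with made-up keys, Python 3.9+) of how the "|" operator layers the blocks, with right-hand values winning on key conflicts.

# Made-up keys, only to show the dict-union layering used by lang_nl.py.
older_block = {"Close": "Sluiten", "Open": "Open"}
newer_block = older_block | {"Reload log": "", "Open": "Openen"}

assert newer_block["Close"] == "Sluiten"   # carried over from the older block
assert newer_block["Reload log"] == ""     # newly added, not yet translated
assert newer_block["Open"] == "Openen"     # right-hand side overrides on conflict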
# Copyright The PyTorch Lightning team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections import namedtuple from functools import partial import pytest import torch from sklearn.metrics import mean_tweedie_deviance from torch import Tensor from tests.helpers import seed_all from tests.helpers.testers import BATCH_SIZE, NUM_BATCHES, MetricTester from torchmetrics.functional.regression.tweedie_deviance import tweedie_deviance_score from torchmetrics.regression.tweedie_deviance import TweedieDevianceScore seed_all(42) Input = namedtuple("Input", ["preds", "targets"]) _single_target_inputs1 = Input( preds=torch.rand(NUM_BATCHES, BATCH_SIZE), targets=torch.rand(NUM_BATCHES, BATCH_SIZE), ) _single_target_inputs2 = Input( preds=torch.rand(NUM_BATCHES, BATCH_SIZE), targets=torch.rand(NUM_BATCHES, BATCH_SIZE), ) _multi_target_inputs = Input( preds=torch.rand(NUM_BATCHES, BATCH_SIZE, 5), targets=torch.rand(NUM_BATCHES, BATCH_SIZE, 5), ) def _sk_deviance(preds: Tensor, targets: Tensor, power: float): sk_preds = preds.view(-1).numpy() sk_target = targets.view(-1).numpy() return mean_tweedie_deviance(sk_target, sk_preds, power=power) @pytest.mark.parametrize("power", [-0.5, 0, 1, 1.5, 2, 3]) @pytest.mark.parametrize( "preds, targets", [ (_single_target_inputs1.preds, _single_target_inputs1.targets), (_single_target_inputs2.preds, _single_target_inputs2.targets), (_multi_target_inputs.preds, _multi_target_inputs.targets), ], ) class TestDevianceScore(MetricTester): @pytest.mark.parametrize("ddp", [True, False]) @pytest.mark.parametrize("dist_sync_on_step", [True, False]) def test_deviance_scores_class(self, ddp, dist_sync_on_step, preds, targets, power): self.run_class_metric_test( ddp, preds, targets, TweedieDevianceScore, partial(_sk_deviance, power=power), dist_sync_on_step, metric_args=dict(power=power), ) def test_deviance_scores_functional(self, preds, targets, power): self.run_functional_metric_test( preds, targets, tweedie_deviance_score, partial(_sk_deviance, power=power), metric_args=dict(power=power), ) def test_pearson_corrcoef_differentiability(self, preds, targets, power): self.run_differentiability_test( preds, targets, metric_module=TweedieDevianceScore, metric_functional=tweedie_deviance_score ) # Tweedie Deviance Score half + cpu does not work due to missing support in torch.log @pytest.mark.xfail(reason="TweedieDevianceScore metric does not support cpu + half precision") def test_pearson_corrcoef_half_cpu(self, preds, targets, power): metric_args = {"power": power} self.run_precision_test_cpu( preds, targets, metric_module=TweedieDevianceScore, metric_functional=tweedie_deviance_score, metric_args=metric_args, ) @pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda") def test_pearson_corrcoef_half_gpu(self, preds, targets, power): metric_args = {"power": power} self.run_precision_test_gpu( preds, targets, metric_module=TweedieDevianceScore, metric_functional=tweedie_deviance_score, metric_args=metric_args, ) def 
test_error_on_different_shape(metric_class=TweedieDevianceScore): metric = metric_class() with pytest.raises(RuntimeError, match="Predictions and targets are expected to have the same shape"): metric(torch.randn(100), torch.randn(50)) def test_error_on_invalid_inputs(metric_class=TweedieDevianceScore): with pytest.raises(ValueError, match="Deviance Score is not defined for power=0.5."): metric_class(power=0.5) metric = metric_class(power=1) with pytest.raises( ValueError, match="For power=1, 'preds' has to be strictly positive and 'targets' cannot be negative." ): metric(torch.tensor([-1.0, 2.0, 3.0]), torch.rand(3)) with pytest.raises( ValueError, match="For power=1, 'preds' has to be strictly positive and 'targets' cannot be negative." ): metric(torch.rand(3), torch.tensor([-1.0, 2.0, 3.0])) metric = metric_class(power=2) with pytest.raises(ValueError, match="For power=2, both 'preds' and 'targets' have to be strictly positive."): metric(torch.tensor([-1.0, 2.0, 3.0]), torch.rand(3)) with pytest.raises(ValueError, match="For power=2, both 'preds' and 'targets' have to be strictly positive."): metric(torch.rand(3), torch.tensor([-1.0, 2.0, 3.0])) def test_corner_case_for_power_at_1(metric_class=TweedieDevianceScore): """Test that corner case for power=1.0 produce valid result.""" metric = TweedieDevianceScore() targets = torch.tensor([0, 1, 0, 1]) preds = torch.tensor([0.1, 0.1, 0.1, 0.1]) val = metric(preds, targets) assert val != 0.0 assert not torch.isnan(val)
tests/regression/test_tweedie_deviance.py
5,642
Test that corner case for power=1.0 produce valid result. Copyright The PyTorch Lightning team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Tweedie Deviance Score half + cpu does not work due to missing support in torch.log
703
en
0.855461
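Outside the test harness, the metric exercised above can be used directly; a minimal sketch, assuming torch and torchmetrics are installed:

import torch
from torchmetrics.regression.tweedie_deviance import TweedieDevianceScore
from torchmetrics.functional.regression.tweedie_deviance import tweedie_deviance_score

# Random values shifted to be strictly positive, which is valid for every power setting.
preds = torch.rand(10) + 0.1
target = torch.rand(10) + 0.1

metric = TweedieDevianceScore(power=1.5)
print(metric(preds, target))                              # module API
print(tweedie_deviance_score(preds, target, power=1.5))   # functional API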
# Copyright 2019 Google LLC. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Publich API of batch inference.""" from typing import Tuple, TypeVar, Union import apache_beam as beam import tensorflow as tf from tfx_bsl.beam import run_inference from tfx_bsl.public.proto import model_spec_pb2 from tensorflow_serving.apis import prediction_log_pb2 _K = TypeVar('_K') _INPUT_TYPE = Union[tf.train.Example, tf.train.SequenceExample, bytes] _OUTPUT_TYPE = prediction_log_pb2.PredictionLog @beam.ptransform_fn @beam.typehints.with_input_types(Union[_INPUT_TYPE, Tuple[_K, _INPUT_TYPE]]) @beam.typehints.with_output_types(Union[_OUTPUT_TYPE, Tuple[_K, _OUTPUT_TYPE]]) def RunInference( # pylint: disable=invalid-name examples: beam.pvalue.PCollection, inference_spec_type: model_spec_pb2.InferenceSpecType ) -> beam.pvalue.PCollection: """Run inference with a model. There are two types of inference you can perform using this PTransform: 1. In-process inference from a SavedModel instance. Used when `saved_model_spec` field is set in `inference_spec_type`. 2. Remote inference by using a service endpoint. Used when `ai_platform_prediction_model_spec` field is set in `inference_spec_type`. TODO(b/131873699): Add support for the following features: 1. tf.train.SequenceExample as Input for RemotePredict. 2. beam.Shared() initialization via Fingerprint for models CSE. 3. Models as SideInput. 4. TPU models. Args: examples: A PCollection containing examples of the following possible kinds, each with their corresponding return type. - PCollection[Example] -> PCollection[PredictionLog] * Works with Classify, Regress, MultiInference, Predict and RemotePredict. - PCollection[SequenceExample] -> PCollection[PredictionLog] * Works with Predict and (serialized) RemotePredict. - PCollection[bytes] -> PCollection[PredictionLog] * For serialized Example: Works with Classify, Regress, MultiInference, Predict and RemotePredict. * For everything else: Works with Predict and RemotePredict. - PCollection[Tuple[K, Example]] -> PCollection[ Tuple[K, PredictionLog]] * Works with Classify, Regress, MultiInference, Predict and RemotePredict. - PCollection[Tuple[K, SequenceExample]] -> PCollection[ Tuple[K, PredictionLog]] * Works with Predict and (serialized) RemotePredict. - PCollection[Tuple[K, bytes]] -> PCollection[ Tuple[K, PredictionLog]] * For serialized Example: Works with Classify, Regress, MultiInference, Predict and RemotePredict. * For everything else: Works with Predict and RemotePredict. inference_spec_type: Model inference endpoint. Returns: A PCollection (possibly keyed) containing prediction logs. """ return ( examples | 'RunInferenceImpl' >> run_inference.RunInferenceImpl(inference_spec_type))
tfx_bsl/public/beam/run_inference.py
3,800
Run inference with a model. There are two types of inference you can perform using this PTransform: 1. In-process inference from a SavedModel instance. Used when `saved_model_spec` field is set in `inference_spec_type`. 2. Remote inference by using a service endpoint. Used when `ai_platform_prediction_model_spec` field is set in `inference_spec_type`. TODO(b/131873699): Add support for the following features: 1. tf.train.SequenceExample as Input for RemotePredict. 2. beam.Shared() initialization via Fingerprint for models CSE. 3. Models as SideInput. 4. TPU models. Args: examples: A PCollection containing examples of the following possible kinds, each with their corresponding return type. - PCollection[Example] -> PCollection[PredictionLog] * Works with Classify, Regress, MultiInference, Predict and RemotePredict. - PCollection[SequenceExample] -> PCollection[PredictionLog] * Works with Predict and (serialized) RemotePredict. - PCollection[bytes] -> PCollection[PredictionLog] * For serialized Example: Works with Classify, Regress, MultiInference, Predict and RemotePredict. * For everything else: Works with Predict and RemotePredict. - PCollection[Tuple[K, Example]] -> PCollection[ Tuple[K, PredictionLog]] * Works with Classify, Regress, MultiInference, Predict and RemotePredict. - PCollection[Tuple[K, SequenceExample]] -> PCollection[ Tuple[K, PredictionLog]] * Works with Predict and (serialized) RemotePredict. - PCollection[Tuple[K, bytes]] -> PCollection[ Tuple[K, PredictionLog]] * For serialized Example: Works with Classify, Regress, MultiInference, Predict and RemotePredict. * For everything else: Works with Predict and RemotePredict. inference_spec_type: Model inference endpoint. Returns: A PCollection (possibly keyed) containing prediction logs. Publich API of batch inference. Copyright 2019 Google LLC. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Lint as: python3 pylint: disable=invalid-name
2,867
en
0.771806
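A minimal pipeline sketch for the RunInference PTransform above, doing in-process inference from a SavedModel. The model path is a placeholder, and the SavedModelSpec/model_path field names are assumed from tfx_bsl's model_spec proto rather than shown in this file.

import apache_beam as beam
import tensorflow as tf
from tfx_bsl.public.beam import run_inference
from tfx_bsl.public.proto import model_spec_pb2

# Placeholder model path; SavedModelSpec/model_path are assumed proto field names.
spec = model_spec_pb2.InferenceSpecType(
    saved_model_spec=model_spec_pb2.SavedModelSpec(model_path='/tmp/my_saved_model'))

with beam.Pipeline() as pipeline:
    _ = (pipeline
         | beam.Create([tf.train.Example()])               # toy input examples
         | 'RunInference' >> run_inference.RunInference(spec)
         | beam.Map(print))                                # PredictionLog protos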
import warnings from time import sleep from pyspedas import time_double from pytplot import get_data, store_data, options import numpy as np try: from hapiclient import hapi as load_hapi except: print('hapiclient not found; install with: "pip install hapiclient"') def hapi(trange=None, server=None, dataset=None, parameters='', suffix='', catalog=False): """ Loads data from a HAPI server into pytplot variables Parameters ----------- trange: list of str or list of float Time range to load the data for server: str HAPI server to load the data from dataset: str HAPI dataset to load parameters: str or list of str Parameters in the dataset to load; default is to load them all suffix: str Suffix to append to the tplot variables catalog: bool If True, returns the server's catalog of datasets Returns ------- List of tplot variables created. """ if server is None: print('Error, no server specified; example servers include:') print('- https://cdaweb.gsfc.nasa.gov/hapi') print('- https://pds-ppi.igpp.ucla.edu/hapi') print('- http://planet.physics.uiowa.edu/das/das2Server/hapi') print('- https://iswa.gsfc.nasa.gov/IswaSystemWebApp/hapi') print('- http://lasp.colorado.edu/lisird/hapi') return if catalog: catalog = load_hapi(server) items = [] if 'catalog' in catalog.keys(): items = catalog['catalog'] print('Available datasets: ') for item in items: if 'title' in item.keys(): print(item['id'] + ': ' + item['title']) else: print(item['id']) return if dataset is None: print('Error, no dataset specified; please see the catalog for a list of available data sets.') return if trange is None: print('Error, no trange specified') return if isinstance(parameters, list): parameters = ','.join(parameters) opts = {'logging': False} data, hapi_metadata = load_hapi(server, dataset, parameters, trange[0], trange[1], **opts) out_vars = [] # loop through the parameters in this dataset params = hapi_metadata['parameters'] for param in params[1:]: spec = False param_name = param.get('name') print('Loading ' + param_name) # load the data only for this parameter try: with warnings.catch_warnings(): warnings.simplefilter("ignore", category=ResourceWarning) data, hapi_metadata = load_hapi(server, dataset, param_name, trange[0], trange[1], **opts) except: breakpoint() print('Error! 95') continue timestamps = [datapoint[0] for datapoint in data] unixtimes = [time_double(timestamp.decode('utf-8')) for timestamp in timestamps] param_type = hapi_metadata['parameters'][1].get('type') if param_type is None: param_type = 'double' data_size = hapi_metadata['parameters'][1].get('size') if data_size is None: single_line = True try: if param_type == 'double': single_line = isinstance(data[0][1], np.float64) elif param_type == 'integer': single_line = isinstance(data[0][1], np.int32) except IndexError: breakpoint() print('Error! 103') continue if single_line: data_out = np.zeros((len(data))) else: try: data_out = np.zeros((len(data), len(data[0][1]))) except TypeError: print('Error! 
112') breakpoint() continue for idx, datapoint in enumerate(data): if single_line: data_out[idx] = datapoint[1] else: data_out[idx, :] = datapoint[1] data_out = data_out.squeeze() # check for fill values fill_value = hapi_metadata['parameters'][1].get('fill') if fill_value is not None: if param_type == 'double': fill_value = float(fill_value) data_out[data_out == fill_value] = np.nan elif param_type == 'integer': # NaN is only floating point, so we replace integer fill # values with 0 instead of NaN fill_value = int(fill_value) data_out[data_out == fill_value] = 0 bins = param.get('bins') if bins is not None: centers = bins[0].get('centers') if centers is not None: spec = True data_table = {'x': unixtimes, 'y': data_out} if spec: data_table['v'] = centers saved = store_data(param_name + suffix, data=data_table) metadata = get_data(param_name + suffix, metadata=True) metadata['HAPI'] = hapi_metadata if spec: options(param_name + suffix, 'spec', True) if saved: out_vars.append(param_name + suffix) # wait for a second before going to the next variable # to avoid hitting the server too quickly sleep(1) return out_vars
pyspedas/hapi/hapi.py
5,417
Loads data from a HAPI server into pytplot variables Parameters ----------- trange: list of str or list of float Time range to load the data for server: str HAPI server to load the data from dataset: str HAPI dataset to load parameters: str or list of str Parameters in the dataset to load; default is to load them all suffix: str Suffix to append to the tplot variables catalog: bool If True, returns the server's catalog of datasets Returns ------- List of tplot variables created. loop through the parameters in this dataset load the data only for this parameter check for fill values NaN is only floating point, so we replace integer fill values with 0 instead of NaN wait for a second before going to the next variable to avoid hitting the server too quickly
856
en
0.534505
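A usage sketch for the loader above, assuming pyspedas and hapiclient are installed. The CDAWeb server URL comes from the function's own help text, while the dataset and parameter names are illustrative and should be replaced with entries from the server's catalog (catalog=True lists them).

from pyspedas.hapi.hapi import hapi

# Illustrative dataset/parameter names; query the catalog first to find real ones.
loaded_vars = hapi(trange=['2020-01-01', '2020-01-02'],
                   server='https://cdaweb.gsfc.nasa.gov/hapi',
                   dataset='OMNI_HRO2_1MIN',
                   parameters='BX_GSE',
                   suffix='_omni')
print(loaded_vars)   # names of the tplot variables that were created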
# -*- coding: utf-8 -*- """ Created on Sun Apr 25 21:37:26 2021 @author: brian """ import os os.chdir('C:/Users/brian/Desktop/All/UWEC/DS785_Capstone/Project') import brawl_data as bd import matplotlib.pyplot as plt import seaborn as sns import pandas as pd from statsmodels.stats.proportion import proportion_confint all_win_rates = bd.sql_get_results('dbname=BrawlStars user=postgres password=PG!3%7(', '', '', '', 0, 0, my_id = '', custom_query = "SELECT mode, map, brawler, wins, matches_played FROM population_aggs_high;") all_win_rates['win_rate'] = all_win_rates['wins']/all_win_rates['matches_played'] all_win_rates = all_win_rates.loc[all_win_rates['matches_played']>10,:] win_rate_extremes = all_win_rates.groupby(['mode', 'map']).win_rate.agg(['min', 'max']) win_rate_extremes = win_rate_extremes.reset_index() win_rate_extremes['win_rate_differential'] = win_rate_extremes['max'] - win_rate_extremes['min'] win_rate_extremes = win_rate_extremes.sort_values(by = 'win_rate_differential') win_rate_extremes.columns = ['Mode', 'Map', 'Minimum Brawler Win Rate', 'Maximum Brawler Win Rate', 'Win Rate Differential'] sns.set_style("darkgrid") sns.scatterplot(data=win_rate_extremes, x='Minimum Brawler Win Rate', y='Maximum Brawler Win Rate', hue='Win Rate Differential', palette=sns.cubehelix_palette(start=2, rot=0, dark=.2, light=.8, as_cmap=True)) plt.title('Win Rates Differences for Brawlers Across Each Map-Mode') sns.violinplot(x=win_rate_extremes['Win Rate Differential']) plt.title('Differences Between Maximum and Minimum Win Rates for Brawlers Across Each Map-Mode') for_example = all_win_rates.loc[all_win_rates['map'] == 'Split', :].sort_values('win_rate', ascending = False) for_example = for_example.loc[:,['map', 'mode', 'brawler', 'win_rate']] for_example = pd.concat([for_example.head(5),for_example.tail(5)]) for_example_2 = pd.concat([win_rate_extremes.head(5),win_rate_extremes.tail(5)]) for_example_2 = for_example_2.sort_values('Win Rate Differential', ascending=False) example = bd.get_recommendation('dbname=BrawlStars user=postgres password=PG!3%7(', 'records', '#2G080980', 'brawlBall', 'Sneaky Fields', 0, 4) example = pd.concat([example.head(5),example.tail(5)]) my_recs = bd.get_all_recommendations('dbname=BrawlStars user=postgres password=PG!3%7(', 'records', '#8VUPQ2PP', my_trophy_min = 500) map_weaknesses = bd.get_map_weaknesses('dbname=BrawlStars user=postgres password=PG!3%7(', 'records') map_weaknesses.head(10) all_individual_history = bd.sql_get_results('dbname=BrawlStars user=postgres password=PG!3%7(', '', '', '', 0, 0, my_id = '', custom_query = "SELECT * FROM individual_aggs_high UNION ALL SELECT * FROM individual_aggs_mid UNION ALL SELECT * FROM individual_aggs_low;") all_population_history = bd.sql_get_results('dbname=BrawlStars user=postgres password=PG!3%7(', '', '', '', 0, 0, my_id = '', custom_query = "SELECT * FROM population_aggs_high UNION ALL SELECT * FROM population_aggs_mid UNION ALL SELECT * FROM population_aggs_low;") #Calculate win rate confidence intervals all_individual_history['win_rate'] = all_individual_history['wins'] / all_individual_history['matches_played'] all_individual_history['ci.lower'],all_individual_history['ci.upper'] = zip(*all_individual_history.apply(lambda row : proportion_confint(count = row['wins'], nobs = row['matches_played'], alpha = .1, method = 'agresti_coull'), axis = 1)) all_population_history['win_rate'] = all_population_history['wins'] / all_population_history['matches_played'] all_individual_history = 
all_population_history.merge(all_individual_history, how = 'left', left_on = ['mode', 'map', 'brawler'], right_on = ['mode', 'map', 'brawler']) #Compare population to individual history and inform recommendations better = (all_individual_history['win_rate_x'] < all_individual_history['ci.lower']) & (all_individual_history['matches_played_y'] >= 5) worse = (all_individual_history['win_rate_x'] > all_individual_history['ci.upper']) & (all_individual_history['matches_played_y'] >= 5) sum(better) + sum(worse)
Capstone_Tables&Figures_Results_Graphs.py
4,177
Created on Sun Apr 25 21:37:26 2021 @author: brian -*- coding: utf-8 -*- Calculate win rate confidence intervals Compare population to individual history and inform recommendations
181
en
0.770979
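The interval logic in the script above boils down to an Agresti-Coull confidence interval per brawler; a small self-contained sketch with made-up counts:

from statsmodels.stats.proportion import proportion_confint

# Made-up counts for one player/brawler/map combination.
wins, matches_played = 12, 20
population_win_rate = 0.48

# 90% Agresti-Coull interval, matching alpha=.1 in the script above.
lower, upper = proportion_confint(count=wins, nobs=matches_played,
                                  alpha=0.1, method='agresti_coull')
print(lower, upper)
# Flag the brawler if the population rate falls outside the player's interval.
print('differs from population:', not (lower <= population_win_rate <= upper))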
# -*- coding: utf-8 -*- """ hyper/tls ~~~~~~~~~ Contains the TLS/SSL logic for use in hyper. """ import os.path as path import six from .common.exceptions import MissingCertFile from .compat import ignore_missing, ssl NPN_PROTOCOL = 'h2' H2_NPN_PROTOCOLS = [NPN_PROTOCOL, 'h2-16', 'h2-15', 'h2-14'] SUPPORTED_NPN_PROTOCOLS = H2_NPN_PROTOCOLS + ['http/1.1'] H2C_PROTOCOL = 'h2c' # We have a singleton SSLContext object. There's no reason to be creating one # per connection. _context = None # Work out where our certificates are. cert_loc = path.join(path.dirname(__file__), 'certs.pem') def wrap_socket(sock, server_hostname, ssl_context=None, force_proto=None): """ A vastly simplified SSL wrapping function. We'll probably extend this to do more things later. """ global _context if ssl_context: # if an SSLContext is provided then use it instead of default context _ssl_context = ssl_context else: # create the singleton SSLContext we use if _context is None: # pragma: no cover _context = init_context() _ssl_context = _context # the spec requires SNI support ssl_sock = _ssl_context.wrap_socket(sock, server_hostname=server_hostname) # Setting SSLContext.check_hostname to True only verifies that the # post-handshake servername matches that of the certificate. We also need # to check that it matches the requested one. if _ssl_context.check_hostname: # pragma: no cover try: ssl.match_hostname(ssl_sock.getpeercert(), server_hostname) except AttributeError: ssl.verify_hostname(ssl_sock, server_hostname) # pyopenssl # Allow for the protocol to be forced externally. proto = force_proto # ALPN is newer, so we prefer it over NPN. The odds of us getting # different answers is pretty low, but let's be sure. with ignore_missing(): if proto is None: proto = ssl_sock.selected_alpn_protocol() with ignore_missing(): if proto is None: proto = ssl_sock.selected_npn_protocol() return (ssl_sock, proto) def init_context(cert_path=None, cert=None, cert_password=None): """ Create a new ``SSLContext`` that is correctly set up for an HTTP/2 connection. This SSL context object can be customized and passed as a parameter to the :class:`HTTPConnection <hyper.HTTPConnection>` class. Provide your own certificate file in case you don’t want to use hyper’s default certificate. The path to the certificate can be absolute or relative to your working directory. :param cert_path: (optional) The path to the certificate file of “certification authority” (CA) certificates :param cert: (optional) if string, path to ssl client cert file (.pem). If tuple, ('cert', 'key') pair. The certfile string must be the path to a single file in PEM format containing the certificate as well as any number of CA certificates needed to establish the certificate’s authenticity. The keyfile string, if present, must point to a file containing the private key in. Otherwise the private key will be taken from certfile as well. :param cert_password: (optional) The password argument may be a function to call to get the password for decrypting the private key. It will only be called if the private key is encrypted and a password is necessary. It will be called with no arguments, and it should return a string, bytes, or bytearray. If the return value is a string it will be encoded as UTF-8 before using it to decrypt the key. Alternatively a string, bytes, or bytearray value may be supplied directly as the password argument. It will be ignored if the private key is not encrypted and no password is needed. :returns: An ``SSLContext`` correctly set up for HTTP/2. 
""" cafile = cert_path or cert_loc if not cafile or not path.exists(cafile): err_msg = ("No certificate found at " + str(cafile) + ". Either " + "ensure the default cert.pem file is included in the " + "distribution or provide a custom certificate when " + "creating the connection.") raise MissingCertFile(err_msg) context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) context.set_default_verify_paths() context.load_verify_locations(cafile=cafile) context.verify_mode = ssl.CERT_REQUIRED context.check_hostname = True with ignore_missing(): context.set_npn_protocols(SUPPORTED_NPN_PROTOCOLS) with ignore_missing(): context.set_alpn_protocols(SUPPORTED_NPN_PROTOCOLS) # required by the spec context.options |= ssl.OP_NO_COMPRESSION if cert is not None: if not isinstance(cert, six.string_types): context.load_cert_chain(cert[0], cert[1], cert_password) else: context.load_cert_chain(cert, password=cert_password) return context
hyper/tls.py
5,066
Create a new ``SSLContext`` that is correctly set up for an HTTP/2 connection. This SSL context object can be customized and passed as a parameter to the :class:`HTTPConnection <hyper.HTTPConnection>` class. Provide your own certificate file in case you don’t want to use hyper’s default certificate. The path to the certificate can be absolute or relative to your working directory. :param cert_path: (optional) The path to the certificate file of “certification authority” (CA) certificates :param cert: (optional) if string, path to ssl client cert file (.pem). If tuple, ('cert', 'key') pair. The certfile string must be the path to a single file in PEM format containing the certificate as well as any number of CA certificates needed to establish the certificate’s authenticity. The keyfile string, if present, must point to a file containing the private key in. Otherwise the private key will be taken from certfile as well. :param cert_password: (optional) The password argument may be a function to call to get the password for decrypting the private key. It will only be called if the private key is encrypted and a password is necessary. It will be called with no arguments, and it should return a string, bytes, or bytearray. If the return value is a string it will be encoded as UTF-8 before using it to decrypt the key. Alternatively a string, bytes, or bytearray value may be supplied directly as the password argument. It will be ignored if the private key is not encrypted and no password is needed. :returns: An ``SSLContext`` correctly set up for HTTP/2. A vastly simplified SSL wrapping function. We'll probably extend this to do more things later. hyper/tls ~~~~~~~~~ Contains the TLS/SSL logic for use in hyper. -*- coding: utf-8 -*- We have a singleton SSLContext object. There's no reason to be creating one per connection. Work out where our certificates are. if an SSLContext is provided then use it instead of default context create the singleton SSLContext we use pragma: no cover the spec requires SNI support Setting SSLContext.check_hostname to True only verifies that the post-handshake servername matches that of the certificate. We also need to check that it matches the requested one. pragma: no cover pyopenssl Allow for the protocol to be forced externally. ALPN is newer, so we prefer it over NPN. The odds of us getting different answers is pretty low, but let's be sure. required by the spec
2,495
en
0.84366
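A short usage sketch for the two helpers above; the host name is just an example of an HTTP/2-capable server, and the call assumes hyper's bundled certs.pem can validate it.

import socket
from hyper.tls import init_context, wrap_socket

ctx = init_context()                                     # default CA bundle shipped with hyper
sock = socket.create_connection(('nghttp2.org', 443))    # example HTTP/2-capable host
tls_sock, proto = wrap_socket(sock, 'nghttp2.org', ssl_context=ctx)
print(proto)                                             # negotiated protocol, e.g. 'h2' or 'http/1.1'
tls_sock.close()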
'''This module implements concrete agent controllers for the rollout worker''' import numpy as np import os import random import rospkg import rospy from gazebo_msgs.msg import ModelState from gazebo_msgs.srv import SetModelState, SpawnModel from markov.agent_ctrl.constants import ConfigParams, BOT_CAR_Z, OBSTACLE_Z from markov.track_geom.constants import SET_MODEL_STATE, SPAWN_SDF_MODEL, SPAWN_URDF_MODEL, ObstacleDimensions from markov.track_geom.track_data import TrackData from markov.agent_ctrl.agent_ctrl_interface import AgentCtrlInterface from markov.rospy_wrappers import ServiceProxyWrapper from markov import utils from markov.reset.constants import AgentInfo from markov.domain_randomizations.randomizer_manager import RandomizerManager from markov.domain_randomizations.visual.model_visual_randomizer import ModelVisualRandomizer from markov.domain_randomizations.constants import ModelRandomizerType class ObstaclesCtrl(AgentCtrlInterface): def __init__(self): # Read ros parameters # OBJECT_POSITIONS will overwrite NUMBER_OF_OBSTACLES and RANDOMIZE_OBSTACLE_LOCATIONS self.object_locations = rospy.get_param("OBJECT_POSITIONS", []) self.num_obstacles = int(rospy.get_param("NUMBER_OF_OBSTACLES", 0)) \ if not self.object_locations else len(self.object_locations) self.min_obstacle_dist = float(rospy.get_param("MIN_DISTANCE_BETWEEN_OBSTACLES", 2.0)) self.randomize = utils.str2bool(rospy.get_param("RANDOMIZE_OBSTACLE_LOCATIONS", False)) self.use_bot_car = utils.str2bool(rospy.get_param("IS_OBSTACLE_BOT_CAR", False)) self.obstacle_names = ["obstacle_{}".format(i) for i in range(self.num_obstacles)] self.obstacle_dimensions = ObstacleDimensions.BOT_CAR_DIMENSION if self.use_bot_car \ else ObstacleDimensions.BOX_OBSTACLE_DIMENSION # track data self.track_data = TrackData.get_instance() # Wait for ros services rospy.wait_for_service(SET_MODEL_STATE) rospy.wait_for_service(SPAWN_SDF_MODEL) rospy.wait_for_service(SPAWN_URDF_MODEL) self.set_model_state = ServiceProxyWrapper(SET_MODEL_STATE, SetModelState) self.spawn_sdf_model = ServiceProxyWrapper(SPAWN_SDF_MODEL, SpawnModel) self.spawn_urdf_model = ServiceProxyWrapper(SPAWN_URDF_MODEL, SpawnModel) # Load the obstacle sdf/urdf obstacle_model_folder = "bot_car" if self.use_bot_car else "box_obstacle" rospack = rospkg.RosPack() deepracer_path = rospack.get_path("deepracer_simulation_environment") obstacle_sdf_path = os.path.join(deepracer_path, "models", obstacle_model_folder, "model.sdf") with open(obstacle_sdf_path, "r") as fp: self.obstacle_sdf = fp.read() # Set obstacle poses and spawn the obstacles self.obstacle_poses = self._compute_obstacle_poses() self._spawn_obstacles() self._configure_randomizer() def _configure_randomizer(self): '''configure domain randomizer ''' for obstacle_names in self.obstacle_names: RandomizerManager.get_instance().add(ModelVisualRandomizer(model_name=obstacle_names, model_randomizer_type=ModelRandomizerType.MODEL)) def _compute_obstacle_poses(self): obstacle_dists = [] obstacle_lanes = [] lane_choices = (self.track_data.inner_lane, self.track_data.outer_lane) # use fix obstacle locations if self.object_locations: for object_location in self.object_locations: # index 0 is obstacle_ndist and index 1 is obstacle_lane object_location = object_location.split(",") obstacle_dists.append(float(object_location[0]) * \ self.track_data.center_line.length) # Inner lane is 1, outer lane is -1. 
If True, use outer lane obstacle_lanes.append(lane_choices[int(object_location[1]) == -1]) else: # Start with equally spaced obstacle_start_dist = self.min_obstacle_dist obstacle_end_dist = self.track_data.center_line.length - 1.0 obstacle_dists = np.linspace(obstacle_start_dist, obstacle_end_dist, self.num_obstacles) # Perturb to achieve randomness if self.randomize: i_obstacle = list(range(self.num_obstacles)) random.shuffle(i_obstacle) for i in i_obstacle: lo = obstacle_start_dist if (i == 0) \ else obstacle_dists[i-1] + self.min_obstacle_dist hi = obstacle_end_dist if (i == self.num_obstacles-1) \ else obstacle_dists[i+1] - self.min_obstacle_dist if lo < hi: obstacle_dists[i] = random.uniform(lo, hi) # Select a random lane for each obstacle for _ in obstacle_dists: use_outer_lane = random.choice((False, True)) obstacle_lanes.append(lane_choices[use_outer_lane]) else: # Alternate between lanes for each obstacle use_outer_lane = False for _ in obstacle_dists: obstacle_lanes.append(lane_choices[use_outer_lane]) use_outer_lane = not use_outer_lane # Compute the obstacle poses obstacle_poses = [] for obstacle_dist, obstacle_lane in zip(obstacle_dists, obstacle_lanes): obstacle_pose = obstacle_lane.interpolate_pose( obstacle_lane.project(self.track_data.center_line.interpolate(obstacle_dist))) if self.use_bot_car: obstacle_pose.position.z = BOT_CAR_Z else: obstacle_pose.position.z = OBSTACLE_Z obstacle_poses.append(obstacle_pose) # Return the poses return obstacle_poses def _spawn_obstacles(self): for obstacle_name, obstacle_pose in zip(self.obstacle_names, self.obstacle_poses): self.spawn_sdf_model(obstacle_name, self.obstacle_sdf, '/{}'.format(obstacle_name), obstacle_pose, '') self.track_data.initialize_object(obstacle_name, obstacle_pose, self.obstacle_dimensions) def _reset_obstacles(self): for obstacle_name, obstacle_pose in zip(self.obstacle_names, self.obstacle_poses): obstacle_state = ModelState() obstacle_state.model_name = obstacle_name obstacle_state.pose = obstacle_pose obstacle_state.twist.linear.x = 0 obstacle_state.twist.linear.y = 0 obstacle_state.twist.linear.z = 0 obstacle_state.twist.angular.x = 0 obstacle_state.twist.angular.y = 0 obstacle_state.twist.angular.z = 0 self.set_model_state(obstacle_state) self.track_data.reset_object(obstacle_name, obstacle_pose) @property def action_space(self): return None def reset_agent(self): self.obstacle_poses = self._compute_obstacle_poses() self._reset_obstacles() def send_action(self, action): pass def update_agent(self, action): return {} def judge_action(self, agents_info_map): for agent_name, agent_info in agents_info_map.items(): # check racecar crash with a obstacle crashed_object_name = agent_info[AgentInfo.CRASHED_OBJECT_NAME.value] \ if AgentInfo.CRASHED_OBJECT_NAME.value in agent_info else '' # only trainable racecar agent has 'obstacle' as possible crashed object if 'obstacle' in crashed_object_name: self._reset_obstacles() break return None, None, None def finish_episode(self): pass
reinforcement_learning/rl_deepracer_robomaker_coach_gazebo/src/markov/agent_ctrl/obstacles_agent_ctrl.py
8,047
configure domain randomizer This module implements concrete agent controllers for the rollout worker Read ros parameters OBJECT_POSITIONS will overwrite NUMBER_OF_OBSTACLES and RANDOMIZE_OBSTACLE_LOCATIONS track data Wait for ros services Load the obstacle sdf/urdf Set obstacle poses and spawn the obstacles use fix obstacle locations index 0 is obstacle_ndist and index 1 is obstacle_lane Inner lane is 1, outer lane is -1. If True, use outer lane Start with equally spaced Perturb to achieve randomness Select a random lane for each obstacle Alternate between lanes for each obstacle Compute the obstacle poses Return the poses check racecar crash with a obstacle only trainable racecar agent has 'obstacle' as possible crashed object
748
en
0.81149
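The placement strategy in _compute_obstacle_poses above can be illustrated without ROS or Gazebo; the sketch below uses a made-up track length and mirrors the equal-spacing plus bounded random perturbation step.

import random
import numpy as np

# Made-up values standing in for TrackData and the ros parameters.
num_obstacles, min_obstacle_dist, track_length = 4, 2.0, 40.0

# Start equally spaced along the centre line, then perturb in random order while
# keeping at least min_obstacle_dist to each neighbour (same bounds as above).
dists = np.linspace(min_obstacle_dist, track_length - 1.0, num_obstacles)
order = list(range(num_obstacles))
random.shuffle(order)
for i in order:
    lo = min_obstacle_dist if i == 0 else dists[i - 1] + min_obstacle_dist
    hi = track_length - 1.0 if i == num_obstacles - 1 else dists[i + 1] - min_obstacle_dist
    if lo < hi:
        dists[i] = random.uniform(lo, hi)
print(dists)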
# Generated by Django 2.1.5 on 2019-03-16 16:41 from django.db import migrations, models import django.db.models.deletion import wagtail.core.fields class Migration(migrations.Migration): dependencies = [ ("people", "0012_auto_20190316_1641"), ("services", "0023_key_points_heading_not_required"), ] operations = [ migrations.AddField( model_name="servicepage", name="contact_reasons", field=models.ForeignKey( blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="+", to="people.ContactReasonsList", ), ), migrations.AlterField( model_name="servicepage", name="heading_for_key_points", field=wagtail.core.fields.RichTextField(), ), ]
tbx/services/migrations/0024_auto_20190316_1641.py
897
Generated by Django 2.1.5 on 2019-03-16 16:41
45
en
0.536466
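For context, the model definition implied by this migration would look roughly like the sketch below; the base class and the surrounding fields are assumptions, since the migration only records the two field changes.

from django.db import models
from wagtail.core.fields import RichTextField
from wagtail.core.models import Page   # assumed base class, not stated by the migration

class ServicePage(Page):
    # Other fields omitted; only the fields touched by this migration are shown.
    heading_for_key_points = RichTextField()
    contact_reasons = models.ForeignKey(
        'people.ContactReasonsList',
        blank=True,
        null=True,
        on_delete=models.SET_NULL,
        related_name='+',
    )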
#!/usr/bin/env python # # Copyright 2019 YugaByte, Inc. and Contributors # # Licensed under the Polyform Free Trial License 1.0.0 (the "License"); you # may not use this file except in compliance with the License. You # may obtain a copy of the License at # # https://github.com/YugaByte/yugabyte-db/blob/master/licenses/POLYFORM-FREE-TRIAL-LICENSE-1.0.0.txt import boto3 import json import logging import os import re from ipaddr import IPNetwork from ybops.utils import get_or_create, get_and_cleanup from ybops.common.exceptions import YBOpsRuntimeError from ybops.cloud.common.utils import request_retry_decorator RESOURCE_PREFIX_FORMAT = "yb-{}" IGW_CIDR = "0.0.0.0/0" SUBNET_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT IGW_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-igw" ROUTE_TABLE_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-rt" SG_YUGABYTE_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-sg" PEER_CONN_FORMAT = "yb-peer-conn-{}-to-{}" class AwsBootstrapRegion(): def __init__(self, region, metadata, region_cidrs): self.region = region self.metadata = metadata self.region_cidrs = region_cidrs self.client = get_client(self.region) # Outputs. self.vpc = None self.igw = None self.peer_vpc = None self.sg_yugabyte = None self.subnets = [] self.route_table = None def bootstrap(self): self.setup_vpc() self.setup_igw() self.setup_subnets() self.setup_yugabyte_sg() self.setup_rt() def setup_vpc(self): vpc_region_tag = RESOURCE_PREFIX_FORMAT.format(self.region) vpc = create_vpc(client=self.client, tag_name=vpc_region_tag, cidr=get_region_cidr(self.metadata, self.region)) vpc.wait_until_available() self.vpc = vpc def setup_igw(self): igw_tag = IGW_PREFIX_FORMAT.format(self.region) igw = create_igw(client=self.client, tag_name=igw_tag, vpc=self.vpc) self.igw = igw def setup_subnets(self): zones = get_zones(self.region) subnets = {} for zone_index, zone in enumerate(sorted(zones.keys())): vpc_zone_tag = SUBNET_PREFIX_FORMAT.format(zone) zone_cidr = self.metadata["zone_cidr_format"].format( get_cidr_prefix(self.metadata, self.region), (zone_index + 1) * 16) subnet = create_subnet(self.client, self.vpc, zone, zone_cidr, vpc_zone_tag) subnets[zone] = subnet self.subnets = subnets def setup_yugabyte_sg(self): sg_group_name = get_yb_sg_name(self.region) rules = list(self.metadata["sg_rules"]) for r in rules: r.update({"cidr_ip": IGW_CIDR}) sg = create_security_group(client=self.client, group_name=sg_group_name, description="YugaByte SG", vpc=self.vpc, rules=rules) self.sg_yugabyte = sg def setup_rt(self): route_table_tag = ROUTE_TABLE_PREFIX_FORMAT.format(self.region) route_table = create_route_table(client=self.client, tag_name=route_table_tag, vpc=self.vpc) # TODO: handle private/public case at somepoint, also NAT. 
add_route_to_rt(route_table, IGW_CIDR, "GatewayId", self.igw.id) current_associated_subnet_ids = [assoc.subnet_id for assoc in route_table.associations] missing_ids = [subnet.id for subnet in self.subnets.values() if subnet.id not in current_associated_subnet_ids] for subnet_id in missing_ids: route_table.associate_with_subnet(SubnetId=subnet_id) self.route_table = route_table def add_sg_ingress_to_sg(self, incoming_sg, target_sg): current_sg_ids = set([pair["GroupId"] for perm in target_sg.ip_permissions for pair in perm["UserIdGroupPairs"]]) if incoming_sg.id not in current_sg_ids: target_sg.authorize_ingress( IpPermissions=[{ "IpProtocol": "-1", "UserIdGroupPairs": [{"GroupId": incoming_sg.id}]}]) def add_route_to_rt(route_table, cidr, target_type, target_id): kwargs = {target_type: target_id} route = get_route_by_cidr(route_table, cidr) if route is None: route_table.create_route(DestinationCidrBlock=cidr, **kwargs) elif getattr(route, dumb_camel_to_snake(target_type)) != target_id: route.replace(**kwargs) def add_cidr_to_rules(rules, cidr): rule_block = { "ip_protocol": "-1", "from_port": 0, "to_port": 65535, "cidr_ip": cidr } rules.append(rule_block) def get_cidr_prefix(metadata, region): return metadata["regions"][region]["cidr_prefix"] def get_region_cidr(metadata, region): return metadata["region_cidr_format"].format(get_cidr_prefix(metadata, region)) def get_region_cidrs(metadata): return dict([(r, get_region_cidr(metadata, r)) for r in metadata["regions"].keys()]) def dumb_camel_to_snake(s): return re.sub("([A-Z])", "_\\1", s).lower()[1:] class YbVpcComponents: def __init__(self): self.region = None self.vpc = None self.sg_yugabyte = None self.customer_sgs = None self.route_table = None self.subnets = None @staticmethod def from_pieces(region, vpc_id, sg_id, rt_id, az_to_subnet_ids): c = YbVpcComponents() c.region = region client = get_client(region) c.vpc = client.Vpc(vpc_id) c.sg_yugabyte = client.SecurityGroup(sg_id) c.route_table = client.RouteTable(rt_id) c.subnets = {az: client.Subnet(subnet_id) for az, subnet_id in az_to_subnet_ids.iteritems()} return c @staticmethod def from_user_json(region, per_region_meta): c = YbVpcComponents() c.region = region client = get_client(region) vpc_id = per_region_meta.get("vpcId") if vpc_id: c.vpc = client.Vpc(vpc_id) else: c.vpc = get_vpc(client, RESOURCE_PREFIX_FORMAT.format(region)) sg_ids = per_region_meta.get("customSecurityGroupId") if sg_ids: c.customer_sgs = [client.SecurityGroup(sg_id) for sg_id in sg_ids.split(",")] else: c.sg_yugabyte = get_security_group( client, SG_YUGABYTE_PREFIX_FORMAT.format(region), c.vpc) if not vpc_id: c.route_table = get_route_table(client, ROUTE_TABLE_PREFIX_FORMAT.format(region)) az_to_subnet_ids = {} if vpc_id: az_to_subnet_ids = per_region_meta.get("azToSubnetIds", {}) else: az_to_subnet_ids = get_zones(region) c.subnets = {az: client.Subnet(subnet_id) for az, subnet_id in az_to_subnet_ids.iteritems()} return c def as_json(self): sgs = self.customer_sgs if self.customer_sgs else [self.sg_yugabyte] return vpc_components_as_json(self.vpc, sgs, self.subnets) class AwsBootstrapClient(): def __init__(self, metadata, host_vpc_id, host_vpc_region): self.metadata = metadata self.host_vpc_id = host_vpc_id self.host_vpc_region = host_vpc_region self.region_cidrs = get_region_cidrs(self.metadata) # Validation. 
self._validate_cidr_overlap() def _validate_cidr_overlap(self): region_networks = [IPNetwork(cidr) for cidr in self.region_cidrs.values()] all_networks = region_networks for i in xrange(len(all_networks)): for j in xrange(i + 1, len(all_networks)): left = all_networks[i] right = all_networks[j] if left.overlaps(right): raise YBOpsRuntimeError( "IP blocks in the CIDRs overlap: {} - {}".format(left, right)) def bootstrap_individual_region(self, region): if region is None: raise YBOpsRuntimeError("Must provider region to bootstrap!") client = AwsBootstrapRegion(region, self.metadata, self.region_cidrs) client.bootstrap() return YbVpcComponents.from_pieces( region, client.vpc.id, client.sg_yugabyte.id, client.route_table.id, {az: s.id for az, s in client.subnets.iteritems()}) def cross_link_regions(self, components): # Do the cross linking, adding CIDR entries to RTs and SGs, as well as doing vpc peerings. region_and_vpc_tuples = [(r, c.vpc) for r, c in components.iteritems()] host_vpc = None if self.host_vpc_id and self.host_vpc_region: host_vpc = get_client(self.host_vpc_region).Vpc(self.host_vpc_id) region_and_vpc_tuples.append((self.host_vpc_region, host_vpc)) # Setup VPC peerings. for i in xrange(len(region_and_vpc_tuples) - 1): i_region, i_vpc = region_and_vpc_tuples[i] for j in xrange(i + 1, len(region_and_vpc_tuples)): j_region, j_vpc = region_and_vpc_tuples[j] peering = create_vpc_peering( # i is the host, j is the target. client=get_client(i_region), vpc=j_vpc, host_vpc=i_vpc, target_region=j_region) if len(peering) != 1: raise YBOpsRuntimeError( "Expecting one peering connection, got {}".format(peer_conn)) peering = peering[0] # Add route i -> j. add_route_to_rt(components[i_region].route_table, j_vpc.cidr_block, "VpcPeeringConnectionId", peering.id) # Add route j -> i. # Note: If we have a host_vpc, it is the last in the list, and it doesn't have an # associated component, so we special case it. if host_vpc is None or j != len(region_and_vpc_tuples) - 1: add_route_to_rt(components[j_region].route_table, i_vpc.cidr_block, "VpcPeeringConnectionId", peering.id) else: # TODO: should ideally filter to the RT that is relevant, but we do not really # know the subnets which matter from this host_vpc... for rt in list(host_vpc.route_tables.all()): add_route_to_rt(rt, i_vpc.cidr_block, "VpcPeeringConnectionId", peering.id) # Setup SG entries for all the CIDRs. all_cidrs = [vpc.cidr_block for r, vpc in region_and_vpc_tuples] rules = [] # Add CIDRs from all the VPCs, including the host. for cidr in all_cidrs: add_cidr_to_rules(rules, cidr) # Add CIDRs from any custom networks we have internally, primarily the OpenVPN in AWS. # TODO(bogdan): custom CIDR entries for cidr in self.metadata.get("custom_network_whitelisted_ip_cidrs", []): add_cidr_to_rules(rules, cidr) for region, component in components.iteritems(): sg = component.sg_yugabyte ip_perms = sg.ip_permissions for rule in rules: found = False for perm in ip_perms: if perm.get("FromPort") == rule["from_port"] and \ perm.get("ToPort") == rule["to_port"] and \ perm.get("IpProtocol") == rule["ip_protocol"] and \ len([True for r in perm.get("IpRanges", []) if r.get("CidrIp") == rule["cidr_ip"]]) > 0: # This rule matches this permission, so no need to add it. 
found = True break if not found: try: sg.authorize_ingress(IpProtocol=rule["ip_protocol"], CidrIp=rule["cidr_ip"], FromPort=rule["from_port"], ToPort=rule["to_port"]) except Exception as e: if "InvalidPermission.Duplicate" not in str(e): raise YBOpsRuntimeError( "Authorize Security Group Ingress failed: {}".format(repr(e))) def aws_exception_handler(e): """AWS specific exception handler. Args: e: the exception that was raised by the underlying API call that just failed. Returns: True if this exception can be retried, False otherwise. """ return "Request limit exceeded" in str(e) def aws_request_limit_retry(fn): """A decorator for retrying an AWS operation after exceeding request limit. Does retries with randomized jitter. Ideally, we should reconfigure boto3 to do the right kind of retries internally, but as of May 2017 there does not seem to be a good way of doing that. Initially not adding this decorator to all functions in this module. This should be done gradually as we encounter rate limiting errors. Relevant boto issues: https://github.com/boto/boto3/issues/770 https://github.com/boto/botocore/issues/882 """ return request_retry_decorator(fn, aws_exception_handler) def get_client(region): """Method to get boto3 ec2 resource for given region Args: region (str): Region name Returns: boto3 resource """ return boto3.resource("ec2", region_name=region) def get_clients(regions): """Method to get boto3 clients for given region or all the regions if none specified. Args: regions (list): List of regions to return clients for Returns: clients(obj): Map of region to boto3 resource """ return {region: get_client(region) for region in regions} def get_available_regions(metadata): return metadata["regions"].keys() def get_spot_pricing(region, zone, instance_type): client = boto3.client('ec2', region_name=region) prod_desc = ['Linux/UNIX (Amazon VPC)'] spot_price = client.describe_spot_price_history(InstanceTypes=[instance_type], MaxResults=1, ProductDescriptions=prod_desc, AvailabilityZone=zone) if len(spot_price['SpotPriceHistory']) == 0: raise YBOpsRuntimeError('Invalid instance type {} for zone {}'.format(instance_type, zone)) return spot_price['SpotPriceHistory'][0]['SpotPrice'] def get_zones(region, dest_vpc_id=None): """Method to fetch zones for given region or all the regions if none specified. Args: region (str): Name of region to get zones of. Returns: zones (obj): Map of zone -> subnet """ result = {} filters = get_filters("state", "available") client = boto3.client("ec2", region_name=region) zones = client.describe_availability_zones(Filters=filters).get("AvailabilityZones", []) new_client = get_client(region) zone_mapping = {} for z in zones: zone_name = z["ZoneName"] zone_tag = SUBNET_PREFIX_FORMAT.format(zone_name) region_vpc = None if dest_vpc_id: region_vpc = new_client.Vpc(dest_vpc_id) else: region_vpc = get_vpc(new_client, RESOURCE_PREFIX_FORMAT.format(region)) subnet = next(iter(fetch_subnets(region_vpc, zone_tag)), None) if subnet is None: subnet = next(iter([s for s in region_vpc.subnets.all() if s.availability_zone == zone_name]), None) zone_mapping[zone_name] = subnet.id if subnet is not None else None return zone_mapping def get_vpc(client, tag_name, **kwargs): """Method to fetch vpc based on the tag_name. Args: client (boto client): Boto Client for the region to query. tag_name (str): VPC tag name. Returns: VPC obj: VPC object or None. 
""" filters = get_tag_filter(tag_name) return next(iter(client.vpcs.filter(Filters=filters)), None) def fetch_subnets(vpc, tag_name): """Method to fetch subnets based on the tag_name. Args: vpc (vpc obj): VPC object to search for subnets tag_name (str): subnet tag name. Returns: subnets (list): list of aws subnets for given vpc. """ filters = get_tag_filter(tag_name) return vpc.subnets.filter(Filters=filters) def create_subnet(client, vpc, zone, cidr, tag_name): """Method to create subnet based on cidr and tag name. Args: client (boto client): Region specific boto client vpc (VPC object): VPC object to create subnet. zone (str): Availability zone name cidr (str): CIDR string tag_name (str): Tag name for subnet. Returns: subnet: Newly created subnet object. """ subnet = next((s for s in fetch_subnets(vpc, tag_name) if s.cidr_block == cidr), None) if subnet is None: subnet = vpc.create_subnet(CidrBlock=cidr, AvailabilityZone=zone) # TODO: no direct waiter on subnet just yet, it seems... client.meta.client.get_waiter("subnet_available").wait(SubnetIds=[subnet.id]) tag_resource_name(client, subnet.id, tag_name) return subnet def get_security_group(client, group_name, vpc, **kwargs): """Method to fetch security group based on the group_name. Args: client (boto client): Region specific boto client group_name (str): Security Group name vpc (VPC object): The VPC in which to check for the SG Returns: SecurityGroup: Matching security group. """ filters = get_filters("group-name", group_name) + get_filters("vpc-id", vpc.id) return next(iter(client.security_groups.filter(Filters=filters)), None) @get_or_create(get_security_group) def create_security_group(client, group_name, vpc, description, rules): """Method to create a security group based on the group_name and authorize ingress with the rules provided. Args: client (boto client): Region specific boto client group_name (str): security group name description (str): description of the security group vpc (VPC Object): VPC object to create the security group rules (dict): List of rules to add to security group. """ sg = vpc.create_security_group(GroupName=group_name, Description=description) try: for rule in rules: sg.authorize_ingress(IpProtocol=rule["ip_protocol"], CidrIp=rule["cidr_ip"], FromPort=rule["from_port"], ToPort=rule["to_port"]) except Exception as e: logging.error("Authorize Security Group Ingress failed: {}".format(e)) sg.delete() raise YBOpsRuntimeError("Security Group creation failed.") return sg def get_igw(client, tag_name, **kwargs): """Method to fetch Internet Gateway based on tag_name. Args: client (boto client): Region specific boto client tag_name (str): Internet Gateway tag name. Returns: internet_gateway: internet gateway object. """ filters = get_tag_filter(tag_name) return next(iter(client.internet_gateways.filter(Filters=filters)), None) @get_or_create(get_igw) def create_igw(client, tag_name, vpc): """Method to create Internet Gateway based on tag_name in given VPC. If the gateway already exists, it would return that object. If the object doesn't have a tag, we would tag it accordingly. Args: client (boto client): Region specific boto client tag_name (str): Tag name for internet gateway. vpc (VPC object): VPC object to create Internet Gateway Returns: internet gateway: newly internet gateway object. """ # Query to make sure the region doesn't have any IGW already attached. 
existing_igw = next(iter(vpc.internet_gateways.all()), None) if existing_igw is not None: # If we have existing igw for the region, lets just tag it with yb-XX-igw tag_resource_name(client, existing_igw.id, tag_name) return existing_igw # If we don't have a internet gateway, lets create one and attach it to vpc igw = client.create_internet_gateway() tag_resource_name(client, igw.id, tag_name) vpc.attach_internet_gateway(InternetGatewayId=igw.id) return igw def get_route_table(client, tag_name, **kwargs): """Method to fetch route table based on tag_name Args: client (boto client): Region specific boto client tag_name (str): Route table tag name to search for. Returns: RouteTable (obj): Matching route table object or None. """ filters = get_tag_filter(tag_name) return next(iter(client.route_tables.filter(Filters=filters)), None) @get_or_create(get_route_table) def create_route_table(client, tag_name, vpc): """Method to create route table based on tag_name in given VPC. It will first query for the tag name to see if the route table already exists or if one is already attached to the VPC, if so it will return that route table. Args: client (boto client): Region specific boto client tag_name (str): Route table tag name vpc (vpc object): VPC object to create the route table against Returns: RouteTable (obj): newly created RouteTable object. """ # Check to see if there is a route table attached to VPC, if so, we can just tag it existing_route_table = next(iter(vpc.route_tables.all()), None) if existing_route_table is not None: tag_resource_name(client, existing_route_table.id, tag_name) return existing_route_table # If no route table exists, we can create one and tag it. route_table = vpc.create_route_table() tag_resource_name(client, route_table.id, tag_name) return route_table @get_and_cleanup(get_security_group) def cleanup_security_group(sg, **kwargs): """Method to cleanup security group for the matching group_name. Args: sg: Instance of security group matching the group_name. """ sg.delete() @get_and_cleanup(get_igw) def cleanup_igw(igw, **kwargs): """Method to cleanup Internet Gateway matching the tag name. And also remove any vpc that is attached to the Internet Gateway. Args: igw: Instance of Internet Gateway matching tag_name. """ for vpc in igw.attachments: igw.detach_from_vpc(VpcId=vpc['VpcId']) igw.delete() @get_and_cleanup(get_route_table) def cleanup_route_table(rt, **kwargs): """Method to cleanup the Route Table matching the tag name. Args: rt: Instance of Route Table matching tag_name. """ rt.delete() def get_route_by_cidr(route_table, cidr): """Method to check if given CIDR already attached to route table. Args: RouteTable (obj): Route Table object. cidr (str): CIDR string to check in route table. Returns: Route: the route for this CIDR or None if not found """ return dict((r.destination_cidr_block, r) for r in route_table.routes).get(cidr) @get_or_create(get_vpc) def create_vpc(client, tag_name, cidr): """Method to create vpc based on the cidr and tag with tag_name. Args: client (boto client): Region specific boto client tag_name (str): VPC tag name cidr (str): CIDR string. Returns: VPC(Object): Newly created VPC object. """ vpc = client.create_vpc(CidrBlock=cidr) vpc.modify_attribute(EnableDnsHostnames={'Value': True}) tag_resource_name(client, vpc.id, tag_name) return vpc def set_yb_sg_and_fetch_vpc(metadata, region, dest_vpc_id): """Method to bootstrap vpc and security group, and enable vpc peering with the host_instance vpc. 
Args: metadata (obj): Cloud metadata object with cidr prefix and other metadata. region (str): Region name to create the vpc in. dest_vpc_id (str): Id of the VPC that yugabyte machines will reside in. Returns: vpc_info (json): return vpc, subnet and security group as json. """ client = get_client(region) dest_vpc = client.Vpc(dest_vpc_id) subnets = {subnet.availability_zone: subnet for subnet in dest_vpc.subnets.all()} sg_group_name = get_yb_sg_name(region) rules = metadata["sg_rules"] for r in rules: r.update({"cidr_ip": IGW_CIDR}) add_cidr_to_rules(rules, dest_vpc.cidr_block) sgs = [create_security_group(client=client, group_name=sg_group_name, vpc=dest_vpc, description="YugaByte SG", rules=rules)] return vpc_components_as_json(dest_vpc, sgs, subnets) def query_vpc(region): """Method to query VPC against given region and respective subnets. Args: region (str): Region name to query the VPC. Returns: vpc and subnet info (obj): Object with region and zone subnet id. """ per_vpc_info = {} # Fetch all available AZs, as we want to group subnets by AZ. raw_client = boto3.client("ec2", region_name=region) zones = [z["ZoneName"] for z in raw_client.describe_availability_zones( Filters=get_filters("state", "available")).get("AvailabilityZones", [])] # Default to empty lists, in case some zones do not have subnets, so we can use this as a query # for all available AZs in this region. subnets_by_zone = {z: [] for z in zones} # Fetch SGs and group them by VPC ID. client = get_client(region) per_vpc_sgs = {} sgs = client.security_groups.all() for sg in sgs: sg_list = per_vpc_sgs.setdefault(sg.vpc_id, []) sg_list.append({ "sg_id": sg.group_id, # Note: Name tag is not mandatory or always present but group_name is! "sg_name": sg.group_name }) # Fetch all available VPCs so we can group by VPC ID. region_vpcs = client.vpcs.all() for vpc in region_vpcs: # Filter for available subnets and group by AZ. subnets = vpc.subnets.filter(Filters=get_filters("state", "available")) for s in subnets: subnets_for_this_az = subnets_by_zone.setdefault(s.availability_zone, []) subnets_for_this_az.append({ "subnet_id": s.subnet_id, "name": _get_name_from_tags(s.tags), "public": s.map_public_ip_on_launch }) vpc_info = { "subnets_by_zone": subnets_by_zone, # In case we somehow did not find any SGs, default to empty list. "security_groups": per_vpc_sgs.get(vpc.id, []) } per_vpc_info[vpc.id] = vpc_info region_json = { "per_vpc_info": per_vpc_info } return region_json def _get_name_from_tags(tags): for t in tags if tags else []: if t.get("Key") == "Name": return t.get("Value", None) return None def vpc_components_as_json(vpc, sgs, subnets): """Method takes VPC, Security Group and Subnets and returns a json data format with ids. Args: vpc (VPC Object): Region specific VPC object sgs (List of Security Group Object): Region specific Security Group object subnets (subnet object map): Map of Subnet objects keyed of zone. Retuns: json (str): A Json string for yugaware to consume with necessary ids. """ result = {} result["vpc_id"] = vpc.id result["security_group"] = [{"id": sg.group_id, "name": sg.group_name} for sg in sgs] result["zones"] = {} for zone, subnet in subnets.iteritems(): result["zones"][zone] = subnet.id return result def delete_vpc(region, host_vpc_id=None, host_vpc_region=None): """Method to delete VPC, Subnet, Internet Gateway, Route Table and VPC peering. Args: region (str): Region name to query the VPC. 
""" vpc_region_tag = RESOURCE_PREFIX_FORMAT.format(region) client = get_client(region) region_vpc = get_vpc(client, vpc_region_tag) if region_vpc is None: raise YBOpsRuntimeError("VPC not setup.") zones = get_zones(region) # Remove the yugabyte SG first. sg_group_name = get_yb_sg_name(region) cleanup_security_group(client=client, group_name=sg_group_name, vpc=region_vpc) # Cleanup the subnets. for zone, subnet_id in zones.iteritems(): vpc_zone_tag = SUBNET_PREFIX_FORMAT.format(zone) if subnet_id is not None: client.Subnet(subnet_id).delete() # Remove the IGW. igw_tag = IGW_PREFIX_FORMAT.format(region) cleanup_igw(client=client, tag_name=igw_tag) # Remove this region's CIDR from the RT of the host vpc. host_vpc = None if host_vpc_id is not None and host_vpc_region is not None: host_vpc = get_client(host_vpc_region).Vpc(host_vpc_id) for rt in list(host_vpc.route_tables.all()): delete_route(rt, region_vpc.cidr_block) # Remove all of the VPC peerings of this vpc. cleanup_vpc_peering(client=client, vpc=region_vpc, host_vpc=None) # Delete the VPC itself. region_vpc.delete() # Finally cleanup the Routing Table. route_table_tag = ROUTE_TABLE_PREFIX_FORMAT.format(region) cleanup_route_table(client=client, tag_name=route_table_tag) return {"success": "VPC deleted."} def tag_resource_name(client, resource_id, tag_name): """Method to create name tag for given resource. Args: client (boto3 client): Region specific boto client resource_id (str): EC2 resource id to tag tag_name (str): Tag name. """ tag_resource(client, resource_id, "Name", tag_name) def tag_resource(client, resource_id, tag_key, tag_value): """Method to attach arbitrary key-value tags to resources. Args: client (boto3 client): Region specific boto client resource_id (str): EC2 resource id to tag tag_key: Tag key tag_value: Tag value """ tags = [{"Key": tag_key, "Value": tag_value}] client.create_tags(Resources=[resource_id], Tags=tags) def get_filters(key, value): return [{'Name': key, 'Values': [value]}] def get_tag_filter(tag_name): return get_filters("tag:Name", tag_name) def get_vpc_peerings(vpc, host_vpc, **kwargs): """Method to fetch all the VPC peerings against given VPC. If host_vpc is provided it will check if there is a peering against that vpc. Args: vpc(VPC object): VPC Object to search for peerings host_vpc (Host VPC object): Can be Null as well, to check if specific host_vpc peering is done. Returns: VPC peering (array): Array list of vpc peerings. """ output = [] # Search through accepted vpc peerings. vpc_peerings = vpc.accepted_vpc_peering_connections.all() output.extend([vp for vp in vpc_peerings if vp.status.get('Code') == "active" and (host_vpc is None or vp.requester_vpc == host_vpc)]) # Also search through requested vpc peerings. vpc_peerings = vpc.requested_vpc_peering_connections.all() output.extend([vp for vp in vpc_peerings if vp.status.get('Code') == "active" and (host_vpc is None or vp.accepter_vpc == host_vpc)]) return output @get_and_cleanup(get_vpc_peerings) def cleanup_vpc_peering(vpc_peerings, **kwargs): for vpc_peering in vpc_peerings: vpc_peering.delete() @get_or_create(get_vpc_peerings) def create_vpc_peering(client, vpc, host_vpc, target_region): """Method would create a vpc peering between the newly created VPC and caller's VPC Also makes sure, if they aren't the same, then there is no need for vpc peering. Args: client (boto client): Region specific boto client vpc (VPC object): Newly created VPC object host_vpc(Host VPC object): Host VPC to peer with. 
target_region (region name): Region name in which peering is being created. Returns: VPC peering (array): Array list of vpc peerings. """ try: peer_conn = client.create_vpc_peering_connection( VpcId=host_vpc.id, PeerVpcId=vpc.id, PeerRegion=target_region) peer_conn.wait_until_exists() # Need to accept from the other end. remote_peer_conn = get_client(target_region).VpcPeeringConnection(peer_conn.id) remote_peer_conn.wait_until_exists() remote_peer_conn.accept() return [peer_conn] except Exception as e: logging.error(e) raise YBOpsRuntimeError("Unable to create VPC peering.") def get_device_names(instance_type, num_volumes): device_names = [] for i in xrange(num_volumes): device_name_format = "nvme{}n1" if is_nvme(instance_type) else "xvd{}" index = "{}".format(i if is_nvme(instance_type) else chr(ord('b') + i)) device_names.append(device_name_format.format(index)) return device_names def is_next_gen(instance_type): return instance_type.startswith(("c3", "c4", "c5", "m4", "r4")) def is_nvme(instance_type): return instance_type.startswith("i3") def has_ephemerals(instance_type): return not is_nvme(instance_type) and not is_next_gen(instance_type) def create_instance(args): client = get_client(args.region) vars = { "ImageId": args.machine_image, "KeyName": args.key_pair_name, "MinCount": 1, "MaxCount": 1, "InstanceType": args.instance_type, } # Network setup. # Lets assume they have provided security group id comma delimited. sg_ids = args.security_group_id.split(",") if args.security_group_id else None if sg_ids is None: # Figure out which VPC this instance will be brought up in and search for the SG in there. # This is for a bit of backwards compatibility with the previous mode of potentially using # YW's VPC, in which we would still deploy a SG with the same name as in our normal VPCs. # This means there could be customers that had that deployment mode from the start AND have # a SG we created back then, with the internal naming convention we use, but NOT in the YB # VPC (which they likely will not even have). vpc = get_vpc_for_subnet(client, args.cloud_subnet) sg_name = get_yb_sg_name(args.region) sg = get_security_group(client, sg_name, vpc) sg_ids = [sg.id] vars["NetworkInterfaces"] = [{ "DeviceIndex": 0, "AssociatePublicIpAddress": args.assign_public_ip, "SubnetId": args.cloud_subnet, "Groups": sg_ids }] # Volume setup. volumes = [] ebs = { "DeleteOnTermination": True, # TODO: constant "VolumeSize": 40, "VolumeType": "gp2" } if args.cmk_res_name is not None: ebs["Encrypted"] = True ebs["KmsKeyId"] = args.cmk_res_name volumes.append({ "DeviceName": "/dev/sda1", "Ebs": ebs }) device_names = get_device_names(args.instance_type, args.num_volumes) # TODO: Clean up semantics on nvme vs "next-gen" vs ephemerals, as this is currently whack... for i, device_name in enumerate(device_names): volume = {} if has_ephemerals(args.instance_type): volume = { "DeviceName": "/dev/{}".format(device_name), "VirtualName": "ephemeral{}".format(i) } elif is_next_gen(args.instance_type): ebs = { "DeleteOnTermination": True, "VolumeType": args.volume_type, # TODO: make this int. "VolumeSize": args.volume_size } if args.cmk_res_name is not None: ebs["Encrypted"] = True ebs["KmsKeyId"] = args.cmk_res_name if args.volume_type == "io1": # TODO: make this int. ebs["Iops"] = args.disk_iops volume = { "DeviceName": "/dev/{}".format(device_name), "Ebs": ebs } volumes.append(volume) vars["BlockDeviceMappings"] = volumes # Tag setup. def __create_tag(k, v): return {"Key": k, "Value": v} # Add Name all the time. 
instance_tags = [ __create_tag("Name", args.search_pattern), __create_tag("launched-by", os.environ.get("USER", "unknown")), __create_tag("yb-server-type", args.type) ] custom_tags = args.instance_tags if args.instance_tags is not None else '{}' for k, v in json.loads(custom_tags).iteritems(): instance_tags.append(__create_tag(k, v)) vars["TagSpecifications"] = [{ "ResourceType": "instance", "Tags": instance_tags }] # TODO: user_data > templates/cloud_init.yml.j2, still needed? instance_ids = client.create_instances(**vars) if len(instance_ids) != 1: logging.error("Invalid create_instances response: {}".format(instance_ids)) raise YBOpsRuntimeError("Expected to create 1 instance, got {}".format( len(instance_ids))) instance = instance_ids[0] instance.wait_until_running() def modify_tags(region, instance_id, tags_to_set_str, tags_to_remove_str): instance = get_client(region).Instance(instance_id) # Remove all the tags we were asked to, except the internal ones. tags_to_remove = set(tags_to_remove_str.split(",") if tags_to_remove_str else []) # TODO: combine these with the above instance creation function. internal_tags = set(["Name", "launched-by", "yb-server-type"]) if tags_to_remove & internal_tags: raise YBOpsRuntimeError( "Was asked to remove tags: {}, which contain internal tags: {}".format( tags_to_remove, internal_tags )) # Note: passing an empty list to Tags will remove all tags from the instance. if tags_to_remove: instance.delete_tags(Tags=[{"Key": k} for k in tags_to_remove]) # Set all the tags provided. tags_to_set = json.loads(tags_to_set_str if tags_to_set_str else "{}") customer_tags = [] for k, v in tags_to_set.iteritems(): customer_tags.append({"Key": k, "Value": v}) instance.create_tags(Tags=customer_tags) def delete_route(rt, cidr): route = get_route_by_cidr(rt, cidr) if route is not None: route.delete() def get_vpc_for_subnet(client, subnet): return client.Subnet(subnet).vpc def get_yb_sg_name(region): return SG_YUGABYTE_PREFIX_FORMAT.format(region) def list_dns_record_set(hosted_zone_id): return boto3.client('route53').get_hosted_zone(Id=hosted_zone_id) def create_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list): return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'CREATE') def edit_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list): return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'UPSERT') def delete_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list): return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'DELETE') def _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, action): client = boto3.client('route53') records = [] for ip in ip_list: records.append({'Value': ip}) result = list_dns_record_set(hosted_zone_id) hosted_zone_name = result['HostedZone']['Name'] change_batch = { 'Comment': "YugaWare driven Record Set", 'Changes': [{ 'Action': action, 'ResourceRecordSet': { 'Name': "{}.{}".format(domain_name_prefix, hosted_zone_name), 'Type': 'A', 'TTL': 5, 'ResourceRecords': records } }] } result = client.change_resource_record_sets( HostedZoneId=hosted_zone_id, ChangeBatch=change_batch) client.get_waiter('resource_record_sets_changed').wait( Id=result['ChangeInfo']['Id'], WaiterConfig={ 'Delay': 10, 'MaxAttempts': 60 })
managed/devops/opscli/ybops/cloud/aws/utils.py
40,257
AWS specific exception handler. Args: e: the exception that was raised by the underlying API call that just failed. Returns: True if this exception can be retried, False otherwise. A decorator for retrying an AWS operation after exceeding request limit. Does retries with randomized jitter. Ideally, we should reconfigure boto3 to do the right kind of retries internally, but as of May 2017 there does not seem to be a good way of doing that. Initially not adding this decorator to all functions in this module. This should be done gradually as we encounter rate limiting errors. Relevant boto issues: https://github.com/boto/boto3/issues/770 https://github.com/boto/botocore/issues/882 Method to cleanup Internet Gateway matching the tag name. And also remove any vpc that is attached to the Internet Gateway. Args: igw: Instance of Internet Gateway matching tag_name. Method to cleanup the Route Table matching the tag name. Args: rt: Instance of Route Table matching tag_name. Method to cleanup security group for the matching group_name. Args: sg: Instance of security group matching the group_name. Method to create Internet Gateway based on tag_name in given VPC. If the gateway already exists, it would return that object. If the object doesn't have a tag, we would tag it accordingly. Args: client (boto client): Region specific boto client tag_name (str): Tag name for internet gateway. vpc (VPC object): VPC object to create Internet Gateway Returns: internet gateway: newly internet gateway object. Method to create route table based on tag_name in given VPC. It will first query for the tag name to see if the route table already exists or if one is already attached to the VPC, if so it will return that route table. Args: client (boto client): Region specific boto client tag_name (str): Route table tag name vpc (vpc object): VPC object to create the route table against Returns: RouteTable (obj): newly created RouteTable object. Method to create a security group based on the group_name and authorize ingress with the rules provided. Args: client (boto client): Region specific boto client group_name (str): security group name description (str): description of the security group vpc (VPC Object): VPC object to create the security group rules (dict): List of rules to add to security group. Method to create subnet based on cidr and tag name. Args: client (boto client): Region specific boto client vpc (VPC object): VPC object to create subnet. zone (str): Availability zone name cidr (str): CIDR string tag_name (str): Tag name for subnet. Returns: subnet: Newly created subnet object. Method to create vpc based on the cidr and tag with tag_name. Args: client (boto client): Region specific boto client tag_name (str): VPC tag name cidr (str): CIDR string. Returns: VPC(Object): Newly created VPC object. Method would create a vpc peering between the newly created VPC and caller's VPC Also makes sure, if they aren't the same, then there is no need for vpc peering. Args: client (boto client): Region specific boto client vpc (VPC object): Newly created VPC object host_vpc(Host VPC object): Host VPC to peer with. target_region (region name): Region name in which peering is being created. Returns: VPC peering (array): Array list of vpc peerings. Method to delete VPC, Subnet, Internet Gateway, Route Table and VPC peering. Args: region (str): Region name to query the VPC. Method to fetch subnets based on the tag_name. Args: vpc (vpc obj): VPC object to search for subnets tag_name (str): subnet tag name. 
Returns: subnets (list): list of aws subnets for given vpc. Method to get boto3 ec2 resource for given region Args: region (str): Region name Returns: boto3 resource Method to get boto3 clients for given region or all the regions if none specified. Args: regions (list): List of regions to return clients for Returns: clients(obj): Map of region to boto3 resource Method to fetch Internet Gateway based on tag_name. Args: client (boto client): Region specific boto client tag_name (str): Internet Gateway tag name. Returns: internet_gateway: internet gateway object. Method to check if given CIDR already attached to route table. Args: RouteTable (obj): Route Table object. cidr (str): CIDR string to check in route table. Returns: Route: the route for this CIDR or None if not found Method to fetch route table based on tag_name Args: client (boto client): Region specific boto client tag_name (str): Route table tag name to search for. Returns: RouteTable (obj): Matching route table object or None. Method to fetch security group based on the group_name. Args: client (boto client): Region specific boto client group_name (str): Security Group name vpc (VPC object): The VPC in which to check for the SG Returns: SecurityGroup: Matching security group. Method to fetch vpc based on the tag_name. Args: client (boto client): Boto Client for the region to query. tag_name (str): VPC tag name. Returns: VPC obj: VPC object or None. Method to fetch all the VPC peerings against given VPC. If host_vpc is provided it will check if there is a peering against that vpc. Args: vpc(VPC object): VPC Object to search for peerings host_vpc (Host VPC object): Can be Null as well, to check if specific host_vpc peering is done. Returns: VPC peering (array): Array list of vpc peerings. Method to fetch zones for given region or all the regions if none specified. Args: region (str): Name of region to get zones of. Returns: zones (obj): Map of zone -> subnet Method to query VPC against given region and respective subnets. Args: region (str): Region name to query the VPC. Returns: vpc and subnet info (obj): Object with region and zone subnet id. Method to bootstrap vpc and security group, and enable vpc peering with the host_instance vpc. Args: metadata (obj): Cloud metadata object with cidr prefix and other metadata. region (str): Region name to create the vpc in. dest_vpc_id (str): Id of the VPC that yugabyte machines will reside in. Returns: vpc_info (json): return vpc, subnet and security group as json. Method to attach arbitrary key-value tags to resources. Args: client (boto3 client): Region specific boto client resource_id (str): EC2 resource id to tag tag_key: Tag key tag_value: Tag value Method to create name tag for given resource. Args: client (boto3 client): Region specific boto client resource_id (str): EC2 resource id to tag tag_name (str): Tag name. Method takes VPC, Security Group and Subnets and returns a json data format with ids. Args: vpc (VPC Object): Region specific VPC object sgs (List of Security Group Object): Region specific Security Group object subnets (subnet object map): Map of Subnet objects keyed of zone. Retuns: json (str): A Json string for yugaware to consume with necessary ids. !/usr/bin/env python Copyright 2019 YugaByte, Inc. and Contributors Licensed under the Polyform Free Trial License 1.0.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at https://github.com/YugaByte/yugabyte-db/blob/master/licenses/POLYFORM-FREE-TRIAL-LICENSE-1.0.0.txt Outputs. TODO: handle private/public case at somepoint, also NAT. Validation. Do the cross linking, adding CIDR entries to RTs and SGs, as well as doing vpc peerings. Setup VPC peerings. i is the host, j is the target. Add route i -> j. Add route j -> i. Note: If we have a host_vpc, it is the last in the list, and it doesn't have an associated component, so we special case it. TODO: should ideally filter to the RT that is relevant, but we do not really know the subnets which matter from this host_vpc... Setup SG entries for all the CIDRs. Add CIDRs from all the VPCs, including the host. Add CIDRs from any custom networks we have internally, primarily the OpenVPN in AWS. TODO(bogdan): custom CIDR entries This rule matches this permission, so no need to add it. TODO: no direct waiter on subnet just yet, it seems... Query to make sure the region doesn't have any IGW already attached. If we have existing igw for the region, lets just tag it with yb-XX-igw If we don't have a internet gateway, lets create one and attach it to vpc Check to see if there is a route table attached to VPC, if so, we can just tag it If no route table exists, we can create one and tag it. Fetch all available AZs, as we want to group subnets by AZ. Default to empty lists, in case some zones do not have subnets, so we can use this as a query for all available AZs in this region. Fetch SGs and group them by VPC ID. Note: Name tag is not mandatory or always present but group_name is! Fetch all available VPCs so we can group by VPC ID. Filter for available subnets and group by AZ. In case we somehow did not find any SGs, default to empty list. Remove the yugabyte SG first. Cleanup the subnets. Remove the IGW. Remove this region's CIDR from the RT of the host vpc. Remove all of the VPC peerings of this vpc. Delete the VPC itself. Finally cleanup the Routing Table. Search through accepted vpc peerings. Also search through requested vpc peerings. Need to accept from the other end. Network setup. Lets assume they have provided security group id comma delimited. Figure out which VPC this instance will be brought up in and search for the SG in there. This is for a bit of backwards compatibility with the previous mode of potentially using YW's VPC, in which we would still deploy a SG with the same name as in our normal VPCs. This means there could be customers that had that deployment mode from the start AND have a SG we created back then, with the internal naming convention we use, but NOT in the YB VPC (which they likely will not even have). Volume setup. TODO: constant TODO: Clean up semantics on nvme vs "next-gen" vs ephemerals, as this is currently whack... TODO: make this int. TODO: make this int. Tag setup. Add Name all the time. TODO: user_data > templates/cloud_init.yml.j2, still needed? Remove all the tags we were asked to, except the internal ones. TODO: combine these with the above instance creation function. Note: passing an empty list to Tags will remove all tags from the instance. Set all the tags provided.
10,491
en
0.831352
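Illustrative sketch for the record above: the aws_request_limit_retry docstring describes retrying throttled AWS calls with randomized jitter, but the underlying request_retry_decorator is imported from elsewhere in ybops and not shown. A minimal version of such a decorator, assuming an exponential-backoff-plus-jitter policy and a max_attempts cap that are my own additions (the real helper may differ):

import random
import time
from functools import wraps

def request_retry_decorator(fn, exception_handler, max_attempts=5):
    # Retry fn while exception_handler classifies the error as transient,
    # sleeping with exponential backoff plus randomized jitter in between.
    @wraps(fn)
    def wrapper(*args, **kwargs):
        for attempt in range(max_attempts):
            try:
                return fn(*args, **kwargs)
            except Exception as e:
                if attempt == max_attempts - 1 or not exception_handler(e):
                    raise
                time.sleep(2 ** attempt + random.uniform(0, 1))
    return wrapper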
# -*- coding: utf-8 -*- """An implementation of the extension to ERMLP.""" from typing import Optional, Type import torch from torch import nn from ..base import EntityRelationEmbeddingModel from ...losses import BCEAfterSigmoidLoss, Loss from ...regularizers import Regularizer from ...triples import TriplesFactory __all__ = [ 'ERMLPE', ] class ERMLPE(EntityRelationEmbeddingModel): r"""An extension of ERMLP proposed by [sharifzadeh2019]_. This model uses a neural network-based approach similar to ERMLP and with slight modifications. In ERMLP, the model is: .. math:: f(h, r, t) = \textbf{w}^{T} g(\textbf{W} [\textbf{h}; \textbf{r}; \textbf{t}]) whereas in ERMPLE the model is: .. math:: f(h, r, t) = \textbf{t}^{T} f(\textbf{W} (g(\textbf{W} [\textbf{h}; \textbf{r}])) including dropouts and batch-norms between each two hidden layers. ConvE can be seen as a special case of ERMLPE that contains the unnecessary inductive bias of convolutional filters. The aim of this model is to show that lifting this bias from ConvE (which simply leaves us with a modified ERMLP model), not only reduces the number of parameters but also improves performance. """ #: The default strategy for optimizing the model's hyper-parameters hpo_default = dict( embedding_dim=dict(type=int, low=50, high=350, q=25), hidden_dim=dict(type=int, low=50, high=450, q=25), input_dropout=dict(type=float, low=0.0, high=0.8, q=0.1), hidden_dropout=dict(type=float, low=0.0, high=0.8, q=0.1), ) #: The default loss function class loss_default: Type[Loss] = BCEAfterSigmoidLoss #: The default parameters for the default loss function class loss_default_kwargs = {} def __init__( self, triples_factory: TriplesFactory, hidden_dim: int = 300, input_dropout: float = 0.2, hidden_dropout: float = 0.3, embedding_dim: int = 200, automatic_memory_optimization: Optional[bool] = None, loss: Optional[Loss] = None, preferred_device: Optional[str] = None, random_seed: Optional[int] = None, regularizer: Optional[Regularizer] = None, ) -> None: super().__init__( triples_factory=triples_factory, embedding_dim=embedding_dim, automatic_memory_optimization=automatic_memory_optimization, loss=loss, preferred_device=preferred_device, random_seed=random_seed, regularizer=regularizer, ) self.hidden_dim = hidden_dim self.input_dropout = input_dropout self.linear1 = nn.Linear(2 * self.embedding_dim, self.hidden_dim) self.linear2 = nn.Linear(self.hidden_dim, self.embedding_dim) self.input_dropout = nn.Dropout(self.input_dropout) self.bn1 = nn.BatchNorm1d(self.hidden_dim) self.bn2 = nn.BatchNorm1d(self.embedding_dim) self.mlp = nn.Sequential( self.linear1, nn.Dropout(hidden_dropout), self.bn1, nn.ReLU(), self.linear2, nn.Dropout(hidden_dropout), self.bn2, nn.ReLU(), ) # Finalize initialization self.reset_parameters_() def _reset_parameters_(self): # noqa: D102 self.entity_embeddings.reset_parameters() self.relation_embeddings.reset_parameters() for module in [ self.linear1, self.linear2, self.bn1, self.bn2, ]: module.reset_parameters() def score_hrt(self, hrt_batch: torch.LongTensor) -> torch.FloatTensor: # noqa: D102 # Get embeddings h = self.entity_embeddings(hrt_batch[:, 0]).view(-1, self.embedding_dim) r = self.relation_embeddings(hrt_batch[:, 1]).view(-1, self.embedding_dim) t = self.entity_embeddings(hrt_batch[:, 2]) # Embedding Regularization self.regularize_if_necessary(h, r, t) # Concatenate them x_s = torch.cat([h, r], dim=-1) x_s = self.input_dropout(x_s) # Predict t embedding x_t = self.mlp(x_s) # compare with all t's # For efficient calculation, each of the 
calculated [h, r] rows has only to be multiplied with one t row x = (x_t.view(-1, self.embedding_dim) * t).sum(dim=1, keepdim=True) # The application of the sigmoid during training is automatically handled by the default loss. return x def score_t(self, hr_batch: torch.LongTensor) -> torch.FloatTensor: # noqa: D102 h = self.entity_embeddings(hr_batch[:, 0]).view(-1, self.embedding_dim) r = self.relation_embeddings(hr_batch[:, 1]).view(-1, self.embedding_dim) t = self.entity_embeddings.weight.transpose(1, 0) # Embedding Regularization self.regularize_if_necessary(h, r, t) # Concatenate them x_s = torch.cat([h, r], dim=-1) x_s = self.input_dropout(x_s) # Predict t embedding x_t = self.mlp(x_s) x = x_t @ t # The application of the sigmoid during training is automatically handled by the default loss. return x def score_h(self, rt_batch: torch.LongTensor) -> torch.FloatTensor: # noqa: D102 h = self.entity_embeddings.weight r = self.relation_embeddings(rt_batch[:, 0]).view(-1, self.embedding_dim) t = self.entity_embeddings(rt_batch[:, 1]).view(-1, self.embedding_dim) # Embedding Regularization self.regularize_if_necessary(h, r, t) rt_batch_size = t.shape[0] # Extend each rt_batch of "r" with shape [rt_batch_size, dim] to [rt_batch_size, dim * num_entities] r = torch.repeat_interleave(r, self.num_entities, dim=0) # Extend each h with shape [num_entities, dim] to [rt_batch_size * num_entities, dim] # h = torch.repeat_interleave(h, rt_batch_size, dim=0) h = h.repeat(rt_batch_size, 1) # Extend t t = t.repeat_interleave(self.num_entities, dim=0) # Concatenate them x_s = torch.cat([h, r], dim=-1) x_s = self.input_dropout(x_s) # Predict t embedding x_t = self.mlp(x_s) # For efficient calculation, each of the calculated [h, r] rows has only to be multiplied with one t row x = (x_t.view(-1, self.embedding_dim) * t).sum(dim=1, keepdim=True) # The results have to be realigned with the expected output of the score_h function x = x.view(rt_batch_size, self.num_entities) # The application of the sigmoid during training is automatically handled by the default loss. return x
src/pykeen/models/unimodal/ermlpe.py
6,725
An extension of ERMLP proposed by [sharifzadeh2019]_. This model uses a neural network-based approach similar to ERMLP and with slight modifications. In ERMLP, the model is: .. math:: f(h, r, t) = \textbf{w}^{T} g(\textbf{W} [\textbf{h}; \textbf{r}; \textbf{t}]) whereas in ERMPLE the model is: .. math:: f(h, r, t) = \textbf{t}^{T} f(\textbf{W} (g(\textbf{W} [\textbf{h}; \textbf{r}])) including dropouts and batch-norms between each two hidden layers. ConvE can be seen as a special case of ERMLPE that contains the unnecessary inductive bias of convolutional filters. The aim of this model is to show that lifting this bias from ConvE (which simply leaves us with a modified ERMLP model), not only reduces the number of parameters but also improves performance. An implementation of the extension to ERMLP. -*- coding: utf-8 -*-: The default strategy for optimizing the model's hyper-parameters: The default loss function class: The default parameters for the default loss function class Finalize initialization noqa: D102 noqa: D102 Get embeddings Embedding Regularization Concatenate them Predict t embedding compare with all t's For efficient calculation, each of the calculated [h, r] rows has only to be multiplied with one t row The application of the sigmoid during training is automatically handled by the default loss. noqa: D102 Embedding Regularization Concatenate them Predict t embedding The application of the sigmoid during training is automatically handled by the default loss. noqa: D102 Embedding Regularization Extend each rt_batch of "r" with shape [rt_batch_size, dim] to [rt_batch_size, dim * num_entities] Extend each h with shape [num_entities, dim] to [rt_batch_size * num_entities, dim] h = torch.repeat_interleave(h, rt_batch_size, dim=0) Extend t Concatenate them Predict t embedding For efficient calculation, each of the calculated [h, r] rows has only to be multiplied with one t row The results have to be realigned with the expected output of the score_h function The application of the sigmoid during training is automatically handled by the default loss.
2,109
en
0.855343
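Illustrative sketch for the ERMLPE record above: the score_hrt/score_h comments rely on a row-wise trick where each predicted [h, r] row is multiplied with exactly one t row and summed, instead of forming the full product against all tails. A small standalone check of that equivalence (shapes chosen arbitrarily):

import torch

batch, dim = 4, 8
x_t = torch.randn(batch, dim)  # MLP output: predicted tail embeddings
t = torch.randn(batch, dim)    # matching tail embeddings, one row per [h, r] row

rowwise = (x_t * t).sum(dim=1, keepdim=True)      # what the model computes
full = torch.diagonal(x_t @ t.t()).unsqueeze(1)   # diagonal of the full product
assert torch.allclose(rowwise, full, atol=1e-6)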
import sqlite3 from abc import ABCMeta, abstractmethod from model.dao.daoexception import DAOException class AbstractDAO(object): __metaclass__ = ABCMeta def __init__(self, conn): self._conn = conn """ base CRUD operation """ # GENERIC CREATE FUNCTION def _insert(self, request, parameters): with self._conn as conn: try: c = conn.cursor() c.execute(request, parameters) conn.commit() return c.lastrowid except sqlite3.Error as ex: conn.rollback() DAOException(self, ex) # GENERIC READ FUNCTION def _read(self, request, parameters=None): with self._conn as conn: try: c = conn.cursor() if parameters is None: c.execute(request) else: c.execute(request, parameters) return c.fetchall() except Exception as ex: DAOException(self, ex) # GENERIC UPDATE FUNCTION def _update(self, request, parameters): with self._conn as conn: try: c = conn.cursor() c.execute(request, parameters) conn.commit() return True except Exception as ex: conn.rollback() DAOException(self, ex) return False # GENERIC DELETE FUNCTION def _delete(self, request, obj_id): with self._conn as conn: try: c = conn.cursor() c.execute(request, obj_id) conn.commit() return True except Exception as ex: conn.rollback() DAOException(self, ex) return False
model/dao/abstractdao.py
1,854
GENERIC CREATE FUNCTION GENERIC READ FUNCTION GENERIC UPDATE FUNCTION GENERIC DELETE FUNCTION
93
en
0.373353
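Illustrative sketch for the AbstractDAO record above: the base class only supplies generic CRUD helpers, so concrete DAOs are expected to provide the SQL. A hypothetical subclass showing the intended usage (table and column names are invented for the example):

class UserDAO(AbstractDAO):
    # Wires concrete queries onto the generic _insert/_read/_delete helpers.
    def create(self, name, email):
        return self._insert(
            "INSERT INTO users (name, email) VALUES (?, ?)", (name, email))

    def find_by_email(self, email):
        return self._read("SELECT * FROM users WHERE email = ?", (email,))

    def remove(self, user_id):
        return self._delete("DELETE FROM users WHERE id = ?", (user_id,))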
#!/usr/bin/env python # -*- coding: utf-8 -*- # @Time : 2018/10/31 0031 18:55 # @Author : Hadrianl # @File : realtime_data_server # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from rqalpha.const import COMMISSION_TYPE from spapi.spAPI import * from spapi.sp_struct import * import zmq import datetime as dt from rqalpha.api import logger from queue import Queue, Empty import pymongo as pmg from threading import Thread from collections import deque import pandas as pd from rqalpha.events import EVENT import time from rqalpha.environment import Environment from rqalpha.model.instrument import Instrument from .util import _convert_from_ctype class RealtimeDataServer: def __init__(self, sp_info, db_info, socket_info): mongo_cli = pmg.MongoClient(db_info['host']) admin_db = mongo_cli.get_database('admin') admin_db.authenticate(db_info['user'], db_info['pwd']) self._db = mongo_cli.get_database(db_info['db']) self._col = self._db.get_collection('realtime_future_1min_') self._col.create_index([('datetime', pmg.DESCENDING), ('code', pmg.ASCENDING)], unique=True) self._col.create_index([('code', pmg.ASCENDING)]) self.ctx = zmq.Context() self.trigger_socket = self.ctx.socket(zmq.PUB) self.trigger_socket.bind(f'tcp://*: {socket_info["trigger_port"]}') self.prod_codes = {} initialize() set_login_info(**sp_info) self._init_callback() login() time.sleep(3) self._init_subscribe() def _init_callback(self): self._ticker_queues = {} self._price_queues = {} self._trigger_queue = Queue() self._resample_thread = {} @on_login_reply # 登录成功时候调用 def login_reply(user_id, ret_code, ret_msg): if ret_code == 0: api_logger.info(f'@{user_id.decode()}登录成功') self._init_subscribe() else: api_logger.error(f'@{user_id.decode()}登录失败--errcode:{ret_code}--errmsg:{ret_msg.decode()}') @on_instrument_list_reply # 产品系列信息的回调推送,用load_instrument_list()触发 def inst_list_reply(req_id, is_ready, ret_msg): if is_ready: api_logger.info('<产品>' + f'信息加载成功 req_id:{req_id}-msg:{ret_msg.decode()}') else: api_logger.error('<产品>' + f'信息正在加载......req_id{req_id}-msg:{ret_msg.decode()}') @on_product_list_by_code_reply # 根据产品系列名返回合约信息 def product_list_by_code_reply(req_id, inst_code, is_ready, ret_msg): if is_ready: if inst_code == '': api_logger.info('<合约>' + f'该产品系列没有合约信息 req_id:{req_id}-msg:{ret_msg.decode()}') else: api_logger.info('<合约>' + f'产品:{inst_code.decode()}合约信息加载成功 req_id:{req_id}-msg:{ret_msg.decode()}') else: api_logger.error('<合约>' + f'产品:{inst_code.decode()}合约信息正在加载......req_id:{req_id}-msg:{ret_msg.decode()}') # @on_business_date_reply # 登录成功后会返回一个交易日期 def business_date_reply(business_date): self.trade_date = dt.datetime.fromtimestamp(business_date) api_logger.info('<日期>' + f'当前交易日--{self.trade_date}') @on_ticker_update # ticker数据推送 def ticker_update(ticker: SPApiTicker): ticker_dict = _convert_from_ctype(ticker) self._ticker_queues[ticker_dict['ProdCode']].put(ticker_dict) api_logger.info(f'{ticker_dict}') @on_api_price_update # price数据推送 def price_update(price: SPApiPrice): price_dict = _convert_from_ctype(price) 
self._price_queues[price_dict['ProdCode']].append(price_dict) api_logger.info(f'{price_dict}') @on_connecting_reply # 连接状态改变时调用 def connecting_reply(host_id, con_status): api_logger.info(f'<连接>{HOST_TYPE[host_id]}状态改变--{HOST_CON_STATUS[con_status]}') # global login_flag self.on_login_reply = login_reply self.inst_list_reply = inst_list_reply self.product_list_by_code_reply = product_list_by_code_reply self.business_date_reply = business_date_reply self.ticker_update = ticker_update self.price_update = price_update self.connecting_reply = connecting_reply def _init_subscribe(self): contract_col = self._db.get_collection('realtime_future_contract_info') code = contract_col.find() self.prod_codes = {c['Filler']: c['CODE'] for c in code} for p in self.prod_codes: self.subscribe_ticker(p) for p in self.prod_codes: self.subscribe_price(p) def _resample_ticker(self, prod_code): tickers = [] q = self._ticker_queues[prod_code] code = self.prod_codes[prod_code] time_diff = 0 while True: try: tick = q.get(timeout=1) time_diff = tick['TickerTime'] - time.time() print(time_diff) except Empty: if tickers and time.time() % (tickers[-1]['TickerTime'] // 60) >= 61 + time_diff: # 在没有新的一分钟tick数据时,跨过下分钟超过3秒会自动生成bar price_list = [] vol_list = [] d = dt.datetime.fromtimestamp(tickers[-1]['TickerTime']).replace(second=0) for t in tickers: price_list.append(t['Price']) vol_list.append(t['Qty']) o, h, l, c, v = price_list[0], max(price_list), min(price_list), price_list[-1], sum(vol_list) self._col.update_one({'datetime': d, 'code': code}, {'$set': {'datetime': d, 'code': code, 'open': o, 'high': h, 'low': l, 'close': c, 'volume': v, 'trade_date': self.trade_date}}, upsert=True) self._trigger_queue.put(d) tickers.clear() continue if tick is None: break if tickers and tickers[-1]['TickerTime'] // 60 != tick['TickerTime'] // 60: price_list = [] vol_list = [] d = dt.datetime.fromtimestamp(tickers[-1]['TickerTime']).replace(second=0) for t in tickers: price_list.append(t['Price']) vol_list.append(t['Qty']) o, h, l, c, v = price_list[0], max(price_list), min(price_list), price_list[-1], sum(vol_list) self._col.update_one({'datetime': d, 'code': code}, {'$set': {'datetime': d, 'code': code, 'open': o, 'high': h, 'low': l, 'close': c, 'volume': v, 'trade_date': self.trade_date}}, upsert=True) self._trigger_queue.put(d) tickers.clear() tickers.append(tick) def subscribe_ticker(self, prod_code): self._ticker_queues.setdefault(prod_code, Queue()) subscribe_ticker(prod_code, 1) t = self._resample_thread.setdefault(prod_code, Thread(target=self._resample_ticker, args=(prod_code, ))) if not t.isAlive(): t.setDaemon(True) t.start() def unsubscribe_ticker(self, prod_code): subscribe_ticker(prod_code, 0) q = self._ticker_queues.pop(prod_code) t = self._resample_thread.pop(prod_code) q.put(None) t.join() def subscribe_price(self, prod_code): self._price_queues.setdefault(prod_code, deque(maxlen=1)) subscribe_price(prod_code, 1) def unsubscribe_price(self, prod_code): try: self._price_queues.pop(prod_code) finally: subscribe_price(prod_code, 0) def publish_bar_signal(self): dt_list = [] while True: d = self._trigger_queue.get() dt_list.append(d) print(d) if len(dt_list) >= len(self._resample_thread) or dt.datetime.now() > d + dt.timedelta(seconds=2): self.trigger_socket.send_pyobj(d) dt_list.clear() def add_contract(db_info, code): mongo_cli = pmg.MongoClient(db_info['host']) admin_db = mongo_cli.get_database('admin') admin_db.authenticate(db_info['user'], db_info['pwd']) db = mongo_cli.get_database(db_info['db']) contract_col = 
db.get_collection('realtime_future_contract_info') product_info = db.get_collection('realtime_future_product_info') contract_col.create_index([('DATE', pmg.DESCENDING), ('CODE', pmg.ASCENDING)], unique=True) contract_col.create_index([('CODE', pmg.ASCENDING)]) product_info.create_index([('DATE', pmg.DESCENDING), ('CLASS_CODE', pmg.ASCENDING)], unique=True) product_info.create_index([('CLASS_CODE', pmg.ASCENDING)])
rqalpha/examples/extend_api/HKMod/realtime_data_server.py
9,854
!/usr/bin/env python -*- coding: utf-8 -*- @Time : 2018/10/31 0031 18:55 @Author : Hadrianl @File : realtime_data_server Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 登录成功时候调用 产品系列信息的回调推送,用load_instrument_list()触发 根据产品系列名返回合约信息 登录成功后会返回一个交易日期 ticker数据推送 price数据推送 连接状态改变时调用 global login_flag 在没有新的一分钟tick数据时,跨过下分钟超过3秒会自动生成bar
809
en
0.517652
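Illustrative sketch for the realtime_data_server record above: _resample_ticker folds raw ticks into one-minute OHLCV bars by hand inside a streaming loop. For reference, a compact pandas equivalent over a finished batch of tick dicts with 'TickerTime' (epoch seconds), 'Price' and 'Qty' fields; this is a sketch of the aggregation only, not the streaming and publishing logic the server needs:

import pandas as pd

def ticks_to_1min_bars(ticks):
    df = pd.DataFrame(ticks)
    df.index = pd.to_datetime(df['TickerTime'], unit='s')
    bars = df['Price'].resample('1min').ohlc()      # open/high/low/close per minute
    bars['volume'] = df['Qty'].resample('1min').sum()
    return bars.dropna()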
from pygments.style import Style from pygments.token import ( Comment, Error, Keyword, Literal, Name, Number, Operator, String, Text ) class BaseSixteenStyle(Style): base00 = '#151515' base01 = '#202020' base02 = '#303030' base03 = '#505050' base04 = '#B0B0B0' base05 = '#D0D0D0' base06 = '#E0E0E0' base07 = '#F5F5F5' base08 = '#AC4142' base09 = '#D28445' base0a = '#F4BF75' base0b = '#90A959' base0c = '#75B5AA' base0d = '#6A9FB5' base0e = '#AA759F' base0f = '#8F5536' default_style = '' background_color = base00 highlight_color = base02 styles = { Text: base05, Error: base08, # .err Comment: f'italic {base03}', # .c Comment.Preproc: base0f, # .cp Comment.PreprocFile: base0b, # .cpf Keyword: base0e, # .k Keyword.Type: base08, # .kt Name.Attribute: base0d, # .na Name.Builtin: base0d, # .nb Name.Builtin.Pseudo: base08, # .bp Name.Class: base0d, # .nc Name.Constant: base09, # .no Name.Decorator: base09, # .nd Name.Function: base0d, # .nf Name.Namespace: base0d, # .nn Name.Tag: base0e, # .nt Name.Variable: base0d, # .nv Name.Variable.Instance: base08, # .vi Number: base09, # .m Operator: base0c, # .o Operator.Word: base0e, # .ow Literal: base0b, # .l String: base0b, # .s String.Interpol: base0f, # .si String.Regex: base0c, # .sr String.Symbol: base09, # .ss } from string import capwords # noqa: E402 BaseSixteenStyle.__name__ = 'BaseSixteen{}Style'.format( capwords('classic-dark', '-').replace('-', '') ) globals()[BaseSixteenStyle.__name__] = globals()['BaseSixteenStyle'] del globals()['BaseSixteenStyle'] del capwords
pygments_base16/base16-classic-dark.py
1,875
.err .c .cp .cpf .k .kt .na .nb .bp .nc .no .nd .nf .nn .nt .nv .vi .m .o .ow .l .s .si .sr .ss noqa: E402
106
mn
0.280091
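Illustrative sketch for the base16 style record above: the module renames the class to BaseSixteenClassicDarkStyle at import time, and Pygments formatters accept either a registered style name or a Style subclass. A short usage example, assuming the class is importable in the current scope:

from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter

# HtmlFormatter's style option accepts a Style subclass directly.
formatter = HtmlFormatter(style=BaseSixteenClassicDarkStyle, full=True)
print(highlight("print('hello')", PythonLexer(), formatter))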
# Copyright 2019 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from tensorio import compare_tensor from akg.utils import kernel_exec as utils from test_op import abs_sum from akg.utils.dsl_create import get_reduce_out_shape from gen_random import random_gaussian def abs_sum_run(shape, reduce_axis, keepdims, dtype, attrs): op_attrs = [reduce_axis, keepdims] if 'tuning' in attrs.keys(): t = attrs.get("tuning", False) kernel_name = attrs.get("kernel_name", False) mod = utils.op_build_test(abs_sum.abs_sum, [shape], [dtype], op_attrs, kernel_name=kernel_name, attrs=attrs, tuning=t) if t: expect, input1, output = gen_data(dtype, keepdims, reduce_axis, shape) return mod, expect, (input1, output) else: return mod else: expect, input1, output = gen_data(dtype, keepdims, reduce_axis, shape) mod = utils.op_build_test(abs_sum.abs_sum, [shape], [dtype], op_attrs, kernel_name="abs_sum", attrs=attrs) output = utils.mod_launch(mod, (input1, output), expect=expect) return input1, output, expect, compare_tensor(output, expect, rtol=5e-03, atol=5e-3, equal_nan=True) def gen_data(dtype, keepdims, reduce_axis, shape): input1 = random_gaussian(shape, miu=1, sigma=0.1) input1 = input1.astype(dtype) input1_abs = np.abs(input1) expect = np.sum(input1_abs, axis=reduce_axis, keepdims=keepdims) out_shape = get_reduce_out_shape(shape, axis=reduce_axis, keepdims=keepdims) output = np.full(out_shape, np.nan, dtype) return expect, input1, output
tests/common/test_run/abs_sum_run.py
2,135
Copyright 2019 Huawei Technologies Co., Ltd Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
561
en
0.849356
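Illustrative sketch for the abs_sum test record above: gen_data builds the expected output purely with numpy by taking the absolute value and then reducing over the requested axes. The reference computation in isolation:

import numpy as np

x = np.array([[1.0, -2.0], [-3.0, 4.0]])
print(np.sum(np.abs(x), axis=1, keepdims=True))   # [[3.] [7.]]
print(np.sum(np.abs(x), axis=1, keepdims=False))  # [3. 7.]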
import glob import os from distutils.dir_util import copy_tree from shutil import copy from alembic.command import init as _init from metric.cli.conf import Conf from metric.cli.template import Template from metric.src import Base from metric.src.package import Package def init(name): """ ## Init [ID] Init adalah perintah inisiasi oleh metric untuk membuat sebuah project dengan pondasi yang telah di setup, cara penggunaan ini bisa dengan 2 cara, membuat project dari direktori saat ini (CWD) atau dengan direktori baru. [EN] Init is the command initiation by metric to create a project with the foundation that has been setup, there are 2 ways to work with it, either you can create from current working directory (CWD) or new directory. @param name: project name """ project_path = os.getcwd() if name != '.': project_path = os.path.join(os.getcwd(), name) Package.make_directory(project_path) _init(Base.base_configuration(project_path), project_path) packages_build = { 'apps': ('resources',), 'models': tuple(), } for k, v in packages_build.items(): Package.make_package(os.path.join(project_path, k)) for i in v: Package.make_package(os.path.join(project_path, k, i)) dir_build = { 'apps': ('views',), 'models': ('fields',), '.': ('scripts',) } for k, v in dir_build.items(): for i in v: Package.make_directory(os.path.join(project_path, k, i)) file_remove = ['script.py.mako'] [os.remove(os.path.join(project_path, i)) for i in file_remove] scripts = os.path.join(os.path.abspath(os.path.dirname(__file__)), "../scripts") [copy(file, os.path.join(project_path, 'scripts')) for file in glob.glob(os.path.join(scripts, "*.mako"))] os.rename(os.path.join(project_path, 'env.py'), os.path.join(project_path, 'scripts', 'env.py')) copy_tree(os.path.join(scripts, "setup"), project_path) for file in glob.glob(os.path.join(os.path.join(scripts, "setup"), "*.py")): copy(file, project_path) Conf.reset(project_path) def make_resource(name): """ ## Make resource [ID] Perintah ini adalah suatu perintah yang digunakan untuk membuat "resource" baru dari "script" yang telah di sediakan. [EN] This is a command that used to create new "resource" based from the existing "script" provided. @param name: resource name """ t = Template() t.template_type = 'resource' t.make(name)
metric/cli/__init__.py
2,595
## Init [ID] Init adalah perintah inisiasi oleh metric untuk membuat sebuah project dengan pondasi yang telah di setup, cara penggunaan ini bisa dengan 2 cara, membuat project dari direktori saat ini (CWD) atau dengan direktori baru. [EN] Init is the command initiation by metric to create a project with the foundation that has been setup, there are 2 ways to work with it, either you can create from current working directory (CWD) or new directory. @param name: project name ## Make resource [ID] Perintah ini adalah suatu perintah yang digunakan untuk membuat "resource" baru dari "script" yang telah di sediakan. [EN] This is a command that used to create new "resource" based from the existing "script" provided. @param name: resource name
777
id
0.631506
import cv2 import os image = cv2.imread("/content/drive/My Drive/DIC_personal/data/face.jpg") cascade = cv2.CascadeClassifier("/content/drive/My Drive/DIC_personal/haarcascades/haarcascade_upperbody.xml") #image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) face_list = cascade.detectMultiScale(image) #face_list = cascade.detectMultiScale(image,scaleFactor=1.2, minNeighbors=2, minSize=(1,1)) color = (0, 0, 255) if len(face_list)>0: for face in face_list: x, y, w, h = face cv2.rectangle(image,(x,y),(x+w, y+h), color, thickness=2) else: print("No human") cv2.imshow('Frame',image) cv2.waitKey(0) cv2.destroyAllWindows() # Reference: https://qiita.com/PonDad/items/6f9e6d9397951cadc6be
face_1.py
703
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) face_list = cascade.detectMultiScale(image,scaleFactor=1.2, minNeighbors=2, minSize=(1,1))
137
en
0.274659
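The /content/drive paths in face_1.py suggest it is meant to run in Google Colab, where cv2.imshow is not supported in notebook output. A common workaround, assuming a Colab runtime, is to display the annotated image with Colab's cv2_imshow patch instead of the imshow/waitKey/destroyAllWindows calls:

import cv2
from google.colab.patches import cv2_imshow  # only available inside a Google Colab runtime

image = cv2.imread("/content/drive/My Drive/DIC_personal/data/face.jpg")
# ... run cascade.detectMultiScale and cv2.rectangle exactly as in face_1.py ...
cv2_imshow(image)  # renders the image inline in the notebook output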
# coding: utf-8 """ FlashBlade REST API A lightweight client for FlashBlade REST API 2.3, developed by Pure Storage, Inc. (http://www.purestorage.com/). OpenAPI spec version: 2.3 Generated by: https://github.com/swagger-api/swagger-codegen.git """ import pprint import re import six import typing from ....properties import Property if typing.TYPE_CHECKING: from pypureclient.flashblade.FB_2_3 import models class ReplicationPerformance(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'transmitted_bytes_per_sec': 'float', 'received_bytes_per_sec': 'float' } attribute_map = { 'transmitted_bytes_per_sec': 'transmitted_bytes_per_sec', 'received_bytes_per_sec': 'received_bytes_per_sec' } required_args = { } def __init__( self, transmitted_bytes_per_sec=None, # type: float received_bytes_per_sec=None, # type: float ): """ Keyword args: transmitted_bytes_per_sec (float): Total bytes transmitted per second. received_bytes_per_sec (float): Total bytes received per second. """ if transmitted_bytes_per_sec is not None: self.transmitted_bytes_per_sec = transmitted_bytes_per_sec if received_bytes_per_sec is not None: self.received_bytes_per_sec = received_bytes_per_sec def __setattr__(self, key, value): if key not in self.attribute_map: raise KeyError("Invalid key `{}` for `ReplicationPerformance`".format(key)) if key == "transmitted_bytes_per_sec" and value is not None: if value < 0.0: raise ValueError("Invalid value for `transmitted_bytes_per_sec`, must be a value greater than or equal to `0.0`") if key == "received_bytes_per_sec" and value is not None: if value < 0.0: raise ValueError("Invalid value for `received_bytes_per_sec`, must be a value greater than or equal to `0.0`") self.__dict__[key] = value def __getattribute__(self, item): value = object.__getattribute__(self, item) if isinstance(value, Property): return None else: return value def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.swagger_types): if hasattr(self, attr): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(ReplicationPerformance, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, ReplicationPerformance): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other
pypureclient/flashblade/FB_2_3/models/replication_performance.py
4,097
Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. Returns true if both objects are equal Keyword args: transmitted_bytes_per_sec (float): Total bytes transmitted per second. received_bytes_per_sec (float): Total bytes received per second. Returns true if both objects are not equal For `print` and `pprint` Returns the model properties as a dict Returns the string representation of the model FlashBlade REST API A lightweight client for FlashBlade REST API 2.3, developed by Pure Storage, Inc. (http://www.purestorage.com/). OpenAPI spec version: 2.3 Generated by: https://github.com/swagger-api/swagger-codegen.git coding: utf-8 type: float type: float
851
en
0.734015
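A brief usage sketch of the generated ReplicationPerformance model above (the numbers are made up, and it assumes the pypureclient package is importable): attributes passed at construction are validated to be non-negative by __setattr__ and can be serialized with to_dict().

from pypureclient.flashblade.FB_2_3.models.replication_performance import ReplicationPerformance

perf = ReplicationPerformance(transmitted_bytes_per_sec=1024.0,
                              received_bytes_per_sec=2048.0)
print(perf.to_dict())
# {'transmitted_bytes_per_sec': 1024.0, 'received_bytes_per_sec': 2048.0}

try:
    perf.received_bytes_per_sec = -1.0   # rejected: values must be >= 0.0
except ValueError as err:
    print(err)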
''' Author: Guanghan Ning E-mail: guanghan.ning@jd.com October 22nd, 2018 Unit test for graph. ''' import os import sys import numpy as np sys.path.append(os.path.abspath("../utils/")) from graph import * def test_normalize_digraph(): num_node = 15 self_link = [(i, i) for i in range(num_node)] neighbor_link = [(0, 1), (1, 2), (3, 4), (4, 5), (2, 8), (8, 7), (7, 6), (8, 12), (12, 9), (9, 10), (10, 11), (9, 3), (12, 13), (13, 14)] edge = self_link + neighbor_link print("Edge: \n{}\n".format(edge)) hop_dis = get_hop_distance(num_node, edge, max_hop=1) print("Hop_dis: \n{}\n".format(hop_dis)) max_hop = 1 dilation = 1 valid_hop = range(0, max_hop + 1, dilation) print("Valid_hop: \n{}\n".format(valid_hop)) adjacency = np.zeros((num_node, num_node)) for hop in valid_hop: adjacency[hop_dis == hop] = 1 print("Adjacency matrix: \n{}\n".format(adjacency)) normalize_adjacency = normalize_digraph(adjacency) print("Normalized adjacency matrix: \n{}\n".format(normalize_adjacency)) return if __name__ == "__main__": test_normalize_digraph()
pose_trackers/lighttrack/graph/unit_test/test_graph.py
1,172
Author: Guanghan Ning E-mail: guanghan.ning@jd.com October 22nd, 2018 Unit test for graph.
91
en
0.639994
from django.shortcuts import render def home(request): """ View function for simply rendering the Ionic Angular index.html """ return render(request, 'www/index.html')
practicality/frontend/views.py
190
View function for simply rendering the Ionic Angular index.html
63
en
0.464412
""" Remove super classes from the train dataset and keep it only in the validation dataset. Example command: python create_rcv1_heldout_split.py --train_fraction 0.75 --seed 42 """ import argparse import jsonlines from collections import Counter import numpy as np import random import copy import os import json def create_dataset_split(args): random.seed(args.seed) # Read the JSON file containing one JSON per line and store the dict all_docs = [] with jsonlines.open(args.file) as reader: for obj in reader: all_docs.append(obj) # Get a list of all the labels label_statistics = Counter() for doc in all_docs: label_statistics.update(doc['bip:topics:1.0']) all_labels = list(label_statistics) # Ignore superclass labels during training super_class_labels = ['C15', 'C151', 'C17', 'C18', 'C31', 'C33', 'C41', 'E12', 'E13', 'E14', 'E21', 'E31', 'E41', 'E51', 'G15', 'M13', 'M14'] train_labels = [label for label in all_labels if label not in super_class_labels] # Remove labels in train_labels from the train data new_docs = [] for doc in all_docs: doc['bip:topics:1.0'] = [topic for topic in doc['bip:topics:1.0'] if topic in train_labels] if len(doc['bip:topics:1.0']) != 0: new_docs.append(doc) # Create a new file # Store list of dicts as a json save_name = 'rcv1_superclass_{}.json'.format(args.seed) args.save_dir = os.path.split(args.file)[0] f = open(os.path.join(args.save_dir, save_name), 'w', encoding="ISO-8859-1") for document in new_docs: f.write(str(json.dumps(document)) + '\n') f.close() def main(): parser = argparse.ArgumentParser() # Dataset Arguments parser.add_argument("--file", default='', type=str, help="") parser.add_argument("--seed", default=42, type=int, help="Random seed for splitting classes.") args = parser.parse_args() create_dataset_split(args) if __name__ == '__main__': main()
run_rcv1/preprocessing/create_rcv1_superclass_split.py
2,015
Remove super classes from the train dataset and keep it only in the validation dataset. Example command: python create_rcv1_heldout_split.py --train_fraction 0.75 --seed 42 Read the JSON file containing one JSON per line and store the dict Get a list of all the labels Ignore superclass labels during training Remove labels in train_labels from the train data Create a new file Store list of dicts as a json Dataset Arguments
432
en
0.698155
#!/usr/bin/env python """Convert *.json, *.csv and other text data files to js for local use and avoid ajax call. """ import optparse from os import listdir from os.path import abspath, isfile, isdir, join, splitext, basename import json; #curdir = os.path.abspath('.') curdir = "." filter_text_ext = [".json", ".csv"] filter_binary_ext = [] def jsfy_file(path, basedir, fout): fname = basename(path) if(fname.startswith(".")): return #print(path, basedir) if(not path.startswith(basedir)): return filename, extname = splitext( path ) #print( extname ) if(extname in filter_text_ext): res_key = path[ len(basedir) : ] print( res_key + " -> " + path ) fin = open(path, "r") txt = json.dumps( fin.read() ) fout.write("jsfy_res[\"" + res_key + "\"] = " + txt + ";\n\n"); #elif(extname in filter_binary_ext): # pass def jsfy_dir(path, basedir, fout): if(not path.endswith("/")): path = path + "/" fname = basename(path) if(fname.startswith(".")): return #print(path, basedir) if(not path.startswith(basedir)): return #print( path + ":" ) for f in listdir(path): subpath = join(path,f) if( isfile(subpath) ): jsfy_file(subpath, basedir, fout) elif( isdir(subpath) ): jsfy_dir(subpath, basedir, fout) def main(): """The entry point for this script.""" usage = """usage: %prog [dir] [-b basedir] [-o jsfile] example: %prog %prog assets -o js/jsfy_res.js """ parser = optparse.OptionParser(usage) parser.add_option("-b", "--base", dest="basedir", help="base dir") parser.add_option("-o", "--output", dest="outputpath", help="export js file path") (options, args) = parser.parse_args() if( isinstance(options.basedir, str)): basedir = options.basedir else: basedir = "." basedir = abspath(basedir) if( isinstance(options.outputpath, str)): outputpath = options.outputpath else: outputpath ="./jsfy_res.js" fout = open( outputpath, "w" ) fout.write("// generated with jsfy.py, v0.1 (https://github.com/floatinghotpot/jsfy)\n\n" ) fout.write("var jsfy_res = jsfy_res || {};\n\n" ) if(not basedir.endswith("/")): basedir = basedir + "/" for f in args: f = abspath(f) if( isfile(f) ): jsfy_file(f,basedir,fout) elif( isdir(f) ): jsfy_dir(f,basedir,fout) fout.close() # end of main() if __name__ == "__main__": main()
tools/jsfy.py
2,580
The entry point for this script. Convert *.json, *.csv and other text data files to js for local use and avoid ajax call. !/usr/bin/env pythoncurdir = os.path.abspath('.')print(path, basedir)print( extname )elif(extname in filter_binary_ext):print(path, basedir)print( path + ":" ) end of main()
296
en
0.474594
try: from heat.common.i18n import _ except ImportError: pass from heat.engine import attributes from heat.engine import constraints from heat.engine import clients from heat.engine import properties from vnc_api import vnc_api from contrail_heat.resources import contrail try: from heat.openstack.common import log as logging except ImportError: from oslo_log import log as logging import uuid import copy LOG = logging.getLogger(__name__) class NetworkPolicy(contrail.ContrailResource): PROPERTIES = ( NAME, ENTRIES, ) = ( 'name', 'entries', ) _rule_schema = { "policy_rule": properties.Schema( properties.Schema.LIST, _('Array of policy rules'), schema=properties.Schema( properties.Schema.MAP, schema={ "direction": properties.Schema( properties.Schema.STRING, _('Direction of policy'), constraints=[ constraints.AllowedValues(['<>', '<', '>']), ], default='<>' ), "protocol": properties.Schema( properties.Schema.STRING, _('Protocol to match'), default='any' ), "src_ports": properties.Schema( properties.Schema.LIST, _('Array of src ports to match'), required=True, schema=properties.Schema( properties.Schema.MAP, schema={ "start_port": properties.Schema( properties.Schema.INTEGER, _('start port to match'), required=True ), "end_port": properties.Schema( properties.Schema.INTEGER, _('end port to match'), required=True ), } ) ), "dst_ports": properties.Schema( properties.Schema.LIST, _('Array of dst ports to match'), required=True, schema=properties.Schema( properties.Schema.MAP, schema={ "start_port": properties.Schema( properties.Schema.INTEGER, _('start port to match'), required=True ), "end_port": properties.Schema( properties.Schema.INTEGER, _('end port to match'), required=True ), } ) ), "dst_addresses": properties.Schema( properties.Schema.LIST, _('Array of dst addresses to match'), required=True, schema=properties.Schema( properties.Schema.MAP, schema={ "virtual_network": properties.Schema( properties.Schema.STRING, _('Virtual network to match'), required=True ), } ) ), "src_addresses": properties.Schema( properties.Schema.LIST, _('Array of src addresses to match'), required=True, schema=properties.Schema( properties.Schema.MAP, schema={ "virtual_network": properties.Schema( properties.Schema.STRING, _('Virtual network to match'), required=True ), } ) ), "action_list": properties.Schema( properties.Schema.MAP, _('Array of src addresses to match'), update_allowed=True, required=True, schema={ "simple_action": properties.Schema( properties.Schema.STRING, _('Simple Action'), update_allowed=True, default='pass' ), "apply_service": properties.Schema( properties.Schema.LIST, _('Apply service'), update_allowed=True, ), "mirror_to": properties.Schema( properties.Schema.STRING, _('Mirror to'), update_allowed=True, ), } ), } ), required=True, update_allowed=True, ), } properties_schema = { NAME: properties.Schema( properties.Schema.STRING, _('Name of the policy'), required=True ), ENTRIES: properties.Schema( properties.Schema.MAP, _('Policy entries'), schema=_rule_schema, update_allowed=True, required=True ) } attributes_schema = { "name": attributes.Schema( _('The name of the policy.'), ), "fq_name": attributes.Schema( _('FQ name of this policy.'), ), "tenant_id": attributes.Schema( _('The tenant owning this network.'), ), "rules": attributes.Schema( _('List of rules.'), ), "show": attributes.Schema( _('All attributes.'), ), } def fix_apply_service(self, props): for policy_rule in props['entries']['policy_rule']: for index, service in enumerate( policy_rule['action_list']['apply_service'] or []): try: 
si_obj = self.vnc_lib().service_instance_read(id=service) except: si_obj = self.vnc_lib().service_instance_read( fq_name_str=service) policy_rule['action_list']['apply_service'][ index] = si_obj.get_fq_name_str() def fix_mirror_to(self, props): for policy_rule in props['entries']['policy_rule']: service = policy_rule['action_list']['mirror_to'] if service: try: si_obj = self.vnc_lib().service_instance_read(id=service) except: si_obj = self.vnc_lib().service_instance_read( fq_name_str=service) policy_rule['action_list'][ 'mirror_to'] = vnc_api.MirrorActionType( analyzer_name=si_obj.get_fq_name_str()) def fix_vn_to_fqname(self, props): for policy_rule in props['entries']['policy_rule']: for dest_address in policy_rule['dst_addresses']: try: dest_address['virtual_network'] = ':'.join( self.vnc_lib().id_to_fq_name( dest_address['virtual_network'])) except Exception: # the user input is already an fq_name_string pass for src_address in policy_rule['src_addresses']: try: src_address['virtual_network'] = ':'.join( self.vnc_lib().id_to_fq_name( src_address['virtual_network'])) except Exception: # the user input is already an fq_name_string pass update_allowed_keys = ('Properties',) @contrail.set_auth_token def handle_create(self): props = {} props['entries'] = copy.deepcopy(self.properties['entries']) self.fix_vn_to_fqname(props) self.fix_apply_service(props) self.fix_mirror_to(props) tenant_id = self.stack.context.tenant_id project_obj = self.vnc_lib().project_read(id=str(uuid.UUID(tenant_id))) np_obj = vnc_api.NetworkPolicy(name=self.properties[self.NAME], parent_obj=project_obj) np_obj.set_network_policy_entries( vnc_api.PolicyEntriesType.factory(**props['entries'])) np_uuid = super(NetworkPolicy, self).resource_create(np_obj) self.resource_id_set(np_uuid) @contrail.set_auth_token def handle_update(self, json_snippet, tmpl_diff, prop_diff): props = {} props['entries'] = copy.deepcopy(prop_diff['entries']) self.fix_vn_to_fqname(props) self.fix_apply_service(props) self.fix_mirror_to(props) try: np_obj = self.vnc_lib().network_policy_read(id=self.resource_id) except vnc_api.NoIdError: LOG.warn(_("Network Policy %s not found.") % self.name) raise except: LOG.warn(_("Unknown error.")) raise np_obj.set_network_policy_entries( vnc_api.PolicyEntriesType.factory(**props['entries'])) self.vnc_lib().network_policy_update(np_obj) @contrail.set_auth_token def _show_resource(self): np_obj = self.vnc_lib().network_policy_read(id=self.resource_id) dict = {} dict['name'] = np_obj.get_display_name() dict['fq_name'] = np_obj.get_fq_name_str() rules = [] entries = np_obj.get_network_policy_entries() if entries: for rule in entries.get_policy_rule(): policy_rule = {} policy_rule['direction'] = rule.get_direction() policy_rule['protocol'] = rule.get_protocol() policy_rule['dst_addresses'] = [] for addr in rule.get_dst_addresses() or []: policy_rule['dst_addresses'].append( addr.get_virtual_network()) a_list = rule.get_action_list() policy_rule['action_list'] = { 'simple_action': a_list.get_simple_action(), 'apply_service': a_list.get_apply_service(), 'mirror_to': a_list.get_mirror_to() } policy_rule['dst_ports'] = rule.get_dst_ports() policy_rule['application'] = rule.get_application() policy_rule['src_addresses'] = [] for addr in rule.get_src_addresses() or []: policy_rule['src_addresses'].append( addr.get_virtual_network()) policy_rule['src_ports'] = rule.get_src_ports() rules.append(policy_rule) dict['rules'] = rules return dict @contrail.set_auth_token def handle_delete(self): try: 
self.vnc_lib().network_policy_delete(id=self.resource_id) except Exception: pass def resource_mapping(): return { 'OS::Contrail::NetworkPolicy': NetworkPolicy, }
contrail_heat/resources/network_policy.py
12,283
the user input is already an fq_name_string the user input is already an fq_name_string
87
en
0.84272
""" Serializer fields for django_hal """ from collections import OrderedDict from django.utils.http import urlencode from rest_framework import serializers from .utils import reverse class LinksField(serializers.DictField): """HAL-style _links field. Parameters ---------- *args : tuple A tuple representing the relation name, and arguments to reverse the url. Example: `(name, urlpattern, {'pk', 'pk'})`. name : str The string used to identify the url in the final output. urlpattern : str A named urlpattern. kwargs : dict The kwargs to pass (with the urlpattern) to `reverse`. This is a dict where the key is the url kwarg, and the value is the attribute to lookup on the instance. So, `{'user', 'pk'}` would translate to `{'user': getattr(instance, 'pk')}`. Example ------- MySerializer(serializers.Serializer): _links = LinksField( ('self', 'namespace:view-name', {'pk': 'pk'}) ) # Outputs: # # { # '_links': { # 'self': 'https://.../my-resource/34' # } # } A shorthand syntax is available to reduce the repetitiveness of `{'pk': 'pk'}`, when both the kwarg and the instance attribute name are the same. ('ref', 'urlpattern', 'pk') is equivalent to ('ref', 'urlpattern', {'pk': 'pk'}) In a full example that looks like: MySerializer(serializers.Serializer): _links = LinksField( ('self', 'namespace:view-name', 'pk') ) # Outputs: # # { # '_links': { # 'self': { 'href': 'https://.../my-resource/34' } # } # } """ def __init__(self, *links): super(LinksField, self).__init__(read_only=True) self.links = links def to_representation(self, instance): """Return an ordered dictionary of HAL-style links.""" request = self.context.get('request') ret = OrderedDict() for link in self.links: name = link[0] ret[name] = self.to_link(request, instance, *link[1:]) return ret def get_attribute(self, instance, *args, **kwargs): """Return the whole instance, instead of looking up an attribute value. Implementation note: We do this because `Serializer.to_representation` builds the list of serializer fields with something like: for field in serializer_fields: field.to_representation(field.get_attribute(instance)) Since we need the instance in `to_representation` so we can query arbitrary attributes on it to build urls, we simply have to return the instance here. """ return instance def to_link(self, request, instance, urlpattern, kwargs=None, query_kwargs=None): """Return an absolute url for the given urlpattern.""" if query_kwargs: query_kwargs = {k: getattr(instance, v) for k, v in query_kwargs.items()} if not kwargs: url = reverse(urlpattern, request=request) if not query_kwargs: return {'href': url} return {'href': '%s?%s' % (url, urlencode(query_kwargs))} if isinstance(kwargs, basestring): # `(ref, urlpattern, string)` where `string` is equivalent to # `{string: string}` url = reverse(urlpattern, kwargs={kwargs: getattr(instance, kwargs)}, request=request) if not query_kwargs: return {'href': url} return {'href': '%s?%s' % (url, urlencode(query_kwargs))} reverse_kwargs = {} if kwargs: for k, v in kwargs.items(): reverse_kwargs[k] = getattr(instance, v) try: url = reverse(urlpattern, kwargs=reverse_kwargs, request=request) if not query_kwargs: return {'href': url} return {'href': '%s?%s' % (url, urlencode(query_kwargs))} except NoReverseMatch: return None class QueryField(serializers.HyperlinkedIdentityField): """Return the query url that lists related objects in a reverse relation. Example ------- .. 
code:: python class Book: title = CharField() author = ForeignKey(Author) class Author: name = CharField() url('books/query/author/<pk>', ..., name='book-query-by-author') class AuthorSerializer: name = CharField() books = QueryField('book-query-by-author') >>> nick = Author(name='Nick').save() >>> book1 = Book(title='Part 1', author=nick) >>> book2 = Book(title='Part 2', author=nick) >>> AuthorSerializer(nick) { 'name': 'Nick', 'books': '../books/query/author/1', } Raises ------ django.*.NoReverseMatch if the `view_name` and `lookup_field` attributes are not configured to correctly match the URL conf. """ lookup_field = 'pk' def __init__(self, view_name, url_kwarg=None, query_kwarg=None, **kwargs): assert url_kwarg is not None or query_kwarg is not None, 'The `url_kwarg` argument is required.' # noqa kwargs['lookup_field'] = kwargs.get('lookup_field', self.lookup_field) self.url_kwarg = url_kwarg self.query_kwarg = query_kwarg super(QueryField, self).__init__(view_name, **kwargs) def get_url(self, obj, view_name, request, response_format): lookup_value = getattr(obj, self.lookup_field) if self.url_kwarg: kwargs = {self.url_kwarg: lookup_value} return reverse(view_name, kwargs=kwargs, request=request, format=response_format) url = reverse(view_name, request=request, format=response_format) query_kwargs = {self.query_kwarg: lookup_value} return u'%s?%s' % (url, urlencode(query_kwargs))
django_hal/fields.py
6,360
HAL-style _links field. Parameters ---------- *args : tuple A tuple representing the relation name, and arguments to reverse the url. Example: `(name, urlpattern, {'pk', 'pk'})`. name : str The string used to identify the url in the final output. urlpattern : str A named urlpattern. kwargs : dict The kwargs to pass (with the urlpattern) to `reverse`. This is a dict where the key is the url kwarg, and the value is the attribute to lookup on the instance. So, `{'user', 'pk'}` would translate to `{'user': getattr(instance, 'pk')}`. Example ------- MySerializer(serializers.Serializer): _links = LinksField( ('self', 'namespace:view-name', {'pk': 'pk'}) ) # Outputs: # # { # '_links': { # 'self': 'https://.../my-resource/34' # } # } A shorthand syntax is available to reduce the repetitiveness of `{'pk': 'pk'}`, when both the kwarg and the instance attribute name are the same. ('ref', 'urlpattern', 'pk') is equivalent to ('ref', 'urlpattern', {'pk': 'pk'}) In a full example that looks like: MySerializer(serializers.Serializer): _links = LinksField( ('self', 'namespace:view-name', 'pk') ) # Outputs: # # { # '_links': { # 'self': { 'href': 'https://.../my-resource/34' } # } # } Return the query url that lists related objects in a reverse relation. Example ------- .. code:: python class Book: title = CharField() author = ForeignKey(Author) class Author: name = CharField() url('books/query/author/<pk>', ..., name='book-query-by-author') class AuthorSerializer: name = CharField() books = QueryField('book-query-by-author') >>> nick = Author(name='Nick').save() >>> book1 = Book(title='Part 1', author=nick) >>> book2 = Book(title='Part 2', author=nick) >>> AuthorSerializer(nick) { 'name': 'Nick', 'books': '../books/query/author/1', } Raises ------ django.*.NoReverseMatch if the `view_name` and `lookup_field` attributes are not configured to correctly match the URL conf. Return the whole instance, instead of looking up an attribute value. Implementation note: We do this because `Serializer.to_representation` builds the list of serializer fields with something like: for field in serializer_fields: field.to_representation(field.get_attribute(instance)) Since we need the instance in `to_representation` so we can query arbitrary attributes on it to build urls, we simply have to return the instance here. Return an absolute url for the given urlpattern. Return an ordered dictionary of HAL-style links. Serializer fields for django_hal `(ref, urlpattern, string)` where `string` is equivalent to `{string: string}` noqa
2,968
en
0.58391
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """This module has all statistic related transforms.""" from __future__ import absolute_import from __future__ import division import heapq import math import sys from builtins import round from apache_beam import coders from apache_beam import typehints from apache_beam.transforms.core import * from apache_beam.transforms.ptransform import PTransform __all__ = [ 'ApproximateUnique', ] # Type variables T = typehints.TypeVariable('T') K = typehints.TypeVariable('K') V = typehints.TypeVariable('V') class ApproximateUnique(object): """ Hashes input elements and uses those to extrapolate the size of the entire set of hash values by assuming the rest of the hash values are as densely distributed as the sample space. """ _NO_VALUE_ERR_MSG = 'Either size or error should be set. Received {}.' _MULTI_VALUE_ERR_MSG = 'Either size or error should be set. ' \ 'Received {size = %s, error = %s}.' _INPUT_SIZE_ERR_MSG = 'ApproximateUnique needs a size >= 16 for an error ' \ '<= 0.50. In general, the estimation error is about ' \ '2 / sqrt(sample_size). Received {size = %s}.' _INPUT_ERROR_ERR_MSG = 'ApproximateUnique needs an estimation error ' \ 'between 0.01 and 0.50. Received {error = %s}.' @staticmethod def parse_input_params(size=None, error=None): """ Check if input params are valid and return sample size. :param size: an int not smaller than 16, which we would use to estimate number of unique values. :param error: max estimation error, which is a float between 0.01 and 0.50. If error is given, sample size will be calculated from error with _get_sample_size_from_est_error function. :return: sample size :raises: ValueError: If both size and error are given, or neither is given, or values are out of range. """ if None not in (size, error): raise ValueError(ApproximateUnique._MULTI_VALUE_ERR_MSG % (size, error)) elif size is None and error is None: raise ValueError(ApproximateUnique._NO_VALUE_ERR_MSG) elif size is not None: if not isinstance(size, int) or size < 16: raise ValueError(ApproximateUnique._INPUT_SIZE_ERR_MSG % (size)) else: return size else: if error < 0.01 or error > 0.5: raise ValueError(ApproximateUnique._INPUT_ERROR_ERR_MSG % (error)) else: return ApproximateUnique._get_sample_size_from_est_error(error) @staticmethod def _get_sample_size_from_est_error(est_err): """ :return: sample size Calculate sample size from estimation error """ #math.ceil in python2.7 returns a float, while it returns an int in python3. 
return int(math.ceil(4.0 / math.pow(est_err, 2.0))) @typehints.with_input_types(T) @typehints.with_output_types(int) class Globally(PTransform): """ Approximate.Globally approximate number of unique values""" def __init__(self, size=None, error=None): self._sample_size = ApproximateUnique.parse_input_params(size, error) def expand(self, pcoll): coder = coders.registry.get_coder(pcoll) return pcoll \ | 'CountGlobalUniqueValues' \ >> (CombineGlobally(ApproximateUniqueCombineFn(self._sample_size, coder))) @typehints.with_input_types(typehints.KV[K, V]) @typehints.with_output_types(typehints.KV[K, int]) class PerKey(PTransform): """ Approximate.PerKey approximate number of unique values per key""" def __init__(self, size=None, error=None): self._sample_size = ApproximateUnique.parse_input_params(size, error) def expand(self, pcoll): coder = coders.registry.get_coder(pcoll) return pcoll \ | 'CountPerKeyUniqueValues' \ >> (CombinePerKey(ApproximateUniqueCombineFn(self._sample_size, coder))) class _LargestUnique(object): """ An object to keep samples and calculate sample hash space. It is an accumulator of a combine function. """ _HASH_SPACE_SIZE = 2.0 * sys.maxsize def __init__(self, sample_size): self._sample_size = sample_size self._min_hash = sys.maxsize self._sample_heap = [] self._sample_set = set() def add(self, element): """ :param an element from pcoll. :return: boolean type whether the value is in the heap Adds a value to the heap, returning whether the value is (large enough to be) in the heap. """ if len(self._sample_heap) >= self._sample_size and element < self._min_hash: return False if element not in self._sample_set: self._sample_set.add(element) heapq.heappush(self._sample_heap, element) if len(self._sample_heap) > self._sample_size: temp = heapq.heappop(self._sample_heap) self._sample_set.remove(temp) self._min_hash = self._sample_heap[0] elif element < self._min_hash: self._min_hash = element return True def get_estimate(self): """ :return: estimation count of unique values If heap size is smaller than sample size, just return heap size. Otherwise, takes into account the possibility of hash collisions, which become more likely than not for 2^32 distinct elements. Note that log(1+x) ~ x for small x, so for sampleSize << maxHash log(1 - sample_size/sample_space) / log(1 - 1/sample_space) ~ sample_size and hence estimate ~ sample_size * hash_space / sample_space as one would expect. Given sample_size / sample_space = est / hash_space est = sample_size * hash_space / sample_space Given above sample_size approximate, est = log1p(-sample_size/sample_space) / log1p(-1/sample_space) * hash_space / sample_space """ if len(self._sample_heap) < self._sample_size: return len(self._sample_heap) else: sample_space_size = sys.maxsize - 1.0 * self._min_hash est = (math.log1p(-self._sample_size / sample_space_size) / math.log1p(-1 / sample_space_size) * self._HASH_SPACE_SIZE / sample_space_size) return round(est) class ApproximateUniqueCombineFn(CombineFn): """ ApproximateUniqueCombineFn computes an estimate of the number of unique values that were combined. 
""" def __init__(self, sample_size, coder): self._sample_size = sample_size self._coder = coder def create_accumulator(self, *args, **kwargs): return _LargestUnique(self._sample_size) def add_input(self, accumulator, element, *args, **kwargs): try: accumulator.add(hash(self._coder.encode(element))) return accumulator except Exception as e: raise RuntimeError("Runtime exception: %s", e) # created an issue https://issues.apache.org/jira/browse/BEAM-7285 to speep up # merge process. def merge_accumulators(self, accumulators, *args, **kwargs): merged_accumulator = self.create_accumulator() for accumulator in accumulators: for i in accumulator._sample_heap: merged_accumulator.add(i) return merged_accumulator @staticmethod def extract_output(accumulator): return accumulator.get_estimate() def display_data(self): return {'sample_size': self._sample_size}
sdks/python/apache_beam/transforms/stats.py
8,149
Hashes input elements and uses those to extrapolate the size of the entire set of hash values by assuming the rest of the hash values are as densely distributed as the sample space. ApproximateUniqueCombineFn computes an estimate of the number of unique values that were combined. Approximate.Globally approximate number of unique values Approximate.PerKey approximate number of unique values per key An object to keep samples and calculate sample hash space. It is an accumulator of a combine function. :return: sample size Calculate sample size from estimation error :param an element from pcoll. :return: boolean type whether the value is in the heap Adds a value to the heap, returning whether the value is (large enough to be) in the heap. :return: estimation count of unique values If heap size is smaller than sample size, just return heap size. Otherwise, takes into account the possibility of hash collisions, which become more likely than not for 2^32 distinct elements. Note that log(1+x) ~ x for small x, so for sampleSize << maxHash log(1 - sample_size/sample_space) / log(1 - 1/sample_space) ~ sample_size and hence estimate ~ sample_size * hash_space / sample_space as one would expect. Given sample_size / sample_space = est / hash_space est = sample_size * hash_space / sample_space Given above sample_size approximate, est = log1p(-sample_size/sample_space) / log1p(-1/sample_space) * hash_space / sample_space Check if input params are valid and return sample size. :param size: an int not smaller than 16, which we would use to estimate number of unique values. :param error: max estimation error, which is a float between 0.01 and 0.50. If error is given, sample size will be calculated from error with _get_sample_size_from_est_error function. :return: sample size :raises: ValueError: If both size and error are given, or neither is given, or values are out of range. This module has all statistic related transforms. Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Type variablesmath.ceil in python2.7 returns a float, while it returns an int in python3. created an issue https://issues.apache.org/jira/browse/BEAM-7285 to speep up merge process.
2,894
en
0.834038
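The count returned by _LargestUnique.get_estimate follows the formula spelled out in its docstring. The standalone sketch below evaluates that formula once, with an arbitrary sample_size and min_hash chosen purely for illustration, to show how the sampled hash range is extrapolated to the full hash space.

import math
import sys

sample_size = 16                      # illustrative sample size
min_hash = sys.maxsize - 10 ** 15     # illustrative smallest hash kept in the sample
hash_space = 2.0 * sys.maxsize        # matches _LargestUnique._HASH_SPACE_SIZE

sample_space = sys.maxsize - 1.0 * min_hash   # width of the sampled hash range
est = (math.log1p(-sample_size / sample_space)
       / math.log1p(-1 / sample_space)
       * hash_space / sample_space)
print(round(est))   # roughly sample_size * hash_space / sample_space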
from meta_policy_search.utils import logger from meta_policy_search.meta_algos.base import MAMLAlgo from meta_policy_search.optimizers.conjugate_gradient_optimizer import ConjugateGradientOptimizer import tensorflow as tf from collections import OrderedDict class TRPOMAML(MAMLAlgo): """ Algorithm for TRPO MAML Args: policy (Policy): policy object name (str): tf variable scope step_size (int): trust region size for the meta policy optimization through TPRO inner_type (str): One of 'log_likelihood', 'likelihood_ratio', 'dice', choose which inner update to use exploration (bool): whether to use E-MAML or MAML inner_lr (float) : gradient step size used for inner step meta_batch_size (int): number of meta-learning tasks num_inner_grad_steps (int) : number of gradient updates taken per maml iteration trainable_inner_step_size (boolean): whether make the inner step size a trainable variable """ def __init__( self, *args, name="trpo_maml", step_size=0.01, inner_type='likelihood_ratio', exploration=False, **kwargs ): super(TRPOMAML, self).__init__(*args, **kwargs) assert inner_type in ["log_likelihood", "likelihood_ratio", "dice"] self.step_size = step_size self.inner_type = inner_type self.name = name self._optimization_keys = ['observations', 'actions', 'advantages', 'agent_infos'] self.exploration = exploration if exploration: # add adjusted average rewards tp optimization keys self._optimization_keys.append('adj_avg_rewards') self.optimizer = ConjugateGradientOptimizer() self.build_graph() def _adapt_objective_sym(self, action_sym, adv_sym, dist_info_old_sym, dist_info_new_sym): if self.inner_type == 'likelihood_ratio': with tf.variable_scope("likelihood_ratio"): likelihood_ratio_adapt = self.policy.distribution.likelihood_ratio_sym(action_sym, dist_info_old_sym, dist_info_new_sym) with tf.variable_scope("surrogate_loss"): surr_obj_adapt = -tf.reduce_mean(likelihood_ratio_adapt * adv_sym) elif self.inner_type == 'log_likelihood': with tf.variable_scope("log_likelihood"): log_likelihood_adapt = self.policy.distribution.log_likelihood_sym(action_sym, dist_info_new_sym) with tf.variable_scope("surrogate_loss"): surr_obj_adapt = -tf.reduce_mean(log_likelihood_adapt * adv_sym) else: raise NotImplementedError return surr_obj_adapt def build_graph(self): """ Creates the computation graph """ """ Create Variables """ # assert self.num_inner_grad_steps == 1 or not self.exploration, "Not sure if the math is right for more than 1 inner step" with tf.variable_scope(self.name): self.step_sizes = self._create_step_size_vars() """ --- Build inner update graph for adapting the policy and sampling trajectories --- """ # this graph is only used for adapting the policy and not computing the meta-updates self.adapted_policies_params, self.adapt_input_ph_dict = self._build_inner_adaption() """ ----- Build graph for the meta-update ----- """ self.meta_op_phs_dict = OrderedDict() obs_phs, action_phs, adv_phs, dist_info_old_phs, all_phs_dict = self._make_input_placeholders('step0') self.meta_op_phs_dict.update(all_phs_dict) distribution_info_vars, current_policy_params = [], [] all_surr_objs, all_inner_kls = [], [] for i in range(self.meta_batch_size): dist_info_sym = self.policy.distribution_info_sym(obs_phs[i], params=None) distribution_info_vars.append(dist_info_sym) # step 0 current_policy_params.append(self.policy.policy_params) # set to real policy_params (tf.Variable) initial_distribution_info_vars = distribution_info_vars initial_action_phs = action_phs with tf.variable_scope(self.name): """ Inner updates""" 
for step_id in range(1, self.num_inner_grad_steps+1): surr_objs, adapted_policy_params = [], [] # inner adaptation step for each task for i in range(self.meta_batch_size): surr_loss = self._adapt_objective_sym(action_phs[i], adv_phs[i], dist_info_old_phs[i], distribution_info_vars[i]) adapted_params_var = self._adapt_sym(surr_loss, current_policy_params[i]) adapted_policy_params.append(adapted_params_var) surr_objs.append(surr_loss) all_surr_objs.append(surr_objs) # Create new placeholders for the next step obs_phs, action_phs, adv_phs, dist_info_old_phs, all_phs_dict = self._make_input_placeholders('step%i' % step_id) self.meta_op_phs_dict.update(all_phs_dict) # dist_info_vars_for_next_step distribution_info_vars = [self.policy.distribution_info_sym(obs_phs[i], params=adapted_policy_params[i]) for i in range(self.meta_batch_size)] current_policy_params = adapted_policy_params """ Outer objective """ surr_objs, outer_kls = [], [] # Create placeholders # meta-objective for i in range(self.meta_batch_size): likelihood_ratio = self.policy.distribution.likelihood_ratio_sym(action_phs[i], dist_info_old_phs[i], distribution_info_vars[i]) outer_kl = tf.reduce_mean(self.policy.distribution.kl_sym(dist_info_old_phs[i], distribution_info_vars[i])) surr_obj = - tf.reduce_mean(likelihood_ratio * adv_phs[i]) if self.exploration: # add adj_avg_reward placeholder adj_avg_rewards = tf.placeholder(dtype=tf.float32, shape=[None], name='adj_avg_rewards' + '_' + str(self.num_inner_grad_steps) + '_' + str(i)) self.meta_op_phs_dict['step%i_task%i_%s' % (self.num_inner_grad_steps, i, 'adj_avg_rewards')] = adj_avg_rewards log_likelihood_inital = self.policy.distribution.log_likelihood_sym(initial_action_phs[i], initial_distribution_info_vars[i]) surr_obj += - tf.reduce_mean(adj_avg_rewards) * tf.reduce_mean(log_likelihood_inital) surr_objs.append(surr_obj) outer_kls.append(outer_kl) mean_outer_kl = tf.reduce_mean(tf.stack(outer_kls)) """ Mean over meta tasks """ meta_objective = tf.reduce_mean(tf.stack(surr_objs, 0)) self.optimizer.build_graph( loss=meta_objective, target=self.policy, input_ph_dict=self.meta_op_phs_dict, leq_constraint=(mean_outer_kl, self.step_size), ) def optimize_policy(self, all_samples_data, log=True): """ Performs MAML outer step Args: all_samples_data (list) : list of lists of lists of samples (each is a dict) split by gradient update and meta task log (bool) : whether to log statistics Returns: None """ meta_op_input_dict = self._extract_input_dict_meta_op(all_samples_data, self._optimization_keys) logger.log("Computing KL before") mean_kl_before = self.optimizer.constraint_val(meta_op_input_dict) logger.log("Computing loss before") loss_before = self.optimizer.loss(meta_op_input_dict) logger.log("Optimizing") self.optimizer.optimize(meta_op_input_dict) logger.log("Computing loss after") loss_after = self.optimizer.loss(meta_op_input_dict) logger.log("Computing KL after") mean_kl = self.optimizer.constraint_val(meta_op_input_dict) if log: logger.logkv('MeanKLBefore', mean_kl_before) logger.logkv('MeanKL', mean_kl) logger.logkv('LossBefore', loss_before) logger.logkv('LossAfter', loss_after) logger.logkv('dLoss', loss_before - loss_after)
meta_policy_search/meta_algos/trpo_maml.py
8,751
Algorithm for TRPO MAML Args: policy (Policy): policy object name (str): tf variable scope step_size (int): trust region size for the meta policy optimization through TPRO inner_type (str): One of 'log_likelihood', 'likelihood_ratio', 'dice', choose which inner update to use exploration (bool): whether to use E-MAML or MAML inner_lr (float) : gradient step size used for inner step meta_batch_size (int): number of meta-learning tasks num_inner_grad_steps (int) : number of gradient updates taken per maml iteration trainable_inner_step_size (boolean): whether make the inner step size a trainable variable Creates the computation graph Performs MAML outer step Args: all_samples_data (list) : list of lists of lists of samples (each is a dict) split by gradient update and meta task log (bool) : whether to log statistics Returns: None add adjusted average rewards tp optimization keys assert self.num_inner_grad_steps == 1 or not self.exploration, "Not sure if the math is right for more than 1 inner step" this graph is only used for adapting the policy and not computing the meta-updates step 0 set to real policy_params (tf.Variable) inner adaptation step for each task Create new placeholders for the next step dist_info_vars_for_next_step Create placeholders meta-objective add adj_avg_reward placeholder
1,371
en
0.628064
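Both the inner 'likelihood_ratio' adaptation objective and the outer meta-objective above boil down to the negative mean of (probability ratio times advantage). A tiny NumPy sketch of that surrogate loss for a one-dimensional Gaussian policy (all numbers invented) may make the TensorFlow graph easier to follow.

import numpy as np

# Invented per-timestep data for a single task
actions    = np.array([0.3, -0.1, 0.7])
advantages = np.array([1.0, -0.5, 2.0])

def gaussian_log_prob(a, mean, std):
    # log density of a diagonal Gaussian, elementwise
    return -0.5 * (((a - mean) / std) ** 2 + np.log(2.0 * np.pi * std ** 2))

logp_old = gaussian_log_prob(actions, mean=0.0, std=1.0)  # pre-adaptation policy
logp_new = gaussian_log_prob(actions, mean=0.2, std=1.0)  # adapted policy

likelihood_ratio = np.exp(logp_new - logp_old)
surrogate_loss = -np.mean(likelihood_ratio * advantages)  # negative surrogate objective
print(surrogate_loss)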
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright 2011-2014, Nigel Small # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from py2neo.error.client import * from py2neo.error.server import *
py2neo/error/__init__.py
698
!/usr/bin/env python -*- coding: utf-8 -*- Copyright 2011-2014, Nigel Small Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
597
en
0.84117
import json import os import geopandas as gpd import pandas as pd import sys sys.path.append('scripts/') from polygon import Collection, Footprint class Writer: """ Class that stores smart label values per instance """ def __init__(self, filename): """ Class initialization. :param filename: name of the file to store the data, str """ self.filename = filename self.content = {} def add(self, instance, result): """ Function that adds an instance with its smart labels to the collection :param instance: name of instance, str :param result: smart labels, dict {label_name: label_value} :return: """ self.content[instance] = result def get_instances(self) -> list: """ Function that gets the instances that already exist in the file :return: existing instances, list """ return list(self.content.keys()) def reset(self): """ Function that resets the file to an empty state. :return: """ del self.content self.content = {} def save(self): """ Function that saves all the smart labels in the class to a local file TODO: add saving to AWS based on AWS_SAVE in config :return: """ with open(self.filename, "w") as f: json.dump(self.content, f) class JsonWriter(Writer): """ Class that saves results in json format. """ def __init__(self, filename='test'): Writer.__init__(self, filename) if not self.filename.endswith('.json'): self.filename += '.json' # with open(self.filename, 'r') as f: # self.content = json.load(f) self.content = {} def save(self): """ Function that saves the writer's content to local system in json format. :return: """ with open(self.filename, 'a') as json_file: json.dump(self.content, json_file) class CsvWriter: def __init__(self, filename='result', features=[]): assert isinstance(filename, str), "Expected name to be str, got {}".format(filename) self.filename = filename self.features = features self.content = {} if self.filename + '.csv' in os.listdir(): self.csv = pd.read_csv(self.filename + '.csv', index_col=0) # self.csv = self.csv.to_dict(orient='list') else: self.csv = {} self.reset() self.csv = pd.DataFrame(self.csv) self.csv.to_csv(self.filename + '.csv', mode='w') print('csv saved as {}.csv'.format(self.filename)) def add(self, instance, result): if self._check(result): for _feature in list(result.keys()): if _feature not in list(self.csv.keys()): return ValueError self.content[instance] = result result = {key: [value] for key, value in result.items()} _df = pd.DataFrame.from_dict(result) self.csv = self.csv.append(_df, ignore_index=True) def _check(self, result): return len(list(result.keys())) == len(self.features) def save(self): df = pd.DataFrame(self.csv) df.to_csv(self.filename + '.csv', mode='a', header=False) def reset(self): self.csv = {} # self.csv['iter'] = [] for feature in self.features: self.csv[feature] = [] class ShpWriter: def __init__(self, name='result'): self.name = name def save(self, collection): if not isinstance(collection, Collection): print('Expected Collection, got {}'.format(collection)) raise TypeError if not isinstance(collection.class_type, Footprint.__class__): print('Collection should be made of Footprints, got {}'.format(collection.class_type)) raise AttributeError r = [] for f in collection: r.append(f.polygon) dict = {'name': [0 for x in r], 'geometry': r} df = gpd.GeoDataFrame(dict) df.to_file('{}.shp'.format(self.name))
scripts/writer.py
3,535
Class that saves results in json format. Class that stores smart label values per instance Class initialization. :param filename: name of the file to store the data, str Function that adds an instance with its smart labels to the collection :param instance: name of instance, str :param result: smart labels, dict {label_name: label_value} :return: Function that gets the instances that already exist in the file :return: existing instances, list Function that resets the file to an empty state. :return: Function that saves all the smart labels in the class to a local file TODO: add saving to AWS based on AWS_SAVE in config :return: Function that saves the writer's content to local system in json format. :return: with open(self.filename, 'r') as f: self.content = json.load(f) self.csv = self.csv.to_dict(orient='list') self.csv['iter'] = []
849
en
0.712835
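A short usage sketch for the JsonWriter class above (the file name, instance names and label values are made up, and it assumes the repository's scripts/ directory, including its polygon module, is on sys.path):

from writer import JsonWriter  # assuming scripts/ is on sys.path

writer = JsonWriter('labels_demo')          # writes to labels_demo.json
writer.add('tile_001', {'has_pool': 1})     # illustrative instance -> smart labels
writer.add('tile_002', {'has_pool': 0})
print(writer.get_instances())               # ['tile_001', 'tile_002']
writer.save()                               # appends the content dict as one JSON object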
# Author # Angelica ACOSTA ARTETA import unittest from balance import summing, stringarray, need, weighting class TestBalance(unittest.TestCase): def test_summing(self): self.assertEqual(summing([]), 0) self.assertEqual(summing([3]), 3) self.assertEqual(summing([1,1,1,1,1]), 5) self.assertEqual(summing([1,-1]), 0) self.assertEqual(summing([-2,-2]), -4) def test_stringarray(self): self.assertEqual(stringarray([]), "") self.assertEqual(stringarray([5]), "5") self.assertEqual(stringarray([1,1,1,1,1]), "1, 1, 1, 1, 1") def test_need(self): self.assertEqual(need(1), (1,0)) self.assertEqual(need(0), (None,None)) self.assertEqual(need(-4), (None,None)) self.assertEqual(need(27), (27,3)) def test_weighting(self): first_test=weighting(1) self.assertEqual(summing(first_test[0]), summing(first_test[1])) second_test=weighting(532) self.assertEqual(summing(second_test[0]), summing(second_test[1])) third_test=weighting(1000) self.assertEqual(summing(third_test[0]), summing(third_test[1])) if __name__ == '__main__': unittest.main()
test_balance.py
1,219
Author Angelica ACOSTA ARTETA
29
en
0.423854
"""Python wrappers around TensorFlow ops. This file is MACHINE GENERATED! Do not edit. """ import collections as _collections import six as _six from tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow from tensorflow.python.eager import context as _context from tensorflow.python.eager import core as _core from tensorflow.python.eager import execute as _execute from tensorflow.python.framework import dtypes as _dtypes from tensorflow.python.framework import errors as _errors from tensorflow.python.framework import tensor_shape as _tensor_shape from tensorflow.core.framework import op_def_pb2 as _op_def_pb2 # Needed to trigger the call to _set_call_cpp_shape_fn. from tensorflow.python.framework import common_shapes as _common_shapes from tensorflow.python.framework import op_def_registry as _op_def_registry from tensorflow.python.framework import ops as _ops from tensorflow.python.framework import op_def_library as _op_def_library from tensorflow.python.util.tf_export import tf_export @tf_export('copy') def copy(input, tensor_name="", debug_ops_spec=[], name=None): r"""Copy Op. Performs CPU-to-CPU or GPU-to-GPU deep-copying of tensor, depending on the device on which the tensor is allocated. N.B.: If the all downstream attached debug ops are disabled given the current gRPC gating status, the output will simply forward the input tensor without deep-copying. See the documentation of Debug* ops for more details. Unlike the CopyHost Op, this op does not have HostMemory constraint on its input or output. Args: input: A `Tensor`. Input tensor. tensor_name: An optional `string`. Defaults to `""`. The name of the input tensor. debug_ops_spec: An optional list of `strings`. Defaults to `[]`. A list of debug op spec (op, url, gated_grpc) for attached debug ops. Each element of the list has the format <debug_op>;<grpc_url>;<gated_grpc>, wherein gated_grpc is boolean represented as 0/1. E.g., "DebugIdentity;grpc://foo:3333;1", "DebugIdentity;file:///tmp/tfdbg_1;0". name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. Output tensor, deep-copied from input. """ _ctx = _context.context() if not _ctx.executing_eagerly(): if tensor_name is None: tensor_name = "" tensor_name = _execute.make_str(tensor_name, "tensor_name") if debug_ops_spec is None: debug_ops_spec = [] if not isinstance(debug_ops_spec, (list, tuple)): raise TypeError( "Expected list for 'debug_ops_spec' argument to " "'copy' Op, not %r." 
% debug_ops_spec) debug_ops_spec = [_execute.make_str(_s, "debug_ops_spec") for _s in debug_ops_spec] _, _, _op = _op_def_lib._apply_op_helper( "Copy", input=input, tensor_name=tensor_name, debug_ops_spec=debug_ops_spec, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T"), "tensor_name", _op.get_attr("tensor_name"), "debug_ops_spec", _op.get_attr("debug_ops_spec")) _execute.record_gradient( "Copy", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._handle, _ctx.device_name, "Copy", name, _ctx._post_execution_callbacks, input, "tensor_name", tensor_name, "debug_ops_spec", debug_ops_spec) return _result except _core._FallbackException: return copy_eager_fallback( input, tensor_name=tensor_name, debug_ops_spec=debug_ops_spec, name=name) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def copy_eager_fallback(input, tensor_name="", debug_ops_spec=[], name=None): r"""This is the slowpath function for Eager mode. This is for function copy """ _ctx = _context.context() if tensor_name is None: tensor_name = "" tensor_name = _execute.make_str(tensor_name, "tensor_name") if debug_ops_spec is None: debug_ops_spec = [] if not isinstance(debug_ops_spec, (list, tuple)): raise TypeError( "Expected list for 'debug_ops_spec' argument to " "'copy' Op, not %r." % debug_ops_spec) debug_ops_spec = [_execute.make_str(_s, "debug_ops_spec") for _s in debug_ops_spec] _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx) _inputs_flat = [input] _attrs = ("T", _attr_T, "tensor_name", tensor_name, "debug_ops_spec", debug_ops_spec) _result = _execute.execute(b"Copy", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Copy", _inputs_flat, _attrs, _result, name) _result, = _result return _result @tf_export('copy_host') def copy_host(input, tensor_name="", debug_ops_spec=[], name=None): r"""Copy Host Op. Performs CPU-to-CPU deep-copying of tensor. N.B.: If the all downstream attached debug ops are disabled given the current gRPC gating status, the output will simply forward the input tensor without deep-copying. See the documentation of Debug* ops for more details. Unlike the Copy Op, this op has HostMemory constraint on its input or output. Args: input: A `Tensor`. Input tensor. tensor_name: An optional `string`. Defaults to `""`. The name of the input tensor. debug_ops_spec: An optional list of `strings`. Defaults to `[]`. A list of debug op spec (op, url, gated_grpc) for attached debug ops. Each element of the list has the format <debug_op>;<grpc_url>;<gated_grpc>, wherein gated_grpc is boolean represented as 0/1. E.g., "DebugIdentity;grpc://foo:3333;1", "DebugIdentity;file:///tmp/tfdbg_1;0". name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. Output tensor, deep-copied from input. """ _ctx = _context.context() if not _ctx.executing_eagerly(): if tensor_name is None: tensor_name = "" tensor_name = _execute.make_str(tensor_name, "tensor_name") if debug_ops_spec is None: debug_ops_spec = [] if not isinstance(debug_ops_spec, (list, tuple)): raise TypeError( "Expected list for 'debug_ops_spec' argument to " "'copy_host' Op, not %r." 
% debug_ops_spec) debug_ops_spec = [_execute.make_str(_s, "debug_ops_spec") for _s in debug_ops_spec] _, _, _op = _op_def_lib._apply_op_helper( "CopyHost", input=input, tensor_name=tensor_name, debug_ops_spec=debug_ops_spec, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T"), "tensor_name", _op.get_attr("tensor_name"), "debug_ops_spec", _op.get_attr("debug_ops_spec")) _execute.record_gradient( "CopyHost", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._handle, _ctx.device_name, "CopyHost", name, _ctx._post_execution_callbacks, input, "tensor_name", tensor_name, "debug_ops_spec", debug_ops_spec) return _result except _core._FallbackException: return copy_host_eager_fallback( input, tensor_name=tensor_name, debug_ops_spec=debug_ops_spec, name=name) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def copy_host_eager_fallback(input, tensor_name="", debug_ops_spec=[], name=None): r"""This is the slowpath function for Eager mode. This is for function copy_host """ _ctx = _context.context() if tensor_name is None: tensor_name = "" tensor_name = _execute.make_str(tensor_name, "tensor_name") if debug_ops_spec is None: debug_ops_spec = [] if not isinstance(debug_ops_spec, (list, tuple)): raise TypeError( "Expected list for 'debug_ops_spec' argument to " "'copy_host' Op, not %r." % debug_ops_spec) debug_ops_spec = [_execute.make_str(_s, "debug_ops_spec") for _s in debug_ops_spec] _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx) _inputs_flat = [input] _attrs = ("T", _attr_T, "tensor_name", tensor_name, "debug_ops_spec", debug_ops_spec) _result = _execute.execute(b"CopyHost", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "CopyHost", _inputs_flat, _attrs, _result, name) _result, = _result return _result @tf_export('debug_identity') def debug_identity(input, device_name="", tensor_name="", debug_urls=[], gated_grpc=False, name=None): r"""Debug Identity Op. Provides an identity mapping of the non-Ref type input tensor for debugging. Args: input: A `Tensor`. Input tensor, non-Reference type. device_name: An optional `string`. Defaults to `""`. tensor_name: An optional `string`. Defaults to `""`. Name of the input tensor. debug_urls: An optional list of `strings`. Defaults to `[]`. List of URLs to debug targets, e.g., file:///foo/tfdbg_dump, grpc:://localhost:11011 gated_grpc: An optional `bool`. Defaults to `False`. Whether this op will be gated. If any of the debug_urls of this debug node is of the grpc:// scheme, when the value of this attribute is set to True, the data will not actually be sent via the grpc stream unless this debug op has been enabled at the debug_url. If all of the debug_urls of this debug node are of the grpc:// scheme and the debug op is enabled at none of them, the output will be an empty Tensor. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. Output tensor that equals the input tensor. 
""" _ctx = _context.context() if not _ctx.executing_eagerly(): if device_name is None: device_name = "" device_name = _execute.make_str(device_name, "device_name") if tensor_name is None: tensor_name = "" tensor_name = _execute.make_str(tensor_name, "tensor_name") if debug_urls is None: debug_urls = [] if not isinstance(debug_urls, (list, tuple)): raise TypeError( "Expected list for 'debug_urls' argument to " "'debug_identity' Op, not %r." % debug_urls) debug_urls = [_execute.make_str(_s, "debug_urls") for _s in debug_urls] if gated_grpc is None: gated_grpc = False gated_grpc = _execute.make_bool(gated_grpc, "gated_grpc") _, _, _op = _op_def_lib._apply_op_helper( "DebugIdentity", input=input, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, gated_grpc=gated_grpc, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T"), "device_name", _op.get_attr("device_name"), "tensor_name", _op.get_attr("tensor_name"), "debug_urls", _op.get_attr("debug_urls"), "gated_grpc", _op.get_attr("gated_grpc")) _execute.record_gradient( "DebugIdentity", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._handle, _ctx.device_name, "DebugIdentity", name, _ctx._post_execution_callbacks, input, "device_name", device_name, "tensor_name", tensor_name, "debug_urls", debug_urls, "gated_grpc", gated_grpc) return _result except _core._FallbackException: return debug_identity_eager_fallback( input, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, gated_grpc=gated_grpc, name=name) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def debug_identity_eager_fallback(input, device_name="", tensor_name="", debug_urls=[], gated_grpc=False, name=None): r"""This is the slowpath function for Eager mode. This is for function debug_identity """ _ctx = _context.context() if device_name is None: device_name = "" device_name = _execute.make_str(device_name, "device_name") if tensor_name is None: tensor_name = "" tensor_name = _execute.make_str(tensor_name, "tensor_name") if debug_urls is None: debug_urls = [] if not isinstance(debug_urls, (list, tuple)): raise TypeError( "Expected list for 'debug_urls' argument to " "'debug_identity' Op, not %r." % debug_urls) debug_urls = [_execute.make_str(_s, "debug_urls") for _s in debug_urls] if gated_grpc is None: gated_grpc = False gated_grpc = _execute.make_bool(gated_grpc, "gated_grpc") _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx) _inputs_flat = [input] _attrs = ("T", _attr_T, "device_name", device_name, "tensor_name", tensor_name, "debug_urls", debug_urls, "gated_grpc", gated_grpc) _result = _execute.execute(b"DebugIdentity", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "DebugIdentity", _inputs_flat, _attrs, _result, name) _result, = _result return _result @tf_export('debug_nan_count') def debug_nan_count(input, device_name="", tensor_name="", debug_urls=[], gated_grpc=False, name=None): r"""Debug NaN Value Counter Op Counts number of NaNs in the input tensor, for debugging. Args: input: A `Tensor`. Input tensor, non-Reference type. device_name: An optional `string`. Defaults to `""`. tensor_name: An optional `string`. Defaults to `""`. Name of the input tensor. debug_urls: An optional list of `strings`. 
Defaults to `[]`. List of URLs to debug targets, e.g., file:///foo/tfdbg_dump, grpc:://localhost:11011. gated_grpc: An optional `bool`. Defaults to `False`. Whether this op will be gated. If any of the debug_urls of this debug node is of the grpc:// scheme, when the value of this attribute is set to True, the data will not actually be sent via the grpc stream unless this debug op has been enabled at the debug_url. If all of the debug_urls of this debug node are of the grpc:// scheme and the debug op is enabled at none of them, the output will be an empty Tensor. name: A name for the operation (optional). Returns: A `Tensor` of type `int64`. An integer output tensor that is the number of NaNs in the input. """ _ctx = _context.context() if not _ctx.executing_eagerly(): if device_name is None: device_name = "" device_name = _execute.make_str(device_name, "device_name") if tensor_name is None: tensor_name = "" tensor_name = _execute.make_str(tensor_name, "tensor_name") if debug_urls is None: debug_urls = [] if not isinstance(debug_urls, (list, tuple)): raise TypeError( "Expected list for 'debug_urls' argument to " "'debug_nan_count' Op, not %r." % debug_urls) debug_urls = [_execute.make_str(_s, "debug_urls") for _s in debug_urls] if gated_grpc is None: gated_grpc = False gated_grpc = _execute.make_bool(gated_grpc, "gated_grpc") _, _, _op = _op_def_lib._apply_op_helper( "DebugNanCount", input=input, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, gated_grpc=gated_grpc, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T"), "device_name", _op.get_attr("device_name"), "tensor_name", _op.get_attr("tensor_name"), "debug_urls", _op.get_attr("debug_urls"), "gated_grpc", _op.get_attr("gated_grpc")) _execute.record_gradient( "DebugNanCount", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._handle, _ctx.device_name, "DebugNanCount", name, _ctx._post_execution_callbacks, input, "device_name", device_name, "tensor_name", tensor_name, "debug_urls", debug_urls, "gated_grpc", gated_grpc) return _result except _core._FallbackException: return debug_nan_count_eager_fallback( input, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, gated_grpc=gated_grpc, name=name) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def debug_nan_count_eager_fallback(input, device_name="", tensor_name="", debug_urls=[], gated_grpc=False, name=None): r"""This is the slowpath function for Eager mode. This is for function debug_nan_count """ _ctx = _context.context() if device_name is None: device_name = "" device_name = _execute.make_str(device_name, "device_name") if tensor_name is None: tensor_name = "" tensor_name = _execute.make_str(tensor_name, "tensor_name") if debug_urls is None: debug_urls = [] if not isinstance(debug_urls, (list, tuple)): raise TypeError( "Expected list for 'debug_urls' argument to " "'debug_nan_count' Op, not %r." 
% debug_urls) debug_urls = [_execute.make_str(_s, "debug_urls") for _s in debug_urls] if gated_grpc is None: gated_grpc = False gated_grpc = _execute.make_bool(gated_grpc, "gated_grpc") _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx) _inputs_flat = [input] _attrs = ("T", _attr_T, "device_name", device_name, "tensor_name", tensor_name, "debug_urls", debug_urls, "gated_grpc", gated_grpc) _result = _execute.execute(b"DebugNanCount", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "DebugNanCount", _inputs_flat, _attrs, _result, name) _result, = _result return _result @tf_export('debug_numeric_summary') def debug_numeric_summary(input, device_name="", tensor_name="", debug_urls=[], lower_bound=float('-inf'), upper_bound=float('inf'), mute_if_healthy=False, gated_grpc=False, name=None): r"""Debug Numeric Summary Op. Provide a basic summary of numeric value types, range and distribution. Args: input: A `Tensor`. Input tensor, non-Reference type, float or double. device_name: An optional `string`. Defaults to `""`. tensor_name: An optional `string`. Defaults to `""`. Name of the input tensor. debug_urls: An optional list of `strings`. Defaults to `[]`. List of URLs to debug targets, e.g., file:///foo/tfdbg_dump, grpc:://localhost:11011 lower_bound: An optional `float`. Defaults to `float('-inf')`. (float) The lower bound <= which values will be included in the generalized -inf count. Default: -inf. upper_bound: An optional `float`. Defaults to `float('inf')`. (float) The upper bound >= which values will be included in the generalized +inf count. Default: +inf. mute_if_healthy: An optional `bool`. Defaults to `False`. (bool) Do not send data to the debug URLs unless at least one of elements [2], [3] and [7] (i.e., the nan count and the generalized -inf and inf counts) is non-zero. gated_grpc: An optional `bool`. Defaults to `False`. Whether this op will be gated. If any of the debug_urls of this debug node is of the grpc:// scheme, when the value of this attribute is set to True, the data will not actually be sent via the grpc stream unless this debug op has been enabled at the debug_url. If all of the debug_urls of this debug node are of the grpc:// scheme and the debug op is enabled at none of them, the output will be an empty Tensor. name: A name for the operation (optional). Returns: A `Tensor` of type `float64`. A double tensor of shape [14 + nDimensions], where nDimensions is the the number of dimensions of the tensor's shape. The elements of output are: [0]: is initialized (1.0) or not (0.0). [1]: total number of elements [2]: NaN element count [3]: generalized -inf count: elements <= lower_bound. lower_bound is -inf by default. [4]: negative element count (excluding -inf), if lower_bound is the default -inf. Otherwise, this is the count of elements > lower_bound and < 0. [5]: zero element count [6]: positive element count (excluding +inf), if upper_bound is the default -inf. Otherwise, this is the count of elements < upper_bound and > 0. [7]: generalized +inf count, elements >= upper_bound. upper_bound is +inf by default. Output elements [1:8] are all zero, if the tensor is uninitialized. [8]: minimum of all non-inf and non-NaN elements. If uninitialized or no such element exists: +inf. [9]: maximum of all non-inf and non-NaN elements. If uninitialized or no such element exists: -inf. [10]: mean of all non-inf and non-NaN elements. If uninitialized or no such element exists: NaN. [11]: variance of all non-inf and non-NaN elements. 
If uninitialized or no such element exists: NaN. [12]: Data type of the tensor encoded as an enum integer. See the DataType proto for more details. [13]: Number of dimensions of the tensor (ndims). [14+]: Sizes of the dimensions. """ _ctx = _context.context() if not _ctx.executing_eagerly(): if device_name is None: device_name = "" device_name = _execute.make_str(device_name, "device_name") if tensor_name is None: tensor_name = "" tensor_name = _execute.make_str(tensor_name, "tensor_name") if debug_urls is None: debug_urls = [] if not isinstance(debug_urls, (list, tuple)): raise TypeError( "Expected list for 'debug_urls' argument to " "'debug_numeric_summary' Op, not %r." % debug_urls) debug_urls = [_execute.make_str(_s, "debug_urls") for _s in debug_urls] if lower_bound is None: lower_bound = float('-inf') lower_bound = _execute.make_float(lower_bound, "lower_bound") if upper_bound is None: upper_bound = float('inf') upper_bound = _execute.make_float(upper_bound, "upper_bound") if mute_if_healthy is None: mute_if_healthy = False mute_if_healthy = _execute.make_bool(mute_if_healthy, "mute_if_healthy") if gated_grpc is None: gated_grpc = False gated_grpc = _execute.make_bool(gated_grpc, "gated_grpc") _, _, _op = _op_def_lib._apply_op_helper( "DebugNumericSummary", input=input, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, lower_bound=lower_bound, upper_bound=upper_bound, mute_if_healthy=mute_if_healthy, gated_grpc=gated_grpc, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T"), "device_name", _op.get_attr("device_name"), "tensor_name", _op.get_attr("tensor_name"), "debug_urls", _op.get_attr("debug_urls"), "lower_bound", _op.get_attr("lower_bound"), "upper_bound", _op.get_attr("upper_bound"), "mute_if_healthy", _op.get_attr("mute_if_healthy"), "gated_grpc", _op.get_attr("gated_grpc")) _execute.record_gradient( "DebugNumericSummary", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._handle, _ctx.device_name, "DebugNumericSummary", name, _ctx._post_execution_callbacks, input, "device_name", device_name, "tensor_name", tensor_name, "debug_urls", debug_urls, "lower_bound", lower_bound, "upper_bound", upper_bound, "mute_if_healthy", mute_if_healthy, "gated_grpc", gated_grpc) return _result except _core._FallbackException: return debug_numeric_summary_eager_fallback( input, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, lower_bound=lower_bound, upper_bound=upper_bound, mute_if_healthy=mute_if_healthy, gated_grpc=gated_grpc, name=name) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def debug_numeric_summary_eager_fallback(input, device_name="", tensor_name="", debug_urls=[], lower_bound=float('-inf'), upper_bound=float('inf'), mute_if_healthy=False, gated_grpc=False, name=None): r"""This is the slowpath function for Eager mode. 
This is for function debug_numeric_summary """ _ctx = _context.context() if device_name is None: device_name = "" device_name = _execute.make_str(device_name, "device_name") if tensor_name is None: tensor_name = "" tensor_name = _execute.make_str(tensor_name, "tensor_name") if debug_urls is None: debug_urls = [] if not isinstance(debug_urls, (list, tuple)): raise TypeError( "Expected list for 'debug_urls' argument to " "'debug_numeric_summary' Op, not %r." % debug_urls) debug_urls = [_execute.make_str(_s, "debug_urls") for _s in debug_urls] if lower_bound is None: lower_bound = float('-inf') lower_bound = _execute.make_float(lower_bound, "lower_bound") if upper_bound is None: upper_bound = float('inf') upper_bound = _execute.make_float(upper_bound, "upper_bound") if mute_if_healthy is None: mute_if_healthy = False mute_if_healthy = _execute.make_bool(mute_if_healthy, "mute_if_healthy") if gated_grpc is None: gated_grpc = False gated_grpc = _execute.make_bool(gated_grpc, "gated_grpc") _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx) _inputs_flat = [input] _attrs = ("T", _attr_T, "device_name", device_name, "tensor_name", tensor_name, "debug_urls", debug_urls, "lower_bound", lower_bound, "upper_bound", upper_bound, "mute_if_healthy", mute_if_healthy, "gated_grpc", gated_grpc) _result = _execute.execute(b"DebugNumericSummary", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "DebugNumericSummary", _inputs_flat, _attrs, _result, name) _result, = _result return _result def _InitOpDefLibrary(op_list_proto_bytes): op_list = _op_def_pb2.OpList() op_list.ParseFromString(op_list_proto_bytes) _op_def_registry.register_op_list(op_list) op_def_lib = _op_def_library.OpDefLibrary() op_def_lib.add_op_list(op_list) return op_def_lib # op { # name: "Copy" # input_arg { # name: "input" # type_attr: "T" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # } # attr { # name: "tensor_name" # type: "string" # default_value { # s: "" # } # } # attr { # name: "debug_ops_spec" # type: "list(string)" # default_value { # list { # } # } # } # allows_uninitialized_input: true # } # op { # name: "CopyHost" # input_arg { # name: "input" # type_attr: "T" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # } # attr { # name: "tensor_name" # type: "string" # default_value { # s: "" # } # } # attr { # name: "debug_ops_spec" # type: "list(string)" # default_value { # list { # } # } # } # allows_uninitialized_input: true # } # op { # name: "DebugIdentity" # input_arg { # name: "input" # type_attr: "T" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # } # attr { # name: "device_name" # type: "string" # default_value { # s: "" # } # } # attr { # name: "tensor_name" # type: "string" # default_value { # s: "" # } # } # attr { # name: "debug_urls" # type: "list(string)" # default_value { # list { # } # } # } # attr { # name: "gated_grpc" # type: "bool" # default_value { # b: false # } # } # allows_uninitialized_input: true # } # op { # name: "DebugNanCount" # input_arg { # name: "input" # type_attr: "T" # } # output_arg { # name: "output" # type: DT_INT64 # } # attr { # name: "T" # type: "type" # } # attr { # name: "device_name" # type: "string" # default_value { # s: "" # } # } # attr { # name: "tensor_name" # type: "string" # default_value { # s: "" # } # } # attr { # name: "debug_urls" # type: "list(string)" # default_value { # list { # } # } # } # 
attr { # name: "gated_grpc" # type: "bool" # default_value { # b: false # } # } # allows_uninitialized_input: true # } # op { # name: "DebugNumericSummary" # input_arg { # name: "input" # type_attr: "T" # } # output_arg { # name: "output" # type: DT_DOUBLE # } # attr { # name: "T" # type: "type" # } # attr { # name: "device_name" # type: "string" # default_value { # s: "" # } # } # attr { # name: "tensor_name" # type: "string" # default_value { # s: "" # } # } # attr { # name: "debug_urls" # type: "list(string)" # default_value { # list { # } # } # } # attr { # name: "lower_bound" # type: "float" # default_value { # f: -inf # } # } # attr { # name: "upper_bound" # type: "float" # default_value { # f: inf # } # } # attr { # name: "mute_if_healthy" # type: "bool" # default_value { # b: false # } # } # attr { # name: "gated_grpc" # type: "bool" # default_value { # b: false # } # } # allows_uninitialized_input: true # } _op_def_lib = _InitOpDefLibrary(b"\nl\n\004Copy\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\031\n\013tensor_name\022\006string\032\002\022\000\"\"\n\016debug_ops_spec\022\014list(string)\032\002\n\000\230\001\001\np\n\010CopyHost\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\031\n\013tensor_name\022\006string\032\002\022\000\"\"\n\016debug_ops_spec\022\014list(string)\032\002\n\000\230\001\001\n\244\001\n\rDebugIdentity\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\031\n\013device_name\022\006string\032\002\022\000\"\031\n\013tensor_name\022\006string\032\002\022\000\"\036\n\ndebug_urls\022\014list(string)\032\002\n\000\"\026\n\ngated_grpc\022\004bool\032\002(\000\230\001\001\n\243\001\n\rDebugNanCount\022\n\n\005input\"\001T\032\n\n\006output\030\t\"\t\n\001T\022\004type\"\031\n\013device_name\022\006string\032\002\022\000\"\031\n\013tensor_name\022\006string\032\002\022\000\"\036\n\ndebug_urls\022\014list(string)\032\002\n\000\"\026\n\ngated_grpc\022\004bool\032\002(\000\230\001\001\n\200\002\n\023DebugNumericSummary\022\n\n\005input\"\001T\032\n\n\006output\030\002\"\t\n\001T\022\004type\"\031\n\013device_name\022\006string\032\002\022\000\"\031\n\013tensor_name\022\006string\032\002\022\000\"\036\n\ndebug_urls\022\014list(string)\032\002\n\000\"\033\n\013lower_bound\022\005float\032\005%\000\000\200\377\"\033\n\013upper_bound\022\005float\032\005%\000\000\200\177\"\033\n\017mute_if_healthy\022\004bool\032\002(\000\"\026\n\ngated_grpc\022\004bool\032\002(\000\230\001\001")
venv1/Lib/site-packages/tensorflow/python/debug/ops/gen_debug_ops.py
33,055
Copy Op. Performs CPU-to-CPU or GPU-to-GPU deep-copying of tensor, depending on the device on which the tensor is allocated. N.B.: If the all downstream attached debug ops are disabled given the current gRPC gating status, the output will simply forward the input tensor without deep-copying. See the documentation of Debug* ops for more details. Unlike the CopyHost Op, this op does not have HostMemory constraint on its input or output. Args: input: A `Tensor`. Input tensor. tensor_name: An optional `string`. Defaults to `""`. The name of the input tensor. debug_ops_spec: An optional list of `strings`. Defaults to `[]`. A list of debug op spec (op, url, gated_grpc) for attached debug ops. Each element of the list has the format <debug_op>;<grpc_url>;<gated_grpc>, wherein gated_grpc is boolean represented as 0/1. E.g., "DebugIdentity;grpc://foo:3333;1", "DebugIdentity;file:///tmp/tfdbg_1;0". name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. Output tensor, deep-copied from input. This is the slowpath function for Eager mode. This is for function copy Copy Host Op. Performs CPU-to-CPU deep-copying of tensor. N.B.: If the all downstream attached debug ops are disabled given the current gRPC gating status, the output will simply forward the input tensor without deep-copying. See the documentation of Debug* ops for more details. Unlike the Copy Op, this op has HostMemory constraint on its input or output. Args: input: A `Tensor`. Input tensor. tensor_name: An optional `string`. Defaults to `""`. The name of the input tensor. debug_ops_spec: An optional list of `strings`. Defaults to `[]`. A list of debug op spec (op, url, gated_grpc) for attached debug ops. Each element of the list has the format <debug_op>;<grpc_url>;<gated_grpc>, wherein gated_grpc is boolean represented as 0/1. E.g., "DebugIdentity;grpc://foo:3333;1", "DebugIdentity;file:///tmp/tfdbg_1;0". name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. Output tensor, deep-copied from input. This is the slowpath function for Eager mode. This is for function copy_host Debug Identity Op. Provides an identity mapping of the non-Ref type input tensor for debugging. Args: input: A `Tensor`. Input tensor, non-Reference type. device_name: An optional `string`. Defaults to `""`. tensor_name: An optional `string`. Defaults to `""`. Name of the input tensor. debug_urls: An optional list of `strings`. Defaults to `[]`. List of URLs to debug targets, e.g., file:///foo/tfdbg_dump, grpc:://localhost:11011 gated_grpc: An optional `bool`. Defaults to `False`. Whether this op will be gated. If any of the debug_urls of this debug node is of the grpc:// scheme, when the value of this attribute is set to True, the data will not actually be sent via the grpc stream unless this debug op has been enabled at the debug_url. If all of the debug_urls of this debug node are of the grpc:// scheme and the debug op is enabled at none of them, the output will be an empty Tensor. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. Output tensor that equals the input tensor. This is the slowpath function for Eager mode. This is for function debug_identity Debug NaN Value Counter Op Counts number of NaNs in the input tensor, for debugging. Args: input: A `Tensor`. Input tensor, non-Reference type. device_name: An optional `string`. Defaults to `""`. tensor_name: An optional `string`. Defaults to `""`. Name of the input tensor. 
debug_urls: An optional list of `strings`. Defaults to `[]`. List of URLs to debug targets, e.g., file:///foo/tfdbg_dump, grpc:://localhost:11011. gated_grpc: An optional `bool`. Defaults to `False`. Whether this op will be gated. If any of the debug_urls of this debug node is of the grpc:// scheme, when the value of this attribute is set to True, the data will not actually be sent via the grpc stream unless this debug op has been enabled at the debug_url. If all of the debug_urls of this debug node are of the grpc:// scheme and the debug op is enabled at none of them, the output will be an empty Tensor. name: A name for the operation (optional). Returns: A `Tensor` of type `int64`. An integer output tensor that is the number of NaNs in the input. This is the slowpath function for Eager mode. This is for function debug_nan_count Debug Numeric Summary Op. Provide a basic summary of numeric value types, range and distribution. Args: input: A `Tensor`. Input tensor, non-Reference type, float or double. device_name: An optional `string`. Defaults to `""`. tensor_name: An optional `string`. Defaults to `""`. Name of the input tensor. debug_urls: An optional list of `strings`. Defaults to `[]`. List of URLs to debug targets, e.g., file:///foo/tfdbg_dump, grpc:://localhost:11011 lower_bound: An optional `float`. Defaults to `float('-inf')`. (float) The lower bound <= which values will be included in the generalized -inf count. Default: -inf. upper_bound: An optional `float`. Defaults to `float('inf')`. (float) The upper bound >= which values will be included in the generalized +inf count. Default: +inf. mute_if_healthy: An optional `bool`. Defaults to `False`. (bool) Do not send data to the debug URLs unless at least one of elements [2], [3] and [7] (i.e., the nan count and the generalized -inf and inf counts) is non-zero. gated_grpc: An optional `bool`. Defaults to `False`. Whether this op will be gated. If any of the debug_urls of this debug node is of the grpc:// scheme, when the value of this attribute is set to True, the data will not actually be sent via the grpc stream unless this debug op has been enabled at the debug_url. If all of the debug_urls of this debug node are of the grpc:// scheme and the debug op is enabled at none of them, the output will be an empty Tensor. name: A name for the operation (optional). Returns: A `Tensor` of type `float64`. A double tensor of shape [14 + nDimensions], where nDimensions is the the number of dimensions of the tensor's shape. The elements of output are: [0]: is initialized (1.0) or not (0.0). [1]: total number of elements [2]: NaN element count [3]: generalized -inf count: elements <= lower_bound. lower_bound is -inf by default. [4]: negative element count (excluding -inf), if lower_bound is the default -inf. Otherwise, this is the count of elements > lower_bound and < 0. [5]: zero element count [6]: positive element count (excluding +inf), if upper_bound is the default -inf. Otherwise, this is the count of elements < upper_bound and > 0. [7]: generalized +inf count, elements >= upper_bound. upper_bound is +inf by default. Output elements [1:8] are all zero, if the tensor is uninitialized. [8]: minimum of all non-inf and non-NaN elements. If uninitialized or no such element exists: +inf. [9]: maximum of all non-inf and non-NaN elements. If uninitialized or no such element exists: -inf. [10]: mean of all non-inf and non-NaN elements. If uninitialized or no such element exists: NaN. [11]: variance of all non-inf and non-NaN elements. 
If uninitialized or no such element exists: NaN. [12]: Data type of the tensor encoded as an enum integer. See the DataType proto for more details. [13]: Number of dimensions of the tensor (ndims). [14+]: Sizes of the dimensions. This is the slowpath function for Eager mode. This is for function debug_numeric_summary Python wrappers around TensorFlow ops. This file is MACHINE GENERATED! Do not edit. Needed to trigger the call to _set_call_cpp_shape_fn. op { name: "Copy" input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "tensor_name" type: "string" default_value { s: "" } } attr { name: "debug_ops_spec" type: "list(string)" default_value { list { } } } allows_uninitialized_input: true } op { name: "CopyHost" input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "tensor_name" type: "string" default_value { s: "" } } attr { name: "debug_ops_spec" type: "list(string)" default_value { list { } } } allows_uninitialized_input: true } op { name: "DebugIdentity" input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "device_name" type: "string" default_value { s: "" } } attr { name: "tensor_name" type: "string" default_value { s: "" } } attr { name: "debug_urls" type: "list(string)" default_value { list { } } } attr { name: "gated_grpc" type: "bool" default_value { b: false } } allows_uninitialized_input: true } op { name: "DebugNanCount" input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type: DT_INT64 } attr { name: "T" type: "type" } attr { name: "device_name" type: "string" default_value { s: "" } } attr { name: "tensor_name" type: "string" default_value { s: "" } } attr { name: "debug_urls" type: "list(string)" default_value { list { } } } attr { name: "gated_grpc" type: "bool" default_value { b: false } } allows_uninitialized_input: true } op { name: "DebugNumericSummary" input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type: DT_DOUBLE } attr { name: "T" type: "type" } attr { name: "device_name" type: "string" default_value { s: "" } } attr { name: "tensor_name" type: "string" default_value { s: "" } } attr { name: "debug_urls" type: "list(string)" default_value { list { } } } attr { name: "lower_bound" type: "float" default_value { f: -inf } } attr { name: "upper_bound" type: "float" default_value { f: inf } } attr { name: "mute_if_healthy" type: "bool" default_value { b: false } } attr { name: "gated_grpc" type: "bool" default_value { b: false } } allows_uninitialized_input: true }
10,920
en
0.569576
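The DebugNumericSummary docstring above pins down the layout of its flat float64 output: fourteen fixed slots followed by the sizes of the tensor's dimensions. As a reading aid, here is a small hypothetical helper, not part of the generated module, that unpacks such a vector into named fields; the function and field names are illustrative only.

# Hypothetical helper: unpack a DebugNumericSummary output vector into a
# dict, following the element layout documented in the docstring above.
_SUMMARY_FIELDS = [
    "is_initialized", "element_count", "nan_count", "neg_inf_count",
    "neg_count", "zero_count", "pos_count", "pos_inf_count",
    "min", "max", "mean", "variance", "dtype_enum", "ndims",
]

def unpack_numeric_summary(summary):
    """summary: a sequence of floats of length 14 + ndims."""
    info = {name: summary[i] for i, name in enumerate(_SUMMARY_FIELDS)}
    info["shape"] = [int(d) for d in summary[14:14 + int(info["ndims"])]]
    return info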
from dagster import AssetKey, DagsterEventType, EventRecordsFilter, check, seven from .utils import capture_error def _normalize_asset_cursor_str(cursor_string): # the cursor for assets is derived from a json serialized string of the path. Because there are # json serialization differences between JS and Python in its treatment of whitespace, we should # take extra precaution here and do a deserialization/serialization pass if not cursor_string: return cursor_string try: return seven.json.dumps(seven.json.loads(cursor_string)) except seven.JSONDecodeError: return cursor_string @capture_error def get_assets(graphene_info, prefix=None, cursor=None, limit=None): from ..schema.pipelines.pipeline import GrapheneAsset from ..schema.roots.assets import GrapheneAssetConnection instance = graphene_info.context.instance normalized_cursor_str = _normalize_asset_cursor_str(cursor) materialized_keys = instance.get_asset_keys( prefix=prefix, limit=limit, cursor=normalized_cursor_str ) asset_nodes_by_asset_key = { asset_key: asset_node for asset_key, asset_node in get_asset_nodes_by_asset_key(graphene_info).items() if (not prefix or asset_key.path[: len(prefix)] == prefix) and (not cursor or asset_key.to_string() > cursor) } asset_keys = sorted(set(materialized_keys).union(asset_nodes_by_asset_key.keys()), key=str) if limit: asset_keys = asset_keys[:limit] return GrapheneAssetConnection( nodes=[ GrapheneAsset( key=asset_key, definition=asset_nodes_by_asset_key.get(asset_key), ) for asset_key in asset_keys ] ) def get_asset_nodes_by_asset_key(graphene_info): from ..schema.asset_graph import GrapheneAssetNode return { external_asset_node.asset_key: GrapheneAssetNode(repository, external_asset_node) for location in graphene_info.context.repository_locations for repository in location.get_repositories().values() for external_asset_node in repository.get_external_asset_nodes() } def get_asset_nodes(graphene_info): from ..schema.asset_graph import GrapheneAssetNode return [ GrapheneAssetNode(repository, external_asset_node) for location in graphene_info.context.repository_locations for repository in location.get_repositories().values() for external_asset_node in repository.get_external_asset_nodes() ] def get_asset_node(graphene_info, asset_key): from ..schema.errors import GrapheneAssetNotFoundError check.inst_param(asset_key, "asset_key", AssetKey) node = next((n for n in get_asset_nodes(graphene_info) if n.assetKey == asset_key), None) if not node: return GrapheneAssetNotFoundError(asset_key=asset_key) return node def get_asset(graphene_info, asset_key): from ..schema.errors import GrapheneAssetNotFoundError from ..schema.pipelines.pipeline import GrapheneAsset check.inst_param(asset_key, "asset_key", AssetKey) instance = graphene_info.context.instance asset_nodes_by_asset_key = get_asset_nodes_by_asset_key(graphene_info) asset_node = asset_nodes_by_asset_key.get(asset_key) if not asset_node and not instance.has_asset_key(asset_key): return GrapheneAssetNotFoundError(asset_key=asset_key) return GrapheneAsset(key=asset_key, definition=asset_node) def get_asset_events(graphene_info, asset_key, partitions=None, limit=None, before_timestamp=None): check.inst_param(asset_key, "asset_key", AssetKey) check.opt_int_param(limit, "limit") check.opt_float_param(before_timestamp, "before_timestamp") instance = graphene_info.context.instance event_records = instance.get_event_records( EventRecordsFilter( event_type=DagsterEventType.ASSET_MATERIALIZATION, asset_key=asset_key, asset_partitions=partitions, 
before_timestamp=before_timestamp, ), limit=limit, ) return [event_record.event_log_entry for event_record in event_records] def get_asset_run_ids(graphene_info, asset_key): check.inst_param(asset_key, "asset_key", AssetKey) instance = graphene_info.context.instance return instance.run_ids_for_asset_key(asset_key) def get_assets_for_run_id(graphene_info, run_id): from ..schema.pipelines.pipeline import GrapheneAsset check.str_param(run_id, "run_id") records = graphene_info.context.instance.all_logs(run_id) asset_keys = [ record.dagster_event.asset_key for record in records if record.is_dagster_event and record.dagster_event.asset_key ] return [GrapheneAsset(key=asset_key) for asset_key in asset_keys]
python_modules/dagster-graphql/dagster_graphql/implementation/fetch_assets.py
4,827
the cursor for assets is derived from a json serialized string of the path. Because there are json serialization differences between JS and Python in its treatment of whitespace, we should take extra precaution here and do a deserialization/serialization pass
260
en
0.930657
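_normalize_asset_cursor_str exists because JavaScript's JSON.stringify and Python's json.dumps disagree on whitespace, so two cursors naming the same asset key path can differ as strings. A loads/dumps round trip maps both onto Python's canonical form before they are compared against stored keys. A minimal illustration with the standard json module (the real code goes through Dagster's seven shim, and the cursor values here are made up):

import json

js_cursor = '["my_prefix","my_asset"]'    # JS style: no space after the comma
py_cursor = '["my_prefix", "my_asset"]'   # Python style: space after the comma

def normalize(cursor):
    return json.dumps(json.loads(cursor))

assert normalize(js_cursor) == normalize(py_cursor)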
"""This uses the CLUE as a Bluetooth LE sensor node.""" # Adafruit Service demo for Adafruit CLUE board. # Accessible via Adafruit Bluefruit Playground app and Web Bluetooth Dashboard. import time import board from digitalio import DigitalInOut import neopixel_write from adafruit_ble import BLERadio import ulab from adafruit_clue import clue from adafruit_ble_adafruit.adafruit_service import AdafruitServerAdvertisement from adafruit_ble_adafruit.accelerometer_service import AccelerometerService from adafruit_ble_adafruit.addressable_pixel_service import AddressablePixelService from adafruit_ble_adafruit.barometric_pressure_service import BarometricPressureService from adafruit_ble_adafruit.button_service import ButtonService from adafruit_ble_adafruit.humidity_service import HumidityService from adafruit_ble_adafruit.light_sensor_service import LightSensorService from adafruit_ble_adafruit.microphone_service import MicrophoneService from adafruit_ble_adafruit.temperature_service import TemperatureService from adafruit_ble_adafruit.tone_service import ToneService accel_svc = AccelerometerService() accel_svc.measurement_period = 100 accel_last_update = 0 # CLUE has just one board pixel. 3 RGB bytes * 1 pixel. NEOPIXEL_BUF_LENGTH = 3 * 1 neopixel_svc = AddressablePixelService() neopixel_buf = bytearray(NEOPIXEL_BUF_LENGTH) # Take over NeoPixel control from clue. clue._pixel.deinit() # pylint: disable=protected-access neopixel_out = DigitalInOut(board.NEOPIXEL) neopixel_out.switch_to_output() baro_svc = BarometricPressureService() baro_svc.measurement_period = 100 baro_last_update = 0 button_svc = ButtonService() button_svc.set_pressed(False, clue.button_a, clue.button_b) humidity_svc = HumidityService() humidity_svc.measurement_period = 100 humidity_last_update = 0 light_svc = LightSensorService() light_svc.measurement_period = 100 light_last_update = 0 # Send 256 16-bit samples at a time. MIC_NUM_SAMPLES = 256 mic_svc = MicrophoneService() mic_svc.number_of_channels = 1 mic_svc.measurement_period = 100 mic_last_update = 0 mic_samples = ulab.zeros(MIC_NUM_SAMPLES, dtype=ulab.uint16) temp_svc = TemperatureService() temp_svc.measurement_period = 100 temp_last_update = 0 tone_svc = ToneService() clue_display = clue.simple_text_display(text_scale=3, colors=(clue.WHITE,)) clue_display[0].text = "Temperature &" clue_display[1].text = "Humidity" clue_display[3].text = "Temp: {:.1f} C".format(clue.temperature) clue_display[5].text = "Humi: {:.1f} %".format(clue.humidity) ble = BLERadio() # The Web Bluetooth dashboard identifies known boards by their # advertised name, not by advertising manufacturer data. ble.name = "Attic" # The Bluefruit Playground app looks in the manufacturer data # in the advertisement. That data uses the USB PID as a unique ID. # Adafruit CLUE USB PID: # Arduino: 0x8071, CircuitPython: 0x8072, app supports either adv = AdafruitServerAdvertisement() adv.pid = 0x8072 while True: # Advertise when not connected. 
ble.start_advertising(adv) while not ble.connected: pass ble.stop_advertising() while ble.connected: now_msecs = time.monotonic_ns() // 1000000 # pylint: disable=no-member if now_msecs - accel_last_update >= accel_svc.measurement_period: accel_svc.acceleration = clue.acceleration accel_last_update = now_msecs if now_msecs - baro_last_update >= baro_svc.measurement_period: baro_svc.pressure = clue.pressure baro_last_update = now_msecs button_svc.set_pressed(False, clue.button_a, clue.button_b) if now_msecs - humidity_last_update >= humidity_svc.measurement_period: humidity_svc.humidity = clue.humidity humidity_last_update = now_msecs clue_display[5].text = "Humi: {:.1f} %".format(clue.humidity) print("Humi: {:.1f} %".format(clue.humidity)) if now_msecs - light_last_update >= light_svc.measurement_period: # Return "clear" color value from color sensor. light_svc.light_level = clue.color[3] light_last_update = now_msecs if now_msecs - mic_last_update >= mic_svc.measurement_period: clue._mic.record( # pylint: disable=protected-access mic_samples, len(mic_samples) ) # This subtraction yields unsigned values which are # reinterpreted as signed after passing. mic_svc.sound_samples = mic_samples - 32768 mic_last_update = now_msecs neopixel_values = neopixel_svc.values if neopixel_values is not None: start = neopixel_values.start if start > NEOPIXEL_BUF_LENGTH: continue data = neopixel_values.data data_len = min(len(data), NEOPIXEL_BUF_LENGTH - start) neopixel_buf[start : start + data_len] = data[:data_len] if neopixel_values.write_now: neopixel_write.neopixel_write(neopixel_out, neopixel_buf) if now_msecs - temp_last_update >= temp_svc.measurement_period: temp_svc.temperature = clue.temperature temp_last_update = now_msecs clue_display[3].text = "Temp: {:.1f} C".format(clue.temperature) print("Temp: {:.1f} C".format(clue.temperature)) tone = tone_svc.tone if tone is not None: freq, duration_msecs = tone if freq != 0: if duration_msecs != 0: # Note that this blocks. Alternatively we could # use now_msecs to time a tone in a non-blocking # way, but then the other updates might make the # tone interval less consistent. 
clue.play_tone(freq, duration_msecs / 1000) else: clue.stop_tone() clue.start_tone(freq) else: clue.stop_tone() last_tone = tone clue_display.show() time.sleep(5) # import time # from adafruit_clue import clue # import adafruit_ble_broadcastnet # print("This is BroadcastNet CLUE sensor:", adafruit_ble_broadcastnet.device_address) # while True: # measurement = adafruit_ble_broadcastnet.AdafruitSensorMeasurement() # measurement.temperature = clue.temperature # measurement.pressure = clue.pressure # measurement.relative_humidity = clue.humidity # measurement.acceleration = clue.acceleration # measurement.magnetic = clue.magnetic # print(measurement) # adafruit_ble_broadcastnet.broadcast(measurement) # time.sleep(5) # """This uses the CLUE as a Bluetooth LE sensor node.""" # import time # from adafruit_clue import clue # from adafruit_ble import BLERadio # from adafruit_ble.advertising.standard import ProvideServicesAdvertisement # from adafruit_ble.services.nordic import UARTService # ble = BLERadio() # ble.name = "patio" # uart_server = UARTService() # advertisement = ProvideServicesAdvertisement(uart_server) # while True: # # measurement = adafruit_ble.advertising.AdafruitSensorMeasurement() # # measurement.temperature = clue.temperature # # measurement.pressure = clue.pressure # # measurement.relative_humidity = clue.humidity # # measurement.acceleration = clue.acceleration # # measurement.magnetic = clue.magnetic # print("{},{},{}\n".format(clue.temperature-5,clue.humidity,clue.pressure)) # # Advertise when not connected. # ble.start_advertising(advertisement) # print(advertisement) # while not ble.connected: # pass # ble.stop_advertising() # while ble.connected: # print("{},{},{}\n".format(clue.temperature-5,clue.humidity,clue.pressure)) # uart_server.write("{},{},{}\n".format(clue.temperature-5,clue.humidity,clue.pressure)) # time.sleep(15) # #time.sleep(1)
clue/temperature/code.py
7,938
This uses the CLUE as a Bluetooth LE sensor node. Adafruit Service demo for Adafruit CLUE board. Accessible via Adafruit Bluefruit Playground app and Web Bluetooth Dashboard. CLUE has just one board pixel. 3 RGB bytes * 1 pixel. Take over NeoPixel control from clue. pylint: disable=protected-access Send 256 16-bit samples at a time. The Web Bluetooth dashboard identifies known boards by their advertised name, not by advertising manufacturer data. The Bluefruit Playground app looks in the manufacturer data in the advertisement. That data uses the USB PID as a unique ID. Adafruit CLUE USB PID: Arduino: 0x8071, CircuitPython: 0x8072, app supports either Advertise when not connected. pylint: disable=no-member Return "clear" color value from color sensor. pylint: disable=protected-access This subtraction yields unsigned values which are reinterpreted as signed after passing. Note that this blocks. Alternatively we could use now_msecs to time a tone in a non-blocking way, but then the other updates might make the tone interval less consistent. import time from adafruit_clue import clue import adafruit_ble_broadcastnet print("This is BroadcastNet CLUE sensor:", adafruit_ble_broadcastnet.device_address) while True: measurement = adafruit_ble_broadcastnet.AdafruitSensorMeasurement() measurement.temperature = clue.temperature measurement.pressure = clue.pressure measurement.relative_humidity = clue.humidity measurement.acceleration = clue.acceleration measurement.magnetic = clue.magnetic print(measurement) adafruit_ble_broadcastnet.broadcast(measurement) time.sleep(5) """This uses the CLUE as a Bluetooth LE sensor node.""" import time from adafruit_clue import clue from adafruit_ble import BLERadio from adafruit_ble.advertising.standard import ProvideServicesAdvertisement from adafruit_ble.services.nordic import UARTService ble = BLERadio() ble.name = "patio" uart_server = UARTService() advertisement = ProvideServicesAdvertisement(uart_server) while True: measurement = adafruit_ble.advertising.AdafruitSensorMeasurement() measurement.temperature = clue.temperature measurement.pressure = clue.pressure measurement.relative_humidity = clue.humidity measurement.acceleration = clue.acceleration measurement.magnetic = clue.magnetic print("{},{},{}\n".format(clue.temperature-5,clue.humidity,clue.pressure)) Advertise when not connected. ble.start_advertising(advertisement) print(advertisement) while not ble.connected: pass ble.stop_advertising() while ble.connected: print("{},{},{}\n".format(clue.temperature-5,clue.humidity,clue.pressure)) uart_server.write("{},{},{}\n".format(clue.temperature-5,clue.humidity,clue.pressure)) time.sleep(15) time.sleep(1)
2,816
en
0.722182
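The connected loop above refreshes each BLE service only when that service's own measurement_period has elapsed, keyed off time.monotonic_ns(), rather than blocking on each sensor in turn. A stripped-down sketch of that timing pattern; the names and the stop condition are illustrative, not part of the demo:

import time

MEASUREMENT_PERIOD_MS = 100   # matches the 100 ms periods set in the demo
last_update = 0
readings = 0

while readings < 5:
    now_msecs = time.monotonic_ns() // 1000000
    if now_msecs - last_update >= MEASUREMENT_PERIOD_MS:
        readings += 1          # stand-in for "read the sensor, publish the value"
        last_update = now_msecs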
# Copyright (c) 2013-2014 Will Thames <will@thames.id.au> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. """Main ansible-lint package.""" from ansiblelint.rules import AnsibleLintRule from ansiblelint.version import __version__ __all__ = ( "__version__", "AnsibleLintRule" # deprecated, import it directly from rules )
lib/ansiblelint/__init__.py
1,341
Main ansible-lint package. Copyright (c) 2013-2014 Will Thames <will@thames.id.au> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. deprecated, import it directly from rules
1,147
en
0.861929
""" Quick Sort ---------- Uses partitioning to recursively divide and sort the list Time Complexity: O(n**2) worst case Space Complexity: O(n**2) this version Stable: No Psuedo Code: CLRS. Introduction to Algorithms. 3rd ed. """ count = 0 def sort(seq): """ Takes a list of integers and sorts them in ascending order. This sorted list is then returned. :param seq: A list of integers :rtype: A list of sorted integers """ global count if len(seq) <= 1: return seq else: pivot = seq[0] left, right = [], [] for x in seq[1:]: count += 1 if x < pivot: left.append(x) else: right.append(x) return sort(left) + [pivot] + sort(right) if __name__ == '__main__': # print sort([9,8,7,6,5,4,3,2,1,0]) print sort([1,2,3,4,5,6,7,8,9,10]) print count
algorithms/sorting/quick_sort.py
930
print sort([9,8,7,6,5,4,3,2,1,0])
33
en
0.402043
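Because sort() above always takes the first element as the pivot, an already-sorted input (like the list in the active print statement) is a worst case: every partition puts all remaining elements on one side, so the comparison counter ends at n(n-1)/2, i.e. 45 for ten elements. A quick check of that claim, written with Python 3 print() (the file itself uses Python 2 print statements) and with the counter passed explicitly instead of kept in a global:

def sort_counting(seq, count=0):
    # Same first-element-pivot partitioning as above; counts one
    # comparison per element of the tail, like the original loop does.
    if len(seq) <= 1:
        return seq, count
    pivot, rest = seq[0], seq[1:]
    count += len(rest)
    left = [x for x in rest if x < pivot]
    right = [x for x in rest if x >= pivot]
    left, count = sort_counting(left, count)
    right, count = sort_counting(right, count)
    return left + [pivot] + right, count

print(sort_counting(list(range(1, 11))))   # ([1, ..., 10], 45)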
import logging from datetime import datetime from pprint import pprint as pp import click from flask.cli import with_appcontext from scout.load import load_exons from scout.server.extensions import store from scout.utils.handle import get_file_handle from scout.utils.scout_requests import fetch_ensembl_exons LOG = logging.getLogger(__name__) @click.command("exons", short_help="Load exons") @click.option( "-e", "--exons-file", type=click.Path(exists=True), help="Path to file with ensembl exons", ) @click.option("-b", "--build", type=click.Choice(["37", "38"]), default="37", show_default=True) @with_appcontext def exons(build, exons_file): """Load exons into the scout database. If no file, fetch exons from ensembl biomart""" adapter = store LOG.info("Running scout load exons") start = datetime.now() # Test if there are any exons loaded existing_exon = adapter.exon(build=build) if existing_exon: LOG.warning("Dropping all exons ") adapter.drop_exons(build=build) LOG.info("Exons dropped") # Load the exons nr_exons = 0 if exons_file: ensembl_exons = get_file_handle(exons_file) for nr_exons, line in enumerate(ensembl_exons, 1): pass ensembl_exons = get_file_handle(exons_file) else: ensembl_exons = fetch_ensembl_exons(build=build) nr_exons = 1360000 try: load_exons(adapter, ensembl_exons, build, nr_exons=nr_exons) except Exception as err: LOG.warning("Something went wrong with ensembl biomart") # LOG.info("Try to fetch one chromosome at the time") LOG.info("Please download a mart dump manually, see instructions in user guide for admins") return LOG.info("Time to load exons: {0}".format(datetime.now() - start))
scout/commands/load/exons.py
1,827
Load exons into the scout database. If no file, fetch exons from ensembl biomart Test if there are any exons loaded Load the exons LOG.info("Try to fetch one chromosome at the time")
184
en
0.711816
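When an exons file is supplied, the command above reads it twice: a first pass only counts lines so that load_exons can be told nr_exons up front (presumably for progress reporting), then the handle is recreated for the actual load. A generic sketch of that two-pass pattern with the standard library; get_file_handle and load_exons are Scout-specific, so plain open() and a placeholder loop stand in here:

def count_records(path):
    # First pass: count the records so the loader can report progress.
    with open(path) as handle:
        return sum(1 for _ in handle)

def load_records(path):
    nr_records = count_records(path)
    with open(path) as handle:      # second pass: stream the records
        for i, line in enumerate(handle, 1):
            pass                    # parse/insert record i of nr_records
    return nr_records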
import os import sys as _sys import platform import re PY2 = _sys.version_info < (3,) PY3 = not PY2 RE_NUM = re.compile(r'(\d+).+') if not PY2: # these were moved around for Python 3 from urllib.parse import (quote as url_quote, unquote as url_unquote, urlencode) # Python 3 does not have basestring anymore; we include # *only* the str here as this is used for textual data. basestring = (str,) # for assertions that the data is either encoded or non-encoded text str_or_bytes = (str, bytes) # xrange is gone, replace it with range xrange = range # the unicode type is str unicode_type = str def dictkeys(dct): """ Returns a list of keys of dictionary dict.keys returns a view that works like .keys in Python 2 *except* any modifications in the dictionary will be visible (and will cause errors if the view is being iterated over while it is modified). """ return list(dct.keys()) def dictvalues(dct): """ Returns a list of values of a dictionary dict.values returns a view that works like .values in Python 2 *except* any modifications in the dictionary will be visible (and will cause errors if the view is being iterated over while it is modified). """ return list(dct.values()) def dict_iteritems(dct): """ Returns an iterator of items (key/value pairs) of a dictionary dict.items returns a view that works like .items in Python 2 *except* any modifications in the dictionary will be visible (and will cause errors if the view is being iterated over while it is modified). """ return dct.items() def dict_itervalues(dct): """ :param dict dct: :returns: an iterator of the values of a dictionary """ return dct.values() def byte(*args): """ This is the same as Python 2 `chr(n)` for bytes in Python 3 Returns a single byte `bytes` for the given int argument (we optimize it a bit here by passing the positional argument tuple directly to the bytes constructor. """ return bytes(args) class long(int): """ A marker class that signifies that the integer value should be serialized as `l` instead of `I` """ def __repr__(self): return str(self) + 'L' def canonical_str(value): """ Return the canonical str value for the string. In both Python 3 and Python 2 this is str. """ return str(value) def is_integer(value): return isinstance(value, int) else: from urllib import quote as url_quote, unquote as url_unquote, urlencode basestring = basestring str_or_bytes = basestring xrange = xrange unicode_type = unicode dictkeys = dict.keys dictvalues = dict.values dict_iteritems = dict.iteritems dict_itervalues = dict.itervalues byte = chr long = long def canonical_str(value): """ Returns the canonical string value of the given string. In Python 2 this is the value unchanged if it is an str, otherwise it is the unicode value encoded as UTF-8. """ try: return str(value) except UnicodeEncodeError: return str(value.encode('utf-8')) def is_integer(value): return isinstance(value, (int, long)) def as_bytes(value): if not isinstance(value, bytes): return value.encode('UTF-8') return value def to_digit(value): if value.isdigit(): return int(value) match = RE_NUM.match(value) return int(match.groups()[0]) if match else 0 def get_linux_version(release_str): ver_str = release_str.split('-')[0] return tuple(map(to_digit, ver_str.split('.')[:3])) HAVE_SIGNAL = os.name == 'posix' EINTR_IS_EXPOSED = _sys.version_info[:2] <= (3, 4) LINUX_VERSION = None if platform.system() == 'Linux': LINUX_VERSION = get_linux_version(platform.release())
pika/compat.py
4,157
A marker class that signifies that the integer value should be serialized as `l` instead of `I` This is the same as Python 2 `chr(n)` for bytes in Python 3 Returns a single byte `bytes` for the given int argument (we optimize it a bit here by passing the positional argument tuple directly to the bytes constructor. Return the canonical str value for the string. In both Python 3 and Python 2 this is str. Returns the canonical string value of the given string. In Python 2 this is the value unchanged if it is an str, otherwise it is the unicode value encoded as UTF-8. Returns an iterator of items (key/value pairs) of a dictionary dict.items returns a view that works like .items in Python 2 *except* any modifications in the dictionary will be visible (and will cause errors if the view is being iterated over while it is modified). :param dict dct: :returns: an iterator of the values of a dictionary Returns a list of keys of dictionary dict.keys returns a view that works like .keys in Python 2 *except* any modifications in the dictionary will be visible (and will cause errors if the view is being iterated over while it is modified). Returns a list of values of a dictionary dict.values returns a view that works like .values in Python 2 *except* any modifications in the dictionary will be visible (and will cause errors if the view is being iterated over while it is modified). these were moved around for Python 3 Python 3 does not have basestring anymore; we include *only* the str here as this is used for textual data. for assertions that the data is either encoded or non-encoded text xrange is gone, replace it with range the unicode type is str
1,669
en
0.81202
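On Python 3 the shims above simply wrap the built-ins, so dictkeys() returns a real list, byte() builds a one-byte bytes object, and canonical_str() always yields the native str type. A minimal usage sketch, assuming the pika package is installed so that pika.compat is importable:

# Sketch only: exercising a few of the compat helpers on Python 3.
from pika import compat

d = {"x": 1, "y": 2}
print(compat.dictkeys(d))        # ['x', 'y'] -- a plain list, not a dict view
print(compat.byte(65))           # b'A' -- Python 2 style chr() for bytes
print(compat.canonical_str(42))  # '42'
print(compat.as_bytes("hi"))     # b'hi' -- text is UTF-8 encoded on demand
print(compat.is_integer(3))      # True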
# coding: utf-8 """ Qc API Qc API # noqa: E501 The version of the OpenAPI document: 3.0.0 Contact: cloudsupport@telestream.net Generated by: https://openapi-generator.tech """ import pprint import re # noqa: F401 import six from telestream_cloud_qc.configuration import Configuration class ExtendedBoolValueTest(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'value': 'ExtendedBool', 'reject_on_error': 'bool', 'checked': 'bool' } attribute_map = { 'value': 'value', 'reject_on_error': 'reject_on_error', 'checked': 'checked' } def __init__(self, value=None, reject_on_error=None, checked=None, local_vars_configuration=None): # noqa: E501 """ExtendedBoolValueTest - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._value = None self._reject_on_error = None self._checked = None self.discriminator = None if value is not None: self.value = value if reject_on_error is not None: self.reject_on_error = reject_on_error if checked is not None: self.checked = checked @property def value(self): """Gets the value of this ExtendedBoolValueTest. # noqa: E501 :return: The value of this ExtendedBoolValueTest. # noqa: E501 :rtype: ExtendedBool """ return self._value @value.setter def value(self, value): """Sets the value of this ExtendedBoolValueTest. :param value: The value of this ExtendedBoolValueTest. # noqa: E501 :type: ExtendedBool """ self._value = value @property def reject_on_error(self): """Gets the reject_on_error of this ExtendedBoolValueTest. # noqa: E501 :return: The reject_on_error of this ExtendedBoolValueTest. # noqa: E501 :rtype: bool """ return self._reject_on_error @reject_on_error.setter def reject_on_error(self, reject_on_error): """Sets the reject_on_error of this ExtendedBoolValueTest. :param reject_on_error: The reject_on_error of this ExtendedBoolValueTest. # noqa: E501 :type: bool """ self._reject_on_error = reject_on_error @property def checked(self): """Gets the checked of this ExtendedBoolValueTest. # noqa: E501 :return: The checked of this ExtendedBoolValueTest. # noqa: E501 :rtype: bool """ return self._checked @checked.setter def checked(self, checked): """Sets the checked of this ExtendedBoolValueTest. :param checked: The checked of this ExtendedBoolValueTest. 
# noqa: E501 :type: bool """ self._checked = checked def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, ExtendedBoolValueTest): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, ExtendedBoolValueTest): return True return self.to_dict() != other.to_dict()
telestream_cloud_qc_sdk/telestream_cloud_qc/models/extended_bool_value_test.py
4,831
NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. Returns true if both objects are equal ExtendedBoolValueTest - a model defined in OpenAPI Returns true if both objects are not equal For `print` and `pprint` Gets the checked of this ExtendedBoolValueTest. # noqa: E501 :return: The checked of this ExtendedBoolValueTest. # noqa: E501 :rtype: bool Sets the checked of this ExtendedBoolValueTest. :param checked: The checked of this ExtendedBoolValueTest. # noqa: E501 :type: bool Gets the reject_on_error of this ExtendedBoolValueTest. # noqa: E501 :return: The reject_on_error of this ExtendedBoolValueTest. # noqa: E501 :rtype: bool Sets the reject_on_error of this ExtendedBoolValueTest. :param reject_on_error: The reject_on_error of this ExtendedBoolValueTest. # noqa: E501 :type: bool Returns the model properties as a dict Returns the string representation of the model Gets the value of this ExtendedBoolValueTest. # noqa: E501 :return: The value of this ExtendedBoolValueTest. # noqa: E501 :rtype: ExtendedBool Sets the value of this ExtendedBoolValueTest. :param value: The value of this ExtendedBoolValueTest. # noqa: E501 :type: ExtendedBool Qc API Qc API # noqa: E501 The version of the OpenAPI document: 3.0.0 Contact: cloudsupport@telestream.net Generated by: https://openapi-generator.tech coding: utf-8 noqa: F401 noqa: E501 noqa: E501
1,453
en
0.457719
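The generated model exposes a plain keyword constructor plus to_dict() and equality helpers. A hypothetical usage sketch; the import path is inferred from the file location above and assumes the telestream_cloud_qc package is installed:

# Hypothetical usage of the generated model; unset fields stay None in to_dict().
from telestream_cloud_qc.models.extended_bool_value_test import ExtendedBoolValueTest

test = ExtendedBoolValueTest(reject_on_error=True, checked=False)
print(test.to_dict())  # {'value': None, 'reject_on_error': True, 'checked': False}
print(test == ExtendedBoolValueTest(reject_on_error=True, checked=False))  # True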
# Everything we've seen to this point has been a problem known as regression in # which we're trying to predict an actual numeric value for each observation of # N input numeric values. A more common problem is that of classification - # predicting a single binary occurance, class or label for each input. The # example we'll explore now is attempting to predict for every passenger aboard # the Titanic, if they survived or not. Clearly, this is not a numeric value, # but a boolean one: True (survived) or False (didn't survive) # # A different way to think about classification is in terms closer to regression # where instead of approximating an output value for each input, we're # learning a threshold line in the function where values below these threshold # doesn't belong to a class, and values above it do. # # The weights of an output unit determine the logical expression for the # corresponding input, while the bias acts as the threshold (axon hillock) that # must be surpassed in order for the unit to activate. So the bias basically # describe the excitability of the unit, or how likely it is to fire. While the # weights are the effect of the individual inputs. Mathematically: # # y = w * x + b >= 0 => w * x >= -b # # That means that in order for the output of the unit to be greater than 1 we # need w * x to be greater than the negative of the bias. Remember that in # classification the input x is a binary 0 or 1, so we have two cases: # # x = 0: w * 0 >= -b = 0 >= -b # x = 1: w * 1 >= -b = w >= -b # # So basically, the bias describes two properties: (a) the default activation of # the unit, whether it should fire or not on zero input (x = 0). And (b) how big # should the weights be to excite or inhibit that default activation for a non- # zero input (x = 1). A positive bias (1) will fire unless there are enough # negative weights (where the input is 1) to inhibit it, while a negative bias # (-1) will not fire unless there are enough positive weights to excite it. With # these two variables, we can describe any single-argument boolean function: # # w b y >= -b f # ================================= # 0 1 0 * x >= -1 T # 0 -1 0 * x >= 1 F # 1 -1 1 * x >= 1 x F (when x=F) or T (x=T) # identify # -1 0 -1 * x >= 0 !x F (when x=T) or T (x=F) # negation # # When we add arguments, we can support more boolean operations like AND and OR. # Lets start with AND: we will need the sum of a subgroup of the weights exceed # the negative bias: # # w1 w2 b y >= -b f # ================================== # 1 1 -2 x1 + x2 >= 2 x1 AND x2 # -1 1 -1 -x1 + x2 >= 1 !x1 AND x2 # 1 -1 -1 x1 - x2 >= 1 x1 AND !x2 # -1 -1 0 -x1 - x2 >= 0 !x1 AND !x2 # # It's possible to have other weights, but there's a subgroup of the weights # where each isn't big enough to exceed -b by itself, but their sum does. All # of these weights needs to be activated (by an input of 1) in order for the sum # to be greater than -b. # # Now for the OR. Because we might have several such subgroups that satisfy the # relationship above, each subgroup can, by itself, exceed -b. Thus there's an # OR operator between these subgroups: # # w1 w2 w3 b y >= -b f # ============================================== # 1 1 2 -2 x1 + x2 + 2*x3 >= 2 ( x1 AND x2) OR ( x3) # -1 1 -2 -1 -x1 + x2 - 2*x3 >= 1 (!x1 AND x2) OR (!x3) # # We end up with function structures like: # # f = (x1 AND x2 ...) OR ( x2 AND x3 ...) ... # f = (x1 AND !x2 ...) OR (!x2 AND x3 ...) ... # f = (x1 AND x2 ...) OR ( x3 AND x4 ...) ... 
# ^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^ # subgroup 1 subgroup 2 # # Where the OR separates all subgroups of the weights that has a sum greater # than -b, while the AND separates the individual weights within each such # group. # # NOTE that each input is always used with the same sign across all subgroups, # either identity or negation - never both. Our model can only approximate # linear boolean functions which are ones where each input always contributes # the same amount towards the same output: T or F. If one argument is more # likely to make the output true, it must be the case that regardless of all # other arguments, it will continue to make the output similarily likely to be # true (or false). It cannot be the case where one of the inputs is sometimes # used as an identity and other times is negated. For example, these boolean # functions aren't linear and thus cannot be approximated by this model: # # (x1 AND !x2) OR (!x1 AND x2) # exclusive-or (XOR) # (x1 AND x2) OR (!x1 AND !x2) # Equivalence # # This is because it's impossible to choose a weight for the input that's both # negative and positive. We need to pick one. So either that input makes the # output bigger, or smaller, or neither - but not conditionally both. NOTE that # this is a weak definition of linearity in boolean function, and is possibly # wrong. I couldn't easily wrap my head around it, so perhaps the wikipedia # entry[1] on it will help. # # [1] https://en.wikipedia.org/wiki/Linearity#Boolean_functions import numpy as np np.random.seed(1) EPOCHS = 300 ALPHA = 0.01 # Our 1-dimensional input is the sex of the passenger: m (male) or f (female) # Our output is a number, either 1 (survived) or 0 (didn't survive) X = ["f", "m", "f", "m", "f", "m", "f", "m", "f", "m", "f", "m", "f", "m"] T = [ 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0 ] # One of the main issues to take care of is encoding: how do we transform these # textual categories into numeric inputs that we can estimate. One naive # approach might be to use a single input feature, say a value of 0 represents a # male, and 1 represents a female. That wouldn't work, because any kind of # weight we'll use will end up increasing for females. Thus we have no way to # find different weights for the different categories. This is not necessarily # correct for ordinal values like age or fare cost, but it's still common to # learn these weights independently by grouping multiple numeric values into a # discrete set of categories ("young", "old" for age; "cheap", "expansive" for # fare cost). The same limitation obviously applied if we use more values with # binary encoding. # # The best known approach currently is one-hot (or one-of-k) in which each value # is assigned a completely different input. If we have k values, we'll use # k input neurons (one for male and the other for female) in which only one # neuron can be lit (value of 1) for any given training case. If we have # multiple categories we can concatenate multiple such one-of-k's as needed as # that maintains the fact that each value is assign a separate input and weight. N = len(set(X)) # 1 per unique value # encode the input data strings into a list of one-of-k's. We want to return a # list of numbers, where all are set zeros, but only one is to set to one. That # should be applied to each feature - one for value. More features would require # a concatenation of such one-of-k's def one_of_k(v): x = np.zeros(N) idx = ["m", "f"].index(v) x[idx] = 1. 
return x X = np.array([one_of_k(x) for x in X]) w = np.random.randn(N + 1) * 0.01 # start with small random weights data = zip(X, T) for i in xrange(EPOCHS): np.random.shuffle(data) e = 0 # we will now also compute the accuracy as a count of how many instances in # the data were predicted correctly. This is a more quantitive way of # representing the correctness of the prediction as opposed to an arbitrary # error function accuracy = 0 # mini-batches for x, t in data: # predict x = np.append(x, 1.) # add the fixed bias. y = sum(w * x) # error & derivatives e += (y - t) ** 2 / 2 dy = (y - t) dw = dy * x # update w += ALPHA * -dw # mini-batch update # did we predict correctly? We need to transform the output number # into a boolean prediction: whether the label should be turned on # or off. For this example, we'll simply see if the prediction is # closer to 0 or 1, by first clipping to the [0, 1] range in order # to trim values outside of this range, and then rounding. accuracy += 1 if round(np.clip(y, 0, 1)) == t else 0 e /= len(data) print "%s: ERROR = %f ; ACCURACY = %d of %d" % (i, e, accuracy, len(data)) print print "W = %s" % w
07_classification.py
8,658
Everything we've seen to this point has been a problem known as regression in which we're trying to predict an actual numeric value for each observation of N input numeric values. A more common problem is that of classification - predicting a single binary occurance, class or label for each input. The example we'll explore now is attempting to predict for every passenger aboard the Titanic, if they survived or not. Clearly, this is not a numeric value, but a boolean one: True (survived) or False (didn't survive) A different way to think about classification is in terms closer to regression where instead of approximating an output value for each input, we're learning a threshold line in the function where values below these threshold doesn't belong to a class, and values above it do. The weights of an output unit determine the logical expression for the corresponding input, while the bias acts as the threshold (axon hillock) that must be surpassed in order for the unit to activate. So the bias basically describe the excitability of the unit, or how likely it is to fire. While the weights are the effect of the individual inputs. Mathematically: y = w * x + b >= 0 => w * x >= -b That means that in order for the output of the unit to be greater than 1 we need w * x to be greater than the negative of the bias. Remember that in classification the input x is a binary 0 or 1, so we have two cases: x = 0: w * 0 >= -b = 0 >= -b x = 1: w * 1 >= -b = w >= -b So basically, the bias describes two properties: (a) the default activation of the unit, whether it should fire or not on zero input (x = 0). And (b) how big should the weights be to excite or inhibit that default activation for a non- zero input (x = 1). A positive bias (1) will fire unless there are enough negative weights (where the input is 1) to inhibit it, while a negative bias (-1) will not fire unless there are enough positive weights to excite it. With these two variables, we can describe any single-argument boolean function: w b y >= -b f ================================= 0 1 0 * x >= -1 T 0 -1 0 * x >= 1 F 1 -1 1 * x >= 1 x F (when x=F) or T (x=T) identify -1 0 -1 * x >= 0 !x F (when x=T) or T (x=F) negation When we add arguments, we can support more boolean operations like AND and OR. Lets start with AND: we will need the sum of a subgroup of the weights exceed the negative bias: w1 w2 b y >= -b f ================================== 1 1 -2 x1 + x2 >= 2 x1 AND x2 -1 1 -1 -x1 + x2 >= 1 !x1 AND x2 1 -1 -1 x1 - x2 >= 1 x1 AND !x2 -1 -1 0 -x1 - x2 >= 0 !x1 AND !x2 It's possible to have other weights, but there's a subgroup of the weights where each isn't big enough to exceed -b by itself, but their sum does. All of these weights needs to be activated (by an input of 1) in order for the sum to be greater than -b. Now for the OR. Because we might have several such subgroups that satisfy the relationship above, each subgroup can, by itself, exceed -b. Thus there's an OR operator between these subgroups: w1 w2 w3 b y >= -b f ============================================== 1 1 2 -2 x1 + x2 + 2*x3 >= 2 ( x1 AND x2) OR ( x3) -1 1 -2 -1 -x1 + x2 - 2*x3 >= 1 (!x1 AND x2) OR (!x3) We end up with function structures like: f = (x1 AND x2 ...) OR ( x2 AND x3 ...) ... f = (x1 AND !x2 ...) OR (!x2 AND x3 ...) ... f = (x1 AND x2 ...) OR ( x3 AND x4 ...) ... 
^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^ subgroup 1 subgroup 2 Where the OR separates all subgroups of the weights that has a sum greater than -b, while the AND separates the individual weights within each such group. NOTE that each input is always used with the same sign across all subgroups, either identity or negation - never both. Our model can only approximate linear boolean functions which are ones where each input always contributes the same amount towards the same output: T or F. If one argument is more likely to make the output true, it must be the case that regardless of all other arguments, it will continue to make the output similarily likely to be true (or false). It cannot be the case where one of the inputs is sometimes used as an identity and other times is negated. For example, these boolean functions aren't linear and thus cannot be approximated by this model: (x1 AND !x2) OR (!x1 AND x2) exclusive-or (XOR) (x1 AND x2) OR (!x1 AND !x2) Equivalence This is because it's impossible to choose a weight for the input that's both negative and positive. We need to pick one. So either that input makes the output bigger, or smaller, or neither - but not conditionally both. NOTE that this is a weak definition of linearity in boolean function, and is possibly wrong. I couldn't easily wrap my head around it, so perhaps the wikipedia entry[1] on it will help. [1] https://en.wikipedia.org/wiki/LinearityBoolean_functions Our 1-dimensional input is the sex of the passenger: m (male) or f (female) Our output is a number, either 1 (survived) or 0 (didn't survive) One of the main issues to take care of is encoding: how do we transform these textual categories into numeric inputs that we can estimate. One naive approach might be to use a single input feature, say a value of 0 represents a male, and 1 represents a female. That wouldn't work, because any kind of weight we'll use will end up increasing for females. Thus we have no way to find different weights for the different categories. This is not necessarily correct for ordinal values like age or fare cost, but it's still common to learn these weights independently by grouping multiple numeric values into a discrete set of categories ("young", "old" for age; "cheap", "expansive" for fare cost). The same limitation obviously applied if we use more values with binary encoding. The best known approach currently is one-hot (or one-of-k) in which each value is assigned a completely different input. If we have k values, we'll use k input neurons (one for male and the other for female) in which only one neuron can be lit (value of 1) for any given training case. If we have multiple categories we can concatenate multiple such one-of-k's as needed as that maintains the fact that each value is assign a separate input and weight. 1 per unique value encode the input data strings into a list of one-of-k's. We want to return a list of numbers, where all are set zeros, but only one is to set to one. That should be applied to each feature - one for value. More features would require a concatenation of such one-of-k's start with small random weights we will now also compute the accuracy as a count of how many instances in the data were predicted correctly. This is a more quantitive way of representing the correctness of the prediction as opposed to an arbitrary error function mini-batches predict add the fixed bias. error & derivatives update mini-batch update did we predict correctly? 
We need to transform the output number into a boolean prediction: whether the label should be turned on or off. For this example, we'll simply see if the prediction is closer to 0 or 1, by first clipping to the [0, 1] range in order to trim values outside of this range, and then rounding.
7,422
en
0.916011
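The threshold tables above can be checked mechanically: a unit fires when w . x >= -b, so the AND row from the table (w = [1, 1], b = -2) and the standard OR setting (w = [1, 1], b = -1) reproduce the expected truth tables. A standalone Python 3 sketch, independent of the Python 2 script above:

# Standalone check of the threshold interpretation: fire when w . x + b >= 0.
import numpy as np

def fires(w, b, x):
    return int(np.dot(w, x) + b >= 0)

for x1 in (0, 1):
    for x2 in (0, 1):
        x = np.array([x1, x2])
        print(x1, x2,
              "AND:", fires([1, 1], -2, x),  # fires only for (1, 1)
              "OR:", fires([1, 1], -1, x))   # fires for any input containing a 1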
from enum import Enum
from dataclasses import dataclass


class TokenType(Enum):
    # TYPES
    INT = 0
    FLOAT = 1
    # OPERATORS
    PLUS = 2
    MINUS = 3
    DIVIDE = 4
    MULTIPLY = 5
    # PARENTHESES
    LPAREN = 6
    RPAREN = 7
    # SQUARE BRACKETS
    L_SQUAREBRACKET = 8
    R_SQUAREBRACKET = 9
    # ANGLE BRACKETS
    L_ANGLEBRACKET = 12
    R_ANGLEBRACKET = 13


@dataclass
class Token:
    type: TokenType
    value: any = None

    def __repr__(self):
        return self.type.name + (f":{self.value}" if self.value != None else "")
tokens.py
590
TYPESOPERATORSPARENTHESESSQUARE BRACKETSANGLE BRACKETS
54
en
0.357072
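The Token dataclass only customises __repr__, dropping the value part when it is None. A quick illustration, assuming the file above is importable as tokens:

# Illustration of Token's __repr__ behaviour.
from tokens import Token, TokenType

print(Token(TokenType.INT, 42))  # INT:42
print(Token(TokenType.PLUS))     # PLUS  (value is None, so only the type name prints)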
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Created on Jan 29, 2021 @file: train_unmixing.py @desc: Perform the training of the models for the unmixing problem. @author: laugh12321 @contact: laugh12321@vip.qq.com """ import os import numpy as np import tensorflow as tf from typing import Dict import src.model.enums as enums from src.utils import io, transforms from src.model.models import _get_model from src.utils.transforms import UNMIXING_TRANSFORMS from src.evaluation import time_metrics from src.evaluation.performance_metrics import UNMIXING_LOSSES, \ UNMIXING_TRAIN_METRICS def train(data: Dict[str, np.ndarray], model_name: str, dest_path: str, sample_size: int, n_classes: int, lr: float, batch_size: int, epochs: int, verbose: int, shuffle: bool, patience: int, seed: int): """ Function for running experiments on various unmixing models, given a set of hyper parameters. :param data: The data dictionary containing the subsets for training and validation. First dimension of the datasets should be the number of samples. :param model_name: Name of the model, it serves as a key in the dictionary holding all functions returning models. :param dest_path: Path to where all experiment runs will be saved as subdirectories in this given directory. :param sample_size: Size of the input sample. :param n_classes: Number of classes. :param lr: Learning rate for the model, i.e., regulates the size of the step in the gradient descent process. :param batch_size: Size of the batch used in training phase, it is the size of samples per gradient step. :param epochs: Number of epochs for model to train. :param verbose: Verbosity mode used in training, (0, 1 or 2). :param shuffle: Boolean indicating whether to shuffle datasets. :param patience: Number of epochs without improvement in order to stop the training phase. :param seed: Seed for training reproducibility. """ # Reproducibility: np.random.seed(seed=seed) model = _get_model( model_key=model_name, **{'input_size': sample_size, 'n_classes': n_classes}) model.summary() model.compile( optimizer=tf.keras.optimizers.Adam(lr=lr), loss=UNMIXING_LOSSES[model_name], metrics=UNMIXING_TRAIN_METRICS[model_name]) time_history = time_metrics.TimeHistory() mcp_save = tf.keras.callbacks.ModelCheckpoint( os.path.join(dest_path, 'model.h5'), save_best_only=True, monitor='val_loss', mode='min') early_stopping = tf.keras.callbacks.EarlyStopping( monitor='val_loss', patience=patience, mode='min') callbacks = [time_history, mcp_save, early_stopping] train_dict = data[enums.Dataset.TRAIN].copy() val_dict = data[enums.Dataset.VAL].copy() min_, max_ = data[enums.DataStats.MIN], data[enums.DataStats.MAX] transformations = [transforms.MinMaxNormalize(min_=min_, max_=max_)] transformations += [t() for t in UNMIXING_TRANSFORMS[model_name]] train_dict = transforms.apply_transformations(train_dict, transformations) val_dict = transforms.apply_transformations(val_dict, transformations) history = model.fit( x=train_dict[enums.Dataset.DATA], y=train_dict[enums.Dataset.LABELS], epochs=epochs, verbose=verbose, shuffle=shuffle, validation_data=(val_dict[enums.Dataset.DATA], val_dict[enums.Dataset.LABELS]), callbacks=callbacks, batch_size=batch_size) np.savetxt(os.path.join(dest_path, 'min-max.csv'), np.array([min_, max_]), delimiter=',', fmt='%f') history.history[time_metrics.TimeHistory.__name__] = time_history.average io.save_metrics(dest_path=dest_path, file_name='training_metrics.csv', metrics=history.history)
src/model/train_unmixing.py
4,094
Function for running experiments on various unmixing models, given a set of hyper parameters. :param data: The data dictionary containing the subsets for training and validation. First dimension of the datasets should be the number of samples. :param model_name: Name of the model, it serves as a key in the dictionary holding all functions returning models. :param dest_path: Path to where all experiment runs will be saved as subdirectories in this given directory. :param sample_size: Size of the input sample. :param n_classes: Number of classes. :param lr: Learning rate for the model, i.e., regulates the size of the step in the gradient descent process. :param batch_size: Size of the batch used in training phase, it is the size of samples per gradient step. :param epochs: Number of epochs for model to train. :param verbose: Verbosity mode used in training, (0, 1 or 2). :param shuffle: Boolean indicating whether to shuffle datasets. :param patience: Number of epochs without improvement in order to stop the training phase. :param seed: Seed for training reproducibility. Created on Jan 29, 2021 @file: train_unmixing.py @desc: Perform the training of the models for the unmixing problem. @author: laugh12321 @contact: laugh12321@vip.qq.com !/usr/bin/env python -*- coding: utf-8 -*- Reproducibility:
1,343
en
0.833456
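The docstring describes the data argument only loosely; from the way train() indexes it, the expected layout looks roughly like the sketch below. The enum keys come from src.model.enums as used above, the array shapes are illustrative assumptions only, and the snippet assumes it runs from the repository root so that src is importable:

# Assumed layout of the `data` dictionary consumed by train(); shapes are examples.
import numpy as np
import src.model.enums as enums

data = {
    enums.Dataset.TRAIN: {
        enums.Dataset.DATA: np.zeros((100, 1, 1, 103)),  # (n_samples, ...) input samples
        enums.Dataset.LABELS: np.zeros((100, 5)),        # per-sample abundance vectors
    },
    enums.Dataset.VAL: {
        enums.Dataset.DATA: np.zeros((20, 1, 1, 103)),
        enums.Dataset.LABELS: np.zeros((20, 5)),
    },
    enums.DataStats.MIN: 0.0,  # global minimum used for min-max normalisation
    enums.DataStats.MAX: 1.0,  # global maximum used for min-max normalisation
}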
"""Probability mass function for a beta binomial distribution Functions --------- betabinom_pmf Probability mass function for a beta binomial distribution """ from bbpmf.betabinom_pmf import betabinom_pmf
bbpmf/__init__.py
211
Probability mass function for a beta binomial distribution Functions --------- betabinom_pmf Probability mass function for a beta binomial distribution
156
en
0.405207
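The distribution referred to here has PMF P(X = k) = C(n, k) * B(k + a, n - k + b) / B(a, b), where B is the beta function. The exact argument order of bbpmf.betabinom_pmf is not shown above, but the distribution itself can be cross-checked against SciPy, which ships scipy.stats.betabinom from version 1.4:

# Cross-check of the beta-binomial PMF with SciPy; bbpmf's own call signature
# is not documented above, so only the distribution is illustrated here.
from scipy.stats import betabinom

# P(X = 4) for n = 10 trials with shape parameters a = 2, b = 3
print(betabinom.pmf(4, 10, 2, 3))  # 20/143, roughly 0.1399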
import random

random.sample(set([1, 2, 3, 4, 5, 6]), 2)  # random select from set
snippets/python-set-random.py
81
random select from set
22
en
0.831725
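Note that passing a set straight to random.sample was deprecated in Python 3.9 and removed in 3.11, so on current interpreters the snippet needs an explicit conversion to a sequence first:

# Same idea as the snippet above, written to keep working on Python 3.11+.
import random

values = {1, 2, 3, 4, 5, 6}
print(random.sample(sorted(values), 2))  # e.g. [3, 6] -- two distinct elements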
"""Tests for stubs. Verify that various things in stubs are consistent with how things behave at runtime. """ import argparse import copy import enum import importlib import inspect import re import sys import types import warnings from functools import singledispatch from pathlib import Path from typing import Any, Dict, Generic, Iterator, List, Optional, Tuple, TypeVar, Union, cast from typing_extensions import Type import mypy.build import mypy.modulefinder import mypy.types from mypy import nodes from mypy.config_parser import parse_config_file from mypy.options import Options from mypy.util import FancyFormatter class Missing: """Marker object for things that are missing (from a stub or the runtime).""" def __repr__(self) -> str: return "MISSING" MISSING = Missing() T = TypeVar("T") if sys.version_info >= (3, 5, 3): MaybeMissing = Union[T, Missing] else: # work around a bug in 3.5.2 and earlier's typing.py class MaybeMissingMeta(type): def __getitem__(self, arg: Any) -> Any: return Union[arg, Missing] class MaybeMissing(metaclass=MaybeMissingMeta): # type: ignore pass _formatter = FancyFormatter(sys.stdout, sys.stderr, False) def _style(message: str, **kwargs: Any) -> str: """Wrapper around mypy.util for fancy formatting.""" kwargs.setdefault("color", "none") return _formatter.style(message, **kwargs) class Error: def __init__( self, object_path: List[str], message: str, stub_object: MaybeMissing[nodes.Node], runtime_object: MaybeMissing[Any], *, stub_desc: Optional[str] = None, runtime_desc: Optional[str] = None ) -> None: """Represents an error found by stubtest. :param object_path: Location of the object with the error, e.g. ``["module", "Class", "method"]`` :param message: Error message :param stub_object: The mypy node representing the stub :param runtime_object: Actual object obtained from the runtime :param stub_desc: Specialised description for the stub object, should you wish :param runtime_desc: Specialised description for the runtime object, should you wish """ self.object_desc = ".".join(object_path) self.message = message self.stub_object = stub_object self.runtime_object = runtime_object self.stub_desc = stub_desc or str(getattr(stub_object, "type", stub_object)) self.runtime_desc = runtime_desc or str(runtime_object) def is_missing_stub(self) -> bool: """Whether or not the error is for something missing from the stub.""" return isinstance(self.stub_object, Missing) def is_positional_only_related(self) -> bool: """Whether or not the error is for something being (or not being) positional-only.""" # TODO: This is hacky, use error codes or something more resilient return "leading double underscore" in self.message def get_description(self, concise: bool = False) -> str: """Returns a description of the error. 
:param concise: Whether to return a concise, one-line description """ if concise: return _style(self.object_desc, bold=True) + " " + self.message stub_line = None stub_file = None # type: None if not isinstance(self.stub_object, Missing): stub_line = self.stub_object.line # TODO: Find a way of getting the stub file stub_loc_str = "" if stub_line: stub_loc_str += " at line {}".format(stub_line) if stub_file: stub_loc_str += " in file {}".format(Path(stub_file)) runtime_line = None runtime_file = None if not isinstance(self.runtime_object, Missing): try: runtime_line = inspect.getsourcelines(self.runtime_object)[1] except (OSError, TypeError): pass try: runtime_file = inspect.getsourcefile(self.runtime_object) except TypeError: pass runtime_loc_str = "" if runtime_line: runtime_loc_str += " at line {}".format(runtime_line) if runtime_file: runtime_loc_str += " in file {}".format(Path(runtime_file)) output = [ _style("error: ", color="red", bold=True), _style(self.object_desc, bold=True), " ", self.message, "\n", "Stub:", _style(stub_loc_str, dim=True), "\n", _style(self.stub_desc + "\n", color="blue", dim=True), "Runtime:", _style(runtime_loc_str, dim=True), "\n", _style(self.runtime_desc + "\n", color="blue", dim=True), ] return "".join(output) def test_module(module_name: str) -> Iterator[Error]: """Tests a given module's stub against introspecting it at runtime. Requires the stub to have been built already, accomplished by a call to ``build_stubs``. :param module_name: The module to test """ stub = get_stub(module_name) if stub is None: yield Error([module_name], "failed to find stubs", MISSING, None) return try: with warnings.catch_warnings(): warnings.simplefilter("ignore") runtime = importlib.import_module(module_name) except Exception as e: yield Error([module_name], "failed to import: {}".format(e), stub, MISSING) return with warnings.catch_warnings(): warnings.simplefilter("ignore") yield from verify(stub, runtime, [module_name]) @singledispatch def verify( stub: nodes.Node, runtime: MaybeMissing[Any], object_path: List[str] ) -> Iterator[Error]: """Entry point for comparing a stub to a runtime object. We use single dispatch based on the type of ``stub``. :param stub: The mypy node representing a part of the stub :param runtime: The runtime object corresponding to ``stub`` """ yield Error(object_path, "is an unknown mypy node", stub, runtime) @verify.register(nodes.MypyFile) def verify_mypyfile( stub: nodes.MypyFile, runtime: MaybeMissing[types.ModuleType], object_path: List[str] ) -> Iterator[Error]: if isinstance(runtime, Missing): yield Error(object_path, "is not present at runtime", stub, runtime) return if not isinstance(runtime, types.ModuleType): yield Error(object_path, "is not a module", stub, runtime) return # Check things in the stub that are public to_check = set( m for m, o in stub.names.items() if o.module_public and (not m.startswith("_") or hasattr(runtime, m)) ) runtime_public_contents = [ m for m in dir(runtime) if not m.startswith("_") # Ensure that the object's module is `runtime`, e.g. so that we don't pick up reexported # modules and infinitely recurse. 
Unfortunately, there's no way to detect an explicit # reexport missing from the stubs (that isn't specified in __all__) and getattr(getattr(runtime, m), "__module__", None) == runtime.__name__ ] # Check all things declared in module's __all__, falling back to runtime_public_contents to_check.update(getattr(runtime, "__all__", runtime_public_contents)) to_check.difference_update({"__file__", "__doc__", "__name__", "__builtins__", "__package__"}) for entry in sorted(to_check): yield from verify( stub.names[entry].node if entry in stub.names else MISSING, getattr(runtime, entry, MISSING), object_path + [entry], ) @verify.register(nodes.TypeInfo) def verify_typeinfo( stub: nodes.TypeInfo, runtime: MaybeMissing[Type[Any]], object_path: List[str] ) -> Iterator[Error]: if isinstance(runtime, Missing): yield Error(object_path, "is not present at runtime", stub, runtime, stub_desc=repr(stub)) return if not isinstance(runtime, type): yield Error(object_path, "is not a type", stub, runtime, stub_desc=repr(stub)) return # Check everything already defined in the stub to_check = set(stub.names) # There's a reasonable case to be made that we should always check all dunders, but it's # currently quite noisy. We could turn this into a denylist instead of an allowlist. to_check.update( # cast to workaround mypyc complaints m for m in cast(Any, vars)(runtime) if not m.startswith("_") or m in SPECIAL_DUNDERS ) for entry in sorted(to_check): mangled_entry = entry if entry.startswith("__") and not entry.endswith("__"): mangled_entry = "_{}{}".format(stub.name, entry) yield from verify( next((t.names[entry].node for t in stub.mro if entry in t.names), MISSING), getattr(runtime, mangled_entry, MISSING), object_path + [entry], ) def _verify_static_class_methods( stub: nodes.FuncBase, runtime: Any, object_path: List[str] ) -> Iterator[str]: if stub.name in ("__new__", "__init_subclass__", "__class_getitem__"): # Special cased by Python, so don't bother checking return if inspect.isbuiltin(runtime): # The isinstance checks don't work reliably for builtins, e.g. datetime.datetime.now, so do # something a little hacky that seems to work well probably_class_method = isinstance(getattr(runtime, "__self__", None), type) if probably_class_method and not stub.is_class: yield "runtime is a classmethod but stub is not" if not probably_class_method and stub.is_class: yield "stub is a classmethod but runtime is not" return # Look the object up statically, to avoid binding by the descriptor protocol static_runtime = importlib.import_module(object_path[0]) for entry in object_path[1:]: try: static_runtime = inspect.getattr_static(static_runtime, entry) except AttributeError: # This can happen with mangled names, ignore for now. # TODO: pass more information about ancestors of nodes/objects to verify, so we don't # have to do this hacky lookup. Would be useful in a couple other places too. 
return if isinstance(static_runtime, classmethod) and not stub.is_class: yield "runtime is a classmethod but stub is not" if not isinstance(static_runtime, classmethod) and stub.is_class: yield "stub is a classmethod but runtime is not" if isinstance(static_runtime, staticmethod) and not stub.is_static: yield "runtime is a staticmethod but stub is not" if not isinstance(static_runtime, staticmethod) and stub.is_static: yield "stub is a staticmethod but runtime is not" def _verify_arg_name( stub_arg: nodes.Argument, runtime_arg: inspect.Parameter, function_name: str ) -> Iterator[str]: """Checks whether argument names match.""" # Ignore exact names for most dunder methods if is_dunder(function_name, exclude_special=True): return def strip_prefix(s: str, prefix: str) -> str: return s[len(prefix):] if s.startswith(prefix) else s if strip_prefix(stub_arg.variable.name, "__") == runtime_arg.name: return def names_approx_match(a: str, b: str) -> bool: a = a.strip("_") b = b.strip("_") return a.startswith(b) or b.startswith(a) or len(a) == 1 or len(b) == 1 # Be more permissive about names matching for positional-only arguments if runtime_arg.kind == inspect.Parameter.POSITIONAL_ONLY and names_approx_match( stub_arg.variable.name, runtime_arg.name ): return # This comes up with namedtuples, so ignore if stub_arg.variable.name == "_self": return yield ( 'stub argument "{}" differs from runtime argument "{}"'.format( stub_arg.variable.name, runtime_arg.name ) ) def _verify_arg_default_value( stub_arg: nodes.Argument, runtime_arg: inspect.Parameter ) -> Iterator[str]: """Checks whether argument default values are compatible.""" if runtime_arg.default != inspect.Parameter.empty: if stub_arg.kind not in (nodes.ARG_OPT, nodes.ARG_NAMED_OPT): yield ( 'runtime argument "{}" has a default value but stub argument does not'.format( runtime_arg.name ) ) else: runtime_type = get_mypy_type_of_runtime_value(runtime_arg.default) # Fallback to the type annotation type if var type is missing. The type annotation # is an UnboundType, but I don't know enough to know what the pros and cons here are. # UnboundTypes have ugly question marks following them, so default to var type. # Note we do this same fallback when constructing signatures in from_overloadedfuncdef stub_type = stub_arg.variable.type or stub_arg.type_annotation if isinstance(stub_type, mypy.types.TypeVarType): stub_type = stub_type.upper_bound if ( runtime_type is not None and stub_type is not None # Avoid false positives for marker objects and type(runtime_arg.default) != object and not is_subtype_helper(runtime_type, stub_type) ): yield ( 'runtime argument "{}" has a default value of type {}, ' "which is incompatible with stub argument type {}".format( runtime_arg.name, runtime_type, stub_type ) ) else: if stub_arg.kind in (nodes.ARG_OPT, nodes.ARG_NAMED_OPT): yield ( 'stub argument "{}" has a default value but runtime argument does not'.format( stub_arg.variable.name ) ) def maybe_strip_cls(name: str, args: List[nodes.Argument]) -> List[nodes.Argument]: if name in ("__init_subclass__", "__class_getitem__"): # These are implicitly classmethods. 
If the stub chooses not to have @classmethod, we # should remove the cls argument if args[0].variable.name == "cls": return args[1:] return args class Signature(Generic[T]): def __init__(self) -> None: self.pos = [] # type: List[T] self.kwonly = {} # type: Dict[str, T] self.varpos = None # type: Optional[T] self.varkw = None # type: Optional[T] def __str__(self) -> str: def get_name(arg: Any) -> str: if isinstance(arg, inspect.Parameter): return arg.name if isinstance(arg, nodes.Argument): return arg.variable.name raise AssertionError def get_type(arg: Any) -> Optional[str]: if isinstance(arg, inspect.Parameter): return None if isinstance(arg, nodes.Argument): return str(arg.variable.type or arg.type_annotation) raise AssertionError def has_default(arg: Any) -> bool: if isinstance(arg, inspect.Parameter): return arg.default != inspect.Parameter.empty if isinstance(arg, nodes.Argument): return arg.kind in (nodes.ARG_OPT, nodes.ARG_NAMED_OPT) raise AssertionError def get_desc(arg: Any) -> str: arg_type = get_type(arg) return ( get_name(arg) + (": {}".format(arg_type) if arg_type else "") + (" = ..." if has_default(arg) else "") ) kw_only = sorted(self.kwonly.values(), key=lambda a: (has_default(a), get_name(a))) ret = "def (" ret += ", ".join( [get_desc(arg) for arg in self.pos] + (["*" + get_name(self.varpos)] if self.varpos else (["*"] if self.kwonly else [])) + [get_desc(arg) for arg in kw_only] + (["**" + get_name(self.varkw)] if self.varkw else []) ) ret += ")" return ret @staticmethod def from_funcitem(stub: nodes.FuncItem) -> "Signature[nodes.Argument]": stub_sig = Signature() # type: Signature[nodes.Argument] stub_args = maybe_strip_cls(stub.name, stub.arguments) for stub_arg in stub_args: if stub_arg.kind in (nodes.ARG_POS, nodes.ARG_OPT): stub_sig.pos.append(stub_arg) elif stub_arg.kind in (nodes.ARG_NAMED, nodes.ARG_NAMED_OPT): stub_sig.kwonly[stub_arg.variable.name] = stub_arg elif stub_arg.kind == nodes.ARG_STAR: stub_sig.varpos = stub_arg elif stub_arg.kind == nodes.ARG_STAR2: stub_sig.varkw = stub_arg else: raise AssertionError return stub_sig @staticmethod def from_inspect_signature(signature: inspect.Signature) -> "Signature[inspect.Parameter]": runtime_sig = Signature() # type: Signature[inspect.Parameter] for runtime_arg in signature.parameters.values(): if runtime_arg.kind in ( inspect.Parameter.POSITIONAL_ONLY, inspect.Parameter.POSITIONAL_OR_KEYWORD, ): runtime_sig.pos.append(runtime_arg) elif runtime_arg.kind == inspect.Parameter.KEYWORD_ONLY: runtime_sig.kwonly[runtime_arg.name] = runtime_arg elif runtime_arg.kind == inspect.Parameter.VAR_POSITIONAL: runtime_sig.varpos = runtime_arg elif runtime_arg.kind == inspect.Parameter.VAR_KEYWORD: runtime_sig.varkw = runtime_arg else: raise AssertionError return runtime_sig @staticmethod def from_overloadedfuncdef(stub: nodes.OverloadedFuncDef) -> "Signature[nodes.Argument]": """Returns a Signature from an OverloadedFuncDef. If life were simple, to verify_overloadedfuncdef, we'd just verify_funcitem for each of its items. Unfortunately, life isn't simple and overloads are pretty deceitful. So instead, we try and combine the overload's items into a single signature that is compatible with any lies it might try to tell. 
""" # For most dunder methods, just assume all args are positional-only assume_positional_only = is_dunder(stub.name, exclude_special=True) all_args = {} # type: Dict[str, List[Tuple[nodes.Argument, int]]] for func in map(_resolve_funcitem_from_decorator, stub.items): assert func is not None args = maybe_strip_cls(stub.name, func.arguments) for index, arg in enumerate(args): # For positional-only args, we allow overloads to have different names for the same # argument. To accomplish this, we just make up a fake index-based name. name = ( "__{}".format(index) if arg.variable.name.startswith("__") or assume_positional_only else arg.variable.name ) all_args.setdefault(name, []).append((arg, index)) def get_position(arg_name: str) -> int: # We just need this to return the positional args in the correct order. return max(index for _, index in all_args[arg_name]) def get_type(arg_name: str) -> mypy.types.ProperType: with mypy.state.strict_optional_set(True): all_types = [ arg.variable.type or arg.type_annotation for arg, _ in all_args[arg_name] ] return mypy.typeops.make_simplified_union([t for t in all_types if t]) def get_kind(arg_name: str) -> int: kinds = {arg.kind for arg, _ in all_args[arg_name]} if nodes.ARG_STAR in kinds: return nodes.ARG_STAR if nodes.ARG_STAR2 in kinds: return nodes.ARG_STAR2 # The logic here is based on two tenets: # 1) If an arg is ever optional (or unspecified), it is optional # 2) If an arg is ever positional, it is positional is_opt = ( len(all_args[arg_name]) < len(stub.items) or nodes.ARG_OPT in kinds or nodes.ARG_NAMED_OPT in kinds ) is_pos = nodes.ARG_OPT in kinds or nodes.ARG_POS in kinds if is_opt: return nodes.ARG_OPT if is_pos else nodes.ARG_NAMED_OPT return nodes.ARG_POS if is_pos else nodes.ARG_NAMED sig = Signature() # type: Signature[nodes.Argument] for arg_name in sorted(all_args, key=get_position): # example_arg_name gives us a real name (in case we had a fake index-based name) example_arg_name = all_args[arg_name][0][0].variable.name arg = nodes.Argument( nodes.Var(example_arg_name, get_type(arg_name)), type_annotation=None, initializer=None, kind=get_kind(arg_name), ) if arg.kind in (nodes.ARG_POS, nodes.ARG_OPT): sig.pos.append(arg) elif arg.kind in (nodes.ARG_NAMED, nodes.ARG_NAMED_OPT): sig.kwonly[arg.variable.name] = arg elif arg.kind == nodes.ARG_STAR: sig.varpos = arg elif arg.kind == nodes.ARG_STAR2: sig.varkw = arg else: raise AssertionError return sig def _verify_signature( stub: Signature[nodes.Argument], runtime: Signature[inspect.Parameter], function_name: str ) -> Iterator[str]: # Check positional arguments match up for stub_arg, runtime_arg in zip(stub.pos, runtime.pos): yield from _verify_arg_name(stub_arg, runtime_arg, function_name) yield from _verify_arg_default_value(stub_arg, runtime_arg) if ( runtime_arg.kind == inspect.Parameter.POSITIONAL_ONLY and not stub_arg.variable.name.startswith("__") and not stub_arg.variable.name.strip("_") == "self" and not is_dunder(function_name, exclude_special=True) # noisy for dunder methods ): yield ( 'stub argument "{}" should be positional-only ' '(rename with a leading double underscore, i.e. 
"__{}")'.format( stub_arg.variable.name, runtime_arg.name ) ) if ( runtime_arg.kind != inspect.Parameter.POSITIONAL_ONLY and stub_arg.variable.name.startswith("__") ): yield ( 'stub argument "{}" should be positional or keyword ' "(remove leading double underscore)".format(stub_arg.variable.name) ) # Check unmatched positional args if len(stub.pos) > len(runtime.pos): # There are cases where the stub exhaustively lists out the extra parameters the function # would take through *args. Hence, a) we can't check that the runtime actually takes those # parameters and b) below, we don't enforce that the stub takes *args, since runtime logic # may prevent those arguments from actually being accepted. if runtime.varpos is None: for stub_arg in stub.pos[len(runtime.pos):]: # If the variable is in runtime.kwonly, it's just mislabelled as not a # keyword-only argument if stub_arg.variable.name not in runtime.kwonly: yield 'runtime does not have argument "{}"'.format(stub_arg.variable.name) else: yield 'stub argument "{}" is not keyword-only'.format(stub_arg.variable.name) if stub.varpos is not None: yield 'runtime does not have *args argument "{}"'.format(stub.varpos.variable.name) elif len(stub.pos) < len(runtime.pos): for runtime_arg in runtime.pos[len(stub.pos):]: if runtime_arg.name not in stub.kwonly: yield 'stub does not have argument "{}"'.format(runtime_arg.name) else: yield 'runtime argument "{}" is not keyword-only'.format(runtime_arg.name) # Checks involving *args if len(stub.pos) <= len(runtime.pos) or runtime.varpos is None: if stub.varpos is None and runtime.varpos is not None: yield 'stub does not have *args argument "{}"'.format(runtime.varpos.name) if stub.varpos is not None and runtime.varpos is None: yield 'runtime does not have *args argument "{}"'.format(stub.varpos.variable.name) # Check keyword-only args for arg in sorted(set(stub.kwonly) & set(runtime.kwonly)): stub_arg, runtime_arg = stub.kwonly[arg], runtime.kwonly[arg] yield from _verify_arg_name(stub_arg, runtime_arg, function_name) yield from _verify_arg_default_value(stub_arg, runtime_arg) # Check unmatched keyword-only args if runtime.varkw is None or not set(runtime.kwonly).issubset(set(stub.kwonly)): # There are cases where the stub exhaustively lists out the extra parameters the function # would take through *kwargs. Hence, a) we only check if the runtime actually takes those # parameters when the above condition holds and b) below, we don't enforce that the stub # takes *kwargs, since runtime logic may prevent additional arguments from actually being # accepted. for arg in sorted(set(stub.kwonly) - set(runtime.kwonly)): yield 'runtime does not have argument "{}"'.format(arg) for arg in sorted(set(runtime.kwonly) - set(stub.kwonly)): if arg in set(stub_arg.variable.name for stub_arg in stub.pos): # Don't report this if we've reported it before if len(stub.pos) > len(runtime.pos) and runtime.varpos is not None: yield 'stub argument "{}" is not keyword-only'.format(arg) else: yield 'stub does not have argument "{}"'.format(arg) # Checks involving **kwargs if stub.varkw is None and runtime.varkw is not None: # As mentioned above, don't enforce that the stub takes **kwargs. 
# Also check against positional parameters, to avoid a nitpicky message when an argument # isn't marked as keyword-only stub_pos_names = set(stub_arg.variable.name for stub_arg in stub.pos) # Ideally we'd do a strict subset check, but in practice the errors from that aren't useful if not set(runtime.kwonly).issubset(set(stub.kwonly) | stub_pos_names): yield 'stub does not have **kwargs argument "{}"'.format(runtime.varkw.name) if stub.varkw is not None and runtime.varkw is None: yield 'runtime does not have **kwargs argument "{}"'.format(stub.varkw.variable.name) @verify.register(nodes.FuncItem) def verify_funcitem( stub: nodes.FuncItem, runtime: MaybeMissing[Any], object_path: List[str] ) -> Iterator[Error]: if isinstance(runtime, Missing): yield Error(object_path, "is not present at runtime", stub, runtime) return if ( not isinstance(runtime, (types.FunctionType, types.BuiltinFunctionType)) and not isinstance(runtime, (types.MethodType, types.BuiltinMethodType)) and not inspect.ismethoddescriptor(runtime) ): yield Error(object_path, "is not a function", stub, runtime) if not callable(runtime): return for message in _verify_static_class_methods(stub, runtime, object_path): yield Error(object_path, "is inconsistent, " + message, stub, runtime) try: signature = inspect.signature(runtime) except (ValueError, RuntimeError): # inspect.signature throws sometimes # catch RuntimeError because of https://bugs.python.org/issue39504 return stub_sig = Signature.from_funcitem(stub) runtime_sig = Signature.from_inspect_signature(signature) for message in _verify_signature(stub_sig, runtime_sig, function_name=stub.name): yield Error( object_path, "is inconsistent, " + message, stub, runtime, runtime_desc="def " + str(signature), ) @verify.register(Missing) def verify_none( stub: Missing, runtime: MaybeMissing[Any], object_path: List[str] ) -> Iterator[Error]: yield Error(object_path, "is not present in stub", stub, runtime) @verify.register(nodes.Var) def verify_var( stub: nodes.Var, runtime: MaybeMissing[Any], object_path: List[str] ) -> Iterator[Error]: if isinstance(runtime, Missing): # Don't always yield an error here, because we often can't find instance variables if len(object_path) <= 2: yield Error(object_path, "is not present at runtime", stub, runtime) return runtime_type = get_mypy_type_of_runtime_value(runtime) if ( runtime_type is not None and stub.type is not None and not is_subtype_helper(runtime_type, stub.type) ): should_error = True # Avoid errors when defining enums, since runtime_type is the enum itself, but we'd # annotate it with the type of runtime.value if isinstance(runtime, enum.Enum): runtime_type = get_mypy_type_of_runtime_value(runtime.value) if runtime_type is not None and is_subtype_helper(runtime_type, stub.type): should_error = False if should_error: yield Error( object_path, "variable differs from runtime type {}".format(runtime_type), stub, runtime, ) @verify.register(nodes.OverloadedFuncDef) def verify_overloadedfuncdef( stub: nodes.OverloadedFuncDef, runtime: MaybeMissing[Any], object_path: List[str] ) -> Iterator[Error]: if isinstance(runtime, Missing): yield Error(object_path, "is not present at runtime", stub, runtime) return if stub.is_property: # We get here in cases of overloads from property.setter return if ( not isinstance(runtime, (types.FunctionType, types.BuiltinFunctionType)) and not isinstance(runtime, (types.MethodType, types.BuiltinMethodType)) and not inspect.ismethoddescriptor(runtime) ): yield Error(object_path, "is not a function", stub, runtime) 
if not callable(runtime): return for message in _verify_static_class_methods(stub, runtime, object_path): yield Error(object_path, "is inconsistent, " + message, stub, runtime) try: signature = inspect.signature(runtime) except ValueError: return stub_sig = Signature.from_overloadedfuncdef(stub) runtime_sig = Signature.from_inspect_signature(signature) for message in _verify_signature(stub_sig, runtime_sig, function_name=stub.name): # TODO: This is a little hacky, but the addition here is super useful if "has a default value of type" in message: message += ( ". This is often caused by overloads failing to account for explicitly passing " "in the default value." ) yield Error( object_path, "is inconsistent, " + message, stub, runtime, stub_desc=str(stub.type) + "\nInferred signature: {}".format(stub_sig), runtime_desc="def " + str(signature), ) @verify.register(nodes.TypeVarExpr) def verify_typevarexpr( stub: nodes.TypeVarExpr, runtime: MaybeMissing[Any], object_path: List[str] ) -> Iterator[Error]: if False: yield None def _verify_property(stub: nodes.Decorator, runtime: Any) -> Iterator[str]: assert stub.func.is_property if isinstance(runtime, property): return if inspect.isdatadescriptor(runtime): # It's enough like a property... return # Sometimes attributes pretend to be properties, for instance, to express that they # are read only. So allowlist if runtime_type matches the return type of stub. runtime_type = get_mypy_type_of_runtime_value(runtime) func_type = ( stub.func.type.ret_type if isinstance(stub.func.type, mypy.types.CallableType) else None ) if ( runtime_type is not None and func_type is not None and is_subtype_helper(runtime_type, func_type) ): return yield "is inconsistent, cannot reconcile @property on stub with runtime object" def _resolve_funcitem_from_decorator(dec: nodes.OverloadPart) -> Optional[nodes.FuncItem]: """Returns a FuncItem that corresponds to the output of the decorator. Returns None if we can't figure out what that would be. For convenience, this function also accepts FuncItems. """ if isinstance(dec, nodes.FuncItem): return dec if dec.func.is_property: return None def apply_decorator_to_funcitem( decorator: nodes.Expression, func: nodes.FuncItem ) -> Optional[nodes.FuncItem]: if not isinstance(decorator, nodes.RefExpr): return None if decorator.fullname is None: # Happens with namedtuple return None if decorator.fullname in ( "builtins.staticmethod", "typing.overload", "abc.abstractmethod", ): return func if decorator.fullname == "builtins.classmethod": assert func.arguments[0].variable.name in ("cls", "metacls") ret = copy.copy(func) # Remove the cls argument, since it's not present in inspect.signature of classmethods ret.arguments = ret.arguments[1:] return ret # Just give up on any other decorators. After excluding properties, we don't run into # anything else when running on typeshed's stdlib. 
return None func = dec.func # type: nodes.FuncItem for decorator in dec.original_decorators: resulting_func = apply_decorator_to_funcitem(decorator, func) if resulting_func is None: return None func = resulting_func return func @verify.register(nodes.Decorator) def verify_decorator( stub: nodes.Decorator, runtime: MaybeMissing[Any], object_path: List[str] ) -> Iterator[Error]: if isinstance(runtime, Missing): yield Error(object_path, "is not present at runtime", stub, runtime) return if stub.func.is_property: for message in _verify_property(stub, runtime): yield Error(object_path, message, stub, runtime) return func = _resolve_funcitem_from_decorator(stub) if func is not None: yield from verify(func, runtime, object_path) @verify.register(nodes.TypeAlias) def verify_typealias( stub: nodes.TypeAlias, runtime: MaybeMissing[Any], object_path: List[str] ) -> Iterator[Error]: if False: yield None SPECIAL_DUNDERS = ("__init__", "__new__", "__call__", "__init_subclass__", "__class_getitem__") def is_dunder(name: str, exclude_special: bool = False) -> bool: """Returns whether name is a dunder name. :param exclude_special: Whether to return False for a couple special dunder methods. """ if exclude_special and name in SPECIAL_DUNDERS: return False return name.startswith("__") and name.endswith("__") def is_subtype_helper(left: mypy.types.Type, right: mypy.types.Type) -> bool: """Checks whether ``left`` is a subtype of ``right``.""" left = mypy.types.get_proper_type(left) right = mypy.types.get_proper_type(right) if ( isinstance(left, mypy.types.LiteralType) and isinstance(left.value, int) and left.value in (0, 1) and isinstance(right, mypy.types.Instance) and right.type.fullname == "builtins.bool" ): # Pretend Literal[0, 1] is a subtype of bool to avoid unhelpful errors. return True with mypy.state.strict_optional_set(True): return mypy.subtypes.is_subtype(left, right) def get_mypy_type_of_runtime_value(runtime: Any) -> Optional[mypy.types.Type]: """Returns a mypy type object representing the type of ``runtime``. Returns None if we can't find something that works. """ if runtime is None: return mypy.types.NoneType() if isinstance(runtime, property): # Give up on properties to avoid issues with things that are typed as attributes. 
return None def anytype() -> mypy.types.AnyType: return mypy.types.AnyType(mypy.types.TypeOfAny.unannotated) if isinstance( runtime, (types.FunctionType, types.BuiltinFunctionType, types.MethodType, types.BuiltinMethodType) ): builtins = get_stub("builtins") assert builtins is not None type_info = builtins.names["function"].node assert isinstance(type_info, nodes.TypeInfo) fallback = mypy.types.Instance(type_info, [anytype()]) try: signature = inspect.signature(runtime) arg_types = [] arg_kinds = [] arg_names = [] for arg in signature.parameters.values(): arg_types.append(anytype()) arg_names.append( None if arg.kind == inspect.Parameter.POSITIONAL_ONLY else arg.name ) has_default = arg.default == inspect.Parameter.empty if arg.kind == inspect.Parameter.POSITIONAL_ONLY: arg_kinds.append(nodes.ARG_POS if has_default else nodes.ARG_OPT) elif arg.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD: arg_kinds.append(nodes.ARG_POS if has_default else nodes.ARG_OPT) elif arg.kind == inspect.Parameter.KEYWORD_ONLY: arg_kinds.append(nodes.ARG_NAMED if has_default else nodes.ARG_NAMED_OPT) elif arg.kind == inspect.Parameter.VAR_POSITIONAL: arg_kinds.append(nodes.ARG_STAR) elif arg.kind == inspect.Parameter.VAR_KEYWORD: arg_kinds.append(nodes.ARG_STAR2) else: raise AssertionError except ValueError: arg_types = [anytype(), anytype()] arg_kinds = [nodes.ARG_STAR, nodes.ARG_STAR2] arg_names = [None, None] return mypy.types.CallableType( arg_types, arg_kinds, arg_names, ret_type=anytype(), fallback=fallback, is_ellipsis_args=True, ) # Try and look up a stub for the runtime object stub = get_stub(type(runtime).__module__) if stub is None: return None type_name = type(runtime).__name__ if type_name not in stub.names: return None type_info = stub.names[type_name].node if isinstance(type_info, nodes.Var): return type_info.type if not isinstance(type_info, nodes.TypeInfo): return None if isinstance(runtime, tuple): # Special case tuples so we construct a valid mypy.types.TupleType optional_items = [get_mypy_type_of_runtime_value(v) for v in runtime] items = [(i if i is not None else anytype()) for i in optional_items] fallback = mypy.types.Instance(type_info, [anytype()]) return mypy.types.TupleType(items, fallback) fallback = mypy.types.Instance(type_info, [anytype() for _ in type_info.type_vars]) try: # Literals are supposed to be only bool, int, str, bytes or enums, but this seems to work # well (when not using mypyc, for which bytes and enums are also problematic). return mypy.types.LiteralType( value=runtime, fallback=fallback, ) except TypeError: # Ask for forgiveness if we're using mypyc. return fallback _all_stubs = {} # type: Dict[str, nodes.MypyFile] def build_stubs(modules: List[str], options: Options, find_submodules: bool = False) -> List[str]: """Uses mypy to construct stub objects for the given modules. This sets global state that ``get_stub`` can access. Returns all modules we might want to check. If ``find_submodules`` is False, this is equal to ``modules``. :param modules: List of modules to build stubs for. :param options: Mypy options for finding and building stubs. :param find_submodules: Whether to attempt to find submodules of the given modules as well. 
""" data_dir = mypy.build.default_data_dir() search_path = mypy.modulefinder.compute_search_paths([], options, data_dir) find_module_cache = mypy.modulefinder.FindModuleCache( search_path, fscache=None, options=options ) all_modules = [] sources = [] for module in modules: all_modules.append(module) if not find_submodules: module_path = find_module_cache.find_module(module) if not isinstance(module_path, str): # test_module will yield an error later when it can't find stubs continue sources.append(mypy.modulefinder.BuildSource(module_path, module, None)) else: found_sources = find_module_cache.find_modules_recursive(module) sources.extend(found_sources) all_modules.extend(s.module for s in found_sources if s.module not in all_modules) try: res = mypy.build.build(sources=sources, options=options) except mypy.errors.CompileError as e: output = [ _style("error: ", color="red", bold=True), "not checking stubs due to failed mypy compile:\n", str(e), ] print("".join(output)) raise RuntimeError from e if res.errors: output = [ _style("error: ", color="red", bold=True), "not checking stubs due to mypy build errors:\n", ] print("".join(output) + "\n".join(res.errors)) raise RuntimeError global _all_stubs _all_stubs = res.files return all_modules def get_stub(module: str) -> Optional[nodes.MypyFile]: """Returns a stub object for the given module, if we've built one.""" return _all_stubs.get(module) def get_typeshed_stdlib_modules(custom_typeshed_dir: Optional[str]) -> List[str]: """Returns a list of stdlib modules in typeshed (for current Python version).""" stdlib_py_versions = mypy.modulefinder.load_stdlib_py_versions(custom_typeshed_dir) packages = set() # Typeshed doesn't cover Python 3.5. if sys.version_info < (3, 6): version_info = (3, 6) else: version_info = sys.version_info[0:2] for module, versions in stdlib_py_versions.items(): minver, maxver = versions if version_info >= minver and (maxver is None or version_info <= maxver): packages.add(module) if custom_typeshed_dir: typeshed_dir = Path(custom_typeshed_dir) else: typeshed_dir = Path(mypy.build.default_data_dir()) / "typeshed" stdlib_dir = typeshed_dir / "stdlib" modules = [] for path in stdlib_dir.rglob("*.pyi"): if path.stem == "__init__": path = path.parent module = ".".join(path.relative_to(stdlib_dir).parts[:-1] + (path.stem,)) if module.split(".")[0] in packages: modules.append(module) return sorted(modules) def get_allowlist_entries(allowlist_file: str) -> Iterator[str]: def strip_comments(s: str) -> str: try: return s[: s.index("#")].strip() except ValueError: return s.strip() with open(allowlist_file) as f: for line in f.readlines(): entry = strip_comments(line) if entry: yield entry def test_stubs(args: argparse.Namespace, use_builtins_fixtures: bool = False) -> int: """This is stubtest! It's time to test the stubs!""" # Load the allowlist. This is a series of strings corresponding to Error.object_desc # Values in the dict will store whether we used the allowlist entry or not. allowlist = { entry: False for allowlist_file in args.allowlist for entry in get_allowlist_entries(allowlist_file) } allowlist_regexes = {entry: re.compile(entry) for entry in allowlist} # If we need to generate an allowlist, we store Error.object_desc for each error here. 
generated_allowlist = set() modules = args.modules if args.check_typeshed: assert not args.modules, "Cannot pass both --check-typeshed and a list of modules" modules = get_typeshed_stdlib_modules(args.custom_typeshed_dir) annoying_modules = {"antigravity", "this"} modules = [m for m in modules if m not in annoying_modules] assert modules, "No modules to check" options = Options() options.incremental = False options.custom_typeshed_dir = args.custom_typeshed_dir options.config_file = args.mypy_config_file options.use_builtins_fixtures = use_builtins_fixtures if options.config_file: def set_strict_flags() -> None: # not needed yet return parse_config_file(options, set_strict_flags, options.config_file, sys.stdout, sys.stderr) try: modules = build_stubs(modules, options, find_submodules=not args.check_typeshed) except RuntimeError: return 1 exit_code = 0 for module in modules: for error in test_module(module): # Filter errors if args.ignore_missing_stub and error.is_missing_stub(): continue if args.ignore_positional_only and error.is_positional_only_related(): continue if error.object_desc in allowlist: allowlist[error.object_desc] = True continue is_allowlisted = False for w in allowlist: if allowlist_regexes[w].fullmatch(error.object_desc): allowlist[w] = True is_allowlisted = True break if is_allowlisted: continue # We have errors, so change exit code, and output whatever necessary exit_code = 1 if args.generate_allowlist: generated_allowlist.add(error.object_desc) continue print(error.get_description(concise=args.concise)) # Print unused allowlist entries if not args.ignore_unused_allowlist: for w in allowlist: # Don't consider an entry unused if it regex-matches the empty string # This lets us allowlist errors that don't manifest at all on some systems if not allowlist[w] and not allowlist_regexes[w].fullmatch(""): exit_code = 1 print("note: unused allowlist entry {}".format(w)) # Print the generated allowlist if args.generate_allowlist: for e in sorted(generated_allowlist): print(e) exit_code = 0 return exit_code def parse_options(args: List[str]) -> argparse.Namespace: parser = argparse.ArgumentParser( description="Compares stubs to objects introspected from the runtime." ) parser.add_argument("modules", nargs="*", help="Modules to test") parser.add_argument("--concise", action="store_true", help="Make output concise") parser.add_argument( "--ignore-missing-stub", action="store_true", help="Ignore errors for stub missing things that are present at runtime", ) parser.add_argument( "--ignore-positional-only", action="store_true", help="Ignore errors for whether an argument should or shouldn't be positional-only", ) parser.add_argument( "--custom-typeshed-dir", metavar="DIR", help="Use the custom typeshed in DIR" ) parser.add_argument( "--check-typeshed", action="store_true", help="Check all stdlib modules in typeshed" ) parser.add_argument( "--allowlist", "--whitelist", action="append", metavar="FILE", default=[], help=( "Use file as an allowlist. Can be passed multiple times to combine multiple " "allowlists. 
Allowlists can be created with --generate-allowlist" ), ) parser.add_argument( "--generate-allowlist", "--generate-whitelist", action="store_true", help="Print an allowlist (to stdout) to be used with --allowlist", ) parser.add_argument( "--ignore-unused-allowlist", "--ignore-unused-whitelist", action="store_true", help="Ignore unused allowlist entries", ) config_group = parser.add_argument_group( title='mypy config file', description="Use a config file instead of command line arguments. " "Plugins and mypy path are the only supported " "configurations.", ) config_group.add_argument( '--mypy-config-file', help=( "An existing mypy configuration file, currently used by stubtest to help " "determine mypy path and plugins" ), ) return parser.parse_args(args) def main() -> int: mypy.util.check_python_version("stubtest") return test_stubs(parse_options(sys.argv[1:])) if __name__ == "__main__": sys.exit(main())
venv/Lib/site-packages/mypy/stubtest.py
49,711
Marker object for things that are missing (from a stub or the runtime). Represents an error found by stubtest. :param object_path: Location of the object with the error, e.g. ``["module", "Class", "method"]`` :param message: Error message :param stub_object: The mypy node representing the stub :param runtime_object: Actual object obtained from the runtime :param stub_desc: Specialised description for the stub object, should you wish :param runtime_desc: Specialised description for the runtime object, should you wish Returns a FuncItem that corresponds to the output of the decorator. Returns None if we can't figure out what that would be. For convenience, this function also accepts FuncItems. Wrapper around mypy.util for fancy formatting. Checks whether argument default values are compatible. Checks whether argument names match. Uses mypy to construct stub objects for the given modules. This sets global state that ``get_stub`` can access. Returns all modules we might want to check. If ``find_submodules`` is False, this is equal to ``modules``. :param modules: List of modules to build stubs for. :param options: Mypy options for finding and building stubs. :param find_submodules: Whether to attempt to find submodules of the given modules as well. Returns a Signature from an OverloadedFuncDef. If life were simple, to verify_overloadedfuncdef, we'd just verify_funcitem for each of its items. Unfortunately, life isn't simple and overloads are pretty deceitful. So instead, we try and combine the overload's items into a single signature that is compatible with any lies it might try to tell. Returns a description of the error. :param concise: Whether to return a concise, one-line description Returns a mypy type object representing the type of ``runtime``. Returns None if we can't find something that works. Returns a stub object for the given module, if we've built one. Returns a list of stdlib modules in typeshed (for current Python version). Returns whether name is a dunder name. :param exclude_special: Whether to return False for a couple special dunder methods. Whether or not the error is for something missing from the stub. Whether or not the error is for something being (or not being) positional-only. Checks whether ``left`` is a subtype of ``right``. Tests a given module's stub against introspecting it at runtime. Requires the stub to have been built already, accomplished by a call to ``build_stubs``. :param module_name: The module to test This is stubtest! It's time to test the stubs! Entry point for comparing a stub to a runtime object. We use single dispatch based on the type of ``stub``. :param stub: The mypy node representing a part of the stub :param runtime: The runtime object corresponding to ``stub`` Tests for stubs. Verify that various things in stubs are consistent with how things behave at runtime. work around a bug in 3.5.2 and earlier's typing.py type: ignore TODO: This is hacky, use error codes or something more resilient type: None TODO: Find a way of getting the stub file Check things in the stub that are public Ensure that the object's module is `runtime`, e.g. so that we don't pick up reexported modules and infinitely recurse. 
Unfortunately, there's no way to detect an explicit reexport missing from the stubs (that isn't specified in __all__) Check all things declared in module's __all__, falling back to runtime_public_contents Check everything already defined in the stub There's a reasonable case to be made that we should always check all dunders, but it's currently quite noisy. We could turn this into a denylist instead of an allowlist. cast to workaround mypyc complaints Special cased by Python, so don't bother checking The isinstance checks don't work reliably for builtins, e.g. datetime.datetime.now, so do something a little hacky that seems to work well Look the object up statically, to avoid binding by the descriptor protocol This can happen with mangled names, ignore for now. TODO: pass more information about ancestors of nodes/objects to verify, so we don't have to do this hacky lookup. Would be useful in a couple other places too. Ignore exact names for most dunder methods Be more permissive about names matching for positional-only arguments This comes up with namedtuples, so ignore Fallback to the type annotation type if var type is missing. The type annotation is an UnboundType, but I don't know enough to know what the pros and cons here are. UnboundTypes have ugly question marks following them, so default to var type. Note we do this same fallback when constructing signatures in from_overloadedfuncdef Avoid false positives for marker objects These are implicitly classmethods. If the stub chooses not to have @classmethod, we should remove the cls argument type: List[T] type: Dict[str, T] type: Optional[T] type: Optional[T] type: Signature[nodes.Argument] type: Signature[inspect.Parameter] For most dunder methods, just assume all args are positional-only type: Dict[str, List[Tuple[nodes.Argument, int]]] For positional-only args, we allow overloads to have different names for the same argument. To accomplish this, we just make up a fake index-based name. We just need this to return the positional args in the correct order. The logic here is based on two tenets: 1) If an arg is ever optional (or unspecified), it is optional 2) If an arg is ever positional, it is positional type: Signature[nodes.Argument] example_arg_name gives us a real name (in case we had a fake index-based name) Check positional arguments match up noisy for dunder methods Check unmatched positional args There are cases where the stub exhaustively lists out the extra parameters the function would take through *args. Hence, a) we can't check that the runtime actually takes those parameters and b) below, we don't enforce that the stub takes *args, since runtime logic may prevent those arguments from actually being accepted. If the variable is in runtime.kwonly, it's just mislabelled as not a keyword-only argument Checks involving *args Check keyword-only args Check unmatched keyword-only args There are cases where the stub exhaustively lists out the extra parameters the function would take through *kwargs. Hence, a) we only check if the runtime actually takes those parameters when the above condition holds and b) below, we don't enforce that the stub takes *kwargs, since runtime logic may prevent additional arguments from actually being accepted. Don't report this if we've reported it before Checks involving **kwargs As mentioned above, don't enforce that the stub takes **kwargs. 
Also check against positional parameters, to avoid a nitpicky message when an argument isn't marked as keyword-only Ideally we'd do a strict subset check, but in practice the errors from that aren't useful inspect.signature throws sometimes catch RuntimeError because of https://bugs.python.org/issue39504 Don't always yield an error here, because we often can't find instance variables Avoid errors when defining enums, since runtime_type is the enum itself, but we'd annotate it with the type of runtime.value We get here in cases of overloads from property.setter TODO: This is a little hacky, but the addition here is super useful It's enough like a property... Sometimes attributes pretend to be properties, for instance, to express that they are read only. So allowlist if runtime_type matches the return type of stub. Happens with namedtuple Remove the cls argument, since it's not present in inspect.signature of classmethods Just give up on any other decorators. After excluding properties, we don't run into anything else when running on typeshed's stdlib. type: nodes.FuncItem Pretend Literal[0, 1] is a subtype of bool to avoid unhelpful errors. Give up on properties to avoid issues with things that are typed as attributes. Try and look up a stub for the runtime object Special case tuples so we construct a valid mypy.types.TupleType Literals are supposed to be only bool, int, str, bytes or enums, but this seems to work well (when not using mypyc, for which bytes and enums are also problematic). Ask for forgiveness if we're using mypyc. type: Dict[str, nodes.MypyFile] test_module will yield an error later when it can't find stubs Typeshed doesn't cover Python 3.5. Load the allowlist. This is a series of strings corresponding to Error.object_desc Values in the dict will store whether we used the allowlist entry or not. If we need to generate an allowlist, we store Error.object_desc for each error here. not needed yet Filter errors We have errors, so change exit code, and output whatever necessary Print unused allowlist entries Don't consider an entry unused if it regex-matches the empty string This lets us allowlist errors that don't manifest at all on some systems Print the generated allowlist
8,841
en
0.864723
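The stubtest record above documents an allowlist mechanism: entries are Error.object_desc strings, '#' comments are stripped, and each entry is treated as a regex that must fully match an error's object description. Below is a minimal, hedged sketch that re-implements just that matching logic outside of mypy so it can be tried in isolation; the file name "allowlist.txt" and the sample object description are hypothetical, and the sketch folds the exact-match and regex paths of test_stubs into a single fullmatch check.

# Sketch: stand-alone version of stubtest's allowlist matching.
# "allowlist.txt" is a hypothetical path; entries are regexes that must
# fully match an error's object description, as in test_stubs above.
import re
from typing import Dict, Pattern


def load_allowlist(path: str) -> Dict[str, Pattern[str]]:
    entries: Dict[str, Pattern[str]] = {}
    with open(path) as f:
        for line in f:
            entry = line.split("#", 1)[0].strip()  # drop trailing comments
            if entry:
                entries[entry] = re.compile(entry)
    return entries


def is_allowlisted(object_desc: str, entries: Dict[str, Pattern[str]]) -> bool:
    return any(pattern.fullmatch(object_desc) for pattern in entries.values())


if __name__ == "__main__":
    allow = load_allowlist("allowlist.txt")  # hypothetical file
    print(is_allowlisted("os.path.samefile", allow))

When mypy itself is installed, the full pipeline runs end to end with: python -m mypy.stubtest <module> --allowlist allowlist.txt, which is the entry point defined by main() in the file above.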
from __future__ import annotations from datetime import ( datetime, timedelta, ) from typing import Hashable import warnings import numpy as np from pandas._libs import ( index as libindex, lib, ) from pandas._libs.tslibs import ( BaseOffset, NaT, Period, Resolution, Tick, ) from pandas._libs.tslibs.parsing import ( DateParseError, parse_time_string, ) from pandas._typing import ( Dtype, DtypeObj, ) from pandas.errors import InvalidIndexError from pandas.util._decorators import doc from pandas.core.dtypes.common import ( is_datetime64_any_dtype, is_integer, is_scalar, pandas_dtype, ) from pandas.core.dtypes.dtypes import PeriodDtype from pandas.core.dtypes.missing import is_valid_na_for_dtype from pandas.core.arrays.period import ( PeriodArray, period_array, raise_on_incompatible, validate_dtype_freq, ) import pandas.core.common as com import pandas.core.indexes.base as ibase from pandas.core.indexes.base import maybe_extract_name from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin from pandas.core.indexes.datetimes import ( DatetimeIndex, Index, ) from pandas.core.indexes.extension import inherit_names from pandas.core.indexes.numeric import Int64Index _index_doc_kwargs = dict(ibase._index_doc_kwargs) _index_doc_kwargs.update({"target_klass": "PeriodIndex or list of Periods"}) _shared_doc_kwargs = { "klass": "PeriodArray", } # --- Period index sketch def _new_PeriodIndex(cls, **d): # GH13277 for unpickling values = d.pop("data") if values.dtype == "int64": freq = d.pop("freq", None) values = PeriodArray(values, freq=freq) return cls._simple_new(values, **d) else: return cls(values, **d) @inherit_names( ["strftime", "start_time", "end_time"] + PeriodArray._field_ops, PeriodArray, wrap=True, ) @inherit_names(["is_leap_year", "_format_native_types"], PeriodArray) class PeriodIndex(DatetimeIndexOpsMixin): """ Immutable ndarray holding ordinal values indicating regular periods in time. Index keys are boxed to Period objects which carries the metadata (eg, frequency information). Parameters ---------- data : array-like (1d int np.ndarray or PeriodArray), optional Optional period-like data to construct index with. copy : bool Make a copy of input ndarray. freq : str or period object, optional One of pandas period strings or corresponding objects. year : int, array, or Series, default None month : int, array, or Series, default None quarter : int, array, or Series, default None day : int, array, or Series, default None hour : int, array, or Series, default None minute : int, array, or Series, default None second : int, array, or Series, default None dtype : str or PeriodDtype, default None Attributes ---------- day dayofweek day_of_week dayofyear day_of_year days_in_month daysinmonth end_time freq freqstr hour is_leap_year minute month quarter qyear second start_time week weekday weekofyear year Methods ------- asfreq strftime to_timestamp See Also -------- Index : The base pandas Index type. Period : Represents a period of time. DatetimeIndex : Index with datetime64 data. TimedeltaIndex : Index of timedelta64 data. period_range : Create a fixed-frequency PeriodIndex. 
Examples -------- >>> idx = pd.PeriodIndex(year=[2000, 2002], quarter=[1, 3]) >>> idx PeriodIndex(['2000Q1', '2002Q3'], dtype='period[Q-DEC]') """ _typ = "periodindex" _attributes = ["name"] _data: PeriodArray freq: BaseOffset _data_cls = PeriodArray _engine_type = libindex.PeriodEngine _supports_partial_string_indexing = True # -------------------------------------------------------------------- # methods that dispatch to array and wrap result in Index # These are defined here instead of via inherit_names for mypy @doc( PeriodArray.asfreq, other="pandas.arrays.PeriodArray", other_name="PeriodArray", **_shared_doc_kwargs, ) def asfreq(self, freq=None, how: str = "E") -> PeriodIndex: arr = self._data.asfreq(freq, how) return type(self)._simple_new(arr, name=self.name) @doc(PeriodArray.to_timestamp) def to_timestamp(self, freq=None, how="start") -> DatetimeIndex: arr = self._data.to_timestamp(freq, how) return DatetimeIndex._simple_new(arr, name=self.name) # https://github.com/python/mypy/issues/1362 # error: Decorated property not supported @property # type:ignore[misc] @doc(PeriodArray.hour.fget) def hour(self) -> Int64Index: return Int64Index(self._data.hour, name=self.name) # https://github.com/python/mypy/issues/1362 # error: Decorated property not supported @property # type:ignore[misc] @doc(PeriodArray.minute.fget) def minute(self) -> Int64Index: return Int64Index(self._data.minute, name=self.name) # https://github.com/python/mypy/issues/1362 # error: Decorated property not supported @property # type:ignore[misc] @doc(PeriodArray.second.fget) def second(self) -> Int64Index: return Int64Index(self._data.second, name=self.name) # ------------------------------------------------------------------------ # Index Constructors def __new__( cls, data=None, ordinal=None, freq=None, dtype: Dtype | None = None, copy: bool = False, name: Hashable = None, **fields, ) -> PeriodIndex: valid_field_set = { "year", "month", "day", "quarter", "hour", "minute", "second", } if not set(fields).issubset(valid_field_set): argument = list(set(fields) - valid_field_set)[0] raise TypeError(f"__new__() got an unexpected keyword argument {argument}") name = maybe_extract_name(name, data, cls) if data is None and ordinal is None: # range-based. data, freq2 = PeriodArray._generate_range(None, None, None, freq, fields) # PeriodArray._generate range does validation that fields is # empty when really using the range-based constructor. freq = freq2 data = PeriodArray(data, freq=freq) else: freq = validate_dtype_freq(dtype, freq) # PeriodIndex allow PeriodIndex(period_index, freq=different) # Let's not encourage that kind of behavior in PeriodArray. if freq and isinstance(data, cls) and data.freq != freq: # TODO: We can do some of these with no-copy / coercion? # e.g. D -> 2D seems to be OK data = data.asfreq(freq) if data is None and ordinal is not None: # we strangely ignore `ordinal` if data is passed. ordinal = np.asarray(ordinal, dtype=np.int64) data = PeriodArray(ordinal, freq=freq) else: # don't pass copy here, since we copy later. 
data = period_array(data=data, freq=freq) if copy: data = data.copy() return cls._simple_new(data, name=name) # ------------------------------------------------------------------------ # Data @property def values(self) -> np.ndarray: return np.asarray(self, dtype=object) def _maybe_convert_timedelta(self, other): """ Convert timedelta-like input to an integer multiple of self.freq Parameters ---------- other : timedelta, np.timedelta64, DateOffset, int, np.ndarray Returns ------- converted : int, np.ndarray[int64] Raises ------ IncompatibleFrequency : if the input cannot be written as a multiple of self.freq. Note IncompatibleFrequency subclasses ValueError. """ if isinstance(other, (timedelta, np.timedelta64, Tick, np.ndarray)): if isinstance(self.freq, Tick): # _check_timedeltalike_freq_compat will raise if incompatible delta = self._data._check_timedeltalike_freq_compat(other) return delta elif isinstance(other, BaseOffset): if other.base == self.freq.base: return other.n raise raise_on_incompatible(self, other) elif is_integer(other): # integer is passed to .shift via # _add_datetimelike_methods basically # but ufunc may pass integer to _add_delta return other # raise when input doesn't have freq raise raise_on_incompatible(self, None) def _is_comparable_dtype(self, dtype: DtypeObj) -> bool: """ Can we compare values of the given dtype to our own? """ if not isinstance(dtype, PeriodDtype): return False return dtype.freq == self.freq # ------------------------------------------------------------------------ # Index Methods def asof_locs(self, where: Index, mask: np.ndarray) -> np.ndarray: """ where : array of timestamps mask : np.ndarray[bool] Array of booleans where data is not NA. """ if isinstance(where, DatetimeIndex): where = PeriodIndex(where._values, freq=self.freq) elif not isinstance(where, PeriodIndex): raise TypeError("asof_locs `where` must be DatetimeIndex or PeriodIndex") return super().asof_locs(where, mask) @doc(Index.astype) def astype(self, dtype, copy: bool = True, how=lib.no_default): dtype = pandas_dtype(dtype) if how is not lib.no_default: # GH#37982 warnings.warn( "The 'how' keyword in PeriodIndex.astype is deprecated and " "will be removed in a future version. " "Use index.to_timestamp(how=how) instead", FutureWarning, stacklevel=2, ) else: how = "start" if is_datetime64_any_dtype(dtype): # 'how' is index-specific, isn't part of the EA interface. tz = getattr(dtype, "tz", None) return self.to_timestamp(how=how).tz_localize(tz) return super().astype(dtype, copy=copy) @property def is_full(self) -> bool: """ Returns True if this PeriodIndex is range-like in that all Periods between start and end are present, in order. """ if len(self) == 0: return True if not self.is_monotonic_increasing: raise ValueError("Index is not monotonic") values = self.asi8 return ((values[1:] - values[:-1]) < 2).all() @property def inferred_type(self) -> str: # b/c data is represented as ints make sure we can't have ambiguous # indexing return "period" # ------------------------------------------------------------------------ # Indexing Methods def _convert_tolerance(self, tolerance, target): # Returned tolerance must be in dtype/units so that # `|self._get_engine_target() - target._engine_target()| <= tolerance` # is meaningful. Since PeriodIndex returns int64 for engine_target, # we may need to convert timedelta64 tolerance to int64. 
tolerance = super()._convert_tolerance(tolerance, target) if self.dtype == target.dtype: # convert tolerance to i8 tolerance = self._maybe_convert_timedelta(tolerance) return tolerance def get_loc(self, key, method=None, tolerance=None): """ Get integer location for requested label. Parameters ---------- key : Period, NaT, str, or datetime String or datetime key must be parsable as Period. Returns ------- loc : int or ndarray[int64] Raises ------ KeyError Key is not present in the index. TypeError If key is listlike or otherwise not hashable. """ orig_key = key if not is_scalar(key): raise InvalidIndexError(key) if is_valid_na_for_dtype(key, self.dtype): key = NaT elif isinstance(key, str): try: loc = self._get_string_slice(key) return loc except (TypeError, ValueError): pass try: asdt, reso_str = parse_time_string(key, self.freq) except (ValueError, DateParseError) as err: # A string with invalid format raise KeyError(f"Cannot interpret '{key}' as period") from err reso = Resolution.from_attrname(reso_str) grp = reso.freq_group.value freqn = self.dtype.freq_group_code # _get_string_slice will handle cases where grp < freqn assert grp >= freqn # BusinessDay is a bit strange. It has a *lower* code, but we never parse # a string as "BusinessDay" resolution, just Day. if grp == freqn or ( reso == Resolution.RESO_DAY and self.dtype.freq.name == "B" ): key = Period(asdt, freq=self.freq) loc = self.get_loc(key, method=method, tolerance=tolerance) return loc elif method is None: raise KeyError(key) else: key = asdt elif isinstance(key, Period): sfreq = self.freq kfreq = key.freq if not ( sfreq.n == kfreq.n and sfreq._period_dtype_code == kfreq._period_dtype_code ): # GH#42247 For the subset of DateOffsets that can be Period freqs, # checking these two attributes is sufficient to check equality, # and much more performant than `self.freq == key.freq` raise KeyError(key) elif isinstance(key, datetime): try: key = Period(key, freq=self.freq) except ValueError as err: # we cannot construct the Period raise KeyError(orig_key) from err else: # in particular integer, which Period constructor would cast to string raise KeyError(key) try: key = Period(key, freq=self.freq) except ValueError as err: # we cannot construct the Period raise KeyError(orig_key) from err try: return Index.get_loc(self, key, method, tolerance) except KeyError as err: raise KeyError(orig_key) from err def _maybe_cast_slice_bound(self, label, side: str, kind=lib.no_default): """ If label is a string or a datetime, cast it to Period.ordinal according to resolution. Parameters ---------- label : object side : {'left', 'right'} kind : {'loc', 'getitem'}, or None Returns ------- bound : Period or object Notes ----- Value of `side` parameter should be validated in caller. 
""" assert kind in ["loc", "getitem", None, lib.no_default] self._deprecated_arg(kind, "kind", "_maybe_cast_slice_bound") if isinstance(label, datetime): return Period(label, freq=self.freq) elif isinstance(label, str): try: parsed, reso_str = parse_time_string(label, self.freq) except ValueError as err: # string cannot be parsed as datetime-like raise self._invalid_indexer("slice", label) from err reso = Resolution.from_attrname(reso_str) lower, upper = self._parsed_string_to_bounds(reso, parsed) return lower if side == "left" else upper elif not isinstance(label, self._data._recognized_scalars): raise self._invalid_indexer("slice", label) return label def _parsed_string_to_bounds(self, reso: Resolution, parsed: datetime): grp = reso.freq_group iv = Period(parsed, freq=grp.value) return (iv.asfreq(self.freq, how="start"), iv.asfreq(self.freq, how="end")) def _validate_partial_date_slice(self, reso: Resolution): assert isinstance(reso, Resolution), (type(reso), reso) grp = reso.freq_group freqn = self.dtype.freq_group_code if not grp.value < freqn: # TODO: we used to also check for # reso in ["day", "hour", "minute", "second"] # why is that check not needed? raise ValueError def _get_string_slice(self, key: str): parsed, reso_str = parse_time_string(key, self.freq) reso = Resolution.from_attrname(reso_str) try: return self._partial_date_slice(reso, parsed) except KeyError as err: raise KeyError(key) from err def period_range( start=None, end=None, periods: int | None = None, freq=None, name=None ) -> PeriodIndex: """ Return a fixed frequency PeriodIndex. The day (calendar) is the default frequency. Parameters ---------- start : str or period-like, default None Left bound for generating periods. end : str or period-like, default None Right bound for generating periods. periods : int, default None Number of periods to generate. freq : str or DateOffset, optional Frequency alias. By default the freq is taken from `start` or `end` if those are Period objects. Otherwise, the default is ``"D"`` for daily frequency. name : str, default None Name of the resulting PeriodIndex. Returns ------- PeriodIndex Notes ----- Of the three parameters: ``start``, ``end``, and ``periods``, exactly two must be specified. To learn more about the frequency strings, please see `this link <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__. Examples -------- >>> pd.period_range(start='2017-01-01', end='2018-01-01', freq='M') PeriodIndex(['2017-01', '2017-02', '2017-03', '2017-04', '2017-05', '2017-06', '2017-07', '2017-08', '2017-09', '2017-10', '2017-11', '2017-12', '2018-01'], dtype='period[M]') If ``start`` or ``end`` are ``Period`` objects, they will be used as anchor endpoints for a ``PeriodIndex`` with frequency matching that of the ``period_range`` constructor. >>> pd.period_range(start=pd.Period('2017Q1', freq='Q'), ... end=pd.Period('2017Q2', freq='Q'), freq='M') PeriodIndex(['2017-03', '2017-04', '2017-05', '2017-06'], dtype='period[M]') """ if com.count_not_none(start, end, periods) != 2: raise ValueError( "Of the three parameters: start, end, and periods, " "exactly two must be specified" ) if freq is None and (not isinstance(start, Period) and not isinstance(end, Period)): freq = "D" data, freq = PeriodArray._generate_range(start, end, periods, freq, fields={}) data = PeriodArray(data, freq=freq) return PeriodIndex(data, name=name)
env/Lib/site-packages/pandas/core/indexes/period.py
19,698
Immutable ndarray holding ordinal values indicating regular periods in time. Index keys are boxed to Period objects which carries the metadata (eg, frequency information). Parameters ---------- data : array-like (1d int np.ndarray or PeriodArray), optional Optional period-like data to construct index with. copy : bool Make a copy of input ndarray. freq : str or period object, optional One of pandas period strings or corresponding objects. year : int, array, or Series, default None month : int, array, or Series, default None quarter : int, array, or Series, default None day : int, array, or Series, default None hour : int, array, or Series, default None minute : int, array, or Series, default None second : int, array, or Series, default None dtype : str or PeriodDtype, default None Attributes ---------- day dayofweek day_of_week dayofyear day_of_year days_in_month daysinmonth end_time freq freqstr hour is_leap_year minute month quarter qyear second start_time week weekday weekofyear year Methods ------- asfreq strftime to_timestamp See Also -------- Index : The base pandas Index type. Period : Represents a period of time. DatetimeIndex : Index with datetime64 data. TimedeltaIndex : Index of timedelta64 data. period_range : Create a fixed-frequency PeriodIndex. Examples -------- >>> idx = pd.PeriodIndex(year=[2000, 2002], quarter=[1, 3]) >>> idx PeriodIndex(['2000Q1', '2002Q3'], dtype='period[Q-DEC]') Can we compare values of the given dtype to our own? If label is a string or a datetime, cast it to Period.ordinal according to resolution. Parameters ---------- label : object side : {'left', 'right'} kind : {'loc', 'getitem'}, or None Returns ------- bound : Period or object Notes ----- Value of `side` parameter should be validated in caller. Convert timedelta-like input to an integer multiple of self.freq Parameters ---------- other : timedelta, np.timedelta64, DateOffset, int, np.ndarray Returns ------- converted : int, np.ndarray[int64] Raises ------ IncompatibleFrequency : if the input cannot be written as a multiple of self.freq. Note IncompatibleFrequency subclasses ValueError. where : array of timestamps mask : np.ndarray[bool] Array of booleans where data is not NA. Get integer location for requested label. Parameters ---------- key : Period, NaT, str, or datetime String or datetime key must be parsable as Period. Returns ------- loc : int or ndarray[int64] Raises ------ KeyError Key is not present in the index. TypeError If key is listlike or otherwise not hashable. Returns True if this PeriodIndex is range-like in that all Periods between start and end are present, in order. Return a fixed frequency PeriodIndex. The day (calendar) is the default frequency. Parameters ---------- start : str or period-like, default None Left bound for generating periods. end : str or period-like, default None Right bound for generating periods. periods : int, default None Number of periods to generate. freq : str or DateOffset, optional Frequency alias. By default the freq is taken from `start` or `end` if those are Period objects. Otherwise, the default is ``"D"`` for daily frequency. name : str, default None Name of the resulting PeriodIndex. Returns ------- PeriodIndex Notes ----- Of the three parameters: ``start``, ``end``, and ``periods``, exactly two must be specified. To learn more about the frequency strings, please see `this link <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__. 
Examples -------- >>> pd.period_range(start='2017-01-01', end='2018-01-01', freq='M') PeriodIndex(['2017-01', '2017-02', '2017-03', '2017-04', '2017-05', '2017-06', '2017-07', '2017-08', '2017-09', '2017-10', '2017-11', '2017-12', '2018-01'], dtype='period[M]') If ``start`` or ``end`` are ``Period`` objects, they will be used as anchor endpoints for a ``PeriodIndex`` with frequency matching that of the ``period_range`` constructor. >>> pd.period_range(start=pd.Period('2017Q1', freq='Q'), ... end=pd.Period('2017Q2', freq='Q'), freq='M') PeriodIndex(['2017-03', '2017-04', '2017-05', '2017-06'], dtype='period[M]') --- Period index sketch GH13277 for unpickling -------------------------------------------------------------------- methods that dispatch to array and wrap result in Index These are defined here instead of via inherit_names for mypy https://github.com/python/mypy/issues/1362 error: Decorated property not supported type:ignore[misc] https://github.com/python/mypy/issues/1362 error: Decorated property not supported type:ignore[misc] https://github.com/python/mypy/issues/1362 error: Decorated property not supported type:ignore[misc] ------------------------------------------------------------------------ Index Constructors range-based. PeriodArray._generate range does validation that fields is empty when really using the range-based constructor. PeriodIndex allow PeriodIndex(period_index, freq=different) Let's not encourage that kind of behavior in PeriodArray. TODO: We can do some of these with no-copy / coercion? e.g. D -> 2D seems to be OK we strangely ignore `ordinal` if data is passed. don't pass copy here, since we copy later. ------------------------------------------------------------------------ Data _check_timedeltalike_freq_compat will raise if incompatible integer is passed to .shift via _add_datetimelike_methods basically but ufunc may pass integer to _add_delta raise when input doesn't have freq ------------------------------------------------------------------------ Index Methods GH37982 'how' is index-specific, isn't part of the EA interface. b/c data is represented as ints make sure we can't have ambiguous indexing ------------------------------------------------------------------------ Indexing Methods Returned tolerance must be in dtype/units so that `|self._get_engine_target() - target._engine_target()| <= tolerance` is meaningful. Since PeriodIndex returns int64 for engine_target, we may need to convert timedelta64 tolerance to int64. convert tolerance to i8 A string with invalid format _get_string_slice will handle cases where grp < freqn BusinessDay is a bit strange. It has a *lower* code, but we never parse a string as "BusinessDay" resolution, just Day. GH42247 For the subset of DateOffsets that can be Period freqs, checking these two attributes is sufficient to check equality, and much more performant than `self.freq == key.freq` we cannot construct the Period in particular integer, which Period constructor would cast to string we cannot construct the Period string cannot be parsed as datetime-like TODO: we used to also check for reso in ["day", "hour", "minute", "second"] why is that check not needed?
6,819
en
0.590012
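The PeriodIndex record above documents period_range, asfreq, to_timestamp and is_full. The short sketch below exercises only that documented surface with arbitrary dates; it is a usage illustration, not part of the pandas source.

# Sketch: exercising the PeriodIndex API documented in period.py above.
# Dates are arbitrary; behaviour follows the docstrings in the record.
import pandas as pd

idx = pd.period_range(start="2017-01", end="2017-06", freq="M")
print(idx)                            # monthly periods Jan..Jun 2017

print(idx.asfreq("Q"))                # dispatches to PeriodArray.asfreq
print(idx.to_timestamp(how="start"))  # DatetimeIndex of period start times
print(idx.is_full)                    # True: no missing periods between start and end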
from __future__ import annotations import re from typing import Callable, ClassVar, List, Optional, Pattern, Sequence, Tuple, Union, cast import discord from discord.ext import commands _ID_RE = re.compile(r"([0-9]{15,21})$") _USER_MENTION_RE = re.compile(r"<@!?([0-9]{15,21})>$") _CHAN_MENTION_RE = re.compile(r"<#([0-9]{15,21})>$") _ROLE_MENTION_RE = re.compile(r"<@&([0-9]{15,21})>$") class MessagePredicate(Callable[[discord.Message], bool]): def __init__(self, predicate: Callable[["MessagePredicate", discord.Message], bool]) -> None: self._pred: Callable[["MessagePredicate", discord.Message], bool] = predicate self.result = None def __call__(self, message: discord.Message) -> bool: return self._pred(self, message) @classmethod def same_context( cls, ctx: Optional[commands.Context] = None, channel: Optional[discord.TextChannel] = None, user: Optional[discord.abc.User] = None, ) -> "MessagePredicate": if ctx is not None: channel = channel or ctx.channel user = user or ctx.author return cls( lambda self, m: (user is None or user.id == m.author.id) and (channel is None or channel.id == m.channel.id) ) @classmethod def cancelled( cls, ctx: Optional[commands.Context] = None, channel: Optional[discord.TextChannel] = None, user: Optional[discord.abc.User] = None, ) -> "MessagePredicate": same_context = cls.same_context(ctx, channel, user) return cls( lambda self, m: (same_context(m) and m.content.lower() == f"{ctx.prefix}cancel") ) @classmethod def yes_or_no( cls, ctx: Optional[commands.Context] = None, channel: Optional[discord.TextChannel] = None, user: Optional[discord.abc.User] = None, ) -> "MessagePredicate": same_context = cls.same_context(ctx, channel, user) def predicate(self: MessagePredicate, m: discord.Message) -> bool: if not same_context(m): return False content = m.content.lower() if content in ("yes", "y"): self.result = True elif content in ("no", "n"): self.result = False else: return False return True return cls(predicate) @classmethod def valid_int( cls, ctx: Optional[commands.Context] = None, channel: Optional[discord.TextChannel] = None, user: Optional[discord.abc.User] = None, ) -> "MessagePredicate": same_context = cls.same_context(ctx, channel, user) def predicate(self: MessagePredicate, m: discord.Message) -> bool: if not same_context(m): return False try: self.result = int(m.content) except ValueError: return False else: return True return cls(predicate) @classmethod def valid_float( cls, ctx: Optional[commands.Context] = None, channel: Optional[discord.TextChannel] = None, user: Optional[discord.abc.User] = None, ) -> "MessagePredicate": same_context = cls.same_context(ctx, channel, user) def predicate(self: MessagePredicate, m: discord.Message) -> bool: if not same_context(m): return False try: self.result = float(m.content) except ValueError: return False else: return True return cls(predicate) @classmethod def positive( cls, ctx: Optional[commands.Context] = None, channel: Optional[discord.TextChannel] = None, user: Optional[discord.abc.User] = None, ) -> "MessagePredicate": same_context = cls.same_context(ctx, channel, user) def predicate(self: MessagePredicate, m: discord.Message) -> bool: if not same_context(m): return False try: number = float(m.content) except ValueError: return False else: if number > 0: self.result = number return True else: return False return cls(predicate) @classmethod def valid_role( cls, ctx: Optional[commands.Context] = None, channel: Optional[discord.TextChannel] = None, user: Optional[discord.abc.User] = None, ) -> "MessagePredicate": 
same_context = cls.same_context(ctx, channel, user) guild = cls._get_guild(ctx, channel, cast(discord.Member, user)) def predicate(self: MessagePredicate, m: discord.Message) -> bool: if not same_context(m): return False role = self._find_role(guild, m.content) if role is None: return False self.result = role return True return cls(predicate) @classmethod def valid_member( cls, ctx: Optional[commands.Context] = None, channel: Optional[discord.TextChannel] = None, user: Optional[discord.abc.User] = None, ) -> "MessagePredicate": same_context = cls.same_context(ctx, channel, user) guild = cls._get_guild(ctx, channel, cast(discord.Member, user)) def predicate(self: MessagePredicate, m: discord.Message) -> bool: if not same_context(m): return False match = _ID_RE.match(m.content) or _USER_MENTION_RE.match(m.content) if match: result = guild.get_member(int(match.group(1))) else: result = guild.get_member_named(m.content) if result is None: return False self.result = result return True return cls(predicate) @classmethod def valid_text_channel( cls, ctx: Optional[commands.Context] = None, channel: Optional[discord.TextChannel] = None, user: Optional[discord.abc.User] = None, ) -> "MessagePredicate": same_context = cls.same_context(ctx, channel, user) guild = cls._get_guild(ctx, channel, cast(discord.Member, user)) def predicate(self: MessagePredicate, m: discord.Message) -> bool: if not same_context(m): return False match = _ID_RE.match(m.content) or _CHAN_MENTION_RE.match(m.content) if match: result = guild.get_channel(int(match.group(1))) else: result = discord.utils.get(guild.text_channels, name=m.content) if not isinstance(result, discord.TextChannel): return False self.result = result return True return cls(predicate) @classmethod def has_role( cls, ctx: Optional[commands.Context] = None, channel: Optional[discord.TextChannel] = None, user: Optional[discord.abc.User] = None, ) -> "MessagePredicate": same_context = cls.same_context(ctx, channel, user) guild = cls._get_guild(ctx, channel, cast(discord.Member, user)) if user is None: if ctx is None: raise TypeError( "One of `user` or `ctx` must be supplied to `MessagePredicate.has_role`." 
) user = ctx.author def predicate(self: MessagePredicate, m: discord.Message) -> bool: if not same_context(m): return False role = self._find_role(guild, m.content) if role is None or role not in user.roles: return False self.result = role return True return cls(predicate) @classmethod def equal_to( cls, value: str, ctx: Optional[commands.Context] = None, channel: Optional[discord.TextChannel] = None, user: Optional[discord.abc.User] = None, ) -> "MessagePredicate": same_context = cls.same_context(ctx, channel, user) return cls(lambda self, m: same_context(m) and m.content == value) @classmethod def lower_equal_to( cls, value: str, ctx: Optional[commands.Context] = None, channel: Optional[discord.TextChannel] = None, user: Optional[discord.abc.User] = None, ) -> "MessagePredicate": same_context = cls.same_context(ctx, channel, user) return cls(lambda self, m: same_context(m) and m.content.lower() == value) @classmethod def less( cls, value: Union[int, float], ctx: Optional[commands.Context] = None, channel: Optional[discord.TextChannel] = None, user: Optional[discord.abc.User] = None, ) -> "MessagePredicate": valid_int = cls.valid_int(ctx, channel, user) valid_float = cls.valid_float(ctx, channel, user) return cls(lambda self, m: (valid_int(m) or valid_float(m)) and float(m.content) < value) @classmethod def greater( cls, value: Union[int, float], ctx: Optional[commands.Context] = None, channel: Optional[discord.TextChannel] = None, user: Optional[discord.abc.User] = None, ) -> "MessagePredicate": valid_int = cls.valid_int(ctx, channel, user) valid_float = cls.valid_float(ctx, channel, user) return cls(lambda self, m: (valid_int(m) or valid_float(m)) and float(m.content) > value) @classmethod def length_less( cls, length: int, ctx: Optional[commands.Context] = None, channel: Optional[discord.TextChannel] = None, user: Optional[discord.abc.User] = None, ) -> "MessagePredicate": same_context = cls.same_context(ctx, channel, user) return cls(lambda self, m: same_context(m) and len(m.content) <= length) @classmethod def length_greater( cls, length: int, ctx: Optional[commands.Context] = None, channel: Optional[discord.TextChannel] = None, user: Optional[discord.abc.User] = None, ) -> "MessagePredicate": same_context = cls.same_context(ctx, channel, user) return cls(lambda self, m: same_context(m) and len(m.content) >= length) @classmethod def contained_in( cls, collection: Sequence[str], ctx: Optional[commands.Context] = None, channel: Optional[discord.TextChannel] = None, user: Optional[discord.abc.User] = None, ) -> "MessagePredicate": same_context = cls.same_context(ctx, channel, user) def predicate(self: MessagePredicate, m: discord.Message) -> bool: if not same_context(m): return False try: self.result = collection.index(m.content) except ValueError: return False else: return True return cls(predicate) @classmethod def lower_contained_in( cls, collection: Sequence[str], ctx: Optional[commands.Context] = None, channel: Optional[discord.TextChannel] = None, user: Optional[discord.abc.User] = None, ) -> "MessagePredicate": same_context = cls.same_context(ctx, channel, user) def predicate(self: MessagePredicate, m: discord.Message) -> bool: if not same_context(m): return False try: self.result = collection.index(m.content.lower()) except ValueError: return False else: return True return cls(predicate) @classmethod def regex( cls, pattern: Union[Pattern[str], str], ctx: Optional[commands.Context] = None, channel: Optional[discord.TextChannel] = None, user: Optional[discord.abc.User] = None, ) 
-> "MessagePredicate": same_context = cls.same_context(ctx, channel, user) def predicate(self: MessagePredicate, m: discord.Message) -> bool: if not same_context(m): return False if isinstance(pattern, str): pattern_obj = re.compile(pattern) else: pattern_obj = pattern match = pattern_obj.search(m.content) if match: self.result = match return True return False return cls(predicate) @staticmethod def _find_role(guild: discord.Guild, argument: str) -> Optional[discord.Role]: match = _ID_RE.match(argument) or _ROLE_MENTION_RE.match(argument) if match: result = guild.get_role(int(match.group(1))) else: result = discord.utils.get(guild.roles, name=argument) return result @staticmethod def _get_guild( ctx: commands.Context, channel: discord.TextChannel, user: discord.Member ) -> discord.Guild: if ctx is not None: return ctx.guild elif channel is not None: return channel.guild elif user is not None: return user.guild class ReactionPredicate(Callable[[discord.Reaction, discord.abc.User], bool]): YES_OR_NO_EMOJIS: ClassVar[Tuple[str, str]] = ( "\N{WHITE HEAVY CHECK MARK}", "\N{NEGATIVE SQUARED CROSS MARK}", ) """Tuple[str, str] : A tuple containing the tick emoji and cross emoji, in that order.""" ALPHABET_EMOJIS: ClassVar[List[str]] = [ chr(code) for code in range( ord("\N{REGIONAL INDICATOR SYMBOL LETTER A}"), ord("\N{REGIONAL INDICATOR SYMBOL LETTER Z}") + 1, ) ] """List[str] : A list of all 26 alphabetical letter emojis.""" NUMBER_EMOJIS: ClassVar[List[str]] = [ chr(code) + "\N{COMBINING ENCLOSING KEYCAP}" for code in range(ord("0"), ord("9") + 1) ] """List[str] : A list of all single-digit number emojis, 0 through 9.""" def __init__( self, predicate: Callable[["ReactionPredicate", discord.Reaction, discord.abc.User], bool] ) -> None: self._pred: Callable[ ["ReactionPredicate", discord.Reaction, discord.abc.User], bool ] = predicate self.result = None def __call__(self, reaction: discord.Reaction, user: discord.abc.User) -> bool: return self._pred(self, reaction, user) # noinspection PyUnusedLocal @classmethod def same_context( cls, message: Optional[discord.Message] = None, user: Optional[discord.abc.User] = None ) -> "ReactionPredicate": # noinspection PyProtectedMember me_id = message._state.self_id return cls( lambda self, r, u: u.id != me_id and (message is None or r.message.id == message.id) and (user is None or u.id == user.id) ) @classmethod def with_emojis( cls, emojis: Sequence[Union[str, discord.Emoji, discord.PartialEmoji]], message: Optional[discord.Message] = None, user: Optional[discord.abc.User] = None, ) -> "ReactionPredicate": same_context = cls.same_context(message, user) def predicate(self: ReactionPredicate, r: discord.Reaction, u: discord.abc.User): if not same_context(r, u): return False try: self.result = emojis.index(r.emoji) except ValueError: return False else: return True return cls(predicate) @classmethod def yes_or_no( cls, message: Optional[discord.Message] = None, user: Optional[discord.abc.User] = None ) -> "ReactionPredicate": same_context = cls.same_context(message, user) def predicate(self: ReactionPredicate, r: discord.Reaction, u: discord.abc.User) -> bool: if not same_context(r, u): return False try: self.result = not bool(self.YES_OR_NO_EMOJIS.index(r.emoji)) except ValueError: return False else: return True return cls(predicate)
bot/utils/messagepredicate.py
16,371
noinspection PyUnusedLocal noinspection PyProtectedMember
57
en
0.16882
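The predicate classes in the record above are built to be passed as the check argument of discord.py's wait_for. The sketch below shows one hedged way to consume MessagePredicate.yes_or_no from a command; the cog, command name and timeout are illustrative, and the import path simply mirrors bot/utils/messagepredicate.py from the record.

# Sketch: using MessagePredicate.yes_or_no as a wait_for check.
# The command scaffolding is illustrative; only the predicate comes from
# the file above (bot/utils/messagepredicate.py).
import asyncio

from discord.ext import commands

from bot.utils.messagepredicate import MessagePredicate


class Confirm(commands.Cog):
    @commands.command()
    async def wipe(self, ctx: commands.Context) -> None:
        await ctx.send("Really wipe the data? (yes/no)")
        pred = MessagePredicate.yes_or_no(ctx)
        try:
            await ctx.bot.wait_for("message", check=pred, timeout=30)
        except asyncio.TimeoutError:
            await ctx.send("Timed out, leaving the data alone.")
            return
        # pred.result was set by the predicate: True for yes/y, False for no/n
        await ctx.send("Wiping." if pred.result else "Leaving the data alone.")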
# pylint: disable=g-bad-file-header # Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Extremely random forest graph builder. go/brain-tree.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import math import random from tensorflow.contrib.tensor_forest.python import constants from tensorflow.contrib.tensor_forest.python.ops import inference_ops from tensorflow.contrib.tensor_forest.python.ops import training_ops from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import state_ops from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables as tf_variables from tensorflow.python.platform import tf_logging as logging # A convenience class for holding random forest hyperparameters. # # To just get some good default parameters, use: # hparams = ForestHParams(num_classes=2, num_features=40).fill() # # Note that num_classes can not be inferred and so must always be specified. # Also, either num_splits_to_consider or num_features should be set. # # To override specific values, pass them to the constructor: # hparams = ForestHParams(num_classes=5, num_trees=10, num_features=5).fill() # # TODO(thomaswc): Inherit from tf.HParams when that is publicly available. class ForestHParams(object): """A base class for holding hyperparameters and calculating good defaults.""" def __init__(self, num_trees=100, max_nodes=10000, bagging_fraction=1.0, max_depth=0, num_splits_to_consider=0, feature_bagging_fraction=1.0, max_fertile_nodes=0, split_after_samples=250, min_split_samples=5, valid_leaf_threshold=1, **kwargs): self.num_trees = num_trees self.max_nodes = max_nodes self.bagging_fraction = bagging_fraction self.feature_bagging_fraction = feature_bagging_fraction self.max_depth = max_depth self.num_splits_to_consider = num_splits_to_consider self.max_fertile_nodes = max_fertile_nodes self.split_after_samples = split_after_samples self.min_split_samples = min_split_samples self.valid_leaf_threshold = valid_leaf_threshold for name, value in kwargs.items(): setattr(self, name, value) def values(self): return self.__dict__ def fill(self): """Intelligently sets any non-specific parameters.""" # Fail fast if num_classes or num_features isn't set. 
_ = getattr(self, 'num_classes') _ = getattr(self, 'num_features') self.bagged_num_features = int(self.feature_bagging_fraction * self.num_features) self.bagged_features = None if self.feature_bagging_fraction < 1.0: self.bagged_features = [random.sample( range(self.num_features), self.bagged_num_features) for _ in range(self.num_trees)] self.regression = getattr(self, 'regression', False) # Num_outputs is the actual number of outputs (a single prediction for # classification, a N-dimenensional point for regression). self.num_outputs = self.num_classes if self.regression else 1 # Add an extra column to classes for storing counts, which is needed for # regression and avoids having to recompute sums for classification. self.num_output_columns = self.num_classes + 1 # Allow each tree to be unbalanced by up to a factor of 2. self.max_depth = (self.max_depth or int(2 * math.ceil(math.log(self.max_nodes, 2)))) # The Random Forest literature recommends sqrt(# features) for # classification problems, and p/3 for regression problems. # TODO(thomaswc): Consider capping this for large number of features. self.num_splits_to_consider = ( self.num_splits_to_consider or max(10, int(math.ceil(math.sqrt(self.num_features))))) # max_fertile_nodes doesn't effect performance, only training speed. # We therefore set it primarily based upon space considerations. # Each fertile node takes up num_splits_to_consider times as much # as space as a non-fertile node. We want the fertile nodes to in # total only take up as much space as the non-fertile nodes, so num_fertile = int(math.ceil(self.max_nodes / self.num_splits_to_consider)) # But always use at least 1000 accumulate slots. num_fertile = max(num_fertile, 1000) self.max_fertile_nodes = self.max_fertile_nodes or num_fertile # But it also never needs to be larger than the number of leaves, # which is max_nodes / 2. self.max_fertile_nodes = min(self.max_fertile_nodes, int(math.ceil(self.max_nodes / 2.0))) # We have num_splits_to_consider slots to fill, and we want to spend # approximately split_after_samples samples initializing them. num_split_initializiations_per_input = max(1, int(math.floor( self.num_splits_to_consider / self.split_after_samples))) self.split_initializations_per_input = getattr( self, 'split_initializations_per_input', num_split_initializiations_per_input) # If base_random_seed is 0, the current time will be used to seed the # random number generators for each tree. If non-zero, the i-th tree # will be seeded with base_random_seed + i. self.base_random_seed = getattr(self, 'base_random_seed', 0) return self # A simple container to hold the training variables for a single tree. class TreeTrainingVariables(object): """Stores tf.Variables for training a single random tree. Uses tf.get_variable to get tree-specific names so that this can be used with a tf.learn-style implementation (one that trains a model, saves it, then relies on restoring that model to evaluate). 
""" def __init__(self, params, tree_num, training): self.tree = variable_scope.get_variable( name=self.get_tree_name('tree', tree_num), dtype=dtypes.int32, shape=[params.max_nodes, 2], initializer=init_ops.constant_initializer(-2)) self.tree_thresholds = variable_scope.get_variable( name=self.get_tree_name('tree_thresholds', tree_num), shape=[params.max_nodes], initializer=init_ops.constant_initializer(-1.0)) self.tree_depths = variable_scope.get_variable( name=self.get_tree_name('tree_depths', tree_num), shape=[params.max_nodes], dtype=dtypes.int32, initializer=init_ops.constant_initializer(1)) self.end_of_tree = variable_scope.get_variable( name=self.get_tree_name('end_of_tree', tree_num), dtype=dtypes.int32, initializer=constant_op.constant([1])) self.start_epoch = tf_variables.Variable( [0] * (params.max_nodes), name='start_epoch') if training: self.node_to_accumulator_map = variable_scope.get_variable( name=self.get_tree_name('node_to_accumulator_map', tree_num), shape=[params.max_nodes], dtype=dtypes.int32, initializer=init_ops.constant_initializer(-1)) self.candidate_split_features = variable_scope.get_variable( name=self.get_tree_name('candidate_split_features', tree_num), shape=[params.max_fertile_nodes, params.num_splits_to_consider], dtype=dtypes.int32, initializer=init_ops.constant_initializer(-1)) self.candidate_split_thresholds = variable_scope.get_variable( name=self.get_tree_name('candidate_split_thresholds', tree_num), shape=[params.max_fertile_nodes, params.num_splits_to_consider], initializer=init_ops.constant_initializer(0.0)) # Statistics shared by classification and regression. self.node_sums = variable_scope.get_variable( name=self.get_tree_name('node_sums', tree_num), shape=[params.max_nodes, params.num_output_columns], initializer=init_ops.constant_initializer(0.0)) if training: self.candidate_split_sums = variable_scope.get_variable( name=self.get_tree_name('candidate_split_sums', tree_num), shape=[params.max_fertile_nodes, params.num_splits_to_consider, params.num_output_columns], initializer=init_ops.constant_initializer(0.0)) self.accumulator_sums = variable_scope.get_variable( name=self.get_tree_name('accumulator_sums', tree_num), shape=[params.max_fertile_nodes, params.num_output_columns], initializer=init_ops.constant_initializer(-1.0)) # Regression also tracks second order stats. 
if params.regression: self.node_squares = variable_scope.get_variable( name=self.get_tree_name('node_squares', tree_num), shape=[params.max_nodes, params.num_output_columns], initializer=init_ops.constant_initializer(0.0)) self.candidate_split_squares = variable_scope.get_variable( name=self.get_tree_name('candidate_split_squares', tree_num), shape=[params.max_fertile_nodes, params.num_splits_to_consider, params.num_output_columns], initializer=init_ops.constant_initializer(0.0)) self.accumulator_squares = variable_scope.get_variable( name=self.get_tree_name('accumulator_squares', tree_num), shape=[params.max_fertile_nodes, params.num_output_columns], initializer=init_ops.constant_initializer(-1.0)) else: self.node_squares = constant_op.constant( 0.0, name=self.get_tree_name('node_squares', tree_num)) self.candidate_split_squares = constant_op.constant( 0.0, name=self.get_tree_name('candidate_split_squares', tree_num)) self.accumulator_squares = constant_op.constant( 0.0, name=self.get_tree_name('accumulator_squares', tree_num)) def get_tree_name(self, name, num): return '{0}-{1}'.format(name, num) class ForestStats(object): def __init__(self, tree_stats, params): """A simple container for stats about a forest.""" self.tree_stats = tree_stats self.params = params def get_average(self, thing): val = 0.0 for i in range(self.params.num_trees): val += getattr(self.tree_stats[i], thing) return val / self.params.num_trees class TreeStats(object): def __init__(self, num_nodes, num_leaves): self.num_nodes = num_nodes self.num_leaves = num_leaves class ForestTrainingVariables(object): """A container for a forests training data, consisting of multiple trees. Instantiates a TreeTrainingVariables object for each tree. We override the __getitem__ and __setitem__ function so that usage looks like this: forest_variables = ForestTrainingVariables(params) ... forest_variables.tree ... """ def __init__(self, params, device_assigner, training=True, tree_variables_class=TreeTrainingVariables): self.variables = [] for i in range(params.num_trees): with ops.device(device_assigner.get_device(i)): self.variables.append(tree_variables_class(params, i, training)) def __setitem__(self, t, val): self.variables[t] = val def __getitem__(self, t): return self.variables[t] class RandomForestDeviceAssigner(object): """A device assigner that uses the default device. Write subclasses that implement get_device for control over how trees get assigned to devices. This assumes that whole trees are assigned to a device. 
""" def __init__(self): self.cached = None def get_device(self, unused_tree_num): if not self.cached: dummy = constant_op.constant(0) self.cached = dummy.device return self.cached class RandomForestGraphs(object): """Builds TF graphs for random forest training and inference.""" def __init__(self, params, device_assigner=None, variables=None, tree_variables_class=TreeTrainingVariables, tree_graphs=None, training=True, t_ops=training_ops, i_ops=inference_ops): self.params = params self.device_assigner = device_assigner or RandomForestDeviceAssigner() logging.info('Constructing forest with params = ') logging.info(self.params.__dict__) self.variables = variables or ForestTrainingVariables( self.params, device_assigner=self.device_assigner, training=training, tree_variables_class=tree_variables_class) tree_graph_class = tree_graphs or RandomTreeGraphs self.trees = [ tree_graph_class( self.variables[i], self.params, t_ops.Load(), i_ops.Load(), i) for i in range(self.params.num_trees)] def _bag_features(self, tree_num, input_data): split_data = array_ops.split(1, self.params.num_features, input_data) return array_ops.concat( 1, [split_data[ind] for ind in self.params.bagged_features[tree_num]]) def training_graph(self, input_data, input_labels, data_spec=None, epoch=None, **tree_kwargs): """Constructs a TF graph for training a random forest. Args: input_data: A tensor or SparseTensor or placeholder for input data. input_labels: A tensor or placeholder for labels associated with input_data. data_spec: A list of tf.dtype values specifying the original types of each column. epoch: A tensor or placeholder for the epoch the training data comes from. **tree_kwargs: Keyword arguments passed to each tree's training_graph. Returns: The last op in the random forest training graph. """ data_spec = ([constants.DATA_FLOAT] * self.params.num_features if data_spec is None else data_spec) tree_graphs = [] for i in range(self.params.num_trees): with ops.device(self.device_assigner.get_device(i)): seed = self.params.base_random_seed if seed != 0: seed += i # If using bagging, randomly select some of the input. tree_data = input_data tree_labels = input_labels if self.params.bagging_fraction < 1.0: # TODO(thomaswc): This does sampling without replacment. Consider # also allowing sampling with replacement as an option. batch_size = array_ops.slice(array_ops.shape(input_data), [0], [1]) r = random_ops.random_uniform(batch_size, seed=seed) mask = math_ops.less( r, array_ops.ones_like(r) * self.params.bagging_fraction) gather_indices = array_ops.squeeze( array_ops.where(mask), squeeze_dims=[1]) # TODO(thomaswc): Calculate out-of-bag data and labels, and store # them for use in calculating statistics later. tree_data = array_ops.gather(input_data, gather_indices) tree_labels = array_ops.gather(input_labels, gather_indices) if self.params.bagged_features: tree_data = self._bag_features(i, tree_data) initialization = self.trees[i].tree_initialization() with ops.control_dependencies([initialization]): tree_graphs.append( self.trees[i].training_graph( tree_data, tree_labels, seed, data_spec=data_spec, epoch=([0] if epoch is None else epoch), **tree_kwargs)) return control_flow_ops.group(*tree_graphs) def inference_graph(self, input_data, data_spec=None): """Constructs a TF graph for evaluating a random forest. Args: input_data: A tensor or SparseTensor or placeholder for input data. data_spec: A list of tf.dtype values specifying the original types of each column. Returns: The last op in the random forest inference graph. 
""" data_spec = ([constants.DATA_FLOAT] * self.params.num_features if data_spec is None else data_spec) probabilities = [] for i in range(self.params.num_trees): with ops.device(self.device_assigner.get_device(i)): tree_data = input_data if self.params.bagged_features: tree_data = self._bag_features(i, input_data) probabilities.append(self.trees[i].inference_graph(tree_data, data_spec)) with ops.device(self.device_assigner.get_device(0)): all_predict = array_ops.pack(probabilities) return math_ops.reduce_sum(all_predict, 0) / self.params.num_trees def average_size(self): """Constructs a TF graph for evaluating the average size of a forest. Returns: The average number of nodes over the trees. """ sizes = [] for i in range(self.params.num_trees): with ops.device(self.device_assigner.get_device(i)): sizes.append(self.trees[i].size()) return math_ops.reduce_mean(array_ops.pack(sizes)) def training_loss(self): return math_ops.neg(self.average_size()) # pylint: disable=unused-argument def validation_loss(self, features, labels): return math_ops.neg(self.average_size()) def average_impurity(self): """Constructs a TF graph for evaluating the leaf impurity of a forest. Returns: The last op in the graph. """ impurities = [] for i in range(self.params.num_trees): with ops.device(self.device_assigner.get_device(i)): impurities.append(self.trees[i].average_impurity()) return math_ops.reduce_mean(array_ops.pack(impurities)) def get_stats(self, session): tree_stats = [] for i in range(self.params.num_trees): with ops.device(self.device_assigner.get_device(i)): tree_stats.append(self.trees[i].get_stats(session)) return ForestStats(tree_stats, self.params) class RandomTreeGraphs(object): """Builds TF graphs for random tree training and inference.""" def __init__(self, variables, params, t_ops, i_ops, tree_num): self.training_ops = t_ops self.inference_ops = i_ops self.variables = variables self.params = params self.tree_num = tree_num def tree_initialization(self): def _init_tree(): return state_ops.scatter_update(self.variables.tree, [0], [[-1, -1]]).op def _nothing(): return control_flow_ops.no_op() return control_flow_ops.cond( math_ops.equal(array_ops.squeeze(array_ops.slice( self.variables.tree, [0, 0], [1, 1])), -2), _init_tree, _nothing) def _gini(self, class_counts): """Calculate the Gini impurity. If c(i) denotes the i-th class count and c = sum_i c(i) then score = 1 - sum_i ( c(i) / c )^2 Args: class_counts: A 2-D tensor of per-class counts, usually a slice or gather from variables.node_sums. Returns: A 1-D tensor of the Gini impurities for each row in the input. """ smoothed = 1.0 + array_ops.slice(class_counts, [0, 1], [-1, -1]) sums = math_ops.reduce_sum(smoothed, 1) sum_squares = math_ops.reduce_sum(math_ops.square(smoothed), 1) return 1.0 - sum_squares / (sums * sums) def _weighted_gini(self, class_counts): """Our split score is the Gini impurity times the number of examples. If c(i) denotes the i-th class count and c = sum_i c(i) then score = c * (1 - sum_i ( c(i) / c )^2 ) = c - sum_i c(i)^2 / c Args: class_counts: A 2-D tensor of per-class counts, usually a slice or gather from variables.node_sums. Returns: A 1-D tensor of the Gini impurities for each row in the input. """ smoothed = 1.0 + array_ops.slice(class_counts, [0, 1], [-1, -1]) sums = math_ops.reduce_sum(smoothed, 1) sum_squares = math_ops.reduce_sum(math_ops.square(smoothed), 1) return sums - sum_squares / sums def _variance(self, sums, squares): """Calculate the variance for each row of the input tensors. 
Variance is V = E[x^2] - (E[x])^2. Args: sums: A tensor containing output sums, usually a slice from variables.node_sums. Should contain the number of examples seen in index 0 so we can calculate expected value. squares: Same as sums, but sums of squares. Returns: A 1-D tensor of the variances for each row in the input. """ total_count = array_ops.slice(sums, [0, 0], [-1, 1]) e_x = sums / total_count e_x2 = squares / total_count return math_ops.reduce_sum(e_x2 - math_ops.square(e_x), 1) def training_graph(self, input_data, input_labels, random_seed, data_spec, epoch=None): """Constructs a TF graph for training a random tree. Args: input_data: A tensor or SparseTensor or placeholder for input data. input_labels: A tensor or placeholder for labels associated with input_data. random_seed: The random number generator seed to use for this tree. 0 means use the current time as the seed. data_spec: A list of tf.dtype values specifying the original types of each column. epoch: A tensor or placeholder for the epoch the training data comes from. Returns: The last op in the random tree training graph. """ epoch = [0] if epoch is None else epoch sparse_indices = [] sparse_values = [] sparse_shape = [] if isinstance(input_data, ops.SparseTensor): sparse_indices = input_data.indices sparse_values = input_data.values sparse_shape = input_data.shape input_data = [] # Count extremely random stats. (node_sums, node_squares, splits_indices, splits_sums, splits_squares, totals_indices, totals_sums, totals_squares, input_leaves) = ( self.training_ops.count_extremely_random_stats( input_data, sparse_indices, sparse_values, sparse_shape, data_spec, input_labels, self.variables.tree, self.variables.tree_thresholds, self.variables.node_to_accumulator_map, self.variables.candidate_split_features, self.variables.candidate_split_thresholds, self.variables.start_epoch, epoch, num_classes=self.params.num_output_columns, regression=self.params.regression)) node_update_ops = [] node_update_ops.append( state_ops.assign_add(self.variables.node_sums, node_sums)) splits_update_ops = [] splits_update_ops.append(self.training_ops.scatter_add_ndim( self.variables.candidate_split_sums, splits_indices, splits_sums)) splits_update_ops.append(self.training_ops.scatter_add_ndim( self.variables.accumulator_sums, totals_indices, totals_sums)) if self.params.regression: node_update_ops.append(state_ops.assign_add(self.variables.node_squares, node_squares)) splits_update_ops.append(self.training_ops.scatter_add_ndim( self.variables.candidate_split_squares, splits_indices, splits_squares)) splits_update_ops.append(self.training_ops.scatter_add_ndim( self.variables.accumulator_squares, totals_indices, totals_squares)) # Sample inputs. update_indices, feature_updates, threshold_updates = ( self.training_ops.sample_inputs( input_data, sparse_indices, sparse_values, sparse_shape, self.variables.node_to_accumulator_map, input_leaves, self.variables.candidate_split_features, self.variables.candidate_split_thresholds, split_initializations_per_input=( self.params.split_initializations_per_input), split_sampling_random_seed=random_seed)) update_features_op = state_ops.scatter_update( self.variables.candidate_split_features, update_indices, feature_updates) update_thresholds_op = state_ops.scatter_update( self.variables.candidate_split_thresholds, update_indices, threshold_updates) # Calculate finished nodes. 
with ops.control_dependencies(splits_update_ops): children = array_ops.squeeze(array_ops.slice( self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1]) is_leaf = math_ops.equal(constants.LEAF_NODE, children) leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf), squeeze_dims=[1])) finished, stale = self.training_ops.finished_nodes( leaves, self.variables.node_to_accumulator_map, self.variables.candidate_split_sums, self.variables.candidate_split_squares, self.variables.accumulator_sums, self.variables.accumulator_squares, self.variables.start_epoch, epoch, num_split_after_samples=self.params.split_after_samples, min_split_samples=self.params.min_split_samples) # Update leaf scores. non_fertile_leaves = array_ops.boolean_mask( leaves, math_ops.less(array_ops.gather( self.variables.node_to_accumulator_map, leaves), 0)) # TODO(gilberth): It should be possible to limit the number of non # fertile leaves we calculate scores for, especially since we can only take # at most array_ops.shape(finished)[0] of them. with ops.control_dependencies(node_update_ops): sums = array_ops.gather(self.variables.node_sums, non_fertile_leaves) if self.params.regression: squares = array_ops.gather(self.variables.node_squares, non_fertile_leaves) non_fertile_leaf_scores = self._variance(sums, squares) else: non_fertile_leaf_scores = self._weighted_gini(sums) # Calculate best splits. with ops.control_dependencies(splits_update_ops): split_indices = self.training_ops.best_splits( finished, self.variables.node_to_accumulator_map, self.variables.candidate_split_sums, self.variables.candidate_split_squares, self.variables.accumulator_sums, self.variables.accumulator_squares, regression=self.params.regression) # Grow tree. with ops.control_dependencies([update_features_op, update_thresholds_op]): (tree_update_indices, tree_children_updates, tree_threshold_updates, tree_depth_updates, new_eot) = ( self.training_ops.grow_tree( self.variables.end_of_tree, self.variables.tree_depths, self.variables.node_to_accumulator_map, finished, split_indices, self.variables.candidate_split_features, self.variables.candidate_split_thresholds)) tree_update_op = state_ops.scatter_update( self.variables.tree, tree_update_indices, tree_children_updates) thresholds_update_op = state_ops.scatter_update( self.variables.tree_thresholds, tree_update_indices, tree_threshold_updates) depth_update_op = state_ops.scatter_update( self.variables.tree_depths, tree_update_indices, tree_depth_updates) # TODO(thomaswc): Only update the epoch on the new leaves. new_epoch_updates = epoch * array_ops.ones_like(tree_depth_updates) epoch_update_op = state_ops.scatter_update( self.variables.start_epoch, tree_update_indices, new_epoch_updates) # Update fertile slots. with ops.control_dependencies([depth_update_op]): (node_map_updates, accumulators_cleared, accumulators_allocated) = ( self.training_ops.update_fertile_slots( finished, non_fertile_leaves, non_fertile_leaf_scores, self.variables.end_of_tree, self.variables.tree_depths, self.variables.accumulator_sums, self.variables.node_to_accumulator_map, stale, max_depth=self.params.max_depth, regression=self.params.regression)) # Ensure end_of_tree doesn't get updated until UpdateFertileSlots has # used it to calculate new leaves. 
gated_new_eot, = control_flow_ops.tuple([new_eot], control_inputs=[node_map_updates]) eot_update_op = state_ops.assign(self.variables.end_of_tree, gated_new_eot) updates = [] updates.append(eot_update_op) updates.append(tree_update_op) updates.append(thresholds_update_op) updates.append(epoch_update_op) updates.append(state_ops.scatter_update( self.variables.node_to_accumulator_map, array_ops.squeeze(array_ops.slice(node_map_updates, [0, 0], [1, -1]), squeeze_dims=[0]), array_ops.squeeze(array_ops.slice(node_map_updates, [1, 0], [1, -1]), squeeze_dims=[0]))) cleared_and_allocated_accumulators = array_ops.concat( 0, [accumulators_cleared, accumulators_allocated]) # Calculate values to put into scatter update for candidate counts. # Candidate split counts are always reset back to 0 for both cleared # and allocated accumulators. This means some accumulators might be doubly # reset to 0 if the were released and not allocated, then later allocated. split_values = array_ops.tile( array_ops.expand_dims(array_ops.expand_dims( array_ops.zeros_like(cleared_and_allocated_accumulators, dtype=dtypes.float32), 1), 2), [1, self.params.num_splits_to_consider, self.params.num_output_columns]) updates.append(state_ops.scatter_update( self.variables.candidate_split_sums, cleared_and_allocated_accumulators, split_values)) if self.params.regression: updates.append(state_ops.scatter_update( self.variables.candidate_split_squares, cleared_and_allocated_accumulators, split_values)) # Calculate values to put into scatter update for total counts. total_cleared = array_ops.tile( array_ops.expand_dims( math_ops.neg(array_ops.ones_like(accumulators_cleared, dtype=dtypes.float32)), 1), [1, self.params.num_output_columns]) total_reset = array_ops.tile( array_ops.expand_dims( array_ops.zeros_like(accumulators_allocated, dtype=dtypes.float32), 1), [1, self.params.num_output_columns]) accumulator_updates = array_ops.concat(0, [total_cleared, total_reset]) updates.append(state_ops.scatter_update( self.variables.accumulator_sums, cleared_and_allocated_accumulators, accumulator_updates)) if self.params.regression: updates.append(state_ops.scatter_update( self.variables.accumulator_squares, cleared_and_allocated_accumulators, accumulator_updates)) # Calculate values to put into scatter update for candidate splits. split_features_updates = array_ops.tile( array_ops.expand_dims( math_ops.neg(array_ops.ones_like( cleared_and_allocated_accumulators)), 1), [1, self.params.num_splits_to_consider]) updates.append(state_ops.scatter_update( self.variables.candidate_split_features, cleared_and_allocated_accumulators, split_features_updates)) updates += self.finish_iteration() return control_flow_ops.group(*updates) def finish_iteration(self): """Perform any operations that should be done at the end of an iteration. This is mostly useful for subclasses that need to reset variables after an iteration, such as ones that are used to finish nodes. Returns: A list of operations. """ return [] def inference_graph(self, input_data, data_spec): """Constructs a TF graph for evaluating a random tree. Args: input_data: A tensor or SparseTensor or placeholder for input data. data_spec: A list of tf.dtype values specifying the original types of each column. Returns: The last op in the random tree inference graph. 
""" sparse_indices = [] sparse_values = [] sparse_shape = [] if isinstance(input_data, ops.SparseTensor): sparse_indices = input_data.indices sparse_values = input_data.values sparse_shape = input_data.shape input_data = [] return self.inference_ops.tree_predictions( input_data, sparse_indices, sparse_values, sparse_shape, data_spec, self.variables.tree, self.variables.tree_thresholds, self.variables.node_sums, valid_leaf_threshold=self.params.valid_leaf_threshold) def average_impurity(self): """Constructs a TF graph for evaluating the average leaf impurity of a tree. If in regression mode, this is the leaf variance. If in classification mode, this is the gini impurity. Returns: The last op in the graph. """ children = array_ops.squeeze(array_ops.slice( self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1]) is_leaf = math_ops.equal(constants.LEAF_NODE, children) leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf), squeeze_dims=[1])) counts = array_ops.gather(self.variables.node_sums, leaves) gini = self._weighted_gini(counts) # Guard against step 1, when there often are no leaves yet. def impurity(): return gini # Since average impurity can be used for loss, when there's no data just # return a big number so that loss always decreases. def big(): return array_ops.ones_like(gini, dtype=dtypes.float32) * 10000000. return control_flow_ops.cond(math_ops.greater( array_ops.shape(leaves)[0], 0), impurity, big) def size(self): """Constructs a TF graph for evaluating the current number of nodes. Returns: The current number of nodes in the tree. """ return self.variables.end_of_tree - 1 def get_stats(self, session): num_nodes = self.variables.end_of_tree.eval(session=session) - 1 num_leaves = array_ops.where( math_ops.equal(array_ops.squeeze(array_ops.slice( self.variables.tree, [0, 0], [-1, 1])), constants.LEAF_NODE) ).eval(session=session).shape[0] return TreeStats(num_nodes, num_leaves)
tensorflow/contrib/tensor_forest/python/tensor_forest.py
34,959
A base class for holding hyperparameters and calculating good defaults. A container for a forests training data, consisting of multiple trees. Instantiates a TreeTrainingVariables object for each tree. We override the __getitem__ and __setitem__ function so that usage looks like this: forest_variables = ForestTrainingVariables(params) ... forest_variables.tree ... A device assigner that uses the default device. Write subclasses that implement get_device for control over how trees get assigned to devices. This assumes that whole trees are assigned to a device. Builds TF graphs for random forest training and inference. Builds TF graphs for random tree training and inference. Stores tf.Variables for training a single random tree. Uses tf.get_variable to get tree-specific names so that this can be used with a tf.learn-style implementation (one that trains a model, saves it, then relies on restoring that model to evaluate). A simple container for stats about a forest. Calculate the Gini impurity. If c(i) denotes the i-th class count and c = sum_i c(i) then score = 1 - sum_i ( c(i) / c )^2 Args: class_counts: A 2-D tensor of per-class counts, usually a slice or gather from variables.node_sums. Returns: A 1-D tensor of the Gini impurities for each row in the input. Calculate the variance for each row of the input tensors. Variance is V = E[x^2] - (E[x])^2. Args: sums: A tensor containing output sums, usually a slice from variables.node_sums. Should contain the number of examples seen in index 0 so we can calculate expected value. squares: Same as sums, but sums of squares. Returns: A 1-D tensor of the variances for each row in the input. Our split score is the Gini impurity times the number of examples. If c(i) denotes the i-th class count and c = sum_i c(i) then score = c * (1 - sum_i ( c(i) / c )^2 ) = c - sum_i c(i)^2 / c Args: class_counts: A 2-D tensor of per-class counts, usually a slice or gather from variables.node_sums. Returns: A 1-D tensor of the Gini impurities for each row in the input. Constructs a TF graph for evaluating the leaf impurity of a forest. Returns: The last op in the graph. Constructs a TF graph for evaluating the average leaf impurity of a tree. If in regression mode, this is the leaf variance. If in classification mode, this is the gini impurity. Returns: The last op in the graph. Constructs a TF graph for evaluating the average size of a forest. Returns: The average number of nodes over the trees. Intelligently sets any non-specific parameters. Perform any operations that should be done at the end of an iteration. This is mostly useful for subclasses that need to reset variables after an iteration, such as ones that are used to finish nodes. Returns: A list of operations. Constructs a TF graph for evaluating a random forest. Args: input_data: A tensor or SparseTensor or placeholder for input data. data_spec: A list of tf.dtype values specifying the original types of each column. Returns: The last op in the random forest inference graph. Constructs a TF graph for evaluating a random tree. Args: input_data: A tensor or SparseTensor or placeholder for input data. data_spec: A list of tf.dtype values specifying the original types of each column. Returns: The last op in the random tree inference graph. Constructs a TF graph for evaluating the current number of nodes. Returns: The current number of nodes in the tree. Constructs a TF graph for training a random forest. Args: input_data: A tensor or SparseTensor or placeholder for input data. 
input_labels: A tensor or placeholder for labels associated with input_data. data_spec: A list of tf.dtype values specifying the original types of each column. epoch: A tensor or placeholder for the epoch the training data comes from. **tree_kwargs: Keyword arguments passed to each tree's training_graph. Returns: The last op in the random forest training graph. Constructs a TF graph for training a random tree. Args: input_data: A tensor or SparseTensor or placeholder for input data. input_labels: A tensor or placeholder for labels associated with input_data. random_seed: The random number generator seed to use for this tree. 0 means use the current time as the seed. data_spec: A list of tf.dtype values specifying the original types of each column. epoch: A tensor or placeholder for the epoch the training data comes from. Returns: The last op in the random tree training graph. Extremely random forest graph builder. go/brain-tree. pylint: disable=g-bad-file-header Copyright 2016 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================== A convenience class for holding random forest hyperparameters. To just get some good default parameters, use: hparams = ForestHParams(num_classes=2, num_features=40).fill() Note that num_classes can not be inferred and so must always be specified. Also, either num_splits_to_consider or num_features should be set. To override specific values, pass them to the constructor: hparams = ForestHParams(num_classes=5, num_trees=10, num_features=5).fill() TODO(thomaswc): Inherit from tf.HParams when that is publicly available. Fail fast if num_classes or num_features isn't set. Num_outputs is the actual number of outputs (a single prediction for classification, a N-dimenensional point for regression). Add an extra column to classes for storing counts, which is needed for regression and avoids having to recompute sums for classification. Allow each tree to be unbalanced by up to a factor of 2. The Random Forest literature recommends sqrt( features) for classification problems, and p/3 for regression problems. TODO(thomaswc): Consider capping this for large number of features. max_fertile_nodes doesn't effect performance, only training speed. We therefore set it primarily based upon space considerations. Each fertile node takes up num_splits_to_consider times as much as space as a non-fertile node. We want the fertile nodes to in total only take up as much space as the non-fertile nodes, so But always use at least 1000 accumulate slots. But it also never needs to be larger than the number of leaves, which is max_nodes / 2. We have num_splits_to_consider slots to fill, and we want to spend approximately split_after_samples samples initializing them. If base_random_seed is 0, the current time will be used to seed the random number generators for each tree. If non-zero, the i-th tree will be seeded with base_random_seed + i. A simple container to hold the training variables for a single tree. 
Statistics shared by classification and regression. Regression also tracks second order stats. If using bagging, randomly select some of the input. TODO(thomaswc): This does sampling without replacment. Consider also allowing sampling with replacement as an option. TODO(thomaswc): Calculate out-of-bag data and labels, and store them for use in calculating statistics later. pylint: disable=unused-argument Count extremely random stats. Sample inputs. Calculate finished nodes. Update leaf scores. TODO(gilberth): It should be possible to limit the number of non fertile leaves we calculate scores for, especially since we can only take at most array_ops.shape(finished)[0] of them. Calculate best splits. Grow tree. TODO(thomaswc): Only update the epoch on the new leaves. Update fertile slots. Ensure end_of_tree doesn't get updated until UpdateFertileSlots has used it to calculate new leaves. Calculate values to put into scatter update for candidate counts. Candidate split counts are always reset back to 0 for both cleared and allocated accumulators. This means some accumulators might be doubly reset to 0 if the were released and not allocated, then later allocated. Calculate values to put into scatter update for total counts. Calculate values to put into scatter update for candidate splits. Guard against step 1, when there often are no leaves yet. Since average impurity can be used for loss, when there's no data just return a big number so that loss always decreases.
8,715
en
0.827136
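The _gini and _weighted_gini docstrings quoted above define the split score purely from per-class counts, with a leading total-count column and add-one smoothing as in node_sums. A minimal NumPy sketch of the same arithmetic, for illustration only rather than the TensorFlow ops:

import numpy as np

# Rows of class_counts follow node_sums: [total_count, count_class_0, count_class_1, ...]
class_counts = np.array([[10., 7., 3.],
                         [ 6., 3., 3.]])

smoothed = 1.0 + class_counts[:, 1:]        # drop the count column, add-one smoothing
sums = smoothed.sum(axis=1)
sum_squares = (smoothed ** 2).sum(axis=1)

gini = 1.0 - sum_squares / (sums * sums)    # 1 - sum_i (c(i) / c)^2
weighted_gini = sums - sum_squares / sums   # c - sum_i c(i)^2 / c, used as the split score
print(gini, weighted_gini)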
# coding: utf-8 import re import six from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization class NeutronCreateFloatingIpRequestBody: """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ sensitive_list = [] openapi_types = { 'floatingip': 'CreateFloatingIpOption' } attribute_map = { 'floatingip': 'floatingip' } def __init__(self, floatingip=None): """NeutronCreateFloatingIpRequestBody - a model defined in huaweicloud sdk""" self._floatingip = None self.discriminator = None self.floatingip = floatingip @property def floatingip(self): """Gets the floatingip of this NeutronCreateFloatingIpRequestBody. :return: The floatingip of this NeutronCreateFloatingIpRequestBody. :rtype: CreateFloatingIpOption """ return self._floatingip @floatingip.setter def floatingip(self, floatingip): """Sets the floatingip of this NeutronCreateFloatingIpRequestBody. :param floatingip: The floatingip of this NeutronCreateFloatingIpRequestBody. :type: CreateFloatingIpOption """ self._floatingip = floatingip def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: if attr in self.sensitive_list: result[attr] = "****" else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" import simplejson as json if six.PY2: import sys reload(sys) sys.setdefaultencoding("utf-8") return json.dumps(sanitize_for_serialization(self), ensure_ascii=False) def __repr__(self): """For `print`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, NeutronCreateFloatingIpRequestBody): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other
huaweicloud-sdk-eip/huaweicloudsdkeip/v2/model/neutron_create_floating_ip_request_body.py
3,115
Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. Returns true if both objects are equal NeutronCreateFloatingIpRequestBody - a model defined in huaweicloud sdk Returns true if both objects are not equal For `print` Gets the floatingip of this NeutronCreateFloatingIpRequestBody. :return: The floatingip of this NeutronCreateFloatingIpRequestBody. :rtype: CreateFloatingIpOption Sets the floatingip of this NeutronCreateFloatingIpRequestBody. :param floatingip: The floatingip of this NeutronCreateFloatingIpRequestBody. :type: CreateFloatingIpOption Returns the model properties as a dict Returns the string representation of the model coding: utf-8
840
en
0.677838
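The to_dict method above recursively serializes any attribute value that itself exposes to_dict. A minimal sketch of that behaviour, assuming the huaweicloudsdkeip and huaweicloudsdkcore packages are installed; DummyOption is a hypothetical stand-in for CreateFloatingIpOption and only mimics the to_dict protocol:

from huaweicloudsdkeip.v2.model.neutron_create_floating_ip_request_body import \
    NeutronCreateFloatingIpRequestBody


class DummyOption:
    # Hypothetical stand-in: only implements the to_dict protocol used by the model above.
    def to_dict(self):
        return {"floating_network_id": "example-network-id"}


body = NeutronCreateFloatingIpRequestBody(floatingip=DummyOption())
print(body.to_dict())  # {'floatingip': {'floating_network_id': 'example-network-id'}}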
#!/usr/bin/env python # Copyright (c) 2020, Palo Alto Networks # # Permission to use, copy, modify, and/or distribute this software for any # purpose with or without fee is hereby granted, provided that the above # copyright notice and this permission notice appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. """ log_forwarding_profile.py ========================== Ensure that all security rules have the same log forwarding profile assigned. This script checks if any rules are missing the specified log forwarding profile and applies the profile if it is missing. This is done with as few API calls as possible. Environment variables required: PAN_HOSTNAME: The hostname or IP of the Firewall PAN_USERNAME: The username of a firewall admin PAN_PASSWORD: The password of a firewall admin PAN_LOG_PROFILE: The name of the log forwarding profile to apply """ import os from panos.firewall import Firewall from panos.policies import Rulebase, SecurityRule HOSTNAME = os.environ["PAN_HOSTNAME"] USERNAME = os.environ["PAN_USERNAME"] PASSWORD = os.environ["PAN_PASSWORD"] LOG_PROFILE = os.environ["PAN_LOG_PROFILE"] def main(): # Create a connection to a firewall and a rulebase to work inside fw = Firewall(HOSTNAME, USERNAME, PASSWORD) rulebase = fw.add(Rulebase()) # Fetch all the security rules from the firewall into a list rules = SecurityRule.refreshall(rulebase, add=False) print(f"Checking {len(rules)} rules...") # Iterate over the list and collect names of rules that are # missing the log forwarding profile for rule in rules: if rule.log_setting != LOG_PROFILE: print(f"{rule.name}") rule.log_setting = LOG_PROFILE rule.log_start = 0 rule.log_end = 1 rule.apply() # At this point, we've added SecurityRule objects to the Firewall # for each rule that doesn't have the right log forwarding profile. # Now, trigger a commit # In this case, we'll wait for the commit to finish and trigger an exception # if the commit finished with any errors. print("Starting commit") fw.commit(sync=True, exception=True) print("Commit finished successfully") if __name__ == "__main__": main()
examples/log_forwarding_profile.py
2,725
log_forwarding_profile.py ========================== Ensure that all security rules have the same log forwarding profile assigned. This script checks if any rules are missing the specified log forwarding profile and applies the profile if it is missing. This is done with as few API calls as possible. Environment variables required: PAN_HOSTNAME: The hostname or IP of the Firewall PAN_USERNAME: The username of a firewall admin PAN_PASSWORD: The password of a firewall admin PAN_LOG_PROFILE: The name of the log forwarding profile to apply !/usr/bin/env python Copyright (c) 2020, Palo Alto Networks Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. Create a connection to a firewall and a rulebase to work inside Fetch all the security rules from the firewall into a list Iterate over the list and collect names of rules that are missing the log forwarding profile At this point, we've added SecurityRule objects to the Firewall for each rule that doesn't have the right log forwarding profile. Now, trigger a commit In this case, we'll wait for the commit to finish and trigger an exception if the commit finished with any errors.
1,792
en
0.888289
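The script above modifies rules and then commits; before running it, the same check can be done read-only. A minimal audit sketch using only the calls already shown (Firewall, Rulebase, SecurityRule.refreshall) and the same environment variables:

import os

from panos.firewall import Firewall
from panos.policies import Rulebase, SecurityRule

fw = Firewall(os.environ["PAN_HOSTNAME"], os.environ["PAN_USERNAME"], os.environ["PAN_PASSWORD"])
rulebase = fw.add(Rulebase())

# List rules whose log forwarding profile differs; no apply(), no commit().
missing = [
    rule.name
    for rule in SecurityRule.refreshall(rulebase, add=False)
    if rule.log_setting != os.environ["PAN_LOG_PROFILE"]
]
print(f"{len(missing)} rule(s) missing the profile: {missing}")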
#!/usr/bin/env python # coding: utf-8 # ## sircat # # Makes a catalog of solar wind stream interaction regions (SIRs) and high speed solar wind streams (HSS) for the Wind, STEREO and MAVEN spacecraft since 2007. # # Authors: [C. Möstl](https://www.iwf.oeaw.ac.at/en/user-site/christian-moestl/) (twitter @chrisoutofspace), A. J. Weiss, R. L. Bailey, IWF Graz, Austria; Lan Jian, NASA, USA; Maxim Grandin, University of Helsinki, Finland; Hui Huang, Beijing University, China. # # # **current status: work in progress** # # If you want to use parts of this code for generating results for peer-reviewed scientific publications, please contact us per email (christian.moestl@oeaw.ac.at, lan.jian@nasa.gov, maxime.grandin@helsinki.fi) for co-authorships. # # # part of https://github.com/cmoestl/heliocats, last update June 2020 # # --- # # ### Installation # In a command line, do: "git clone https://github.com/cmoestl/heliocats". # # Install a specific conda environment to run this code, see README at https://github.com/cmoestl/heliocats # # Download the files from https://doi.org/10.6084/m9.figshare.11973693 and place them in the /data folder. # # # # ### Updates # # Adding a new SIR event: change the source files, or add the sir and hss times in section 2 before the master file sircat/HELIO4CAST_SIRCAT_v10_master.xlsx is produced. Then delete the file for the respective spacecraft under sircat/indices_sircat, and run this notebook or script. # # Convert this notebook to a script with "jupyter nbconvert --to script sircat.ipynb" in a command line # # --- # # # ### Data sources # # # **PSP SIR list**: Allen et al. 2021: https://www.aanda.org/articles/aa/full_html/2021/06/aa39833-20/aa39833-20.html, list at https://sppgway.jhuapl.edu/event_list # # # **STEREO SIR list**: Lan Jian, https://stereo-ssc.nascom.nasa.gov/data/ins_data/impact/level3/ # published in: L. K. Jian et al. https://doi.org/10.1007/s11207-019-1416-8, 2019. # # This catalog contains the SIR start and end times, as well as the Pt max time for the stream interface. We use their SIR start and end times as our *sir_start_time* and *sir_end_time*, and set the *hss_start_time* with the Pt max time. For 4 Pt max times that were nan in the Jian et al. list, the *hss_start_time* has been set similar to the *sir_end_time*. # # **To do**: create our own *hss_end_time* by setting it as the first time when the total bulk speed drops below 450 km/s after *sir_end_time*. Lan: "For the STEREO HSS catalog, you can opt to list only the events with the fastest speed reaching at least 500 km/s, to be consistent with Grandin et al. (2019)." # # # **Earth SIR/HSS list**: Maxim Grandin et al., 2018, https://doi.org/10.1029/2018JA026396 # # This catalog directly gives the *hss_start_time* and the *hss_end_time*. This list was determined by an algorithm and there are no specifics about the SIR times; instead, the start time is determined as the start of the increasing speed and is thus likely closer to an SIR start time than to a stream interface time, which we use as a *hss_start_time*. For simplicity, we have nevertheless taken the given start time as the hss_start_time. # The times in the Earth SIR/HSS list have been modified to 1 hour earlier as these times were originally given for the magnetopause, but the Wind spacecraft is located at the L1 point. One hour is practically equivalent to the propagation time of a 400 km/s slow solar wind from the L1 point to the magnetopause.
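# A short check of the 1-hour shift described above, assuming an Earth-L1 distance of roughly 1.5 million km:
l1_distance_km = 1.5e6
v_slow_kms = 400.0
print(l1_distance_km / v_slow_kms / 3600.0)  # ~1.04 hours, consistent with the 1-hour correction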
# # **To do**: In future updates, we may change hss_start_time to the sir_start_time and add a proper hss_start_time by searching for ptmax after a new sir_start_time. The Grandin et al. (2019) catalogue only contains events for which the solar wind speed reached at least 500 km/s. Lan: "For Grandin et al. (2019), you can use the peak of total pressure to approximate the stream interface time." # # # **MARS SIR/HSS list**: Hui Huang et al., 2019, https://doi.org/10.3847/1538-4357/ab25e9 (open access not available). # This catalog gives the sir_start_time, hss_start_time (=stream interface time) and the sir_end_time. # # **To do**: Similar to the STEREO list, we have added the hss_end_time. # # # All other parameters are calculated from scratch from the spacecraft data via this notebook or script. # # --- # # ### Other resources # # # **Great review on SIRs** by Ian G. Richardson: https://link.springer.com/article/10.1007/s41116-017-0011-z # # # --- # # # # # # # start with importing packages, get paths from config.py file and make directories # In[405]: last_update='2021-July-13' # In[11]: import numpy as np import scipy.io import matplotlib import matplotlib.pyplot as plt import matplotlib.dates as mdates from matplotlib.dates import DateFormatter from datetime import timedelta import seaborn as sns import datetime import astropy import astropy.constants as const from sunpy.time import parse_time import time import pickle import sys import os import urllib import json import importlib import pandas as pd import copy import openpyxl import h5py from heliocats import plot as hp importlib.reload(hp) #reload again while debugging from heliocats import data as hd importlib.reload(hd) #reload again while debugging from heliocats import cats as hc importlib.reload(hc) #reload again while debugging from heliocats import stats as hs importlib.reload(hs) #reload again while debugging #where the in situ data files are located is read #from config.py import config importlib.reload(config) from config import data_path from config import data_path_ML ########### make directories first time if not there resdir='results' if os.path.isdir(resdir) == False: os.mkdir(resdir) datadir='data' if os.path.isdir(datadir) == False: os.mkdir(datadir) indexdir='sircat/indices_sircat' if os.path.isdir(indexdir) == False: os.mkdir(indexdir) catdir='sircat' if os.path.isdir(catdir) == False: os.mkdir(catdir) sirplotsdir='sircat/plots_sircat/' if os.path.isdir(sirplotsdir) == False: os.mkdir(sirplotsdir) #Convert this notebook to a script with jupyter nbconvert --to script sircat.ipynb os.system('jupyter nbconvert --to script sircat.ipynb') #in situ data files are updated via the icmecat.ipynb notebook ## (1) load data # ## (1) load data from STEREO-B, STEREO-A, Wind, PSP, and MAVEN # # In[2]: load_data=1 if load_data > 0: #print('load Ulysses RTN') #made with heliocats.data.save_ulysses_data #fileuly='ulysses_1990_2009_rtn.p' #[uly,huly]=pickle.load(open(data_path+fileuly, "rb" ) ) print('load STEREO-B data SCEQ') #yearly magplasma files from stereo science center, conversion to SCEQ filestb='stereob_2007_2014_sceq.p' [stb,hstb]=pickle.load(open(data_path+filestb, "rb" ) ) ########### CURRENT ACTIVE SPACECRAFT # ADD BepiColombo # ADD Solar Orbiter print('load MAVEN data MSO') #removed magnetosphere by C.
Simon Wedlund, 1 data point per orbit, MSO #filemav='maven_2014_2018.p' #[mav,hmav]=pickle.load(open(filemav, 'rb' ) ) #filemav='maven_2014_2018_removed.p' #[mav,hmav]=pickle.load(open(filemav, 'rb' ) ) filemav='maven_2014_2018_removed_smoothed.p' [mav,hmav]=pickle.load(open(data_path+filemav, 'rb' ) ) #print('load MSL RAD') #MSL RAD #rad=hd.load_msl_rad()#, rad.time,rad.dose_sol ############################################## print('load PSP data SCEQ') #from heliosat, converted to SCEQ similar to STEREO-A/B filepsp='psp_2018_2021_sceq.p' [psp,hpsp]=pickle.load(open(data_path+filepsp, "rb" ) ) ########### STA print('load and merge STEREO-A data SCEQ') #yearly magplasma files from stereo science center, conversion to SCEQ filesta1='stereoa_2007_2020_sceq.p' sta1=pickle.load(open(data_path+filesta1, "rb" ) ) #beacon data #filesta2="stereoa_2019_2020_sceq_beacon.p" #filesta2='stereoa_2019_2020_sept_sceq_beacon.p' #filesta2='stereoa_2019_now_sceq_beacon.p' #filesta2="stereoa_2020_august_november_sceq_beacon.p" filesta2='stereoa_2020_now_sceq_beacon.p' [sta2,hsta2]=pickle.load(open(data_path+filesta2, "rb" ) ) #cutoff with end of science data sta2=sta2[np.where(sta2.time >= parse_time('2020-Aug-01 00:00').datetime)[0]] #make array sta=np.zeros(np.size(sta1.time)+np.size(sta2.time),dtype=[('time',object),('bx', float),('by', float), ('bz', float),('bt', float),('vt', float),('np', float),('tp', float), ('x', float),('y', float),('z', float), ('r', float),('lat', float),('lon', float)]) #convert to recarray sta = sta.view(np.recarray) sta.time=np.hstack((sta1.time,sta2.time)) sta.bx=np.hstack((sta1.bx,sta2.bx)) sta.by=np.hstack((sta1.by,sta2.by)) sta.bz=np.hstack((sta1.bz,sta2.bz)) sta.bt=np.hstack((sta1.bt,sta2.bt)) sta.vt=np.hstack((sta1.vt,sta2.vt)) sta.np=np.hstack((sta1.np,sta2.np)) sta.tp=np.hstack((sta1.tp,sta2.tp)) sta.x=np.hstack((sta1.x,sta2.x)) sta.y=np.hstack((sta1.y,sta2.y)) sta.z=np.hstack((sta1.z,sta2.z)) sta.r=np.hstack((sta1.r,sta2.r)) sta.lon=np.hstack((sta1.lon,sta2.lon)) sta.lat=np.hstack((sta1.lat,sta2.lat)) print('STA Merging done') ########### Wind print('load and merge Wind data HEEQ') #from HELCATS HEEQ until 2018 1 1 + new self-processed data with heliosat and hd.save_wind_data filewin="wind_2007_2018_heeq_helcats.p" [win1,hwin1]=pickle.load(open(data_path+filewin, "rb" ) ) filewin2="wind_2018_now_heeq.p" [win2,hwin2]=pickle.load(open(data_path+filewin2, "rb" ) ) #function for spike removal, see list with times in that function win2=hd.remove_wind_spikes_gaps(win2) #merge Wind old and new data #cut off HELCATS data at end of 2017, win2 begins exactly after this win1=win1[np.where(win1.time < parse_time('2018-Jan-01 00:00').datetime)[0]] #make array win=np.zeros(np.size(win1.time)+np.size(win2.time),dtype=[('time',object),('bx', float),('by', float), ('bz', float),('bt', float),('vt', float),('np', float),('tp', float), ('x', float),('y', float),('z', float), ('r', float),('lat', float),('lon', float)]) #convert to recarray win = win.view(np.recarray) win.time=np.hstack((win1.time,win2.time)) win.bx=np.hstack((win1.bx,win2.bx)) win.by=np.hstack((win1.by,win2.by)) win.bz=np.hstack((win1.bz,win2.bz)) win.bt=np.hstack((win1.bt,win2.bt)) win.vt=np.hstack((win1.vt,win2.vt)) win.np=np.hstack((win1.np,win2.np)) win.tp=np.hstack((win1.tp,win2.tp)) win.x=np.hstack((win1.x,win2.x)) win.y=np.hstack((win1.y,win2.y)) win.z=np.hstack((win1.z,win2.z)) win.r=np.hstack((win1.r,win2.r)) win.lon=np.hstack((win1.lon,win2.lon)) win.lat=np.hstack((win1.lat,win2.lat)) print('Wind merging done') 
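# A minimal sketch of the hss_end_time to-do from the header: find the first time after sir_end_time
# at which the total bulk speed drops below 450 km/s, assuming a merged recarray with the fields
# time (datetime objects) and vt (km/s) as defined above.
def find_hss_end_time(sc, sir_end_time, speed_limit=450.0):
    after = np.where(sc.time > sir_end_time)[0]        # samples after the SIR end
    below = np.where(sc.vt[after] < speed_limit)[0]    # first drop below the speed limit
    if below.size == 0:
        return np.nan
    return sc.time[after[below[0]]]
# example with a hypothetical date: find_hss_end_time(win, parse_time('2010-May-30 12:00').datetime)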
print() print() print('time ranges of the in situ data: ') print() print('active spacecraft:') print('Wind ',str(win.time[0])[0:10],str(win.time[-1])[0:10]) print('STEREO-A ',str(sta.time[0])[0:10],str(sta.time[-1])[0:10]) print('Parker Solar Probe ',str(psp.time[0])[0:10],str(psp.time[-1])[0:10]) print('MAVEN ',str(mav.time[0])[0:10],str(mav.time[-1])[0:10]) #print('MSL/RAD ',str(rad.time[0])[0:10],str(rad.time[-1])[0:10]) print() print('missions finished:') #print('VEX ',str(vex.time[0])[0:10],str(vex.time[-1])[0:10]) #print('MESSENGER ',str(mes.time[0])[0:10],str(mes.time[-1])[0:10]) print('STEREO-B ',str(stb.time[0])[0:10],str(stb.time[-1])[0:10]) #print('Ulysses ',str(uly.time[0])[0:10],str(uly.time[-1])[0:10]) print() # print('catalogs:') # print() # print('HELCATS HIGeoCAT ',str(higeocat_time[0])[0:10],str(higeocat_time[-1])[0:10]) print('done') # ## (2) make SIRCAT masterfile from STEREO and Wind catalogs # Here we read raw STEREO SIR and Earth SIR catalogs from Robert Allen, Lan Jian, Maxim Grandin, and Hui Huang et al. and convert to master catalog xlsx file that contains all times in a consistent way. # In[302]: #make list for all basic times, ids etc. for master file rows_list = [] def convert_time(p_time): #from Allen catalog format to datetime object p_time_obj=[] for i in np.arange(0,len(p_time)): p_str=p_time[i][0:10]+'T'+p_time[i][11:16]+'Z' p_time_obj.append(parse_time(p_str).datetime) #print(p_time_obj[i]) #dates with year 1 set to nan: if mdates.date2num(p_time_obj[i])< 10: p_time_obj[i]=np.nan return p_time_obj #read all Allen catalogs psp_sir_file='sircat/sources/SIR_CIR_List_PSP.csv' psp_l1_sir_file='sircat/sources/SIR_CIR_List_L1_corr_PSP.csv' psp_sta_sir_file='sircat/sources/SIR_CIR_List_STA_corr_PSP.csv' #psp p_raw=pd.read_csv(psp_sir_file, header=49) #wind pw_raw=pd.read_csv(psp_l1_sir_file, header=51) #sta pa_raw=pd.read_csv(psp_sta_sir_file, header=51) print(p_raw.keys()) print() ################################# ############ PSP print() p_raw['Start time']=convert_time(p_raw['Start time']) p_raw['End time']=convert_time(p_raw['End time']) p_raw['Time of max P']=convert_time(p_raw['Time of max P']) #print(p_raw['Start time']) #print(p_raw['End time']) #print(p_raw['Time of max P']) for i in np.arange(0,len(p_raw)): #make id for event id_time=parse_time(p_raw['Start time'][i]).isot sc_idstring='SIR_PSP_ALLEN_' sc_string='PSP' sircat_id=sc_idstring+id_time[0:4]+id_time[5:7]+id_time[8:10]+'_01' #put all data for this event in a list list1 = [sircat_id,sc_string,np.nan,parse_time(p_raw['Start time'][i]).isot, np.nan, parse_time(p_raw['End time'][i]).isot, np.nan,np.nan,np.nan, np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan, np.nan,np.nan,np.nan] #print(list1) #append to full list rows_list.append(list1) print(rows_list[1]) ############ Wind print() pw_raw['Start time']=convert_time(pw_raw['Start time']) pw_raw['End time']=convert_time(pw_raw['End time']) pw_raw['Time of max P']=convert_time(pw_raw['Time of max P']) #print(pw_raw['Start time']) #print(pw_raw['End time']) #print(pw_raw['Time of max P']) for i in np.arange(0,len(pw_raw)): #make id for event id_time=parse_time(pw_raw['Start time'][i]).isot sc_idstring='SIR_WIND_ALLEN_' sc_string='Wind' sircat_id=sc_idstring+id_time[0:4]+id_time[5:7]+id_time[8:10]+'_01' #put all data for this event in a list list2 = [sircat_id,sc_string,np.nan,parse_time(pw_raw['Start time'][i]).isot, np.nan, parse_time(pw_raw['End time'][i]).isot, 
np.nan,np.nan,np.nan, np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan, np.nan,np.nan,np.nan] #print(list1) #append to full list rows_list.append(list2) print(rows_list[-1]) ############STA print() pa_raw['Start time']=convert_time(pa_raw['Start time']) pa_raw['End time']=convert_time(pa_raw['End time']) pa_raw['Time of max P']=convert_time(pa_raw['Time of max P']) #print(pa_raw['Start time']) #print(pa_raw['End time']) #print(pa_raw['Time of max P']) for i in np.arange(0,len(pa_raw)): #make id for event id_time=parse_time(pa_raw['Start time'][i]).isot sc_idstring='SIR_STEREO_A_ALLEN_' sc_string='STEREO-A' sircat_id=sc_idstring+id_time[0:4]+id_time[5:7]+id_time[8:10]+'_01' #put all data for this event in a list list3 = [sircat_id,sc_string,np.nan,parse_time(pa_raw['Start time'][i]).isot, np.nan, parse_time(pa_raw['End time'][i]).isot, np.nan,np.nan,np.nan, np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan, np.nan,np.nan,np.nan] #print(list1) #append to full list rows_list.append(list3) print(rows_list[-1]) # #pw_raw['Start time'] #ptime=parse_time(p_raw['Start time']).datetime ###################### read raw STEREO SIR catalog file='sircat/sources/STEREO_Level3_SIR_data.xlsx' print('load Jian STEREO catalog from excel file:', file) sraw=pd.read_excel(file) #get 2 times: HSS start (equivalent to SIR start as defined in the L. Jian catalog), HSS end (where speed again < 450km/s) print('Events in STEREO SIR cat:', sraw.shape[0]) print() sc=sraw.loc[:,'spacecraft'] year_start=sraw.loc[:,'year_start'] stime=sraw.loc[:,'start_time'] year_end=sraw.loc[:,'year_end'] etime=sraw.loc[:,'end_time'] year_pt=sraw.loc[:,'year_pt'] ptime=sraw.loc[:,'pt_time'] for i in np.arange(0,sraw.shape[0]): s=stime[i] y=year_start[i] doy=int(s[0:3]) hour=int(s[-5:-3]) minute=int(s[-2:]) #print(y,doy,hour, min) sir_start_time=datetime.datetime(y,1,1)+timedelta(days=doy-1)+timedelta(hours=hour)+timedelta(minutes=minute) e=etime[i] y=year_end[i] doy=int(e[0:3]) hour=int(e[-5:-3]) minute=int(e[-2:]) #print(y,doy,hour, min) sir_end_time=datetime.datetime(y,1,1)+timedelta(days=doy-1)+timedelta(hours=hour)+timedelta(minutes=minute) #print(i) p=ptime[i] #print(ptime[i]) y=year_pt[i] doy=int(p[0:3]) hour=int(p[-5:-3]) minute=int(p[-2:]) #print(y,doy,hour, min) hss_start_time=datetime.datetime(y,1,1)+timedelta(days=doy-1)+timedelta(hours=hour)+timedelta(minutes=minute) #make id for event id_time=parse_time(hss_start_time).isot if sc[i]=='A': sc_idstring='SIR_STEREO_A_JIAN_' if sc[i]=='B': sc_idstring='SIR_STEREO_B_JIAN_' if sc[i]=='A': sc_string='STEREO-A' if sc[i]=='B': sc_string='STEREO-B' sircat_id=sc_idstring+id_time[0:4]+id_time[5:7]+id_time[8:10]+'_01' #put all data for this event in a list list4 = [sircat_id,sc_string,parse_time(sir_start_time).isot,parse_time(hss_start_time).isot, parse_time(sir_end_time).isot,np.nan,np.nan,np.nan,np.nan, np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan, np.nan,np.nan,np.nan] #print(list1) #append to full list rows_list.append(list4) ########################## read raw Wind catalog #Grandin et al. 
2018 - OMNI #removed 2 SIRs due to data gap of Wind in oct 2014 filewin='sircat/sources/grandin_2018_list_modified.txt' wraw=np.loadtxt(filewin,skiprows=9) print('load Grandin Earth HSS catalog from:', filewin) print('Events in Wind SIR/HSS cat:', wraw.shape[0]) print() #2 times: SIR/HSS start, HSS end (where speed again < 450km/s) #begin with 2007 begin2007=np.where(wraw[:,1]>=2007)[0][0] for i in np.arange(begin2007,len(wraw),1): #SIR HSS start time y,m,d,h,m - minus 1 hour for Wind at L1, not magnetopause wstart=datetime.datetime(wraw[i,1].astype(int),wraw[i,2].astype(int), wraw[i,3].astype(int),wraw[i,4].astype(int), 0)-datetime.timedelta(hours=1) #SIR HSS end time y,m,d,h,m - minus 1 hour for Wind at L1, not magnetopause wend=datetime.datetime(wraw[i,11].astype(int),wraw[i,12].astype(int), wraw[i,13].astype(int),wraw[i,14].astype(int), 0)-datetime.timedelta(hours=1) sc_idstring='SIR_WIND_GRANDIN_' id_time=parse_time(wstart).isot sircat_id=sc_idstring+id_time[0:4]+id_time[5:7]+id_time[8:10]+'_01' sc_string='Wind' list5 = [sircat_id,sc_string,np.nan,parse_time(wstart).isot,np.nan,parse_time(wend).isot,np.nan,np.nan,np.nan, np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan, np.nan,np.nan,np.nan] #print(list2) rows_list.append(list5) ########################## read MAVEN catalog from heliocats import data as hd importlib.reload(hd) #reload again while debugging #this is a recarray mavsir_all=hd.load_maven_sir_huang() #check which events overlap with the available MAVEN data mavsir_ind=np.where(mavsir_all.start < mav.time[-1])[0] mavsir=mavsir_all[mavsir_ind] print('Events in MAVEN SIR/HSS cat:', mavsir.shape[0]) print() #go through all events for i in mavsir_ind: sc_idstring='SIR_MAVEN_HUANG_' id_time=parse_time(mavsir.start[i][0]).isot sircat_id=sc_idstring+id_time[0:4]+id_time[5:7]+id_time[8:10]+'_01' sc_string='MAVEN' list6 = [sircat_id,sc_string,parse_time(mavsir.start[i][0]).isot,parse_time(mavsir.si[i][0]).isot,parse_time(mavsir.end[i][0]).isot,np.nan,np.nan,np.nan,np.nan, np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan, np.nan,np.nan,np.nan] #print(list3) rows_list.append(list6) ################################### add new events **** to be done #for measuring new events use this function from heliocats.plot #plt.close('all') #works in jupyter notebooks #works in scripts #matplotlib.use('qt5agg') #plt.ion() #STEREO-A #hp.plot_insitu_measure(sta, '2018-Jan-01 12:00','2018-Feb-01 12:00', 'STEREO-A', 'results/') #Wind #hp.plot_insitu_measure(win, '2019-Jan-29','2019-Feb-28', 'Wind', 'results/') ################ make pandas data frame for master file parameters =['sircat_id','sc_insitu','sir_start_time','hss_start_time','sir_end_time', 'hss_end_time','hss_vtmax_time','sc_heliodistance', 'sc_long_heeq', 'sc_lat_heeq', 'hss_vtmax','hss_vtmean','hss_vtstd','hss_btmax','hss_btmean',\ 'hss_btstd','hss_bzmin', 'hss_bzmean','hss_bzstd','hss_duration',\ 'sir_vtmax','sir_vtmean', 'sir_vtstd','sir_btmax','sir_btmean',\ 'sir_btstd','sir_bzmin', 'sir_bzmean','sir_bzstd','sir_duration'] master=pd.DataFrame(rows_list,columns=parameters) #sort by spacecraft indicator and start time master=master.sort_values(['sc_insitu','hss_start_time']) master = master.reset_index(drop=True) #drop extra index value master #save master file as Excel file='sircat/HELIO4CAST_SIRCAT_v10_master.xlsx' master.to_excel(file,sheet_name='SIRCATv1.0') print() print('SIRCAT 
master saved as '+file) print('total events', master.shape[0]) print('done') # ## (3) make SIRCAT # In[418]: from heliocats import cats as hc importlib.reload(hc) #reload again while debugging from heliocats import plot as hp importlib.reload(hp) #reload again while debugging #load master file scat=hc.load_helio4cast_sircat_master_from_excel('sircat/HELIO4CAST_SIRCAT_v10_master.xlsx') scat ####### 3a get indices for all spacecraft wini=np.where(scat.sc_insitu == 'Wind')[:][0] pspi=np.where(scat.sc_insitu == 'PSP')[:][0] stai=np.where(scat.sc_insitu == 'STEREO-A')[:][0] stbi=np.where(scat.sc_insitu == 'STEREO-B')[:][0] mavi=np.where(scat.sc_insitu == 'MAVEN')[:][0] print('done') ####### 3b get parameters for all spacecraft one after another # remove indices if the events in the master file have changed #os.system('rm sircat/indices_sircat/SIRCAT_indices_Wind.p') #os.system('rm sircat/indices_sircat/SIRCAT_indices_STEREO-A.p') #os.system('rm sircat/indices_sircat/SIRCAT_indices_STEREO-B.p') #os.system('rm sircat/indices_sircat/SIRCAT_indices_MAVEN.p') #os.system('rm sircat/indices_sircat/SIRCAT_indices_PSP.p') #hss times scat=hc.get_sircat_parameters(psp,pspi,scat,'PSP') scat=hc.get_sircat_parameters(win,wini,scat,'Wind') #sir times scat=hc.get_sircat_parameters(mav,mavi,scat,'MAVEN') scat=hc.get_sircat_parameters(stb,stbi,scat,'STEREO-B') #both allen and jian cats scat=hc.get_sircat_parameters(sta,stai,scat,'STEREO-A') # ###### 3c make all plots if wanted #matplotlib.use('Agg') #hp.plot_sircat_events(sta,stai,scat,'STEREO-A',sirplotsdir) #hp.plot_sircat_events(stb,stbi,scat,'STEREO-B',sirplotsdir) #hp.plot_sircat_events(win,wini,scat,'Wind',sirplotsdir) #hp.plot_sircat_events(mav,mavi,scat,'MAVEN',sirplotsdir) print('done') #kick out MAVEN events without data ############### sort SIRCAT by date scat = scat.sort_values(by='hss_start_time',ascending=False) scat = scat.reset_index(drop=True) # ### (4) save SIRCAT # ### 4a save header # In[410]: #save header and parameters as text file and prepare for html website header='SIR CATALOGUE v1.0 \n\nThis is the HELIO4CAST stream interaction region (SIR) and high speed stream (HSS) catalog,\nbased on in situ magnetic field and bulk plasma observations in the heliosphere. \nIt is a merged catalog created from individual ones made by Robert Allen et al., Lan Jian et al., Maxim Grandin et al. and Hui Huang et al. (see references).\n\nThis is version 1.0, released 2020-06-10, updated '+last_update+' doi: 10.6084/m9.figshare.12416906 \n\nThe catalog is available as python pandas dataframe (pickle), json, csv, xlsx, txt, html at \nhttps://helioforecast.space/sircat \n\nNumber of events in SIRCAT: '+str(len(scat))+' \nSIR observatories: Parker Solar Probe, Wind, STEREO-A, STEREO-B, MAVEN \nTime ranges: Parker Solar Probe: Oct 2018 - May 2020, Wind: Jan 2007 - Sep 2019, STEREO-A/B: Jan 2007 - Sep 2019, MAVEN: Dec 2014 - Jan 2018. \n\nAuthors: Christian Moestl, Andreas J. Weiss, R. L. Bailey, Martin A. Reiss, Space Research Institute, Austrian Academy of Sciences, Graz, Austria. \nRobert Allen, JHU/APL, USA; Lan Jian, NASA, USA; Maxim Grandin, University of Helsinki, Finland; Hui Huang, Beijing University, China. \n\nRules: If results are produced with this catalog for peer-reviewed scientific publications, \nplease contact christian.moestl@oeaw.ac.at, robert.allen@jhuapl.edu, lan.jian@nasa.gov, maxime.grandin@helsinki.fi for possible co-authorships.
\n\nThis catalog has been made by getting the start and end times of each high speed stream from the \nindividual catalogs, and then calculating all parameters again consistently from the data by us. \nThe in situ data that were used for creating this catalog, with a size of 8 GB in total, including extra data \nfiles with magnetic field components in RTN coordinates and other spacecraft that are not used for producing this catalog, \ncan be downloaded in python pickle format as recarrays from https://doi.org/10.6084/m9.figshare.11973693.v7 \nThe python code for producing this catalog is available at https://github.com/cmoestl/heliocats sircat.ipynb \n\nEach sircat_id has a tag in it that indicates from which catalog the SIR/HSS times were taken: \n\nParker Solar Probe: Allen et al. (2021), tag: ALLEN, \nWind: Grandin et al. (2019), tag: GRANDIN \nSTEREO-A: Jian et al. (2019), tag: JIAN. \nSTEREO-B: Jian et al. (2019), tag: JIAN. \nMAVEN: Huang et al. (2019), tag: HUANG. \n\nReferences \nAllen et al. (2021), https://doi.org/10.1051/0004-6361/202039833 \nGrandin, M. et al. (2019), https://doi.org/10.1029/2018JA026396 \nJian, L. et al. (2019), https://doi.org/10.1007/s11207-019-1416-8 \nHuang, H. et al. (2019), https://doi.org/10.3847/1538-4357/ab25e9 \n\nComments: \n- The STEREO catalog contains the SIR start, stream interface and SIR end times. We use their stream interface time as our hss_start_time. \n- The MAVEN catalog has similar times to the STEREO catalog.\n- Earth SIR/HSS list: This catalog directly gives the hss_start_time and the hss_end_time, but no SIR times. \n- The times in the Earth SIR/HSS list have been modified to 1 hour earlier as these times were \noriginally given for the magnetopause, but the Wind spacecraft is located at the L1 point. \nOne hour is practically equivalent to the propagation time of a 400 km/s slow solar wind \nfrom the L1 point to the magnetopause.\n- Spacecraft positions are given in Heliocentric Earth Equatorial (HEEQ) coordinates. \n- The coordinate system for all magnetic field components is SCEQ, except for Wind (HEEQ, which is equivalent to SCEQ for Earth). \n Definition of SpaceCraft Equatorial Coordinates (SCEQ): \n Z is the solar rotation axis. \n Y is the cross product of Z and R, with R being the vector that points from the Sun to the spacecraft.\n X completes the right-handed triad (and points away from the Sun). \nThis system is thus like HEEQ but centered on the respective in situ spacecraft, so the SCEQ X and Y \nbase vectors are rotated by the HEEQ longitude of the in situ spacecraft from HEEQ X and Y.\nThe Y vector is similar to the T vector in an RTN system for each spacecraft, but the X and Z vectors \nare rotated around Y compared to an RTN system. The differences between RTN and SCEQ for spacecraft within \na few degrees of the solar equatorial plane are very small (within a few 0.1 nT usually).\nWe choose SCEQ because it has the advantage that, for comparisons between multipoint CME events \nand with simulations, there is always a similar reference plane (the solar equatorial plane). \n\n ' parameters_text='Parameters:\n00: sircat_id: The unique identifier for the observed stream interaction region (SIR). unit: string. \n01: sc_insitu: The name of the in situ observing spacecraft. unit: string. \n02: sir_start_time: Stream interaction region start time. unit: UTC. \n03: hss_start_time: High speed stream start time, equal to the stream interface time (for STEREO, MAVEN catalogs). unit: UTC.
\n04: sir_end_time: End time of the stream interaction region. unit: UTC. \n05: hss_end_time: High speed stream end time, criterion at Wind: speed < 450 km/s. unit: UTC. \n06: hss_vtmax_time: High speed stream maximum speed time. unit: UTC. \n07: sc_heliodistance: Heliocentric distance of the spacecraft at hss_start_time. unit: AU.\n08: sc_long_heeq: Heliospheric longitude of the spacecraft at hss_start_time, range [-180,180]. unit: degree (HEEQ).\n09: sc_lat_heeq: Heliospheric latitude of the spacecraft at hss_start_time, range [-90,90]. unit: degree (HEEQ).\n10: hss_vt_max: Maximum proton speed from hss_start_time to hss_end_time. unit: km/s.\n11: hss_vt_mean: Mean proton speed from hss_start_time to hss_end_time. unit: km/s.\n12: hss_vt_std: Standard deviation of proton speed from hss_start_time to hss_end_time. unit: km/s.\n13: hss_bt_max: Maximum total magnetic field from hss_start_time to hss_end_time. unit: nT.\n14: hss_bt_mean: Mean total magnetic field from hss_start_time to hss_end_time. unit: nT.\n15: hss_bt_std: Standard deviation of total magnetic field from hss_start_time to hss_end_time. unit: nT.\n16: hss_bz_min: Minimum Bz component (SCEQ) from hss_start_time to hss_end_time. unit: nT.\n17: hss_bz_mean: Mean Bz component (SCEQ) from hss_start_time to hss_end_time. unit: nT.\n18: hss_bz_std: Standard deviation of Bz component (SCEQ) from hss_start_time to hss_end_time. unit: nT.\n19: hss_duration: Duration of high speed stream from hss_start_time to hss_end_time. unit: hours.\n20: sir_vt_max: Maximum proton speed from sir_start_time to sir_end_time. unit: km/s.\n21: sir_vt_mean: Mean proton speed from sir_start_time to sir_end_time. unit: km/s.\n22: sir_vt_std: Standard deviation of proton speed from sir_start_time to sir_end_time. unit: km/s.\n23: sir_bt_max: Maximum total magnetic field from sir_start_time to sir_end_time. unit: nT.\n24: sir_bt_mean: Mean total magnetic field from sir_start_time to sir_end_time. unit: nT.\n25: sir_bt_std: Standard deviation of total magnetic field from sir_start_time to sir_end_time. unit: nT.\n26: sir_bz_min: Minimum Bz component (SCEQ) from sir_start_time to sir_end_time. unit: nT.\n27: sir_bz_mean: Mean Bz component (SCEQ) from sir_start_time to sir_end_time. unit: nT.\n28: sir_bz_std: Standard deviation of Bz component (SCEQ) from sir_start_time to sir_end_time. unit: nT.\n29: sir_duration: Duration of stream interaction region from sir_start_time to sir_end_time.
unit: hours.\n\n\n' print(header) print(parameters_text) #make header file file='sircat/HELIO4CAST_SIRCAT_v10_header.txt' with open(file, "w") as text_file: text_file.write(header) text_file.write(parameters_text) print() print('header saved as '+file) print() #Convert to html regarding line breaks, paragraph beginning and spaces header_spaces=header.replace(" ", "&nbsp;") header_html= "<p>" +header_spaces.replace('\n', '<br>')+ "</p>" parameters_spaces=parameters_text.replace(" ", "&nbsp;") parameters_html= "<p>" +parameters_text.replace('\n', '<br>')+ "</p>" print('header converted to HTML') print() print() # ### 4b save into different formats # In[413]: ########## python formats # save ICMECAT as pandas dataframe with times as datetime objects as pickle file='sircat/HELIO4CAST_SIRCAT_v10_pandas.p' pickle.dump([scat,header,parameters], open(file, 'wb')) print('SIRCAT saved as '+file) #load sircat as pandas dataframe file='sircat/HELIO4CAST_SIRCAT_v10_pandas.p' [scat_pandas,h,p]=pickle.load( open(file, 'rb')) scat.keys() scat # # save SIRCAT as numpy array with times as matplotlib datetime as pickle # scat_num=copy.deepcopy(scat) # scat_num.icme_start_time=parse_time(scat_num.icme_start_time).plot_date # scat_num.mo_start_time=parse_time(scat_num.mo_start_time).plot_date # scat_num.mo_end_time=parse_time(scat_num.mo_end_time).plot_date # #convert to recarray # scat_num_rec=scat_num.to_records() # #create structured array # dtype1=[('index','i8'),('icmecat_id', '<U30'),('sc_insitu', '<U20')] +[(i, '<f8') for i in ic.keys()[2:len(ic.keys())]] # scat_num_struct=np.array(scat_num_rec,dtype=dtype1) # file='icmecat/HELIO4CAST_ICMECAT_v20_numpy.p' # pickle.dump([scat_num,scat_num_struct,header,parameters], open(file, 'wb')) # print('ICMECAT saved as '+file) ################ save to different formats #get beginning of tags for STA to identify allen and jian events tag_list=[] for i in np.arange(0,len(scat)): tag_list.append(scat.sircat_id[i][13]) #j stai_jian=np.where(np.logical_and(scat.sc_insitu == 'STEREO-A',np.array(tag_list)=='J'))[:][0] stai_allen=np.where(np.logical_and(scat.sc_insitu == 'STEREO-A',np.array(tag_list)=='A'))[:][0] #get indices of all SIR spacecraft in SIRCAT sir_sc=np.hstack([stai_jian,stbi,mavi]) #get indices of all HSS spacecraft in SIRCAT hss_sc=np.hstack([pspi,wini,stai_allen]) #copy pandas dataframe first to change time format consistent with HELIO4CAST scat_copy=copy.deepcopy(scat) scat_copy.at[sir_sc,'sir_start_time']=parse_time(scat.sir_start_time[sir_sc]).isot scat_copy.hss_start_time=parse_time(scat.hss_start_time).isot scat_copy.at[sir_sc,'sir_end_time']=parse_time(scat.sir_end_time[sir_sc]).isot scat_copy.at[hss_sc,'hss_end_time']=parse_time(scat.hss_end_time[hss_sc]).isot #scat_copy.at[hss_sc,'hss_vtmax_time']=parse_time(scat.hss_vtmax_time[hss_sc]).isot #change time format for sir for i in sir_sc: dum=scat_copy.sir_start_time[i] scat_copy.at[i,'sir_start_time']=dum[0:16]+'Z' dum=scat_copy.hss_start_time[i] scat_copy.at[i,'hss_start_time']=dum[0:16]+'Z' dum=scat_copy.sir_end_time[i] scat_copy.at[i,'sir_end_time']=dum[0:16]+'Z' for i in hss_sc: dum=scat_copy.hss_start_time[i] scat_copy.at[i,'hss_start_time']=dum[0:16]+'Z' dum=scat_copy.hss_end_time[i] scat_copy.at[i,'hss_end_time']=dum[0:16]+'Z' #dum=scat_copy.hss_vtmax_time[i] #scat_copy.at[i,'hss_vtmax_time']=dum[0:16]+'Z' # for i in stbi: # dum=scat_copy.sir_end_time[i] # scat_copy.at[i,'sir_end_time']=dum[0:16]+'Z' # for i in stai: # dum=scat_copy.sir_end_time[i] # 
scat_copy.at[i,'sir_end_time']=dum[0:16]+'Z' #save as Excel file='sircat/HELIO4CAST_SIRCAT_v10.xlsx' scat_copy.to_excel(file,sheet_name='SIRCATv1.0') print('SIRCAT saved as '+file) #save as json file='sircat/HELIO4CAST_SIRCAT_v10.json' scat_copy.to_json(file) print('SIRCAT saved as '+file) #save as csv file='sircat/HELIO4CAST_SIRCAT_v10.csv' scat_copy.to_csv(file) print('SIRCAT saved as '+file) #save as txt file='sircat/HELIO4CAST_SIRCAT_v10.txt' np.savetxt(file, scat_copy.values.astype(str), fmt='%s' ) print('SIRCAT saved as '+file) # In[415]: ######################### # #########save into hdf5 format , use S for strings http://docs.h5py.org/en/stable/strings.html#what-about-numpy-s-u-type # dtype2=[('index','i8'),('icmecat_id', 'S30'),('sc_insitu', 'S20')] +[(i, '<f8') for i in ic.keys()[2:len(ic.keys())]] # ich5=np.array(scat_num_rec,dtype=dtype2) # file='icmecat/HELIO4CAST_ICMECAT_v20.h5' # f=h5py.File(file,mode='w') # f["icmecat"]= ich5 # #add attributes # #************************ # #*********************** # print('ICMECAT saved as '+file) # f.close() # #reading h5py files http://docs.h5py.org/en/latest/quick.html # #fr = h5py.File('icmecat/HELIO4CAST_ICMECAT_v20.h5', 'r') # #list(fr.keys()) # #ich5=fr['icmecat'] # #ich5['mo_bstd'] # #ich5.dtype # #fr.close() # ################## # #save as .npy without pickle # file='icmecat/HELIO4CAST_ICMECAT_v20_numpy.npy' # np.save(file,ich5, allow_pickle=False) # print('ICMECAT saved as '+file) # #for loading do: # #icnpy=np.load(file) # #decode strings: # #icnpy['icmecat_id'][0].decode() #copy pandas dataframe first to change time format consistent with HELIO4CAST scat_copy2=copy.deepcopy(scat) scat_copy2.at[sir_sc,'sir_start_time']=parse_time(scat.sir_start_time[sir_sc]).iso scat_copy2.hss_start_time=parse_time(scat.hss_start_time).iso scat_copy2.at[sir_sc,'sir_end_time']=parse_time(scat.sir_end_time[sir_sc]).iso scat_copy2.at[hss_sc,'hss_end_time']=parse_time(scat.hss_end_time[hss_sc]).iso #scat_copy2.at[hss_sc,'hss_vtmax_time']=parse_time(scat.hss_vtmax_time[hss_sc]).iso #change time format for sir for i in sir_sc: dum=scat_copy2.sir_start_time[i] scat_copy2.at[i,'sir_start_time']=dum[0:16] dum=scat_copy2.hss_start_time[i] scat_copy2.at[i,'hss_start_time']=dum[0:16] dum=scat_copy2.sir_end_time[i] scat_copy2.at[i,'sir_end_time']=dum[0:16] for i in hss_sc: dum=scat_copy2.hss_start_time[i] scat_copy2.at[i,'hss_start_time']=dum[0:16] dum=scat_copy2.hss_end_time[i] scat_copy2.at[i,'hss_end_time']=dum[0:16] #dum=scat_copy2.hss_vtmax_time[i] #scat_copy2.at[i,'hss_vtmax_time']=dum[0:16] #save as json for webpage with different time format file='sircat/HELIO4CAST_SIRCAT_v10_isot.json' scat_copy2.to_json(file) print('SIRCAT saved as '+file) #save as html no header file='sircat/HELIO4CAST_SIRCAT_v10_simple.html' scat_copy.to_html(file) print('SIRCAT saved as '+file) ############ save as html file with header #save as html file='sircat/HELIO4CAST_SIRCAT_v10.html' #ic.to_html(file,justify='center') #ichtml='{% extends "_base.html" %} \n \n {% block content %} \n \n \n ' ichtml = header_html ichtml += parameters_html ichtml += scat_copy.to_html() #ichtml +='\n \n {% endblock %}' with open(file,'w') as f: f.write(ichtml) f.close() print('SIRCAT saved as '+file) # ## 4c load ICMECAT pickle files # In[416]: #load sircat as pandas dataframe file='sircat/HELIO4CAST_SIRCAT_v10_pandas.p' [scat_pandas,h,p]=pickle.load( open(file, 'rb')) scat.keys() scat #load icmecat as numpy array # file='icmecat/HELIO4CAST_ICMECAT_v20_numpy.p' # 
[ic_nprec,ic_np,h,p]=pickle.load( open(file, 'rb')) # In[417]: scat_pandas scat_pandas.keys() # In[ ]: # In[ ]: # In[ ]:
sircat.py
39,800
!/usr/bin/env python coding: utf-8 sircat Makes a catalog of solar wind stream interaction regions (SIRs) and high speed solar wind streams (HSS) for the Wind, STEREO and MAVEN spacecraft since 2007. Authors: [C. Möstl](https://www.iwf.oeaw.ac.at/en/user-site/christian-moestl/) (twitter @chrisoutofspace), A. J. Weiss, R. L. Bailey, IWF Graz, Austria; Lan Jian, NASA, USA, Maxim Grandin, University of Helsinki, Finland; Hui Huang, Beijing University, China. **current status: work in progress** If you want to use parts of this code for generating results for peer-reviewed scientific publications, please contact us per email (christian.moestl@oeaw.ac.at, lan.jian@nasa.gov, maxime.grandin@helsinki.fi) for co-authorships. part of https://github.com/cmoestl/heliocats, last update June 2020 --- Installation In a command line, do: "git clone https://github.com/cmoestl/heliocats". Install a specific conda environment to run this code, see README at https://github.com/cmoestl/heliocats Download the files from https://doi.org/10.6084/m9.figshare.11973693 and place them in the /data folder. Updates Adding a new SIR event: change the source files, or add the sir and hss times in section 2 before the master file sircat/HELIO4CAST_SIRCAT_v10_master.xlsx is produced. Then delete the file for the respective spacecraft under sircat/indices_sircat, and run this notebook or script. Convert this notebook to a script with "jupyter nbconvert --to script sircat.ipynb" in a command line --- Data sources **PSP SIR list**: Allen et al. 2021: https://www.aanda.org/articles/aa/full_html/2021/06/aa39833-20/aa39833-20.html, list at https://sppgway.jhuapl.edu/event_list **STEREO SIR list**: Lan Jian, https://stereo-ssc.nascom.nasa.gov/data/ins_data/impact/level3/ published in: L. K. Jian et al. https://doi.org/10.1007/s11207-019-1416-8, 2019. This catalog contains the SIR start and end times, as well as the Pt max time for the stream interface. We use their SIR start and ends time as our *sir_start_time* and *sir_end_time*, and set the *hss_start_time* with the Pt max time. For 4 Pt max times that were nan in the Jian et al. list, the *hss_start_time* has been set similar to the *sir_end_time*. **To do**: create our own *hss_end_time* by setting it as the first time when the total bulk speed drops below 450 km/s after *sir_end_time*. Lan: For the STEREO HSS catalog, you can opt to list only the events with the fastest speed reaching at least 500 km/s, to be consistent with Grandin et al. (2019)." **Earth SIR/HSS list**: Maxim Grandin et al., 2018, https://doi.org/10.1029/2018JA026396 This catalog directly gives the *hss_start_time* and the *hss_end_time*. This list was determined by an algorithm and there are no specifics about the the SIR times, instead the start time is determined as the start of the increasing speed and is thus is likely closer to an SIR start time than to a stream interface time, which we use as a *hss_start_time*. For simplicity, we have nevertheless taken the given start time as the hss_start_time. The times in the Earth SIR/HSS list have been modified to 1 hour earlier as these times were originally given for the magnetopause, but the Wind spacecraft is located at the L1 point. One hour is practically equivalent to the propagation time of a 400 km/s slow solar wind from the L1 point to the magnetopause. **To do**: In future updates, we may change hss_start_time to the sir_start_time and add a proper hss_start_time by searching for ptmax after a new sir_start_time. The Grandin et al. 
(2019) catalogue only contains events for which the solar wind speed reached at least 500 km/s. Lan: "For Grandin et al. (2019), you can use the peak of total pressure to approximate the stream interface time." **MARS SIR/HSS list**: Hui Huang et al., 2019, https://doi.org/10.3847/1538-4357/ab25e9 (open access not available). This catalog gives the sir_start_time, hss_start_time (=stream interface time) and the sir_end_time. **To do**: Similar to the STEREO-list, with have added the hss_end_time. All other parameters are calculated from scratch from the spacecraft data via this notebook or script. --- Other resourcess **Great review on SIRs** by Ian G. Richardson: https://link.springer.com/article/10.1007/s41116-017-0011-z --- start with importing packages, get paths from config.py file and make directories In[405]: In[11]:reload again while debuggingreload again while debuggingreload again while debuggingreload again while debuggingwhere the in situ data files are located is read from config.py make directories first time if not thereConvert this notebook to a script with jupyter nbconvert --to script icmecat.ipynbin situ data files are updated via the icmecat.ipynb notebook (1) load data (1) load data from STEREO-B, STEREO-A, Wind, PSP, and MAVEN In[2]:print('load Ulysses RTN') made with heliocats.data.save_ulysses_datafileuly='ulysses_1990_2009_rtn.p'[uly,huly]=pickle.load(open(data_path+fileuly, "rb" ) ) yearly magplasma files from stereo science center, conversion to SCEQ CURRENT ACTIVE SPACECRAFT ADD BepiColombo ADD Solar Orbiterremoved magnetosphere by C. Simon Wedlund, 1 data point per orbit, MSO filemav='maven_2014_2018.p'[mav,hmav]=pickle.load(open(filemav, 'rb' ) )filemav='maven_2014_2018_removed.p'[mav,hmav]=pickle.load(open(filemav, 'rb' ) ) print('load MSL RAD')MSL RADrad=hd.load_msl_rad(), rad.time,rad.dose_solfrom heliosat, converted to SCEQ similar to STEREO-A/B STAyearly magplasma files from stereo science center, conversion to SCEQ beacon datafilesta2="stereoa_2019_2020_sceq_beacon.p"filesta2='stereoa_2019_2020_sept_sceq_beacon.p'filesta2='stereoa_2019_now_sceq_beacon.p'filesta2="stereoa_2020_august_november_sceq_beacon.p" cutoff with end of science datamake arrayconvert to recarray Windfrom HELCATS HEEQ until 2018 1 1 + new self-processed data with heliosat and hd.save_wind_datafunction for spike removal, see list with times in that functionmerge Wind old and new data cut off HELCATS data at end of 2017, win2 begins exactly after thismake arrayconvert to recarrayprint('MSL/RAD ',str(rad.time[0])[0:10],str(rad.time[-1])[0:10])print('VEX ',str(vex.time[0])[0:10],str(vex.time[-1])[0:10])print('MESSENGER ',str(mes.time[0])[0:10],str(mes.time[-1])[0:10])print('Ulysses ',str(uly.time[0])[0:10],str(uly.time[-1])[0:10]) print('catalogs:') print() print('HELCATS HIGeoCAT ',str(higeocat_time[0])[0:10],str(higeocat_time[-1])[0:10]) (2) make SIRCAT masterfile from STEREO and Wind catalogs Here we read raw STEREO SIR and Earth SIR catalogs from Robert Allen, Lan Jian, Maxim Grandin, and Hui Huang et al. and convert to master catalog xlsx file that contains all times in a consistent way. In[302]:make list for all basic times, ids etc. 
for master filefrom Allen catalog format to datetime objectprint(p_time_obj[i])dates with year 1 set to nan:read all Allen catalogspspwindsta PSPprint(p_raw['Start time'])print(p_raw['End time'])print(p_raw['Time of max P'])make id for event put all data for this event in a listprint(list1) append to full list Windprint(pw_raw['Start time'])print(pw_raw['End time'])print(pw_raw['Time of max P'])make id for event put all data for this event in a listprint(list1) append to full listSTAprint(pa_raw['Start time'])print(pa_raw['End time'])print(pa_raw['Time of max P'])make id for event put all data for this event in a listprint(list1) append to full listpw_raw['Start time']ptime=parse_time(p_raw['Start time']).datetime read raw STEREO SIR catalogget 2 times: HSS start (equivalent to SIR start as defined in the L. Jian catalog), HSS end (where speed again < 450km/s)print(y,doy,hour, min)print(y,doy,hour, min)print(i)print(ptime[i])print(y,doy,hour, min)make id for event put all data for this event in a listprint(list1) append to full list read raw Wind catalogGrandin et al. 2018 - OMNIremoved 2 SIRs due to data gap of Wind in oct 20142 times: SIR/HSS start, HSS end (where speed again < 450km/s)begin with 2007SIR HSS start time y,m,d,h,m - minus 1 hour for Wind at L1, not magnetopauseSIR HSS end time y,m,d,h,m - minus 1 hour for Wind at L1, not magnetopauseprint(list2) read MAVEN catalog reload again while debuggingthis is a recarray check which events overlap with the available MAVEN datago through all eventsprint(list3) add new events **** to be donefor measuring new events use this function from heliocats.plot plt.close('all')works in jupyter notebooksworks in scriptsmatplotlib.use('qt5agg') plt.ion()STEREO-Ahp.plot_insitu_measure(sta, '2018-Jan-01 12:00','2018-Feb-01 12:00', 'STEREO-A', 'results/')Windhp.plot_insitu_measure(win, '2019-Jan-29','2019-Feb-28', 'Wind', 'results/') make pandas data frame for master filesort by spacecraft indicator and start timedrop extra index valuesave master file as Excel (3) make SIRCAT In[418]:reload again while debuggingreload again while debuggingload master file 3a get indices for all spacecraft 3b get parameters for all spacecraft one after another remove indices if the events in the master file have changedos.system('rm sircat/indices_sircat/SIRCAT_indices_Wind.p')os.system('rm sircat/indices_sircat/SIRCAT_indices_STEREO-A.p')os.system('rm sircat/indices_sircat/SIRCAT_indices_STEREO-B.p')os.system('rm sircat/indices_sircat/SIRCAT_indices_MAVEN.p')os.system('rm sircat/indices_sircat/SIRCAT_indices_PSP.p')hss timessir timesboth allen and jian cats 3c make all plots if wantedmatplotlib.use('Agg')hp.plot_sircat_events(sta,stai,scat,'STEREO-A',sirplotsdir)hp.plot_sircat_events(stb,stbi,scat,'STEREO-B',sirplotsdir)hp.plot_sircat_events(win,wini,scat,'Wind',sirplotsdir)hp.plot_sircat_events(mav,mavi,scat,'MAVEN',sirplotsdir)kick out MAVEN events without data sort SIRCAt by date (4) save SIRCAT 4a save header In[410]:save header and parameters as text file and prepare for html websitemake header fileConvert to html regarding line breaks, paragraph beginning and spaces 4b save into different formats In[413]: python formats save ICMECAT as pandas dataframe with times as datetime objects as pickleload sircat as pandas dataframe save SIRCAT as numpy array with times as matplotlib datetime as pickle scat_num=copy.deepcopy(scat) scat_num.icme_start_time=parse_time(scat_num.icme_start_time).plot_date scat_num.mo_start_time=parse_time(scat_num.mo_start_time).plot_date 
scat_num.mo_end_time=parse_time(scat_num.mo_end_time).plot_date convert to recarray scat_num_rec=scat_num.to_records() create structured array dtype1=[('index','i8'),('icmecat_id', '<U30'),('sc_insitu', '<U20')] +[(i, '<f8') for i in ic.keys()[2:len(ic.keys())]] scat_num_struct=np.array(scat_num_rec,dtype=dtype1) file='icmecat/HELIO4CAST_ICMECAT_v20_numpy.p' pickle.dump([scat_num,scat_num_struct,header,parameters], open(file, 'wb')) print('ICMECAT saved as '+file) save to different formatsget beginning of tags for STA to identify allen and jian eventsjget indices of all SIR spacecraft in SIRCATget indices of all HSS spacecraft in SIRCATcopy pandas dataframe first to change time format consistent with HELIO4CASTscat_copy.at[hss_sc,'hss_vtmax_time']=parse_time(scat.hss_vtmax_time[hss_sc]).isotchange time format for sirdum=scat_copy.hss_vtmax_time[i] scat_copy.at[i,'hss_vtmax_time']=dum[0:16]+'Z' for i in stbi: dum=scat_copy.sir_end_time[i] scat_copy.at[i,'sir_end_time']=dum[0:16]+'Z' for i in stai: dum=scat_copy.sir_end_time[i] scat_copy.at[i,'sir_end_time']=dum[0:16]+'Z'save as Excelsave as jsonsave as csvsave as txt In[415]: save into hdf5 format , use S for strings http://docs.h5py.org/en/stable/strings.htmlwhat-about-numpy-s-u-type dtype2=[('index','i8'),('icmecat_id', 'S30'),('sc_insitu', 'S20')] +[(i, '<f8') for i in ic.keys()[2:len(ic.keys())]] ich5=np.array(scat_num_rec,dtype=dtype2) file='icmecat/HELIO4CAST_ICMECAT_v20.h5' f=h5py.File(file,mode='w') f["icmecat"]= ich5 add attributes ************************ *********************** print('ICMECAT saved as '+file) f.close() reading h5py files http://docs.h5py.org/en/latest/quick.html fr = h5py.File('icmecat/HELIO4CAST_ICMECAT_v20.h5', 'r') list(fr.keys()) ich5=fr['icmecat'] ich5['mo_bstd'] ich5.dtype fr.close() save as .npy without pickle file='icmecat/HELIO4CAST_ICMECAT_v20_numpy.npy' np.save(file,ich5, allow_pickle=False) print('ICMECAT saved as '+file) for loading do: icnpy=np.load(file) decode strings: icnpy['icmecat_id'][0].decode()copy pandas dataframe first to change time format consistent with HELIO4CASTscat_copy2.at[hss_sc,'hss_vtmax_time']=parse_time(scat.hss_vtmax_time[hss_sc]).isochange time format for sirdum=scat_copy2.hss_vtmax_time[i] scat_copy2.at[i,'hss_vtmax_time']=dum[0:16]save as json for webpage with different time formatsave as html no header save as html file with headersave as htmlic.to_html(file,justify='center')ichtml='{% extends "_base.html" %} \n \n {% block content %} \n \n \n 'ichtml +='\n \n {% endblock %}' 4c load ICMECAT pickle files In[416]:load sircat as pandas dataframeload icmecat as numpy array file='icmecat/HELIO4CAST_ICMECAT_v20_numpy.p' [ic_nprec,ic_np,h,p]=pickle.load( open(file, 'rb')) In[417]: In[ ]: In[ ]: In[ ]:
13,256
en
0.65528
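The SIRCAT header above defines SpaceCraft Equatorial Coordinates (SCEQ) only in words: Z along the solar rotation axis, Y as the normalized cross product of Z and the Sun-to-spacecraft vector R, and X completing the right-handed triad. A minimal numpy sketch of that construction, assuming the spacecraft position is already given as a cartesian HEEQ vector; the function name sceq_basis is hypothetical and not part of heliocats:

import numpy as np

def sceq_basis(r_heeq):
    # SCEQ unit vectors for a spacecraft at position r_heeq (cartesian HEEQ).
    r = np.asarray(r_heeq, dtype=float)
    z = np.array([0.0, 0.0, 1.0])        # solar rotation axis in HEEQ
    y = np.cross(z, r)
    y = y / np.linalg.norm(y)            # Y = Z x R, normalized
    x = np.cross(y, z)                   # X completes the right-handed triad
    x = x / np.linalg.norm(x)            # and points away from the Sun
    return x, y, z

# Example: a spacecraft 45 degrees ahead of Earth in HEEQ longitude, in the solar equatorial plane
x, y, z = sceq_basis([np.cos(np.radians(45)), np.sin(np.radians(45)), 0.0])

For a spacecraft at zero HEEQ longitude this reduces to the HEEQ axes themselves, which is why the header treats HEEQ as the Earth equivalent of SCEQ.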
import pandas as pd import numpy as np from scipy import stats def columns_views(player_1_df, player_2_df): columns = list(player_1_df.columns) if list(player_1_df.columns) == list(player_2_df.columns): columns = list(player_1_df.columns) player_1 = list(player_1_df.values[0]) player_2 = list(player_2_df.values[0]) views = [] for column, player1, player2 in zip(columns, player_1, player_2): print('column : {} _ player1-{} , player2-{} < diff : {} >'.format( column, player1, player2, abs(player1 - player2) )) views.append(abs(player1 - player2)) print(views) def convert_preferred_foot(df): df['preferred_foot'] = df['preferred_foot'].replace('Right', 1) df['preferred_foot'] = df['preferred_foot'].replace('Left', 2) return df def convert_work_rate(df): convert = { 'High': 3, 'Medium': 2, 'Low': 1 } work_rate = df['work_rate'].values[0].split('/') attack = work_rate[0] defense = work_rate[1] df['attack'] = convert[attack] df['defense'] = convert[defense] # work_rateの削除処理 df = df.drop(columns='work_rate') return df def euclidean_distance(v1, v2): # ユーグリッド距離を算出 # https://qiita.com/shim0mura/items/64918dad83d162ef2ac2#ユークリッド距離 # どちらも同じ値を返す # distance = np.linalg.norm(v1 - v2) distance = np.sqrt(np.power(v1 - v2, 2).sum()) # 0から1までの値で似ていれば似ているほど1に近くなる、みたいな類似度として分かりやすい値が欲しい。 # 0での除算エラーを防ぐためにこのdに1を足して逆数をとるとそのような値を取ることが出来る。 # 1/(1+d) # print('distance', distance) return 1 / (1 + distance) def cos_similarity(v1, v2): # Scipyを使ってコサイン類似度を求める方法 # import scipy.spatial.distance as dis # print(np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))) # print(dis.cosine(v1, v2)) # return dis.cosine(v1, v2) # cos類似度を算出 return np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2)) # ピアソンの積率相関係数 def pearson_product_moment_correlation_coefficien(v1, v2): # corr = np.corrcoef(v1, v2)[0, 1] corr = stats.pearsonr(v1, v2) return corr # スピアマンの順位相関係数 def spearman_rank_correlation_coefficient(v1, v2): corr = stats.spearmanr(v1, v2) return corr # ケンドールの順位相関係数 def kendalltau_rank_correlation_coefficient(v1, v2): corr = stats.kendalltau(v1, v2) return corr def similarity(v1_df, v2_df): v1_value = v1_df.values[0] v2_value = v2_df.values[0] print('v1_value', v1_value) print('v2_value', v2_value) # リストをps.Seriesに変換 s1 = pd.Series(list(v1_value)) s2 = pd.Series(list(v2_value)) # 相関係数を計算 res = s1.corr(s2) print(res) corr = pearson_product_moment_correlation_coefficien( v1_value, v2_value ) print('pearson_product_moment_correlation_coefficien', corr) corr = spearman_rank_correlation_coefficient( v1_value, v2_value ) print('spearman_rank_correlation_coefficient', corr) corr = kendalltau_rank_correlation_coefficient( v1_value, v2_value ) print('kendalltau_rank_correlation_coefficient', corr) e_distance = euclidean_distance(v1_value, v2_value) print('e_distance', e_distance) # return euclidean_distance(v1_value, v2_value) # return res return cos_similarity(v1_value, v2_value) # 数値型の整形 def shaping_num(value): if '+' in value: value = str(value).split('+') value = int(value[0]) + int(value[1]) return value if '-' in value: value = str(value).split('-') value = int(value[0]) - int(value[1]) return value return value def need_columns(df): columns = [ 'height_cm', 'weight_kg', 'preferred_foot', 'weak_foot', 'skill_moves', 'work_rate', 'player_tags', 'pace', 'shooting', 'passing', 'dribbling', 'defending', 'physic', 'player_traits', 'attacking_crossing', 'attacking_finishing', 'attacking_heading_accuracy', 'attacking_short_passing', 'attacking_volleys', 'skill_dribbling', 'skill_curve', 'skill_fk_accuracy', 'skill_long_passing', 
'skill_ball_control', 'movement_acceleration', 'movement_sprint_speed', 'movement_agility', 'movement_reactions', 'movement_balance', 'power_shot_power', 'power_jumping', 'power_stamina', 'power_strength', 'power_long_shots', 'mentality_aggression', 'mentality_interceptions', 'mentality_positioning', 'mentality_vision', 'mentality_penalties', 'mentality_composure', 'defending_marking', 'defending_standing_tackle', 'defending_sliding_tackle' ] columns += [ 'ls', 'st', 'rs', 'lw', 'lf', 'cf', 'rf', 'rw', 'lam', 'cam', 'ram', 'lm', 'lcm', 'cm', 'rcm', 'rm', 'lwb', 'ldm', 'cdm', 'rdm', 'rwb', 'lb', 'lcb', 'cb', 'rcb', 'rb' ] # ls,st,rs,lw,lf,cf,rf,rw, # lam,cam,ram,lm,lcm,cm,rcm,rm, # lwb,ldm,cdm,rdm,rwb,lb,lcb,cb,rcb,rb return df[columns] def convert_num_values(player_1_df, player_2_df): num_values = [ 'pace', 'shooting', 'passing', 'dribbling', 'defending', 'physic', 'attacking_crossing', 'attacking_finishing', 'attacking_heading_accuracy', 'attacking_short_passing', 'attacking_volleys', 'skill_dribbling', 'skill_curve', 'skill_fk_accuracy', 'skill_long_passing', 'skill_ball_control', 'movement_acceleration', 'movement_sprint_speed', 'movement_agility', 'movement_reactions', 'movement_balance', 'power_shot_power', 'power_jumping', 'power_stamina', 'power_strength', 'power_long_shots', 'mentality_aggression', 'mentality_interceptions', 'mentality_positioning', 'mentality_vision', 'mentality_penalties', 'mentality_composure', 'defending_marking', 'defending_standing_tackle', 'defending_sliding_tackle' ] num_values += [ 'ls', 'st', 'rs', 'lw', 'lf', 'cf', 'rf', 'rw', 'lam', 'cam', 'ram', 'lm', 'lcm', 'cm', 'rcm', 'rm', 'lwb', 'ldm', 'cdm', 'rdm', 'rwb', 'lb', 'lcb', 'cb', 'rcb', 'rb' ] for v in num_values: # player1のデータの数値の整形 value = player_1_df[v].values.astype(str)[0] value = shaping_num(str(value)) # player_1_df[v] = float(value) * 0.01 player_1_df[v] = float(value) # player2のデータの数値の整形 value = player_2_df[v].values.astype(str)[0] value = shaping_num(str(value)) # player_2_df[v] = float(value) * 0.01 player_2_df[v] = float(value) return player_1_df, player_2_df def convert_traits(player_1_df, player_2_df): # 選手特性関連の処理 traits_list = [ 'Backs Into Player', # FIFA 18だけの項目 'Bicycle Kicks', 'Chip Shot', 'Dives Into Tackles', 'Early Crosser', 'Fancy Passes', 'Finesse Shot', 'Flair', 'Giant Throw-In', 'GK Cautious With Crosses', 'GK Comes For Crosses', 'GK Flat Kick', 'GK Long Thrower', 'GK Save With Foot', 'Injury Prone', 'Leadership', 'Long Passer', 'Long Shot Taker', 'Long Throw-In', 'One Club Player', 'Outside Foot Shot', 'Play Maker', 'Power Header', 'Rushes Out Of Goal', 'Second Wind', 'Set Play Specialist', 'Solid Player', 'Speed Dribbler', 'Swerve', 'Takes Powerful Driven Free Kicks', 'Team Player', 'Technical Dribbler' ] player_1_df_player_traits = player_1_df['player_traits'] player_2_df_player_traits = player_2_df['player_traits'] player_1_df = player_1_df.drop(columns='player_traits') player_2_df = player_2_df.drop(columns='player_traits') for trait in traits_list: trait_value = 0 for p_trait in player_1_df_player_traits.values[0].split(','): if trait in p_trait: trait_value = 1 break player_1_df[trait] = trait_value trait_value = 0 for p_trait in player_2_df_player_traits.values[0].split(','): if trait in p_trait: trait_value = 1 break player_2_df[trait] = trait_value return player_1_df, player_2_df def players_comparison(player_1, player_2): df = pd.read_csv('data/players_18.csv') player_1_df = df.query('sofifa_id == {}'.format(player_1)) player_2_df = df.query('sofifa_id == 
{}'.format(player_2)) # david_silva = df.query('sofifa_id == 189881') player_1_df = need_columns(player_1_df) player_2_df = need_columns(player_2_df) # num_valuesの変換処理 player_1_df, player_2_df = convert_num_values(player_1_df, player_2_df) # 選手特性関連の処理 player_1_df, player_2_df = convert_traits(player_1_df, player_2_df) # 選手タグ関連の処理 player_1_df = player_1_df.drop(columns='player_tags') player_2_df = player_2_df.drop(columns='player_tags') # 利き足の変換 player_1_df = convert_preferred_foot(player_1_df) player_2_df = convert_preferred_foot(player_2_df) # 攻撃/守備の優先度の変換 player_1_df = convert_work_rate(player_1_df) player_2_df = convert_work_rate(player_2_df) # print(player_1_df.values[0]) # print(player_2_df.values) cos = similarity(player_1_df, player_2_df) print('cos', cos) # カラムの表示 # columns_views(player_1_df, player_2_df) shinji_kagawa = 189358 david_silva = 178088 # david_silva = 41 # 香川真司 : 189358 # 本田圭佑 : 186581 # 清武弘嗣 : 210126 # イニエスタ: 41 # スモーリング : 189881 # セルヒオ・ラモス : 155862 # マリオ・ゲッツェ : 192318 # ユリアン・ヴァイグル : 222028 # ファン・マタ : 178088 # イスコ : 197781 # ダビド・シルバ : 168542 # マルク・バルトラ : 198141 # ロメル・ルカク : 192505 # デブルイネ : 192985 # モドリッチ : 177003 # クロース : 182521 # ラキティッチ : 168651 # ウサマ・デンベレ : 231443 # リオネル・メッシ : 158023 # フンメルス : 178603 # ピケ: 152729 # ボアテング : 183907 # メスト・エジル : 176635 # マルコ・ロイス : 188350 # イヴァン・ペリシッチ : 181458 # トーマス・ミュラー : 189596 # オスカル : 188152 # ヤルモレンコ : 194794 # エデン・アザール : 183277 # ネイマール : 190871 # ロッベン : 9014 # サラー : 209331 # ハリー・ケイン : 202126 # ムバッペ : 231747 # グリーズマン : 194765 # ジェラール・ピケ : 152729 players_comparison(shinji_kagawa, david_silva) # columns_views(shinji_kagawa, david_silva) # Weak Foot(逆足) # https://www.fifplay.com/encyclopedia/weak-foot/ # Work Rate(作業率) # https://www.fifplay.com/encyclopedia/work-rate/ # ユークリッド距離 vs コサイン類似度 # https://enjoyworks.jp/tech-blog/2242
playstyle_similar/playstyle_similar2.py
11,887
work_rateの削除処理 ユーグリッド距離を算出 https://qiita.com/shim0mura/items/64918dad83d162ef2ac2ユークリッド距離 どちらも同じ値を返す distance = np.linalg.norm(v1 - v2) 0から1までの値で似ていれば似ているほど1に近くなる、みたいな類似度として分かりやすい値が欲しい。 0での除算エラーを防ぐためにこのdに1を足して逆数をとるとそのような値を取ることが出来る。 1/(1+d) print('distance', distance) Scipyを使ってコサイン類似度を求める方法 import scipy.spatial.distance as dis print(np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))) print(dis.cosine(v1, v2)) return dis.cosine(v1, v2) cos類似度を算出 ピアソンの積率相関係数 corr = np.corrcoef(v1, v2)[0, 1] スピアマンの順位相関係数 ケンドールの順位相関係数 リストをps.Seriesに変換 相関係数を計算 return euclidean_distance(v1_value, v2_value) return res 数値型の整形 ls,st,rs,lw,lf,cf,rf,rw, lam,cam,ram,lm,lcm,cm,rcm,rm, lwb,ldm,cdm,rdm,rwb,lb,lcb,cb,rcb,rb player1のデータの数値の整形 player_1_df[v] = float(value) * 0.01 player2のデータの数値の整形 player_2_df[v] = float(value) * 0.01 選手特性関連の処理 FIFA 18だけの項目 david_silva = df.query('sofifa_id == 189881') num_valuesの変換処理 選手特性関連の処理 選手タグ関連の処理 利き足の変換 攻撃/守備の優先度の変換 print(player_1_df.values[0]) print(player_2_df.values) カラムの表示 columns_views(player_1_df, player_2_df) david_silva = 41 香川真司 : 189358 本田圭佑 : 186581 清武弘嗣 : 210126 イニエスタ: 41 スモーリング : 189881 セルヒオ・ラモス : 155862 マリオ・ゲッツェ : 192318 ユリアン・ヴァイグル : 222028 ファン・マタ : 178088 イスコ : 197781 ダビド・シルバ : 168542 マルク・バルトラ : 198141 ロメル・ルカク : 192505 デブルイネ : 192985 モドリッチ : 177003 クロース : 182521 ラキティッチ : 168651 ウサマ・デンベレ : 231443 リオネル・メッシ : 158023 フンメルス : 178603 ピケ: 152729 ボアテング : 183907 メスト・エジル : 176635 マルコ・ロイス : 188350 イヴァン・ペリシッチ : 181458 トーマス・ミュラー : 189596 オスカル : 188152 ヤルモレンコ : 194794 エデン・アザール : 183277 ネイマール : 190871 ロッベン : 9014 サラー : 209331 ハリー・ケイン : 202126 ムバッペ : 231747 グリーズマン : 194765 ジェラール・ピケ : 152729 columns_views(shinji_kagawa, david_silva) Weak Foot(逆足) https://www.fifplay.com/encyclopedia/weak-foot/ Work Rate(作業率) https://www.fifplay.com/encyclopedia/work-rate/ ユークリッド距離 vs コサイン類似度 https://enjoyworks.jp/tech-blog/2242
1,853
ja
0.94026
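playstyle_similar2.py above compares two players with several measures: a Euclidean-distance-based similarity 1/(1+d), cosine similarity, and Pearson/Spearman/Kendall rank correlations. A small self-contained sketch on toy attribute vectors (not FIFA data, the numbers are made up) showing why the first two can disagree:

import numpy as np

def euclidean_similarity(v1, v2):
    # Map the Euclidean distance d into (0, 1]; identical vectors give 1.
    d = np.sqrt(np.power(v1 - v2, 2).sum())
    return 1 / (1 + d)

def cosine_similarity(v1, v2):
    # Cosine of the angle between the vectors; insensitive to overall magnitude.
    return np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))

a = np.array([80.0, 70.0, 60.0])
b = np.array([82.0, 72.0, 62.0])   # same profile, slightly shifted
c = np.array([40.0, 35.0, 30.0])   # same profile shape, half the level

print(euclidean_similarity(a, b), cosine_similarity(a, b))  # ~0.22 and ~1.0: close in both senses
print(euclidean_similarity(a, c), cosine_similarity(a, c))  # ~0.016 and 1.0: same shape, very different level

This is the trade-off behind the commented-out alternatives in the script: cosine similarity rates players with the same attribute profile as similar even when one is uniformly weaker, while the 1/(1+d) form also penalizes differences in overall level.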
import virtualbox, json, pprint, configparser, time, psutil, sys from pypresence import Presence class RichPresence: def __init__(self): # Initialize the VirtualBox instance, config, and assets. self.virtualbox = virtualbox.VirtualBox() self.config = configparser.ConfigParser() self.config.read("config.ini") self.assets = json.load(open("assets.json", "r")) # Initialize the Rich Presence. client_id = self.config["Rich Presence"]["client_id"] self.RPC = Presence(client_id) self.RPC.connect() # Initialize format dictionary. self.format_dict = {"start": time.time()} while True: # Check if VirtualBox is running, and that the current OS is Windows. # [TODO] Add support for other operating systems. if ( "VirtualBox.exe" in (p.name() for p in psutil.process_iter()) or "VirtualBoxVM.exe" in (p.name() for p in psutil.process_iter()) ) and (sys.platform.startswith("win32")): # Generate the list of machines. self.machine_list = self.generate_machine_list() # Set the previous format dictionary, and then update the current one. self.previous_format_dict = self.format_dict # Generate the format dictionary using the list of machines. self.format_dict = self.generate_format_dict( machine_list=self.machine_list, previous_format_dict=self.previous_format_dict, ) # Generate the presence dictionary using the format dictionary and the previous format dictionary. self.presence_dict = self.generate_presence_dict( format_dict=self.format_dict ) # Update the Rich Presence. self.RPC.update(**self.presence_dict) # Print the current presence to the terminal. # [TODO] Print the presence dictionary more neatly pprint.pprint(self.presence_dict) print("--------------------") # Stop updating the Rich Presence if VirtualBox is not running. elif sys.platform.startswith("win32"): print("VirtualBox is not running") self.RPC.clear() # Exit the program if the user is not on Windows. else: print("Sorry, your platform is not supported.") exit() time.sleep(15) def generate_machine_list(self): # Initialize list to store machine information. machine_list = [] # Get information for each machine. machines = self.virtualbox.machines machine_names = [machine.name for machine in machines] machine_states = [machine.state for machine in machines] machine_operating_systems = [machine.os_type_id for machine in machines] # Iterate through the machines and store information about them in the machine list. for machine_index in range(len(machines)): # Initialize dictionary to store machine information. machine_list.append({}) # Obtain OS and architecture information. os_version, architecture = self.generate_os_and_architecture( machine_operating_systems[machine_index] ) # Assign the corresponding information to the keys in the dictionary. machine_list[-1]["name"] = machine_names[machine_index] machine_list[-1]["architecture"] = architecture machine_list[-1]["state"] = str(machine_states[machine_index]) # Iterate through assets and find the correct OS. for os in self.assets["operating systems"]: # If the OS version is found in any of the OS dictionaries, set the version to that key. if os_version in self.assets["operating systems"][os]["versions"]: machine_list[-1]["os"] = os machine_list[-1]["os version"] = os_version return machine_list def generate_os_and_architecture(self, os_type_id: str): # Split OS type ID to obtain the OS and architecture. if "_" in os_type_id: self.oa_operating_system, self.oa_architecture = os_type_id.split("_", 1) # If an architecture is not stated, it is 32-bit.
else: self.oa_operating_system = os_type_id self.oa_architecture = "32" return self.oa_operating_system, self.oa_architecture def generate_format_dict( self, machine_list: list[dict], previous_format_dict: dict ): # Store previous start time and remove it from the dictionary, to help with previous_start = previous_format_dict.pop("start") # Initialize dictionary to store Rich Presence formatting. format_dict = {} # Assume there is no machine active. format_dict["machine active"] = False # Iterate through machine dictionary and find a machine that is online. for machine in machine_list: if machine["state"] == "FirstOnline": # Recognize that the user is in a machine. format_dict["machine active"] = True # Fill the rest of the formatting dictionary with information from the machine dictionary. format_dict["os name"] = machine["os"] format_dict["os version"] = machine["os version"] format_dict["os version name"] = self.assets["operating systems"][ machine["os"] ]["versions"][machine["os version"]]["name"] format_dict["os version image"] = self.assets["operating systems"][ machine["os"] ]["versions"][machine["os version"]]["image"] format_dict["architecture"] = machine["architecture"] format_dict["architecture image"] = machine["architecture"] # End the loop now that we have found the active machine. break format_dict["icon"] = "icon" # If the format dictionary has not changed, then use the same start time as last time. if format_dict == previous_format_dict: format_dict["start"] = previous_start # If the format dictionary has changed since the last loop, then reset the timer. else: # Set the start time of the Rich Presence to now. format_dict["start"] = time.time() return format_dict def generate_presence_dict(self, format_dict: dict): # Initialize dictionary to store the Rich Presence. presence_dict = {} # If there is an active machine, display it on the presence. if format_dict["machine active"] == True: # For each field in the config, set the Rich Presence to show that the user is in a machine. for field in self.config["In Machine"]: presence_dict[field] = self.config["In Machine"][field].format( **format_dict ) # Set the start time using the format dictionary. presence_dict["start"] = format_dict["start"] # For each field in the config, set the Rich Presence to show that the user is in the menu. else: # Fill each presence dictionary field with the corresponding formatting set by the user in the config. for field in self.config["In Menu"]: presence_dict[field] = self.config["In Menu"][field].format( **format_dict ) # If the user is in the menu, there is no need to show the time elapsed. presence_dict["start"] = None # Set all empty strings or empty lists to None. for field in presence_dict: if presence_dict[field] == "" or presence_dict[field] == []: presence_dict[field] = None return presence_dict RichPresence()
main.py
8,339
Initialize the VirtualBox instance, config, and assets. Initialize the Rich Presence. Initialize format dictionary. Check if VirtualBox is running, and that the current OS is Windows. [TODO] Add support for other operating systems. Generate the list of machines. Set the previous format dictionary, and then update the current one. Generate the format dictionary using the list of machines. Generate the presence dictionary using the format dictionary and the previous format dictionary. Update the Rich Presence. Print the current presence to the terminal. [TODO] Print the presence dictionary more neatly Stop updating the Rich Presence if VirtualBox is not running. Exit the program if the user is not on Windows. Initialize list to store machine information. Get information for each machine. Iterate through the machines and store information about them in the machine list. Initialize dictionary to store machine information. Obtain OS and architecture information. Assign the corresponding information to the keys in the dictionary Iterate through assets and find the correct OS. If the OS version is found in any of the OS dictionaries, set the version to that key. Split OS type ID to obtain the OS and architecture. If an architecture is not stated, it is 32-bit. Store previous start time and remove it from the dictionary, to help with Initialize dictionary to store Rich Presence formatting. Assume there is no machine active. Iterate through machine dictionary and find a machine that is online. Recognize that the user is in a machine. Fill the rest of the formatting dictionary with information from the machine dictionary. End the loop now that we have found the active machine. If the format dictionary has not changed, then use the same start time as last time. If the format dictionary has changed since the last loop, then reset the timer. Set the start time of the Rich Presence to now. Initialize dictionary to store the Rich Presence. If there is an active machine, display it on the presence. For each field in the config, set the Rich Presence to show that the user is in a machine. Set the start time using the format dictionary. For each field in the config, set the Rich Presence to show that the user is in the menu. Fill each presence dictionary field with the corresponding formatting set by the user in the config. If the user is in the menu, there is no need to show the time elapsed. Set all empty strings or empty lists to None.
2,464
en
0.85264
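generate_os_and_architecture() above splits VirtualBox's os_type_id on the first underscore and falls back to "32" when no architecture suffix is present. A standalone sketch of the same rule; the example os_type_id strings are only illustrative, since the exact values come from the VirtualBox SDK:

def split_os_type_id(os_type_id: str):
    # Split a VirtualBox os_type_id into (os_version, architecture).
    if "_" in os_type_id:
        os_version, architecture = os_type_id.split("_", 1)
    else:
        # No suffix is treated as a 32-bit guest.
        os_version, architecture = os_type_id, "32"
    return os_version, architecture

print(split_os_type_id("Ubuntu_64"))   # ('Ubuntu', '64')
print(split_os_type_id("Windows10"))   # ('Windows10', '32')

generate_machine_list() then searches assets.json for an operating-system family whose listed versions contain that first part, and uses it to pick the display name and image shown in the presence.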
#!/usr/bin/env python3 from . import util import json from electrum_civx.network import filter_protocol peers = filter_protocol(util.get_peers()) results = util.send_request(peers, 'blockchain.estimatefee', [2]) print(json.dumps(results, indent=4))
electrum/scripts/estimate_fee.py
249
!/usr/bin/env python3
21
fr
0.448822
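estimate_fee.py prints the raw responses of blockchain.estimatefee, which Electrum-style servers report as a fee rate in coins per kilobyte, or -1 when no estimate is available. A hedged post-processing sketch, assuming results is a dict mapping server name to such a float; the helper below is not part of the electrum scripts:

from statistics import median

def summarize_fee_estimates(results, sats_per_coin=100_000_000):
    # Convert coin/kB estimates to sat/byte and return their median.
    rates = [
        fee * sats_per_coin / 1000                        # coin/kB -> sat/byte
        for fee in results.values()
        if isinstance(fee, (int, float)) and fee > 0      # drop -1 / error entries
    ]
    return median(rates) if rates else None

# Example with made-up server responses:
print(summarize_fee_estimates({"server-a": 0.00002, "server-b": 0.00003, "server-c": -1}))  # 2.5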
''' Experiment: Ethernet MQTT communication Version: v1.0 Date: 2020.12 Author: 01Studio Description: MQTT communication over Ethernet implemented with socket programming, subscriber (subscribe). ''' import network,usocket,time from simple import MQTTClient from tftlcd import LCD43M #define commonly used colors RED = (255,0,0) GREEN = (0,255,0) BLUE = (0,0,255) BLACK = (0,0,0) #4.3 inch LCD initialization d = LCD43M(portrait=1) d.fill((255,255,255)) #fill with white #socket data receive interrupt flag socket_node = 0 #Ethernet initialization nic = network.Ethernet() nic.active(True) nic.ifconfig('dhcp') #MQTT callback function, executed when a message arrives def MQTT_callback(topic, msg): print('topic: {}'.format(topic)) print('msg: {}'.format(msg)) #check whether the network connection succeeded if nic.isconnected(): print(nic.ifconfig()) #print IP information #show title d.printStr('01Studio Network', 40, 10, BLACK, size=4) #show IP information d.printStr('IP: ' + nic.ifconfig()[0], 10, 100, BLACK, size=3) d.printStr('Subnet: ' + nic.ifconfig()[1], 10, 150, BLACK, size=3) d.printStr('Gateway: ' + nic.ifconfig()[2], 10, 200, BLACK, size=3) #MQTT configuration SERVER = 'mqtt.p2hp.com' PORT = 1883 CLIENT_ID = '01Studio-pyBoard' # client ID TOPIC = '/public/01Studio/1' # topic name client = MQTTClient(CLIENT_ID, SERVER, PORT) client.set_callback(MQTT_callback) #set the callback function client.connect() client.subscribe(TOPIC) #subscribe to the topic while (True): client.check_msg() #check for incoming messages; if one is received, the callback runs and prints it time.sleep_ms(300) #receive polling interval
哥伦布(STM32F407)/3.通讯实验/2.以太网/3.MQTT通信/2.订阅者(subscribe)/main.py
1,599
Experiment: Ethernet MQTT communication Version: v1.0 Date: 2020.12 Author: 01Studio Description: MQTT communication over Ethernet implemented with socket programming, subscriber (subscribe). define commonly used colors; 4.3 inch LCD initialization; fill with white; socket data receive interrupt flag; Ethernet initialization; set MQTT callback function, executed when a message arrives; check whether the network connection succeeded; print IP information; show title; show IP information; MQTT configuration; client ID; topic name; set the callback function; subscribe to the topic; check for incoming messages, run the callback and print when received; receive polling interval
228
zh
0.953738
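The script above only subscribes; to test it end to end a matching publisher is useful. A minimal counterpart sketch, assuming the same simple.MQTTClient module (umqtt.simple style, whose publish() method sends a message to a topic) and the same broker and topic; the periodic payload here is made up:

import time
from simple import MQTTClient

SERVER = 'mqtt.p2hp.com'
PORT = 1883
CLIENT_ID = '01Studio-pyBoard-pub'   # must differ from the subscriber's client ID
TOPIC = '/public/01Studio/1'

client = MQTTClient(CLIENT_ID, SERVER, PORT)
client.connect()

count = 0
while True:
    client.publish(TOPIC, 'hello {}'.format(count))  # the subscriber's callback prints this
    count += 1
    time.sleep(3)   # publish every 3 seconds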
from math import ceil import pytest from scipy.stats import norm, randint import numpy as np from sklearn.datasets import make_classification from sklearn.dummy import DummyClassifier from sklearn.experimental import enable_halving_search_cv # noqa from sklearn.model_selection import StratifiedKFold from sklearn.model_selection import StratifiedShuffleSplit from sklearn.model_selection import LeaveOneGroupOut from sklearn.model_selection import LeavePGroupsOut from sklearn.model_selection import GroupKFold from sklearn.model_selection import GroupShuffleSplit from sklearn.model_selection import HalvingGridSearchCV from sklearn.model_selection import HalvingRandomSearchCV from sklearn.model_selection import KFold, ShuffleSplit from sklearn.svm import LinearSVC from sklearn.model_selection._search_successive_halving import ( _SubsampleMetaSplitter, _top_k, ) class FastClassifier(DummyClassifier): """Dummy classifier that accepts parameters a, b, ... z. These parameter don't affect the predictions and are useful for fast grid searching.""" def __init__( self, strategy="stratified", random_state=None, constant=None, **kwargs ): super().__init__( strategy=strategy, random_state=random_state, constant=constant ) def get_params(self, deep=False): params = super().get_params(deep=deep) for char in range(ord("a"), ord("z") + 1): params[chr(char)] = "whatever" return params @pytest.mark.parametrize("Est", (HalvingGridSearchCV, HalvingRandomSearchCV)) @pytest.mark.parametrize( "aggressive_elimination," "max_resources," "expected_n_iterations," "expected_n_required_iterations," "expected_n_possible_iterations," "expected_n_remaining_candidates," "expected_n_candidates," "expected_n_resources,", [ # notice how it loops at the beginning # also, the number of candidates evaluated at the last iteration is # <= factor (True, "limited", 4, 4, 3, 1, [60, 20, 7, 3], [20, 20, 60, 180]), # no aggressive elimination: we end up with less iterations, and # the number of candidates at the last iter is > factor, which isn't # ideal (False, "limited", 3, 4, 3, 3, [60, 20, 7], [20, 60, 180]), # # When the amount of resource isn't limited, aggressive_elimination # # has no effect. Here the default min_resources='exhaust' will take # # over. (True, "unlimited", 4, 4, 4, 1, [60, 20, 7, 3], [37, 111, 333, 999]), (False, "unlimited", 4, 4, 4, 1, [60, 20, 7, 3], [37, 111, 333, 999]), ], ) def test_aggressive_elimination( Est, aggressive_elimination, max_resources, expected_n_iterations, expected_n_required_iterations, expected_n_possible_iterations, expected_n_remaining_candidates, expected_n_candidates, expected_n_resources, ): # Test the aggressive_elimination parameter. 
n_samples = 1000 X, y = make_classification(n_samples=n_samples, random_state=0) param_grid = {"a": ("l1", "l2"), "b": list(range(30))} base_estimator = FastClassifier() if max_resources == "limited": max_resources = 180 else: max_resources = n_samples sh = Est( base_estimator, param_grid, aggressive_elimination=aggressive_elimination, max_resources=max_resources, factor=3, ) sh.set_params(verbose=True) # just for test coverage if Est is HalvingRandomSearchCV: # same number of candidates as with the grid sh.set_params(n_candidates=2 * 30, min_resources="exhaust") sh.fit(X, y) assert sh.n_iterations_ == expected_n_iterations assert sh.n_required_iterations_ == expected_n_required_iterations assert sh.n_possible_iterations_ == expected_n_possible_iterations assert sh.n_resources_ == expected_n_resources assert sh.n_candidates_ == expected_n_candidates assert sh.n_remaining_candidates_ == expected_n_remaining_candidates assert ceil(sh.n_candidates_[-1] / sh.factor) == sh.n_remaining_candidates_ @pytest.mark.parametrize("Est", (HalvingGridSearchCV, HalvingRandomSearchCV)) @pytest.mark.parametrize( "min_resources," "max_resources," "expected_n_iterations," "expected_n_possible_iterations," "expected_n_resources,", [ # with enough resources ("smallest", "auto", 2, 4, [20, 60]), # with enough resources but min_resources set manually (50, "auto", 2, 3, [50, 150]), # without enough resources, only one iteration can be done ("smallest", 30, 1, 1, [20]), # with exhaust: use as much resources as possible at the last iter ("exhaust", "auto", 2, 2, [333, 999]), ("exhaust", 1000, 2, 2, [333, 999]), ("exhaust", 999, 2, 2, [333, 999]), ("exhaust", 600, 2, 2, [200, 600]), ("exhaust", 599, 2, 2, [199, 597]), ("exhaust", 300, 2, 2, [100, 300]), ("exhaust", 60, 2, 2, [20, 60]), ("exhaust", 50, 1, 1, [20]), ("exhaust", 20, 1, 1, [20]), ], ) def test_min_max_resources( Est, min_resources, max_resources, expected_n_iterations, expected_n_possible_iterations, expected_n_resources, ): # Test the min_resources and max_resources parameters, and how they affect # the number of resources used at each iteration n_samples = 1000 X, y = make_classification(n_samples=n_samples, random_state=0) param_grid = {"a": [1, 2], "b": [1, 2, 3]} base_estimator = FastClassifier() sh = Est( base_estimator, param_grid, factor=3, min_resources=min_resources, max_resources=max_resources, ) if Est is HalvingRandomSearchCV: sh.set_params(n_candidates=6) # same number as with the grid sh.fit(X, y) expected_n_required_iterations = 2 # given 6 combinations and factor = 3 assert sh.n_iterations_ == expected_n_iterations assert sh.n_required_iterations_ == expected_n_required_iterations assert sh.n_possible_iterations_ == expected_n_possible_iterations assert sh.n_resources_ == expected_n_resources if min_resources == "exhaust": assert sh.n_possible_iterations_ == sh.n_iterations_ == len(sh.n_resources_) @pytest.mark.parametrize("Est", (HalvingRandomSearchCV, HalvingGridSearchCV)) @pytest.mark.parametrize( "max_resources, n_iterations, n_possible_iterations", [ ("auto", 5, 9), # all resources are used (1024, 5, 9), (700, 5, 8), (512, 5, 8), (511, 5, 7), (32, 4, 4), (31, 3, 3), (16, 3, 3), (4, 1, 1), # max_resources == min_resources, only one iteration is # possible ], ) def test_n_iterations(Est, max_resources, n_iterations, n_possible_iterations): # test the number of actual iterations that were run depending on # max_resources n_samples = 1024 X, y = make_classification(n_samples=n_samples, random_state=1) param_grid = {"a": [1, 2], "b": 
list(range(10))} base_estimator = FastClassifier() factor = 2 sh = Est( base_estimator, param_grid, cv=2, factor=factor, max_resources=max_resources, min_resources=4, ) if Est is HalvingRandomSearchCV: sh.set_params(n_candidates=20) # same as for HalvingGridSearchCV sh.fit(X, y) assert sh.n_required_iterations_ == 5 assert sh.n_iterations_ == n_iterations assert sh.n_possible_iterations_ == n_possible_iterations @pytest.mark.parametrize("Est", (HalvingRandomSearchCV, HalvingGridSearchCV)) def test_resource_parameter(Est): # Test the resource parameter n_samples = 1000 X, y = make_classification(n_samples=n_samples, random_state=0) param_grid = {"a": [1, 2], "b": list(range(10))} base_estimator = FastClassifier() sh = Est(base_estimator, param_grid, cv=2, resource="c", max_resources=10, factor=3) sh.fit(X, y) assert set(sh.n_resources_) == set([1, 3, 9]) for r_i, params, param_c in zip( sh.cv_results_["n_resources"], sh.cv_results_["params"], sh.cv_results_["param_c"], ): assert r_i == params["c"] == param_c with pytest.raises( ValueError, match="Cannot use resource=1234 which is not supported " ): sh = HalvingGridSearchCV( base_estimator, param_grid, cv=2, resource="1234", max_resources=10 ) sh.fit(X, y) with pytest.raises( ValueError, match=( "Cannot use parameter c as the resource since it is part " "of the searched parameters." ), ): param_grid = {"a": [1, 2], "b": [1, 2], "c": [1, 3]} sh = HalvingGridSearchCV( base_estimator, param_grid, cv=2, resource="c", max_resources=10 ) sh.fit(X, y) @pytest.mark.parametrize( "max_resources, n_candidates, expected_n_candidates", [ (512, "exhaust", 128), # generate exactly as much as needed (32, "exhaust", 8), (32, 8, 8), (32, 7, 7), # ask for less than what we could (32, 9, 9), # ask for more than 'reasonable' ], ) def test_random_search(max_resources, n_candidates, expected_n_candidates): # Test random search and make sure the number of generated candidates is # as expected n_samples = 1024 X, y = make_classification(n_samples=n_samples, random_state=0) param_grid = {"a": norm, "b": norm} base_estimator = FastClassifier() sh = HalvingRandomSearchCV( base_estimator, param_grid, n_candidates=n_candidates, cv=2, max_resources=max_resources, factor=2, min_resources=4, ) sh.fit(X, y) assert sh.n_candidates_[0] == expected_n_candidates if n_candidates == "exhaust": # Make sure 'exhaust' makes the last iteration use as much resources as # we can assert sh.n_resources_[-1] == max_resources @pytest.mark.parametrize( "param_distributions, expected_n_candidates", [ ({"a": [1, 2]}, 2), # all lists, sample less than n_candidates ({"a": randint(1, 3)}, 10), # not all list, respect n_candidates ], ) def test_random_search_discrete_distributions( param_distributions, expected_n_candidates ): # Make sure random search samples the appropriate number of candidates when # we ask for more than what's possible. How many parameters are sampled # depends whether the distributions are 'all lists' or not (see # ParameterSampler for details). 
This is somewhat redundant with the checks # in ParameterSampler but interaction bugs were discovered during # developement of SH n_samples = 1024 X, y = make_classification(n_samples=n_samples, random_state=0) base_estimator = FastClassifier() sh = HalvingRandomSearchCV(base_estimator, param_distributions, n_candidates=10) sh.fit(X, y) assert sh.n_candidates_[0] == expected_n_candidates @pytest.mark.parametrize("Est", (HalvingGridSearchCV, HalvingRandomSearchCV)) @pytest.mark.parametrize( "params, expected_error_message", [ ({"scoring": {"accuracy", "accuracy"}}, "Multimetric scoring is not supported"), ( {"resource": "not_a_parameter"}, "Cannot use resource=not_a_parameter which is not supported", ), ( {"resource": "a", "max_resources": 100}, "Cannot use parameter a as the resource since it is part of", ), ({"max_resources": "not_auto"}, "max_resources must be either"), ({"max_resources": 100.5}, "max_resources must be either"), ({"max_resources": -10}, "max_resources must be either"), ({"min_resources": "bad str"}, "min_resources must be either"), ({"min_resources": 0.5}, "min_resources must be either"), ({"min_resources": -10}, "min_resources must be either"), ( {"max_resources": "auto", "resource": "b"}, "max_resources can only be 'auto' if resource='n_samples'", ), ( {"min_resources": 15, "max_resources": 14}, "min_resources_=15 is greater than max_resources_=14", ), ({"cv": KFold(shuffle=True)}, "must yield consistent folds"), ({"cv": ShuffleSplit()}, "must yield consistent folds"), ({"refit": "whatever"}, "refit is expected to be a boolean"), ], ) def test_input_errors(Est, params, expected_error_message): base_estimator = FastClassifier() param_grid = {"a": [1]} X, y = make_classification(100) sh = Est(base_estimator, param_grid, **params) with pytest.raises(ValueError, match=expected_error_message): sh.fit(X, y) @pytest.mark.parametrize( "params, expected_error_message", [ ( {"n_candidates": "exhaust", "min_resources": "exhaust"}, "cannot be both set to 'exhaust'", ), ({"n_candidates": "bad"}, "either 'exhaust' or a positive integer"), ({"n_candidates": 0}, "either 'exhaust' or a positive integer"), ], ) def test_input_errors_randomized(params, expected_error_message): # tests specific to HalvingRandomSearchCV base_estimator = FastClassifier() param_grid = {"a": [1]} X, y = make_classification(100) sh = HalvingRandomSearchCV(base_estimator, param_grid, **params) with pytest.raises(ValueError, match=expected_error_message): sh.fit(X, y) @pytest.mark.parametrize( "fraction, subsample_test, expected_train_size, expected_test_size", [ (0.5, True, 40, 10), (0.5, False, 40, 20), (0.2, True, 16, 4), (0.2, False, 16, 20), ], ) def test_subsample_splitter_shapes( fraction, subsample_test, expected_train_size, expected_test_size ): # Make sure splits returned by SubsampleMetaSplitter are of appropriate # size n_samples = 100 X, y = make_classification(n_samples) cv = _SubsampleMetaSplitter( base_cv=KFold(5), fraction=fraction, subsample_test=subsample_test, random_state=None, ) for train, test in cv.split(X, y): assert train.shape[0] == expected_train_size assert test.shape[0] == expected_test_size if subsample_test: assert train.shape[0] + test.shape[0] == int(n_samples * fraction) else: assert test.shape[0] == n_samples // cv.base_cv.get_n_splits() @pytest.mark.parametrize("subsample_test", (True, False)) def test_subsample_splitter_determinism(subsample_test): # Make sure _SubsampleMetaSplitter is consistent across calls to split(): # - we're OK having training sets differ (they're 
always sampled with a # different fraction anyway) # - when we don't subsample the test set, we want it to be always the same. # This check is the most important. This is ensured by the determinism # of the base_cv. # Note: we could force both train and test splits to be always the same if # we drew an int seed in _SubsampleMetaSplitter.__init__ n_samples = 100 X, y = make_classification(n_samples) cv = _SubsampleMetaSplitter( base_cv=KFold(5), fraction=0.5, subsample_test=subsample_test, random_state=None ) folds_a = list(cv.split(X, y, groups=None)) folds_b = list(cv.split(X, y, groups=None)) for (train_a, test_a), (train_b, test_b) in zip(folds_a, folds_b): assert not np.all(train_a == train_b) if subsample_test: assert not np.all(test_a == test_b) else: assert np.all(test_a == test_b) assert np.all(X[test_a] == X[test_b]) @pytest.mark.parametrize( "k, itr, expected", [ (1, 0, ["c"]), (2, 0, ["a", "c"]), (4, 0, ["d", "b", "a", "c"]), (10, 0, ["d", "b", "a", "c"]), (1, 1, ["e"]), (2, 1, ["f", "e"]), (10, 1, ["f", "e"]), (1, 2, ["i"]), (10, 2, ["g", "h", "i"]), ], ) def test_top_k(k, itr, expected): results = { # this isn't a 'real world' result dict "iter": [0, 0, 0, 0, 1, 1, 2, 2, 2], "mean_test_score": [4, 3, 5, 1, 11, 10, 5, 6, 9], "params": ["a", "b", "c", "d", "e", "f", "g", "h", "i"], } got = _top_k(results, k=k, itr=itr) assert np.all(got == expected) @pytest.mark.parametrize("Est", (HalvingRandomSearchCV, HalvingGridSearchCV)) def test_cv_results(Est): # test that the cv_results_ matches correctly the logic of the # tournament: in particular that the candidates continued in each # successive iteration are those that were best in the previous iteration pd = pytest.importorskip("pandas") rng = np.random.RandomState(0) n_samples = 1000 X, y = make_classification(n_samples=n_samples, random_state=0) param_grid = {"a": ("l1", "l2"), "b": list(range(30))} base_estimator = FastClassifier() # generate random scores: we want to avoid ties, which would otherwise # mess with the ordering and make testing harder def scorer(est, X, y): return rng.rand() sh = Est(base_estimator, param_grid, factor=2, scoring=scorer) if Est is HalvingRandomSearchCV: # same number of candidates as with the grid sh.set_params(n_candidates=2 * 30, min_resources="exhaust") sh.fit(X, y) # non-regression check for # https://github.com/scikit-learn/scikit-learn/issues/19203 assert isinstance(sh.cv_results_["iter"], np.ndarray) assert isinstance(sh.cv_results_["n_resources"], np.ndarray) cv_results_df = pd.DataFrame(sh.cv_results_) # just make sure we don't have ties assert len(cv_results_df["mean_test_score"].unique()) == len(cv_results_df) cv_results_df["params_str"] = cv_results_df["params"].apply(str) table = cv_results_df.pivot( index="params_str", columns="iter", values="mean_test_score" ) # table looks like something like this: # iter 0 1 2 3 4 5 # params_str # {'a': 'l2', 'b': 23} 0.75 NaN NaN NaN NaN NaN # {'a': 'l1', 'b': 30} 0.90 0.875 NaN NaN NaN NaN # {'a': 'l1', 'b': 0} 0.75 NaN NaN NaN NaN NaN # {'a': 'l2', 'b': 3} 0.85 0.925 0.9125 0.90625 NaN NaN # {'a': 'l1', 'b': 5} 0.80 NaN NaN NaN NaN NaN # ... # where a NaN indicates that the candidate wasn't evaluated at a given # iteration, because it wasn't part of the top-K at some previous # iteration. We here make sure that candidates that aren't in the top-k at # any given iteration are indeed not evaluated at the subsequent # iterations. 
nan_mask = pd.isna(table) n_iter = sh.n_iterations_ for it in range(n_iter - 1): already_discarded_mask = nan_mask[it] # make sure that if a candidate is already discarded, we don't evaluate # it later assert ( already_discarded_mask & nan_mask[it + 1] == already_discarded_mask ).all() # make sure that the number of discarded candidate is correct discarded_now_mask = ~already_discarded_mask & nan_mask[it + 1] kept_mask = ~already_discarded_mask & ~discarded_now_mask assert kept_mask.sum() == sh.n_candidates_[it + 1] # make sure that all discarded candidates have a lower score than the # kept candidates discarded_max_score = table[it].where(discarded_now_mask).max() kept_min_score = table[it].where(kept_mask).min() assert discarded_max_score < kept_min_score # We now make sure that the best candidate is chosen only from the last # iteration. # We also make sure this is true even if there were higher scores in # earlier rounds (this isn't generally the case, but worth ensuring it's # possible). last_iter = cv_results_df["iter"].max() idx_best_last_iter = cv_results_df[cv_results_df["iter"] == last_iter][ "mean_test_score" ].idxmax() idx_best_all_iters = cv_results_df["mean_test_score"].idxmax() assert sh.best_params_ == cv_results_df.iloc[idx_best_last_iter]["params"] assert ( cv_results_df.iloc[idx_best_last_iter]["mean_test_score"] < cv_results_df.iloc[idx_best_all_iters]["mean_test_score"] ) assert ( cv_results_df.iloc[idx_best_last_iter]["params"] != cv_results_df.iloc[idx_best_all_iters]["params"] ) @pytest.mark.parametrize("Est", (HalvingGridSearchCV, HalvingRandomSearchCV)) def test_base_estimator_inputs(Est): # make sure that the base estimators are passed the correct parameters and # number of samples at each iteration. pd = pytest.importorskip("pandas") passed_n_samples_fit = [] passed_n_samples_predict = [] passed_params = [] class FastClassifierBookKeeping(FastClassifier): def fit(self, X, y): passed_n_samples_fit.append(X.shape[0]) return super().fit(X, y) def predict(self, X): passed_n_samples_predict.append(X.shape[0]) return super().predict(X) def set_params(self, **params): passed_params.append(params) return super().set_params(**params) n_samples = 1024 n_splits = 2 X, y = make_classification(n_samples=n_samples, random_state=0) param_grid = {"a": ("l1", "l2"), "b": list(range(30))} base_estimator = FastClassifierBookKeeping() sh = Est( base_estimator, param_grid, factor=2, cv=n_splits, return_train_score=False, refit=False, ) if Est is HalvingRandomSearchCV: # same number of candidates as with the grid sh.set_params(n_candidates=2 * 30, min_resources="exhaust") sh.fit(X, y) assert len(passed_n_samples_fit) == len(passed_n_samples_predict) passed_n_samples = [ x + y for (x, y) in zip(passed_n_samples_fit, passed_n_samples_predict) ] # Lists are of length n_splits * n_iter * n_candidates_at_i. # Each chunk of size n_splits corresponds to the n_splits folds for the # same candidate at the same iteration, so they contain equal values. 
We # subsample such that the lists are of length n_iter * n_candidates_at_it passed_n_samples = passed_n_samples[::n_splits] passed_params = passed_params[::n_splits] cv_results_df = pd.DataFrame(sh.cv_results_) assert len(passed_params) == len(passed_n_samples) == len(cv_results_df) uniques, counts = np.unique(passed_n_samples, return_counts=True) assert (sh.n_resources_ == uniques).all() assert (sh.n_candidates_ == counts).all() assert (cv_results_df["params"] == passed_params).all() assert (cv_results_df["n_resources"] == passed_n_samples).all() @pytest.mark.parametrize("Est", (HalvingGridSearchCV, HalvingRandomSearchCV)) def test_groups_support(Est): # Check if ValueError (when groups is None) propagates to # HalvingGridSearchCV and HalvingRandomSearchCV # And also check if groups is correctly passed to the cv object rng = np.random.RandomState(0) X, y = make_classification(n_samples=50, n_classes=2, random_state=0) groups = rng.randint(0, 3, 50) clf = LinearSVC(random_state=0) grid = {"C": [1]} group_cvs = [ LeaveOneGroupOut(), LeavePGroupsOut(2), GroupKFold(n_splits=3), GroupShuffleSplit(random_state=0), ] error_msg = "The 'groups' parameter should not be None." for cv in group_cvs: gs = Est(clf, grid, cv=cv) with pytest.raises(ValueError, match=error_msg): gs.fit(X, y) gs.fit(X, y, groups=groups) non_group_cvs = [StratifiedKFold(), StratifiedShuffleSplit(random_state=0)] for cv in non_group_cvs: gs = Est(clf, grid, cv=cv) # Should not raise an error gs.fit(X, y) @pytest.mark.parametrize("SearchCV", [HalvingRandomSearchCV, HalvingGridSearchCV]) def test_min_resources_null(SearchCV): """Check that we raise an error if the minimum resources is set to 0.""" base_estimator = FastClassifier() param_grid = {"a": [1]} X = np.empty(0).reshape(0, 3) search = SearchCV(base_estimator, param_grid, min_resources="smallest") err_msg = "min_resources_=0: you might have passed an empty dataset X." with pytest.raises(ValueError, match=err_msg): search.fit(X, []) @pytest.mark.parametrize("SearchCV", [HalvingGridSearchCV, HalvingRandomSearchCV]) def test_select_best_index(SearchCV): """Check the selection strategy of the halving search.""" results = { # this isn't a 'real world' result dict "iter": np.array([0, 0, 0, 0, 1, 1, 2, 2, 2]), "mean_test_score": np.array([4, 3, 5, 1, 11, 10, 5, 6, 9]), "params": np.array(["a", "b", "c", "d", "e", "f", "g", "h", "i"]), } # we expect the index of 'i' best_index = SearchCV._select_best_index(None, None, results) assert best_index == 8
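To make the parametrised expectations in test_top_k and test_select_best_index above easier to read, here is a small reconstruction of the behaviour they assert: among the candidates evaluated at a given iteration, keep the k best by mean_test_score, returned in increasing order of score. This is a sketch inferred from the asserted outputs, not scikit-learn's _top_k implementation.

# Sketch of the selection rule the tests above assert (not scikit-learn's code).
def top_k_sketch(results, k, itr):
    rows = [(score, param) for it, score, param in zip(results["iter"],
                                                       results["mean_test_score"],
                                                       results["params"]) if it == itr]
    rows.sort()                                # ascending by score
    return [param for _, param in rows[-k:]]   # keep the k best

results = {
    "iter": [0, 0, 0, 0, 1, 1, 2, 2, 2],
    "mean_test_score": [4, 3, 5, 1, 11, 10, 5, 6, 9],
    "params": ["a", "b", "c", "d", "e", "f", "g", "h", "i"],
}
print(top_k_sketch(results, k=2, itr=0))   # -> ['a', 'c'], matching the test table
print(top_k_sketch(results, k=1, itr=2))   # -> ['i'], the overall winner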
sklearn/model_selection/tests/test_successive_halving.py
25,125
Dummy classifier that accepts parameters a, b, ... z. These parameter don't affect the predictions and are useful for fast grid searching. Check that we raise an error if the minimum resources is set to 0. Check the selection strategy of the halving search. noqa notice how it loops at the beginning also, the number of candidates evaluated at the last iteration is <= factor no aggressive elimination: we end up with less iterations, and the number of candidates at the last iter is > factor, which isn't ideal When the amount of resource isn't limited, aggressive_elimination has no effect. Here the default min_resources='exhaust' will take over. Test the aggressive_elimination parameter. just for test coverage same number of candidates as with the grid with enough resources with enough resources but min_resources set manually without enough resources, only one iteration can be done with exhaust: use as much resources as possible at the last iter Test the min_resources and max_resources parameters, and how they affect the number of resources used at each iteration same number as with the grid given 6 combinations and factor = 3 all resources are used max_resources == min_resources, only one iteration is possible test the number of actual iterations that were run depending on max_resources same as for HalvingGridSearchCV Test the resource parameter generate exactly as much as needed ask for less than what we could ask for more than 'reasonable' Test random search and make sure the number of generated candidates is as expected Make sure 'exhaust' makes the last iteration use as much resources as we can all lists, sample less than n_candidates not all list, respect n_candidates Make sure random search samples the appropriate number of candidates when we ask for more than what's possible. How many parameters are sampled depends whether the distributions are 'all lists' or not (see ParameterSampler for details). This is somewhat redundant with the checks in ParameterSampler but interaction bugs were discovered during developement of SH tests specific to HalvingRandomSearchCV Make sure splits returned by SubsampleMetaSplitter are of appropriate size Make sure _SubsampleMetaSplitter is consistent across calls to split(): - we're OK having training sets differ (they're always sampled with a different fraction anyway) - when we don't subsample the test set, we want it to be always the same. This check is the most important. This is ensured by the determinism of the base_cv. Note: we could force both train and test splits to be always the same if we drew an int seed in _SubsampleMetaSplitter.__init__ this isn't a 'real world' result dict test that the cv_results_ matches correctly the logic of the tournament: in particular that the candidates continued in each successive iteration are those that were best in the previous iteration generate random scores: we want to avoid ties, which would otherwise mess with the ordering and make testing harder same number of candidates as with the grid non-regression check for https://github.com/scikit-learn/scikit-learn/issues/19203 just make sure we don't have ties table looks like something like this: iter 0 1 2 3 4 5 params_str {'a': 'l2', 'b': 23} 0.75 NaN NaN NaN NaN NaN {'a': 'l1', 'b': 30} 0.90 0.875 NaN NaN NaN NaN {'a': 'l1', 'b': 0} 0.75 NaN NaN NaN NaN NaN {'a': 'l2', 'b': 3} 0.85 0.925 0.9125 0.90625 NaN NaN {'a': 'l1', 'b': 5} 0.80 NaN NaN NaN NaN NaN ... 
where a NaN indicates that the candidate wasn't evaluated at a given iteration, because it wasn't part of the top-K at some previous iteration. We here make sure that candidates that aren't in the top-k at any given iteration are indeed not evaluated at the subsequent iterations. make sure that if a candidate is already discarded, we don't evaluate it later make sure that the number of discarded candidate is correct make sure that all discarded candidates have a lower score than the kept candidates We now make sure that the best candidate is chosen only from the last iteration. We also make sure this is true even if there were higher scores in earlier rounds (this isn't generally the case, but worth ensuring it's possible). make sure that the base estimators are passed the correct parameters and number of samples at each iteration. same number of candidates as with the grid Lists are of length n_splits * n_iter * n_candidates_at_i. Each chunk of size n_splits corresponds to the n_splits folds for the same candidate at the same iteration, so they contain equal values. We subsample such that the lists are of length n_iter * n_candidates_at_it Check if ValueError (when groups is None) propagates to HalvingGridSearchCV and HalvingRandomSearchCV And also check if groups is correctly passed to the cv object Should not raise an error this isn't a 'real world' result dict we expect the index of 'i'
4,973
en
0.913946
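The expected candidate counts and resource budgets in test_aggressive_elimination above (for example [60, 20, 7, 3] candidates against [20, 20, 60, 180] samples) follow directly from the halving arithmetic with factor=3 and min_resources=20 for this dataset. The snippet below reproduces those numbers; it is a simplified reading aid, not scikit-learn's internal scheduling code, and it does not cover the min_resources='exhaust' rows of the parametrisation.

# Back-of-the-envelope reconstruction of the halving schedules asserted above.
from math import ceil

def halving_schedule(n_candidates, min_resources, max_resources, factor=3,
                     aggressive_elimination=False):
    # iterations needed to get below `factor` surviving candidates
    n_required, c = 1, n_candidates
    while c > factor:
        n_required, c = n_required + 1, ceil(c / factor)
    # iterations that fit between min_resources and max_resources
    n_possible, r = 1, min_resources
    while r * factor <= max_resources:
        n_possible, r = n_possible + 1, r * factor
    n_iter = n_required if aggressive_elimination else min(n_required, n_possible)
    # with aggressive elimination the first "extra" iterations all reuse
    # min_resources so the later ones still fit under max_resources
    shift = max(n_iter - n_possible, 0)
    candidates, resources, c = [], [], n_candidates
    for it in range(n_iter):
        candidates.append(c)
        resources.append(min_resources * factor ** max(it - shift, 0))
        c = ceil(c / factor)
    return candidates, resources

print(halving_schedule(60, 20, 180))   # ([60, 20, 7], [20, 60, 180])
print(halving_schedule(60, 20, 180, aggressive_elimination=True))
# -> ([60, 20, 7, 3], [20, 20, 60, 180])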
# -*- coding: utf-8 -*- # Copyright 2018 Spanish National Research Council (CSIC) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Given two dates and region, download N Sentinel Collections scenes from ESA Sentinel dataHUB. The downloaded Sentinel collection scenes are compatible with: S2MSI1C: Top-of-atmosphere reflectances in cartographic geometry or S2MSI2A: Bottom-of-atmosphere reflectance in cartographic geometry Parameters ---------- inidate: datetime.strptime("YYYY-MM-dd", "%Y-%m-%d") enddate: datetime.strptime("YYYY-MM-dd", "%Y-%m-%d") region: name of one reservoir saved in the "coord_reservoirs.json" file coordinates : dict. Coordinates of the region to search. Example: {"W": -2.830, "S": 41.820, "E": -2.690, "N": 41.910}} platform : str. Satellite to use from the Sentinel family producttype : str. Dataset type. cloud: int path : path Author: Daniel García Díaz Email: garciad@ifca.unican.es Institute of Physics of Cantabria (IFCA) Advanced Computing and e-Science Date: Sep 2018 """ #imports apis import requests import os # Subfunctions from wq_sat.utils import config class download_sentinel: def __init__(self, inidate, enddate, region, coordinates=None, platform='Sentinel-2', producttype="S2MSI1C", cloud=100, output_path=None): self.session = requests.Session() #Search parameters self.inidate = inidate self.enddate = enddate self.coord = coordinates self.producttype = producttype self.platform = platform self.region = region self.cloud = int(cloud) #work path if output_path: self.output_path = os.path.join(output_path, self.region) else: path = config.get_data_path() self.output_path = os.path.join(path, self.region) if not os.path.isdir(self.output_path): os.mkdir(self.output_path) #ESA APIs self.api_url = 'https://scihub.copernicus.eu/apihub/' self.credentials = config.load_credentials()['sentinel'] def search(self, omit_corners=True): # Post the query to Copernicus query = {'footprint': '"Intersects(POLYGON(({0} {1},{2} {1},{2} {3},{0} {3},{0} {1})))"'.format(self.coord['W'], self.coord['S'], self.coord['E'], self.coord['N']), 'producttype': self.producttype, 'platformname': self.platform, 'beginposition': '[{} TO {}]'.format(self.inidate, self.enddate), 'cloudcoverpercentage': '[0 TO {}]'.format(self.cloud) } data = {'format': 'json', 'start': 0, # offset 'rows': 100, 'limit': 100, 'orderby': '', 'q': ' '.join(['{}:{}'.format(k, v) for k, v in query.items()]) } response = self.session.post(self.api_url + 'search?', data=data, auth=(self.credentials['user'], self.credentials['password']), headers={'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'}) response.raise_for_status() # Parse the response json_feed = response.json()['feed'] if 'entry' in json_feed.keys(): results = json_feed['entry'] if isinstance(results, dict): # if the query returns only one product, products will be a dict not a list results = [results] else: results = [] # Remove results that are mainly corners def keep(r): for item in r['str']: if item['name'] == 'size': units = item['content'].split(' ')[1] 
mult = {'KB': 1, 'MB': 1e3, 'GB': 1e6}[units] size = float(item['content'].split(' ')[0]) * mult break if size > 0.5e6: # 500MB return True else: return False results[:] = [r for r in results if keep(r)] print('Found {} results from Sentinel'.format(json_feed['opensearch:totalResults'])) print('Retrieving {} results'.format(len(results))) return results def download(self): #results of the search results = self.search() if not isinstance(results, list): results = [results] downloaded_data = [f for f in os.listdir(self.output_path) if os.path.isfile(os.path.join(self.output_path, f))] s2_tiles = [] for r in results: url, tile_id = r['link'][0]['href'], r['title'] save_dir = os.path.join(self.output_path, '{}.zip'.format(tile_id)) if '{}.tif'.format(tile_id) in downloaded_data: print ('File {} already downloaded'.format(tile_id)) continue print('Downloading {} ...'.format(tile_id)) s2_tiles.append(tile_id) response = self.session.get(url, stream=True, allow_redirects=True, auth=(self.credentials['user'], self.credentials['password'])) with open(save_dir, 'wb') as f: f.write(response.content) return s2_tiles
wq_sat/satellites/sentinel_download.py
6,229
Given two dates and region, download N Sentinel Collections scenes from ESA Sentinel dataHUB. The downloaded Sentinel collection scenes are compatible with: S2MSI1C: Top-of-atmosphere reflectances in cartographic geometry or S2MSI2A: Bottom-of-atmosphere reflectance in cartographic geometry Parameters ---------- inidate: datetime.strptime("YYYY-MM-dd", "%Y-%m-%d") enddate: datetime.strptime("YYYY-MM-dd", "%Y-%m-%d") region: name of one reservoir saved in the "coord_reservoirs.json" file coordinates : dict. Coordinates of the region to search. Example: {"W": -2.830, "S": 41.820, "E": -2.690, "N": 41.910}} platform : str. Satellite to use from the Sentinel family producttype : str. Dataset type. cloud: int path : path Author: Daniel García Díaz Email: garciad@ifca.unican.es Institute of Physics of Cantabria (IFCA) Advanced Computing and e-Science Date: Sep 2018 -*- coding: utf-8 -*- Copyright 2018 Spanish National Research Council (CSIC) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.imports apis SubfunctionsSearch parameterswork pathESA APIs Post the query to Copernicus offset Parse the response if the query returns only one product, products will be a dict not a list Remove results that are mainly corners 500MBresults of the search
1,728
en
0.71605
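The metadata above summarises what download_sentinel does but not how it is typically driven. A usage sketch might look like the following; the region name, date range and bounding box are placeholder values, the date strings are assumed to be in the ISO form SciHub's beginposition filter accepts, and wq_sat.utils.config is assumed to already provide SciHub credentials and a data path, as the class expects.

# Illustrative usage of the download_sentinel class above (placeholder values).
from wq_sat.satellites.sentinel_download import download_sentinel

coords = {"W": -2.830, "S": 41.820, "E": -2.690, "N": 41.910}

downloader = download_sentinel(inidate='2018-09-01T00:00:00.000Z',
                               enddate='2018-09-30T23:59:59.999Z',
                               region='some_reservoir',        # placeholder region name
                               coordinates=coords,
                               platform='Sentinel-2',
                               producttype='S2MSI1C',
                               cloud=30)

results = downloader.search()    # query SciHub and drop corner-only scenes
tiles = downloader.download()    # fetch the remaining scenes as .zip files
print('Downloaded tiles:', tiles)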
from autosar.writer.writer_base import ElementWriter import autosar.constant class XMLConstantWriter(ElementWriter): def __init__(self,version, patch): super().__init__(version, patch) def getSupportedXML(self): return ['Constant'] def getSupportedCode(self): return [] def writeElementXML(self, elem): if type(elem).__name__ == 'Constant': return self.writeConstantXML(elem) else: return None def writeElementCode(self, elem, localvars): raise NotImplementedError('writeElementCode') def writeConstantXML(self,elem): lines = [] assert(isinstance(elem,autosar.constant.Constant)) lines.append('<CONSTANT-SPECIFICATION>') lines.append(self.indent('<SHORT-NAME>%s</SHORT-NAME>'%elem.name,1)) if elem.adminData is not None: lines.extend(self.indent(self.writeAdminDataXML(elem.adminData),1)) if self.version>=4.0: lines.extend(self.indent(self._writeValueXMLV4(elem.value),1)) else: lines.extend(self.indent(self._writeValueXMLV3(elem.value),1)) lines.append('</CONSTANT-SPECIFICATION>') return lines def _writeValueXMLV3(self,elem): lines=[] lines.append('<VALUE>') lines.extend(self.indent(self._writeLiteralValueXML(elem),1)) lines.append('</VALUE>') return lines def _writeLiteralValueXML(self,elem): if isinstance(elem,autosar.constant.IntegerValue): return self._writeIntegerLiteralXML(elem) elif isinstance(elem,autosar.constant.RecordValue): return self._writeRecordSpecificationXML(elem) elif isinstance(elem,autosar.constant.StringValue): return self._writeStringLiteralXML(elem) elif isinstance(elem,autosar.constant.BooleanValue): return self._writeBooleanLiteralXML(elem) elif isinstance(elem,autosar.constant.ArrayValue): return self._writeArraySpecificationXML(elem) else: raise NotImplementedError(type(elem)) def _writeIntegerLiteralXML(self,elem): assert(isinstance(elem,autosar.constant.IntegerValue)) lines=[] lines.append('<INTEGER-LITERAL>') lines.append(self.indent('<SHORT-NAME>%s</SHORT-NAME>'%elem.name,1)) tag = elem.rootWS().find(elem.typeRef).tag(self.version) lines.append(self.indent('<TYPE-TREF DEST="%s">%s</TYPE-TREF>'%(tag,elem.typeRef),1)) lines.append(self.indent('<VALUE>%d</VALUE>'%elem.value,1)) lines.append('</INTEGER-LITERAL>') return lines def _writeRecordSpecificationXML(self,elem): assert(isinstance(elem,autosar.constant.RecordValue)) lines=[] lines.append('<RECORD-SPECIFICATION>') lines.append(self.indent('<SHORT-NAME>%s</SHORT-NAME>'%elem.name,1)) tag = elem.rootWS().find(elem.typeRef).tag(self.version) lines.append(self.indent('<TYPE-TREF DEST="%s">%s</TYPE-TREF>'%(tag,elem.typeRef),1)) if len(elem.elements)==0: lines.append('<ELEMENTS/>') else: lines.append(self.indent('<ELEMENTS>',1)) for childElem in elem.elements: lines.extend(self.indent(self._writeLiteralValueXML(childElem),2)) lines.append(self.indent('</ELEMENTS>',1)) lines.append('</RECORD-SPECIFICATION>') return lines def _writeStringLiteralXML(self,elem): assert(isinstance(elem,autosar.constant.StringValue)) lines=[] lines.append('<STRING-LITERAL>') lines.append(self.indent('<SHORT-NAME>%s</SHORT-NAME>'%elem.name,1)) tag = elem.rootWS().find(elem.typeRef).tag(self.version) lines.append(self.indent('<TYPE-TREF DEST="%s">%s</TYPE-TREF>'%(tag,elem.typeRef),1)) lines.append(self.indent('<VALUE>%s</VALUE>'%elem.value,1)) lines.append('</STRING-LITERAL>') return lines def _writeBooleanLiteralXML(self,elem): assert(isinstance(elem,autosar.constant.BooleanValue)) lines=[] lines.append('<BOOLEAN-LITERAL>') lines.append(self.indent('<SHORT-NAME>%s</SHORT-NAME>'%elem.name,1)) tag = elem.rootWS().find(elem.typeRef).tag(self.version) 
lines.append(self.indent('<TYPE-TREF DEST="%s">%s</TYPE-TREF>'%(tag,elem.typeRef),1)) lines.append(self.indent('<VALUE>%s</VALUE>'%('true' if elem.value is True else 'false'),1)) lines.append('</BOOLEAN-LITERAL>') return lines def _writeArraySpecificationXML(self,elem): assert(isinstance(elem,autosar.constant.ArrayValue)) lines=[] lines.append('<ARRAY-SPECIFICATION>') lines.append(self.indent('<SHORT-NAME>%s</SHORT-NAME>'%elem.name,1)) tag = elem.rootWS().find(elem.typeRef).tag(self.version) lines.append(self.indent('<TYPE-TREF DEST="%s">%s</TYPE-TREF>'%(tag,elem.typeRef),1)) if len(elem.elements)==0: lines.append('<ELEMENTS/>') else: lines.append(self.indent('<ELEMENTS>',1)) for childElem in elem.elements: lines.extend(self.indent(self._writeLiteralValueXML(childElem),2)) lines.append(self.indent('</ELEMENTS>',1)) lines.append('</ARRAY-SPECIFICATION>') return lines def _writeValueXMLV4(self, value): lines=[] lines.append('<VALUE-SPEC>') lines.extend(self.indent(self.writeValueSpecificationXML(value),1)) lines.append('</VALUE-SPEC>') return lines class CodeConstantWriter(ElementWriter): def __init__(self,version, patch): super().__init__(version, patch) def getSupportedXML(self): return [] def getSupportedCode(self): return ['Constant'] def writeElementXML(self, elem): raise NotImplementedError('writeElementXML') def writeElementCode(self, elem, localvars): if type(elem).__name__ == 'Constant': return self.writeConstantCode(elem, localvars) else: return None def writeConstantCode(self, constant, localvars): lines=[] ws=localvars['ws'] if not isinstance(constant, autosar.constant.Constant): raise ValueError('expected type autosar.constant.Constant') if constant.value is not None: dataType = ws.find(constant.value.typeRef, role='DataType') constructor=None if dataType is None: raise ValueError('invalid reference: '+constant.value.typeRef) if isinstance(constant.value, autosar.constant.ArrayValue): initValue = self._writeArrayValueConstantCode(constant.value, localvars) elif isinstance(constant.value, autosar.constant.IntegerValue): initValue = self._writeIntegerValueConstantCode(constant.value, localvars) elif isinstance(constant.value, autosar.constant.StringValue): initValue = self._writeStringValueConstantCode(constant.value, localvars) elif isinstance(constant.value, autosar.constant.BooleanValue): initValue = self._writeBooleanValueConstantCode(constant.value, localvars) elif isinstance(constant.value, autosar.constant.RecordValue): initValue = self._writeRecordValueConstantCode(constant.value, localvars) else: raise ValueError('unknown value type: '+type(constant.value)) params=[repr(constant.name)] if ws.roles['DataType'] is not None: params.append(repr(dataType.name)) #use name only else: params.append(repr(dataType.ref)) #use full reference if initValue is not None: if isinstance(initValue, list): lines.extend(self.writeDictCode('initValue', initValue)) params.append('initValue') else: params.append(initValue) else: print(constant.name) if constant.adminData is not None: param = self.writeAdminDataCode(constant.adminData, localvars) assert(len(param)>0) params.append('adminData='+param) lines.append("package.createConstant(%s)"%(', '.join(params))) return lines def _writeArrayValueConstantCode(self, value, localvars): ws=localvars['ws'] assert(isinstance(value, autosar.constant.ArrayValue)) params=[] for elem in value.elements: if isinstance(elem, autosar.constant.ArrayValue): initValue = self._writeArrayValueConstantCode(elem, localvars) elif isinstance(elem, 
autosar.constant.IntegerValue): initValue = self._writeIntegerValueConstantCode(elem, localvars) elif isinstance(elem, autosar.constant.StringValue): initValue = self._writeStringValueConstantCode(elem, localvars) elif isinstance(elem, autosar.constant.BooleanValue): initValue = self._writeBooleanValueConstantCode(elem, localvars) elif isinstance(elem, autosar.constant.RecordValue): initValue = self._writeRecordValueConstantCode(elem, localvars) if isinstance(initValue, list): initValue="{%s}"%(', '.join(initValue)) #join any inner record init values else: raise ValueError('unknown value type: '+type(constant.value)) params.append(initValue) if len(params)>0: return "[%s]"%(', '.join(params)) return None def _writeIntegerValueConstantCode(self, value, localvars): return str(value.value) def _writeStringValueConstantCode(self, value, localvars): return repr(value.value) def _writeBooleanValueConstantCode(self, value, localvars): return str(value.value) def _writeRecordValueConstantCode(self, value, localvars): ws=localvars['ws'] assert(isinstance(value, autosar.constant.RecordValue)) params=[] for elem in value.elements: if isinstance(elem, autosar.constant.ArrayValue): initValue = self._writeArrayValueConstantCode(elem, localvars) elif isinstance(elem, autosar.constant.IntegerValue): initValue = self._writeIntegerValueConstantCode(elem, localvars) elif isinstance(elem, autosar.constant.StringValue): initValue = self._writeStringValueConstantCode(elem, localvars) elif isinstance(elem, autosar.constant.BooleanValue): initValue = self._writeBooleanValueConstantCode(elem, localvars) elif isinstance(elem, autosar.constant.RecordValue): initValue = self._writeRecordValueConstantCode(elem, localvars) if isinstance(initValue, list): initValue="{%s}"%(', '.join(initValue)) #join any inner record init values else: raise ValueError('unknown value type: '+type(constant.value)) params.append('"%s": %s'%(elem.name, initValue)) if len(params)>0: text = "{%s}"%(', '.join(params)) if len(text)>200: #line will be way too long return params else: return text return None
autosar/writer/constant_writer.py
11,086
use name onlyuse full referencejoin any inner record init valuesjoin any inner record init valuesline will be way too long
122
en
0.323139
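The writers above all follow the same pattern: build a flat list of XML lines, indenting child elements one level before appending them to the parent's list. A small standalone sketch of that pattern, shaped like _writeIntegerLiteralXML, is given below. In the real writer the TYPE-TREF DEST attribute is resolved from the workspace via elem.rootWS().find(elem.typeRef).tag(version); here it is a fixed placeholder, and the constant name, type reference and two-space indent width are made up.

# Standalone sketch of the line-list/indent pattern used by the writers above
# (not autosar's own ElementWriter; names and the DEST value are placeholders).
def indent(lines, level, width=2):
    pad = ' ' * (width * level)
    if isinstance(lines, str):
        return pad + lines
    return [pad + line for line in lines]

def write_integer_literal(name, type_ref, value, dest='INTEGER-TYPE'):
    lines = ['<INTEGER-LITERAL>']
    lines.append(indent('<SHORT-NAME>%s</SHORT-NAME>' % name, 1))
    lines.append(indent('<TYPE-TREF DEST="%s">%s</TYPE-TREF>' % (dest, type_ref), 1))
    lines.append(indent('<VALUE>%d</VALUE>' % value, 1))
    lines.append('</INTEGER-LITERAL>')
    return lines

print('\n'.join(write_integer_literal('EngineSpeed_IV', '/DataTypes/uint16', 0)))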
# INIT
data = []
numeric = []
normal = []
keyL = []
KeyL = []

key = input("Enter Key Value: ")

# File - Load Function
def load(file):
    handle = open(file)
    return handle.read()

# Text Format: strip spaces, punctuation and newlines, then split the
# characters into letters (normal) and digits (numeric)
def form(file):
    format = load(file)
    format = format.replace(' ', '')
    format = format.replace(',', '')
    format = format.replace('-', '')
    format = format.replace('–', '')
    format = format.replace('—', '')
    format = format.replace('.', '')
    format = format.replace(';', '')
    format = format.replace('\n', '')
    # ADDS TO LIST
    for letter in format:
        data.append(letter)
    # REMOVE NUM
    global numbers
    numbers = ""
    for letter in data:
        if not letter.isdigit():
            normal.append(letter)
        else:
            numeric.append(letter)
            numbers = ''.join(numeric)
    return format, numbers

# Mod Inv: modular multiplicative inverse of a mod m (falls back to 1 if none exists)
def modInverse(a, m):
    a = a % m
    for x in range(1, m):
        if (a * x) % m == 1:
            return x
    return 1

# Calc dif: absolute difference between a and b
def dif(a, b):
    if a > b:
        return a - b
    if a < b:
        return b - a
    return 0

# Key Creator: repeat the key until it covers the plaintext length,
# then store it letter by letter in KeyL
def getKey(key):
    lenKey = len(key)
    lenPtext = len(normal)
    difP = lenPtext / lenKey  # Calc diff of Plain text
    if difP % 1 == 0:
        KEY = key * int(difP)
    else:
        KEY = key * (int(difP) + 1)
    keyL.append(KEY)
    for word in keyL:
        for letter in word:
            KeyL.append(letter)

for i in range(2):
    print(i)
print("test")

form('project2plaintext.txt.txt')
# print(len(normal))
# print(numbers)
# print(dif(len(getKey(key)), len(normal)))
getKey(key)
print(KeyL)
crypto/vigenere/crypto.py
1,817
INIT File - Load Function Text FormatADDS TO LISTREMOVE NUMMod InvCalc difKey CreatorCalc diff of Plain text print(len(normal))print(numbers)print(dif(len(getKey(key)),len(normal)))
181
en
0.383185
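The script above builds the repeated key stream (KeyL) but stops before the actual substitution step of the Vigenère cipher. For completeness, a compact sketch of that missing step is shown below; it assumes the plaintext and key contain only ASCII letters, which is roughly what form() produces after stripping punctuation.

# Vigenère substitution step (sketch; assumes letters-only plaintext and key).
def vigenere_encrypt(plaintext, key):
    keystream = (key * len(plaintext)).upper()   # repeat the key to cover the text
    out = []
    for p, k in zip(plaintext.upper(), keystream):
        out.append(chr((ord(p) - 65 + ord(k) - 65) % 26 + 65))
    return ''.join(out)

print(vigenere_encrypt('ATTACKATDAWN', 'LEMON'))   # -> 'LXFOPVEFRNHR'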